metafor/NAMESPACE

exportPattern("^[^\\.]")

import(stats)
import(utils)
import(graphics)
import(grDevices)
import(methods)
import(Matrix)

importFrom(nlme, ranef)
export(ranef)

import(mathjaxr)

S3method("[", list.rma)
S3method("$<-", list.rma)
S3method("[", escalc)
S3method(addpoly, default)
S3method(addpoly, rma)
S3method(aggregate, escalc)
S3method(AIC, rma)
S3method(anova, rma)
S3method(as.data.frame, list.rma)
S3method(as.matrix, list.rma)
S3method(baujat, rma)
S3method(BIC, rma)
S3method(blup, rma.uni)
S3method(cbind, escalc)
S3method(coef, rma)
S3method(coef, summary.rma)
S3method(coef, permutest.rma.uni)
S3method(confint, rma.glmm)
S3method(confint, rma.mh)
S3method(confint, rma.mv)
S3method(confint, rma.peto)
S3method(confint, rma.uni)
S3method(confint, rma.uni.selmodel)
S3method(confint, rma.ls)
S3method(cooks.distance, rma.mv)
S3method(cooks.distance, rma.uni)
S3method(cumul, rma.mh)
S3method(cumul, rma.peto)
S3method(cumul, rma.uni)
S3method(deviance, rma)
S3method(df.residual, rma)
S3method(dfbetas, rma.mv)
S3method(dfbetas, rma.uni)
S3method(fitstats, rma)
S3method(fitted, rma)
S3method(forest, default)
S3method(forest, rma)
S3method(forest, cumul.rma)
S3method(formula, rma)
S3method(funnel, default)
S3method(funnel, rma)
S3method(gosh, rma)
S3method(hatvalues, rma.mv)
S3method(hatvalues, rma.uni)
S3method(hc, rma.uni)
S3method(influence, rma.uni)
S3method(labbe, rma)
S3method(leave1out, rma.mh)
S3method(leave1out, rma.peto)
S3method(leave1out, rma.uni)
S3method(logLik, rma)
S3method(regplot, rma)
S3method(model.matrix, rma)
S3method(nobs, rma)
S3method(permutest, rma.uni)
S3method(plot, cumul.rma)
S3method(plot, gosh.rma)
S3method(plot, infl.rma.uni)
S3method(plot, profile.rma)
S3method(plot, rma.glmm)
S3method(plot, rma.mh)
S3method(plot, rma.peto)
S3method(plot, rma.uni)
S3method(plot, rma.uni.selmodel)
S3method(points, regplot)
S3method(predict, rma)
S3method(predict, rma.ls)
S3method(print, anova.rma)
S3method(print, confint.rma)
S3method(print, escalc)
S3method(print, fsn)
S3method(print, gosh.rma)
S3method(print, infl.rma.uni)
S3method(print, list.rma)
S3method(head, list.rma)
S3method(tail, list.rma)
S3method(print, list.confint.rma)
S3method(print, hc.rma.uni)
S3method(print, matreg)
S3method(print, permutest.rma.uni)
S3method(print, profile.rma)
S3method(print, ranktest)
S3method(print, regtest)
S3method(print, rma.glmm)
S3method(print, rma.mh)
S3method(print, rma.mv)
S3method(print, rma.peto)
S3method(print, rma.uni)
S3method(print, robust.rma)
S3method(print, summary.rma)
S3method(print, tes)
S3method(print, vif.rma)
S3method(profile, rma.mv)
S3method(profile, rma.uni)
S3method(profile, rma.uni.selmodel)
S3method(profile, rma.ls)
S3method(qqnorm, rma.glmm)
S3method(qqnorm, rma.mh)
S3method(qqnorm, rma.mv)
S3method(qqnorm, rma.peto)
S3method(qqnorm, rma.uni)
S3method(radial, rma)
S3method(ranef, rma.mv)
S3method(ranef, rma.uni)
S3method(ranktest, default)
S3method(ranktest, rma)
S3method(rbind, escalc)
S3method(regtest, default)
S3method(regtest, rma)
S3method(reporter, rma.uni)
S3method(residuals, rma)
S3method(robust, rma.mv)
S3method(robust, rma.uni)
S3method(selmodel, rma.uni)
S3method(rstandard, rma.mh)
S3method(rstandard, rma.mv)
S3method(rstandard, rma.peto)
S3method(rstandard, rma.uni)
S3method(rstudent, rma.mh)
S3method(rstudent, rma.mv)
S3method(rstudent, rma.peto)
S3method(rstudent, rma.uni)
S3method(simulate, rma)
S3method(summary, escalc)
S3method(summary, rma)
S3method(tes, default)
S3method(tes, rma)
S3method(trimfill, rma.uni)
S3method(update, rma)
S3method(vif, rma)
S3method(vcov, rma)
S3method(weights, rma.glmm)
S3method(weights, rma.mh)
S3method(weights, rma.mv)
S3method(weights, rma.peto)
S3method(weights, rma.uni)

metafor/README.md

metafor: A Meta-Analysis Package for R
======================================

[![License: GPL (>=2)](https://img.shields.io/badge/license-GPL-blue)](https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
[![R build status](https://github.com/wviechtb/metafor/workflows/R-CMD-check/badge.svg)](https://github.com/wviechtb/metafor/actions)
[![Code Coverage](https://codecov.io/gh/wviechtb/metafor/branch/master/graph/badge.svg)](https://codecov.io/gh/wviechtb/metafor)
[![CRAN Version](https://www.r-pkg.org/badges/version/metafor)](https://cran.r-project.org/package=metafor)
[![devel Version](https://img.shields.io/badge/devel-3.0--2-brightgreen.svg)](https://www.metafor-project.org/doku.php/installation#development_version)
[![Monthly Downloads](https://cranlogs.r-pkg.org/badges/metafor)](https://cranlogs.r-pkg.org/badges/metafor)
[![Total Downloads](https://cranlogs.r-pkg.org/badges/grand-total/metafor)](https://cranlogs.r-pkg.org/badges/grand-total/metafor)

## Description

The `metafor` package is a comprehensive collection of functions for conducting meta-analyses in R. The package includes functions to calculate various effect sizes or outcome measures, fit fixed-, random-, and mixed-effects models to such data, carry out moderator and meta-regression analyses, and create various types of meta-analytical plots (e.g., forest, funnel, radial, L'Abbé, Baujat, bubble, and GOSH plots).

For meta-analyses of binomial and person-time data, the package also provides functions that implement specialized methods, including the Mantel-Haenszel method, Peto's method, and a variety of suitable generalized linear (mixed-effects) models (i.e., mixed-effects logistic and Poisson regression models).

Finally, the package provides functionality for fitting meta-analytic multivariate/multilevel models that account for non-independent sampling errors and/or true effects (e.g., due to the inclusion of multiple treatment studies, multiple endpoints, or other forms of clustering). Network meta-analyses and meta-analyses accounting for known correlation structures (e.g., due to phylogenetic relatedness) can also be conducted.
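A typical analysis first computes the chosen effect size measure with `escalc()` and then fits a meta-analytic model with `rma()`. The following minimal sketch uses the `dat.bcg` dataset (results from the classic BCG vaccine trials) that ships with the package:

```r
library(metafor)

# compute log risk ratios and corresponding sampling variances
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

# fit a random-effects model
res <- rma(yi, vi, data=dat)
summary(res)

# forest plot of the study outcomes and the pooled estimate
forest(res)
```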
## Package Website

The `metafor` package website can be found at [https://www.metafor-project.org](https://www.metafor-project.org).

On the website, you can find:

* some [news](https://www.metafor-project.org/doku.php/news:news) concerning the package and/or its development,
* a more detailed description of the [package features](https://www.metafor-project.org/doku.php/features),
* a log of the [package updates](https://www.metafor-project.org/doku.php/updates) that have been made over the years,
* a [to-do list](https://www.metafor-project.org/doku.php/todo) and a description of planned features to be implemented in the future,
* information on how to [download and install](https://www.metafor-project.org/doku.php/installation) the package,
* information on how to obtain [documentation and help](https://www.metafor-project.org/doku.php/help) with using the package,
* some [analysis examples](https://www.metafor-project.org/doku.php/analyses) that illustrate various models, methods, and techniques,
* a little showcase of [plots and figures](https://www.metafor-project.org/doku.php/plots) that can be created with the package,
* some [tips and notes](https://www.metafor-project.org/doku.php/tips) that may be useful when working with the package,
* a list of people that have in some shape or form [contributed](https://www.metafor-project.org/doku.php/contributors) to the development of the package,
* a [frequently asked questions](https://www.metafor-project.org/doku.php/faq) section, and
* some [links](https://www.metafor-project.org/doku.php/links) to other websites related to software for meta-analysis.

## Documentation

A good starting place for those interested in using the `metafor` package is the following paper:

Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. *Journal of Statistical Software, 36*(3), 1-48. [https://doi.org/10.18637/jss.v036.i03](https://doi.org/10.18637/jss.v036.i03)

In addition to reading the paper, carefully read the [package intro](https://wviechtb.github.io/metafor/reference/metafor-package.html) and then the help pages for the [`escalc`](https://wviechtb.github.io/metafor/reference/escalc.html) and the [`rma.uni`](https://wviechtb.github.io/metafor/reference/rma.uni.html) functions (or the [`rma.mh`](https://wviechtb.github.io/metafor/reference/rma.mh.html), [`rma.peto`](https://wviechtb.github.io/metafor/reference/rma.peto.html), [`rma.glmm`](https://wviechtb.github.io/metafor/reference/rma.glmm.html), and [`rma.mv`](https://wviechtb.github.io/metafor/reference/rma.mv.html) functions if you intend to use these methods). The help pages for these functions provide links to many additional functions, which can be used after fitting a model.

You can also read the entire documentation online at [https://wviechtb.github.io/metafor/](https://wviechtb.github.io/metafor/) (where it is nicely formatted, equations are shown correctly, and the output from all examples is provided).

## Installation

The current official (i.e., [CRAN](https://cran.r-project.org/package=metafor)) release can be installed within R with:

```r
install.packages("metafor")
```

The development version of the package can be installed with:

```r
install.packages("remotes")
remotes::install_github("wviechtb/metafor")
```

This builds the package from source based on the current version on [GitHub](https://github.com/wviechtb/metafor).

## Meta

The metafor package was written by [Wolfgang Viechtbauer](https://www.wvbauer.com/). It is licensed under the [GNU General Public License](https://www.gnu.org/licenses/old-licenses/gpl-2.0.txt). For citation info, type `citation(package='metafor')` in R.
To report any issues or bugs or to suggest enhancements to the package, please go [here](https://github.com/wviechtb/metafor/issues).

metafor/data/

Binary R data files (.rda) for the bundled datasets (binary contents omitted):

dat.raudenbush1985.rda, dat.hine1989.rda, dat.pritz1997.rda, dat.bourassa1996.rda, dat.bornmann2007.rda, dat.egger2001.rda, dat.viechtbauer2021.rda, dat.kalaian1996.rda, dat.graves2010.rda, dat.berkey1998.rda, dat.molloy2014.rda, dat.pignon2000.rda, dat.laopaiboon2015.rda, dat.hasselblad1998.rda, dat.crede2010.rda, dat.senn2013.rda, dat.lim2014.rda, dat.vanhowe1999.rda, dat.debruin2009.rda, dat.assink2016.rda, dat.craft2003.rda, dat.damico2009.rda, dat.normand1999.rda, dat.lau1992.rda, dat.dorn2007.rda, dat.begg1989.rda, dat.maire2019.rda, dat.lopez2019.rda, dat.bonett2010.rda, dat.bcg.rda, dat.lee2004.rda, dat.baskerville2012.rda, dat.linde2005.rda, dat.moura2021.rda, dat.gibson2002.rda, dat.riley2003.rda, dat.nielweise2007.rda, dat.ishak2007.rda, dat.obrien2003.rda, dat.colditz1994.rda, dat.hahn2001.rda, dat.nielweise2008.rda, dat.yusuf1985.rda
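These datasets become available after loading the package. As a minimal sketch, the teacher-expectancy data documented below can be analyzed with a random-effects model and, using the `weeks` variable as a moderator, with a meta-regression model:

```r
library(metafor)

# random-effects model for the standardized mean differences
res <- rma(yi, vi, data=dat.raudenbush1985)
summary(res)

# meta-regression with weeks of prior teacher-student contact as moderator
res.weeks <- rma(yi, vi, mods = ~ weeks, data=dat.raudenbush1985)
summary(res.weeks)
```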
6|53R*6q)b"&ƖƆXܦ"w/cyy9uofiGִ#{.4j "Dm0F6EMޯQ#A metafor/data/dat.nielweise2007.rda0000644000176200001440000000075414030671440016361 0ustar liggesusersmnPO|Pz}lhTUB[Fv98Tb`+[nR w!VD/3s.L<{7{M| >XT!a;ߥeNM\ZF.anJg@)Tf se֙ٿ6}:u=-;w[4Ih%93FXݾ=fkiBi1VvۮKdY*EpIq?#T3R+s|QiIqjA>>PU\A=/#y `3 rk=i95/Ijي^x[p װ ހs` ܵ'L FEXf:1}뾽.Wd8Lb#;2qңX>?'?0metafor/data/dat.ishak2007.rda0000644000176200001440000000310714030671441015470 0ustar liggesusersXRGcc0~aTɏ`AHH-o>ݡUEuPmyjebԂ n5mʋήZo[]ߖރ<x2z CK~?h>Kbx}<}3Z\/~xgBG,ў!Е B[<.I>jbx, |>+hg:gCߟ$4!8BÇ-Qh{DfcPgܟ=TͮCO@E;'=Ł}/ye"l4ڣo7_J뒙λx Z.hL-iu25 ztY*-Ǫq>:̺:,V5N䏥yφӎ(RUPL?u%kio:ky`_98K=S`tAw/yM׉c#_N?dpsr/lLhpHhpH[/{ XQ;o[o 4~Q:PQg2d)V_6h]lo}J5_{!ԖTWaG:ꑒSµ](cGwlO?metafor/data/dat.obrien2003.rda0000644000176200001440000000201714045213027015640 0ustar liggesusersXMlF~MB[  Qq6Ҥ*P@7d-ڕm 'zI9!U G.zAP~pUL։җ޼{39y,+-@ANC S_Y42venͳ2؅ڑ r 4B ]CO EЫ]s4ּY~gSk>\Y͍ڹ&mәq;[_;-RXݧU7{¤;=?;X:ײxݜΞ,oXIi^=ƚ4yuXemu,SfԳt+fx4ޟ[~>[[tҾ}ZyKkmF *Ax=CEJr/x]O_ak3,GD;* %N֙TGz5t|T֘OlZuE;~]T;IWkW?.#ӮR3'T/D|Ɠϓ!Dc 'b::OWzκL%oΙW!!DnγS@ 4D}"iŸo0/lp[}qσ96_aq:~LӦSs7sаL=/3%E̽| pp̯3,_[ey D|oGϠ!L/vMc_8m2A{{qI/`cAsfמ􃖵 '7%y|VfYMɚs2pVVkZjI+Wopm镧n`6g/ E ́Y礄x@|4DIbH2,IIIʒi "c bJFN:i褡N:i褡N:i0`H# F4i0`aAiUה>+metafor/data/dat.colditz1994.rda0000644000176200001440000000111314030671436016056 0ustar liggesuserskAߦaR1/ЃH1=(jmN7cN-I=/ TTgAbN3nR**Ûy~ͼ7tT"7Òr7ڭl"X'E[33 T6Xى_A쳶&(o};T2dhW+ػS(d^F<ԫL\LXLicW,[dG\ 8-hCoݿsKkmwu)J8G \<8+HK;$)w/G=.<cV <SƘFEI smxw6' ?5>U:zūp##hl=Q[w6;8X?Wh]T ?M_!:r8y"oykgѥ픯t4R]o{CWZ2R}R \̓+3$Obvdo(Y|%^{)[c˂d#U'XW+Ka[ ܨ0`<)imңmetafor/data/dat.hahn2001.rda0000644000176200001440000000055214030671444015305 0ustar liggesusersmKO@B FƥA i#Rb"+@6e4_)wxi{gni^uNK4%SۭXZf rsQo0LBM2퇋cI12&l] 8ѺmXۘo@ <=Mo8rvϐ30:ъzwi,.{VΞ6)I>s+SIDJ]flf wU峊V$a%~V| g13-a{yR.[X [>\n|ZUmetafor/data/dat.nielweise2008.rda0000644000176200001440000000057014030671440016356 0ustar liggesusersR=OA]8OQƒ1ĘhHhWYK`/[t ? + [{; \e.fwvcfY<9[e-9Rk_a, :\bƬN%,8He˼UErΥ)a+-T<"yA[,œX ٌT+şP:;  x^94 D5:5H om1sԵ9<y&i{;6ѫo;BB=J]#yWtq!ckpyO#S (ۡI,Tg,Ԇ ~K\q8w+WhQ;ƹf\#l>9bޤ<1Fy4Սo4q͞fZ<{+|.|hwLiuן}w}Тnf*z8R:g.!A3f:AWnQzpa[{Rnأw=˖6gv2a,:jK٥L0"++Ҙ &'db]Fl4iAڸ]pdS RXYlp+zQ%|CHC̵}Gfzf٘'zn[sF43WMU9-z?ɏ7 NN0w݀i}R/h1/\ E^1pFjuZ-nׂc s}Zp7` K|BۺȋsRT۶tXIQ$9Rܥדo=rn]b^ eY_19hɓ_dY݇~~(\n<ǖa`JU?WT_"r,׾A\_;FFL~׊^r7+}Wo]>疓 R2!jRavLU׏jȎ *K[{`0rq^і6RFe=]p~n1U|/Y\2fR]~IqkkA}H'TEr\:{+JX{S)ާ8qb:j\\-e TE !9>ӺI]0W#gPvnu_7"BHmetafor/data/dat.yusuf1985.rda0000644000176200001440000000332514030671440015563 0ustar liggesusersYoU_q|Ih 0_IZHک×4q(N {o-uT qT 8!!!q?  .!nʌwf$J{)7yD]|{WӴIP䩥cEM"=.v}.7Y,NL AF2uxn|i-%rCNDnLR>KՐj^NHTR)=躢WO(zU%]tY! fH7CNHTR I0 C!PH0 C!$^Tt%ՕWWayP^2 AATT R/( o[A y s-A;:л]sZmn쫴mߪ[6=^t=jhYjެZVw5|XV3m[I^|nsYxuK y=}Zr 틾شY c!V^TO/,ΆunSpB|Sd6x`atLBkNIfElӭsy]EtE85):ٙIkE474ytK*K=}̬,,9gJL\=iѼ Ϣ |pJT)sϯNk.^+lr\%Ua aq޹n OEf\s\s5ξ.afGty@ =cȖB83%1:mú͆%G2 Wh5qX]4.tDb+-e*h_ mTvKw{E8+PѴ̔ Hﴵ,FgMk?xt4ӋȮ^>|ّΈg?";dDIsp"O7t#\\['%ؽiso_5[wUmpcf7ۻC#_>Ѳd/S`h5`UIg`?1(gt6݊]cEPǿ>)yd?_˟&@"= O<: GaCi:[Kp(b=S_,Fuzvƛ<=V9,'1 7Lq:5CrA ? 
"y RMoSӏ qd 0ƣaGgEBЂ} k-yA-R!ү7pvPə&ȓZ0Wq,Oxθ/1L;rmт75G*b)qc )ЦXQX&B;i:Gc{#w r6NkOhr>w5K||hi_Ң=_>ȋPo %>{ȓ|qYH`k; u3ǹÑ>ߦhx8#M_,O =Kqa.NxMnT5[8oօ/yby箎1:l8¿y7?metafor/man/0000755000176200001440000000000014055736403012471 5ustar liggesusersmetafor/man/dat.raudenbush1985.Rd0000644000176200001440000001063114055736403016217 0ustar liggesusers\name{dat.raudenbush1985} \docType{data} \alias{dat.raudenbush1985} \title{Studies on Assessing the Effects of Teacher Expectations on Pupil IQ} \description{Results from 19 studies examining how teachers' expectations about their pupils can influence actual IQ levels.} \usage{dat.raudenbush1985} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{weeks} \tab \code{numeric} \tab weeks of contact prior to expectancy induction \cr \bold{setting} \tab \code{character} \tab whether tests were group or individually administered \cr \bold{tester} \tab \code{character} \tab whether test administrator was aware or blind \cr \bold{n1i} \tab \code{numeric} \tab sample size of experimental group \cr \bold{n2i} \tab \code{numeric} \tab sample size of control group \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance } } \details{ In the so-called \sQuote{Pygmalion study} (Rosenthal & Jacobson, 1968), \dQuote{all of the predominantly poor children in the so-called Oak elementary school were administered a test pretentiously labeled the \sQuote{Harvard Test of Inflected Acquisition.} After explaining that this newly designed instrument had identified those children most likely to show dramatic intellectual growth during the coming year, the experimenters gave the names of these \sQuote{bloomers} to the teachers. In truth, the test was a traditional IQ test and the \sQuote{bloomers} were a randomly selected 20\% of the student population. After retesting the children 8 months later, the experimenters reported that those predicted to bloom had in fact gained significantly more in total IQ (nearly 4 points) and reasoning IQ (7 points) than the control group children. Further, at the end of the study, the teachers rated the experimental children as intellectually more curious, happier, better adjusted, and less in need of approval than their control group peers} (Raudenbush, 1984). In the following years, a series of studies were conducted attempting to replicate this rather controversial finding. However, the great majority of those studies were unable to demonstrate a statistically significant difference between the two experimental groups in terms of IQ scores. Raudenbush (1984) conducted a meta-analysis based on 19 such studies to further examine the evidence for the existence of the \sQuote{Pygmalion effect}. The dataset includes the results from these studies. The outcome measure used for the meta-analysis was the standardized mean difference (\code{yi}), with positive values indicating that the supposed \sQuote{bloomers} had, on average, higher IQ scores than those in the control group. The \code{weeks} variable indicates the number of weeks of prior contact between teachers and students before the expectancy induction. Testing was done either in a group setting or individually, which is indicated by the \code{setting} variable. 
Finally, the \code{tester} variable indicates whether the test administrators were either aware or blind to the researcher-provided designations of the children's intellectual potential.

The data in this dataset were obtained from Raudenbush and Bryk (1985) with information on the \code{setting} and \code{tester} variables extracted from Raudenbush (1984).
}
\source{
Raudenbush, S. W. (1984). Magnitude of teacher expectancy effects on pupil IQ as a function of the credibility of expectancy induction: A synthesis of findings from 18 experiments. \emph{Journal of Educational Psychology}, \bold{76}(1), 85--97. \verb{https://doi.org/10.1037/0022-0663.76.1.85}

Raudenbush, S. W., & Bryk, A. S. (1985). Empirical Bayes meta-analysis. \emph{Journal of Educational Statistics}, \bold{10}(2), 75--98. \verb{https://doi.org/10.3102/10769986010002075}
}
\examples{
### copy data into 'dat' and examine data
dat <- dat.raudenbush1985
dat

### random-effects model
res <- rma(yi, vi, data=dat)
res

### create weeks.c variable where values larger than 3 are set to 3
dat$weeks.c <- ifelse(dat$weeks > 3, 3, dat$weeks)

### mixed-effects model with weeks.c variable as moderator
res <- rma(yi, vi, mods=~weeks.c, data=dat, digits=3)
res
}
\keyword{datasets}
metafor/man/dat.vanhowe1999.Rd0000644000176200001440000000763414055736403015534 0ustar liggesusers\name{dat.vanhowe1999}
\docType{data}
\alias{dat.vanhowe1999}
\title{Studies on the Association between Circumcision and HIV Infection}
\description{Results from 33 studies examining the association between male circumcision and HIV infection. \loadmathjax}
\usage{
dat.vanhowe1999
}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{study} \tab \code{character} \tab study author \cr
\bold{category} \tab \code{character} \tab study type (high-risk group, partner study, or population survey) \cr
\bold{non.pos} \tab \code{numeric} \tab number of non-circumcised HIV positive cases \cr
\bold{non.neg} \tab \code{numeric} \tab number of non-circumcised HIV negative cases \cr
\bold{cir.pos} \tab \code{numeric} \tab number of circumcised HIV positive cases \cr
\bold{cir.neg} \tab \code{numeric} \tab number of circumcised HIV negative cases }
}
\details{
The 33 studies provide data in terms of \mjeqn{2 \times 2}{2x2} tables in the form:
\tabular{lcc}{
 \tab HIV positive \tab HIV negative \cr
non-circumcised \tab \code{non.pos} \tab \code{non.neg} \cr
circumcised \tab \code{cir.pos} \tab \code{cir.neg} }
The goal of the meta-analysis was to examine whether the risk of an HIV infection differs between non-circumcised and circumcised men.

The dataset is interesting because it can be used to illustrate the difference between naively pooling results by summing up the counts across studies and then computing the odds ratio based on the aggregated table (as was done by Van Howe, 1999) and conducting a proper meta-analysis (as illustrated by O'Farrell & Egger, 2000). In fact, a proper meta-analysis shows that the HIV infection risk is on average higher in non-circumcised men, which is the opposite of what the naive pooling approach yields (which makes this an illustration of Simpson's paradox).
}
\source{
Van Howe, R. S. (1999). Circumcision and HIV infection: Review of the literature and meta-analysis. \emph{International Journal of STD & AIDS}, \bold{10}(1), 8--16. \verb{https://doi.org/10.1258/0956462991913015}
}
\references{
O'Farrell, N., & Egger, M. (2000). Circumcision in men and the prevention of HIV infection: A 'meta-analysis' revisited.
\emph{International Journal of STD & AIDS}, \bold{11}(3), 137--142. \verb{https://doi.org/10.1258/0956462001915480}
}
\examples{
### copy data into 'dat'
dat <- dat.vanhowe1999

### naive pooling by summing up the counts within categories and then
### computing the odds ratios and corresponding confidence intervals
cat1 <- with(dat[dat$category=="high-risk group",],
   escalc(measure="OR", ai=sum(non.pos), bi=sum(non.neg), ci=sum(cir.pos), di=sum(cir.neg)))
cat2 <- with(dat[dat$category=="partner study",],
   escalc(measure="OR", ai=sum(non.pos), bi=sum(non.neg), ci=sum(cir.pos), di=sum(cir.neg)))
cat3 <- with(dat[dat$category=="population survey",],
   escalc(measure="OR", ai=sum(non.pos), bi=sum(non.neg), ci=sum(cir.pos), di=sum(cir.neg)))
summary(cat1, transf=exp, digits=2)
summary(cat2, transf=exp, digits=2)
summary(cat3, transf=exp, digits=2)

### naive pooling across all studies
all <- escalc(measure="OR", ai=sum(dat$non.pos), bi=sum(dat$non.neg),
              ci=sum(dat$cir.pos), di=sum(dat$cir.neg))
summary(all, transf=exp, digits=2)

### calculate log odds ratios and corresponding sampling variances
dat <- escalc(measure="OR", ai=non.pos, bi=non.neg, ci=cir.pos, di=cir.neg, data=dat)
dat

### random-effects model
res <- rma(yi, vi, data=dat, method="DL")
res
predict(res, transf=exp, digits=2)

### random-effects model within subgroups
res <- rma(yi, vi, data=dat, method="DL", subset=category=="high-risk group")
predict(res, transf=exp, digits=2)
res <- rma(yi, vi, data=dat, method="DL", subset=category=="partner study")
predict(res, transf=exp, digits=2)
res <- rma(yi, vi, data=dat, method="DL", subset=category=="population survey")
predict(res, transf=exp, digits=2)
}
\keyword{datasets}
metafor/man/dat.gibson2002.Rd0000644000176200001440000001161314055736403015316 0ustar liggesusers\name{dat.gibson2002}
\docType{data}
\alias{dat.gibson2002}
\title{Studies on the Effectiveness of Self-Management Education and Regular Medical Review for Adults with Asthma}
\description{Results from 15 trials examining the effectiveness of self-management education and regular medical review for adults with asthma.}
\usage{dat.gibson2002}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{author} \tab \code{character} \tab first author of study \cr
\bold{year} \tab \code{numeric} \tab publication year \cr
\bold{n1i} \tab \code{numeric} \tab number of participants in the intervention group \cr
\bold{m1i} \tab \code{numeric} \tab mean number of days off work/school in the intervention group \cr
\bold{sd1i} \tab \code{numeric} \tab standard deviation of the number of days off work/school in the intervention group \cr
\bold{n2i} \tab \code{numeric} \tab number of participants in the control/comparison group \cr
\bold{m2i} \tab \code{numeric} \tab mean number of days off work/school in the control/comparison group \cr
\bold{sd2i} \tab \code{numeric} \tab standard deviation of the number of days off work/school in the control/comparison group \cr
\bold{ai} \tab \code{numeric} \tab number of participants who had one or more days off work/school in the intervention group \cr
\bold{bi} \tab \code{numeric} \tab number of participants who had no days off work/school in the intervention group \cr
\bold{ci} \tab \code{numeric} \tab number of participants who had one or more days off work/school in the control/comparison group \cr
\bold{di} \tab \code{numeric} \tab number of participants who had no days off work/school in the control/comparison group \cr
\bold{type} \tab \code{numeric} \tab numeric code for the
intervention type (see \sQuote{Details}) } } \details{ Asthma management guidelines typically recommend for patients to receive education and regular medical review. While self-management programs have been shown to increase patient knowledge, it is less clear to what extent they actually impact health outcomes. The systematic review by Gibson et al. (2002) examined the effectiveness of self-management education and regular medical review for adults with asthma. In each study, participants receiving a certain management intervention were compared against those in a control/comparison group with respect to a variety of health outcomes. One of the outcomes examined in a number of studies was the number of days off work/school. The majority of studies reporting this outcome provided means and standard deviations allowing a meta-analysis of standardized mean differences. Seven studies also reported the number of participants who had one or more days off work/school in each group. These studies could be meta-analyzed using, for example, (log) risk ratios. Finally, one could also consider a combined analysis based on standardized mean differences computed from the means and standard deviations where available and using probit transformed risk differences (which also provide estimates of the standardized mean difference) for the remaining studies. Some degree of patient education was provided in all studies. In addition, the \code{type} variable indicates what additional intervention components were included in each study: \enumerate{ \item optimal self-management (writing action plan, self-monitoring, regular medical review), \item self-monitoring and regular medical review, \item self-monitoring only, \item regular medical review only, \item written action plan only. } } \source{ Gibson, P. G., Powell, H., Wilson, A., Abramson, M. J., Haywood, P., Bauman, A., Hensley, M. J., Walters, E. H., & Roberts, J. J. L. (2002). Self-management education and regular practitioner review for adults with asthma. \emph{Cochrane Database of Systematic Reviews}, \bold{3}, CD001117. \verb{https://doi.org/10.1002/14651858.CD001117} } \examples{ ### copy data into 'dat' and examine data dat <- dat.gibson2002 dat ### fixed-effects model analysis of the standardized mean differences dat <- escalc(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat) res <- rma(yi, vi, data=dat, method="FE") print(res, digits=2) ### fixed-effects model analysis of the (log) risk ratios dat <- escalc(measure="RR", ai=ai, bi=bi, ci=ci, di=di, data=dat) res <- rma(yi, vi, data=dat, method="FE") print(res, digits=2) predict(res, transf=exp, digits=2) ### fixed-effects model analysis of the standardized mean differences and the probit transformed ### risk differences (which also provide estimates of the standardized mean difference) dat <- escalc(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat) dat <- escalc(measure="PBIT", ai=ai, bi=bi, ci=ci, di=di, data=dat, replace=FALSE) dat res <- rma(yi, vi, data=dat, method="FE") print(res, digits=2) } \keyword{datasets} metafor/man/methods.list.rma.Rd0000644000176200001440000000171114055736403016153 0ustar liggesusers\name{methods.list.rma} \alias{methods.list.rma} \alias{as.data.frame.list.rma} \alias{as.matrix.list.rma} \alias{[.list.rma} \alias{head.list.rma} \alias{tail.list.rma} \alias{$<-.list.rma} \title{Methods for 'list.rma' Objects} \description{ Methods for objects of class \code{"list.rma"}. 
} \usage{ \method{as.data.frame}{list.rma}(x, \dots) \method{as.matrix}{list.rma}(x, \dots) \method{[}{list.rma}(x, i, \dots) \method{head}{list.rma}(x, n=6L, \dots) \method{tail}{list.rma}(x, n=6L, \dots) \method{$}{list.rma}(x, name) <- value } \arguments{ \item{x}{an object of class \code{"list.rma"}.} \item{\dots}{other arguments.} } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \keyword{internal} metafor/man/dat.debruin2009.Rd0000644000176200001440000000643714055736403015504 0ustar liggesusers\name{dat.debruin2009} \docType{data} \alias{dat.debruin2009} \title{Studies on Standard Care Quality and HAART-Adherence} \description{Results from 13 trials providing information about standard care quality and HAART-adherence in control groups.} \usage{dat.debruin2009} \format{The data frame contains the following columns: \tabular{lll}{ \bold{author} \tab \code{character} \tab (first) author of study \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{scq} \tab \code{numeric} \tab standard care quality \cr \bold{ni} \tab \code{numeric} \tab number of patients in the standard care group \cr \bold{xi} \tab \code{numeric} \tab number of patients with an undetectable viral load in standard care group \cr \bold{mi} \tab \code{numeric} \tab number of patients with a detectable viral load in standard care group \cr \bold{ethnicity} \tab \code{character} \tab dominant ethnicity of the patients in the standard care group \cr \bold{patients} \tab \code{character} \tab inclusion of patients continuing or starting (a new) treatment \cr \bold{select} \tab \code{character} \tab baseline selection of patients with adherence problems or no selection \cr \bold{sens} \tab \code{character} \tab sensitivity of viral load assessments (<400 vs. >=400 copies/ml) } } \details{ Highly active antiretroviral therapy (HAART) refers to a combination of multiple antiretroviral drugs that can effectively suppress the HIV virus. However, achieving viral suppression (to the point that the virus becomes essentially undetectable in a blood sample) requires high levels of adherence to an often complicated medication regimen. A number of trials have examined various interventions that aim to increase adherence levels. In each trial, patients receiving the intervention are compared to patients in a control group receiving standard care (often referred to as \sQuote{care as usual}). However, the quality of standard care can vary substantially between these studies. de Bruin et al. (2009) assessed the quality of standard care provided (based on a quantification of the number of behavior change techniques applied) and examined to what extent the quality of standard care was related to the proportion of patients achieving effective viral suppression in the control groups. } \source{ de Bruin, M., Viechtbauer, W., Hospers, H. J., Schaalma, H. P., & Kok, G. (2009). Standard care quality determines treatment outcomes in control groups of HAART-adherence intervention studies: Implications for the interpretation and comparison of intervention effects. \emph{Health Psychology}, \bold{28}(6), 668--674. 
\verb{https://doi.org/10.1037/a0015989} } \examples{ ### copy data into 'dat' dat <- dat.debruin2009 ### calculate proportions and corresponding sampling variances dat <- escalc(measure="PR", xi=xi, ni=ni, data=dat) dat ### random-effects model res <- rma(yi, vi, data=dat) print(res, digits=2) ### mixed-effects meta-regression model with all predictors/covariates res <- rma(yi, vi, mods = ~ scq + ethnicity + patients + select + sens, data=dat) print(res, digits=3) ### mixed-effects meta-regression model with scq and ethnicity as predictors/covariates res <- rma(yi, vi, mods = ~ scq + ethnicity, data=dat) print(res, digits=3) } \keyword{datasets} metafor/man/regplot.Rd0000644000176200001440000003674014055736403014446 0ustar liggesusers\name{regplot} \alias{regplot} \alias{regplot.rma} \alias{points.regplot} \title{Scatter Plots / Bubble Plots} \description{ Function to create scatter plots / bubble plots based on meta-regression models. \loadmathjax } \usage{ regplot(x, \dots) \method{regplot}{rma}(x, mod, pred=TRUE, ci=TRUE, pi=FALSE, shade=TRUE, xlim, ylim, predlim, olim, xlab, ylab, at, digits=2L, transf, atransf, targs, level=x$level, pch=21, psize, plim=c(0.5,3), col="black", bg="darkgray", grid=FALSE, refline, label=FALSE, offset=c(1,1), labsize=1, lcol, lwd, lty, legend=FALSE, xvals, \dots) \method{points}{regplot}(x, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}, \code{"rma.mv"}, or \code{"rma.glmm"} including one or multiple moderators (or an object of class \code{"regplot"} for \code{points}).} \item{mod}{either a scalar to specify the position of the moderator variable in the model or a character string to specify the name of the moderator variable.} \item{pred}{logical to indicate whether the (marginal) regression line based on the moderator should be added to the plot (the default is \code{TRUE}). Can also be an object from \code{\link{predict.rma}}. See \sQuote{Details}.} \item{ci}{logical to indicate whether the corresponding confidence interval bounds should be added to the plot (the default is \code{TRUE}).} \item{pi}{logical to indicate whether the corresponding prediction interval bounds should be added to the plot (the default is \code{FALSE}).} \item{shade}{logical to indicate whether the confidence/prediction interval regions should be shaded (the default is \code{TRUE}). Can also be a two-element character vector to specify the colors for shading the confidence and prediction interval regions (if shading only the former, a single color can also be specified).} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{predlim}{optional argument to specify the limits of the (marginal) regression line. If unspecified, the limits are based on the range of the moderator variable.} \item{olim}{optional argument to specify observation/outcome limits. If unspecified, no limits are used.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis. If unspecified, the function tries to set an appropriate axis title.} \item{at}{position of the y-axis tick marks and corresponding labels. If unspecified, the function tries to set the tick mark positions/labels to some sensible values.} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the y-axis should be rounded. 
When specifying an integer (e.g., \code{2L}), trailing zeros after the decimal mark are dropped for the y-axis labels. When specifying a numeric value (e.g., \code{2}), trailing zeros are retained.} \item{transf}{optional argument to specify a function that should be used to transform the observed outcomes, predicted values, and confidence/prediction interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{atransf}{optional argument to specify a function that should be used to transform the y-axis labels (e.g., \code{atransf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.} \item{level}{numeric value between 0 and 100 to specify the confidence/prediction interval level (the default is to take the value from the object).} \item{pch}{plotting symbol to use for the observed outcomes. By default, a filled circle is used. Can also be a vector of values. See \code{\link{points}} for other options.} \item{psize}{optional numeric value to specify the point sizes for the observed outcomes. If unspecified, the point sizes are a function of the model weights. Can also be a vector of values. Can also be a character string (either \code{"seinv"} or \code{"vinv"}) to make the point sizes proportional to the inverse standard errors or inverse sampling variances.} \item{plim}{numeric vector of length 2 to scale the point sizes (ignored when a numeric value or vector is specified for \code{psize}). See \sQuote{Details}.} \item{col}{character string to specify the name of a color to use for plotting the observed outcomes (the default is \code{"black"}). Can also be a vector of color names.} \item{bg}{character string to specify the name of a background color for open plot symbols (the default is \code{"darkgray"}). Can also be a vector of color names.} \item{grid}{logical to specify whether a grid should be added to the plot. Can also be a color name.} \item{refline}{optional numeric value to specify the location of a horizontal reference line that should be added to the plot.} \item{label}{argument to control the labeling of the points (the default is \code{FALSE}). See \sQuote{Details}.} \item{offset}{argument to control the distance between the points and the corresponding labels. See \sQuote{Details}.} \item{labsize}{numeric value to control the size of the labels.} \item{lcol}{optional vector of (up to) four elements to specify the color of the regression line, of the confidence interval bounds, of the prediction interval bounds, and of the horizontal reference line.} \item{lty}{optional vector of (up to) four elements to specify the line type of the regression line, of the confidence interval bounds, of the prediction interval bounds, and of the horizontal reference line.} \item{lwd}{optional vector of (up to) four elements to specify the line width of the regression line, of the confidence interval bounds, of the prediction interval bounds, and of the horizontal reference line.} \item{legend}{logical to indicate whether a legend should be added to the plot (the default is \code{FALSE}). Can also be a keyword to indicate the position of the legend (see \code{\link{legend}}).} \item{xvals}{optional numeric vector to specify the values of the moderator for which predicted values should be computed. Needs to be specified when passing an object from \code{\link{predict.rma}} to the \code{pred} argument. 
See \sQuote{Details}.}
\item{\dots}{other arguments.}
}
\details{
The function draws a scatter plot of the values of a moderator variable in a meta-regression model (on the x-axis) against the observed effect sizes or outcomes (on the y-axis). The regression line from the model (with corresponding confidence interval bounds) is added to the plot by default. These types of plots are also often referred to as \sQuote{bubble plots} as the points are typically drawn in different sizes to reflect their precision or weight in the model.

By default (i.e., when \code{psize} is not specified), the size of the points is a function of the square root of the model weights. This way, their area is proportional to the weights. However, the point sizes are rescaled so that the smallest point size is \code{plim[1]} and the largest point size is \code{plim[2]}. As a result, their relative sizes (i.e., areas) no longer exactly correspond to their relative weights. If exactly relative point sizes are desired, one can set \code{plim[2]} to \code{NA}, in which case the points are rescaled so that the smallest point size corresponds to \code{plim[1]} and all other points are scaled accordingly. As a result, the largest point may be very large. Alternatively, one can set \code{plim[1]} to \code{NA}, in which case the points are rescaled so that the largest point size corresponds to \code{plim[2]} and all other points are scaled accordingly. As a result, the smallest point may be very small. To avoid the latter, one can also set \code{plim[3]}, which enforces a minimal point size. One can also set \code{psize} to a scalar (e.g., \code{psize=1}) to avoid that the points are drawn in different sizes. One can also specify the point sizes manually by passing a vector of the appropriate length to \code{psize}. Finally, one can also set \code{psize} to either \code{"seinv"} or \code{"vinv"} to make the point sizes proportional to the inverse standard errors or inverse sampling variances.

For a model with more than one predictor, the regression line reflects the \sQuote{marginal} relationship between the chosen moderator and the effect sizes or outcomes (i.e., all other moderators except the one being plotted are held constant at their means).

With the \code{label} argument, one can control whether points in the plot will be labeled. If \code{label="all"} (or \code{label=TRUE}), all points in the plot will be labeled. If \code{label="ciout"} or \code{label="piout"}, points falling outside of the confidence/prediction interval will be labeled. Alternatively, one can set this argument to a logical or numeric vector to specify which points should be labeled. The labels are placed above the points when they fall above the regression line and otherwise below. With the \code{offset} argument, one can adjust the distance between the labels and the corresponding points. This can either be a single numeric value, which is used as a multiplicative factor for the point sizes (so that the distance between labels and points is larger for larger points) or a numeric vector with two values, where the first is used as an additive factor independent of the point sizes and the second again as a multiplicative factor for the point sizes. The values are given as percentages of the y-axis range. It may take some trial and error to find two values for the \code{offset} argument so that the labels are placed right next to the boundary of the points. With \code{labsize}, one can control the size of the labels.
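As a purely illustrative sketch (using the model with absolute latitude as moderator from the \sQuote{Examples} below; the particular \code{offset} values are just a starting point that typically needs some tweaking), all points could be labeled and the label placement and size adjusted with:
\preformatted{regplot(res, mod="ablat", label=TRUE, offset=c(1,0.8), labsize=0.8)}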
One can also pass an object from \code{\link{predict.rma}} to the \code{pred} argument. This can be useful when the meta-regression model reflects a more complex relationship between the moderator variable and the effect sizes or outcomes (e.g., when using polynomials or splines) or when the model involves interactions. In this case, one also needs to specify the \code{xvals} argument. See \sQuote{Examples}. } \note{ For certain types of models, it may not be possible to draw the prediction interval bounds (if this is the case, a warning will be issued). When specifying vectors for \code{pch}, \code{psize}, \code{col}, \code{bg}, and/or \code{label}, the variables specified are assumed to be of the same length as the data passed to the model fitting function. Any subsetting and removal of studies with missing values is automatically applied to the variables specified via these arguments. If the outcome measure used for creating the plot is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{olim} argument to enforce those limits (the observed outcomes and confidence/prediction intervals cannot exceed those bounds then). } \value{ An object of class \code{"regplot"} with components: \item{slab}{the study labels} \item{ids}{the study ids} \item{xi}{the x-axis coordinates of the points that were plotted.} \item{yi}{the y-axis coordinates of the points that were plotted.} \item{pch}{the plotting symbols of the points that were plotted.} \item{psize}{the point sizes of the points that were plotted.} \item{col}{the colors of the points that were plotted.} \item{bg}{the background colors of the points that were plotted.} \item{label}{logical vector indicating whether a point was labeled or not.} Note that the object is returned invisibly. Using \code{points.regplot}, one can redraw the points in case one wants to superimpose the points on top of any elements that were added manually to the plot (see \sQuote{Examples}). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Thompson, S. G., & Higgins, J. P. T. (2002). How should meta-regression analyses be undertaken and interpreted? \emph{Statistics in Medicine}, \bold{21}(11), 1559--1573. \verb{https://doi.org/10.1002/sim.1187} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### copy BCG vaccine data into 'dat' dat <- dat.bcg ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) ############################################################################ ### fit mixed-effects model with absolute latitude as a moderator res <- rma(yi, vi, mods = ~ ablat, data=dat) res ### draw plot regplot(res, mod="ablat", xlab="Absolute Latitude") ### adjust x-axis limits and back-transform to risk ratios regplot(res, mod="ablat", xlab="Absolute Latitude", xlim=c(0,60), transf=exp) ### also extend the prediction limits for the regression line regplot(res, mod="ablat", xlab="Absolute Latitude", xlim=c(0,60), predlim=c(0,60), transf=exp) ### add the prediction interval to the plot, add a reference line at 1, and add a legend regplot(res, mod="ablat", pi=TRUE, xlab="Absolute Latitude", xlim=c(0,60), predlim=c(0,60), transf=exp, refline=1, legend=TRUE) ### label points outside of the prediction interval regplot(res, mod="ablat", pi=TRUE, xlab="Absolute Latitude", xlim=c(0,60), predlim=c(0,60), transf=exp, refline=1, legend=TRUE, label="piout", labsize=0.8) ############################################################################ ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) res ### plot the marginal relationships regplot(res, mod="ablat", xlab="Absolute Latitude") regplot(res, mod="year", xlab="Publication Year") ############################################################################ ### fit a quadratic polynomial meta-regression model res <- rma(yi, vi, mods = ~ ablat + I(ablat^2), data=dat) res ### compute predicted values using predict() xs <- seq(0,60,length=601) tmp <- predict(res, newmods=cbind(xs, xs^2)) ### can now pass these results to the 'pred' argument (and have to specify xvals accordingly) regplot(res, mod="ablat", pred=tmp, xlab="Absolute Latitude", xlim=c(0,60), xvals=xs) ### back-transform to risk ratios and add reference line regplot(res, mod="ablat", pred=tmp, xlab="Absolute Latitude", xlim=c(0,60), xvals=xs, transf=exp, refline=1) ############################################################################ ### fit a model with an interaction between a quantitative and a categorical predictor ### (note: just for illustration purposes; this model is too complex for this dataset) res <- rma(yi, vi, mods = ~ ablat * alloc, data=dat) res ### draw bubble plot but do not add regression line or CI tmp <- regplot(res, mod="ablat", xlab="Absolute Latitude", xlim=c(0,60), pred=FALSE, ci=FALSE) ### add regression lines for the three alloc levels xs <- seq(0, 60, length=100) preds <- predict(res, newmods=cbind(xs, 0, 0, 0, 0)) lines(xs, preds$pred, lwd=3) preds <- predict(res, newmods=cbind(xs, 1, 0, xs, 0)) lines(xs, preds$pred, lwd=3) preds <- predict(res, newmods=cbind(xs, 0, 1, 0, xs)) lines(xs, preds$pred, lwd=3) ### add points back to the plot (so they are on top of the lines) points(tmp) } \keyword{hplot} metafor/man/dat.curtis1998.Rd0000644000176200001440000001244614055736403015402 0ustar liggesusers\name{dat.curtis1998} \docType{data} \alias{dat.curtis1998} \title{Studies on the Effects of Elevated CO2 Levels on Woody Plant Mass} \description{Results from studies examining the effects of elevated CO2 levels on woody plant mass.} 
\usage{dat.curtis1998}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{id} \tab \code{numeric} \tab observation number \cr
\bold{paper} \tab \code{numeric} \tab paper number \cr
\bold{genus} \tab \code{character} \tab genus name \cr
\bold{species} \tab \code{character} \tab species name \cr
\bold{fungrp} \tab \code{character} \tab plant functional group \cr
\bold{co2.ambi} \tab \code{numeric} \tab ambient CO2 level (control group) \cr
\bold{co2.elev} \tab \code{numeric} \tab elevated CO2 level (treatment group) \cr
\bold{units} \tab \code{character} \tab units for CO2 exposure levels \cr
\bold{time} \tab \code{numeric} \tab maximum length of time (days) of CO2 exposure \cr
\bold{pot} \tab \code{character} \tab growing method (see \sQuote{Details}) \cr
\bold{method} \tab \code{character} \tab CO2 exposure facility (see \sQuote{Details}) \cr
\bold{stock} \tab \code{character} \tab planting stock code \cr
\bold{xtrt} \tab \code{character} \tab interacting treatment code (see \sQuote{Details}) \cr
\bold{level} \tab \code{character} \tab interacting treatment level codes (see \sQuote{Details}) \cr
\bold{m1i} \tab \code{numeric} \tab mean plant mass under elevated CO2 level (treatment group) \cr
\bold{sd1i} \tab \code{numeric} \tab standard deviation of plant mass under elevated CO2 level (treatment group) \cr
\bold{n1i} \tab \code{numeric} \tab number of observations under elevated CO2 level (treatment group) \cr
\bold{m2i} \tab \code{numeric} \tab mean plant mass under ambient CO2 level (control group) \cr
\bold{sd2i} \tab \code{numeric} \tab standard deviation of plant mass under ambient CO2 level (control group) \cr
\bold{n2i} \tab \code{numeric} \tab number of observations under ambient CO2 level (control group) }
}
\details{
The studies included in this dataset compared the total above- plus below-ground biomass (in grams) for plants that were exposed to either ambient (around 35 Pa) or elevated CO2 levels (around twice the ambient level). The \code{co2.ambi} and \code{co2.elev} variables indicate the CO2 levels in the control and treatment groups, respectively (with the \code{units} variable specifying the units for the CO2 exposure levels). Many of the studies also varied one or more additional environmental variables (defined by the \code{xtrt} and \code{level} variables):
\itemize{
\item NONE = no additional treatment factor
\item FERT = soil fertility (either a \code{CONTROL}, \code{HIGH}, or \code{LOW} level)
\item LIGHT = light treatment (always a \code{LOW} light level)
\item FERT+L = soil fertility and light (a \code{LOW} light and soil fertility level)
\item H2O = well watered vs drought (either a \code{WW} or \code{DRT} level)
\item TEMP = temperature treatment (either a \code{HIGH} or \code{LOW} level)
\item OZONE = ozone exposure (either a \code{HIGH} or \code{LOW} level)
\item UVB = ultraviolet-B radiation exposure (either a \code{HIGH} or \code{LOW} level)
}
In addition, the studies differed with respect to various design variables, including CO2 exposure duration (\code{time}), growing method (\code{pot}: number = pot size in liters; \code{GRND} = plants rooted in ground; \code{HYDRO} = solution or aeroponic culture), CO2 exposure facility (\code{method}: \code{GC} = growth chamber; \code{GH} = greenhouse; \code{OTC} = field-based open-top chamber), and planting stock (\code{stock}: \code{SEED} = plants started from seeds; \code{SAP} = plants started from cuttings).
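Such design differences can in principle be examined via meta-regression. As a purely illustrative sketch (assuming the log transformed ratios of means have already been computed with \code{escalc} as shown in the \sQuote{Examples} below), the CO2 exposure facility could be included as a moderator with:
\preformatted{res <- rma(yi, vi, mods = ~ method, data=dat)}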
The goal of the meta-analysis was to examine the effects of elevated CO2 levels on plant physiology and growth and the interacting effects of the environmental (and design) variables. } \source{ Hedges, L. V., Gurevitch, J., & Curtis, P. S. (1999). The meta-analysis of response ratios in experimental ecology. \emph{Ecology}, \bold{80}(4), 1150--1156. \verb{https://doi.org/10.1890/0012-9658(1999)080[1150:TMAORR]2.0.CO;2} (data obtained from \emph{Ecological Archives}, E080-008-S1, at: \url{https://esapubs.org/archive/ecol/E080/008/}) } \references{ Curtis, P. S., & Wang, X. (1998). A meta-analysis of elevated CO2 effects on woody plant mass, form, and physiology. \emph{Oecologia}, \bold{113}(3), 299--313. \verb{https://doi.org/10.1007/s004420050381} } \examples{ ### copy data into 'dat' dat <- dat.curtis1998 ### calculate (log transformed) ratios of means and corresponding sampling variances dat <- escalc(measure="ROM", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat) dat ### meta-analysis using a random-effects model res <- rma(yi, vi, method="DL", data=dat) res ### average ratio of means with 95\% CI predict(res, transf=exp, digits=2) ### meta-analysis for plants grown under nutrient stress res <- rma(yi, vi, method="DL", data=dat, subset=(xtrt=="FERT" & level=="LOW")) predict(res, transf=exp, digits=2) ### meta-analysis for plants grown under low light conditions res <- rma(yi, vi, method="DL", data=dat, subset=(xtrt=="LIGHT" & level=="LOW")) predict(res, transf=exp, digits=2) } \keyword{datasets} metafor/man/labbe.Rd0000644000176200001440000001660414055736403014034 0ustar liggesusers\name{labbe} \alias{labbe} \alias{labbe.rma} \title{L'Abbe Plots for 'rma' Objects} \description{ Function to create \enc{L'Abbé}{L'Abbe} plots for objects of class \code{"rma"}. \loadmathjax } \usage{ labbe(x, \dots) \method{labbe}{rma}(x, xlim, ylim, xlab, ylab, add=x$add, to=x$to, transf, targs, pch=21, psize, plim=c(0.5,3.5), col, bg, grid=FALSE, lty, \dots) } \arguments{ \item{x}{an object of class \code{"rma"}. See \sQuote{Details}.} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis. If unspecified, the function tries to set an appropriate axis title.} \item{add}{See \sQuote{Details} and the documentation of the \code{\link{escalc}} function for more details.} \item{to}{See \sQuote{Details} and the documentation of the \code{\link{escalc}} function for more details.} \item{transf}{optional argument to specify a function that should be used to transform the outcomes (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{pch}{plotting symbol to use for the outcomes. By default, a filled circle is used. Can also be a vector of values. See \code{\link{points}} for other options.} \item{psize}{optional numeric vector to specify the point sizes for the outcomes. If unspecified, the point sizes are a function of the precision of the outcomes. Can also be a vector of values.} \item{plim}{numeric vector of length 2 to scale the point sizes (ignored when \code{psize} is specified). 
See \sQuote{Details}.}
\item{col}{optional character string to specify the name of a color to use for the points (\code{"black"} is used by default if not specified). Can also be a vector of color names.}
\item{bg}{optional character string to specify the name of a background color for open plot symbols (\code{"gray"} is used by default if not specified). Can also be a vector of color names. Set to \code{NA} to make the plotting symbols transparent.}
\item{grid}{logical to specify whether a grid should be added to the plot (can also be a color name).}
\item{lty}{optional character vector to specify the line type for the diagonal reference line of no effect and the line that indicates the estimated effect based on the fitted model. If unspecified, the function sets this to \code{c("solid","dashed")} by default (use \code{"blank"} to suppress a line).}
\item{\dots}{other arguments.}
}
\details{
The model specified via \code{x} must be a model without moderators (i.e., either a fixed- or a random-effects model) fitted with either the \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, or \code{\link{rma.glmm}} functions. Moreover, the model must have been fitted with \code{measure} set equal to \code{"RD"} (for risk differences), \code{"RR"} (for risk ratios), \code{"OR"} (for odds ratios), \code{"AS"} (for arcsine square root transformed risk differences), \code{"IRR"} (for incidence rate ratios), \code{"IRD"} (for incidence rate differences), or \code{"IRSD"} (for square root transformed incidence rate differences).

The function calculates the arm-level outcomes for the two groups (e.g., treatment and control) and plots them against each other. In particular, the function plots the raw proportions of the two groups against each other when analyzing risk differences, the log of the proportions when analyzing (log) risk ratios, the log odds when analyzing (log) odds ratios, the arcsine square root transformed proportions when analyzing arcsine square root transformed risk differences, the raw incidence rates when analyzing incidence rate differences, the log of the incidence rates when analyzing (log) incidence rate ratios, and the square root transformed incidence rates when analyzing square root transformed incidence rate differences. The \code{transf} argument can be used to transform these values (e.g., \code{transf=exp} to transform the log of the proportions back to raw proportions; see also \link{transf}).

As described under the documentation for the \code{\link{escalc}} function, zero cells can lead to problems when calculating particular outcomes. Adding a small constant to the cells of the \mjeqn{2 \times 2}{2x2} tables is a common solution to this problem. By default, the function adopts the same method for handling zero cells as was used when fitting the model.

By default (i.e., when \code{psize} is not specified), the size of the points is a function of the precision (i.e., inverse standard error) of the outcomes. This way, more precise estimates are visually more prominent in the plot. By making the point sizes a function of the inverse standard error of the estimates, their area is proportional to the inverse sampling variances, which corresponds to the weights they would receive in a fixed-effects model. However, the point sizes are rescaled so that the smallest point size is \code{plim[1]} and the largest point size is \code{plim[2]}. As a result, their relative sizes (i.e., areas) no longer exactly correspond to their relative weights in such a model.
If exactly relative point sizes are desired, one can set \code{plim[2]} to \code{NA}, in which case the points are rescaled so that the smallest point size corresponds to \code{plim[1]} and all other points are scaled accordingly. As a result, the largest point may be very large. Alternatively, one can set \code{plim[1]} to \code{NA}, in which case the points are rescaled so that the largest point size corresponds to \code{plim[2]} and all other points are scaled accordingly.

The solid line corresponds to identical outcomes in the two groups (i.e., the absence of a difference between the two groups). The dashed line indicates the estimated effect based on the fitted model.
}
\value{
A data frame with components:
\item{x}{the x-axis coordinates of the points that were plotted.}
\item{y}{the y-axis coordinates of the points that were plotted.}
\item{cex}{the point sizes.}
\item{pch}{the plotting symbols.}
\item{col}{the point colors.}
\item{bg}{the background colors.}
\item{ids}{the study id numbers.}
\item{slab}{the study labels.}
Note that the data frame is returned invisibly.
}
\author{
Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org}
}
\references{
\enc{L'Abbé}{L'Abbe}, K. A., Detsky, A. S., & O'Rourke, K. (1987). Meta-analysis in clinical research. \emph{Annals of Internal Medicine}, \bold{107}(2), 224--233. \verb{https://doi.org/10.7326/0003-4819-107-2-224}

Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03}
}
\seealso{
\code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}
}
\examples{
### meta-analysis of the log risk ratios using a random-effects model
res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

### default plot
labbe(res)

### L'Abbe plot with risk values on the x- and y-axis and add grid
labbe(res, transf=exp, grid=TRUE)
}
\keyword{hplot}
metafor/man/dat.crede2010.Rd0000644000176200001440000000727014055736403015122 0ustar liggesusers\name{dat.crede2010}
\docType{data}
\alias{dat.crede2010}
\title{Studies on the Relationship between Class Attendance and Grades in College Students}
\description{Results from 68 studies on the relationship between class attendance and class performance and/or grade point average in college students.}
\usage{dat.crede2010}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{studyid} \tab \code{numeric} \tab study number \cr
\bold{year} \tab \code{numeric} \tab publication year \cr
\bold{source} \tab \code{character} \tab study source (journal, dissertation, other) \cr
\bold{sampleid} \tab \code{numeric} \tab sample within study number \cr
\bold{criterion} \tab \code{character} \tab criterion variable (grade, gpa) \cr
\bold{class} \tab \code{character} \tab class type (science, nonscience) \cr
\bold{ni} \tab \code{numeric} \tab sample size \cr
\bold{ri} \tab \code{numeric} \tab observed correlation }
}
\details{
The 68 studies included in this dataset provide information about the relationship between class attendance of college students and their performance (i.e., grade) in the class and/or their overall grade point average. Some studies included multiple samples and hence the dataset actually contains 97 correlation coefficients. The dataset was obtained via personal communication. Note that this dataset differs just slightly from the one used by Credé et al. (2010).
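Given the nested structure (samples within studies), a multilevel model may be more appropriate than a standard random-effects model for these data. A sketch of such a model (shown in full in the last part of the \sQuote{Examples}, after computing the r-to-z transformed correlations with \code{escalc}) is:
\preformatted{rma.mv(yi, vi, random = ~ 1 | studyid/sampleid, data=dat)}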
} \source{ Personal communication. } \references{ Credé, M., Roch, S. G., & Kieszczynka, U. M. (2010). Class attendance in college: A meta-analytic review of the relationship of class attendance with grades and student characteristics. \emph{Review of Educational Research}, \bold{80}(2), 272--295. \verb{https://doi.org/10.3102/0034654310362998} } \examples{ ### copy data into 'dat' dat <- dat.crede2010 ### calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat) ############################################################################ ### meta-analysis for the relationship between attendance and grades res <- rma(yi, vi, data=dat, subset=criterion=="grade") res ### estimated average correlation with 95\% CI/PI predict(res, transf=transf.ztor, digits=2) ### examine if relationship between attendance and grades differs for nonscience/science classes res <- rma(yi, vi, mods = ~ class, data=dat, subset=criterion=="grade") res ### estimated average correlations for nonscience and science classes predict(res, newmods=c(0,1), transf=transf.ztor, digits=2) ### examine if relationship between attendance and grades has changed over time res <- rma(yi, vi, mods = ~ year, data=dat, subset=criterion=="grade") res ############################################################################ ### meta-analysis for the relationship between attendance and GPA res <- rma(yi, vi, data=dat, subset=criterion=="gpa") res ### estimated average correlation with 95\% CI/PI predict(res, transf=transf.ztor, digits=2) ### examine if relationship between attendance and GPA has changed over time res <- rma(yi, vi, mods = ~ year, data=dat, subset=criterion=="gpa") res ############################################################################ ### use a multilevel model to examine the relationship between attendance and grades res <- rma.mv(yi, vi, random = ~ 1 | studyid/sampleid, data=dat, subset=criterion=="grade") res predict(res, transf=transf.ztor, digits=2) ### use a multilevel model to examine the relationship between attendance and gpa res <- rma.mv(yi, vi, random = ~ 1 | studyid/sampleid, data=dat, subset=criterion=="gpa") res predict(res, transf=transf.ztor, digits=2) } \keyword{datasets} metafor/man/dat.landenberger2005.Rd0000644000176200001440000001225314055736403016471 0ustar liggesusers\name{dat.landenberger2005} \docType{data} \alias{dat.landenberger2005} \title{Studies on the Effectiveness of CBT for Reducing Recidivism} \description{Results from 58 studies on the effectiveness of cognitive-behavioral therapy (CBT) for reducing recidivism in juvenile and adult offenders.} \usage{dat.landenberger2005} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab (first) author and year \cr \bold{pubtype} \tab \code{character} \tab publication type (book chapter, journal article, report, or thesis) \cr \bold{country} \tab \code{character} \tab country where study was carried out (Canada, New Zealand, UK, or USA) \cr \bold{design} \tab \code{character} \tab study design (matched groups, nonequivalent groups, or randomized trial) \cr \bold{program} \tab \code{character} \tab purpose of setting up the CBT program (for demonstration, practice, or research purposes) \cr \bold{setting} \tab \code{character} \tab treatment setting (community or prison) \cr \bold{designprob} \tab \code{character} \tab indication of study design problems (no, favors the control group, or favors the treatment 
group) \cr \bold{n.ctrl.rec} \tab \code{numeric} \tab number of recidivists in the control group \cr \bold{n.ctrl.non} \tab \code{numeric} \tab number of non-recidivists in the control group \cr \bold{n.cbt.rec} \tab \code{numeric} \tab number of recidivists in the CBT group \cr \bold{n.cbt.non} \tab \code{numeric} \tab number of non-recidivists in the CBT group \cr \bold{interval} \tab \code{numeric} \tab recidivism interval (in months) \cr \bold{group} \tab \code{numeric} \tab study group (adults or juveniles) \cr \bold{age} \tab \code{numeric} \tab mean age of the study group \cr \bold{male} \tab \code{numeric} \tab percentage of males in the study group \cr \bold{minority} \tab \code{numeric} \tab percentage of minorities in the study group \cr \bold{length} \tab \code{numeric} \tab treatment length (in weeks) \cr \bold{sessions} \tab \code{numeric} \tab number of CBT sessions per week \cr \bold{hrs_week} \tab \code{numeric} \tab treatment hours per week \cr \bold{hrs_total} \tab \code{numeric} \tab total hours of treatment \cr \bold{cbt.cogskills} \tab \code{character} \tab CBT component: cognitive skills (yes, no) \cr \bold{cbt.cogrestruct} \tab \code{character} \tab CBT component: cognitive restructuring (yes, no) \cr \bold{cbt.intpprbsolv} \tab \code{character} \tab CBT component: interpersonal problem solving (yes, no) \cr \bold{cbt.socskills} \tab \code{character} \tab CBT component: social skills (yes, no) \cr \bold{cbt.angerctrl} \tab \code{character} \tab CBT component: anger control (yes, no) \cr \bold{cbt.victimimpact} \tab \code{character} \tab CBT component: victim impact (yes, no) \cr \bold{cbt.subabuse} \tab \code{character} \tab CBT component: substance abuse (yes, no) \cr \bold{cbt.behavmod} \tab \code{character} \tab CBT component: behavior modification (yes, no) \cr \bold{cbt.relapseprev} \tab \code{character} \tab CBT component: relapse prevention (yes, no) \cr \bold{cbt.moralrsng} \tab \code{character} \tab CBT component: moral reasoning (yes, no) \cr \bold{cbt.roletaking} \tab \code{character} \tab CBT component: role taking (yes, no) \cr \bold{cbt.other} \tab \code{character} \tab CBT component: other (yes, no) } } \details{ Landenberger and Lipsey (2005) conducted a meta-analysis of 58 experimental and quasi-experimental studies of the effects of cognitive-behavioral therapy (CBT) on the recidivism rates of adult and juvenile offenders (see also Lipsey et al., 2007). The present dataset includes the results of these studies and a range of potential moderator variables to identify factors associated with variation in treatment effects. } \source{ Personal communication. } \references{ Landenberger, N. A., & Lipsey, M. W. (2005). The positive effects of cognitive-behavioral programs for offenders: A meta-analysis of factors associated with effective treatment. \emph{Journal of Experimental Criminology}, \bold{1}, 451--476. \verb{https://doi.org/10.1007/s11292-005-3541-7} Lipsey, M. W., Landenberger, N. A., & Wilson, S. J. (2007). Effects of cognitive-behavioral programs for criminal offenders. \emph{Campbell Systematic Reviews}, \bold{3}(1), 1--27. \verb{https://doi.org/10.4073/csr.2007.6} } \examples{ ### copy data into 'dat' and examine data dat <- dat.landenberger2005 head(dat) ### calculate log odds ratios (for non-recidivism in CBT vs. 
control groups) and sampling variances dat <- escalc(measure="OR", ai=n.cbt.non, bi=n.cbt.rec, ci=n.ctrl.non, di=n.ctrl.rec, data=dat) ### fit random-effects model res <- rma(yi, vi, data=dat) res ### estimated average OR and corresponding 95\% CI/PI predict(res, transf=exp, digits=2) ### examine if number of treatment sessions per week is a potential moderator res <- rma(yi, vi, mods = ~ sessions, data=dat) res ### predicted ORs for 1, 2, 5, or 10 sessions per week predict(res, newmods=c(1,2,5,10), transf=exp, digits=2) } \keyword{datasets} metafor/man/plot.gosh.rma.Rd0000644000176200001440000001207514055736403015460 0ustar liggesusers\name{plot.gosh.rma} \alias{plot.gosh.rma} \title{Plot Method for 'gosh.rma' Objects} \description{ Plot method for objects of class \code{"gosh.rma"}. } \usage{ \method{plot}{gosh.rma}(x, het="I2", pch=16, cex=0.5, out, col, alpha, border, xlim, ylim, xhist=TRUE, yhist=TRUE, hh=0.3, breaks, adjust, lwd, labels, \dots) } \arguments{ \item{x}{an object of class \code{"gosh.rma"}.} \item{het}{character string to specify the heterogeneity measure to plot. Either \code{"I2"}, \code{"H2"}, \code{"QE"}, or \code{"tau2"} (the last only for random/mixed-effects models).} \item{pch}{plotting symbol to use. By default, a borderless filled circle is used. See \code{\link{points}} for other options.} \item{cex}{symbol expansion factor.} \item{out}{optional integer to specify the number of a study that may be a potential outlier. If specified, subsets containing the specified study are drawn in a different color than those not containing the study.} \item{col}{optional character string to specify the name of a color to use for the points (if not provided, points are drawn in black). When \code{out} is used, two colors should be specified (if not provided, red is used for subsets containing the specified study and blue otherwise).} \item{alpha}{optional alpha transparency value for the points (0 means fully transparent and 1 means opaque). If unspecified, the function tries to set this to a sensible value.} \item{border}{optional character string to specify the name of a color to use for the borders of the histogram (if not provided, borders are drawn in white). Set to \code{FALSE} to omit the borders.} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{xhist}{logical to specify whether a histogram should be drawn for the x-axis (the default is \code{TRUE}).} \item{yhist}{logical to specify whether a histogram should be drawn for the y-axis (the default is \code{TRUE}).} \item{hh}{optional numeric value (or vector of two values) to adjust the height of the histogram(s). Must be between 0 and 1, but should not be too close to 0 or 1, as otherwise the plot cannot be drawn.} \item{breaks}{optional argument passed on to \code{\link{hist}} for choosing the (number of) breakpoints of the histogram(s).} \item{adjust}{optional argument passed on to \code{\link{density}} for adjusting the bandwidth of the kernel density estimate(s) (values larger than 1 result in more smoothing).} \item{lwd}{optional numeric value to specify the line width of the estimated densities.
Set to \code{0} to omit the line(s).} \item{labels}{optional argument to specify the x-axis and y-axis labels (or passed on to \code{\link{pairs}} to specify the names of the variables in the scatter plot matrix).} \item{\dots}{other arguments.} } \details{ For models without moderators, the function draws a scatter plot of the model estimates on the x-axis against the chosen measure of heterogeneity on the y-axis. Histograms of the respective distributions (with kernel density estimates superimposed) are shown in the margins (when \code{xhist=TRUE} and \code{yhist=TRUE}). For models with moderators, the function draws a scatter plot matrix (with the \code{\link{pairs}} function) of the chosen measure of heterogeneity and each of the model coefficients. Histograms of the variables plotted are shown along the diagonal, with kernel density estimates of the distributions superimposed. Arguments \code{xlim}, \code{ylim}, \code{xhist}, and \code{yhist} are then ignored (argument \code{hh} can then be used to compress/stretch the height of the distributions shown along the diagonal). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Olkin, I., Dahabreh, I. J., & Trikalinos, T. A. (2012). GOSH - a graphical display of study heterogeneity. \emph{Research Synthesis Methods}, \bold{3}(3), 214--223. \verb{https://doi.org/10.1002/jrsm.1053} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W. (2021). Model checking in meta-analysis. In C. H. Schmid, T. Stijnen, & I. R. White (Eds.), \emph{Handbook of meta-analysis} (pp. 219--254). Boca Raton, FL: CRC Press. 
\verb{https://doi.org/10.1201/9781315119403} } \seealso{ \code{\link{gosh.rma}} } \examples{ ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.egger2001) ### meta-analysis of all trials including ISIS-4 using a FE model res <- rma(yi, vi, data=dat, method="FE") ### fit FE model to all possible subsets (65535 models) \dontrun{ sav <- gosh(res, progbar=FALSE) ### create GOSH plot ### red points for subsets that include and blue points ### for subsets that exclude study 16 (the ISIS-4 trial) plot(sav, out=16, breaks=100)} } \keyword{hplot} metafor/man/dat.nielweise2008.Rd0000644000176200001440000000565114055736403016034 0ustar liggesusers\name{dat.nielweise2008} \docType{data} \alias{dat.nielweise2008} \title{Studies on Anti-Infective-Treated Central Venous Catheters for Prevention of Catheter-Related Bloodstream Infections} \description{Results from 9 studies comparing the risk of catheter-related bloodstream infection when using anti-infective-treated versus standard catheters for total parenteral nutrition or chemotherapy.} \usage{dat.nielweise2008} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{authors} \tab \code{character} \tab study authors \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{x1i} \tab \code{numeric} \tab number of CRBSIs in patients receiving an anti-infective catheter \cr \bold{t1i} \tab \code{numeric} \tab total number of catheter days for patients receiving an anti-infective catheter \cr \bold{x2i} \tab \code{numeric} \tab number of CRBSIs in patients receiving a standard catheter \cr \bold{t2i} \tab \code{numeric} \tab total number of catheter days for patients receiving a standard catheter } } \details{ The use of a central venous catheter may lead to a catheter-related bloodstream infection (CRBSI), which in turn increases the risk of morbidity and mortality. Anti-infective-treated catheters have been developed that are meant to reduce the risk of CRBSIs. Niel-Weise et al. (2008) conducted a meta-analysis of studies comparing infection risk when using anti-infective-treated versus standard catheters for total parenteral nutrition or chemotherapy. The results from 9 such studies are included in this dataset. The dataset was used in the article by Stijnen et al. (2010) to illustrate various generalized linear mixed-effects models for the meta-analysis of incidence rates and incidence rate ratios (see \sQuote{References}). } \source{ Niel-Weise, B. S., Stijnen, T., & van den Broek, P. J. (2008). Anti-infective-treated central venous catheters for total parenteral nutrition or chemotherapy: A systematic review. \emph{Journal of Hospital Infection}, \bold{69}(2), 114--123. \verb{https://doi.org/10.1016/j.jhin.2008.02.020} } \references{ Stijnen, T., Hamza, T. H., & Ozdemir, P. (2010). Random effects meta-analysis of event outcome in the framework of the generalized linear mixed model with applications in sparse data. \emph{Statistics in Medicine}, \bold{29}(29), 3046--3067.
\verb{https://doi.org/10.1002/sim.4040} } \examples{ ### copy data into 'dat' and examine data dat <- dat.nielweise2008 dat ### standard (inverse-variance) random-effects model res <- rma(measure="IRR", x1i=x1i, t1i=t1i, x2i=x2i, t2i=t2i, data=dat) print(res, digits=3) predict(res, transf=exp, digits=2) ### random-effects conditional Poisson model \dontrun{ res <- rma.glmm(measure="IRR", x1i=x1i, t1i=t1i, x2i=x2i, t2i=t2i, data=dat, model="CM.EL") print(res, digits=3) predict(res, transf=exp, digits=2)} } \keyword{datasets} metafor/man/replmiss.Rd0000644000176200001440000000133214055736403014615 0ustar liggesusers\name{replmiss} \alias{replmiss} \title{Replace Missing Values in a Vector} \description{ Function to replace missing (\code{NA}) values in a vector. } \usage{ replmiss(x, y) } \arguments{ \item{x}{vector that may include one or more missing values.} \item{y}{either a scalar or a vector of the same length as \code{x} with the value(s) to replace missing values with.} } \value{ Vector \code{x} with the missing values replaced based on the scalar or vector \code{y}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \examples{ x <- c(4,2,7,NA,1,NA,5) x <- replmiss(x,0) x x <- c(4,2,7,NA,1,NA,5) y <- c(2,3,6,5,8,1,2) x <- replmiss(x,y) x } \keyword{manip} metafor/man/print.permutest.rma.uni.Rd0000644000176200001440000000377514055736403017527 0ustar liggesusers\name{print.permutest.rma.uni} \alias{print.permutest.rma.uni} \title{Print Method for 'permutest.rma.uni' Objects} \description{ Print method for objects of class \code{"permutest.rma.uni"}. } \usage{ \method{print}{permutest.rma.uni}(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots) } \arguments{ \item{x}{an object of class \code{"permutest.rma.uni"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{signif.stars}{logical to specify whether p-values should be encoded visually with \sQuote{significance stars}. Defaults to the \code{show.signif.stars} slot of \code{\link{options}}.} \item{signif.legend}{logical to specify whether the legend for the \sQuote{significance stars} should be printed. Defaults to the value for \code{signif.stars}.} \item{\dots}{other arguments.} } \details{ The output includes: \itemize{ \item the results of the omnibus test of moderators. Suppressed if the model includes only one coefficient (e.g., only an intercept, like in the fixed- and random-effects model). The p-value based on the permutation test is indicated by \code{p-val*}. \item a table with the estimated coefficients, corresponding standard errors, test statistics, p-values, and confidence interval bounds. The p-values based on the permutation tests are indicated by \code{pval*}. When permutation-based CIs have been obtained, then the CI bounds are indicated with \code{ci.lb*} and \code{ci.ub*}. } } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{permutest.rma.uni}} } \keyword{print} metafor/man/dat.graves2010.Rd0000644000176200001440000000355614055736403015332 0ustar liggesusers\name{dat.graves2010} \docType{data} \alias{dat.graves2010} \title{Studies on the Effectiveness of Injected Cholera Vaccines} \description{Results from 17 studies on the effectiveness of injected vaccines against cholera.} \usage{ dat.graves2010 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab author/study name and publication year \cr \bold{ai} \tab \code{numeric} \tab number of cholera cases in the vaccinated group \cr \bold{n1i} \tab \code{numeric} \tab number of individuals in the vaccinated group \cr \bold{ci} \tab \code{numeric} \tab number of cholera cases in the placebo group \cr \bold{n2i} \tab \code{numeric} \tab number of individuals in the placebo group } } \details{ Cholera is an infection caused by certain strains of the bacterium \emph{Vibrio cholerae}. When untreated, mortality rates can be as high as 50-60\%. Proper sanitation practices are usually effective in preventing outbreaks, but a number of oral and injectable vaccines have also been developed. The Cochrane review by Graves et al. (2010) examined the effectiveness of injectable vaccines for preventing cholera cases and death. The present dataset includes results from 17 studies that reported the number of cholera cases in vaccinated and placebo/comparison groups up to 7 months after the treatment. } \source{ Graves, P. M., Deeks, J. J., Demicheli, V., & Jefferson, T. (2010). Vaccines for preventing cholera: Killed whole cell or other subunit vaccines (injected). \emph{Cochrane Database of Systematic Reviews}, \bold{8}, CD000974. \verb{https://doi.org/10.1002/14651858.CD000974.pub2} } \examples{ ### copy data into 'dat' and examine data dat <- dat.graves2010 dat ### analysis using the Mantel-Haenszel method rma.mh(measure="RR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, digits=2) } \keyword{datasets} metafor/man/dat.viechtbauer2021.Rd0000644000176200001440000002144214055736403016340 0ustar liggesusers\name{dat.viechtbauer2021} \docType{data} \alias{dat.viechtbauer2021} \title{Studies to Illustrate Model Checking Methods} \description{Results from 20 hypothetical randomized clinical trials examining the effectiveness of a medication for treating some disease.} \usage{ dat.viechtbauer2021 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{trial} \tab \code{numeric} \tab trial number \cr \bold{nTi} \tab \code{numeric} \tab number of patients in the treatment group \cr \bold{nCi} \tab \code{numeric} \tab number of patients in the control group \cr \bold{xTi} \tab \code{numeric} \tab number of patients in the treatment group with remission \cr \bold{xCi} \tab \code{numeric} \tab number of patients in the control group with remission \cr \bold{dose} \tab \code{numeric} \tab dosage of the medication provided to patients in the treatment group (in milligrams per day) } } \details{ The dataset was constructed for the purposes of illustrating the model checking and diagnostic methods described in Viechtbauer (2021). The code below provides the results for many of the analyses and plots discussed in the book chapter. } \source{ Viechtbauer, W. (2021). Model checking in meta-analysis. In C. H. Schmid, T. Stijnen, & I. R. White (Eds.), \emph{Handbook of meta-analysis} (pp. 219-254). Boca Raton, FL: CRC Press. 
\verb{https://doi.org/10.1201/9781315119403} } \examples{ ### copy data into 'dat' dat <- dat.viechtbauer2021 ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=xTi, n1i=nTi, ci=xCi, n2i=nCi, add=1/2, to="all", data=dat) dat ### number of studies k <- nrow(dat) ### fit models res.CE <- rma(yi, vi, data=dat, method="FE") res.CE res.RE <- rma(yi, vi, data=dat, method="DL") res.RE res.MR <- rma(yi, vi, mods = ~ dose, data=dat, method="FE") res.MR res.ME <- rma(yi, vi, mods = ~ dose, data=dat, method="DL") res.ME ### forest and bubble plot par(mar=c(5,4,1,2)) forest(dat$yi, dat$vi, psize=0.8, efac=0, xlim=c(-4,6), ylim=c(-3,23), cex=1, width=c(5,5,5), xlab="Log Odds Ratio (LnOR)") addpoly(res.CE, row=-1.5, cex=1, width=c(5,5,5), mlab="CE Model") addpoly(res.RE, row=-2.5, cex=1, width=c(5,5,5), mlab="RE Model") text(-4, 22, "Trial", pos=4, font=2) text( 6, 22, "LnOR [95\% CI]", pos=2, font=2) abline(h=0) tmp <- regplot(res.ME, xlim=c(0,250), ylim=c(-1,1.5), predlim=c(0,250), shade=FALSE, digits=1, xlab="Dosage (mg per day)", psize="seinv", plim=c(NA,5), bty="l", las=1, lty=c("solid", "dashed"), label=TRUE, labsize=0.8, offset=c(1,0.7)) res.sub <- rma(yi, vi, mods = ~ dose, data=dat, method="DL", subset=-6) abline(res.sub, lty="dotted") points(tmp$xi, tmp$yi, pch=21, cex=tmp$psize, col="black", bg="darkgray") par(mar=c(5,4,4,2)) ### number of standardized deleted residuals larger than +-1.96 in each model sum(abs(rstudent(res.CE)$z) >= qnorm(.975)) sum(abs(rstudent(res.MR)$z) >= qnorm(.975)) sum(abs(rstudent(res.RE)$z) >= qnorm(.975)) sum(abs(rstudent(res.ME)$z) >= qnorm(.975)) ### plot of the standardized deleted residuals for the RE and ME models plot(NA, NA, xlim=c(1,20), ylim=c(-4,4), xlab="Study", ylab="Standardized (Deleted) Residual", xaxt="n", main="Random-Effects Model", las=1) axis(side=1, at=1:20) abline(h=c(-1.96,1.96), lty="dotted") abline(h=0) points(1:20, rstandard(res.RE)$z, type="o", pch=19, col="gray70") points(1:20, rstudent(res.RE)$z, type="o", pch=19) legend("top", pch=19, col=c("gray70","black"), lty="solid", legend=c("Standardized Residuals","Standardized Deleted Residuals"), bty="n") plot(NA, NA, xlim=c(1,20), ylim=c(-4,4), xlab="Study", ylab="Standardized (Deleted) Residual", xaxt="n", main="Mixed-Effects Model", las=1) axis(side=1, at=1:20) abline(h=c(-1.96,1.96), lty="dotted") abline(h=0) points(1:20, rstandard(res.ME)$z, type="o", pch=19, col="gray70") points(1:20, rstudent(res.ME)$z, type="o", pch=19) legend("top", pch=19, col=c("gray70","black"), lty="solid", legend=c("Standardized Residuals","Standardized Deleted Residuals"), bty="n") ### Baujat plots baujat(res.CE, main="Common-Effects Model", xlab="Squared Pearson Residual", ylim=c(0,5), las=1) baujat(res.ME, main="Mixed-Effects Model", ylim=c(0,2), las=1) ### GOSH plots (skipped because this takes quite some time to run) if (FALSE) { res.GOSH.CE <- gosh(res.CE, subsets=10^7) plot(res.GOSH.CE, cex=0.2, out=6, xlim=c(-0.25,1.25), breaks=c(200,100)) res.GOSH.ME <- gosh(res.ME, subsets=10^7) plot(res.GOSH.ME, het="tau2", out=6, breaks=50, adjust=0.6, las=1) } ### plot of treatment dosage against the standardized residuals plot(dat$dose, rstandard(res.ME)$z, pch=19, xlab="Dosage (mg per day)", ylab="Standardized Residual", xlim=c(0,250), ylim=c(-2.5,2.5), las=1) abline(h=c(-1.96,1.96), lty="dotted", lwd=2) abline(h=0) title("Standardized Residual Plot") text(dat$dose[6], rstandard(res.ME)$z[6], "6", pos=4, offset=0.4) ### quadratic polynomial model rma(yi, vi, mods = ~ 
dose + I(dose^2), data=dat, method="DL") ### lack-of-fit model resLOF <- rma(yi, vi, mods = ~ dose + factor(dose), data=dat, method="DL", btt=3:9) resLOF ### scatter plot to illustrate the lack-of-fit model regplot(res.ME, xlim=c(0,250), ylim=c(-1.0,1.5), xlab="Dosage (mg per day)", ci=FALSE, predlim=c(0,250), psize=1, pch=19, col="gray60", digits=1, lwd=1, bty="l", las=1) dosages <- sort(unique(dat$dose)) lines(dosages, fitted(resLOF)[match(dosages, dat$dose)], type="o", pch=19, cex=2, lwd=2) points(dat$dose, dat$yi, pch=19, col="gray60") legend("bottomright", legend=c("Linear Model", "Lack-of-Fit Model"), pch=c(NA,19), col="black", lty="solid", lwd=c(1,2), pt.cex=c(1,2), seg.len=4, bty="n") ### checking normality of the standardized deleted residuals qqnorm(res.ME, type="rstudent", main="Standardized Deleted Residuals", pch=19, label="out", lwd=2, pos=24, ylim=c(-4,3), lty=c("solid", "dotted"), las=1) ### checking normality of the random effects sav <- qqnorm(ranef(res.ME)$pred, main="BLUPs of the Random Effects", cex=1, pch=19, xlim=c(-2.2,2.2), ylim=c(-0.6,0.6), las=1) abline(a=0, b=sd(ranef(res.ME)$pred), lwd=2) text(sav$x[6], sav$y[6], "6", pos=4, offset=0.4) ### hat values for the CE and RE models plot(NA, NA, xlim=c(1,20), ylim=c(0,0.21), xaxt="n", las=1, xlab="Study", ylab="Hat Value") axis(1, 1:20, cex.axis=1) points(hatvalues(res.CE), type="o", pch=19, col="gray70") points(hatvalues(res.RE), type="o", pch=19) abline(h=1/20, lty="dotted", lwd=2) title("Hat Values for the CE/RE Models") legend("topright", pch=19, col=c("gray70","black"), lty="solid", legend=c("Common-Effects Model", "Random-Effects Model"), bty="n") ### heatmap of the hat matrix for the ME model cols <- colorRampPalette(c("blue", "white", "red"))(101) h <- hatvalues(res.ME, type="matrix") image(1:nrow(h), 1:ncol(h), t(h[nrow(h):1,]), axes=FALSE, xlab="Influence of the Observed Effect of Study ...", ylab="On the Fitted Value of Study ...", col=cols, zlim=c(-max(abs(h)),max(abs(h)))) axis(1, 1:20, tick=FALSE) axis(2, 1:20, labels=20:1, las=1, tick=FALSE) abline(h=seq(0.5,20.5,by=1), col="white") abline(v=seq(0.5,20.5,by=1), col="white") points(1:20, 20:1, pch=19, cex=0.4) title("Heatmap for the Mixed-Effects Model") ### plot of leverages versus standardized residuals for the ME model plot(hatvalues(res.ME), rstudent(res.ME)$z, pch=19, cex=0.2+3*sqrt(cooks.distance(res.ME)), las=1, xlab="Leverage (Hat Value)", ylab="Standardized Deleted Residual", xlim=c(0,0.35), ylim=c(-3.5,2.5)) abline(h=c(-1.96,1.96), lty="dotted", lwd=2) abline(h=0, lwd=2) ids <- c(3,6,9) text(hatvalues(res.ME)[ids] + c(0,0.013,0.010), rstudent(res.ME)$z[ids] - c(0.18,0,0), ids) title("Leverage vs. 
Standardized Deleted Residuals") ### plot of the Cook's distances for the ME model plot(1:20, cooks.distance(res.ME), ylim=c(0,1.6), type="o", pch=19, las=1, xaxt="n", yaxt="n", xlab="Study", ylab="Cook's Distance") axis(1, 1:20, cex.axis=1) axis(2, seq(0,1.6,by=0.4), las=1) title("Cook's Distances") ### plot of the leave-one-out estimates of tau^2 for the ME model x <- influence(res.ME) plot(1:20, x$inf$tau2.del, ylim=c(0,0.15), type="o", pch=19, las=1, xaxt="n", xlab="Study", ylab=expression(paste("Estimate of ", tau^2, " without the ", italic(i), "th study"))) abline(h=res.ME$tau2, lty="dashed") axis(1, 1:20) title("Residual Heterogeneity Estimates") ### plot of the covariance ratios for the ME model plot(1:20, x$inf$cov.r, ylim=c(0,2.0), type="o", pch=19, las=1, xaxt="n", xlab="Study", ylab="Covariance Ratio") abline(h=1, lty="dashed") axis(1, 1:20) title("Covariance Ratios") ### fit mixed-effects model without studies 3 and/or 6 rma(yi, vi, mods = ~ dose, data=dat, method="DL", subset=-3) rma(yi, vi, mods = ~ dose, data=dat, method="DL", subset=-6) rma(yi, vi, mods = ~ dose, data=dat, method="DL", subset=-c(3,6)) } \keyword{datasets} metafor/man/dat.knapp2017.Rd0000644000176200001440000000760214055736403015157 0ustar liggesusers\name{dat.knapp2017} \docType{data} \alias{dat.knapp2017} \title{Studies on Differences in Planning Performance in Schizophrenia Patients versus Healthy Controls} \description{Results from 31 studies examining differences in planning performance in schizophrenia patients versus healthy controls.} \usage{dat.knapp2017} \format{The data frame contains the following columns: \tabular{lll}{ \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{study} \tab \code{numeric} \tab study id number \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference for planning performance \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance \cr \bold{difficulty} \tab \code{numeric} \tab task difficulty \cr \bold{n_sz} \tab \code{numeric} \tab number of schizophrenic patients \cr \bold{n_hc} \tab \code{numeric} \tab number of healthy controls \cr \bold{comp} \tab \code{numeric} \tab id for comparisons within studies \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference for IQ \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance } } \details{ The studies included in this dataset examined differences between schizophrenia patients and healthy controls with respect to their performance on the tower of London test (\url{https://en.wikipedia.org/wiki/Tower_of_London_test}) or a similar cognitive tasks measuring planning ability. The outcome measure for this meta-analysis was the standardized mean difference (with positive values indicating better performance in the healthy controls compared to the schizophrenia patients). The dataset has a more complex structure for two reasons: \enumerate{ \item Studies 2, 3, 9, and 20 included more than schizophrenia patient group and the standardized mean differences were computed by comparing these groups against a single healthy control group. \item Studies 6, 12, 14, 15, 18, 19, 22, and 26 had the patients and controls complete different tasks of varying complexity (essentially the average number of moves required to complete a task). } Both of these issues lead to correlated sampling errors, which should be taken into consideration in the analysis. 
} \source{ Knapp, F., Viechtbauer, W., Leonhart, R., Nitschke, K., & Kaller, C. P. (2017). Planning performance in schizophrenia patients: A meta-analysis of the influence of task difficulty and clinical and sociodemographic variables. \emph{Psychological Medicine}, \bold{47}(11), 2002--2016. \verb{https://doi.org/10.1017/S0033291717000459} } \examples{ ### copy data into 'dat' and examine data dat <- dat.knapp2017 dat ### fit a standard random-effects model ignoring correlated sampling errors res <- rma(yi, vi, data=dat) res ### fit a multilevel model with random effects for studies and comparisons within studies res <- rma.mv(yi, vi, random = ~ 1 | study/comp, data=dat) res ### construct an approximate V matrix assuming a correlation of 0.4 for the sampling errors ### of different comparisons within the same study V <- lapply(split(dat$vi, dat$study), function(v) { S <- diag(sqrt(v), nrow=length(v), ncol=length(v)) R <- matrix(0.4, nrow=length(v), ncol=length(v)) diag(R) <- 1 S \%*\% R \%*\% S }) V <- bldiag(V, order=dat$study) ### fit the same multilevel model, but now use this V matrix in the model res <- rma.mv(yi, V, random = ~ 1 | study/comp, data=dat) res ### use cluster-robust inference methods based on this model robust(res, cluster=dat$study) ### examine if task difficulty is a potential moderator of the effect res <- rma.mv(yi, V, mods = ~ difficulty, random = ~ 1 | study/comp, data=dat) res sav <- robust(res, cluster=dat$study) sav ### draw bubble plot regplot(sav, xlab="Task Difficulty", ylab="Standardized Mean Difference", las=1, digits=1, bty="l") } \keyword{datasets} metafor/man/rcalc.Rd0000644000176200001440000001603414055736403014050 0ustar liggesusers\name{rcalc} \alias{rcalc} \title{Calculate the Variance-Covariance of Correlation Coefficients} \description{ The function can be used to calculate the variance-covariance matrix of correlation coefficients computed based on the same sample of subjects. \loadmathjax } \usage{ rcalc(x, ni, data, rtoz=FALSE, nfun="min", sparse=FALSE, \dots) } \arguments{ \item{x}{a formula of the form \code{ri ~ var1 + var2 | study}. See \sQuote{Details}.} \item{ni}{vector to specify the sample sizes based on which the correlations were computed.} \item{data}{data frame containing the variables specified via the formula (and the sample sizes).} \item{rtoz}{logical to specify whether to transform the correlations via Fisher's r-to-z transformation (the default is \code{FALSE}).} \item{nfun}{a character string to specify how the \sQuote{common} sample size within each study should be computed. Possible options are \code{"min"} (for the minimum), \code{"harmonic"} (for the harmonic mean), or \code{"mean"} (for the arithmetic mean). Can also be a function. See \sQuote{Details}.} \item{sparse}{logical to specify whether the variance-covariance matrix should be returned as a sparse matrix (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ A meta-analysis of correlation coefficients may involve multiple correlation coefficients extracted from the same study. When these correlations are computed based on the same sample of subjects, then they are typically not independent. The \code{rcalc} function can be used to create a dataset with the correlation coefficients (possibly transformed with Fisher's r-to-z transformation) and the corresponding variance-covariance matrix. The dataset and variance-covariance matrix can then be further meta-analyzed using the \code{\link{rma.mv}} function. 
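To make this workflow concrete, the following is a minimal sketch (not part of the original documentation) of how the elements returned by \code{rcalc} might be passed on to \code{\link{rma.mv}}. It assumes a dataset structured like \code{dat.craft2003} and that the returned data frame contains the (possibly transformed) correlations in a variable named \code{yi} and retains the \code{study} identifier; the actual names should be checked in the returned object. \preformatted{tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat.craft2003)
dat <- tmp$dat   # data frame with the correlations and identifiers
V   <- tmp$V     # corresponding variance-covariance matrix
### multivariate random-effects model using this V matrix (simple sketch)
res <- rma.mv(yi, V, random = ~ 1 | study, data=dat)
res}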
When computing the covariance between two correlation coefficients, we can distinguish two cases. In the first case, one of the variables involved in the two correlation coefficients is the same. For example, in \mjseqn{r_{12}} and \mjseqn{r_{13}}, variable 1 is common to both correlation coefficients. This is sometimes called the (partially) \sQuote{overlapping} case. The covariance between the two correlation coefficients, \mjeqn{\mbox{Cov}[r_{12}, r_{13}]}{Cov[r_{12}, r_{13}]}, then depends on the degree of correlation between variables 2 and 3 (i.e., \mjseqn{r_{23}}). In the second case, none of the variables are common to both correlation coefficients. For example, this would be the case if we have correlations \mjseqn{r_{12}} and \mjseqn{r_{34}} based on 4 variables. This is sometimes called the \sQuote{non-overlapping} case. The covariance between the two correlation coefficients, \mjeqn{\mbox{Cov}[r_{12}, r_{34}]}{Cov[r_{12}, r_{34}]}, then depends on \mjseqn{r_{13}}, \mjseqn{r_{14}}, \mjseqn{r_{23}}, and \mjseqn{r_{24}}. Equations for these covariances can be found, for example, in Steiger (1980) and Olkin and Finn (1990). To use the \code{rcalc} function, one needs to construct a data frame that contains a study identifier (say \code{study}), two variable identifiers (say \code{var1} and \code{var2}), the corresponding correlation coefficient (say \code{ri}), and the sample size based on which the correlation coefficient was computed (say \code{ni}). Then the first argument should be a formula of the form \code{ri ~ var1 + var2 | study}, argument \code{ni} is set equal to the variable name containing the sample sizes, and the data frame containing these variables is specified via the \code{data} argument. When using the function for a single study, one can leave out the study identifier from the formula. When argument \code{rtoz} is set to \code{TRUE}, then the correlations are transformed with Fisher's r-to-z transformation and the variance-covariance matrix is computed for the transformed values. In some cases, the sample size may not be identical within a study (e.g., \mjseqn{r_{12}} may have been computed based on 120 subjects while \mjseqn{r_{13}} was computed based on 118 subjects due to 2 missing values in variable 3). For constructing the variance-covariance matrix, we need to assume a \sQuote{common} sample size for all correlation coefficients within the study. Argument \code{nfun} provides some options for how the common sample size should be computed. Possible options are \code{"min"} (for using the minimum sample size within a study as the common sample size), \code{"harmonic"} (for using the harmonic mean), or \code{"mean"} (for using the arithmetic mean). The default is \code{"min"}, which is a conservative choice (i.e., it will overestimate the sampling variances of coefficients that were computed based on a sample size that was actually larger than the minimum sample size). One can also specify a function via the \code{nfun} argument (which should take a numeric vector as input and return a single value).
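As a purely illustrative sketch (not from the original documentation), one could, for instance, use the geometric mean of the sample sizes within a study by passing a custom function via \code{nfun}: \preformatted{### hypothetical choice of a common sample size: the geometric mean
tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat.craft2003,
             nfun=function(n) exp(mean(log(n))))}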
} \value{ A list containing the following components: \item{dat}{a data frame with the study identifier, the two variable identifiers, a variable pair identifier, the correlation coefficients (possibly transformed with Fisher's r-to-z transformation), and the (common) sample sizes.} \item{V}{corresponding variance-covariance matrix (given as a sparse matrix when \code{sparse=TRUE}).} Note that a particular covariance can only be computed when all of the correlation coefficients involved in the covariance equation are included in the dataset. If one or more coefficients needed for the computation are missing, then the resulting covariance will also be missing (i.e., \code{NA}). } \note{ For raw correlation coefficients, the variance-covariance matrix is computed with \mjseqn{n-1} in the denominator (instead of \mjseqn{n} as suggested in Steiger, 1980, and Olkin & Finn, 1990). This is more consistent with the usual equation for computing the sampling variance of a correlation coefficient (with \mjseqn{n-1} in the denominator). For raw and r-to-z transformed coefficients, the variance-covariance matrix will only be computed when the (common) sample size for a study is at least 5. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Olkin, I., & Finn, J. D. (1990). Testing correlated correlations. \emph{Psychological Bulletin}, \bold{108}(2), 330--333. \verb{https://doi.org/10.1037/0033-2909.108.2.330} Steiger, J. H. (1980). Tests for comparing elements of a correlation matrix. \emph{Psychological Bulletin}, \bold{87}(2), 245--251. \verb{https://doi.org/10.1037/0033-2909.87.2.245} } \seealso{ \code{\link{rma.mv}}, \code{\link{dat.craft2003}} } \examples{ ### copy data into 'dat' dat <- dat.craft2003 ### construct dataset and var-cov matrix of the correlations tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat) V <- tmp$V dat <- tmp$dat ### examine data for study 1 dat[dat$study == 1,] V[dat$study == 1, dat$study == 1] ### examine data for study 6 dat[dat$study == 6,] V[dat$study == 6, dat$study == 6] ### examine data for study 17 dat[dat$study == 17,] V[dat$study == 17, dat$study == 17] } \keyword{datagen} metafor/man/dat.linde2005.Rd0000644000176200001440000001042514055736403015133 0ustar liggesusers\name{dat.linde2005} \docType{data} \alias{dat.linde2005} \title{Studies on the Effectiveness of St. John's Wort for Treating Depression} \description{Results from 26 studies on the effectiveness of Hypericum perforatum extracts (St. 
John's wort) for treating depression.} \usage{dat.linde2005} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab study number \cr \bold{study} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{country} \tab \code{character} \tab study location \cr \bold{ni} \tab \code{numeric} \tab total sample size \cr \bold{major} \tab \code{numeric} \tab sample restricted to patients who met criteria for major depression \cr \bold{baseline} \tab \code{numeric} \tab HRSD baseline score \cr \bold{version} \tab \code{numeric} \tab HRSD version (17 or 21 items) \cr \bold{duration} \tab \code{numeric} \tab study duration (in weeks) \cr \bold{prep} \tab \code{character} \tab Hypericum extract preparation \cr \bold{dosage} \tab \code{numeric} \tab dosage (in mg) \cr \bold{response} \tab \code{numeric} \tab definition of response (see \sQuote{Details}) \cr \bold{ai} \tab \code{numeric} \tab number of responses in treatment group \cr \bold{n1i} \tab \code{numeric} \tab number of patients in treatment group \cr \bold{ci} \tab \code{numeric} \tab number of responses in placebo group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in placebo group \cr \bold{group} \tab \code{numeric} \tab stratification variable used by the authors (see \sQuote{Details}) } } \details{ The dataset includes the results from 26 double-blind placebo-controlled trials on the effectiveness of Hypericum perforatum extracts (St. John's wort) for treating depression (note that 2 studies did not provide sufficient response information). Data were extracted from Table 1 and Figure 3 from Linde et al. (2005). For study duration, the assessment week (instead of the total study duration) was coded for Philipp et al. (1999) and Montgomery et al. (2000). For dosage, the midpoint was coded when a range of values was given. The definition of what constitutes a \code{response} differed across studies and is coded as follows: \enumerate{ \item HRSD score reduction of at least 50\% or HRSD score after therapy <10, \item HRSD reduction of at least 50\%, \item based on HRSD scale but exact definition not reported, \item global patient assessment of efficacy, \item at least \sQuote{much improved} on the Clinical Global Impression sub-scale for global improvement. } The \code{group} variable corresponds to the variable used by Linde et al. (2005) to stratify their analyses and is coded as follows: \enumerate{ \item smaller trials restricted to major depression, \item larger trials restricted to major depression, \item smaller trials not restricted to major depression, \item larger trials not restricted to major depression. } } \source{ Linde, K., Berner, M., Egger, M., & Mulrow, C. (2005). St John's wort for depression: Meta-analysis of randomised controlled trials. \emph{British Journal of Psychiatry}, \bold{186}(2), 99--107. \verb{https://doi.org/10.1192/bjp.186.2.99} } \references{ Viechtbauer, W. (2007). Accounting for heterogeneity via random-effects models and moderator analyses in meta-analysis. \emph{Zeitschrift \enc{für}{fuer} Psychologie / Journal of Psychology}, \bold{215}(2), 104--121. 
\verb{https://doi.org/10.1027/0044-3409.215.2.104} } \examples{ ### copy data into 'dat' dat <- dat.linde2005 ### remove studies with no response information and study with no responses in either group dat <- dat[-c(5,6,26),] ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=ai, ci=ci, n1i=n1i, n2i=n2i, data=dat) dat ### meta-analysis of the log risk ratios using a random-effects model res <- rma(yi, vi, data=dat, method="DL") res ### mixed-effects meta-regression model with stratification variable res <- rma(yi, vi, mods = ~ factor(group) - 1, data=dat, method="DL") res ### predicted average risk ratio for each level of the stratification variable predict(res, newmods=diag(4), transf=exp, digits=2) } \keyword{datasets} metafor/man/gosh.Rd0000644000176200001440000001415014055736403013721 0ustar liggesusers\name{gosh} \alias{gosh} \alias{gosh.rma} \title{GOSH Plots for 'rma' Objects} \description{ Function to create GOSH plots for objects of class \code{"rma"}. \loadmathjax } \usage{ gosh(x, \dots) \method{gosh}{rma}(x, subsets, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, \dots) } \arguments{ \item{x}{an object of class \code{"rma"}.} \item{subsets}{optional integer to specify the number of subsets.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{TRUE}).} \item{parallel}{character string to specify whether parallel processing should be used (the default is \code{"no"}). For parallel processing, set to either \code{"snow"} or \code{"multicore"}. See \sQuote{Details}.} \item{ncpus}{integer to specify the number of processes to use in the parallel processing.} \item{cl}{optional cluster to use if \code{parallel="snow"}. If not supplied, a cluster on the local machine is created for the duration of the call.} \item{\dots}{other arguments.} } \details{ The model specified via \code{x} must be a model fitted with either the \code{\link{rma.uni}}, \code{\link{rma.mh}}, or \code{\link{rma.peto}} function. Olkin et al. (2012) proposed the GOSH (graphical display of study heterogeneity) plot, which is based on examining the results of a fixed-effects model in all possible subsets of size \mjseqn{1, \ldots, k} of the \mjseqn{k} studies included in a meta-analysis. In a homogeneous set of studies, the model estimates obtained this way should form a roughly symmetric, contiguous, and unimodal distribution. On the other hand, when the distribution is multimodal, then this suggests the presence of heterogeneity, possibly due to outliers and/or distinct subgroupings of studies. Plotting the estimates against some measure of heterogeneity (e.g., \mjseqn{I^2}, \mjseqn{H^2}, or the \mjseqn{Q}-statistic) can also help to reveal subclusters, which are indicative of heterogeneity. The same type of plot can be produced by first fitting a fixed-effects model with either the \code{\link{rma.uni}} (using \code{method="FE"}), \code{\link{rma.mh}}, or \code{\link{rma.peto}} functions and then passing the fitted model object to the \code{gosh} function and then plotting the results. For models fitted with the \code{\link{rma.uni}} function (which may involve moderators and/or may be random/mixed-effects models), the idea underlying this type of plot can be generalized (Viechtbauer, 2021) by examining the distribution of all model coefficients, plotting them against each other, and against some measure of (residual) heterogeneity (including the estimate of \mjseqn{\tau^2}). 
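As a rough sketch of this generalization (not part of the original documentation), a GOSH plot for a mixed-effects meta-regression model could be produced along the following lines, here using the BCG vaccine dataset with absolute latitude as a moderator and a deliberately small number of random subsets to keep the runtime short: \preformatted{dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res <- rma(yi, vi, mods = ~ ablat, data=dat)
sav <- gosh(res, subsets=1000, progbar=FALSE)
plot(sav, het="tau2")   # scatter plot matrix of tau^2 and the model coefficients}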
Note that for models without moderators, application of the method requires fitting a total of \mjseqn{2^k - 1} models, which could be an excessively large number when \mjseqn{k} is large. For example, for \mjseqn{k=10}, there are only 1023 possible subsets, but for \mjseqn{k=20}, this number already grows to 1048575. For even larger \mjseqn{k}, it may become computationally infeasible to consider all possible subsets. Instead, we can then examine (a sufficiently large number of) random subsets. By default, if the number of possible subsets is \mjseqn{\le 10^6}, the function will consider all possible subsets and otherwise \mjseqn{10^6} random subsets. One can use the \code{subsets} argument to specify a different number of subsets to consider. If \code{subsets} is specified and it is actually larger than the number of possible subsets, then the function automatically only considers the possible subsets and does not use random subsets. On machines with multiple cores, one can try to speed things up by delegating the model fitting to separate worker processes, that is, by setting \code{parallel="snow"} or \code{parallel="multicore"} and \code{ncpus} to some value larger than 1. Parallel processing makes use of the \code{\link[parallel]{parallel}} package, using the \code{\link[parallel]{makePSOCKcluster}} and \code{\link[parallel]{parLapply}} functions when \code{parallel="snow"} or using \code{\link[parallel]{mclapply}} when \code{parallel="multicore"} (the latter only works on Unix/Linux-alikes). With \code{parallel::detectCores()}, one can check on the number of available cores on the local machine. } \value{ An object of class \code{"gosh.rma"}. The object is a list containing the following components: \item{res}{a data frame with the results for each subset (various heterogeneity statistics and the model coefficient(s)).} \item{incl}{a matrix indicating which studies were included in which subset.} \item{\dots}{some additional elements/values.} The results can be printed with the \code{\link{print.gosh.rma}} function and plotted with the \code{\link{plot.gosh.rma}} function. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Olkin, I., Dahabreh, I. J., & Trikalinos, T. A. (2012). GOSH - a graphical display of study heterogeneity. \emph{Research Synthesis Methods}, \bold{3}(3), 214--223. \verb{https://doi.org/10.1002/jrsm.1053} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W. (2021). Model checking in meta-analysis. In C. H. Schmid, T. Stijnen, & I. R. White (Eds.), \emph{Handbook of meta-analysis} (pp. 219--254). Boca Raton, FL: CRC Press. 
\verb{https://doi.org/10.1201/9781315119403} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{influence.rma.uni}}, } \examples{ ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.egger2001) ### meta-analysis of all trials including ISIS-4 using a FE model res <- rma(yi, vi, data=dat, method="FE") ### fit FE model to all possible subsets (65535 models) \dontrun{ sav <- gosh(res, progbar=FALSE) sav ### create GOSH plot ### red points for subsets that include and blue points ### for subsets that exclude study 16 (the ISIS-4 trial) plot(sav, out=16, breaks=100)} } \keyword{methods} metafor/man/dat.bangertdrowns2004.Rd0000644000176200001440000000606614055736403016724 0ustar liggesusers\name{dat.bangertdrowns2004} \docType{data} \alias{dat.bangertdrowns2004} \title{Studies on the Effectiveness of Writing-to-Learn Interventions} \description{Results from 48 studies on the effectiveness of school-based writing-to-learn interventions on academic achievement. \loadmathjax} \usage{dat.bangertdrowns2004} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab study number \cr \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{grade} \tab \code{numeric} \tab grade level (1 = elementary; 2 = middle; 3 = high-school; 4 = college) \cr \bold{length} \tab \code{numeric} \tab treatment length (in weeks) \cr \bold{minutes} \tab \code{numeric} \tab minutes per assignment \cr \bold{wic} \tab \code{numeric} \tab writing tasks were completed in class (0 = no; 1 = yes) \cr \bold{feedback} \tab \code{numeric} \tab feedback on writing was provided (0 = no; 1 = yes) \cr \bold{info} \tab \code{numeric} \tab writing contained informational components (0 = no; 1 = yes) \cr \bold{pers} \tab \code{numeric} \tab writing contained personal components (0 = no; 1 = yes) \cr \bold{imag} \tab \code{numeric} \tab writing contained imaginative components (0 = no; 1 = yes) \cr \bold{meta} \tab \code{numeric} \tab prompts for metacognitive reflection (0 = no; 1 = yes) \cr \bold{subject} \tab \code{character} \tab subject matter \cr \bold{ni} \tab \code{numeric} \tab total sample size of the study \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance } } \details{ In each of the studies included in this meta-analysis, an experimental group (i.e., a group of students that received instruction with increased emphasis on writing tasks) was compared against a control group (i.e., a group of students that received conventional instruction) with respect to some content-related measure of academic achievement (e.g., final grade, an exam/quiz/test score). The outcome measure for this meta-analysis was the standardized mean difference (with positive values indicating a higher mean level of academic achievement in the intervention group). The standardized mean differences given here are bias-corrected and therefore differ slightly from the values reported in the article. Also, since only the total sample size is given in the article, the sampling variances were computed under the assumption that \mjeqn{n_{i1} = n_{i2} = n_i / 2}{n_i1 = n_i2 = n_i / 2}. } \source{ Bangert-Drowns, R. L., Hurley, M. M., & Wilkinson, B. (2004). 
The effects of school-based writing-to-learn interventions on academic achievement: A meta-analysis. \emph{Review of Educational Research}, \bold{74}(1), 29--58. \verb{https://doi.org/10.3102/00346543074001029} } \examples{ ### copy data into 'dat' and examine data dat <- dat.bangertdrowns2004 dat ### fit random-effects model res <- rma(yi, vi, data=dat) res } \keyword{datasets} metafor/man/dat.baskerville2012.Rd0000644000176200001440000000734414055736403016347 0ustar liggesusers\name{dat.baskerville2012} \docType{data} \alias{dat.baskerville2012} \title{Studies on the Effectiveness of Practice Facilitation Interventions} \description{Results from 23 studies on the effectiveness of practice facilitation interventions within the primary care practice setting.} \usage{dat.baskerville2012} \format{The data frame contains the following columns: \tabular{lll}{ \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{score} \tab \code{numeric} \tab quality score (0 to 12 scale) \cr \bold{design} \tab \code{character} \tab study design (cct = controlled clinical trial, rct = randomized clinical trial, crct = cluster randomized clinical trial) \cr \bold{alloconc} \tab \code{numeric} \tab allocation concealed (0 = no, 1 = yes) \cr \bold{blind} \tab \code{numeric} \tab single- or double-blind study (0 = no, 1 = yes) \cr \bold{itt} \tab \code{numeric} \tab intention to treat analysis (0 = no, 1 = yes) \cr \bold{fumonths} \tab \code{numeric} \tab follow-up months \cr \bold{retention} \tab \code{numeric} \tab retention (in percent) \cr \bold{country} \tab \code{character} \tab country where study was conducted \cr \bold{outcomes} \tab \code{numeric} \tab number of outcomes assessed \cr \bold{duration} \tab \code{numeric} \tab duration of intervention \cr \bold{pperf} \tab \code{numeric} \tab practices per facilitator \cr \bold{meetings} \tab \code{numeric} \tab (average) number of meetings \cr \bold{hours} \tab \code{numeric} \tab (average) hours per meeting \cr \bold{tailor} \tab \code{numeric} \tab intervention tailored to the context and needs of the practice (0 = no, 1 = yes) \cr \bold{smd} \tab \code{numeric} \tab standardized mean difference \cr \bold{se} \tab \code{numeric} \tab corresponding standard error } } \details{ Baskerville et al. (2012) describe outreach or practice facilitation as a "multifaceted approach that involves skilled individuals who enable others, through a range of intervention components and approaches, to address the challenges in implementing evidence-based care guidelines within the primary care setting". The studies included in this dataset examined the effectiveness of practice facilitation interventions for improving some relevant evidence-based practice behavior. The effect was quantified in terms of a standardized mean difference, comparing the change (from pre- to post-intervention) in the intervention versus the comparison group (or the difference from baseline in prospective cohort studies). } \source{ Baskerville, N. B., Liddy, C., & Hogg, W. (2012). Systematic review and meta-analysis of practice facilitation within primary care settings. \emph{Annals of Family Medicine}, \bold{10}(1), 63--74. 
\verb{https://doi.org/10.1370/afm.1312} } \examples{ ### copy data into 'dat' and examine data dat <- dat.baskerville2012 dat ### random-effects model res <- rma(smd, sei=se, data=dat, method="DL") print(res, digits=2) ### funnel plot funnel(res, xlab="Standardized Mean Difference", ylim=c(0,0.6)) ### rank and regression tests for funnel plot asymmetry ranktest(res) regtest(res) ### meta-regression analyses examining various potential moderators rma(smd, sei=se, mods = ~ score, data=dat, method="DL") rma(smd, sei=se, mods = ~ alloconc, data=dat, method="DL") rma(smd, sei=se, mods = ~ blind, data=dat, method="DL") rma(smd, sei=se, mods = ~ itt, data=dat, method="DL") rma(smd, sei=se, mods = ~ duration, data=dat, method="DL") rma(smd, sei=se, mods = ~ tailor, data=dat, method="DL") rma(smd, sei=se, mods = ~ pperf, data=dat, method="DL") rma(smd, sei=se, mods = ~ I(meetings * hours), data=dat, method="DL") } \keyword{datasets} metafor/man/permutest.Rd0000644000176200001440000002727114055736403015021 0ustar liggesusers\name{permutest} \alias{permutest} \alias{permutest.rma.uni} \title{Permutation Tests for 'rma.uni' Objects} \description{ The function carries out permutation tests for objects of class \code{"rma.uni"}. \loadmathjax } \usage{ permutest(x, \dots) \method{permutest}{rma.uni}(x, exact=FALSE, iter=1000, permci=FALSE, progbar=TRUE, retpermdist=FALSE, digits, control, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}.} \item{exact}{logical to specify whether an exact permutation test should be carried out or not (the default is \code{FALSE}). See \sQuote{Details}.} \item{iter}{integer to specify the number of iterations for the permutation test when not doing an exact test (the default is \code{1000} iterations).} \item{permci}{logical to specify whether permutation-based CIs should also be calculated (the default is \code{FALSE}). Can also be a vector of indices to specify for which coefficients a permutation-based CI should be obtained.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{TRUE}).} \item{retpermdist}{logical to specify whether the permutation distributions of the test statistics should be returned (the default is \code{FALSE}).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{control}{list of control values for numerical comparisons (\code{comptol}) and for \code{uniroot} (i.e., \code{tol} and \code{maxiter}). The latter is only relevant when \code{permci=TRUE}. See \sQuote{Note}.} \item{\dots}{other arguments.} } \details{ For models without moderators, the permutation test is carried out by permuting the signs of the observed effect sizes or outcomes. The (two-sided) p-value of the permutation test is then equal to the proportion of times that the absolute value of the test statistic under the permuted data is as extreme or more extreme than under the actually observed data. See Follmann and Proschan (1999) for more details. For models with moderators, the permutation test is carried out by permuting the rows of the model matrix (i.e., \mjseqn{X}). The (two-sided) p-value for a particular model coefficient is then equal to the proportion of times that the absolute value of the test statistic for the coefficient under the permuted data is as extreme or more extreme than under the actually observed data. 
Similarly, for the omnibus test, the p-value is the proportion of times that the test statistic for the omnibus test is as extreme or more extreme than the actually observed one. See Higgins and Thompson (2004) and Viechtbauer et al. (2015) for more details. If \code{exact=TRUE}, the function will try to carry out an exact permutation test. An exact permutation test requires fitting the model to each possible permutation once. However, the number of possible permutations increases rapidly with the number of outcomes/studies (i.e., \mjseqn{k}). For models without moderators, there are \mjseqn{2^k} possible permutations of the signs. Therefore, for \mjseqn{k=5}, there are 32 possible permutations, for \mjseqn{k=10}, there are already 1024, and for \mjseqn{k=20}, there are over one million permutations of the signs. For models with moderators, the increase in the number of possible permutations may be even more severe. The total number of possible permutations of the model matrix is \mjseqn{k!}. Therefore, for \mjseqn{k=5}, there are 120 possible permutations, for \mjseqn{k=10}, there are 3,628,800, and for \mjseqn{k=20}, there are over \mjeqn{10^{18}}{10^18} permutations of the model matrix. Therefore, going through all possible permutations may become infeasible. Instead of using an exact permutation test, one can set \code{exact=FALSE} (which is also the default). In that case, the function approximates the exact permutation-based p-value(s) by going through a smaller number (as specified by the \code{iter} argument) of \emph{random} permutations. Therefore, running the function twice on the same data can yield (slightly) different p-values. Setting \code{iter} sufficiently large ensures that the results become stable. Note that if \code{exact=FALSE} and \code{iter} is actually larger than the number of iterations required for an exact permutation test, then an exact test will be carried out. For models with moderators, the exact permutation test actually only requires fitting the model to each \emph{unique} permutation of the model matrix. The number of unique permutations will be smaller than \mjseqn{k!} when the model matrix contains recurring rows. This may be the case when only including categorical moderators (i.e., factors) in the model or when any quantitative moderators included in the model can only take on a small number of unique values. When \code{exact=TRUE}, the function therefore uses an algorithm to restrict the test to only the unique permutations of the model matrix, which may make the use of the exact test feasible even when \mjseqn{k} is large. When using random permutations, the function ensures that the very first permutation will always correspond to the original data. This avoids p-values equal to 0. When \code{permci=TRUE}, the function also tries to obtain permutation-based CIs of the model coefficient(s). This is done by shifting the observed effect sizes or outcomes by some amount and finding the most extreme values for this amount for which the permutation-based test would just lead to non-rejection. This is computationally expensive and may take a long time to complete. For models with moderators, one can also set \code{permci} to a vector of indices to specify for which coefficient(s) a permutation-based CI should be obtained. When the algorithm fails to determine a particular CI bound, it will be shown as \code{NA} in the output. } \value{ An object of class \code{"permutest.rma.uni"}. 
The object is a list containing the following components: \item{pval}{p-value(s) based on the permutation test.} \item{QMp}{p-value for the omnibus test of moderators based on the permutation test.} \item{zval.perm}{values of the test statistics of the coefficients under the various permutations (only when \code{retpermdist=TRUE}).} \item{b.perm}{the model coefficients under the various permutations (only when \code{retpermdist=TRUE}).} \item{QM.perm}{the test statistic of the omnibus test of moderators under the various permutations (only when \code{retpermdist=TRUE}).} \item{ci.lb}{lower bound of the confidence intervals for the coefficients (permutation-based when \code{permci=TRUE}).} \item{ci.ub}{upper bound of the confidence intervals for the coefficients (permutation-based when \code{permci=TRUE}).} \item{\dots}{some additional elements/values are passed on.} The results are formatted and printed with the \code{\link{print.permutest.rma.uni}} function. One can also use \code{\link{coef.permutest.rma.uni}} to obtain the table with the model coefficients, corresponding standard errors, test statistics, p-values, and confidence interval bounds. } \note{ The p-values obtained with permutation tests cannot reach conventional levels of statistical significance (i.e., \mjseqn{p \le .05}) when \mjseqn{k} is very small. In particular, for models without moderators, the smallest possible (two-sided) p-value is .0625 when \mjseqn{k=5} and .03125 when \mjseqn{k=6}. Therefore, the permutation test is only able to reject the null hypothesis at \mjseqn{\alpha=.05} when \mjseqn{k} is at least equal to 6. For models with moderators, the smallest possible (two-sided) p-value for a particular model coefficient is .0833 when \mjseqn{k=4} and .0167 when \mjseqn{k=5} (assuming that each row in the model matrix is unique). Therefore, the permutation test is only able to reject the null hypothesis at \mjseqn{\alpha=.05} when \mjseqn{k} is at least equal to 5. Consequently, permutation-based CIs can also only be obtained when \mjseqn{k} is sufficiently large. When the number of permutations required for the exact test is so large as to be essentially indistinguishable from infinity (e.g., \code{factorial(200)}), the function will terminate with an error. Determining whether a test statistic under the permuted data is as extreme or more extreme than under the actually observed data requires making \code{>=} or \code{<=} comparisons. To avoid problems due to the finite precision with which computers generally represent numbers, the function uses a numerical tolerance (control argument \code{comptol}, which is set equal to \code{.Machine$double.eps^0.5} by default) when making such comparisons (e.g., instead of \code{sqrt(3)^2 - 3 >= 0}, which may evaluate to \code{FALSE}, we can use \code{sqrt(3)^2 - 3 >= 0 - .Machine$double.eps^0.5}, which should evaluate to \code{TRUE}). When obtaining permutation-based CIs, the function makes use of \code{\link{uniroot}}. By default, the desired accuracy is set equal to \code{.Machine$double.eps^0.25} and the maximum number of iterations to \code{100}. The desired accuracy and the maximum number of iterations can be adjusted with the \code{control} argument (i.e., \code{control=list(tol=value, maxiter=value)}). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Follmann, D. A., & Proschan, M. A. (1999). Valid inference in random effects meta-analysis. \emph{Biometrics}, \bold{55}(3), 732--737. 
\verb{https://doi.org/10.1111/j.0006-341x.1999.00732.x} Good, P. I. (2009). \emph{Permutation, parametric, and bootstrap tests of hypotheses} (3rd ed.). New York: Springer. Higgins, J. P. T., & Thompson, S. G. (2004). Controlling the risk of spurious findings from meta-regression. \emph{Statistics in Medicine}, \bold{23}(11), 1663--1682. \verb{https://doi.org/10.1002/sim.1752} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W., \enc{López-López}{Lopez-Lopez}, J. A., \enc{Sánchez-Meca}{Sanchez-Meca}, J., & \enc{Marín-Martínez}{Marin-Martinez}, F. (2015). A comparison of procedures to test for moderators in mixed-effects meta-regression models. \emph{Psychological Methods}, \bold{20}(3), 360--374. \verb{https://doi.org/10.1037/met0000023} } \seealso{ \code{\link{rma.uni}}, \code{\link{print.permutest.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### random-effects model res <- rma(yi, vi, data=dat) res ### permutation test (approximate and exact) \dontrun{ permutest(res) permutest(res, exact=TRUE)} ### mixed-effects model with two moderators (absolute latitude and publication year) res <- rma(yi, vi, mods = ~ ablat + year, data=dat) res ### permutation test (approximate only; exact not feasible) \dontrun{ permres <- permutest(res, iter=10000, retpermdist=TRUE) permres ### histogram of permutation distribution for absolute latitude ### dashed vertical line: the observed value of the test statistic ### red curve: standard normal density ### blue curve: kernel density estimate of the permutation distribution ### note that the tail area under the permutation distribution is larger ### than under a standard normal density (hence, the larger p-value) hist(permres$zval.perm[,2], breaks=120, freq=FALSE, xlim=c(-5,5), ylim=c(0,.4), main="Permutation Distribution", xlab="Value of Test Statistic", col="gray90") abline(v=res$zval[2], lwd=2, lty="dashed") abline(v=0, lwd=2) curve(dnorm, from=-5, to=5, add=TRUE, lwd=2, col=rgb(1,0,0,alpha=.7)) lines(density(permres$zval.perm[,2]), lwd=2, col=rgb(0,0,1,alpha=.7)) } } \keyword{models} metafor/man/escalc.Rd0000644000176200001440000017542514055736403014220 0ustar liggesusers\name{escalc} \alias{escalc} \title{Calculate Effect Sizes and Outcome Measures} \description{ The function can be used to calculate various effect sizes or outcome measures (and the corresponding sampling variances) that are commonly used in meta-analyses. \loadmathjax } \usage{ escalc(measure, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, r2i, ni, yi, vi, sei, data, slab, subset, include, add=1/2, to="only0", drop00=FALSE, vtype="LS", var.names=c("yi","vi"), add.measure=FALSE, append=TRUE, replace=TRUE, digits, \dots) } \arguments{ \item{measure}{a character string to specify which effect size or outcome measure should be calculated.
See \sQuote{Details} for possible options and how the data needed to compute the selected effect size or outcome measure should then be specified.} \item{ai}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper left cell).} \item{bi}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper right cell).} \item{ci}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower left cell).} \item{di}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower right cell).} \item{n1i}{vector to specify the group sizes or row totals (first group/row).} \item{n2i}{vector to specify the group sizes or row totals (second group/row).} \item{x1i}{vector to specify the number of events (first group).} \item{x2i}{vector to specify the number of events (second group).} \item{t1i}{vector to specify the total person-times (first group).} \item{t2i}{vector to specify the total person-times (second group).} \item{m1i}{vector to specify the means (first group or time point).} \item{m2i}{vector to specify the means (second group or time point).} \item{sd1i}{vector to specify the standard deviations (first group or time point).} \item{sd2i}{vector to specify the standard deviations (second group or time point).} \item{xi}{vector to specify the frequencies of the event of interest.} \item{mi}{vector to specify the frequencies of the complement of the event of interest or the group means.} \item{ri}{vector to specify the raw correlation coefficients.} \item{ti}{vector to specify the total person-times.} \item{sdi}{vector to specify the standard deviations.} \item{r2i}{vector to specify the \mjseqn{R^2} values.} \item{ni}{vector to specify the sample/group sizes.} \item{yi}{vector to specify the observed effect sizes or outcomes.} \item{vi}{vector to specify the corresponding sampling variances.} \item{sei}{vector to specify the corresponding standard errors.} \item{data}{optional data frame containing the variables given to the arguments above.} \item{slab}{optional vector with labels for the studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that will be included in the data frame returned by the function.} \item{include}{optional (logical or numeric) vector to specify the subset of studies for which the measure should be calculated. See the \sQuote{Value} section for more details.} \item{add}{a non-negative number to specify the amount to add to zero cells, counts, or frequencies. See \sQuote{Details}.} \item{to}{a character string to specify when the values under \code{add} should be added (either \code{"all"}, \code{"only0"}, \code{"if0all"}, or \code{"none"}). See \sQuote{Details}.} \item{drop00}{logical to specify whether studies with no cases/events (or only cases) in both groups should be dropped when calculating the observed effect sizes or outcomes. See \sQuote{Details}.} \item{vtype}{a character string to specify the type of sampling variances to calculate. See \sQuote{Details}.} \item{var.names}{character string with two elements to specify the name of the variable for the observed effect sizes or outcomes and the name of the variable for the corresponding sampling variances (the defaults are \code{"yi"} and \code{"vi"}).} \item{add.measure}{logical to specify whether a variable should be added to the data frame (with default name \code{"measure"}) that indicates the type of outcome measure computed. 
When using this option, \code{var.names} can have a third element to change this variable name.} \item{append}{logical to specify whether the data frame provided via the \code{data} argument should be returned together with the observed effect sizes or outcomes and corresponding sampling variances (the default is \code{TRUE}).} \item{replace}{logical to specify whether existing values for \code{yi} and \code{vi} in the data frame should be replaced or not. Only relevant when \code{append=TRUE} and the data frame already contains the \code{yi} and \code{vi} variables. If \code{replace=TRUE} (the default), all of the existing values will be overwritten. If \code{replace=FALSE}, only \code{NA} values will be replaced. See the \sQuote{Value} section for more details.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4. Note that the values are stored without rounding in the returned object.} \item{\dots}{other arguments.} } \details{ Before a meta-analysis can be conducted, the relevant results from each study must be quantified in such a way that the resulting values can be further aggregated and compared. Depending on (a) the goals of the meta-analysis, (b) the design and types of studies included, and (c) the information provided therein, one of the various effect size or outcome measures described below may be appropriate for the meta-analysis and can be computed with the \code{escalc} function. The \code{measure} argument is a character string to specify the outcome measure that should be calculated (see below for the various options), arguments \code{ai} through \code{ni} are then used to specify the information needed to calculate the various measures (depending on the chosen outcome measure, different arguments need to be specified), and \code{data} can be used to specify a data frame containing the variables given to the previous arguments. The \code{add}, \code{to}, and \code{drop00} arguments may be needed when dealing with frequency or count data that may need special handling when some of the frequencies or counts are equal to zero (see below for details). Finally, the \code{vtype} argument is used to specify how the sampling variances should be estimated (again, see below for details). To provide a structure to the various effect size or outcome measures that can be calculated with the \code{escalc} function, we can distinguish between measures that are used to: \itemize{ \item contrast two independent (either experimentally created or naturally occurring) groups, \item describe the direction and strength of the association between two variables, \item summarize some characteristic or attribute of individual groups, or \item quantify change within a single group or the difference between two matched pairs samples. } Furthermore, where appropriate, we can further distinguish between measures that are applicable when the characteristic, response, or dependent variable assessed in the individual studies is: \itemize{ \item a dichotomous (binary) variable (e.g., remission versus no remission), \item a count of events per time unit (e.g., number of migraines per year), \item a quantitative variable (e.g., amount of depression as assessed by a rating scale). } \subsection{Outcome Measures for Two-Group Comparisons}{ In many meta-analyses, the goal is to synthesize the results from studies that compare or contrast two groups. 
The groups may be experimentally defined (e.g., a treatment and a control group created via random assignment) or may occur naturally (e.g., men and women, employees working under high- versus low-stress conditions, people exposed to some environmental risk factor versus those not exposed). \subsection{Measures for Dichotomous Variables}{ In various fields (such as the health and medical sciences), the response variable measured is often dichotomous (binary), so that the data from a study comparing two different groups can be expressed in terms of a \mjeqn{2 \times 2}{2x2} table, such as: \tabular{lccc}{ \tab outcome 1 \tab outcome 2 \tab total \cr group 1 \tab \code{ai} \tab \code{bi} \tab \code{n1i} \cr group 2 \tab \code{ci} \tab \code{di} \tab \code{n2i} } where \code{ai}, \code{bi}, \code{ci}, and \code{di} denote the cell frequencies (i.e., the number of people falling into a particular category) and \code{n1i} and \code{n2i} are the row totals (i.e., the group sizes). For example, in a set of randomized clinical trials, group 1 and group 2 may refer to the treatment and placebo/control group, respectively, with outcome 1 denoting some event of interest (e.g., death, complications, failure to improve under the treatment) and outcome 2 its complement. Similarly, in a set of cohort studies, group 1 and group 2 may denote those who engage in and those who do not engage in a potentially harmful behavior (e.g., smoking), with outcome 1 denoting the development of a particular disease (e.g., lung cancer) during the follow-up period. Finally, in a set of case-control studies, group 1 and group 2 may refer to those with the disease (i.e., cases) and those free of the disease (i.e., controls), with outcome 1 denoting, for example, exposure to some environmental risk factor in the past and outcome 2 non-exposure. Note that in all of these examples, the stratified sampling scheme fixes the row totals (i.e., the group sizes) by design. A meta-analysis of studies reporting results in terms of \mjeqn{2 \times 2}{2x2} tables can be based on one of several different outcome measures, including the risk ratio (also called the relative risk), the odds ratio, the risk difference, and the arcsine square root transformed risk difference (e.g., Fleiss & Berlin, 2009, \enc{Rücker}{Ruecker} et al., 2009). For any of these outcome measures, one needs to specify the cell frequencies via the \code{ai}, \code{bi}, \code{ci}, and \code{di} arguments (or alternatively, one can use the \code{ai}, \code{ci}, \code{n1i}, and \code{n2i} arguments). The options for the \code{measure} argument are then: \itemize{ \item \code{"RR"} for the \emph{log risk ratio}, \item \code{"OR"} for the \emph{log odds ratio}, \item \code{"RD"} for the \emph{risk difference}, \item \code{"AS"} for the \emph{arcsine square root transformed risk difference} (\enc{Rücker}{Ruecker} et al., 2009), \item \code{"PETO"} for the \emph{log odds ratio} estimated with Peto's method (Yusuf et al., 1985). } Note that the log is taken of the risk ratio and the odds ratio, which makes these outcome measures symmetric around 0 and yields corresponding sampling distributions that are closer to normality. Also, when multiplied by 2, the arcsine square root transformed risk difference is actually identical to Cohen's h (Cohen, 1988). Cell entries with a zero count can be problematic, especially for the risk ratio and the odds ratio. Adding a small constant to the cells of the \mjeqn{2 \times 2}{2x2} tables is a common solution to this problem. 
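As a small made-up illustration of this adjustment (the exact behavior is controlled by the \code{add} and \code{to} arguments described next), a table with a zero cell could be handled as follows: \preformatted{### hypothetical 2x2 table with a zero cell; under the default settings, 1/2 is
### added to each cell of this table before the log odds ratio (yi) and its
### sampling variance (vi) are computed
library(metafor)
escalc(measure="OR", ai=0, bi=25, ci=8, di=17)}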
When \code{to="only0"} (the default), the value of \code{add} (the default is \code{1/2}; but see \sQuote{Note}) is added to each cell of those \mjeqn{2 \times 2}{2x2} tables with at least one cell equal to 0. When \code{to="all"}, the value of \code{add} is added to each cell of all \mjeqn{2 \times 2}{2x2} tables. When \code{to="if0all"}, the value of \code{add} is added to each cell of all \mjeqn{2 \times 2}{2x2} tables, but only when there is at least one \mjeqn{2 \times 2}{2x2} table with a zero cell. Setting \code{to="none"} or \code{add=0} has the same effect: No adjustment to the observed table frequencies is made. Depending on the outcome measure and the data, this may lead to division by zero inside of the function (when this occurs, the resulting value is recoded to \code{NA}). Also, studies where \code{ai=ci=0} or \code{bi=di=0} may be considered to be uninformative about the size of the effect and dropping such studies has sometimes been recommended (Higgins et al., 2019). This can be done by setting \code{drop00=TRUE}. The values for such studies will then be set to \code{NA}. Datasets corresponding to data of this type are provided in \code{\link{dat.bcg}}, \code{\link{dat.collins1985a}}, \code{\link{dat.collins1985b}}, \code{\link{dat.egger2001}}, \code{\link{dat.hine1989}}, \code{\link{dat.laopaiboon2015}}, \code{\link{dat.lee2004}}, \code{\link{dat.li2007}}, \code{\link{dat.linde2005}}, \code{\link{dat.nielweise2007}}, and \code{\link{dat.yusuf1985}}. Assuming that the dichotomous outcome is actually a dichotomized version of the responses on an underlying quantitative scale, it is also possible to estimate the standardized mean difference based on \mjeqn{2 \times 2}{2x2} table data, using either the probit transformed risk difference or a transformation of the odds ratio (e.g., Cox & Snell, 1989; Chinn, 2000; Hasselblad & Hedges, 1995; \enc{Sánchez-Meca}{Sanchez-Meca} et al., 2003). The options for the \code{measure} argument are then: \itemize{ \item \code{"PBIT"} for the \emph{probit transformed risk difference} as an estimate of the standardized mean difference, \item \code{"OR2DN"} for the \emph{transformed odds ratio} as an estimate of the standardized mean difference (assuming normal distributions), \item \code{"OR2DL"} for the \emph{transformed odds ratio} as an estimate of the standardized mean difference (assuming logistic distributions). } The probit transformation assumes that the responses on the underlying quantitative scale are normally distributed. There are two versions of the odds ratio transformation, the first also assuming normal distributions within the two groups, while the second assumes that the responses within groups follow logistic distributions. A dataset corresponding to data of this type is provided in \code{\link{dat.gibson2002}}. } \subsection{Measures for Event Counts}{ In medical and epidemiological studies comparing two different groups (e.g., treated versus untreated patients, exposed versus unexposed individuals), results are sometimes reported in terms of event counts (i.e., the number of events, such as strokes or myocardial infarctions) over a certain period of time. Data of this type are also referred to as \sQuote{person-time data}. 
Assume that the studies report data in the form: \tabular{lcc}{ \tab number of events \tab total person-time \cr group 1 \tab \code{x1i} \tab \code{t1i} \cr group 2 \tab \code{x2i} \tab \code{t2i} } where \code{x1i} and \code{x2i} denote the number of events in the first and the second group, respectively, and \code{t1i} and \code{t2i} the corresponding total person-times at risk. Often, the person-time is measured in years, so that \code{t1i} and \code{t2i} denote the total number of follow-up years in the two groups. This form of data is fundamentally different from what was described in the previous section, since the total follow-up time may differ even for groups of the same size and the individuals studied may experience the event of interest multiple times. Hence, different outcome measures than the ones described in the previous section need to be considered when data are reported in this format. These include the incidence rate ratio, the incidence rate difference, and the square root transformed incidence rate difference (Bagos & Nikolopoulos, 2009; Rothman et al., 2008). For any of these outcome measures, one needs to specify the total number of events via the \code{x1i} and \code{x2i} arguments and the corresponding total person-time values via the \code{t1i} and \code{t2i} arguments. The options for the \code{measure} argument are then: \itemize{ \item \code{"IRR"} for the \emph{log incidence rate ratio}, \item \code{"IRD"} for the \emph{incidence rate difference}, \item \code{"IRSD"} for the \emph{square root transformed incidence rate difference}. } Note that the log is taken of the incidence rate ratio, which makes this outcome measure symmetric around 0 and yields a corresponding sampling distribution that is closer to normality. Studies with zero events in one or both groups can be problematic, especially for the incidence rate ratio. Adding a small constant to the number of events is a common solution to this problem. When \code{to="only0"} (the default), the value of \code{add} (the default is \code{1/2}; but see \sQuote{Note}) is added to \code{x1i} and \code{x2i} only in the studies that have zero events in one or both groups. When \code{to="all"}, the value of \code{add} is added to \code{x1i} and \code{x2i} in all studies. When \code{to="if0all"}, the value of \code{add} is added to \code{x1i} and \code{x2i} in all studies, but only when there is at least one study with zero events in one or both groups. Setting \code{to="none"} or \code{add=0} has the same effect: No adjustment to the observed number of events is made. Depending on the outcome measure and the data, this may lead to division by zero inside of the function (when this occurs, the resulting value is recoded to \code{NA}). Like for \mjeqn{2 \times 2}{2x2} table data, studies where \code{x1i=x2i=0} may be considered to be uninformative about the size of the effect and dropping such studies has sometimes been recommended. This can be done by setting \code{drop00=TRUE}. The values for such studies will then be set to \code{NA}. Datasets corresponding to data of this type are provided in \code{\link{dat.hart1999}} and \code{\link{dat.nielweise2008}}. } \subsection{Measures for Quantitative Variables}{ When the response or dependent variable assessed in the individual studies is measured on some quantitative scale, it is customary to report certain summary statistics, such as the mean and standard deviation of the observations. 
The data layout for a study comparing two groups with respect to such a variable is then of the form: \tabular{lccc}{ \tab mean \tab standard deviation \tab group size \cr group 1 \tab \code{m1i} \tab \code{sd1i} \tab \code{n1i} \cr group 2 \tab \code{m2i} \tab \code{sd2i} \tab \code{n2i} } where \code{m1i} and \code{m2i} are the observed means of the two groups, \code{sd1i} and \code{sd2i} are the observed standard deviations, and \code{n1i} and \code{n2i} denote the number of individuals in each group. Again, the two groups may be experimentally created (e.g., a treatment and control group based on random assignment) or naturally occurring (e.g., men and women). In either case, the raw mean difference, the standardized mean difference, and the (log transformed) ratio of means (also called log response ratio) are useful outcome measures when meta-analyzing studies of this type. The options for the \code{measure} argument are then: \itemize{ \item \code{"MD"} for the \emph{raw mean difference} (e.g., Borenstein, 2009), \item \code{"SMD"} for the \emph{standardized mean difference} (Hedges, 1981), \item \code{"SMDH"} for the \emph{standardized mean difference} with heteroscedastic population variances in the two groups (Bonett, 2008, 2009), \item \code{"ROM"} for the \emph{log transformed ratio of means} (Hedges et al., 1999; Lajeunesse, 2011). } For \code{measure="ROM"}, the log is taken of the ratio of means, which makes this outcome measure symmetric around 0 and yields a corresponding sampling distribution that is closer to normality. Hence, this measure cannot be computed when \code{m1i} and \code{m2i} have opposite signs (i.e., it is meant to be used for ratio scale measurements, where both means should be positive anyway). For \code{measure="SMD"}, the positive bias in the standardized mean difference is automatically corrected for within the function, yielding Hedges' g (Hedges, 1981). Similarly, the same bias correction is applied for \code{measure="SMDH"} (Bonett, 2009). For \code{measure="MD"}, one can choose between \code{vtype="LS"} (the default) and \code{vtype="HO"}. The former computes the sampling variances without assuming homoscedasticity (i.e., that the true variances of the measurements are the same in group 1 and group 2 within each study), while the latter assumes homoscedasticity (equations 12.5 and 12.3 in Borenstein, 2009, respectively). For \code{measure="SMD"}, one can choose between \code{vtype="LS"} (the default) for the usual large-sample approximation to compute the sampling variances (equation 8 in Hedges, 1982), \code{vtype="UB"} to compute unbiased estimates of the sampling variances (equation 9 in Hedges, 1983), \code{vtype="LS2"} to compute the sampling variances as described in Borenstein (2009) (i.e., equation 12.17), and \code{vtype="AV"} to compute the sampling variances with the usual large-sample approximation but plugging the sample-size weighted average of the Hedges' g values into the equation. 
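As a brief illustration of these measures (a sketch using the \code{\link{dat.normand1999}} dataset, which is also mentioned below), standardized mean differences and the corresponding sampling variances could be computed with: \preformatted{### compute standardized mean differences (Hedges' g) and sampling variances
### from the group means, standard deviations, and sample sizes
library(metafor)
dat <- escalc(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i,
                             m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat.normand1999)
head(dat)}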
For \code{measure="ROM"}, one can choose between \code{vtype="LS"} (the default) for the usual large-sample approximation to compute the sampling variances (equation 1 in Hedges et al., 1999), \code{vtype="HO"} to compute the sampling variances assuming homoscedasticity (the unnumbered equation after equation 1 in Hedges et al., 1999), \code{vtype="AV"} to compute the sampling variances assuming homoscedasticity of the coefficient of variation within each group across studies, and \code{vtype="AVHO"} to compute the sampling variances assuming homoscedasticity of the coefficient of variation for both groups across studies. Datasets corresponding to data of this type are provided in \code{\link{dat.normand1999}} and \code{\link{dat.curtis1998}}. It is also possible to transform standardized mean differences into log odds ratios (e.g., Cox & Snell, 1989; Chinn, 2000; Hasselblad & Hedges, 1995; \enc{Sánchez-Meca}{Sanchez-Meca} et al., 2003). The options for the \code{measure} argument are then: \itemize{ \item \code{"D2ORN"} for the \emph{transformed standardized mean difference} as an estimate of the log odds ratio (assuming normal distributions), \item \code{"D2ORL"} for the \emph{transformed standardized mean difference} as an estimate of the log odds ratio (assuming logistic distributions). } Both of these transformations provide an estimate of the log odds ratio, the first assuming that the responses within the two groups are normally distributed, while the second assumes that the responses follow logistic distributions. A dataset illustrating the combined analysis of standardized mean differences and probit transformed risk differences is provided in \code{\link{dat.gibson2002}}. Finally, interest may also be focused on differences between the two groups with respect to their variability. Here, the (log transformed) ratio of the coefficient of variation of the two groups (also called the coefficient of variation ratio) can be a useful measure (Nakagawa et al., 2015). If focus is solely on the variability of the measurements within the two groups, then the (log transformed) ratio of the standard deviations (also called the variability ratio) can be used (Nakagawa et al., 2015). For the latter, one only needs to specify \code{sd1i}, \code{sd2i}, \code{n1i}, and \code{n2i}. The options for the \code{measure} argument are: \itemize{ \item \code{"CVR"} for the \emph{log transformed coefficient of variation ratio}, \item \code{"VR"} for the \emph{log transformed variability ratio}. } Note that a slight bias correction is applied for both of these measures (Nakagawa et al., 2015). Also, the sampling variance for \code{measure="CVR"} is computed as given by equation 12 in Nakagawa et al. (2015), but without the \sQuote{\mjseqn{-2 \rho \ldots}} terms, since for normally distributed data (which we assume here) the mean and variance (and transformations thereof) are independent. } } \subsection{Outcome Measures for Variable Association}{ Meta-analyses are often used to synthesize studies that examine the direction and strength of the association between two variables measured concurrently and/or without manipulation by experimenters. In this section, a variety of outcome measures will be discussed that may be suitable for a meta-analyses with this purpose. We can distinguish between measures that are applicable when both variables are measured on quantitative scales, when both variables measured are dichotomous, and when the two variables are of mixed types. 
\subsection{Measures for Two Quantitative Variables}{ The (Pearson or product-moment) correlation coefficient quantifies the direction and strength of the (linear) relationship between two quantitative variables and is therefore frequently used as the outcome measure for meta-analyses. Two alternative measures are a bias-corrected version of the correlation coefficient and Fisher's r-to-z transformed correlation coefficient. For these measures, one needs to specify \code{ri}, the vector with the raw correlation coefficients, and \code{ni}, the corresponding sample sizes. The options for the \code{measure} argument are then: \itemize{ \item \code{"COR"} for the \emph{raw correlation coefficient}, \item \code{"UCOR"} for the \emph{raw correlation coefficient} corrected for its slight negative bias (based on equation 2.3 in Olkin & Pratt, 1958), \item \code{"ZCOR"} for \emph{Fisher's r-to-z transformed correlation coefficient} (Fisher, 1921). } For \code{measure="COR"} and \code{measure="UCOR"}, one can choose between \code{vtype="LS"} (the default) for the usual large-sample approximation to compute the sampling variances (i.e., plugging the (biased-corrected) correlation coefficients into equation 12.27 in Borenstein, 2009), \code{vtype="UB"} to compute unbiased estimates of the sampling variances (see Hedges, 1989, but using the exact equation instead of the approximation), and \code{vtype="AV"} to compute the sampling variances with the usual large-sample approximation but plugging the sample-size weighted average of the (bias-corrected) correlation coefficients into the equation. Datasets corresponding to data of this type are provided in \code{\link{dat.mcdaniel1994}} and \code{\link{dat.molloy2014}}. For meta-analyses involving multiple correlations extracted from the same sample, see also the \code{\link{rcalc}} function. } \subsection{Measures for Two Dichotomous Variables}{ When the goal of a meta-analysis is to examine the relationship between two dichotomous variables, the data for each study can again be presented in the form of a \mjeqn{2 \times 2}{2x2} table, except that there may not be a clear distinction between the grouping variable and the outcome variable. Moreover, the table may be a result of cross-sectional (i.e., multinomial) sampling, where none of the table margins (except the total sample size) are fixed by the study design. The phi coefficient and the odds ratio are commonly used measures of association for \mjeqn{2 \times 2}{2x2} table data (e.g., Fleiss & Berlin, 2009). The latter is particularly advantageous, as it is directly comparable to values obtained from stratified sampling (as described earlier). Yule's Q and Yule's Y (Yule, 1912) are additional measures of association for \mjeqn{2 \times 2}{2x2} table data (although they are not typically used in meta-analyses). Finally, assuming that the two dichotomous variables are actually dichotomized versions of the responses on two underlying quantitative scales (and assuming that the two variables follow a bivariate normal distribution), it is also possible to estimate the correlation between the two variables using the tetrachoric correlation coefficient (Pearson, 1900; Kirk, 1973). For any of these outcome measures, one needs to specify the cell frequencies via the \code{ai}, \code{bi}, \code{ci}, and \code{di} arguments (or alternatively, one can use the \code{ai}, \code{ci}, \code{n1i}, and \code{n2i} arguments). 
The options for the \code{measure} argument are then: \itemize{ \item \code{"OR"} for the \emph{log odds ratio}, \item \code{"PHI"} for the \emph{phi coefficient}, \item \code{"YUQ"} for \emph{Yule's Q} (Yule, 1912), \item \code{"YUY"} for \emph{Yule's Y} (Yule, 1912), \item \code{"RTET"} for the \emph{tetrachoric correlation coefficient}. } Tables with one or more zero counts are handled as described earlier. For \code{measure="PHI"}, one must indicate via \code{vtype="ST"} or \code{vtype="CS"} whether the data for the studies were obtained using stratified or cross-sectional (i.e., multinomial) sampling, respectively (it is also possible to specify an entire vector for the \code{vtype} argument in case the sampling scheme differed for the various studies). A dataset corresponding to data of this type is provided in \code{\link{dat.bourassa1996}}. } \subsection{Measures for Mixed Variable Types}{ Finally, we can consider outcome measures that can be used to describe the relationship between two variables, where one variable is dichotomous and the other variable measures some quantitative characteristic. In that case, it is likely that study authors again report summary statistics, such as the mean and standard deviation of the measurements within the two groups (defined by the dichotomous variable). Based on this information, one can compute the point-biserial correlation coefficient (Tate, 1954) as a measure of association between the two variables. If the dichotomous variable is actually a dichotomized version of the responses on an underlying quantitative scale (and assuming that the two variables follow a bivariate normal distribution), it is also possible to estimate the correlation between the two variables using the biserial correlation coefficient (Pearson, 1909; Soper, 1914; Jacobs & Viechtbauer, 2017). Here, one again needs to specify \code{m1i} and \code{m2i} for the observed means of the two groups, \code{sd1i} and \code{sd2i} for the observed standard deviations, and \code{n1i} and \code{n2i} for the number of individuals in each group. The options for the \code{measure} argument are then: \itemize{ \item \code{"RPB"} for the \emph{point-biserial correlation coefficient}, \item \code{"RBIS"} for the \emph{biserial correlation coefficient}. } For \code{measure="RPB"}, one must indicate via \code{vtype="ST"} or \code{vtype="CS"} whether the data for the studies were obtained using stratified or cross-sectional (i.e., multinomial) sampling, respectively (it is also possible to specify an entire vector for the \code{vtype} argument in case the sampling scheme differed for the various studies). } } \subsection{Outcome Measures for Individual Groups}{ In this section, outcome measures will be described which may be useful when the goal of a meta-analysis is to synthesize studies that characterize some property of individual groups. We will again distinguish between measures that are applicable when the characteristic of interest is a dichotomous variable, when the characteristic represents an event count, or when the characteristic assessed is a quantitative variable. \subsection{Measures for Dichotomous Variables}{ A meta-analysis may be conducted to aggregate studies that provide data about individual groups with respect to a dichotomous dependent variable. Here, one needs to specify \code{xi} and \code{ni}, denoting the number of individuals experiencing the event of interest and the total number of individuals within each study, respectively. 
Instead of specifying \code{ni}, one can use \code{mi} to specify the number of individuals that do not experience the event of interest. The options for the \code{measure} argument are then: \itemize{ \item \code{"PR"} for the \emph{raw proportion}, \item \code{"PLN"} for the \emph{log transformed proportion}, \item \code{"PLO"} for the \emph{logit transformed proportion} (i.e., log odds), \item \code{"PAS"} for the \emph{arcsine square root transformed proportion} (i.e., the angular transformation), \item \code{"PFT"} for the \emph{Freeman-Tukey double arcsine transformed proportion} (Freeman & Tukey, 1950). } Zero cell entries can be problematic for certain outcome measures. When \code{to="only0"} (the default), the value of \code{add} (the default is \code{1/2}; but see \sQuote{Note}) is added to \code{xi} and \code{mi} only for studies where \code{xi} or \code{mi} is equal to 0. When \code{to="all"}, the value of \code{add} is added to \code{xi} and \code{mi} in all studies. When \code{to="if0all"}, the value of \code{add} is added in all studies, but only when there is at least one study with a zero value for \code{xi} or \code{mi}. Setting \code{to="none"} or \code{add=0} has the same effect: No adjustment to the observed values is made. Depending on the outcome measure and the data, this may lead to division by zero inside of the function (when this occurs, the resulting value is recoded to \code{NA}). Datasets corresponding to data of this type are provided in \code{\link{dat.pritz1997}} and \code{\link{dat.debruin2009}}. } \subsection{Measures for Event Counts}{ Various measures can be used to characterize individual groups when the dependent variable assessed is an event count. Here, one needs to specify \code{xi} and \code{ti}, denoting the number of events that occurred and the total person-times at risk, respectively. The options for the \code{measure} argument are then: \itemize{ \item \code{"IR"} for the \emph{raw incidence rate}, \item \code{"IRLN"} for the \emph{log transformed incidence rate}, \item \code{"IRS"} for the \emph{square root transformed incidence rate}, \item \code{"IRFT"} for the \emph{Freeman-Tukey transformed incidence rate} (Freeman & Tukey, 1950). } Measures \code{"IR"} and \code{"IRLN"} can also be used when meta-analyzing standardized incidence ratios (SIRs), where the observed number of events is divided by the expected number of events. In this case, arguments \code{xi} and \code{ti} are used to specify the observed and expected number of events in the studies. Since SIRs are not symmetric around 1, it is usually more appropriate to meta-analyze the log transformed SIRs (i.e., using measure \code{"IRLN"}), which are symmetric around 0. Studies with zero events can be problematic, especially for the log transformed incidence rate. Adding a small constant to the number of events is a common solution to this problem. When \code{to="only0"} (the default), the value of \code{add} (the default is \code{1/2}; but see \sQuote{Note}) is added to \code{xi} only in the studies that have zero events. When \code{to="all"}, the value of \code{add} is added to \code{xi} in all studies. When \code{to="if0all"}, the value of \code{add} is added to \code{xi} in all studies, but only when there is at least one study with zero events. Setting \code{to="none"} or \code{add=0} has the same effect: No adjustment to the observed number of events is made. 
Depending on the outcome measure and the data, this may lead to division by zero inside of the function (when this occurs, the resulting value is recoded to \code{NA}). } \subsection{Measures for Quantitative Variables}{ The goal of a meta-analysis may also be to characterize individual groups, where the response, characteristic, or dependent variable assessed in the individual studies is measured on some quantitative scale. In the simplest case, the raw mean for the quantitative variable is reported for each group, which then becomes the observed outcome for the meta-analysis. Here, one needs to specify \code{mi}, \code{sdi}, and \code{ni} for the observed means, the observed standard deviations, and the sample sizes, respectively. For ratio scale measurements, the log transformed mean or the log transformed coefficient of variation (with bias correction) may also be of interest (Nakagawa et al., 2015). If focus is solely on the variability of the measurements, then the log transformed standard deviation (with bias correction) is a useful measure (Nakagawa et al., 2015; Raudenbush & Bryk, 1987). Here, one only needs to specify \code{sdi} and \code{ni}. The options for the \code{measure} argument are: \itemize{ \item \code{"MN"} for the \emph{raw mean}, \item \code{"MNLN"} for the \emph{log transformed mean}, \item \code{"CVLN"} for the \emph{log transformed coefficient of variation}, \item \code{"SDLN"} for the \emph{log transformed standard deviation}. } Note that \code{sdi} is used to specify the standard deviations of the observed values of the response, characteristic, or dependent variable and not the standard errors of the means. Also, the sampling variance for \code{measure="CVLN"} is computed as given by equation 27 in Nakagawa et al. (2015), but without the \sQuote{\mjseqn{-2 \rho \ldots}} term, since for normally distributed data (which we assume here) the mean and variance (and transformations thereof) are independent. } } \subsection{Outcome Measures for Change or Matched Pairs}{ A more complicated situation arises when the purpose of the meta-analysis is to assess the amount of change within individual groups (e.g., before and after a treatment or under two different treatments) or when dealing with matched pairs designs. \subsection{Measures for Dichotomous Variables}{ For dichotomous variables, the data for a study of this type gives rise to a paired \mjeqn{2 \times 2}{2x2} table, which is of the form: \tabular{lcc}{ \tab trt 2 outcome 1 \tab trt 2 outcome 2 \cr trt 1 outcome 1 \tab \code{ai} \tab \code{bi} \cr trt 1 outcome 2 \tab \code{ci} \tab \code{di} } where \code{ai}, \code{bi}, \code{ci}, and \code{di} denote the cell frequencies. Note that \sQuote{trt1} and \sQuote{trt2} may be applied to a single group of subjects or to matched pairs of subjects. Also, \sQuote{trt1} and \sQuote{trt2} might refer to two different time points (e.g., before and after a treatment). In any case, the data from such a study can be rearranged into a marginal table of the form: \tabular{lcc}{ \tab outcome 1 \tab outcome 2 \cr trt 1 \tab \code{ai+bi} \tab \code{ci+di} \cr trt 2 \tab \code{ai+ci} \tab \code{bi+di} } which is of the same form as a \mjeqn{2 \times 2}{2x2} table that would arise in a study comparing/contrasting two independent groups. 
The options for the \code{measure} argument that will compute outcome measures based on the marginal table are: \itemize{ \item \code{"MPRR"} for the matched pairs \emph{marginal log risk ratio}, \item \code{"MPOR"} for the matched pairs \emph{marginal log odds ratio}, \item \code{"MPRD"} for the matched pairs \emph{marginal risk difference}. } See Becker and Balagtas (1993), Curtin et al. (2002), Elbourne et al. (2002), Fagerland et al. (2014), May and Johnson (1997), Newcombe (1998), Stedman et al. (2011), and Zou (2007) for discussions of these measures. The options for the \code{measure} argument that will compute outcome measures based on the paired table are: \itemize{ \item \code{"MPORC"} for the \emph{conditional log odds ratio}, \item \code{"MPPETO"} for the \emph{conditional log odds ratio} estimated with Peto's method. } See Curtin et al. (2002) and Zou (2007) for discussions of these measures. } \subsection{Measures for Quantitative Variables}{ When the response or dependent variable assessed in the individual studies is measured on some quantitative scale, the raw mean change, standardized versions thereof, or the (log transformed) ratio of means (log response ratio) can be used as outcome measures (Becker, 1988; Gibbons et al., 1993; Lajeunesse, 2011; Morris, 2000). Here, one needs to specify \code{m1i} and \code{m2i}, the observed means at the two measurement occasions, \code{sd1i} and \code{sd2i} for the corresponding observed standard deviations, \code{ri} for the correlation between the measurements at the two measurement occasions, and \code{ni} for the sample size. The options for the \code{measure} argument are then: \itemize{ \item \code{"MC"} for the \emph{raw mean change}, \item \code{"SMCC"} for the \emph{standardized mean change} using change score standardization (Gibbons et al., 1993), \item \code{"SMCR"} for the \emph{standardized mean change} using raw score standardization (Becker, 1988), \item \code{"SMCRH"} for the \emph{standardized mean change} using raw score standardization with heteroscedastic population variances at the two measurement occasions (Bonett, 2008), \item \code{"ROMC"} for the \emph{log transformed ratio of means} (Lajeunesse, 2011). } See also Morris and DeShon (2002) for a thorough discussion of the difference between the change score measures. A few notes about the change score measures. In practice, one often has a mix of information available from the individual studies to compute these measures. In particular, if \code{m1i} and \code{m2i} are unknown, but the raw mean change is directly reported in a particular study, then one can set \code{m1i} to that value and \code{m2i} to 0 (making sure that the raw mean change was computed as \code{m1i-m2i} within that study and not the other way around). Also, for the raw mean change (\code{"MC"}) or the standardized mean change using change score standardization (\code{"SMCC"}), if \code{sd1i}, \code{sd2i}, and \code{ri} are unknown, but the standard deviation of the change scores is directly reported, then one can set \code{sd1i} to that value and both \code{sd2i} and \code{ri} to 0. Finally, for the standardized mean change using raw score standardization (\code{"SMCR"}), argument \code{sd2i} is actually not needed, as the standardization is only based on \code{sd1i} (Becker, 1988; Morris, 2000), which is usually the pre-test standard deviation (if the post-test standard deviation should be used, then set \code{sd1i} to that). 
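As a small made-up illustration combining the first two of these situations (all values are hypothetical), suppose a study only reports the raw mean change and the standard deviation of the change scores for 30 subjects; the standardized mean change using change score standardization could then be computed with: \preformatted{### raw mean change of 5.2, standard deviation of the change scores of 3.1, ni=30
### (setting m2i, sd2i, and ri to 0 as described above)
library(metafor)
escalc(measure="SMCC", m1i=5.2, m2i=0, sd1i=3.1, sd2i=0, ri=0, ni=30)}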
Note that all of these measures are also applicable for matched-pairs designs (subscripts 1 and 2 then simply denote the first and second group that are formed by the matching). Finally, interest may also be focused on differences in the variability of the measurements at the two measurement occasions (or between the two matched groups). Here, the (log transformed) ratio of the coefficient of variation (also called the coefficient of variation ratio) can be a useful measure (Nakagawa et al., 2015). If focus is solely on the variability of the measurements, then the (log transformed) ratio of the standard deviations (also called the variability ratio) can be used (Nakagawa et al., 2015). For the latter, one only needs to specify \code{sd1i}, \code{sd2i}, \code{ni}, and \code{ri}. The options for the \code{measure} argument are: \itemize{ \item \code{"CVRC"} for the \emph{log transformed coefficient of variation ratio}, \item \code{"VRC"} for the \emph{log transformed variability ratio}. } The definitions of these measures are the same as given in Nakagawa et al. (2015) but are here computed for two sets of dependent measurements. Hence, the computation of the sampling variances are adjusted to take the correlation between the measurements into consideration. } } \subsection{Other Outcome Measures for Meta-Analyses}{ Other outcome measures are sometimes used for meta-analyses that do not directly fall into the categories above. These are described in this section. \subsection{Cronbach's alpha and Transformations Thereof}{ Meta-analytic methods can also be used to aggregate Cronbach's alpha values from multiple studies. This is usually referred to as a \sQuote{reliability generalization meta-analysis} (Vacha-Haase, 1998). Here, one needs to specify \code{ai}, \code{mi}, and \code{ni} for the observed alpha values, the number of items/replications/parts of the measurement instrument, and the sample sizes, respectively. One can either directly analyze the raw Cronbach's alpha values or transformations thereof (Bonett, 2002, 2010; Hakstian & Whalen, 1976). The options for the \code{measure} argument are then: \itemize{ \item \code{"ARAW"} for \emph{raw alpha} values, \item \code{"AHW"} for \emph{transformed alpha values} (Hakstian & Whalen, 1976), \item \code{"ABT"} for \emph{transformed alpha values} (Bonett, 2002). } Note that the transformations implemented here are slightly different from the ones described by Hakstian and Whalen (1976) and Bonett (2002). In particular, for \code{"AHW"}, the transformation \mjeqn{1-(1-\alpha)^{1/3}}{1-(1-\alpha)^(1/3)} is used, while for \code{"ABT"}, the transformation \mjeqn{-\ln(1-\alpha)}{-ln(1-\alpha)} is used. This ensures that the transformed values are monotonically increasing functions of \mjseqn{\alpha}. A dataset corresponding to data of this type is provided in \code{\link{dat.bonett2010}}. } \subsection{Partial and Semi-Partial Correlations}{ Aloe and Becker (2012), Aloe and Thompson (2013), and Aloe (2014) describe the use of partial and semi-partial correlation coefficients as a method for meta-analyzing the results from regression models (when the focus is on a common regression coefficient of interest across studies). 
To compute these measures, one needs to specify \code{ti} for the test statistics (i.e., t-tests) of the regression coefficient of interest, \code{ni} for the sample sizes of the studies, \code{mi} for the number of predictors in the regression models, and \code{r2i} for the \mjseqn{R^2} value of the regression models (the latter is only needed when \code{measure="SPCOR"}). The options for the \code{measure} argument are then: \itemize{ \item \code{"PCOR"} for the \emph{partial correlation coefficient}, \item \code{"ZPCOR"} for \emph{Fisher's r-to-z transformed partial correlation coefficient}, \item \code{"SPCOR"} for the \emph{semi-partial correlation coefficient}. } Note that the sign of the (semi-)partial correlation coefficients is determined based on the signs of the values specified via the \code{ti} argument. Also, the Fisher transformation can only be applied to partial correlation coefficients, not to semi-partial correlation coefficients. } } \subsection{Converting a Data Frame to an 'escalc' Object}{ The function can also be used to convert a regular data frame to an \sQuote{escalc} object. One simply sets the \code{measure} argument to one of the options described above (or to \code{measure="GEN"} for a generic outcome measure not further specified) and passes the observed effect sizes or outcomes via the \code{yi} argument and the corresponding sampling variances via the \code{vi} argument (or the standard errors via the \code{sei} argument). } } \value{ An object of class \code{c("escalc","data.frame")}. The object is a data frame containing the following components: \item{yi}{observed effect sizes or outcomes.} \item{vi}{corresponding sampling variances.} If \code{append=TRUE} and a data frame was specified via the \code{data} argument, then \code{yi} and \code{vi} are appended to this data frame. Note that the \code{var.names} argument actually specifies the names of these two variables (\code{yi} and \code{vi} are the defaults). If the data frame already contains two variables with names as specified by the \code{var.names} argument, the values for these two variables will be overwritten when \code{replace=TRUE} (which is the default). By setting \code{replace=FALSE}, only values that are \code{NA} will be replaced. The \code{subset} argument can be used to select the studies that will be included in the data frame returned by the function. On the other hand, the \code{include} argument simply selects for which studies the measure will be computed (if it shouldn't be computed for all of them). The object is formatted and printed with the \code{\link{print.escalc}} function. The \code{\link{summary.escalc}} function can be used to obtain confidence intervals for the individual outcomes. With the \code{\link{aggregate.escalc}} function, one can aggregate multiple effect sizes or outcomes belonging to the same study (or some other clustering variable) into a single combined effect size or outcome. } \note{ The variable names specified under \code{var.names} should be syntactically valid variable names. If necessary, they are adjusted so that they are. Although the default value for \code{add} is \code{1/2}, for certain measures the use of such a bias correction makes little sense and for these measures, the function internally sets \code{add=0}. This applies to the following measures: \code{"AS"}, \code{"PHI"}, \code{"RTET"}, \code{"IRSD"}, \code{"PAS"}, \code{"PFT"}, \code{"IRS"}, and \code{"IRFT"}.
One can still force the use of the bias correction by explicitly setting the \code{add} argument to some non-zero value. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Aloe, A. M. (2014). An empirical investigation of partial effect sizes in meta-analysis of correlational data. \emph{Journal of General Psychology}, \bold{141}(1), 47--64. \verb{https://doi.org/10.1080/00221309.2013.853021} Aloe, A. M., & Becker, B. J. (2012). An effect size for regression predictors in meta-analysis. \emph{Journal of Educational and Behavioral Statistics}, \bold{37}(2), 278--297. \verb{https://doi.org/10.3102/1076998610396901} Aloe, A. M., & Thompson, C. G. (2013). The synthesis of partial effect sizes. \emph{Journal of the Society for Social Work and Research}, \bold{4}(4), 390--405. \verb{https://doi.org/10.5243/jsswr.2013.24} Bagos, P. G., & Nikolopoulos, G. K. (2009). Mixed-effects Poisson regression models for meta-analysis of follow-up studies with constant or varying durations. \emph{The International Journal of Biostatistics}, \bold{5}(1). \verb{https://doi.org/10.2202/1557-4679.1168} Becker, B. J. (1988). Synthesizing standardized mean-change measures. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{41}(2), 257--278. \verb{https://doi.org/10.1111/j.2044-8317.1988.tb00901.x} Becker, M. P., & Balagtas, C. C. (1993). Marginal modeling of binary cross-over data. \emph{Biometrics}, \bold{49}(4), 997--1009. \verb{https://doi.org/10.2307/2532242} Bonett, D. G. (2002). Sample size requirements for testing and estimating coefficient alpha. \emph{Journal of Educational and Behavioral Statistics}, \bold{27}(4), 335--340. \verb{https://doi.org/10.3102/10769986027004335} Bonett, D. G. (2008). Confidence intervals for standardized linear contrasts of means. \emph{Psychological Methods}, \bold{13}(2), 99--109. \verb{https://doi.org/10.1037/1082-989X.13.2.99} Bonett, D. G. (2009). Meta-analytic interval estimation for standardized and unstandardized mean differences. \emph{Psychological Methods}, \bold{14}(3), 225--238. \verb{https://doi.org/10.1037/a0016619} Bonett, D. G. (2010). Varying coefficient meta-analytic methods for alpha reliability. \emph{Psychological Methods}, \bold{15}(4), 368--385. \verb{https://doi.org/10.1037/a0020142} Borenstein, M. (2009). Effect sizes for continuous data. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 221--235). New York: Russell Sage Foundation. Chinn, S. (2000). A simple method for converting an odds ratio to effect size for use in meta-analysis. \emph{Statistics in Medicine}, \bold{19}(22), 3127--3131. \verb{https://doi.org/10.1002/1097-0258(20001130)19:22<3127::aid-sim784>3.0.co;2-m} Cohen, J. (1988). \emph{Statistical power analysis for the behavioral sciences} (2nd ed.). Hillsdale, NJ: Lawrence Erlbaum Associates. Cox, D. R., & Snell, E. J. (1989). \emph{Analysis of binary data} (2nd ed.). London: Chapman & Hall. Curtin, F., Elbourne, D., & Altman, D. G. (2002). Meta-analysis combining parallel and cross-over clinical trials. II: Binary outcomes. \emph{Statistics in Medicine}, \bold{21}(15), 2145--2159. \verb{https://doi.org/10.1002/sim.1206} Elbourne, D. R., Altman, D. G., Higgins, J. P. T., Curtin, F., Worthington, H. V., & Vail, A. (2002). Meta-analyses involving cross-over trials: Methodological issues. \emph{International Journal of Epidemiology}, \bold{31}(1), 140--149. 
\verb{https://doi.org/10.1093/ije/31.1.140} Fagerland, M. W., Lydersen, S., & Laake, P. (2014). Recommended tests and confidence intervals for paired binomial proportions. \emph{Statistics in Medicine}, \bold{33}(16), 2850--2875. \verb{https://doi.org/10.1002/sim.6148} Fisher, R. A. (1921). On the \dQuote{probable error} of a coefficient of correlation deduced from a small sample. \emph{Metron}, \bold{1}, 1--32. \verb{http://hdl.handle.net/2440/15169} Fleiss, J. L., & Berlin, J. (2009). Effect sizes for dichotomous data. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 237--253). New York: Russell Sage Foundation. Freeman, M. F., & Tukey, J. W. (1950). Transformations related to the angular and the square root. \emph{Annals of Mathematical Statistics}, \bold{21}(4), 607--611. \verb{https://doi.org/10.1214/aoms/1177729756} Gibbons, R. D., Hedeker, D. R., & Davis, J. M. (1993). Estimation of effect size from a series of experiments involving paired comparisons. \emph{Journal of Educational Statistics}, \bold{18}(3), 271--279. \verb{https://doi.org/10.3102/10769986018003271} Hakstian, A. R., & Whalen, T. E. (1976). A k-sample significance test for independent alpha coefficients. \emph{Psychometrika}, \bold{41}(2), 219--231. \verb{https://doi.org/10.1007/BF02291840} Hasselblad, V., & Hedges, L. V. (1995). Meta-analysis of screening and diagnostic tests. \emph{Psychological Bulletin}, \bold{117}(1), 167--178. \verb{https://doi.org/10.1037/0033-2909.117.1.167} Hedges, L. V. (1981). Distribution theory for Glass's estimator of effect size and related estimators. \emph{Journal of Educational Statistics}, \bold{6}(2), 107--128. \verb{https://doi.org/10.3102/10769986006002107} Hedges, L. V. (1982). Estimation of effect size from a series of independent experiments. \emph{Psychological Bulletin}, \bold{92}(2), 490--499. \verb{https://doi.org/10.1037/0033-2909.92.2.490} Hedges, L. V. (1983). A random effects model for effect sizes. \emph{Psychological Bulletin}, \bold{93}(2), 388--395. \verb{https://doi.org/10.1037/0033-2909.93.2.388} Hedges, L. V. (1989). An unbiased correction for sampling error in validity generalization studies. \emph{Journal of Applied Psychology}, \bold{74}(3), 469--477. \verb{https://doi.org/10.1037/0021-9010.74.3.469} Hedges, L. V., Gurevitch, J., & Curtis, P. S. (1999). The meta-analysis of response ratios in experimental ecology. \emph{Ecology}, \bold{80}(4), 1150--1156. \verb{https://doi.org/10.1890/0012-9658(1999)080[1150:TMAORR]2.0.CO;2} Higgins, J. P. T., Thomas, J., Chandler, J., Cumpston, M., Li, T., Page, M. J., & Welch, V. A. (Eds.) (2019). \emph{Cochrane handbook for systematic reviews of interventions} (2nd ed.). Chichester, UK: Wiley. \verb{https://training.cochrane.org/handbook} Jacobs, P., & Viechtbauer, W. (2017). Estimation of the biserial correlation and its sampling variance for use in meta-analysis. \emph{Research Synthesis Methods}, \bold{8}(2), 161--180. \verb{https://doi.org/10.1002/jrsm.1218} Kirk, D. B. (1973). On the numerical approximation of the bivariate normal (tetrachoric) correlation coefficient. \emph{Psychometrika}, \bold{38}(2), 259--268. \verb{https://doi.org/10.1007/BF02291118} Lajeunesse, M. J. (2011). On the meta-analysis of response ratios for studies with correlated and multi-group designs. \emph{Ecology}, \bold{92}(11), 2049--2055. \verb{https://doi.org/10.1890/11-0423.1} May, W. L., & Johnson, W. D. (1997).
Confidence intervals for differences in correlated binary proportions. \emph{Statistics in Medicine}, \bold{16}(18), 2127--2136. \verb{https://doi.org/10.1002/(SICI)1097-0258(19970930)16:18<2127::AID-SIM633>3.0.CO;2-W} Morris, S. B. (2000). Distribution of the standardized mean change effect size for meta-analysis on repeated measures. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{53}(1), 17--29. \verb{https://doi.org/10.1348/000711000159150} Morris, S. B., & DeShon, R. P. (2002). Combining effect size estimates in meta-analysis with repeated measures and independent-groups designs. \emph{Psychological Methods}, \bold{7}(1), 105--125. \verb{https://doi.org/10.1037/1082-989x.7.1.105} Nakagawa, S., Poulin, R., Mengersen, K., Reinhold, K., Engqvist, L., Lagisz, M., & Senior, A. M. (2015). Meta-analysis of variation: Ecological and evolutionary applications and beyond. \emph{Methods in Ecology and Evolution}, \bold{6}(2), 143--152. \verb{https://doi.org/10.1111/2041-210x.12309} Newcombe, R. G. (1998). Improved confidence intervals for the difference between binomial proportions based on paired data. \emph{Statistics in Medicine}, \bold{17}(22), 2635--2650. \verb{https://doi.org/10.1002/(SICI)1097-0258(19981130)17:22<2635::AID-SIM954>3.0.CO;2-C} Olkin, I., & Pratt, J. W. (1958). Unbiased estimation of certain correlation coefficients. \emph{Annals of Mathematical Statistics}, \bold{29}(1), 201--211. \verb{https://doi.org/10.1214/aoms/1177706717} Pearson, K. (1900). Mathematical contributions to the theory of evolution. VII. On the correlation of characters not quantitatively measurable. \emph{Philosophical Transactions of the Royal Society of London, Series A}, \bold{195}, 1--47. \verb{https://doi.org/10.1098/rsta.1900.0022} Pearson, K. (1909). On a new method of determining correlation between a measured character A, and a character B, of which only the percentage of cases wherein B exceeds (or falls short of) a given intensity is recorded for each grade of A. \emph{Biometrika}, \bold{7}(1/2), 96--105. \verb{https://doi.org/10.1093/biomet/7.1-2.96} Raudenbush, S. W., & Bryk, A. S. (1987). Examining correlates of diversity. \emph{Journal of Educational Statistics}, \bold{12}(3), 241--269. \verb{https://doi.org/10.3102/10769986012003241} Rothman, K. J., Greenland, S., & Lash, T. L. (2008). \emph{Modern epidemiology} (3rd ed.). Philadelphia: Lippincott Williams & Wilkins. \enc{Rücker}{Ruecker}, G., Schwarzer, G., Carpenter, J., & Olkin, I. (2009). Why add anything to nothing? The arcsine difference as a measure of treatment effect in meta-analysis with zero cells. \emph{Statistics in Medicine}, \bold{28}(5), 721--738. \verb{https://doi.org/10.1002/sim.3511} \enc{Sánchez-Meca}{Sanchez-Meca}, J., \enc{Marín-Martínez}{Marin-Martinez}, F., & \enc{Chacón-Moscoso}{Chacon-Moscoso}, S. (2003). Effect-size indices for dichotomized outcomes in meta-analysis. \emph{Psychological Methods}, \bold{8}(4), 448--467. \verb{https://doi.org/10.1037/1082-989X.8.4.448} Soper, H. E. (1914). On the probable error of the bi-serial expression for the correlation coefficient. \emph{Biometrika}, \bold{10}(2/3), 384--390. \verb{https://doi.org/10.1093/biomet/10.2-3.384} Stedman, M. R., Curtin, F., Elbourne, D. R., Kesselheim, A. S., & Brookhart, M. A. (2011). Meta-analyses involving cross-over trials: Methodological issues. \emph{International Journal of Epidemiology}, \bold{40}(6), 1732--1734. \verb{https://doi.org/10.1093/ije/dyp345} Tate, R. F. (1954). 
Correlation between a discrete and a continuous variable: Point-biserial correlation. \emph{Annals of Mathematical Statistics}, \bold{25}(3), 603--607. \verb{https://doi.org/10.1214/aoms/1177728730} Vacha-Haase, T. (1998). Reliability generalization: Exploring variance in measurement error affecting score reliability across studies. \emph{Educational and Psychological Measurement}, \bold{58}(1), 6--20. \verb{https://doi.org/10.1177/0013164498058001002} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Yule, G. U. (1912). On the methods of measuring association between two attributes. \emph{Journal of the Royal Statistical Society}, \bold{75}(6), 579--652. \verb{https://doi.org/10.2307/2340126} Yusuf, S., Peto, R., Lewis, J., Collins, R., & Sleight, P. (1985). Beta blockade during and after myocardial infarction: An overview of the randomized trials. \emph{Progress in Cardiovascular Disease}, \bold{27}(5), 335--371. \verb{https://doi.org/10.1016/s0033-0620(85)80003-7} Zou, G. Y. (2007). One relative risk versus two odds ratios: Implications for meta-analyses involving paired and unpaired binary data. \emph{Clinical Trials}, \bold{4}(1), 25--31. \verb{https://doi.org/10.1177/1740774506075667} } \seealso{ \code{\link{print.escalc}}, \code{\link{summary.escalc}}, \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) dat ### suppose that for a particular study, yi and vi are known (i.e., have ### already been calculated) but the 2x2 table counts are not known; with ### replace=FALSE, the yi and vi values for that study are not replaced dat[1:12,10:11] <- NA dat[13,4:7] <- NA dat dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, replace=FALSE) dat ### illustrate difference between 'subset' and 'include' arguments escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, include=1:6) ### convert a regular data frame to an 'escalc' object ### dataset from Lipsey & Wilson (2001), Table 7.1, page 130 dat <- data.frame(id = c(100, 308, 1596, 2479, 9021, 9028, 161, 172, 537, 7049), yi = c(-0.33, 0.32, 0.39, 0.31, 0.17, 0.64, -0.33, 0.15, -0.02, 0.00), vi = c(0.084, 0.035, 0.017, 0.034, 0.072, 0.117, 0.102, 0.093, 0.012, 0.067), random = c(0, 0, 0, 0, 0, 0, 1, 1, 1, 1), intensity = c(7, 3, 7, 5, 7, 7, 4, 4, 5, 6)) dat <- escalc(measure="SMD", yi=yi, vi=vi, data=dat, slab=paste("Study ID:", id), digits=3) dat } \keyword{datagen} metafor/man/dat.anand1999.Rd0000644000176200001440000000607214055736403015151 0ustar liggesusers\name{dat.anand1999} \docType{data} \alias{dat.anand1999} \title{Studies on the Effectiveness of Oral Anticoagulants in Patients with Coronary Artery Disease} \description{Results from 34 trials examining the effectiveness of oral anticoagulants in patients with coronary artery disease.} \usage{ dat.anand1999 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab author(s) or trial name \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{intensity} \tab \code{character} \tab intensity of anticoagulation (low, moderate, or high) \cr \bold{asp.t} \tab
\code{numeric} \tab concomitant use of aspirin in the treatment group (0 = no, 1 = yes) \cr \bold{asp.c} \tab \code{numeric} \tab concomitant use of aspirin in the control group (0 = no, 1 = yes) \cr \bold{ai} \tab \code{numeric} \tab number of deaths in the treatment group \cr \bold{n1i} \tab \code{numeric} \tab number of patients in the treatment group \cr \bold{ci} \tab \code{numeric} \tab number of deaths in the control group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in the control group } } \details{ The dataset includes the results from 34 randomized clinical trials that examined the effectiveness of oral anticoagulants in patients with coronary artery disease. The results given here are focused on the total mortality in the treatment versus control groups. } \note{ Strictly speaking, there are only 31 trials, since Breddin et al. (1980) and ATACS (1990) are multiarm trials. According to a correction, \code{dat.anand1999$ci[29]} should be 1. But then \code{dat.anand1999$ci[21]} would also have to be 1 (if these data indeed refer to the same control group). This appears contradictory, so this correction was not made. } \source{ Anand, S. S., & Yusuf, S. (1999). Oral anticoagulant therapy in patients with coronary artery disease: A meta-analysis. \emph{Journal of the American Medical Association}, \bold{282}(21), 2058--2067. \verb{https://doi.org/10.1001/jama.282.21.2058} } \examples{ ### copy data into 'dat' and examine data dat <- dat.anand1999 dat ### High-Intensity OA vs Control rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(intensity=="high" & asp.t==0 & asp.c==0), digits=2) ### High- or Moderate-Intensity OA vs Aspirin rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(intensity \%in\% c("high","moderate") & asp.t==0 & asp.c==1), digits=2) ### Moderate-Intensity OA vs Control rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(intensity=="moderate" & asp.t==0 & asp.c==0), digits=2) ### High- or Moderate-Intensity OA and Aspirin vs Aspirin rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(intensity \%in\% c("high","moderate") & asp.t==1 & asp.c==1), digits=2) ### Low-Intensity OA and Aspirin vs Aspirin rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(intensity=="low" & asp.t==1 & asp.c==1), digits=2) } \keyword{datasets} metafor/man/dat.hasselblad1998.Rd0000644000176200001440000001467414055736403016200 0ustar liggesusers\name{dat.hasselblad1998} \docType{data} \alias{dat.hasselblad1998} \title{Studies on the Effectiveness of Counseling for Smoking Cessation} \description{Results from 24 studies on the effectiveness of various counseling types for smoking cessation.} \usage{dat.hasselblad1998} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab id number for each treatment arm \cr \bold{study} \tab \code{numeric} \tab study id number \cr \bold{authors} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{trt} \tab \code{character} \tab intervention group \cr \bold{xi} \tab \code{numeric} \tab number of individuals abstinent \cr \bold{ni} \tab \code{numeric} \tab number of individuals in group } } \details{ The dataset includes the results from 24 studies on the effectiveness of various counseling types for smoking cessation (i.e., self-help, individual counseling, group counseling, and no contact). 
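A quick cross-tabulation of the \code{study} and \code{trt} variables (a minimal base \R sketch) shows which counseling types were examined in each study:
\preformatted{
with(dat.hasselblad1998, table(study, trt))
}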
The dataset indicates the total number of individuals within each study arm and the number that were abstinent from 6 to 12 months. The majority of the studies compared two intervention types against each other, while 2 studies compared three types against each other simultaneously. The data can be used for a \sQuote{network meta-analysis} (also called a \sQuote{mixed treatment comparison}). The code below shows how such an analysis can be conducted using an arm-based and a contrast-based model (see Salanti et al., 2008, for more details). } \source{ Hasselblad, V. (1998). Meta-analysis of multitreatment studies. \emph{Medical Decision Making}, \bold{18}(1), 37--43. \verb{https://doi.org/10.1177/0272989X9801800110} } \references{ Gleser, L. J., & Olkin, I. (2009). Stochastically dependent effect sizes. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 357--376). New York: Russell Sage Foundation. Law, M., Jackson, D., Turner, R., Rhodes, K., & Viechtbauer, W. (2016). Two new methods to fit models for network meta-analysis with random inconsistency effects. \emph{BMC Medical Research Methodology}, \bold{16}, 87. \verb{https://doi.org/10.1186/s12874-016-0184-5} Salanti, G., Higgins, J. P. T., Ades, A. E., & Ioannidis, J. P. A. (2008). Evaluation of networks of randomized trials. \emph{Statistical Methods in Medical Research}, \bold{17}(3), 279--301. \verb{https://doi.org/10.1177/0962280207080643} } \examples{ ### copy data into 'dat' dat <- dat.hasselblad1998 dat ### create network graph ('igraph' package must be installed) \dontrun{ require(igraph) pairs <- data.frame(do.call(rbind, sapply(split(dat$trt, dat$study), function(x) t(combn(x,2)))), stringsAsFactors=FALSE) lvls <- c("no_contact", "self_help", "ind_counseling", "grp_counseling") pairs$X1 <- factor(pairs$X1, levels=lvls) pairs$X2 <- factor(pairs$X2, levels=lvls) tab <- table(pairs[,1], pairs[,2]) tab # adjacency matrix g <- graph_from_adjacency_matrix(tab, mode = "plus", weighted=TRUE, diag=FALSE) vertex_attr(g, "name") <- c("No Contact", "Self-Help", "Individual\nCounseling", "Group\nCounseling") plot(g, edge.curved=FALSE, edge.width=E(g)$weight, layout=layout_on_grid, vertex.size=45, vertex.color="lightgray", vertex.label.color="black", vertex.label.font=2)} ### calculate log odds for each study arm dat <- escalc(measure="PLO", xi=xi, ni=ni, add=1/2, to="all", data=dat) dat ### convert trt variable to factor with desired ordering of levels dat$trt <- factor(dat$trt, levels=c("no_contact", "self_help", "ind_counseling", "grp_counseling")) ### add a space before each level (this makes the output a bit more legible) levels(dat$trt) <- paste0(" ", levels(dat$trt)) ### network meta-analysis using an arm-based model with fixed study effects ### by setting rho=1/2, tau^2 reflects the amount of heterogeneity for all treatment comparisons res <- rma.mv(yi, vi, mods = ~ factor(study) + trt - 1, random = ~ trt | study, rho=1/2, data=dat, btt="trt") res ### all pairwise odds ratios of interventions versus no contact predict(res, newmods=cbind(matrix(0, nrow=3, ncol=24), diag(3)), intercept=FALSE, transf=exp, digits=2) ### all pairwise odds ratios comparing interventions (ic vs sh, gc vs sh, and gc vs ic) predict(res, newmods=cbind(matrix(0, nrow=3, ncol=24), rbind(c(-1,1,0), c(-1,0,1), c(0,-1,1))), intercept=FALSE, transf=exp, digits=2) ### forest plot of ORs of interventions versus no contact dev.new(width=7, height=4) par(mar=c(5,4,1,2))
forest(c(0,res$beta[25:27]), sei=c(0,res$se[25:27]), psize=1, xlim=c(-3,4), digits=c(2,1), efac=2, slab=c("No Contact", "Self-Help", "Individual Counseling", "Group Counseling"), atransf=exp, at=log(c(.5, 1, 2, 4, 8)), xlab="Odds Ratio for Intervention vs. No Contact", header=c("Intervention", "Odds Ratio [95\% CI]")) ############################################################################ ### restructure dataset to a contrast-based format dat <- to.wide(dat.hasselblad1998, study="study", grp="trt", ref="no_contact", grpvars=6:7) ### calculate log odds ratios for each treatment comparison dat <- escalc(measure="OR", ai=xi.1, n1i=ni.1, ci=xi.2, n2i=ni.2, add=1/2, to="all", data=dat) dat ### calculate the variance-covariance matrix of the log odds ratios for multitreatment studies ### see Gleser & Olkin (2009), equation (19.11), for the covariance equation calc.v <- function(x) { v <- matrix(1/(x$xi.2[1]+1/2) + 1/(x$ni.2[1] - x$xi.2[1] + 1/2), nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) ### add contrast matrix to dataset dat <- contrmat(dat, grp1="trt.1", grp2="trt.2") dat ### network meta-analysis using a contrast-based random-effects model ### by setting rho=1/2, tau^2 reflects the amount of heterogeneity for all treatment comparisons res <- rma.mv(yi, V, mods = ~ self_help + ind_counseling + grp_counseling - 1, random = ~ comp | study, rho=1/2, data=dat) res ### predicted odds ratios of interventions versus no contact predict(res, newmods=diag(3), transf=exp, digits=2) ### fit random inconsistency effects model (see Law et al., 2016) res <- rma.mv(yi, V, mods = ~ self_help + ind_counseling + grp_counseling - 1, random = list(~ comp | study, ~ comp | design), rho=1/2, phi=1/2, data=dat) res } \keyword{datasets} metafor/man/coef.permutest.rma.uni.Rd0000644000176200001440000000357114055736403017301 0ustar liggesusers\name{coef.permutest.rma.uni} \alias{coef.permutest.rma.uni} \title{Extract the Model Coefficient Table from 'permutest.rma.uni' Objects} \description{ The function extracts the estimated model coefficients, corresponding standard errors, test statistics, p-values (based on the permutation tests), and confidence interval bounds from objects of class \code{"permutest.rma.uni"}. } \usage{ \method{coef}{permutest.rma.uni}(object, \dots) } \arguments{ \item{object}{an object of class \code{"permutest.rma.uni"}.} \item{\dots}{other arguments.} } \value{ A data frame with the following elements: \item{estimate}{estimated model coefficient(s).} \item{se}{corresponding standard error(s).} \item{zval}{corresponding test statistic(s).} \item{pval}{p-value(s) based on the permutation test(s).} \item{ci.lb}{lower bound of the (permutation-based) confidence interval(s).} \item{ci.ub}{upper bound of the (permutation-based) confidence interval(s).} When the model was fitted with \code{test="t"} or \code{test="knha"}, then \code{zval} is called \code{tval} in the data frame that is returned by the function. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{permutest.rma.uni}}, \code{\link{rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### carry out permutation test \dontrun{ sav <- permutest(res) coef(sav)} } \keyword{models} metafor/man/rma.glmm.Rd0000644000176200001440000006356414055736403014510 0ustar liggesusers\name{rma.glmm} \alias{rma.glmm} \title{Meta-Analysis via Generalized Linear (Mixed-Effects) Models} \description{ Function to fit meta-analytic fixed- and random/mixed-effects models with or without moderators via generalized linear (mixed-effects) models. See below and the documentation of the \pkg{\link{metafor-package}} for more details on these models. \loadmathjax } \usage{ rma.glmm(ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, xi, mi, ti, ni, mods, measure, intercept=TRUE, data, slab, subset, add=1/2, to="only0", drop00=TRUE, vtype="LS", model="UM.FS", method="ML", test="z", level=95, digits, btt, nAGQ=7, verbose=FALSE, control, \dots) } \arguments{ \item{ai}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{bi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ci}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{di}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{n1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{n2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{x1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{x2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{t1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{t2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{xi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{mi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ti}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ni}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{mods}{optional argument to include one or more moderators in the model. A single moderator can be given as a vector of length \mjseqn{k} specifying the values of the moderator. Multiple moderators are specified by giving a matrix with \mjseqn{k} rows and as many columns as there are moderator variables. Alternatively, a model \code{\link{formula}} can be used to specify the model. See \sQuote{Details}.} \item{measure}{character string to specify the outcome measure to use for the meta-analysis. 
Possible options are the odds ratio (\code{"OR"}), the incidence rate ratio (\code{"IRR"}), the logit transformed proportion (\code{"PLO"}), or the log transformed incidence rate (\code{"IRLN"}).} \item{intercept}{logical to specify whether an intercept should be added to the model (the default is \code{TRUE}).} \item{data}{optional data frame containing the data supplied to the function.} \item{slab}{optional vector with labels for the \mjseqn{k} studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be used for the analysis.} \item{add}{non-negative number to specify the amount to add to zero cells, counts, or frequencies when calculating the observed effect sizes or outcomes of the individual studies. See below and the documentation of the \code{\link{escalc}} function for more details.} \item{to}{character string to specify when the values under \code{add} should be added (either \code{"only0"}, \code{"all"}, \code{"if0all"}, or \code{"none"}). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{drop00}{logical to specify whether studies with no cases/events (or only cases) in both groups should be dropped. See the documentation of the \code{\link{escalc}} function for more details.} \item{vtype}{character string to specify the type of sampling variances to calculate when calculating the observed effect sizes or outcomes. See the documentation of the \code{\link{escalc}} function for more details.} \item{model}{character string to specify the general model type to use for the analysis (either \code{"UM.FS"} (the default), \code{"UM.RS"}, \code{"CM.EL"}, or \code{"CM.AL"}). See \sQuote{Details}.} \item{method}{character string to specify whether a fixed- or a random/mixed-effects model should be fitted. A fixed-effects model (with or without moderators) is fitted when using \code{method="FE"}. Random/mixed-effects models are fitted by setting \code{method="ML"} (the default). See \sQuote{Details}.} \item{test}{character string to specify how test statistics and confidence intervals for the fixed effects should be computed. By default (\code{test="z"}), Wald-type tests and CIs are obtained, which are based on a standard normal distribution. When \code{test="t"}, a t-distribution is used instead. See \sQuote{Details}.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} \item{btt}{optional vector of indices to specify which coefficients to include in the omnibus test of moderators. Can also be a string to grep for. See \sQuote{Details}.} \item{nAGQ}{positive integer to specify the number of points per axis for evaluating the adaptive Gauss-Hermite approximation to the log-likelihood. The default is 7. Setting this to 1 corresponds to the Laplacian approximation. See \sQuote{Note}.} \item{verbose}{logical to specify whether output should be generated on the progress of the model fitting (the default is \code{FALSE}). Can also be an integer. Values > 1 generate more verbose output. See \sQuote{Note}.} \item{control}{optional list of control values for the estimation algorithms. If unspecified, default values are defined inside the function. 
See \sQuote{Note}.} \item{\dots}{additional arguments.} } \details{ \bold{Specifying the Data} The function can be used in conjunction with the following effect size or outcome measures: \itemize{ \item \code{measure="OR"} for odds ratios (analyzed in log units) \item \code{measure="IRR"} for incidence rate ratios (analyzed in log units) \item \code{measure="PLO"} for logit transformed proportions (i.e., log odds) \item \code{measure="IRLN"} for log transformed incidence rates. } The \code{\link{escalc}} function describes the data/arguments that should be specified/used for these measures. \bold{Specifying the Model} A variety of model types are available when analyzing \mjeqn{2 \times 2}{2x2} table data (i.e., when \code{measure="OR"}) or two-group event count data (i.e., when \code{measure="IRR"}): \itemize{ \item \code{model="UM.FS"} for an unconditional generalized linear mixed-effects model with fixed study effects \item \code{model="UM.RS"} for an unconditional generalized linear mixed-effects model with random study effects \item \code{model="CM.AL"} for a conditional generalized linear mixed-effects model (approximate likelihood) \item \code{model="CM.EL"} for a conditional generalized linear mixed-effects model (exact likelihood). } For \code{measure="OR"}, models \code{"UM.FS"} and \code{"UM.RS"} are essentially (mixed-effects) logistic regression models, while for \code{measure="IRR"}, these models are (mixed-effects) Poisson regression models. A choice must be made on how to model study level variability (i.e., differences in outcomes across studies irrespective of group membership). One can choose between using fixed study effects (which means that \mjseqn{k} dummy variables are added to the model) or random study effects (which means that random effects corresponding to the levels of the study factor are added to the model). The conditional model (\code{model="CM.EL"}) avoids having to model study level variability by conditioning on the total numbers of cases/events in each study. For \code{measure="OR"}, this leads to a non-central hypergeometric distribution for the data within each study and the corresponding model is then a (mixed-effects) conditional logistic model. Fitting this model can be difficult and computationally expensive. When the number of cases in each study is small relative to the group sizes, one can approximate the exact likelihood by a binomial distribution, which leads to a regular (mixed-effects) logistic regression model (\code{model="CM.AL"}). For \code{measure="IRR"}, the conditional model leads directly to a binomial distribution for the data within each study and the resulting model is again a (mixed-effects) logistic regression model (no approximate likelihood model is needed here). When analyzing proportions (i.e., \code{measure="PLO"}) or incidence rates (i.e., \code{measure="IRLN"}) of individual groups, the model type is always a (mixed-effects) logistic or Poisson regression model, respectively (i.e., the \code{model} argument is not relevant here). Aside from choosing the general model type, one has to decide whether to fit a fixed- or random-effects model to the data. A \emph{fixed-effects model} is fitted by setting \code{method="FE"}. A \emph{random-effects model} is fitted by setting \code{method="ML"} (the default). Note that random-effects models with dichotomous data are often referred to as \sQuote{binomial-normal} models in the meta-analytic literature. 
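For instance (a hedged sketch based on the BCG vaccine data included in the package; the random-effects fit requires the \pkg{lme4} package):
\preformatted{
## fixed-effects model
rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, method="FE")

## random-effects model
rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, method="ML")
}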
Analogously, for event count data, such models could be referred to as \sQuote{Poisson-normal} models. One or more moderators can be included in all of these models via the \code{mods} argument. A single moderator can be given as a (row or column) vector of length \mjseqn{k} specifying the values of the moderator. Multiple moderators are specified by giving an appropriate model matrix (i.e., \mjseqn{X}) with \mjseqn{k} rows and as many columns as there are moderator variables (e.g., \code{mods = cbind(mod1, mod2, mod3)}, where \code{mod1}, \code{mod2}, and \code{mod3} correspond to the names of the variables for three moderator variables). The intercept is added to the model matrix by default unless \code{intercept=FALSE}. Alternatively, one can use standard \code{\link{formula}} syntax to specify the model. In this case, the \code{mods} argument should be set equal to a one-sided formula of the form \code{mods = ~ model} (e.g., \code{mods = ~ mod1 + mod2 + mod3}). Interactions, polynomial terms, and factors can be easily added to the model in this manner. When specifying a model formula via the \code{mods} argument, the \code{intercept} argument is ignored. Instead, the inclusion/exclusion of the intercept is controlled by the specified formula (e.g., \code{mods = ~ mod1 + mod2 + mod3 - 1} would lead to the removal of the intercept). \bold{Fixed-, Saturated-, and Random/Mixed-Effects Models} When fitting a particular model, actually up to three different models are fitted within the function: \itemize{ \item the fixed-effects model (i.e., where \mjseqn{\tau^2} is set to 0), \item the saturated model (i.e., the model with a deviance of 0), and \item the random/mixed-effects model (i.e., where \mjseqn{\tau^2} is estimated) (only if \code{method="ML"}). } The saturated model is obtained by adding as many dummy variables to the model as needed so that the model deviance is equal to zero. Even when \code{method="ML"}, the fixed-effects and saturated models are fitted, as they are used to compute the test statistics for the Wald-type and likelihood ratio tests for (residual) heterogeneity (see below). \bold{Omnibus Test of Moderators} For models including moderators, an omnibus test of all model coefficients is conducted that excludes the intercept (the first coefficient) if it is included in the model. If no intercept is included in the model, then the omnibus test includes all of the coefficients in the model including the first. Alternatively, one can manually specify the indices of the coefficients to test via the \code{btt} argument. For example, with \code{btt=c(3,4)}, only the third and fourth coefficient from the model would be included in the test (if an intercept is included in the model, then it corresponds to the first coefficient in the model). Instead of specifying the coefficient numbers, one can specify a string for \code{btt}. In that case, \code{\link{grep}} will be used to search for all coefficient names that match the string. The omnibus test is called the \mjseqn{Q_M}-test and follows, under the assumptions of the model, a chi-square distribution with \mjseqn{m} degrees of freedom (with \mjseqn{m} denoting the number of coefficients tested) under the null hypothesis (that the true value of all coefficients tested is equal to 0). \bold{Categorical Moderators} Categorical moderator variables can be included in the model via the \code{mods} argument in the same way that appropriately (dummy) coded categorical independent variables can be included in linear models. 
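For instance (a hedged sketch using the \code{dat.bcg} dataset and its \code{alloc} variable; fitting requires the \pkg{lme4} package and may take a moment):
\preformatted{
rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg,
         data=dat.bcg, mods = ~ factor(alloc))
}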
One can either do the dummy coding manually or use a model formula together with the \code{\link{factor}} function to let \R handle the coding automatically (note that string/character variables in a model formula are automatically converted to factors). \bold{Tests and Confidence Intervals} By default, tests of individual coefficients in the model (and the corresponding confidence intervals) are based on a standard normal distribution, while the omnibus test is based on a chi-square distribution (see above). As an alternative, one can set \code{test="t"}, in which case tests of individual coefficients and confidence intervals are based on a t-distribution with \mjseqn{k-p} degrees of freedom, while the omnibus test statistic then uses an F-distribution with \mjseqn{m} and \mjseqn{k-p} degrees of freedom (with \mjseqn{k} denoting the total number of estimates included in the analysis and \mjseqn{p} the total number of model coefficients including the intercept if it is present). Note that \code{test="t"} is not the same as \code{test="knha"} in \code{\link{rma.uni}}, as no adjustment to the standard errors of the estimated coefficients is made. \bold{Tests for (Residual) Heterogeneity} Two different tests for (residual) heterogeneity are automatically carried out by the function. The first is a Wald-type test, which tests the coefficients corresponding to the dummy variables added in the saturated model for significance. The second is a likelihood ratio test, which tests the same set of coefficients, but does so by computing \mjseqn{-2} times the difference in the log-likelihood of the fixed-effects and the saturated model. These two tests are not identical for the types of models fitted by the \code{rma.glmm} function and may even lead to conflicting conclusions. \bold{Observed Effect Sizes or Outcomes of the Individual Studies} The various models do not require the calculation of the observed effect sizes or outcomes of the individual studies (e.g., the observed odds ratios of the \mjseqn{k} studies) and directly make use of the table/event counts. Zero cells/events are not a problem (except in extreme cases, such as when one of the two outcomes never occurs or when there are no events in any of the studies). Therefore, it is unnecessary to add some constant to the cell/event counts when there are zero cells/events. However, for plotting and various other functions, it is necessary to calculate the observed effect sizes or outcomes for the \mjseqn{k} studies. Here, zero cells/events can be problematic, so adding a constant value to the cell/event counts ensures that all \mjseqn{k} values can be calculated. The \code{add} and \code{to} arguments are used to specify what value should be added to the cell/event counts and under what circumstances when calculating the observed effect sizes or outcomes. The documentation of the \code{\link{escalc}} function explains how the \code{add} and \code{to} arguments work. Note that \code{drop00} is set to \code{TRUE} by default, since studies where \code{ai=ci=0} or \code{bi=di=0} or studies where \code{x1i=x2i=0} are uninformative about the size of the effect. } \value{ An object of class \code{c("rma.glmm","rma")}. 
The object is a list containing the following components: \item{beta}{estimated coefficients of the model.} \item{se}{standard errors of the coefficients.} \item{zval}{test statistics of the coefficients.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bound of the confidence intervals for the coefficients.} \item{ci.ub}{upper bound of the confidence intervals for the coefficients.} \item{vb}{variance-covariance matrix of the estimated coefficients.} \item{tau2}{estimated amount of (residual) heterogeneity. Always \code{0} when \code{method="FE"}.} \item{sigma2}{estimated amount of study level variability (only for \code{model="UM.RS"}).} \item{k}{number of studies included in the analysis.} \item{p}{number of coefficients in the model (including the intercept).} \item{m}{number of coefficients included in the omnibus test of moderators.} \item{QE.Wld}{Wald-type test statistic of the test for (residual) heterogeneity.} \item{QEp.Wld}{corresponding p-value.} \item{QE.LRT}{likelihood ratio test statistic of the test for (residual) heterogeneity.} \item{QEp.LRT}{corresponding p-value.} \item{QM}{test statistic of the omnibus test of moderators.} \item{QMp}{corresponding p-value.} \item{I2}{value of \mjseqn{I^2}.} \item{H2}{value of \mjseqn{H^2}.} \item{int.only}{logical that indicates whether the model is an intercept-only model.} \item{yi, vi, X}{the vector of outcomes, the corresponding sampling variances, and the model matrix.} \item{fit.stats}{a list with the log-likelihood, deviance, AIC, BIC, and AICc values.} \item{\dots}{some additional elements/values.} } \section{Methods}{ The results of the fitted model are formatted and printed with the \code{\link{print.rma.glmm}} function. If fit statistics should also be given, use \code{\link{summary.rma}} (or use the \code{\link{fitstats.rma}} function to extract them). } \note{ Fitting the various types of models requires several different iterative algorithms: \itemize{ \item For \code{model="UM.FS"} and \code{model="CM.AL"}, iteratively reweighted least squares (IWLS) as implemented in the \code{\link{glm}} function is used for fitting the fixed-effects and the saturated models. For \code{method="ML"}, adaptive Gauss-Hermite quadrature as implemented in the \code{\link[lme4]{glmer}} function is used. The same applies when \code{model="CM.EL"} is used in combination with \code{measure="IRR"} or when \code{measure="PLO"} or \code{measure="IRLN"} (regardless of the model type). \item For \code{model="UM.RS"}, adaptive Gauss-Hermite quadrature as implemented in the \code{\link[lme4]{glmer}} function is used to fit all of the models. \item For \code{model="CM.EL"} and \code{measure="OR"}, the quasi-Newton method (\code{"BFGS"}) as implemented in the \code{\link{optim}} function is used by default for fitting the fixed-effects and the saturated models. For \code{method="ML"}, the same algorithm is used, together with adaptive quadrature as implemented in the \code{\link{integrate}} function (for the integration over the density of the non-central hypergeometric distribution). Standard errors of the parameter estimates are obtained by inverting the Hessian, which is numerically approximated using the \code{\link[numDeriv]{hessian}} function. } When \code{model="CM.EL"} and \code{measure="OR"}, actually \code{model="CM.AL"} is used first to obtain starting values for \code{\link{optim}}, so either 4 (if \code{method="FE"}) or 6 (if \code{method="ML"}) models need to be fitted in total. 
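As a rough illustration (a hedged sketch; the particular settings are arbitrary and only meant to show the syntax), one can monitor the progress of such a fit and switch the optimizer via the arguments described below:
\preformatted{
rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg,
         model="CM.EL", verbose=TRUE, control=list(optimizer="nlminb"))
}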
Various control parameters can be adjusted via the \code{control} argument: \itemize{ \item \code{optimizer} is set by default to \code{\link[stats:optim]{"optim"}}, but can be set to \code{\link[stats:nlminb]{"nlminb"}} or one of the optimizers from the \pkg{minqa} package (i.e., \code{\link[minqa:bobyqa]{"bobyqa"}}, \code{\link[minqa:newuoa]{"newuoa"}}, or \code{\link[minqa:uobyqa]{"uobyqa"}}), \item \code{optmethod} is used to set the \code{method} argument for \code{\link{optim}} (the default is \code{"BFGS"}), \item \code{optCtrl} is a list of named arguments to be passed on to the \code{control} argument of the chosen optimizer, \item \code{glmCtrl} is a list of named arguments to be passed on to the \code{control} argument of the \code{\link{glm}} function, \item \code{glmerCtrl} is a list of named arguments to be passed on to the \code{control} argument of the \code{\link[lme4]{glmer}} function, and \item \code{intCtrl} is a list of named arguments (i.e., \code{rel.tol} and \code{subdivisions}) to be passed on to the \code{\link{integrate}} function. \item \code{hessianCtrl} is a list of named arguments to be passed on to the \code{method.args} argument of the \code{\link[numDeriv]{hessian}} function. For some borderline cases, it may be necessary to bump up the \code{r} argument to a higher number to get sufficient accuracy when approximating the Hessian numerically (the default is \code{control=list(hessianCtrl=list(r=16))}). } Also, for \code{\link[lme4]{glmer}}, the \code{nAGQ} argument is used to specify the number of quadrature points. The default value is 7, which should provide sufficient accuracy in the evaluation of the log-likelihood in most cases, but at the expense of speed. Setting this to 1 corresponds to the Laplacian approximation (which is faster, but less accurate). Information on the progress of the various algorithms can be obtained by setting \code{verbose=TRUE}. Since fitting the various models can be computationally expensive, this option is useful to determine how the model fitting is progressing. One can also set \code{verbose} to an integer (\code{verbose=2} yields even more information and \code{verbose=3} also sets \code{option(warn=1)} temporarily). For \code{model="CM.EL"} and \code{measure="OR"}, optimization involves repeated calculation of the density of the non-central hypergeometric distribution. When \code{method="ML"}, this also requires integration over the same density. This is currently implemented in a rather brute-force manner and may not be numerically stable, especially when models with moderators are fitted. Stability can be improved by scaling the moderators in a similar manner (i.e., don't use a moderator that is coded 0 and 1, while another uses values in the 1000s). For models with an intercept and moderators, the function actually rescales (non-dummy) variables to z-scores during the model fitting (results are given after back-scaling, so this should be transparent to the user). For models without an intercept, this is not done, so sensitivity analyses are highly recommended here (to ensure that the results do not depend on the scaling of the moderators). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} Code for computing the density of the non-central hypergeometric distribution comes from the \href{https://cran.r-project.org/package=MCMCpack}{MCMCpack} package, which in turn is based on Liao and Rosen (2001). } \references{ Agresti, A. (2002). 
\emph{Categorical data analysis} (2nd. ed). Hoboken, NJ: Wiley. Bagos, P. G., & Nikolopoulos, G. K. (2009). Mixed-effects Poisson regression models for meta-analysis of follow-up studies with constant or varying durations. \emph{The International Journal of Biostatistics}, \bold{5}(1). \verb{https://doi.org/10.2202/1557-4679.1168} van Houwelingen, H. C., Zwinderman, K. H., & Stijnen, T. (1993). A bivariate approach to meta-analysis. \emph{Statistics in Medicine}, \bold{12}(24), 2273--2284. \verb{https://doi.org/10.1002/sim.4780122405} Liao, J. G., & Rosen, O. (2001). Fast and stable algorithms for computing and sampling from the noncentral hypergeometric distribution. \emph{American Statistician}, \bold{55}(4), 366--369. \verb{https://doi.org/10.1198/000313001753272547} Stijnen, T., Hamza, T. H., & Ozdemir, P. (2010). Random effects meta-analysis of event outcome in the framework of the generalized linear mixed model with applications in sparse data. \emph{Statistics in Medicine}, \bold{29}(29), 3046--3067. \verb{https://doi.org/10.1002/sim.4040} Turner, R. M., Omar, R. Z., Yang, M., Goldstein, H., & Thompson, S. G. (2000). A multilevel model framework for meta-analysis of clinical trials with binary outcomes. \emph{Statistics in Medicine}, \bold{19}(24), 3417--3432. \verb{https://doi.org/10.1002/1097-0258(20001230)19:24<3417::aid-sim614>3.0.co;2-l} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, and \code{\link{rma.mv}} for other model fitting functions. \code{\link{dat.nielweise2007}}, \code{\link{dat.nielweise2008}}, \code{\link{dat.collins1985a}}, and \code{\link{dat.pritz1997}} for further examples of the use of the \code{rma.glmm} function. } \examples{ ### random-effects model using rma.uni() (standard RE model analysis) rma(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, method="ML") ### random-effects models using rma.glmm() (require 'lme4' package) ### unconditional model with fixed study effects \dontrun{ rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, model="UM.FS")} ### unconditional model with random study effects \dontrun{ rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, model="UM.RS")} ### conditional model with approximate likelihood \dontrun{ rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, model="CM.AL")} ### conditional model with exact likelihood ### note: fitting this model may take a bit of time, so be patient \dontrun{ rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, model="CM.EL")} } \keyword{models} metafor/man/dat.colditz1994.Rd0000644000176200001440000000743014055736403015532 0ustar liggesusers\name{dat.colditz1994} \docType{data} \alias{dat.colditz1994} \alias{dat.bcg} \title{Studies on the Effectiveness of the BCG Vaccine Against Tuberculosis} \description{Results from 13 studies examining the effectiveness of the Bacillus Calmette-Guerin (BCG) vaccine against tuberculosis. 
\loadmathjax} \usage{ dat.colditz1994 dat.bcg } \format{The data frame contains the following columns: \tabular{lll}{ \bold{trial} \tab \code{numeric} \tab trial number \cr \bold{author} \tab \code{character} \tab author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{tpos} \tab \code{numeric} \tab number of TB positive cases in the treated (vaccinated) group \cr \bold{tneg} \tab \code{numeric} \tab number of TB negative cases in the treated (vaccinated) group \cr \bold{cpos} \tab \code{numeric} \tab number of TB positive cases in the control (non-vaccinated) group \cr \bold{cneg} \tab \code{numeric} \tab number of TB negative cases in the control (non-vaccinated) group \cr \bold{ablat} \tab \code{numeric} \tab absolute latitude of the study location (in degrees) \cr \bold{alloc} \tab \code{character} \tab method of treatment allocation (random, alternate, or systematic assignment) } } \details{ The 13 studies provide data in terms of \mjeqn{2 \times 2}{2x2} tables in the form: \tabular{lcc}{ \tab TB positive \tab TB negative \cr vaccinated group \tab \code{tpos} \tab \code{tneg} \cr control group \tab \code{cpos} \tab \code{cneg} } The goal of the meta-analysis was to examine the overall effectiveness of the BCG vaccine for preventing tuberculosis and to examine moderators that may potentially influence the size of the effect. The dataset has been used in several publications to illustrate meta-analytic methods (see \sQuote{References}). } \source{ Colditz, G. A., Brewer, T. F., Berkey, C. S., Wilson, M. E., Burdick, E., Fineberg, H. V., & Mosteller, F. (1994). Efficacy of BCG vaccine in the prevention of tuberculosis: Meta-analysis of the published literature. \emph{Journal of the American Medical Association}, \bold{271}(9), 698--702. \verb{https://doi.org/10.1001/jama.1994.03510330076038} } \references{ Berkey, C. S., Hoaglin, D. C., Mosteller, F., & Colditz, G. A. (1995). A random-effects regression model for meta-analysis. \emph{Statistics in Medicine}, \bold{14}(4), 395--411. \verb{https://doi.org/10.1002/sim.4780140406} van Houwelingen, H. C., Arends, L. R., & Stijnen, T. (2002). Advanced methods in meta-analysis: Multivariate approach and meta-regression. \emph{Statistics in Medicine}, \bold{21}(4), 589--624. \verb{https://doi.org/10.1002/sim.1040} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \examples{ ### copy data into 'dat' dat <- dat.bcg ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) dat ### random-effects model res <- rma(yi, vi, data=dat) res ### average risk ratio with 95\% CI predict(res, transf=exp) ### mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) res ### predicted average risk ratios for 10-60 degrees absolute latitude ### holding the publication year constant at 1970 predict(res, newmods=cbind(seq(from=10, to=60, by=10), 1970), transf=exp) ### note: the interpretation of the results is difficult because absolute ### latitude and publication year are strongly correlated (the more recent ### studies were conducted closer to the equator) plot(dat$year, dat$ablat, pch=19, xlab="Publication Year", ylab="Absolute Latitude") cor(dat$year, dat$ablat) } \keyword{datasets} metafor/man/vcov.rma.Rd0000644000176200001440000000531214055736403014514 0ustar liggesusers\name{vcov.rma} \alias{vcov} \alias{vcov.rma} \title{Extract Various Types of Variance-Covariance Matrices from 'rma' Objects} \description{ The function extracts various types of variance-covariance matrices from objects of class \code{"rma"}. By default, the variance-covariance matrix of the parameter estimates (fixed effects) is returned. \loadmathjax } \usage{ \method{vcov}{rma}(object, type="fixed", \dots) } \arguments{ \item{object}{an object of class \code{"rma"}.} \item{type}{character string to specify the type of variance-covariance matrix to return: \code{type="fixed"} returns the variance-covariance matrix of the fixed effects (the default), \code{type="obs"} returns the marginal variance-covariance matrix of the observed effect sizes or outcomes, \code{type="fitted"} returns the variance-covariance matrix of the fitted values, \code{type="resid"} returns the variance-covariance matrix of the residuals.} \item{\dots}{other arguments.} } \details{ Note that \code{type="obs"} currently only works for objects of class \code{"rma.uni"} and \code{"rma.mv"}. For objects of class \code{"rma.uni"}, the marginal variance-covariance matrix of the observed effect sizes or outcomes is just a diagonal matrix with \mjeqn{\hat{\tau}^2 + v_i}{\tau^2 + v_i} along the diagonal, where \mjeqn{\hat{\tau}^2}{\tau^2} is the estimated amount of (residual) heterogeneity (set to 0 in fixed-effects models) and \mjseqn{v_i} is the sampling variance of the \mjseqn{i}th study. For objects of class \code{"rma.mv"}, the structure can be more complex and depends on the random effects included in the model. } \value{ A matrix corresponding to the requested variance-covariance matrix. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### var-cov matrix of the fixed effects (i.e., the model coefficients) vcov(res) ### marginal var-cov matrix of the observed log risk ratios vcov(res, type="obs") ### var-cov matrix of the fitted values vcov(res, type="fitted") ### var-cov matrix of the residuals vcov(res, type="resid") } \keyword{models} metafor/man/funnel.Rd0000644000176200001440000004016214055736403014252 0ustar liggesusers\name{funnel} \alias{funnel} \alias{funnel.rma} \alias{funnel.default} \title{Funnel Plots} \description{ Function to create funnel plots. \loadmathjax } \usage{ funnel(x, \dots) \method{funnel}{rma}(x, yaxis="sei", xlim, ylim, xlab, ylab, steps=5, at, atransf, targs, digits, level=x$level, addtau2=FALSE, type="rstandard", back="lightgray", shade="white", hlines="white", refline, lty=3, pch=19, pch.fill=21, col, bg, label=FALSE, offset=0.4, legend=FALSE, ci.res=1000, \dots) \method{funnel}{default}(x, vi, sei, ni, subset, yaxis="sei", xlim, ylim, xlab, ylab, steps=5, at, atransf, targs, digits, level=95, back="lightgray", shade="white", hlines="white", refline=0, lty=3, pch=19, col, bg, label=FALSE, offset=0.4, legend=FALSE, ci.res=1000, \dots) } \arguments{ \item{x}{an object of class \code{"rma"} or a vector with the observed effect sizes or outcomes.} \item{vi}{vector with the corresponding sampling variances (needed if \code{x} is a vector with the observed effect sizes or outcomes).} \item{sei}{vector with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{ni}{vector with the corresponding sample sizes. Only relevant when passing a vector via \code{x}.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the plot. Only relevant when passing a vector via \code{x}.} \item{yaxis}{either \code{"sei"}, \code{"vi"}, \code{"seinv"}, \code{"vinv"}, \code{"ni"}, \code{"ninv"}, \code{"sqrtni"}, \code{"sqrtninv"}, \code{"lni"}, or \code{"wi"} to indicate what values should be placed on the y-axis. See \sQuote{Details}.} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis. If unspecified, the function tries to set an appropriate axis title.} \item{steps}{the number of tick marks for the y-axis (the default is 5).} \item{at}{position of the x-axis tick marks and corresponding labels. If unspecified, the function tries to set the tick mark positions/labels to some sensible values.} \item{atransf}{optional argument to specify a function that should be used to transform the x-axis labels (e.g., \code{atransf=exp}; see also \link{transf}). 
If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified via \code{atransf}.} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the x- and y-axis should be rounded. Can also be a vector of two integers, the first to specify the number of decimal places for the x-axis, the second for the y-axis labels (e.g., \code{digits=c(2,3)}). If unspecified, the function tries to set the argument to some sensible values.} \item{level}{numeric value between 0 and 100 to specify the level of the pseudo confidence interval region (for \code{"rma"} objects, the default is to take the value from the object). May also be a vector of values to obtain multiple regions. See \sQuote{Examples}.} \item{addtau2}{logical to indicate whether the amount of heterogeneity should be accounted for when drawing the pseudo confidence interval region (the default is \code{FALSE}). Ignored when the model includes moderators and residuals are plotted. See \sQuote{Details}.} \item{type}{either \code{"rstandard"} (default) or \code{"rstudent"} to specify whether the usual or deleted residuals should be used in creating the funnel plot when the model involves moderators. See \sQuote{Details}.} \item{back}{color to use for the background of the plotting region (default is \code{"lightgray"}).} \item{shade}{color to use for shading the pseudo confidence interval region (default is \code{"white"}). When \code{level} is a vector of values, different shading colors can be specified for each region.} \item{hlines}{color of the horizontal reference lines (default is \code{"white"}).} \item{refline}{numeric value to specify the location of the vertical \sQuote{reference} line and where the pseudo confidence interval should be centered. If unspecified, the reference line is drawn at the fixed- or random-effects model estimate when the model does not include moderators and at zero when moderators are included (and therefore residuals are plotted) or when directly plotting observed outcomes.} \item{lty}{line type for the pseudo confidence interval region and the reference line. The default is to draw dotted lines (see \code{\link{par}} for other options). Can also be a vector to specify the two line types separately.} \item{pch}{plotting symbol to use for the observed outcomes. By default, a filled circle is used. Can also be a vector of values. See \code{\link{points}} for other options.} \item{pch.fill}{plotting symbol to use for the outcomes filled in by the trim and fill method. By default, a circle is used. Only relevant when plotting an object created by the \code{\link{trimfill}} function.} \item{col}{optional character string to specify the name of a color to use for the points (\code{"black"} is used by default if not specified). Can also be a vector of color names.} \item{bg}{optional character string to specify the name of a background color for open plot symbols (\code{"white"} is used by default if not specified). Can also be a vector of color names.} \item{label}{argument to control the labeling of the points (the default is \code{FALSE}). See \sQuote{Details}.} \item{offset}{argument to control the distance between the points and the corresponding labels.} \item{legend}{logical to indicate whether a legend should be added to the plot (the default is \code{FALSE}). 
Can also be a keyword to indicate the position of the legend (see \code{\link{legend}}).} \item{ci.res}{integer to specify the number of y-axis values at which to calculate the bounds of the pseudo confidence interval. The default is \code{1000}, which usually provides a sufficient resolution for the plotting.} \item{\dots}{other arguments.} } \details{ For fixed- and random-effects models (i.e., models not involving moderators), the plot shows the observed effect sizes or outcomes on the x-axis against the corresponding standard errors (i.e., the square root of the sampling variances) on the y-axis. A vertical line indicates the estimate based on the model. A pseudo confidence interval region is drawn around this value with bounds equal to \mjeqn{\pm 1.96 \mbox{SE}}{±1.96*SE}, where \mjeqn{\mbox{SE}}{SE} is the standard error value from the y-axis (assuming \code{level=95}). If \code{addtau2=TRUE} (only for models of class \code{"rma.uni"}), then the bounds of the pseudo confidence interval region are equal to \mjeqn{\pm 1.96 \sqrt{\mbox{SE}^2 + \hat{\tau}^2}}{±1.96*\sqrt(SE^2 + \tau^2)}, where \mjeqn{\hat{\tau}^2}{\tau^2} is the amount of heterogeneity as estimated by the model. For models involving moderators, the plot shows the residuals on the x-axis against their corresponding standard errors. Either the usual or deleted residuals can be used for that purpose (set via the \code{type} argument). See \code{\link{residuals.rma}} for more details on the different types of residuals. With the \code{atransf} argument, the labels on the x-axis can be transformed with some suitable function. For example, when plotting log odds ratios, one could use \code{atransf=exp} to obtain a funnel plot with the values on the x-axis corresponding to the odds ratios. See also \link{transf} for some other useful transformation functions in the context of a meta-analysis. Instead of placing the standard errors on the y-axis, several other options are available by setting the \code{yaxis} argument to: \itemize{ \item \code{yaxis="vi"} for the sampling variances, \item \code{yaxis="seinv"} for the inverse of the standard errors, \item \code{yaxis="vinv"} for the inverse of the sampling variances, \item \code{yaxis="ni"} for the sample sizes, \item \code{yaxis="ninv"} for the inverse of the sample sizes, \item \code{yaxis="sqrtni"} for the square root of the sample sizes, \item \code{yaxis="sqrtninv"} for the inverse square root of the sample sizes, \item \code{yaxis="lni"} for the log of the sample sizes, \item \code{yaxis="wi"} for the weights. } However, only when \code{yaxis="sei"} (the default) will the pseudo confidence region have the expected (upside-down) funnel shape with straight lines. Also, when placing (a function of) the sample sizes or the weights on the y-axis, the pseudo confidence region cannot be drawn. See Sterne and Egger (2001) for more details on the choice of the y-axis. If the object passed to the function comes from the \code{\link{trimfill}} function, the outcomes that are filled in by the trim and fill method are also added to the funnel plot. The symbol to use for plotting the filled in values can be specified via the \code{pch.fill} argument. One can also directly pass a vector with the observed effect sizes or outcomes (via \code{x}) and the corresponding sampling variances (via \code{vi}), standard errors (via \code{sei}), and/or sample sizes (via \code{ni}) to the function. By default, the vertical reference line is then drawn at zero.
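As a minimal illustrative sketch (the data frame \code{dat} and its columns \code{yi} and \code{vi} are only example names, e.g., as created by \code{\link{escalc}}), a funnel plot can therefore be created without fitting a model first: \preformatted{# 'dat' is assumed to be a data frame with columns 'yi' and 'vi'
funnel(dat$yi, vi=dat$vi)
funnel(dat$yi, sei=sqrt(dat$vi), yaxis="seinv")} The second call places the inverse of the standard errors (i.e., the precision) on the y-axis.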
The arguments \code{back}, \code{shade}, and \code{hlines} can be set to \code{NULL} to suppress the shading and the horizontal reference line. With the \code{label} argument, one can control whether points in the plot will be labeled. If \code{label="all"} (or \code{label=TRUE}), all points in the plot will be labeled. If \code{label="out"}, points falling outside of the pseudo confidence region will be labeled. Finally, one can also set this argument to a numeric value (between 1 and \mjseqn{k}) to specify how many of the most extreme points should be labeled (e.g., with \code{label=1} only the most extreme point would be labeled, while with \code{label=3}, the most extreme, and the second and third most extreme points would be labeled). With the \code{offset} argument, one can adjust the distance between the labels and the corresponding points. } \note{ Placing (a function of) the sample sizes on the y-axis (i.e., using \code{yaxis="ni"}, \code{yaxis="ninv"}, \code{yaxis="sqrtni"}, \code{yaxis="sqrtninv"}, or \code{yaxis="lni"}) is only possible when information about the sample sizes is actually stored within the object passed to the \code{funnel} function. That should automatically be the case when the observed effect sizes or outcomes were computed with the \code{\link{escalc}} function or when the observed effect sizes or outcomes were computed within the model fitting function. On the other hand, this will not be the case when \code{\link{rma.uni}} was used together with the \code{yi} and \code{vi} arguments and the \code{yi} and \code{vi} values were \emph{not} computed with \code{\link{escalc}}. In that case, it is still possible to pass information about the sample sizes to the \code{\link{rma.uni}} function (e.g., use \code{rma.uni(yi, vi, ni=ni, data=dat)}, where data frame \code{dat} includes a variable called \code{ni} with the sample sizes). When using unweighted estimation, using \code{yaxis="wi"} will place all points on a horizontal line. When directly passing a vector with the observed effect sizes or outcomes to the function, \code{yaxis="wi"} is equivalent to \code{yaxis="vinv"}, except that the weights are expressed in percent. When specifying vectors for \code{pch}, \code{col}, and/or \code{bg}, the variables specified are assumed to be of the same length as the data passed to the funnel function or the model fitting function (when using \code{funnel} on a model object). Any subsetting and removal of studies with missing values is automatically applied to the variables specified via these arguments. } \value{ A data frame with components: \item{x}{the x-axis coordinates of the points that were plotted.} \item{y}{the y-axis coordinates of the points that were plotted.} \item{slab}{the study labels.} Note that the data frame is returned invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Light, R. J., & Pillemer, D. B. (1984). \emph{Summing up: The science of reviewing research}. Cambridge, MA: Harvard University Press. Peters, J. L., Sutton, A. J., Jones, D. R., Abrams, K. R., & Rushton, L. (2008). Contour-enhanced meta-analysis funnel plots help distinguish publication bias from other causes of asymmetry. \emph{Journal of Clinical Epidemiology}, \bold{61}(10), 991--996. \verb{https://doi.org/10.1016/j.jclinepi.2007.11.010} Sterne, J. A. C., & Egger, M. (2001). Funnel plots for detecting bias in meta-analysis: Guidelines on choice of axis. 
\emph{Journal of Clinical Epidemiology}, \bold{54}(10), 1046--1055. \verb{https://doi.org/10.1016/s0895-4356(01)00377-8} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}}, \code{\link{trimfill}}, \code{\link{regtest}} } \examples{ ### copy BCG vaccine data into 'dat' dat <- dat.bcg ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) ### fit random-effects model res <- rma(yi, vi, data=dat, slab=paste(dat$author, dat$year, sep=", ")) ### draw a standard funnel plot funnel(res) ### show risk ratio values on x-axis (log scale) funnel(res, atransf=exp) ### label points outside of the pseudo confidence interval region funnel(res, atransf=exp, label="out") ### passing log risk ratios and sampling variances directly to the function ### note: same plot, except that reference line is centered at zero funnel(dat$yi, dat$vi) ### can accomplish the same thing by setting refline=0 funnel(res, refline=0) ### adjust the position of the x-axis labels, number of digits, and y-axis limits funnel(res, atransf=exp, at=log(c(.125, .25, .5, 1, 2)), digits=3L, ylim=c(0,.8)) ### contour-enhanced funnel plot centered at 0 (see Peters et al., 2008) funnel(res, level=c(90, 95, 99), shade=c("white", "gray55", "gray75"), refline=0, legend=TRUE) ### same, but show risk ratio values on the x-axis and some further adjustments funnel(res, level=c(90, 95, 99), shade=c("white", "gray55", "gray75"), digits=3L, ylim=c(0,.8), refline=0, legend=TRUE, atransf=exp, at=log(c(.125, .25, .5, 1, 2, 4, 8))) ### illustrate the use of vectors for 'pch' and 'col' res <- rma(yi, vi, data=dat, subset=2:10) funnel(res, pch=ifelse(dat$yi > -1, 19, 21), col=ifelse(sqrt(dat$vi) > .3, "red", "blue")) ### can add a second funnel via (undocumented) argument refline2 funnel(res, atransf=exp, at=log(c(.125, .25, .5, 1, 2, 4)), digits=3L, ylim=c(0,.8), refline2=0) ### mixed-effects model with absolute latitude in the model res <- rma(yi, vi, mods = ~ ablat, data=dat) ### funnel plot of the residuals funnel(res) ### simulate a large meta-analytic dataset (correlations with rho = 0.2) ### with no heterogeneity or publication bias; then try out different ### versions of the funnel plot gencor <- function(rhoi, ni) { x1 <- rnorm(ni, mean=0, sd=1) x2 <- rnorm(ni, mean=0, sd=1) x3 <- rhoi*x1 + sqrt(1-rhoi^2)*x2 cor(x1, x3) } set.seed(1234) k <- 200 ### number of studies to simulate ni <- round(rchisq(k, df=2) * 20 + 20) ### simulate sample sizes (skewed distribution) ri <- mapply(gencor, rep(0.2,k), ni) ### simulate correlations res <- rma(measure="ZCOR", ri=ri, ni=ni, method="FE") ### use r-to-z transformed correlations funnel(res, yaxis="sei") funnel(res, yaxis="vi") funnel(res, yaxis="seinv") funnel(res, yaxis="vinv") funnel(res, yaxis="ni") funnel(res, yaxis="ninv") funnel(res, yaxis="sqrtni") funnel(res, yaxis="sqrtninv") funnel(res, yaxis="lni") funnel(res, yaxis="wi") } \keyword{hplot} metafor/man/dat.yusuf1985.Rd0000644000176200001440000000471214055736403015235 0ustar liggesusers\name{dat.yusuf1985} \docType{data} \alias{dat.yusuf1985} \title{Studies of Beta Blockers During and After Myocardial Infarction} \description{Results from studies examining the effectiveness of beta blockers for reducing 
mortality and reinfarction. \loadmathjax} \usage{dat.yusuf1985} \format{The data frame contains the following columns: \tabular{lll}{ \bold{table} \tab \code{character} \tab table number \cr \bold{id} \tab \code{character} \tab trial id number \cr \bold{trial} \tab \code{character} \tab trial name or first author \cr \bold{ai} \tab \code{numeric} \tab number of deaths/reinfarctions in treatment group \cr \bold{n1i} \tab \code{numeric} \tab number of patients in treatment group \cr \bold{ci} \tab \code{numeric} \tab number of deaths/reinfarctions in control group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in control group } } \details{ The dataset contains table 6 (total mortality from short-term trials of oral beta blockers), 9 (total mortality at one week from trials with an initial IV dose of a beta blocker), 10 (total mortality from long-term trials with treatment starting late and mortality from day 8 onwards in long-term trials that began early and continued after discharge), 11 (nonfatal reinfarction from long-term trials of beta blockers), 12a (sudden death in long-term beta blocker trials), and 12b (nonsudden death in long-term beta blocker trials) from the meta-analysis by Yusuf et al. (1985) on the effectiveness of beta blockers for reducing mortality and reinfarction. The article also describes what is sometimes called Peto's one-step method for meta-analyzing \mjeqn{2 \times 2}{2x2} table data. This method is implemented in the \code{\link{rma.peto}} function. } \source{ Yusuf, S., Peto, R., Lewis, J., Collins, R., & Sleight, P. (1985). Beta blockade during and after myocardial infarction: An overview of the randomized trials. \emph{Progress in Cardiovascular Disease}, \bold{27}(5), 335--371. \verb{https://doi.org/10.1016/s0033-0620(85)80003-7} } \examples{ ### copy data into 'dat' dat <- dat.yusuf1985 ### to select a table for the analysis tab <- "6" ### either: 6, 9, 10, 11, 12a, 12b ### to double-check total counts as reported in article apply(dat[dat$table==tab,4:7], 2, sum, na.rm=TRUE) ### meta-analysis using Peto's one-step method res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(table==tab)) res predict(res, transf=exp, digits=2) } \keyword{datasets} metafor/man/dat.begg1989.Rd0000644000176200001440000000706414055736403014775 0ustar liggesusers\name{dat.begg1989} \docType{data} \alias{dat.begg1989} \title{Studies on Bone-Marrow Transplantation versus Chemotherapy for the Treatment of Leukemia} \description{Results from controlled and uncontrolled studies on the effectiveness of allogeneic bone-marrow transplantation (BMT) and conventional chemotherapy (CMO) in the treatment of acute nonlymphocytic leukemia.} \usage{dat.begg1989} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{trt} \tab \code{character} \tab treatment (BMT or CMO) \cr \bold{arms} \tab \code{numeric} \tab number of arms in the study (1 = uncontrolled studies; 2 = controlled studies) \cr \bold{yi} \tab \code{numeric} \tab 2-year disease-free survival rates \cr \bold{sei} \tab \code{numeric} \tab corresponding standard errors \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variances } } \details{ The dataset includes the results from controlled and uncontrolled studies on the 2-year disease-free survival rate in patients with acute nonlymphocytic leukemia receiving either allogeneic bone-marrow transplantation (BMT) or conventional chemotherapy (CMO).
In the controlled (two-arm) studies (studies 1-4), a cohort of patients in complete remission and potentially eligible for BMT was assembled, and those who consented and for whom a donor could be found received BMT, with the remaining patients used as controls (receiving CMO). In the uncontrolled (one-arm) studies (studies 5-16), only a single group was studied, receiving either BMT or CMO. The data in this dataset were obtained from Table 1 in Begg and Pilote (1991, p. 902). } \source{ Begg, C. B., & Pilote, L. (1991). A model for incorporating historical controls into a meta-analysis. \emph{Biometrics}, \bold{47}(3), 899--906. \verb{https://doi.org/10.2307/2532647} } \references{ Begg, C. B., Pilote, L., & McGlave, P. B. (1989). Bone marrow transplantation versus chemotherapy in acute non-lymphocytic leukemia: A meta-analytic review. \emph{European Journal of Cancer and Clinical Oncology}, \bold{25}(11), 1519--1523. \verb{https://doi.org/10.1016/0277-5379(89)90291-5} } \examples{ ### copy data into 'dat' and examine data dat <- dat.begg1989 dat ### turn trt and arms into factors and set reference levels dat$trt <- relevel(factor(dat$trt), ref="CMO") dat$arms <- relevel(factor(dat$arms), ref="2") ### create data frame with the treatment differences for the controlled studies dat2 <- data.frame(yi = dat$yi[c(1,3,5,7)] - dat$yi[c(2,4,6,8)], vi = dat$vi[c(1,3,5,7)] + dat$vi[c(2,4,6,8)]) dat2 ### DerSimonian and Laird method using the treatment differences res <- rma(yi, vi, data=dat2, method="DL", digits=2) res ### Begg & Pilote (1991) model incorporating the uncontrolled studies res <- rma.mv(yi, vi, mods = ~ trt, random = ~ 1 | study, data=dat, method="ML", digits=2) res ### model involving bias terms for the uncontrolled studies res <- rma.mv(yi, vi, mods = ~ trt + trt:arms, random = ~ 1 | study, data=dat, method="ML", digits=2) res ### model with a random treatment effect res <- rma.mv(yi, vi, mods = ~ trt, random = list(~ 1 | study, ~ trt | study), struct="UN", tau2=c(0,NA), rho=0, data=dat, method="ML", digits=2) res ### model with a random treatment effect, but with equal variances in both arms res <- rma.mv(yi, vi, mods = ~ trt, random = list(~ 1 | study, ~ trt | study), struct="CS", rho=0, data=dat, method="ML", digits=2) res } \keyword{datasets} metafor/man/dat.molloy2014.Rd0000644000176200001440000000730514055736403015356 0ustar liggesusers\name{dat.molloy2014} \docType{data} \alias{dat.molloy2014} \title{Studies on the Relationship between Conscientiousness and Medication Adherence} \description{Results from 16 studies on the correlation between conscientiousness and medication adherence.} \usage{dat.molloy2014} \format{The data frame contains the following columns: \tabular{lll}{ \bold{authors} \tab \code{character} \tab study authors \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ni} \tab \code{numeric} \tab sample size of the study \cr \bold{ri} \tab \code{numeric} \tab observed correlation \cr \bold{controls} \tab \code{character} \tab number of variables controlled for \cr \bold{design} \tab \code{character} \tab whether a cross-sectional or prospective design was used \cr \bold{a_measure} \tab \code{character} \tab type of adherence measure (self-report or other) \cr \bold{c_measure} \tab \code{character} \tab type of conscientiousness measure (NEO or other) \cr \bold{meanage} \tab \code{numeric} \tab mean age of the sample \cr \bold{quality} \tab \code{numeric} \tab methodological quality } } \details{ Conscientiousness, one of the big-5 personality 
traits, can be defined as \dQuote{socially prescribed impulse control that facilitates task- and goal-directed behaviour, such as thinking before acting, delaying gratification, following norms and rules and planning, organising and prioritising tasks} (John & Srivastava, 1999). Conscientiousness has been shown to be related to a number of health-related behaviors (e.g., tobacco/alcohol/drug use, diet and activity patterns, risky behaviors). A recent meta-analysis by Molloy et al. (2014) examined to what extent conscientiousness is related to medication adherence, that is, the extent to which (typically chronically ill) patients follow a prescribed medication regimen (e.g., taking a daily dose of a cholesterol lowering drug in patients with high LDL serum cholesterol levels). The results from the 16 studies included in this meta-analysis are provided in this dataset. Variable \code{a_measure} indicates whether adherence was measured based on self-reports or a more \sQuote{objective} measure (e.g., electronic monitoring of pill bottle openings, pill counts). Variable \code{c_measure} indicates whether conscientiousness was measured with some version of the NEO personality inventory or some other scale. Methodological quality was scored by the authors on a 1 to 4 scale with higher scores indicating higher quality (see article for details on how this score was derived). } \source{ Molloy, G. J., O'Carroll, R. E., & Ferguson, E. (2014). Conscientiousness and medication adherence: A meta-analysis. \emph{Annals of Behavioral Medicine}, \bold{47}(1), 92--101. \verb{https://doi.org/10.1007/s12160-013-9524-4} } \references{ John, O. P., & Srivastava, S. (1999). The Big Five Trait taxonomy: History, measurement, and theoretical perspectives. In L. A. Pervin & O. P. John (Eds.), \emph{Handbook of personality: Theory and research} (2nd ed., pp. 102-138). New York: Guilford Press. } \examples{ ### copy data into 'dat' dat <- dat.molloy2014 ### calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat, slab=paste(authors, year, sep=", ")) dat ### meta-analysis of the transformed correlations using a random-effects model res <- rma(yi, vi, data=dat) res ### average correlation with 95\% CI predict(res, digits=3, transf=transf.ztor) ### forest plot forest(res, addpred=TRUE, xlim=c(-1.6,1.6), atransf=transf.ztor, at=transf.rtoz(c(-.4,-.2,0,.2,.4,.6)), digits=c(2,1), cex=.8, header="Author(s), Year") ### funnel plot funnel(res) } \keyword{datasets} metafor/man/rma.peto.Rd0000644000176200001440000002574614055736403014523 0ustar liggesusers\name{rma.peto} \alias{rma.peto} \title{Meta-Analysis via Peto's Method} \description{ Function to fit fixed-effects models to \mjeqn{2 \times 2}{2x2} table data via Peto's method. See below and the documentation of the \pkg{\link{metafor-package}} for more details on these models. \loadmathjax } \usage{ rma.peto(ai, bi, ci, di, n1i, n2i, data, slab, subset, add=1/2, to="only0", drop00=TRUE, level=95, digits, verbose=FALSE, \dots) } \arguments{ \item{ai}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper left cell). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{bi}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper right cell). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{ci}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower left cell). 
See below and the documentation of the \code{\link{escalc}} function for more details.} \item{di}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower right cell). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{n1i}{vector to specify the group sizes or row totals (first group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{n2i}{vector to specify the group sizes or row totals (second group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{data}{optional data frame containing the data supplied to the function.} \item{slab}{optional vector with labels for the \mjseqn{k} studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be used for the analysis.} \item{add}{non-negative number to specify the amount to add to zero cells, counts, or frequencies when calculating the observed effect sizes or outcomes of the individual studies. Can also be a vector of two numbers, where the first number is used in the calculation of the observed effect sizes or outcomes and the second number is used when applying Peto's method. See below and the documentation of the \code{\link{escalc}} function for more details.} \item{to}{character string to specify when the values under \code{add} should be added (either \code{"only0"}, \code{"all"}, \code{"if0all"}, or \code{"none"}). Can also be a character vector, where the first string again applies when calculating the observed effect sizes or outcomes and the second string when applying Peto's method. See below and the documentation of the \code{\link{escalc}} function for more details.} \item{drop00}{logical to specify whether studies with no cases (or only cases) in both groups should be dropped when calculating the observed effect sizes or outcomes (the outcomes for such studies are set to \code{NA}). Can also be a vector of two logicals, where the first applies to the calculation of the observed effect sizes or outcomes and the second when applying Peto's method. See below and the documentation of the \code{\link{escalc}} function for more details.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} \item{verbose}{logical to specify whether output should be generated on the progress of the model fitting (the default is \code{FALSE}).} \item{\dots}{additional arguments.} } \details{ \bold{Specifying the Data} The studies are assumed to provide data in terms of \mjeqn{2 \times 2}{2x2} tables of the form: \tabular{lccc}{ \tab outcome 1 \tab outcome 2 \tab total \cr group 1 \tab \code{ai} \tab \code{bi} \tab \code{n1i} \cr group 2 \tab \code{ci} \tab \code{di} \tab \code{n2i} } where \code{ai}, \code{bi}, \code{ci}, and \code{di} denote the cell frequencies and \code{n1i} and \code{n2i} the row totals. For example, in a set of randomized clinical trials (RCTs) or cohort studies, group 1 and group 2 may refer to the treatment (exposed) and placebo/control (not exposed) group, with outcome 1 denoting some event of interest (e.g., death) and outcome 2 its complement.
In a set of case-control studies, group 1 and group 2 may refer to the group of cases and the group of controls, with outcome 1 denoting, for example, exposure to some risk factor and outcome 2 non-exposure. \bold{Peto's Method} An approach for aggregating data of this type was suggested by Peto (see Yusuf et al., 1985). The method provides a weighted estimate of the (log) odds ratio under a fixed-effects model. The method is particularly advantageous when the event of interest is rare, but it should only be used when the group sizes within the individual studies are not too dissimilar and the effect sizes are generally small (Greenland & Salvan, 1990; Sweeting et al., 2004; Bradburn et al., 2007). Note that the printed results are given both in terms of the log and the raw units (for easier interpretation). \bold{Observed Effect Sizes or Outcomes of the Individual Studies} Peto's method itself does not require the calculation of the observed (log) odds ratios of the individual studies and directly makes use of the \mjeqn{2 \times 2}{2x2} table counts. Zero cells are not a problem (except in extreme cases, such as when one of the two outcomes never occurs in any of the tables). Therefore, it is unnecessary to add some constant to the cell counts when there are zero cells. However, for plotting and various other functions, it is necessary to calculate the observed (log) odds ratios for the \mjseqn{k} studies. Here, zero cells can be problematic, so adding a constant value to the cell counts ensures that all \mjseqn{k} values can be calculated. The \code{add} and \code{to} arguments are used to specify what value should be added to the cell frequencies and under what circumstances when calculating the observed (log) odds ratios and when applying Peto's method. Similarly, the \code{drop00} argument is used to specify how studies with no cases (or only cases) in both groups should be handled. The documentation of the \code{\link{escalc}} function explains how the \code{add}, \code{to}, and \code{drop00} arguments work. If only a single value for these arguments is specified (as per default), then these values are used when calculating the observed (log) odds ratios and no adjustment to the cell counts is made when applying Peto's method. Alternatively, when specifying two values for these arguments, the first value applies when calculating the observed (log) odds ratios and the second value when applying Peto's method. Note that \code{drop00} is set to \code{TRUE} by default. Therefore, the observed (log) odds ratios for studies where \code{ai=ci=0} or \code{bi=di=0} are set to \code{NA}. When applying Peto's method, such studies are not explicitly dropped (unless the second value of the \code{drop00} argument is also set to \code{TRUE}), but this is practically not necessary, as they do not actually influence the results (assuming no adjustment to the cell/event counts is made when applying Peto's method). } \value{ An object of class \code{c("rma.peto","rma")}.
The object is a list containing the following components: \item{beta}{aggregated log odds ratio.} \item{se}{standard error of the aggregated value.} \item{zval}{test statistics of the aggregated value.} \item{pval}{corresponding p-value.} \item{ci.lb}{lower bound of the confidence interval.} \item{ci.ub}{upper bound of the confidence interval.} \item{QE}{test statistic of the test for heterogeneity.} \item{QEp}{corresponding p-value.} \item{k}{number of studies included in the analysis.} \item{yi, vi}{the vector of individual log odds ratios and corresponding sampling variances.} \item{fit.stats}{a list with the log-likelihood, deviance, AIC, BIC, and AICc values under the unrestricted and restricted likelihood.} \item{\dots}{some additional elements/values.} } \section{Methods}{ The results of the fitted model are formatted and printed with the \code{\link{print.rma.peto}} function. If fit statistics should also be given, use \code{\link{summary.rma}} (or use the \code{\link{fitstats.rma}} function to extract them). The \code{\link{residuals.rma}}, \code{\link{rstandard.rma.peto}}, and \code{\link{rstudent.rma.peto}} functions extract raw and standardized residuals. Leave-one-out diagnostics can be obtained with \code{\link{leave1out.rma.peto}}. Forest, funnel, radial, \enc{L'Abbé}{L'Abbe}, and Baujat plots can be obtained with \code{\link{forest.rma}}, \code{\link{funnel.rma}}, \code{\link{radial.rma}}, \code{\link{labbe.rma}}, and \code{\link{baujat.rma}}. The \code{\link{qqnorm.rma.peto}} function provides normal QQ plots of the standardized residuals. One can also just call \code{\link{plot.rma.peto}} on the fitted model object to obtain various plots at once. A cumulative meta-analysis (i.e., adding one observation at a time) can be obtained with \code{\link{cumul.rma.peto}}. Other extractor functions include \code{\link{coef.rma}}, \code{\link{vcov.rma}}, \code{\link{logLik.rma}}, \code{\link{deviance.rma}}, \code{\link{AIC.rma}}, and \code{\link{BIC.rma}}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Bradburn, M. J., Deeks, J. J., Berlin, J. A., & Localio, A. R. (2007). Much ado about nothing: A comparison of the performance of meta-analytical methods with rare events. \emph{Statistics in Medicine}, \bold{26}(1), 53--77. \verb{https://doi.org/10.1002/sim.2528} Greenland, S., & Salvan, A. (1990). Bias in the one-step method for pooling study results. \emph{Statistics in Medicine}, \bold{9}(3), 247--252. \verb{https://doi.org/10.1002/sim.4780090307} Sweeting, M. J., Sutton, A. J., & Lambert, P. C. (2004). What to add to nothing? Use and avoidance of continuity corrections in meta-analysis of sparse data. \emph{Statistics in Medicine}, \bold{23}(9), 1351--1375. \verb{https://doi.org/10.1002/sim.1761} Yusuf, S., Peto, R., Lewis, J., Collins, R., & Sleight, P. (1985). Beta blockade during and after myocardial infarction: An overview of the randomized trials. \emph{Progress in Cardiovascular Disease}, \bold{27}(5), 335--371. \verb{https://doi.org/10.1016/s0033-0620(85)80003-7} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.glmm}}, \code{\link{rma.mh}}, and \code{\link{rma.mv}} for other model fitting functions. 
\code{\link{dat.collins1985a}}, \code{\link{dat.collins1985b}}, and \code{\link{dat.yusuf1985}} for further examples of the use of the \code{rma.peto} function. } \examples{ ### meta-analysis of the (log) odds ratios using Peto's method rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) } \keyword{models} metafor/man/model.matrix.rma.Rd0000644000176200001440000000210714055736403016141 0ustar liggesusers\name{model.matrix.rma} \alias{model.matrix} \alias{model.matrix.rma} \title{Model Matrix for 'rma' Objects} \description{ The function extracts the model matrix for objects of class \code{"rma"}. } \usage{ \method{model.matrix}{rma}(object, \dots) } \arguments{ \item{object}{an object of class \code{"rma"}.} \item{\dots}{other arguments.} } \value{ The model matrix. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{fitted.rma}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### extract the model matrix model.matrix(res) } \keyword{models} metafor/man/print.fsn.Rd0000644000176200001440000000166214055736403014706 0ustar liggesusers\name{print.fsn} \alias{print.fsn} \title{Print Method for 'fsn' Objects} \description{ Print method for objects of class \code{"fsn"}. } \usage{ \method{print}{fsn}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"fsn"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ The output shows the results from the fail-safe N calculation. } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{fsn}} } \keyword{print} metafor/man/print.gosh.rma.Rd0000644000176200001440000000227314055736403015635 0ustar liggesusers\name{print.gosh.rma} \alias{print.gosh.rma} \title{Print Method for 'gosh.rma' Objects} \description{ Print method for objects of class \code{"gosh.rma"}. } \usage{ \method{print}{gosh.rma}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"gosh.rma"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ The output shows how many model fits were attempted, how many succeeded, and summary statistics (i.e., the mean, minimum, first quartile, median, third quartile, and maximum) for the various measures of (residual) heterogeneity and the model coefficient(s) computed across all of the subsets. } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. 
(2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{gosh.rma}} } \keyword{print} metafor/man/dat.damico2009.Rd0000644000176200001440000000406114055736403015277 0ustar liggesusers\name{dat.damico2009} \docType{data} \alias{dat.damico2009} \title{Studies on Topical plus Systemic Antibiotics to Prevent Respiratory Tract Infections} \description{Results from 16 studies examining the effectiveness of topical plus systemic antibiotics to prevent respiratory tract infections (RTIs).} \usage{ dat.damico2009 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab first author \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{xt} \tab \code{numeric} \tab number of RTIs in the treatment group \cr \bold{nt} \tab \code{numeric} \tab number of patients in the treatment group \cr \bold{xc} \tab \code{numeric} \tab number of RTIs in the control group \cr \bold{nc} \tab \code{numeric} \tab number of patients in the control group \cr \bold{conceal} \tab \code{numeric} \tab allocation concealment (0 = not adequate, 1 = adequate) \cr \bold{blind} \tab \code{numeric} \tab blinding (0 = open, 1 = double-blind) } } \details{ The dataset includes the results from 16 studies that examined the effectiveness of topical plus systemic antibiotics versus no prophylaxis to prevent respiratory tract infections (RTIs). } \source{ D'Amico, R., Pifferi, S., Torri, V., Brazzi, L., Parmelli, E., & Liberati, A. (2009). Antibiotic prophylaxis to reduce respiratory tract infections and mortality in adults receiving intensive care. \emph{Cochrane Database of Systematic Reviews}, \bold{4}, CD000022. \verb{https://doi.org/10.1002/14651858.CD000022.pub3} } \examples{ ### copy data into 'dat' and examine data dat <- dat.damico2009 dat ### meta-analysis of the (log) odds ratios using the Mantel-Haenszel method rma.mh(measure="OR", ai=xt, n1i=nt, ci=xc, n2i=nc, data=dat, digits=2) ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=xt, n1i=nt, ci=xc, n2i=nc, data=dat) ### meta-analysis using a random-effects model res <- rma(yi, vi, data=dat, method="DL") res predict(res, transf=exp, digits=2) } \keyword{datasets} metafor/man/ranef.Rd0000644000176200001440000001111114055736403014046 0ustar liggesusers\name{ranef} \alias{ranef} \alias{ranef.rma.uni} \alias{ranef.rma.mv} \title{Best Linear Unbiased Predictions for 'rma.uni' and 'rma.mv' Objects} \description{ The function calculates best linear unbiased predictions (BLUPs) of the random effects for objects of class \code{"rma.uni"} and \code{"rma.mv"}. Corresponding standard errors and prediction interval bounds are also provided. \loadmathjax } \usage{ \method{ranef}{rma.uni}(object, level, digits, transf, targs, \dots) \method{ranef}{rma.mv}(object, level, digits, transf, targs, verbose=FALSE, \dots) } \arguments{ \item{object}{an object of class \code{"rma.uni"} or \code{"rma.mv"}.} \item{level}{numeric value between 0 and 100 to specify the prediction interval level. If unspecified, the default is to take the value from the object.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. 
If unspecified, the default is to take the value from the object.} \item{transf}{optional argument to specify a function that should be used to transform the predicted values and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{verbose}{logical to specify whether output should be generated on the progress of the computations (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \value{ For objects of class \code{"rma.uni"}, an object of class \code{"list.rma"}. The object is a list containing the following components: \item{pred}{predicted values.} \item{se}{corresponding standard errors.} \item{pi.lb}{lower bound of the prediction intervals.} \item{pi.ub}{upper bound of the prediction intervals.} \item{\dots}{some additional elements/values.} The object is formatted and printed with \code{\link{print.list.rma}}. For objects of class \code{"rma.mv"}, a list of data frames with the same components as described above. } \note{ For best linear unbiased predictions that combine the fitted values based on the fixed effects and the estimated contributions of the random effects, see \code{\link{blup}}. For predicted/fitted values that are based only on the fixed effects of the model, see \code{\link{fitted.rma}} and \code{\link{predict.rma}}. Fixed-effects models (with or without moderators) do not contain random study effects. The BLUPs for these models will therefore be 0. When using the \code{transf} argument, the transformation is applied to the predicted values and the corresponding interval bounds. The standard errors are then set equal to \code{NA} and are omitted from the printed output. By default, a standard normal distribution is used to calculate the prediction intervals. When the model was fitted with \code{test="t"} or \code{test="knha"}, then a t-distribution with \mjseqn{k-p} degrees of freedom is used. To be precise, it should be noted that the function actually calculates empirical BLUPs (eBLUPs), since the predicted values are a function of the estimated variance component(s). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Kackar, R. N., & Harville, D. A. (1981). Unbiasedness of two-stage estimation and prediction procedures for mixed linear models. Communications in Statistics, Theory and Methods, \bold{10}(13), 1249--1261. \verb{https://doi.org/10.1080/03610928108828108} Raudenbush, S. W., & Bryk, A. S. (1985). Empirical Bayes meta-analysis. \emph{Journal of Educational Statistics}, \bold{10}(2), 75--98. \verb{https://doi.org/10.3102/10769986010002075} Robinson, G. K. (1991). That BLUP is a good thing: The estimation of random effects. \emph{Statistical Science}, \bold{6}(1), 15--32. \verb{https://doi.org/10.1214/ss/1177011926} Searle, S. R., Casella, G., & McCulloch, C. E. (1992). \emph{Variance components}. Hoboken, NJ: Wiley. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}}, \code{\link{predict.rma}}, \code{\link{fitted.rma}}, \code{\link{blup.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### meta-analysis of the log risk ratios using a random-effects model res <- rma(yi, vi, data=dat) ### BLUPs of the random effects ranef(res) } \keyword{models} metafor/man/print.regtest.rma.Rd0000644000176200001440000000334014055736403016346 0ustar liggesusers\name{print.regtest} \alias{print.regtest} \title{Print Method for 'regtest' Objects} \description{ Print method for objects of class \code{"regtest"}. } \usage{ \method{print}{regtest}(x, digits=x$digits, ret.fit=x$ret.fit, \dots) } \arguments{ \item{x}{an object of class \code{"regtest"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{ret.fit}{logical to specify whether the full results from the fitted model should also be returned. If unspecified, the default is to take the value from the object.} \item{\dots}{other arguments.} } \details{ The output includes: \itemize{ \item the model used for the regression test \item the predictor used for the regression test \item the results from the fitted model (only when \code{ret.fit=TRUE}) \item the test statistic of the test that the predictor is unrelated to the outcomes \item the degrees of freedom of the test statistic (only if the test statistic follows a t-distribution) \item the corresponding p-value \item the \sQuote{limit estimate} and its corresponding CI (only for predictors \code{"sei"}, \code{"vi"}, \code{"ninv"}, or \code{"sqrtninv"} and when the model does not contain any additional moderators) } } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{regtest}} } \keyword{print} metafor/man/simulate.rma.Rd0000644000176200001440000000522714055736403015367 0ustar liggesusers\name{simulate.rma} \alias{simulate} \alias{simulate.rma} \title{Simulate Method for 'rma' Objects} \description{ The function simulates effect sizes or outcomes based on an \code{"rma"} model object. } \usage{ \method{simulate}{rma}(object, nsim = 1, seed = NULL, olim, \dots) } \arguments{ \item{object}{an object of class \code{"rma"}.} \item{nsim}{number of response vectors to simulate (defaults to 1).} \item{seed}{an object to specify if and how the random number generator should be initialized (\sQuote{seeded}). Either \code{NULL} or an integer that will be used in a call to \code{set.seed} before simulating the response vectors. If set, the value is saved as the \code{"seed"} attribute of the returned value. The default, \code{NULL}, will not change the random generator state, and return \code{\link{.Random.seed}} as the \code{"seed"} attribute; see \sQuote{Value}.} \item{olim}{optional argument to specify observation/outcome limits for the simulated values.
If unspecified, no limits are used.} \item{\dots}{other arguments.} } \details{ The model specified via \code{object} must be a model fitted with either the \code{\link{rma.uni}} or \code{\link{rma.mv}} function. } \value{ A data frame with \code{nsim} columns with the simulated effect sizes or outcomes. The data frame comes with an attribute \code{"seed"}. If argument \code{seed} is \code{NULL}, the attribute is the value of \code{\link{.Random.seed}} before the simulation was started; otherwise it is the value of the \code{seed} argument with a \code{"kind"} attribute with value \code{as.list(RNGkind())}. } \note{ If the outcome measure used for the analysis is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{olim} argument to enforce those observation/outcome limits when simulating values (simulated values cannot exceed those bounds then). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}} } \examples{ ### copy BCG vaccine data into 'dat' dat <- dat.bcg ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) dat ### fit random-effects model res <- rma(yi, vi, data=dat) res ### simulate 10 sets of new outcomes based on the fitted model newdat <- simulate(res, nsim=10, seed=1234) newdat } \keyword{datagen} metafor/man/methods.escalc.Rd0000644000176200001440000000137714055736403015664 0ustar liggesusers\name{methods.escalc} \alias{methods.escalc} \alias{[.escalc} \alias{cbind.escalc} \alias{rbind.escalc} \title{Methods for 'escalc' Objects} \description{ Methods for objects of class \code{"escalc"}. } \usage{ \method{[}{escalc}(x, i, \dots) \method{cbind}{escalc}(\dots, deparse.level=1) \method{rbind}{escalc}(\dots, deparse.level=1) } \arguments{ \item{x}{an object of class \code{"escalc"}.} \item{\dots}{other arguments.} } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \keyword{internal} metafor/man/dat.cohen1981.Rd0000644000176200001440000000445614055736403015157 0ustar liggesusers\name{dat.cohen1981} \docType{data} \alias{dat.cohen1981} \title{Studies on the Relationship between Course Instructor Ratings and Student Achievement} \description{Results from 20 studies on the correlation between course instructor ratings and student achievement.} \usage{dat.cohen1981} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab study author(s) and year \cr \bold{sample} \tab \code{character} \tab course type \cr \bold{control} \tab \code{character} \tab ability control \cr \bold{ni} \tab \code{numeric} \tab sample size of the study (number of sections) \cr \bold{ri} \tab \code{numeric} \tab observed correlation } } \details{ The studies included in this dataset examined to what extent students' ratings of a course instructor correlated with their achievement in the course. 
Instead of correlating individual ratings and achievement scores, the studies were carried out in multisection courses, in which the sections had different instructors but all sections used a common achievement measure (e.g., a final exam). The correlation coefficients reflect the correlation between the mean instructor rating and the mean achievement score of each section. Hence, the unit of analysis are the sections, not the individuals. Note that this dataset (extracted from Table A.3 in Cooper & Hedges, 1994) only contains studies with at least 10 sections. } \source{ Cooper, H., & Hedges, L. V. (1994). Appendix A: Data Sets. In H. Cooper & L. V. Hedges (Eds.), \emph{The handbook of research synthesis} (pp. 543-547). New York: Russell Sage Foundation. } \references{ Cohen, P. A. (1981). Student ratings of instruction and student achievement: A meta-analysis of multisection validity studies. \emph{Review of Educational Research}, \bold{51}(3), 281--309. \verb{https://doi.org/10.3102/00346543051003281} } \examples{ ### copy data into 'dat' dat <- dat.cohen1981 ### calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat[c(1,4,5)]) dat ### meta-analysis of the transformed correlations using a random-effects model res <- rma(yi, vi, data=dat, digits=2) res ### predicted average correlation with 95\% CI predict(res, transf=transf.ztor) } \keyword{datasets} metafor/man/hc.Rd0000644000176200001440000001124314055736403013353 0ustar liggesusers\name{hc} \alias{hc} \alias{hc.rma.uni} \title{Meta-Analysis based on the Method by Henmi and Copas (2010)} \description{ The function can be used to obtain an estimate of the average true outcome and corresponding confidence interval under a random-effects model using the method described by Henmi and Copas (2010). } \usage{ hc(object, \dots) \method{hc}{rma.uni}(object, digits, transf, targs, control, \dots) } \arguments{ \item{object}{an object of class \code{"rma.uni"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{transf}{optional argument to specify a function that should be used to transform the estimate and the corresponding interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{control}{list of control values for the iterative algorithm. If unspecified, default values are defined inside the function. See \sQuote{Note}.} \item{\dots}{other arguments.} } \details{ The model specified via \code{object} must be a model without moderators (i.e., either a fixed- or a random-effects model and not a fixed-effects with moderators or mixed-effects model). When using the usual method for fitting a random-effects model (i.e., weighted estimation with inverse-variance weights), the weights assigned to smaller and larger studies become more uniform as the amount of heterogeneity increases. As a consequence, the estimated average outcome could become increasingly biased under certain forms of publication bias (where smaller studies on one side of the funnel plot are missing). 
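As a small illustrative sketch of this behavior (using the \code{dat.lee2004} dataset that is also used in the examples below; any data frame with observed outcomes \code{yi} and sampling variances \code{vi} would do), one can compare the weights from a fixed-effects fit with those from a random-effects fit: \preformatted{# illustrative comparison of fixed- vs random-effects weights
dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.lee2004)
weights(rma(yi, vi, data=dat, method="FE"))
weights(rma(yi, vi, data=dat))} The larger the estimated amount of heterogeneity, the more similar the random-effects weights become across studies.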
The method by Henmi and Copas (2010) tries to counteract this problem by providing an estimate of the average true outcome that is based on inverse-variance weights as used under a fixed-effects model (which do not take the amount of heterogeneity into consideration). The amount of heterogeneity is still estimated (with the DerSimonian-Laird estimator) and incorporated into the standard error of the estimated average outcome and the corresponding confidence interval. Currently, there is only a method for handling objects of class \code{"rma.uni"} with the \code{hc} function. It therefore provides a method for conducting a sensitivity analysis after the model has been fitted with the \code{\link{rma.uni}} function. } \value{ An object of class \code{"hc.rma.uni"}. The object is a list containing the following components: \item{beta}{estimated average true outcome.} \item{se}{corresponding standard error.} \item{ci.lb}{lower bound of the confidence intervals for the average true outcome.} \item{ci.ub}{upper bound of the confidence intervals for the average true outcome.} \item{\dots}{some additional elements/values.} The results are formatted and printed with the \code{\link{print.hc.rma.uni}} function. } \note{ The method makes use of the \code{\link{uniroot}} function. By default, the desired accuracy is set equal to \code{.Machine$double.eps^0.25} and the maximum number of iterations to \code{1000}. The desired accuracy (\code{tol}) and the maximum number of iterations (\code{maxiter}) can be adjusted with the \code{control} argument (i.e., \code{control=list(tol=value, maxiter=value)}). } \author{ Original code by Henmi and Copas (2010). Corrected for typos by Michael Dewey (\email{lists@dewey.myzen.co.uk}). Incorporated into the package with some small adjustments for consistency with the other functions in the package by Wolfgang Viechtbauer (\email{wvb@metafor-project.org}). } \references{ Henmi, M., & Copas, J. B. (2010). Confidence intervals for random effects meta-analysis and robustness to publication bias. \emph{Statistics in Medicine}, \bold{29}(29), 2969--2983. \verb{https://doi.org/10.1002/sim.4029} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}} } \examples{ ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.lee2004) dat ### meta-analysis based on log odds ratios res <- rma(yi, vi, data=dat) res ### funnel plot as in Henmi and Copas (2010) funnel(res, yaxis="seinv", refline=0, xlim=c(-3,3), ylim=c(.5,3.5), steps=7, digits=1, back="white") ### use method by Henmi and Copas (2010) as a sensitivity analysis hc(res) ### back-transform results to odds ratio scale hc(res, transf=exp) } \keyword{htest} metafor/man/dat.li2007.Rd0000644000176200001440000000413614055736403014450 0ustar liggesusers\name{dat.li2007} \docType{data} \alias{dat.li2007} \title{Studies on the Effectiveness of Intravenous Magnesium in Acute Myocardial Infarction} \description{Results from 22 trials examining the effectiveness of intravenous magnesium in the prevention of death following acute myocardial infarction.} \usage{ dat.li2007 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab trial id number \cr \bold{study} \tab \code{character} \tab first author or trial name \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ai} \tab \code{numeric} \tab number of deaths in the magnesium group \cr \bold{n1i} \tab \code{numeric} \tab number of patients in the magnesium group \cr \bold{ci} \tab \code{numeric} \tab number of deaths in the control group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in the control group } } \details{ The dataset includes the results from 22 randomized clinical trials that examined the effectiveness of intravenous magnesium in the prevention of death following acute myocardial infarction. It is similar to the dataset \code{\link{dat.egger2001}}, with some slight differences in the included trials and data used. } \source{ Li, J., Zhang, Q., Zhang, M., & Egger, M. (2007). Intravenous magnesium for acute myocardial infarction. \emph{Cochrane Database of Systematic Reviews}, \bold{2}, CD002755. \verb{https://doi.org/10.1002/14651858.CD002755.pub2} } \seealso{ \code{\link{dat.egger2001}} } \examples{ ### copy data into 'dat' and examine data dat <- dat.li2007 dat ### meta-analysis of all trials except ISIS-4 res <- rma(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, method="FE", subset=-14) print(res, digits=2) predict(res, transf=exp, digits=2) ### meta-analysis of all trials including ISIS-4 res <- rma(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, method="FE") print(res, digits=2) predict(res, transf=exp, digits=2) ### contour-enhanced funnel plot centered at 0 funnel(res, refline=0, level=c(90, 95, 99), shade=c("white", "gray", "darkgray")) } \keyword{datasets} metafor/man/to.long.Rd0000644000176200001440000001654214055736403014350 0ustar liggesusers\name{to.long} \alias{to.long} \title{Convert Data from Vector to Long Format} \description{ The function converts summary data in vector format to the corresponding long format. \loadmathjax } \usage{ to.long(measure, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, ni, data, slab, subset, add=1/2, to="none", drop00=FALSE, vlong=FALSE, append=TRUE, var.names) } \arguments{ \item{measure}{a character string to specify the effect size or outcome measure corresponding to the summary data supplied. 
See \sQuote{Details} and the documentation of the \code{\link{escalc}} function for possible options.} \item{ai}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper left cell).} \item{bi}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper right cell).} \item{ci}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower left cell).} \item{di}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower right cell).} \item{n1i}{vector to specify the group sizes or row totals (first group/row).} \item{n2i}{vector to specify the group sizes or row totals (second group/row).} \item{x1i}{vector to specify the number of events (first group).} \item{x2i}{vector to specify the number of events (second group).} \item{t1i}{vector to specify the total person-times (first group).} \item{t2i}{vector to specify the total person-times (second group).} \item{m1i}{vector to specify the means (first group or time point).} \item{m2i}{vector to specify the means (second group or time point).} \item{sd1i}{vector to specify the standard deviations (first group or time point).} \item{sd2i}{vector to specify the standard deviations (second group or time point).} \item{xi}{vector to specify the frequencies of the event of interest.} \item{mi}{vector to specify the frequencies of the complement of the event of interest or the group means.} \item{ri}{vector to specify the raw correlation coefficients.} \item{ti}{vector to specify the total person-times.} \item{sdi}{vector to specify the standard deviations.} \item{ni}{vector to specify the sample/group sizes.} \item{data}{optional data frame containing the variables given to the arguments above.} \item{slab}{optional vector with labels for the studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the data frame returned by the function.} \item{add}{see the documentation of the \code{\link{escalc}} function.} \item{to}{see the documentation of the \code{\link{escalc}} function.} \item{drop00}{see the documentation of the \code{\link{escalc}} function.} \item{vlong}{optional logical to specify whether a very long format should be used (only relevant for \mjeqn{2 \times 2}{2x2} or \mjeqn{1 \times 2}{1x2} table data).} \item{append}{logical to specify whether the data frame specified via the \code{data} argument (if one has been specified) should be returned together with the long format data (the default is \code{TRUE}).} \item{var.names}{optional vector with variable names (length depends on the data type). If unspecified, the function sets appropriate variable names by default.} } \details{ The \code{\link{escalc}} function describes a wide variety of effect size or outcome measures that can be computed for a meta-analysis. The summary data used to compute those measures are typically contained in vectors, each element corresponding to a study. The \code{to.long} function takes this information and constructs a long format dataset from these data.
For example, in various fields (such as the health and medical sciences), the response variable measured is often dichotomous (binary), so that the data from a study comparing two different groups can be expressed in terms of a \mjeqn{2 \times 2}{2x2} table, such as: \tabular{lccc}{ \tab outcome 1 \tab outcome 2 \tab total \cr group 1 \tab \code{ai} \tab \code{bi} \tab \code{n1i} \cr group 2 \tab \code{ci} \tab \code{di} \tab \code{n2i} } where \code{ai}, \code{bi}, \code{ci}, and \code{di} denote the cell frequencies (i.e., the number of people falling into a particular category) and \code{n1i} and \code{n2i} the row totals (i.e., the group sizes). The cell frequencies in \mjseqn{k} such \mjeqn{2 \times 2}{2x2} tables can be specified via the \code{ai}, \code{bi}, \code{ci}, and \code{di} arguments (or alternatively, via the \code{ai}, \code{ci}, \code{n1i}, and \code{n2i} arguments). The function then creates the corresponding long format dataset. The \code{measure} argument should then be set equal to one of the outcome measures that can be computed based on this type of data, such as \code{"RR"}, \code{"OR"}, \code{"RD"} (it is not relevant which specific measure is chosen, as long as it corresponds to the specified summary data). See the documentation of the \code{\link{escalc}} function for more details on the types of data formats available. The long format for data of this type consists of two rows per study, a factor indicating the study (default name \code{study}), a dummy variable indicating the group (default name \code{group}, coded as 1 and 2), and two variables indicating the number of individuals experiencing outcome 1 or outcome 2 (default names \code{out1} and \code{out2}). Alternatively, if \code{vlong=TRUE}, then the long format consists of four rows per study, a factor indicating the study (default name \code{study}), a dummy variable indicating the group (default name \code{group}, coded as 1 and 2), a dummy variable indicating the outcome (default name \code{outcome}, coded as 1 and 2), and a variable indicating the frequency of the respective outcome (default name \code{freq}). The default variable names can be changed via the \code{var.names} argument (must be of the appropriate length, depending on the data type). The examples below illustrate the use of this function. } \value{ A data frame with either \mjseqn{k}, \mjeqn{2 \times k}{2*k}, or \mjeqn{4 \times k}{4*k} rows and an appropriate number of columns (depending on the data type) with the data in long format. If \code{append=TRUE} and a data frame was specified via the \code{data} argument, then the data in long format are appended to the original data frame (with rows repeated an appropriate number of times). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{escalc}}, \code{\link{to.table}} } \examples{ ### convert data to long format dat <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) dat ### extra long format dat <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, vlong=TRUE) dat ### convert data to long format dat <- to.long(measure="IRR", x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat.hart1999, var.names=c("id", "group", "events", "ptime")) dat ### convert data to long format dat <- to.long(measure="MD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat.normand1999, var.names=c("id", "group", "mean", "sd", "n")) dat } \keyword{manip} metafor/man/plot.rma.uni.selmodel.Rd0000644000176200001440000001122314055736403017110 0ustar liggesusers\name{plot.rma.uni.selmodel} \alias{plot.rma.uni.selmodel} \title{Plot Method for 'rma.uni.selmodel' Objects} \description{ Plot method for objects of class \code{"rma.uni.selmodel"}. \loadmathjax } \usage{ \method{plot}{rma.uni.selmodel}(x, xlim, ylim, n=1000, prec="max", scale=FALSE, ci=FALSE, reps=1000, rug=TRUE, add=FALSE, lty=c("solid","dotted"), lwd=c(2,1), \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni.selmodel"}.} \item{xlim}{x-axis limits. Essentially the range of p-values for which the selection function should be drawn. If unspecified, the function sets the limits automatically.} \item{ylim}{y-axis limits. If unspecified, the function sets the limits automatically.} \item{n}{numeric value to specify for how many p-values within the x-axis limits the function value should be computed (the default is 1000).} \item{prec}{either a character string (with options \code{"max"}, \code{"min"}, \code{"mean"}, or \code{"median"}) or a numeric value. See \sQuote{Details}.} \item{scale}{logical to specify whether the function values should be rescaled to a 0 to 1 range (the default is \code{FALSE}).} \item{ci}{logical to specify whether a confidence interval should be drawn around the selection function (the default is \code{FALSE}). Can also be a string (with options \code{"boot"} or \code{"wald"}). See \sQuote{Details}.} \item{reps}{numeric value to specify the number of bootstrap samples to draw for generating the confidence interval bounds (the default is 1000).} \item{rug}{logical to specify whether the observed p-values should be added as tick marks on the x-axis (the default is \code{TRUE}).} \item{add}{logical to specify whether the function should be added to an existing plot (the default is \code{FALSE}).} \item{lty}{the line types for the selection function and the confidence interval bounds.} \item{lwd}{the line widths for the selection function and the confidence interval bounds.} \item{\dots}{other arguments.} } \details{ The function can be used to draw the estimated selection function based on objects of class \code{"rma.uni.selmodel"}. When the selection function incorporates a measure of precision (which, strictly speaking, is really a measure of imprecision), one can specify for which level of precision the selection function should be drawn. When \code{prec="max"}, then the function is drawn for the \emph{least} precise study (maximum imprecision), when \code{prec="min"}, then the function is drawn for the \emph{most} precise study (minimum imprecision), while \code{prec="mean"} and \code{prec="median"} will show the function for the mean and median level of imprecision, respectively.
Alternatively, one can specify a numeric value for argument \code{prec} to specify the precision value (where \code{prec="max"} corresponds to \code{prec=1} and higher levels of precision to \code{prec} values below 1). When \code{ci=TRUE} (or equivalently, \code{ci="boot"}), a confidence interval is drawn around the selection function. The bounds of this interval are generated using parametric bootstrapping, with argument \code{reps} controlling the number of bootstrap samples to draw for generating the confidence interval bounds. When both \code{n} and \code{reps} are large, constructing the confidence interval can take a few moments to complete. For models where the selection function involves a single \mjseqn{\delta} parameter, one can also set \code{ci="wald"}, in which case the confidence interval will be constructed based on the Wald-type CI of the \mjseqn{\delta} parameter (doing so is much quicker than using parametric bootstrapping). This option is also available for step function models (even if they involve multiple \mjseqn{\delta} parameters). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{selmodel.rma.uni}} } \examples{ ### copy data into 'dat' and examine data dat <- dat.hackshaw1998 ### fit random-effects model using the log odds ratios res <- rma(yi, vi, data=dat, method="ML") res ### fit step selection model sel1 <- selmodel(res, type="stepfun", steps=c(0.05, 0.10, 0.50, 1.00)) ### plot selection function plot(sel1, scale=TRUE) ### fit negative exponential selection model sel2 <- selmodel(res, type="negexp") ### add selection function to the existing plot plot(sel2, add=TRUE, col="blue") ### plot selection function with CI plot(sel1, ci="wald") } \keyword{hplot} metafor/man/vec2mat.Rd0000644000176200001440000000261014055736403014320 0ustar liggesusers\name{vec2mat} \alias{vec2mat} \title{Convert a Vector into a Square Matrix} \description{ Function to convert a vector into a square matrix by filling up the lower triangular part of the matrix. } \usage{ vec2mat(x, diag=FALSE, corr=!diag, dimnames) } \arguments{ \item{x}{a vector of the correct length.} \item{diag}{logical to specify whether the vector also contains the diagonal values of the lower triangular part of the matrix (the default is \code{FALSE}).} \item{corr}{logical to specify whether the diagonal of the matrix should be replaced with 1's (the default is to do this when \code{diag=FALSE}).} \item{dimnames}{optional vector of the correct length with the dimension names of the matrix.} } \details{ The values in \code{x} are filled into the lower triangular part of a square matrix with the appropriate dimensions (which are determined based on the length of \code{x}). If \code{diag=TRUE}, then \code{x} is assumed to also contain the diagonal values of the lower triangular part of the matrix. If \code{corr=TRUE}, then the diagonal of the matrix is replaced with 1's. } \value{ A matrix. 
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \examples{ vec2mat(1:6, corr=FALSE) vec2mat(seq(0.2, 0.7, by=0.1), corr=TRUE) vec2mat(1:10, diag=TRUE) vec2mat(1:6, corr=FALSE, dimnames=c("A","B","C","D")) } \keyword{manip} metafor/man/dat.hackshaw1998.Rd0000644000176200001440000000574314055736403015664 0ustar liggesusers\name{dat.hackshaw1998} \docType{data} \alias{dat.hackshaw1998} \title{Studies on the Risk of Lung Cancer in Women Exposed to Environmental Tobacco Smoke} \description{Results from 37 studies on the risk of lung cancer in women exposed to environmental tobacco smoke (ETS) from their smoking spouse.} \usage{dat.hackshaw1998} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{author} \tab \code{character} \tab first author of study \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{country} \tab \code{character} \tab country where study was conducted \cr \bold{design} \tab \code{character} \tab study design (either cohort or case-control) \cr \bold{cases} \tab \code{numeric} \tab number of lung cancer cases \cr \bold{or} \tab \code{numeric} \tab odds ratio \cr \bold{or.lb} \tab \code{numeric} \tab lower bound of 95\% CI for the odds ratio \cr \bold{or.ub} \tab \code{numeric} \tab upper bound of 95\% CI for the odds ratio \cr \bold{yi} \tab \code{numeric} \tab log odds ratio \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance } } \details{ The dataset includes the results from 37 studies (4 cohort, 33 case-control) examining if women (who are lifelong nonsmokers) have an elevated risk for lung cancer due to exposure to environmental tobacco smoke (ETS) from their smoking spouse. Values of the log odds ratio greater than 0 indicate an increased risk of cancer in exposed women compared to women not exposed to ETS from their spouse. Note that the log odds ratios and corresponding sampling variances were back-calculated from the reported odds ratios and confidence interval (CI) bounds (see \sQuote{Examples}). Since the reported values were rounded to some extent, this introduces some minor inaccuracies into the back-calculations. The overall estimate reported in Hackshaw et al. (1997) and Hackshaw (1998) can be fully reproduced though. } \source{ Hackshaw, A. K., Law, M. R., & Wald, N. J. (1997). The accumulated evidence on lung cancer and environmental tobacco smoke. \emph{British Medical Journal}, \bold{315}(7114), 980--988. \verb{https://doi.org/10.1136/bmj.315.7114.980} Hackshaw, A. K. (1998). Lung cancer and passive smoking. \emph{Statistical Methods in Medical Research}, \bold{7}(2), 119--136. 
\verb{https://doi.org/10.1177/096228029800700203} } \examples{ ### copy data into 'dat' and examine data dat <- dat.hackshaw1998 dat ### random-effects model using the log odds ratios res <- rma(yi, vi, data=dat, method="DL") res ### estimated average odds ratio with CI (and prediction interval) predict(res, transf=exp, digits=2) ### illustrate how the log odds ratios and corresponding sampling variances ### were back-calculated based on the reported odds ratios and CI bounds dat$yi <- log(dat$or) dat$vi <- ((log(dat$or.ub) - log(dat$or.lb)) / (2*qnorm(.975)))^2 } \keyword{datasets} metafor/man/dat.maire2019.Rd0000644000176200001440000000743414055736403015150 0ustar liggesusers\name{dat.maire2019} \docType{data} \alias{dat.maire2019} \title{Studies on Temporal Trends in Fish Community Structures in French Rivers} \description{Results from studies examining changes in the abundance of fish species in French rivers.} \usage{ dat.maire2019 } \format{The object is a list containing a data frame called \code{dat} that contains the following columns and a distance matrix called \code{dmat}: \tabular{lll}{ \bold{site} \tab \code{character} \tab study site \cr \bold{station} \tab \code{character} \tab sampling station at site \cr \bold{site_station} \tab \code{character} \tab site and station combined \cr \bold{s1} \tab \code{numeric} \tab Mann-Kendall trend statistic for relative abundance of non-local species \cr \bold{vars1} \tab \code{numeric} \tab corresponding sampling variance (corrected for temporal autocorrelation) \cr \bold{s2} \tab \code{numeric} \tab Mann-Kendall trend statistic for relative abundance of northern species \cr \bold{vars2} \tab \code{numeric} \tab corresponding sampling variance (corrected for temporal autocorrelation) \cr \bold{s3} \tab \code{numeric} \tab Mann-Kendall trend statistic for relative abundance of non-native species \cr \bold{vars3} \tab \code{numeric} \tab corresponding sampling variance (corrected for temporal autocorrelation) \cr \bold{const} \tab \code{numeric} \tab constant value of 1 } } \details{ The dataset includes the results from 35 sampling stations (at 11 sites along various French rivers) examining the abundance of various fish species over time (i.e., over 19-37 years, all until 2015). The temporal trend in these abundance data was quantified in terms of Mann-Kendall trend statistics, with positive values indicating monotonically increasing trends. The corresponding sampling variances were corrected for the temporal autocorrelation in the data (Hamed & Rao, 1998). The distance matrix \code{dmat} indicates the distance of the sampling stations (1-423 river-km). For stations not connected through the river network, a high distance value of 10,000 river-km was set (effectively forcing the spatial correlation to be 0 for such stations). The dataset can be used to illustrate a meta-analysis allowing for spatial correlation in the outcomes. } \source{ Maire, A., Thierry, E., Viechtbauer, W., & Daufresne, M. (2019). Poleward shift in large-river fish communities detected with a novel meta-analysis framework. \emph{Freshwater Biology}, \bold{64}(6), 1143--1156. \verb{https://doi.org/10.1111/fwb.13291} } \references{ Hamed, K. H., & Rao, A. R. (1998). A modified Mann-Kendall trend test for autocorrelated data. \emph{Journal of Hydrology}, \bold{204}(1-4), 182--196.
\verb{https://doi.org/10.1016/S0022-1694(97)00125-X} } \examples{ ### copy data into 'dat' and examine data dat <- dat.maire2019$dat dat ### copy distance matrix into 'dmat' and examine first 5 rows/columns dmat <- dat.maire2019$dmat dmat[1:5,1:5] ### fit a standard random-effects model ignoring spatial correlation res1 <- rma.mv(s1, vars1, random = ~ 1 | site_station, data=dat) res1 ### fit model allowing for spatial correlation res2 <- rma.mv(s1, vars1, random = ~ site_station | const, struct="SPGAU", data=dat, dist=list(dmat), control=list(rho.init=10)) res2 ### add random effects for sites and stations within sites res3 <- rma.mv(s1, vars1, random = list(~ 1 | site/station, ~ site_station | const), struct="SPGAU", data=dat, dist=list(dmat), control=list(rho.init=10)) res3 ### likelihood ratio tests comparing the models anova(res1, res2) anova(res2, res3) ### profile likelihood plots for model res2 profile(res2, cline=TRUE) ### effective range (river-km for which the spatial correlation is >= .05) sqrt(3) * res2$rho } \keyword{datasets} metafor/man/formula.rma.Rd0000644000176200001440000000266314055736403015212 0ustar liggesusers\name{formula.rma} \alias{formula} \alias{formula.rma} \title{Model Formulae for 'rma' Objects} \description{ The function extracts model formulae for objects of class \code{"rma"}. } \usage{ \method{formula}{rma}(x, type="mods", \dots) } \arguments{ \item{x}{an object of class \code{"rma"}.} \item{type}{the formula which should be returned; either \code{"mods"} (default), \code{"yi"} (in case argument \code{yi} was used to specify a formula), or \code{"scale"} (only for location-scale models).} \item{\dots}{other arguments.} } \value{ The requested formula. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}} } \examples{ ### copy BCG vaccine data into 'dat' dat <- dat.bcg ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, slab=paste(author, ", ", year, sep="")) ### mixed-effects meta-regression model res <- rma(yi, vi, mods = ~ ablat + alloc, data=dat) formula(res, type="mods") ### specify moderators via 'yi' argument res <- rma(yi ~ ablat + alloc, vi, data=dat) formula(res, type="yi") } \keyword{models} metafor/man/dat.lau1992.Rd0000644000176200001440000000453014055736403014637 0ustar liggesusers\name{dat.lau1992} \docType{data} \alias{dat.lau1992} \title{Studies on Intravenous Streptokinase for Acute Myocardial Infarction} \description{Results from 33 trials comparing intravenous streptokinase versus placebo or no therapy in patients who had been hospitalized for acute myocardial infarction.} \usage{dat.lau1992} \format{The data frame contains the following columns: \tabular{lll}{ \bold{trial} \tab \code{character} \tab trial name \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ai} \tab \code{numeric} \tab number of deaths in the streptokinase group \cr \bold{n1i} \tab \code{numeric} \tab number of patients in the streptokinase group \cr \bold{ci} \tab \code{numeric} \tab number of deaths in the control group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in the control group } } \details{ In the paper by Lau et al. 
(1992), the data are used to illustrate the idea of a cumulative meta-analysis, where the results are updated as each trial is added to the dataset. See \sQuote{Examples} for code that replicates the results and shows corresponding forest plots. } \source{ Lau, J., Antman, E. M., Jimenez-Silva, J., Kupelnick, B., Mosteller, F., & Chalmers, T. C. (1992). Cumulative meta-analysis of therapeutic trials for myocardial infarction. \emph{New England Journal of Medicine}, \bold{327}(4), 248--254. \verb{https://doi.org/10.1056/NEJM199207233270406} } \examples{ ### copy data into 'dat' and examine data dat <- dat.lau1992 dat ### meta-analysis of log odds ratios using the MH method res <- rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, slab=trial) print(res, digits=2) ### forest plot forest(res, xlim=c(-10,9), atransf=exp, at=log(c(.01, 0.1, 1, 10, 100)), header=TRUE, top=2, ilab=dat$year, ilab.xpos=-6) text(-6, 35, "Year", font=2) ### cumulative meta-analysis sav <- cumul(res) ### forest plot of the cumulative results forest(sav, xlim=c(-5,4), atransf=exp, at=log(c(0.1, 0.5, 1, 2, 10)), header=TRUE, top=2, ilab=dat$year, ilab.xpos=-3) text(-3, 35, "Year", font=2) id <- c(4, 8, 15, 33) # rows for which the z/p-values should be shown (as in Lau et al., 1992) text(1.1, (res$k:1)[id], paste0("z = ", formatC(sav$zval[id], format="f", digits=2), ", p = ", formatC(sav$pval[id], format="f", digits=4))) } \keyword{datasets} metafor/man/dat.kalaian1996.Rd0000644000176200001440000001131514055736403015461 0ustar liggesusers\name{dat.kalaian1996} \docType{data} \alias{dat.kalaian1996} \title{Studies on the Effectiveness of Coaching for the SAT} \description{Results from studies examining the effectiveness of coaching on the performance on the Scholastic Aptitude Test (SAT).} \usage{dat.kalaian1996} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab row (effect) id \cr \bold{study} \tab \code{character} \tab study identifier \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{n1i} \tab \code{numeric} \tab number of participants in the coached group \cr \bold{n2i} \tab \code{numeric} \tab number of participants in the uncoached group \cr \bold{outcome} \tab \code{character} \tab subtest (verbal or math) \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance \cr \bold{hrs} \tab \code{numeric} \tab hours of coaching \cr \bold{ets} \tab \code{numeric} \tab study conducted by the Educational Testing Service (ETS) (0 = no, 1 = yes) \cr \bold{homework} \tab \code{numeric} \tab assignment of homework outside of the coaching course (0 = no, 1 = yes) \cr \bold{type} \tab \code{numeric} \tab study type (1 = randomized study, 2 = matched study, 3 = nonequivalent comparison study) } } \details{ The effectiveness of coaching for the Scholastic Aptitude Test (SAT) has been examined in numerous studies. This dataset contains standardized mean differences comparing the performance of a coached versus uncoached group on the verbal and/or math subtest of the SAT. Studies may report a standardized mean difference for the verbal subtest, the math subtest, or both. In the latter case, the two standardized mean differences are not independent (since they were measured in the same group of subjects). 
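As a minimal sketch of what this non-independence implies (using hypothetical sampling variances and assuming, as in the \sQuote{Examples} below, a correlation of 0.66 between the sampling errors of the two outcomes), the 2 x 2 variance-covariance matrix of the sampling errors for a study reporting both subtests could be constructed as follows:

\preformatted{## hypothetical sampling variances of the verbal and math estimates
v   <- c(0.010, 0.012)
rho <- 0.66 # assumed correlation of the sampling errors
S   <- diag(sqrt(v))
S \%*\% matrix(c(1, rho, rho, 1), nrow=2) \%*\% S
## the off-diagonal element equals rho * sqrt(v[1] * v[2])
}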
The number of hours of coaching (variable \code{hrs}), whether the study was conducted by the Educational Testing Service (variable \code{ets}), whether homework was assigned outside of the coaching course (variable \code{homework}), and the study type (variable \code{type}) may be potential moderators of the treatment effect. } \note{ The dataset was obtained from Table 1 in Kalaian and Raudenbush (1996). However, there appear to be some inconsistencies between the data in the table and those that were actually used for the analyses (see \sQuote{Examples}). } \source{ Kalaian, H. A., & Raudenbush, S. W. (1996). A multivariate mixed linear model for meta-analysis. \emph{Psychological Methods}, \bold{1}(3), 227--235. \verb{https://doi.org/10.1037/1082-989X.1.3.227} } \examples{ ### copy data into 'dat' dat <- dat.kalaian1996 ### check ranges range(dat$yi[dat$outcome == "verbal"]) # -0.35 to 0.74 according to page 230 range(dat$yi[dat$outcome == "math"]) # -0.53 to 0.60 according to page 231 ### comparing this with Figure 1 in the paper reveals some discrepancies par(mfrow=c(1,2), mar=c(5,4,1,1)) plot(log(dat$hrs[dat$outcome == "verbal"]), dat$yi[dat$outcome == "verbal"], pch=19, xlab="Log(Coaching Hours)", ylab="Effect Size (verbal)", xlim=c(1,6), ylim=c(-0.5,1), xaxs="i", yaxs="i") abline(h=c(-0.5,0,0.5), lty="dotted") abline(v=log(c(5,18)), lty="dotted") plot(log(dat$hrs[dat$outcome == "math"]), dat$yi[dat$outcome == "math"], pch=19, xlab="Log(Coaching Hours)", ylab="Effect Size (math)", xlim=c(1,6), ylim=c(-1.0,1), xaxs="i", yaxs="i") abline(h=c(-0.5,0,0.5), lty="dotted") abline(v=log(c(5,18)), lty="dotted") ### construct variance-covariance matrices assuming rho = 0.66 vcalc <- function(v, rho) { S <- diag(sqrt(v), nrow=length(v), ncol=length(v)) R <- matrix(rho, nrow=length(v), ncol=length(v)) diag(R) <- 1 S \%*\% R \%*\% S } V <- lapply(split(dat$v, dat$study), vcalc, rho=0.66) V <- bldiag(V, order=dat$study) ### fit multivariate random-effects model res <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | study, struct="UN", data=dat, digits=3) res ### test whether the effect differs for the math and verbal subtest anova(res, X=c(1,-1)) ### log-transform and mean center the hours of coaching variable dat$loghrs <- log(dat$hrs) - mean(log(dat$hrs), na.rm=TRUE) ### fit multivariate model with log(hrs) as moderator res <- rma.mv(yi, V, mods = ~ outcome + outcome:loghrs - 1, random = ~ outcome | study, struct="UN", data=dat, digits=3) res ### fit model with tau2 = 0 for outcome verbal (which also constrains rho = 0) res <- rma.mv(yi, V, mods = ~ outcome + outcome:loghrs - 1, random = ~ outcome | study, struct="UN", tau2=c(NA,0), data=dat, digits=3) res } \keyword{datasets} metafor/man/addpoly.default.Rd0000644000176200001440000001305714055736403016045 0ustar liggesusers\name{addpoly.default} \alias{addpoly.default} \title{Add Polygons to Forest Plots (Default Method)} \description{ Function to add one or more polygons to a forest plot. } \usage{ \method{addpoly}{default}(x, vi, sei, ci.lb, ci.ub, pi.lb, pi.ub, rows=-1, level=95, annotate=TRUE, digits=2, width, mlab, transf, atransf, targs, efac=1, col, border, fonts, cex, \dots) } \arguments{ \item{x}{vector with the values at which the polygons should be drawn.} \item{vi}{vector with the corresponding variances.} \item{sei}{vector with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{ci.lb}{vector with the corresponding lower confidence interval bounds. 
Not needed if \code{vi} or \code{sei} is specified. See \sQuote{Details}.} \item{ci.ub}{vector with the corresponding upper confidence interval bounds. Not needed if \code{vi} or \code{sei} is specified. See \sQuote{Details}.} \item{pi.lb}{optional vector with the corresponding lower prediction interval bounds.} \item{pi.ub}{optional vector with the corresponding upper prediction interval bounds.} \item{rows}{vector to specify the rows (or more generally, the vertical positions) for plotting the polygons (the default is \code{-1}). Can also be a single value to specify the row (vertical position) of the first polygon (the remaining polygons are then plotted below this starting row).} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{annotate}{logical to specify whether annotations should be added to the plot for the polygons that are drawn (the default is \code{TRUE}).} \item{digits}{integer to specify the number of decimal places to which the annotations should be rounded (the default is 2).} \item{width}{optional integer to manually adjust the width of the columns for the annotations.} \item{mlab}{optional character vector with the same length as \code{x} giving labels for the polygons that are drawn.} \item{transf}{optional argument to specify a function that should be used to transform the \code{x} values and confidence interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{atransf}{optional argument to specify a function that should be used to transform the annotations (e.g., \code{atransf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.} \item{efac}{vertical expansion factor for the polygons. The default value of 1 should usually work okay.} \item{col}{optional character string to specify the name of a color to use for the polygons. If unspecified, the function sets a default color.} \item{border}{optional character string to specify the name of a color to use for the border of the polygons. If unspecified, the function sets a default color.} \item{fonts}{optional character string to specify the font to use for the labels and annotations. If unspecified, the default font is used.} \item{cex}{optional symbol expansion factor. If unspecified, the function tries to set this to a sensible value.} \item{\dots}{other arguments.} } \details{ The function can be used to add one or more polygons to an existing forest plot created with the \code{\link{forest}} function. For example, summary estimates based on a model involving moderators can be added to the plot this way (see \sQuote{Examples}). To use the function, one should specify the values at which the polygons should be drawn (via the \code{x} argument) together with the corresponding variances (via the \code{vi} argument) or with the corresponding standard errors (via the \code{sei} argument). Alternatively, one can specify the values at which the polygons should be drawn together with the corresponding confidence interval bounds (via the \code{ci.lb} and \code{ci.ub} arguments). Optionally, one can also specify the bounds of the corresponding prediction intervals via the \code{pi.lb} and \code{pi.ub} arguments. The arguments \code{transf}, \code{atransf}, \code{efac}, and \code{cex} should always be set equal to the same values used to create the forest plot.
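For instance, a minimal sketch of the two equivalent ways of specifying a single polygon (using hypothetical values for the estimate and its standard error, and assuming a forest plot has already been drawn):

\preformatted{## based on the estimate and its standard error
addpoly(0.35, sei=0.12, rows=-1, mlab="Subgroup Estimate")
## equivalently, based on the corresponding 95\% CI bounds
addpoly(0.35, ci.lb=0.35 - 1.96*0.12, ci.ub=0.35 + 1.96*0.12, rows=-2)
}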
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{forest.rma}}, \code{\link{forest.default}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude as a moderator res <- rma(yi, vi, mods = ~ ablat, slab=paste(author, year, sep=", "), data=dat) ### forest plot of the observed risk ratios forest(res, addfit=FALSE, atransf=exp, xlim=c(-8,5), ylim=c(-4.5,16), cex=.8, order=dat$ablat, ilab=dat$ablat, ilab.xpos=-4, header="Author(s) and Year") ### predicted average log risk ratios for 10, 30, and 50 degrees absolute latitude x <- predict(res, newmods=c(10, 30, 50)) ### add predicted average risk ratios to forest plot addpoly(x$pred, sei=x$se, atransf=exp, rows=-2, mlab=c("- at 10 Degrees", "- at 30 Degrees", "- at 50 Degrees"), cex=.8) abline(h=0) text(-8, -1, "Model-Based Estimates:", pos=4, cex=.8) text(-4, 15, "Latitude", cex=.8, font=2) } \keyword{aplot} metafor/man/dat.pritz1997.Rd0000644000176200001440000000541214055736403015233 0ustar liggesusers\name{dat.pritz1997} \docType{data} \alias{dat.pritz1997} \title{Studies on the Effectiveness of Hyperdynamic Therapy for Treating Cerebral Vasospasm} \description{Results from 14 studies on the effectiveness of hyperdynamic therapy for treating cerebral vasospasm.} \usage{dat.pritz1997} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{authors} \tab \code{character} \tab study authors \cr \bold{xi} \tab \code{numeric} \tab number of patients that improved with hyperdynamic therapy \cr \bold{ni} \tab \code{numeric} \tab total number of patients treated } } \details{ As described in Zhou et al. (1999), "hyperdynamic therapy refers to induced hypertension and hypervolaemia (volume expansion) to treat ischaemic symptoms due to vasospasm, and the success of this therapy is defined as clinical improvement in terms of neurologic deficits." For each study that was included in the meta-analysis, the dataset includes information on the number of patients that improved under this form of therapy and the total number of patients that were treated. The goal of the meta-analysis is to estimate the true (average) success rate of hyperdynamic therapy. } \source{ Zhou, X.-H., Brizendine, E. J., & Pritz, M. B. (1999). Methods for combining rates from several studies. \emph{Statistics in Medicine}, \bold{18}(5), 557--566. \verb{https://doi.org/10.1002/(SICI)1097-0258(19990315)18:5<557::AID-SIM53>3.0.CO;2-F} } \references{ Pritz, M. B. (1997). Treatment of cerebral vasospasm due to aneurysmal subarachnoid hemorrhage: Past, present, and future of hyperdynamic therapy. \emph{Neurosurgery Quarterly}, \bold{7}(4), 273--285. } \examples{ ### copy data into 'dat' and examine data dat <- dat.pritz1997 dat ### computation of "weighted average" in Zhou et al. 
(1999), Table IV dat <- escalc(measure="PR", xi=xi, ni=ni, data=dat, add=0) theta.hat <- sum(dat$ni * dat$yi) / sum(dat$ni) se.theta.hat <- sqrt(sum(dat$ni^2 * dat$vi) / sum(dat$ni)^2) ci.lb <- theta.hat - 1.96 * se.theta.hat ci.ub <- theta.hat + 1.96 * se.theta.hat round(c(estimate = theta.hat, se = se.theta.hat, ci.lb = ci.lb, ci.ub = ci.ub), 4) ### this is identical to a FE model with sample size weights rma(yi, vi, weights=ni, method="FE", data=dat) ### random-effects model with raw proportions dat <- escalc(measure="PR", xi=xi, ni=ni, data=dat) res <- rma(yi, vi, data=dat) predict(res) ### random-effects model with logit transformed proportions dat <- escalc(measure="PLO", xi=xi, ni=ni, data=dat) res <- rma(yi, vi, data=dat) predict(res, transf=transf.ilogit) ### mixed-effects logistic regression model res <- rma.glmm(measure="PLO", xi=xi, ni=ni, data=dat) predict(res, transf=transf.ilogit) } \keyword{datasets} metafor/man/dat.ishak2007.Rd0000644000176200001440000001110214055736403015132 0ustar liggesusers\name{dat.ishak2007} \docType{data} \alias{dat.ishak2007} \title{Studies on Deep-Brain Stimulation in Patients with Parkinson's disease} \description{Results from 46 studies examining the effects of deep-brain stimulation on motor skills of patients with Parkinson's disease.} \usage{dat.ishak2007} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab (first) author and year \cr \bold{y1i} \tab \code{numeric} \tab observed mean difference at 3 months \cr \bold{v1i} \tab \code{numeric} \tab sampling variance of the mean difference at 3 months \cr \bold{y2i} \tab \code{numeric} \tab observed mean difference at 6 months \cr \bold{v2i} \tab \code{numeric} \tab sampling variance of the mean difference at 6 months \cr \bold{y3i} \tab \code{numeric} \tab observed mean difference at 12 months \cr \bold{v3i} \tab \code{numeric} \tab sampling variance of the mean difference at 12 months \cr \bold{y4i} \tab \code{numeric} \tab observed mean difference at the long-term follow-up \cr \bold{v4i} \tab \code{numeric} \tab sampling variance of the mean difference at the long-term follow-up \cr \bold{mdur} \tab \code{numeric} \tab mean disease duration (in years) \cr \bold{mbase} \tab \code{numeric} \tab mean baseline UPDRS score } } \details{ Deep-brain stimulation (DBS), which is delivered through thin surgically implanted wires in specific areas of the brain and controlled by the patient, is meant to provide relief of the debilitating symptoms of Parkinson's disease. The dataset includes the results from 46 studies examining the effects of DBS of the subthalamic nucleus on motor functioning, measured with the Unified Parkinson's Disease Rating Scale (UPDRS). The effect size measure for this meta-analysis was the mean difference of the scores while the stimulator is active and the baseline scores (before implantation of the stimulator). Since lower scores on the UPDRS indicate better functioning, negative numbers indicate improvements in motor skills. Effects were generally measured at 3, 6, and 12 months after implantation of the stimulator, with some studies also including a further long-term follow-up. However, the number of measurements differed between studies - hence the missing data on some of the measurement occasions. Since the same patients were followed over time within a study, effect size estimates from multiple measurement occasions are likely to be correlated. 
A multivariate model accounting for the correlation in the effects can be used to meta-analyze these data. A difficulty with this approach is the lack of information about the correlation of the measurements over time in the individual studies. The approach taken by Ishak et al. (2007) was to assume an autoregressive (AR1) structure for the estimates within the individual studies. In addition, the correlation in the true effects was modeled, again using an autoregressive structure. } \source{ Ishak, K. J., Platt, R. W., Joseph, L., Hanley, J. A., & Caro, J. J. (2007). Meta-analysis of longitudinal studies. \emph{Clinical Trials}, \bold{4}(5), 525--539. \verb{https://doi.org/10.1177/1740774507083567} } \examples{ ### copy data into 'dat' and examine data dat <- dat.ishak2007 dat ### create long format dataset dat.long <- reshape(dat, direction="long", idvar="study", v.names=c("yi","vi"), varying=list(c(2,4,6,8), c(3,5,7,9))) dat.long <- dat.long[order(dat.long$study, dat.long$time),] rownames(dat.long) <- 1:nrow(dat.long) ### remove missing measurement occasions from dat.long is.miss <- is.na(dat.long$yi) dat.long <- dat.long[!is.miss,] ### construct the full (block diagonal) V matrix with an AR(1) structure rho.within <- .97 ### value as estimated by Ishak et al. (2007) V <- lapply(split(with(dat, cbind(v1i, v2i, v3i, v4i)), dat$study), diag) V <- lapply(V, function(v) sqrt(v) \%*\% toeplitz(ARMAacf(ar=rho.within, lag.max=3)) \%*\% sqrt(v)) V <- bldiag(V) V <- V[!is.miss,!is.miss] ### remove missing measurement occasions from V ### plot data with(dat.long, interaction.plot(time, study, yi, type="b", pch=19, lty="solid", xaxt="n", legend=FALSE, xlab="Time Point", ylab="Mean Difference", bty="l")) axis(side=1, at=1:4, lab=c("1 (3 months)", "2 (6 months)", "3 (12 months)", "4 (12+ months)")) ### multivariate model with heteroscedastic AR(1) structure for the true effects res <- rma.mv(yi, V, mods = ~ factor(time) - 1, random = ~ time | study, struct = "HAR", data = dat.long) print(res, digits=2) } \keyword{datasets} metafor/man/print.hc.rma.uni.Rd0000644000176200001440000000307314055736403016060 0ustar liggesusers\name{print.hc.rma.uni} \alias{print.hc.rma.uni} \title{Print Method for 'hc.rma.uni' Objects} \description{ Print method for objects of class \code{"hc.rma.uni"}. \loadmathjax } \usage{ \method{print}{hc.rma.uni}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"hc.rma.uni"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ The output is a data frame with two rows, the first (labeled \code{rma}) corresponding to the results based on the usual estimation method, the second (labeled \code{hc}) corresponding to the results based on the method by Henmi and Copas (2010). The data frame includes the following variables: \itemize{ \item the method used to estimate \mjseqn{\tau^2} (always \code{DL} for \code{hc}) \item the estimated amount of heterogeneity \item the estimated average true outcome \item the corresponding standard error (\code{NA} when \code{transf} argument has been used) \item the lower and upper confidence interval bounds } } \value{ The function returns the data frame invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. 
\emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{hc.rma.uni}} } \keyword{print} metafor/man/aggregate.escalc.Rd0000644000176200001440000002361114055736403016142 0ustar liggesusers\name{aggregate.escalc} \alias{aggregate} \alias{aggregate.escalc} \title{Aggregate Multiple Effect Sizes or Outcomes Within Studies} \description{ The function can be used to aggregate multiple effect sizes or outcomes belonging to the same study (or to the same level of some other clustering variable) into a single combined effect size or outcome. \loadmathjax } \usage{ \method{aggregate}{escalc}(x, cluster, time, V, struct="CS", rho, phi, weighted=TRUE, fun, na.rm=TRUE, subset, select, digits, \dots) } \arguments{ \item{x}{an object of class \code{"escalc"}.} \item{cluster}{vector to specify the clustering variable (e.g., study).} \item{time}{optional vector to specify the time points (only relevant when \code{struct="CAR"} or \code{struct="CS+CAR"}).} \item{V}{optional argument to specify the variance-covariance matrix of the sampling errors. If not specified, argument \code{struct} is used to specify the variance-covariance structure.} \item{struct}{character string to specify the variance-covariance structure of the sampling errors within the same cluster (either \code{"ID"}, \code{"CS"}, \code{"CAR"}, or \code{"CS+CAR"}). See \sQuote{Details}.} \item{rho}{value of the correlation of the sampling errors within clusters (when \code{struct="CS"} or \code{struct="CS+CAR"}). Can also be a vector with the value of the correlation for each cluster.} \item{phi}{value of the autocorrelation of the sampling errors within clusters (when \code{struct="CAR"} or \code{struct="CS+CAR"}). Can also be a vector with the value of the autocorrelation for each cluster.} \item{weighted}{logical to specify whether estimates within clusters should be aggregated using inverse-variance weighting (the default is \code{TRUE}). If set to \code{FALSE}, unweighted averages are computed.} \item{fun}{optional list with three functions for aggregating other variables besides the effect sizes or outcomes within clusters (for numeric/integer variables, for logicals, and for all other types, respectively).} \item{na.rm}{logical to specify whether \code{NA} values should be removed before aggregating values within clusters. Can also be a vector with two logicals (the first pertains to the effect sizes or outcomes, the second to all other variables).} \item{subset}{optional (logical or numeric) vector to specify the subset of rows to include when aggregating the effect sizes or outcomes.} \item{select}{optional vector to specify the names of the variables to include in the aggregated dataset.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ In many meta-analyses, multiple effect sizes or outcomes can be extracted from the same study. Ideally, such structures should be analyzed using an appropriate multilevel/multivariate model as can be fitted with the \code{\link{rma.mv}} function. However, there may occasionally be reasons for aggregating multiple effect sizes or outcomes belonging to the same study (or to the same level of some other clustering variable) into a single combined effect size or outcome. The present function can be used for this purpose. The input must be an object of class \code{"escalc"}. 
The error \sQuote{\code{Error in match.fun(FUN): argument "FUN" is missing, with no default}} indicates that a regular data frame was passed to the function, but this does not work. One can turn a regular data frame (containing the effect sizes or outcomes and the corresponding sampling variances) into an \code{"escalc"} object with the \code{\link{escalc}} function. See the \sQuote{Examples} below for an illustration of this. The \code{cluster} variable is used to specify which estimates/outcomes belong to the same study/cluster. In the simplest case, the estimates/outcomes within clusters (or, to be precise, their sampling errors) are assumed to be independent. This is usually a safe assumption as long as each study participant (or whatever the unit of analysis is) only contributes data to a single estimate/outcome. For example, if a study provides effect size estimates for male and female subjects separately, then the sampling errors can usually be assumed to be independent. In this case, one can set \code{struct="ID"} and multiple estimates/outcomes within the same cluster are combined using standard inverse-variance weighting (i.e., using weighted least squares) under the assumption of independence. In other cases, the estimates/outcomes within clusters cannot be assumed to be independent. For example, if multiple effect size estimates are computed for the same group of subjects (e.g., for different dependent variables), then the estimates are likely to be correlated. If the actual correlation between the estimates is unknown, one can often still make an educated guess and set argument \code{rho} to this value, which is then assumed to be the same for all pairs of estimates within clusters when \code{struct="CS"} (for a compound symmetric structure). Multiple estimates/outcomes within the same cluster are then combined using inverse-variance weighting taking their correlation into consideration (i.e., using generalized least squares). One can also specify a different value of \code{rho} for each cluster by passing a vector (of the same length as the number of clusters) to this argument. If multiple effect size estimates are computed for the same group of subjects at different time points, then it may be more sensible to assume that the correlation between estimates decreases as a function of the distance between the time points. If so, one can specify \code{struct="CAR"} (for a continuous-time autoregressive structure), set \code{phi} to the autocorrelation (for two estimates one time-unit apart), and use argument \code{time} to specify the actual time points corresponding to the estimates. The correlation between two estimates, \mjseqn{y_{ij}} and \mjseqn{y_{ij'}}, in the \mjseqn{i}th cluster, with time points \mjseqn{t_{ij}} and \mjseqn{t_{ij'}}, is then given by \mjseqn{\phi^{|t_{ij} - t_{ij'}|}}. One can also specify a different value of \code{phi} for each cluster by passing a vector (of the same length as the number of clusters) to this argument. One can also combine the compound symmetric and autoregressive structures by specifying \code{struct="CS+CAR"}. In this case, one must specify both \code{rho} and \code{phi}. The correlation between two estimates, \mjseqn{y_{ij}} and \mjseqn{y_{ij'}}, in the \mjseqn{i}th cluster, with time points \mjseqn{t_{ij}} and \mjseqn{t_{ij'}}, is then given by \mjseqn{\rho + (1 - \rho) \phi^{|t_{ij} - t_{ij'}|}}. 
Finally, if one actually knows the correlation (and hence the covariance) between each pair of estimates, one can also specify the entire variance-covariance matrix of the estimates (or more precisely, their sampling errors) via the \code{V} argument. In this case, arguments \code{struct}, \code{rho}, and \code{phi} are ignored. Instead of using inverse-variance weighting (i.e., weighted/generalized least squares) to combine the estimates within clusters, one can set \code{weighted=FALSE} in which case the estimates are averaged within clusters without any weighting. Other variables (besides the estimates) will also be aggregated to the cluster level. By default, numeric/integer type variables are averaged, logicals are also averaged (yielding the proportion of \code{TRUE} values), and for all other types of variables (e.g., character variables or factors) the most frequent category/level is returned. One can also specify a list of three functions via the \code{fun} argument for aggregating variables belong to these three types. Argument \code{na.rm} controls how missing values should be handled. By default, any missing estimates are first removed before aggregating the non-missing values within each cluster. The same applies when aggregating the other variables. One can also specify a vector with two logicals for the \code{na.rm} argument to control how missing values should be handled when aggregating the estimates and when aggregating all other variables. } \value{ An object of class \code{c("escalc","data.frame")} that contains the (selected) variables aggregated to the cluster level. The object is formatted and printed with the \code{\link{print.escalc}} function. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{escalc}} } \examples{ ### copy data into 'dat' and examine data dat <- dat.konstantopoulos2011 dat ### aggregate estimates to the district level, assuming independent sampling ### errors for multiples studies/schools within the same district agg <- aggregate(dat, cluster=district, struct="ID") agg ### copy data into 'dat' and examine data dat <- dat.assink2016 dat ### note: 'dat' is a regular data frame class(dat) ### turn data frame into an 'escalc' object dat <- escalc(yi=yi, vi=vi, data=dat) class(dat) ### aggregate the estimates to the study level, assuming a CS structure for ### the sampling errors within studies with a correlation of 0.6 agg <- aggregate(dat, cluster=study, rho=0.6) agg ### reshape 'dat.ishak2007' into long format dat <- dat.ishak2007 dat <- reshape(dat.ishak2007, direction="long", idvar="study", v.names=c("yi","vi"), varying=list(c(2,4,6,8), c(3,5,7,9))) dat <- dat[order(dat$study, dat$time),] is.miss <- is.na(dat$yi) dat <- dat[!is.miss,] rownames(dat) <- NULL dat ### aggregate the estimates to the study level, assuming a CAR structure for ### the sampling errors within studies with an autocorrelation of 0.9 agg <- aggregate(dat, cluster=study, struct="CAR", time=time, phi=0.9) agg } \keyword{models} metafor/man/metafor.news.Rd0000644000176200001440000000126714055736403015376 0ustar liggesusers\name{metafor.news} \alias{metafor.news} \title{Read News File of the Metafor Package} \description{ Read news file of the \pkg{\link{metafor-package}}. 
} \usage{ metafor.news() } \details{ The function is just a wrapper for \code{news(package="metafor")} which parses and displays the \file{NEWS} file of the package. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \examples{ \dontrun{ metafor.news()} } \keyword{utilities} metafor/man/robust.Rd0000644000176200001440000002020614055736403014276 0ustar liggesusers\name{robust} \alias{robust} \alias{robust.rma.uni} \alias{robust.rma.mv} \title{(Cluster) Robust Tests and Confidence Intervals for 'rma' Objects} \description{ The function provides (cluster) robust tests and confidence intervals of the model coefficients for objects of class \code{"rma"}. \loadmathjax } \usage{ robust(x, cluster, \dots) \method{robust}{rma.uni}(x, cluster, adjust=TRUE, digits, \dots) \method{robust}{rma.mv}(x, cluster, adjust=TRUE, digits, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"} or \code{"rma.mv"}.} \item{cluster}{a vector to specify a clustering variable to use for constructing the sandwich estimator of the variance-covariance matrix.} \item{adjust}{logical to specify whether a small-sample correction should be applied to the variance-covariance matrix.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{\dots}{other arguments.} } \details{ The function constructs a (cluster) robust estimate of the variance-covariance matrix of the model coefficients based on a sandwich-type estimator and then computes tests and confidence intervals of the model coefficients. Tests of individual coefficients and confidence intervals are based on a t-distribution with \mjseqn{n-p} degrees of freedom, while the omnibus test statistic uses an F-distribution with \mjseqn{m} and \mjseqn{n-p} degrees of freedom, where \mjseqn{n} is the number of clusters, \mjseqn{p} denotes the total number of model coefficients (including the intercept if it is present), and \mjseqn{m} denotes the number of coefficients tested (in the omnibus test). When \code{adjust=TRUE} (the default), the (cluster) robust estimate of the variance-covariance matrix is multiplied by the factor \mjseqn{n/(n-p)}, which serves as a small-sample adjustment that tends to improve the performance of the method when the number of clusters is small. For even better small-sample adjustments (Pustejovsky & Tipton, 2018), see the \href{https://cran.r-project.org/package=clubSandwich}{clubSandwich} package, which nicely works together with the \pkg{metafor} package (see \sQuote{Examples}). } \value{ An object of class \code{"robust.rma"}.
The object is a list containing the following components: \item{beta}{estimated coefficients of the model.} \item{se}{robust standard errors of the coefficients.} \item{zval}{test statistics of the coefficients.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bound of the confidence intervals for the coefficients.} \item{ci.ub}{upper bound of the confidence intervals for the coefficients.} \item{vb}{robust variance-covariance matrix of the estimated coefficients.} \item{QM}{test statistic of the omnibus test of moderators.} \item{QMp}{corresponding p-value.} \item{\dots}{some additional elements/values.} The results are formatted and printed with the \code{\link{print.robust.rma}} function. } \note{ The variable specified via \code{cluster} is assumed to be of the same length as the data originally passed to the \code{rma.uni} or \code{rma.mv} functions. Any subsetting and removal of studies with missing values that was applied during the model fitting is also automatically applied to the variable specified via the \code{cluster} argument. The idea of the robust (sandwich-type) estimator for models with unspecified heteroscedasticity can be traced back to Eicker (1967), Huber (1967), and White (1980). Hence, the method in general is often referred to as the Eicker-Huber-White method. Some small-sample improvements to the method are described by MacKinnon and White (1985). The extension to the cluster robust estimator can be found in Froot (1989) and Williams (2000). Cameron and Miller (2015) provide an extensive overview of cluster robust methods. Sidik and Jonkman (2005, 2006) introduced robust methods in the meta-analytic context for standard random/mixed-effects models. The use of the cluster robust estimator for multivariate/multilevel meta-analytic models is described in Hedges, Tipton, and Johnson (2010). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Cameron, A. C., & Miller, D. L. (2015). A practitioner's guide to cluster-robust inference. \emph{Journal of Human Resources}, \bold{50}(2), 317--372. \verb{https://doi.org/10.3368/jhr.50.2.317} Eicker, F. (1967). Limit theorems for regressions with unequal and dependent errors. In L. M. LeCam & J. Neyman (Eds.), \emph{Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability} (pp. 59--82). Berkeley: University of California Press. Froot, K. A. (1989). Consistent covariance matrix estimation with cross-sectional dependence and heteroskedasticity in financial data. \emph{Journal of Financial and Quantitative Analysis}, \bold{24}(3), 333--355. \verb{https://doi.org/10.2307/2330815} Hedges, L. V., Tipton, E., & Johnson, M. C. (2010). Robust variance estimation in meta-regression with dependent effect size estimates. \emph{Research Synthesis Methods}, \bold{1}(1), 39--65. \verb{https://doi.org/10.1002/jrsm.5} Huber, P. (1967). The behavior of maximum-likelihood estimates under nonstandard conditions. In L. M. LeCam & J. Neyman (Eds.), \emph{Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability} (pp. 221--233). Berkeley: University of California Press. MacKinnon, J. G., & White, H. (1985). Some heteroskedasticity-consistent covariance matrix estimators with improved finite sample properties. \emph{Journal of Econometrics}, \bold{29}(3), 305--325. \verb{https://doi.org/10.1016/0304-4076(85)90158-7} Pustejovsky, J. E., & Tipton, E. (2018). 
Small-sample methods for cluster-robust variance estimation and hypothesis testing in fixed effects models. \emph{Journal of Business & Economic Statistics}, \bold{36}(4), 672--683. \verb{https://doi.org/10.1080/07350015.2016.1247004} Sidik, K., & Jonkman, J. N. (2005). A note on variance estimation in random effects meta-regression. \emph{Journal of Biopharmaceutical Statistics}, \bold{15}(5), 823--838. \verb{https://doi.org/10.1081/BIP-200067915} Sidik, K., & Jonkman, J. N. (2006). Robust variance estimation for random effects meta-analysis. \emph{Computational Statistics & Data Analysis}, \bold{50}(12), 3681--3701. \verb{https://doi.org/10.1016/j.csda.2005.07.019} White, H. (1980). A heteroskedasticity-consistent covariance matrix estimator and a direct test for heteroskedasticity. \emph{Econometrica}, \bold{48}(4), 817--838. \verb{https://doi.org/10.2307/1912934} Williams, R. L. (2000). A note on robust variance estimation for cluster-correlated data. \emph{Biometrics}, \bold{56}(2), 645--646. \verb{https://doi.org/10.1111/j.0006-341x.2000.00645.x} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}} } \examples{ ### copy data from Konstantopoulos (2011) into 'dat' dat <- dat.konstantopoulos2011 ### fit multilevel random-effects model res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat) res ### obtain results based on the sandwich method robust(res, cluster=dat$district) ### illustrate use of the clubSandwich package together with metafor \dontrun{ require(clubSandwich) coef_test(res, vcov="CR2", cluster=dat$district) } ### copy data from Berkey et al. (1998) into 'dat' dat <- dat.berkey1998 ### construct list with the variance-covariance matrices of the observed outcomes for the studies V <- lapply(split(dat[c("v1i", "v2i")], dat$trial), as.matrix) ### construct block diagonal matrix V <- bldiag(V) ### fit multivariate model res <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat) res ### obtain results based on sandwich method robust(res, cluster=dat$trial) ### illustrate use of the clubSandwich package together with metafor \dontrun{ require(clubSandwich) coef_test(res, vcov="CR2", cluster=dat$trial) } } \keyword{htest} metafor/man/addpoly.Rd0000644000176200001440000000334414055736403014420 0ustar liggesusers\name{addpoly} \alias{addpoly} \title{Add Polygons to Forest Plots} \description{ The \code{addpoly} function can be used to add polygons, sometimes called \sQuote{diamonds}, to a forest plot, for example to indicate summary estimates for subgroups of studies or to indicate fitted/predicted values based on models involving moderators. } \usage{ addpoly(x, \dots) } \arguments{ \item{x}{either an object of class \code{"rma"} or the values at which polygons should be drawn. See \sQuote{Details}.} \item{\dots}{other arguments.} } \details{ Currently, methods exist for two types of situations. In the first case, object \code{x} is a fitted model coming from the \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, or \code{\link{rma.glmm}} functions. The model must either be a fixed- or random-effects model, that is, the model should not contain any moderators. The corresponding method is called \code{\link{addpoly.rma}}. 
It can be used to add a polygon to an existing forest plot (usually at the bottom), showing the summary estimate (with its confidence interval) based on the fitted model. Alternatively, object \code{x} can be a vector with values at which one or more polygons should be drawn. The corresponding method is then \code{\link{addpoly.default}}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{addpoly.rma}}, \code{\link{addpoly.default}}, \code{\link{forest.rma}}, \code{\link{forest.default}} } \keyword{methods} metafor/man/weights.rma.Rd0000644000176200001440000000574514055736403015223 0ustar liggesusers\name{weights.rma} \alias{weights} \alias{weights.rma} \alias{weights.rma.uni} \alias{weights.rma.mh} \alias{weights.rma.peto} \alias{weights.rma.glmm} \alias{weights.rma.mv} \title{Compute Weights for 'rma' Objects} \description{ The function computes the weights given to the observed effect sizes or outcomes during the model fitting for objects of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, and \code{"rma.mv"}. } \usage{ \method{weights}{rma.uni}(object, type="diagonal", \dots) \method{weights}{rma.mh}(object, type="diagonal", \dots) \method{weights}{rma.peto}(object, type="diagonal", \dots) \method{weights}{rma.glmm}(object, \dots) \method{weights}{rma.mv}(object, type="diagonal", \dots) } \arguments{ \item{object}{an object of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, or \code{"rma.mv"}. The method is not yet implemented for objects of class \code{"rma.glmm"}.} \item{type}{character string to specify whether to return only the diagonal of the weight matrix (\code{"diagonal"}) or the entire weight matrix (\code{"matrix"}). For \code{"rma.mv"}, this can also be \code{"rowsum"} for \sQuote{row-sum weights} (for intercept-only models).} \item{\dots}{other arguments.} } \value{ Either a vector with the diagonal elements of the weight matrix or the entire weight matrix. When only the diagonal elements are returned, they are given in \% (and they add up to 100\%). When the entire weight matrix is requested, this is always a diagonal matrix for objects of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}. For \code{"rma.mv"}, the structure of the weight matrix depends on the model fitted (i.e., the random effects included and the variance-covariance matrix of the sampling errors) but is often more complex and not just diagonal. For \code{"rma.mv"} intercept-only models, one can also take the sum over the rows in the weight matrix, which are actually the weights assigned to the observed effect sizes or outcomes when estimating the model intercept. These weights can be obtained with \code{type="rowsum"} (as with \code{type="diagonal"}, they are also given in \%). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.mv}}, \code{\link{influence.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### extract the model fitting weights (in %) weights(res) ### extract the weight matrix weights(res, type="matrix") } \keyword{models} metafor/man/confint.rma.Rd0000644000176200001440000004152014055736403015200 0ustar liggesusers\name{confint.rma} \alias{confint} \alias{confint.rma.uni} \alias{confint.rma.mh} \alias{confint.rma.peto} \alias{confint.rma.glmm} \alias{confint.rma.mv} \alias{confint.rma.uni.selmodel} \alias{confint.rma.ls} \title{Confidence Intervals for 'rma' Objects} \description{ The function calculates confidence intervals for the model coefficients and/or other parameters in the model. \loadmathjax } \usage{ \method{confint}{rma.uni}(object, parm, level, fixed=FALSE, random=TRUE, type, digits, transf, targs, verbose=FALSE, control, \dots) \method{confint}{rma.mh}(object, parm, level, digits, transf, targs, \dots) \method{confint}{rma.peto}(object, parm, level, digits, transf, targs, \dots) \method{confint}{rma.glmm}(object, parm, level, digits, transf, targs, \dots) \method{confint}{rma.mv}(object, parm, level, fixed=FALSE, sigma2, tau2, rho, gamma2, phi, digits, transf, targs, verbose=FALSE, control, \dots) \method{confint}{rma.uni.selmodel}(object, parm, level, fixed=FALSE, tau2, delta, digits, transf, targs, verbose=FALSE, control, \dots) \method{confint}{rma.ls}(object, parm, level, fixed=FALSE, alpha, digits, transf, targs, verbose=FALSE, control, \dots) } \arguments{ \item{object}{an object of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, \code{"rma.mv"}, \code{"rma.uni.selmodel"}, or \code{"rma.ls"}. The method is not yet implemented for objects of class \code{"rma.glmm"}.} \item{parm}{this argument is here for compatibility with the generic function \code{\link{confint}}, but is (currently) ignored.} \item{fixed}{logical to specify whether confidence intervals for the model coefficients should be returned.} \item{random}{logical to specify whether a confidence interval for the amount of (residual) heterogeneity should be returned.} \item{type}{optional character string to specify the method to use for computing the confidence interval for the amount of (residual) heterogeneity (either \code{"QP"}, \code{"GENQ"}, or \code{"PL"}).} \item{sigma2}{integer to specify for which \mjseqn{\sigma^2} parameter a confidence interval should be obtained.} \item{tau2}{integer to specify for which \mjseqn{\tau^2} parameter a confidence interval should be obtained.} \item{rho}{integer to specify for which \mjseqn{\rho} parameter the confidence interval should be obtained.} \item{gamma2}{integer to specify for which \mjseqn{\gamma^2} parameter a confidence interval should be obtained.} \item{phi}{integer to specify for which \mjseqn{\phi} parameter a confidence interval should be obtained.} \item{delta}{integer to specify for which \mjseqn{\delta} parameter a confidence interval should be obtained.} \item{alpha}{integer to specify for which \mjseqn{\alpha} parameter a confidence interval should be obtained.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level. 
If unspecified, the default is to take the value from the object.} \item{digits}{integer to specify the number of decimal places to which the results should be rounded. If unspecified, the default is to take the value from the object.} \item{transf}{optional argument to specify a function that should be used to transform the model coefficients and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{verbose}{logical to specify whether output should be generated on the progress of the iterative algorithms used to obtain the confidence intervals (the default is \code{FALSE}). See \sQuote{Details}.} \item{control}{list of control values for the iterative algorithms. If unspecified, default values are defined inside the function. See \sQuote{Note}.} \item{\dots}{other arguments.} } \details{ Confidence intervals for the model coefficients can be obtained by setting \code{fixed=TRUE} and are simply the usual Wald-type intervals (which are also shown when printing the fitted object). Other parameter(s) for which confidence intervals can be obtained depend on the model object: \itemize{ \item For objects of class \code{"rma.uni"} obtained with the \code{\link{rma.uni}} function, a confidence interval for the amount of (residual) heterogeneity (i.e., \mjseqn{\tau^2}) can be obtained by setting \code{random=TRUE} (which is the default). The interval is obtained iteratively either via the Q-profile method or via the generalized Q-statistic method (Hartung and Knapp, 2005; Viechtbauer, 2007; Jackson, 2013; Jackson et al., 2014). The latter is automatically used when the model was fitted with \code{method="GENQ"}, the former is used in all other cases. Either method provides an exact confidence interval for \mjseqn{\tau^2} in random- and mixed-effects models. The square root of the interval bounds is also returned for easier interpretation. Confidence intervals for \mjseqn{I^2} and \mjseqn{H^2} are also provided (Higgins & Thompson, 2002). Since \mjseqn{I^2} and \mjseqn{H^2} are just monotonic transformations of \mjseqn{\tau^2} (for details, see \code{\link{print.rma.uni}}), the confidence intervals for \mjseqn{I^2} and \mjseqn{H^2} are also exact. One can also set \code{type="PL"} to obtain a profile likelihood confidence interval for \mjseqn{\tau^2} (and corresponding CIs for \mjseqn{I^2} and \mjseqn{H^2}), which would be more consistent with the use of ML/REML estimation, but is not exact (see \sQuote{Note}). \item For objects of class \code{"rma.mv"} obtained with the \code{\link{rma.mv}} function, confidence intervals are obtained by default for all (non-fixed) variance and correlation components of the model. Alternatively, one can use the \code{sigma2}, \code{tau2}, \code{rho}, \code{gamma2}, or \code{phi} arguments to specify for which variance/correlation parameter a confidence interval should be obtained. Only one of these arguments can be used at a time. A single integer is used to specify the number of the parameter. The function provides profile likelihood confidence intervals for these parameters. It is a good idea to examine the corresponding profile likelihood plots (via the \code{\link{profile.rma.mv}} function) to make sure that the bounds obtained are sensible. 
\item For selection model objects of class \code{"rma.uni.selmodel"} obtained with the \code{\link{selmodel.rma.uni}} function, confidence intervals are obtained by default for \mjseqn{\tau^2} (for models where this is an estimated parameter) and all (non-fixed) selection model parameters. Alternatively, one can choose to obtain a confidence interval only for \mjseqn{\tau^2} by setting \code{tau2=TRUE} or for one of the selection model parameters by specifying its number via the \code{delta} argument. The function provides profile likelihood confidence intervals for these parameters. It is a good idea to examine the corresponding profile likelihood plots (via the \code{\link{profile.rma.uni.selmodel}} function) to make sure that the bounds obtained are sensible. \item For location-scale model objects of class \code{"rma.ls"} obtained with the \code{\link{rma.uni}} function, confidence intervals are obtained by default for all (non-fixed) scale parameters. Alternatively, one can choose to obtain a confidence interval for one of the scale parameters by specifying its number via the \code{alpha} argument. The function provides profile likelihood confidence intervals for these parameters. It is a good idea to examine the corresponding profile likelihood plots (via the \code{\link{profile.rma.ls}} function) to make sure that the bounds obtained are sensible. } The methods used to find confidence intervals for these parameters are iterative and require the use of the \code{\link{uniroot}} function. By default, the desired accuracy (\code{tol}) is set equal to \code{.Machine$double.eps^0.25} and the maximum number of iterations (\code{maxiter}) to \code{1000}. These values can be adjusted with \code{control=list(tol=value, maxiter=value)}, but the defaults should be adequate for most purposes. If \code{verbose=TRUE}, output is generated on the progress of the iterative algorithms. This is especially useful when model fitting is slow, in which case finding the confidence interval bounds can also take considerable amounts of time. When using the \code{\link{uniroot}} function, one must also set appropriate end points of the interval to be searched for the confidence interval bounds. The function tries to set some sensible defaults for the end points, but it may happen that the function is only able to determine that a bound is below/above a certain limit (this is indicated in the output accordingly with \code{<} or \code{>} signs). It can also happen that the model cannot be fitted or does not converge especially at the extremes of the interval to be searched. This will result in missing (\code{NA}) bounds and corresponding warnings. It may then be necessary to adjust the end points manually (see \sQuote{Note}). Finally, it is also possible that the lower and upper confidence interval bounds for a variance component both fall below zero. Since both bounds then fall outside of the parameter space, the confidence interval then consists of the null/empty set. Alternatively, one could interpret this as a confidence interval with bounds \mjseqn{[0,0]} or as indicating \sQuote{highly/overly homogeneous} data. } \value{ An object of class \code{"confint.rma"}. 
The object is a list with either one or two elements (named \code{fixed} and \code{random}) with the following elements: \item{estimate}{estimate of the model coefficient, variance/correlation component, or selection model parameter.} \item{ci.lb}{lower bound of the confidence interval.} \item{ci.ub}{upper bound of the confidence interval.} When obtaining confidence intervals for multiple components, the object is a list of class \code{"list.confint.rma"}, where each element is a \code{"confint.rma"} object as described above. The results are formatted and printed with the \code{\link{print.confint.rma}} and \code{\link{print.list.confint.rma}} functions. } \note{ When computing a CI for \mjseqn{\tau^2} for objects of class \code{"rma.uni"}, the estimate of \mjseqn{\tau^2} will usually fall within the CI bounds provided by the Q-profile method. However, this is not guaranteed. Depending on the method used to estimate \mjseqn{\tau^2} and the width of the CI, it can happen that the CI does not actually contain the estimate. Using the empirical Bayes or Paule-Mandel estimator of \mjseqn{\tau^2} when fitting the model (i.e., using \code{method="EB"} or \code{method="PM"}) usually ensures that the estimate of \mjseqn{\tau^2} falls within the CI. When \code{method="GENQ"} was used to fit the model, the corresponding CI obtained via the generalized Q-statistic method also usually contains the estimate \mjseqn{\tau^2}. When using ML/REML estimation, the profile likelihood CI (obtained when setting \code{type="PL"}) is guaranteed to contain the estimate of \mjseqn{\tau^2}. When computing a CI for \mjseqn{\tau^2} for objects of class \code{"rma.uni"}, the end points of the interval to be searched for the CI bounds are \mjseqn{[0,100]} (or, for the upper bound, ten times the estimate of \mjseqn{\tau^2}, whichever is greater). The upper bound should be large enough for most cases, but can be adjusted with \code{control=list(tau2.max=value)}. One can also adjust the lower end point with \code{control=list(tau2.min=value)}. You should only play around with this value if you know what you are doing. For objects of class \code{"rma.mv"}, the function provides CIs for the variance/correlation parameters in the model. For variance components, the lower end point of the interval to be searched is set to 0 and the upper end point to the larger of 10 and 100 times the value of the component. For correlations, the function tries to set the lower end point to a sensible default depending on the type of variance structure chosen, while the upper end point is set to 1. One can adjust the lower and/or upper end points with \code{control=list(vc.min=value, vc.max=value)}. Also, the function tries to adjust the lower/upper end points when the model does not converge at these extremes (the end points are then moved closer to the estimated value of the component). The total number of tries for setting/adjusting the end points in this manner is determined via \code{control=list(eptries=value)}, with the default being 10 tries. For objects of class \code{"rma.uni.selmodel"} or \code{"rma.ls"}, the function also sets some sensible defaults for the end points of the interval to be searched for the CI bounds (of the \mjseqn{\tau^2}, \mjseqn{\delta}, and \mjseqn{\alpha} parameter(s)). One can again adjust the end points and the number of retries (as described above) with \code{control=list(vc.min=value, vc.max=value, eptries=value)}. 
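As a minimal sketch of how such adjustments might look for an \code{"rma.uni"} object (the specific values shown here are arbitrary and \code{dat} is assumed to be a data frame containing effect size estimates \code{yi} and sampling variances \code{vi}):

\preformatted{### 'dat' is a hypothetical data frame with yi (outcomes) and vi (sampling variances)
res <- rma(yi, vi, data=dat)

### widen the search interval for the upper CI bound of tau^2 and allow more iterations
confint(res, control=list(tau2.max=400, maxiter=2000))}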
The Q-profile and generalized Q-statistic methods are both exact under the assumptions of the random- and mixed-effects models (i.e., normally distributed observed and true effect sizes or outcomes and known sampling variances). In practice, these assumptions are usually only approximately true, turning CIs for \mjseqn{\tau^2} also into approximations. Profile likelihood CIs are not exact by construction and rely on the asymptotic behavior of the likelihood ratio statistic, so they may be inaccurate in small samples, but they are inherently consistent with the use of ML/REML estimation. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Hardy, R. J., & Thompson, S. G. (1996). A likelihood approach to meta-analysis with random effects. \emph{Statistics in Medicine}, \bold{15}(6), 619--629. \verb{https://doi.org/10.1002/(sici)1097-0258(19960330)15:6\%3C619::aid-sim188\%3E3.0.co;2-a} Hartung, J., & Knapp, G. (2005). On confidence intervals for the among-group variance in the one-way random effects model with unequal error variances. \emph{Journal of Statistical Planning and Inference}, \bold{127}(1-2), 157--177. \verb{https://doi.org/10.1016/j.jspi.2003.09.032} Higgins, J. P. T., & Thompson, S. G. (2002). Quantifying heterogeneity in a meta-analysis. \emph{Statistics in Medicine}, \bold{21}(11), 1539--1558. \verb{https://doi.org/10.1002/sim.1186} Jackson, D. (2013). Confidence intervals for the between-study variance in random effects meta-analysis using generalised Cochran heterogeneity statistics. \emph{Research Synthesis Methods}, \bold{4}(3), 220--229. \verb{https://doi.org/10.1002/jrsm.1081} Jackson, D., Turner, R., Rhodes, K., & Viechtbauer, W. (2014). Methods for calculating confidence and credible intervals for the residual between-study variance in random effects meta-regression models. \emph{BMC Medical Research Methodology}, \bold{14}, 103. \verb{https://doi.org/10.1186/1471-2288-14-103} Viechtbauer, W. (2007). Confidence intervals for the amount of heterogeneity in meta-analysis. \emph{Statistics in Medicine}, \bold{26}(1), 37--52. \verb{https://doi.org/10.1002/sim.2514} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}}, \code{\link{selmodel.rma.uni}}, \code{\link{profile.rma.uni}}, \code{\link{profile.rma.mv}}, \code{\link{profile.rma.uni.selmodel}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### meta-analysis of the log risk ratios using a random-effects model res <- rma(yi, vi, data=dat, method="REML") ### confidence interval for the total amount of heterogeneity confint(res) ### mixed-effects model with absolute latitude in the model res <- rma(yi, vi, mods = ~ ablat, data=dat) ### confidence interval for the residual amount of heterogeneity confint(res) ### multilevel random-effects model res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat.konstantopoulos2011) ### profile plots and confidence intervals for the variance components \dontrun{ par(mfrow=c(2,1)) profile(res, sigma2=1, steps=40, cline=TRUE) sav <- confint(res, sigma2=1) sav abline(v=sav$random[1,2:3], lty="dotted") profile(res, sigma2=2, steps=40, cline=TRUE) sav <- confint(res, sigma2=2) sav abline(v=sav$random[1,2:3], lty="dotted")} ### multivariate parameterization of the model res <- rma.mv(yi, vi, random = ~ factor(school) | district, data=dat.konstantopoulos2011) ### profile plots and confidence intervals for the variance component and correlation \dontrun{ par(mfrow=c(2,1)) profile(res, tau2=1, steps=40, cline=TRUE) sav <- confint(res, tau2=1) sav abline(v=sav$random[1,2:3], lty="dotted") profile(res, rho=1, steps=40, cline=TRUE) sav <- confint(res, rho=1) sav abline(v=sav$random[1,2:3], lty="dotted")} } \keyword{models} metafor/man/dat.hart1999.Rd0000644000176200001440000000622314055736403015024 0ustar liggesusers\name{dat.hart1999} \docType{data} \alias{dat.hart1999} \title{Studies on the Effectiveness of Warfarin for Preventing Strokes} \description{Results from 6 clinical trials examining the effectiveness of adjusted-dose warfarin for preventing strokes in patients with atrial fibrillation.} \usage{dat.hart1999} \format{The data frame contains the following columns: \tabular{lll}{ \bold{trial} \tab \code{numeric} \tab trial number \cr \bold{study} \tab \code{character} \tab study name (abbreviated) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{x1i} \tab \code{numeric} \tab number of strokes in the warfarin group \cr \bold{n1i} \tab \code{numeric} \tab number of patients in the warfarin group \cr \bold{t1i} \tab \code{numeric} \tab total person-time (in years) in the warfarin group \cr \bold{x2i} \tab \code{numeric} \tab number of strokes in the placebo/control group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in the placebo/control group \cr \bold{t2i} \tab \code{numeric} \tab total person-time (in years) in the placebo/control group \cr \bold{compgrp} \tab \code{character} \tab type of comparison group (placebo or control) \cr \bold{prevtype} \tab \code{character} \tab type of prevention (primary or secondary) \cr \bold{trinr} \tab \code{character} \tab target range for the international normalized ratio (INR) } } \details{ The 6 studies provide data with respect to the number of strokes in the warfarin and the comparison (placebo or control) group. In addition, the number of patients and the total person-time (in years) is provided for the two groups. 
The goal of the meta-analysis was to examine the effectiveness of adjusted-dose warfarin for preventing strokes in patients with atrial fibrillation. } \source{ Hart, R. G., Benavente, O., McBride, R., & Pearce, L. A. (1999). Antithrombotic therapy to prevent stroke in patients with atrial fibrillation: A meta-analysis. \emph{Annals of Internal Medicine}, \bold{131}(7), 492--501. \verb{https://doi.org/10.7326/0003-4819-131-7-199910050-00003} } \examples{ ### copy data into 'dat' dat <- dat.hart1999 ### calculate log incidence rate ratios and corresponding sampling variances dat <- escalc(measure="IRR", x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat) dat ### meta-analysis of log incidence rate ratios using a random-effects model res <- rma(yi, vi, data=dat) res ### average incidence rate ratio with 95\% CI predict(res, transf=exp) ### forest plot with extra annotations par(mar=c(5,4,1,2)) forest(res, xlim=c(-11, 5), at=log(c(.05, .25, 1, 4)), atransf=exp, slab=paste0(dat$study, " (", dat$year, ")"), ilab=cbind(paste(dat$x1i, "/", dat$t1i, sep=" "), paste(dat$x2i, "/", dat$t2i, sep=" ")), ilab.xpos=c(-6.5,-4), cex=.85, header="Study (Year)") op <- par(cex=.85, font=2) text(c(-6.5,-4), 8.5, c("Warfarin", "Control")) text(c(-6.5,-4), 7.5, c("Strokes / PT", "Strokes / PT")) segments(x0=-8, y0=8, x1=-2.75, y1=8) par(op) ### meta-analysis of incidence rate differences using a random-effects model res <- rma(measure="IRD", x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat) res } \keyword{datasets} metafor/man/forest.Rd0000644000176200001440000000321414055736403014262 0ustar liggesusers\name{forest} \alias{forest} \title{Forest Plots} \description{ The \code{forest} function can be used to create forest plots. } \usage{ forest(x, \dots) } \arguments{ \item{x}{either an object of class \code{"rma"}, a vector with the observed effect sizes or outcomes, or an object of class \code{"cumul.rma"}. See \sQuote{Details}.} \item{\dots}{other arguments.} } \details{ Currently, methods exist for three types of situations. In the first case, object \code{x} is a fitted model object coming from the \code{\link{rma.uni}}, \code{\link{rma.mh}}, or \code{\link{rma.peto}} functions. The corresponding method is then \code{\link{forest.rma}}. Alternatively, object \code{x} can be a vector with observed effect sizes or outcomes. The corresponding method is then \code{\link{forest.default}}. Finally, object \code{x} can be an object coming from the \code{\link{cumul.rma.uni}}, \code{\link{cumul.rma.mh}}, or \code{\link{cumul.rma.peto}} functions. The corresponding method is then \code{\link{forest.cumul.rma}}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Lewis, S., & Clarke, M. (2001). Forest plots: Trying to see the wood and the trees. \emph{British Medical Journal}, \bold{322}(7300), 1479--1480. \verb{https://doi.org/10.1136/bmj.322.7300.1479} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{forest.rma}}, \code{\link{forest.default}}, \code{\link{forest.cumul.rma}} } \keyword{hplot} metafor/man/dat.riley2003.Rd0000644000176200001440000000652314055736403015166 0ustar liggesusers\name{dat.riley2003} \docType{data} \alias{dat.riley2003} \title{Studies on MYC-N as a Prognostic Marker for Neuroblastoma} \description{Results from 81 studies examining overall and disease-free survival in neuroblastoma patients with amplified versus normal MYC-N protein levels.} \usage{dat.riley2003} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{yi} \tab \code{numeric} \tab log hazard ratio of the outcome in those with amplified versus normal MYC-N protein levels \cr \bold{sei} \tab \code{numeric} \tab standard error of the log hazard ratio \cr \bold{outcome} \tab \code{character} \tab outcome (OS = overall survival; DFS = disease-free survival) } } \details{ The meta-analysis by Riley et al. (2003) examined a variety of prognostic markers for overall and disease-free survival in patients with neuroblastoma. One of the markers examined was amplified levels of the MYC-N protein, which is associated with poorer outcomes. The dataset given here was extracted from Riley (2011) and has been used in several other publications (e.g., Riley et al., 2004, 2007). The dataset provides the (log) hazard ratios (and corresponding standard errors) with respect to these two outcomes in 81 studies, with positive values indicating a greater risk of death (for OS) or disease recurrence/death (for DFS) for patients with high MYC-N levels compared to those with normal/low levels. Note that information on both outcomes could only be extracted from 17 studies (39 studies only provided sufficient information to extract the OS estimate, while 25 studies only allowed for extraction of the DFS estimate). } \source{ Riley, R. D., Sutton, A. J., Abrams, K. R., & Lambert, P. C. (2004). Sensitivity analyses allowed more appropriate and reliable meta-analysis conclusions for multiple outcomes when missing data was present. \emph{Journal of Clinical Epidemiology}, \bold{57}(9), 911--924. \verb{https://doi.org/10.1016/j.jclinepi.2004.01.018} Riley, R. D., Abrams, K. R., Lambert, P. C., Sutton, A. J., & Thompson, J. R. (2007). An evaluation of bivariate random-effects meta-analysis for the joint synthesis of two correlated outcomes. \emph{Statistics in Medicine}, \bold{26}(1), 78--97. \verb{https://doi.org/10.1002/sim.2524} Riley, R. D. (2011). Erratum: An evaluation of bivariate random-effects meta-analysis for the joint synthesis of two correlated outcomes. \emph{Statistics in Medicine}, \bold{30}(4), 400. \verb{https://doi.org/10.1002/sim.4100} } \references{ Riley, R. D., Burchill, S. A., Abrams, K. R., Heney, D., Lambert, P. C., Jones, D. R., Sutton, A. J., Young, B., Wailoo, A. J., & Lewis, I. J. (2003). A systematic review and evaluation of the use of tumour markers in paediatric oncology: Ewing's sarcoma and neuroblastoma. \emph{Health Technology Assessment}, \bold{7}(5), 1--162.
\verb{https://doi.org/10.3310/hta7050} } \examples{ ### copy data into 'dat' and examine data dat <- dat.riley2003 dat ### random-effects model analysis for outcome DFS res <- rma(yi, sei=sei, data=dat, subset=(outcome == "DFS"), method="DL") res predict(res, transf=exp, digits=2) ### random-effects model analysis for outcome OS res <- rma(yi, sei=sei, data=dat, subset=(outcome == "OS"), method="DL") res predict(res, transf=exp, digits=2) } \keyword{datasets} metafor/man/metafor-package.Rd0000644000176200001440000004656614055736403016027 0ustar liggesusers\name{metafor-package} \alias{metafor-package} \alias{metafor} \docType{package} \title{metafor: A Meta-Analysis Package for R \loadmathjax} \description{ The \pkg{metafor} package provides a comprehensive collection of functions for conducting meta-analyses in \R. The package can be used to calculate various effect size or outcome measures and then allows the user to fit fixed- and random-effects models to these data. By including study-level variables (\sQuote{moderators}) as predictors in these models, mixed-effects meta-regression models can also be fitted. For meta-analyses of \mjeqn{2 \times 2}{2x2} tables, proportions, incidence rates, and incidence rate ratios, the package also provides functions that implement specialized methods, including the Mantel-Haenszel method, Peto's method, and a variety of suitable generalized linear mixed-effects models (i.e., mixed-effects logistic and Poisson regression models). For non-independent effects/outcomes (e.g., due to correlated sampling errors, correlated true effects or outcomes, or other forms of clustering), the package also provides a function for fitting multilevel/multivariate meta-analytic models. Various methods are available to assess model fit, to identify outliers and/or influential studies, and for conducting sensitivity analyses (e.g., standardized residuals, Cook's distances, leave-one-out analyses). Advanced techniques for hypothesis testing and obtaining confidence intervals (e.g., for the average effect or outcome or for the model coefficients in a meta-regression model) have also been implemented (e.g., the Knapp and Hartung method, permutation tests, cluster robust inference methods / robust variance estimation). The package also provides functions for creating forest, funnel, radial (Galbraith), normal quantile-quantile, \enc{L'Abbé}{L'Abbe}, Baujat, bubble, and GOSH plots. The presence of funnel plot asymmetry (which may be indicative of publication bias) and its potential impact on the results can be examined via the rank correlation and Egger's regression test, the trim and fill method, and by applying a variety of selection models. } \section{The escalc Function}{ [\code{\link{escalc}}] Before a meta-analysis can be conducted, the relevant results from each study must be quantified in such a way that the resulting values can be further aggregated and compared. The \code{\link{escalc}} function can be used to compute a wide variety of effect size or outcome measures (and the corresponding sampling variances) that are often used in meta-analyses (e.g., risk ratios, odds ratios, risk differences, mean differences, standardized mean differences, response ratios / ratios of means, raw or r-to-z transformed correlation coefficients). 
Measures for quantifying some outcome for individual groups (e.g., proportions and incidence rates and transformations thereof), measures of change (e.g., raw and standardized mean changes), and measures of variability (e.g., variability ratios and coefficient of variation ratios) are also available. } \section{The rma.uni Function}{ [\code{\link{rma.uni}}] The various meta-analytic models that are typically used in practice are special cases of the general linear (mixed-effects) model. The \code{\link{rma.uni}} function (with alias \code{\link{rma}}) provides a general framework for fitting such models. The function can be used in conjunction with any of the usual effect size or outcome measures used in meta-analyses (e.g., as computed using the \code{\link{escalc}} function). The notation and models underlying the \code{\link{rma.uni}} function are explained below. For a set of \mjseqn{i = 1, \ldots, k} independent studies, let \mjseqn{y_i} denote the observed value of the effect size or outcome measure in the \mjseqn{i}th study. Let \mjseqn{\theta_i} denote the corresponding (unknown) true effect/outcome, such that \mjdeqn{y_i | \theta_i \sim N(\theta_i, v_i).}{y_i | \theta_i ~ N(\theta_i, v_i).} In other words, the observed effect sizes or outcomes are assumed to be unbiased and normally distributed estimates of the corresponding true effects/outcomes with sampling variances equal to \mjseqn{v_i}. The \mjseqn{v_i} values are assumed to be known. Depending on the outcome measure used, a bias correction, normalizing, and/or variance stabilizing transformation may be necessary to ensure that these assumptions are (approximately) true (e.g., the log transformation for odds/risk ratios, the bias correction for standardized mean differences, Fisher's r-to-z transformation for correlations; see \code{\link{escalc}} for more details). The \bold{fixed-effects model} conditions on the true effects/outcomes and therefore provides a \emph{conditional inference} about the \mjseqn{k} studies included in the meta-analysis. When using weighted estimation, this implies that the fitted model provides an estimate of \mjdeqn{\bar{\theta}_w = \sum_{i=1}^k w_i \theta_i / \sum_{i=1}^k w_i,}{\theta_w = \sum w_i \theta_i / \sum w_i,} that is, the \emph{weighted average} of the true effects/outcomes in the \mjseqn{k} studies, with weights equal to \mjseqn{w_i = 1/v_i} (this is what is often described as the \sQuote{inverse-variance} method in the meta-analytic literature). One can also employ an unweighted estimation method, which provides an estimate of the \emph{unweighted average} of the true effects/outcomes in \mjseqn{k} studies, that is, an estimate of \mjdeqn{\bar{\theta}_u = \sum_{i=1}^k \theta_i / k.}{\theta_u = \sum \theta_i / k.} For weighted estimation, one could also choose to estimate \mjeqn{\bar{\theta}_w}{\theta_w}, where the \mjseqn{w_i} values are user-defined weights (inverse-variance weights or unit weights as in unweighted estimation are just special cases). It is up to the user to decide to what extent \mjeqn{\bar{\theta}_w}{\theta_w} is a meaningful parameter to estimate (regardless of the weights used). The \bold{random-effects model} does not condition on the true effects/outcomes. Instead, the \mjseqn{k} studies included in the meta-analysis are assumed to be a random sample from a larger population of studies. In rare cases, the studies included in a meta-analysis are actually sampled from a larger collection of studies. 
More typically, the population of studies is a hypothetical population of an essentially infinite set of studies comprising all of the studies that have been conducted, that could have been conducted, or that may be conducted in the future. We assume that \mjeqn{\theta_i \sim N(\mu, \tau^2)}{\theta_i ~ N(\mu, \tau^2)}, that is, the true effects/outcomes in the population of studies are normally distributed with \mjseqn{\mu} denoting the average true effect/outcome and \mjseqn{\tau^2} the variance of the true effects/outcomes in the population (\mjseqn{\tau^2} is therefore often referred to as the amount of \sQuote{heterogeneity} in the true effects/outcomes). The random-effects model can also be written as \mjsdeqn{y_i = \mu + u_i + \epsilon_i,} where \mjeqn{u_i \sim N(0, \tau^2)}{u_i ~ N(0, \tau^2)} and \mjeqn{\epsilon_i \sim N(0, v_i)}{\epsilon_i ~ N(0, v_i)}. The fitted model provides an estimate of \mjseqn{\mu} and \mjseqn{\tau^2}. Consequently, the random-effects model provides an \emph{unconditional inference} about the average true effect/outcome in the population of studies (from which the \mjseqn{k} studies included in the meta-analysis are assumed to be a random sample). When using weighted estimation in the context of a random-effects model, the model is fitted with weights equal to \mjseqn{w_i = 1/(\tau^2 + v_i)}, with \mjseqn{\tau^2} replaced by its estimate (this is the standard \sQuote{inverse-variance} method for random-effects models). One can also choose unweighted estimation in the context of the random-effects model or specify user-defined weights, although the parameter that is estimated (i.e., \mjseqn{\mu}) remains the same regardless of the estimation method and weights used (as opposed to the fixed-effect model, where the parameter estimated is different for weighted versus unweighted estimation or when using different weights than the standard inverse-variance weights). Since weighted estimation with inverse-variance weights is most efficient, it is usually to be preferred for random-effects models (while in the fixed-effect model case, we must carefully consider whether \mjeqn{\bar{\theta}_w}{\theta_w} or \mjeqn{\bar{\theta}_u}{\theta_u} is the more meaningful parameter to estimate). Contrary to what is often stated in the literature, it is important to realize that the fixed-effects model does \emph{not} assume that the true effects/outcomes are homogeneous (i.e., that \mjseqn{\theta_i} is equal to some common value \mjseqn{\theta} in all \mjseqn{k} studies). In other words, fixed-effects models provide perfectly valid inferences under heterogeneity, as long as one is restricting these inferences to the set of studies included in the meta-analysis and one realizes that the model does not provide an estimate of \mjseqn{\theta}, but of \mjeqn{\bar{\theta}_w}{\theta_w} or \mjeqn{\bar{\theta}_u}{\theta_u} (depending on the estimation method). In the special case that the true effects/outcomes are actually homogeneous (the equal-effects case), the distinction between fixed- and random-effects models disappears, since homogeneity implies that \mjeqn{\mu = \bar{\theta}_w = \bar{\theta}_u \equiv \theta}{\mu = \theta_w = \theta_u = \theta}. However, since there is no infallible method to test whether the true effects/outcomes are really homogeneous or not, a researcher should decide on the type of inference desired before examining the data and choose the model accordingly. 
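As a brief illustration (a minimal sketch; \code{dat} is assumed to be a data frame with observed outcomes \code{yi} and corresponding sampling variances \code{vi}, for example as computed with \code{\link{escalc}}), both types of models can be fitted with the \code{\link{rma.uni}} function:

\preformatted{### 'dat' is a hypothetical data frame with yi (outcomes) and vi (sampling variances)

### fixed-effects model (weighted estimation with inverse-variance weights)
res.fe <- rma(yi, vi, data=dat, method="FE")

### random-effects model (with REML estimation of tau^2)
res.re <- rma(yi, vi, data=dat, method="REML")}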
In fact, there is nothing wrong with fitting both fixed- and random-effects models to the same data, since these models address different questions (i.e., what was the average effect/outcome in the studies that have been conducted versus what is the average effect/outcome in the larger population of studies?). For further details on the distinction between equal-, fixed-, and random-effects models, see Laird and Mosteller (1990) and Hedges and Vevea (1998). Study-level variables (often referred to as \sQuote{moderators}) can also be included as predictors in such models, leading to so-called \sQuote{meta-regression} analyses (to examine whether the effects/outcomes tend to be larger/smaller under certain conditions or circumstances). When including moderator variables in a random-effects model, we obtain a \bold{mixed-effects meta-regression model}. This model can be written as \mjdeqn{y_i = \beta_0 + \beta_1 x_{i1} + \beta_2 x_{i2} + \ldots + \beta_{p'} x_{ip'} + u_i + \epsilon_i,}{y_i = \beta_0 + \beta_1 x_i1 + \beta_2 x_i2 + \ldots + \beta_p' x_ip' + u_i + \epsilon_i,} where \mjeqn{u_i \sim N(0, \tau^2)}{u_i ~ N(0, \tau^2)} and \mjeqn{\epsilon_i \sim N(0, v_i)}{\epsilon_i ~ N(0, v_i)} as before and \mjeqn{x_{ij}}{x_ij} denotes the value of the \mjseqn{j}th moderator variable for the \mjseqn{i}th study (letting \mjseqn{p = p' + 1} denote the total number of coefficients in the model including the model intercept). Therefore, \mjseqn{\beta_j} denotes how the average true effect/outcome changes for a one-unit increase in \mjeqn{x_{ij}}{x_ij} and the model intercept \mjseqn{\beta_0} denotes the average true effect/outcome when the values of all moderator variables are equal to zero. The value of \mjseqn{\tau^2} in the mixed-effects model denotes the amount of \sQuote{residual heterogeneity} in the true effects/outcomes (i.e., the amount of variability in the true effects/outcomes that is not accounted for by the moderators included in the model). } \section{The rma.mh Function}{ [\code{\link{rma.mh}}] The Mantel-Haenszel method provides an alternative approach for fitting fixed-effects models when dealing with studies providing data in the form of \mjeqn{2 \times 2}{2x2} tables or in the form of event counts (i.e., person-time data) for two groups (Mantel & Haenszel, 1959). The method is particularly advantageous when aggregating a large number of studies with small sample sizes (the so-called sparse data or increasing strata case). The Mantel-Haenszel method is implemented in the \code{\link{rma.mh}} function. It can be used in combination with risk ratios, odds ratios, risk differences, incidence rate ratios, and incidence rate differences. } \section{The rma.peto Function}{ [\code{\link{rma.peto}}] Yet another method that can be used in the context of a meta-analysis of \mjeqn{2 \times 2}{2x2} table data is Peto's method (see Yusuf et al., 1985), implemented in the \code{\link{rma.peto}} function. The method provides an estimate of the (log) odds ratio under a fixed-effects model. The method is particularly advantageous when the event of interest is rare, but see the documentation of the function for some caveats. } \section{The rma.glmm Function}{ [\code{\link{rma.glmm}}] Dichotomous outcomes and event counts (based on which one can calculate outcome measures such as odds ratios, incidence rate ratios, proportions, and incidence rates) are often assumed to arise from binomial and Poisson distributed data. 
Meta-analytic models that are directly based on such distributions are implemented in the \code{\link{rma.glmm}} function. These models are essentially special cases of generalized linear mixed-effects models (i.e., mixed-effects logistic and Poisson regression models). For \mjeqn{2 \times 2}{2x2} table data, a mixed-effects conditional logistic model (based on the non-central hypergeometric distribution) is also available. Random/mixed-effects models with dichotomous data are often referred to as \sQuote{binomial-normal} models in the meta-analytic literature. Analogously, for event count data, such models could be referred to as \sQuote{Poisson-normal} models. } \section{The rma.mv Function}{ [\code{\link{rma.mv}}] Standard meta-analytic models assume independence between the observed effect sizes or outcomes obtained from a set of studies. This assumption is often violated in practice. Dependencies can arise for a variety of reasons. For example, the sampling errors and/or true effects/outcomes may be correlated in multiple treatment studies (e.g., when multiple treatment groups are compared with a common control/reference group, such that the data from the control/reference group is used multiple times to compute the observed effect sizes or outcomes) or in multiple endpoint studies (e.g., when more than one effect size estimate or outcome is calculated based on the same sample of subjects due to the use of multiple endpoints or response variables) (Gleser & Olkin, 2009). Correlations in the true effects/outcomes can also arise due to other forms of clustering (e.g., effects/outcomes derived from the same paper, lab, research group, or species may be more similar to each other than effects/outcomes derived from different papers, labs, research groups, or species). In ecology and related fields, shared phylogenetic history among the organisms studied (e.g., plants, fungi, animals) can also induce correlations among the effects/outcomes. The \code{\link{rma.mv}} function can be used to fit suitable meta-analytic multivariate/multilevel models to such data, so that the non-independence in the observed/true effects or outcomes is accounted for. Network meta-analyses (also called multiple/mixed treatment comparisons) can also be carried out with this function. } \section{Future Plans and Updates}{ The \pkg{metafor} package is a work in progress and is updated on a regular basis with new functions and options. With \code{metafor.news()}, you can read the \file{NEWS} file of the package after installation. Comments, feedback, and suggestions for improvements are always welcome. } \section{Citing the Package}{ To cite the package, please use the following reference: Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \section{Getting Started with the Package}{ The paper mentioned above is a good starting place for those interested in using the package. The purpose of the article is to provide a general overview of the package and its capabilities (as of version 1.4-0). Not all of the functions and options are described in the paper, but it should provide a useful introduction to the package. The paper can be freely downloaded from the URL given above or can be directly loaded with the command \code{vignette("metafor")}. 
In addition to reading the paper, carefully read this page and then the help pages for the \code{\link{escalc}} and the \code{\link{rma.uni}} functions (or the \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} functions if you intend to use these methods). The help pages for these functions provide links to many additional functions, which can be used after fitting a model. You can also read the entire documentation online at \url{https://wviechtb.github.io/metafor/reference/index.html} (where it is nicely formatted, equations are shown correctly, and the output from all examples is provided). A (pdf) diagram showing the various functions in the metafor package (and how they are related to each other) can be opened with the command \code{vignette("diagram")}. Finally, additional information about the package, several detailed analysis examples, examples of plots and figures provided by the package (with the corresponding code), some additional tips and notes, and a FAQ can be found on the package website at \url{https://www.metafor-project.org}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \cr package website: \url{https://www.metafor-project.org} \cr author homepage: \url{https://www.wvbauer.com} \cr Suggestions on how to obtain help with using the package can found on the package website at: \url{https://www.metafor-project.org/doku.php/help} } \references{ Cooper, H., Hedges, L. V., & Valentine, J. C. (Eds.) (2009). \emph{The handbook of research synthesis and meta-analysis} (2nd ed.). New York: Russell Sage Foundation. Gleser, L. J., & Olkin, I. (2009). Stochastically dependent effect sizes. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 357--376). New York: Russell Sage Foundation. Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis}. San Diego, CA: Academic Press. Hedges, L. V., & Vevea, J. L. (1998). Fixed- and random-effects models in meta-analysis. \emph{Psychological Methods}, \bold{3}(4), 486--504. \verb{https://doi.org/10.1037/1082-989X.3.4.486} Laird, N. M., & Mosteller, F. (1990). Some statistical methods for combining experimental results. \emph{International Journal of Technology Assessment in Health Care}, \bold{6}(1), 5--30. \verb{https://doi.org/10.1017/S0266462300008916} Mantel, N., & Haenszel, W. (1959). Statistical aspects of the analysis of data from retrospective studies of disease. \emph{Journal of the National Cancer Institute}, \bold{22}(4), 719--748. \verb{https://doi.org/10.1093/jnci/22.4.719} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Yusuf, S., Peto, R., Lewis, J., Collins, R., & Sleight, P. (1985). Beta blockade during and after myocardial infarction: An overview of the randomized trials. \emph{Progress in Cardiovascular Disease}, \bold{27}(5), 335--371. \verb{https://doi.org/10.1016/s0033-0620(85)80003-7} } \keyword{package} metafor/man/forest.rma.Rd0000644000176200001440000005143714055736403015052 0ustar liggesusers\name{forest.rma} \alias{forest.rma} \title{Forest Plots (Method for 'rma' Objects)} \description{ Function to create forest plots for objects of class \code{"rma"}. 
\loadmathjax } \usage{ \method{forest}{rma}(x, annotate=TRUE, addfit=TRUE, addpred=FALSE, showweights=FALSE, header=FALSE, xlim, alim, olim, ylim, top=3, at, steps=5, level=x$level, refline=0, digits=2L, width, xlab, slab, mlab, ilab, ilab.xpos, ilab.pos, order, transf, atransf, targs, rows, efac=1, pch=15, psize, plim=c(0.5,1.5), colout, col, border, lty, fonts, cex, cex.lab, cex.axis, annosym, \dots) } \arguments{ \item{x}{an object of class \code{"rma"}.} \item{annotate}{logical to specify whether annotations should be added to the plot (the default is \code{TRUE}).} \item{addfit}{logical to specify whether the summary estimate (for models without moderators) or fitted values (for models with moderators) should be added to the plot (the default is \code{TRUE}). See \sQuote{Details}.} \item{addpred}{logical to specify whether the bounds of the prediction interval should be added to the plot (the default is \code{FALSE}). See \sQuote{Details}.} \item{showweights}{logical to specify whether the annotations should also include the weights given to the observed outcomes during the model fitting (the default is \code{FALSE}). See \sQuote{Details}.} \item{header}{logical to specify whether column headings should be added to the plot (the default is \code{FALSE}). Can also be a character vector to specify the left and right headings.} \item{xlim}{horizontal limits of the plot region. If unspecified, the function tries to set the horizontal plot limits to some sensible values.} \item{alim}{the actual x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{olim}{optional argument to specify observation/outcome limits. If unspecified, no limits are used.} \item{ylim}{the y-axis limits of the plot. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{top}{the amount of space to leave empty at the top of the plot (e.g., for adding headers) (the default is 3 rows).} \item{at}{position of the x-axis tick marks and corresponding labels. If unspecified, the function tries to set the tick mark positions/labels to some sensible values.} \item{steps}{the number of tick marks for the x-axis (the default is 5). Ignored when the positions are specified via the \code{at} argument.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is to take the value from the object).} \item{refline}{numeric value to specify the location of the vertical \sQuote{reference} line (the default is 0). The line can be suppressed by setting this argument to \code{NA}.} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the x-axis and the annotations should be rounded (the default is \code{2L}). Can also be a vector of two integers, the first to specify the number of decimal places for the annotations, the second for the x-axis labels. When specifying an integer (e.g., \code{2L}), trailing zeros after the decimal mark are dropped for the x-axis labels. When specifying a numeric value (e.g., \code{2}), trailing zeros are retained.} \item{width}{optional integer to manually adjust the width of the columns for the annotations.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{slab}{optional vector with labels for the \mjseqn{k} studies. If unspecified, the labels are either taken from the object (if study labels were specified) or simple labels are created within the function. 
To suppress labels, set this argument to \code{NA}.} \item{mlab}{optional character string giving a label to the summary estimate from a fixed- or random-effects model. If unspecified, the label is created within the function.} \item{ilab}{optional vector, matrix, or data frame providing additional information about the studies that should be added to the plot.} \item{ilab.xpos}{numeric vector to specify the x-axis position(s) of the variable(s) given via \code{ilab} (must be specified if \code{ilab} is specified).} \item{ilab.pos}{integer(s) (either 1, 2, 3, or 4) to specify the alignment of the vector(s) given via \code{ilab} (2 means right, 4 means left aligned). If unspecified, the default is to center the labels.} \item{order}{optional character string to specify how the studies should be ordered. Can also be a variable based on which the studies will be ordered. See \sQuote{Details}.} \item{transf}{optional argument to specify a function that should be used to transform the observed outcomes, summary estimates, fitted values, and confidence interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{atransf}{optional argument to specify a function that should be used to transform the x-axis labels and annotations (e.g., \code{atransf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.} \item{rows}{optional vector to specify the rows (or more generally, the horizontal positions) for plotting the outcomes. Can also be a single value to specify the row (horizontal position) of the first outcome (the remaining outcomes are then plotted below this starting row). If unspecified, the function sets this value automatically.} \item{efac}{vertical expansion factor for confidence interval limits, arrows, and the symbol used to denote summary estimates. The default value of 1 should usually work okay. Can also be a vector of two numbers, the first for CI limits and arrows, the second for summary estimates. Can also be a vector of three numbers, the first for CI limits, the second for arrows, the third for summary estimates.} \item{pch}{plotting symbol to use for the observed outcomes. By default, a filled square is used. See \code{\link{points}} for other options. Can also be a vector of values.} \item{psize}{optional numeric value to specify the point sizes for the observed outcomes. If unspecified, the point sizes are a function of the model weights. Can also be a vector of values.} \item{plim}{numeric vector of length 2 to scale the point sizes (ignored when \code{psize} is specified). See \sQuote{Details}.} \item{colout}{optional character string to specify the name of a color to use for plotting the observed outcomes (\code{"black"} is used by default if not specified). Can also be a vector of color names.} \item{col}{optional character string to specify the name of a color to use for the summary polygon or fitted values. If unspecified, the function sets a default color.} \item{border}{optional character string to specify the name of a color to use for the border of the summary polygon or fitted values. If unspecified, the function sets a default color.} \item{lty}{optional character string to specify the line type for the confidence intervals.
If unspecified, the function sets this to \code{"solid"} by default.} \item{fonts}{optional character string to specify the font to use for the study labels, annotations, and the extra information (if specified via \code{ilab}). If unspecified, the default font is used.} \item{cex}{optional character and symbol expansion factor. If unspecified, the function tries to set this to a sensible value.} \item{cex.lab}{optional expansion factor for the x-axis title. If unspecified, the function tries to set this to a sensible value.} \item{cex.axis}{optional expansion factor for the x-axis labels. If unspecified, the function tries to set this to a sensible value.} \item{annosym}{optional vector of length 3 to change the left bracket, separation, and right bracket symbols for the annotations.} \item{\dots}{other arguments.} } \details{ The plot shows the observed effect sizes or outcomes with corresponding confidence intervals. For fixed- and random-effects models (i.e., for models without moderators), a four-sided polygon, sometimes called a summary \sQuote{diamond}, is added to the bottom of the forest plot, showing the summary estimate based on the model (with the center of the polygon corresponding to the estimate and the left/right edges indicating the confidence interval limits). The \code{col} and \code{border} arguments can be used to adjust the (border) color of the polygon. Drawing of the polygon can be suppressed by setting \code{addfit=FALSE}. For random-effects models and if \code{addpred=TRUE}, a dotted line is added to the summary polygon which indicates the (approximate) bounds of the prediction interval (the interval indicates where \code{level} \% of the true outcomes are expected to fall) (Riley et al., 2011). For random-effects models of class \code{"rma.mv"} (see \code{\link{rma.mv}}) with multiple \mjseqn{\tau^2} values, the \code{addpred} argument can be used to specify for which level of the inner factor the prediction interval should be provided (since the intervals differ depending on the \mjseqn{\tau^2} value). If the model also contains multiple \mjseqn{\gamma^2} values, the \code{addpred} argument should then be of length 2 to specify the levels of both inner factors. See also \code{\link{predict.rma}}, which is used to compute these interval bounds. For models involving moderators, the fitted value for each study is added as a polygon to the plot. By default, the width of the polygons corresponds to the confidence interval limits for the fitted values. By setting \code{addpred=TRUE}, the width reflects the prediction interval limits. Again, the \code{col} and \code{border} arguments can be used to adjust the (border) color of the polygons. These polygons can be suppressed by setting \code{addfit=FALSE}. With the \code{transf} argument, the observed outcomes, summary estimate, fitted values, confidence interval bounds, and prediction interval bounds can be transformed with some suitable function. For example, when plotting log odds ratios, one could use \code{transf=exp} to obtain a forest plot showing the odds ratios. Alternatively, one can use the \code{atransf} argument to transform the x-axis labels and annotations (e.g., \code{atransf=exp}). See also \link{transf} for some other useful transformation functions in the context of a meta-analysis. The examples below illustrate the use of these arguments.
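For the multivariate case described above, the following is a minimal sketch of how the prediction interval for a particular level of the inner factor could be requested (the data frame \code{dat}, the fitted object \code{res}, and the level name \code{"grp1"} are hypothetical placeholders; the value given to \code{addpred} must correspond to one of the levels of the inner factor of the fitted model): \preformatted{## hypothetical model with an inner factor 'grp' and an outer factor 'study'
## res <- rma.mv(yi, vi, random = ~ grp | study, struct="UN", data=dat)

## forest plot with the prediction interval based on the tau^2 value
## corresponding to level "grp1" of the inner factor
forest(res, addpred="grp1")}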
By default, the studies are ordered from top to bottom (i.e., the first study in the dataset will be placed in row \mjseqn{k}, the second study in row \mjseqn{k-1}, and so on, until the last study, which is placed in the first row). The studies can be reordered with the \code{order} argument: \itemize{ \item \code{order="obs"}: the studies are ordered by the observed outcomes, \item \code{order="fit"}: the studies are ordered by the fitted values, \item \code{order="prec"}: the studies are ordered by their sampling variances, \item \code{order="resid"}: the studies are ordered by the size of their residuals, \item \code{order="rstandard"}: the studies are ordered by the size of their standardized residuals, \item \code{order="abs.resid"}: the studies are ordered by the size of their absolute residuals, \item \code{order="abs.rstandard"}: the studies are ordered by the size of their absolute standardized residuals. } Alternatively, it is also possible to set \code{order} equal to a variable based on which the studies will be ordered (see \sQuote{Examples}). Additional summary estimates can be added to the plot with the \code{\link{addpoly}} function. See the documentation for that function for examples. When \code{showweights=TRUE}, the annotations will include information about the weights given to the observed outcomes during the model fitting. For simple models (such as those fitted with the \code{\link{rma.uni}} function), these weights correspond to the \sQuote{inverse-variance weights} (but are given in percent). For models fitted with the \code{\link{rma.mv}} function, the weights are based on the diagonal of the weight matrix. Note that the weighting structure is typically more complex in such models (i.e., the weight matrix is usually not just a diagonal matrix) and the weights shown therefore do not reflect this complexity. See \code{\link{weights.rma}} for more details. By default (i.e., when \code{psize} is not specified), the size of the points is a function of the square root of the model weights. This way, their areas are proportional to the weights. However, the point sizes are rescaled so that the smallest point size is \code{plim[1]} and the largest point size is \code{plim[2]}. As a result, their relative sizes (i.e., areas) no longer exactly correspond to their relative weights. If point sizes that are exactly proportional to the weights are desired, one can set \code{plim[2]} to \code{NA}, in which case the points are rescaled so that the smallest point size corresponds to \code{plim[1]} and all other points are scaled accordingly. As a result, the largest point may be very large. Alternatively, one can set \code{plim[1]} to \code{NA}, in which case the points are rescaled so that the largest point size corresponds to \code{plim[2]} and all other points are scaled accordingly. As a result, the smallest point may be very small and essentially indistinguishable from the confidence interval line. To avoid the latter, one can also set \code{plim[3]}, which enforces a minimal point size. } \note{ The function tries to set some sensible values for the optional arguments, but it may be necessary to adjust these in certain circumstances. The function actually returns some information about the chosen defaults invisibly. Printing this information is useful as a starting point to make adjustments to the plot (see \sQuote{Examples}). If the number of studies is quite large, the labels, annotations, and symbols may become quite small and impossible to read.
Stretching the plot window vertically may then provide a more readable figure (one should call the function again after adjusting the window size, so that the label/symbol sizes can be properly adjusted). Also, the \code{cex}, \code{cex.lab}, and \code{cex.axis} arguments are then useful to adjust the symbol and text sizes. If the horizontal plot and/or x-axis limits are set manually, then the horizontal plot limits (\code{xlim}) must be at least as wide as the x-axis limits (\code{alim}). This restriction is enforced inside the function. If the outcome measure used for creating the plot is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{olim} argument to enforce those limits (the observed outcomes and confidence/prediction intervals cannot exceed those bounds then). For models without moderators, the \code{col} argument can also be a vector of two elements, the first for specifying the color of the summary polygon, the second for specifying the color of the line for the prediction interval. The \code{lty} argument can also be a vector of up to three elements, the first for specifying the line type of the individual CIs (\code{"solid"} by default), the second for the line type of the prediction interval (\code{"dotted"} by default), the third for the line type of the horizontal lines that are automatically added to the plot (\code{"solid"} by default; set to \code{"blank"} to remove them). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Lewis, S., & Clarke, M. (2001). Forest plots: Trying to see the wood and the trees. \emph{British Medical Journal}, \bold{322}(7300), 1479--1480. \verb{https://doi.org/10.1136/bmj.322.7300.1479} Riley, R. D., Higgins, J. P. T., & Deeks, J. J. (2011). Interpretation of random effects meta-analyses. \emph{British Medical Journal}, \bold{342}, d549. \verb{https://doi.org/10.1136/bmj.d549} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{forest}}, \code{\link{forest.default}}, \code{\link{addpoly}} } \examples{ ### meta-analysis of the log risk ratios using a random-effects model res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", ")) ### default forest plot of the log risk ratios and summary estimate forest(res, header=TRUE) ### summary estimate in row -1; studies in rows k=13 through 1; horizontal ### lines in rows 0 and k+1; two extra lines of space at the top for headings, ### and other annotations; headings (if requested) in line k+2 op <- par(xpd=TRUE) text(x=-8.4, y=-1:16, -1:16, pos=4, cex=.6) par(op) ### can also inspect defaults chosen defaults <- forest(res) defaults ### several forest plots illustrating the use of various arguments forest(res, cex=.8) forest(res, cex=.8, addpred=TRUE) forest(res, cex=.8, alim=c(-3,3)) forest(res, cex=.8, order="prec", alim=c(-3,3)) forest(res, cex=.8, order=dat.bcg$ablat, addpred=TRUE) ### adjust xlim values to see how that changes the plot forest(res) par("usr")[1:2] ### this shows what xlim values were chosen by default forest(res, xlim=c(-16,14)) forest(res, xlim=c(-18,10)) forest(res, xlim=c(-10,10)) ### illustrate transf argument forest(res, transf=exp, at=c(0,1,2,4,6), xlim=c(-8,12), cex=.8, refline=1, header=TRUE) ### illustrate atransf argument forest(res, atransf=exp, at=log(c(.05,.25,1,4,20)), xlim=c(-8,7), cex=.8, header=TRUE) ### showweights argument forest(res, atransf=exp, at=log(c(.05,.25,1,4,20)), xlim=c(-8,8), order="prec", showweights=TRUE, cex=.8) ### forest plot with extra annotations ### note: may need to widen plotting device to avoid overlapping text forest(res, atransf=exp, at=log(c(.05, .25, 1, 4)), xlim=c(-16,6), ilab=cbind(dat.bcg$tpos, dat.bcg$tneg, dat.bcg$cpos, dat.bcg$cneg), ilab.xpos=c(-9.5,-8,-6,-4.5), cex=.75, header="Author(s) and Year") op <- par(cex=.75, font=2) text(c(-9.5,-8,-6,-4.5), 15, c("TB+", "TB-", "TB+", "TB-")) text(c(-8.75,-5.25), 16, c("Vaccinated", "Control")) par(op) ### mixed-effects model with absolute latitude in the model res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, mods = ~ ablat, data=dat.bcg, slab=paste(author, year, sep=", ")) ### forest plot with observed and fitted values forest(res, xlim=c(-9,5), order="fit", cex=.8, ilab=dat.bcg$ablat, ilab.xpos=-4, atransf=exp, at=log(c(.05,.25,1,4)), header="Author(s) and Year") text(-4, 15, "Latitude", cex=.8, font=2) ### meta-analysis of the log risk ratios using a random-effects model res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", ")) ### for more complicated plots, the ylim and rows arguments may be useful forest(res) forest(res, ylim=c(-1.5, 16)) ### the default forest(res, ylim=c(-1.5, 20)) ### extra space in plot forest(res, ylim=c(-1.5, 20), rows=c(17:15, 12:6, 3:1)) ### set positions ### forest plot with subgrouping of studies ### note: may need to widen plotting device to avoid overlapping text forest(res, xlim=c(-16, 4.6), at=log(c(.05, .25, 1, 4)), atransf=exp, ilab=cbind(dat.bcg$tpos, dat.bcg$tneg, dat.bcg$cpos, dat.bcg$cneg), ilab.xpos=c(-9.5,-8,-6,-4.5), cex=.75, ylim=c(-1, 21), order=dat.bcg$alloc, rows=c(1:2,5:11,14:17), header="Author(s) and Year") op <- par(cex=0.75, font=2) text(c(-9.5,-8,-6,-4.5), 20, c("TB+", "TB-", "TB+", "TB-")) text(c(-8.75,-5.25), 21, c("Vaccinated", "Control")) op <- par(font=4) text(-16, c(18,12,3), c("Systematic Allocation", "Random 
Allocation", "Alternate Allocation"), pos=4) par(op) ### see also the addpoly.rma function for an example where summaries ### for the three subgroups are added to such a forest plot ### illustrate use of olim argument with a meta-analysis of raw correlation ### coefficients (data from Pritz, 1997); without olim=c(0,1), some of the ### CIs would have upper bounds larger than 1 dat <- escalc(measure="PR", xi=xi, ni=ni, data=dat.pritz1997) res <- rma(yi, vi, data=dat, slab=paste0(study, ") ", authors)) forest(res, xlim=c(-0.8,1.6), alim=c(0,1), psize=1, refline=coef(res), olim=c(0,1), header=TRUE) ### an example of a forest plot where the data have a multilevel structure and ### we want to reflect this by grouping together estimates from the same cluster dat <- dat.konstantopoulos2011 res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat, slab=paste0("District ", district, ", School: ", school)) dd <- c(0,diff(dat$district)) dd[dd > 0] <- 1 rows <- (1:res$k) + cumsum(dd) par(tck=-.01, mgp = c(1.6,.2,0)) forest(res, cex=0.5, header=TRUE, rows=rows, ylim=c(0.5,max(rows)+3)) abline(h = rows[c(1,diff(rows)) == 2] - 1, lty="dotted") } \keyword{hplot} metafor/man/dat.assink2016.Rd0000644000176200001440000000671714055736403015343 0ustar liggesusers\name{dat.assink2016} \docType{data} \alias{dat.assink2016} \title{Studies on the Association between Recidivism and Mental Health} \description{Results from 17 studies on the association between recidivism and mental health in delinquent juveniles.} \usage{dat.assink2016} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study id number \cr \bold{esid} \tab \code{numeric} \tab effect size within study id number \cr \bold{id} \tab \code{numeric} \tab row id number \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance \cr \bold{pubstatus} \tab \code{numeric} \tab published study (0 = no; 1 = yes) \cr \bold{year} \tab \code{numeric} \tab publication year of the study (approximately mean centered) \cr \bold{deltype} \tab \code{character} \tab type of delinquent behavior in which juveniles could have recidivated (either general, overt, or covert) } } \details{ The studies included in this dataset (which is a subset of the data used in Assink et al., 2015) compared the difference in recidivism between delinquent juveniles with a mental health disorder and a comparison group of juveniles without a mental health disorder. Since studies differed in the way recidivism was defined and assessed, results are given in terms of standardized mean differences, with positive values indicating a higher prevalence of recidivism in the group of juveniles with a mental health disorder. Multiple effect size estimates could be extracted from most studies (e.g., for different delinquent behaviors in which juveniles could have recidivated), necessitating the use of appropriate models/methods for the analysis. Assink and Wibbelink (2016) illustrate the use of multilevel meta-analytic models for this purpose. } \note{ The \code{year} variable is not constant within study 3, as this study refers to two different publications using the same data. } \source{ Assink, M., & Wibbelink, C. J. M. (2016). Fitting three-level meta-analytic models in R: A step-by-step tutorial. \emph{The Quantitative Methods for Psychology}, \bold{12}(3), 154--174. \verb{https://doi.org/10.20982/tqmp.12.3.p154} } \references{ Assink, M., van der Put, C. 
E., Hoeve, M., de Vries, S. L. A., Stams, G. J. J. M., & Oort, F. J. (2015). Risk factors for persistent delinquent behavior among juveniles: A meta-analytic review. \emph{Clinical Psychology Review}, \bold{42}, 47--61. \verb{https://doi.org/10.1016/j.cpr.2015.08.002} } \examples{ ### copy data into 'dat' and examine data dat <- dat.assink2016 dat ### fit multilevel model res <- rma.mv(yi, vi, random = ~ 1 | study/esid, data=dat) res ### use cluster-robust inference methods robust(res, cluster=dat$study) ### LRTs for the variance components res0 <- rma.mv(yi, vi, random = ~ 1 | study/esid, data=dat, sigma2=c(0,NA)) anova(res0, res) res0 <- rma.mv(yi, vi, random = ~ 1 | study/esid, data=dat, sigma2=c(NA,0)) anova(res0, res) ### examine some potential moderators via meta-regression rma.mv(yi, vi, mods = ~ pubstatus, random = ~ 1 | study/esid, data=dat) rma.mv(yi, vi, mods = ~ year, random = ~ 1 | study/esid, data=dat) dat$deltype <- factor(dat$deltype) dat$deltype <- relevel(dat$deltype, ref="general") rma.mv(yi, vi, mods = ~ deltype, random = ~ 1 | study/esid, data=dat) rma.mv(yi, vi, mods = ~ year + deltype, random = ~ 1 | study/esid, data=dat) } \keyword{datasets} metafor/man/anova.rma.Rd0000644000176200001440000002574114055736403014653 0ustar liggesusers\name{anova.rma} \alias{anova} \alias{anova.rma} \title{Likelihood Ratio and Wald-Type Tests for 'rma' Objects} \description{ For two (nested) models of class \code{"rma.uni"} or \code{"rma.mv"}, the function provides a full versus reduced model comparison in terms of model fit statistics and a likelihood ratio test. When a single model is specified, a Wald-type test of one or more model coefficients or linear combinations thereof is carried out. \loadmathjax } \usage{ \method{anova}{rma}(object, object2, btt, X, att, Z, digits, \dots) } \arguments{ \item{object}{an object of class \code{"rma.uni"} or \code{"rma.mv"}.} \item{object2}{an (optional) object of class \code{"rma.uni"} or \code{"rma.mv"}. See \sQuote{Details}.} \item{btt}{optional vector of indices to specify which coefficients should be included in the Wald-type test. Can also be a string to grep for. See \sQuote{Details}.} \item{X}{optional numeric vector or matrix to specify one or more linear combinations of the coefficients in the model that should be tested. See \sQuote{Details}.} \item{att}{optional vector of indices to specify which scale coefficients should be included in the Wald-type test. Can also be a string to grep for. See \sQuote{Details}. Only relevant for location-scale models (see \code{\link{rma}}).} \item{Z}{optional numeric vector or matrix to specify one or more linear combinations of the scale coefficients in the model that should be tested. See \sQuote{Details}. Only relevant for location-scale models (see \code{\link{rma}}).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{\dots}{other arguments.} } \details{ When a single model is specified, the function provides Wald-type tests of one or more model coefficients or linear combinations thereof. In particular, for a fixed- or random-effects model (i.e., a model without moderators), this is just the test of the single coefficient of the model. For models including moderators, an omnibus test of all the model coefficients is conducted that excludes the intercept (the first coefficient) if it is included in the model. 
If no intercept is included in the model, then the omnibus test includes all of the coefficients in the model including the first. Alternatively, one can manually specify the indices of the coefficients to test via the \code{btt} argument. For example, with \code{btt=c(3,4)}, only the third and fourth coefficient from the model would be included in the test (if an intercept is included in the model, then it corresponds to the first coefficient in the model). Instead of specifying the coefficient numbers, one can specify a string for \code{btt}. In that case, \code{\link{grep}} will be used to search for all coefficient names that match the string. Using the \code{btt} argument, one can for example select all coefficients corresponding to a particular factor to test if the factor as a whole is significant. See \sQuote{Examples}. Instead, one can use the \code{X} argument to specify a linear combination of the coefficients in the model that should be tested (i.e., whether the linear combination is significantly different from zero). If a matrix of linear combinations is specified, each row defines a particular linear combination to be tested. If the matrix is of full rank, an omnibus Wald-type test of all linear combinations is also provided. Linear combinations can also be obtained with the \code{\link{predict.rma}} function, which will provide corresponding confidence intervals. When specifying two models for comparison, the function provides a likelihood ratio test (LRT) comparing the two models. The two models must be based on the same set of data, must be of the same class, and should be nested for the LRT to make sense. Note that LRTs are not meaningful when using REML estimation and the two models differ in their fixed effects. For location-scale models fitted with the \code{\link{rma}} function, one can use \code{att} to specify the indices of the scale coefficients to test. Similarly, one can use the \code{Z} argument to specify one or multiple linear combinations of the scale coefficients in the model that should be tested. } \value{ An object of class \code{"anova.rma"}. 
When a single model is specified (without any further arguments or together with the \code{btt} argument), the object is a list containing the following components: \item{QM}{test statistic of the Wald-type test of the model coefficients.} \item{QMdf}{corresponding degrees of freedom.} \item{QMp}{corresponding p-value.} \item{btt}{indices of the coefficients tested by the Wald-type test.} \item{k}{number of outcomes included in the analysis.} \item{p}{number of coefficients in the model (including the intercept).} \item{m}{number of coefficients included in the Wald-type test.} \item{\dots}{some additional elements/values.} When argument \code{X} is used, the object is a list containing the following components: \item{QM}{test statistic of the omnibus Wald-type test of all linear combinations.} \item{QMdf}{corresponding degrees of freedom.} \item{QMp}{corresponding p-value.} \item{hyp}{description of the linear combinations tested.} \item{Xb}{values of the linear combinations.} \item{se}{standard errors of the linear combinations.} \item{zval}{test statistics of the linear combinations.} \item{pval}{corresponding p-values.} When two models are specified, the object is a list containing the following components: \item{fit.stats.f}{log-likelihood, deviance, AIC, BIC, and AICc for the full model.} \item{fit.stats.r}{log-likelihood, deviance, AIC, BIC, and AICc for the reduced model.} \item{parms.f}{number of parameters in the full model.} \item{parms.r}{number of parameters in the reduced model.} \item{LRT}{likelihood ratio test statistic.} \item{pval}{corresponding p-value.} \item{QE.f}{test statistic of the test for (residual) heterogeneity from the full model.} \item{QE.r}{test statistic of the test for (residual) heterogeneity from the reduced model.} \item{tau2.f}{estimated \mjseqn{\tau^2} value from the full model. \code{NA} for \code{"rma.mv"} objects.} \item{tau2.r}{estimated \mjseqn{\tau^2} value from the reduced model. \code{NA} for \code{"rma.mv"} objects.} \item{R2}{amount (in percent) of the heterogeneity in the reduced model that is accounted for in the full model (\code{NA} for fixed-effects models or for \code{"rma.mv"} objects). This can be regarded as a pseudo \mjseqn{R^2} statistic (Raudenbush, 2009). Note that the value may not be very accurate unless \mjseqn{k} is large (Lopez-Lopez et al., 2014).} \item{\dots}{some additional elements/values.} The results are formatted and printed with the \code{\link{print.anova.rma}} function. } \note{ The function can also be used to conduct a likelihood ratio test (LRT) for the amount of (residual) heterogeneity in random- and mixed-effects models. The full model should then be fitted with either \code{method="ML"} or \code{method="REML"} and the reduced model with \code{method="FE"}. The p-value for the test is based on a chi-square distribution with 1 degree of freedom, but actually needs to be adjusted for the fact that the parameter (i.e., \mjseqn{\tau^2}) falls on the boundary of the parameter space under the null hypothesis (see Viechtbauer, 2007, for more details). LRTs for variance components in more complex models (as fitted with the \code{\link{rma.mv}} function) can also be conducted in this manner (see \sQuote{Examples}). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Hardy, R. J., & Thompson, S. G. (1996). A likelihood approach to meta-analysis with random effects. \emph{Statistics in Medicine}, \bold{15}(6), 619--629. 
\verb{https://doi.org/10.1002/(sici)1097-0258(19960330)15:6\%3C619::aid-sim188\%3E3.0.co;2-a} Huizenga, H. M., Visser, I., & Dolan, C. V. (2011). Testing overall and moderator effects in random effects meta-regression. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{64}(1), 1--19. \verb{https://doi.org/10.1348/000711010X522687} \enc{López-López}{Lopez-Lopez}, J. A., \enc{Marín-Martínez}{Marin-Martinez}, F., \enc{Sánchez-Meca}{Sanchez-Meca}, J., Van den Noortgate, W., & Viechtbauer, W. (2014). Estimation of the predictive power of the model in mixed-effects meta-regression: A simulation study. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{67}(1), 30--48. \verb{https://doi.org/10.1111/bmsp.12002} Raudenbush, S. W. (2009). Analyzing effect sizes: Random effects models. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 295--315). New York: Russell Sage Foundation. Viechtbauer, W. (2007). Hypothesis tests for population heterogeneity in meta-analysis. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{60}(1), 29--60. \verb{https://doi.org/10.1348/000711005X64042} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}}, \code{\link{print.anova.rma}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res1 <- rma(yi, vi, data=dat, method="ML") ### fit mixed-effects model with two moderators (absolute latitude and publication year) res2 <- rma(yi, vi, mods = ~ ablat + year, data=dat, method="ML") ### Wald-type test of the two moderators anova(res2) ### alternative way of specifying the same test anova(res2, X=rbind(c(0,1,0), c(0,0,1))) ### corresponding likelihood ratio test anova(res1, res2) ### test of a linear combination anova(res2, X=c(1,35,1970)) ### use predict() to obtain the same linear combination (with its CI) predict(res2, newmods=c(35,1970)) ### mixed-effects model with three moderators res3 <- rma(yi, vi, mods = ~ ablat + year + alloc, data=dat, method="ML") res3 ### test the 'alloc' factor anova(res3, btt=4:5) ### instead of specifying the coefficient numbers, grep for "alloc" anova(res3, btt="alloc") ############################################################################ ### an example of doing LRTs of variance components in more complex models dat <- dat.konstantopoulos2011 res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat) ### test the district-level variance component res0 <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat, sigma2=c(0,NA)) anova(res, res0) ### test the school-level variance component res0 <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat, sigma2=c(NA,0)) anova(res, res0) ### test both variance components simultaneously res0 <- rma.mv(yi, vi, data=dat) anova(res, res0) } \keyword{models} metafor/man/to.table.Rd0000644000176200001440000001350614055736403014475 0ustar liggesusers\name{to.table} \alias{to.table} \title{Convert Data from Vector to Table Format} \description{ The function converts summary data in vector format to the corresponding table format. 
\loadmathjax } \usage{ to.table(measure, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, ni, data, slab, subset, add=1/2, to="none", drop00=FALSE, rows, cols) } \arguments{ \item{measure}{a character string to specify the effect size or outcome measure corresponding to the summary data supplied. See \sQuote{Details} and the documentation of the \code{\link{escalc}} function for possible options.} \item{ai}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper left cell).} \item{bi}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper right cell).} \item{ci}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower left cell).} \item{di}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower right cell).} \item{n1i}{vector to specify the group sizes or row totals (first group/row).} \item{n2i}{vector to specify the group sizes or row totals (second group/row).} \item{x1i}{vector to specify the number of events (first group).} \item{x2i}{vector to specify the number of events (second group).} \item{t1i}{vector to specify the total person-times (first group).} \item{t2i}{vector to specify the total person-times (second group).} \item{m1i}{vector to specify the means (first group or time point).} \item{m2i}{vector to specify the means (second group or time point).} \item{sd1i}{vector to specify the standard deviations (first group or time point).} \item{sd2i}{vector to specify the standard deviations (second group or time point).} \item{xi}{vector to specify the frequencies of the event of interest.} \item{mi}{vector to specify the frequencies of the complement of the event of interest or the group means.} \item{ri}{vector to specify the raw correlation coefficients.} \item{ti}{vector to specify the total person-times.} \item{sdi}{vector to specify the standard deviations.} \item{ni}{vector to specify the sample/group sizes.} \item{data}{optional data frame containing the variables given to the arguments above.} \item{slab}{optional vector with labels for the studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the array returned by the function.} \item{add}{see the documentation of the \code{\link{escalc}} function.} \item{to}{see the documentation of the \code{\link{escalc}} function.} \item{drop00}{see the documentation of the \code{\link{escalc}} function.} \item{rows}{optional vector with row/group names.} \item{cols}{optional vector with column/outcome names.} } \details{ The \code{\link{escalc}} function describes a wide variety of effect size or outcome measures that can be computed for a meta-analysis. The summary data used to compute those measures are typically contained in vectors, each element corresponding to a study. The \code{to.table} function takes this information and constructs an array of \mjseqn{k} tables from these data. 
For example, in various fields (such as the health and medical sciences), the response variable measured is often dichotomous (binary), so that the data from a study comparing two different groups can be expressed in terms of a \mjeqn{2 \times 2}{2x2} table, such as: \tabular{lccc}{ \tab outcome 1 \tab outcome 2 \tab total \cr group 1 \tab \code{ai} \tab \code{bi} \tab \code{n1i} \cr group 2 \tab \code{ci} \tab \code{di} \tab \code{n2i} } where \code{ai}, \code{bi}, \code{ci}, and \code{di} denote the cell frequencies (i.e., the number of people falling into a particular category) and \code{n1i} and \code{n2i} the row totals (i.e., the group sizes). The cell frequencies in \mjseqn{k} such \mjeqn{2 \times 2}{2x2} tables can be specified via the \code{ai}, \code{bi}, \code{ci}, and \code{di} arguments (or alternatively, via the \code{ai}, \code{ci}, \code{n1i}, and \code{n2i} arguments). The function then creates the corresponding \mjeqn{2 \times 2 \times k}{2*2*k} array of tables. The \code{measure} argument should then be set equal to one of the outcome measures that can be computed based on this type of data, such as \code{"RR"}, \code{"OR"}, \code{"RD"} (it is not relevant which specific measure is chosen, as long as it corresponds to the specified summary data). See the documentation of the \code{\link{escalc}} function for more details on the types of data formats available. The examples below illustrate the use of this function. } \value{ An array with \mjseqn{k} elements each consisting of either 1 or 2 rows and an appropriate number of columns. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{escalc}}, \code{\link{to.long}} } \examples{ ### create tables dat <- to.table(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", "), rows=c("Vaccinated", "Not Vaccinated"), cols=c("TB+", "TB-")) dat ### create tables dat <- to.table(measure="IRR", x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat.hart1999, slab=paste(study, year, sep=", "), rows=c("Warfarin Group", "Placebo/Control Group")) dat ### create tables dat <- to.table(measure="MD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat.normand1999, slab=source, rows=c("Specialized Care", "Routine Care")) dat } \keyword{manip} metafor/man/dat.moura2021.Rd0000644000176200001440000000774014055736403015167 0ustar liggesusers\name{dat.moura2021} \docType{data} \alias{dat.moura2021} \title{Studies on Assortative Mating} \description{Results from 457 studies on assortative mating in various species.} \usage{ dat.moura2021 } \format{The object is a list containing a data frame called \code{dat} that contains the following columns and a phylogenetic tree called \code{tree}: \tabular{lll}{ \bold{study.id} \tab \code{character} \tab study id \cr \bold{effect.size.id} \tab \code{numeric} \tab effect size id \cr \bold{species} \tab \code{character} \tab species \cr \bold{species.id} \tab \code{character} \tab species id (as in the Open Tree of Life reference taxonomy) \cr \bold{subphylum} \tab \code{character} \tab the subphyla of the species \cr \bold{phylum} \tab \code{character} \tab the phyla of the species \cr \bold{assortment.trait} \tab \code{character} \tab the measure of body size \cr \bold{trait.dimensions} \tab \code{character} \tab dimensionality of the measure \cr \bold{field.collection} \tab \code{character} \tab whether data were collected in the field \cr \bold{publication.year} \tab \code{numeric} \tab publication year of the study \cr \bold{pooled.data} \tab \code{character} \tab whether data were pooled either spatially and/or temporally \cr \bold{spatially.pooled.data} \tab \code{character} \tab whether data were pooled spatially \cr \bold{temporally.pooled.data} \tab \code{character} \tab whether data were pooled temporally \cr \bold{ri} \tab \code{numeric} \tab correlation coefficient \cr \bold{ni} \tab \code{numeric} \tab sample size } } \details{ The 457 studies included in this dataset provide 1828 correlation coefficients describing the similarity in some measure of body size in mating couples in 341 different species. } \source{ Rios Moura, R., Oliveira Gonzaga, M., Silva Pinto, N., Vasconcellos-Neto, J., & Requena, G. S. (2021). Assortative mating in space and time: Patterns and biases. \emph{Ecology Letters}, \bold{24}(5), 1089--1102. \verb{https://doi.org/10.1111/ele.13690} } \references{ Hadfield, J. D., & Nakagawa, S. (2010). General quantitative genetic methods for comparative biology: Phylogenies, taxonomies and multi-trait models for continuous and categorical characters. \emph{Journal of Evolutionary Biology}, \bold{23}(3), 494--508. \verb{https://doi.org/10.1111/j.1420-9101.2009.01915.x} Nakagawa, S., & Santos, E. S. A. (2012). Methodological issues and advances in biological meta-analysis. \emph{Evolutionary Ecology}, \bold{26}(5), 1253--1274.
\verb{https://doi.org/10.1007/s10682-012-9555-5} } \examples{ ### copy data into 'dat' and examine data dat <- dat.moura2021$dat head(dat) ### calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat) \dontrun{ ### load 'ape' package require(ape) ### copy tree to 'tree' tree <- dat.moura2021$tree ### turn tree into an ultrametric one tree <- compute.brlen(tree) ### compute phylogenetic correlation matrix A <- vcv(tree, corr=TRUE) ### make copy of the species.id variable dat$species.id.phy <- dat$species.id ### fit multilevel phylogenetic meta-analytic model res <- rma.mv(yi, vi, random = list(~ 1 | study.id, ~ 1 | effect.size.id, ~ 1 | species.id, ~ 1 | species.id.phy), R=list(species.id.phy=A), data=dat) res ### examine if spatial and/or temporal pooling of data tends to yield larger correlations res <- rma.mv(yi, vi, mods = ~ spatially.pooled.data * temporally.pooled.data, random = list(~ 1 | study.id, ~ 1 | effect.size.id, ~ 1 | species.id, ~ 1 | species.id.phy), R=list(species.id.phy=A), data=dat) res ### estimated average correlation without pooling, when pooling spatially, ### when pooling temporally, and when pooling spatially and temporally predict(res, newmods = rbind(c(0,0,0),c(1,0,0),c(0,1,0),c(1,1,1)), transf=transf.ztor, digits=2) } } \keyword{datasets} metafor/man/macros/0000755000176200001440000000000013722772107013756 5ustar liggesusersmetafor/man/macros/metafor.Rd0000644000176200001440000000021513723236423015675 0ustar liggesusers\newcommand{\icsl}{\out{\hspace*{0.1em}}} \newcommand{\icsh}{\out{ }} \newcommand{\ics}{\ifelse{latex}{\icsl}{\ifelse{html}{\icsh}{ }}} metafor/man/dat.normand1999.Rd0000644000176200001440000000447714055736403015535 0ustar liggesusers\name{dat.normand1999} \docType{data} \alias{dat.normand1999} \title{Studies on the Length of Hospital Stay of Stroke Patients} \description{Results from 9 studies on the length of the hospital stay of stroke patients under specialized care and under conventional/routine (non-specialist) care.} \usage{dat.normand1999} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{source} \tab \code{character} \tab source of data \cr \bold{n1i} \tab \code{numeric} \tab number of patients under specialized care \cr \bold{m1i} \tab \code{numeric} \tab mean length of stay (in days) under specialized care \cr \bold{sd1i} \tab \code{numeric} \tab standard deviation of the length of stay under specialized care \cr \bold{n2i} \tab \code{numeric} \tab number of patients under routine care \cr \bold{m2i} \tab \code{numeric} \tab mean length of stay (in days) under routine care \cr \bold{sd2i} \tab \code{numeric} \tab standard deviation of the length of stay under routine care } } \details{ The 9 studies provide data in terms of the mean length of the hospital stay (in days) of stroke patients under specialized care and under conventional/routine (non-specialist) care. The goal of the meta-analysis was to examine the hypothesis whether specialist stroke unit care will result in a shorter length of hospitalization compared to routine management. } \source{ Normand, S. T. (1999). Meta-analysis: Formulating, evaluating, combining, and reporting. \emph{Statistics in Medicine}, \bold{18}(3), 321--359. 
\verb{https://doi.org/10.1002/(sici)1097-0258(19990215)18:3<321::aid-sim28>3.0.co;2-p} } \examples{ ### copy data into 'dat' dat <- dat.normand1999 ### calculate mean differences and corresponding sampling variances dat <- escalc(measure="MD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat) dat ### meta-analysis of mean differences using a random-effects model res <- rma(yi, vi, data=dat) res ### meta-analysis of standardized mean differences using a random-effects model res <- rma(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, slab=source) res ### draw forest plot forest(res, xlim=c(-7,5), alim=c(-3,1), cex=.8, header="Study/Source") } \keyword{datasets} metafor/man/ranktest.Rd0000644000176200001440000000666514055736403014630 0ustar liggesusers\name{ranktest} \alias{ranktest} \alias{ranktest.rma} \alias{ranktest.default} \title{Rank Correlation Test for Funnel Plot Asymmetry} \description{ The function can be used to carry out the rank correlation test for funnel plot asymmetry. } \usage{ ranktest(x, \dots) \method{ranktest}{rma}(x, digits, \dots) \method{ranktest}{default}(x, vi, sei, subset, digits, \dots) } \arguments{ \item{x}{an object of class \code{"rma"} or a vector with the observed effect sizes or outcomes.} \item{vi}{vector with the corresponding sampling variances (needed if \code{x} is a vector with the observed effect sizes or outcomes).} \item{sei}{vector with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the test. Only relevant when passing a vector via \code{x}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is 4).} \item{\dots}{other arguments.} } \details{ The function carries out the rank correlation test as described by Begg and Mazumdar (1994). The test can be used to examine whether the observed effect sizes or outcomes and the corresponding sampling variances are correlated. A high correlation would indicate that the funnel plot is asymmetric, which may be a result of publication bias. One can either pass an object of class \code{"rma"} to the function or a vector with the observed effect sizes or outcomes (via \code{x}) and the corresponding sampling variances via \code{vi} (or the standard errors via \code{sei}). } \value{ An object of class \code{"ranktest"}. The object is a list containing the following components: \item{tau}{the estimated value of Kendall's tau rank correlation coefficient} \item{pval}{the corresponding p-value for the test that the true tau is equal to zero} The results are formatted and printed with the \code{\link{print.ranktest}} function. } \note{ The method does not depend on the model fitted. Therefore, regardless of the model passed to the function, the results of the rank test will always be the same. See \code{\link{regtest}} for tests of funnel plot asymmetry that are based on regression models and model dependent. The function makes use of the \code{\link{cor.test}} function with \code{method="kendall"}. If possible, an exact p-value is provided; otherwise, a large-sample approximation is used. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Begg, C. B., & Mazumdar, M. (1994). Operating characteristics of a rank correlation test for publication bias. 
\emph{Biometrics}, \bold{50}(4), 1088--1101. \verb{https://doi.org/10.2307/2533446} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{regtest}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res <- rma(yi, vi, data=dat) ### carry out the rank correlation test ranktest(res) ### can also pass the observed outcomes and corresponding sampling variances to the function ranktest(dat$yi, dat$vi) } \keyword{htest} metafor/man/dat.obrien2003.Rd0000644000176200001440000000762514055736403015324 0ustar liggesusers\name{dat.obrien2003} \docType{data} \alias{dat.obrien2003} \title{Studies on the Relationship Between BMI and Risk of Preeclampsia} \description{Results from 13 studies on the relationship between maternal body mass index (BMI) and the risk of preeclampsia.} \usage{dat.obrien2003} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study id \cr \bold{author} \tab \code{character} \tab (first) author of the study \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ref} \tab \code{numeric} \tab reference number \cr \bold{ch} \tab \code{character} \tab exclusion due to chronic hypertension (yes/no) \cr \bold{dm} \tab \code{character} \tab exclusion due to diabetes mellitus (yes/no) \cr \bold{mg} \tab \code{character} \tab exclusion due to multiple gestation (yes/no) \cr \bold{bmi.lb} \tab \code{numeric} \tab lower bound of the BMI interval \cr \bold{bmi.ub} \tab \code{numeric} \tab upper bound of the BMI interval \cr \bold{bmi} \tab \code{numeric} \tab midpoint of the BMI interval \cr \bold{cases} \tab \code{numeric} \tab number of preeclampsia cases in the BMI group \cr \bold{total} \tab \code{numeric} \tab number of individuals in the BMI group } } \details{ The dataset includes the results from 13 studies examining the relationship between maternal body mass index (BMI) and the risk of preeclampsia. For each study, results are given in terms of the number of preeclampsia cases within two or more groups defined by the lower and upper BMI bounds as shown in the dataset (\code{NA} means that the interval is either open to the left or right). The \code{bmi} variable is the interval midpoint as defined by O'Brien et al. (2003). } \source{ O'Brien, T. E., Ray, J. G., & Chan, W.-S. (2003). Maternal body mass index and the risk of preeclampsia: A systematic overview. \emph{Epidemiology}, \bold{14}(3), 368--374. 
\verb{https://doi.org/10.1097/00001648-200305000-00020} } \examples{ ### copy data into 'dat' dat <- dat.obrien2003 dat ### restructure the data into a wide format dat2 <- to.wide(dat, study="study", grp="grp", ref=1, grpvars=c("bmi","cases","total"), addid=FALSE, adddesign=FALSE, postfix=c(1,2)) dat2 ### calculate log risk ratios and corresponding sampling variances dat2 <- escalc(measure="RR", ai=cases1, n1i=total1, ci=cases2, n2i=total2, data=dat2) dat2 ### forest plot of the risk ratios dd <- c(0,diff(dat2$study)) dd[dd > 0] <- 1 rows <- (1:nrow(dat2)) + cumsum(dd) rows <- 1 + max(rows) - rows slabs <- mapply(function(x,y,z) as.expression(bquote(.(x)^.(y)~.(z))), dat2$author, dat2$ref, dat2$year) with(dat2, forest(yi, vi, header=TRUE, slab=slabs, xlim=c(-7,5.5), fonts="mono", cex=0.8, psize=1, pch=19, efac=0, rows=rows, ylim=c(0,max(rows)+3), yaxs="i", atransf=exp, at=log(c(.05,0.1,0.2,0.5,1,2,5,10,20)), ilab=comp, ilab.xpos=-4, ilab.pos=4)) text(-4.4, max(rows)+2, "Comparison", font=2, cex=0.8, pos=4) ### within-study mean center the BMI variable dat$bmicent <- dat$bmi - ave(dat$bmi, dat$study) ### compute the proportion of preeclampsia cases and corresponding sampling variances dat <- escalc(measure="PR", xi=cases, ni=total, data=dat) ### convert the proportions to percentages (and convert the variances accordingly) dat$yi <- dat$yi*100 dat$vi <- dat$vi*100^2 ### fit multilevel meta-regression model to examine the relationship between the ### (centered) BMI variable and the risk of preeclampsia res <- rma.mv(yi, vi, mods = ~ bmicent, random = ~ 1 | study/grp, data=dat) res ### draw scatterplot with regression line res$slab <- dat$ref regplot(res, xlab=expression("Within-Study Mean Centered BMI"~(kg/m^2)), ylab="Preeclampsia Prevalence (\%)", las=1, bty="l", at=seq(0,18,by=2), olim=c(0,100), psize=2, bg="gray90", label=TRUE, offset=0, labsize=0.6) } \keyword{datasets} metafor/man/plot.cumul.rma.Rd0000644000176200001440000001025414055736403015642 0ustar liggesusers\name{plot.cumul.rma} \alias{plot.cumul.rma} \title{Plot Method for 'cumul.rma' Objects} \description{ Plot method for objects of class \code{"cumul.rma"}. \loadmathjax } \usage{ \method{plot}{cumul.rma}(x, yaxis, xlim, ylim, xlab, ylab, at, transf, atransf, targs, digits, cols=c("gray80","gray10"), grid=TRUE, pch=19, cex=1, lwd=2, \dots) } \arguments{ \item{x}{an object of class \code{"cumul.rma"}.} \item{yaxis}{either \code{"tau2"}, \code{"I2"}, or \code{"H2"} to indicate what values should be placed on the y-axis. See \sQuote{Details}.} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis. If unspecified, the function tries to set an appropriate axis title.} \item{at}{position of the x-axis tick marks and corresponding labels. If unspecified, the function tries to set the tick mark positions/labels to some sensible values.} \item{transf}{optional argument to specify a function that should be used to transform the summary estimates (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{atransf}{optional argument to specify a function that should be used to transform the x-axis labels (e.g., \code{atransf=exp}; see also \link{transf}). 
If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the x- and y-axis should be rounded. Can also be a vector of two integers, the first to specify the number of decimal places for the x-axis, the second for the y-axis labels (e.g., \code{digits=c(2,3)}). If unspecified, the function tries to set the argument to some sensible values.} \item{cols}{vector with two or more colors to use for visualizing the order of the cumulative results.} \item{grid}{logical to specify whether a grid should be added to the plot (can also be a color name).} \item{pch}{plotting symbol to use. By default, a filled circle is used. See \code{\link{points}} for other options.} \item{cex}{symbol expansion factor.} \item{lwd}{line width.} \item{\dots}{other arguments.} } \details{ The function can be used to visualize the results from a cumulative meta-analysis as obtained with the \code{\link{cumul}} function. The plot shows the model estimate (i.e., the estimated overall/average outcome) on the x-axis and some measure of heterogeneity on the y-axis in the cumulative order of the results in the \code{"cumul.rma"} object. By default, \mjseqn{\tau^2} is shown on the y-axis for a random-effects model and \mjseqn{I^2} otherwise, but one can also use argument \code{yaxis} to specify the measure of heterogeneity to place on the y-axis. The color gradient of the points/lines indicates the order of the cumulative results (by default, light gray at the beginning, dark gray at the end). A different set of colors can be chosen via the \code{cols} argument. See \sQuote{Examples}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{cumul.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### random-effects model res <- rma(yi, vi, data=dat) ### cumulative meta-analysis (in the order of publication year) sav <- cumul(res, transf=exp, order=dat$year) ### plot of model estimate and tau^2 over time plot(sav) ### illustrate some other plot options plot(sav, yaxis="I2", ylim=c(0,100), atransf=exp, at=log(seq(1.3, 1.6, by=.1)), lwd=5, cex=1.5, cols=c("green","blue","red")) } \keyword{hplot} metafor/man/to.wide.Rd0000644000176200001440000001264214055736403014336 0ustar liggesusers\name{to.wide} \alias{to.wide} \title{Convert Data from a Long to a Wide Format} \description{ The function converts data given in long format to a wide format. } \usage{ to.wide(data, study, grp, ref, grpvars, postfix=c(".1",".2"), addid=TRUE, addcomp=TRUE, adddesign=TRUE, minlen=2, var.names=c("id","comp","design")) } \arguments{ \item{data}{a data frame in long format.} \item{study}{either the name (given as a character string) or the position (given as a single number) of the study variable in the data frame.} \item{grp}{either the name (given as a character string) or the position (given as a single number) of the group variable in the data frame.} \item{ref}{optional character string to specify the reference group (must be one of the groups in the group variable). 
If not given, the most frequently occurring group is used as the reference group.} \item{grpvars}{either the names (given as a character vector) or the positions (given as a numeric vector) of the group-level variables.} \item{postfix}{a character string of length 2 giving the affix that is placed after the names of the group-level variables for the first and second group.} \item{addid}{logical to specify whether a row id variable should be added to the data frame (the default is \code{TRUE}).} \item{addcomp}{logical to specify whether a comparison id variable should be added to the data frame (the default is \code{TRUE}).} \item{adddesign}{logical to specify whether a design id variable should be added to the data frame (the default is \code{TRUE}).} \item{minlen}{integer to specify the minimum length of the shortened group names for the comparison and design id variables (the default is 2).} \item{var.names}{a character vector with three elements to specify the name of the id, comparison, and design variables (the defaults are \code{"id"}, \code{"comp"}, and \code{"design"}, respectively).} } \details{ A meta-analytic dataset may be structured in a \sQuote{long} format, where each row in the dataset corresponds to a particular study group (e.g., treatment arm). Using this function, such a dataset can be restructured into a \sQuote{wide} format, where each group within a study is contrasted against a particular reference group. The \code{study} and \code{grp} arguments are used to specify the study and group variables in the dataset (either as character strings or as numbers indicating the column positions of these variables in the dataset). Optional argument \code{ref} is used to specify the reference group (this must be one of the groups in the \code{grp} variable). Argument \code{grpvars} is used to specify (either as a character vector or by giving the column positions) those variables in the dataset that correspond to group-level outcomes (the remaining variables are treated as study-level outcomes). The dataset is restructured so that a two-group study will yield a single row in the restructured dataset, contrasting the first group against the second/reference group. For studies with more than two groups (often called \sQuote{multiarm} or \sQuote{multitreatment} studies in the medical literature), the reference group is repeated as many times as needed (so a three-group study would yield two rows in the restructured dataset, contrasting two groups against a common reference group). If a study does not include the reference group, then another group from the study will be used as the reference group. This group is chosen based on the factor levels of the \code{grp} variable (i.e., the last level that occurs in the study becomes the reference group). To distinguish the names of the group-level outcome variables for the first and second group in the restructured dataset, the strings given for the \code{postfix} argument are placed after the respective variable names. If requested, row id, comparison id, and design id variables are added to the restructured dataset. The row id is simply a unique number for each row in the dataset. The comparison id variable indicates which two groups have been compared against each other. The design id variable indicates which groups were included in a particular study. The group names are shortened for the comparison and design variables (to at least \code{minlen}; the actual length might be longer to ensure uniqueness of the group names).
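As a brief sketch of this restructuring, consider the small made-up data frame below (the variable names \code{study}, \code{trt}, \code{mi}, and \code{ni} are arbitrary and only used for illustration): \preformatted{## toy dataset in long format: study 1 has two groups, study 2 has three
dat.long <- data.frame(study = c(1,1,2,2,2),
                       trt   = c("A","B","A","B","C"),
                       mi    = c(2.1,1.8,3.0,2.5,2.7),
                       ni    = c(20,20,15,15,15))

## contrast each group against reference group "B";
## the three-group study yields two rows in the wide format
to.wide(dat.long, study="study", grp="trt", ref="B", grpvars=c("mi","ni"))}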
The examples below illustrate the use of this function. } \value{ A data frame with rows contrasting groups against a reference group and an appropriate number of columns (depending on the number of group-level outcome variables). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{contrmat}}, \code{\link{dat.hasselblad1998}}, \code{\link{dat.lopez2019}}, \code{\link{dat.obrien2003}}, \code{\link{dat.pagliaro1992}}, \code{\link{dat.senn2013}} } \examples{ ### data in long format dat <- dat.senn2013 dat <- dat[c(1,4,3,2,5,6)] dat ### restructure to wide format dat <- to.wide(dat, study="study", grp="treatment", ref="placebo", grpvars=4:6) dat ### data in long format dat <- dat.hasselblad1998 dat ### restructure to wide format dat <- to.wide(dat, study="study", grp="trt", ref="no_contact", grpvars=6:7) dat } \keyword{manip} metafor/man/forest.cumul.rma.Rd0000644000176200001440000002246714055736403016177 0ustar liggesusers\name{forest.cumul.rma} \alias{forest.cumul.rma} \title{Forest Plots (Method for 'cumul.rma' Objects)} \description{ Function to create forest plots for objects of class \code{"cumul.rma"}. } \usage{ \method{forest}{cumul.rma}(x, annotate=TRUE, header=FALSE, xlim, alim, olim, ylim, top=3, at, steps=5, level=x$level, refline=0, digits=2L, width, xlab, ilab, ilab.xpos, ilab.pos, transf, atransf, targs, rows, efac=1, pch=15, psize=1, col, lty, fonts, cex, cex.lab, cex.axis, annosym, \dots) } \arguments{ \item{x}{an object of class \code{"cumul.rma"}.} \item{annotate}{logical to specify whether annotations should be added to the plot (the default is \code{TRUE}).} \item{header}{logical to specify whether column headings should be added to the plot (the default is \code{FALSE}). Can also be a character vector to specify the left and right headings.} \item{xlim}{horizontal limits of the plot region. If unspecified, the function tries to set the horizontal plot limits to some sensible values.} \item{alim}{the actual x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{olim}{optional argument to specify observation/outcome limits. If unspecified, no limits are used.} \item{ylim}{the y-axis limits of the plot. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{top}{the amount of space to leave empty at the top of the plot (e.g., for adding headers) (the default is 3 rows).} \item{at}{position of the x-axis tick marks and corresponding labels. If unspecified, the function tries to set the tick mark positions/labels to some sensible values.} \item{steps}{the number of tick marks for the x-axis (the default is 5). Ignored when the positions are specified via the \code{at} argument.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is to take the value from the object).} \item{refline}{numeric value to specify the location of the vertical \sQuote{reference} line (the default is 0). The line can be suppressed by setting this argument to \code{NA}.} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the x-axis and the annotations should be rounded (the default is \code{2L}). 
Can also be a vector of two integers, the first to specify the number of decimal places for the annotations, the second for the x-axis labels. When specifying an integer (e.g., \code{2L}), trailing zeros after the decimal mark are dropped for the x-axis labels. When specifying a numeric value (e.g., \code{2}), trailing zeros are retained.}
\item{width}{optional integer to manually adjust the width of the columns for the annotations.}
\item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.}
\item{ilab}{optional vector, matrix, or data frame providing additional information about the studies that should be added to the plot.}
\item{ilab.xpos}{numeric vector to specify the x-axis position(s) of the variable(s) given via \code{ilab} (must be specified if \code{ilab} is specified).}
\item{ilab.pos}{integer(s) (either 1, 2, 3, or 4) to specify the alignment of the vector(s) given via \code{ilab} (2 means right, 4 means left aligned). If unspecified, the default is to center the labels.}
\item{transf}{optional argument to specify the name of a function that should be used to transform the estimates and confidence interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.}
\item{atransf}{optional argument to specify the name of a function that should be used to transform the x-axis labels and annotations (e.g., \code{atransf=exp}; see also \link{transf}). If unspecified, no transformation is used.}
\item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.}
\item{rows}{optional vector to specify the rows (or more generally, the horizontal positions) for plotting the outcomes. Can also be a single value to specify the row (horizontal position) of the first outcome (the remaining outcomes are then plotted below this starting row). If unspecified, the function sets this value automatically.}
\item{efac}{vertical expansion factor for confidence interval limits and arrows. The default value of 1 should usually work okay. Can also be a vector of two numbers, the first for CI limits, the second for arrows.}
\item{pch}{plotting symbol to use for the estimates. By default, a filled square is used. See \code{\link{points}} for other options. Can also be a vector of values.}
\item{psize}{numeric value to specify the point sizes for the estimates (the default is 1). Can also be a vector of values.}
\item{col}{optional character string to specify the name of a color to use for plotting (\code{"black"} is used by default if not specified). Can also be a vector of color names.}
\item{lty}{optional character string to specify the line type for the confidence intervals. If unspecified, the function sets this to \code{"solid"} by default.}
\item{fonts}{optional character string to specify the font to use for the study labels, annotations, and the extra information (if specified via \code{ilab}). If unspecified, the default font is used.}
\item{cex}{optional character and symbol expansion factor. If unspecified, the function tries to set this to a sensible value.}
\item{cex.lab}{optional expansion factor for the x-axis title. If unspecified, the function tries to set this to a sensible value.}
\item{cex.axis}{optional expansion factor for the x-axis labels.
If unspecified, the function tries to set this to a sensible value.} \item{annosym}{optional vector of length 3 to change the left bracket, separation, and right bracket symbols for the annotations.} \item{\dots}{other arguments.} } \details{ The plot shows the estimated (average) outcome with corresponding confidence interval as one study at a time is added to the analysis. } \note{ The function tries to set some sensible values for the optional arguments, but it may be necessary to adjust these in certain circumstances. The function actually returns some information about the chosen defaults invisibly. Printing this information is useful as a starting point to make adjustments to the plot. If the number of studies is quite large, the labels, annotations, and symbols may become quite small and impossible to read. Stretching the plot window vertically may then provide a more readable figure (one should call the function again after adjusting the window size, so that the label/symbol sizes can be properly adjusted). Also, the \code{cex}, \code{cex.lab}, and \code{cex.axis} arguments are then useful to adjust the symbol and text sizes. If the horizontal plot and/or x-axis limits are set manually, then the horizontal plot limits (\code{xlim}) must be at least as wide as the x-axis limits (\code{alim}). This restriction is enforced inside the function. If the outcome measure used for creating the plot is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{olim} argument to enforce those limits (the observed outcomes and confidence intervals cannot exceed those bounds then). The \code{lty} argument can also be a vector of two elements, the first for specifying the line type of the individual CIs (\code{"solid"} by default), the second for the line type of the horizontal line that is automatically added to the plot (\code{"solid"} by default; set to \code{"blank"} to remove it). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Chalmers, T. C., & Lau, J. (1993). Meta-analytic stimulus for changes in clinical trials. \emph{Statistical Methods in Medical Research}, \bold{2}(2), 161--172. \verb{https://doi.org/10.1177/096228029300200204} Lau, J., Schmid, C. H., & Chalmers, T. C. (1995). Cumulative meta-analysis of clinical trials builds evidence for exemplary medical care. \emph{Journal of Clinical Epidemiology}, \bold{48}(1), 45--57. \verb{https://doi.org/10.1016/0895-4356(94)00106-z} Lewis, S., & Clarke, M. (2001). Forest plots: Trying to see the wood and the trees. \emph{British Medical Journal}, \bold{322}(7300), 1479--1480. \verb{https://doi.org/10.1136/bmj.322.7300.1479} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03}
}
\seealso{
\code{\link{forest}}, \code{\link{cumul}}
}
\examples{
### calculate log risk ratios and corresponding sampling variances
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

### fit random-effects model
res <- rma(yi, vi, data=dat, slab=paste(author, year, sep=", "))

### draw cumulative forest plots
x <- cumul(res, order=dat$year)
forest(x, cex=.8, header=TRUE)
forest(x, alim=c(-2,1), cex=.8, header=TRUE)

### meta-analysis of the (log) risk ratios using the Mantel-Haenszel method
res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", "))

### draw cumulative forest plot
x <- cumul(res, order=dat$year)
forest(x, alim=c(-2,1), cex=.8, header=TRUE)
}
\keyword{hplot}
metafor/man/dat.egger2001.Rd0000644000176200001440000001102314055736403015120 0ustar liggesusers\name{dat.egger2001}
\docType{data}
\alias{dat.egger2001}
\title{Studies on the Effectiveness of Intravenous Magnesium in Acute Myocardial Infarction}
\description{Results from 16 trials examining the effectiveness of intravenous magnesium in the prevention of death following acute myocardial infarction.}
\usage{
dat.egger2001
}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{id} \tab \code{numeric} \tab trial id number \cr
\bold{study} \tab \code{character} \tab first author or trial name \cr
\bold{year} \tab \code{numeric} \tab publication year \cr
\bold{ai} \tab \code{numeric} \tab number of deaths in the magnesium group \cr
\bold{n1i} \tab \code{numeric} \tab number of patients in the magnesium group \cr
\bold{ci} \tab \code{numeric} \tab number of deaths in the control group \cr
\bold{n2i} \tab \code{numeric} \tab number of patients in the control group
}
}
\details{
The dataset includes the results from 16 randomized clinical trials that examined the effectiveness of intravenous magnesium in the prevention of death following acute myocardial infarction. Studies 1-7 were included in the meta-analyses by Teo et al. (1991) and Horner (1992) and were combined with the results from the LIMIT-2 trial (Woods et al., 1992) in Yusuf et al. (1993), suggesting that magnesium is an effective treatment for reducing mortality. However, the results from the ISIS-4 mega trial (ISIS-4 Collaborative Group, 1995) indicated no reduction in mortality with magnesium treatment. Publication bias has been suggested as one possible explanation for the conflicting findings (Egger & Davey Smith, 1995). The present dataset includes some additional trials and is based on Table 18.2 from Egger, Davey Smith, and Altman (2001).
}
\source{
Egger, M., Davey Smith, G., & Altman, D. G. (Eds.) (2001). \emph{Systematic reviews in health care: Meta-analysis in context} (2nd ed.). London: BMJ Books.
}
\references{
Egger, M., & Davey Smith, G. (1995). Misleading meta-analysis: Lessons from \dQuote{an effective, safe, simple} intervention that wasn't. \emph{British Medical Journal}, \bold{310}(6982), 752--754. \verb{https://doi.org/10.1136/bmj.310.6982.752}

Horner, S. M. (1992). Efficacy of intravenous magnesium in acute myocardial infarction in reducing arrhythmias and mortality: Meta-analysis of magnesium in acute myocardial infarction. \emph{Circulation}, \bold{86}(3), 774--779. \verb{https://doi.org/10.1161/01.cir.86.3.774}

ISIS-4 Collaborative Group (1995).
ISIS-4: A randomised factorial trial assessing early oral captopril, oral mononitrate, and intravenous magnesium sulphate in 58,050 patients with suspected acute myocardial infarction. \emph{Lancet}, \bold{345}(8951), 669--685. \verb{https://doi.org/10.1016/S0140-6736(95)90865-X} Teo, K. K., Yusuf, S., Collins, R., Held, P. H., & Peto, R. (1991). Effects of intravenous magnesium in suspected acute myocardial infarction: Overview of randomised trials. \emph{British Medical Journal}, \bold{303}(6816), 1499--1503. \verb{https://doi.org/10.1136/bmj.303.6816.1499} Woods, K. L., Fletcher, S., Roffe, C., & Haider, Y. (1992). Intravenous magnesium sulphate in suspected acute myocardial infarction: Results of the second Leicester Intravenous Magnesium Intervention Trial (LIMIT-2). \emph{Lancet}, \bold{339}(8809), 1553--1558. \verb{https://doi.org/10.1016/0140-6736(92)91828-v} Yusuf, S., Teo, K., & Woods, K. (1993). Intravenous magnesium in acute myocardial infarction: An effective, safe, simple, and inexpensive treatment. \emph{Circulation}, \bold{87}(6), 2043--2046. \verb{https://doi.org/10.1161/01.cir.87.6.2043} } \seealso{ \code{\link{dat.li2007}} } \examples{ ### copy data into 'dat' and examine data dat <- dat.egger2001 dat ### meta-analysis of trials 1-7 using Peto's method (as in Teo et al., 1991) res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=1:7) print(res, digits=2) ### meta-analysis of trials 1-7 and LIMIT-2 (as in Yusuf et al., 1993) res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=c(1:7,14)) print(res, digits=2) ### meta-analysis of all trials except ISIS-4 res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=-16) print(res, digits=2) predict(res, transf=exp, digits=2) ### meta-analysis of all trials including ISIS-4 res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) print(res, digits=2) predict(res, transf=exp, digits=2) ### contour-enhanced funnel plot centered at 0 funnel(res, refline=0, level=c(90, 95, 99), shade=c("white", "gray", "darkgray")) } \keyword{datasets} metafor/man/baujat.Rd0000644000176200001440000001201514055736403014225 0ustar liggesusers\name{baujat} \alias{baujat} \alias{baujat.rma} \title{Baujat Plots for 'rma' Objects} \description{ Function to create Baujat plots for objects of class \code{"rma"}. \loadmathjax } \usage{ baujat(x, \dots) \method{baujat}{rma}(x, xlim, ylim, xlab, ylab, cex, symbol="ids", grid=TRUE, progbar=FALSE, \dots) } \arguments{ \item{x}{an object of class \code{"rma"}.} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis. If unspecified, the function tries to set an appropriate axis title.} \item{cex}{optional character expansion factor. 
If unspecified, the function tries to set this to a sensible value.} \item{symbol}{either an integer to specify the \code{pch} value (i.e., plotting symbol), or \code{"slab"} to plot the study labels, or \code{"ids"} (the default) to plot the study id numbers.} \item{grid}{logical to specify whether a grid should be added to the plot (can also be a color name).} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ The model specified via \code{x} must be a model fitted with either the \code{\link{rma.uni}}, \code{\link{rma.mh}}, or \code{\link{rma.peto}} functions. Baujat et al. (2002) proposed a diagnostic plot to detect sources of heterogeneity in meta-analytic data. The plot shows the contribution of each study to the overall \mjseqn{Q}-test statistic for heterogeneity on the x-axis versus the influence of each study (defined as the standardized squared difference between the overall estimate based on a fixed-effects model with and without the study included in the model fitting) on the y-axis. The same type of plot can be produced by first fitting a fixed-effects model with either the \code{\link{rma.uni}} (using \code{method="FE"}), \code{\link{rma.mh}}, or \code{\link{rma.peto}} functions and then passing the fitted model object to the \code{baujat} function. For models fitted with the \code{\link{rma.uni}} function (which may involve moderators and/or which may be random/mixed-effects models), the idea underlying this type of plot can be generalized as follows (Viechtbauer, 2021): The x-axis then corresponds to the squared Pearson residual of a study, while the y-axis corresponds to the standardized squared difference between the predicted/fitted value for the study with and without the study included in the model fitting. Therefore, for a fixed-effects with moderators model, the x-axis corresponds to the contribution of the study to the \mjseqn{Q_E}-test statistic for residual heterogeneity. By default, the points plotted are the study id numbers, but one can also plot the study labels by setting \code{symbol="slab"} (if study labels are available within the model object) or one can specify a plotting symbol via the \code{symbol} argument that gets passed to \code{pch} (see \code{\link{points}} for possible options). } \value{ A data frame with components: \item{x}{the x-axis coordinates of the points that were plotted.} \item{y}{the y-axis coordinates of the points that were plotted.} \item{ids}{the study id numbers.} \item{slab}{the study labels.} Note that the data frame is returned invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Baujat, B., Mahe, C., Pignon, J.-P., & Hill, C. (2002). A graphical method for exploring heterogeneity in meta-analyses: Application to a meta-analysis of 65 trials. \emph{Statistics in Medicine}, \bold{21}(18), 2641--2652. \verb{https://doi.org/10.1002/sim.1221} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W. (2021). Model checking in meta-analysis. In C. H. Schmid, T. Stijnen, & I. R. White (Eds.), \emph{Handbook of meta-analysis} (pp. 219--254). Boca Raton, FL: CRC Press. 
\verb{https://doi.org/10.1201/9781315119403} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{influence.rma.uni}}, \code{\link{radial}} } \examples{ ### copy data from Pignon et al. (2000) into 'dat' dat <- dat.pignon2000 ### calculate estimated log hazard ratios and sampling variances dat$yi <- with(dat, OmE/V) dat$vi <- with(dat, 1/V) ### meta-analysis based on all 65 trials res <- rma(yi, vi, data=dat, method="FE", slab=trial) ### create Baujat plot baujat(res) ### some variations of the plotting symbol baujat(res, symbol=19) baujat(res, symbol="slab") ### label only a selection of the more 'extreme' points sav <- baujat(res, symbol=19, xlim=c(0,20)) sav <- sav[sav$x >= 10 | sav$y >= 0.10,] text(sav$x, sav$y, sav$slab, pos=1) } \keyword{hplot} metafor/man/fitted.rma.Rd0000644000176200001440000000311514055736403015015 0ustar liggesusers\name{fitted.rma} \alias{fitted} \alias{fitted.rma} \title{Fitted Values for 'rma' Objects} \description{ The function computes the fitted values for objects of class \code{"rma"}. } \usage{ \method{fitted}{rma}(object, \dots) } \arguments{ \item{object}{an object of class \code{"rma"}.} \item{\dots}{other arguments.} } \value{ A vector with the fitted values. } \note{ The \code{\link{predict.rma}} function also provides standard errors and confidence intervals for the fitted values. Best linear unbiased predictions (BLUPs) that combine the fitted values based on the fixed effects and the estimated contributions of the random effects can be obtained with \code{\link{blup.rma.uni}} (only for objects of class \code{"rma.uni"}). For objects not involving moderators, the fitted values are all identical to the estimated value of the model intercept. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{predict.rma}}, \code{\link{blup.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### compute the fitted values fitted(res) } \keyword{models} metafor/man/dat.berkey1998.Rd0000644000176200001440000001000414055736403015336 0ustar liggesusers\name{dat.berkey1998} \docType{data} \alias{dat.berkey1998} \title{Studies on Treatments for Periodontal Disease} \description{Results from 5 trials comparing surgical and non-surgical treatments for medium-severity periodontal disease one year after treatment. 
\loadmathjax} \usage{dat.berkey1998} \format{The data frame contains the following columns: \tabular{lll}{ \bold{trial} \tab \code{numeric} \tab trial number \cr \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ni} \tab \code{numeric} \tab number of patients \cr \bold{outcome} \tab \code{character} \tab outcome (PD = probing depth; AL = attachment level) \cr \bold{yi} \tab \code{numeric} \tab observed mean difference in outcome (surgical versus non-surgical) \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance \cr \bold{v1i} \tab \code{numeric} \tab variances and covariances of the observed effects \cr \bold{v2i} \tab \code{numeric} \tab variances and covariances of the observed effects } } \details{ The dataset includes the results from 5 trials that compared surgical and non-surgical methods for the treatment of medium-severity periodontal disease. Reported outcomes include the change in probing depth (PD) and attachment level (AL) one year after the treatment. The outcome measure used for this meta-analysis was the (raw) mean difference, calculated in such a way that positive values indicate that surgery was more effective than non-surgical treatment in decreasing the probing depth and increasing the attachment level (so, the results from the various trials indicate that surgery is preferable for reducing the probing depth, while non-surgical treatment is preferable for increasing the attachment level). Since each trial provides effect size estimates for both outcomes, the estimates are correlated. A multivariate model can be used to meta-analyze the two outcomes simultaneously. The \code{v1i} and \code{v2i} values are the variances and covariances of the observed effects. In particular, for each study, variables \code{v1i} and \code{v2i} form a \mjeqn{2 \times 2}{2x2} variance-covariance matrix of the observed effects, with the diagonal elements corresponding to the sampling variances of the mean differences (the first for probing depth, the second for attachment level) and the off-diagonal value corresponding to the covariance of the two mean differences. Below, the full (block diagonal) variance-covariance for all studies is constructed from these two variables. } \source{ Berkey, C. S., Antczak-Bouckoms, A., Hoaglin, D. C., Mosteller, F., & Pihlstrom, B. L. (1995). Multiple-outcomes meta-analysis of treatments for periodontal disease. \emph{Journal of Dental Research}, \bold{74}(4), 1030--1039. \verb{https://doi.org/10.1177/00220345950740040201} Berkey, C. S., Hoaglin, D. C., Antczak-Bouckoms, A., Mosteller, F., & Colditz, G. A. (1998). Meta-analysis of multiple outcomes by regression with random effects. \emph{Statistics in Medicine}, \bold{17}(22), 2537--2550. 
\verb{https://doi.org/10.1002/(sici)1097-0258(19981130)17:22<2537::aid-sim953>3.0.co;2-c} } \examples{ ### copy data into 'dat' and examine data dat <- dat.berkey1998 dat ### construct list with the variance-covariance matrices of the observed outcomes for the studies V <- lapply(split(dat[c("v1i", "v2i")], dat$trial), as.matrix) ### construct block diagonal matrix V <- bldiag(V) ### fit multiple outcomes (meta-regression) model (with REML estimation) res <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat) print(res, digits=3) ### test/estimate difference between the two outcomes anova(res, X=c(1,-1)) ### fit model including publication year as moderator for both outcomes (with ML estimation) res <- rma.mv(yi, V, mods = ~ outcome + outcome:I(year - 1983) - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") print(res, digits=3) } \keyword{datasets} metafor/man/print.list.rma.Rd0000644000176200001440000000200214055736403015636 0ustar liggesusers\name{print.list.rma} \alias{print.list.rma} \title{Print method for 'list.rma' Objects} \description{ Print method for objects of class \code{"list.rma"}. } \usage{ \method{print}{list.rma}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"list.rma"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \value{ See the documentation of the function that creates the \code{"list.rma"} object for details on what is printed. Regardless of what is printed, a data frame with the results is also returned invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \keyword{print} metafor/man/addpoly.rma.Rd0000644000176200001440000001323314055736403015174 0ustar liggesusers\name{addpoly.rma} \alias{addpoly.rma} \title{Add Polygons to Forest Plots (Method for 'rma' Objects)} \description{ Function to add a polygon to a forest plot showing the summary estimate with corresponding confidence interval based on an object of class \code{"rma"}. } \usage{ \method{addpoly}{rma}(x, row=-2, level=x$level, annotate=TRUE, addpred=FALSE, digits=2, width, mlab, transf, atransf, targs, efac=1, col, border, fonts, cex, \dots) } \arguments{ \item{x}{an object of class \code{"rma"}.} \item{row}{numeric value to specify the row (or more generally, the horizontal position) for plotting the polygon (the default is \code{-2}).} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is to take the value from the object).} \item{annotate}{logical to specify whether annotations for the summary estimate should be added to the plot (the default is \code{TRUE}).} \item{addpred}{logical to specify whether the bounds of the prediction interval should be added to the plot (the default is \code{FALSE}).} \item{digits}{integer to specify the number of decimal places to which the annotations should be rounded (the default is 2).} \item{width}{optional integer to manually adjust the width of the columns for the annotations.} \item{mlab}{optional character string giving a label for the summary estimate polygon. 
If unspecified, the function sets a default label.}
\item{transf}{optional argument to specify a function that should be used to transform the summary estimate and confidence interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.}
\item{atransf}{optional argument to specify a function that should be used to transform the annotations (e.g., \code{atransf=exp}; see also \link{transf}). If unspecified, no transformation is used.}
\item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.}
\item{efac}{vertical expansion factor for the polygon. The default value of 1 should usually work okay.}
\item{col}{optional character string to specify the name of a color to use for the polygon. If unspecified, the function sets a default color.}
\item{border}{optional character string to specify the name of a color to use for the border of the polygon. If unspecified, the function sets a default color.}
\item{fonts}{optional character string to specify the font to use for the label and annotations. If unspecified, the default font is used.}
\item{cex}{optional symbol expansion factor. If unspecified, the function tries to set this to a sensible value.}
\item{\dots}{other arguments.}
}
\details{
The function can be used to add a four-sided polygon, sometimes called a summary \sQuote{diamond}, to an existing forest plot created with the \code{\link{forest}} function. The polygon shows the summary estimate (with its confidence interval bounds) based on a fixed- or random-effects model. Using this function, summary estimates based on different types of models can be shown in the same plot. Also, summary estimates based on a subgrouping of the studies can be added to the plot this way. See \sQuote{Examples}.

The arguments \code{transf}, \code{atransf}, \code{efac}, and \code{cex} should always be set equal to the same values used to create the forest plot.
}
\author{
Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org}
}
\references{
Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{forest.rma}}, \code{\link{forest.default}} } \examples{ ### meta-analysis of the log risk ratios using the Mantel-Haenszel method res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", ")) ### forest plot of the observed risk ratios with summary estimate forest(res, atransf=exp, xlim=c(-8,6), ylim=c(-2.5,16), header=TRUE) ### meta-analysis of the log risk ratios using a random-effects model res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### add summary estimate from the random-effects model to the forest plot addpoly(res, atransf=exp) ### forest plot with subgrouping of studies and summaries per subgroup res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", ")) forest(res, xlim=c(-16, 4.6), at=log(c(.05, .25, 1, 4)), atransf=exp, ilab=cbind(dat.bcg$tpos, dat.bcg$tneg, dat.bcg$cpos, dat.bcg$cneg), ilab.xpos=c(-9.5,-8,-6,-4.5), cex=.75, ylim=c(-1, 27), order=dat.bcg$alloc, rows=c(3:4,9:15,20:23), mlab="RE Model for All Studies", header="Author(s) and Year") op <- par(cex=.75, font=2) text(c(-9.5,-8,-6,-4.5), 26, c("TB+", "TB-", "TB+", "TB-")) text(c(-8.75,-5.25), 27, c("Vaccinated", "Control")) par(font=4) text(-16, c(24,16,5), c("Systematic Allocation", "Random Allocation", "Alternate Allocation"), pos=4) par(op) res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=(alloc=="systematic")) addpoly(res, row=18.5, cex=.75, atransf=exp, mlab="RE Model for Subgroup") res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=(alloc=="random")) addpoly(res, row=7.5, cex=.75, atransf=exp, mlab="RE Model for Subgroup") res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=(alloc=="alternate")) addpoly(res, row=1.5, cex=.75, atransf=exp, mlab="RE Model for Subgroup") } \keyword{aplot} metafor/man/dat.lopez2019.Rd0000644000176200001440000001431014055736403015173 0ustar liggesusers\name{dat.lopez2019} \docType{data} \alias{dat.lopez2019} \title{Studies on the Effectiveness of CBT for Depression} \description{Results from 76 studies examining the effectiveness of cognitive behavioral therapy (CBT) for depression in adults.} \usage{dat.lopez2019} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab (first) author and year of study \cr \bold{treatment} \tab \code{character} \tab treatment provided (see \sQuote{Details}) \cr \bold{scale} \tab \code{character} \tab scale used to measure depression symptoms \cr \bold{n} \tab \code{numeric} \tab group size \cr \bold{diff} \tab \code{numeric} \tab standardized mean change \cr \bold{se} \tab \code{numeric} \tab corresponding standard error \cr \bold{group} \tab \code{numeric} \tab type of therapy (0 = individual, 1 = group therapy) \cr \bold{tailored} \tab \code{numeric} \tab whether the intervention was tailored to each patient (0 = no, 1 = yes) \cr \bold{sessions} \tab \code{numeric} \tab number of sessions \cr \bold{length} \tab \code{numeric} \tab average session length (in minutes) \cr \bold{intensity} \tab \code{numeric} \tab product of sessions and length \cr \bold{multi} \tab \code{numeric} \tab intervention included multimedia elements (0 = no, 1 = yes) \cr \bold{cog} \tab \code{numeric} \tab intervention included cognitive techniques (0 = no, 1 = yes) \cr \bold{ba} \tab \code{numeric} \tab intervention included 
behavioral activation (0 = no, 1 = yes) \cr \bold{psed} \tab \code{numeric} \tab intervention included psychoeducation (0 = no, 1 = yes) \cr \bold{home} \tab \code{numeric} \tab intervention included homework (0 = no, 1 = yes) \cr \bold{prob} \tab \code{numeric} \tab intervention included problem solving (0 = no, 1 = yes) \cr \bold{soc} \tab \code{numeric} \tab intervention included social skills training (0 = no, 1 = yes) \cr \bold{relax} \tab \code{numeric} \tab intervention included relaxation (0 = no, 1 = yes) \cr \bold{goal} \tab \code{numeric} \tab intervention included goal setting (0 = no, 1 = yes) \cr \bold{final} \tab \code{numeric} \tab intervention included a final session (0 = no, 1 = yes) \cr \bold{mind} \tab \code{numeric} \tab intervention included mindfulness (0 = no, 1 = yes) \cr \bold{act} \tab \code{numeric} \tab intervention included acceptance and commitment therapy (0 = no, 1 = yes) } } \details{ The dataset includes the results from 76 studies examining the effectiveness of cognitive behavioral therapy (CBT) for treating depression in adults. Studies included two or more of the following treatments/conditions: \enumerate{ \item treatment as usual (TAU), \item no treatment, \item wait list, \item psychological or attention placebo, \item face-to-face CBT, \item multimedia CBT, \item hybrid CBT (i.e., multimedia CBT with one or more face-to-face sessions). } Multimedia CBT was defined as CBT delivered via self-help books, audio/video recordings, telephone, computer programs, apps, e-mail, or text messages. Variable \code{diff} is the standardized mean change within each group, with negative values indicating a decrease in depression symptoms. } \source{ Personal communication. } \references{ López-López, J. A., Davies, S. R., Caldwell, D. M., Churchill, R., Peters, T. J., Tallon, D., Dawson, S., Wu, Q., Li, J., Taylor, A., Lewis, G., Kessler, D. S., Wiles, N., & Welton, N. J. (2019). The process and delivery of CBT for depression in adults: A systematic review and network meta-analysis. \emph{Psychological Medicine}, \bold{49}(12), 1937--1947. 
\verb{https://doi.org/10.1017/S003329171900120X} } \examples{ ### copy data into 'dat' and examine data dat <- dat.lopez2019 dat[1:10,1:6] ### create network graph ('igraph' package must be installed) \dontrun{ require(igraph) pairs <- data.frame(do.call(rbind, sapply(split(dat$treatment, dat$study), function(x) t(combn(x,2)))), stringsAsFactors=FALSE) pairs$X1 <- factor(pairs$X1, levels=sort(unique(dat$treatment))) pairs$X2 <- factor(pairs$X2, levels=sort(unique(dat$treatment))) tab <- table(pairs[,1], pairs[,2]) tab # adjacency matrix g <- graph_from_adjacency_matrix(tab, mode = "plus", weighted=TRUE, diag=FALSE) plot(g, edge.curved=FALSE, edge.width=E(g)$weight/2, layout=layout_in_circle(g, order=c("Wait list", "No treatment", "TAU", "Multimedia CBT", "Hybrid CBT", "F2F CBT", "Placebo")), vertex.size=45, vertex.color="lightgray", vertex.label.color="black", vertex.label.font=2)} ### restructure data into wide format dat <- to.wide(dat, study="study", grp="treatment", ref="TAU", grpvars=c("diff","se","n"), postfix=c("1","2")) ### compute contrasts between treatment pairs and corresponding sampling variances dat$yi <- with(dat, diff1 - diff2) dat$vi <- with(dat, se1^2 + se2^2) ### calculate the variance-covariance matrix for multitreatment studies calc.v <- function(x) { v <- matrix(x$se2[1]^2, nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) ### add contrast matrix to the dataset dat <- contrmat(dat, grp1="treatment1", grp2="treatment2") ### network meta-analysis using a contrast-based random-effects model ### by setting rho=1/2, tau^2 reflects the amount of heterogeneity for all treatment comparisons ### the treatment left out (TAU) becomes the reference level for the treatment comparisons res <- rma.mv(yi, V, data=dat, mods = ~ No.treatment + Wait.list + Placebo + F2F.CBT + Hybrid.CBT + Multimedia.CBT - 1, random = ~ comp | study, rho=1/2) res ### forest plot of the contrast estimates (treatments versus TAU) forest(coef(res), diag(vcov(res)), slab=sub(".", " ", names(coef(res)), fixed=TRUE), xlim=c(-5,5), alim=c(-3,3), psize=1, header="Treatment", xlab="Difference in Standardized Mean Change (compared to TAU)") ### fit random inconsistency effects model res <- rma.mv(yi, V, data=dat, mods = ~ No.treatment + Wait.list + Placebo + F2F.CBT + Hybrid.CBT + Multimedia.CBT - 1, random = list(~ comp | study, ~ comp | design), rho=1/2, phi=1/2) res } \keyword{datasets} metafor/man/cumul.Rd0000644000176200001440000001262514055736403014113 0ustar liggesusers\name{cumul} \alias{cumul} \alias{cumul.rma.uni} \alias{cumul.rma.mh} \alias{cumul.rma.peto} \title{Cumulative Meta-Analysis for 'rma' Objects} \description{ The functions repeatedly fit the specified model, adding one study at a time to the model. \loadmathjax } \usage{ cumul(x, \dots) \method{cumul}{rma.uni}(x, order, digits, transf, targs, progbar=FALSE, \dots) \method{cumul}{rma.mh}(x, order, digits, transf, targs, progbar=FALSE, \dots) \method{cumul}{rma.peto}(x, order, digits, transf, targs, progbar=FALSE, \dots) } \arguments{ \item{x}{an object of class \code{"rma.mh"}, \code{"rma.peto"}, \code{"rma.uni"}.} \item{order}{optional argument to specify a variable based on which the studies will be ordered for the cumulative meta-analysis.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. 
If unspecified, the default is to take the value from the object.} \item{transf}{optional argument to specify a function that should be used to transform the model coefficients and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ For \code{"rma.uni"} objects, the model specified via \code{x} must be a model without moderators (i.e., either a fixed- or a random-effects model). If argument \code{order} is not specified, the studies are added according to their order in the original dataset. When a variable is specified for \code{order}, the variable is assumed to be of the same length as the original dataset that was used in the model fitting. Any subsetting and removal of studies with missing values that was applied during the model fitting is also automatically applied to the variable specified via the \code{order} argument. See \sQuote{Examples}. } \value{ An object of class \code{c("list.rma","cumul.rma")}. The object is a list containing the following components: \item{estimate}{estimated (average) outcomes.} \item{se}{corresponding standard errors.} \item{zval}{corresponding test statistics.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bounds of the confidence intervals.} \item{ci.ub}{upper bounds of the confidence intervals.} \item{Q}{test statistics for the test of heterogeneity.} \item{Qp}{corresponding p-values.} \item{tau2}{estimated amount of heterogeneity (only for random-effects models).} \item{I2}{values of \mjseqn{I^2}.} \item{H2}{values of \mjseqn{H^2}.} \item{\dots}{other arguments.} When the model was fitted with \code{test="t"} or \code{test="knha"}, then \code{zval} is called \code{tval} in the object that is returned by the function. The object is formatted and printed with \code{\link{print.list.rma}}. A forest plot showing the results from the cumulative meta-analysis can be obtained with \code{\link{forest.cumul.rma}}. Alternatively, \code{\link{plot.cumul.rma}} can also be used to visualize the results. } \note{ When using the \code{transf} option, the transformation is applied to the estimated coefficients and the corresponding interval bounds. The standard errors are then set equal to \code{NA} and are omitted from the printed output. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Chalmers, T. C., & Lau, J. (1993). Meta-analytic stimulus for changes in clinical trials. \emph{Statistical Methods in Medical Research}, \bold{2}(2), 161--172. \verb{https://doi.org/10.1177/096228029300200204} Lau, J., Schmid, C. H., & Chalmers, T. C. (1995). Cumulative meta-analysis of clinical trials builds evidence for exemplary medical care. \emph{Journal of Clinical Epidemiology}, \bold{48}(1), 45--57. \verb{https://doi.org/10.1016/0895-4356(94)00106-z} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03}
}
\seealso{
\code{\link{forest.cumul.rma}}, \code{\link{plot.cumul.rma}}
}
\examples{
### calculate log risk ratios and corresponding sampling variances
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

### fit random-effects model
res <- rma(yi, vi, data=dat)

### cumulative meta-analysis (in the order of publication year)
cumul(res, transf=exp, order=dat$year)

### meta-analysis of the (log) risk ratios using the Mantel-Haenszel method
res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

### cumulative meta-analysis
cumul(res, order=dat.bcg$year)
cumul(res, order=dat.bcg$year, transf=TRUE)

### meta-analysis of the (log) odds ratios using Peto's method
res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

### cumulative meta-analysis
cumul(res, order=dat.bcg$year)
cumul(res, order=dat.bcg$year, transf=TRUE)

### make first log risk ratio missing and fit model without study 2; then the
### variable specified via 'order' should still be of the same length as the
### original dataset; subsetting and removal of studies with missing values is
### automatically done by the cumul() function
dat$yi[1] <- NA
res <- rma(yi, vi, data=dat, subset=-2)
cumul(res, transf=exp, order=dat$year)
}
\keyword{methods}
metafor/man/print.robust.rma.Rd0000644000176200001440000000372314055736403016214 0ustar liggesusers\name{print.robust.rma}
\alias{print.robust.rma}
\title{Print Method for 'robust.rma' Objects}
\description{
Print method for objects of class \code{"robust.rma"}.
}
\usage{
\method{print}{robust.rma}(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots)
}
\arguments{
\item{x}{an object of class \code{"robust.rma"}.}
\item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).}
\item{signif.stars}{logical to specify whether p-values should be encoded visually with \sQuote{significance stars}. Defaults to the \code{show.signif.stars} slot of \code{\link{options}}.}
\item{signif.legend}{logical to specify whether the legend for the \sQuote{significance stars} should be printed. Defaults to the value for \code{signif.stars}.}
\item{\dots}{other arguments.}
}
\details{
The output includes:
\itemize{
\item information on the number of observed effect sizes or outcomes, number of clusters, and the number of effect sizes or outcomes per cluster.
\item the results of the omnibus (Wald-type) test of the coefficients in the model (the indices of the coefficients tested are also indicated). Suppressed if the model includes only one coefficient (e.g., only an intercept, like in the fixed- and random-effects model).
\item a table with the estimated coefficients, corresponding standard errors, test statistics, p-values, and confidence interval bounds.
}
}
\value{
The function does not return an object.
}
\author{
Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org}
}
\references{
Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{robust.rma.uni}}, \code{\link{robust.rma.mv}} } \keyword{print} metafor/man/dat.bourassa1996.Rd0000644000176200001440000000730714055736403015706 0ustar liggesusers\name{dat.bourassa1996} \docType{data} \alias{dat.bourassa1996} \title{Studies on the Association between Handedness and Eye-Dominance} \description{Results from 47 studies on the association between handedness and eye-dominance. \loadmathjax} \usage{dat.bourassa1996} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{sample} \tab \code{numeric} \tab sample number \cr \bold{author} \tab \code{character} \tab (first) author \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{selection} \tab \code{character} \tab selection of subjects on the basis of eyedness or handedness \cr \bold{investigator} \tab \code{character} \tab investigator (psychologist, educationalist, or other) \cr \bold{hand_assess} \tab \code{character} \tab method to assess handedness (questionnaire or performance based) \cr \bold{eye_assess} \tab \code{character} \tab method to assess eyedness (see \sQuote{Details}) \cr \bold{mage} \tab \code{numeric} \tab mean age of sample \cr \bold{lh.le} \tab \code{numeric} \tab number of left-handed left-eyed individuals \cr \bold{lh.re} \tab \code{numeric} \tab number of left-handed right-eyed individuals \cr \bold{rh.le} \tab \code{numeric} \tab number of right-handed left-eyed individuals \cr \bold{rh.re} \tab \code{numeric} \tab number of right-handed right-eyed individuals \cr \bold{sex} \tab \code{character} \tab sex of the sample (combined, male, or female) } } \details{ The 47 studies included in this meta-analysis examined the association between handedness and eye-dominance (ocular dominance or eyedness). Results are given in terms of \mjeqn{2 \times 2}{2x2} tables, indicating the number of left-handed left-eyed, left-handed right-eyed, right-handed left-eyed, and right-handed right-eyed individuals. Note that some studies included multiple (independent) samples, so that the meta-analysis included 54 samples in total. Also, for some studies, the combined data of the males and females are further broken down into the two subgroups. In some studies, there was indication that the selection of subjects was not random with respect to handedness and/or eyedness. While this should not influence the size of the association as measured with the odds ratio, this invalidates those studies for assessing the overall percentage of left-eyed and left-handed individuals. Handedness was assessed in the individual studies either based on a questionnaire or inventory or based on task performance. Eyedness was assessed based on various methods: \code{E.1} methods are based on task performance, while \code{E.2.a} denotes assessment based on a questionnaire. The performance based methods could be further broken down into: \code{E.1.a.i} (monocular procedure with object/instrument held in one hand), \code{E.1.a.ii} (monocular procedure with object/instrument held in both hands), \code{E.1.b} (binocular procedure), \code{E.1.c} (a combination of the previous methods), and \code{E.1.d} (some other method). } \source{ Bourassa, D. C., McManus, I. C., & Bryden, M. P. (1996). Handedness and eye-dominance: A meta-analysis of their relationship. \emph{Laterality}, \bold{1}(1), 5--34. 
\verb{https://doi.org/10.1080/713754206}
}
\examples{
### copy data into 'dat'
dat <- dat.bourassa1996

### calculate log(OR) and corresponding sampling variance with 1/2 correction
dat <- escalc(measure="OR", ai=lh.le, bi=lh.re, ci=rh.le, di=rh.re, data=dat, add=1/2, to="all")
dat

### overall association between handedness and eyedness
res <- rma(yi, vi, data=dat, subset=sex=="combined")
res
predict(res, transf=exp, digits=2)
}
\keyword{datasets}
[metafor/man/figures/: binary figure files (selmodel-beta.pdf, an R graphics output PDF, and selmodel-preston-prec.png, a PNG figure); binary content omitted]
НzYUfTy#]c6+B2#;ׇXX&`8 $prR沟5Os~K xM+KL3eN?b]jB6Ouaʭvpӧglb 000,,lO>5\~޽{O2`0F{ɓ'U}ȑ`ӏM4/ǏZ/^Vm6rD"Y|yNN,Xٳ[N4, c o9p^ڔ?$%%c^Kwр ZP=.-u9Y[aq=xWI=GazLpؒyo=K5;,=q[4aHoǥTjPY!T%H !e$OO>xK@@WyȐ!zɓ4RhrZ%xxx 2/\_~eM8f2=K6nܸҳ4o߿G___ӃGzajp#(\q-9Gu e#݉Gv\I˭l+~#ͩ%8l&K _}ªaHj'H(FyavT:n`iO&禬 ȏ~!IBtbbo޼yԩwZbFN-Z(qaFX,Vaaavv1 ӝ6f;99ijT`n?PFheJ?EW_iT|dNC"37o`ҳVM~x2^]J>J[<yۏV>Ac 9~jKk 2Z+8q!eb]BA!Va.^v?ܳgOLLL&tRv,1+IF1//SEeeeo F@&v NWQ&pxw 3g{%ZB/!ǵ'XL}rz֊-ޠ3S:k q;BYҲe`{O7nlʕM6;Q4@ (#;]cPL>>.'k?kHBO r6k2m &&/gn˘ola?޴k7Bٳd(r իK\u;^&vE6>So^,uif4j-GA  ?6r?bqL6Q)_,~TB6lk֬Q*ݻ~1-p8QQQyNNdo߾ ˗}FT{`nW@ӋD\8g2ج#nnKPUSԻKa<ͱ5IhbB}jq},]:vXRR̙3Bv"***&&fΜ9ѣG[n]~كw{M P2'c׏ ͱ[I2oM={tk G1Pe-RLko|W` ̛0D&ųvŅYjbT!&YPn{B!4ALQω2cH\}FY5;h#Z= |Oa'˪d,^&,BZۛ\ ( lS=n glI~A#3\>N@] > =gFJguiiîSFq͓XkBano: =&kʔO\vB:5Z5\%aHo(8yu2i;8y^oshSsuݿ wjdLUkBano\2l3행ћ"o&C#ˌcVd@~YB}ƞm#sƀZ7uIE-$[!Bvw;\WFjʍf-S eSCN/p ˪AYJnVu ~c+dGN [Y#!*&v>[qWM\JYRq]}F FYAm'}h%d34QĽ:gdgܢKJv8!лw;@>ᢥ8~P -rh!ES<Ov۝~-KR]* $}*oۡ XĜ'KM_8pkׂ.kB0qC`$xwJݟ NL8n2"@"#feD$'$ &$:g2rs.?^trVέ tuEB(jȐ!'RRR~;e-[\jիW\nffɓ'8sLz]!""gϞNMMe2gϞ޽ѣGKXhk׮ɓ'#3sL v):}t;hrӧ̙ӣGSrl& 9 uP}1T#{˴D,ow)@_wwd vxʀaJ=WX9F~1v튎>{lZZZLḶGݳnjjG8f̘.aÆ]vݻw/,,իWǏ7k,<{>?%KL[\,X͛={@ iӦgϞp8ŧhAlٲE$ZO޻wo@@eJOQԹs:w`0ĉmV8~ۧ7h*nF[6XLqAsl] $csMD6q*ӛi0t\aBT ,<~H?e!Bfxxx 2xK@@T>~82"&MbIIIzԩSVѣTf4SΏ?h4'N_k.>_">߿xȑ#֭+n߾]շX> RD4oAt_j4˴oo&cPOLtw4$qjvUɐΚ>g@vےRS~{M{+עS՝Fc,kGBfаa94I_/^ׯ__Z)))h4X֭ &M:xs`Μ9:u*|PPPAZnի7lT[anIu ̳?wWcVe!h<֌3c䒀0]$.c3ߤy/\2J^ЉosLG En /?v y899`HMM+WOaa[gMa4A})q6lX===K7zyy@RRқ@%vACv>=tXb00k?9^&Gq۬e_?]f((}~۷4G(6ޑPmdq&uLT*NHH{S'O@ff_RL MO~ufa^$##Nۭ6\I[.Wv. I-%XD֮2*Uϛ [De+Agg 9xpr8v-Kvdjwk!jz>>^ i-_8 \q[^LVi]lc},/7*^}"N=!bˣ3sD~B-5.ɛPgcH\ZUSVcO!@xjuC{4U9Cf*ۧ Ë본ZkGaK̙y .7nʕ4M>}z0c S~k׮?ɓ|~ll1cLi6Tf‰'MBaPP̙3׮]g={Zo߾-Zh4N64s_ţZWgc9}TmbST+@[~ msqn ;|R}z׿/^0@"P5lذhw3v͛7;99]Pi&={vZM9rT*'N XrUƞ5* tSBFٿlǏ{xf5TƊ2:Tqlޜ]uZK>!m CVnf$לK ↓E$QΒzuBBBn8p`v7V8A TPK9ٳ̨_f{ӦM_a`V=w#x2w3t5f V3Y i0P^p,c~IcGwCcU^Y h( e :nݺϟ/ѾsN(u Bo=k0hrы&`'̶z/s)wwh y3(p[U@H޹GJ{Y=q3i0ǥ )!PMjѢEbbܹsL-F~8uꔻ{Ϟ=Uƞ1\@ O_' [DVVb0>T>{}#S/ MŨW\Y"fem:~\!Ċe!IQ-EbhBȲ̙sǧ]vL&3&&&33dnٲX;@dp1 Ӑ} IDATHUeP}c,[M}3[ޝ1UcӘI!eX W,?Dxu ;jB~]u>%MBFx .DDD4o<&&֭[F~z]W\ C ҕss+73Qn8wd&BdDHi1,x,7?~ײ1A'KW.W 2Ƶ pIՊig a` NBBCCCCCk`<\qs%+X_ ƪ<\X,a<n'KŗǑuHCkufHiX[ RlO0R?fmpBw;׊H  @RQ&"+}wdשF,K8gd:j),bFҮ,u_*ޙ"y#6 B &vGPu{6tb6pbqՖt8ngGKLg>%(toٝd|G1`^p'v,!P킉!uPqm-'ZRjV/\;"xf s6f,TXbل@Ur45sSAЧd#e ! 0qMlxAy@f=Ue8T'WҮKXN042m y[2å_rDCP#_dj2ť֎!L_{ HחO:bFK76XuLjy 9a:J}"ISqWo[b,bN{O\O;Aԧ)P UΆ=OB!d8ؿv|g uU;? qdB+[>kr/KUvoor2-4Kq\R~D$r7ײA,w)ͲBZ wHBZ 4ܟ y ZROwfH$n4&S. cHH)T9[n7;TFaWN Gtx<֎!NصkAǏv ;J#;ysXP#k2'.;nXz:xr>o%d3Bx ftn+5aaaA8::V~Ϋ7]YA;P4UagSmP#k_ Ύ k\#Rбsk!Wf an{pr-%9aO沟k !_dIk0q'$`zjR+Ǐej}\8U*LmyGv4 T_~3ȕQGXYܳ^x֞ Cأ# 0L*0,77y >>~̙:tr:tXdI^^^ H3CpKmݺ tcF S ZfݞA,/,3A ѣǡCbcc9N||' q򗅅 2̙3YYY4ƍ 8w\ 3lܸɓ'L&S<~xҥSLӧԩS SXUm8|pnݎ9+ŋm۶MMM JM/lKB;{RU˰1GkWMN:m0Œg$(;^rQ=Q]{c8,o$Ƣ+6Xi9233.]J9s󋋋W^ 6l.{eHܶmP(,**Сk|ݺuiii={LNN+))z*Ŋw|GG_~ѣUvILLrMA^^^ZZڀJJJΝiv(q7]X &s붡yk6!m;NjUygb]=g{'(!ݖ? e(fF_JDU0s9$EBAذaL& >ys$f2V^'p\ ڷo &Y< F`zP[fZ^d_|affOf0o AITtc?^6ld@ñV !!2c(Qꉬ?8Zi]ͩ+e7eK 蝕!`:ӵ#ޚ4w8o+Wb.Xz$IJuuXlYklllNZPwߎ;bcck<$/_\mmmw_sѣ;x4ReQu}/xDN^ы? 
v2wk w06ꈰ4=pҘܿq~#l}LM^I?S^Nة ϔ5Fg%BcT0 asWyLTt:],D"F`ggW᲎;0|޽}>}zԩիWIj'H8l_… 5bPn*p'\NZVuvHe*XceT+K Ke{|)_W$q^˞,:pա5edƳgtPTL5 IJ=b91[;1h?=; `اuv]~~F!ʪC8;::gddPKgpttaqݼysڵ111vڵkazO5'Z8::zb644,]Y$ڿ;3\̈ &2ILUi4DKe深MB+Wd/ߤ)a`gnnn#d=']G-'ZJA6“_{ŝI`]԰q\[[[H#yދ5?ŠA ߿ŋ}70BJMHg V\;fMo)(' e 0 WKe[,AOA[NU|f4oWV{>gQTɓ\}M/o0Lw2=[pV,={$ٰ,a0nnn)))qqq (Pbb\.rVVVTB|ɕJevv6annnӦM6mݻwرz~  NNN ̙3{Y/14:x(kDo>ךн<hg9W oWG$, h*@pZcgkJ+g3ps&~F[v|;J]g#cL&ϧQg}v 6oL]wQݹsT*%I۷Æ STPe7,ZUV@>}*ܕ]m[g*۷oݺ:fx999۷ nBhfIR]Yp)Q1G#p ˖glIZ+R7b0]LS#|}8;:kВ`)_( ՕZG߿tΝ;~zF|r 6mdggoP~޽{1 t钣˥Klmmw]vUӭ[Ν;WY4ߥK~ðUV;99ڞ_N0!&&Ze:tG&MYRRh֮]Yvٞ={ڴi#HJYb'M- ;tdɒo߶iFG/Ohsi4T#:;0U^JieϋXmڛuZ=?3FYϟ>L6U|?gА"ЋE[h_ /WuA]EB㷟2ǬYf͚UaG=zQFNU^tt488D!88Bz.]N:UC}uPA:bo)B-KS~h6\O)4lE@\Dmx?pؖQ G}y %.j9c(+USd.?t4F|RΠEƏ~֭ v  %ŗNA e}O f2X̥fpQ*KTգ3 K3L^t'o:mgZ8oA`Ÿaλ:mckAFiiiV*fhׯ;88 2D!ʘف4^֧ӊy']q)AjtbXFN;ZZѫT+˱5`ҿbKq 0ح49p W@j&AO?r_{h/_h3^q82Oen+D #(МoA}^Eejӕ!F3Yw$BWStxM YcAbٷo {GCCC+T#H@D8)r~P)5yfZYʼi[3Շ0]]P,8uYW5?g3w+&NSLltD+I|F$w86t҇O;v[n i Pnr@[KS~Q,qhciFmx{f6 Iw7oop& 7Ά5' #SF+S_~ 4 Tnr18h?k(֥DكlETc5}J$aBa䄝5Oklx+kLO6,jQ!3f0\$4#~.Lk?" J;cMǭL[!AALJMlRxQS)Y.mF";>Аpj^`25St.50^Hw-ppMAEN7?;A\pp0aO>7n÷mۦPT,#|ٳx<^ǎCCCo޼Y#0 oa YnLxx8ae#`ժU}577wvv9r˗a؆ jeˬtZ %&5 SՒzkMocE FE2߷XҭZs7xBٰ> 8yY#&J35  7*EE@DEEr\(FEE}駃 JۻޣGCr8'N 2dƍ#F۷o"?ew@ꏏ?ԩ֭[߿b\k+}ƻ|򰰰&xCwU?52jKJ=ZTׅFHraaQiUBH-לQ9O[*+5BGa7 ZxXn d;Krڝ>ReL;e˖9::޿_ X谰0̥K$9gΜիW =::l>::ZbwޥIJ ɓҦM/X,ݻ:T>SNٳgɒ%W^ݷo_6Duc[})Mc|@Lm-G |5+I]Kjּ^غωs9ic@rU_Rcc0s$Ȫl(6awj +w vw8Vyf޽ rs [ BذaL& >ys$fhӦ\.5`ĉp?e)))l6_~wÇ9r8ҥKꫯ̼~zv1bK@HBjˠc@=^'Łн}*  HJJ?LBS)Zʂ$'F_-bЊpɐ Kߕ $*])d,,&;ᶭTѸW+ Z'F)#ЦUdgAƢ+;;;5MqqźSVTyy޽JeLL Ucp IDAT^ݻwҼFQd5ZGZ% ppx?3slL[u~F,n1v(?tAzHW7EF~V[Zt`O|||Va0nnn)))qqqT ^&11Q.s\*q#Fܽ{Ϟ=>}@޽{z)>>>/^xW IIIIUiPn38ZʊGAn,.ȫIvJvn/\D8ͲT+zk#삇R -L(>x`?U)V_HoלA`‚C7[[Z9҂1rV-\'ѣG7nP6rɤ$.ۥK8p`JJʶm*$[l~#Foݺ+WWWWWWpqqzՙ3g\; 4(::z#G,SIAba&%ٯMÆyο]L{Nð6V+ YJDF6~R-܉9kv($l 7,)Vy]:ҋׅaiv8vĸs9ZXP6=L6xGSN-^>SkkkX~c._<M6͛78pð͛7Mgoo ~8Ud2g~O?t3gnٲ$ɨYf%Kt"%&-U@.iYrn}o>V(G-8񌻺a?ZWhؔXͻ=kƓׯÒ9=:S4=~n p<@zcb^`;qDΝJKK5 ~'aaa ,ؿmmmx:m6??p1bÇwʀ8p[377?zhhh诿믿d2jCG}D1d,@BsM5̠^\KBdآZF_z66 Ÿ:g4զ`L|݌YN&CyUcTLkAbkkknn>xరHWvcbbf̘/J۷oåKV|Nûu2aÆ-\[nRFEEڵ U&n:19rev[(Phy>afaB?J_žߊ[1[ˇ6۟8qkzѣGCBBBBB* .w vA+&-{$4l1>lx]zSܢN+L/\RTҘA莶FSo:t@T HKg#^p),eR[m4NvPr6SE+b3ՎC7N[?s 4w, Aw[JiBfr$G9JgK,>4_minU;0qV _~aY1P.n`R]&C/;\_`VRFLKFAt%# *Nְ,hvtزe0fYfnj;jFFIlogi""9{ 6WItnZg2bh"""H w 6t:$lx:Zj\iA2uzؾ'8ͭ2ˉ9O+<.q$Ⱥ5]K™ץψɁ$EW۩ bPnB( I|iAIo eW)K!eVZa2Ĺlː ~Lh_g 6{6wq,' SM?pAwL(sY ińW-S!dH2PSmCZw&_S!!n@ŇPkj `^HCZw`@Kư;w4 AC;] !?m~+M!Re5 +FQx\MeqY@2QicmAT'zhΕA:+f4sZ%im£m8RbsPdG7xLш%߮ bX&q ~tvv5kVRRR-..;wڶm;mڴ/-<>t+/m$6ԝ-$gmNۘU X.y˧[4"uY&XFD8œe~w'Hoi&+wKN\YX}ǂ H um۶d}Ν;[ٳgvz)|c:vx̙f(0̝@685f+s$-/Ki7#6a_(֐415CYG[NMJkemd b(#q_|yBBGB۷o/&OVTrT*g̘QPP0|"@xbB1o޼f \'&DK|8,,F>|H,ٳ6nXsc"\5R,XP\\6}С7ۗF@֭+S#5uL֭@6XJԢL5ڇ #{[o8 q>t]jĊ ڱ;_#P`7F+c,@z$YקU.۸[[#!HLL>|x޽ͧLc___\C>R\n3fx4mѢEsjTM9s˗Νsss`-[ڵkk\ƭ@(ְp>pQFI$?S"O8իW[7o^Fn\:x 6[1}u… ݻw+رcC櫯*))gLH %DZtC:|auat*?pu2T]׉;܃OT_JNa(Uxqztw4buvYfM<L/VbMDEEرc Opw~ǏWs_Ղ1 KLL|qlmmm6E؈ 6lؠV-[feeT@ XjU߾}͝Gyʋ3gССÒ%KMS5СCqgff ?} ԷGoW*5FFF޼yl[NNP(|CV^]ゥSN:.W;wnƍu1Oe7\dن>,<ƞp4kD(1^g%?U^OOkϭc֑ӭ@2~+W?]v[n>}:MZ @sFEE-_\Tx*ܗ=XDc%񭘔3U$g;P;wnΝx9sv]PP0yd.((ךw9;;WZ2e u -8p\.Wcƌ ˏ=z,--Iϟ.`Z8Օ^%K&??``F=^*e0^Vs^ק`ԨQ@ݻW[l$I>++FP*CW 5.Doѣ+<Էo_㈍V_JpM.}tnTc|7Rw3%r-et3 blPWqj+ R6beOzM *sTzoAL/fDtfFE%B3ܜpS]Pfúm*~ HcO/>rHhh(:tNLL\z*_?ԺÍdVv8qbС;wU*-}~ۀ'zyy YfMjjǏrh{{3gҥKt |ܨ-[V[[۝;wfee  z4jϊh4[ $>uTTTԚ5kʏS;9*X,T*+7$IԖ*sZ%^oAnL3&Vst6YYrq05ޛ]L,9r+w*@(8=(˗;÷oٯ-T$eAtݻ . 
FX,7|rUViS`=OR"mҥKTbBBBjӷlٲvZ~9[At955599,q/=7o޼k׮3@ff&ŒH$r… }qqqêˤL>?6mTpbK&M/KRmguuT`0:vOիW% l߾}DmSI 9d@[Bðu|/ubڍe0Y@"q\3i<,i{δ~`:9mYmfp#[7BqF$eAAATV0cƌ ͓'OhrJɩ򠳳3ؔ@VSPZbjߪT*(KPY>}SNm۞?nY;ŊŤI92~k׮988Ds禥{eWn޼Y"tޝ*]իWO4iƍL&sѢE<O.={dgŊ C?_A&p;\%LV}g^)ӄj7 #GYhFeX{Ox#U>ן7Ak$æBDWZ-4[aS#uvv+% U*U38^aNwUVtCfffNNNv%T,Y~}BJfkk[PP\CTZd]_WMS"##Lj{i#ۍ.o3''mZ[[={ѣܹsӦMׯ_/8qhV^mfffggpO^TT4j(UNLTYb#zω#H nZITPWJw,˹VrS'i#yS/~#~ a)tBA#F$UR+9IP'Ǐ_}}} !9*2,;;꛵@|||VF/^T'I211`0B!^y ,✸>TaU#]^Uj.\x (VѣG_odcc3bĈץK~;~[ťxzz.X`ȑqgd*)$}ۚۨ:">쟞J"dKLQ$9$5LsK)@ g5[r4]I8-#8iSoD󧌲;:? tr޽'OhڲjTIzYo=z֭[8rJT JxxVuvvn@yэ7ɓIII\.K.58hР۷WȔ\ wƍ;vlݺ\nݺ> ~}&MtRjlm] <<\*ٳ%$~MѪ1v$I>+|ӳ|3U=e?U6~6c@.훞5;PS焭c̎ql3Ň$I",D)_,pA \.\P6cu\TŋZt߾}l6NrACbQC;RJ<4G'fTH2S?VT{83F@wR%+Q}[)j,̾e^{X)6O]UͼMrAALL̠A(qG*ә@+5"/nCj6t/sTvG&_PPBY!*lU_+LSz!dsTI$O :]5}7b00ٳWһ<A4dȐ;D?PTDe@J/A df\g!%E 0?-,宐`=bf 4 ^)0m|6oWvךr!b?~vږЏi#U`@BsO,} 2YYfC񳭵5ITysv8Iiftgbl,A EDD$@C;R :HhI:w+'"Т;v>ѮTӃUYy&A?_dK_^ I*ӳI ,#Uc0@1 u2!(o6{w.֝lN_!VrQܔ])Za@op 'B6L=(voڥK R;#Up8 9`UMNI[r T[N1bjDIuі<)i_e(Ɲ+&Zpfk&6MP  @;RNL $@;5"ޢEw\:eVl@V,fgZcxJiBt%Ql$5jn\P$;A(qG`$Ԗ#jE_*W7 r2Ha{k5EVOgƅ6޷:4J߱{'W,y_߱  @;R5];):hAL{7ØV%43 D]wp.bP_C]XB v4<#K-W7cVjrCqdF[9AСH:28T\3P̻p,Gs,~U;V@XY*&C3{H>!0xvh"{Ti bPPT ]i8q$]M;5n*D T.}on9)$w2r*_p!#4yH}j fc/ U0SOgUXD"""0 0A7:tðYf?ޞxB'ςx(qGT{Tge=XАWj;كV[rRGeRsǎd!а!Þw8uc2Թ9_PV έ]6<<\˖-0,++ \\\0 ~gjzm۶ennnSNMJ%Çgx;v yf 6lؠV-[feeԳ,ZɉdnzΝ$Idkzzz2LK H#j1a cRLwOiǵf ձy{mIp9<hwI-CG˞QeIr{wao?˖g|躬E&76U_ }-eMx,qY^A\.6lؽ{޼ysĉ7n</d=xn:}t5.]L:ܹs2,88GZq.]͛ǏﱱNNN5#IlmmO8W_[-__=z$%%*ʄ˗ݹs)))wrK?ke ֵdo*ƣ۴zwظ;|ڵ/^ԷcLAAٳgL]v+W\re||<5aLN-͛78p`͚5.\(?|PY8qDVd[ɓm۶AQ̟?KOOOJJW4Ď D4d.ap(+ ՟<+lgj?z}ȌV /Pqgaejl; foV{6lzGbas9PØ0&''ѣGƍSTW\) I4GGqo+WV(Yb(Yn̙3+?eoeKeQeOZqGjщɼ^*=ޝt 3wd&dHdk"BĞK[VRj/mR*X"4"l~m&I&SLj3>9;YY!:=P=),6M* :AA$ eN(/0VHd^|]$N`8 0؎ȹZjpWck})p.NOa Ʋck>BQTNN۷SRRo޼iu:]NNԸM6L&S.kݩ*Az$I cRcP2*&b\&ֿc;LZ~rpF]nAcXF2qd}Ԩ/}'O7غuXc\CIIh$pɓKq %M;bFIp{3Q\3p:;EBA(JCQ xc a*ىulDj\xE>`qOWM:^>TEZv[ hF7v,b-fDz6lؕ+WFw޴4Dr^{=8n4+**j2L%%%@iA\HӄI&r[f6~y13b71A!pF.-01sٺn*&B@Z:{2٤֔ & yEZ vڵw޷~m۶XHkT Fܑf 3ֆ73Zsn>`bݫ͝G^,&ˍKh"Ϭ(y3W7v(b#@ƍ/_G%ڷooNg̘qwy'$$DTo~ĉ)))111 iRPwp>{^)SLRl^^^{6lسǻw޽{lv_p] fܑ:B~*}{#g4J=z_]a,2_7P&xiN7V .ԗ8S a"VHlt-`6Xiw W0D.&bo@A ܑ:wn0ďa`*u:Kcƛ1`~7W]-=$o#ָnZ澢fq1&JBvHD#B|=DپwA ܑ:"@CxhJF Wv laQmOpiPP]ȩ-^#bkJ7QRU?^ mxr(m.'Ң8Az@wB4w#= ,1bʖi tB2/f:kwE4s*W%\s6RP)U %6Aѣ-Z`0Ym۶UV&M̬/*H,XЧO>9tГ'OR̞\paܸqZ HJJ0,228;;cvS+F~g J%DGGcq֭QFD"P8hРk^X#@ϸqw 'btEJT%-B Vaj~yiKOq wttq2ri%i6l"N/.\^_lN%LQF}7//ѣGA7nԩӺu뒒lvqqS 6{l鿽"##|~=-Zlڴ {=ٳgc{nWԙ3gw~F#JϜ9A{H=' O5fpp[3gX,NJJH$Rt׮]l6;999../b-h1`=B5w`0"^b^0ްۃ =EM AӋYz? 
g8"q"M/_ω$K~ϓ3*8n;m^JKKcڵ?>l߾=++kРA{rcbb6l9tRXpyx<a۷OHH`z;Kkرqݺuo>s̾}WZU#xtܹ^zNsNXnT* X#ӉEgmhtLF%h4n(8qδx{g83Gg!M+:TQ|eZwqPb%bxԨQՏ^' &&W>}:I999z>>>̙SWWI&U?RkÆ Fqԩ|a{r5;v#&LH$7nܨK4$R?Hr/(Ver]Yv6BrU!n`EwSϖ}_`8fl 8se\gk.=&ĶDi2ZcLل4AAA58 < IDATG5klR^ //`0F77tر:t9ٳwF:w㬬?4Ucpte|N֟,gKrb(TKvK4YAۅӗ+Mh15r"?\x*Sw]A6WqىDVk_Og6ArqqyY/_~5r.Y:^?`1czq-= i "Q{*ɤZScP5x0܄z&I~w聑ø`ՅY`(vKJ_lrˏzD4Assj7Qa`<+N]>.ZQ|W?b&ʨ^PKv+{yƎ98r=qȑ#?JRHovaa!EQ&?@_Fi&Ba2>|صkW6 uoM&j T)GDDL9r$=k׮͛7FR8Xº]rĉk;w̚5wڒL.]]aL B8 s&;.y&R(&G*\pa[xVEk᙭  **l̙SL/ݻˍFܹskPPPAAU3ui-666''gĉC Zr걽O<ܹϟ:uZdx Zj=>dȐZofC셙\}YYYX,^~q9l+t$LW޷^G=(oH6)UՏ?/-[\zGT_3Z}& e'mH'v r0KUYpѪӸ!.SF2s)/E[=zk߽{7< <9$ Ν;׶-]O(I&P2)8}D g5Lk:P4Rw#?4\iىDpះ$ 3J+z["񝅳?Yp<\0ЗݿTZ >nqXeE 9| N:Ej4׮]"̛`IIIPP#582C=uDtD^GlWn#4PapפH~܇xсh㯶d6gdy} taNc/7ƞ4I*%N2p8111)))eeer֭[?S׮]ѣG)FH#23p6l>\je"qԄҊ2 FrC5l SR- %We}_⟝vniq]:?+H vBe˖i毿=zX, rqq߬!Dy3"(5 T5 wepHAZf?*#;vZ&c++%N8>[J1w\?*)#OH'VYb֍ֿ4;OqP~.Y2!"3_uυ-OgR˘Yʿt&[kHڅAL-P s؂+>&;}??Ts`EZlC;vb$#SOU`ر}\DFi.̼W9r6q 3Iƫ gq`qSK'3z9eysy\!Q5z1wy{ e;ϒ+VY-,b"R,ͽ2n/+Ci Ѳ4w VduUQ4oFg##+_O<3I 5Q(U+GrLX;|BG;2TNϖye0*%}H? 4#-TEbȭ\,<҇ >PYWp_BHңg=|tXN?doW*h^*ݪ!M0ԟstkBQeG.٨bDiZ4psΈ#|{.]<==zkLJ4e@09/SPOݘ6H)= HHbH "kv8R_1s#YxP>KvV3|gdzSz}]֝G@i,yN?P(HfJԩS:uںu D,t٪ ի%K@P=DK(^z4ZX9[GKpq fsDc|کHī>aEJʭ!سgaSLi@?f>)Ϟ=b OVsrrJNJQ&V OU-E! FSнa#OWPTTa’lŬ03 КcDDi< wyb"A}Xʜ9s0 sr/rŊjo/b Om88ڵ+66Vӭ["MRI+>V݆6&(hM p؇pPzH-*5mb6i[SwvA-{#H#/?sP{afNg/\ֳ-4*s 2[|+#:=ኡl Uð;Qm^=+kҔ 6~,5){^_ egCZ|9=j L9ǏD"MX\\\D"QvvCFLd*#֪ݍiM)ԝH& eE7cѳ.>$[pLe[;$I5ivnmP"+&["?s}̖A.*****鴃 3㞀Zҧb7BI]kw7؟@bZ#u|Ν;1 m+47rÿSK<d |w]hO,uHp+دv9rJK/I /&}[zn^rnibbb(:ujIIIIIIEEpŊw֭[s̡(jR<55Cs}A}YNNN=+++O6}K.̙3Ow޽Z+dddL>]Ѭ^Z"䄇WVVN6/ bsfAAA;v`٧Npuue2'Od|Mnl+dE! Ie-0<g-Q =t?RIwe+֎Dbj) O_nQ;t]e<Ɓ!ϊ+jw)y͚5ӦM(jҥe .7o0} ^v0 BBBҥK ìY/^޿ IR)J{M4)==}ڴi~~~AF |}}Nѣ3f J c`lZs#Έ*[d "!.*V"gig\*cS{ݛGh}פN]lRiN`F?,EQz>>>̙SWWI&q ȉ'r6mtXEQ'O|wHyŋK$/ZȄ3Ueh-[ܹs'bwww&i{BܵW?P>= n.hL C^U8hB"#A΂ Zюq^wf(X:h}Ghogk:TeR}+M4oζbk#ӚjMӓ50BxO,˾ 5..5F8.sss}zvv+** +z}j)n4lo,OG!ZjIN^/=ر |M.]pFgB"j.".FX' dfN"ݻwﳛm۶avEkEء.,0&gmV`t~}2ږH>~&Xеwu/%" rۥ?ܔ^Zyƽ#v́b8\{|]N<27@¨78ؔǏ[=UT([!0^ p< l*m1(U.24_k#}XBn4hEa,?< +ޏ>!:!2_ʮKwn\hJP+^.q<7.i! +4_k61 ȸu֨QD"P(4hZmSRR}P;N8ܹr 0 [jU[BCC1 +Ν;1 kӦMD`>}|OOϡC:[ n <==ׯ_o2֮]ۭ[7Ċ !2gurdب,QENOˎ74ӽF!ymHktLc +"fOM;.tT7gΜ޽#G4T*=sLlllDDRfΝݻw߳g۷\njj"##W\I_Ud;w@^^Ǐ_pLƍ:uZn]RR...>u԰afϞmzfܹs*++2 M9ɓ'߿ܸq**33S&$9`O>AyZ54`l[.+3^ͯ9#4"K to"\+Zo w<b]"ԛ(ʨ4:[9<}o̙#$T*ݵkNNN/ˋ(jԩ%%%%%% .+Vܽ{'''WM'''L& :EQ]Վ?>''筷*((())dqqql6{֭{Cm6k֬?ok4:=8tL&+,,vZvvJ:sLΨ8R;:ͽD2]$BKT-qd'_]7G7%k~7mT;cd/OzNJ1)iY0LΝիA<oԩ;wuIRXbZ>>}2ddffٳ=""l[|3RGIw yqMjy467yH-4v,m@_uܝG߳$=㎖'>;ӥKG3t:=z qY6mL\./--ܵZmJJJ탃G5k6b d2 M;EQ111ӧOeX͛7}]TRyz]j6Os0l8t_e~_8SMJ ,trpiM^J_>fc}WwM$՘.M>QBCw FlEܒHKkp[ Z rˍi O|y(ZݓL! EϗPTTnnn8ƊʙL|>W^:իt{ŋsrrrrr`t1ZUJu!6`fm6Xv~X aCݿ?lڴɪ!"v5 4 #e|g !nL8&- qC@vE&ݛּ+Zo>vxJcǂ%JW\L1?UU&̃i8ѮM:=`ZZ>tLqfNO]R7uZdy&$٪U+Thx<=pj2wKO޽EedddddtW۷ >}zg 4Ax3@p{]TPmKs&Zv[i)R]bl̽oQyAb e[.Z{_T Pac |~ɓ3fXzD"Yfͮ]0 [fMUS@gff8qbљ3.11qk׮(̙3SLYfY/3䢢"Zw KKKΩte~SrLNNQͫЗ0mzĆpW'!I4& ubX8wiL)Ngސ&*֓ MK,^ImJ}^ 䞡.6IwP]zi:wRF# p<޽vUT:;;<̙3[lAy2&33sݵk޽{KJJf͚R ;;;55uÆ /^tuuwCg7k0xM{env̴24U[?nfB%?}I@ ܝ&5鰱ׅ9a>/ѱu/uWRcz 7.13̡+*ڷo_ӽ{ݻmj„ &,ebx/h3::R}uq:uꔟӧJ1bŋ~ZhFOK@*IjYwpdΛ4V&$Y4u|+f e%;`Z>[3ⷲQwAa~^z__~"gϞK.MOO0`DkldSF( \u20:vQ<4ˍK#К4ʇ(s[vGɆ:$u1rn+dRhUAa~,k޼yϟ%%%W\/|C=㮢d=;ؐ@ݭ8: c(Kʷ/l]0.{ 35jtjn|-!!҇}|}P; :LZ". 
Ϣ7Ól%cW>^sVr!ȫ( ٳ U76Xa`ҕ& g2Pwf!A}UGZja1ǁ}0 pø*I`7Jp%m+J>v!/ l1%W]ȍZ8ߎ/G6vRŚ;]^yXC ##3F(CŽ`~cЖ"Iz,]D%A,LyD&Onq\uYuЪ:jk'3J> bX}r{ AΛUԠ;jjw(q X7 PKH'm{tkd*=u,]f; =/IueIwmqu14T0LƎKyA0ue/;҇u6/؀eRedd &NWP$*n>G@ ܭA=@q.X)UJod Mp{,4g!(S.\~6O=21T˜HN0qPK44guNQǏO֌ inXP{?{{Mrov a ,@QܻUGV:jVVm]uWq[ (" d#-RFIy $ɗs{x[eV?ceE58 ?QώvVFbYLk<^ s8"|n&զ\6׍ öT|Ibiv}͛7֭۠A]v}с@çdpY`Lr7^x.kHG'8_ 6|hiy:dn<:p2XV6|01r_?8_&0g9)Y1cƔ)Sn޼I4 Ì^xW^qqBt`hIJ2I/@4&`>E58 ?ae$!*}RUjs#0͘O%:H?͗fNOy_~͙]O?d09)wW_1 _k4l]rKJ~$ق/r4vAßZmUi=$%ɴ$Km_ux %1:99Ap9n)cCtoJGHb3aIW_'|2m4. n:XtK#b!6:!EՅ8A QP> q\8I6ū/5ڍ&90 DN /R5eʔ߿H$t a#6 j̖H\B\B:?ՋЊ}OBy\{ŮeϟSj*F\?J\R2UU: aIj붶EϯpA0פ4`eY  -\ؐ/7$@ԵvODdtnn usq .rϬU`YmڴQ* >ZRRT*[j`DK8Z5H0}֬Y~4hݻz=\~}ΝzZnBx7zidA-#ڢa).ӝrӿ2 JNF4VfA0 Lp`apoڴݻ###;xcǎ@ddaÎ9~`Dż^[G #5,[TUYKp;&@=cfJψշp{{yoPg<k2oA0ڥKŋ+44:wo? V̙Ethhĉ ˷nڧO\߫W~uQ{ "jkmb%^M JAXnpy[oI -'K(*60ڊw"b S.Ys~?1 L wi^;t۷-Kyy'}M۷_xqaa!ǻs/r IDAT߶mۋ,˾+#G{,yg}6>>sγfͺzBuu͛ ضm[JJ I111[l ۶m}l6r{N$q8ӧO?I֨jEBAIIIO޼yqs;iqFf o/rĉ/((rS?'O캈eeeТEzWEEEtCPj:h2jP. = ۷U;0 k;\gWﺿ?*))}W&>}\=}L .WN*xUL oQuE?13.CPǪ aފta}CPg<EƮ՗!'dU;!S=''NZcƌ{}p 僿VY^Q*M6mkM6lBP:tСC6mڳgM?D};N{!:2.](̚sL80 _n_;1,aD:f!3הwݐy:w*Qn Io{*+˝r͞6beg5\5</x@BP s{aa@ w͛7]r{QӮ_҈Ao]Kf=T*aÆ'xbٲeqqqfy۶mӧOOOOj{۶mr|C;76zph8jOŒЖE%(u cvz^UH &0 sL a :'PGƸXn-=Yi0_faU; ºkjju4((ՎgF]2Lܹӱ0رcV >䓷z'+ Ցn:jO.Wo (/tի O9:\l"##N^QʋzN=\~Ƽǟs&Cs8PF6<;kq~}௉u9>_똁vspHHpp:V' 9^5, ;$`\~[1oEc 6)6xXzZ&u |`CBNvm[|Egc71x/;t /LI&I$Ti&5kfӧO޽lݻK逸bɓuu:ݶm# W7%--nsN MYZUSŝ9Gnۤ!3ט [<βMSZ뚍 b3@kEsІYU~wi١€/c\?[,FQ̙3_}Uǣ[bfKJJZn#Jҧzf51ӽfĉEEEmڴСCg~oΝ;ktҥ[n aY`0\rܹgsX q .t,햑ҤcgOq HPuVYYoe?]gӢ!&)ht鈾&3  M {d $IΟ?&33uֲeȿFѶmQF}wgΜqt%Kdff6i$>>^.oٲE&[nlٲEw lҤɓ'[l'Mf0^y啧z {qzhJ̚sho) ˅2~"',UY KPyDcL8$"CA 8z8gGqZ0HnpI:tS='s86mӧ^iNlϻzd-\~^{Yf bĉ.\hBfͲf͚i4GqСKv:4\1[R>ƪF'1: +۵xZVnZ+kw2{>lzq[q0ò'm6NݩZO: Ck J/~l.))Yvm}Nii)˲_~er|ɒ%Fp֭zrSnޒiCvɧ,5FR #ӕ4/Iݔ]HҚ_wK|C˼8{Tϝ=Y: k6ꛍҭ)a ASv ՊL0|] RNLm$悕Ey!F`}ކwOAFlsMi! 5rX妒lMy3R,?9XjNlA. ׫>77_ \c(9 ;VLؖ`rFR@P.$HM5 SV}I8?bFR߫܊:AMH禝iOR$klSy~' dpPr  P7rрBA~:\T:XYvNc yv\v5Eɞ,ћzK8DGYJ_eN϶[j};>O) ,i3w *Ǖ+oX@ ΏDM򷎪K} ˗j?xbQymV2?x;E3-U>ϥ]rV{Ң{Sm\D%6eO]疛J)lebkjk;%o~ij\ &>G*q0 {jjѣk?LNN~7 k\)*?DI x@c4EԘ; 42ijn T#3^ %Y g0\CC ~ m՚ꍿDswJM}a>e˖T:.aaa 5]&ߢ=n4͓Q]tJ+]Ə!D3\5xY?6A0um޼RTW1yqrR GVTD Z!C4Yj=ޝPgqϵ>ת®O6!IaH[>|իnjS{e۶m۶mwa=o E:3w%9LކGf)[*6s\5,gKCːtdim:k/:IP̉S hOdX N<[U79?^ҡD=:pCnW{Lw>bne)* r_ZYZ: =(K ip;X_qşyA9rݺuM5. A$ӼSF }V͹vF`ϵ\(3n,ق2W'ѳQ!Wfg`fa~*-RSq8G5xQԉ0y|+?)s.R1~Նa9s' ֘97$ t6m.u(H๐%ف(]ݣWث7l– UH֚}`hX+fq姫Xל`6쥿}>-S11 sR/[l„ fYwM(XmՄ?.֜A $t~ %} >c-WӀHDe$ٜk{ňqB&΂!^6zeN]@U ?:Uu]ٗ絨`c@ò &HRRRBCCb\k$bFS=\9:3  J}wu'Չcm6t. aAO7e!BC ;%[{!|X#fvY >r1 w}7))iΝZㅇ3 vޝ_!%N 2PfSbt2 TU#8~=UY:Ju#,Mw&S~RA8䝮c؃s } ,;eʔPTTrss'MIJٳ333ݓm]MLW>.Y}M$݉#Vz.ub4Qp;ƛjk݄:=)MTWϔyN 5kWZO$$z9sO?I1_יI^0Y3}/A𢻋\dMZݬ,;b\Y6n\~1|Vg^ͺe:@^|C.ucunWavո*-޺y''}ܹ >:o<+c!xNLɒT˭QJ *nt-qΔX[r?Hy,Շ AH|=)g쑙mUOWi@1t3%k^39rR߸q# @*m? .5FLV|J2â24؍x.\Ա~U}ov Pu7Wl a̝fXnv-O7/):= '{ttRT* >x(&&ƨ+CUN ja8΅)Gok.&ۅk6^tDZUu0͸Hє(ItYsu.P1շ9enJ$ŢEF cY{1'^j+Ҥ–p'ќ~-0wݻBr w޻vbfŊɊ5'&%h!j ??ɑEl6"""&M7uT7|f[&欝&89s!]Nܯ+%fz勉<}zyyRuvAvjYn(̗&JAK,%>/.WZKj:P6fF<<tXXXXZZZAA`u`(,,\zuӦM]kl)*S;iG \zυtÍqB?$KmYFw;\yU[F _rU-CQj*-`raaa\.c(1ɏ+G@4PcH]B7J"bMf՗7Q\LU aY\kz[4EB% syPKfsV -nA2dXowVEYd9O m soݪAUO nw|.1OԍdHnZY喌R3,O<;)&s[t_RrEeZk>3`B|VDrZG*@R IۇF2ο\c;@~li+7 +esm0Mc.$E\3ҡ@Mex<4V|J!aMWdCy$:2?q (&. udN6y^;'Oʭyq0߄ wCup k IDAT`"HvOjsAQLxH>#4{zݢdhcj$A ՏoD: Z=? BQ| e:ۧcu8>n޼#|7n(VWwAd6Wm_Xu4.I*=v|I/٬-W %EEE$I<<4O@z=n4 % )Φ>Cu?K|}A6ٷIUvj MEaj~d34 ;c5?$]+Y*DD^vx@CqԿu{n޼ /@ӦM/^w?yv}ɷoFqB5)j-i-'ElވAu8:ˣi>_ZmԳD;Ⱥ2vֶ_cgfq00[}6݃^20~8agƷ{awĉkv5//^ٳgLLLnf̘q՞={{?3 i4I;mG {UY2H>#4{x]{ ~F\! [,2! 
Ds Icp+VUQA)@@ڄ-: ܱGdk֬a2y<^ZZ,YEF'j=ln%N摴f֜E89S(f5"̝${[ iY{EPG(8ݙnې5^?Lsb([zY[CpRgggKJ.]rA0 `rDL+q2SBo&[kUVY|g${fo3߼:G)bl`؀ͤwol;fp9r/ |Ippp-zѢEO?^}aü0̋p+C[d֜O!%`eن_g]t?쥝uݶƔ)Gg6⽪H)02xlAѤI͛sl_eRb+Ykr!Hn0N@RuǕbI h3Q07EG}Veqzch3YYYÆ H$AAA)))bxYYY·aЃhs7@[iG 6]Ƴ[p;/ݙ|ӽ)zI"B͉zonԢW#A{̬;s>V}k]7T:lnRRΝ;Z- gF޽;))/pCJ0Pd^Z%Y0ΩOUD!.‡1ɟ>9+j)~hagOX>f2ḿ~m7Jԉ<4w^3!p?p… Y2eJNN`(**t&MbYvٙ87]͘R $ti~ѽ5**U6|2=PV§5y/&!a@5;Β֙FIf?UI$cccW^=g aCu[I:ֳH&0J$A2ӽfOo*"5,NJđmWVuIGS9:9:wc»uתkJnO%mN wGܹs|t޼yW170pdR_ V@Qq|M:<-H>#4{Ңr Z,oUV"->M2rn/<{f3qQx+Y؜ ˜Ds!'7Ri\~0o:34C6F:@O-#ϴ0&^p Xh7j:cӝaUMH.Qg0G)9:,VNJRlQC111.aDtßC! t6m6u&(Beo;܁3ҡfQZ:cӏϜ yïu^:Ս+]E{tٷuo.#;?1xPy-gK̾Iវ ,`k,.X]ŧdqe<[&P@f`m/x`R,dMf܆!՞S^+^t통\vcO;Pg2"AyPOo]5xsŮ /̟?_  2$==]R,RӇFĴiܓkz@m_0 E桳e`rVeM/‡1g@^ZtKc3ރ$tR$@Tp?)nW;Pu tzj KKK+((0 n2 WnڴaX] \ZoXP$&O-]2aLn"؝}u9El:ZN{zL *nRk<{X*++S*ܫanВuLs,u% Jgh.րmp&A.WQy^J>},^<ݸgVQ~y1 >zɺ_iwYYYÆ H$AAA)))bx0mC!I%ĩaCBf5YxM$Q21k; )biAU+>׭eWjNr!{ ~{U6l=wMJJڹsVx tݻw'%%}nHam&(p}B!lA ht9p†{:&/|jQܗ:^ f-߃>-9=֖=MhI~ ,;eʔPTTrss'MIJٳ33=n:24 lG &h'DϮ@cb/Qgi,Q2 kTuʵXF)i K8e{3YC*"H|ȁ8 K/Z~7z5'k֬Z*>>$I I266vs1͟~;b0љ hG JOq BdÕ;<~=xZQ: ]0s̖EUj\?+eW-"q0'H ~F˦%mpI"XGz'}ܹ >:o<+3phtpt\9뙳&pIBoçֹ__Z_B Vsxo٣t\宽%8R wgql,1<>5'7Ri\~0z @kgϘVڑKvŚ5 HH a`u k=\:p b¯7QP w<IQ%)@{8zgw^?~{E'k&>'JRT6㡘ð{'D FS֒pdb(|oڪً: h&|GŊZΌx_݋Vtb))7*ycF18>Ky)<[8)ܓ`β 11E0^2 Ff%}c9EO.G:?Vy3ǧf=mY{"^"E[_Qq >]c/& wx9)ϟ/҆ RXUTCMKK#bڴiɊa{hMIZΫOְDp|V*QN@9kUoك:4r-x2,ٚfQuu잘E R(ɹ:Iz7kmT~ؚg0 poٽ{w޽ U({޵k0+VHMMuOV Dr9W tvt%r`^twCIGѳ;C:Y[Vq҈;P~3"MlhР9SI4RZ3YnXn}J|?'OIQf(*""bҤIyyySNuCJ nLJd Wz$PZuB3zsXtPQ\e_2{;JGdxY^u/#C* vj4 ,,,--`0ܺu`0^iӦ·aқarjVd!%'PGk͙HuƂ(;qruy3Ȳ xa<^)2l)ta*7oN]kp S=(w#7MDIស߹s 'p!чl3L*ݏ$cE*LAONELl-?4觷h|h;0߼SywQX0/~uMw%AG!{(QTNQgiftY_e؄:pR[3gΌ!|'Bбl8R( OY3 ¯33%ډPiB:f3Y[Ƶ9ls-V)0FԽhe-FQJ$0 tHBDϋw̟toJ?/8y?~8Ijժ{a؃D7>Wo8h0 _}%7 H ϶>[7%Q5 ŽIfw/8nudiN,W(ǵk|V0_ݿg$)8X:=)Na<_j>_jvQXBT*pՎy>|NM:f& <̔m 27]ѣhl`Пdʿ:[-If{uEV[;]eVէ{W|kÃ{hZȟOZ28)j{`#\0&uX= >9:tV;4 5Cŭxܴ(.'6Hf4_0>E%6ٍf8C?\⣅QTg כpIi)SI>~c|sQWnON"-c#i?-%?EU$lSd0-U" vWoT=zt+lᡡ-Z#lŒv_}܅_ksC#=tE; փ7}34~O)}w1!3:6~q/\Z)XUG,D 5QxzP1ud|/W Y _/bdwd`td]7l̚e]PlX?W3PiEnônjaW~M,c1Xlʥvw wVWwoy"`#C XmjNu..I 󨦏NܪMt }W}֭Hr`ؿїϜ7oII!%j%n{̩[dѡ"r˩bsP3{)ys>㲠]c5Dߥ7Z,QVRPdij+I1$A(՟Δ>oxoaf1Ɵq1_Xp1bDa/5J܎@'kdi2j~6!wuS?8ON0\ȩmFݕ^SU]Xv/V w1WML7[D$޺ߦRFR-(V[;UlVi-sxc$C_eT@͉imEpؔYwл߸!@u\D \_ӂϟōJMwPiLjwsv=:??6`Xz {ܨ+7pL&y͛7{U7b=g- eKRV8Uub0tԽ)R\a:C?q ^d}kw SQ < )WBpU{5ݕe-'@b‡zHYuk@96A0%Q$SqU? IDAThU0ҏ_[mWGEٿ`:u{V7yDz7==X6rT{{{qгfwף>P`:Wh C{t݅r}9-ČD.@0sLر#ཪr"B(c ^PdQLSPm]yA^Pad݉ .m QW͖g+0wD;ivN[Y wBPz9>E;F8 ِ?Sb(np_lY㽪w1msάg쉈.DkOi)J( -E_VUm}-P!Xk_,3}GZRX2sdϧ9L~O2s>}] -qX,JDFqG񆣚ae- ?ٯݙgR- җCRMG:.mC'#RT:TBB;/M䯄xMEN2dK; L…G }K.^zVպ;;7gٕN`ve"u'svV8%;N@0Nh(MvmtA^bBS+dȑ/,+Vx(qQ,cd +g6;pxC͹ZhGzt4{v;Fl o6㢑U W՟(Ffh2-O4 60UZ|ݽyQYaECȓ [lꫯΞ Ns4/7‚ˮdf*_w@E%Q\5|^LwRta!B*ZjVY$PQvT(;"(K.8KIgvxuG .]pJGWܗ/_^A|0:,_{wrJwK`c >p^samcc.^qjEA7:?V_@T0U#C hZ{RbэC1 hlA?ј #"kXBD6z?0^v4Gm/!RqDݟYm>݉FQ#EBtR^y  ~5ֲDKϭKwe8xj}m7iu՘YW(?|pFF1I<VWJJڒ=HiLZk 2繍f}tgF75SѢ{z5>O&qj'qHUυ}ߕMwe8,!zCk "U,^lYLLL5:tгgOk޼+P2쎻NWٕ&d`D} &B$&~~5k"7գ:2%C#58PHvomKwԒf^L0~qƍ3Ν;\.xӣF?~"i[t1#hSj 9k HXulu*Fܧ3.-Vֿ/H%C(v΂%ܧ:?ۺxyS m3p);<vZx1駟 Cb?[vܩP(.\p|1?lGNmoҖٌl$1ͽ -N%}!㠳TKw|. *Emq-E˛1wSavQ?V5,HP/^Ν;Ob ,jDy^I\[&Cobr Ɂ}Gܳ!Rns&YHM6 RO- SaV C~Y ƌSgSRRBŋ~= @͹\SQm/'u96H@0՝v-aK_Ƭ"q8zĬ_dfMLҺ"8;- 8 w(om/0>Rw8ĿQ,[,s,.)1=9VeM'Ztavri7;?֦7y1SK< N<˙K _wq*CިQ#RYTTTg Je  A^ʃ2-C&VpX̀o6XXŽ沙,cA=xo4sQ2:2{ǏR]pWn)L Wsk[b!qpOHHɓ'?I&@-A^Xs.72.kTboQLCъ}Hر;* (J5~$8:[8Kls[wlZU3q& r]s! bUz왞n6֭[;vHJJJKK >}O"sx[fw2EM` jBzK`J{fC8. 
mnѝƿ0;3W[1o.6L ԡ;NFU[>GԵYpVZzzzlllfffjjC 66O> YfMXXO"N^n )9}nG uYͅVt|Gк '64h1l ' tAWwiנ^寫,HHw$xڮ]˗/ϟ??)))22۶m;}ׯ'''{?$\N0A<[ Qzxlhl!Muh/az!r>4R9iL2vj{4|C9|o.sy.N8q߾}$''g̙b @a 6 .$` Zt-~@O)n;LK !%_,X{}|'Η|L)n ^p7m#bxΝ;Wr($=H>\;7 ' N=mɞ;MYt_v>`q|Y6U݈jwH|V188U[;^@QuLh{Ϟ= [I&5n8;;s!Hehd;LZ@f?}ўUXA>ŭ]l4vm XR_ q#߮ ^Bs*NC?{Oҁ$c<޽N9Emܸ1>>~޼yN[nL /H&-9e;̈́l-@:7`xY~qyFb 3ppN~yӯ.B搨_frjDѝ<;w>rHn4Mٕnݺ <޽{QQQ7n~NyA=I>:yGwܷ޽aJ[gNwcaݐN9Вd)@(Le/6 uǾȝ^8Lq9! -h׌[8=z~{C۷o?5fCJғ/v,$g2:ʻVgݶ[bqt{64nbQ\2i,;Njsm0얏@w°7쎿,֎|/ʒ88w{Ѻ{;w-ta2 H;REqq8T0N*)SL2pgyM6Ur.l,)sfeq\$OQ77(|7h@Jo_qٰ^%~3hǫˀ~éu?,,TqUKvh%mx9_8̄xq.FwI*(u:XVؼuBTfc~FQ Db ٥G2p1MiLz{9G&![5֎]פm#0WruxdU`/v["yѽCUgB.ڑa ?Zn [nu"$vں[HIАQ|13r7 ۰idC}d#}{|<٪݁A˃,UU^Tl_lFw[p!N0Nƞ_@6o"OQA?~0 x&s½•6&sPO>7 V^4i%g'7Y'٢:U$XNw ċ3w-y\0嶫7Vƽω [NBBB-*% @*Iv?'ֲD_I[$`o5@%sEw"{`ȸ8Yď4i5 f*YlZ'rWub/[:uԩSA| aE'E ܳ޹nBS6 [Otg ,ab+ЬIta$V U9`q:"ۀG+c͛vp`O2 E0 yt , NX60jѝda4+Z)Trutga e8FYz\{VJ;*{Irqtk"L?'abpGNoǭ6E&)#doX=2\H8¿ tg ,`{"qta$?ͲR{*uIwċ&ܷޥ;cG#BN/T#=+q.B'8lϋK`4k<-~|JJ .MfLr_b(J[ R%8Yu~7S@)iҶn2T#aI>K!紗w z;X\,8}  $@qЩy bsXpfxHs.,/D }7_3NAc0^UmoJ ~H *-sx};^r^Ϙ#[  85RvN0âYt%Fm7 J{񪂟6A%$cf|Oн)H@jTc!&ONOǯ`SEw8lT0xv*m<aݎ}?N{8 f^E%N ~ AgC7|TF`,#\n͈9>]#ꗙh*R½nݺH͚5k˖-AAAɟ}Y\\>^oذaiiшZ_/z}ޢF Np]]94 >չ86S'>%sX\^5Cw bM]`EK=`N9{&<{p?'`~UȿUFRlr7op8_refΞ=O5iҤ;wx#$,Yl7g e}!sn&uNٳn[=>Ddz5@z/a˃Y,+EJccl۞5=%k<~  -ܕ/)'Lp'Otk׮uQ2|xYYYK,^NYz b OfcxLˤ: j\Fգo^fyto0XAQ ߕ΂І/|?,rD/?z=^3K-܃^W#jڍ7bm۶-Z8^VM6^vm۶m<a^0KO0۝m%n%kY]4E{A$p ۯ>ŭY]ж)h8L%:l6A:qq^rPyΫNw:DȔ fW.\lٰafkٲeƍ\ ߿?l߾gɷoF;y\x;t JkK4${Qؽj lXE8jC HUcR}6'_]xZwT1g02hP۷w=zǟI~VZM0"ՇAMٍ18Aثdآ"ךfϏF*+,H=3q&s#V&6A -y\7];p!ptA|{qq1Ԯ]eWJJJ<>h=z4 x$p$ybwR.cT8?vк$,>m4ѢOIt|ɢݺ,UQJjw`tqkm.iC ǿfgI))c@^TT2rt:ꡇƔ)S?{)$P I`ҶM2Ƶ ؘjSH ;h,`l5N@,Y3j͗Vւ]:`+2/;..[qppETVVI_xM~'/ٳ'NxG9sbA_g4];;#X֢5 UZG5,:e\v4 πϫ qj'CAJY#xH0^0A QSix8A{y'HW' =0'7B"zGa⭡tA=nJOZ4zh|rK)qs!ϫ SmG;*RXgT_HxPj)Y fKS)ۍtǩ bmh`zE"E̒8({v~#dXaV`9MwċV5Goiڲ {F)Sܼysƌ;ҼF5jYe˖GwD|+/v-I@YY&6 IDAT;:Ȼ 9̈́_^6j(.aoz!Ll߹~@jkW鎃 U_Ki9}yfc>xlӔI΂xѳ.aPጌ?BCC\@Y)/A_/ZAӦMvH$tr;/5( >YML80Zt- 7'cxH`$E1go,6GItECn\4)/\e˖ԨQC={͛X|Yភ7oSŋ\.Wݺud2}8k,%G"Eq`cej _ʝ4${ lJyֳ%^Z @z;rJ9dP>27 AwD費 '_˸ŗJa>q^}/DBUqƍ3Ν;\.?ӧG5~owyZٕN:= \nc Eg>|j=h|g{9[! ^BjA0,G>/~aaT)-lƕj>@ϐmd 8JOjū5}K; R9<vZx1駟 Cbbb;w* feey5y<ޑ#G/Lm۶S&ݻ1@P!DŽFf]MQm*NP@U1l"8QhϺ'}]-\ԥ 6fB^Ǘ(jwH7'̉嵨 ; zٮ,b!Yp_x1̝;w)ߕðԴ4X`W#J$\t#G}vFZjs̙>};B28w J,aHFrq`1{ wI<ި۲,U׆ +ĮRp;)Yl2_wq_4*2e,. q>,HP={ 1cTٔPxE/Uֹs"""֭P(6m$Jʷݻ{$"^pn{l/QV/:)m74΍WWR_2wyLİ\뎼Cet./#qlޠڲ?;R9<FQ.^a|>_@xBqɉ'V^=///((hȑgΜiذ58s95X,XCw>AܫvʧJ~`vT%?$ݙ$ҝ*3QT:<6c'j &gﻅ@˪ {FJe* x!أ$oݺe /_c(ZhS'>>(G*Gf%@^8ڕסBK!Kϡ|8l^`:rv=8UberaI::K<&~]eL*^LW*P-Z}o,۵[tA^=!!&OxG&M-ZR8~P4 m*N=J- 7 ෳbZ)ab+n\4Pz&" 'EM*dy2=p|銯MA|줾?} d۵|ӱtr.yqB8AuZw|N޴:)wQaYd-ZM`{jccc333SSS:}9p@HHȚ5k2 AlUXZdmحNC#`c\R4ɧxj @B @˂E‰R1Y# :?Sxa,Ew"`GXI678'kOJJmN> ӕϓ Os)}81x`αȆXXOϦ;K$D RnRܬ73z•L|G Bwy.N8q߾}$''g̙b1Z@&2%m9PL\t'pZ ޵'ÃE@y m@Ҍ^}wqvw|t4֋Nxp={ݻw_a#y`<с8'~SOCT6 ջFߒ I٢]ϰLtfST?N'mMT*bUk SV^=999--d2&+ͮavwBۖUh`j[usT. {aonNljZ#zB*AY9?3嵨k S /Cޮ];;bĈ7x#;;Fӑ*n %}t&0`C:(%]-\7ҝ)q)5ZAD*$;)c@綢o_;fE1P> pBtޟy(>| ih4._<)))66vƌ7nTҋ0LQt8zﳽ D$+:6[F}z%Te< 9Hލϋe8AwRڋn[zߗ>:=Dw"9rS0x>5~#Gܾ}KHH}̙3kժծ]_U (ǻ<d`?%m+{n@Mc)y~:^r|/aAa! 0 UףލEr#BYAt@9^΢?cǎϝ;EG;v,TI8gWx=%XϣUэB=Ƽ#L'9qذs0vGEuPUÃv:Z@cg:Z<tAzuN:%''ƼmQs.6F4-NVx,lrkdܴ(|yUQvݐ6dT7yyBK3~mK]^aoO盎; (ܝNgvvq㢣[j5{5kN6{.@! MkEG{|Ke9_ 580+GFT}K:4Fv#{1QZ;YQ!^W_$AupvtyACYrfW¥xjܹsaaaIII?cAAA?䓿ڵkfAP$0۽b}dxaHW2rV\R96֐>Eҁ=@琣8! 
z|IoXnt ^ūu5>k+K*[a"NLTɂf p8iJC IOO7i=9hР!C$$$`ESx>/lh4!?G+W/oĐ5}%5az2G,_AԾ#a͡ߣ;NՇSf'vxX#qF/Z\o$E&V;K'\$ S yXq߰ah }krE`B!4xTLw$@i g~̟=0o"po1-y(z뭬{-ZC8j!5bQ+ 3d+yJ* nOk;5o--Аf'&ܨq_65S9.yKde6"_dIΝ M@m&#-%B8aP%[}rn,v`_b8@w@dv<&'l4!vƼzKg=ܱ\h+8;};"LG~:]ьBī*.J`ʔ)SL8p<˦M*9WBAvX{=!,&oP9y+x,lJ{4{ lt' 0QJ}zvc %!y6̯șr)o n~HX! OL%""gI)'&D]j͛sA*yfzɓ:ԙ%9 _^X^B}A,TtHO㡓nIvat ,Y,b2SȥtAZ i=?uNͶx1M% t'z"Qr;^8Fv߫puu+S#H%, 3͖S6{~SzOЁb#g٤q]2l(;Nlhҥ YG]rkV;QyS,Ը m&Xq!zrPJ{qma}x cGѝ"UP_~> ~sT:)Rvl6:{;}—`G]2wM*%39l}ObKN$&EG.l,\P,)GO]moR,Uש"V(JJU`eGT+yA`iԾZ<\os?FKG ͛7o߾T*}A|M<X_QCw ATثiu3Czd /hķdq9.VeYW˕RXJ]5=kVsJS@Tu^8RQؗJv֬Ή;NUVA>k,`ݺu'h4qqq> 4*ynA82S6Z!08W؜[(d]@#Y Cvu:,ֱJ aeùwk+chV#و~s>a2-2TPs8 ժUk…> 4hf`T,q'E Vn:RYom>;nt ,~ɬ`9pWVgg$ mU@PV{ɚ{"ev X\SN5 j|k׮uC$4[ +!88gm4XJ-'QS-0`9s8jP0G.kaad" /:xdM`,oޡ;Kp!C#},T.v KQ%n_7_Lk!?..)U"[75)814 Cw ?_Ɣ. Pq B.IuU6mڼ曕 AaIMO#?蜚j/l #g(11p/OwA#O3V1skV1;2r8myhѽrT0E]~ҥK.ףk?/_>vؕ+W.]xw^ MΣV[ۊKDErjWzɖN6E;$(=UdߒkPOw®.ꑨ߹_9SؒW0 kDLtgAϦЁ=jaޜ興WoԦ1FUpZÇ߼iǤw$ıY-6zrtΡ9PkP/',#9kwܰ|{L5+ᢹ#to֠^5Ñt t 쎿m!0T#~ɉG4_K/\Oؤ#17 މ-[yfӽ{DQƐ!CRRRHiӦ[Io  b-|6T[H-pB}F$`cөUNeS+yt t$bv:(K΂ /fi{K&\uLs nhE^}jժf͚EGG[.333777...+֡c|^A?/$ `alC2b|KlDT}J)[P S`Xh# Qэn[-EJ9:Ӓ<n"IraXnN:v **j޼yǎ[h/"`a! fF_tTgBT} co3>Hw@fGmDe&G5>1O^7]An116qXzcph4rwK_VFcn?'SSSq߽ b l SB@j694K3}r8|()2Sjm@nKv+Lal #yÖ o}=ƧӮa>"{~4_ 0Jt:>lҤ '88ɓ^ ~(yǓw؊nQTEg`< ߝ0ZО ڇ f& -M@r7GaPnDmA=(~G ܸO7z<V#wYh,--eAN͗uO ;,r =l\fק*>FLpwMIIq\5m4+++---77wĉNA> )S4_$lYdlD'%`5{R)Q6Z1蔪_ia]R@綑/fHĨA/pqF,i1 uΉًdo- A ^z}g`ĉcĈ+L<A-XO =CpqeKÇ*d0|~Hgwc>a1 蔪_%U!avT#Un}pzQ.\0rN\R5x(1 :ujAAƍq޽{Y^z"e˖۷oON~?$_Zo|j%NM.U2 `fCY_9EEv.V ,ֆAW>d 7.b>m"n V7 zob0t0 쐗e˖Y[[37xVx( /7&qҕ,.sϐG8asLK`郃b$|︍{rĖA^; -|䚂xF-VƘ?… K.]pa?3a /edaB=dvZ|Z"EE8}} }xotA*`C$Ϝ[`Q}w+ms jwݦ'GFϲbŊ LB)|.ew̓{U48;7/r/V.}^2&CS`vү6 DڐuԖ{t>RƯ6Wr)I?tȿq0]u B;RQѭQ*bI*dީm+KƏyC˭]v%/w0Zxp_|9-9q89svJ9WuGQxj|}N|Vjq>f )Qsv|. IDATc} VDCw6BH?- WQ@Vn)(#ZWR>URgWe2^ >*X ', 1N!+ ~:?* >N~H-~Ȋ3 j:rڥ֩2- $XDoF綑#Vh-aZjWF_06K>j ٪hBU{gMQT~~Ç322>f*an$? \iƧm ƍpSEk|KWlơP[w<`>q ð%$:_YYyf5ҿ;Y<"TjF\e˖ԨQC={͛XUp̦'R?U8;v|w6뜋N1?= 67 7Q64AS/lX֢5k/,wW&;ڭ{錌p(jܸqcƌs">}zԨQǏrBa0۽Øm ?jc+5eMYR4<ҧcclT),]w8չN k+ʍ|K^v~˙+yi˩[pߵkŋy/1AJlH6ͰC~Y ƌx^LJJP(x!DXI|0x(a+d"7 G;xs`jaQ HJD 8y@ʖGc-m7}@إM䱬 /; w(om/0>R AiHg3O=2X [8(. f'5-aAYͥ3bX#A*'kYyA g g5MQlK^ <5R*EEE~PT6h Zp9 9lXah啰WYԮRI4/rS`tK" . Ff6 da΂sO2vkњroTϥ?ڽ< 0y>R5i$hѢ!  bt>aAФdv]z죆3j;D$OX/0hl2OWknk q#@e/QajSɉ7#_(<&LūVٳgzzl[nر#)))---((h> ЍGJO;{ & eQ7ټjF;lttg ,.myq@Qq MJT*i&~qAD[/cB?}[>rB>O; jժfff:tbccs5kք$*0P[LfSՄ,QO^bxC(vbuWjC;cBn LJ8v|O; 0n+a~e4ƃw,7?M3%.8;K`./kP[w?מ͉ -XAwa^yu[GXvSƊ?p<4JGL{y,֣ EQÅB&]Vdj(_yLBڊ`We*Bxk`M p1߀%cr#*s]17o!^7q©QΪ0iⶲYYY/_ III8+h,((h4JɓK.}  C7+ ޝX2+KiQD)J$!-h-IIBJYS$*ԝ)a,cfξ~Lwv3399g?}3yo?_I}ͳ |~s풸ѷqtR' "mV_y{{Ϗ76ur zk4!tsNz;{˯itS"\qjFq_pC՝3gC=n:SO}7CyR]D{ȯ}YzFm4rU5X%,d̼]sOZxV8U~pQۺ|}>%vF-RqJa#^99߾R0_^Q]qKK φeK8p`ѡPHPtСC bƍg0`-["0Fzmڴ5j(Jr.>aLHv?Ze+I,nI(J_;*[$ ۟!PQjjT͗_XSVAZtlO-ZVGM_]̙r˹s~_ܹszwU^={3S/]t Z Ce&}wF/V3lOD8P~{M L 3NֲX}6c7#M}ig kō+޲m M4^&$tuq?v̛7/33̹s<K.ҥ?Bל:H$PE3Q3 $FC5j e#L"6}frɢϳ2f:Pԑןh#9k6?_;W]]ϟ?-[[hQkBQk x4WhCg zJG5|SG2%i NVG#'z#) eJҦ훋®.`8RT*B7Y*i̱uE.kj < FC@^هfG}_WI<뒵Bw_>BrdO}Q0_ vM/\5D*% nԟ(B=4oߛp7vVJ9bgJ(01BRǗߊUHR[N< !p:Bqw\Vaxe@٤/^ 95v_~q2J]\^5CPe㠊)R3ǁ!7,|r/gGquK`ϮKt8.Uhvgg ѨB*Wu 9lH8P"< etDce;LMK]Y v{?51x  ں[8h7zb^,dqS#ٳAL0!J',!O ^2=2ﲆ̟- WZ 5mߊCˏzn;QU!iPG}m?:&oߜ;*/[!;B(\Gщ'^XG.?q{ib>pׁCm2n!y|1߂ܷՐ4_3bZ_KRuu/N:'%dJZAfm46Q<>ƪ a3&ܪr32vcCGcL"#L7p}UpRK}YaIUIW}`JUn(_X^?RiʷgK@x3d]~,''z+(Vս-ML؉PP eEHcwG{GA xn#>r$ջeL_YBKs"<_&qtb Z>~A84`Epd+v#P EySO~>5ڢ-aoj Beg)U2 C_9޴PURS@V˵.s#p{)x.%Ş=xL4UvU#qPep3j ޡPUDR(q-vjj k4[atR~T!!}iKp B,%S-_sXPצqN8-\xjPJG .0>tyR? 
vP"`qGHCTJ9!N_.ע{$>ݴ8x95Y FGq{5Wti a& D/X|x !)jR+ܞr.)ɉO/ @pJwj(Kr 1<ȿ7̬ 3)mNۈc#0Jw1T5[tg 7ڳp/.q:V9=!1i) Vy- dS8L]44L0*`Hݐ>LA,p*g05jLaB! U)T+ѣly@a%%ܭe)RL݋I#!3M;ŎbuFCQuYF,X⎐T5Tng=#+.-Y%mE_o<$i}}c7"TZ-B.vP,aqGH|O: ‡^f;syߙq`C+%: TjYB$jYR_;B%#,OA0>Jr~W=E;`cZS$̹CSME =!_xh*tc]Z}Dp:~8۾JmXJ L o =k  } VBs-2gq I{ x~qPd̽4-vP%aqG(T  ZDNZ Y{=8=ATvj 6kO4z;B) ;BIÃCmiaE&;Vl9_s'vȁJɻūAki?Mc #\I[I8xP]X G}\z/&M=e0&gNcp}qPcv8=JXJ:c5j8 X%T}8sI 30 E翳D-w O7.^;1 qCB;BI*BBcZk:ܪ*O:qY%W<ѻ<&~lv[Y)=igF=J"> !$.,%15 PlVlWc՞ֱ40SWǼU {/ͻ(@S E՛ŎboZ='sQ/vP`qG(c 9t+TA+x9=yKg xwx\?|\8(j2 A(U`qG(IQI 1G+ܞ }c3U 5EI'p˻x*S"h+m`Yi"JQʉF|oF%wT6MR)`cVvGsfra!ˆV ޽G!a<)!(8jEnOq:dL9-Th-B(a#V)u\[PҧjDܖO,F-Kȸ"1 op:dj#iOGCJ:XJ^*Vz \󺊆eoxO%xZeqwVKWl+E;oMbAqMk2& g*oB(#(y tU{z4O t;st@co.ŎS%=f OJr,'r V ?*VO\~S8(32 * kBbŸ@R02Pѐ7gP-Y>/K/,DmK M;ݴ4L "<^q}i}O#qP|p$)갸#2ժ`_<7VBIaǗ 3J{T)F"L:1j0Qyr! `E(Ѱ#Jzee^3J,I-_:̢n:QSxlF4B k%{/1-%%wR ѐ_zxw++ }gcP|ՐQ  qLZ4[wf*AFH- };BӔcˎ"vVjjOHGȒ |ʴ$7rX|ȳ =SڤO#&qP14p<6\tG(#zFk*"geV%td8C\z/6Κ!3~t,b(*!V{}8ٽJPzdF,U!XJ=zN=ޓ- IDAT>&xg_ߛp;MݴuuL(* n*_N1jPT bA %C*;B)iR2Qvg;꨻,mdPݧ7({ăcKڬ_p}ŎDp"ThgP`qG(%1LjP0{LUfD,)Tf6|vIhrRėowYF`]&\T*vP|>3E(n#nJze0TJ(Z㤔|x*w"Jh:ʋQy@P@(qri%T8#²izZkSKk;m[K8:|IV qҜ^-ŎT.+cVS.; B ;BmTW.Ux4wh=E#e6skΚ5%0eNj_)ʍĎ R)2E!C\K =" F=ӰM4NJ^@d>+Dˣ4GH S >#:H$eYe#jLp1 ,,ap{\ Ӕ\9 >n*iѫ4iw8,7Y_Ѥ_վgys% 9qOVJYm6D9BWm;AMnxg:!t# 45Z ^`xG]WX_h%:Z:C.8#þX'HޮowpmѳqPѪ@?#&U*s0OTYK  YFVY;ݴ4GM羳GNēdŎUe7tVn!&(iz P|Ty8{5D݋ LwIgܦ!{.3U㈐B(l,zoNBw+K$jժ 2k2Fٶm[Z]V޽{o۶-iECRR0RI.cdq4/.|tPLaE1 \(Ж:u~3݀^6M bA"Vfs\`;B/ڵkѢ?Ƈz6o޴鲽4j !f(>xFGQ.pЄ7.S⃋ gX%L* ݣkB,1VazqZk:YJ1)Pw ݻw=z׻z)hڴU+JJ.]aT&4ysIjTWs~f1dY]c#W*? ]%eRȢ6i2!;*ec;B)&{ii)ԯ_1Lٳg8p'O\.]v,"4nJ usH{"E/CfjEO}-l5a673㥡@Q3%kŎ…HćW(%%%z\ `Fڶm[FcƌΝ;ggg>1Ba^9YMT- vʊKJllO֪~ЃsiqC=M/[F7VGfs5l'ܾ};0fͪP||eAA 94=Fnwnze]y{KWv70oΔazZzQҟl5'Z6ѦH}Wv/ O^Kh_*3u}g$tBݙyLB,Ƿ\A^ېQxu9_j!E}r-1 $?sٲe-3FVV8Rnڿ⋖-[jzGkkG(`1Z5lnv!GI^=e3!s,2&&F \vGj+ qAkTd҈n{j =T֎uGYq/((s . rtrԩcr%ǔvNbson hگ)h3l_p~7I$*>![l&աkjf4lȌeׁQbD3B)]w}U=ҵk|˗#{!& `^''$uf GZړX./:?3,9:)C;#Z-~q!kTCwĎbig9Ž_~R_~Ew׻i&6lu>zzZx1.CU&oTS5G g'>6/mXM^&gǷZC=O;YUuX+|dP(_~ho߾e3].א!C.\мygϞ'LZՐ[d v'kdm ft,!x.B,XP^?#77QF/jW1 .9s;g$)?tМP?BGf*D?Xo.,&ϙ֔uw32j iβpQ؉dgm$x!T)cǎ+((0C 9|pfͮ]/_FO6MUwf/&7gP'c鎼[fW5(4sZ὾ҙGn!5 ' TUFqF3χBO>VZW}MII ^Up#uM!QVȻɤ0'[dh皫KsLZwIܫhrŊsf1&>K$\d5@ϨQ %P2!+Z#My~Izd+>/Xvf ;GI^juiyp?Hڴ>ejSbBIDUDUwF;Z^w8NL:S+b cIE&AF-փ%8"d{ly޲t*P`P%w*e'ű;KuH)%dw5G8o#kd`6d+hwHm%,1FeYG2z(~"v"D:d}=(aqGJ#:Ex=Q F՞QSx^kHljMNZ y]@eh6ՑJuJX3H(]q/,UuFeCX&/֚8xy^kH:N?mhg¼0j95q8ps|6wF3*G4!I%CUJXrVƪמ(pq^OZwL9U՟KfF>~̱M_6^*n`@k?4cЋxIjqQWk4U6S~x0{wJUԐsgM%k@H۟1}Vhl5([!A(s46BvdqigFΐe=t=ݎv˘RN?~;j`-)<_x|~P2[ub|_Kuw¹^U" j*OkE\ƒ_m6%#v0u+vb ;BtIW)}@Zۏ5!LQ✂ΰ=ϟT(S;kFSKǷXm~\x%݀Uvݎߊ%FJEt8x!yE.nd2-힯5%\qWakl?׌_\joWE ;Bh  '5,BL-F= ^?>H*=Q8&2V(,ks\\!5[3?J)y? %wEFXT&WZcb73 x̃_"IDƐ5W^5J ln ^8~VD()je\F((?(%!t=M9vN%>L5դ5#B ~WIIoܦ)z܂fb֪d`4kI9"%mpRY ,諐P}*.\&hY x_uyץ*IzU)Fd< Z͞:4`JIZu FUPʗ5S@PfM2^HcSn!YE羳/9;Q`/2z(~yu(bϒB1!tc !4,6'J-/~+l[ra^XㅒG==] 0{.G0iȞ"c@e??|b؍Q *MgK tC%Ma5Gux9VTo4eԧi(gvaFqϨ$0:L#ʫ k /vg^;d@aiYg'XB8V`)2ՎjQS,K@e̞:1xŸ7-E0<7 -vn;B˞U`ǻ.t3?OP&'}b'JL3"yCݐMcqG;Bb^ШI`xNilTtFs &qk%jH6110G{a<^1Y/1(>̈́J]XBC5ql`^!3e+O7e{olk%ZfCݵ-6/ͧ$iȞ!Co~?|\D(-w{:lU/t#*LFF}&My~jϐ29-hꢥn*XAwݨ*P/KpbjYB(lԷu&$c%v?XBM2B.E[lA!;:Ԍ7i;n˷od@^l^-L>~?^=%f,=(v"&k5IEE* ;Bp<8 M9⺟y/z9>2xS$/ȓlchg"z|W! 
NhryheءR%eOy^֢!mFgNR@PD#nx.fٝ~|E)ՔՉ E~DHEǛ?m}aa.=`Tp;ǗZ?yJ/Zwt;Bf xj=džIu,]'ws &;xZd[6ޑ'u}}7X9K3Lٵ1/Z)Dq=O:vH,XB1 %dIaŖ9 %rYigƞN+Џ렢)8a ʲ4ne(\vJyդ5#B,(ħil>`S_Z[bJa<]X<%w^&)U]XBԐe3$#6_Bn˒N7l϶ݳƔwNj&{u$aIo.^;J'>?ܡ[ p4zb %K(JcwO늋 n!C- 3~q=fJwh5e$Em)o;Jv[lQ#J IDATdz-5c'fTroFqohY/]\8+Gn~د\ \ou!C9ӕq7Fz-XwPD('N)U,J> 8vl-W4ZACe3[fd ⢑3ͮ>yXfnάl!1ҹi؉PQ0\DRJ,xRT`?HXT3ڗL?R;5U` t#ر߃_o9\wVc#mRzuB[Jŷٙ2QB(^Ш7LRz(1u^W3o^EEo ze/j&ȣ- ~sy\z0J!˚K[pk(EUgh %B5^*wӕȗnj9F[qf\I(DA\wO S6iB_ \yfܖϫ2L!_Dxx/-KC 6(*x• sg R&K+n wP1,0{ep%~՗"TG&כ+J83(P%>bZPp^'Kug֤B9K^[W&EPu'v°#D@3 S%d{ƃ. K ,Kh2jS_cL`1=*@ֲQΛղpIJxmz!T9`E˄aCˌ(mSN;C:qO?4|p (2s4z<+.FI@ece3㮈Qס:F,!i(H!G 9&SKpM\hBٵCRq)xBY릔T"v.4[!TqP|LG1f,q ]eR τwɤ4!O]BB#bk)B]ECyzד"yg>@,C!,' "fw!iZgk` (`k WwB(Z v-6H9ĺofKEϦS=%J?]E=כ_ʇ=3=CeQw poWE+q+UHDA_K证P7T*ƺ;9mnN"}]K"Y=ӿ<4~~)ygu6`qjY-:\ߏV C %/GiR6 wPRGCMc pΠ˓ l|ߝʓH L&w̴7'e;g{O.*;J^TqPjJ}Fpd@l8"b:S;$7-|)a4̽CkQ0W}bJT1zGC.^)?׻ءPo W <wP[&0Cht"O3`Q3Uk8緛7U@gŕSO>=&nPK;5v@/,s~uiM7$_;gxY&޹x6>fx;*,ӀeeY&(cn؉6U8evbJvw骩PTuOޯV)&=h*TXT5i&QjFQG YBYO}wz;bdUa{-XcEv%O)41 hJu筼*1[=?\^عP c",\u&Zx{ B$I~Hw5+g|Q_{u!GZ@d@ & )M]!aW 44X3iy&y?OnoYKϢ(9 1[g;pS,ŃPȎ#߃cR$9v Sht[H\D\?ɑT7pb;"sG#D\A~ֳg?Qras2];DWJ×={~9VA8^D=`qG(N$TȽG(t!*dٔ"$mi;֒`=lgu,OhNw2VCҧN|`AFQNѹ c:û}Vi=[7OP? '9,);BCtJj0@󛽾,L]GŨ;h4R5+5l;̡?H)O᡼hKˏ<4Zq;mYYfYQdǛ4~ёܙ=s{δ:< Z3% w+CHC}~Sb|ӵ n9IS=A<,_]hfQUQ0 ̀Jv {ZE)UZ~K-eRenvőg-j4'绂9W/T6)Fp* 6N- xmȋkݎ`8wq;:ӴD|_M`pGhfܡkc]'4T+-巆HfIg4sbw"Fe N%!>f?a%nQ)<6>\ͅtI6W#ʲD/tH9~´VHb E;B3بK$1;lw)B\1'd^-j08Ku:E&eآ Ģvؿ;iY\*߳76]vG̋V/-'NSǺJq܋ wfX(ܢ){]w0n*a3Y˶*rba #8U6ͅ3Bf5鰓q?amo[ LkBZ|i9F3{zJYZ \ܤ|E Nsv3QDx JMcm4mY|W (08]50 BQUSr ;lw"E cLĖoSyq:*6"] K Gie!~ׅ kq^uU62uںD*}?[9/_{`"M`pG(w?5r_&׻H_vcmmőg;֘XV.U滂9dCzƷ'w:D3D5˴+W=hɼS0 r]sY{4UG>`p/2;k*EeqJY1 8cuOfmVJ鴟pvķ~C=o0]D?Bo)2ML1˥lak7%֍|I9N zP#gиuݎ;Ы z܋ w D I Q [okzãp}#sG|;*"WG`QurcI?O=.WhsxLvkXtOdf.H^4S|Wղt孚2t{P ESEa&(}ʴjY 2R-][zӊ30^30my:'Z}^ YbM-;{q(R!S!rdeة}OGH7m4mYKB|Gc~c?MXvҍ]eFap/2*L`$WN~L狥J䤅қ;OmJzo('Jm䩤7Z.UHs ջh@:xVYSLo"^ڼ{HЌ Gt%!Jok[Ӟ +nlIm`zmcƫo?4 D޿5r"f }{PQq{5u(z J_l{$WwI %[߻H_P{j}aZ_tJzޕR!tv}O{<@cTCp"Dj r-N[5-PkfkDH,q /Ce wBQKҌqaPF |IJz}lu7k>1Mm7Kb,-#-[/RuJu:2ALjB|1[7oB܎njX־#Ks"6ԑso ""rlRî7D!{0<BVx%;-u>:Gٟ3RY>d"p{WWTI>ea7Ǭߞ2'z$Z7^\skRC 9o!WD`pGUBznmHBq"@7cpO%~jX!W9Vh5!>n&W]' ?r]"tzjXnWMo;C:.>g_:|&ɩE'"TԆ&O ӵ+0OfuZ- %^Sr%u1'c'[L-8 p(t.uLz-~`k]j.՛>'g:3OPrc|BzY}^w_7O;eP]D(T "فa?nv̓M+75*,0uA"zj{Wa~ ]54s|~m5<#.5sY0 n/?%R<x;t35/`[o$vK&ukW*yތI8=3'Jirs=me#H"[,[y UЅ`p/2(Ӧt@ȝpXɝG@\&aMdevqKKv}sALIQzee~`|3S=,ru9]o {Ѳm :M{0TH4S=2z<7k_rLV Uu#V(se谷u:F0^kYvYVԓ7g!~}|W ͐>g<.}R IDAT" ϮC`pGhNa0]S? f .}_}׻BCW׭\Y%z3?e?My*- u r-J?bfnje-jV"<n?hQQq,#܋ w G әl;Gz8l+03X Hج-ZZX_P_2WoKOzݧa!>"sWV몥u5 &R6w˼>gPoR߼N,o E;B(ExŲM d"(oR9 㧎g굻&xpEEqi>k<ēUjiMTNjɼxy"@QߴVfPC,f׵9q"!tax4cWw;ufuVq HA;kTH_ڬ,Zt}}m>#Қ*qI(|wۻ2/a&NJQZk W`;=0+%[%sj&8 E;Bx=onɾ)sW+v(s{'cѓ6CJܤ.h[hų3>nӽ~~аg3+Y^.^Q)WWJ|<Ø}^s +$D^<_۰Z߰Z*kД0d9ܛ`pG].aְ>0|@׮9>u8f?iq&=D,-j5 U~Ζ)}L0*R\Y!VVyVP }NRS~~4/5+u/&8G+`"!4+ah;ޙwZ]So״Es9%6xqDK%e2qqPNz=I!RSvz2y|ة.~1>H~^d0#r>eϘ@楒x΅cSyiuYN9^z^iS積Y9& {G#ߜV ’2Ll)ZJy| LY8ڭ}G0!U%ʊ|dv*v6cL?j_Egq^t0#f?XVv*TU٤[RQFnTu; _) J}R_̫"|s*n#Q؈wloKY]aQ\",*u!Z0:ju>p,H" "5*+++r,.*@܋ w;z,>y"!Wu|@g;{^ \U+jZZes^]IOz'FSIQ9&̏ J1a~L4!Y1:p>x>ܷ_@X,_,[$/]=e9 iw E;B(Nvn{ם&_y"_Y93i:mw:]}v9<H\W-Uɵrm\[%̦yB{omqd; dU??*4ńQ)7D]ya"4y>ϋSPDee=m6XsfH0_?y&yaIfum"c"ݫDNq qG,vv9Qכ)*J:YB4.!d7 :nߤN$sJTU.U},*ˤY0t2)T2LAWj,Ϳ&2O#pK ٯru mzDwzޙ8U免RsA*. 
#2!y"@=v^wVs{xI\+IWZYU"eKB+&&At!w`O)pcw؝G2L(*ŊRT,/*bb)∕lX#L]I+tߝF, NNqS)kCg|m :w!75b}MM@3zjd'MԲíֱjk44^?O?Oƫ>ZϏ _Få8!g !4wE9n|,({ \/8zG\*y~$U\3~l&r܁aw`vj=9=OX %bYX^*byDE*__QϺSAOI'tt(=и_5!FCc?Vi\HS,S׌?0r:ݶ.Z659X_+5I RcTˇdHWlyʴVuLZ: !J9fUYU`lu}{q VYVHJI\.EN SզziRyO {C#`v78z? IgXJK+ˣbT,/ji)ZJ'z~o&MLp:COg!3 IM e*W5!R4R*5Z5\岋 UBUi-c^ߐvN{7͘.kiXDjkFWͩ-|$JbM~?>;BtB6)&Ewܷ\{~,eTJQM1 &ur#7<ǽ Z0!ӟieN)K#BD,4"DXT,Ѱ-DK&/Q6`оLЛ`3A_&0akl*LjEG,z\k|Uj|Uh\Wi\X]Vq%8qOzng7HYo6DU*s>nҔ_gB"-HSz!=y 8y'sшa!b!"!c>")9zU܀ t3힧` M.l@a]JKLWYPė\Hxxh 2pZrq8n9NWŚJJk+ŚJ;ϯ>y8y͂ ճ|B]0]W+r\#z>F$d ,E@BQ>NUk,H7qo4ų^a>"X"!>"a!":wJ<s2h6IL:h#0X{o?uub&T2E<3be"x8>xf5]n5BuPU.VgG]WA_9y؃!qfE|&ǛwwNx~e(;y<>L$AIYمny/i?F~"'hOd‹dOlb^ }u2·B$ćCBDC!!ú·t> |̉\L5>ِE`Ԧ ݠCfvǒGـ BU޹NQ׌8F3Þ);"(b1-Z*˄r4Ņ'@ t_u,rܛ!дY#Kk&I'}{ŘXy$FAhFAhzo2\wof !buJû=&~*$S~2%~*S~"3~:j;=g?X:҅k|HuqR"܀tԢhܦ#gn4nQ&'\D6IiE%v˳ #v:0"BTD豊hIe$.l[TUHlf dƣ>S^^~7+_Y`Li!T |_fMy?p߱J<'k>,[[? LLwdg$i" IDHXt"QDTh"H*M Xpsdv]'sʩ<@kkkSS#raA8_1mBeLzcݾ~A` 6gQ r2R2/+X'Kn'eV`c_dyİ L.}h :.uR~2t>$rK,I5ıDH DyEdDHH:{dιGr'L/1N q#0i'iK,AČTLQvwd33`C9v KD" y$ϫ~q׏'o(//;|<{*"*@Qrqw(H t0`(A0NрfCX6r RqQ8.b(G0E9DGKݐujPen6qm\:V`1,sAf:1.Q$ & ;_ BD*Ɨ1"OZhRqLJ 0,1eCfK po)6/:dN0$,A8$MNxޔTL;T}۶mp뭷N*|ݻwh[BE   QJ:#tS:{  .9.đ sNp:G„9}y"30|9ԱP ]̷ÇLy1<> 1j L w-.u]8A¥d W@(_ʀ2RL@ (aLqx0j4]"@f`s (|<0ƙx%ϋ Dhqb@xH!!ʟ ~7$@*~1$8pW6EyE`œʳ%9!ЬT|%/9Gc1(qJ&)M4I_,{#Eicr,ƬNI(84/8@BDN$$Ȏs8"QȄp@BEJ, eɞfl,g-j2lj,p3/{H ٣u)0+0ϼss9LLPrh׀!bTF9U{-cL\HZO(i5HeIDATK{?L*/--d2i۶{TE!4_{sX(MQ4Mi iRf(3(53(0dԤAS_u# +}ApKqU)nBJU8%MŘX21 c.012X211&.cc.1` F /g.G.{Q4xK™H|PzENY0r̉- 1( Ɩ] h@(P4x8[ү~0`(VR2}"Jޓ\]GwMe۳O~r1Y_ !lZaa0(4 dg,P iF:z`б f N<60( `|?LmB!*Eܷn =ܤl 7ܐmB!*E?+sΉa/G"B!T8 GA|̮˞JUV_~o'mB!*dE09koVmmmKKɓ'}ߏb?'.gK!BBV=PVVgϞ󍍍mmm{}V\mB!* sPv}{B!QˁB!!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B" ֡C~ɏ?#IRHYYYk_&AQ|WM?mg+4 !"hu=yooo{sb…sOù{~Gofrgpp0ߵ@9lߟZرcG3X.Էkr-~[lپ}{k߿˿444y~V?CG?|WM"m!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B" r('|W… +++] Vͪ('t%|!B!t8T!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"!B"$/| ,e{mkkm J _źu"HSS9-xӵRP(45Dmcmݺt˖-O>d.TZvttڵkCҥK?洶꫄7ID) ТE2Dۗm J˥MA޾{)3PytӸٟ躞z0ƥ~n"I(_fҲﯨn2,?<5G$#\"Jap/ iϞ=A8qŋ{m r]wٲee˖SN1,~*<3s]~H`p/Si iqv횙ws-8ΪU[nb%|3z{{g/@@)}G!܋"Jap/x\eB .ʶhL~_@4_***=k^PҸC=4O}S4Si_|JJJ&!AvZwz /|\|9qop{xǹꪫV^=^X^^~w?m J߿X,6OO !CCC5o3PJn߾{X~oڴ)NL yߐ9馛`߾}4hO=C=tKݰXBm6['vm{mfTbŊIP(@ww4V]im`<]4s=wy'7lꫯ򗿜溢K1, T-m{.<?:ϣX 1xIْmfTWIǎKR0+S2}'Iҏ~I!T֬Yڡ7ꪫ`׮]}}}555Bz)Xn].*.I]]]]]]aD)q/PRR2<;i=LP~*ۢ0Ztu. `_ctq}_{M7b[>\]TZv˖-wqG&wؑd:}ѣ/ԧ>Ӛ*(Pd~Ō b[4gyFo~UItز/5k| _Q eJf?|뭷 'm۶}ӟ馛YхMe9}w曛7o+W|ׯ_K/MKD) "hQD3b[4(=В%Kx A~]{[UtIҲaw} ?E1wDg*կ~u7>|8Je2_W%%%۷oO> /K,Fp'͈bR Evx<> ,;J˲axG.0O|z꾾>X2뮻`ժUo}_~wۢ|]h?{AgLe{ƥfW|ڸqÇ)i?PU+ [AiƘeY?ϲ˾/߂IsTZK_ug@p^Q Ǹ={|olllkk+//{۷rʜnf5POOO3VItpכŦҸxmnhh>m۶:{h4æҲۻЪˇٖ6d [#6]C7M@-QD0ʄZ/*!! "KL0BZc:(BrLzp9f)m~q0+_W|^\wε]\g?9p@mm]w5<<H⋣zD"jժ^ziƌyyysݽ{w2C? w1jjjxP{{{CC̙3ϝ;wYdommK$/NYkkk;ydaa_|3,Y󺫫{YvmGGGvvSZ[[͛Ϗ;jժ-[dgg߿Æ י裏N8qxxO?M߹sg;! !͛7={xxxǎƍ۹s#+tRmmmooŋ~3glذ!;;{ӦM[nDA!###}ٴi233srr-[֮];44{233,XBhjjJ裏BO&fZbM7ݔXWW7sÇod7o={mۊBrʕ+|ի;p²LLinnrJj377'H̟?˭Cۍ%rD">|qs}̬{zz.\xy睩-iӦ8q29 .!$C}Wmmm7.olܸo!_ZZz/%ƐoB___ĉWL}pǩpuN&e֭>sL!77bҤIǎKM?rʕԪ{ן={Ͼ@\Cwӯ:ydn !':te2MMM /Gz;͛o߾|۶mGܳg</\:++0ӓU"b` oF_>XRRյgϞ“O>:믇lٲdɒyk2F|O>}I&'ɣGF0l߾=~Շ~xќ{sĢEBMMMs2!Q9B ! 
|'#M8Ǵ/ZHz` pBUUGFFΟ?/_'ϙ3~gS&Ɛκ¼ 6ڵ+77LO$uuu!ǧͤikk_ݻz6Gjiiihh(//?qP}}}wwwj>ݬY+VΝ0aBuuu{{ƍƂ jjjjjjWqwpw7@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@;D@@%]XIENDB`metafor/man/figures/selmodel-negexppow.png0000644000176200001440000030335613750017357020474 0ustar liggesusersPNG  IHDRC IDATxwTTs00APPTD:#E%؂<0D(ƖO4$lςƮ`GED" OM \.zgn{s&Jk q q q q q q q q q q q q q q q q q q q q q q q q q qhD=UWW_^,PiӦyzz*DQQQuuupp0сξ~:$dһwogggj#U\j H H H H H H H H H H H H H ^r~=Bee-[Ft AKccE/_Nt W\IOO':RHwHwH|Ç1 laa`0LMMrrrT!JGɩ'N񕕕GBikkGFF^p޽{CUM(FܥRiTT;t֊+ 3͛cr\HPP.r$qqq ,ppp믥R'֞;wð#FP(KK˨(cc7oĨ.fիW>իxF'''Y#͞1cBŋʌ!GbŊ?1..!4iҤ'OF=}TA:䘜jjjjjjd2? !4dȐxKyy@1i!}}!@@Xt9F; qoZ***#hl N͢sF$#ԀIIHGp0u`"/IihhtCѫW6_h,E#azzzܱT*UGGuMGGJ-L7O1 ۜD"hjjj'OȪ*.qyzjYݫz[[RTPXp^m6:W(:tmmmŢhZZZx#btEst'@*9!&~Ē'Ba4`'鎚'uuuٛ7|>_( B>/JU'kkkb1B-n]WWWWWWQQ!ZZZ /t2 ;B(''E{nn.p{Ay?DhF V9⦼¦wM H:u.'F2 f5Nch֓+/@ jjjx<^mm-p8555?:/,,,,,l^l6w}ݻq~cffL7O=<<Μ9s7oy&Bݝ#[ ѻ겴o<~뷍9 ٍR!=!=%[_ksPM͗")*tĦڪ*******+++++B!~ mzzz400055߿Y~?`sssSSS r|وTJcѹPYڽF874OɬO~%*U]O5j(s`s ޽{{%%%eeeEEEeeeeee$<et٠Ahaa1h Rq\ &Lh"ZzuAAռGBikkGFF^p޽{CU<.E<O^]]bEEE.] '*q߶m5jԔ)S]]Yf;v&&&\.7000??qȑ9~(N'Q4{8rqD K랥%6f狹|ޭ[izL7qM ںWBaiiw  JJJ޾}'YZZZYY 2dȐ!փЀ߿?44ҥKW\Yxݻ:;zhBB=pǯX"++kΝ>|xNN΂ ݻ+MLtQS(.\zܸqG577ohhXreDD'|߫)׭ Y?~ŋ}ZYYeggD"=='N=lIt )bHB7353ѝ񹨪qQRcVssa5fpt:_~mnА999999p'>>^vMJ)\QuICa_M!E35NTZɿ *'VGDkbMW {shjjZYYYYYhz D_~H:nmmmoo?tСC:99@ƍwI|g o߶ k֬ QFqC@}Uooo[[~m޼y"S/UVV2dHvEY{E$\ Uzq]^}^-߆> _M)s1V$?lj;m10{(pvvvvvƿūh={ӌ Hw^P?{*U$%%۷oذaK,9s挛Ǐlv3}155msϻwݻwذa)>:TTT@E}o4_~.wu(ƌeT4-\ŋkՑќu5q F1Lt:}Ç_p!B>y?x'N8qBO>#|t8p ((رcgFEDD$%%eggYF*s;wkg`n4ѣtz>ƗCi(D"hjj[ ]ǔH$!!!UUU4-<읐 kڼykSbb={׭[׉rkkk[Y,s#Gم/^8::ݽ^c={vС=ٱcGjj#T(c wFۑQҦlAwT]ogN=} l>-'k}4%::#`fgggggdX|7o>z޽{[vm~iӦ7g~ D! #H=zOpa2yyy~ɓ'vvv\oJPZ.//!W@ CBCCׯ_6''E;.-[xxx!|g`)/_H$x`䚍$C}W"EZ]'qzL9v>um c(ڽ T*_=$$D ܹsڵkW^{__ikkO4i^^^ZncԨQD@yZJ+NOOouR1 }}}%ɢEȾ-7$#!JBNyu#0`fqi}RZ ¢2ލ$SYFÞM@LB(==ʕ+w||<Ϗ9s[ *|ϟK$Y)@ (,,hYibbbZZZ;)S>233Ɔd~hgIYf-[,>>>''Bj>yHKK+##TvV||P(\'*^^^{m=t q'ُTBڎW |&U>LپPgHx~1H{[[[[[+Wܸqڵk׮][p}||f̘bl@|˗/O:o ^9Լ!a5饦*%9ZNڶXG)|]$ ? mVVVUeBLNE!:1iz{A\PAsI$+wWlOjGԦW^ާN*//e21113gӧς >|(?֭[o߾zj;wDEEEDDhٳg?yNNHNNvppP颸8n޼9))Iu1 b҃!T!::z)))ƦYn Q%Ǎ}mOwTX;5icB6TV-=@ tҩSnܸ![ " _ .,_@z__ߴ{[nyzzD|W\IOOЦ/_A4mƌoNKK[p!rObb" q'وbjt> Vs E/LIªg;$gkkwfff"ٳ...Ǐq 7HIEW˼7I HF3P6fr_zunn'sΤI%G;Hm)E(mQ4As1t.{Dt9soܸ1~xϽN:6WcR͵,BAL3>-nf"7[,>اON6 ð9sٝpQ5DL8 )E ^LjY>3@WRW_ڨkm|@gkk{/^̘1ð/>{1ѡг߿?44t߾}EEE())ٽ{ )66bŊ<{yfر\.G$)O"E3++K=A$riZ!􊯜] IDATchZ[ á}tttBBĉB<=z7o ";;/Outt|mms0 1bB266~MLL!u.R1ۭwx]dZ@i q'6WsKn;M1ykqrɺVz#Fܼy366vȑh{{e˖UUUݟߥKN<kaa|rBYfM~~ٳg]\\dl6{ƌ/*RW"OmllTz@N>ڀ g=!T+.nxQ 5\QMyE%!߼%:(:oO<9}P(ܽ{]B!ѡНDFFTVV" 5&+KU9rŊ4iRɓ'#>}xT]y̺wGu $$fW>!medX_oZ^?acy@bܹS__fʕ׮]#:43ooW^v622-|~``Fxx8J󬲲2А!CZ-Y.R1glll]6}t1c%%%,^ĝ|qG, w>^E75 D֞骀444V\D^~9eʔn]ĉ/\P[[0vN\~ !!!yyy6mx- B]cu2_~ŋ333=zo߾QFm޼OA,HIL /s-x%&F}~Y`3W*`S@n{III0aB?d322}||>|ׄt?CIݻw;lذnZ***: E<&D $$$p8ٳgM6]|OA,Ȳ&IAz"e~q{OEյ":.bggw֭+W߉'ӺUJuDG+~;vl5k9rCڹsgMGVJ`0A`` F;z(NMBj&E^ ]ceO<`0=|r//= q'Uht̵RzH¨T9ӗ^.]4C=@Q_}դInݺcǎɓ'ڵ{LW,|nPUh䱊\޽{-[, +,.+qpO `ǎ;w}QTgg?\"JAAaa!FsuuE 8pժUv Qgg^N2Ҋ666L&.Κ5kٲe999GAɿk"-- SSSYBPLsիٵkٳu/!FE,@Ο?䤢 EEEsS-OH%*qxkz;s R,d</((0]KKkΝb蠀EGGEwŋevB8N=edd Lf֭tR_!''TpfΜrttHM:ER}ׯS-ʕ+! 
vY__/JkjjlB(?z˗/o߾CѦ.]C wPO#Ľh~ʴ)rU )J~w[Mt8(ߝ;wq;v۷o$JO?!X,ֱcwEBa#B⎗z5osSkkkEOO/55U)EgժUt:633S)nNld3( ƿ§TJܡƝ|qow= F_1k4Gd('Pmn͹~ݻw)"" [޾}mw܉BR044|ُ?hnnfr}y(..7oNJJREٞ"7i$333>4 (=NU#%h qwn={(Nc`従OWJ P7X ӧ7%%%ϿrʁZ/huX[[KcPɻ;vءx[nyzzHBBB~~+>cUfѣG&*n ~5!-j_!qϝ14j\KH-PUNtP(Y@@1cfϞٳ-[ܻwɓ#:.@wotӧO7oߴi$2$&A^=ZOmAs#ʷ"i>\ja<6l&sN>:=ܹs\&: wȱ8pPicqESmz2 BV^ rw ihhoW\1222eڵkE"q$HILE ͵t 5`La/2=;&O&Jm6a„kĝ|qGae EEs7xYAݕi\\\pp0лÇ @AN>2 ^H 3Ygo?$BFm߾… zzzǏ߳gAHIL"$قLUDF:f&9|awִiӞ={  .]7yĝ|d#_S!c5!{NsWߏHrv-3xǏ""##  ?\s8` ajj]=|ð(O6];ɳ _[%$5N4P]BjH;趘L~J&&&<~ T* srrbX:::˖- 6իWypeeˎ;rss544###``8qCǫ wޏw qg C UU7;}ϗ!O5Zjյk JKKǍw1# ݷo_QQсJJJv=|xB=pǯX"++kϞ=p8o޼;v,Q]RiTT;tH q'1ܙ^VL)2z!' 28qӧOmllCBB$YaO?TGG/11ں!!!xx<^``֞;wð#FP(KK˨(cc7o(R颸 888|?H-]N?5rR0#j9ϝ9!Ľv%@ɓ'#~י3g @}.]tI[ ˗Ϛ5kϞ=$kd3f@]xQH]zï^j"uĝ:pP:D |>Aqor.$:TKWW_pݽPȀJ0*]\\Gb OAM4E;nӧGEhŊH q'`;#b~NUm`{l0Pͩy7aQ԰}hO>$--PooW^v6221>NR< !4dȐxKyyRb ]djj:=&)"ĝ:}9-TDh ERiա3GID-Z)((3f >L@O0q .;p>tzHHH^^ަM)~vQ^ ]:Gm])kq(d`4jU2!rt#@&M3333ydYU+XRRR```FFFqqÇ>}|(){޽{ ܡɭRYKEEEGE.Rr/pE:4=խ` 㵋J758li)c@@?~y^P;vl5k9rCڹsgMMM0֍ 00F=zNwxyrsY5KSSS.ZW"E wĪ2LMV>2Z]auSu*Wt3@H;ZEݻhѢ{ #455lzj+++|ђ ݸqc'`ll\]]:ſ01QoV"E wFa9&s&A.?Ɇ%%wkye[lfWnNWWڵkgΜٵkWYYYDDDGAw{B MJ*r͛7KR|moYN ͳ7*}vbb={׭[׉Ӎ322d9 XlM.JǏߵkWVVe###ED"5ĝ|wxkOGs2+^D*`0A^4ҍaʚ-{lYAa":(T`:u?z}w%o\#(ѻw_~?||Dn('Ji4Zcc#>yK!|djj?ɓMz{{ƶSSS- ,DUUU}ΐǙ3gn޼"y&Bݽx'gmٲĉKnnngϞoqVHA*"ĝ|qtfT^$0_aG2r*"{ DP zuֱ젠G7ƍ{&:(ER߿sD"+4!b<=====O8!ڹ`f^N2Ҋ666L&.Κ5kٲe999GAɿk"j*Yt:==.R?HIs85&0z5uOSF|5޼yRRR>۷o ~nn˗N7:tH,"gam|QLΚ5رcׯ_711r#GT pBP"7-H͠ʙ|p Bes9m3!Ľzs#m\pAKKן}Y_H맟~B͝;aaaW !:6mݺƍwKK/^0**JOOĉ/.аfXɾ׎T?wANb׸#,Z^R$-T;`tB5c`C+S|W\~رc߼yCtD(j֭ovss[zǝ;w"""h4Ҕ$>{477a Mޕ\nppQnݺ+HHs dqGڎ9ORyIWZX= K\mHϮsjigt?xgaa۷mmm xxxxxx56w]];vرCE|ϟ_[[y~ASSSE}ft6w\i[~gC q'έ*eMeI4z0cBQcoom6]VV6~,#n waq`{aD%2k-G+$V;=[KJJ<<<'HIL B9BTTz4J5.PC^~]GGݻw&L(..&:" q'وBi;B:(ixhL!evsxH9r+WLfNN΄ ***HILUeBLE/+2JE3aQ*Ph1d3f̘/jjjfdd|[ĝ|d# &!gP*$Jiki,Pݳ5pP+g)Sr͵<[XX0 SSӀ|n|#F0`iӮ\"ݦ q'1TueBȉ5!TTZX&|;Bs/=˔)S"""(ʃkfрK*999X,[[e˖eddի >>"HxB … VWW7.//߿_KK+""ퟮ.R'HILB aN:#F޷IX}AϲxPɓ'Gm_|ŧ~痘hmmА@H<</00Pkkkϝ;aXLL̈#(eTT7obbb+tѹsuuuB .ܾ};BhÆ wո+#uF b(EU2 SwCR!/]!믿mtɓ'o-,,RRR222/_NHydӧO+tׯB-ڵY,Bݻw:W ]NrG5( km{2*5NT{DZQ( &H \vhի׮]$jJ RrUVV2dHv\)E۶m{ٳ[gffr\?tzHm q'1A"__Iw,@444 & g͚SA3q .;!!!yyy6mx- B]dcc3bdbʕ+Bcƌ111ME;3⮤}2wrJ09LbqpQe V,ի ^^^yyyDG?322}||>|ׄt?CIݻw;lذNnZqW*++g̘qu/szHmhD:O$A)(\cmi^ؐhRFh%{wk9MBt@}LLL]6f̘ROOG; 8p ((رcxFDDDRRRvv5k9ҡKܹ#2 u@  hGwO^h޺\Vԡ 9DRUUE9^ ]NO=DAUG )g_QjMކF+)۲1;v\#@cbb&NW_ݸqCCCz"nܵZ4lӘ)o-xݻhѢ-[l _zU/Z!!!!7n[gõԐȣt۷o͛#]\\?E]fP4_N:#/lȫl*gkVA[L9ÿaџ53#@ƌwo622;zK DG *<6o,Je^^^t:](&$$4J7nܘ[KLLܳgu:qqFF,Td2 O.JǏߵkWVVe###Eۦg.XlܸqҥWu$P,Et%ٳeMዸ/y\2 (TAVyڵk?niia#q& L qgPD"n[Lyd2yyyƪ͛7A# ?B>#--u@ ⇆_ #rrrZ+8,g9rdsYvm~~>|?Pwe???DvYM]~[~R0#kx|N$a K w} UOP#GlڴR=!!Dz^|yԩxCbqhh)BhxV~3/?j۶m|>ԨQSL[tuug͚u1ooׯp|GGǑ#G*xǏvQSSSFFWxP(Kt:u !zfi4"M TNNN*X*.(.(|8Z+a~ʴJ&hGKo\Nt, ++!4ˆ駟B,رcvBa?>>>vvv*##!d2[3AAA+++---BY[[wMdQDd(j8nk+Z}QEZZU['-*}ZW]UZh$!#W< I^N<2U򷈦˗/{{{VzQ)))/-ڻwŋ(׿7zH%̦Qh! 
kdi6o4\UQ~!Vnݝ;wrk׮ :Qș͟??l߾)SÏ?sM6k;FP(ɓ&MUVjj{dd+W5kV%_[hbbbBCCe2ّ#GD"Q:<~h43Xc/QꅏvW2k E)u\WSrXgNΐ:E1FΥcǎ+Vv=k,w TI 6r:hyJqqqqqqVc׫T9sL0A T{7E vܙEU8x^_I§E`KD_8lk~#!F_yvA:B9r{wܙ:ujV%X3 ;*G$p~@CHAKvܙÇ_vtPeƑ#Gvȑ#իW' UmZHg#TmLҳC36Ϋ[k pm?099ټh!d.\6m,]aaaaaaR90,ũP׵#TW5]{U퓣 (>T>iΒDz\]H'BȦݓ>djZmTT۰aͮ䣞>} Jvvvd[d?pg$4Κ2,L8-cxkȇ|H:BԬYsƍE8qbܹ ҵkݻwT:FZVeI'O:5==omҤI_U.t juII[Z^N`lV-4nkoB"I.D\q"O>&Ly8qt .]u파ӧOo<F>%8O8jժ-[ļыɯVeWrrr*EC:zeʔZk74embbbH餅a„Lъ_9J+ R ϋ Z۶m;|Ke;VbE ߲eSmڴi߾ۇ ңGe$4>#)qR8n##r%_/8|ZܭDٚX,޺ukvn޼9eʔ˗Nfj꯿|d*),,|1iӦ ={s˖-'|}yuk;w4j⮋ 7nٳgSSS~ IIIPEU[V>|8yGiӆ޺u't[d{X3[Ge P33!_L/_ۥ ^Y$z/ʭSyrk0>]HHٳ+W|"B_ݻog׮]k4Νk6W/^xW^VޢEZmhhh>}WRA///F͛+ZE:{evY^_V"!o>M`>52dO#p)R]Ώh}E} Ulllh1b6cFw_5 2dͷo^lٔ)S 6uTʱlٲ:tŋ?Cz]ݨQ#BsNLe˖ws-x 6毿r}}}{GڞoalVpYm^UE1nXU!!X,VBBL&ʊ&1O2%<<;wܴiØBiȐ!tyf͚E:c^tf;B@+ik5hlb!@QwI...ڵkg&!gQh, &xMsf/*OHңo[z!8 />D9j:&&ߟDFFV~ϯDF 2$%%Ūi:6,y,^3q ഌǭFSʹ^O:B̜9E!22t2(sssx<^FFF|||``իW->ڵk 6\|+W|;wnڬY;v U(J)Rj*Jx\q([8+ DNjxwoHA*Ì}ĉ)))~~~j޽{:th4tCtѣGJRR;tԨQ6%T-sǝæP[q(ΤQp٦/*WW6'h,qthѢ`ٲeΝ#! wJcX,Vzvy޽ {ܹׯD___J˗/ TT۶m/*,w܅~8 IDATlpObc]TM6e˖FqĈ%%!B0p߾}{iiipp򺻻 giBBBd2YEեKr劵B[ٳ;梁j{bJyIA.qF̷~K:BUǎ=z7 0/Kzk`t PʢƢ[lҨ^n>_E[^ )YdɅ HAwŀӧP~dggW`333.@PPPgKl#.ᴌ}wm+l?'U>rFӦM 0̔B{VV򗮻ZqީS>}h޽{9sF޼yO>}vGec) \\0Ѷ:r3%r1hk~#!\9?`qBpHqp/c^x*m۶#F$&&k׮zHH{elݺuӦMlJFOFwD(muZn?DmUM90g醢7GωېN,\p…oB1 w"d@U{=zBaZ233j7mV`WڵkΜ9}6*c)R?%9,k \yD̙3wusαl҉Bm0`T_^zcwQPPnݺ۷o|xƌ/^?|0tu<O /4mSo"s;BUH,^wG$=~ҥKS ߿O:c@>`qƝ={655|0)) ^@׬YK.m߾oySj|>I&V oU<3$ ە5O(#{ ΚI?B:B֫W&&&N}Wbcc 0g>矋D;wN0+GWn{}|5<>;#՗Iն.p}N-[Jrĉ[l!mԮ]{ѢES `{?Cz]ݨQ#BsNLe˖s\l:TvO>6mhժUsqqT*]eFe HT8!=h!gyǓ6sWmƁ6l8uTHAJ*cfyZoq:$(}`t1_䌦MV~}Ng!Fx@_85=ހMwׯ-Tԥ="![W(7n$!,̋Sii'ΐޔl`/'M rB)S撎B`Hfu{, J<)-P3Op`98\T*cccIgA! pg0ק֩l3PC' dk ,7>}td>uödm Y;fh#0ԨQBBBh;v` !^ w=#>*ϏP,y23w&![cXWfׯ__b8!ZX3ҋ'@|'ƯU זu2HAZjgY22XԳp i!QF FꭴD:B6o޼jժi4ɓ'΂B̝vK@@- cQ<烁J?9J:B&ɾ;HLLw0 r.E\f߼yW"n/VyZ%餳AycEK?A:BPJ5+ wB,zs=͸@MaO7$ S wm g后͝;]Y!{;rX`O2|y)(z#|qӥ:߀G\r,͛7=!;;ۮ2f=GQq,-APr^x.r:QQQAAA4M7΄=Bv wFzy;H{ {0ecQPOH2(U dS,kŊE]ti 3X3,m ɪ38-hn#>fL%ʟ#![kݺСC`ƌ* X3ҳ; .vyZ&W(tX.B(Odq ٳg΂BX3ζ4a>f. oFu8Tg̘Vs8!;3=q]oqYഌCPD` yv΂M0^zz"!pg8y{+S o΂ [&q \.JA:B6,Yطo8!g;#Q .;x^~BHVf'*lkIAȦӭ[74i^'!԰pg6WUtOV6FYл(AgSotl~p8w]j,!;#{6T`i[ (A}YлTS? )!ҤIѣG9sJ%8!煅; lcP<6 HgAU@ҳдros5k\.ϟ5k,!煅;n2vpI}@HgA 8l}Fji)ww3g5knݺE:BIalgZ(VQOUYPV~Stljر7 'O&!䤰pgQ]RkS8i!MǥY ]x18pСC &PlVg%yo+:S), P\!@QG}IAȦשS'CV.8?bn޼~zYBN w3O-i5y_*#ܗ.Ku_~NaÆ̙3 HA9, wE< dP{p]sXH: ,md_=qtlj޼yO>HgA9,ϼ>nGe @g oYPqiԵm %6I`ҥJ!͸e3:*| @ qLy~'!^zQQ7|C: Bȉ`xw{2 y*.]A:B#̙ ׮]#!,pgU|xӦMM&Ӕ)SHgA9 ,OdAq(N, 㴌c(AkOK: Bf͋S>|aqBN w3*c;{٥ĭ!hХ="!ٳgΝ`ʔ)&]+"gUnON-@DU.QuD~Z>`26"BUhE]vm˖- DQ4cϸT='19V|6(JfqqVZ 8曒qB wFzqqG@wd>~XJ: bz~ն}l%8y<ޣGVXA: Baxb 5Mw%G!KuʵHgAvj׮EIA92,Ŏyqj60~AuK(pWnH:B3}tTh"YB w3ʀݟdFց*_zKpswtlݼ_qB wFgQ<6s'IgAV%yw΂?z%%%f"!䰰pg{A):|CKs:n2=wɭ d#~-߾}tcwƝeOFt D֖ME4ɤ woϯ4%HAFׯo4ON: B1axl  2"%W2Qlq:4Ù7o$%%?tg=g|2mP@: ^-INuH8tl? 
izԩ L)$!B6\T!Yl@/;7(4c9zE-XN8q!qB w+̅;SFeJ[-(W=JnhO^ !ڵkΝ`4ȊRX;sO-c_x'Gtd-–]۶-8Ȃ (tΝ;IgA9,]e@bavfD\ twhn?f I d#3gFqB wG 3N* Y[& Kn̛7f߹s'>>tXk޼yS׿kfS/w,P3mf*xPRۥ8Z]jO\(9;nŵBLפIO?tͳgzhFls]ô;#3Ak,RJ:"'= ^6d+kf~O7n+VLwgݠAg-3K&p::{{SIA?sj58!ƳP׬YSR& zkb tF=̭\\,4>NĵMKȏe-L:U,-]tY(###322;f4^UF6uaԎf-%!B'N΂m,.,ق`ҥJtY(cbbzk.Ppl4qI&rFߓ΂b6 S曆 ?իWoРʪ2<ՙ;v4k#x*»Y^|oquIo_x|i#/տl҉.L6iҤ3f\򫯾V wUB% w}WqfffffeQUb#$45Qtde,bqGǥՍ7Yh?@:B,wMF^q *1L(<~6^(NMI4Nw xNu☘)SYf"BmX(?@o.2Jq0yǽOw(u9w7N6wṫ>>bW^ׯ_dI^I &V.ΖJHgAڦ0 ma*)%!=zwii HgA1}{j<f͚p+VAJdT@Ʌ{╚J.ϑ΂lD rU΂ iӦ >|H:By,G5kMѷn*..~aaa;wFAɓ]f̫7e: araHLN8 Btnȑ:n޼y Ba6mڵk5jbb5h`111:O "Fe[8PrHgA6"έFrͯg_5?xtX(S챱~ּ;X 3~TZHr賸DiPb  %!1bDZzIgA1===C*Y777bs٨L wn#gрWg!h/[0 HAȺx<鞐F:BI,u)nzY!z32 TLq7k' ٺۤ ۑ%r1i7'΂EFFb!,-[Սi6 Zhap$|tٵK=8wtd;lH>hO^(B:Bl޼ʳPO6eݺu{>~RiZT?~O>֭(j̘1Ɋʔ̳k=yj nT]kF#8YMw!Ty  /`;wvwwr;w޷o@ 駟BCCmwpiPY{.+1_TREgBQQ(4YWY=!!!*LNIIS6l6v#FwѣmՎbQ*Ok4Nc\x~>@ACtk5kĦ;B,PFu֥iiiׯv>T]aQ ;<"ٔ,'MF6$!*kAʨT^֨QZ) W;PC-<h\lXBP|(:8Y}}}z… IgA1K2 N:uԏ?2ϲs'xi].Րũ@&3y>D rLmZj[6.r+oaG:BcccM6M>FU]V@II N*;֭ާ;\m*nI:)EԀ'_7vt(**j…OylTNMM8SPǥ&`YݤPptiJ:8YرcrV1wSx<f^z/_nήuzr! 4hh7Kgґ΂4\LЋ$yW+WV !)ܿks0p@ݻwK.6^Yjء6Sd,tDlP.QӤ dEƍH$jzʕ 쑅ũ ݑ]yTuss$-p en-q6tE.;~Gs !^dpOLL4*R>|XvܹsM;05t9,YX;<"!ҏ%v5'"!+8qKnn?L: BX( ==snnnk׮-ߴiӠwZ3*뎢1O9֨ 4(x4ЧtwR,ې@{bɭqQF%K{!P ӧO?r}||ʮyyy]|9,, 7";[;^ǝ<N3Fqέ M+F9ɓ' 6΂/ swɓ'cƌ)sZ9$z: nn-Jϻ^ptDE)F6K8K84YСC 숅رcҧU(:y'wϣ%T&ë-y=PoÊp8+,!;bpOOOQFU())8ojkqw܎; 7 .bs^r)]ˋM: B-2,~~~999۔ӧ+둵ѯ͋Ss GfM΂a n>W"!k6mEQwٵ wRB=cpoժdZbE]jM-[B0639*l֭Zpm$hq9&M.\H: B^X(cbbǍVq{FFFLLl UpW}ϭ+T. ([}teڴip΂  ͛YfX_$խ[W*řLŋ&+*S2z]PMwOwQS%N)G OsIA*BBBttG=gaÆݾ}{"(55Ux]^t髯AJTW:-ڐO: "IqN5+n#!k173gΐ΂"ruݶmF~^^U"_iؔGCw9[gKy:9VM oNTG:BVan߿u!gg6Lׯ]v{ ZڴijuwPЅ;J˺]9;ň).אډcӧO&Mh^h,!,4M7.::ѣG|>_(}Ç-e*gelƖk5yDq<ݥu͞'OIAQ ۷oOKK#!D}߾}VW.((xݫP(/_~(ckڞ;\QT ¤tV F:\kժe4HgAdp_j,^x̘1\.:EQzڲe ,]ԪqwO58ڀ"q ܸWx8U=.;i$ظqcVV8!b,W^e~{"͛V*R½;\g^zRy"a.T\B:BU/**ã$!D]պ8"Ban.~BFw6: tPt=UptDgS#_O?hHAaԜ׽1ӴiS+Cbq݋c>EAq>~4NҟP;vH$Rk֬!!D=44bcc_)S@PP¡`єJ?@QjU\O0o/mԨQlٲRqBX('N(Hz"x={:ueww3f$*zYS 6@ik Gj!Gq؊(=ytĉy<^ff͛IgA`pݿ:u<ثWSN@:u{jժ^^^6_ES]~}*p(N;p8KtXFz /&!dk*_p-pe@ʘurn\@ܳ#7Lʵp*r<p…Ǐ΂ȷxM6{Tyw=A RsRyCIAQlۨo=+Җt"RXXXvN>xN: x,܉x]'pw;bup+k멼??ġ! EkO^Kh XD:BUiʔ)O>tכ7oN:BF^.7nH$z#:lv+&Yu#8.a&mjgHA*ݻq#HAHՌ "^7n><;2cIJ(8zVކ_Ϗt" EQ'O1bDbb}}}I'BBe4~Ӛwܫ8]:)z;΂셤{{^@ʵ7Fez~ٲe lrn2֯_WvRSS[ji& z2.,(5ҚR'TjqY`##ti &Ək׮UT lBNq㢣=zBa٧._<| &X9!*O-wstXH: E[@o{qJGH$k֬!!d  }ZJ ^3_߻wBX|ѣGw6v1wh#(` KM%g΂|H?TXYJRtȑ|RqBVgp_j,^x̘1\GQT^lK.jD*–;O .dwN!wHA*M0fffnݺtY(ܯ^flE"͛7 YVAeZq= 뤳 ;"ږ_hZnmthcQ`ɒ% !gpjnnn/(Pk`"*\X|3#@)΂ EW?)8ttɓ)uBB޼y󜜜r?ӴiS+CYg:]:+zUMRC: #]*qQ!*Ӽynݺ@\\,!P@llo4=e R8:;یY+ik9WaM'HgAEi_Dd**΋M: BUipĉ/΂" ĉ%IBBBϞ=_TT<سgONl>c DE/7,A N:qa\@䒛HAt% ,YB: BȊ,S{u)SN߾}O;;̙3sΕH$g222^j4MO0ᣏ>:vV-..>qD>}fϞmVb^]6c ҋ%^@1G?֯_O:B**UX#G&N痜VݻסCFa0*~l||?pJeAAT9s?&񭥂3dMSX6QlW>ÖKM%J9#GJ$V/΂4Mݻw ҨQ'޽k^JڱcEQIIIAAA,^z;ww^RRRt3g5k :T$q8?<**d2mذ2epMj6` O@K%7p*r|`u:\*pizrĉeדnzΝs-[, ȑ#֎}-Z]tww?ǏU#Gm۶:y}ӎ@#1І/$zc?(o -r0Ǐp8 ^91cƏVX,6_4L *))􌋋[lYHHHIIItt?v/]ѣTl۱(..Φ-wLxC=RĢĢ Dzې:N=Xݏ WqhՌu"Bj9sN>}۷o7oޜuBH)['&&::: wޕJݺu{qݻwϦ9{z2`^شiS^.\جYݻwO6ĉ+ͽm۶*ʚ<~x||\5wͫsKLEzQjp˩SHFu G3M 8qZp|[};2xKH0`@&M.\H! 
[binary image data omitted: continuation of a PNG figure file begun earlier in the archive]
metafor/man/figures/selmodel-stepfun.pdf [binary PDF figure (R Graphics Output); content omitted]
metafor/man/figures/structs1.png [binary PNG figure; content omitted]
metafor/man/figures/selmodel-stepfun.png [binary PNG figure; content omitted]
metafor/man/figures/selmodel-preston.pdf [binary PDF figure (R Graphics Output); content omitted]
metafor/man/figures/selmodel-preston-step.pdf [binary PDF figure (R Graphics Output); content omitted, truncated]
挔in04]MsVt% GhePMW^BL4͌2AӕJv4gM^#B\s4-6 +B NA .Pgt=iPtʽh+Hh .M+h(Ms[4%hδ"a]h4tR]i0԰rCEtՃE Bt7W/MoNޙwͪJ7CMoTMo&49 Ehz30LhzQ 4BhzsChz3+Lhz3f;靉L#4S~)Mo"4lwkд6ތs%49$#49NhzsA~YBӛ7B;4D7D~)M?` @O0'NlMo2@7 M?` @O07WM?` @O0͑*'LhzD/Z4SI̸CO0'2'Lh 4SL'4S~)@<` @O0'N%Pnك(7A*8$N{ 'p=e/ZJ'+д)M+b_4%,V 4` B 4` 3c U^ M MV~Aw2Ԭ @Y%i4&4>"4Ή)UtZ h:1^t_ȀSݯ~A : v?O'4E}AӉs zhڡ794 wBYP>0 ?6h_/)$t 鬐@B7-4m ?ի4mдAex6_t^h v?NvSh w=h%;4gԃ\U=ɥLՃ\S=փP(AOnQ=zŬ=|~AOhPA|X_o^:AjAhYz(TTzzO A۫`A>3A^5A^5AwX/AWgShAwBݹz]sWL^1AzAӦc=zzНS9zНѹ6;փn[PףDݡ tS A׬?e=/h:7= Wg{Wt[t+4"@Ǩx?tzMY d=ʴRՃVAEH+ǧ]+ߧ hY?t$Ah6BL BM+@t h՛4]U_tU==BUP*tUJBU$u CM-t'4]TOS/tQHBӅulM)tQ}6Bez4]EtaHAE0%AO=SBӥ 4]M.ܴ)hZ!G.Hhb'4])hy!4AӅ B[3,[P1Tt^ h:/B h8Mz̀ dBhZSLh:1St4Lh:5Տ4MM'4 m@ISU}c@Ӊs'eAPAӉjMkTh:5-h:qZtGh Q!4X}DtʂNYM'G4Ah:q9_BhZM$tzC;4Ο f(>-Vk[MZZ Ko[s>쉎*RzPz?%&j{U^3 Fz "bj2-^|v"D+h5&dh~a L.-Pb9P}9"K"ZDdyh%06ےZ[EABoOލ$8 ĠH z7Տvz7~n:H&틈^.2U v}V3ϻA>o>wHY@I=k'g# cDURijjx;"k%ig vV/[U:Ig$MU pr)Y;8k"|*E[?7k!歆UۊpYm&AHY6`ݓV,H=iUWX^buhGU DJ?h֖:;_8fǍ4eeV%FY1k{1fG#ŬRHŌ a֮1&FY/keM^V!ڽEvY.kCem"?䖵زMZ>)B EfY[,ksef"JU9JVV8ae!6D'I*d Aeը!*4ĔUv RV2ʪCDY`H(  OVEɪB:YUY'=NBa 2YS& `>R,d4!&dUHzF+ UZ<GVy*By=I+< R!E!.P6`롰7F;lRqq.*gZ6DŁ?^O}B+@bY?Z&{bΕVoXqpkUvXPNj럄<0)=e" /|Re /n| mNx-ހ,QX.]L%iTZl3 gZIe[B@Ƌ3dRqJ+8avo7?/?/ㇿifŷ/?򣦏_W8%_$?oML?t!!o/,endstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 504 504] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus 96/quoteleft 144/dotlessi /grave /acute /circumflex /tilde /macron /breve /dotaccent /dieresis /.notdef /ring /cedilla /.notdef /hungarumlaut /ogonek /caron /space] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F2 /BaseFont /Helvetica /Encoding 9 0 R >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /Name /F6 /BaseFont /Symbol >> endobj xref 0 12 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000022982 00000 n 0000023065 00000 n 0000023188 00000 n 0000023221 00000 n 0000000212 00000 n 0000000292 00000 n 0000025916 00000 n 0000026173 00000 n 0000026270 00000 n trailer << /Size 12 /Info 1 0 R /Root 2 0 R >> startxref 26348 %%EOF metafor/man/figures/selmodel-preston-step.png0000644000176200001440000025140213750017352021110 0ustar liggesusersPNG  IHDRC IDATxy\Ug32ϳHN9 *)eZ6x}--G^)4MM)P!ͱDd3A>k={?xZ q kX$;@`X$;@`X$;@`X$;@`X$;@`X$;@`X$@ht:s+Qܿp֬YL*99ѣH-{ǎX,>{1FF;@`X$;@`X$;@`X$;@` BBBڴic@ْoDGG;"$`4M޽we@r[n;P*`X$DaWWrY7o.H|||޽k 6ر^_x;wmVVֶm8p{81%so߾ 6ӧ߹sŋr<))wţFjF ,#q4iRHHȫq\_d{e&66SN,۷#)))66x1e$Gٴi͛7={T*UΝCCC+]]]JD?!0HܧO~om۶DQ}РADo0H㱌ͩ>>>>>>666u1''ZlYo5Pe+O,;;;;;\.W*f ,c+<___StTV'B5ùT<MTO#OgezBV};wnݺ.= W^]nݱcvn7uq\3oyQXXXTTT]&_xzz}cǎ;.=߿hѢ\/3_~yׇ4D"DLL f9IfT"c(++7w۷oWTBvyWPؑm>v"5.3}0Ξ>}0F9Ã޽[=%%r58: ]-:xχzyl\ԩ1kȑ#Rs*/ĉEr&YSf;|4ܹE\ԻwQFiǿX^믿r7bĈꏲ9[aٲej[l7ENkO""cl(!CUuvKJR3D JNN8p`_{K.+ 
.v"Lw^abcc;uIJl``}<<<=Pw""ƎOs4(`W{&jkO;wm޼իWo߾=uTNgJչsЊFWWסCя?h=$P 3WBD]Rk8C}&+h _Qx#uy۶mQQQFH$ODU DDakC4h#;sEwM^g<8&/f[wqR$@1|#Gϭ[޶m8;rrre˖U\cLڈ!q IH`"te-=E^^_β &?|3R(Jx8p@&EEEݛ?8FǑk8ɩJ3r\@,YNt~X{~Po"*j4qTVPPX8H`| /_8q۷FسgOlHiӦM{EK^^A~gyڝ26T9Gu۽py߻" @DEEEiik׮=8ׯo>f"ںu˗̙y/_^TTTz#02G_;ԏP,ZFOħ)kOyz]Vn<sߑ73A-Zܸq#33܁<eӧOO2EG[Rg}6zofAAAU^|&LxxxVOe2瓍lB1Uq2]5[^sѴ/dٟm֝pNppppp0Eqǟ^8dHh.\P=qbo߮H+𩼝z qzk*=БƦRdT6NG7bFgd.:em.FϞ=Ko`cc㓖R-\hje*cݽ{J;? SZDT*xذDWML"twCIoGSF 8N(F<">qڵkWŰ0":~xvO>%Xqzk/uVVkP*)IMDJkTF=exH6-HWeV0!C̛7ƍ޾}{ժUgϞ0aܹs >݊+ooVZ۷qǎՏ!qzcܨRu|$+T9@6um߮DT|T7L?ٳٳ۷uVݰ\\\.^8c ??wﺺFEE]r%$JeI ZKĥ)OSi,XF((9qǹL|7;GMaٲe˖-3tVI[qe*Use=g 2y݀DTr jfjDkW"Rruɴza\&߃.~@a4HI+#'%')søLe/!h<ÓĤ!0)e,x̝j0.fy"?&_gwxBތR4\N@D'sjq6Qw?|h$ 2Drɧᱬ(m}/`LˇP[=y0? ]~dF(p1Q:8.7~LKP{+'"Rw]%#J5b"-nK:}ͪ?S 2,BO(P$&"aP Y+!9X++rԚ%_k=0;z_/-|@f,wxr>ҵDD}BZ\݉H1]J/|\a48ܷoC;"_-OJNtQ obٓ@t#5WgqV oݺutt۷4״iӆ _ZZP(N:K/}'ƘqCO.T@DLE:I(>]7XCҐ ('8CO,Ynݧ~v_k֬Сùs >׶mbbbBm JJJѢE~7O׸!q'FbCD*Q^Z]Z^BD<;ؽ{w{{^{ҥKJ_V?c"Ǐokk+ }݉'oư5zHDD( &vB"7ML8 h\EcL,x~駝;w6oիoߞ:ua'JHHw{TTTI&u޽İ5zHٳ=&$%p)'q'"ɣBOs۶m5jT~~>I$`a ;ljhСr{.];N!q'QNGDDW6]5t@6c'~A[`YÇ9rnݺmz1fIOO'cgwx*VDT̺s(]f͒P;;[]qizV8p@&EEE;;;Q=KKKe#˫"8vXxxKXX_m_7$T)Re.Zn ˗'NxQF%&&ٓ/.22qڴiSE>q߳gODDɓ'Z\.2eJxxJeeJhDDaR@D/vjJ 1oV]&_:}H4>ׯo>f"ںu˗̙y/_^TTTzc^^ڵV VձnBB_|;<@!#憪o#2ZT@+MmM}/P<Ǧ{Gh>}zʔ)-Z=z4"J?ѣG7g JhhXZZڪUT*=zV}׿꫏>H(D:ZW(b0.5/4 ":f9>Ԫ]Ȍ:54-8SN,O8dHDD=ˋƏ_e=~ԨQBPT&%%pF;<"Ⱦ\WƷI=w#`>(tTܯ6JJ:gϞ%"gg666>>>DRir>>;wxZ"!imENߍa~4= | [M>J<`8ߦ.z٬YΜ9rС_~x,ƍu:ݧ~TěSw޳g_~%""bƍZR*۶m3g-_w6n(L(GDԊ)enkLӘ!AqkwjW 2o<"7nܷ~{UV͞={„ s5\ |ΝkӦݔ)S ŴiFi=$ j(C/sϖi( pQϖyeݦ!tu+K7ꕸfy\\\Ϟ=gϞo߾[dF??k׮M>YfJiӦ+Wr{}T X|Oɕ9^҇;%ƥ);ٚ& یdf|mZi8aaaaaaeŊ+V0tVHl\Lh |MfdHQY⥒gL6/!qH!JWU4v;IY"JH7iՊ]D/m)xHOΊ0OBD'ӕ&DF}/4xH| (I3dK}Uʤ"VRXv)0$` (/*{JVOdqSoFQoW>m ;F5 3O2|Dt" 3jy"*~@rw0`>wlSJ3}TΓFydwhH0EB"R RZ ԦJ>|4Y7} #H,""VlT-CDbDTzB陋f!qpbYGVOD"_9:_-cC!+G܎ 7~7WO;LHDDeW2r;(ݭ|Moø;VW(br: 2 3buVa*ZJJJ&Makk{M<$`0VT`"j$wQ\ٶv6naTwd{+ F>ڴiSnnnfD"iT \X5q'p H3[ I۶tOD'1FĉDwk׮l4*H`ZDDa(r< ѭ|Mf9TG4oJz}2#hr9u܁4BH`Z,qDtGsʏ:xX"7U"bio06C 0B#a&[*#CdC!+ܜQY⥲K T*Ղ Zh!Hƌ\qH$^^^ݺu[nRY2ܴi>2 sU{;ܹsZ%KZjeee7vػwV… oFmmmCBBƌs.\jM}W^Dt~}7#ڵkz-H;a„}ݹs__oҤ׬YӤɿnq\\\\~uVXXXvvƍ,X@DǽK,qss+**/˅ :4$$b^|6mToڴSNiiiM4QTQQQwϜN<ٯ_?"믧Lr|WWק0M;?\8^呕DKS=q8ع3&wz: IDAT>mbƦd0Zy Cmذډ($$dڴiϿ}6r9 0eE =z۶mGyf}O۷o_XXm֭gΜ9s7n- .T( ڴidɒ͛7ϟ??ܸq[oUe@k.wD޽-[gDo/^8##ݻH'zFJ^iI\tJ3/r[w Vz|юB[|<QR[h@ 2֝B߯_~-D<-qذaJ= qDm^^^Æ eƄ"9sfI?͛7'$$pWc„ gi׮]+㿈hd/##bjK P`Ő|ΡP,oR+/p{7+ QÕדk{~>`6@ZI=D;$((J~̲loӯ^z…cǎƘQVQhhhE"QIII^^;XNΏڂXd`1DA|'CR婣!&sT?F^{Nmr}::: :t͚5O6cԹ\N'wcYˋݻWl4gS"q L^-qob:sFցlu}s`"ew5dȐgϾ+۷ouL&;}?oYtU˙z}nn.;O #͛7o߾_oժ+X,ѯ_^QrrR?2J%EUiS{AKg!ť6j"b"_'Uݗ;jp.zff/bKD˖-W_Q^MCR$dLT[RCZa$quʽh􂂂hŊeeeDh~>}޺u3.X@">|'"L6w͛73 dhqٱ%2A S2"mLkq䋢&N(LݰaUdd=ӂ uf6mO_DUVkΰY"$`x"1?"qqy-3"UߗxEFF;vO>j:,,lŊ'|2l0o}LJif̘1.\QD8!H$JP(BtŅO7O&?gkEpǭ<'1wDq56bĈ* 0`@?CQQQQQQfffq!CToڵk׮] GJ5v>uT-S4XXqk)Q'ej9|ޤ=No9]Ê qHe4CT;t8HX=Gq ZwèS33w8@'JJi;Y+!jp;IZ}%Ϛlsw0<Q ZFXD?@JD2eqj%N zrMQMqy2DԷD"`T:tʴ=#1ϔgw0" +ZtS!"iH]nD$6 q H%pp QBJ oq)pr+v;$`| GL 랩HOeX5Ukc֫DrKuH(XS "7Yb":eȺs]Bprs4$`,Abޏ*s'":jGLZ[%;x!qc  LQ'BQ)jLY}ƽBD~S^O2w8O+66asj%KjoرwV]kdgٳ>U;+ԩS#G󳷷߿bbbff&0u-++ɉaӧOW~cEeeeD0Lrr+W 2pe˖T ְDD4Hq8 [8 W]fqws @aÆ?d}]nݲ*\t[tibbT*9rȐ!CNsDLLLxx޽{߿/ տT>ʕ+hҤI kN0̖-[lll*:~x׮]8T*rg͚կ_?>oĐ'Bjicw_nDtA ø#hr?7w4ueeeݺu랞yyy7n;TQF7.+++778&&F*Yf֭|+WL6e˖򂂂7nmv镧h&Ls΢E>iӦٳhӦMJLLdr|Rܹs111F ;H(e"*R_\MiM\=|_ '"yqMfxZyyy_uXX0DԺu3gэ7k׮MII8p۽:::zD'%.'9s̘1Ɔa6mI$e4a6l`ggtҫW?~o ϪO<ٽ{w@`cconڴ.]*ˍw0Q ʅ>uy x!""O7N+=U,^^^Æ HDBYh1 n5&gՇ!/~,.MN{[SW/X>>Dmuw0"`UT>Ҧ5v%qEJ}\jlkP' S~S_RV=q#Xnly*FSf-~*UB77Guy&TWxs~ϝ;'VW+^!;;<==kߢ!q#  
"rWwK}J$)*x.pwJݥ~Mڪ"uσm+34ݼyJq{͛7o0Lvvv~~knnݪhsIMM}Ξ={Ř3fTbm^ߕw0"; kue/8}TI_TrAҠL^^^{sAí M~C Թ䩰p6Qw~Ν;bŊW^_o߾.Jw駟Vd۷ohDvڦM.Y?ҥG}Ye+;-ZTyq}w޵ر?;WHt_+6IW\[_T(ܩ +V& I0ۯeJU|ˍv @c2^%5k&LW_yzzrw("n/ٳŋ]\\&Mdmm}ΝqTu7$uviԩ+W}kPPPVV?@D3um֬Ycƌy-ZԴiorl'Ow^|MB1hРE5 w0`*:[u3T 8zATnܱŀ_>eʔΝ;999 4WpŸ~iNNNz~ĈϟW+8Shqqqׯwvv^re+֭#3fdeeU4Gvss 9t萭m#? 2`\A"!RH>g??%; 0'Z[9׫ke$22&!CToZvc|饗^z-Νjg2ZxxxQ5if׮] ;Ph2DO}KLR-CD6=:J۶${8㫴oٲ]O ;w{IĝHdJba\y?4w4f>{܇'tŋ;v3<<ܼ52HOe})zNWKρRH_|6UtOE~H_"?+4w86k֬.]\|iӦzׯϼyB $lT=ܟ*QUjݟa#h&"iQ-CD8;r*uԬbbbڵkw֭/zyy3ܹsUas*]HDDjQإ'?Tހ<͉TGk` CVn)Vإ ++hkY,ѵ E˅MW>Ju740lWRikxbH$  Ãek(l($9[2UCUw0Z\$Cu* l&%KKE^z|Q]@#LߟZ*lY{wL.ޕY#_8镪o;0$` D""R <8UqmD޶Ewi+DTxIcJ ;Bk8b޵ߟJDߋm.@{IoH@Ȳ=qg5Y[0GGGT@id v,[ח |Sw*Lߦ҃Ɋ㩖3Ba9V\]~usG`  gF2`" QHHD&Y |Lu3z|fձsh~NkQg… oFmmmCBBƌsN:5rH???{{'&&fff2 wefĈ-Xcǎ...;w^fR0 =z40eeeDdee0Lvvvэ7&Lжm[m~999F@&.&zNH iU{>M%R!r ʗZX&Hr~yGN9fpjiӦɓ'sGDnnn7nܸqƮ]>䓏?[LĽ3t:@ KHHXbE-#>DR.]tҥ3 ӣGP{nN7zh@m۶;JeYWWכ7o޼ysΝ!? +`: աZZh"! 'ji?K@ 2339{7ssssss ̙CD .~:ʕ+ӦM8nٲerƍm۶>}z-ݺuKII)**知Rill3g߱coٲeǎ5$[o)?\&䤧ӧhĉP*$`:""R16j֡.)%Z˫!"Q/v6rlasP *Amڴ͍,Y2qDϟw㿘3gΌ3llliӦM\\D"e_7 xwBCC333Z|0o<;;;"jڴ={bqBB\.ҟ qi.wJD2 -w6Se茂v5JNSgd;h̙ Tni4cǎѴi*quu;vl-;88ѡCT;rʫWbe>|nnnV7oL&P5`:"i.Vkʄ2kMK.LձT%%űУx:#p>rŖ}R!6lߗZ CCC< D%%%yyy:ݽJNN:u3g֭[gϞc^>HrrrJKKeʦLR,w0"m\Є#.<]MT7 @KSieۿaY#s>_!%M=|Wa( O\~sG,zyyeddܻw_>W-6ɓ}W^zja^x3fDFFVYW //:oĐI"JUܩK?@0Q^ZڷV\U=֪}kF 0wDϜ`Ŗ}>/0 Bwwweu:]aa˿.GDożڧׯ_~rrrN8O?={ٳ+V}ck:Pw0)`rQX&"rf{/g(-4q'"C_SSr;gwb??ׯӧdRicc̟Zۭ[5Zbcܸqƍ[fʕ+̙|:&bX.geey{{W~iӦK.M0[n-6IE,GT&T"LJD'Ҕz44M6cƌpBtN1^pO?4x`'''^?bĈ[ZkqFf* *--q+?LLL9rd-ry۶m? } S%׽fJniBD Јi+y+)]i9QQQQQQUvڵkǾK/K[Ν;GDU*;v>l-|PVR=1d괆(l$$ !"KZQѶTM9f7|p*[l]L͉e="*JsT"_-c;1s0bUr. IDATzYesGP'gϮf/^|1OOp@fF,""')ɵZՒ6]#Zcpo֬Y]t|rӦM{կ_?y 6H$zъeAf_{][ hY4F(Ȋ6w,geeӮ][n]xk̘1ΝRƃ̠HDDE GlJwj42DBOWD$?pL_Rfp*::… %%%W\ٹsgΝ3;%\蕡Hru-hfEDITeWZ_񐸃x .M"/mDt4Y[k}Lgp<$r|֬Y͛7H$>>>QQQwޭ㻅SNСmVƍwN=3j%է̝!He~d4w,YF➟߹se˖⬬m۶uի}?^z+W$ɟsΐ{ rxZF)nFD)u>X"If&DgL ^{_QI3w8РYF>};w_xQ.'%%ݻxԨQZmmjzyyy(((dJ>>>|pWbccYpV6m!C YX Hsrre˖UZ/_]HN6w8?~k׮P*rfׯ_Y?nڴk׮[nz7vh"PJqqk׈(333--ԩS+]t[tibbT*9rȐ!CNӧc0gggSg*̜33$d!aK }d_"@k<.TQTSkqiօZE=술 ;HHBId2>!1L2}s>Xk\u07X86l؄ *++СC#G:v_*:K"R К'CJ̮sΒf[=-[Eϥ򊕛|azVZm۶l6/X@m߾Rp'xB3g\tgK/}Z2{WXX8f̘O>DsDDO}?#`~թ~*ʲ,to bذa:<ue ĵݞW; 5g[b6muTe>QEwպl2<>(IR׮],ZKWjYYYIM7%Yj]WUnw%i#4+o8Q&Ljl{sNCDLTxgxsOo`…z? 22w `޼yu>2dHMm7nܶm[RRRRRvڥ9r/DwÇΘUVO:U4S~'SLYfMbbbEE#ԩS?i{`0ŋ?S~J?T@'Iᄮ 6Zp+lz04Iо~o>`1߮˟,kED`I7-/\sz"ݴ)]l6ۈ#.\ؽ{w_<̙ +W>_{ kܪ[n ۷o0~wuWAA+Zme9sߴiC='&&ʲ̎o~?`tRVV@9,W>p@)?xsέ_ &8ꫯw73{ٜy̤3G7iNn+R?so )My`g!" Ν>}zS4&M_h405}ƍX,5j*F1(oV{Llj.~ѣGyڵkZl$_K2eJ`2ܬ+V7 D&* 66v׮];vxwnZ~ܹ'svegBis}2ѣGh2y]hU[V{*KTp[i}\l׮|.[j{'OSXqfxe>[۟ꓘrT:?RRkÐʿX)*"""j d* Sxp0^U Nd=-[Шe-k;  w >ͽ\*+ږHQN `u3:Iik}'W9^[s-aNEFk?m?yw夣լeD;Vz,7;  w >e+K4x :I-$&:h̾ @7E`cq3`9P|Ѫ`g!"" `N!!Y1TD]/?ZQ%f7[VGO`ݾyLQccN!A([IML-0Ծ dBDDD; eyDpxŷq͹[,WxБW4DDDԨXSP\V{M- oWmZ2Y,DDDԨXS"Id96/۸ITu̪ Ay݆(*h*h?u؛g Jp3.*n yvN լg(Z|G] w !JL7R$[> I]s-@B"n=M[3bgBDDD;p˲17nw*Zr˛m.w_gQBHZR3Uwˮ۸Ilf-e]rPegQpТ, ̖ܲ|Qj2bDÝZv{;p?Zj[ȯ:r{^'[XLZ.XqGQ`i*`|{tdǹ1}0v iz? bb _1 /bPAA?`r`;n(nYn&SuOطrX]r(1dH\֝喜Lw;Qlrܹn;A( 2dNq w -Q*Uct`v_.qo)%ƭF$j7OLo-kGRJ{(N!$8)GF [o CV46w$S^QfkQpO=pDtАnTg-@e3@Ų^[3BDDXSQVOm#;uןhlQVX*-;  w 9Yd,gۻU[9@e4 a w 9"j\T% *列Ĵ-bH4RyU+7;  w E$ a7Iu KF++j2*c){kpPO=tEfe5d` ӄHf7!YXS(Rx"(s˷2[3f-ԛ& `YSq(4pP!jI:O onCIxe^BDDDBF:"/%amZ0>[J0@ŚrK߰pM+8tu4da0Nx8. 
;e낝;.ӕ @5!wNUye nNk4%[espMX^ mM¨:` !htqѝ`N!*IVZ5&4Tà[FJQF|T8DDD,)D :ݩhp>6Va1q`u(*_,DDD,)t):#Va-sϖ HbQ*7lw]v"""j(6#.wRDg* jywWsjBceۼDDDM w ]ݴ,;dI!7pѽo+)!R `UaXtbCDDD BWJVp+[TWq@uLr QBZ6,y [qGȗD1*7p^ v"""},)uDNNڊm-RlgTX[TF ĵBDDD;4eT^S`=֐ @vDM=TnDDDM w i%Q#2Ѵu?eSu ݹe.?Dl ,{<毸NDDTpcdg4x;qbZ qAw ?pѝbN$8tv4tPXRtk" #д;QBr +-uUo=N8c=CĦ@P&~,q薱pPMrD&j2)њq"aEa+Kx*QB]QRt)2 ߟ `bB'leZtg;QSBtUe+Sxeoo{wNJߟq!ea~5Q,ܩ Pw:3"}޹` 0 ü4]vټ$'YQ?~;(+ (.Yu2";7U掊ێKս\n=^痨O5QG]rQִ v"""")> .++kժ3VKJJ222XSP:6Qb y :ZrOU5Ѳ3݉ {RRRyyyD!scdg*PIZˏ~&D5ɣTn2DDDM}ƌgϞݸqc!LI Nו+&|{Ynol8r&6Zx_v"""G>gΜ{ڴiK,9aB,;.]J~FIpyՅ-#ӤEQ9Cff?0eʔVZeddB=|/XB+zHnsLtAmXYԶjmjad{<㶟uKԦ8jyYټ4'v`!""Q/sΝ'顕`a-* ;OL/ͳ<ŪUm43n>s K2gIԲwV: IDATuy- wA&nEw""u 'ʲ\RR{gr*Eimu#ԑi}B2WA/7lBTGe`g!""뺩'N4L }iӦh>p@զNJ (5|^Z4q˺mrKQ|/r=/_^YY)IRRRNZVѣ;)="nEW wz70qq` ;˹NDD|֭{饗dY5kVnnf+..ZG9s,˿8Y:Vs:3#(u^,'/qMJ5=@Ś`!""z(;w~ةS'J@Redd,X`Μ9Nkc$%DAȒD{$}ʕ6w?utdh,nCFpV,DDDTs=+Ԙz\nV<.iCXVmXNDDr|'NB\LLL|||QQQկVcLCWpW (8=T^òrsQ]> {~=$]YJʠ(:,eTzS]*VmZpٳ',?wpD׊WhL;ervwTzoDDD!G>w܈}ݛ6mx,/^ܴiӄ ϟ/ٳ'+v{kT*pQEFPb?v "" Y> :nժUÇE1..n+Wt~'+BistP<诛GkUC˓:oͲfkO|?#)))j:̜93//GmD$gݞ O 22g trQMa;yF8DDDt@۶mϟ_PP`N:e ,XЮ]@#V$F(09IHQ-H$Wij0\J^ղ`g!""+np!b۶mEQ Pt 2 Y]Fw!\gP wzDDD!Hsh=s=w}]/^\D7S+r891wK~<&~Oz wmدc]9{nk-LL @Iw}U-;(S?\/_NMMmDH5Q*=We8½Qݷ0@g/Gz wI"PՑחo7zl"@Zm k;I٫.DM uԺ`g!"" _?|e5ӧO#G6zl"[fVO)[1.E'*6-bȁ_`iӦ)݉BP/6{RB U K"go,uu`g!"" S> ?\*S^^^\\\s}ǎFAUEA}We8bgzߝqVo61_w_;QP(8qb111~aŏ?K.}9vX ݈N:"e=Ϛ~|]hŊ~XԹ]LDDD7G~…^zmڴI6m\ӧObb={x$i: ^{QU!T-#o+ @NDD|˗ v̙ٳg\5kVQQѴi.]+8$u)m.,4ѭtmk;z\DOAp v""p߸q#O>$..K:wa˖-JGKoI`\ @gC7ns(%@wRJ{&ū((O8жmz_MHHg[-U4t S7U^NoɷyûI$zxBDDԨ|۷/--U_b\pze=Q]=E%ܲ;zĿOs:{E۱.+y`g!"" /> ޽{{wyW}]Y{`D7Kisp@:2Y Os(XW;79`ͷ)v""0p3gN{xšgϞ3g /Vz$^[vlLsG@v;et]3)t'""jD> nݺ}jwIMM5 iiiQQQmڴ7o}8YAԴP2JſԂ%+g}S,uv""pzȑ#SN5 UUU$5jݿo!% 2{N鑝$$CPhjxa?gg)% t'""jD wiii_|EEEŹs^z:Y@#鑝i7-ʢ{՞SBDDnp rqq/]Ծ}{f?H2nEm~]I#-ѷ\`g!"" o׻`waǏPPPлw>HNd` 2>HݩzKlBs`!""j|,?f:yV5/ٳH A*枬O1jL` =cܹe.߼i6l^,DDD͟}ʕN{,ː!C멩+V}7lD>jv82 ]Vp/X[T!Q`ݾu? "":|._}٢(\!;;{…x㍀F$\JQk wsQ|{71}VzK; Q3p߷oZ5kV3`0:t(nMOIR 'JVgMW hn9͛4i$]NCDDԜ(+++cbbj&^/++ @0[cP %'X)>Aa^?+1R=nQ hcdǼDDDsJKKt`D2T~ @Q=}CvA6MrKCDDl(gv,˿'@nI/It2,c\3Ǥ"EW@wƑ1Qm[ՉGOLO>dVPTT|aÆ-\0.._lD>(e|inٝga2=2Dtp37n<}*))^y|DD)t8J~~u{2nᴬ,DDDMi4@ eL{Q!hތ8ol?^g?@Ū*V3f̸|G BpIMKpvE4tkpA1vFuVsίlSVe֨{;QRpo.,)tJl?:?3D2k!ײ_,@<)W8&-Q"c4/[_|){[_ J"[]+bm>e+Jҧq٩WUTՅi"~&t[=Jo/*C:i%I;Iim/%:/ D) cRtKlKXPG#VlfqAٟ˲|ĉm۶^Zm Sݾ4QRz;,cO?z;O9&D²a{5 w׻`waǏPPPлw><z(e[8@<6v& ,@w:&0lnO5> wYxYfZy(OQoɣ]vrBDDL(W\t{b 2DbŊط~{Æ It zkN=<zuDZDÖzb@Ns;hZF 4Gx9Q^ٳgOA^p!7x#nUgI4f(m*ZKbL{FCr/n1Y}jY}u̘1С65SX-g˜3#+\`uȁ怛aG^YYS6Az}YYY52rʊ{rDQcB Ǥ"EWئ(5҇(|(ܻuVZZzz_=w\iii.]Aj%Enw@e@a|E*@hrĶ0DDD p߿?g}ڱ,ЧO#m]%Q/j(d/9P _ʿāWDOyLn5駟6L|WZUUUhÆ [pa\\܋/(QnFz\ 4wk;n=NGi|yWH)I v""Gޮ]UVY&;;o2q-[l>KLLlDM$} -#T//˷CZ}c8+Q8}rArss͛7lذ6m8p/?jԨ}r|wd t>EYYY߾}͛WXX(Iٳg?^z{T,O=Խ޻qJͶy &ˍ$A5ͽ'Ӷrץ=4!R}g;-^Kǃ½6F7Oo~׮]f9//oСӦMs}z-F_xb(JOޜIM_Iԫ#d-{\e*\Qxu5U-eY.,,3رc/:u4p~رcO/Re˖GR/^8!!!//oٲe7o|>h04c=#x󟍐SU:ءF$k[T/qONwہcBDD$S˲oh"55u5wգGGݱcǛo٣Gu:EG߾}w^s1..{_೛6m:uT˖-g̘Q 4b&2~: @U^ ! 
¤t=G-}$p p={O>i6u:hT.zޟgv=!!a޼yof~vYNg@#nܸرc\7n{ju߶mg}bzHVܻ{ lª=L=ri2@՞[߿V-Kvvr=''@͛7?3O>-[ pO?4/\cǎu+WJJJnb={ X: i t BmOpbw+v|EZ"vZC_ v""noxf͚U{ґdff*Wt:3<o hDϾEu0vu;w@BBڵkG;bĈ>t @OEՕ*WS× DM>>NDDD Gs} ĉˇk +ڵJhѢ?"##+++7nܸqE^Z|oǧ<6Їe1B{UɒSrץh1&@ϝV _rP' v""n^XXQ0ԓ'O?~\šC־(Ajod_ZZ ?ϨQ|Lӹlٲ{lӦM-MsիWllͼsƍ7g !˻ΑzԈL::h38fdkҪFw}o( ZT{GߧvGO/&;QQpw::Na }(pҥ˗/׹^^^׌ڢ+++;u|req]M>v|?dygqqq,JNcájAeyfF|o}YPN)rpE%Ksf?8DDDMFK.]tչW0|:o.((ȄHHH@2R:_'jkժ|NK̴i4n h ׶w3pr_BFJ(DPer"""{NR^^j*WQ)ܕ/ 5m۶uQ&xji\˵+C!G\/C`1{k1 CBv{~QQp'׿vj}GVk֭X۷oQNV1b'צ\뮻nYeε6ٳ:w쿤KNZGFiZ$Sp($3#*\y7_wQ4jӄ*7l\2;QPpׯ?p8fΜi2-Z駟Vz=ϧ~c 2t>ƥsM:Um߾j]lGy8q" ԙ2|ۦM/S B-s Geb=H"Jv7; QPɩWzxxgVCR?pw-O9otWM͌leAMwl9;ȷz wJ5w܊_ԩSoJu坂 kʔ)}Ν;ݟ-==}[ԩSllŋ.\X~7?]ӡ |駭[޾}{VVV-Fٳm6SO=5uFOV lѨ_1 IDAT1!Óu*%xUcYrS5 Fӭ[#F)͵Zݻ/^Czd]v=3qqq3fػwo׮]}~699O?tJJnOJJB5Z{3"|yCTkQuWWoZJDDDu P5o޼"yܹ_۷˲uo+((텅K.6lX# Bze8Zyu\05S/-g&4.UkW,DDDD 4@}iY*A:V Gi,ի"CTqэppTv\$6Ed_clQ)r\qULT:ZeY5YB w YdTpM}QF`ǥꢴ*W^-WQ#cXQv8}(lppzk%jsW K>]&|[u&$SQi,DDD;J^ֺv-(2;G((w>u嫨Ƒ^/;]CDDXS8tk.vSBpψto)<[T2M%OyEBDD˫}B@h2D1Fճe(p`Z f^EmkuGգn޿={|7Q@?WgTz `B>RnknQFԔ1F.\`g!"" Eu w^ŋ:uVi_vJv5;D5&]~z(LH׃2ĵyIO$[O4 ߟԮ];˖-kKݖ:-^oM=L}s9Sc{/pf]QS@r_(n ""&GSoFddf33g`d#MZ}(n>*AeڎUVVk'.s}qgBT@䈉{YѼdaH_B+F:|pM̽VjNE܍SJDG4J uHAp`ݾ7YBqO>1c' QP;Ou-bgP0!M! 6,[T۵5ņ'> 7|kYKJJv}YA&KYqZmPȋӶ?5H„4=/pj= 8Oڹ?YBMt'L>}ih4fgg8p .AN5v:B.)^Jbg#dPW-uIF|1݉~p{|JIt:j]jU=yFHI_Z\} QXmZYq uֽK,Ϛ5+77f[֣GΜ9Sw is:״:nv]n m>(iko ?ϺrNoC8Vh?,DDDpOKK+-----U# ,eQ^AtC8'+N/uLPxu½gϞk˲޽{No/CnJaBc͖AB{9'zXyBDDd> sFDD̟?޴iŋeYxM&L0|AfϞ8YH^zeo=cBޤUcܢZ}ڴd; Q(322tUV ><..NŸÇ\RӽoDuN ;Յ{T?Bǒoma&z0)]?U˵6AP:ݫ~<,<4DDDAsS{ Q۶m+b5&_qRwϨʜ%l'';GX[d/zD.%P$'Y wffR7%ш2Ru-t*r.d}!JYta`!"" *zH{T96lDDXSXkVhlzLNڊʜ%#UXs֭[&''I&]zUDwM;:Jtd=E-}@<^PD7b?z1wWH>e)R%"Ztv1gΜ޽{9s_߿-Ϟ>} Y- V5E"[]uX%ώqj| M|r!"" S={V-uIVϜ9?0#i\RDupzpxow b?z1 ꥈ|BDD!A o&**ye^mڴiTTԱc(J @s󝢺)'>f=0[j铖vFV/J5j "EBl6[,RՂ`DUWhRnAKe *dg;8//__d Qu[p~Y9*MD~v4ba(W-3 2Ie9;݉ Rw}< z+))e˖kٲeݺu~moFDaMI{2 ";[ 6_v]\; y@ֵq+H[xq-|>ٳgnzYϗ|ӧG %QhF `URjtЃk ǹ^.Ӱ> Qr{NDD5]]cǞ>}ի7o^p;nܸq#G DQ-y{{W|~M6(EUVfz饗<O\HVܒ]zkTOvJVђE59v΅,9hUBDDA .)SyuԩN5I;j(+pLfsm^%uWel""(H~ń2&$$\p! dSPxud]3vD2J&Xps.d̙^pWrѝj {ssssss˼ZrEaF$>Z->eԥ^[vE[桶zg \GE쀝DDT);w `ڴiTL$iڴi:vpDhV)Dn-5"oYHFcP m0w^VAoXVѨa;݉p?zQfs:uRSSׯo2233Kg"JB^*k(d^XS=Π|g`ŷ Y;ݹNDD5HOSNVXajuFZnԩDNJP(P֢{*5Iԋ͵~ùo;QMp_~GI&O| y%~ԩ'Jȑ#JIG6w-snEDww Y;9^j ٳ+|I6mDQ bVf͚K/y<~;I"p8n5(vpS$IRӝj {I)Sʼ:uT\qV$$(UL+C{-/X9"EDDT3)/^U؄ .!LxZ `m;nѽ*"ݲ2Q0 :`;A ͛yR-J( ,=5RP:"J! 
FiZγ*f.ux;݉ Rj?jYYY WT XjV裏"H}Z[ M-=!˞c +ySEDe6.Qu~ٳ&MjڴB I&'NJ4̙Y[V~!"j)G*A**Li i55[&E$ˮGC6q* <"]4N݉Rs9y?Zp]METj7;]\n?%'M{ |'R /o.Zu9AיG-\eb1@a6ʝrgϞٳg^^^ŷpN#r{R5RWGQd'7Nl#k#:9^J3EW/╛6G 1;Q)%$$ >#*;fɠQJN2,]ﻙoY&;QE(KN\r\p*Q픮Ӿg^y}TEujTmvnpGyeP]JEf̷muU:r'"""*Ws={^U۴V+|NW3zƤo+pqT#A1n)#m"+dZܢ/W%0Q8DDD*pw5.]oor2Q4W'yn*Pxa)oEא&ZY2TBO;ENHZDDDTJo*@l 7 <ɸPguO׺2UBܿr=yct駟pZE)F=IS9靅*ܻUw:y=(CNW̪3it>gYrU/tڕn!Iz퟽bO+p'5<] m IDATOu2>pE׷1elh 7q MDDDTi!mhjApK6k^w ]{~m;:`]r*S J6V/}|Vz,}j{Mo/~2+w""Bݜ*Iŋo߾zꒇLETE!MFGNQ],dLh놽}oN; Qi @ 0k֬&M'##st2w\VTk idܾrIpM'7n1.Q/z#/Smf ; QiA wI}ɓ'_|Ytm 8xO<χ9!Q5X@`S MU#GjD'E\1Gp:jQ`ҞhT_~)cB\г3/$?'iQpOKK0eʔ>J/HMM S8oNw+BOT 6D4O54+2-DO轑k۴[,DDD@^0r8.\bŊǿkJTS*[["6lo }/ִS%v`YFzCDDpoذaVVVӦM׬Ym6M65jԖ-[,XDUr[P;ī%H 䜝*V9Gl2ƨF.B:9ۜJ?9W^'N>}zzzz$$$^;{J+9^/Z+v ӿlpwU1 ebiP/\#""/h4_|qӦMW^z7oܱc믿n6NTtѨ/j`$rO:\tYءFXBDD]nzFTĉ ˡHj+[&NroQtg#]݃ :tūEr!"+H>`;w25ӧO2d׮]kРE֬YDUR4֗?R)({`9w JֶUJGsf"$=%kBDDW}ʕÆ ۻw\ꫯvaÆ J_>yqALVlN`]([O 7ߝ\ۈsEeEuڨ1Xoj N[lٽ{Сz 8zoi4#l.ѽQ3C+[ E0Z2뒣2 ||FYh̜9sÆ mڴ 6D+t%e*:dꁢ]vB~oWr.*l[zސ;F!qWT ,xG̛7fQvwtS轒gWav䒕eLK}}8=4U (|Y6Rzi$Yr CNv.Ir.55K7vkzy wB_t<}1QR\&1ycΣu)DDDH%Ze(tV#nIv2/v_юM+ LXʃm:dwfMEth۵p?[`oyDD)\`# [Q){[TuJarGO8[<:cQ-Rz@tt4SN:uܸqŋr.jk^r;$I_r-[TόpRigy>I0Ղyc}G ?_+ DD edɒH'"贚g˸27lQի)?n!F24G:^˱mcC8DDT+Q?sw`ҥNDT͕tlt8tM[\p_#o ؋܁YGM{pu}ڲ-2MT*QWFѣGG0 Q Q-$Xagz$R΍FD}y; |yūF(w""~?iiiw% Q~ۺE+ ^ɳ`cIJ(hJR=(bMC,]p/2]s 8pD!1eVWXϩEMϘt$H WZxEn I}CE6`[m; |B(,u@EEpz0MV >Aoֈ64C0#(^_h;p,܉"] G6VW۠=k"\fď..!tHpBo~$UCFyYtDqr!";Q -sOT(UL@g篍TrOr=4u%rg!";QzAJzgEeo`[$_ҕ23Aypo/j,Dt!t* eWDUĤ&;f/pr=$^]MB ?[&w""JtԩJ^daGT V9\@ V,sr*ˮMyYi}"Lu0^ ||6'CUgu:V@DDT.۴X,7x㫯zj||5k֬S\\ܡC|D՘Du84*s@]g'.6&G*`ٌjɎXwLhLt:v9Uu@(\"";S==ڵϟ?VO?{9|pe_|r8BN-tZYeShؔd!V':}ҿ#nbxͶuY*]ޑp|Ξ=o>r̙~O0f7_Neu=77 -v^%<{TBݴWE WJ^q)]ߑF,**Zh ˖-KMMEE/SΙ3g- u+j4iRXݮV+`ME^:po/l{>A/҇ Q]<4RP)}[BDD5M5h/nw׮];vcƌ_>SLtkw00T*Xbؔ%Am:tg#/N:YebiHoE_ r!"6m0lذR>={B|>[n/]OHTL1Rή\OΑmI7 }p7L6e:QR -[,|37o?cfM4IVϞ=[P#$Q:kI BXtOko `cުH$ FbEá5zU[|yr!"7nSXXJSNxvڅ)$Q݃g8a=ru5Bpk}C{*sf"6Zz>_)w""9JqJVo/|&77aÆ<;;{ƌ;w~饗~fÇݻ7;:t/ѨfYm罾o[;;謹nCG*b ˣ\鷛V|Ugcj? ū̼cۺro_<|y䤮c=P%IxW^]0~N:LQI)o2 O߿|о}W_}5! ]ZQ;f yR "T.^%EP麴vh e3 ^Yf%''7iҤO>Ν;ץKsF/)ϝ;Wϣ/8vF~ЦMvoDjTvPE5Cy8&M3qսZiTJcA<ٶ=A wI}ɓ'_|Yt/c oO?}TA233ϟw kĨcǖt/..~/]ҭ[kXQUV(JS+쎠714&PeS5`vh ~,K9 \ be^:thfeOm̫*_8Ո:zѠ7uvQupOII-iPsss۷o`EEEM>… sINN.uύ7$I*x֭[Kd kN-T3dqJ[ ]MJWǃiXU; UWA 4SL}$I/20#F=|7vmGG I#6z3^.DP(YQpfy222 .X"==}ZD=: p_6)=bXBBUb+|g`~y*CNM|1R7l0++iӦk֬ܶmM5j˖- ,$"dup! 7YaL)Ulj@b19ٶui ~rj^N81}HHHٳkv?$QM3ʠp; ~ i]MsksP{S1Z?`;Kiг3W<&"";F/nڴի^͛;vxfsHUV*B^t0Y3M2Zx',>T1yL+ʦ""._\r3J2yj{ :Y/v\d!{I {*eb9˲bQup6mZƍ<|LDN[\P07-T*QM7sΣ7*ƀUJQupի$I6lxG&NG"XBSuFW'aV #ƫ$ox*Q y5Qp߾}+Wy=zl9s椧7m~D5ս=l(â0 MV_UESγRH_^I ?J,DDTmߜڠA~Ν.]vҥ_Ez㏋"ӚD+IYdP=pg ] 5St 18vF8DDT=4UDF~޽ŋoVjjΝ;_r;ѝ Pкe4v@|Y@Z~IpTHV׹-¹_I!KDDTG5߿ԩBQ2ڠ73բ+Z!*MjLK9|\uU/@!zoZm; U(}>_vv>ۨQnݺi޼|D5=u#!/QbX/Uet3iB?OJհi`OE_fl^\+WN4))))==߿zjƍ_z9s7ވ@PIFtv8B,Ç%1ߓ -&S >>lPE?0B6GїUhUMA &$$$9rwu…zK. 
D&(QMu^/y6+uFu+ U/嗝q:g;K0 PvJUODDUS/luu˗/;ݻwgNtS*i56[\ g1gSMr.e3Z,DDT)ܟzꩍ7^v>ӧ(fV"}=MNWah55io `%MVI7Q%mWU4_ JE/p8<|R8DDTu)0@PD& Q4+.g ipF)^qoySwKvh `_QhSN:uqByŋ\DV_KGMP^ЮYU72 w h@vt}|w/zúv9q**pX,\.%KD5XK{mVı^xEd]p' ݴQg | O:n;Nn\4u/}}~#"Zн{wK.t"Z)EnRmQK#] r:yJuj\k'j3=$8; X7yq)pwѣ#VgпYT|9ڬ ap!#q.M?hJ=m ]M+u83X0zPg]b]4Q=QƍEEEܐwW#RjA(66@jT$M} ҪUkI^Ug#O9.seLA P8mDDTZ½nݺ~U_t)777ⱉjqF.U_cūUNZt(}#wjC׹K{~%yr!"*7… uh֬YaaafUhQKIR(W!/+EFX; 7{֧f$57v%T'(BDDUHZ6B0E^cL `A+#ޱbTq~ɟUR2N{ %r|_P.#""U(ܧMfxV3g 4(ⱉjBޢT`c7?*}jl+=rǩ66\Ͼ; UA6N0d;EF}G%78J㓼oV^f2g6TNO;U A V0UF=zj {֭].̙3K&''+#e3%alQUıl-B׭w8"Qjd2Mee7$F|+r O;'mV7R 2>&z0ūWodp1b_֭$ /z}֭[LDӄNQ)TzdvnpwuvعK5TT%?VNj;jԨp#픂p`Qi ^s]O;gR S:yTCF@_q*QmaD$Fod#_iٍÔ]V p9l{r' c.XQ*ۘ]հ܉(BJ~ w`4lt罾J `Mf Kb?-j|am#[r'̣ٲz䋤?=A;EB `Μ9 j)}v_(@/w/qzXkc|WkVZdzsķɝ"QXF[EʬS[ڜ\rW[wF0z]+\T6ӥ2Ǿ`gKDNDDDvnN$ŋ۷o_zup"3赂` V+7غ88}Ȳ' ~aMiyG*N/~B,DD @ 0k֬&M'##st2w\VD%z+9@KCێTKnHgS(R~u"6*z|sUn'uA wI}ɓ'_|Yt6OY(yf R2ec%Iz).Q_-ďQy%-u}ja[cC$qO= (빖ɝ"H /yedddee9.\XbEzz_{D%dp=XjQ3&! \vFuua]A,{ˑ;}A fee5mt͚5۶mдiQFmٲ%11qIIIJDKQSj{Ϙd ͽ&s]MaG S&K^_8ONի׉'O^~} ={|Ξ=;x$5s8oT~.(=w?0m* aږ"/纇FP➜u-{q. ^h4/M^zo޼cǎ_l6;iNPѢ{{S>7 U qCw{tz(oC?莅TJTzf׮]w) J%;jP QfWѽUR>wIϥG)=~ij2!RDbm^u'"IJ/xwL|?73f^\\RߠA.]eU `jwsAIӲ- Awq6ߟ}w3 f0DZ(B%)h!IJ$6%TBB"ʚ}0a3wߗs1}S ιgG݇95|6 KĎB(/G~;99zzܭ[7VPj`o0t.gAמ<1#ZyvO Hlz>vT !D_Ϟ= W_}_l۶mtرvb!.QW).wѽK:.ݘ)L ˼a1?<  *p.vBW d2FZV&y#DGeL@ K{) louEa]bg Elm.{.vBW^t JUJ3E.c,UvWS/XU?ԫ(MnT&= >Y*vBWw#$yrBѪ`^w4U]yֻ+d+`nw7\J4?x~=q@8!.wa{Y11Lm?b.vՙ',L 3Lչګۨ+vB ;BuUs'7{' LBtIf-0fjJz+΂B2]Tf̘_z{.B52R^qџ~*eZFl5b-mj`"gw\}dM2@BIwQ mQ$K|'T$x.fbM7 NڹwyĎ#ey^Б ubA!tI#T ѨL+Zt<(U_$@.fIfF S./,#M?/:yV8!.;Bu{j.勃4RX^I[dY ` BځiF&K+yNKmk`y"/Qأ)xF%e;7gB!4my^Bӑ kŎB"#T7(jF =ޠpE\=5^νlalGr֞:;4Ȳ [q B%8,Y9!v_~e:pg`}qsU75Vԝ"OìWo5n9\/? }5 7;`N=Bh 3%x%B%.,e4,!jm!@k8!LytmŊXŋz?S} N$tW2m ;B P]րoU)`]٢{"_C巉<6?V RП$ٙ y_!MCD:nNCs5+=9 %Xwx* `fyIMYAX+;B P0UJӚ,l?ٚm۠bH'vqmj6<w;BaqG/7\-Exib`^K]\C7FE_W6!PaqGk27;#j0B|pQW-PowO9H!GS y}iB, tZ LpŋJZ5"a89˾%hZ}#79\BOL88qBwlIݞ+?N߹,-]8yq9 'bǑm״'ˢ;B_Xw3@ >p{"c W2qݾW&%^CA-N~q&"2:Uw:-CH)]E.^TN $Se:`Ey^l~6aqݶϷqBP}Ԉ:6||ೢ9$۪on;ǬHowe`x)gM!TWaqG Fb@5ѣ +WC޼gBQaOWg^ !GFz-[.BaqGJa5w{=1*a{oK1yfhedM%Cѧ8D@|Aky^ $xkUf@;BhFM?fJ*OsFp'-04W8l_p,_!BaqG2Q(y݋)3&Є.~[eLW7Shx G#ne9ҏX!PP6J1R_>xbƪ[SUcgkRe7;Ϲcr-U]FX6\UXm,;B#X4yX^1?ugEs\Jg(x!IEB v?t1lTӨ;ֽbA;BPMG=W6E^vI٦9X?XUĎBuwП&th8CLs7Q5#^fkw ')j,EN;FՋ!Hڠ}w?8O!;BOmd*%vCB,jۯ^ Y@>UZwl* umYN8Z#v#k^REbXdtƓz>, ;>6jWSx?E(47 oqBn Qn^w11O g)['t綹WË0ue/*qBuZEyx~+[Dr5mۇM`NoCc= 6˼{_QT҄Q^{U!P5,q: ,r1rkݹ6͕fwϨS~~Ó6-OgbB1!tupvLcPc23FA[ ' {} 4Ǭ2sQ6 Cm=!,6 \aHU!, Tt`-4oO6ql➖Qa V|CQ͒Otz;7gRHj槼'{B^7#4Law5i ˿s(os--{}4WEbugR)U*[\ +]04uG\~3F @٘kf9*"vEIFTX.8lBMjvq̙gΜd-j߾Ç/;v<(KJJ֬Y3`_m tTo:JyBxRЏd=k%܂inN[RMFe #+b:z#bA!)@qw:| !d:t(*''gŊ)))'O\z|7|s1^rLP(cǎ}뭷_Ewj%%͏]մ)-\5`'HcI-&C=ao?2=0X:wq\8!$(˗/B;vl۶_/Z,Aw}/_[fС﯏9RUUURRM~>x]W"kLƓ)E.16wq[dz{/Uz@rǼ/v" -[@߾}{_~o߿`;qj5VFE>Ӧ3tH^uqK;} FnǶ+Fսe6:82h6DFʫf-UBRHWTT@f{3f8p{9< 7eV!/+$t#*sW(f+_-g:y\v&qÁoyūĎB ^^^FM&\?СCK48q"t5555nP*z+0std5W |V4d(kQ ߝ SkEA{pݲG8!\\/RUUըQK|j}6l0oQ$UUUΝ_n=5ry^6. 
a|ng+BsϾ1%M,%N[]qϏ<ʨ;kN R&-YѢ؉B(qIBY ×,Xsl6a,XЭ[%9|͛/W}.W\̙35}>B2\#y6%?]J<ϿRYO56NCz!?>%R*QJEeϾuy*g|:m"T !j_VU/˗_}ջvt3gBMGnH|ut6t}Ϲ㛲q}b)2&2]kN& S6 z!ToI ~^~gϞCy~ܸq6m2czm# KgI%6Td]wC5O<չ?ª4 kx6;B% KP޽>o~>oÅvAA+F8 /vn _|kw.VT,Ypua&Ǧfqc}<[qB(H+pSO=oXF믿n_8?P5=Ed4`;_gi&/Ys upy1V ˁl# ~tl/~ɐPw˩ReS~v}~'v$S^G)Q|~F!zzd Ÿ{joP] F-KK^9|>ԥ\x}{AL5ό& >WZ'BD2B(&#⥛B>T.O^-v{a 6Z_^ʩ2WfGEѺSU~|}Bg glPm]~Ip\Ӎj7ej}cBNn6V|S~vE/ukLKRUP!G B4BNEY.w/rG7V}Qko} eRK'lr/];nݎ;B,j-c4Ƿ3X{Rz{߮([\ko}(w?N?ZG㰁^kO﷈!jwP܍iv7%SzWJk;xk`Wqm BUuWy;BwPoZF\:`'lZug cL逸 V{#KakE%MhbB i Q?_xjuHy곪Kk; if!Nnj ;'vBX&1L 剝!;BS)PxV'pSzc3"X^>/HiI,&E~gW;Q)2҄W'3b'Bx=SmNv7?nTҶ dKo3iwξ.?(vBiT)/>¤ZPgNBq!T{c6)WZ~w>yoF)-]771+L8*LH8JpQc͋!b;BVg :X k?#y=Ki:MC/ܩWvq_dsˏm-Hi؉B(ư#jpR9\#"rװ c#ozk?e˩EL*`?:?i))/>JiTQ╹\UD!KXB"xdhAA`ĸOQur{gl2mV تR:kW)P*%gsgu!b;BH:e6G^I aK8\ET"uWiG*#WYOgg0RȹJ[9MB(永#ZN`?+9kMh0{Eqr췃̭(th<ه FJ*#,!1 k1) )ᢓl g<7)8 D rR׷oiG9CXm[$?;l;BH갸#D2 #z+5=\Me,} 5$|JiWP^?sp;BH#wJyV =݈UO6yl~ i]D쨝Ǡ`Hʺ(۵sݽ9Q!JrqF[19_ 8f'A*fNUް0fC^ ݟY՗|waqG%wͦT£VG@ i0c)BO~Ꙓ9\VI2^w{`{eܔc\)*i6ޫ,Da7wyOs'psH85h;Wp;(Z7OyaҤx7H}umF=NB!phYfcc £VGyTEn%#|ZrUUWGVЭZTF%2YFZO2#T6/1!E`qG%"-E}dSTU4:DA Jgl2J^.vC}F)6XR?bړl$!|cnAF7XB *af !y$#vt4ty>{Y f|WTm\u%Ȅԝ7:a} $R_{R.pѪ =w!wPV!Ѡ-[NTP6y1gf3UU,}̪֭;,(`]~oǬz mХyy'K]+;B]wPBK_EO8:F?7Znz4X$vQ^ir;Wپ<*?bRR_zTٮ%c߂ػBJtOt x&chKdNM)vRb Ew %Ž$q,ه];{֪Nb?BuwP6dQ6Dkrt,)?*ug~w%l8z\"" H-=@A!aqGI,F UY R6}9睫WFoU3jex^ŐOt{zOC!ߎ<'+B#LS$ eU6;(:4nboKJM=_UcwPFy}3|ē(?>-M=NRD!!$!Y Ť U s|-uM^0:~̫+˾䥶mv6jp,O% צ<0˸J[ Έ!#$\6lv.a;ҶlVo(m32L^)IEy„M'7;ĪUN Z+=}c#$Rۃj&li}n'@N|b$RSX =ŦHլ5Nװ#gZ^ 3.wz F}cI kVR5NW1nwKj bi0mESU˄h=!$:,!IޫQbCG[J IDAT8kJw[hZ l{3 T}?.*egmꔗSwgsQ4׬Dyw{Nq@c#D&Q3Z!$a !kr%s}Bn`hn>ٻ{o_^:a~CXkh lA+x&M)/=Fk5Qޟ!T`qGIyS[, ݹ%!WҪ2&|ZMkܜk7>/z?MK:iWnɩ^z?]۴نBՐ0ׁ!6,!S2?ɔ+c9Ax :|NKm[a'򼿋$k[i)8e熬NԳ3LZM3>Z!Th)$s˄q}Wvw#k~{G(-\vKKJMOd4IKK `_eU;%]\!JHm(}, ,)({n},LS Y qVD*O>{Ɲ F0 y%%Dܠ/ ƵӬb@N,Zy"PhztN}mm2@囟8YB @,:%%3&(x P%yi@d]'y~;W510Kn3O)G9|-ɉȳ3zFޢ sٺʷ>-xkizqX {VZ"w55\Ι Eb04WI4u~OZ ԩOho=V\ءBu wPTݫ?foN7y+g7S@i<%]b窱$5 y}Um)Lb0y]G%,)*<ӷءBu wPݔDӋ,sf&y7ߥӞ͞voJZ=kpعj{#F#I':z}zG'*vZ&2f!6GBq!TgijQz$c//<"@n{{'P**HidG,`saﲪVKfqׯ_̟@}*B>N2ݠT.4K)iհ_yz^Nb$ QI4| ]ľˠh,msV@kOUX*\q \qG(0U)܉w$) s]5u71Ӿ<_{s{",K;Zl5?"GU~@$$q(\ӽ#<91I&YFعBw,qDV*=[8r* Gz;nzκ8.piU3%;]d{ZJpevל XTTe#DqU"iH6 È !{X%;BtS( q]P/R%^%Zߩ`#b+lhcU3Hi8Em2;4W*7 n?jb`ҵRFjI6kztsebÊi^\.1XJdei -?ݔ t Ys7s$YʙI_ԛcs5+2H'j to$\_q"pYR25EePuɨݲ0E'?KSB(U$Usaփ؉jbqznI-\q;N=+őzfM巛ۥ`SAU}]_l0}0UF_)y.We;BHp=.pė0]-ˊ¶Զ9JEm}sq0KThNW3izHͳq Yr XS7iN{u/?Uټ[&,عB1[e$;BL7)C!kriDhR۶$x6x8w*[Z @RVE\!PEcX-?KBhZ*yNV$\Xh݌Ď"X%;BR*H1 wfYRws36ۏQf"%Ijr4"e$rFI:sHqyܷm MO;Baq,I*eU?p{JTFiȼWC /&b2M("M,E:4 k)r񄅝šy~"uSrL%x$vʫn!]b#$-!7** ʢ@g[;=wEHiH?BwPaԪB縼HdK I.7ђ_ճN xiqöhCEKIC:FR[i' ea20I UuhhÁO2^hkX%;Bu[kFPBBr3(Bg*hDU }`m/KUKk%צGV760h? 
~} YzF'񑄦Whztڜݼ˳3)BtSX%;BuP|{8R-u_yHi8No+ E"v)kfTuk(rpE_Ë^iTN>)R}}{U9.(x^I)}BP݀]b#T0tW*e{Ca'o E"rw_\1waYIC%,T˱kIKk|d ݷ\VFuE=aᬋ[s*d YzF-Wdwl.}SmhJMX%;BG RJV +"{䔢ޖ G}spu-Ribz(vPslTku 3 ;,) /wid\'D$Cۧ+~#}1fA/K w!7MXf(HS]\zP銌nC%!>Xfw!BH̓oL&G͢Lt$#DT^k[wqm/_O{Tۧ3Spm' ^oi#"[ BLpwAp"z'[OixQܜ9kxoII$$^s y+mУZkkRߑN)aDsK<(!LM+kttxBw݆eg"?&#ǜ`t Ӭm[:ئS7= ro]8v:1w ijţ;ynT 36a"beAhŧ00(]:4" q#~YdKxm=ﴦ[q3˹qN:w݇LoJIm8&S>V1γp,_ޏ쎐3~Њۄ} @?xH,bYD4I{̜U8-Ύ-q3攈L8I'?פkz'mʧ)חON|&"9߂krM( 9u[J$-""ZS(Xv9ӏivoټ7CNBpYyrN vJpzV]WѢpK( Gzu#ֶȫqc޹B?%O3MpwAp د'WӈHa쁀`w9s6J|Axˁ>5kf6gLI덺cꞶO|Z,V$~vmvElԛۜegn^3' ;\M.p4(!*WŒ 3pbe; {M9'o /Wk|N}N1M_Qض;>Kة5=Q%;n4.;.B_⿎SMDcDPpGn(Dȶ]&x͟9' uS;[4Gijʤ۪[*z?Yfj!m"E79n2pMHDҙ)2K ̓]6ffpT,w- oZ󤶩^T2I7B#h[팽pj=~>7 [y*).xWUi:1[=W>8QtgxkR*P6ުO]jOFitBq|}G0hMGEDs޹-̘g]2d%<q&QO03thmEw}y$~\1Z`)54iӛԎ'^iQp<@F@<6:{b>u;- p-9m0Bpwwdh~*.T/W-W/*mEwK-W;-0{j`X$fBMtSܘ~1afQ҂rQ0"0mܢjmJG_NO>?T5\M1skSzkSz[֝>46GW&]W&/ )ۺܹ/i[ja:1 } p~7]= 00.aZcwS;V}>w0ѡsi($NM67TqMl:ܥ߬mkN8%3>JkKeRhrJ(xL43{ Gj9 e ێ dJm"+s~\z '?>PPPM:7eR1w?hIoo><%3>2in4D7K۶zXZoSg^LUhʃ204:M=֩)$R""]HGx47ioJw)bZQ㤶aܜqNtM4H;=i-;S/߂پsUHp@pwwJmO~K9L>{7Jp"*#Ihā#zg߇8ƏTN8;)_*F^Ӧ]ΓڮڮVDs4%_],.gHxXbĖIBag*]δě67 IDAT)հm"*=~bE.SQ8X8t4qEm{a+yc&Nq`Uժ}ت@U$,g3ĠiBF{WbˇjͽBAYXJ\ e`ˉ+di:%<w;WHL&K>8|YWmHEH'0bRQ\TR*>&Z;N`^5_bĉy¤|qb0!Oe6$oz&F[xN_53sܟ{P=Tg^8W>3}2}XV41wApi0Mo$Sdţ̗% #e&ԉFxZߔO[O2\]*WFʣF*M PqK?mқbQ„\al0.W_Dy;jR{>:7}HQMPMP3{MeNƆ1:}}-(7+"ydݩ7N Mj}Кn6sL(9TU**ry\^, lnhqۨW+T19\:G(ܥ6=é}Gԏ3.- )㕩ґ{W wApWk27S7S޴yt,R@g90m5ܢ66 -jcK-r(1@,*JX*-F#i8czc=F[/$U*$n{.xiZIuQcV"A>7L#O+O#Ub/ xnܠ 9Cȴvdx2ܪ6Z~7%qRTZ$˥ERi\Z$#056lahƹ) ʐPB|eH B\CN4j5Z+ %y\xƗʣJ3V1ѻN-jsk-ҩwXYZZߤ}BH*)J BP*.J$Nڗrf3>cZ3jF]<1NDai}wUAat"WyzT=*xdzSz6}V=Tktt;S5IJ"y\4J7Z`0<q 0S;g K͔y!>i{GiZܲ;MkSJݔR#i8UHdIĤc|T/NMPԈt=Zgѭu]zgֻ*ӖzRm:6sA!'(r+PrB(W̓9%ϗkK?Vy2aĬٚ0[fkji&"qRY s>C-s& ?Vr=%QyŕBA. Ap,2@(2YDMk4N7L:ݨӍ?'SDU $I"%q('(B3_MvT]aK=zW=zW]kmZS&Ĝ A A? ܵs.W&!ݲ;V[lOZm +eƌd[T[4NI"҈::jRD|z=\9?)Wصt9buHi!8鵆avemWto2+EZQ,y78D.+wG [+fD"z8jcF$Dp̈Fpo7lù^IЏsOOlgsޜY|Ų[:FKSwGk+MwV:9OTHEDI:\(,Osp2p!UXHB2p<F͓y|vaqD׏FiQa{}|UB T BWB x00*Ă~w. I8IdqԲ1ݨ5ZݨӍIDaړ xR*BGB |cpޥ&8kMU9mh܈ōX܌ƌH܈X܈%X܌%X܌'Mnfغcx>x9*O=ޫp\`eLΣHcFKcFwuh2:.i4uq ވ荈ޘ艊JTFE%"y'*zc'&xR'.ql]<G`A H, 9_Y@悧9Ȃ2,(q!, 1 CwN\`AMM xޯȜg~zx2x8J,sɜ"ORRn6:{1#I48!&zb JL% 555UUU/9sjkkz͛7w} LHel$NX˴ 0 4 0:M4ۮ7z8{WW_sE<_WӲHHHLHɔLqTfQ]hS-sbΝ4Ҝ$9%9I?/.) ^IbqqOvL?aQ8mDtI}<, 3$#0gy yD&q,(sO%N/rd#pDӴ={̜9)6mZ[[/|wgn&00lͦl'MӼhx.y.98> 8.r9Nƀ=YiJ%ͤ{+)J- iKU-URI3TJfj`RxL2yy4tKN\U &Lo0Y84c)i$<)il\*c˹i_NOy| şTmr 2,.)VM;uEVǗ=OD |eNn %.o~ʗ-[s}Y WKUێXVԲel+lXV̶|g-ٶf|GD~D1/LjØNo89 Dةu53 g[fw!#?;c)γx".8sְ8Y"ێǻ̔kI-jRnMSOҖj 2lPI5muM$T∈4ɲxFD`ۆhɑ&:TM*Kl,pApwF߽%^`P()<_|9'i)NvԲTNvܲv²v̶4Tێ[A,æmmRm;m?a9x20z%.@d00b,[{fd(d[6Z"meٖ-_4(J.ޯ}Y$4s2{?/<8rc˘gDhgtWmm;e6}Ҷu6MDI2l"mmS쓍S-˙`vtjmͦTuEÅONzue 09O?z.%qPh wwwY8v6JJλk&u~]~Сިˉ@e. "?Lj8b:pS\\LDbC9\ܗ,YBD7nW,^8KuF{WQy"xWR]= {ヲi.{4]jU}}3͛Gw֭[7# .N%'|r۶m}QYYĉ;fFNNK=>Smmm_W˭ 0`ĝwcUVVZjϞ=ӧOj]e)3<3 !w; ; ; ; ; ; ; ; ; ; ݀+_eiGijj$)KaՕ?ܭAPe*L2<p7iK,ΛggŸq>foqggghȊnd+Ussskkpb֭yyyC==3۶_Ȫ'xbΜ9K.~n ~z+Va{կ~nϥ-F\\\\\\n ĉʆ7tp7bܸqEEE W1cx{M˒Ivww?#s'OkjjZ-[0^yK(eÈ1~x_z?={Z@&9U ApWZeY4.`/DNL:ײGyĩ"I(A2ٽ{:Uz3˲z!h9\/|wEBpA~"ڹsiG馛h„ g. wiSL!oĉmRg}s=7Tm7|@p92^xA눝QIDATX,/~ "8nC~8l:1c-]p8կ~rrrEX/3.+"J!===,3[Q\\|ѷ]&uadA( -~FDtFu9dGL:7NWTT?߷$/}Kj4\Lzw!ܾMӜ3g=Yl7\M6}_:u*v0}Xzu:kgΜ[XPPb "ZvmȤKD+W[0:::j:_Iu0BdҹovcccQQѪU?C7pC,Nd y޼y}!sw뭷ў={hd?˭(>R[Dt+_l}Y C :rM6_D8M2([oΛ7G€eҹ7n$+V<߷|[lAn+\Lz6JiʝUU00>D)aф ;%Y C я~~w̘1>Fz0 #C/?$7`xeҹD4{쬵.kh'O,--u u]׉hܹh0\rg]zED)Dۯܹh=\T>02ɓ'ϝ;wiHD .,))å7qĉ}{}gdHI}ڵۼys+]G g"k_3e. ˲I&[N7-ܦeɤgă>( / b I:/ҧ>Dxmڴon$ L&s?yYw} zo1\Lzm?|AjrҥK~wuueMnR#󎩭W^WWG;ˤ. 
;hF-Ztȑ{L,5.ˀ{ĉD~~ɓ'Q"p~Eer:5_(C$xh4q_u-Z}0TG%KrJ/^02頦+WZ#wP=+rYGoGU+vҤIDTSSӯx0c=zj"?q,m;Jws\o~s_ \{4ؤIA }}3 _pX~ѻi S29lApi2'N9i(=pʤgoXXXػ}wj6.ݽQ sG;w>cVڳgӳZ:4!k$ +X&[YYo߾G}t̘1=zoP4 Lz?Ν;)S1c|ӟ^nڵkqCssf~#F\\\\\\\\\\\\\\\\\\\`H⋌UV wC\\\ર|rرcs]w/]?i:p/ˌ.\{'om^~^UU%rii_Ϫz?SW1oa8EOӯmۗ"7n֬Yj$ٸq?~-$ r"믿/755mݺ1v;%O<ĝw޹aÆp8}|+2׻v5kO~-[(qIk.^ԩShݺuk׮uJ}?^XXo?ιm6lXj?epwȗ9s/| Tjٲe=إT3gرcR6W---x<˗//,,ַEDhѢ Z|1cL8?>- ~WZZZ\\ nοw; *lٲm۶\0 .Y駟^~ᅯꌱ+W,δ/_aÆŋB!MӖ,YO{P( #ºu777G"x`}_v_{D"77wٲe7n|g0O ,_|ڵk֬Y|p#..80..............? 6GIENDB`metafor/man/figures/selmodel-stepfun-fixed.png0000644000176200001440000014052413750017361021230 0ustar liggesusersPNG  IHDRC IDATxg\Tgk@4 ",&($vFuM15]5]WmܖM0.&FcԸHQ0v:0p603_>9/?ΜBV ; 4 и2@; 4 и2@; 4 и2@; 4 иޫ_Zx^P(ڶm1sν{vvvnnnzZr'O!C(jռy:7J _~]3f4rΝg)O紸xƍaaa[k۶wQ^^Up?V<ڐۨ.}uٳ'88X[Ǐ߼yS3r۷o=fٳgM[L]zO>B<{޼yzѣSN奥/!!_Eko>7nHվ}{ԨQ:7ݽ{w!*ԩS}뭷\\\n޼7Vuu|󡯷YTTf#_|4^DiOn1_#_%%%jɓҏ3f̐qI|@@@nn̝;w*ۦM8_Ovuu//۷oK 9sQQQj kkxZ6mTtww}wi wޭ~ T)xȐ!Uv$V24QtأHVR'O|ĉs(8==׷ ~PkTߑ./x]z9r̘1UF% ^z-=-mrۜиqߵk̙3jZ:h2oռWXQ1,=*b M1ƍӼ^BiaÆ 6lHqq^Fu]/_ڵk]||@>}M;wF233T_ @_usZ+VT?"h֬޽{u|XСCYYWcԺ̐ޞϗ~}?ի׹s*.\RR"xW!F_ݾ};55U5FV׮]_J{5 K6mV]uʕcǎI3˪T*mڝD\ xų%n4iÇq_8::~S= UYX}֨aÆsΝ9sٳg?믿=zÇB?OW\dzS*yyyjZ1x`ס>_"hٲNn/Sh{3o|r!D\\܋/x!?OҞܮ m84E:~B={Vi޽+ݨxΝ4iF7HӫWr7.ٷoߴi48;;[YYvO;N =zڢE* hk׮]IG璺O?iFשHuc 4 BϚ5ҥKJryyyI5-pvvܹsV /mO=`٥K:\~=..nŊRޥK;gH{rv6qONNRݲe*RB>>.]tҗ_~y޽wi>ڵk=:W߯9ѪUVZ;vj9ʥ 0O}{jyI3IɈgH{r #m'Sj&|~Ye:u |GU6۫WM6͘1CsbiʧFmݺܹs۶mfrssNt|q$??_S;4{ٲe4{2}4;HAew54HƏ?tPɑf 1xyy=KR痜t)F qqq2eʺuV\9bi)%%Eף`tC ٱcǢE(wuu-**uRc Χ0Cz{J4 !D``fiOn䶹q7*u4ywquM'O4S&Uԭ[7M]~]ݽaÆ⧟~| In߾}5+&h׮4#] ʕ+qw5R*W!MK ԯ Ity@ttyN{aj|mll4s(޽{?j|8p/x~@HODDDT~i mr@hMJAwZT*yxxYFTV{~moo&M1֭[/|ͱcǶmgĉW\ܫ-Ϟ=K/5jԨ]v~a^^+"-}vi+WjJɓ󎗗Mƍo]KSehG.t)L紺6o<88xΜ9}+877wƌݺustt|F} TԸ:_L)4$%%U~ikDnKmr[= ˩ и2@; 4 и2@; 4 и2@; 4 и2@@S`֢_n*@~ d*-j5^zi͚5KJJ:}uL]tBЅf;GkP(wn* >}U@Wd;]as; 4 и2`ƍ7o˒Jrҥ~~~˗//++3tyz ]T*l٢˒jz/V*#F(,,\`ɓ ]!įqOMM:tŋuY;v IJJ bF{6m^yÇ|ttbժUB+W !bbb V#/fԸܹsϞ={iѢ.ŹkFBCCoOl}1 01B?~jujjj.]5>>>4v3jܟJ~~~qqkqWW~b͛wޭ}:+~Ϟ=|0;ԛd*<'WV*/ߠq`ڸKc''';;Kxxx4lИtQHttt=<P#[8 !=*UA1ɵqW(oVTARiz5k&())IHHt `nn$P+Jn7~P\w!İa=ztȩSjb"""#V/,Ye[#}?LM6{QQѝ;wRSS5#&LB,\P:0T*/^72{{W_}Y3lW4tɦq_|||*q ;vz1mڴ>>>~ڵ3aݑ;4)%Kdeemݺy՛7o6u]#@s Pqp?4 и2@; 8ܭYf/Ӹq $>ؔp֭Mf`his?F( ]&N8q:ѣGvvŨq߿]>|X:0+d;Ȕ=&&wB4lƈ% lڸ\~۷1ku0dA|im6lhRF|im'N?0 XK9/sNhhhxx /P(*.ШQ#×'KkV_P `d;ȗW_5f# @6[l1f# @~9Ҕ"CW0䥶ƽ ""kٲeF_d{(S3K[?eCS i=U088ڵkC 8s… wy;;;c xvd{e}]Ś/t1t1q_dɵkf͚W_ƦL6˟~1^v}u_XaJi=~СUVUaڵ駟"""R!@ozhtҗ|^|z͛ݺura!uoܸaG׏C֦.ڸ{{{kK7nx{{" _ZГ'OnذUjBCCR@v/GDDM6-222,,YfW\iݺ5'A _Zw77SN-Y$**ҠI,Yj zC|im܅-[ܸq;wRSS[hmccczGLUj>,ݻ'O4-ZhѢTlԨR>p@!DrrSkVbl`*5BI&"^`*5/^޲eыA<?.))񮢢<0KkS]K.5XIC!@Njn={%%%qqq%9,@7܎qW_}ՀEl Pq߳gt^_"M"VcXXK j92xW^yeȐ!F/ gd;X_N=p!C>|x ۓR@v/{YY5kGi̙Ӯ]ٳgT*'Kkg}ǚo߾k֬6Fu"@6?N81f<ضmM6<> _Z޾x z} ?KkުUxWJJJ^Rgkk|򲲲W)((X`AΝv^TTb _Z>}>|866xlll|||Ϟ=[Z?~ŋJ# ,X0yZV)-- [|y ^{5J駟0\ @ZdfflR//_<**jժU#FB߿_ۊs9!DHHHqqZ.(( B$&&j[eBST*Z]^^ !nݪ\\\^x!ʕ+ qwӧ je{꼕ɣ<{`|fZ7io֏?8 &̙3g߾}C=v옔z$Mej*[[[!ʕ+111V&!5kBBXYY͜9S3lڸ !|||>>0`/l$22RѿC liP}=Zd7|#سgݗ_~b ???=_\\Ze~+//3gLJJvɮ]?رc3fX9s/qaBڡClڸgee%&&B8;;ƞ8q"!!K_EH<;99T\77kYYYu矅AAAVVu شiѣGk_@I~ңVZݻw ۅ MkhѢ{Ir޽{/[Lu>FJJJ2Z}"@q KLLׯ/pvv=qDBB6"Ba**^.vCˉٛWץ ^AMfN4dE,#-JLL\z4re˖B֭[[YوǛ Ol7k?H.r7888006|mΝ;_z.3g^ry! ɴwJUb魔o$Fk㞙ٷoԩӮ] YرcǎM]E%....S# Gakc׹SP^nZX&'`;wNRU+11Q@^v/ï\2{Ҋ 4|m=#@6ٳڵk}}}M&ؼys>}&Ln"A|immll9b J+8x,Xp)# 䫶+͝;wܹyyytss3ZeC @tѣgΜ)))1tM# @^6*jӦM#G ~m۶=ztϞ=۴i`"τl PTJ5rHAI wrr8qbVVwwww xJd;XJGܿÇgffN2E\~o]n]LL?'OHGeԸGFF:;;4iDˋԩ/,͛7 d;XJ[z]Rǎ+//8qm۶MNN6j"2Tjӛ6mZq?mllPّ`*5[wGJ~77.]T\6N}gDeԸw9>>ƍҏLJJ۷-v;woިelPq7o^yyy###wG !|Mo~ӧO7r!2TǽgϞ+V?/ 8pȑB>}\rETN0o߾&(v |޽{AAAƍkР<)))88wߝ8q)Jj. 
2'DdlqBL d;GYqdw@hԸ'''gggKoݺkDeԸw1""Bwߙ$>`*_7kIqѲ2mk~f/55_5 }lˠP՚?*J5+h\]]srrL] ;~v}7x;ҥKQQQ֭3u!5#۫0l/[ƲM ^߬5u-j`^?lذ[n}Ξ={ذa fwiܯ_nlР-[lٲbܸqC}饗LP`///7.Fq m Tm5mۦ]ZZִiS{{{Typvv66lh@d;WmDDDx{{y{{;::zyy-[hlzĽ088ڵkC 8s… wy;;;c xvd;ȗ#K,vڬYRRRؔiӦ]|O?5f @6 Xjfaڵ;v駟R@v/͛7uP([[[wƍ. d;ȗ[[߸q[(ʥK-_kiaaaNNN-Z7nݻw^X Kkz 6TZ^~}BBBhh~PǏ_xR1bDaa &O\Z[n:t˗ СCo&$$$++K @ZdeenZѵkשS.]tԩ]tBn:;;[ۊs9!DHHHqqZ.(( B$&&j[%77m۶iiiȆ fWU...o߾BpSŋӧO7u:!f%r͏L]ak=vԩ{ڵk6lXhц ~I&W^om`1v/§5*--sNjjj-mll QoNNÇwwm^xUBCC^ikhS~9dSҥKQQQ֭3u!Ol7uWt᷌e6 YkZ ]4lllڵk׮];VSSStIv!OzzڶmP({{_|pu @vn܍ ??G[yyyЗ?|C#F5㯿ztt.W+---((}:?|嗱/[UlׅZY:w} [45u!2]ɩʸJussrWffJ=p@^޽;{;v/^Ν~u^ji3ggg!DZZZZZkK,1e kgeo'juf7e G 6uw#kܸ8Pq<,,ĉ%%%;tOOOBq%i>!D~~~6mJKK?~\"`&AaڵK)ݻ6mBK_Yi:O(zl**?x,3a.c^6u-aw''';;ǏWNNG54ibeeնm[MK ݳgݻw [4 k֬YSNI;:ur }%TUղL̔ IPxzz޾}[RiJeJJgX[[7iҤBRU ebذa=pfԩSCնJhh7=zQT}6m [.@d;QSeڶmnc&Lذa… oeeT*/!M0AZ(##ƦE￿k׮)S_Uk֬ILL}ǎ;vЌc͚5:t0Jy}"@6B^z… III[ R(F0O_}U}QFF~x:d*888(((--iӦ) 0OVVZȔ)SL[Iu˖-[`< G5ׯ/޽V E֭3f888D|teo6u!fee2q/,, v횇ǐ!C<==322Μ9p;w9spASWQȑ#g* d;ȗ ,YrڵYf}W)))ӦM|~j*zA|im:jժ:88]cǎ?QqyfnݪO2`mmݽ{7n0 _Zwooom ~ oooCU0Kkz 6j͠Z^~}BBBhhQuViӦEFF5k,###>>ʕ+[0f @6nnnNZdITT˗AkkI&-YXlL-[ܸq;wRSS[hmccczGLָKlllmmm6mJe @v~9UQPPmgglٲB/dJw. lzĝb!@(((HV_p˻tboo믿BSruu1u@Fo>SWazuر jFХK֭[gld{-NmX97};?CxfZsYl@.Znm,իWKJJL]ސ1U_M2Ž 1cL]ej׮-!jU͟Eοvy`Lk_lذaʔ)c3j?OHHU!:ؼK@v [ F0`dZw. lڸsYl>FJJJ{BUV !V\)so~ܹޫCLimܳ6n1zhi966w)))#..588X3W̜6mСCǎߒ _ZE%&&^Z 9~xvveXZNMMm۶f'==uLT* Bǒ" _Z׿{O>Jhz,"??յʸoV^^^1+ڵk׮]"##[jzR0ٹ5u)hjegk2hm333[㡎N:ڵKEH=rrr2Rrssܪѣ)S߿~W>rH'tc`vAÿUTҠG |3q?wJz:MbbbXt<ɓ'U EЗ| ٹq*!nj}4u5(K.+W40_Z_Ç_dٳ?쳊 scNNNvvv?2Q5~۷Ovڽ{ݻ׾_y"njoՁB2u!VJҍЁZ={ !Zj5|p!ĠAz-/((жbxxxkFJKKvZk׮?\\\ŋӧO7u:!d;){)ɣJf ]w#G|ׯBHW yRU^vq0;!Cth]@w@hwJuVZ& ;qԩS=<~ SZ7zxx=ZtvvݻwJJ* qHLL\hQbbիǏggg/[Xlzݻ>BQq<888006|m=#@q N:*A _ZwsΩT5\h711} `d;ȗ}W\={viii脄6 _ZsڵӦMBl޼O>&L7b @~9ȑ#ccctww_`yX$@?vU͋;j**o颦ovT;;sΝ;7//޽{nnnF `d;IwSPn>vq2u!fGko !;udĪAp}5uy V7G !PqO?^z?~ر, `d; EkN~ S`ִ~9u۶m#G<_OOQFݻ,y!@6ƍۻwÇÇ㏯)SN>Vq899[w~իWv>ޑ G*SQF铝v[n, #S4 IDATu4j~ۻw޽{ϝ;'8رcR@v#ʜ8qbΜ9~~~;w^p73#333#GRtR___[[[??˗վʓ'OΝ۵kWGG:|"@q B8;;ӟ dggg:jwѺu#F} ٮ&3Vz?=ySrrM]9f{Se>|ᢢ"!ĦMT*h߿VB888\RmVVV˖-bҤIݺuS\\@v 8P:Щ.#..588X3~mܸqM6M4ь({ݻ-lP!bҤIF+BVvZ3hkk㓞m8GGNJ#eee֭[6` +d;XJŋ5lb"󋋋]]]oC_#00*?y뭷 X. A2?JHT:;;뫈!DhT*Unn[[HMM2e޽{իWO/SXX˦@.vA-kdd{WKFFF>|P_EHc)*??_P~ΥJڸq |yY%@/v/GO>ݵk׏?Xq֭^Boo7nѪÇ5w\|XѦM^z)**Hlڸ7i$++KݵkWJuqGs:^ _ZุrKP5^.`v/E_yAx7￿F,d;ȗ/%$$!,Y{nR9`# 䫶 0knҥm~ɓ'J(lԸyQ:;;;;;;i jkflP)6l\dlPq`yv m۶`>QRݿ?77UV|u ,uw!ģGNյk5*))hlڸgeemܸcҠslll޽SRRU!@ov/{DDDbbEW^- ?~<;;{ٲeƪ7d;ȗsw'(?k{fff```dt)--͐U lڸ;wNRU+11} `d;ȗ}W\={viii脄6 _ZsڵӦMBl޼O>&L7b @~9ȑ#ccctww_`yX$@?vt6:q{w ?欶+͝;wܹyyytss3ZeC -+{[P4AwTzí qpvvԩDz͛7Kdl`n\Ν|ad;9qP?t~3hC@-Pqsۍ({jjX{hhX' @(;4и @(;4и `GFOBBBjuXX؜9s> v0{i%I9r5M L2f&^'Nlܸ1:::55uͩ)))f`Md;4B j!k֬1,k"qOJJҏxxxl߾݌l3]PGGGZ}fXd"//ۻ¸sZm7y _Q4ZֈU qBxzzVt999>>>>K>ydӸX<Jd;]4Ε yyy*r6KÆ 6lX=zTlc===]]]߿_a<;;,l]R_|Y5MZZfXeo߾L}8 v0#{iG%:uEL>]?.(**zjzzll3SCݴiӳ>۱c>}z̘1͛7'8tP=ڶm믿9 v0#{R֬Y3cƌUVyyy-\pŊ`Md;J$[`vO?ٺ ԩSW^d Qvưl=A(;rUԬY=z8ٳg P;;;+})JJJKQZZh4OVZZڿ딄GG+NHڱdmqrL4W^i߾ y$SN}:udB'|ݭ[7[H.\ޯ_?[H-[7x`[HKWWW_}օvAݶ8TPw@hqPw@hqPw@l]@mc*Uf|}}m]ţj֬O-x7,ZjjGcnl׏fAGvqAfͼS\\Ѷm[3bi&ZapB .XjbܹS~>}̜9nRtܹ^zV)(&,EϞ=ׯ_׏;wNK/Yjmݺu˖-[l 0&piGGGvHTo6,fZIIIIIIIGhr x;h4/BRY*jӧO3gξ}RoL[ +/B$ݺup߿Yvu4d{ёd8ٺW\\,RsZmY,ʹ"""tnvv,X&؄+W6nejw ߿;Æ wuuxѕ^~Ç۴isĉO0᭷޲Jf`[7LCfAVdgg !*rrr2=zI ZbEPPwTYӖ"33w֭ѣ-^LX No߾/ܹ3''̙3zڸqQt%ڵsppسgϒ%K߯V###Bvu4dl7lŬVϯ0RńY,QJt?|k׮hذk5̴xw V\i/Re&,\JڲeK^Zjyf__EId+0>}oK/9s&??ɓ=zx,X`nl'͋l'ۭƽ:߯0lY,޽۷ow}N:? 
4a)vڵaÆs[ƚ 4ppp kݺu牉͵Ousmٲ 鈈HHHhٌ֬3*ǥ}í!v"vqJ|Nj44sbiTTTԯ_;w0 %%m2a)Ο?/?~L6MѣGJvZT^ KؠA;zxx!JKK-W!&,EjjjYYYllΫQхŋ6;ܺal7/l733ɓ#GnjX %͚5_~yۺuZDZ&,ݻCMɧ.3& =RtVZ988l߾]$6mBtWOV֯"nlU͕dԾlqN:t}ƍkӦMJmq{إHMMBԭ[7*يX!MX VۣG!DPP/ܾ}{!DHHHff-@LZ'O !:w ++иv|rǎ^x?!!}'N04!C[n-?yf!ī*HMMҥ˶mڵkꫯz{{7?;wL(8666..pȐ!u]palllff  ; u6l(ڵk^^d;wZ?i5[n?Õ+WXVҥKΝGKJJF)ҥʕ+食EB{Pے%K5j4w\!mXXu: O 3'[XD```˖-ˏojjj&MG|:uC Yvw}'ϸyfggaÆɓuMQXXx̙C\Ҵrssw֭mڴonܸѤIӞj79wNF5j$~zvvvXX~|ذa_gϞ};vG,''gƌwNIIqpphݺuӦMSRRG$ׯ !+O 9wNF] Iƍ7\]]nݺ7n//SLYhKҫsbuX=o!ž?䶍I#!y3˂ ֭+ yц /^l'ܲe 2ڼAOuʕGySN#_~vv~r6kg f<m qyg?|ԩS ʖ,Yҵk'xA=܂ 4Mi233|͐ 4͛͛Fjٲe:u~ &ҥ|Znnܰax;vٳqjiӦ 8pP޽_y T*/:u$w-yCȡCݻ#W\1~ՙ">аc{Z^vv{ӏO0aРAF |GQQQM40`' }N\W5kjvv~غukOOϨsm'N?W?_|! ڴ'mrێ?o7!Z.))$ &Wͷm6''GիM6-hfiӦ/_'x>@zjIysttܷo$IOW۶m[N+Wٳ~my#F3V*͘F{ypҤI$%''ÇKt9q_u[1+?Ƭ*-zw]\\lcBo<ˀ6nܸxbU嗮+ʘwMYuww0agyFP; ؄Y6ON,*--$LS۲eKcA26Mnw7}ǎK.$I6߯иk4y+[nVV$r999뿉 o+{Λ7O_̈#o{jذa4w۷o߾}WX!;:: !yymUh4N:I|r!o#44T*C7rՙP1+?5c*Λ7.!DÆ ;c^݄ eee?`仦nڴI&##C.Eտk裏{$Il3Tv%%%r8ݾDGG;]ʗwʕ!Weee/_xWz?b/?IIIŹɷxsΫW~;u$_YVZz9#999u]qR?O.]w<ܥKaܪ[~}raT VH5c*9;;O<9##СCs2dH ܹ3dȐ .8˥K$IBKs(;w3gΜ9ӪU+#׌1Zu&hРG\/,ߕ!C#=]=rrr}ݻCǎ+?͵k5*?{UI?iР$o߮\Ikm۶mJz93!!_Gׯߎ;&ӹsg<Ƭ}׮]skd V5i5 7qSNi4o677W7ಲ2>^^^G̘wEqBWҀ={ͳu)))III͓֭[?SҞܮmm9"7W\OnԨQRB4iDQtF```@@@A!Ľ{'EÆ ';埼tʕC^v- `ǎ̬ ׳gOƮ]RRR\]]۴i#$ӧO !z%Oc̪3VH5c>1uTSn䓡w1ݩqyw>ԪB`sfh߾3gX䗫^ӦM|I!.IR]\\3WZ%<0fM:|ݿjR1=,88ԩSN׿uiӦ NɃ۷oř={vƍ7nV^.c޵W`̻y?P|x%GnW䶥T VVTI+lj*Ӳem#^رg}6aj 1%ԩjժcǎ]V>C'77W*wH^^7_}͚5_~|A!Di6o,k9(Ƽ\5w}W&N(IR6lXBZƬ-ØRINƬ*ɇ !ի;,Yd===Ҍyu _w7n6mEEEU~N\WƼkU~VKM6l\L]!"""&=Mn*4K,чׁ|%k׮>ISRRyׯ\>,vڥ? Iu۶mUnTo.yHppN$̙3'0Q԰yfIJJJx9Ƭ-Øbk\?Ƽݽ{7<<" !\\\O^㫛P+cy*?'U^w?P"sm9s={vLK{_JDnWBn?"w۫иrrr*((j%Ih4ccc}}}ׯh"FS_ꫯ5hР.]3/^:thhhZ~7Μ9P/]tSN}_|QrÆ 4ק/Wjk~}?'UPPf Qaaas)++tyqh4_~1SJ4rӧk4N2e̘1vxtvԸ'&&_5f'Nlܸ1:::55uͩ\ bG{f^| ,PBww !֬Ycls}ӦM[lٲeK@@1'%%y{{GEEGbbb<<cNN 砪!R` /}u@vT-Vf5-=xV8_G;:(Jmܳ===u:]NNOp̙꧹tRBhsyh܁tqB)IH5UOgj%ZЍwvwD+ !ĕuEG?&??x^^J 4hРAO_57H?S=7vi; `7Kk;)kvurCtuu|dvv5wm\ש=~P-0vZB'\ۺLj]e{S7G!7mfnΚ.+{~25Bu<=RwJeIh_s=!DRz `.OeIZ!D]z.bo (J34V@,I)"v]ѷo̓'OG9קO3o丠ے_s$Ifqغǎ]eg=?ULYJ"qu_an.Di܋^5jbԩF3}t8UtѣgJ(HelwuR T;vJ(q?tPppp=.Cݹs>;~~z߾}cƌi޼ l)qLRYfƌYYYVZp+l]td;bLII<ؽ{w\\\Rtd;<"q4и @(;xUuW?G';zX$B[?d&I'Qfzx(4r*N3fbg}-]ČkĎ*Gâq7-d;U/ӕZ$z. CA8tJQݜb|Zlu<۷'$$ܽ{G-SvP(5k^{5!KX eq?֭[vIZP\㞚:rݻPk\;;;[\7xc֭ݳf5"@ >}W=3*uԱ|ys"@ 6$ݻwoذaUN IŪXeq8p5Xeq/Y vP.'WZZVTTdjVC.(((Xtk׮IRǎ;awwwX[ek]-l*!ԍ- PL]/(˷\1 B2gUx֭+HWVZcQ󥰰0**ٳ{sѣGNiӦGD>2!{o$$$b%,lx9ȯZ_~|y%9j+rʕ+WV9-Xv*6[lo,_DX0j~wK^^|޽{[$P <9uΝ{{6lHMMJa#@ 6eee}QÆ {=|мyI&t:0'`rʹsFFF~W]v]hQ||5eqϛ6mz{{>*̉l2ظ_twss0ԩS .X0\ƍ߼yʇ^FOV͙3Y LҪU+6m05vP.{Ν ㉉Сy$iȑӧOh4/,,2eʘ1c466vΜ9NNNt:̙3wj[d;(W͝;w׮]/ /痙ym۶կ_ܹĉ7n޳gZ.,,ܹs||~^,'ƍCRtѣG?5jyځl2ǽA>|;>QF}۶mӧ5jd:K,X@V !ϟ/XfYO8QR !}82`.^nݻw>_O7nؾ}{-^GRRwTT~$&&cfB89K^d;(TuǧQF-Zh۶w$IJOO utt۷o{B/B$+WBtE@mB<]9cƌggg !lk޼yaaaf,"//ۻ¸sZm{SSS?Çiĉ0a[oeVm*thGUjBmɜeR l[Td;(=+++66aaa?7|#JLLi_.{tt:u4k֬_*B_aw:ݻS v{N{.54,օnum%IO'/[W$dleqo###?c~BJ IDATy/xzzV>(;;ٹ,smٲ """Zh1cƌ~N:տhzիW4V<|2UT4Ҕl]i*Hva77U[˖-oݺe"T*˗u:~PѤW9KjjjYYYlll+݌@Arl[hqرiw'|Ҽu733ɓ#Gӧ郃7nܨ0.ڄl2ظ̙3&M*---?ܣG!ԩS?'fq!DQQիWZھ}{RRI6oO?uСnݺ-j1}Ν;/^y戈!Ċ+.\xС-ZřȡCnڴgرO>=f̘͛:tGm۶_*j͚5:u۷oΝ~g//իW65vP.{]\\~ytD!ݻSRRLrwww!3VZp+VT3KDDDJJʨQ222߿?vؔpU0NyPgeeƞ?>,,o^^^LNN槧!r{-n-N+˽D-S7Wj% 6ܹ?9J>!7W}u>KcB=g>iΟ?pBy0::ݛ5k*qi`W)] /hgrOXko7E[N@LJEi%(PyZӺv6@%2oU*U񨨨={X6صCg~q׋Zku;vrOW*n9 7Fk4O,#Ӣ_<³#-IաCe:(޳. 
ܫ;dphرc:]ryBrlw̙I&ONNѣkeq裏:tx␐ !VXѹsQFh"..ΊẼl2ظOtBݻwL2ȑ#V,`d;(Wu:yɓ'^~j,l2t]///{=z+@m@Tlu:g}6`իWgΜ:hР:4k,99ٺE tG=Sܶm[\\oaÆի׷AD@͛7'&&/##wޑ.]*믗,Yf͚ P;<и\k͚5 4Grssײe^xAyYlP jK.uԩ^zk7xC؎W\jSP;<и߾}'(?Wѭ[...ݳBqGG@@xu]N}v֭[ N}GD@@ުU}]pA{Ԯ]:8o_~իO>Ul?jݺu[reBB{'xW\|7B/rӐP;;v<ңG!:w|F3jԨ]ڠX#vpp={_9b'''!VMMMzx [ 0@]Qa(JWE^%J/v @A(;4и @(+Wݻ'߾tRNN-JSO=5{lvXX͛mQv{׭[װaCOOO!޽{ [oY:##vxq{'O,]nݺu I"P;<и;o߾.]$k׮&M۷*S5jԨbĈ}ҥ @]oڵۥnz'ܬR"vP^PP0{젠 WWנ MΚ5j̋l2ǽ0**ٳ{sѣGNiӦGZP#@ q1cٳg'NW_%&&?3gδf @ 6?C۶m,Xtww_xSO=k.0'`~vکT ㎎۷pႅ eq 2.\ 2{)O> Qaaas̩v0bĈk׮05vP.{LLÇ-[&I~PK&''ĘIF9}tFӿ)S3VZէOӧOw=<<|YYY j UefϞ4~+W6lΝ;;sL``ٳ[lj'6ngZ]XXعs?0<wV-2V{رC?>lذxc~iʟ[{oשd[Wa2J @ (JɴutQ}S7]tC7J'ktvEh;devvӳ¸NPFFN۷o_HHΝ;;vxڵI&mܸEӧOES#EEE037Gw!D&;GmZ% ۫WE%Iⷻ[Rюߋܮ}}R[WQGT& qWSTC_!zJڲeK֭Zڼysf-Z4mڴ?/RI&MdLao6h.D\\:y?o*knnWĭ?Wx#OkO{`aAZBsTmZЪ-*Ƴ}}}+ҠAP9e˖k׮5mԲE4``"Fȳ%`i8)/X IR_|Y5MZZ8::6hРBR{+d;]4B}fff}%&&… ;G?~ͭYf-`CeBCCҥKfcԨQ˖-:u4|ҨQ ܹ ;6!!wV%IZhnjSJ"vxqW^nJOOn߾}=!Dn݂[GddСC7mvqO3fÇѣG۶mWy[n=zشi/ҡCǏ̙3ǼP;hVLWQd޷oM6_~نs3:nݺugϞ5k$Ca.=ۚwk\f3m{SZ;lb@\ЕS,6d])$$$$$iA44ٳMII1O$ SF>lԩB͛[.lv/`JHHpqqIMM4’)@q&M4l0Ke{pp%̍n2:9r߾}߷dY _^*SVW!~뭷.]x{UVc빻[(# `j ;vctfI>}tk 駟Z+LvG !zR5 @v!DNNc_YY>ܻu&prr&~E׼&^]] l֬F}U͛7odnPkp8qr!jUo߾ݾ}W_}n-[L2ȑ#/Ys`jU֭[CM}L6K.P(7njEZ]233sUǢE111 ŠW[F|=tu}GBÇjj…󎫫9 ~ Ù3gΔ:ۇycFFƅ ܶmێ;J2k_EEEM4ϟB 駟òe˄G0U˖-%7ŋoSbt;b¾yk>v[7{*v vїܻw𵧧ghhׯ_"""<ϝ;KOO1cFHH~SN;Ih ޿:ӟBCC/P(lٲdɒ͛7{zz&$$lܸQڟO9K RxѶm !t:Ν;߾}m۶ ǏlT+PT!\t)))iڵ8]6qѷ?U{GoVNs[i,O)vgߟ۷oߜ_ 7n\~~ _F?Uf6lݿ_ 2䣏> d>v/g܅}=vNh4|Ɂhv;d]5kܼy"EzLxxc7R(999۷7,^xѼR>+++gle3>܏=*ݻKYYY{1@"t;؇Z{LL"''ã"r`j ݺuB899 !OnDI`j 5?LO?xv|c_~ԩR'X6Δ믿$"l); cpdw@9 Mڵk o[+Lv7Z+LvYfY+Lv9w@`pdw@5V}vIIIvT*2̍ni{aa9s;t~QFegg[,@Zt;Ȕ(::zÆ ޣG/zzz;77R @+V\hQfffBB~1**ɓ[|$C|}ݻ#""{=BQsGǎ36v/g4^hh;w̙ `t;ȗK.Νj٩S'setpr󫪪j'''={6&&@{vqܹB7gڴi]t`H4v/oNurr:~u>T!Ç[jpwyՂ!Ҡa#֝+[w)iʩ ,X`nݺedsa]n֎ '>&M BxzzZ0,vXQlG IDATݲc:jt:ke3ENNbtp_tҥK{5y丸VZY2vXK3E϶ )9u֭#Fp~;__QF _F'l۶-66o;{oV3\ Ct;ȗ]c„ w.((ؾ}{zb| G }LM}wޝ;wN>}uX2^r94qqqrd2|oܵkw}ڵkWFo۶mĈGQ˖-رR ^reuuuû-X ,,ͭs3gμ{n c:#4i޽{+**m) V;n8!ߘ1cڴi#:uj!Č34~%11Q1p@RQ]GhZ/9zѣG+**|IjuҞONNBYFT !\]]W^-زe]|rӧO޽?Ji|`j 111111z4Fiii*G}8p.׮] |g + }6֧tM$>}Btg}ѰT*~gc{\NOOwtt3c\Cŋ?S(--TTuU*իW5M7ySΛ7x„ ...f B}09wqqѿ* Z)Ub!Dh=<`@;ˆ[ȑ#% P(|}}oܸj VsssUQQߏ1>kѢ`{w]LLL}ܢ1lذgddDFFWΜ9SZZ:tPc,_뭷֬Yhv@w%$E|{葖h\]];uꤿ~N{n2:/Z7Ӂ?~~!CX0$@t;ȗ7FDD={v˖-B%K޽[VK+V`H4v.l2-[ܻwoYYZVT2Ukpot͚5khCn޼yv t;؇Z;C}5oݺZ9fB}hZ%%%ڵK`v.(,,3gw<8jԨlHn2:EGGoذ{EOO޽{Z*!@2t;ȗ}Ŋ-LHH/FEEu12@:ئ,1@:t;؁ݻw8ДE\`j 0C=((Z9fB}ͩ 02;  02;  V-[ֱcGRrjwXR]M\p3`It;HV .ر#***;;{ΝYYY$$d+{rrb͚5JRzj!Ė-[$`It;HV4JգGJ߾}8 .K@B61t GGGâR 知`It;H!DiiieeJR^hj6ɻq֭h4Mx e{qqãκV-))z]8}tFFF۸4Xt;H&wɕ:륥 ~Kƍ7n\|wvMu֋7o..K@Z61+ __7nhZâZj% -܅Æ +,,3gΔ:T]Dlep6mw՟eQՋ/6 !***n޼]ElͩB/ի׉'._Z>n.ޜ*ˑD}!CAȲeˢ ` O$!!sαD֯_3fky"~IV@vKe`pdw@`pdw@`pd؃ ///kxRN_Y;œv'վ}VZY;œsrrv Xn;vA[BY;FR@`pdw@`pdw@`pdw@`pdqjzٲe;vT*+W|s3!RYYق :wnܸdgggǴ[o>}:,,… 'NYfY$l t$vb8ސb!D]j%%%bnO)//o7n_b1( gϞ=`ӧ=bpZ6==KJJ\2hР;v^1oн{wcǎ]ĉJ2""A6-dnnףۥB#_UziiBx/&bnOI~ǝ:uJII߿ɓ'[nmƬƙv+//OLLz&>BسgϠA<==vsNoo>@Y v-/^믏5ʕ+eee111SLYfK0 NKn-!߯^\\ݼysIv17# 6l޼yG3&šCo߾jժdl G3888?5o߾<ʥ:L8UVn߾]]v.Y~]&|t4t;.-n( __7nhZâZjs3-REEEllG+Xi/"33S1w\,ZHP(nj5p>>**=447|ŃbQӦM qqqܹ̙3,X:|t4t;.-n9ͩ 02;  02;  02;  02;  02;  02;  02;  02; I&)jkHnMapdw@a|||^{aÆyzzo~ر7nh`)S(om E||fFFƘ1cJߘ1c.]{uwwRVVP(^{5ÊZ^ti=~"t;tƍ^z]xqȐ!v튌pႱǎ+ػwoŝ;w !&M$ׯ߾}w>i$J_{ *++>|8v-Z$$$DGGpovuBW6o,1KeegPPVկnܸ/B1p@?Oeee_}Ull~%K>|8++gСCVVVt:׷nB߲'|XT*njؽƎm۶};vGI76yf4ٳg *//9rd]N>JK"t;r9IΛ7R399СC/BPPP{ 4c۶miiiǏwrrүWUUUUUyQZ>vfQQҥKkn0k֬SN}g_~6m2)@ig̡u۷С{!hѢŹsw„ ƙ3gjԱcǗ_~y͚5>|φ t?2lB^{m֬Ym۶2d#JJJ~_ !z+)))FGؿAAA[nݻw vmxjUyܹWTT|EEEӦM;{lHHGnSNh>t믿vdve[IDAT3 02qd3 02;  02;  02;  02;  02;  02; 
k"R8IENDB`metafor/man/figures/selmodel-negexppow.pdf0000644000176200001440000007306213750017360020451 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20201102155959) /ModDate (D:20201102155959) /Title (R Graphics Output) /Producer (R 4.0.3) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 26208 /Filter /FlateDecode >> stream xˎuu?OM!j皍Ȱ UX:DR%*~1⋕{H/!w˺bԏ_c_ۯJ]U~>?_wSV>^oSU㧿?Տ>vǯjOG~zW]WޭֻzVzޭ_]E_w^uw9U߬nfޭڻU{Vj[wn5߭zZVj[wz"]y]wju-߬ڻU{jVݪ[wn5ޭ|ZVj[wnu[]V_"koz|כov7ͯ}7o|7o}[7o]1^}8 ^uvM_oݯ}k7ͯ7o>~z7}o]1KO7^u7=7Vn_9=Mw=o<~z|כzM_oz?~[z|z@Ozս?|=כz7=igx*z|:N߾7}-m<>zז{oz%莾t}~YۯnGw~Fw-7[_GۯϿ?k\߾IB/t~YBN=;W7օom4~+?[_Cۯ_ h~*~z~B76>R= oQ.!گO4~=?jmƯ_~mkyװ_khZAW7W?oֲOϫ[~.xkܿn}~?D|.`'zt}?]O?7G*DA}kt>sjA7FGum?oƯ_tkt:_uGOFsO·[OtG_ywO;z7Z~M~{httn?a?[խ'rktpk_߷FdW7꿟qޡ {]=u}Hz8U~˭~O}k{f? 8~n?>gO[&?,?O{~mB׭'4_V.F\}z;t׷{]q ~vϿSzA˯ _hy8~|i/{_ۺCKgo %eh@Çn[:oWb=C/oCi}{>ޥGU:94~8~y/vϿoj<zJy&wy[zٯO4~[~C[/ ?Oj|K:wcE#k]:[ZkzO߷5.k~!x)G~:O=Ny)H|]Z㭷te2W?~0}N0?1 Wi7=4&.[Z=η[߻.{]u>zJWz{>ޥK~ok|:߾4_ |﷾/qO;4-Һ~wS:סoh>F*q4:o>:˯ր}ϿӯqC~y 5|KuUwcíߏt+?K%[)]{]z90 խgE|iݏڥuU~;6au[~.H+]oj}%>tN')[c}Ņ[ZO\ӏX{ ZQtiޡSnT&˭ӯ7Rҹ~V>ʉ\iV.r!(zЍǃVooi=+]K~#'QhhI B]-%5ɭ>mk=c /=&+㗘H|tN狉hZйZ7-Ԍv1B] "/.%m /i>%?Bǎmc}ز1:n=unr&W\U~EOï/iwK멡;ae=߇>7?7S~֗4kɯo5>[~n~7iWNFyϿWN=ul ϿΏQ{"s4M#or?>]~.?C_:wLfJ_x~׷^_bRK:~PWt|C?t~ץ:\\~K::_C߰G%]-}~.<ĬoJh/Bf}:}!K%SNu–:X˯.?B_f.,unQᷧOC+bbڕxPоi_ }9-[yŴ2;B_¥t'CZP78V`L?柡cC_/o܌V,cC˕ +l?CxЇ:hhˡӏF+ZߏD*6vG,[~r!5t>$z|YwӏW9c98Qn΁e,{V˨Ή\,.y{)?{ʏ8_yeo~vкϝm̯΁}n '49pzH+ZvUC_Wn6^7BX=5XoCzuUgBt+\8m>+9&:oBOb~ԧ)B7ʁHlz ]8(m|C_Z_C3^qzi tM'op>T?үq~:*xF\3Kw: ^iMuQ_}EBNC^'&bYfr'4\H ~{BD75 ~~M~Q] !:X_~/JyzKW>o Qc[{N.:0~~Д~BRāMzJׅQЗt\( U0t o]Z wGe)]+:ʅUS-߳N:Z#w.Lf.t caJyN-JuQ:'62 Tnn-'Yv;G}R,_[.Ɣe=:t[vЌb<[__c=(^-tQHK~쇇l|66 !FBi=>>O~^4? =uMoM~]~Gw#~N?bJڿ߭~OύX`:1PqƟX 'ҭOhwbcK@i}Q~h)]ӏ؈:ɏsll_,1繑*=+zKwFXq+O]#tGnLKw}, ݁EOLB'&zx9 u|Htӏ[zJwM? M;|#?ou<~_̏?o_~;?GM 'ϿO?N?ę_WǪķW?ձF7WZpoO0{o΋LU_[.GW˧#j_9^ ϧ|us_|>ձS|_p>Ӿ>/8C qۗ%.u/}{̕aO?~_}//T?a~ǏӿN4yHē%~ WJI>RJrRۥ_)%9:vJ N)anN)adfRB݅SJ)%ݩt RқS7RҩqJINQJI7MJI/)%J)%@R] vq>RXuJIjuJIH))%mAJIIJIRJS~H)iѺRҚ_)%+)%:C)%RJZqJRJ)mRJ?RJ*TH)PN)";B8B9UzN)RJpʊRJ*UN)OJRJjJ'yLJIu )%r?'DRR 8I)):DOJIvJIvJWٝRR|$Lp(L(Dà'Tc┒߇r;TN)ѰI)))%Q,RJ#@)%YRT.KJIR}JRi|Z) J)]RJbQ_uRH#$2d1SLҏT RJB|ТNR̔RJ9PJIhQpJ) Tĉ&/)%IMLt#$`הТRpDAae3z(D*$o4 t|:$4)9K8E)"YN(o*UJIT:ssL) jC)J)ICU~R+լvP%$QJ)Iخï8G)%Ay\N%Ҥ MM$q[ZRJQ&"I8H7mUXt6>RTSInBD]+$QJ)IJĩ%{%%%r^RJBQ7xR锒HHhc>OJI³~9E)%AL=&)*MBQJIP(ǩ%Sũ%{Q* jк?*$t;_4:$(]J)Iީ$75"$REL+y<ќZHJIs)%Ax+9`tRҜ:FJIRa RTDOiH(yRJBzWJI{RkAgФdz%$RRi8d/< ʑ)%qL)$tNE)%{R?H-&$.a~) C~I)Vo&eRJⶡA)%%DJIvKJIRR=QD?*SJB )%KJIRҴL锒%$+SI($=OR\eLJI{wJ)inN%)t=($+J)4oo1VJI<5RJIJ2u)$:_RT@/QM:~J)aSK(_RF/QO:GrԒ! 
JףRJB+eO)% ?R.?I)ahE_tooʏnRJB\4RJIh=?R˩$ʩ$CJ)i KtJIN#z%j8jIei>%$IJը[<ߕR\uKJIJ!iWRZ_)%1xY)%u}($z ~HJIR^S~4RhSCIY_|K~_+$>R8d4WJIhQJ).$$4#U~"R:2r*I&%ɏ2 RJBUJI,ːzW4q_)'0Z J)e"QgJ) MJnƲT&MʒrȖ߂BSJIԒ|$$'& Z)% wc$FI.ćх|RZJ)eCQJ) ϫ*g*Q)%lԏL)e͗ФdJI}RZRJbURJפ&dʏ_RJ6娔\ֿo)%ЗDTw :eTsRJnNiɁDhR>7eRZ׋RJB|WJI,ËrWJIݦ•RP}SJIb~+E*$.%=JRSIoq=*$4~K~NQJIh$$YSL.iRM.u\(NU%$[6~I)y(CRJb[T"*ܦjKZ)J)m?m}:$4&RR]OBJIUԒcMJIRĶߖrɒwRJr[ѩ%C-{o+m'CztUF.6jC/iR6X#Nf($RiRTA){pEi?$u)UϏH)N$$9pm삾TsJRJBiRX?Ķ=OBSJIRN-:,hNs*RJZ*%$|I)y(SRJX/i($tz>*$Ft)$J&*$Ԓ!I)%YRZ׫RJERJ,ft}I)R7/&$~١uXyqRZ-|,:!\UybAJIhϕReL~RZRJB+5@)%IN,I*B)%KJIh=ReY{D~I) =br:Ф$.eaw]~.?H)24}~&d+do)=J) MJʖ[y,s*IWJI'VQN%)&Gz:KB/eN)˩$SZ xM)%u($SHGJ)IJ_S<2)%QRJLrJ)ɲJߒS+RR_EJIh/mCJI}*eC)%u)$ݩ%GZ)%TRRzGJISJT&RJFJIe=)%u%+N)RR}RR= .礔TwI)N)>I)tEpJI)%.N)_)%*)%u8%E)%nTRRR>RRvJISJ*]RR{JI%)%6TSŤT$9씒t~9唒?G RJ*]zRRAJIN5QJIrJI%)%SJj!M?~SJ*)N)SJ*N))8gN))\N))_9|qJIq )%+RR.\RJ=&\NQJI٤RRO)%eRRx^9l~(B딒?;ĩN)))J))˩J));N))ORJ\蔒2RI/)%\դRRAJIN QJIa攒2J)yRH))N!ĩN))O)DOH))iN))tAtJISJS)H))RRW ~I))9P/씒Ҝ:J;0tJIq )%'RJTRJ ]$RR|"N))tsJIq)%O< 8E)%oRRRHsJIqJ)%NvJI)%N))N"o;u)%SJꍜRR􍐒LerGA#"uG0wKnY!Y=Gyv͎tst@G&7~GH[uCDL. GX."zd: Ry!z^?#OGHK ƎyD=BeW=WHEEEua=,85͂ Â{X8| n*FXE+"XUXɔ +İsb',K`'eXp$ >n/Γ㡽뀰fbJ* 9a;+PX_ y72`Ͱm 3XpJ麆o‚;,^cu) /XpO`+,x 2߇/GwHXBXt2 ,x,,x/g٫xd Sł<Xn%Gj?̂ k&,x..0E&̶ %] KN~cXX"<^:H'qϬXX" {/<$>OfmMX Ac/T*łi?,XX(<͆G\<%z*Z-}^ vC,xlIR7X } [(fϵne[,xl :` A ;<`s@, [n%;pï/\~XXRfã#Dg-<06-Pɂǖ:-V>bcVX!<>'Tֽ6 {#XsGf-X:ࡋÂgDa ZaQ‚gɄ>%?SbBGGbU,xhXdĬwt(fĂGIXX?,ẍnsta˛*łGɏ~Q"C,x~<oÂG"͉հQR%N,xvXЗj"-vJ,x;p-#,xs,x/J܎_Q2M,xMT,xZwM~_B]/b$.ĂGI;Ă7'XÂg %ɂ=lX(#<Ê_][~WHXQ2X(5[~+X ޼ ZQ{\G,xԞ<^57GObۓe!<dUZ7b]{Y‚?P ޼ Ho zWզ<ֳy~ZҪE kXAKV^0(/,x̪ bV,xvH*Ȏ)}&:W! fTX<_Xbʂ+*<&_X|M~a㱻#'<lvެwvaY#=~wf6|SKoֻRK‚~X#V{ϬX' z Vg簫4]}łׇ <]ZX&z = N]~dWgWwk9 6ݥu}b|K~fwł{j[~˽}.5=ʡ ߐ̆ob;1<ÆfKY~K,x. ρBu~͆Oi=_ł粎X*ƂC,x ~F‚粒޿ˏ`XŊ ZXdbO `fӯs>O᠏CbCM l^ɏOl'vsB3[P! 
[remainder of a binary PDF figure (R Graphics Output) from metafor/man/figures/ omitted; raw image data not representable as text]
metafor/man/figures/selmodel-beta.png [binary PNG image data omitted]
metafor/man/figures/structs2.png [binary PNG image data omitted]
metafor/man/figures/selmodel-preston-prec.pdf [binary PDF figure (R Graphics Output, R 4.0.3) omitted]
metafor/man/figures/selmodel-stepfun-fixed.pdf [binary PDF figure (R Graphics Output, R 4.0.3) omitted]
metafor/man/rma.mh.Rd
\name{rma.mh} \alias{rma.mh} \title{Meta-Analysis via the Mantel-Haenszel Method} \description{ Function to fit fixed-effects models to \mjeqn{2 \times 2}{2x2} table and person-time data via the Mantel-Haenszel method. See below and the documentation of the \pkg{\link{metafor-package}} for more details on these models. \loadmathjax } \usage{ rma.mh(ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, measure="OR", data, slab, subset, add=1/2, to="only0", drop00=TRUE, correct=TRUE, level=95, digits, verbose=FALSE, \dots) } \arguments{ \item{ai}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper left cell). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{bi}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper right cell).
See below and the documentation of the \code{\link{escalc}} function for more details.} \item{ci}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower left cell). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{di}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower right cell). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{n1i}{vector to specify the group sizes or row totals (first group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{n2i}{vector to specify the group sizes or row totals (second group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{x1i}{vector to specify the number of events (first group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{x2i}{vector to specify the number of events (second group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{t1i}{vector to specify the total person-times (first group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{t2i}{vector to specify the total person-times (second group). See below and the documentation of the \code{\link{escalc}} function for more details.} \item{measure}{character string to specify the outcome measure to use for the meta-analysis. Possible options are the risk ratio (\code{"RR"}), the odds ratio (\code{"OR"}), the risk difference (\code{"RD"}), the incidence rate ratio (\code{"IRR"}), or the incidence rate difference (\code{"IRD"}).} \item{data}{optional data frame containing the data supplied to the function.} \item{slab}{optional vector with labels for the \mjseqn{k} studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be used for the analysis.} \item{add}{non-negative number to specify the amount to add to zero cells, counts, or frequencies when calculating the observed effect sizes or outcomes of the individual studies. Can also be a vector of two numbers, where the first number is used in the calculation of the observed effect sizes or outcomes and the second number is used when applying the Mantel-Haenszel method. See below and the documentation of the \code{\link{escalc}} function for more details.} \item{to}{character string to specify when the values under \code{add} should be added (either \code{"only0"}, \code{"all"}, \code{"if0all"}, or \code{"none"}). Can also be a character vector, where the first string again applies when calculating the observed effect sizes or outcomes and the second string when applying the Mantel-Haenszel method. See below and the documentation of the \code{\link{escalc}} function for more details.} \item{drop00}{logical to specify whether studies with no cases/events (or only cases) in both groups should be dropped when calculating the observed effect sizes or outcomes (the outcomes for such studies are set to \code{NA}). Can also be a vector of two logicals, where the first applies to the calculation of the observed effect sizes or outcomes and the second when applying the Mantel-Haenszel method. 
See below and the documentation of the \code{\link{escalc}} function for more details.} \item{correct}{logical to specify whether to apply a continuity correction when computing the Cochran-Mantel-Haenszel test statistic.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} \item{verbose}{logical to specify whether output should be generated on the progress of the model fitting (the default is \code{FALSE}).} \item{\dots}{additional arguments.} } \details{ \bold{Specifying the Data} When the outcome measure is either the risk ratio (measure=\code{"RR"}), odds ratio (\code{measure="OR"}), or risk difference (\code{measure="RD"}), the studies are assumed to provide data in terms of \mjeqn{2 \times 2}{2x2} tables of the form: \tabular{lccc}{ \tab outcome 1 \tab outcome 2 \tab total \cr group 1 \tab \code{ai} \tab \code{bi} \tab \code{n1i} \cr group 2 \tab \code{ci} \tab \code{di} \tab \code{n2i} } where \code{ai}, \code{bi}, \code{ci}, and \code{di} denote the cell frequencies and \code{n1i} and \code{n2i} the row totals. For example, in a set of randomized clinical trials (RCTs) or cohort studies, group 1 and group 2 may refer to the treatment (exposed) and placebo/control (not exposed) group, with outcome 1 denoting some event of interest (e.g., death) and outcome 2 its complement. In a set of case-control studies, group 1 and group 2 may refer to the group of cases and the group of controls, with outcome 1 denoting, for example, exposure to some risk factor and outcome 2 non-exposure. For these outcome measures, one needs to specify either \code{ai}, \code{bi}, \code{ci}, and \code{di} or alternatively \code{ai}, \code{ci}, \code{n1i}, and \code{n2i}. Alternatively, when the outcome measure is the incidence rate ratio (\code{measure="IRR"}) or the incidence rate difference (\code{measure="IRD"}), the studies are assumed to provide data in terms of tables of the form: \tabular{lcc}{ \tab events \tab person-time \cr group 1 \tab \code{x1i} \tab \code{t1i} \cr group 2 \tab \code{x2i} \tab \code{t2i} } where \code{x1i} and \code{x2i} denote the number of events in the first and the second group, respectively, and \code{t1i} and \code{t2i} the corresponding total person-times at risk. \bold{Mantel-Haenszel Method} An approach for aggregating data of these types was suggested by Mantel and Haenszel (1959) and later extended by various authors (see references). The Mantel-Haenszel method provides a weighted estimate under a fixed-effects model. The method is particularly advantageous when aggregating a large number of studies with small sample sizes (the so-called sparse data or increasing strata case). When analyzing odds ratios, the Cochran-Mantel-Haenszel (CMH) test (Cochran, 1954; Mantel & Haenszel, 1959) and Tarone's test for heterogeneity (Tarone, 1985) are also provided (by default, the CMH test statistic is computed with the continuity correction; this can be switched off with \code{correct=FALSE}). When analyzing incidence rate ratios, the Mantel-Haenszel (MH) test (Rothman et al., 2008) for person-time data is also provided (again, the \code{correct} argument controls whether the continuity correction is applied). When analyzing risk ratios, odds ratios, or incidence rate ratios, the printed results are given both in terms of the log and the raw units (for easier interpretation). 
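For instance, assuming that the \code{dat.bcg} dataset (with the treatment and control group counts in variables \code{tpos}, \code{tneg}, \code{cpos}, and \code{cneg}; the dataset and its variable names are assumptions for this illustration) is available after loading the package, a meta-analysis of the (log) odds ratios via the Mantel-Haenszel method could be carried out with a call of the following form:

\preformatted{library(metafor)

### illustrative sketch: Mantel-Haenszel method applied to 2x2 table data
### (dat.bcg and its variable names are assumed here for illustration only)
res <- rma.mh(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res}

Since odds ratios are analyzed in this sketch, the printed results would then also include the Cochran-Mantel-Haenszel test and Tarone's test for heterogeneity described above.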
\bold{Observed Effect Sizes or Outcomes of the Individual Studies}

The Mantel-Haenszel method itself does not require the calculation of the observed effect sizes or outcomes of the individual studies (e.g., the observed odds or incidence rate ratios of the \mjseqn{k} studies) and directly makes use of the table/event counts. Zero cells/events are not a problem (except in extreme cases, such as when one of the two outcomes never occurs in any of the \mjeqn{2 \times 2}{2x2} tables or when there are no events for one of the two groups in any of the tables). Therefore, it is unnecessary to add some constant to the cell/event counts when there are zero cells/events.

However, for plotting and various other functions, it is necessary to calculate the observed effect sizes or outcomes for the \mjseqn{k} studies. Here, zero cells/events can be problematic, so adding a constant value to the cell/event counts ensures that all \mjseqn{k} values can be calculated. The \code{add} and \code{to} arguments are used to specify what value should be added to the cell/event counts and under what circumstances when calculating the observed effect sizes or outcomes and when applying the Mantel-Haenszel method. Similarly, the \code{drop00} argument is used to specify how studies with no cases/events (or only cases) in both groups should be handled. The documentation of the \code{\link{escalc}} function explains how the \code{add}, \code{to}, and \code{drop00} arguments work. If only a single value for these arguments is specified (as is the case by default), then these values are used when calculating the observed effect sizes or outcomes and no adjustment to the cell/event counts is made when applying the Mantel-Haenszel method. Alternatively, when specifying two values for these arguments, the first value applies when calculating the observed effect sizes or outcomes and the second value when applying the Mantel-Haenszel method.

Note that \code{drop00} is set to \code{TRUE} by default. Therefore, the observed effect sizes or outcomes for studies where \code{ai=ci=0} or \code{bi=di=0} or studies where \code{x1i=x2i=0} are set to \code{NA}. When applying the Mantel-Haenszel method, such studies are not explicitly dropped (unless the second value of the \code{drop00} argument is also set to \code{TRUE}), but this is not necessary in practice, as they do not actually influence the results (assuming no adjustment to the cell/event counts is made when applying the Mantel-Haenszel method).
}
\value{
An object of class \code{c("rma.mh","rma")}.
The object is a list containing the following components:
\item{beta}{aggregated log risk ratio, log odds ratio, risk difference, log rate ratio, or rate difference.}
\item{se}{standard error of the aggregated value.}
\item{zval}{test statistic of the aggregated value.}
\item{pval}{corresponding p-value.}
\item{ci.lb}{lower bound of the confidence interval.}
\item{ci.ub}{upper bound of the confidence interval.}
\item{QE}{test statistic of the test for heterogeneity.}
\item{QEp}{corresponding p-value.}
\item{MH}{Cochran-Mantel-Haenszel test statistic (\code{measure="OR"}) or Mantel-Haenszel test statistic (\code{measure="IRR"}).}
\item{MHp}{corresponding p-value.}
\item{TA}{test statistic of Tarone's test for heterogeneity (only when \code{measure="OR"}).}
\item{TAp}{corresponding p-value (only when \code{measure="OR"}).}
\item{k}{number of studies included in the analysis.}
\item{yi, vi}{the vector of outcomes and corresponding sampling variances.}
\item{fit.stats}{a list with the log-likelihood, deviance, AIC, BIC, and AICc values under the unrestricted and restricted likelihood.}
\item{\dots}{some additional elements/values.}
}
\section{Methods}{
The results of the fitted model are formatted and printed with the \code{\link{print.rma.mh}} function. If fit statistics should also be given, use \code{\link{summary.rma}} (or use the \code{\link{fitstats.rma}} function to extract them).

The \code{\link{residuals.rma}}, \code{\link{rstandard.rma.mh}}, and \code{\link{rstudent.rma.mh}} functions extract raw and standardized residuals. Leave-one-out diagnostics can be obtained with \code{\link{leave1out.rma.mh}}.

Forest, funnel, radial, \enc{L'Abbé}{L'Abbe}, and Baujat plots can be obtained with \code{\link{forest.rma}}, \code{\link{funnel.rma}}, \code{\link{radial.rma}}, \code{\link{labbe.rma}}, and \code{\link{baujat.rma}}. The \code{\link{qqnorm.rma.mh}} function provides normal QQ plots of the standardized residuals. One can also just call \code{\link{plot.rma.mh}} on the fitted model object to obtain various plots at once.

A cumulative meta-analysis (i.e., adding one observation at a time) can be obtained with \code{\link{cumul.rma.mh}}.

Other extractor functions include \code{\link{coef.rma}}, \code{\link{vcov.rma}}, \code{\link{logLik.rma}}, \code{\link{deviance.rma}}, \code{\link{AIC.rma}}, and \code{\link{BIC.rma}}.
}
\author{
Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org}
}
\references{
Cochran, W. G. (1954). Some methods for strengthening the common \mjseqn{\chi^2} tests. \emph{Biometrics}, \bold{10}(4), 417--451. \verb{https://doi.org/10.2307/3001616}

Greenland, S., & Robins, J. M. (1985). Estimation of a common effect parameter from sparse follow-up data. \emph{Biometrics}, \bold{41}(1), 55--68. \verb{https://doi.org/10.2307/2530643}

Mantel, N., & Haenszel, W. (1959). Statistical aspects of the analysis of data from retrospective studies of disease. \emph{Journal of the National Cancer Institute}, \bold{22}(4), 719--748. \verb{https://doi.org/10.1093/jnci/22.4.719}

Nurminen, M. (1981). Asymptotic efficiency of general noniterative estimators of common relative risk. \emph{Biometrika}, \bold{68}(2), 525--530. \verb{https://doi.org/10.1093/biomet/68.2.525}

Robins, J., Breslow, N., & Greenland, S. (1986). Estimators of the Mantel-Haenszel variance consistent in both sparse data and large-strata limiting models. \emph{Biometrics}, \bold{42}(2), 311--323. \verb{https://doi.org/10.2307/2531052}

Rothman, K. J., Greenland, S., & Lash, T. L.
(2008). \emph{Modern epidemiology} (3rd ed.). Philadelphia: Lippincott Williams & Wilkins. Sato, T., Greenland, S., & Robins, J. M. (1989). On the variance estimator for the Mantel-Haenszel risk difference. \emph{Biometrics}, \bold{45}(4), 1323--1324. \verb{https://www.jstor.org/stable/2531784} Tarone, R. E. (1981). On summary estimators of relative risk. \emph{Journal of Chronic Diseases}, \bold{34}(9-10), 463--468. \verb{https://doi.org/10.1016/0021-9681(81)90006-0} Tarone, R. E. (1985). On heterogeneity tests based on efficient scores. \emph{Biometrika}, \bold{72}(1), 91--95. \verb{https://doi.org/10.1093/biomet/72.1.91} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.glmm}}, \code{\link{rma.peto}}, and \code{\link{rma.mv}} for other model fitting functions. } \examples{ ### meta-analysis of the (log) odds ratios using the Mantel-Haenszel method rma.mh(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### meta-analysis of the (log) risk ratios using the Mantel-Haenszel method rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) } \keyword{models} metafor/man/dat.fine1993.Rd0000644000176200001440000001346114055736403015003 0ustar liggesusers\name{dat.fine1993} \docType{data} \alias{dat.fine1993} \title{Studies on Radiation Therapy with or without Adjuvant Chemotherapy in Patients with Malignant Gliomas} \description{Results from 17 trials comparing post-operative radiation therapy with and without adjuvant chemotherapy in patients with malignant gliomas.} \usage{dat.fine1993} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{nei} \tab \code{numeric} \tab sample size in the experimental group receiving radiotherapy plus adjuvant chemotherapy \cr \bold{nci} \tab \code{numeric} \tab sample size in the control group receiving radiotherapy alone \cr \bold{e1i} \tab \code{numeric} \tab number of survivors at 6 months in the experimental group \cr \bold{c1i} \tab \code{numeric} \tab number of survivors at 6 months in the control group \cr \bold{e2i} \tab \code{numeric} \tab number of survivors at 12 months in the experimental group \cr \bold{c2i} \tab \code{numeric} \tab number of survivors at 12 months in the control group \cr \bold{e3i} \tab \code{numeric} \tab number of survivors at 18 months in the experimental group \cr \bold{c3i} \tab \code{numeric} \tab number of survivors at 18 months in the control group \cr \bold{e4i} \tab \code{numeric} \tab number of survivors at 24 months in the experimental group \cr \bold{c4i} \tab \code{numeric} \tab number of survivors at 24 months in the control group } } \details{ The 17 trials report the post-operative survival of patients with malignant gliomas receiving either radiation therapy with adjuvant chemotherapy or radiation therapy alone. Survival was assessed at 6, 12, 18, and 24 months in all but one study (which assessed survival only at 12 and at 24 months). The data were reconstructed by Trikalinos and Olkin (2012) based on Table 2 in Fine et al. (1993) and Table 3 in Dear (1994). The data can be used to illustrate how a meta-analysis can be conducted of effect sizes reported at multiple time points using a multivariate model. } \source{ Dear, K. B. G. (1994). 
Iterative generalized least squares for meta-analysis of survival data at multiple times. \emph{Biometrics}, \bold{50}(4), 989--1002. \verb{https://doi.org/10.2307/2533438}

Trikalinos, T. A., & Olkin, I. (2012). Meta-analysis of effect sizes reported at multiple time points: A multivariate approach. \emph{Clinical Trials}, \bold{9}(5), 610--620. \verb{https://doi.org/10.1177/1740774512453218}
}
\references{
Fine, H. A., Dear, K. B., Loeffler, J. S., Black, P. M., & Canellos, G. P. (1993). Meta-analysis of radiation therapy with and without adjuvant chemotherapy for malignant gliomas in adults. \emph{Cancer}, \bold{71}(8), 2585--2597. \verb{https://doi.org/10.1002/1097-0142(19930415)71:8<2585::aid-cncr2820710825>3.0.co;2-s}
}
\examples{
### copy data into 'dat' and examine data
dat <- dat.fine1993
dat

### calculate log(ORs) and sampling variances for each time point
dat <- escalc(measure="OR", ai=e1i, n1i=nei, ci=c1i, n2i=nci, data=dat, var.names=c("y1i","v1i"))
dat <- escalc(measure="OR", ai=e2i, n1i=nei, ci=c2i, n2i=nci, data=dat, var.names=c("y2i","v2i"))
dat <- escalc(measure="OR", ai=e3i, n1i=nei, ci=c3i, n2i=nci, data=dat, var.names=c("y3i","v3i"))
dat <- escalc(measure="OR", ai=e4i, n1i=nei, ci=c4i, n2i=nci, data=dat, var.names=c("y4i","v4i"))

### calculate the covariances (equations in Appendix of Trikalinos & Olkin, 2012)
dat$v12i <- with(dat, nei / (e1i * (nei - e2i)) + nci / (c1i * (nci - c2i)))
dat$v13i <- with(dat, nei / (e1i * (nei - e3i)) + nci / (c1i * (nci - c3i)))
dat$v14i <- with(dat, nei / (e1i * (nei - e4i)) + nci / (c1i * (nci - c4i)))
dat$v23i <- with(dat, nei / (e2i * (nei - e3i)) + nci / (c2i * (nci - c3i)))
dat$v24i <- with(dat, nei / (e2i * (nei - e4i)) + nci / (c2i * (nci - c4i)))
dat$v34i <- with(dat, nei / (e3i * (nei - e4i)) + nci / (c3i * (nci - c4i)))

### create dataset in long format
dat.long <- data.frame(study=rep(1:nrow(dat), each=4), time=1:4,
                       yi=c(t(dat[c("y1i","y2i","y3i","y4i")])),
                       vi=c(t(dat[c("v1i","v2i","v3i","v4i")])))

### var-cov matrices of the studies
V <- lapply(split(dat, dat$study), function(x) matrix(c(
   x$v1i,  x$v12i, x$v13i, x$v14i,
   x$v12i, x$v2i,  x$v23i, x$v24i,
   x$v13i, x$v23i, x$v3i,  x$v34i,
   x$v14i, x$v24i, x$v34i, x$v4i), nrow=4, ncol=4, byrow=TRUE))

### remove rows for the missing time points in study 17
dat.long <- na.omit(dat.long)

### remove corresponding rows/columns from var-cov matrix
V[[17]] <- V[[17]][c(2,4),c(2,4)]

### make a copy of V
Vc <- V

### replace any (near) singular var-cov matrices with ridge corrected versions
repl.Vi <- function(Vi) {
   res <- eigen(Vi)
   if (any(res$values <= .08)) {
      round(res$vectors \%*\% diag(res$values + .08) \%*\% t(res$vectors), 12)
   } else {
      Vi
   }
}
Vc <- lapply(Vc, repl.Vi)

### do not correct var-cov matrix of study 17
Vc[[17]] <- V[[17]]

### construct block diagonal matrix
Vc <- bldiag(Vc)

### multivariate fixed-effects model
res <- rma.mv(yi, Vc, mods = ~ factor(time) - 1, method="FE", data=dat.long)
print(res, digits=3)

### multivariate random-effects model with heteroscedastic AR(1) structure for the true effects
res <- rma.mv(yi, Vc, mods = ~ factor(time) - 1, random = ~ time | study, struct="HAR", data=dat.long)
print(res, digits=3)

\dontrun{
### profile the variance components
par(mfrow=c(2,2))
profile(res, tau2=1, xlim=c( 0,.2))
profile(res, tau2=2, xlim=c( 0,.2))
profile(res, tau2=3, xlim=c( 0,.2))
profile(res, tau2=4, xlim=c(.1,.3))}

\dontrun{
### profile the autocorrelation coefficient
par(mfrow=c(1,1))
profile(res, rho=1)}
}
\keyword{datasets}
metafor/man/influence.rma.mv.Rd0000644000176200001440000001361314055736403016133 0ustar liggesusers\name{influence.rma.mv} \alias{influence.rma.mv} \alias{cooks.distance.rma.mv} \alias{dfbetas.rma.mv} \alias{hatvalues.rma.mv} \title{Outlier and Influential Case Diagnostics for 'rma.mv' Objects} \description{ The functions can be used to compute various outlier and influential case diagnostics (some of which indicate the influence of deleting one case at a time on the model fit or the fitted/residual values) for objects of class \code{"rma.mv"}. \loadmathjax } \usage{ \method{cooks.distance}{rma.mv}(model, progbar=FALSE, cluster, reestimate=TRUE, parallel="no", ncpus=1, cl=NULL, \dots) \method{dfbetas}{rma.mv}(model, progbar=FALSE, cluster, reestimate=TRUE, parallel="no", ncpus=1, cl=NULL, \dots) \method{hatvalues}{rma.mv}(model, type="diagonal", \dots) } \arguments{ \item{model}{an object of class \code{"rma.mv"}.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{FALSE}).} \item{cluster}{optional vector to specify a clustering variable to use for computing the Cook's distances or DFBETAS values. If not specified, these measures are computed for the individual observed effect sizes or outcomes.} \item{reestimate}{logical to specify whether variance/correlation components should be re-estimated after deletion of the \mjseqn{i}th case (the default is \code{TRUE}).} \item{parallel}{character string to specify whether parallel processing should be used (the default is \code{"no"}). For parallel processing, set to either \code{"snow"} or \code{"multicore"}. See \sQuote{Details}.} \item{ncpus}{integer to specify the number of processes to use in the parallel processing.} \item{cl}{optional cluster to use if \code{parallel="snow"}. If not supplied, a cluster on the local machine is created for the duration of the call.} \item{type}{character string to specify whether only the diagonal of the hat matrix (\code{"diagonal"}) or the entire hat matrix (\code{"matrix"}) should be returned.} \item{\dots}{other arguments.} } \details{ The term \sQuote{case} below refers to a particular row from the dataset used in the model fitting (when argument \code{cluster} is not specified) or each level of the variable specified via \code{cluster}. Cook's distance for the \mjseqn{i}th case can be interpreted as the Mahalanobis distance between the entire set of predicted values once with the \mjseqn{i}th case included and once with the \mjseqn{i}th case excluded from the model fitting. The DFBETAS value(s) essentially indicate(s) how many standard deviations the estimated coefficient(s) change(s) after excluding the \mjseqn{i}th case from the model fitting. } \value{ The \code{cooks.distance} function returns a vector. The \code{dfbetas} function returns a data frame. The \code{hatvalues} function returns either a vector with the diagonal elements of the hat matrix or the entire hat matrix. } \note{ Right now, leave-one-out diagnostics are calculated by refitting the model \mjseqn{k} times (where \mjseqn{k} is the number of cases). Depending on how large \mjseqn{k} is, it may take a few moments to finish the calculations. For complex models fitted with \code{\link{rma.mv}}, this can become computationally expensive. On machines with multiple cores, one can usually speed things up by delegating the model fitting to separate worker processes, that is, by setting \code{parallel="snow"} or \code{parallel="multicore"} and \code{ncpus} to some value larger than 1. 
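For instance, a rough sketch of such a call (reusing the multilevel model from the \sQuote{Examples} section below; the number of worker processes is purely illustrative):

\preformatted{res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat.konstantopoulos2011)

## delegate the leave-one-out refits to two worker processes
cooks.distance(res, cluster=dat.konstantopoulos2011$district, parallel="snow", ncpus=2)}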
Parallel processing makes use of the \code{\link[parallel]{parallel}} package, using the \code{\link[parallel]{makePSOCKcluster}} and \code{\link[parallel]{parLapply}} functions when \code{parallel="snow"} or using \code{\link[parallel]{mclapply}} when \code{parallel="multicore"} (the latter only works on Unix/Linux-alikes). With \code{parallel::detectCores()}, one can check on the number of available cores on the local machine.

Alternatively (or in addition to using parallel processing), one can also set \code{reestimate=FALSE}, in which case any variance/correlation components in the model are not re-estimated after deleting the \mjseqn{i}th case from the dataset. Doing so only yields an approximation to the Cook's distances and DFBETAS values that ignores the influence of the \mjseqn{i}th case on the variance/correlation components, but is considerably faster (and often yields similar results).

It may not be possible to fit the model after deletion of the \mjseqn{i}th case from the dataset. This will result in \code{NA} values for that case.
}
\author{
Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org}
}
\references{
Belsley, D. A., Kuh, E., & Welsch, R. E. (1980). \emph{Regression diagnostics}. New York: Wiley.

Cook, R. D., & Weisberg, S. (1982). \emph{Residuals and influence in regression}. London: Chapman and Hall.

Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03}

Viechtbauer, W., & Cheung, M. W.-L. (2010). Outlier and influence diagnostics for meta-analysis. \emph{Research Synthesis Methods}, \bold{1}(2), 112--125. \verb{https://doi.org/10.1002/jrsm.11}
}
\seealso{
\code{\link{rstudent.rma.mv}}, \code{\link{weights.rma.mv}}
}
\examples{
### copy data from Konstantopoulos (2011) into 'dat'
dat <- dat.konstantopoulos2011

### multilevel random-effects model
res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat)
print(res, digits=3)

### Cook's distances for each observed outcome
x <- cooks.distance(res)
x
plot(x, type="o", pch=19, xlab="Observed Outcome", ylab="Cook's Distance")

### Cook's distances for each district
x <- cooks.distance(res, cluster=dat$district)
x
plot(x, type="o", pch=19, xlab="District", ylab="Cook's Distance", xaxt="n")
axis(side=1, at=seq_along(x), labels=as.numeric(names(x)))

### hat values
hatvalues(res)
}
\keyword{models}
metafor/man/print.rma.Rd0000644000176200001440000002740414055736403014701 0ustar liggesusers\name{print.rma}
\alias{print.rma}
\alias{print.rma.uni}
\alias{print.rma.mh}
\alias{print.rma.peto}
\alias{print.rma.glmm}
\alias{print.rma.mv}
\alias{summary.rma}
\alias{print.summary.rma}
\title{Print and Summary Methods for 'rma' Objects}
\description{
Print and summary methods for objects of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, \code{"rma.glmm"}, and \code{"rma.mv"}.
\loadmathjax } \usage{ \method{print}{rma.uni}(x, digits, showfit=FALSE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots) \method{print}{rma.mh}(x, digits, showfit=FALSE, \dots) \method{print}{rma.peto}(x, digits, showfit=FALSE, \dots) \method{print}{rma.glmm}(x, digits, showfit=FALSE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots) \method{print}{rma.mv}(x, digits, showfit=FALSE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots) \method{summary}{rma}(object, digits, showfit=TRUE, \dots) \method{print}{summary.rma}(x, digits, showfit=TRUE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, \code{"rma.glmm"}, \code{"rma.mv"}, or \code{"summary.rma"} (for \code{print}).} \item{object}{an object of class \code{"rma"} (for \code{summary}).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{showfit}{logical to specify whether the fit statistics and information criteria should be printed (the default is \code{FALSE} for \code{print} and \code{TRUE} for \code{summary}).} \item{signif.stars}{logical to specify whether p-values should be encoded visually with \sQuote{significance stars}. Defaults to the \code{show.signif.stars} slot of \code{\link{options}}.} \item{signif.legend}{logical to specify whether the legend for the \sQuote{significance stars} should be printed. Defaults to the value for \code{signif.stars}.} \item{\dots}{other arguments.} } \details{ The output includes: \itemize{ \item the log-likelihood, deviance, AIC, BIC, and AICc value (when setting \code{showfit=TRUE} or by default for \code{summary}). \item for objects of class \code{"rma.uni"} and \code{"rma.glmm"}, the amount of (residual) heterogeneity in the random/mixed-effects model (i.e., the estimate of \mjseqn{\tau^2} and its square root). Suppressed for fixed-effects models. The (asymptotic) standard error of the estimate of \mjseqn{\tau^2} is also provided (where possible). \item for objects of \code{"rma.mv"}, a table providing information about the variance components and correlations in the model. For \mjseqn{\sigma^2} components, the estimate and its square root are provided, in addition to the number of values/levels, whether the component was fixed or estimated, and the name of the grouping variable/factor. If the \code{R} argument was used to specify known correlation matrices, this is also indicated. For models with an \sQuote{\code{~ inner | outer}} formula term, the name of the inner and outer grouping variable/factor are given and the number of values/levels of these variables/factors. In addition, for each \mjseqn{\tau^2} component, the estimate and its square root are provided, the number of effects or outcomes observed at each level of the inner grouping variable/factor (only for \code{struct="HCS"}, \code{struct="DIAG"}, \code{struct="HAR"}, and \code{struct="UN"}), and whether the component was fixed or estimated. 
Finally, either the estimate of \mjseqn{\rho} (for \code{struct="CS"}, \code{struct="AR"}, \code{struct="CAR"}, \code{struct="HAR"}, or \code{struct="HCS"}) or the entire estimated correlation matrix (for \code{struct="UN"}) between the levels of the inner grouping variable/factor is provided, again with information whether a particular correlation was fixed or estimated, and how often each combination of levels of the inner grouping variable/factor was observed across the levels of the outer grouping variable/factor. If there is a second \sQuote{\code{~ inner | outer}} formula term, the same information as described above will be provided, but now for the \mjseqn{\gamma^2} and \mjseqn{\phi} components. \item the \mjseqn{I^2} statistic, which estimates (in percent) how much of the total variability in the observed effect sizes or outcomes (which is composed of heterogeneity plus sampling variability) can be attributed to heterogeneity among the true effects. For a meta-regression model, \mjseqn{I^2} estimates how much of the unaccounted variability (which is composed of residual heterogeneity plus sampling variability) can be attributed to residual heterogeneity. See \sQuote{Note} for how \mjseqn{I^2} is computed. \item the \mjseqn{H^2} statistic, which estimates the ratio of the total amount of variability in the observed effect sizes or outcomes to the amount of sampling variability. For a meta-regression model, \mjseqn{H^2} estimates the ratio of the unaccounted variability in the observed effect sizes or outcomes to the amount of sampling variability. See \sQuote{Note} for how \mjseqn{H^2} is computed. \item for objects of class \code{"rma.uni"}, the \mjseqn{R^2} statistic, which estimates the amount of heterogeneity accounted for by the moderators included in the model and can be regarded as a pseudo \mjseqn{R^2} statistic (Raudenbush, 2009). Only provided when fitting a mixed-effects models (i.e., for models including moderators). This is suppressed (and set to \code{NULL}) for models without moderators, fixed-effects models, or if the model does not contain an intercept. See \sQuote{Note} for how \mjseqn{R^2} is computed. \item for objects of class \code{"rma.glmm"}, the amount of study level variability (only when using a model that models study level differences as a random effect). \item the results of the test for (residual) heterogeneity. This is the usual \mjseqn{Q}-test for heterogeneity when not including moderators in the model and the \mjseqn{Q_E}-test for residual heterogeneity when moderators are included. For objects of class \code{"rma.glmm"}, the results from a Wald-type test and a likelihood ratio test are provided (see \code{\link{rma.glmm}} for more details). \item the results of the omnibus (Wald-type) test of the coefficients in the model (the indices of the coefficients tested are also indicated). Suppressed if the model includes only one coefficient (e.g., only an intercept, like in the fixed- and random-effects model). \item a table with the estimated coefficients, corresponding standard errors, test statistics, p-values, and confidence interval bounds. \item the Cochran-Mantel-Haenszel test and Tarone's test for heterogeneity (only when analyzing odds ratios using the Mantel-Haenszel method, i.e., \code{"rma.mh"}). } } \value{ The \code{print} functions do not return an object. The \code{summary} function returns the object passed to it (with additional class \code{"summary.rma"}). 
} \note{ For random-effects models, the \mjseqn{I^2} statistic is computed with \mjdeqn{I^2 = 100\\\\\\\% \times \frac{\hat{\tau}^2}{\hat{\tau}^2 + \tilde{v}},}{I^2 = 100\\\% hat(\tau)^2 / (hat(\tau)^2 + v),} where \mjeqn{\hat{\tau}^2}{hat(\tau)^2} is the estimated value of \mjseqn{\tau^2} and \mjdeqn{\tilde{v} = \frac{(k-1) \sum w_i}{(\sum w_i)^2 - \sum w_i^2},}{v = ((k-1) \sum w_i) / ((\sum w_i)^2 - \sum w_i^2),} where \mjseqn{w_i = 1 / v_i} is the inverse of the sampling variance of the \mjseqn{i}th study (\mjeqn{\tilde{v}}{v} is equation 9 in Higgins & Thompson, 2002, and can be regarded as the \sQuote{typical} within-study variance of the observed effect sizes or outcomes). The \mjseqn{H^2} statistic is computed with \mjdeqn{H^2 = \frac{\hat{\tau}^2 + \tilde{v}}{\tilde{v}}.}{H^2 = (hat(\tau)^2 + v) / v.} Analogous equations are used for mixed-effects models. Therefore, depending on the estimator of \mjseqn{\tau^2} used, the values of \mjseqn{I^2} and \mjseqn{H^2} will change. For random-effects models, \mjseqn{I^2} and \mjseqn{H^2} are often computed with \mjseqn{I^2 = (Q-(k-1))/Q} and \mjseqn{H^2 = Q/(k-1)}, where \mjseqn{Q} denotes the statistic of the test for heterogeneity and \mjseqn{k} the number of studies (i.e., observed effect sizes or outcomes) included in the meta-analysis. The equations used in the metafor package to compute these statistics are more general and have the advantage that the values of \mjseqn{I^2} and \mjseqn{H^2} will be consistent with the estimated value of \mjseqn{\tau^2} (i.e., if \mjeqn{\hat{\tau}^2 = 0}{hat(\tau)^2 = 0}, then \mjseqn{I^2 = 0} and \mjseqn{H^2 = 1} and if \mjeqn{\hat{\tau}^2 > 0}{hat(\tau)^2 > 0}, then \mjseqn{I^2 > 0} and \mjseqn{H^2 > 1}). The two definitions of \mjseqn{I^2} and \mjseqn{H^2} actually coincide when using the DerSimonian-Laird estimator of \mjseqn{\tau^2} (i.e., the commonly used equations are actually special cases of the more general definitions given above). Therefore, if you prefer the more conventional definitions of these statistics, use \code{method="DL"} when fitting the random/mixed-effects model with the \code{\link{rma.uni}} function. The conventional definitions are also automatically used when fitting fixed-effects models. The pseudo \mjseqn{R^2} statistic (Raudenbush, 2009) is computed with \mjdeqn{R^2 = \frac{\hat{\tau}_{RE}^2 - \hat{\tau}_{ME}^2}{\hat{\tau}_{RE}^2},}{R^2 = (hat(\tau)^2_RE - hat(\tau)^2_ME) / hat(\tau)^2_RE,} where \mjeqn{\hat{\tau}_{RE}^2}{hat(\tau)^2_RE} denotes the estimated value of \mjseqn{\tau^2} based on the random-effects model (i.e., the total amount of heterogeneity) and \mjeqn{\hat{\tau}_{ME}^2}{hat(\tau)^2_ME} denotes the estimated value of \mjseqn{\tau^2} based on the mixed-effects model (i.e., the residual amount of heterogeneity). It can happen that \mjeqn{\hat{\tau}_{RE}^2 < \hat{\tau}_{ME}^2}{hat(\tau)^2_RE < hat(\tau)^2_ME}, in which case \mjseqn{R^2} is set to zero (and also if \mjeqn{\hat{\tau}_{RE}^2 = 0}{hat(\tau)^2_RE = 0}). Again, the value of \mjseqn{R^2} will change depending on the estimator of \mjseqn{\tau^2} used. This statistic is only computed when the mixed-effects model includes an intercept (so that the random-effects model is clearly nested within the mixed-effects model). You can also use the \code{\link{anova.rma}} function to compute \mjseqn{R^2} for any two models that are known to be nested. Note that the pseudo \mjseqn{R^2} statistic may not be very accurate unless \mjseqn{k} is large (Lopez-Lopez et al., 2014). 
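As a small sketch of this point (assuming a data frame \code{dat} with columns \code{yi} and \code{vi}, for example as created with \code{\link{escalc}}; the object names are only illustrative), one can compare the two sets of definitions directly:

\preformatted{res.reml <- rma(yi, vi, data=dat)               ## REML estimator of tau^2 (the default)
res.dl   <- rma(yi, vi, data=dat, method="DL")  ## DerSimonian-Laird estimator

## I^2 and H^2 consistent with the respective estimate of tau^2
c(res.reml$I2, res.reml$H2)
c(res.dl$I2,   res.dl$H2)}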
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Higgins, J. P. T., & Thompson, S. G. (2002). Quantifying heterogeneity in a meta-analysis. \emph{Statistics in Medicine}, \bold{21}(11), 1539--1558. \verb{https://doi.org/10.1002/sim.1186} \enc{López-López}{Lopez-Lopez}, J. A., \enc{Marín-Martínez}{Marin-Martinez}, F., \enc{Sánchez-Meca}{Sanchez-Meca}, J., Van den Noortgate, W., & Viechtbauer, W. (2014). Estimation of the predictive power of the model in mixed-effects meta-regression: A simulation study. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{67}(1), 30--48. \verb{https://doi.org/10.1111/bmsp.12002} Raudenbush, S. W. (2009). Analyzing effect sizes: Random effects models. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 295--315). New York: Russell Sage Foundation. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \keyword{print} metafor/man/dat.konstantopoulos2011.Rd0000644000176200001440000000704314055736403017321 0ustar liggesusers\name{dat.konstantopoulos2011} \docType{data} \alias{dat.konstantopoulos2011} \title{Studies on the Effects of Modified School Calendars on Student Achievement} \description{Results from 56 studies on the effects of modified school calendars on student achievement.} \usage{dat.konstantopoulos2011} \format{The data frame contains the following columns: \tabular{lll}{ \bold{district} \tab \code{numeric} \tab district id number \cr \bold{school} \tab \code{numeric} \tab school id number (within district) \cr \bold{study} \tab \code{numeric} \tab study id number \cr \bold{yi} \tab \code{numeric} \tab standardized mean difference \cr \bold{vi} \tab \code{numeric} \tab corresponding sampling variance \cr \bold{year} \tab \code{numeric} \tab year of the study } } \details{ Instead of following the more traditional school calendar with a long summer break (in addition to a short winter and spring break), some schools have switched to a modified school calendar comprising more frequent but shorter intermittent breaks (e.g., 9 weeks of school followed by 3 weeks off), while keeping the total number of days at school approximately the same. The effects of using such a modified calendar on student achievement have been examined in a number of studies and were meta-analyzed by Cooper et al. (2003). The dataset (taken from Konstantopoulos, 2011) contains the results from 56 studies, each comparing the level of academic achievement in a group of students following a modified school calendar with that of a group of students following a more traditional school calendar. The difference between the two groups was quantified in terms of a standardized mean difference (with positive values indicating a higher mean level of achievement in the group following the modified school calendar). The studies were conducted at various schools that were clustered within districts. The data therefore have a multilevel structure, with schools nested within districts. A multilevel meta-analysis of these data can be used to estimate and account for the amount of heterogeneity between districts and between schools within districts. } \source{ Konstantopoulos, S. 
(2011). Fixed effects and variance components estimation in three-level meta-analysis. \emph{Research Synthesis Methods}, \bold{2}(1), 61--76. \verb{https://doi.org/10.1002/jrsm.35} } \references{ Cooper, H., Valentine, J. C., Charlton, K., & Melson, A. (2003). The effects of modified school calendars on student achievement and on school and community attitudes. \emph{Review of Educational Research}, \bold{73}(1), 1--52. \verb{https://doi.org/10.3102/00346543073001001} } \examples{ ### copy data into 'dat' and examine data dat <- dat.konstantopoulos2011 dat ### regular random-effects model res <- rma(yi, vi, data=dat) print(res, digits=3) ### regular random-effects model using rma.mv() res <- rma.mv(yi, vi, random = ~ 1 | study, data=dat) print(res, digits=3) ### multilevel random-effects model res.ml <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat) print(res.ml, digits=3) ### profile variance components profile(res.ml, progbar=FALSE) ### multivariate parameterization of the model res.mv <- rma.mv(yi, vi, random = ~ factor(school) | district, data=dat) print(res.mv, digits=3) ### tau^2 from multivariate model = sum of the two variance components from the multilevel model round(sum(res.ml$sigma2), 3) ### rho from multivariate model = intraclass correlation coefficient based on the multilevel model round(res.ml$sigma2[1] / sum(res.ml$sigma2), 3) } \keyword{datasets} metafor/man/radial.Rd0000644000176200001440000001311714055736403014217 0ustar liggesusers\name{radial} \alias{radial} \alias{galbraith} \alias{radial.rma} \title{Radial (Galbraith) Plots for 'rma' Objects} \description{ Function to create radial (also called Galbraith) plots for objects of class \code{"rma"}. \loadmathjax } \usage{ radial(x, \dots) galbraith(x, \dots) \method{radial}{rma}(x, center=FALSE, xlim, zlim, xlab, zlab, atz, aty, steps=7, level=x$level, digits=2, back="lightgray", transf, targs, pch=19, arc.res=100, cex, \dots) } \arguments{ \item{x}{an object of class \code{"rma"}.} \item{center}{logical to indicate whether the plot should be centered horizontally at the model estimate (the default is \code{FALSE}).} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{zlim}{z-axis limits. If unspecified, the function tries to set the z-axis limits to some sensible values (note that the z-axis limits are the actual vertical limit of the plotting region).} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{zlab}{title for the z-axis. If unspecified, the function tries to set an appropriate axis title.} \item{atz}{position for the z-axis tick marks and labels. If unspecified, these values are set by the function.} \item{aty}{position for the y-axis tick marks and labels. If unspecified, these values are set by the function.} \item{steps}{the number of tick marks for the y-axis (the default is 7). Ignored when argument \code{aty} is used.} \item{level}{numeric value between 0 and 100 to specify the level of the z-axis error region (the default is to take the value from the object).} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the y-axis should be rounded (the default is 2).} \item{back}{color of the z-axis error region. Set to \code{NA} to suppress shading of the region.} \item{transf}{optional argument to specify a function that should be used to transform the y-axis labels (e.g., \code{transf=exp}; see also \link{transf}). 
If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified via \code{transf}.} \item{pch}{plotting symbol. By default, a filled circle is used. See \code{\link{points}} for other options.} \item{arc.res}{integer to specify the number of line segments to use when drawing the y-axis and confidence interval arcs (the default is 100).} \item{cex}{optional character and symbol expansion factor. If unspecified, the function tries to set this to a sensible value.} \item{\dots}{other arguments.} } \details{ For a fixed-effects model, the plot shows the inverse of the standard errors on the horizontal axis against the observed effect sizes or outcomes standardized by their corresponding standard errors on the vertical axis. Since the vertical axis corresponds to standardized values, it is referred to as the z-axis within this function. On the right hand side of the plot, an arc is drawn (referred to as the y-axis within this function) corresponding to the observed effect sizes or outcomes. A line projected from (0,0) through a particular point within the plot onto this arc indicates the value of the observed effect size or outcome for that point. For a random-effects model, the function uses \mjeqn{1/\sqrt{v_i + \tau^2}}{1/\sqrt(v_i + \tau^2)} for the horizontal axis, where \mjseqn{v_i} is the sampling variance of the observed effect size or outcome and \mjseqn{\tau^2} is the amount of heterogeneity as estimated based on the model. For the z-axis, \mjeqn{\sqrt{v_i + \tau^2}}{\sqrt(v_i + \tau^2)} is used to standardize the observed effect sizes or outcomes. If the model contains moderators, the function returns an error. } \value{ A data frame with components: \item{x}{the x-axis coordinates of the points that were plotted.} \item{y}{the y-axis coordinates of the points that were plotted.} \item{ids}{the study id numbers.} \item{slab}{the study labels.} Note that the data frame is returned invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Galbraith, R. F. (1988). Graphical display of estimates having differing standard errors. \emph{Technometrics}, \bold{30}(3), 271--281. \verb{https://doi.org/10.1080/00401706.1988.10488400} Galbraith, R. F. (1988). A note on graphical presentation of estimated odds ratios from several clinical trials. \emph{Statistics in Medicine}, \bold{7}(8), 889--894. \verb{https://doi.org/10.1002/sim.4780070807} Galbraith, R. F (1994). Some applications of radial plots. \emph{Journal of the American Statistical Association}, \bold{89}(428), 1232--1242. \verb{https://doi.org/10.1080/01621459.1994.10476864} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) dat ### fixed-effects model res <- rma(yi, vi, data=dat, method="FE") ### draw radial plot radial(res) ### line from (0,0) with slope equal to the log risk ratio from the 4th study abline(a=0, b=dat$yi[4], lty="dotted") ### meta-analysis of the log risk ratios using a random-effects model res <- rma(yi, vi, data=dat) ### draw radial plot radial(res) } \keyword{hplot} metafor/man/dat.hine1989.Rd0000644000176200001440000000451514055736403015012 0ustar liggesusers\name{dat.hine1989} \docType{data} \alias{dat.hine1989} \title{Studies on Prophylactic Use of Lidocaine After a Heart Attack} \description{Results from 6 studies evaluating mortality from prophylactic use of lidocaine in acute myocardial infarction.} \usage{dat.hine1989} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{source} \tab \code{character} \tab source of data \cr \bold{n1i} \tab \code{numeric} \tab number of patients in lidocaine group \cr \bold{n2i} \tab \code{numeric} \tab number of patients in control group \cr \bold{ai} \tab \code{numeric} \tab number of deaths in lidocaine group \cr \bold{ci} \tab \code{numeric} \tab number of deaths in control group } } \details{ Hine et al. (1989) conducted a meta-analysis of death rates in randomized controlled trials in which prophylactic lidocaine was administered to patients with confirmed or suspected acute myocardial infarction. The dataset describes the mortality at the end of the assigned treatment period for control and intravenous lidocaine treatment groups for six studies. The question of interest is whether there is a detrimental effect of lidocaine. Because the studies were conducted to compare rates of arrhythmias following a heart attack, the studies, taken individually, are too small to detect important differences in mortality rates. The data in this dataset were obtained from Table I in Normand (1999, p. 322). } \source{ Normand, S. T. (1999). Meta-analysis: Formulating, evaluating, combining, and reporting. \emph{Statistics in Medicine}, \bold{18}(3), 321--359. \verb{https://doi.org/10.1002/(sici)1097-0258(19990215)18:3<321::aid-sim28>3.0.co;2-p} } \references{ Hine, L. K., Laird, N., Hewitt, P., & Chalmers, T. C. (1989). Meta-analytic evidence against prophylactic use of lidocaine in acute myocardial infarction. \emph{Archives of Internal Medicine}, \bold{149}(12), 2694--2698. \verb{https://doi.org/10.1001/archinte.1989.00390120056011} } \examples{ ### copy data into 'dat' dat <- dat.hine1989 ### calculate risk differences and corresponding sampling variances dat <- escalc(measure="RD", n1i=n1i, n2i=n2i, ai=ai, ci=ci, data=dat) dat ### meta-analysis of risk differences using a random-effects model res <- rma(yi, vi, data=dat) res } \keyword{datasets} metafor/man/forest.default.Rd0000644000176200001440000003362314055736403015714 0ustar liggesusers\name{forest.default} \alias{forest.default} \title{Forest Plots (Default Method)} \description{ Function to create forest plots for a given set of data. 
\loadmathjax } \usage{ \method{forest}{default}(x, vi, sei, ci.lb, ci.ub, annotate=TRUE, showweights=FALSE, header=FALSE, xlim, alim, olim, ylim, top=3, at, steps=5, level=95, refline=0, digits=2L, width, xlab, slab, ilab, ilab.xpos, ilab.pos, order, subset, transf, atransf, targs, rows, efac=1, pch=15, psize, plim=c(0.5,1.5), col, lty, fonts, cex, cex.lab, cex.axis, annosym, \dots) } \arguments{ \item{x}{vector of length \mjseqn{k} with the observed effect sizes or outcomes.} \item{vi}{vector of length \mjseqn{k} with the corresponding sampling variances.} \item{sei}{vector of length \mjseqn{k} with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{ci.lb}{vector of length \mjseqn{k} with the corresponding lower confidence interval bounds. Not needed if \code{vi} or \code{sei} is specified. See \sQuote{Details}.} \item{ci.ub}{vector of length \mjseqn{k} with the corresponding upper confidence interval bounds. Not needed if \code{vi} or \code{sei} is specified. See \sQuote{Details}.} \item{annotate}{logical to specify whether annotations should be added to the plot (the default is \code{TRUE}).} \item{showweights}{logical to specify whether the annotations should also include inverse variance weights (the default is \code{FALSE}).} \item{header}{logical to specify whether column headings should be added to the plot (the default is \code{FALSE}). Can also be a character vector to specify the left and right headings.} \item{xlim}{horizontal limits of the plot region. If unspecified, the function tries to set the horizontal plot limits to some sensible values.} \item{alim}{the actual x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{olim}{optional argument to specify observation/outcome limits. If unspecified, no limits are used.} \item{ylim}{the y-axis limits of the plot. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{top}{the amount of space to leave empty at the top of the plot (e.g., for adding headers) (the default is 3 rows).} \item{at}{position of the x-axis tick marks and corresponding labels. If unspecified, the function tries to set the tick mark positions/labels to some sensible values.} \item{steps}{the number of tick marks for the x-axis (the default is 5). Ignored when the positions are specified via the \code{at} argument.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{refline}{numeric value to specify the location of the vertical \sQuote{reference} line (the default is 0). The line can be suppressed by setting this argument to \code{NA}.} \item{digits}{integer to specify the number of decimal places to which the tick mark labels of the x-axis and the annotations should be rounded (the default is \code{2L}). Can also be a vector of two integers, the first to specify the number of decimal places for the annotations, the second for the x-axis labels. When specifying an integer (e.g., \code{2L}), trailing zeros after the decimal mark are dropped for the x-axis labels. When specifying a numeric value (e.g., \code{2}), trailing zeros are retained.} \item{width}{optional integer to manually adjust the width of the columns for the annotations.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{slab}{optional vector with labels for the \mjseqn{k} studies. 
If unspecified, simple labels are created within the function. To suppress labels, set this argument to \code{NA}.}
\item{ilab}{optional vector, matrix, or data frame providing additional information about the studies that should be added to the plot.}
\item{ilab.xpos}{numeric vector to specify the x-axis position(s) of the variable(s) given via \code{ilab} (must be specified if \code{ilab} is specified).}
\item{ilab.pos}{integer(s) (either 1, 2, 3, or 4) to specify the alignment of the vector(s) given via \code{ilab} (2 means right, 4 means left aligned). If unspecified, the default is to center the labels.}
\item{order}{optional character string to specify how the studies should be ordered. Can also be a variable based on which the studies will be ordered. See \sQuote{Details}.}
\item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the plot.}
\item{transf}{optional argument to specify a function that should be used to transform the observed outcomes and corresponding confidence interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.}
\item{atransf}{optional argument to specify a function that should be used to transform the x-axis labels and annotations (e.g., \code{atransf=exp}; see also \link{transf}). If unspecified, no transformation is used.}
\item{targs}{optional arguments needed by the function specified via \code{transf} or \code{atransf}.}
\item{rows}{optional vector to specify the rows (or more generally, the horizontal positions) for plotting the outcomes. Can also be a single value to specify the row (horizontal position) of the first outcome (the remaining outcomes are then plotted below this starting row). If unspecified, the function sets this value automatically.}
\item{efac}{vertical expansion factor for confidence interval limits and arrows. The default value of 1 should usually work okay. Can also be a vector of two numbers, the first for CI limits, the second for arrows.}
\item{pch}{plotting symbol to use for the observed outcomes. By default, a filled square is used. See \code{\link{points}} for other options. Can also be a vector of values.}
\item{psize}{optional numeric value to specify the point sizes for the observed outcomes. If unspecified, the point sizes are a function of the precision of the estimates. Can also be a vector of values.}
\item{plim}{numeric vector of length 2 to scale the point sizes (ignored when \code{psize} is specified). See \sQuote{Details}.}
\item{col}{optional character string to specify the name of a color to use for plotting the observed outcomes (\code{"black"} is used by default if not specified). Can also be a vector of color names.}
\item{lty}{optional character string to specify the line type for the confidence intervals. If unspecified, the function sets this to \code{"solid"} by default.}
\item{fonts}{optional character string to specify the font to use for the study labels, annotations, and the extra information (if specified via \code{ilab}). If unspecified, the default font is used.}
\item{cex}{optional character and symbol expansion factor. If unspecified, the function tries to set this to a sensible value.}
\item{cex.lab}{optional expansion factor for the x-axis title. If unspecified, the function tries to set this to a sensible value.}
\item{cex.axis}{optional expansion factor for the x-axis labels.
If unspecified, the function tries to set this to a sensible value.} \item{annosym}{optional vector of length 3 to change the left bracket, separation, and right bracket symbols for the annotations.} \item{\dots}{other arguments.} } \details{ The plot shows the observed effect sizes or outcomes with corresponding confidence intervals. To use the function, one should specify the observed outcomes (via the \code{x} argument) together with the corresponding sampling variances (via the \code{vi} argument) or with the corresponding standard errors (via the \code{sei} argument). Alternatively, one can specify the observed outcomes together with the corresponding confidence interval bounds (via the \code{ci.lb} and \code{ci.ub} arguments). With the \code{transf} argument, the observed outcomes and corresponding confidence interval bounds can be transformed with some suitable function. For example, when plotting log odds ratios, then one could use \code{transf=exp} to obtain a forest plot showing the odds ratios. Alternatively, one can use the \code{atransf} argument to transform the x-axis labels and annotations (e.g., \code{atransf=exp}). See also \link{transf} for some other useful transformation functions in the context of a meta-analysis. The examples below illustrate the use of these arguments. By default, the studies are ordered from top to bottom (i.e., the first study in the dataset will be placed in row \mjseqn{k}, the second study in row \mjseqn{k-1}, and so on, until the last study, which is placed in the first row). The studies can be reordered with the \code{order} argument: \itemize{ \item \code{order="obs"}: the studies are ordered by the observed outcomes, \item \code{order="prec"}: the studies are ordered by their sampling variances. } Alternatively, it is also possible to set \code{order} equal to a variable based on which the studies will be ordered (see \sQuote{Examples}). By default (i.e., when \code{psize} is not specified), the size of the points is a function of the precision (i.e., inverse standard error) of the outcomes. This way, more precise estimates are visually more prominent in the plot. By making the point sizes a function of the inverse standard error of the estimates, their area is proportional to the inverse sampling variances, which corresponds to the weights they would receive in a fixed-effects model. However, the point sizes are rescaled so that the smallest point size is \code{plim[1]} and the largest point size is \code{plim[2]}. As a result, their relative sizes (i.e., areas) no longer exactly correspond to their relative weights in such a model. If exactly relative point sizes are desired, one can set \code{plim[2]} to \code{NA}, in which case the points are rescaled so that the smallest point size corresponds to \code{plim[1]} and all other points are scaled accordingly. As a result, the largest point may be very large. Alternatively, one can set \code{plim[1]} to \code{NA}, in which case the points are rescaled so that the largest point size corresponds to \code{plim[2]} and all other points are scaled accordingly. As a result, the smallest point may be very small and essentially indistinguishable from the confidence interval line. To avoid the latter, one can also set \code{plim[3]}, which enforces a minimal point size. Summary estimates can be added to the plot with the \code{\link{addpoly}} function. See the documentation for that function for examples. 
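As a brief sketch of the point size scaling described above (reusing the log risk ratio data from the \sQuote{Examples} section below; the value chosen for \code{plim[1]} is purely illustrative):

\preformatted{dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

## default rescaling of the point sizes
forest(dat$yi, dat$vi)

## point sizes rescaled so that the relative point areas correspond
## exactly to the inverse-variance weights
forest(dat$yi, dat$vi, plim=c(0.5,NA))}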
} \note{ The function tries to set some sensible values for the optional arguments, but it may be necessary to adjust these in certain circumstances. The function actually returns some information about the chosen defaults invisibly. Printing this information is useful as a starting point to make adjustments to the plot. If the number of studies is quite large, the labels, annotations, and symbols may become quite small and impossible to read. Stretching the plot window vertically may then provide a more readable figure (one should call the function again after adjusting the window size, so that the label/symbol sizes can be properly adjusted). Also, the \code{cex}, \code{cex.lab}, and \code{cex.axis} arguments are then useful to adjust the symbol and text sizes. If the horizontal plot and/or x-axis limits are set manually, then the horizontal plot limits (\code{xlim}) must be at least as wide as the x-axis limits (\code{alim}). This restriction is enforced inside the function. If the outcome measure used for creating the plot is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{olim} argument to enforce those limits (the observed outcomes and confidence intervals cannot exceed those bounds then). The \code{lty} argument can also be a vector of two elements, the first for specifying the line type of the individual CIs (\code{"solid"} by default), the second for the line type of the horizontal line that is automatically added to the plot (\code{"solid"} by default; set to \code{"blank"} to remove it). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Lewis, S., & Clarke, M. (2001). Forest plots: Trying to see the wood and the trees. \emph{British Medical Journal}, \bold{322}(7300), 1479--1480. \verb{https://doi.org/10.1136/bmj.322.7300.1479} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{forest}}, \code{\link{forest.rma}}, \code{\link{addpoly}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### default forest plot of the observed log risk ratios forest(dat$yi, dat$vi) ### forest plot of the observed risk ratios (transform outcomes) forest(dat$yi, dat$vi, slab=paste(dat$author, dat$year, sep=", "), transf=exp, alim=c(0,2), steps=5, xlim=c(-2.5,4), refline=1, cex=.9, header=TRUE) ### forest plot of the observed risk ratios (transformed x-axis) forest(dat$yi, dat$vi, slab=paste(dat$author, dat$year, sep=", "), atransf=exp, at=log(c(.05,.25,1,4,20)), xlim=c(-10,8), cex=.9, header=TRUE) ### forest plot of the observed risk ratios with studies ordered by the RRs forest(dat$yi, dat$vi, slab=paste(dat$author, dat$year, sep=", "), atransf=exp, at=log(c(.05,.25,1,4,20)), xlim=c(-10,8), cex=.9, header=TRUE, order="obs") ### forest plot of the observed risk ratios with studies ordered by absolute latitude forest(dat$yi, dat$vi, slab=paste(dat$author, dat$year, sep=", "), atransf=exp, at=log(c(.05,.25,1,4,20)), xlim=c(-10,8), cex=.9, header=TRUE, order=dat$ablat) ### see also examples for the forest.rma function } \keyword{hplot} metafor/man/print.matreg.Rd0000644000176200001440000000242114055736403015371 0ustar liggesusers\name{print.matreg} \alias{print.matreg} \title{Print Method for 'matreg' Objects} \description{ Print method for objects of class \code{"matreg"}. } \usage{ \method{print}{matreg}(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, \dots) } \arguments{ \item{x}{an object of class \code{"matreg"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{signif.stars}{logical to specify whether p-values should be encoded visually with \sQuote{significance stars}. Defaults to the \code{show.signif.stars} slot of \code{\link{options}}.} \item{signif.legend}{logical to specify whether the legend for the \sQuote{significance stars} should be printed. Defaults to the value for \code{signif.stars}.} \item{\dots}{other arguments.} } \details{ The output is a table with the estimated coefficients, corresponding standard errors, test statistics, p-values, and confidence interval bounds. } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \seealso{ \code{\link{matreg}} } \keyword{print} metafor/man/print.confint.rma.Rd0000644000176200001440000000252014055736403016330 0ustar liggesusers\name{print.confint.rma} \alias{print.confint.rma} \alias{print.list.confint.rma} \title{Print Methods for 'confint.rma' and 'list.confint.rma' Objects} \description{ Print methods for objects of class \code{"confint.rma"} and \code{"list.confint.rma"}. 
} \usage{ \method{print}{confint.rma}(x, digits=x$digits, \dots) \method{print}{list.confint.rma}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"confint.rma"} or \code{"list.confint.rma"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ The output includes: \itemize{ \item estimate of the model coefficient or variance/correlation parameter \item lower bound of the confidence interval \item upper bound of the confidence interval } } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{confint.rma.uni}}, \code{\link{confint.rma.mv}} } \keyword{print} metafor/man/dat.mcdaniel1994.Rd0000644000176200001440000001005414055736403015632 0ustar liggesusers\name{dat.mcdaniel1994} \docType{data} \alias{dat.mcdaniel1994} \title{Studies on the Validity of Employment Interviews} \description{Results from 160 studies on the correlation between employment interview assessments and job performance.} \usage{dat.mcdaniel1994} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{ni} \tab \code{numeric} \tab sample size of the study \cr \bold{ri} \tab \code{numeric} \tab observed correlation \cr \bold{type} \tab \code{character} \tab interview type (j = job-related, s = situational, p = psychological) \cr \bold{struct} \tab \code{character} \tab interview structure (u = unstructured, s = structured) } } \details{ The 160 studies provide data in terms of the correlation between employment interview performance and actual job performance. In addition, the interview type and the interview structure are indicated. McDaniel et al. (1994) describe the interview type and structure variables as follows. "Questions in situational interviews [...] focus on the individual's ability to project what his or her behavior would be in a given situation. [...] Job-related interviews are those in which the interviewer is a personnel officer or hiring authority and the questions attempt to assess past behaviors and job-related information, but most questions are not considered situational. Psychological interviews are conducted by a psychologist, and the questions are intended to assess personal traits, such as dependability." In structured interviews, "the questions and acceptable responses were specified in advance and the responses were rated for appropriateness of content. [...] Unstructured interviews gather applicant information in a less systematic manner than do structured interviews. Although the questions may be specified in advance, they usually are not, and there is seldom a formalized scoring guide. Also, all persons being interviewed are not typically asked the same questions." The goal of the meta-analysis was to examine the overall criterion-related validity of employment interviews and to examine whether the validity depends on the type and structure of the interview. The data in this dataset were obtained from Table A.2 in Rothstein, Sutton, and Borenstein (2005, p. 325-329). 
Note that the \code{type} and \code{struct} variables contain some \code{NA}s. } \source{ Rothstein, H. R., Sutton, A. J., & Borenstein, M. (Eds.). (2005). \emph{Publication bias in meta-analysis: Prevention, assessment, and adjustments}. Chichester, England: Wiley. } \references{ McDaniel, M. A., Whetzel, D. L., Schmidt, F. L., & Maurer, S. D. (1994). The validity of employment interviews: A comprehensive review and meta-analysis. \emph{Journal of Applied Psychology}, \bold{79}(4), 599--616. \verb{https://doi.org/10.1037/0021-9010.79.4.599} } \examples{ ### copy data into 'dat' dat <- dat.mcdaniel1994 ### calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat) dat ### meta-analysis of the transformed correlations using a random-effects model res <- rma(yi, vi, data=dat) res ### average correlation with 95\% CI predict(res, transf=transf.ztor) ### mixed-effects model with interview type as factor ### note: job-related interviews is the reference level rma(yi, vi, mods = ~ factor(type), data=dat) ### mixed-effects model with interview structure as factor ### note: structured interviews is the reference level rma(yi, vi, mods = ~ factor(struct), data=dat) ### note: the interpretation of the results is difficult since all ### situational interviews were structured, almost all psychological ### interviews were unstructured, and actually for the majority of ### the psychological interviews it was unknown whether the interview ### was structured or unstructured table(dat$type, dat$struct, useNA="always") ### meta-analysis of raw correlations using a random-effects model res <- rma(measure="COR", ri=ri, ni=ni, data=dat.mcdaniel1994) res } \keyword{datasets} metafor/man/print.escalc.Rd0000644000176200001440000001263614055736403015355 0ustar liggesusers\name{print.escalc} \alias{print.escalc} \alias{summary.escalc} \title{Print and Summary Methods for 'escalc' Objects} \description{ Print and summary methods for objects of class \code{"escalc"}. \loadmathjax } \usage{ \method{print}{escalc}(x, digits=attr(x,"digits"), \dots) \method{summary}{escalc}(object, out.names=c("sei","zi","pval","ci.lb","ci.ub"), var.names, H0=0, append=TRUE, replace=TRUE, level=95, olim, digits, transf, \dots) } \arguments{ \item{x}{an object of class \code{"escalc"}.} \item{object}{an object of class \code{"escalc"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{out.names}{character string with five elements to specify the variable names for the standard errors, test statistics, p-values, and lower/upper confidence interval bounds.} \item{var.names}{character string with two elements to specify the variable names for the observed effect sizes or outcomes and the sampling variances (the default is to take the value from the object if possible).} \item{H0}{numeric value to specify the value of the effect size or outcome under the null hypothesis (the default is 0).} \item{append}{logical to specify whether the data frame specified via the \code{object} argument should be returned together with the additional variables that are calculated by the \code{summary} function (the default is \code{TRUE}).} \item{replace}{logical to specify whether existing values for \code{sei}, \code{zi}, \code{ci.lb}, and \code{ci.ub} in the data frame should be replaced or not. Only relevant when the data frame already contains these variables.
If \code{replace=TRUE} (the default), all of the existing values will be overwritten. If \code{replace=FALSE}, only \code{NA} values will be replaced.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{olim}{optional argument to specify observation/outcome limits. If unspecified, no limits are used.} \item{transf}{optional argument to specify a function that should be used to transform the observed effect sizes or outcomes and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used. Any additional arguments needed for the function specified here can be passed via \code{\dots}.} \item{\dots}{other arguments.} } \value{ The \code{print.escalc} function formats and prints the data frame, so that the observed effect sizes or outcomes and sampling variances are rounded (to the number of digits specified). The \code{summary.escalc} function creates an object that is a data frame containing the original data (if \code{append=TRUE}) and the following components: \item{yi}{observed effect sizes or outcomes (transformed if \code{transf} is specified).} \item{vi}{corresponding sampling variances.} \item{sei}{corresponding standard errors.} \item{zi}{test statistics for testing \mjeqn{\mbox{H}_0{:}\; \theta_i = \mbox{H0}}{H_0: \theta_i = H0} (i.e., \code{(yi-H0)/sei}).} \item{pval}{corresponding p-values.} \item{ci.lb}{lower confidence interval bounds (transformed if \code{transf} is specified).} \item{ci.ub}{upper confidence interval bounds (transformed if \code{transf} is specified).} When the \code{transf} argument is specified, elements \code{vi}, \code{sei}, \code{zi}, and \code{pval} are not included (since these only apply to the untransformed effect sizes or outcomes). Note that the actual variable names above depend on the \code{out.names} (and \code{var.names}) arguments. If the data frame already contains variables with names as specified by the \code{out.names} argument, the values for these variables will be overwritten when \code{replace=TRUE} (which is the default). By setting \code{replace=FALSE}, only values that are \code{NA} will be replaced. The \code{print.escalc} function again formats and prints the data frame, rounding the added variables to the number of digits specified. } \note{ If some transformation function has been specified for the \code{transf} argument, then \code{yi}, \code{ci.lb}, and \code{ci.ub} will be transformed accordingly. However, \code{vi} and \code{sei} then still reflect the sampling variances and standard errors of the untransformed values. The \code{summary.escalc} function computes \code{level} \% Wald-type confidence intervals, which may or may not be the most accurate method for computing confidence intervals for the chosen effect size or outcome measure. If the outcome measure used is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{olim} argument to enforce those observation/outcome limits (the observed outcomes and confidence intervals cannot exceed those bounds then). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{escalc}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) dat ### apply summary function summary(dat) summary(dat, transf=exp) } \keyword{print} metafor/man/blup.Rd0000644000176200001440000001163614055736403013731 0ustar liggesusers\name{blup} \alias{blup} \alias{blup.rma.uni} \title{Best Linear Unbiased Predictions for 'rma.uni' Objects} \description{ The function calculates best linear unbiased predictions (BLUPs) of the study-specific true effect sizes or outcomes by combining the fitted values based on the fixed effects and the estimated contributions of the random effects for objects of class \code{"rma.uni"}. Corresponding standard errors and prediction interval bounds are also provided. \loadmathjax } \usage{ blup(x, \dots) \method{blup}{rma.uni}(x, level, digits, transf, targs, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}.} \item{level}{numeric value between 0 and 100 to specify the prediction interval level. If unspecified, the default is to take the value from the object.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{transf}{optional argument to specify a function that should be used to transform the predicted values and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{\dots}{other arguments.} } \value{ An object of class \code{"list.rma"}. The object is a list containing the following components: \item{pred}{predicted values.} \item{se}{corresponding standard errors.} \item{pi.lb}{lower bound of the prediction intervals.} \item{pi.ub}{upper bound of the prediction intervals.} \item{\dots}{some additional elements/values.} The object is formatted and printed with \code{\link{print.list.rma}}. } \note{ For best linear unbiased predictions of only the random effects, see \code{\link{ranef}}. For predicted/fitted values that are based only on the fixed effects of the model, see \code{\link{fitted.rma}} and \code{\link{predict.rma}}. For conditional residuals (the deviations of the observed effect sizes or outcomes from the BLUPs), see \code{rstandard.rma.uni} with \code{type="conditional"}. Fixed-effects models (with or without moderators) do not contain random study effects. The BLUPs for these models will therefore be equal to the fitted values, that is, those obtained with \code{\link{fitted.rma}} and \code{\link{predict.rma}}. When using the \code{transf} argument, the transformation is applied to the predicted values and the corresponding interval bounds. The standard errors are then set equal to \code{NA} and are omitted from the printed output. By default, a standard normal distribution is used to calculate the prediction intervals. When the model was fitted with \code{test="t"} or \code{test="knha"}, then a t-distribution with \mjseqn{k-p} degrees of freedom is used. To be precise, it should be noted that the function actually calculates empirical BLUPs (eBLUPs), since the predicted values are a function of the estimated value of \mjseqn{\tau^2}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Kackar, R. N., & Harville, D. 
A. (1981). Unbiasedness of two-stage estimation and prediction procedures for mixed linear models. Communications in Statistics, Theory and Methods, \bold{10}(13), 1249--1261. \verb{https://doi.org/10.1080/03610928108828108} Raudenbush, S. W., & Bryk, A. S. (1985). Empirical Bayes meta-analysis. \emph{Journal of Educational Statistics}, \bold{10}(2), 75--98. \verb{https://doi.org/10.3102/10769986010002075} Robinson, G. K. (1991). That BLUP is a good thing: The estimation of random effects. \emph{Statistical Science}, \bold{6}(1), 15--32. \verb{https://doi.org/10.1214/ss/1177011926} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{predict.rma}}, \code{\link{fitted.rma}}, \code{\link{ranef.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### meta-analysis of the log risk ratios using a random-effects model res <- rma(yi, vi, data=dat) ### BLUPs of the true risk ratios for each study blup(res, transf=exp) ### illustrate shrinkage of BLUPs towards the (estimated) population average res <- rma(yi, vi, data=dat) blups <- blup(res)$pred plot(NA, NA, xlim=c(.8,2.4), ylim=c(-2,0.5), pch=19, xaxt="n", bty="n", xlab="", ylab="Log Risk Ratio") segments(rep(1,13), dat$yi, rep(2,13), blups, col="darkgray") points(rep(1,13), dat$yi, pch=19) points(rep(2,13), blups, pch=19) axis(side=1, at=c(1,2), labels=c("Observed\nValues", "BLUPs"), lwd=0) segments(.7, res$beta, 2.15, res$beta, lty="dotted") text(2.3, res$beta, expression(hat(mu)==-0.71), cex=1) } \keyword{models} metafor/man/vif.Rd0000644000176200001440000001545414055736403013555 0ustar liggesusers\name{vif} \alias{vif} \alias{vif.rma} \alias{print.vif.rma} \title{Variance Inflation Factors for 'rma' Objects} \description{ Compute variance inflation factors (VIFs) for objects of class \code{"rma"}. \loadmathjax } \usage{ vif(x, \dots) \method{vif}{rma}(x, btt, intercept=FALSE, table=FALSE, digits, \dots) \method{print}{vif.rma}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"rma"} (for \code{vif}) or \code{"vif.rma"} (for \code{print}).} \item{btt}{optional vector of indices to specify a set of coefficients for which a generalized variance inflation factor (GVIF) should be computed. Can also be a string to grep for. See \sQuote{Details}.} \item{intercept}{logical to specify whether to include the intercept (if the model includes one) in the computation of the VIFs (the default is \code{FALSE}). See \sQuote{Note}.} \item{table}{logical to specify whether the VIFs should be added to the model coefficient table (the default is \code{FALSE}).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{\dots}{other arguments.} } \details{ The function computes variance inflation factors (VIFs) for meta-regression models. Hence, the model specified via argument \code{x} must include moderator variables (and more than one for this to be useful, as the VIF for a model with a single moderator variable will always be equal to 1). 
Let \mjseqn{b_j} denote the estimate of the \mjseqn{j}th model coefficient of a particular meta-regression model and \mjeqn{\mbox{Var}[b_j]}{Var[b_j]} its variance (i.e., the corresponding diagonal element from the matrix obtained with the \code{\link{vcov.rma}} function). Moreover, let \mjseqn{b'_j} denote the estimate of the same model coefficient if the other moderator variables in the model had \emph{not} been included in the model and \mjeqn{\mbox{Var}[b'_j]}{Var[b'_j]} the corresponding variance. Then the VIF for the model coefficient is given by \mjdeqn{\mbox{VIF}[b_j] = \frac{\mbox{Var}[b_j]}{\mbox{Var}[b'_j]},}{VIF[b_j] = Var[b_j] / Var[b'_j],} which indicates the inflation in the variance of the estimated model coefficient due to potential collinearity of the \mjseqn{j}th moderator variable with the other moderator variables in the model. Taking the square root of a VIF gives the corresponding standard error inflation factor (SIF). If \code{btt} is not specified, then the VIF is computed for each individual model coefficient. However, if the model includes factors (coded in terms of multiple dummy variables) or other sets of moderator variables that belong together (e.g., for polynomials or cubic splines), one may want to examine how much the variance in all of the coefficients in the set is jointly impacted by collinearity with the other moderator variables in the model. For this, we can compute a generalized variance inflation factor (GVIF) (Fox & Monette, 1992) by setting the \code{btt} argument equal to the indices of those coefficients for which the GVIF should be computed. The square root of a GVIF indicates the inflation in the confidence ellipse/(hyper)ellipsoid for the set of coefficients corresponding to the set due to collinearity. However, to make this value more directly comparable to SIFs (based on single coefficients) or when the set includes a different number of coefficients, the function computes the generalized standard error inflation factor (GSIF) by raising the GVIF to the power of \mjseqn{1/(2m)} (where \mjseqn{m} denotes the number of coefficients in the set). } \value{ If \code{btt} is not specified, either a vector (if \code{table=FALSE}) with the VIFs or a data frame (if \code{table=TRUE}) with the following elements: \item{estimate}{estimated model coefficients.} \item{se}{corresponding standard errors.} \item{zval}{corresponding test statistics.} \item{pval}{corresponding p-values.} \item{ci.lb}{corresponding lower bound of the confidence intervals.} \item{ci.ub}{corresponding upper bound of the confidence intervals.} \item{vif}{corresponding variance inflation factors.} \item{sif}{corresponding standard error inflation factors.} If \code{btt} is specified, a list with elements \code{gvif} and \code{gsif} with the GVIF and GSIF values for the set of coefficients specified. } \note{ The values of the (G)VIFs are invariant to the scaling of the predictor variables if the model includes an intercept that is removed when inverting the correlation matrix of the model coefficients to compute the (G)VIFs. This is the default behavior. See \sQuote{Examples}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Belsley, D. A., Kuh, E., & Welsch, R. E. (1980). \emph{Regression diagnostics}. New York: Wiley. Fox, J., & Monette, G. (1992). Generalized collinearity diagnostics. \emph{Journal of the American Statistical Association}, \bold{87}(417), 178-183. 
\verb{https://doi.org/10.2307/2290467} Stewart, G. W. (1987). Collinearity and least squares regression. \emph{Statistical Science}, \bold{2}(1), 68-84. \verb{https://doi.org/10.1214/ss/1177013439} Wax, Y. (1992). Collinearity diagnosis for a relative risk regression-analysis: An application to assessment of diet cancer relationship in epidemiologic studies. \emph{Statistics in Medicine}, \bold{11}(10), 1273--1287. \verb{https://doi.org/10.1002/sim.4780111003} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}}, \code{\link{rma.glmm}} } \examples{ ### copy data from Bangert-Drowns et al. (2004) into 'dat' dat <- dat.bangertdrowns2004 ### fit mixed-effects meta-regression model res <- rma(yi, vi, mods = ~ length + wic + feedback + info + pers + imag + meta, data=dat) ### get variance inflation factors vif(res) ### show that VIFs are not influenced by scaling of the predictors u <- scale # to standardize the predictors res <- rma(yi, vi, mods = ~ u(length) + u(wic) + u(feedback) + u(info) + u(pers) + u(imag) + u(meta), data=dat) vif(res) ### get full table vif(res, table=TRUE) ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit meta-regression model where one predictor (alloc) is a three-level factor res <- rma(yi, vi, mods = ~ ablat + alloc, data=dat) ### get variance inflation factors for all individual coefficients vif(res, table=TRUE) ### generalized variance inflation factor for the 'alloc' factor vif(res, btt=3:4) ### can also specify a string to grep for vif(res, btt="alloc") } \keyword{models} metafor/man/dat.hahn2001.Rd0000644000176200001440000000427114055736403014754 0ustar liggesusers\name{dat.hahn2001} \docType{data} \alias{dat.hahn2001} \title{Studies on the Effectiveness of Different Rehydration Solutions for the Prevention of Unscheduled Intravenous Infusion in Children with Diarrhoea} \description{Results from 12 trials examining the effectiveness of a reduced versus standard rehydration solution for the prevention of unscheduled intravenous infusion in children with diarrhoea.} \usage{ dat.hahn2001 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab trial name and year \cr \bold{ai} \tab \code{numeric} \tab number of children requiring unscheduled intravenous infusion in the reduced rehydration solution group \cr \bold{n1i} \tab \code{numeric} \tab number of children in the reduced rehydration solution group \cr \bold{ci} \tab \code{numeric} \tab number of children requiring unscheduled intravenous infusion in the standard rehydration solution group \cr \bold{n2i} \tab \code{numeric} \tab number of children in the standard rehydration solution group } } \details{ The dataset includes the results from 12 randomized clinical trials that examined the effectiveness of a reduced osmolarity oral rehydration solution (total osmolarity <250 mmol/l with reduced sodium) with a standard WHO oral rehydration solution (sodium 90 mmol/l, glucose 111mmol/l, total osmolarity 311 mmol/l) for the prevention of unscheduled intravenous infusion in children with diarrhoea. } \source{ Hahn, S., Kim, Y., & Garner, P. (2001). Reduced osmolarity oral rehydration solution for treating dehydration due to diarrhoea in children: Systematic review. 
\emph{British Medical Journal}, \bold{323}(7304), 81--85. \verb{https://doi.org/10.1136/bmj.323.7304.81} } \examples{ ### copy data into 'dat' and examine data dat <- dat.hahn2001 dat ### meta-analysis of (log) odds ratios using the Mantel-Haenszel method res <- rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, digits=2, slab=study) res ### forest plot (also show studies that were excluded from the analysis) options(na.action="na.pass") forest(res, atransf=exp, at=log(c(.01, .1, 1, 10, 100)), header=TRUE) options(na.action="na.omit") } \keyword{datasets} metafor/man/dat.lim2014.Rd0000644000176200001440000000712014055736403014617 0ustar liggesusers\name{dat.lim2014} \docType{data} \alias{dat.lim2014} \title{Studies on the Association between Maternal Size, Offspring Size, and Number of Offspring} \description{Results from studies examining the association between maternal size, offspring size, and number of offspring.} \usage{ dat.lim2014 } \format{The object is a list containing data frames \code{m_o_size}, \code{m_o_fecundity}, \code{o_o_unadj}, and \code{o_o_adj} that contain the following columns and the corresponding phylogenetic trees called \code{m_o_size_tree}, \code{m_o_fecundity_tree}, \code{o_o_unadj_tree}, and \code{o_o_adj_tree}: \tabular{lll}{ \bold{article} \tab \code{numeric} \tab article id \cr \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{species} \tab \code{character} \tab species \cr \bold{amniotes} \tab \code{character} \tab whether the species was amniotic \cr \bold{environment} \tab \code{character} \tab whether the species were wild or captive \cr \bold{reprounit} \tab \code{character} \tab whether the data were based on lifetime reproductive output or a single reproductive event (only in \code{m_o_size} and \code{m_o_fecundity}) \cr \bold{ri} \tab \code{numeric} \tab correlation coefficient \cr \bold{ni} \tab \code{numeric} \tab sample size } } \details{ The object \code{dat.lim2014} includes 4 datasets: \tabular{ll}{ \code{m_o_size} \tab on the correlation between maternal size and offspring size \cr \code{m_o_fecundity} \tab on the correlation between maternal size and number of offspring \cr \code{o_o_unadj} \tab on the correlation between offspring size and number of offspring \cr \code{o_o_adj} \tab on the correlation between offspring size and number of offspring adjusted for maternal size } Objects \code{m_o_size_tree}, \code{m_o_fecundity_tree}, \code{o_o_unadj_tree}, and \code{o_o_adj_tree} are the corresponding phylogenetic trees for the species included in each of these datasets. } \source{ Lim, J. N., Senior, A. M., & Nakagawa, S. (2014). Heterogeneity in individual quality and reproductive trade-offs within species. \emph{Evolution}, \bold{68}(8), 2306--2318. \verb{https://doi.org/10.1111/evo.12446} } \references{ Hadfield, J. D., & Nakagawa, S. (2010). General quantitative genetic methods for comparative biology: Phylogenies, taxonomies and multi-trait models for continuous and categorical characters. \emph{Journal of Evolutionary Biology}, \bold{23}(3), 494--508. \verb{https://doi.org/10.1111/j.1420-9101.2009.01915.x} Nakagawa, S., & Santos, E. S. A. (2012). Methodological issues and advances in biological meta-analysis. \emph{Evolutionary Ecology}, \bold{26}(5), 1253--1274.
\verb{https://doi.org/10.1007/s10682-012-9555-5} } \examples{ ### copy data into 'dat' and examine data dat <- dat.lim2014$o_o_unadj head(dat) ### calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat) \dontrun{ ### load 'ape' package require(ape) ### copy tree to 'tree' tree <- dat.lim2014$o_o_unadj_tree ### compute branch lengths tree <- compute.brlen(tree) ### compute phylogenetic correlation matrix A <- vcv(tree, corr=TRUE) ### make copy of the species variable dat$species.phy <- dat$species ### create effect size id variable dat$esid <- 1:nrow(dat) ### fit multilevel phylogenetic meta-analytic model res <- rma.mv(yi, vi, random = list(~ 1 | article, ~ 1 | esid, ~ 1 | species, ~ 1 | species.phy), R=list(species.phy=A), data=dat) res } } \keyword{datasets} metafor/man/contrmat.Rd0000644000176200001440000000671114055736403014614 0ustar liggesusers\name{contrmat} \alias{contrmat} \title{Construct Contrast Matrix for Two-Group Comparisons} \description{ The function constructs a matrix that indicates which two groups have been contrasted against each other in each row of a dataset. } \usage{ contrmat(data, grp1, grp2, last, shorten=FALSE, minlen=2, check=TRUE, append=TRUE) } \arguments{ \item{data}{a data frame in wide format.} \item{grp1}{either the name (given as a character string) or the position (given as a single number) of the first group variable in the data frame.} \item{grp2}{either the name (given as a character string) or the position (given as a single number) of the second group variable in the data frame.} \item{last}{optional character string to specify which group will be placed in the last column of the matrix (must be one of the groups in the group variables). If not given, the most frequently occurring second group is placed last.} \item{shorten}{logical to specify whether the variable names corresponding to the group names should be shortened (the default is \code{FALSE}).} \item{minlen}{integer to specify the minimum length of the shortened variable names (the default is 2).} \item{check}{logical to specify whether the variable names should be checked to ensure that they are syntactically valid variable names and if not, they are adjusted (by \code{\link{make.names}}) so that they are (the default is \code{TRUE}).} \item{append}{logical to specify whether the contrast matrix should be appended to the data frame specified via the \code{data} argument (the default is \code{TRUE}). If \code{append=FALSE}, only the contrast matrix is returned.} } \details{ The function can be used to construct a matrix that indicates which two groups have been contrasted against each other in each row of a data frame (with \code{1} for the first group, \code{-1} for the second group, and \code{0} otherwise). The \code{grp1} and \code{grp2} arguments are used to specify the group variables in the dataset (either as character strings or as numbers indicating the column positions of these variables in the dataset). Optional argument \code{last} is used to specify which group will be placed in the last column of the matrix. If \code{shorten=TRUE}, the variable names corresponding to the group names are shortened (to at least \code{minlen}; the actual length might be longer to ensure uniqueness of the variable names). The examples below illustrate the use of this function. } \value{ A matrix with as many variables as there are groups.
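For instance, in a hypothetical dataset with groups \code{A}, \code{B}, and \code{C}, a row in which group \code{A} was contrasted against group \code{C} would be coded as \code{1}, \code{0}, and \code{-1} in the columns corresponding to \code{A}, \code{B}, and \code{C} (this is only meant to illustrate the coding described above; see the \sQuote{Examples} section for actual output).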
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{to.wide}}, \code{\link{dat.senn2013}}, \code{\link{dat.hasselblad1998}} } \examples{ ### restructure to wide format dat <- dat.senn2013 dat <- dat[c(1,4,3,2,5,6)] dat <- to.wide(dat, study="study", grp="treatment", ref="placebo", grpvars=4:6) dat ### add contrast matrix dat <- contrmat(dat, grp1="treatment.1", grp2="treatment.2") dat ### data in long format dat <- dat.hasselblad1998 dat ### restructure to wide format dat <- to.wide(dat, study="study", grp="trt", ref="no_contact", grpvars=6:7) dat ### add contrast matrix dat <- contrmat(dat, grp1="trt.1", grp2="trt.2", shorten=TRUE) dat } \keyword{manip} metafor/man/dat.pagliaro1992.Rd0000644000176200001440000000627514055736403015664 0ustar liggesusers\name{dat.pagliaro1992} \docType{data} \alias{dat.pagliaro1992} \title{Studies on the Effectiveness of Nonsurgical Treatments in Cirrhosis} \description{Results from 26 trials examining the effectiveness of beta-blockers and sclerotherapy for the prevention of first bleeding in patients with cirrhosis} \usage{dat.pagliaro1992} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study id \cr \bold{trt} \tab \code{character} \tab either beta-blockers, sclerotherapy, or control \cr \bold{xi} \tab \code{numeric} \tab number of patients with first bleeding \cr \bold{ni} \tab \code{numeric} \tab number of patients treated } } \details{ The dataset includes the results from 26 randomized controlled trials examining the effectiveness of nonsurgical treatments for the prevention of first bleeding in patients with cirrhosis. Patients were either treated with beta-blockers, endoscopic sclerotherapy, or with a nonactive treatment (control). Two trials included all three treatment conditions, 7 trials compared beta-blockers against control, and 17 trials compared sclerotherapy against control. The dataset has been used in various papers to illustrate methods for conducting a network meta-analysis / mixed treatment comparison. } \source{ Pagliaro, L., D'Amico, G., \enc{Sörensen}{Soerensen}, T. I. A., Lebrec, D., Burroughs, A. K., Morabito, A., \enc{Tiné}{Tine}, F., Politi, F., & Traina, M. (1992). Prevention of first bleeding in cirrhosis: A meta-analysis of randomized trials of nonsurgical treatment. \emph{Annals of Internal Medicine}, \bold{117}(1), 59--70. 
\verb{https://doi.org/10.7326/0003-4819-117-1-59} } \examples{ ### copy data into 'dat' and examine data dat <- dat.pagliaro1992 dat ### restructure dataset to a contrast-based format dat.c <- to.wide(dat, study="study", grp="trt", grpvars=3:4) dat.c ### Mantel-Haenszel results for beta-blockers and sclerotherapy versus control, respectively rma.mh(measure="OR", ai=xi.1, n1i=ni.1, ci=xi.2, n2i=ni.2, data=dat.c, subset=(trt.1=="beta-blockers"), digits=2) rma.mh(measure="OR", ai=xi.1, n1i=ni.1, ci=xi.2, n2i=ni.2, data=dat.c, subset=(trt.1=="sclerotherapy"), digits=2) ### calculate log odds for each study arm dat <- escalc(measure="PLO", xi=xi, ni=ni, data=dat) dat ### turn treatment variable into factor and set reference level dat$trt <- relevel(factor(dat$trt), ref="control") ### add a space before each level (this makes the output a bit more legible) levels(dat$trt) <- paste0(" ", levels(dat$trt)) ### network meta-analysis using an arm-based random-effects model with fixed study effects ### (by setting rho=1/2, tau^2 reflects the amount of heterogeneity for all treatment comparisons) res <- rma.mv(yi, vi, mods = ~ factor(study) + trt - 1, random = ~ trt | study, rho=1/2, data=dat) res ### average odds ratio comparing beta-blockers and sclerotherapy versus control, respectively predict(res, newmods=c(rep(0,26), 1, 0), transf=exp, digits=2) predict(res, newmods=c(rep(0,26), 0, 1), transf=exp, digits=2) ### average odds ratio comparing beta-blockers versus sclerotherapy predict(res, newmods=c(rep(0,26), 1, -1), transf=exp, digits=2) } \keyword{datasets} metafor/man/predict.rma.Rd0000644000176200001440000003073014055736403015173 0ustar liggesusers\name{predict.rma} \alias{predict} \alias{predict.rma} \alias{predict.rma.ls} \title{Predicted Values for 'rma' Objects} \description{ The function computes predicted values, corresponding standard errors, confidence intervals, and prediction intervals for objects of class \code{"rma"}. \loadmathjax } \usage{ \method{predict}{rma}(object, newmods, intercept, tau2.levels, gamma2.levels, addx=FALSE, level, digits, transf, targs, vcov=FALSE, \dots) \method{predict}{rma.ls}(object, newmods, intercept, addx=FALSE, newscale, addz=FALSE, level, digits, transf, targs, vcov=FALSE, \dots) } \arguments{ \item{object}{an object of class \code{"rma"} or \code{"rma.ls"}.} \item{newmods}{optional vector or matrix to specify the values of the moderator values for which the predicted values should be calculated. See \sQuote{Details}.} \item{intercept}{logical to specify whether the intercept should be included when calculating the predicted values for \code{newmods}. If unspecified, the intercept is automatically added when the original model also included an intercept.} \item{tau2.levels}{vector to specify the levels of the inner factor when computing prediction intervals. Only relevant for models of class \code{"rma.mv"} (see \code{\link{rma.mv}}) and when the model includes more than a single \mjseqn{\tau^2} value. See \sQuote{Details}.} \item{gamma2.levels}{vector to specify the levels of the inner factor when computing prediction intervals. Only relevant for models of class \code{"rma.mv"} (see \code{\link{rma.mv}}) and when the model includes more than a single \mjseqn{\gamma^2} value. See \sQuote{Details}.} \item{addx}{logical to specify whether the values of the moderator variables should be added to the returned object. 
See \sQuote{Examples}.} \item{newscale}{optional vector or matrix to specify the values of the scale variables for which the predicted values should be calculated. Only relevant for location-scale models (see \code{\link{rma}}). See \sQuote{Details}.} \item{addz}{logical to specify whether the values of the scale variables should be added to the returned object.} \item{level}{numeric value between 0 and 100 to specify the confidence and prediction interval level. If unspecified, the default is to take the value from the object.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{transf}{optional argument to specify a function that should be used to transform the predicted values and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{vcov}{logical to specify whether the variance-covariance matrix of the predicted values should also be returned (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ For a fixed-effects model, \code{predict(object)} returns the estimated (average) outcome in the set of studies included in the meta-analysis. This is the same as the estimated intercept in the fixed-effects model (i.e., \mjseqn{\hat{\theta}}). For a random-effects model, \code{predict(object)} returns the estimated (average) outcome in the hypothetical population of studies from which the set of studies included in the meta-analysis are assumed to be a random selection. This is the same as the estimated intercept in the random-effects model (i.e., \mjseqn{\hat{\mu}}). For models including one or more moderators, \code{predict(object)} returns the estimated (average) outcomes for values of the moderator(s) equal to those of the \mjseqn{k} studies included in the meta-analysis (i.e., the \sQuote{fitted values} for the \mjseqn{k} studies). For models including \mjseqn{p'} moderator variables, new moderator values (for \mjeqn{k_{new}}{k_new} hypothetical new studies) can be specified by setting \code{newmods} equal to a \mjeqn{k_{new} \times p'}{k_new x p'} matrix with the corresponding new moderator values. If the model object included an intercept, then it should not be explicitly specified under \code{newmods}, as it will be added by default (unless one sets \code{intercept=FALSE}). Also, any factors in the original model get turned into the appropriate contrast variables within the \code{rma} function, so that \code{newmods} should actually include the values for the contrast variables. Examples are shown below. For random/mixed-effects models, an approximate prediction interval is also calculated (Riley et al., 2011). The interval estimates where \code{level} \% of the true effect sizes or outcomes fall in the hypothetical population of studies (and hence where the true effect or outcome of a new study from the population of studies should fall in \code{level} \% of the cases). For random-effects models that were fitted with the \code{\link{rma.mv}} function, the model may actually include multiple \mjseqn{\tau^2} values (i.e., when the \code{random} argument includes an \sQuote{\code{~ inner | outer}} term and \code{struct="HCS"}, \code{struct="DIAG"}, \code{struct="HAR"}, or \code{struct="UN"}). 
In that case, the function will provide prediction intervals for each level of the inner factor (since the prediction intervals differ depending on the \mjseqn{\tau^2} value). Alternatively, one can use the \code{tau2.levels} argument to specify for which level(s) the prediction interval should be provided. If the model includes a second \sQuote{\code{~ inner | outer}} term with multiple \mjseqn{\gamma^2} values, prediction intervals for each combination of levels of the inner factors will be provided. Alternatively, one can use the \code{tau2.levels} and \code{gamma2.levels} arguments to specify for which level combination(s) the prediction interval should be provided. When using the \code{newmods} argument for mixed-effects models that were fitted with the \code{\link{rma.mv}} function, if the model includes multiple \mjseqn{\tau^2} (and multiple \mjseqn{\gamma^2}) values, then one must use the \code{tau2.levels} (and \code{gamma2.levels}) argument to specify the levels of the inner factor(s) (i.e., a vector of length \mjeqn{k_{new}}{k_new}) to obtain the appropriate prediction interval(s). For location-scale models fitted with the \code{\link{rma}} function, one can use \code{newmods} to specify the values of the \mjseqn{p'} moderator variables included in the model and \code{newscale} to specify the values of the \mjseqn{q'} scale variables included in the model. Whenever \code{newmods} is specified, the function computes predicted effects/outcomes for the specified moderators values. To obtain the corresponding prediction intervals, one must also specify the corresponding \code{newscale} values. If only \code{newscale} is specified (and not \code{newmods}), the function computes the predicted log-transformed \mjseqn{\tau^2} values (when using a log link) for to the specified scale values. By setting \code{transf=exp}, one can then obtain the predicted \mjseqn{\tau^2} values. } \value{ An object of class \code{"list.rma"}. The object is a list containing the following components: \item{pred}{predicted value(s).} \item{se}{corresponding standard error(s).} \item{ci.lb}{lower bound of the confidence interval(s).} \item{ci.ub}{upper bound of the confidence interval(s).} \item{pi.lb}{lower bound of the prediction interval(s) (only for random/mixed-effects models).} \item{pi.ub}{upper bound of the prediction interval(s) (only for random/mixed-effects models).} \item{tau2.level}{the level(s) of the inner factor (only for models of class \code{"rma.mv"} with multiple \mjseqn{\tau^2} values).} \item{gamma2.level}{the level(s) of the inner factor (only for models of class \code{"rma.mv"} with multiple \mjseqn{\gamma^2} values).} \item{X}{the moderator value(s) used to calculate the predicted values (only when \code{addx=TRUE}).} \item{Z}{the scale value(s) used to calculate the predicted values (only when \code{addz=TRUE} and only for location-scale models).} \item{\dots}{some additional elements/values.} If \code{vcov=TRUE}, then the returned object is a list with the first element equal to the one as described above and the second element equal to the variance-covariance matrix of the predicted values. The object is formatted and printed with \code{\link{print.list.rma}}. } \note{ Confidence and prediction intervals are calculated based on the critical values from a standard normal distribution (i.e., \mjeqn{\pm 1.96}{±1.96} for \code{level=95}). When the model was fitted with \code{test="t"} or \code{test="knha"}, then a t-distribution with \mjseqn{k-p} degrees of freedom is used. 
For a random-effects model (where \mjseqn{p=1}) fitted with the \code{\link{rma.uni}} function, note that this differs slightly from Riley et al. (2011), who suggest using a t-distribution with \mjseqn{k-2} degrees of freedom for constructing the prediction interval. Neither a normal nor a t-distribution with \mjseqn{k-1} or \mjseqn{k-2} degrees of freedom is correct; all of these are approximations. The computations are done in the way described above, so that the prediction interval is identical to the confidence interval when \mjeqn{\hat{\tau}^2 = 0}{hat(\tau)^2 = 0}, which could be argued is the logical thing that should happen. If the prediction interval should be computed exactly as described by Riley et al. (2011), then one can use argument \code{pi.type="riley"}. The predicted values are based only on the fixed effects of the model. Best linear unbiased predictions (BLUPs) that combine the fitted values based on the fixed effects and the estimated contributions of the random effects can be obtained with \code{\link{blup.rma.uni}} (currently only for objects of class \code{"rma.uni"}). When using the \code{transf} option, the transformation is applied to the predicted values and the corresponding interval bounds. The standard errors are omitted from the printed output. Also, \code{vcov=TRUE} is ignored when using the \code{transf} option. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis}. San Diego, CA: Academic Press. Riley, R. D., Higgins, J. P. T., & Deeks, J. J. (2011). Interpretation of random effects meta-analyses. \emph{British Medical Journal}, \bold{342}, d549. \verb{https://doi.org/10.1136/bmj.d549} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{fitted.rma}}, \code{\link{blup.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res <- rma(yi, vi, data=dat) ### average risk ratio with 95\% CI predict(res, transf=exp) ### fit mixed-effects model with absolute latitude as a moderator res <- rma(yi, vi, mods = ~ ablat, data=dat) ### predicted average risk ratios for given absolute latitude values predict(res, transf=exp, addx=TRUE) ### predicted average risk ratios for 10-60 degrees absolute latitude predict(res, newmods=c(10, 20, 30, 40, 50, 60), transf=exp, addx=TRUE) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### predicted average risk ratios for 10 and 60 degrees latitude in 1950 and 1980 predict(res, newmods=cbind(c(10,60,10,60),c(1950,1950,1980,1980)), transf=exp, addx=TRUE) ### fit mixed-effects model with two moderators (one of which is a factor) res <- rma(yi, vi, mods = ~ ablat + factor(alloc), data=dat) ### examine how the factor was actually coded for the studies in the dataset predict(res, addx=TRUE) ### predicted average risk ratios at 30 degrees for the three factor levels ### note: the contrast (dummy) variables need to be specified explicitly here predict(res, newmods=c(30, 0, 0), addx=TRUE) # for alternate allocation predict(res, newmods=c(30, 1, 0), addx=TRUE) # for random allocation predict(res, newmods=c(30, 0, 1), addx=TRUE) # for systematic allocation ### can also use named vector with arbitrary order and abbreviated variable names predict(res, newmods=c(sys=0, ran=0, abl=30)) predict(res, newmods=c(sys=0, ran=1, abl=30)) predict(res, newmods=c(sys=1, ran=0, abl=30)) } \keyword{models} metafor/man/dat.pignon2000.Rd0000644000176200001440000000533614055736403015332 0ustar liggesusers\name{dat.pignon2000} \docType{data} \alias{dat.pignon2000} \title{Studies on the Effectiveness of Locoregional Treatment plus Chemotherapy for Head and Neck Squamous-Cell Carcinoma} \description{Results from studies examining mortality risk in patients with nonmetastatic head and neck squamous-cell carcinoma receiving either locoregional treatment plus chemotherapy or locoregional treatment alone.} \usage{dat.pignon2000} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab study id number \cr \bold{trial} \tab \code{character} \tab trial abbreviation \cr \bold{OmE} \tab \code{numeric} \tab observed minus expected number of deaths in the locoregional treatment plus chemotherapy group \cr \bold{V} \tab \code{numeric} \tab corresponding variance \cr \bold{grp} \tab \code{numeric} \tab timing of chemotherapy: 1 = adjuvant, 2 = neoadjuvant, 3 = concomitant } } \details{ The purpose of this meta-analysis was to examine the mortality risk in patients with nonmetastatic head and neck squamous-cell carcinoma receiving either locoregional treatment plus chemotherapy or locoregional treatment alone. For 65 trials, the dataset provides the observed minus expected number of deaths and corresponding variances in the locoregional treatment plus chemotherapy group. Based on these values, we can estimate the log hazard ratios with \code{OmE/V} and the corresponding sampling variance with \code{1/V}.
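For example, a minimal sketch of this computation (the same steps are also shown in the \sQuote{Examples} section below): \preformatted{dat <- dat.pignon2000
dat$yi <- with(dat, OmE/V)  ### log hazard ratios
dat$vi <- with(dat, 1/V)    ### corresponding sampling variances}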
The trials were also divided according to the timing of the chemotherapy: (1) adjuvant, after the locoregional treatment, (2) neoadjuvant, before the locoregional treatment, and (3) concomitant, chemotherapy given concomitantly or alternating with radiotherapy. } \source{ Pignon, J. P., Bourhis, J., Domenge, C., & Designe, L. (2000). Chemotherapy added to locoregional treatment for head and neck squamous-cell carcinoma: Three meta-analyses of updated individual data. \emph{Lancet}, \bold{355}(9208), 949--955. \verb{https://doi.org/10.1016/S0140-6736(00)90011-4} } \examples{ ### copy data into 'dat' dat <- dat.pignon2000 ### calculate log hazard ratios and sampling variances dat$yi <- with(dat, OmE/V) dat$vi <- with(dat, 1/V) dat ### meta-analysis based on all 65 trials res <- rma(yi, vi, data=dat, method="FE", digits=2) res predict(res, transf=exp) ### only adjuvant trials res <- rma(yi, vi, data=dat, method="FE", subset=grp==1, digits=2) res predict(res, transf=exp) ### only neoadjuvant trials res <- rma(yi, vi, data=dat, method="FE", subset=grp==2, digits=2) res predict(res, transf=exp) ### only concomitant trials res <- rma(yi, vi, data=dat, method="FE", subset=grp==3, digits=2) res predict(res, transf=exp) } \keyword{datasets} metafor/man/influence.rma.uni.Rd0000644000176200001440000001756014055736403016305 0ustar liggesusers\name{influence.rma.uni} \alias{influence} \alias{cooks.distance} \alias{dfbetas} \alias{hatvalues} \alias{influence.rma.uni} \alias{print.infl.rma.uni} \alias{cooks.distance.rma.uni} \alias{dfbetas.rma.uni} \alias{hatvalues.rma.uni} \title{Outlier and Influential Case Diagnostics for 'rma.uni' Objects} \description{ The functions can be used to compute various outlier and influential case diagnostics (some of which indicate the influence of deleting one case at a time on the model fit or the fitted/residual values) for objects of class \code{"rma.uni"}. For the corresponding help file for \code{"rma.mv"} objects, see \code{\link{influence.rma.mv}}. \loadmathjax } \usage{ \method{influence}{rma.uni}(model, digits, progbar=FALSE, \dots) \method{print}{infl.rma.uni}(x, digits=x$digits, infonly=FALSE, \dots) \method{cooks.distance}{rma.uni}(model, progbar=FALSE, \dots) \method{dfbetas}{rma.uni}(model, progbar=FALSE, \dots) \method{hatvalues}{rma.uni}(model, type="diagonal", \dots) } \arguments{ \item{model}{an object of class \code{"rma.uni"}.} \item{x}{an object of class \code{"infl.rma.uni"} (for \code{print}).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{FALSE}).} \item{infonly}{logical to specify whether only the influential cases should be printed (the default is \code{FALSE}).} \item{type}{character string to specify whether only the diagonal of the hat matrix (\code{"diagonal"}) or the entire hat matrix (\code{"matrix"}) should be returned.} \item{\dots}{other arguments.} } \details{ The term \sQuote{case} below refers to a particular row from the dataset used in the model fitting (which is typically synonymous with study).
The \code{influence} function calculates the following leave-one-out diagnostics for each case: \itemize{ \item externally standardized residual, \item DFFITS value, \item Cook's distance, \item covariance ratio, \item the leave-one-out amount of (residual) heterogeneity, \item the leave-one-out test statistic of the test for (residual) heterogeneity, \item DFBETAS value(s). } The diagonal elements of the hat matrix and the weights (in \%) given to the observed effect sizes or outcomes during the model fitting are also provided (except for their scaling, the hat values and weights are the same for models without moderators, but will differ when moderators are included). For details on externally standardized residuals, see \code{\link{rstudent.rma.uni}}. The DFFITS value essentially indicates how many standard deviations the predicted (average) effect or outcome for the \mjseqn{i}th case changes after excluding the \mjseqn{i}th case from the model fitting. Cook's distance can be interpreted as the Mahalanobis distance between the entire set of predicted values once with the \mjseqn{i}th case included and once with the \mjseqn{i}th case excluded from the model fitting. The covariance ratio is defined as the determinant of the variance-covariance matrix of the parameter estimates based on the dataset with the \mjseqn{i}th case removed divided by the determinant of the variance-covariance matrix of the parameter estimates based on the complete dataset. A value below 1 therefore indicates that removal of the \mjseqn{i}th case yields more precise estimates of the model coefficients. The leave-one-out amount of (residual) heterogeneity is the estimated value of \mjseqn{\tau^2} based on the dataset with the \mjseqn{i}th case removed. This is always equal to 0 for fixed-effects models. Similarly, the leave-one-out test statistic of the test for (residual) heterogeneity is the value of the test statistic of the test for (residual) heterogeneity calculated based on the dataset with the \mjseqn{i}th case removed. Finally, the DFBETAS value(s) essentially indicate(s) how many standard deviations the estimated coefficient(s) change(s) after excluding the \mjseqn{i}th case from the model fitting. A case may be considered to be \sQuote{influential} if at least one of the following is true: \itemize{ \item The absolute DFFITS value is larger than \mjeqn{3 \times \sqrt{p/(k-p)}}{3*\sqrt(p/(k-p))}, where \mjseqn{p} is the number of model coefficients and \mjseqn{k} the number of cases. \item The lower tail area of a chi-square distribution with \mjseqn{p} degrees of freedom cut off by the Cook's distance is larger than 50\%. \item The hat value is larger than \mjeqn{3 \times (p/k)}{3*(p/k)}. \item Any DFBETAS value is larger than \mjseqn{1}. } Cases which are considered influential with respect to any of these measures are marked with an asterisk. Note that the chosen cut-offs are (somewhat) arbitrary. Substantively informed judgment should always be used when examining the influence of each case on the results. 
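As an illustration, a minimal sketch of how the DFFITS cut-off could be checked manually (the function performs such checks internally; it is assumed here that the number of model coefficients and cases are stored in \code{res$p} and \code{res$k} and that the DFFITS values are stored under \code{dffits}): \preformatted{### fit model and compute the influence diagnostics (as in the 'Examples' below)
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res <- rma(yi, vi, mods = ~ ablat + year, data=dat)
inf <- influence(res)
### flag cases whose absolute DFFITS value exceeds 3*sqrt(p/(k-p))
### (res$p and res$k are assumed to hold the number of coefficients and cases)
cutoff <- 3 * sqrt(res$p / (res$k - res$p))
which(abs(inf$inf$dffits) > cutoff)}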
} \value{ An object of class \code{"infl.rma.uni"}, which is a list containing the following components: \item{inf}{an element of class \code{"list.rma"} with the externally standardized residuals, DFFITS values, Cook's distances, covariance ratios, leave-one-out \mjseqn{\tau^2} estimates, leave-one-out (residual) heterogeneity test statistics, hat values, weights, and an indicator whether a case is influential or not.} \item{dfbs}{an element of class \code{"list.rma"} with the the DFBETAS values.} \item{\dots}{some additional elements/values.} The results are printed with \code{print.infl.rma.uni} and plotted with \code{\link{plot.infl.rma.uni}}. } \note{ Right now, leave-one-out diagnostics are calculated by refitting the model \mjseqn{k} times. Depending on how large \mjseqn{k} is, it may take a few moments to finish the calculations. There are shortcuts for calculating at least some of these values without refitting the model each time, but these are currently not implemented (and may not exist for all of the leave-one-out diagnostics calculated by the function). It may not be possible to fit the model after deletion of the \mjseqn{i}th case from the dataset. This will result in \code{NA} values for that case. Certain relationships between the leave-one-out diagnostics and the (internally or externally) standardized residuals (Belsley, Kuh, & Welsch, 1980; Cook & Weisberg, 1982) no longer hold for the meta-analytic models. Maybe there are other relationships. These remain to be determined. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Belsley, D. A., Kuh, E., & Welsch, R. E. (1980). \emph{Regression diagnostics}. New York: Wiley. Cook, R. D., & Weisberg, S. (1982). \emph{Residuals and influence in regression}. London: Chapman and Hall. Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis}. San Diego, CA: Academic Press. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W., & Cheung, M. W.-L. (2010). Outlier and influence diagnostics for meta-analysis. \emph{Research Synthesis Methods}, \bold{1}(2), 112--125. \verb{https://doi.org/10.1002/jrsm.11} } \seealso{ \code{\link{plot.infl.rma.uni}}, \code{\link{rstudent.rma.uni}}, \code{\link{weights.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### compute the diagnostics inf <- influence(res) inf ### plot the values plot(inf) ### compute Cook's distances, DFBETAS values, and hat values cooks.distance(res) dfbetas(res) hatvalues(res) } \keyword{models} metafor/man/bldiag.Rd0000644000176200001440000000214614055736403014205 0ustar liggesusers\name{bldiag} \alias{bldiag} \title{Construct Block Diagonal Matrix} \description{ Function to construct a block diagonal matrix from (a list of) matrices. 
} \usage{ bldiag(\dots, order) } \arguments{ \item{\dots}{individual matrices or a list of matrices.} \item{order}{optional argument to specify a variable based on which a square block diagonal matrix should be ordered.} } \author{ Posted to R-help by Berton Gunter (2 Sep 2005) with some further adjustments by Wolfgang Viechtbauer } \seealso{ \code{\link{rma.mv}} } \examples{ ### copy data into 'dat' dat <- dat.berkey1998 dat ### construct list with the variance-covariance matrices of the observed outcomes for the studies V <- lapply(split(dat[c("v1i", "v2i")], dat$trial), as.matrix) V ### construct block diagonal matrix V <- bldiag(V) V ### if we split based on 'author', the list elements in V are in a different order than the data V <- lapply(split(dat[c("v1i", "v2i")], dat$author), as.matrix) V ### can use 'order' argument to reorder the block-diagonal matrix into the correct order V <- bldiag(V, order=dat$author) V } \keyword{manip} metafor/man/dat.craft2003.Rd0000644000176200001440000000545314055736403015142 0ustar liggesusers\name{dat.craft2003} \docType{data} \alias{dat.craft2003} \title{Studies on the Relationship between the Competitive State Anxiety Inventory-2 and Sport Performance} \description{Results from 10 studies on the relationship between the Competitive State Anxiety Inventory-2 (CSAI-2) and sport performance.} \usage{dat.craft2003} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{ni} \tab \code{numeric} \tab sample size \cr \bold{sport} \tab \code{character} \tab type of sport (T = team sport, I = individual sport) \cr \bold{ri} \tab \code{numeric} \tab correlation coefficient \cr \bold{var1} \tab \code{character} \tab variable 1 of the correlation coefficient (see \sQuote{Details}) \cr \bold{var2} \tab \code{character} \tab variable 2 of the correlation coefficient (see \sQuote{Details}) } } \details{ The 10 studies included in this dataset are a subset of the studies included in the meta-analysis by Craft et al. (2003) on the relationship between the Competitive State Anxiety Inventory-2 (CSAI-2) and sport performance. The CSAI-2 has three subscales: cognitive anxiety (\code{acog}), somatic anxiety (\code{asom}), and self-confidence (\code{conf}). The studies included in this dataset administered the CSAI-2 prior to some sport competition and then measured sport performance based on the competition. Most studies provided all 6 correlations (3 for the correlations among the 3 subscales and 3 for the correlations between the subscales and sport performance), but 2 studies (with study numbers 6 and 17) only provided a subset. } \source{ Becker, B. J., & Aloe, A. M. (2019). Model-based meta-analysis and related approaches. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (3rd ed., pp. 339--363). New York: Russell Sage Foundation. } \references{ Craft, L. L., Magyar, T. M., Becker, B. J., & Feltz, D. L. (2003). The relationship between the Competitive State Anxiety Inventory-2 and sport performance: A meta-analysis. \emph{Journal of Sport and Exercise Psychology}, \bold{25}(1), 44--65.
\verb{https://doi.org/10.1123/jsep.25.1.44} } \examples{ ### copy data into 'dat' dat <- dat.craft2003 dat ### construct dataset and var-cov matrix of the correlations tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat) V <- tmp$V dat <- tmp$dat ### examine data for study 1 dat[dat$study == 1,] V[dat$study == 1, dat$study == 1] ### examine data for study 6 dat[dat$study == 6,] V[dat$study == 6, dat$study == 6] ### examine data for study 17 dat[dat$study == 17,] V[dat$study == 17, dat$study == 17] ### multivariate random-effects model res <- rma.mv(yi, V, mods = ~ var1.var2 - 1, random = ~ var1.var2 | study, struct="UN", data=dat) res } \keyword{datasets} metafor/man/dat.bornmann2007.Rd0000644000176200001440000000554414055736403015662 0ustar liggesusers\name{dat.bornmann2007} \docType{data} \alias{dat.bornmann2007} \title{Studies on Gender Differences in Grant and Fellowship Awards} \description{Results from 21 studies on gender differences in grant and fellowship awards.} \usage{dat.bornmann2007} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab study reference \cr \bold{obs} \tab \code{numeric} \tab observation within study \cr \bold{doctype} \tab \code{character} \tab document type \cr \bold{gender} \tab \code{character} \tab gender of the study authors \cr \bold{year} \tab \code{numeric} \tab (average) cohort year \cr \bold{org} \tab \code{character} \tab funding organization / program \cr \bold{country} \tab \code{character} \tab country of the funding organization / program \cr \bold{type} \tab \code{character} \tab fellowship or grant application \cr \bold{discipline} \tab \code{character} \tab discipline / field \cr \bold{waward} \tab \code{numeric} \tab number of women who received a grant/fellowship award \cr \bold{wtotal} \tab \code{numeric} \tab number of women who applied for an award \cr \bold{maward} \tab \code{numeric} \tab number of men who received a grant/fellowship award \cr \bold{mtotal} \tab \code{numeric} \tab number of men who applied for an award } } \details{ The studies in this dataset examine whether the chances of receiving a grant or fellowship award differs for men and women. Note that many studies provide multiple comparisons (e.g., for different years / cohorts / disciplines). A multilevel meta-analytic model can be used to account for the multilevel structure in these data. } \source{ Bornmann, L., Mutz, R., & Daniel, H. (2007). Gender differences in grant peer review: A meta-analysis. \emph{Journal of Informetrics}, \bold{1}(3), 226--238. \verb{https://doi.org/10.1016/j.joi.2007.03.001} } \references{ Marsh, H. W., Bornmann, L., Mutz, R., Daniel, H.-D., & O'Mara, A. (2009). Gender effects in the peer reviews of grant proposals: A comprehensive meta-analysis comparing traditional and multilevel approaches. \emph{Review of Educational Research}, \bold{79}(3), 1290--1326. 
\verb{https://doi.org/10.3102/0034654309334143} } \examples{ ### copy data into 'dat' and examine data dat <- dat.bornmann2007 dat ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=waward, n1i=wtotal, ci=maward, n2i=mtotal, data=dat) ### fit multilevel meta-analytic model res <- rma.mv(yi, vi, random = ~ 1 | study/obs, data=dat) res ### estimated average odds ratio (with 95% CI/PI) predict(res, transf=exp, digits=2) ### test for a difference between fellowship and grant applications res <- rma.mv(yi, vi, mods = ~ type, random = ~ 1 | study/obs, data=dat) res predict(res, newmods=0:1, transf=exp, digits=2) } \keyword{datasets} metafor/man/dat.lee2004.Rd0000644000176200001440000000341314055736403014603 0ustar liggesusers\name{dat.lee2004} \docType{data} \alias{dat.lee2004} \title{Studies on Acupoint P6 Stimulation for Preventing Nausea} \description{Results from studies examining the effectiveness of wrist acupuncture point P6 stimulation for preventing postoperative nausea.} \usage{dat.lee2004} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab trial id number \cr \bold{study} \tab \code{character} \tab first author \cr \bold{year} \tab \code{numeric} \tab study year \cr \bold{ai} \tab \code{numeric} \tab number of patients experiencing nausea in the treatment group \cr \bold{n1i} \tab \code{numeric} \tab total number of patients in treatment group \cr \bold{ci} \tab \code{numeric} \tab number of patients experiencing nausea in the sham group \cr \bold{n2i} \tab \code{numeric} \tab total number of patients in the sham group } } \details{ Postoperative nausea and vomiting are common complications following surgery and anaesthesia. As an alternative to drug therapy, acupuncture has been studied as a potential treatment in several trials. The dataset contains the results from 16 clinical trials examining the effectiveness of wrist acupuncture point P6 stimulation for preventing postoperative nausea. } \source{ Lee, A., & Done, M. L. (2004). Stimulation of the wrist acupuncture point P6 for preventing postoperative nausea and vomiting. \emph{Cochrane Database of Systematic Reviews}, \bold{3}, CD003281. \verb{https://doi.org/10.1002/14651858.CD003281.pub2} } \examples{ ### copy data into 'dat' and examine data dat <- dat.lee2004 dat ### meta-analysis based on log risk ratios res <- rma(measure="RR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) res predict(res, transf=exp, digits=2) } \keyword{datasets} metafor/man/plot.infl.rma.uni.Rd0000644000176200001440000001301114055736403016231 0ustar liggesusers\name{plot.infl.rma.uni} \alias{plot.infl.rma.uni} \title{Plot Method for 'infl.rma.uni' Objects} \description{ Plot method for objects of class \code{"infl.rma.uni"}. \loadmathjax } \usage{ \method{plot}{infl.rma.uni}(x, plotinf=TRUE, plotdfbs=FALSE, dfbsnew=FALSE, logcov=TRUE, layout, slab.style=1, las=0, pch=21, bg="black", bg.infl="red", col.na="lightgray", \dots) } \arguments{ \item{x}{an object of class \code{"infl.rma.uni"}.} \item{plotinf}{logical to specify whether the various case diagnostics should be plotted (the default is \code{TRUE}). Can also be a vector of up to 8 integers to specify which plots to draw. See \sQuote{Details} for the numbers corresponding to the various plots.} \item{plotdfbs}{logical to specify whether the DFBETAS values should be plotted (the default is \code{FALSE}). 
Can also be a vector of integers to specify for which coefficient(s) to plot the DFBETAS values.} \item{dfbsnew}{logical to specify whether a new device should be opened for plotting the DFBETAS values (the default is \code{FALSE}).} \item{logcov}{logical to specify whether the covariance ratios should be plotted on a log scale (the default is \code{TRUE}).} \item{layout}{optional vector of two numbers to specify the number of rows and columns for the layout of the figure.} \item{slab.style}{integer to indicate the style of the x-axis labels: 1 = study number, 2 = study label, 3 = abbreviated study label. Note that study labels, even when abbreviated, may be too long to fit in the margins.} \item{las}{integer between 0 and 3 to specify the alignment of the axis labels (see \code{\link{par}}). The most useful alternative to 0 is 3, so that the x-axis labels are drawn vertical to the axis.} \item{pch}{plotting symbol to use. By default, a filled circle is used. See \code{\link{points}} for other options.} \item{bg}{color to use for filling the plotting symbol (the default is \code{"black"}).} \item{bg.infl}{color to use for filling the plotting symbol when the point is considered influential (the default is \code{"red"}).} \item{col.na}{color to use for lines connecting two points with \code{NA} values in between (the default is \code{"lightgray"}).} \item{\dots}{other arguments.} } \details{ When \code{plotinf=TRUE}, the function plots the (1) externally standardized residuals, (2) DFFITS values, (3) Cook's distances, (4) covariance ratios, (5) leave-one-out \mjseqn{\tau^2} estimates, (6) leave-one-out (residual) heterogeneity test statistics, (7) hat values, and (8) weights. If \code{plotdfbs=TRUE}, the DFBETAS values are also plotted either after confirming the page change (if \code{dfbsnew=FALSE}) or on a separate device (if \code{dfbsnew=TRUE}). A case (which is typically synonymous with study) may be considered to be \sQuote{influential} if at least one of the following is true: \itemize{ \item The absolute DFFITS value is larger than \mjeqn{3 \times \sqrt{p/(k-p)}}{3*\sqrt(p/(k-p))}, where \mjseqn{p} is the number of model coefficients and \mjseqn{k} the number of cases. \item The lower tail area of a chi-square distribution with \mjseqn{p} degrees of freedom cut off by the Cook's distance is larger than 50\%. \item The hat value is larger than \mjeqn{3 \times (p/k)}{3*(p/k)}. \item Any DFBETAS value is larger than \mjseqn{1}. } Cases which are considered influential with respect to any of these measures are indicated by the color specified for the \code{bg.infl} argument (the default is \code{"red"}). The cut-offs described above are indicated in the plot with horizontal reference lines. In addition, on the plot of the externally standardized residuals, horizontal reference lines are drawn at -1.96, 0, and 1.96. On the plot of the hat values, a horizontal reference line is drawn at \mjseqn{p/k}. Since the sum of the hat values is equal to \mjseqn{p}, the value \mjseqn{p/k} indicates equal hat values for all \mjseqn{k} cases. Finally, on the plot of weights, a horizontal reference line is drawn at \mjseqn{100/k}, corresponding to the value for equal weights (in \%) for all \mjseqn{k} cases. Note that all weights will automatically be equal to each other when using unweighted model fitting. Also, the hat values will be equal to the weights (except for their scaling) in models without moderators. The chosen cut-offs are (somewhat) arbitrary.
Substantively informed judgment should always be used when examining the influence of each case on the results. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W., & Cheung, M. W.-L. (2010). Outlier and influence diagnostics for meta-analysis. \emph{Research Synthesis Methods}, \bold{1}(2), 112--125. \verb{https://doi.org/10.1002/jrsm.11} } \seealso{ \code{\link{influence.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### compute the diagnostics inf <- influence(res) ### plot the values plot(inf) ### select which plots to show plot(inf, plotinf=1:4) plot(inf, plotinf=1:4, layout=c(4,1)) ### plot the DFBETAS values plot(inf, plotinf=FALSE, plotdfbs=TRUE) } \keyword{hplot} metafor/man/fsn.Rd0000644000176200001440000001465214055736403013556 0ustar liggesusers\name{fsn} \alias{fsn} \title{Fail-Safe N Analysis (File Drawer Analysis)} \description{ Function to compute the fail-safe N (also called a file drawer analysis). } \usage{ fsn(yi, vi, sei, data, type="Rosenthal", alpha=.05, target, weighted=FALSE, subset, digits, \dots) } \arguments{ \item{yi}{vector with the observed effect sizes or outcomes.} \item{vi}{vector with the corresponding sampling variances.} \item{sei}{vector with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{data}{optional data frame containing the variables given to the arguments above.} \item{type}{character string to specify the method to use for the calculation of the fail-safe N. Possible options are \code{"Rosenthal"} (the default), \code{"Orwin"}, or \code{"Rosenberg"}. See \sQuote{Details}.} \item{alpha}{target alpha level to use for the Rosenthal and Rosenberg methods (the default is .05).} \item{target}{target average effect size or outcome to use for the Orwin method. If undefined, then the target average effect size or outcome will be equal to the observed average effect size or outcome divided by 2.} \item{weighted}{logical to specify whether Orwin's method should be based on unweighted (the default) or weighted averages.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be used for the calculations.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} \item{\dots}{other arguments.} } \details{ The function can be used in conjunction with any of the usual effect sizes / outcome measures used in meta-analyses (e.g., log risk ratios, log odds ratios, risk differences, mean differences, standardized mean differences, raw correlation coefficients, correlation coefficients transformed with Fisher's r-to-z transformation, and so on). Simply specify the observed outcomes via the \code{yi} argument and the corresponding sampling variances via the \code{vi} argument (instead of specifying \code{vi}, one can specify the standard errors via the \code{sei} argument). 
The \code{\link{escalc}} function can be used to compute a wide variety of effect sizes / outcome measures (and the corresponding sampling variances) based on summary statistics. The Rosenthal method (sometimes called a \sQuote{file drawer analysis}) calculates the number of studies averaging null results that would have to be added to the given set of observed outcomes to reduce the combined significance level (p-value) to a particular alpha level (e.g., .05). The calculation is based on Stouffer's method to combine p-values and is described in Rosenthal (1979). The Orwin method calculates the number of studies averaging null results that would have to be added to the given set of observed outcomes to reduce the (unweighted or weighted) average outcome to a target value (as specified via the \code{target} argument). The method is described in Orwin (1983). If \code{weighted=FALSE} (the default), the method does not require (or make use of) \code{vi} (or \code{sei}), so these arguments are then not relevant for this method. If the \code{target} argument is not specified, then the target average outcome will be equal to the observed average outcome divided by 2 (which is quite arbitrary). One should really set \code{target} to a value that reflects an outcome one would consider practically irrelevant. Note that if \code{target} has the opposite sign to the actually observed average outcome, then its sign is automatically flipped. The Rosenberg method calculates the number of studies averaging null results that would have to be added to the given set of observed outcomes to reduce the significance level (i.e., p-value) of the weighted average outcome (based on a fixed-effects model) to a particular alpha level (e.g., .05). The method is described in Rosenberg (2005). If the combined/observed significance level is above the specified alpha level (for \code{type = "Rosenthal"} or \code{type = "Rosenberg"}) or if the observed average outcome is below the target average outcome (for \code{type = "Orwin"}), then the fail-safe N value will be 0. } \value{ An object of class \code{"fsn"}. The object is a list containing the following components: \item{type}{the method used.} \item{fsnum}{the calculated fail-safe N.} \item{alpha}{the specified alpha level.} \item{pval}{the p-value of the observed results. \code{NA} for the Orwin method.} \item{meanes}{the average outcome of the observed results. \code{NA} for the Rosenthal method.} \item{target}{the target value. \code{NA} for the Rosenthal and Rosenberg methods.} The results are formatted and printed with the \code{\link{print.fsn}} function. } \note{ For the Rosenberg method, the p-value is calculated based on a standard normal distribution (instead of a t-distribution, as suggested by Rosenberg, 2005). } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Rosenthal, R. (1979). The "file drawer problem" and tolerance for null results. \emph{Psychological Bulletin}, \bold{86}(3), 638--641. \verb{https://doi.org/10.1037/0033-2909.86.3.638} Orwin, R. G. (1983). A fail-safe N for effect size in meta-analysis. \emph{Journal of Educational Statistics}, \bold{8}(2), 157--159. \verb{https://doi.org/10.3102/10769986008002157} Rosenberg, M. S. (2005). The file-drawer problem revisited: A general weighted method for calculating fail-safe numbers in meta-analysis. \emph{Evolution}, \bold{59}(2), 464--468. \verb{https://doi.org/10.1111/j.0014-3820.2005.tb01004.x} Viechtbauer, W. (2010).
Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{ranktest}}, \code{\link{regtest}}, \code{\link{trimfill}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit fixed-effects model rma(yi, vi, data=dat, method="FE") ### fail-safe N computations fsn(yi, vi, data=dat) fsn(yi, data=dat, type="Orwin", target=log(0.95)) # target corresponds to a 5\% risk reduction fsn(yi, vi, data=dat, type="Orwin", weighted=TRUE, target=log(0.95)) fsn(yi, vi, data=dat, type="Rosenberg") } \keyword{htest} metafor/man/dat.laopaiboon2015.Rd0000644000176200001440000000536714055736403016173 0ustar liggesusers\name{dat.laopaiboon2015} \docType{data} \alias{dat.laopaiboon2015} \title{Studies on the Effectiveness of Azithromycin for Treating Lower Respiratory Tract Infections} \description{Results from 15 studies on the effectiveness of azithromycin versus amoxycillin or amoxycillin/clavulanic acid (amoxyclav) in the treatment of acute lower respiratory tract infections.} \usage{ dat.laopaiboon2015 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{author} \tab \code{character} \tab author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ai} \tab \code{numeric} \tab number of clinical failures in the group treated with azithromycin \cr \bold{n1i} \tab \code{numeric} \tab number of patients in the group treated with azithromycin \cr \bold{ci} \tab \code{numeric} \tab number of clinical failures in the group treated with amoxycillin or amoxyclav \cr \bold{n2i} \tab \code{numeric} \tab number of patients in the group treated with amoxycillin or amoxyclav \cr \bold{age} \tab \code{character} \tab whether the trial included adults or children \cr \bold{diag.ab} \tab \code{numeric} \tab trial included patients with a diagnosis of acute bacterial bronchitis \cr \bold{diag.cb} \tab \code{numeric} \tab trial included patients with a diagnosis of chronic bronchitis with acute exacerbation \cr \bold{diag.pn} \tab \code{numeric} \tab trial included patients with a diagnosis of pneumonia \cr \bold{ctrl} \tab \code{character} \tab antibiotic in control group (amoxycillin or amoxyclav) } } \details{ Azithromycin is an antibiotic useful for the treatment of a number of bacterial infections. Laopaiboon et al. (2015) conducted a meta-analysis of trials comparing the effectiveness of azithromycin versus amoxycillin or amoxycillin/clavulanic acid (amoxyclav) in the treatment of acute lower respiratory tract infections, including acute bacterial bronchitis, acute exacerbations of chronic bronchitis, and pneumonia. The results from 15 trials are included in this dataset. } \source{ Laopaiboon, M., Panpanich, R., & Swa Mya, K. (2015). Azithromycin for acute lower respiratory tract infections. \emph{Cochrane Database of Systematic Reviews}, \bold{3}, CD001954.
\verb{https://doi.org/10.1002/14651858.CD001954.pub4} } \examples{ ### copy data into 'dat' and examine data dat <- dat.laopaiboon2015 dat ### analysis using the Mantel-Haenszel method rma.mh(measure="RR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, digits=3) ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) ### random-effects model res <- rma(yi, vi, data=dat) res ### average risk ratio with 95\% CI predict(res, transf=exp) } \keyword{datasets} metafor/man/print.anova.rma.Rd0000644000176200001440000000475414055736403016005 0ustar liggesusers\name{print.anova.rma} \alias{print.anova.rma} \title{Print Method for 'anova.rma' Objects} \description{ Print method for objects of class \code{"anova.rma"}. \loadmathjax } \usage{ \method{print}{anova.rma}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"anova.rma"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ The output includes: \itemize{ \item the number of parameters in the full and the reduced model. \item the AIC, BIC, AICc, and log-likelihood of the full and the reduced model. \item the value of the likelihood ratio test statistic. \item the corresponding p-value. \item the test statistic of the test for (residual) heterogeneity for the full and the reduced model. \item the estimate of \mjseqn{\tau^2} from the full and the reduced model. Suppressed for fixed-effects models. \item the amount (in percent) of heterogeneity in the reduced model that is accounted for in the full model (\code{NA} for fixed-effects models or for \code{"rma.mv"} objects). This can be regarded as a pseudo \mjseqn{R^2} statistic (Raudenbush, 2009). Note that the value may not be very accurate unless \mjseqn{k} is large (Lopez-Lopez et al., 2014). } The last two items are not provided when comparing \code{"rma.mv"} models. } \value{ The function does not return an object. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ \enc{López-López}{Lopez-Lopez}, J. A., \enc{Marín-Martínez}{Marin-Martinez}, F., \enc{Sánchez-Meca}{Sanchez-Meca}, J., Van den Noortgate, W., & Viechtbauer, W. (2014). Estimation of the predictive power of the model in mixed-effects meta-regression: A simulation study. \emph{British Journal of Mathematical and Statistical Psychology}, \bold{67}(1), 30--48. \verb{https://doi.org/10.1111/bmsp.12002} Raudenbush, S. W. (2009). Analyzing effect sizes: Random effects models. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 295--315). New York: Russell Sage Foundation. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{anova.rma}} } \keyword{print} metafor/man/dat.senn2013.Rd0000644000176200001440000001730514055736403015006 0ustar liggesusers\name{dat.senn2013} \docType{data} \alias{dat.senn2013} \title{Studies on the Effectiveness of Glucose-Lowering Agents} \description{Results from 26 trials examining the effectiveness of glucose-lowering agents in patients with type 2 diabetes} \usage{dat.senn2013} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{character} \tab (first) author and year of study \cr \bold{ni} \tab \code{numeric} \tab sample size of the study arm \cr \bold{treatment} \tab \code{character} \tab treatment given \cr \bold{comment} \tab \code{character} \tab whether figures given are based on raw values at outcome or on change from baseline \cr \bold{mi} \tab \code{numeric} \tab mean score \cr \bold{sdi} \tab \code{numeric} \tab standard deviation } } \details{ The dataset includes the results from 26 randomized controlled trials examining the effectiveness of adding various oral glucose-lowering agents to a baseline sulfonylurea therapy in patients with type 2 diabetes. The outcome measured in the studies was either the mean HbA1c level at follow-up or the mean change in HbA1c level from baseline to follow-up. A total of 10 different treatment types were examined in these studies: acarbose, benfluorex, metformin, miglitol, pioglitazone, placebo, rosiglitazone, sitagliptin, sulfonylurea alone, and vildagliptin. One study included three treatment arms (Willms, 1999), while the rest of the studies included two treatment arms (hence, the dataset includes the results from 53 treatment arms). The data can be used for a network meta-analysis, either using an arm-based or a contrast-based model. See \sQuote{Examples} below. } \source{ Law, M., Jackson, D., Turner, R., Rhodes, K., & Viechtbauer, W. (2016). Two new methods to fit models for network meta-analysis with random inconsistency effects. \emph{BMC Medical Research Methodology}, \bold{16}, 87. \verb{https://doi.org/10.1186/s12874-016-0184-5} Senn, S., Gavini, F., Magrez, D., & Scheen, A. (2013). Issues in performing a network meta-analysis. \emph{Statistical Methods in Medical Research}, \bold{22}(2), 169--189. 
\verb{https://doi.org/10.1177/0962280211432220} } \examples{ ### copy data into 'dat' and examine data dat <- dat.senn2013 dat ### create network graph ('igraph' package must be installed) \dontrun{ require(igraph) pairs <- data.frame(do.call(rbind, sapply(split(dat$treatment, dat$study), function(x) t(combn(x,2)))), stringsAsFactors=FALSE) pairs$X1 <- factor(pairs$X1, levels=sort(unique(dat$treatment))) pairs$X2 <- factor(pairs$X2, levels=sort(unique(dat$treatment))) tab <- table(pairs[,1], pairs[,2]) tab # adjacency matrix g <- graph_from_adjacency_matrix(tab, mode = "plus", weighted=TRUE, diag=FALSE) plot(g, edge.curved=FALSE, edge.width=E(g)$weight, layout=layout_as_star(g, center="placebo"), vertex.size=45, vertex.color="lightgray", vertex.label.color="black", vertex.label.font=2)} ### table of studies versus treatments examined print(addmargins(table(dat$study, dat$treatment)), zero.print="") ### table of frequencies with which treatment pairs were studied print(as.table(crossprod(table(dat$study, dat$treatment))), zero.print="") ### add means and sampling variances of the means to the dataset dat <- escalc(measure="MN", mi=mi, sdi=sdi, ni=ni, data=dat) ### turn treatment variable into factor and set reference level dat$treatment <- relevel(factor(dat$treatment), ref="placebo") ### add a space before each level (this makes the output a bit more legible) levels(dat$treatment) <- paste0(" ", levels(dat$treatment)) ### network meta-analysis using an arm-based fixed-effects model with fixed study effects res.fe <- rma.mv(yi, vi, mods = ~ study + treatment - 1, data=dat, slab=paste0(study, treatment)) res.fe ### test if treatment factor as a whole is significant anova(res.fe, btt="treatment") ### forest plot of the contrast estimates (treatments versus placebos) forest(tail(coef(res.fe), 9), tail(diag(vcov(res.fe)), 9), slab=levels(dat$treatment)[-1], xlim=c(-2.5, 2.0), alim=c(-1.5, 0.5), psize=1, xlab="Estimate", header="Treatment") ### weight matrix for the estimation of the fixed effects (leaving out the study effects) w <- t(tail(vcov(res.fe) \%*\% t(model.matrix(res.fe)) \%*\% weights(res.fe, type="matrix"), 9)) rownames(w) <- res.fe$slab ### create shade plot for the diabetes network with placebo as the reference treatment ### negative values in blue shades, positive values in red shades cols <- colorRampPalette(c("blue", "gray95", "red"))(9) heatmap(w, Rowv=NA, Colv=NA, scale="none", margins=c(6,11), col=cols, cexRow=.7, cexCol=1, labCol=levels(dat$treatment)[-1]) ### network meta-analysis using an arm-based random-effects model with fixed study effects ### by setting rho=1/2, tau^2 reflects the amount of heterogeneity for all treatment comparisons res.re <- rma.mv(yi, vi, mods = ~ study + treatment - 1, random = ~ treatment | study, rho=1/2, data=dat, slab=paste0(study, treatment)) res.re ### test if treatment factor as a whole is significant anova(res.re, btt="treatment") ### forest plot of the contrast estimates (treatments versus placebos) forest(tail(coef(res.re), 9), tail(diag(vcov(res.re)), 9), slab=levels(dat$treatment)[-1], xlim=c(-3.0, 2.5), alim=c(-1.5, 0.5), psize=1, xlab="Estimate", header="Treatment") ### compute the contribution of each study to the overall Q-test value qi <- sort(by((resid(res.fe) / sqrt(dat$vi))^2, dat$study, sum)) ### check that the values add up sum(qi) res.fe$QE ### plot the values s <- length(qi) par(mar=c(5,10,2,1)) plot(qi, 1:s, pch=19, xaxt="n", yaxt="n", xlim=c(0,40), xlab="Chi-Square Contribution", ylab="") axis(side=1) axis(side=2, 
at=1:s, labels=names(qi), las=1, tcl=0) segments(rep(0,s), 1:s, qi, 1:s) ############################################################################ ### restructure dataset to a contrast-based format dat <- dat.senn2013[c(1,4:2,5:6)] # reorder variables first dat <- to.wide(dat, study="study", grp="treatment", ref="placebo", grpvars=4:6) dat ### calculate mean difference and corresponding sampling variance for each treatment comparison dat <- escalc(measure="MD", m1i=mi.1, sd1i=sdi.1, n1i=ni.1, m2i=mi.2, sd2i=sdi.2, n2i=ni.2, data=dat) dat ### calculate the variance-covariance matrix of the mean differences for the multitreatment studies calc.v <- function(x) { v <- matrix(x$sdi.2[1]^2 / x$ni.2[1], nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) ### add contrast matrix to dataset dat <- contrmat(dat, grp1="treatment.1", grp2="treatment.2") dat ### network meta-analysis using a contrast-based random-effects model ### by setting rho=1/2, tau^2 reflects the amount of heterogeneity for all treatment comparisons ### the treatment left out (placebo) becomes the reference level for the treatment comparisons res <- rma.mv(yi, V, mods = ~ acarbose + benfluorex + metformin + miglitol + pioglitazone + rosiglitazone + sitagliptin + sulfonylurea + vildagliptin - 1, random = ~ comp | study, rho=1/2, data=dat) res ### estimate all pairwise differences between treatments (using the 'multcomp' package) if (require(multcomp)) { contr <- contrMat(setNames(rep(1,res$p), colnames(res$X)), type="Tukey") sav <- predict(res, newmods=contr) sav[["slab"]] <- rownames(contr) sav } ### fit random inconsistency effects model (see Law et al., 2016) res <- rma.mv(yi, V, mods = ~ acarbose + benfluorex + metformin + miglitol + pioglitazone + rosiglitazone + sitagliptin + sulfonylurea + vildagliptin - 1, random = list(~ comp | study, ~ comp | design), rho=1/2, phi=1/2, data=dat) res } \keyword{datasets} metafor/man/reporter.Rd0000644000176200001440000001237114055736403014626 0ustar liggesusers\name{reporter} \alias{reporter} \alias{reporter.rma.uni} \title{Dynamically Generated Analysis Reports for 'rma.uni' Objects} \description{ The function dynamically generates analysis reports for objects of class \code{"rma.uni"}. } \usage{ reporter(x, \dots) \method{reporter}{rma.uni}(x, dir, filename, format="html_document", open=TRUE, digits, forest, funnel, footnotes=FALSE, verbose=TRUE, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}.} \item{dir}{optional character string to specify the directory for creating the report. If unspecified, \code{\link{tempdir}} will be used.} \item{filename}{optional character string to specify the filename (without file extension) for the report. If unspecified, the function sets a filename automatically.} \item{format}{output format for the report (either \code{html_document}, \code{pdf_document}, or \code{word_document}). Can be abbreviated. See \sQuote{Note}.} \item{open}{logical to specify whether the report should be opened after it has been generated (the default is \code{TRUE}). See \sQuote{Note}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. 
If unspecified, the default is to take the value from the object.} \item{forest}{either a logical which will suppress the drawing of the forest plot when set to \code{FALSE} or a character string with arguments to be added to the call to \code{\link{forest.rma}} for generating the forest plot.} \item{funnel}{either a logical which will suppress the drawing of the funnel plot when set to \code{FALSE} or a character string with arguments to be added to the call to \code{\link{funnel.rma}} for generating the funnel plot.} \item{footnotes}{logical to specify whether additional explanatory footnotes should be added to the report (the default is \code{FALSE}).} \item{verbose}{logical to specify whether information on the progress of the report generation should be provided (the default is \code{TRUE}).} \item{\dots}{other arguments.} } \details{ The function dynamically generates an analysis report based on the model object. The report includes information about the model that was fitted, the distribution of the observed effect sizes or outcomes, the estimate of the average outcome based on the fitted model, tests and statistics that are informative about potential (residual) heterogeneity in the outcomes, checks for outliers and/or influential studies, and tests for funnel plot asymmetry. By default, a forest plot and a funnel plot are also provided (these can be suppressed by setting \code{forest=FALSE} and/or \code{funnel=FALSE}). } \value{ The function generates either a html, pdf, or docx file and returns (invisibly) the path to the generated document. } \note{ Since the report is created based on an R markdown document that is generated by the function, the \href{https://cran.r-project.org/package=rmarkdown}{rmarkdown} package and \href{https://pandoc.org}{pandoc} must be installed. To render the report into a pdf document (i.e., using \code{format="pdf_document"}) requires a LaTeX installation. If LaTeX is not already installed, you could try using the \href{https://cran.r-project.org/package=tinytex}{tinytex} package to install a lightweight LaTeX distribution based on TeX Live. Once the report is generated, the function tries to open the output file (either a .html, .pdf, or .docx file) with an appropriate application (if \code{open=TRUE}). This will only work when an appropriate application for the file type is installed and associated with the extension. If \code{filename} is unspecified, the default is to use \code{report}, followed by an underscore (i.e., \code{_}) and the name of the object passed to the function. Both the R markdown file (with extension .rmd) and the actual report (with extension .html, .pdf, or .docx) are named accordingly. To generate the report, the model object is also saved to a file (with the same filename as above, but with extension .rdata). Also, files \code{references.bib} and \code{apa.csl} are copied to the same directory (these files are needed to generate the references in APA format). Since the report is put together based on predefined text blocks, the writing is not very elegant. Also, using personal pronouns (\sQuote{I} or \sQuote{we}) does not make sense for such a report, so a lot of passive voice is used. The generated report provides an illustration of how the results of the model can be reported, but is not a substitute for a careful examination of the results. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). 
Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}} } \examples{ ### copy BCG vaccine data into 'dat' dat <- dat.bcg ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, slab=paste(author, ", ", year, sep="")) ### fit random-effects model res <- rma(yi, vi, data=dat) \dontrun{ ### generate pdf report reporter(res)} } \keyword{methods} metafor/man/update.rma.Rd0000644000176200001440000000455414055736403015030 0ustar liggesusers\name{update.rma} \alias{update} \alias{update.rma} \title{Model Updating for 'rma' Objects} \description{ The function can be used to update and (by default) re-fit \code{"rma"} models. It does this by extracting the call stored in the object, updating the call and (by default) evaluating that call. } \usage{ \method{update}{rma}(object, formula., \dots, evaluate = TRUE) } \arguments{ \item{object}{an object of class \code{"rma"}.} \item{formula.}{changes to the formula. See \sQuote{Details}.} \item{\dots}{additional arguments to the call, or arguments with changed values.} \item{evaluate}{logical to specify whether to evaluate the new call or just return the call.} } \details{ For objects of class \code{"rma.uni"}, \code{"rma.glmm"}, and \code{"rma.mv"}, the \code{formula.} argument can be used to update the set of moderators included in the model (see \sQuote{Examples}). } \value{ If \code{evaluate=TRUE} the fitted object, otherwise the updated call. } \author{ The present function is based on \code{\link{update.default}}, with changes made by Wolfgang Viechtbauer (\email{wvb@metafor-project.org}) so that the formula updating works with the (somewhat non-standard) interface of the \code{\link{rma.uni}}, \code{\link{rma.glmm}}, and \code{\link{rma.mv}} functions. } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model (method="REML" is default) res <- rma(yi, vi, data=dat, digits=3) res ### fit mixed-effects model with two moderators (absolute latitude and publication year) res <- update(res, ~ ablat + year) res ### remove 'year' moderator res <- update(res, ~ . - year) res ### fit model with ML estimation update(res, method="ML") ### example with rma.glmm() res <- rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, digits=3) res <- update(res, mods = ~ ablat) res ### fit conditional model with approximate likelihood update(res, model="CM.AL") } \keyword{models} metafor/man/rma.mv.Rd0000644000176200001440000013360414055736403014167 0ustar liggesusers\name{rma.mv} \alias{rma.mv} \title{Meta-Analysis via Multivariate/Multilevel Linear (Mixed-Effects) Models} \description{ Function to fit meta-analytic multivariate/multilevel fixed- and random/mixed-effects models with or without moderators via linear (mixed-effects) models. See below and the documentation of the \pkg{\link{metafor-package}} for more details on these models. 
\loadmathjax } \usage{ rma.mv(yi, V, W, mods, random, struct="CS", intercept=TRUE, data, slab, subset, method="REML", test="z", dfs="residual", level=95, digits, btt, R, Rscale="cor", sigma2, tau2, rho, gamma2, phi, sparse=FALSE, verbose=FALSE, control, \dots) } \arguments{ \item{yi}{vector of length \mjseqn{k} with the observed effect sizes or outcomes. See \sQuote{Details}.} \item{V}{vector of length \mjseqn{k} with the corresponding sampling variances or a \mjeqn{k \times k}{kxk} variance-covariance matrix of the sampling errors. See \sQuote{Details}.} \item{W}{optional argument to specify a vector of length \mjseqn{k} with user-defined weights or a \mjeqn{k \times k}{kxk} user-defined weight matrix. See \sQuote{Details}.} \item{mods}{optional argument to include one or more moderators in the model. A single moderator can be given as a vector of length \mjseqn{k} specifying the values of the moderator. Multiple moderators are specified by giving a matrix with \mjseqn{k} rows and as many columns as there are moderator variables. Alternatively, a model \code{\link{formula}} can be used to specify the model. See \sQuote{Details}.} \item{random}{either a single one-sided formula or list of one-sided formulas to specify the random-effects structure of the model. See \sQuote{Details}.} \item{struct}{character string to specify the variance structure of an \code{~ inner | outer} formula in the \code{random} argument. Either \code{"CS"} for compound symmetry, \code{"HCS"} for heteroscedastic compound symmetry, \code{"UN"} for an unstructured variance-covariance matrix, \code{"ID"} for a scaled identity matrix, \code{"DIAG"} for a diagonal matrix, \code{"AR"} for an AR(1) autoregressive structure, \code{"HAR"} for a heteroscedastic AR(1) autoregressive structure, \code{"CAR"} for a continuous-time autoregressive structure, or one of \code{"SPEXP"}, \code{"SPGAU"}, \code{"SPLIN"}, \code{"SPRAT"}, or \code{"SPSPH"} for one of the spatial correlation structures. See \sQuote{Details}.} \item{intercept}{logical to specify whether an intercept should be added to the model (the default is \code{TRUE}). Ignored when \code{mods} is a formula.} \item{data}{optional data frame containing the data supplied to the function.} \item{slab}{optional vector with labels for the \mjseqn{k} outcomes/studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies (or more precisely, rows of the dataset) that should be used for the analysis.} \item{method}{character string to specify whether the model should be fitted via maximum-likelihood (\code{"ML"}) or via restricted maximum-likelihood (\code{"REML"}) estimation. Default is \code{"REML"}.} \item{test}{character string to specify how test statistics and confidence intervals for the fixed effects should be computed. By default (\code{test="z"}), Wald-type tests and CIs are obtained, which are based on a standard normal distribution. When \code{test="t"}, a t-distribution is used instead. See \sQuote{Details}.} \item{dfs}{character string to specify how the (denominator) degrees of freedom should be calculated when \code{test="t"}. Either \code{dfs="residual"} or \code{dfs="contain"}. Can also be a numeric vector with the degrees of freedom for each model coefficient. See \sQuote{Details}.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. 
If unspecified, the default is 4.} \item{btt}{optional vector of indices to specify which coefficients to include in the omnibus test of moderators. Can also be a string to grep for. See \sQuote{Details}.} \item{R}{an optional named list of known correlation matrices corresponding to (some of) the components specified via the \code{random} argument. See \sQuote{Details}.} \item{Rscale}{character string, integer, or logical to specify how matrices specified via the \code{R} argument should be scaled. See \sQuote{Details}.} \item{sigma2}{optional numeric vector (of the same length as the number of random intercept components specified via the \code{random} argument) to fix the corresponding \mjseqn{\sigma^2} value(s). A specific \mjseqn{\sigma^2} value can be fixed by setting the corresponding element of this argument to the desired value. A specific \mjseqn{\sigma^2} value will be estimated if the corresponding element is set equal to \code{NA}. See \sQuote{Details}.} \item{tau2}{optional numeric value (for \code{struct="CS"}, \code{"AR"}, \code{"CAR"}, or a spatial correlation structure) or vector (for \code{struct="HCS"}, \code{"UN"}, or \code{"HAR"}) to fix the amount of (residual) heterogeneity for the levels of the inner factor corresponding to an \code{~ inner | outer} formula specified in the \code{random} argument. A numeric value fixes a particular \mjseqn{\tau^2} value, while \code{NA} means that the value should be estimated. See \sQuote{Details}.} \item{rho}{optional numeric value (for \code{struct="CS"}, \code{"HCS"}, \code{"AR"}, \code{"HAR"}, \code{"CAR"}, or a spatial correlation structure) or vector (for \code{struct="UN"}) to fix the correlation between the levels of the inner factor corresponding to an \code{~ inner | outer} formula specified in the \code{random} argument. A numeric value fixes a particular \mjseqn{\rho} value, while \code{NA} means that the value should be estimated. See \sQuote{Details}.} \item{gamma2}{as \code{tau2} argument, but for a second \code{~ inner | outer} formula specified in the \code{random} argument. See \sQuote{Details}.} \item{phi}{as \code{rho} argument, but for a second \code{~ inner | outer} formula specified in the \code{random} argument. See \sQuote{Details}.} \item{sparse}{logical to specify whether the function should use sparse matrix objects to the extent possible (can speed up model fitting substantially for certain models). See \sQuote{Note}.} \item{verbose}{logical to specify whether output should be generated on the progress of the model fitting (the default is \code{FALSE}). Can also be an integer. Values > 1 generate more verbose output. See \sQuote{Note}.} \item{control}{optional list of control values for the estimation algorithms. If unspecified, default values are defined inside the function. See \sQuote{Note}.} \item{\dots}{additional arguments.} } \details{ \bold{Specifying the Data} The function can be used in conjunction with any of the usual effect size or outcome measures used in meta-analyses (e.g., log risk ratios, log odds ratios, risk differences, mean differences, standardized mean differences, log transformed ratios of means, raw correlation coefficients, correlation coefficients transformed with Fisher's r-to-z transformation, and so on). Simply specify the observed effect sizes or outcomes via the \code{yi} argument and the corresponding sampling variances via the \code{V} argument. 
In case the sampling errors are correlated, one can specify the entire variance-covariance matrix of the sampling errors via the \code{V} argument. The \code{\link{escalc}} function can be used to compute a wide variety of effect size or outcome measures (and the corresponding sampling variances) based on summary statistics. Equations for computing the covariance between the sampling errors for a variety of different effect size or outcome measures can be found, for example, in Gleser and Olkin (2009). For raw and Fisher r-to-z transformed correlations, one can find suitable equations, for example, in Steiger (1980). The latter are implemented in the \code{\link{rcalc}} function. \bold{Specifying Fixed Effects} With \code{rma.mv(yi, V)}, a fixed-effects model is fitted to the data (note: arguments \code{struct}, \code{sigma2}, \code{tau2}, \code{rho}, \code{gamma2}, \code{phi}, \code{R}, and \code{Rscale} are not relevant then and are ignored). The model is then simply given by \mjeqn{y \sim N(\theta, V)}{y ~ N(\theta, V)}, where \mjseqn{y} is a (column) vector with the observed outcomes, \mjseqn{\theta} is the (average) true outcome, and \mjseqn{V} is the variance-covariance matrix of the sampling errors (if a vector of sampling variances is provided via the \code{V} argument, then \mjseqn{V} is assumed to be diagonal). One or more moderators can be included in the model via the \code{mods} argument. A single moderator can be given as a (row or column) vector of length \mjseqn{k} specifying the values of the moderator. Multiple moderators are specified by giving an appropriate model matrix (i.e., \mjseqn{X}) with \mjseqn{k} rows and as many columns as there are moderator variables (e.g., \code{mods = cbind(mod1, mod2, mod3)}, where \code{mod1}, \code{mod2}, and \code{mod3} correspond to the names of the variables for the three moderator variables). The intercept is added to the model matrix by default unless \code{intercept=FALSE}. Alternatively, one can use standard \code{\link{formula}} syntax to specify the model. In this case, the \code{mods} argument should be set equal to a one-sided formula of the form \code{mods = ~ model} (e.g., \code{mods = ~ mod1 + mod2 + mod3}). Interactions, polynomial terms, and factors can be easily added to the model in this manner. When specifying a model formula via the \code{mods} argument, the \code{intercept} argument is ignored. Instead, the inclusion/exclusion of the intercept is controlled by the specified formula (e.g., \code{mods = ~ mod1 + mod2 + mod3 - 1} would lead to the removal of the intercept). One can also directly specify moderators via the \code{yi} argument (e.g., \code{rma.mv(yi ~ mod1 + mod2 + mod3, V)}). In that case, the \code{mods} argument is ignored and the inclusion/exclusion of the intercept again is controlled by the specified formula. With moderators included, the model is then given by \mjeqn{y \sim N(X \beta, V)}{y ~ N(X \beta, V)}, where \mjseqn{X} denotes the model matrix containing the moderator values (and possibly the intercept) and \mjseqn{\beta} is a column vector containing the corresponding model coefficients. The model coefficients (i.e., \mjseqn{\beta}) are then estimated with \mjeqn{b = (X'WX)^{-1} X'Wy}{b = (X'WX)^(-1) X'Wy}, where \mjeqn{W = V^{-1}}{W = V^(-1)} is the weight matrix (without moderators, \mjseqn{X} is just a column vector of 1's). With the \code{W} argument, one can also specify user-defined weights (or a weight matrix).
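As a small sketch of these equivalent ways of specifying the moderators (using the BCG vaccine data also used in other examples in this documentation; the calls below are illustrative only): \preformatted{### calculate log risk ratios and corresponding sampling variances
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)

### moderators specified via a model matrix (intercept added by default)
rma.mv(yi, vi, mods = cbind(ablat, year), data=dat)

### equivalent specification via a model formula
rma.mv(yi, vi, mods = ~ ablat + year, data=dat)

### equivalent specification directly via the 'yi' argument
rma.mv(yi ~ ablat + year, vi, data=dat)}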
\bold{Specifying Random Effects} One can fit random/mixed-effects models to the data by specifying the desired random effects structure via the \code{random} argument. The \code{random} argument is either a single one-sided formula or a list of one-sided formulas. One formula type that can be specified via this argument is of the form \code{random = ~ 1 | id}. Such a formula adds a random effect corresponding to the grouping variable/factor \code{id} to the model. Outcomes with the same value/level of the \code{id} variable/factor receive the same value of the random effect, while outcomes with different values/levels of the \code{id} variable/factor are assumed to be independent. The variance component corresponding to such a formula is denoted by \mjseqn{\sigma^2}. An arbitrary number of such formulas can be specified as a list of formulas (e.g., \code{random = list(~ 1 | id1, ~ 1 | id2)}), with variance components \mjseqn{\sigma^2_1}, \mjseqn{\sigma^2_2}, and so on. Nested random effects of this form can also be added using \code{random = ~ 1 | id1/id2}, which adds a random effect corresponding to the grouping variable/factor \code{id1} and a random effect corresponding to \code{id2} within \code{id1} to the model. This can be extended to models with even more levels of nesting (e.g., \code{random = ~ 1 | id1/id2/id3}). Random effects of this form are useful to model clustering (and hence non-independence) induced by a multilevel structure in the data (e.g., outcomes derived from the same paper, lab, research group, or species may be more similar to each other than outcomes derived from different papers, labs, research groups, or species). See, for example, Konstantopoulos (2011) and Nakagawa and Santos (2012) for more details. See \code{\link{dat.konstantopoulos2011}}, \code{\link{dat.bornmann2007}}, \code{\link{dat.obrien2003}}, and \code{\link{dat.crede2010}} for examples of multilevel meta-analyses. In addition to (or instead of) specifying one or multiple \code{~ 1 | id} terms, the \code{random} argument can also contain a formula of the form \code{~ inner | outer}. Outcomes with the same value/level of the \code{outer} grouping variable/factor share correlated random effects corresponding to the levels of the \code{inner} grouping variable/factor, while outcomes with different values/levels of the \code{outer} grouping variable/factor are assumed to be independent (note that the \code{inner} grouping variable must either be a factor or a character variable). The \code{struct} argument is used to specify the variance structure corresponding to the \code{inner} variable/factor. With \code{struct="CS"}, a compound symmetric structure is assumed (i.e., a single variance component \mjseqn{\tau^2} corresponding to all values/levels of the \code{inner} variable/factor and a single correlation coefficient \mjseqn{\rho} for the correlation between the different values/levels). With \code{struct="HCS"}, a heteroscedastic compound symmetric structure is assumed (with variance components \mjseqn{\tau^2_1}, \mjseqn{\tau^2_2}, and so on, corresponding to the values/levels of the \code{inner} variable/factor and a single correlation coefficient \mjseqn{\rho} for the correlation between the different values/levels).
With \code{struct="UN"}, an unstructured variance-covariance matrix is assumed (with variance components \mjseqn{\tau^2_1}, \mjseqn{\tau^2_2}, and so on, corresponding to the values/levels of the \code{inner} variable/factor and correlation coefficients \mjeqn{\rho_{12}}{\rho_12}, \mjeqn{\rho_{13}}{\rho_13}, \mjeqn{\rho_{23}}{\rho_23}, and so on, for the various combinations of the values/levels of the \code{inner} variable/factor). \if{html,latex}{For example, for an \code{inner} grouping variable/factor with four levels, the three structures correspond to variance-covariance matrices of the form:} \if{html}{\figure{structs1.png}{options: width=700}} \if{latex}{\figure{structs1.png}{options: width=5.4in}} Structures \code{struct="ID"} and \code{struct="DIAG"} are just like \code{struct="CS"} and \code{struct="HCS"}, respectively, except that \mjseqn{\rho} is automatically set to 0, so that we either get a scaled identity matrix or a diagonal matrix. With the \code{outer} factor corresponding to a study identification variable and the \code{inner} factor corresponding to a variable indicating the treatment type or study arm, such a random effect could be used to estimate how strongly different treatment effects or outcomes within the same study are correlated and/or whether the amount of heterogeneity differs across different treatment types/arms. Network meta-analyses (also known as mixed treatment comparisons) will also typically require such a random effect (e.g., Salanti et al., 2008). The meta-analytic bivariate model (e.g., van Houwelingen, Arends, & Stijnen, 2002) can also be fitted in this manner (see the examples below). The \code{inner} factor could also correspond to a variable indicating different types of outcomes measured within the same study, which allows for fitting multivariate models with multiple correlated effects/outcomes per study (e.g., Berkey et al., 1998; Kalaian & Raudenbush, 1996). See \code{\link{dat.berkey1998}}, \code{\link{dat.assink2016}}, \code{\link{dat.kalaian1996}}, \code{\link{dat.dagostino1998}}, and \code{\link{dat.craft2003}} for examples of multivariate meta-analyses with multiple outcomes. See \code{\link{dat.kearon1998}} for an example using a bivariate model to analyze sensitivity and specificity. See \code{\link{dat.hasselblad1998}}, \code{\link{dat.pagliaro1992}}, \code{\link{dat.lopez2019}}, and \code{\link{dat.senn2013}} for examples of network meta-analyses. For meta-analyses of studies reporting outcomes at multiple time points, it may also be reasonable to assume that the true effects/outcomes are correlated over time according to an autoregressive structure (Ishak et al., 2007; Trikalinos & Olkin, 2012). For this purpose, one can also choose \code{struct="AR"}, corresponding to a structure with a single variance component \mjseqn{\tau^2} and AR(1) autocorrelation among the values of the random effect. The values of the \code{inner} variable (which does not have to be a factor here) should then reflect the various time points, with increasing values reflecting later time points. This structure assumes equally spaced time points, so the actual values of the \code{inner} variable are not relevant, only their ordering. One can also use \code{struct="HAR"}, which allows for fitting a heteroscedastic AR(1) structure (with variance components \mjseqn{\tau^2_1}, \mjseqn{\tau^2_2}, and so on). Finally, when time points are not evenly spaced, one might consider using \code{struct="CAR"} for a continuous-time autoregressive structure. 
\if{html,latex}{For example, for an \code{inner} grouping variable with four time points, these structures correspond to variance-covariance matrices of the form:} \if{html}{\figure{structs2.png}{options: width=700}} \if{latex}{\figure{structs2.png}{options: width=5.4in}} See \code{\link{dat.fine1993}} and \code{\link{dat.ishak2007}} for examples involving such structures. For outcomes that have a known spatial configuration, various spatial correlation structures are also available. For these structures, the formula is of the form \code{random = ~ var1 + var2 + \dots | outer}, where \code{var1}, \code{var2}, and so on are variables to indicate the spatial coordinates (e.g., longitude and latitude) based on which distances (by default Euclidean) will be computed. Let \mjseqn{d} denote the distance between two points that share the same level of the \code{outer} variable (if all true effects/outcomes are allowed to be spatially correlated, simply set \code{outer} to a variable that is a constant). Then the correlation between the true effects/outcomes corresponding to these two points is a function of \mjseqn{d} and the parameter \mjseqn{\rho}. The following table shows the types of spatial correlation structures that can be specified and the equations for the correlation. The covariance between the true effects/outcomes is then the correlation times \mjseqn{\tau^2}. \tabular{lllll}{ structure \tab \ics \tab \code{struct} \tab \ics \tab correlation \cr exponential \tab \ics \tab \code{"SPEXP"} \tab \ics \tab \mjeqn{\exp(-d/\rho)}{exp(-d/rho)} \cr Gaussian \tab \ics \tab \code{"SPGAU"} \tab \ics \tab \mjeqn{\exp(-d^2/\rho^2)}{exp(-d^2/rho^2)} \cr linear \tab \ics \tab \code{"SPLIN"} \tab \ics \tab \mjeqn{(1 - d/\rho) I(d < \rho)}{(1 - d/rho) I(d < rho)} \cr rational quadratic \tab \ics \tab \code{"SPRAT"} \tab \ics \tab \mjeqn{1 - (d/\rho)^2 / (1 + (d/\rho)^2)}{1 - (d/rho)^2 / (1 + (d/rho)^2)} \cr spherical \tab \ics \tab \code{"SPSPH"} \tab \ics \tab \mjeqn{(1 - 1.5(d/\rho) + 0.5(d/\rho)^3) I(d < \rho)}{(1 - 1.5(d/rho) + 0.5(d/rho)^3) I(d < rho)} } Note that \mjseqn{I(d < \rho)} is equal to \mjseqn{1} if \mjseqn{d < \rho} and \mjseqn{0} otherwise. The parameterization of the various structures is based on Pinheiro and Bates (2000). Instead of Euclidean distances, one can also use other distance measures by setting (the undocumented) argument \code{dist} to either \code{"maximum"} for the maximum distance between two points (supremum norm), to \code{"manhattan"} for the absolute distance between the coordinate vectors (L1 norm), or to \code{"gcd"} for the great-circle distance (WGS84 ellipsoid method). In the latter case, only two variables, namely the longitude and latitude (in decimal degrees, with minus signs for West and South), must be specified. If a distance matrix has already been computed, one can also pass this matrix as a list element to the \code{dist} argument. In this case, one should use a formula of the form \code{random = ~ id | outer}, where \code{id} are location identifiers, with corresponding row/column names in the distance matrix specified via the \code{dist} argument. See \code{\link{dat.maire2019}} for an example of a meta-analysis with a spatial correlation structure. The \code{random} argument can also contain a second formula of the form \code{~ inner | outer} (but no more!). A second formula of this form works exactly as described above, but its variance components are denoted by \mjseqn{\gamma^2} and its correlation components by \mjseqn{\phi}.
The \code{struct} argument should then be of length 2 to specify the variance-covariance structure for the first and second component, respectively. When the \code{random} argument contains a formula of the form \code{~ 1 | id}, one can use the (optional) argument \code{R} to specify a corresponding known correlation matrix for the random effect (i.e., \code{R = list(id = Cor)}, where \code{Cor} is the correlation matrix). In that case, outcomes with the same value/level of the \code{id} variable/factor receive the same value for the random effect, while outcomes with different values/levels of the \code{id} variable/factor receive values that are correlated as specified in the corresponding correlation matrix given via the \code{R} argument. The column/row names of the correlation matrix given via the \code{R} argument must therefore contain all of the values/levels of the \code{id} variable/factor. When the \code{random} argument contains multiple formulas of the form \code{~ 1 | id}, one can specify known correlation matrices for none, some, or all of those terms (e.g., with \code{random = list(~ 1 | id1, ~ 1 | id2)}, one could specify \code{R = list(id1 = Cor1)} or \code{R = list(id1 = Cor1, id2 = Cor2)}, where \code{Cor1} and \code{Cor2} are the correlation matrices corresponding to the grouping variables/factors \code{id1} and \code{id2}, respectively). Such a random effect with a known (or at least approximately known) correlation structure is useful in a variety of contexts. For example, such a component can be used to account for the correlations induced by the shared phylogenetic history among organisms (e.g., plants, fungi, animals). In that case, \code{~ 1 | species} is used to specify the species and argument \code{R} is used to specify the phylogenetic correlation matrix of the species studied in the meta-analysis. The corresponding variance component then indicates how much variance/heterogeneity is attributable to the specified phylogeny. See Nakagawa and Santos (2012) for more details. As another example, in a genetic meta-analysis studying disease association for several single nucleotide polymorphisms (SNPs), linkage disequilibrium (LD) among the SNPs can induce an approximately known degree of correlation among the effects/outcomes. In that case, \code{~ 1 | snp} could be used to specify the SNPs and \code{R} the corresponding LD correlation matrix for the SNPs included in the meta-analysis. The \code{Rscale} argument controls how matrices specified via the \code{R} argument are scaled. With \code{Rscale="none"} (or \code{Rscale=0} or \code{Rscale=FALSE}), no scaling is used. With \code{Rscale="cor"} (or \code{Rscale=1} or \code{Rscale=TRUE}), the \code{\link{cov2cor}} function is used to ensure that the matrices are correlation matrices (assuming they were covariance matrices to begin with). With \code{Rscale="cor0"} (or \code{Rscale=2}), first \code{\link{cov2cor}} is used and then the elements of each correlation matrix are scaled with \mjseqn{(R - \min(R)) / (1 - \min(R))} (this ensures that a correlation of zero in a phylogenetic correlation matrix corresponds to the split at the root node of the tree comprising the species that are actually analyzed). Finally, \code{Rscale="cov0"} (or \code{Rscale=3}) only rescales with \mjseqn{(R - \min(R))} (which ensures that a phylogenetic covariance matrix is rooted at the lowest split). See \code{\link{dat.moura2021}} and \code{\link{dat.lim2014}} for examples of meta-analyses with phylogenetic correlation structures. 
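For illustration, a minimal sketch of such a model specification (assuming a data frame \code{dat} with observed outcomes \code{yi}, sampling variances \code{vi}, and a \code{species} variable, together with a phylogenetic correlation matrix \code{phylo_cor} whose row/column names match the levels of \code{species}; these object names are placeholders, not part of an actual dataset in the package):

\preformatted{res <- rma.mv(yi, vi, random = ~ 1 | species,
              R = list(species = phylo_cor), data = dat)}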
Together with the variance-covariance matrix of the sampling errors (i.e., \mjseqn{V}), the specified random effects structure of the model implies a particular marginal variance-covariance matrix of the observed effect sizes or outcomes. Once estimates of the variance components (i.e., \mjseqn{\sigma^2}, \mjseqn{\tau^2}, \mjseqn{\rho}, \mjseqn{\gamma^2}, and/or \mjseqn{\phi} values) have been obtained (either using maximum likelihood or restricted maximum likelihood estimation), the estimated marginal variance-covariance matrix can be constructed (denoted by \mjseqn{M}). The model coefficients (i.e., \mjseqn{\beta}) are then estimated with \mjeqn{b = (X'WX)^{-1} X'Wy}{b = (X'WX)^(-1) X'Wy}, where \mjeqn{W = M^{-1}}{W = M^(-1)} is the weight matrix. With the \code{W} argument, one can again specify user-defined weights (or a weight matrix). \bold{Fixing Variance Components and/or Correlations} Arguments \code{sigma2}, \code{tau2}, \code{rho}, \code{gamma2}, and \code{phi} can be used to fix particular variance components and/or correlations at a given value. This is useful for sensitivity analyses (e.g., for plotting the regular/restricted log-likelihood as a function of a particular variance component or correlation) or for imposing a desired variance-covariance structure on the data. For example, if \code{random = list(~ 1 | id1, ~ 1 | id2)}, then \code{sigma2} must be of length 2 (corresponding to \mjseqn{\sigma^2_1} and \mjseqn{\sigma^2_2}) and a fixed value can be assigned to either or both variance components. Setting a particular component to \code{NA} means that the component will be estimated by the function (e.g., \code{sigma2=c(0,NA)} would fix \mjseqn{\sigma^2_1} to 0 and estimate \mjseqn{\sigma^2_2}). Argument \code{tau2} is only relevant when the \code{random} argument contains an \code{~ inner | outer} formula. In that case, if the \code{tau2} argument is used, it must be either of length 1 (for \code{"CS"}, \code{"ID"}, \code{"AR"}, \code{"CAR"}, or one of the spatial correlation structures) or of the same length as the number of levels of the inner factor (for \code{"HCS"}, \code{"DIAG"}, \code{"UN"}, or \code{"HAR"}). A numeric value in the \code{tau2} argument then fixes the corresponding variance component to that value, while \code{NA} means that the component will be estimated. Similarly, if argument \code{rho} is used, it must be either of length 1 (for \code{"CS"}, \code{"HCS"}, \code{"AR"}, \code{"HAR"}, or one of the spatial correlation structures) or of length \mjseqn{lvls(lvls-1)/2} (for \code{"UN"}), where \mjseqn{lvls} denotes the number of levels of the inner factor. Again, a numeric value fixes the corresponding correlation, while \code{NA} means that the correlation will be estimated. For example, with \code{struct="CS"} and \code{rho=0}, the variance-covariance matrix of the inner factor will be diagonal with \mjseqn{\tau^2} along the diagonal. For \code{struct="UN"}, the values specified under \code{rho} should be given in column-wise order (e.g., for an \code{inner} grouping variable/factor with four levels, the order would be \mjeqn{\rho_{12}}{\rho_12}, \mjeqn{\rho_{13}}{\rho_13}, \mjeqn{\rho_{14}}{\rho_14}, \mjeqn{\rho_{23}}{\rho_23}, \mjeqn{\rho_{24}}{\rho_24}, \mjeqn{\rho_{34}}{\rho_34}). Similarly, arguments \code{gamma2} and \code{phi} are only relevant when the \code{random} argument contains a second \code{~ inner | outer} formula. The arguments then work exactly as described above.
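To make the use of these arguments concrete, here is a brief sketch with placeholder variable names (not tied to a particular dataset): the first call fixes \mjseqn{\sigma^2_1} to 0 while estimating \mjseqn{\sigma^2_2}; the second fixes the correlation of a compound symmetric \code{~ inner | outer} structure to 0.5 while estimating \mjseqn{\tau^2}.

\preformatted{res1 <- rma.mv(yi, vi, random = list(~ 1 | district, ~ 1 | school),
               sigma2 = c(0, NA), data = dat)
res2 <- rma.mv(yi, vi, random = ~ outcome | study, struct = "CS",
               rho = 0.5, data = dat)}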
\bold{Omnibus Test of Moderators} For models including moderators, an omnibus test of all model coefficients is conducted that excludes the intercept (the first coefficient) if it is included in the model. If no intercept is included in the model, then the omnibus test includes all of the coefficients in the model including the first. Alternatively, one can manually specify the indices of the coefficients to test via the \code{btt} argument. For example, with \code{btt=c(3,4)}, only the third and fourth coefficient from the model would be included in the test (if an intercept is included in the model, then it corresponds to the first coefficient in the model). Instead of specifying the coefficient numbers, one can specify a string for \code{btt}. In that case, \code{\link{grep}} will be used to search for all coefficient names that match the string. The omnibus test is called the \mjseqn{Q_M}-test and follows, under the assumptions of the model, a chi-square distribution with \mjseqn{m} degrees of freedom (with \mjseqn{m} denoting the number of coefficients tested) under the null hypothesis (that the true value of all coefficients tested is equal to 0). \bold{Categorical Moderators} Categorical moderator variables can be included in the model via the \code{mods} argument in the same way that appropriately (dummy) coded categorical independent variables can be included in linear models. One can either do the dummy coding manually or use a model formula together with the \code{\link{factor}} function to let \R handle the coding automatically (note that string/character variables in a model formula are automatically converted to factors). \bold{Tests and Confidence Intervals} By default, tests of individual coefficients in the model (and the corresponding confidence intervals) are based on a standard normal distribution, while the omnibus test is based on a chi-square distribution (see above). As an alternative, one can set \code{test="t"}, in which case tests of individual coefficients and confidence intervals are based on a t-distribution with \mjseqn{k-p} degrees of freedom, while the omnibus test statistic then uses an F-distribution with \mjseqn{m} and \mjseqn{k-p} degrees of freedom (with \mjseqn{k} denoting the total number of estimates included in the analysis and \mjseqn{p} the total number of model coefficients including the intercept if it is present). Note that \code{test="t"} is not the same as \code{test="knha"} in \code{\link{rma.uni}}, as no adjustment to the standard errors of the estimated coefficients is made. The method for calculating the (denominator) degrees of freedom described above (which corresponds to \code{dfs="residual"}) is quite simplistic and may lead to tests with inflated Type I error rates and confidence intervals that are too narrow on average. As an alternative, one can set \code{dfs="contain"}, in which case the degrees of freedom for the test of a particular model coefficient, \mjseqn{b_j}, are determined by checking whether \mjseqn{x_j}, the corresponding column of the model matrix \mjseqn{X}, varies at the level corresponding to a particular random effect in the model. If such a random effect can be found, then the degrees of freedom are set to \mjseqn{l-p}, where \mjseqn{l} denotes the number of unique values/levels that the random effect can take on (i.e., for an \code{~ 1 | id} term, the number of unique values/levels of the \code{id} variable and for an \code{~ inner | outer} term, the number of unique values/levels of the \code{outer} variable).
If no such random effect can be found, then \mjseqn{k-p} is used as the degrees of freedom. For the omnibus F-test, the minimum of the degrees of freedom of all coefficients involved in the test is used as the denominator degrees of freedom. This approach for calculating the degrees of freedom should often lead to tests with better control of the Type I error rate and confidence intervals with closer to nominal coverage rates. One can also set \code{dfs} to a numeric vector with the desired values for the degrees of freedom for testing the model coefficients (e.g., if some other method for determining the degrees of freedom was used). \bold{Test for (Residual) Heterogeneity} A test for (residual) heterogeneity is automatically carried out by the function. Without moderators in the model, this test is the generalized/weighted least squares extension of Cochran's \mjseqn{Q}-test, which tests whether the variability in the observed effect sizes or outcomes is larger than one would expect based on sampling variability (and the given covariances among the sampling errors) alone. A significant test suggests that the true effects/outcomes are heterogeneous. When moderators are included in the model, this is the \mjseqn{Q_E}-test for residual heterogeneity, which tests whether the variability in the observed effect sizes or outcomes that is not accounted for by the moderators included in the model is larger than one would expect based on sampling variability (and the given covariances among the sampling errors) alone. } \value{ An object of class \code{c("rma.mv","rma")}. The object is a list containing the following components: \item{beta}{estimated coefficients of the model.} \item{se}{standard errors of the coefficients.} \item{zval}{test statistics of the coefficients.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bound of the confidence intervals for the coefficients.} \item{ci.ub}{upper bound of the confidence intervals for the coefficients.} \item{vb}{variance-covariance matrix of the estimated coefficients.} \item{sigma2}{estimated \mjseqn{\sigma^2} value(s).} \item{tau2}{estimated \mjseqn{\tau^2} value(s).} \item{rho}{estimated \mjseqn{\rho} value(s).} \item{gamma2}{estimated \mjseqn{\gamma^2} value(s).} \item{phi}{estimated \mjseqn{\phi} value(s).} \item{k}{number of observed effect sizes or outcomes included in the analysis.} \item{p}{number of coefficients in the model (including the intercept).} \item{m}{number of coefficients included in the omnibus test of moderators.} \item{QE}{test statistic of the test for (residual) heterogeneity.} \item{QEp}{corresponding p-value.} \item{QM}{test statistic of the omnibus test of moderators.} \item{QMp}{corresponding p-value.} \item{int.only}{logical that indicates whether the model is an intercept-only model.} \item{yi, V, X}{the vector of outcomes, the corresponding variance-covariance matrix of the sampling errors, and the model matrix.} \item{M}{the estimated marginal variance-covariance matrix of the observed effect sizes or outcomes.} \item{fit.stats}{a list with the log-likelihood, deviance, AIC, BIC, and AICc values.} \item{\dots}{some additional elements/values.} } \section{Methods}{ The results of the fitted model are formatted and printed with the \code{\link{print.rma.mv}} function. If fit statistics should also be given, use \code{\link{summary.rma}} (or use the \code{\link{fitstats.rma}} function to extract them). 
Full versus reduced model comparisons in terms of fit statistics and likelihoods can be obtained with \code{\link{anova.rma}}. Wald-type tests for sets of model coefficients or linear combinations thereof can be obtained with the same function. Tests and confidence intervals based on (cluster) robust methods can be obtained with \code{\link{robust.rma.mv}}. Predicted/fitted values can be obtained with \code{\link{predict.rma}} and \code{\link{fitted.rma}}. For best linear unbiased predictions, see \code{\link{ranef.rma.mv}}. The \code{\link{residuals.rma}}, \code{\link{rstandard.rma.mv}}, and \code{\link{rstudent.rma.mv}} functions extract raw and standardized residuals. See \code{\link{influence.rma.mv}} for additional case diagnostics (e.g., to determine influential studies). For models with moderators, variance inflation factors can be obtained with \code{\link{vif.rma}}. Confidence intervals for any variance/correlation parameters in the model can be obtained with \code{\link{confint.rma.mv}}. For random/mixed-effects models, the \code{\link{profile.rma.mv}} function can be used to obtain a plot of the (restricted) log-likelihood as a function of a specific variance component or correlation parameter of the model. For models with moderators, \code{\link{regplot.rma}} draws scatter plots / bubble plots, showing the (marginal) relationship between the observed outcomes and a selected moderator from the model. Other extractor functions include \code{\link{coef.rma}}, \code{\link{vcov.rma}}, \code{\link{logLik.rma}}, \code{\link{deviance.rma}}, \code{\link{AIC.rma}}, \code{\link{BIC.rma}}, \code{\link{hatvalues.rma.mv}}, and \code{\link{weights.rma.mv}}. } \note{ Argument \code{V} also accepts a list of variance-covariance matrices for the observed effect sizes or outcomes. From the list elements, the full (block diagonal) variance-covariance matrix is then automatically constructed. Model fitting is done via numerical optimization over the model parameters. By default, \code{\link{nlminb}} is used for the optimization. One can also choose a different optimizer via the \code{control} argument (e.g., \code{control=list(optimizer="optim")}). When using \code{\link{optim}}, one can set the particular method via the \code{optmethod} argument (e.g., \code{control=list(optimizer="optim", optmethod="BFGS")}). Besides \code{\link{nlminb}} and \code{\link{optim}}, one can also choose one of the optimizers from the \code{minqa} package (i.e., \code{\link[minqa]{uobyqa}}, \code{\link[minqa]{newuoa}}, or \code{\link[minqa]{bobyqa}}), one of the (derivative-free) algorithms from the \code{\link[nloptr]{nloptr}} package, the Newton-type algorithm implemented in \code{\link{nlm}}, the various algorithms implemented in the \code{dfoptim} package (\code{\link[dfoptim]{hjk}} for the Hooke-Jeeves, \code{\link[dfoptim]{nmk}} for the Nelder-Mead, and \code{\link[dfoptim]{mads}} for the Mesh Adaptive Direct Searches (MADS) algorithm), the quasi-Newton type optimizer \code{\link[ucminf]{ucminf}} from the package of the same name, or the parallelized version of the L-BFGS-B algorithm implemented in \code{\link[optimParallel]{optimParallel}} from the package of the same name. The optimizer name must be given as a character string (i.e., in quotes). Additional control parameters can be specified via the \code{control} argument (e.g., \code{control=list(iter.max=500, rel.tol=1e-8)}).
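For example, a minimal sketch of switching the optimizer and optimization method (the random effects structure and data below are placeholders):

\preformatted{res <- rma.mv(yi, vi, random = ~ 1 | study/id, data = dat,
              control = list(optimizer = "optim", optmethod = "BFGS"))}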
For \code{\link[nloptr]{nloptr}}, the default is to use the BOBYQA implementation from that package with a relative convergence criterion of \code{1e-8} on the function value (i.e., log-likelihood), but this can be changed via the \code{algorithm} and \code{ftol_rel} arguments (e.g., \code{control=list(optimizer="nloptr", algorithm="NLOPT_LN_SBPLX", ftol_rel=1e-6)}). For \code{\link[optimParallel]{optimParallel}}, the control argument \code{ncpus} can be used to specify the number of cores to use for the parallelization (e.g., \code{control=list(optimizer="optimParallel", ncpus=2)}). With \code{parallel::detectCores()}, one can check on the number of available cores on the local machine. At the moment, the starting values are not chosen in a terribly clever way and could be far off. As a result, the optimizer may be slow to converge or may even get stuck at a local maximum. One can set the starting values manually for the various variance components and correlations in the model via the \code{control} argument by specifying the vectors \code{sigma2.init}, \code{tau2.init}, \code{rho.init}, \code{gamma2.init}, and/or \code{phi.init} as needed. Especially for complex models, it is a good idea to try out different starting values to make sure that the same estimates are obtained. Information on the progress of the optimization algorithm can be obtained by setting \code{verbose=TRUE} (this won't work when using parallelization). Since fitting complex models with many random effects can be computationally expensive, this option is useful to determine how the model fitting is progressing. One can also set \code{verbose} to an integer (\code{verbose=2} yields even more information and \code{verbose=3} also sets \code{options(warn=1)} temporarily). Whether particular variance components and/or correlations are actually identifiable needs to be carefully examined when fitting complex models. The function does some limited checking internally to fix variances and/or correlations at zero when it is clear that insufficient information is available to estimate a particular parameter (e.g., if a particular factor has only a single level, the corresponding variance component cannot be estimated). However, it is strongly advised in general to do post model fitting checks to make sure that the likelihood surface around the ML/REML estimates is not flat for some combination of the parameter estimates (which would imply that the estimates are essentially arbitrary). For example, one can plot the (restricted) log-likelihood as a function of each variance component and correlation in the model to make sure that each profile plot shows a clear peak at the corresponding ML/REML estimates. The \code{\link{profile.rma.mv}} function can be used for this purpose. Finally, note that the model fitting is not done in a very efficient manner at the moment, which is partly a result of allowing for crossed random effects and correlations across the entire dataset (e.g., when using the \code{R} argument). As a result, the function works directly with the entire \mjeqn{k \times k}{kxk} (marginal) variance-covariance matrix of the observed effect sizes or outcomes (instead of working with smaller blocks in a block diagonal structure). Consequently, model fitting can be slow for large \mjseqn{k}.
However, when the variance-covariance structure is actually sparse, a lot of speed can be gained by setting \code{sparse=TRUE}, in which case sparse matrix objects are used (via the \href{https://cran.r-project.org/package=Matrix}{Matrix} package). Also, when model fitting appears to be slow, setting \code{verbose=TRUE} is useful to obtain information on how the model fitting is progressing. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Berkey, C. S., Hoaglin, D. C., Antczak-Bouckoms, A., Mosteller, F., & Colditz, G. A. (1998). Meta-analysis of multiple outcomes by regression with random effects. \emph{Statistics in Medicine}, \bold{17}(22), 2537--2550. \verb{https://doi.org/10.1002/(sici)1097-0258(19981130)17:22<2537::aid-sim953>3.0.co;2-c} Gleser, L. J., & Olkin, I. (2009). Stochastically dependent effect sizes. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 357--376). New York: Russell Sage Foundation. van Houwelingen, H. C., Arends, L. R., & Stijnen, T. (2002). Advanced methods in meta-analysis: Multivariate approach and meta-regression. \emph{Statistics in Medicine}, \bold{21}(4), 589--624. \verb{https://doi.org/10.1002/sim.1040} Ishak, K. J., Platt, R. W., Joseph, L., Hanley, J. A., & Caro, J. J. (2007). Meta-analysis of longitudinal studies. \emph{Clinical Trials}, \bold{4}(5), 525--539. \verb{https://doi.org/10.1177/1740774507083567} Kalaian, H. A., & Raudenbush, S. W. (1996). A multivariate mixed linear model for meta-analysis. \emph{Psychological Methods}, \bold{1}(3), 227-235. \verb{https://doi.org/10.1037/1082-989X.1.3.227} Konstantopoulos, S. (2011). Fixed effects and variance components estimation in three-level meta-analysis. \emph{Research Synthesis Methods}, \bold{2}(1), 61--76. \verb{https://doi.org/10.1002/jrsm.35} Nakagawa, S., & Santos, E. S. A. (2012). Methodological issues and advances in biological meta-analysis. \emph{Evolutionary Ecology}, \bold{26}(5), 1253--1274. \verb{https://doi.org/10.1007/s10682-012-9555-5} Pinheiro, J. C., & Bates, D. (2000). \emph{Mixed-effects models in S and S-PLUS}. New York: Springer. Steiger, J. H. (1980). Tests for comparing elements of a correlation matrix. \emph{Psychological Bulletin}, \bold{87}(2), 245--251. \verb{https://doi.org/10.1037/0033-2909.87.2.245} Salanti, G., Higgins, J. P. T., Ades, A. E., & Ioannidis, J. P. A. (2008). Evaluation of networks of randomized trials. \emph{Statistical Methods in Medical Research}, \bold{17}(3), 279--301. \verb{https://doi.org/10.1177/0962280207080643} Trikalinos, T. A., & Olkin, I. (2012). Meta-analysis of effect sizes reported at multiple time points: A multivariate approach. \emph{Clinical Trials}, \bold{9}(5), 610--620. \verb{https://doi.org/10.1177/1740774512453218} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, and \code{\link{rma.glmm}} for other model fitting functions. \code{\link{dat.konstantopoulos2011}}, \code{\link{dat.hasselblad1998}}, \code{\link{dat.begg1989}}, \code{\link{dat.berkey1998}}, \code{\link{dat.fine1993}}, and \code{\link{dat.ishak2007}} for further examples of the use of the \code{rma.mv} function. 
} \examples{ ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model using rma.uni() rma(yi, vi, data=dat) ### fit random-effects model using rma.mv() ### note: sigma^2 in this model is the same as tau^2 from the previous model rma.mv(yi, vi, random = ~ 1 | trial, data=dat) ### change data into long format dat.long <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### set levels of group variable ("exp" = experimental/vaccinated; "con" = control/non-vaccinated) levels(dat.long$group) <- c("exp", "con") ### set "con" to reference level dat.long$group <- relevel(dat.long$group, ref="con") ### calculate log odds and corresponding sampling variances dat.long <- escalc(measure="PLO", xi=out1, mi=out2, data=dat.long) ### fit bivariate random-effects model using rma.mv() res <- rma.mv(yi, vi, mods = ~ group, random = ~ group | study, struct="UN", data=dat.long) res } \keyword{models} metafor/man/matreg.Rd0000644000176200001440000001777514055736403014260 0ustar liggesusers\name{matreg} \alias{matreg} \title{Fit Regression Models based on Correlation and Covariance Matrices} \description{ Function to fit regression models based on correlation and covariance matrices. \loadmathjax } \usage{ matreg(y, x, R, n, V, cov=FALSE, means, ztor=FALSE, nearPD=FALSE, level=95, digits) } \arguments{ \item{y}{index of the outcome variable.} \item{x}{indices of the predictor variable(s).} \item{R}{correlation or covariance matrix (or only the lower triangular part including the diagonal).} \item{n}{sample size based on which the elements in the correlation/covariance matrix were computed.} \item{V}{variance-covariance matrix of the lower triangular elements of the correlation matrix. Either \code{V} or \code{n} should be specified, not both. See \sQuote{Details}.} \item{cov}{logical to specify whether \code{R} is a covariance matrix (the default is \code{FALSE}).} \item{means}{optional vector to specify the means of the variables (only relevant when \code{cov=TRUE}).} \item{ztor}{logical to specify whether \code{R} is a matrix of r-to-z transformed correlations and it should be back-transformed to raw correlations (the default is \code{FALSE}). See \sQuote{Details}.} \item{nearPD}{logical to specify whether the \code{\link[Matrix]{nearPD}} function from the \href{https://cran.r-project.org/package=Matrix}{Matrix} package should be used when the \mjeqn{R_{x,x}}{R[x,x]} matrix cannot be inverted. See \sQuote{Note}.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} } \details{ Let \mjseqn{R} be a \mjeqn{p \times p}{pxp} correlation or covariance matrix. Let \mjseqn{y} denote the row/column of the outcome variable and \mjseqn{x} the row(s)/column(s) of the predictor variable(s) in this matrix. Let \mjeqn{R_{x,x}}{R[x,x]} and \mjeqn{R_{x,y}}{R[x,y]} denote the corresponding submatrices of \mjseqn{R}. Then \mjdeqn{b = R_{x,x}^{-1} R_{x,y}}{b = R[x,x]^(-1) R[x,y]} yields the standardized or raw regression coefficients (depending on whether \mjseqn{R} is a correlation or covariance matrix) when regressing the outcome variable on the predictor variable(s). The \mjseqn{R} matrix may be computed based on a single sample of \mjseqn{n} subjects. 
In this case, one should specify the sample size via argument \code{n}. The variance-covariance matrix of the standardized regression coefficients is then given by \mjeqn{\mbox{Var}[b] = \mbox{MSE} \times R_{x,x}^{-1}}{Var[b] = MSE * R[x,x]^(-1)}, with \mjeqn{\mbox{MSE} = (1 - b'R_{x,y}) / (n - m)}{MSE = (1 - b'R[x,y]) / (n - m)}, where \mjseqn{m} denotes the number of predictor variables. The standard errors of the regression coefficients are then given by the square root of the diagonal elements of \mjeqn{\mbox{Var}[b]}{Var[b]}. Test statistics (in this case, t-statistics) and the corresponding p-values can then be computed as in a regular regression analysis. When \mjseqn{R} is a covariance matrix, one should set \code{cov=TRUE} and specify the means of the \mjseqn{p} variables via argument \code{means} to obtain raw regression coefficients including the intercept and corresponding standard errors. Alternatively, \mjseqn{R} may be the result of a meta-analysis of correlation coefficients. In this case, the elements in \mjseqn{R} are pooled correlation coefficients and the variance-covariance matrix of these pooled coefficients should be specified via argument \code{V}. The order of elements in \code{V} should correspond to the order of elements in the lower triangular part of \mjseqn{R} column-wise. For example, if \mjseqn{R} is a \mjeqn{4 \times 4}{4x4} matrix, then the elements are \mjseqn{r_{2,1}}, \mjseqn{r_{3,1}}, \mjseqn{r_{4,1}}, \mjseqn{r_{3,2}}, \mjseqn{r_{4,2}}, and \mjseqn{r_{4,3}} and hence \code{V} should be a \mjeqn{6 \times 6}{6x6} variance-covariance matrix of these elements. The variance-covariance matrix of the standardized regression coefficients (i.e., \mjeqn{\mbox{Var}[b]}{Var[b]}) is then computed as a function of \code{V} as described in Becker (1992) using the multivariate delta method. The standard errors of the standardized regression coefficients are then given by the square root of the diagonal elements of \mjeqn{\mbox{Var}[b]}{Var[b]}. Test statistics (in this case, z-statistics) and the corresponding p-values can then be computed in the usual manner. In case \mjseqn{R} is the result of a meta-analysis of Fisher r-to-z transformed correlation coefficients (and hence \code{V} is then the corresponding variance-covariance matrix of these pooled transformed coefficients), one should set argument \code{ztor=TRUE}, so that the appropriate back-transformation is then applied to \code{R} (and \code{V}) within the function. } \value{ An object of class \code{"matreg"}. The object is a list containing the following components: \item{tab}{a data frame with the estimated model coefficients, standard errors, test statistics, degrees of freedom (only for t-tests), p-values, and lower/upper confidence interval bounds.} \item{vb}{the variance-covariance matrix of the estimated model coefficients.} \item{\dots}{some additional elements/values.} The results are formatted and printed with the \code{print.matreg} function. } \note{ Only the lower triangular part of \code{R} (and \code{V} if it is specified) is used in the computations. If \mjeqn{R_{x,x}}{R[x,x]} is not invertible, an error will be issued. In this case, one can set argument \code{nearPD=TRUE}, in which case the \code{\link[Matrix]{nearPD}} function from the \href{https://cran.r-project.org/package=Matrix}{Matrix} package will be used to find the nearest positive semi-definite matrix, which should be invertible. The results should be treated with caution when this is done.
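As a brief sketch (assuming \code{R} is the correlation matrix of interest and \code{V} the variance-covariance matrix of its lower triangular elements; both are placeholder objects), such a call could take the form \code{matreg(1, 2:4, R=R, V=V, nearPD=TRUE)}.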
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Becker, B. J. (1992). Using results from replicated studies to estimate linear models. \emph{Journal of Educational Statistics}, \bold{17}(4), 341--362. \verb{https://doi.org/10.3102/10769986017004341} Becker, B. J. (1995). Corrections to "Using results from replicated studies to estimate linear models". \emph{Journal of Educational and Behavioral Statistics}, \bold{20}(1), 100--102. \verb{https://doi.org/10.3102/10769986020001100} } \seealso{ \code{\link{rcalc}}, \code{\link{rma.mv}} } \examples{ ### copy data into 'dat' dat <- dat.craft2003 ### construct dataset and var-cov matrix of the correlations tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat) V <- tmp$V dat <- tmp$dat ### turn var1.var2 into a factor with the desired order of levels dat$var1.var2 <- factor(dat$var1.var2, levels=c("acog.perf", "asom.perf", "conf.perf", "acog.asom", "acog.conf", "asom.conf")) ### multivariate random-effects model res <- rma.mv(yi, V, mods = ~ var1.var2 - 1, random = ~ var1.var2 | study, struct="UN", data=dat) res ### restructure estimated mean correlations into a 4x4 matrix R <- matrix(NA, nrow=4, ncol=4) R[lower.tri(R)] <- coef(res) rownames(R) <- colnames(R) <- c("perf", "acog", "asom", "conf") round(R, digits=3) ### check that order in vcov(res) corresponds to order in R round(vcov(res), digits=4) ### fit regression model with 'perf' as outcome and 'acog', 'asom', and 'conf' as predictors fit <- matreg(1, 2:4, R=R, V=vcov(res)) fit \dontrun{ ### repeat the above but with r-to-z transformed correlations dat <- dat.craft2003 tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat, rtoz=TRUE) V <- tmp$V dat <- tmp$dat dat$var1.var2 <- factor(dat$var1.var2, levels=c("acog.perf", "asom.perf", "conf.perf", "acog.asom", "acog.conf", "asom.conf")) res <- rma.mv(yi, V, mods = ~ var1.var2 - 1, random = ~ var1.var2 | study, struct="UN", data=dat) R <- matrix(NA, nrow=4, ncol=4) R[lower.tri(R)] <- coef(res) rownames(R) <- colnames(R) <- c("perf", "acog", "asom", "conf") fit <- matreg(1, 2:4, R=R, V=vcov(res), ztor=TRUE) fit} } \keyword{models} metafor/man/profile.rma.Rd0000644000176200001440000003050414055736403015200 0ustar liggesusers\name{profile.rma} \alias{profile} \alias{profile.rma.uni} \alias{profile.rma.mv} \alias{profile.rma.uni.selmodel} \alias{profile.rma.ls} \alias{print.profile.rma} \alias{plot.profile.rma} \title{Profile Plots for 'rma' Objects} \description{ Function to profile the (restricted) log-likelihood for objects of class \code{"rma.uni"}, \code{"rma.mv"}, \code{"rma.uni.selmodel"}, and \code{"rma.ls"}. 
\loadmathjax } \usage{ \method{profile}{rma.uni}(fitted, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, \dots) \method{profile}{rma.mv}(fitted, sigma2, tau2, rho, gamma2, phi, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, \dots) \method{profile}{rma.uni.selmodel}(fitted, tau2, delta, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, \dots) \method{profile}{rma.ls}(fitted, alpha, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, \dots) \method{print}{profile.rma}(x, \dots) \method{plot}{profile.rma}(x, xlim, ylim, pch=19, xlab, ylab, main, cline=FALSE, \dots) } \arguments{ \item{fitted}{an object of class \code{"rma.uni"}, \code{"rma.mv"}, \code{"rma.uni.selmodel"}, or \code{"rma.ls"}.} \item{x}{an object of class \code{"profile.rma"} (for \code{plot} and \code{print}).} \item{sigma2}{optional integer to specify for which \mjseqn{\sigma^2} value the likelihood should be profiled.} \item{tau2}{optional integer to specify for which \mjseqn{\tau^2} value the likelihood should be profiled.} \item{rho}{optional integer to specify for which \mjseqn{\rho} value the likelihood should be profiled.} \item{gamma2}{optional integer to specify for which \mjseqn{\gamma^2} value the likelihood should be profiled.} \item{phi}{optional integer to specify for which \mjseqn{\phi} value the likelihood should be profiled.} \item{delta}{optional integer to specify for which \mjseqn{\delta} value the likelihood should be profiled.} \item{alpha}{optional integer to specify for which \mjseqn{\alpha} value the likelihood should be profiled.} \item{xlim}{optional vector to specify the lower and upper limit of the parameter over which the profiling should be done. If unspecified, the function tries to set these limits automatically.} \item{ylim}{optional vector to specify the y-axis limits when plotting the profiled likelihood. If unspecified, the function tries to set these limits automatically.} \item{steps}{number of points between \code{xlim[1]} and \code{xlim[2]} (inclusive) for which the likelihood should be evaluated (the default is 20).} \item{lltol}{numerical tolerance used when comparing values of the profiled log-likelihood with the log-likelihood of the fitted model (the default is 1e-03).} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{TRUE}).} \item{parallel}{character string to specify whether parallel processing should be used (the default is \code{"no"}). For parallel processing, set to either \code{"snow"} or \code{"multicore"}. See \sQuote{Details}.} \item{ncpus}{integer to specify the number of processes to use in the parallel processing.} \item{cl}{optional cluster to use if \code{parallel="snow"}. If not supplied, a cluster on the local machine is created for the duration of the call.} \item{plot}{logical to specify whether the profile plot should be drawn after profiling is finished (the default is \code{TRUE}).} \item{pch}{plotting symbol to use. By default, a filled circle is used. See \code{\link{points}} for other options.} \item{cline}{logical to specify whether a horizontal reference line should be added to the plot that indicates the log-likelihood value corresponding to the 95\% profile confidence interval (the default is \code{FALSE}).} \item{xlab}{title for the x-axis. 
If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis. If unspecified, the function tries to set an appropriate axis title.} \item{main}{title for the plot. If unspecified, the function tries to set an appropriate title.} \item{\dots}{other arguments.} } \details{ The function fixes a particular parameter of the model and then computes the maximized (restricted) log-likelihood over the remaining parameters of the model. By doing this for a range of values for the parameter that was fixed, a profile of the (restricted) log-likelihood is constructed. The parameter(s) that can be profiled depend on the model object: \itemize{ \item For objects of class \code{"rma.uni"} obtained with the \code{\link{rma.uni}} function, the function profiles over parameter \mjseqn{\tau^2} (not for fixed-effects models). \item For objects of class \code{"rma.mv"} obtained with the \code{\link{rma.mv}} function, profiling is done by default over all (non-fixed) variance and correlation components of the model. Alternatively, one can use the \code{sigma2}, \code{tau2}, \code{rho}, \code{gamma2}, or \code{phi} arguments to specify over which parameter the profiling should be done. Only one of these arguments can be used at a time. A single integer is used to specify the number of the parameter. \item For selection model objects of class \code{"rma.uni.selmodel"} obtained with the \code{\link{selmodel.rma.uni}} function, profiling is done by default over \mjseqn{\tau^2} (for models where this is an estimated parameter) and all (non-fixed) selection model parameters. Alternatively, one can choose to profile only \mjseqn{\tau^2} by setting \code{tau2=TRUE} or one can select one of the selection model parameters to profile by specifying its number via the \code{delta} argument. \item For location-scale model objects of class \code{"rma.ls"} obtained with the \code{\link{rma.uni}} function, profiling is done by default over all (non-fixed) \mjseqn{\alpha} parameters that are part of the scale model. } A profile plot should show a single peak at the corresponding ML/REML estimate (assuming that the model was fitted with ML/REML estimation). The value of the parameter estimate is indicated by a dotted vertical line and its log-likelihood value by a dotted horizontal line. Hence, the intersection of these two lines should correspond to the peak. When profiling a variance component (or some other parameter that cannot be negative), the peak may be at zero (if this corresponds to the ML/REML estimate of the parameter). In this case, the profiled log-likelihood should be a monotonically decreasing function of the parameter. If the profiled log-likelihood has multiple peaks, this indicates that the likelihood surface is not unimodal. In such cases, the ML/REML estimate may correspond to a local optimum (when the intersection of the two dotted lines is not at the highest peak). If the profile is flat (over the entire parameter space or large portions of it), then this suggests that at least some of the parameters of the model are not identifiable (and the parameter estimates obtained are to some extent arbitrary). See Raue et al. (2009) for some further discussion of parameter identifiability (structurally and practically) and the use of profile likelihoods to check for this. The function checks whether any profiled log-likelihood value is actually larger than the log-likelihood of the fitted model (using a numerical tolerance of \code{lltol}). 
If so, a warning is issued as this might indicate that the optimizer did not identify the actual ML/REML estimate. Profiling requires repeatedly refitting the model, which can be slow when \mjseqn{k} is large and/or the model is complex (the latter especially applies to \code{"rma.mv"} objects and also to certain \code{"rma.uni.selmodel"} or \code{"rma.ls"} objects). On machines with multiple cores, one can usually speed things up by delegating the model fitting to separate worker processes, that is, by setting \code{parallel="snow"} or \code{parallel="multicore"} and \code{ncpus} to some value larger than 1. Parallel processing makes use of the \code{\link[parallel]{parallel}} package, using the \code{\link[parallel]{makePSOCKcluster}} and \code{\link[parallel]{parLapply}} functions when \code{parallel="snow"} or using \code{\link[parallel]{mclapply}} when \code{parallel="multicore"} (the latter only works on Unix/Linux-alikes). With \code{parallel::detectCores()}, one can check on the number of available cores on the local machine. } \value{ An object of class \code{"profile.rma"}. The object is a list (or list of lists) containing the following components: \item{sigma2}{values of \mjseqn{\sigma^2} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\sigma^2}).} \item{tau2}{values of \mjseqn{\tau^2} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\tau^2}).} \item{rho}{values of \mjseqn{\rho} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\rho}).} \item{gamma2}{values of \mjseqn{\gamma^2} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\gamma^2}).} \item{phi}{values of \mjseqn{\phi} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\phi}).} \item{delta}{values of \mjseqn{\delta} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\delta}).} \item{alpha}{values of \mjseqn{\alpha} over which the likelihood was profiled (only when profiling was actually done over \mjseqn{\alpha}).} \item{ll}{(restricted) log-likelihood at the corresponding parameter value.} \item{beta}{a matrix with the estimated model coefficients at the corresponding parameter value.} \item{ci.lb}{a matrix with the lower confidence interval bounds for the model coefficients at the corresponding parameter value.} \item{ci.ub}{a matrix with the upper confidence interval bounds for the model coefficients at the corresponding parameter value.} \item{\dots}{some additional elements/values.} Note that the list is returned invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Raue, A., Kreutz, C., Maiwald, T., Bachmann, J., Schilling, M., Klingmuller, U., & Timmer, J. (2009). Structural and practical identifiability analysis of partially observed dynamical models by exploiting the profile likelihood. \emph{Bioinformatics}, \bold{25}(15), 1923--1929. \verb{https://doi.org/10.1093/bioinformatics/btp358} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mv}}, \code{\link{selmodel.rma.uni}}, \code{\link{confint.rma.uni}}, \code{\link{confint.rma.mv}}, \code{\link{confint.rma.uni.selmodel}} } \examples{ ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model using rma.uni() res <- rma(yi, vi, data=dat) ### profile over tau^2 profile(res, progbar=FALSE) ### change data into long format dat.long <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### set levels of group variable ("exp" = experimental/vaccinated; "con" = control/non-vaccinated) levels(dat.long$group) <- c("exp", "con") ### set "con" to reference level dat.long$group <- relevel(dat.long$group, ref="con") ### calculate log odds and corresponding sampling variances dat.long <- escalc(measure="PLO", xi=out1, mi=out2, data=dat.long) ### fit bivariate random-effects model using rma.mv() res <- rma.mv(yi, vi, mods = ~ group, random = ~ group | study, struct="UN", data=dat.long) res ### profile over tau^2_1, tau^2_2, and rho ### note: for rho, adjust region over which profiling is done ('zoom in' on area around estimate) \dontrun{ par(mfrow=c(3,1)) profile(res, tau2=1) profile(res, tau2=2) profile(res, rho=1, xlim=c(.90, .98))} ### an example where the peak is at 0 dat <- escalc(measure="RD", n1i=n1i, n2i=n2i, ai=ai, ci=ci, data=dat.hine1989) res <- rma(yi, vi, data=dat) par(mfrow=c(1,1)) profile(res, progbar=FALSE) } \keyword{hplot} metafor/man/dat.nielweise2007.Rd0000644000176200001440000000545514055736403016035 0ustar liggesusers\name{dat.nielweise2007} \docType{data} \alias{dat.nielweise2007} \title{Studies on Anti-Infective-Treated Central Venous Catheters for Prevention of Catheter-Related Bloodstream Infections} \description{Results from 18 studies comparing the risk of catheter-related bloodstream infection when using anti-infective-treated versus standard catheters in the acute care setting.} \usage{dat.nielweise2007} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{author} \tab \code{character} \tab (first) author \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{ai} \tab \code{numeric} \tab number of CRBSIs in patients receiving an anti-infective catheter \cr \bold{n1i} \tab \code{numeric} \tab number of patients receiving an anti-infective catheter \cr \bold{ci} \tab \code{numeric} \tab number of CRBSIs in patients receiving a standard catheter \cr \bold{n2i} \tab \code{numeric} \tab number of patients receiving a standard catheter } } \details{ The use of a central venous catheter may lead to a catheter-related bloodstream infection (CRBSI), which in turn increases the risk of morbidity and mortality. Anti-infective-treated catheters have been developed that are meant to reduce the risk of CRBSIs. Niel-Weise et al. (2007) conducted a meta-analysis of studies comparing infection risk when using anti-infective-treated versus standard catheters in the acute care setting. The results from 18 such studies are included in this dataset. The dataset was used in the article by Stijnen et al. (2010) to illustrate various generalized linear mixed-effects models for the meta-analysis of proportions and odds ratios (see \sQuote{References}). } \source{ Niel-Weise, B. S., Stijnen, T., & van den Broek, P. J. (2007). 
Anti-infective-treated central venous catheters: A systematic review of randomized controlled trials. \emph{Intensive Care Medicine}, \bold{33}(12), 2058--2068. \verb{https://doi.org/10.1007/s00134-007-0897-3} } \references{ Stijnen, T., Hamza, T. H., & Ozdemir, P. (2010). Random effects meta-analysis of event outcome in the framework of the generalized linear mixed model with applications in sparse data. \emph{Statistics in Medicine}, \bold{29}(29), 3046--3067. \verb{https://doi.org/10.1002/sim.4040} } \examples{ ### copy data into 'dat' and examine data dat <- dat.nielweise2007 dat ### standard (inverse-variance) random-effects model res <- rma(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, drop00=TRUE) print(res, digits=3) predict(res, transf=exp, digits=2) ### random-effects conditional logistic model \dontrun{ res <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="CM.EL") print(res, digits=3) predict(res, transf=exp, digits=2)} } \keyword{datasets} metafor/man/llplot.Rd0000644000176200001440000001376714055736403014274 0ustar liggesusers\name{llplot} \alias{llplot} \title{Likelihood Plot of a Parameter Corresponding to an Effect Size or Outcome Measure} \description{ Function to plot the likelihood of a certain parameter corresponding to an effect size or outcome measure given the study data. \loadmathjax } \usage{ llplot(measure, yi, vi, sei, ai, bi, ci, di, n1i, n2i, data, subset, drop00=TRUE, xvals=1000, xlim, ylim, xlab, ylab, scale=TRUE, lty, lwd, col, level=99.99, refline=0, \dots) } \arguments{ \item{measure}{a character string to specify for which effect size or outcome measure the likelihoods should be calculated. See \sQuote{Details} for possible options and how the data should then be specified.} \item{yi}{vector with the observed effect sizes or outcomes.} \item{vi}{vector with the corresponding sampling variances.} \item{sei}{vector to specify the corresponding standard errors.} \item{ai}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper left cell).} \item{bi}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (upper right cell).} \item{ci}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower left cell).} \item{di}{vector to specify the \mjeqn{2 \times 2}{2x2} table frequencies (lower right cell).} \item{n1i}{vector to specify the group sizes or row totals (first group/row).} \item{n2i}{vector to specify the group sizes or row totals (second group/row).} \item{data}{optional data frame containing the variables given to the arguments above.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the plot.} \item{drop00}{logical to specify whether studies with no cases (or only cases) in both groups should be dropped. See \sQuote{Details}.} \item{xvals}{integer to specify for how many distinct values the likelihood should be evaluated.} \item{xlim}{x-axis limits. If unspecified, the function tries to set the x-axis limits to some sensible values.} \item{ylim}{y-axis limits. If unspecified, the function tries to set the y-axis limits to some sensible values.} \item{xlab}{title for the x-axis. If unspecified, the function tries to set an appropriate axis title.} \item{ylab}{title for the y-axis.
If unspecified, the function tries to set an appropriate axis title.} \item{scale}{logical to specify whether the likelihood values should be scaled, so that the total area under each curve is (approximately) equal to 1.} \item{lty}{the line types (either a single value or a vector of length \mjseqn{k}). If unspecified, the function sets the line types according to some characteristics of the likelihood function. See \sQuote{Details}.} \item{lwd}{the line widths (either a single value or a vector of length \mjseqn{k}). If unspecified, the function sets the widths according to the sampling variances (so that the line is thicker for more precise studies and vice-versa).} \item{col}{the line colors (either a single value or a vector of length \mjseqn{k}). If unspecified, the function uses various shades of gray according to the sampling variances (so that darker shades are used for more precise studies and vice-versa).} \item{level}{numeric value between 0 and 100 to specify the plotting limits for each likelihood line in terms of the confidence interval (the default is 99.99).} \item{refline}{numeric value to specify the location of the vertical \sQuote{reference} line (the default is 0). The line can be suppressed by setting this argument to \code{NA}.} \item{\dots}{other arguments.} } \details{ At the moment, the function only accepts \code{measure="GEN"} or \code{measure="OR"}. For \code{measure="GEN"}, one must specify arguments \code{yi} for the observed effect sizes or outcomes and \code{vi} for the corresponding sampling variances (instead of specifying \code{vi}, one can specify the standard errors via the \code{sei} argument). The function then plots the likelihood of the true effect size or outcome based on a normal sampling distribution with observed outcome as given by \code{yi} and variance as given by \code{vi} for each study. For \code{measure="OR"}, one must specify arguments \code{ai}, \code{bi}, \code{ci}, and \code{di}, which denote the cell frequencies of the \mjeqn{2 \times 2}{2x2} tables. Alternatively, one can specify \code{ai}, \code{ci}, \code{n1i}, and \code{n2i}. See the \code{\link{escalc}} function for more details. The function then plots the likelihood of the true log odds ratio based on the non-central hypergeometric distribution for each \mjeqn{2 \times 2}{2x2} table. Since studies with no cases (or only cases) in both groups have a flat likelihood and are not informative about the odds ratio, they are dropped by default (i.e., \code{drop00=TRUE}) and are hence not drawn (if \code{drop00=FALSE}, these likelihoods are indicated by dotted lines). For studies that have a single zero count, the MLE of the odds ratio is infinite and these likelihoods are indicated by dashed lines. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ van Houwelingen, H. C., Zwinderman, K. H., & Stijnen, T. (1993). A bivariate approach to meta-analysis. \emph{Statistics in Medicine}, \bold{12}(24), 2273--2284. \verb{https://doi.org/10.1002/sim.4780122405} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48.
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.glmm}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### draw likelihoods llplot(measure="GEN", yi=yi, vi=vi, data=dat, lwd=1, refline=NA, xlim=c(-3,2)) ### create plot (Figure 2 in van Houwelingen, Zwinderman, & Stijnen, 1993) llplot(measure="OR", ai=b.xci, n1i=nci, ci=b.xti, n2i=nti, data=dat.collins1985a, lwd=1, refline=NA, xlim=c(-4,4), drop00=FALSE) } \keyword{hplot} metafor/man/qqnorm.rma.Rd0000644000176200001440000001505714055736403015063 0ustar liggesusers\name{qqnorm.rma} \alias{qqnorm} \alias{qqnorm.rma.uni} \alias{qqnorm.rma.mh} \alias{qqnorm.rma.peto} \alias{qqnorm.rma.glmm} \alias{qqnorm.rma.mv} \title{Normal QQ Plots for 'rma' Objects} \description{ Function to create normal QQ plots for objects of class \code{"rma.uni"}, \code{"rma.mh"}, and \code{"rma.peto"}. \loadmathjax } \usage{ \method{qqnorm}{rma.uni}(y, type="rstandard", pch=19, envelope=TRUE, level=y$level, bonferroni=FALSE, reps=1000, smooth=TRUE, bass=0, label=FALSE, offset=0.3, pos=13, lty, \dots) \method{qqnorm}{rma.mh}(y, type="rstandard", pch=19, label=FALSE, offset=0.3, pos=13, \dots) \method{qqnorm}{rma.peto}(y, type="rstandard", pch=19, label=FALSE, offset=0.3, pos=13, \dots) \method{qqnorm}{rma.glmm}(y, \dots) \method{qqnorm}{rma.mv}(y, \dots) } \arguments{ \item{y}{an object of class \code{"rma.uni"}, \code{"rma.mh"}, or \code{"rma.peto"}. The method is not yet implemented for objects of class \code{"rma.glmm"} or \code{"rma.mv"}.} \item{type}{character string (either \code{"rstandard"} (default) or \code{"rstudent"}) to specify whether standardized residuals or studentized deleted residuals should be used in creating the plot. See \sQuote{Details}.} \item{pch}{plotting symbol to use for the observed outcomes. By default, a filled circle is used. See \code{\link{points}} for other options.} \item{envelope}{logical to specify whether a pseudo confidence envelope should be simulated and added to the plot (the default is \code{TRUE}). Only for objects of class \code{"rma.uni"}. See \sQuote{Details}.} \item{level}{numeric value between 0 and 100 to specify the level of the pseudo confidence envelope (the default is to take the value from the object).} \item{bonferroni}{logical to specify whether the bounds of the envelope should be Bonferroni corrected.} \item{reps}{numeric value to specify the number of iterations to use for simulating the pseudo confidence envelope (the default is 1000).} \item{smooth}{logical to specify whether the results from the simulation should be smoothed (the default is \code{TRUE}).} \item{bass}{numeric value that controls the degree of smoothing (the default is 0).} \item{label}{argument to control the labeling of the points (the default is \code{FALSE}). See \sQuote{Details}.} \item{offset}{argument to control the distance between the points and the corresponding labels.} \item{pos}{argument to control the position of the labels.} \item{lty}{optional character string to specify the line type for the diagonal line and the pseudo confidence envelope.
If unspecified, the function sets this to \code{c("solid", "dotted")} by default.} \item{\dots}{other arguments.} } \details{ The plot shows the theoretical quantiles of a normal distribution on the horizontal axis against the observed quantiles for either the standardized residuals (\code{type="rstandard"}, the default) or the externally standardized residuals (\code{type="rstudent"}) on the vertical axis (see \code{\link{residuals.rma}} for details on the definition of these residual types). For reference, a line is added to the plot with slope of 1, going through the (0,0) point. For objects of class \code{"rma.uni"}, it is also possible to add a pseudo confidence envelope to the plot. The envelope is created based on the quantiles of sets of pseudo residuals simulated from the given model (for details, see Cook & Weisberg, 1982). The number of sets simulated can be controlled with the \code{reps} argument. When \code{smooth=TRUE}, the simulated bounds are smoothed with Friedman's SuperSmoother (see \code{\link{supsmu}}). The \code{bass} argument can be set to a number between 0 and 10, with higher numbers indicating increasing smoothness. If \code{bonferroni=TRUE}, the envelope bounds are Bonferroni corrected, so that the envelope can be regarded as a confidence region for all \mjseqn{k} residuals simultaneously. The default however is \code{bonferroni=FALSE}, which makes the plot more sensitive to deviations from normality. With the \code{label} argument, one can control whether points in the plot will be labeled (e.g., to identify outliers). If \code{label="all"} (or \code{label=TRUE}), all points in the plot will be labeled. If \code{label="out"}, points falling outside of the confidence envelope will be labeled (only available for objects of class \code{"rma.uni"}). Finally, one can also set this argument to a numeric value (between 1 and \mjseqn{k}), indicating how many of the most extreme points should be labeled (for example, with \code{label=1} only the most extreme point would be labeled, while with \code{label=3}, the most extreme, and the second and third most extreme points would be labeled). With the \code{offset} argument, one can adjust the distance between the labels and the corresponding points. The \code{pos} argument is the position specifier for the labels (\code{1}, \code{2}, \code{3}, and \code{4}, respectively indicate positions below, to the left of, above, and to the right of the points; \code{13} places the labels below the points for points that fall below the reference line and above otherwise; \code{24} places the labels to the left of the points for points that fall above the reference line and to the right otherwise). } \value{ A list with components: \item{x}{the x-axis coordinates of the points that were plotted.} \item{y}{the y-axis coordinates of the points that were plotted.} Note that the list is returned invisibly. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Cook, R. D., & Weisberg, S. (1982). \emph{Residuals and influence in regression}. London: Chapman and Hall. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Wang, M. C., & Bushman, B. J. (1998). Using the normal quantile plot to explore meta-analytic data sets. \emph{Psychological Methods}, \bold{3}(1), 46--54. 
\verb{https://doi.org/10.1037/1082-989X.3.1.46} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res <- rma(yi, vi, data=dat) ### draw QQ plot qqnorm(res) ### fit mixed-effects model with absolute latitude as moderator res <- rma(yi, vi, mods = ~ ablat, data=dat) ### draw QQ plot qqnorm(res) } \keyword{hplot} metafor/man/rma.uni.Rd0000644000176200001440000012131114055736403014330 0ustar liggesusers\name{rma.uni} \alias{rma.uni} \alias{rma} \title{Meta-Analysis via Linear (Mixed-Effects) Models} \description{ Function to fit the meta-analytic fixed- and random/mixed-effects models with or without moderators via linear (mixed-effects) models. See the documentation of the \pkg{\link{metafor-package}} for more details on these models. \loadmathjax } \usage{ rma.uni(yi, vi, sei, weights, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, r2i, ni, mods, scale, measure="GEN", intercept=TRUE, data, slab, subset, add=1/2, to="only0", drop00=FALSE, vtype="LS", method="REML", weighted=TRUE, test="z", level=95, digits, btt, att, tau2, verbose=FALSE, control, \dots) rma(yi, vi, sei, weights, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, r2i, ni, mods, scale, measure="GEN", intercept=TRUE, data, slab, subset, add=1/2, to="only0", drop00=FALSE, vtype="LS", method="REML", weighted=TRUE, test="z", level=95, digits, btt, att, tau2, verbose=FALSE, control, \dots) } \arguments{ \item{yi}{vector of length \mjseqn{k} with the observed effect sizes or outcomes. See \sQuote{Details}.} \item{vi}{vector of length \mjseqn{k} with the corresponding sampling variances. See \sQuote{Details}.} \item{sei}{vector of length \mjseqn{k} with the corresponding standard errors (only relevant when not using \code{vi}). See \sQuote{Details}.} \item{weights}{optional argument to specify a vector of length \mjseqn{k} with user-defined weights. 
See \sQuote{Details}.} \item{ai}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{bi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ci}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{di}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{n1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{n2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{x1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{x2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{t1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{t2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{m1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{m2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{sd1i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{sd2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{xi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{mi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ri}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ti}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{sdi}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{r2i}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{ni}{see below and the documentation of the \code{\link{escalc}} function for more details.} \item{mods}{optional argument to include one or more moderators in the model. A single moderator can be given as a vector of length \mjseqn{k} specifying the values of the moderator. Multiple moderators are specified by giving a matrix with \mjseqn{k} rows and as many columns as there are moderator variables. Alternatively, a model \code{\link{formula}} can be used to specify the model. See \sQuote{Details}.} \item{scale}{optional argument to include one or more predictors for the scale part in a location-scale model. See \sQuote{Details}.} \item{measure}{character string to specify the type of data supplied to the function. When \code{measure="GEN"} (default), the observed effect sizes or outcomes and corresponding sampling variances (or standard errors) should be supplied to the function via the \code{yi}, \code{vi}, and \code{sei} arguments (only one of the two, \code{vi} or \code{sei}, needs to be specified). Alternatively, one can set \code{measure} to one of the effect size or outcome measures described under the documentation for the \code{\link{escalc}} function in which case one must specify the required data via the appropriate arguments.} \item{intercept}{logical to specify whether an intercept should be added to the model (the default is \code{TRUE}). 
Ignored when \code{mods} is a formula.} \item{data}{optional data frame containing the data supplied to the function.} \item{slab}{optional vector with labels for the \mjseqn{k} studies.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be used for the analysis.} \item{add}{see the documentation of the \code{\link{escalc}} function.} \item{to}{see the documentation of the \code{\link{escalc}} function.} \item{drop00}{see the documentation of the \code{\link{escalc}} function.} \item{vtype}{see the documentation of the \code{\link{escalc}} function.} \item{method}{character string to specify whether a fixed- or a random/mixed-effects model should be fitted. A fixed-effects model (with or without moderators) is fitted when using \code{method="FE"}. Random/mixed-effects models are fitted by setting \code{method} equal to one of the following: \code{"DL"}, \code{"HE"}, \code{"SJ"}, \code{"ML"}, \code{"REML"}, \code{"EB"}, \code{"HS"}, \code{"HSk"}, \code{"PM"}, or \code{"GENQ"}. Default is \code{"REML"}. See \sQuote{Details}.} \item{weighted}{logical to specify whether weighted (default) or unweighted estimation should be used to fit the model.} \item{test}{character string to specify how test statistics and confidence intervals for the fixed effects should be computed. By default (\code{test="z"}), Wald-type tests and CIs are obtained, which are based on a standard normal distribution. When \code{test="t"}, a t-distribution is used instead. When \code{test="knha"}, the method by Knapp and Hartung (2003) is used. See \sQuote{Details}.} \item{level}{numeric value between 0 and 100 to specify the confidence interval level (the default is 95).} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} \item{btt}{optional vector of indices to specify which coefficients to include in the omnibus test of moderators. Can also be a string to grep for. See \sQuote{Details}.} \item{att}{optional vector of indices to specify which scale coefficients to include in the omnibus test. Only relevant for location-scale models. See \sQuote{Details}.} \item{tau2}{optional numeric value to specify the amount of (residual) heterogeneity in a random- or mixed-effects model (instead of estimating it). Useful for sensitivity analyses (e.g., for plotting results as a function of \mjseqn{\tau^2}). When unspecified, the value of \mjseqn{\tau^2} is estimated from the data.} \item{verbose}{logical to specify whether output should be generated on the progress of the model fitting (the default is \code{FALSE}). Can also be an integer. Values > 1 generate more verbose output. See \sQuote{Note}.} \item{control}{optional list of control values for the iterative estimation algorithms. If unspecified, default values are defined inside the function. See \sQuote{Note}.} \item{\dots}{additional arguments.} } \details{ \bold{Specifying the Data} The function can be used in conjunction with any of the usual effect size or outcome measures used in meta-analyses (e.g., log risk ratios, log odds ratios, risk differences, mean differences, standardized mean differences, log transformed ratios of means, raw correlation coefficients, correlation coefficients transformed with Fisher's r-to-z transformation, and so on). Simply specify the observed effect sizes or outcomes via the \code{yi} argument and the corresponding sampling variances via the \code{vi} argument.
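For instance, a minimal sketch of this basic usage (based on the \code{dat.bcg} dataset and the \code{\link{escalc}} call used in the examples below): \preformatted{### compute log risk ratios and sampling variances, then supply them via 'yi' and 'vi'
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
rma(yi, vi, data=dat)}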
Instead of specifying \code{vi}, one can specify the standard errors (the square root of the sampling variances) via the \code{sei} argument. The \code{\link{escalc}} function can be used to compute a wide variety of effect size or outcome measures (and the corresponding sampling variances) based on summary statistics. Alternatively, the function can automatically calculate the values of a chosen effect size or outcome measure (and the corresponding sampling variances) when supplied with the necessary data. The \code{\link{escalc}} function describes which effect size or outcome measures are currently implemented and what data/arguments should then be specified/used. The \code{measure} argument should then be set to the desired effect size or outcome measure. \bold{Specifying the Model} The function can be used to fit fixed- and random/mixed-effects models, as well as meta-regression models including moderators (the difference between the various models is described in detail on the introductory \pkg{\link{metafor-package}} help page). Assuming the observed effect sizes or outcomes and corresponding sampling variances are supplied via \code{yi} and \code{vi}, a \emph{fixed-effects model} can be fitted with \code{rma(yi, vi, method="FE")}. Weighted estimation (with inverse-variance weights) is used by default. User-defined weights can be supplied via the \code{weights} argument. Unweighted estimation can be used by setting \code{weighted=FALSE} (which is the same as setting the weights equal to a constant). A \emph{random-effects model} can be fitted with the same code but setting the \code{method} argument to one of the various estimators for the amount of heterogeneity: \itemize{ \item \code{method="DL"} = DerSimonian-Laird estimator, \item \code{method="HE"} = Hedges estimator, \item \code{method="HS"} = Hunter-Schmidt estimator, \item \code{method="HSk"} = Hunter-Schmidt estimator with a small sample-size correction, \item \code{method="SJ"} = Sidik-Jonkman estimator, \item \code{method="ML"} = maximum-likelihood estimator, \item \code{method="REML"} = restricted maximum-likelihood estimator, \item \code{method="EB"} = empirical Bayes estimator, \item \code{method="PM"} = Paule-Mandel estimator, \item \code{method="GENQ"} = generalized Q-statistic estimator. } For a description of the various estimators, see Brannick et al. (2019), DerSimonian and Kacker (2007), Raudenbush (2009), Viechtbauer (2005), and Viechtbauer et al. (2015). Note that the Hedges estimator is also called the \sQuote{variance component estimator} or \sQuote{Cochran estimator}, the Sidik-Jonkman estimator is also called the \sQuote{model error variance estimator}, and the empirical Bayes estimator is actually identical to the Paule-Mandel estimator (Paule & Mandel, 1982). Finally, the generalized Q-statistic estimator is a general method-of-moments estimator (DerSimonian & Kacker, 2007) requiring the specification of weights (the HE and DL estimators are just special cases with equal and inverse variance weights, respectively). One or more moderators can be included in these models via the \code{mods} argument. A single moderator can be given as a (row or column) vector of length \mjseqn{k} specifying the values of the moderator. 
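For instance (a minimal sketch; \code{ablat} is one of the moderators in the \code{dat.bcg}-based example data used below): \preformatted{### single moderator supplied as a vector
rma(yi, vi, mods = ablat, data=dat)}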
Multiple moderators are specified by giving an appropriate model matrix (i.e., \mjseqn{X}) with \mjseqn{k} rows and as many columns as there are moderator variables (e.g., \code{mods = cbind(mod1, mod2, mod3)}, where \code{mod1}, \code{mod2}, and \code{mod3} correspond to the names of the variables for three moderator variables). The intercept is added to the model matrix by default unless \code{intercept=FALSE}. Alternatively, one can use standard \code{\link{formula}} syntax to specify the model. In this case, the \code{mods} argument should be set equal to a one-sided formula of the form \code{mods = ~ model} (e.g., \code{mods = ~ mod1 + mod2 + mod3}). Interactions, polynomial terms, and factors can be easily added to the model in this manner. When specifying a model formula via the \code{mods} argument, the \code{intercept} argument is ignored. Instead, the inclusion/exclusion of the intercept is controlled by the specified formula (e.g., \code{mods = ~ mod1 + mod2 + mod3 - 1} would lead to the removal of the intercept). When the observed effect sizes or outcomes and corresponding sampling variances are supplied via the \code{yi} and \code{vi} (or \code{sei}) arguments, one can also specify moderators via the \code{yi} argument (e.g., \code{rma(yi ~ mod1 + mod2 + mod3, vi)}). In that case, the \code{mods} argument is ignored and the inclusion/exclusion of the intercept again is controlled by the specified formula. \bold{Omnibus Test of Moderators} For models including moderators, an omnibus test of all model coefficients is conducted that excludes the intercept (the first coefficient) if it is included in the model. If no intercept is included in the model, then the omnibus test includes all of the coefficients in the model including the first. Alternatively, one can manually specify the indices of the coefficients to test via the \code{btt} argument. For example, with \code{btt=c(3,4)}, only the third and fourth coefficient from the model would be included in the test (if an intercept is included in the model, then it corresponds to the first coefficient in the model). Instead of specifying the coefficient numbers, one can specify a string for \code{btt}. In that case, \code{\link{grep}} will be used to search for all coefficient names that match the string. The omnibus test is called the \mjseqn{Q_M}-test and follows, under the assumptions of the model, a chi-square distribution with \mjseqn{m} degrees of freedom (with \mjseqn{m} denoting the number of coefficients tested) under the null hypothesis (that the true value of all coefficients tested is equal to 0). \bold{Categorical Moderators} Categorical moderator variables can be included in the model via the \code{mods} argument in the same way that appropriately (dummy) coded categorical independent variables can be included in linear models. One can either do the dummy coding manually or use a model formula together with the \code{\link{factor}} function to let \R handle the coding automatically (note that string/character variables in a model formula are automatically converted to factors). An example to illustrate these different approaches is provided below. \bold{Tests and Confidence Intervals} By default, tests of individual coefficients in the model (and the corresponding confidence intervals) are based on a standard normal distribution, while the omnibus test is based on a chi-square distribution (see above). 
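For instance (a minimal sketch, using the \code{dat} object from the examples below), the default Wald-type tests can be contrasted with the adjusted tests described next: \preformatted{### z-/chi-square tests (the default) versus the Knapp-Hartung adjustment
rma(yi, vi, mods = ~ ablat + year, data=dat)
rma(yi, vi, mods = ~ ablat + year, data=dat, test="knha")}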
As an alternative, one can set \code{test="t"}, in which case tests of individual coefficients and confidence intervals are based on a t-distribution with \mjseqn{k-p} degrees of freedom, while the omnibus test statistic then uses an F-distribution with \mjseqn{m} and \mjseqn{k-p} degrees of freedom (with \mjseqn{k} denoting the total number of estimates included in the analysis and \mjseqn{p} the total number of model coefficients including the intercept if it is present). Finally, when \code{test="knha"}, the Knapp and Hartung (2003) method is used, which applies an adjustment to the standard errors of the estimated coefficients (to account for the uncertainty in the estimate of the amount of (residual) heterogeneity) and uses t- and F-distributions as described above. \bold{Test for (Residual) Heterogeneity} A test for (residual) heterogeneity is automatically carried out by the function. Without moderators in the model, this is simply Cochran's \mjseqn{Q}-test (Cochran, 1954), which tests whether the variability in the observed effect sizes or outcomes is larger than would be expected based on sampling variability alone. A significant test suggests that the true effects/outcomes are heterogeneous. When moderators are included in the model, this is the \mjseqn{Q_E}-test for residual heterogeneity, which tests whether the variability in the observed effect sizes or outcomes not accounted for by the moderators included in the model is larger than would be expected based on sampling variability alone. \bold{Location-Scale Models} The function can also be used to fit so-called \sQuote{location-scale models}. In such models, one can specify not only predictors for the size of the average true outcome (i.e., for their \sQuote{location}), but also predictors for the amount of heterogeneity in the outcomes (i.e., their \sQuote{scale}). The model is then given by \mjdeqn{y_i = \beta_0 + \beta_1 x_{i1} + \beta_2 x_{i2} + \ldots + \beta_{p'} x_{ip'} + u_i + \epsilon_i,}{y_i = \beta_0 + \beta_1 x_i1 + \beta_2 x_i2 + \ldots + \beta_p' x_ip' + u_i + \epsilon_i,} \mjdeqn{u_i \sim N(0, \tau_i^2), \; \epsilon_i \sim N(0, v_i),}{u_i ~ N(0, tau_i^2), \epsilon_i \sim N(0, v_i),} \mjdeqn{\ln(\tau_i^2) = \alpha_0 + \alpha_1 z_{i1} + \alpha_2 z_{i2} + \ldots + \alpha_{q'} z_{iq'},}{log(tau^2) = \alpha_0 + \alpha z_i1 + \alpha z_i2 + \ldots + \alpha_q' z_iq',} where \mjeqn{x_{i1}, \ldots, x_{ip'}}{x_i1, \ldots, x_ip'} are the values of the \mjseqn{p'} predictor variables that may be related to the size of the average true outcome (letting \mjseqn{p = p' + 1} denote the total number of location coefficients in the model including the model intercept \mjseqn{\beta_0}) and \mjeqn{z_{i1}, \ldots, z_{iq'}}{z_i1, \ldots, z_iq'} are the values of the \mjseqn{q'} scale variables that may be related to the amount of heterogeneity in the outcomes (letting \mjseqn{q = q' + 1} denote the total number of scale coefficients in the model including the model intercept \mjseqn{\alpha_0}). Location variables can be specified via the \code{mods} argument as described above (e.g., \code{mods = ~ mod1 + mod2 + mod3}). Scale variables can be specified via the \code{scale} argument (e.g., \code{scale = ~ var1 + var2 + var3}). A log link is used for specifying the relationship between the scale variables and the amount of heterogeneity so that \mjseqn{\tau_i^2} is guaranteed to be non-negative. 
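For instance, a minimal sketch of such a model (using the variable names from the location-scale example below, where \code{ni100} serves as both a location and a scale predictor): \preformatted{### location-scale model: ni100 predicts the average outcome and (log-linearly) tau_i^2
rma(yi, vi, mods = ~ ni100, scale = ~ ni100, data=dat)}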
Estimates of the location and scale coefficients can be obtained with either maximum likelihood (\code{method="ML"}) or restricted maximum likelihood (\code{method="REML"}) estimation. An omnibus test of the scale coefficients is conducted as described above (where the \code{att} argument can be used to specify which coefficients to include in the test). } \value{ An object of class \code{c("rma.uni","rma")}. The object is a list containing the following components: \item{beta}{estimated coefficients of the model.} \item{se}{standard errors of the coefficients.} \item{zval}{test statistics of the coefficients.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bound of the confidence intervals for the coefficients.} \item{ci.ub}{upper bound of the confidence intervals for the coefficients.} \item{vb}{variance-covariance matrix of the estimated coefficients.} \item{tau2}{estimated amount of (residual) heterogeneity. Always \code{0} when \code{method="FE"}.} \item{se.tau2}{standard error of the estimated amount of (residual) heterogeneity.} \item{k}{number of studies included in the analysis.} \item{p}{number of coefficients in the model (including the intercept).} \item{m}{number of coefficients included in the omnibus test of moderators.} \item{QE}{test statistic of the test for (residual) heterogeneity.} \item{QEp}{corresponding p-value.} \item{QM}{test statistic of the omnibus test of moderators.} \item{QMp}{corresponding p-value.} \item{I2}{value of \mjseqn{I^2}. See \code{\link{print.rma.uni}} for more details.} \item{H2}{value of \mjseqn{H^2}. See \code{\link{print.rma.uni}} for more details.} \item{R2}{value of \mjseqn{R^2}. See \code{\link{print.rma.uni}} for more details.} \item{int.only}{logical that indicates whether the model is an intercept-only model.} \item{yi, vi, X}{the vector of outcomes, the corresponding sampling variances, and the model matrix.} \item{fit.stats}{a list with the log-likelihood, deviance, AIC, BIC, and AICc values under the unrestricted and restricted likelihood.} \item{\dots}{some additional elements/values.} For location-scale models, the object is of class \code{c("rma.ls","rma.uni","rma")} and includes the following components in addition to the ones listed above: \item{alpha}{estimated scale coefficients of the model.} \item{se.alpha}{standard errors of the coefficients.} \item{zval.alpha}{test statistics of the coefficients.} \item{pval.alpha}{corresponding p-values.} \item{ci.lb.alpha}{lower bound of the confidence intervals for the coefficients.} \item{ci.ub.alpha}{upper bound of the confidence intervals for the coefficients.} \item{va}{variance-covariance matrix of the estimated coefficients.} \item{tau2}{as above, but now a vector of values.} \item{q}{number of scale coefficients in the model (including the intercept).} \item{QS}{test statistic of the omnibus test of the scale coefficients.} \item{QSp}{corresponding p-value.} \item{\dots}{some additional elements/values.} } \section{Methods}{ The results of the fitted model are formatted and printed with the \code{\link{print.rma.uni}} function. If fit statistics should also be given, use \code{\link{summary.rma}} (or use the \code{\link{fitstats.rma}} function to extract them). Full versus reduced model comparisons in terms of fit statistics and likelihoods can be obtained with \code{\link{anova.rma}}. Wald-type tests for sets of model coefficients or linear combinations thereof can be obtained with the same function. 
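For instance, a full versus reduced model comparison might look as follows (a sketch; likelihood ratio tests of models differing in their fixed effects should be based on \code{method="ML"} fits): \preformatted{### compare models with and without the 'ablat' moderator
res0 <- rma(yi, vi, data=dat, method="ML")
res1 <- rma(yi, vi, mods = ~ ablat, data=dat, method="ML")
anova(res0, res1)}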
Permutation tests for the model coefficient(s) can be obtained with \code{\link{permutest.rma.uni}}. Tests and confidence intervals based on (cluster) robust methods can be obtained with \code{\link{robust.rma.uni}}. Predicted/fitted values can be obtained with \code{\link{predict.rma}} and \code{\link{fitted.rma}}. For best linear unbiased predictions, see \code{\link{blup.rma.uni}} and \code{\link{ranef.rma.uni}}. The \code{\link{residuals.rma}}, \code{\link{rstandard.rma.uni}}, and \code{\link{rstudent.rma.uni}} functions extract raw and standardized residuals. Additional case diagnostics (e.g., to determine influential studies) can be obtained with the \code{\link{influence.rma.uni}} function. For models without moderators, leave-one-out diagnostics can also be obtained with \code{\link{leave1out.rma.uni}}. For models with moderators, variance inflation factors can be obtained with \code{\link{vif.rma}}. A confidence interval for the amount of (residual) heterogeneity in the random/mixed-effects model can be obtained with \code{\link{confint.rma.uni}}. Forest, funnel, radial, \enc{L'Abbé}{L'Abbe}, and Baujat plots can be obtained with \code{\link{forest.rma}}, \code{\link{funnel.rma}}, \code{\link{radial.rma}}, \code{\link{labbe.rma}}, and \code{\link{baujat.rma}} (radial and \enc{L'Abbé}{L'Abbe} plots only for models without moderators). The \code{\link{qqnorm.rma.uni}} function provides normal QQ plots of the standardized residuals. One can also just call \code{\link{plot.rma.uni}} on the fitted model object to obtain various plots at once. For random/mixed-effects models, the \code{\link{profile.rma.uni}} function can be used to obtain a plot of the (restricted) log-likelihood as a function of \mjseqn{\tau^2}. For models with moderators, \code{\link{regplot.rma}} draws scatter plots / bubble plots, showing the (marginal) relationship between the observed outcomes and a selected moderator from the model. Tests for funnel plot asymmetry (which may be indicative of publication bias) can be obtained with \code{\link{ranktest.rma}} and \code{\link{regtest.rma}}. For models without moderators, the \code{\link{trimfill.rma.uni}} method can be used to carry out a trim and fill analysis and \code{\link{hc.rma.uni}} provides a random-effects model analysis that is more robust to publication bias (based on the method by Henmi & Copas, 2010). The test of \sQuote{excess significance} can be carried out with the \code{\link{tes.rma}} function. Selection models can be fitted with the \code{\link{selmodel}} function. For models without moderators, a cumulative meta-analysis (i.e., adding one observation at a time) can be obtained with \code{\link{cumul.rma.uni}}. Other extractor functions include \code{\link{coef.rma}}, \code{\link{vcov.rma}}, \code{\link{logLik.rma}}, \code{\link{deviance.rma}}, \code{\link{AIC.rma}}, \code{\link{BIC.rma}}, \code{\link{hatvalues.rma.uni}}, and \code{\link{weights.rma.uni}}. } \note{ While the HS, HSk, HE, DL, SJ, and GENQ estimators of \mjseqn{\tau^2} are based on closed-form solutions, the ML, REML, and EB estimators must be obtained iteratively. For this, the function makes use of the Fisher scoring algorithm, which is robust to poor starting values and usually converges quickly (Harville, 1977; Jennrich & Sampson, 1976). 
By default, the starting value is set equal to the value of the Hedges (HE) estimator and the algorithm terminates when the change in the estimated value of \mjseqn{\tau^2} is smaller than \mjeqn{10^{-5}}{10^(-5)} from one iteration to the next. The maximum number of iterations is 100 by default (which should be sufficient in most cases). Information on the progress of the algorithm can be obtained by setting \code{verbose=TRUE}. One can also set \code{verbose} to an integer (\code{verbose=2} yields even more information and \code{verbose=3} also sets \code{options(warn=1)} temporarily). A different starting value, threshold, and maximum number of iterations can be specified via the \code{control} argument by setting \code{control=list(tau2.init=value, threshold=value, maxiter=value)}. The step length of the Fisher scoring algorithm can also be adjusted by a desired factor with \code{control=list(stepadj=value)} (values below 1 will reduce the step length). If using \code{verbose=TRUE} shows the estimate jumping around erratically (or cycling through a few values), decreasing the step length (and increasing the maximum number of iterations) can often help with convergence (e.g., \code{control=list(stepadj=0.5, maxiter=1000)}). The PM estimator also involves an iterative algorithm, which makes use of the \code{\link{uniroot}} function. By default, the desired accuracy (\code{tol}) is set equal to \code{.Machine$double.eps^0.25} and the maximum number of iterations (\code{maxiter}) to \code{100} (as above). The upper bound of the interval searched (\code{tau2.max}) is set to 100 (which should be large enough for most cases). These values can be adjusted with \code{control=list(tol=value, maxiter=value, tau2.max=value)}. All of the heterogeneity estimators except SJ can in principle yield negative estimates for the amount of (residual) heterogeneity. However, negative estimates of \mjseqn{\tau^2} are outside of the parameter space. For the HS, HSk, HE, DL, and GENQ estimators, negative estimates are therefore truncated to zero. For the ML, REML, and EB estimators, the Fisher scoring algorithm makes use of step halving (Jennrich & Sampson, 1976) to guarantee a non-negative estimate. Finally, for the PM estimator, the lower bound of the interval searched is set to zero by default. For those brave enough to step into risky territory, there is the option to set the lower bound for all these estimators to some other value besides zero (even a negative one) with \code{control=list(tau2.min=value)}, but the lowest value permitted is \code{-min(vi)} (to ensure that the marginal variances are always non-negative). The Hunter-Schmidt estimator for the amount of heterogeneity is defined in Hunter and Schmidt (1990) only in the context of the random-effects model when analyzing correlation coefficients. A general version of this estimator for random- and mixed-effects models not specific to any particular outcome measure is described in Viechtbauer (2005) and Viechtbauer et al. (2015) and is implemented here. The Sidik-Jonkman estimator starts with a crude estimate of \mjseqn{\tau^2}, which is then updated as described in Sidik and Jonkman (2005b, 2007). If, instead of the crude estimate, one wants to use a better a priori estimate, one can do so by passing this value via \code{control=list(tau2.init=value)}. Outcomes with non-positive sampling variances are problematic. If a sampling variance is equal to zero, then its weight will be \mjseqn{1/0} for fixed-effects models when using weighted estimation.
Switching to unweighted estimation is a possible solution then. For random/mixed-effects model, some estimators of \mjseqn{\tau^2} are undefined when there is at least one sampling variance equal to zero. Other estimators may work, but it may still be necessary to switch to unweighted model fitting, especially when the estimate of \mjseqn{\tau^2} converges to zero. When including moderators in the model, it is possible that the model matrix is not of full rank (i.e., there is a linear relationship between the moderator variables included in the model). The function automatically tries to reduce the model matrix to full rank by removing redundant predictors, but if this fails the model cannot be fitted and an error will be issued. Deleting (redundant) moderator variables from the model as needed should solve this problem. Finally, some general words of caution about the assumptions underlying the models: \itemize{ \item The sampling variances (i.e., the \code{vi} values) are treated as if they are known constants. This (usually) implies that the distributions of the test statistics and corresponding confidence intervals are only exact and have nominal coverage when the within-study sample sizes are large (i.e., when the error in the sampling variance estimates is small). Certain outcome measures (e.g., the arcsine square root transformed risk difference and Fisher's r-to-z transformed correlation coefficient) are based on variance stabilizing transformations that also help to make the assumption of known sampling variances much more reasonable. \item When fitting a mixed/random-effects model, \mjseqn{\tau^2} is estimated and then treated as a known constant thereafter. This ignores the uncertainty in the estimate of \mjseqn{\tau^2}. As a consequence, the standard errors of the parameter estimates tend to be too small, yielding test statistics that are too large and confidence intervals that are not wide enough. The Knapp and Hartung (2003) adjustment can be used to counter this problem, yielding test statistics and confidence intervals whose properties are closer to nominal. \item Most effect size or outcome measures are not exactly normally distributed as assumed under the various models. However, the normal approximation usually becomes more accurate for most effect size or outcome measures as the within-study sample sizes increase. Therefore, sufficiently large within-study sample sizes are (usually) needed to be certain that the tests and confidence intervals have nominal levels/coverage. Again, certain outcome measures (e.g., Fisher's r-to-z transformed correlation coefficient) may be preferable from this perspective as well. } } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Berkey, C. S., Hoaglin, D. C., Mosteller, F., & Colditz, G. A. (1995). A random-effects regression model for meta-analysis. \emph{Statistics in Medicine}, \bold{14}(4), 395--411. \verb{https://doi.org/10.1002/sim.4780140406} Brannick, M. T., Potter, S. M., Benitez, B., & Morris, S. B. (2019). Bias and precision of alternate estimators in meta-analysis: Benefits of blending Schmidt–Hunter and Hedges approaches. \emph{Organizational Research Methods}, \bold{22}(2), 490--514. \verb{https://doi.org/10.1177/1094428117741966} Cochran, W. G. (1954). The combination of estimates from different experiments. \emph{Biometrics}, \bold{10}(1), 101--129. \verb{https://doi.org/10.2307/3001666} DerSimonian, R., & Laird, N. (1986). 
Meta-analysis in clinical trials. \emph{Controlled Clinical Trials}, \bold{7}(3), 177--188. \verb{https://doi.org/10.1016/0197-2456(86)90046-2} DerSimonian, R., & Kacker, R. (2007). Random-effects model for meta-analysis of clinical trials: An update. \emph{Contemporary Clinical Trials}, \bold{28}(2), 105--114. \verb{https://doi.org/10.1016/j.cct.2006.04.004} Harville, D. A. (1977). Maximum likelihood approaches to variance component estimation and to related problems. \emph{Journal of the American Statistical Association}, \bold{72}(358), 320--338. \verb{https://doi.org/10.2307/2286796} Hedges, L. V. (1983). A random effects model for effect sizes. \emph{Psychological Bulletin}, \bold{93}(2), 388--395. \verb{https://doi.org/10.1037/0033-2909.93.2.388} Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis}. San Diego, CA: Academic Press. Henmi, M., & Copas, J. B. (2010). Confidence intervals for random effects meta-analysis and robustness to publication bias. \emph{Statistics in Medicine}, \bold{29}(29), 2969--2983. \verb{https://doi.org/10.1002/sim.4029} Hunter, J. E., & Schmidt, F. L. (2004). \emph{Methods of meta-analysis: Correcting error and bias in research findings} (2nd ed.). Thousand Oaks, CA: Sage. Jennrich, R. I., & Sampson, P. F. (1976). Newton-Raphson and related algorithms for maximum likelihood variance component estimation. \emph{Technometrics}, \bold{18}(1), 11--17. \verb{https://doi.org/10.2307/1267911} Knapp, G., & Hartung, J. (2003). Improved tests for a random effects meta-regression with a single covariate. \emph{Statistics in Medicine}, \bold{22}(17), 2693--2710. \verb{https://doi.org/10.1002/sim.1482} Morris, C. N. (1983). Parametric empirical Bayes inference: Theory and applications. \emph{Journal of the American Statistical Association}, \bold{78}(381), 47--55. \verb{https://doi.org/10.2307/2287098} Paule, R. C., & Mandel, J. (1982). Consensus values and weighting factors. \emph{Journal of Research of the National Bureau of Standards}, \bold{87}(5), 377--385. \verb{https://doi.org/10.6028/jres.087.022} Raudenbush, S. W. (2009). Analyzing effect sizes: Random effects models. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis} (2nd ed., pp. 295--315). New York: Russell Sage Foundation. Sidik, K., & Jonkman, J. N. (2005a). A note on variance estimation in random effects meta-regression. \emph{Journal of Biopharmaceutical Statistics}, \bold{15}(5), 823--838. \verb{https://doi.org/10.1081/BIP-200067915} Sidik, K., & Jonkman, J. N. (2005b). Simple heterogeneity variance estimation for meta-analysis. \emph{Journal of the Royal Statistical Society, Series C}, \bold{54}(2), 367--384. \verb{https://doi.org/10.1111/j.1467-9876.2005.00489.x} Sidik, K., & Jonkman, J. N. (2007). A comparison of heterogeneity variance estimators in combining results of studies. \emph{Statistics in Medicine}, \bold{26}(9), 1964--1981. \verb{https://doi.org/10.1002/sim.2688} Viechtbauer, W. (2005). Bias and efficiency of meta-analytic variance estimators in the random-effects model. \emph{Journal of Educational and Behavioral Statistics}, \bold{30}(3), 261--293. \verb{https://doi.org/10.3102/10769986030003261} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W., \enc{López-López}{Lopez-Lopez}, J. 
A., \enc{Sánchez-Meca}{Sanchez-Meca}, J., & \enc{Marín-Martínez}{Marin-Martinez}, F. (2015). A comparison of procedures to test for moderators in mixed-effects meta-regression models. \emph{Psychological Methods}, \bold{20}(3), 360--374. \verb{https://doi.org/10.1037/met0000023} } \seealso{ \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, and \code{\link{rma.mv}} for other model fitting functions. } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit a random-effects model using the log risk ratios and variances as input ### note: method="REML" is the default, so one could leave this out rma(yi, vi, data=dat, method="REML") ### fit a random-effects model using the log risk ratios and standard errors as input ### note: the second argument of rma() is for the *variances*, so we use the ### named argument 'sei' to supply the standard errors to the function dat$sei <- sqrt(dat$vi) rma(yi, sei=sei, data=dat) ### fit a random-effects model supplying the 2x2 table cell frequencies to the function rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) ### fit a mixed-effects model with two moderators (absolute latitude and publication year) rma(yi, vi, mods=cbind(ablat, year), data=dat) ### using a model formula to specify the same model rma(yi, vi, mods = ~ ablat + year, data=dat) ### using a model formula as part of the yi argument rma(yi ~ ablat + year, vi, data=dat) ### manual dummy coding of the allocation factor alloc.random <- ifelse(dat$alloc == "random", 1, 0) alloc.alternate <- ifelse(dat$alloc == "alternate", 1, 0) alloc.systematic <- ifelse(dat$alloc == "systematic", 1, 0) ### test the allocation factor (in the presence of the other moderators) ### note: 'alternate' is the reference level of the allocation factor, ### since this is the dummy/level we leave out of the model ### note: the intercept is the first coefficient, so with btt=2:3 we test ### coefficients 2 and 3, corresponding to the coefficients for the ### allocation factor rma(yi, vi, mods = ~ alloc.random + alloc.systematic + year + ablat, data=dat, btt=2:3) ### using a model formula to specify the same model rma(yi, vi, mods = ~ factor(alloc) + year + ablat, data=dat, btt=2:3) ### factor() is not needed as character variables are automatically converted to factors rma(yi, vi, mods = ~ alloc + year + ablat, data=dat, btt=2:3) ### test all pairwise differences with Holm's method (using the 'multcomp' package if installed) res <- rma(yi, vi, mods = ~ factor(alloc) - 1, data=dat) res if (require(multcomp)) summary(glht(res, linfct=contrMat(c("alternate"=1,"random"=1,"systematic"=1), type="Tukey")), test=adjusted("holm")) ### subgrouping versus using a single model with a factor (subgrouping provides ### an estimate of tau^2 within each subgroup, but the number of studies in each ### subgroup is quite small; the model with the allocation factor provides a ### single estimate of tau^2 based on a larger number of studies, but assumes ### that tau^2 is the same within each subgroup) res.a <- rma(yi, vi, data=dat, subset=(alloc=="alternate")) res.r <- rma(yi, vi, data=dat, subset=(alloc=="random")) res.s <- rma(yi, vi, data=dat, subset=(alloc=="systematic")) res.a res.r res.s res <- rma(yi, vi, mods = ~ factor(alloc) - 1, data=dat) res ############################################################################ ### demonstrating that Q_E + Q_M = Q_Total for fixed-effects models ### note: this 
does not work for random/mixed-effects models, since Q_E and ### Q_Total are calculated under the assumption that tau^2 = 0, while the ### calculation of Q_M incorporates the estimate of tau^2 res <- rma(yi, vi, data=dat, method="FE") res ### this gives Q_Total res <- rma(yi, vi, mods = ~ ablat + year, data=dat, method="FE") res ### this gives Q_E and Q_M res$QE + res$QM ### decomposition of Q_E into subgroup Q-values res <- rma(yi, vi, mods = ~ factor(alloc), data=dat) res res.a <- rma(yi, vi, data=dat, subset=(alloc=="alternate")) res.r <- rma(yi, vi, data=dat, subset=(alloc=="random")) res.s <- rma(yi, vi, data=dat, subset=(alloc=="systematic")) res.a$QE ### Q-value within subgroup "alternate" res.r$QE ### Q-value within subgroup "random" res.s$QE ### Q-value within subgroup "systematic" res$QE res.a$QE + res.r$QE + res.s$QE ############################################################################ ### an example of a location-scale model dat <- dat.bangertdrowns2004 ### fit a standard random-effects model res <- rma(yi, vi, data=dat) res ### fit the same model as a location-scale model res <- rma(yi, vi, scale = ~ 1, data=dat) res ### check that we obtain the same estimate for tau^2 predict(res, newscale=1, transf=exp) ### add the total sample size (per 100) as a location and scale predictor dat$ni100 <- dat$ni/100 res <- rma(yi, vi, mods = ~ ni100, scale = ~ ni100, data=dat) res ### variables in the location and scale parts can differ res <- rma(yi, vi, mods = ~ ni100 + meta, scale = ~ ni100 + imag, data=dat) res } \keyword{models} metafor/man/dat.dorn2007.Rd0000644000176200001440000000621014055736403015001 0ustar liggesusers\name{dat.dorn2007} \docType{data} \alias{dat.dorn2007} \title{Studies on Complementary and Alternative Medicine for Irritable Bowel Syndrome} \description{Results from 19 trials examining complementary and alternative medicine (CAM) for irritable bowel syndrome (IBS).} \usage{ dat.dorn2007 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab trial id number \cr \bold{study} \tab \code{character} \tab (first) author \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{country} \tab \code{character} \tab country where trial was conducted \cr \bold{ibs.crit} \tab \code{character} \tab IBS diagnostic criteria (Manning, Rome I, Rome II, or Other) \cr \bold{days} \tab \code{numeric} \tab number of treatment days \cr \bold{visits} \tab \code{numeric} \tab number of practitioner visits \cr \bold{jada} \tab \code{numeric} \tab Jadad score \cr \bold{x.a} \tab \code{numeric} \tab number of responders in the active treatment group \cr \bold{n.a} \tab \code{numeric} \tab number of participants in the active treatment group \cr \bold{x.p} \tab \code{numeric} \tab number of responders in the placebo group \cr \bold{n.p} \tab \code{numeric} \tab number of participants in the placebo group } } \details{ The dataset includes the results from 19 randomized clinical trials that examined the effectiveness of complementary and alternative medicine (CAM) for irritable bowel syndrome (IBS). } \note{ The data were extracted from Table I in Dorn et al. (2009). Comparing the funnel plot in Figure 1 with the one obtained below indicates that the data for study 5 (Davis et al., 2006) in the table were not the ones that were used in the actual analyses. } \source{ Dorn, S. D., Kaptchuk, T. J., Park, J. B., Nguyen, L. T., Canenguez, K., Nam, B. H., Woods, K. B., Conboy, L. A., Stason, W. B., & Lembo, A. J. (2007). 
A meta-analysis of the placebo response in complementary and alternative medicine trials of irritable bowel syndrome. \emph{Neurogastroenterology & Motility}, \bold{19}(8), 630--637. \verb{https://doi.org/10.1111/j.1365-2982.2007.00937.x} } \examples{ ### copy data into 'dat' and examine data dat <- dat.dorn2007 dat ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=x.a, n1i=n.a, ci=x.p, n2i=n.p, data=dat) ### random-effects model res <- rma(yi, vi, data=dat, digits=2, method="DL") res ### estimated average risk ratio predict(res, transf=exp) ### funnel plot with study 5 highlighted in red funnel(res, atransf=exp, at=log(c(.1, .2, .5, 1, 2, 5, 10)), ylim=c(0,1), steps=6, las=1, col=ifelse(dat$id == 5, "red", "black")) ### change log risk ratio for study 5 dat$yi[5] <- -0.44 ### results are now more in line with what is reported in the paper ### (although the CI in the paper is not wide enough) res <- rma(yi, vi, data=dat, digits=2, method="DL") predict(res, transf=exp) ### funnel plot with study 5 highlighted in red funnel(res, atransf=exp, at=log(c(.1, .2, .5, 1, 2, 5, 10)), ylim=c(0,1), steps=6, las=1, col=ifelse(dat$id == 5, "red", "black")) } \keyword{datasets} metafor/man/regtest.Rd0000644000176200001440000003123714055736403014443 0ustar liggesusers\name{regtest} \alias{regtest} \alias{regtest.rma} \alias{regtest.default} \title{Regression Test for Funnel Plot Asymmetry} \description{ The function can be used to carry out (various versions of) Egger's regression test for funnel plot asymmetry. \loadmathjax } \usage{ regtest(x, \dots) \method{regtest}{rma}(x, model="rma", predictor="sei", ret.fit=FALSE, digits, \dots) \method{regtest}{default}(x, vi, sei, ni, subset, model="rma", predictor="sei", ret.fit=FALSE, digits, \dots) } \arguments{ \item{x}{an object of class \code{"rma"} or a vector with the observed effect sizes or outcomes.} \item{vi}{vector with the corresponding sampling variances (needed if \code{x} is a vector with the observed effect sizes or outcomes).} \item{sei}{vector with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{ni}{vector with the corresponding sample sizes.} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included in the test. Only relevant when passing a vector via \code{x}.} \item{model}{either \code{"rma"} or \code{"lm"} to indicate the type of model to use for the regression test. See \sQuote{Details}.} \item{predictor}{either \code{"sei"} \code{"vi"}, \code{"ni"}, \code{"ninv"}, \code{"sqrtni"}, or \code{"sqrtninv"} to indicate the predictor to use for the regression test. See \sQuote{Details}.} \item{ret.fit}{logical to specify whether the full results from the fitted model should also be returned.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object or is 4 for the default method).} \item{\dots}{other arguments.} } \details{ Various tests for funnel plot asymmetry have been suggested in the literature, including the rank correlation test by Begg and Mazumdar (1994) and the regression test by Egger et al. (1997). Extensions, modifications, and further developments of the regression test are described (among others) by Macaskill, Walter, and Irwig (2001), Sterne and Egger (2005), Harbord, Egger, and Sterne (2006), Peters et al. (2006), \enc{Rücker}{Ruecker} et al. 
(2008), and Moreno et al. (2009). The various versions of the regression test differ in terms of the model (either a weighted regression model with a multiplicative dispersion term or a fixed/random/mixed-effects meta-regression model is used), in terms of the predictor variable that the observed effect sizes or outcomes are hypothesized to be related to when publication bias is present (suggested predictors include the standard error, the sampling variance, and the sample size or transformations thereof), and in terms of the outcome measure used (e.g., for \mjeqn{2 \times 2}{2x2} table data, one has the choice between various outcome measures). The idea behind the various tests is the same though: If there is a relationship between the observed effect sizes or outcomes and the chosen predictor, then this usually implies asymmetry in the funnel plot, which in turn may be an indication of publication bias. The \code{regtest} function can be used to carry out various versions of the regression test. The model is chosen via the \code{model} argument, with \code{model="lm"} for weighted regression with a multiplicative dispersion term or \code{model="rma"} for the meta-analytic models. In the latter case, arguments such as \code{method}, \code{weighted}, and \code{test} used during the initial model fitting are also used for the regression test. Therefore, if one wants to conduct the regression test with a random/mixed-effects model, one should first fit a random-effects model with the \code{rma} function and then use the \code{regtest} function on the fitted model object. The predictor is chosen via the \code{predictor} argument: \itemize{ \item \code{predictor="sei"} for the standard error, \item \code{predictor="vi"} for the sampling variance, \item \code{predictor="ni"} for the sample size, \item \code{predictor="ninv"} for the inverse of the sample size, \item \code{predictor="sqrtni"} for the square root transformed sample size, and \item \code{predictor="sqrtninv"} for the inverse of the square root transformed sample size. } For predictors based on the sample size, the object \code{x} obviously must contain the information about the sample sizes. This will automatically be the case when \code{measure} was \emph{not} equal to \code{"GEN"} or the \code{ni} values were explicitly specified during the initial model fitting. If the model passed to the \code{regtest} function already included one or more moderators, then \code{regtest} will add the chosen predictor to the moderator(s) already included in the model. This way, one can test for funnel plot asymmetry after accounting first for the influence of the moderator(s) already included. One can also pass a vector with the observed effect sizes or outcomes (via \code{x}) and the corresponding sampling variances via \code{vi} (or the standard errors via \code{sei}) directly to the function (in this case, the \code{regtest.default} function is used). When the predictor is the sample size or a transformation thereof, then \code{ni} needs to be specified here as well. The outcome measure used for the regression test is simply determined by what measure was used in fitting the original model (or what values are passed to \code{regtest.default}). The model used for conducting the regression test can also be used to obtain a \sQuote{limit estimate} of the (average) true effect or outcome. 
In particular, when the standard error, sampling variance, or inverse (square root) sample size is used as the predictor, the model intercept in essence reflects the estimate under infinite precision. This is sometimes (cautiously) interpreted as an estimate of the (average) true effect or outcome that is adjusted for publication bias. } \value{ An object of class \code{"regtest"}. The object is a list containing the following components: \item{model}{the model used for the regression test.} \item{predictor}{the predictor used for the regression test.} \item{zval}{the value of the test statistic.} \item{pval}{the corresponding p-value.} \item{dfs}{the degrees of freedom of the test statistic (if the test is based on a t-distribution).} \item{fit}{the full results from the fitted model.} \item{est}{the limit estimate (only for predictors \code{"sei"}, \code{"vi"}, \code{"ninv"}, or \code{"sqrtninv"} and when the model does not contain any additional moderators; \code{NULL} otherwise).} \item{ci.lb}{lower bound of the confidence interval for the limit estimate.} \item{ci.ub}{upper bound of the confidence interval for the limit estimate.} The results are formatted and printed with the \code{\link{print.regtest}} function. } \note{ The classical \sQuote{Egger test} is obtained by setting \code{model="lm"} and \code{predictor="sei"}. For the random/mixed-effects version of the Egger test, one should first fit a random-effects model to the data and then set \code{model="rma"} and \code{predictor="sei"} when using the \code{regtest} function. See Sterne and Egger (2005) for details on these two types of models/tests. When conducting a classical \sQuote{Egger test}, the test of the limit estimate is the same as the \sQuote{precision-effect test} (PET) of Stanley and Doucouliagos (2014). The limit estimate when using the sampling variance as predictor is sometimes called the \sQuote{precision-effect estimate with SE} (PEESE) (Stanley & Doucouliagos, 2014). A conditional procedure, where the limit estimate based on the standard error as predictor is used when the PET is not significant and the limit estimate based on the sampling variance as predictor (i.e., the PEESE) is used when the PET is significant, is sometimes called the PET-PEESE procedure (Stanley & Doucouliagos, 2014). None of these tests directly test for publication bias; they test for a relationship between the observed effect sizes or outcomes and the chosen predictor. If such a relationship is present, then this usually implies asymmetry in the funnel plot, which in turn may be an indication of publication bias. However, it is important to keep in mind that there can be other reasons besides publication bias that could lead to asymmetry in the funnel plot. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Begg, C. B., & Mazumdar, M. (1994). Operating characteristics of a rank correlation test for publication bias. \emph{Biometrics}, \bold{50}(4), 1088--1101. \verb{https://doi.org/10.2307/2533446} Egger, M., Davey Smith, G., Schneider, M., & Minder, C. (1997). Bias in meta-analysis detected by a simple, graphical test. \emph{British Medical Journal}, \bold{315}(7109), 629--634. \verb{https://doi.org/10.1136/bmj.315.7109.629} Harbord, R. M., Egger, M., & Sterne, J. A. C. (2006). A modified test for small-study effects in meta-analyses of controlled trials with binary endpoints. \emph{Statistics in Medicine}, \bold{25}(20), 3443--3457.
\verb{https://doi.org/10.1002/sim.2380} Macaskill, P., Walter, S. D., & Irwig, L. (2001). A comparison of methods to detect publication bias in meta-analysis. \emph{Statistics in Medicine}, \bold{20}(4), 641--654. \verb{https://doi.org/10.1002/sim.698} Moreno, S. G., Sutton, A. J., Ades, A. E., Stanley, T. D., Abrams, K. R., Peters, J. L., & Cooper, N. J. (2009). Assessment of regression-based methods to adjust for publication bias through a comprehensive simulation study. \emph{BMC Medical Research Methodology}, \bold{9}, 2. \verb{https://doi.org/10.1186/1471-2288-9-2} Peters, J. L., Sutton, A. J., Jones, D. R., Abrams, K. R., & Rushton, L. (2006). Comparison of two methods to detect publication bias in meta-analysis. \emph{Journal of the American Medical Association}, \bold{295}(6), 676--680. \verb{https://doi.org/10.1001/jama.295.6.676} \enc{Rücker}{Ruecker}, G., Schwarzer, G., & Carpenter, J. (2008). Arcsine test for publication bias in meta-analyses with binary outcomes. \emph{Statistics in Medicine}, \bold{27}(5), 746--763. \verb{https://doi.org/10.1002/sim.2971} Stanley, T. D., & Doucouliagos, H. (2014). Meta-regression approximations to reduce publication selection bias. \emph{Research Synthesis Methods}, \bold{5}(1), 60--78. \verb{https://doi.org/10.1002/jrsm.1095} Sterne, J. A. C., & Egger, M. (2005). Regression methods to detect publication and other bias in meta-analysis. In H. R. Rothstein, A. J. Sutton, & M. Borenstein (Eds.) \emph{Publication bias in meta-analysis: Prevention, assessment, and adjustments} (pp. 99--110). Chichester, England: Wiley. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{ranktest}} } \examples{ ### copy data into 'dat' and examine data dat <- dat.egger2001 ### calculate log odds ratios and corresponding sampling variances (but remove ISIS-4 trial) dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=-16) ### fit random-effects model res <- rma(yi, vi, data=dat) res ### classical Egger test regtest(res, model="lm") ### random/mixed-effects version of the Egger test regtest(res) ### same tests, but passing outcomes directly regtest(dat$yi, dat$vi, model="lm") regtest(dat$yi, dat$vi) ### examples using the sample size (or a transformation thereof) as predictor regtest(res, model="lm", predictor="ni") regtest(res, model="lm", predictor="ninv") regtest(res, model="rma", predictor="ni") regtest(res, model="rma", predictor="ninv") ### if dat$yi is computed with escalc(), sample size information is stored in attributes dat$yi ### then this will work regtest(dat$yi, dat$vi, predictor="ni") ### otherwise have to supply sample sizes manually dat$ni <- with(dat, n1i + n2i) dat$yi <- c(dat$yi) # this removes the 'ni' attribute from 'yi' regtest(dat$yi, dat$vi, ni=dat$ni, predictor="ni") ### standard funnel plot (with standard error on y-axis) funnel(res, refline=0) ### regression test (by default the standard error is used as predictor) reg <- regtest(res) reg ### add regression line to funnel plot se <- seq(0,1.8,length=100) lines(coef(reg$fit)[1] + coef(reg$fit)[2]*se, se, lwd=2) ### regression test (using the sampling variance as predictor) reg <- regtest(res, predictor="vi") ### add regression line to funnel plot (using the sampling variance as predictor) lines(coef(reg$fit)[1] + coef(reg$fit)[2]*se^2, se, lwd=2) ### testing for asymmetry after accounting for the influence of a 
moderator res <- rma(yi, vi, mods = ~ year, data=dat) regtest(res, model="lm") regtest(res) } \keyword{htest} metafor/man/dat.dagostino1998.Rd0000644000176200001440000001165514055736403016055 0ustar liggesusers\name{dat.dagostino1998} \docType{data} \alias{dat.dagostino1998} \title{Studies on the Effectiveness of Antihistamines in Reducing Symptoms of the Common Cold} \description{Results from 9 studies on the effectiveness of antihistamines in reducing the severity of runny nose and sneezing in the common cold.} \usage{dat.dagostino1998} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study id \cr \bold{cold} \tab \code{character} \tab natural or induced cold study \cr \bold{scale.rn} \tab \code{character} \tab scale for measuring runny nose severity \cr \bold{scale.sn} \tab \code{character} \tab scale for measuring sneezing severity \cr \bold{drug} \tab \code{character} \tab type of antihistamine studied \cr \bold{tnt} \tab \code{numeric} \tab total sample size of the treatment group \cr \bold{tnc} \tab \code{numeric} \tab total sample size of the control (placebo) group \cr \bold{outcome} \tab \code{character} \tab outcome variable (see \sQuote{Details})\cr \bold{mt} \tab \code{numeric} \tab mean in the treatment group \cr \bold{sdt} \tab \code{numeric} \tab SD in the treatment group \cr \bold{mc} \tab \code{numeric} \tab mean in the control group \cr \bold{sdc} \tab \code{numeric} \tab SD in the control group \cr \bold{xt} \tab \code{numeric} \tab number of patients reaching the therapy goal in the treatment group \cr \bold{xc} \tab \code{numeric} \tab number of patients reaching the therapy goal in the control (placebo) group \cr \bold{nt} \tab \code{numeric} \tab sample size of the treatment group for measuring the outcome \cr \bold{nc} \tab \code{numeric} \tab sample size of the control group for measuring the outcome } } \details{ The studies for this meta-analysis were assembled to examine the effectiveness of antihistamines in reducing the severity of runny nose and sneezing in the common cold. Effectiveness was measured after one and two days of treatment in terms of 4 different outcome variables: \enumerate{ \item \code{rnic1} and \code{rnic2} (continuous): incremental change (improvement) in runny nose severity at day 1 and day 2, \item \code{rngoal1} and \code{rngoal2} (dichotomous): reaching the goal of therapy (of at least a 50\% reduction in runny nose severity) at day 1 and day 2, \item \code{snic1} and \code{snic2} (continuous): incremental change (improvement) in sneezing severity at day 1 and day 2, and \item \code{sngoal1} and \code{sngoal2} (dichotomous): reaching the goal of therapy (of at least a 50\% reduction in sneezing severity) at day 1 and day 2. } For the continuous outcomes, standardized mean differences can be computed to quantify the difference between the treatment and control groups. For the dichotomous outcomes, one can compute (log) odds ratios to quantify the difference between the treatment and control groups. } \source{ D'Agostino, R. B., Sr., Weintraub, M., Russell, H. K., Stepanians, M., D'Agostino, R. B., Jr., Cantilena, L. R., Jr., Graumlich, J. F., Maldonado, S., Honig, P., & Anello, C. (1998). The effectiveness of antihistamines in reducing the severity of runny nose and sneezing: A meta-analysis. \emph{Clinical Pharmacology & Therapeutics}, \bold{64}(6), 579--596.
\verb{https://doi.org/10.1016/S0009-9236(98)90049-2} } \examples{ ### copy data into 'dat' and examine data dat <- dat.dagostino1998 dat ### compute standardized mean differences and corresponding sampling variances dat <- escalc(measure="SMD", m1i=mt, m2i=mc, sd1i=sdt, sd2i=sdc, n1i=nt, n2i=nc, data=dat, add.measure=TRUE) ### compute log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=xt, ci=xc, n1i=nt, n2i=nc, data=dat, replace=FALSE, add.measure=TRUE, add=1/2, to="all") ### inspect data for the first study head(dat, 8) ### fit a random-effects model for incremental change in runny nose severity at day 1 res <- rma(yi, vi, data=dat, subset=outcome=="rnic1") res ### fit a random-effects model for reaching the goal of therapy for runny nose severity at day 1 res <- rma(yi, vi, data=dat, subset=outcome=="rngoal1") res predict(res, transf=exp) ### construct approximate V matrix assuming a correlation of 0.7 for sampling errors within studies V <- lapply(split(dat$vi, dat$study), function(v) { S <- diag(sqrt(v), nrow=length(v), ncol=length(v)) R <- matrix(0.7, nrow=length(v), ncol=length(v)) diag(R) <- 1 S \%*\% R \%*\% S }) V <- bldiag(V, order=dat$study) ### fit a model for incremental change in runny nose severity at day 1 and at day 2, allowing for ### correlated sampling errors (no random effects added, since there does not appear to be any ### noteworthy heterogeneity in these data) res <- rma.mv(yi, V, mods = ~ outcome - 1, data=dat, subset=outcome \%in\% c("rnic1","rnic2")) res ### test if there is a difference in effects at day 1 and day 2 anova(res, X=c(1,-1)) } \keyword{datasets} metafor/man/fitstats.Rd0000644000176200001440000000652214055736403014626 0ustar liggesusers\name{fitstats} \alias{fitstats} \alias{fitstats.rma} \alias{logLik.rma} \alias{deviance.rma} \alias{AIC.rma} \alias{BIC.rma} \alias{nobs.rma} \alias{df.residual.rma} \title{Fit Statistics and Information Criteria for 'rma' Objects} \description{ Functions to extract the log-likelihood, deviance, AIC, BIC, and AICc values from objects of class \code{"rma"}. \loadmathjax } \usage{ fitstats(object, \dots) \method{fitstats}{rma}(object, \dots, REML) \method{logLik}{rma}(object, REML, \dots) \method{deviance}{rma}(object, REML, \dots) \method{AIC}{rma}(object, \dots, k=2, correct=FALSE) \method{BIC}{rma}(object, \dots) } \arguments{ \item{object}{an object of class \code{"rma"}.} \item{\dots}{optionally more fitted model objects.} \item{REML}{logical to specify whether the regular or restricted likelihood function should be used to obtain the fit statistics and information criteria. Defaults to the method of estimation used, that is, \code{TRUE} if \code{object} was fitted with \code{method="REML"} and \code{FALSE} otherwise.} \item{k}{numeric value to specify the penalty per parameter to use. The default (\code{k=2}) is the classical AIC. See \code{\link{AIC}} for more details.} \item{correct}{logical to specify whether the regular (default) or corrected (i.e., AICc) version of the AIC should be extracted.} } \value{ For \code{fitstats}, a data frame with the (restricted) log-likelihood, deviance, AIC, BIC, and AICc values for each model passed to the function. For \code{logLik}, an object of class \code{"logLik"}, providing the (restricted) log-likelihood of the model evaluated at the estimated coefficient(s). For \code{deviance}, a numeric value with the corresponding deviance.
For \code{AIC} and \code{BIC}, either a numeric value with the corresponding AIC, AICc, or BIC or a data frame with rows corresponding to the models and columns representing the number of parameters in the model (\code{df}) and the AIC, AICc, or BIC. } \note{ Variance components in the model (e.g., \mjseqn{\tau^2} in random/mixed-effects models fitted with \code{\link{rma.uni}}) are counted as additional parameters in the calculation of the AIC, BIC, and AICc. Also, the fixed effects are counted as parameters in the calculation of the AIC, BIC, and AICc even when using REML estimation. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}}, \code{\link{anova.rma}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### random-effects model res1 <- rma(yi, vi, data=dat, method="ML") ### mixed-effects model with absolute latitude and publication year as moderators res2 <- rma(yi, vi, mods = ~ ablat + year, data=dat, method="ML") ### compare fit statistics fitstats(res1, res2) ### log-likelihoods logLik(res1) logLik(res2) ### deviances deviance(res1) deviance(res2) ### AIC, AICc, and BIC values AIC(res1, res2) AIC(res1, res2, correct=TRUE) BIC(res1, res2) } \keyword{models} metafor/man/dat.bonett2010.Rd0000644000176200001440000000551514055736403015333 0ustar liggesusers\name{dat.bonett2010} \docType{data} \alias{dat.bonett2010} \title{Studies on the Reliability of the CES-D Scale} \description{Results from 9 studies on the reliability of the Center for Epidemiologic Studies Depression (CES-D) Scale administered to children providing care to an elderly parent.} \usage{dat.bonett2010} \format{The data frame contains the following columns: \tabular{lll}{ \bold{study} \tab \code{numeric} \tab study number \cr \bold{source} \tab \code{character} \tab source of data \cr \bold{ni} \tab \code{numeric} \tab sample size \cr \bold{mi} \tab \code{numeric} \tab number of items in the scale \cr \bold{ai} \tab \code{numeric} \tab observed value of Cronbach's alpha \cr \bold{caregivers} \tab \code{character} \tab gender of the children in the sample } } \details{ The Center for Epidemiologic Studies Depression (CES-D) Scale is a 20-item questionnaire assessing various symptoms of depression, with each item scored on a 4-point scale. The scale has been used in several studies to examine depressive symptoms in children providing care to an elderly parent. The dataset includes information on the reliability of the scale as measured with Cronbach's alpha in 9 such studies. Also, the gender composition of the children in each sample is indicated. } \source{ Bonett, D. G. (2010). Varying coefficient meta-analytic methods for alpha reliability. \emph{Psychological Methods}, \bold{15}(4), 368--385. \verb{https://doi.org/10.1037/a0020142} } \references{ Bonett, D. G. (2002). Sample size requirements for testing and estimating coefficient alpha. \emph{Journal of Educational and Behavioral Statistics}, \bold{27}(4), 335--340. \verb{https://doi.org/10.3102/10769986027004335} Hakstian, A. R., & Whalen, T. E. (1976). 
A k-sample significance test for independent alpha coefficients. \emph{Psychometrika}, \bold{41}(2), 219--231. \verb{https://doi.org/10.1007/BF02291840} } \examples{ ### copy data into 'dat' and examine data dat <- dat.bonett2010 dat ### meta-analysis using the raw alpha values res <- rma(measure="ARAW", ai=ai, mi=mi, ni=ni, data=dat) res ### meta-analysis using transformed alpha values (using the ### transformation suggested by Hakstian & Whalen, 1976) res <- rma(measure="AHW", ai=ai, mi=mi, ni=ni, data=dat) res predict(res, transf=transf.iahw) ### meta-analysis using transformed alpha values (using the ### transformation suggested by Bonett, 2002) res <- rma(measure="ABT", ai=ai, mi=mi, ni=ni, data=dat) res predict(res, transf=transf.iabt) ### forest plot forest(res, slab=dat$source, header=TRUE, atransf=transf.iabt, refline=coef(res)) ### examine whether female/mixed samples yield different alphas (with raw alphas) res <- rma(measure="ARAW", ai=ai, mi=mi, ni=ni, mods = ~ caregivers, data=dat) res predict(res, newmods=c(0,1), digits=2) } \keyword{datasets} metafor/man/dat.collins1985b.Rd0000644000176200001440000000625614055736403015674 0ustar liggesusers\name{dat.collins1985b} \docType{data} \alias{dat.collins1985b} \title{Studies on the Effects of Diuretics in Pregnancy} \description{Results from 9 studies examining the effects of diuretics in pregnancy on various outcomes.} \usage{dat.collins1985b} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab study number \cr \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{pre.nti} \tab \code{numeric} \tab number of women in treatment group followed up for pre-eclampsia outcome \cr \bold{pre.nci} \tab \code{numeric} \tab number of women in control/placebo group followed up for pre-eclampsia outcome \cr \bold{pre.xti} \tab \code{numeric} \tab number of women in treatment group with any form of pre-eclampsia \cr \bold{pre.xci} \tab \code{numeric} \tab number of women in control/placebo group with any form of pre-eclampsia \cr \bold{oedema} \tab \code{numeric} \tab dummy variable indicating whether oedema was a diagnostic criterion \cr \bold{fup.nti} \tab \code{numeric} \tab number of women in treatment group followed up for mortality outcomes \cr \bold{fup.nci} \tab \code{numeric} \tab number of women in control/placebo group followed up for mortality outcomes \cr \bold{ped.xti} \tab \code{numeric} \tab number of perinatal deaths in treatment group \cr \bold{ped.xci} \tab \code{numeric} \tab number of perinatal deaths in control/placebo group \cr \bold{stb.xti} \tab \code{numeric} \tab number of stillbirths in treatment group \cr \bold{stb.xci} \tab \code{numeric} \tab number of stillbirths in control/placebo group \cr \bold{ned.xti} \tab \code{numeric} \tab number of neonatal deaths in treatment group \cr \bold{ned.xci} \tab \code{numeric} \tab number of neonatal deaths in control/placebo group } } \details{ The 9 studies in this dataset examined the effects of diuretics in pregnancy on various outcomes, including the presence of any form of pre-eclampsia, perinatal death, stillbirth, and neonatal death. } \source{ Collins, R., Yusuf, S., & Peto, R. (1985). Overview of randomised trials of diuretics in pregnancy. \emph{British Medical Journal}, \bold{290}(6461), 17--23. 
\verb{https://doi.org/10.1136/bmj.290.6461.17} } \examples{ ### copy data into 'dat' dat <- dat.collins1985b ### calculate (log) odds ratio and sampling variance dat <- escalc(measure="OR", n1i=pre.nti, n2i=pre.nci, ai=pre.xti, ci=pre.xci, data=dat) summary(dat, digits=2, transf=exp) ### meta-analysis using Peto's method for any form of pre-eclampsia rma.peto(n1i=pre.nti, n2i=pre.nci, ai=pre.xti, ci=pre.xci, data=dat, digits=2) ### meta-analysis including only studies where oedema was not a diagnostic criterion rma.peto(n1i=pre.nti, n2i=pre.nci, ai=pre.xti, ci=pre.xci, data=dat, digits=2, subset=(oedema==0)) ### meta-analyses of mortality outcomes (perinatal deaths, stillbirths, and neonatal deaths) rma.peto(n1i=fup.nti, n2i=fup.nci, ai=ped.xti, ci=ped.xci, data=dat, digits=2) rma.peto(n1i=fup.nti, n2i=fup.nci, ai=stb.xti, ci=stb.xci, data=dat, digits=2) rma.peto(n1i=fup.nti, n2i=fup.nci, ai=ned.xti, ci=ned.xci, data=dat, digits=2) } \keyword{datasets} metafor/man/coef.rma.Rd0000644000176200001440000000413214055736403014452 0ustar liggesusers\name{coef.rma} \alias{coef} \alias{coef.rma} \alias{coef.summary.rma} \title{Extract the Model Coefficients and Coefficient Table from 'rma' and 'summary.rma' Objects} \description{ The \code{coef} function extracts the estimated model coefficients from objects of class \code{"rma"}. For objects of class \code{"summary.rma"}, the model coefficients, corresponding standard errors, test statistics, p-values, and confidence interval bounds are extracted. } \usage{ \method{coef}{rma}(object, \dots) \method{coef}{summary.rma}(object, \dots) } \arguments{ \item{object}{an object of class \code{"rma"} or \code{"summary.rma"}.} \item{\dots}{other arguments.} } \value{ Either a vector with the estimated model coefficient(s) or a data frame with the following elements: \item{estimate}{estimated model coefficient(s).} \item{se}{corresponding standard error(s).} \item{zval}{corresponding test statistic(s).} \item{pval}{corresponding p-value(s).} \item{ci.lb}{corresponding lower bound of the confidence interval(s).} \item{ci.ub}{corresponding upper bound of the confidence interval(s).} When the model was fitted with \code{test="t"} or \code{test="knha"}, then \code{zval} is called \code{tval} in the data frame that is returned by the function. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### extract model coefficients coef(res) ### extract model coefficient table coef(summary(res)) } \keyword{models} metafor/man/dat.collins1985a.Rd0000644000176200001440000000751314055736403015670 0ustar liggesusers\name{dat.collins1985a} \docType{data} \alias{dat.collins1985a} \title{Studies on the Treatment of Upper Gastrointestinal Bleeding by a Histamine H2 Antagonist} \description{Results from studies examining the effectiveness of histamine H2 antagonists (cimetidine or ranitidine) in treating patients with acute upper gastrointestinal hemorrhage.} \usage{dat.collins1985a} \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab study number \cr \bold{trial} \tab \code{character} \tab first author of trial \cr \bold{year} \tab \code{numeric} \tab year of publication \cr \bold{ref} \tab \code{numeric} \tab reference number \cr \bold{trt} \tab \code{character} \tab C = cimetidine, R = ranitidine \cr \bold{ctrl} \tab \code{character} \tab P = placebo, AA = antacids, UT = usual treatment \cr \bold{nti} \tab \code{numeric} \tab number of patients in treatment group \cr \bold{b.xti} \tab \code{numeric} \tab number of patients in treatment group with persistent or recurrent bleedings \cr \bold{o.xti} \tab \code{numeric} \tab number of patients in treatment group in need of operation \cr \bold{d.xti} \tab \code{numeric} \tab number of patients in treatment group that died \cr \bold{nci} \tab \code{numeric} \tab number of patients in control group \cr \bold{b.xci} \tab \code{numeric} \tab number of patients in control group with persistent or recurrent bleedings \cr \bold{o.xci} \tab \code{numeric} \tab number of patients in control group in need of operation \cr \bold{d.xci} \tab \code{numeric} \tab number of patients in control group that died } } \details{ The data were obtained from Tables 1 and 2 in Collins and Langman (1985). The authors used Peto's (one-step) method for meta-analyzing the 27 trials. This approach is implemented in the \code{\link{rma.peto}} function. Using the same dataset, van Houwelingen, Zwinderman, and Stijnen (1993) describe some alternative approaches for analyzing these data, including fixed and random-effects conditional logistic models. Those are implemented in the \code{\link{rma.glmm}} function. } \source{ Collins, R., & Langman, M. (1985). Treatment with histamine H2 antagonists in acute upper gastrointestinal hemorrhage. \emph{New England Journal of Medicine}, \bold{313}(11), 660--666. \verb{https://doi.org/10.1056/NEJM198509123131104} } \references{ van Houwelingen, H. C., Zwinderman, K. H., & Stijnen, T. (1993). A bivariate approach to meta-analysis. \emph{Statistics in Medicine}, \bold{12}(24), 2273--2284. 
\verb{https://doi.org/10.1002/sim.4780122405} } \examples{ ### copy data into 'dat' and examine data dat <- dat.collins1985a dat ### meta-analysis of log ORs using Peto's method (outcome: persistent or recurrent bleedings) res <- rma.peto(ai=b.xti, n1i=nti, ci=b.xci, n2i=nci, data=dat) print(res, digits=2) \dontrun{ ### meta-analysis of log ORs using a conditional logistic regression model (FE model) res <- rma.glmm(measure="OR", ai=b.xti, n1i=nti, ci=b.xci, n2i=nci, data=dat, model="CM.EL", method="FE") summary(res) predict(res, transf=exp, digits=2) ### plot the likelihoods of the odds ratios llplot(measure="OR", ai=b.xti, n1i=nti, ci=b.xci, n2i=nci, data=dat, lwd=1, refline=NA, xlim=c(-4,4), drop00=FALSE) ### meta-analysis of log odds ratios using a conditional logistic regression model (RE model) res <- rma.glmm(measure="OR", ai=b.xti, n1i=nti, ci=b.xci, n2i=nci, data=dat, model="CM.EL", method="ML") summary(res) predict(res, transf=exp, digits=2) } ### meta-analysis of log ORs using Peto's method (outcome: need for surgery) res <- rma.peto(ai=o.xti, n1i=nti, ci=o.xci, n2i=nci, data=dat) print(res, digits=2) ### meta-analysis of log ORs using Peto's method (outcome: death) res <- rma.peto(ai=d.xti, n1i=nti, ci=d.xci, n2i=nci, data=dat) print(res, digits=2) } \keyword{datasets} metafor/man/transf.Rd0000644000176200001440000002143414055736403014261 0ustar liggesusers\name{transf} \alias{transf} \alias{transf.rtoz} \alias{transf.ztor} \alias{transf.logit} \alias{transf.ilogit} \alias{transf.arcsin} \alias{transf.iarcsin} \alias{transf.pft} \alias{transf.ipft} \alias{transf.ipft.hm} \alias{transf.isqrt} \alias{transf.irft} \alias{transf.iirft} \alias{transf.ahw} \alias{transf.iahw} \alias{transf.abt} \alias{transf.iabt} \alias{transf.ztor.int} \alias{transf.exp.int} \alias{transf.ilogit.int} \alias{transf.dtou1} \alias{transf.dtou2} \alias{transf.dtou3} \alias{transf.dtocles} \alias{transf.dtorpb} \alias{transf.dtobesd} \alias{transf.dtomd} \alias{transf.logortord} \alias{transf.logortorr} \title{Transformation Functions} \description{ A set of transformation functions useful for meta-analyses. \loadmathjax } \usage{ transf.rtoz(xi, \dots) transf.ztor(xi, \dots) transf.logit(xi, \dots) transf.ilogit(xi, \dots) transf.arcsin(xi, \dots) transf.iarcsin(xi, \dots) transf.pft(xi, ni, \dots) transf.ipft(xi, ni, \dots) transf.ipft.hm(xi, targs, \dots) transf.isqrt(xi, \dots) transf.irft(xi, ti, \dots) transf.iirft(xi, ti, \dots) transf.ahw(xi, \dots) transf.iahw(xi, \dots) transf.abt(xi, \dots) transf.iabt(xi, \dots) transf.ztor.int(xi, targs, \dots) transf.exp.int(xi, targs, \dots) transf.ilogit.int(xi, targs, \dots) transf.dtou1(xi, \dots) transf.dtou2(xi, \dots) transf.dtou3(xi, \dots) transf.dtorpb(xi, n1i, n2i, \dots) transf.dtobesd(xi, \dots) transf.dtomd(xi, targs, \dots) transf.logortord(xi, pc, \dots) transf.logortorr(xi, pc, \dots) } \arguments{ \item{xi}{vector of values to be transformed.} \item{ni}{vector of sample sizes.} \item{n1i}{vector of sample sizes for the first group.} \item{n2i}{vector of sample sizes for the second group.} \item{ti}{vector of person-times at risk.} \item{pc}{control group risk (either a single value or a vector).} \item{targs}{list with additional arguments for the transformation function. See \sQuote{Details}.} \item{\dots}{other arguments.} } \details{ The following transformation functions are currently implemented: \itemize{ \item \code{transf.rtoz}: Fisher's r-to-z transformation for correlations. 
\item \code{transf.ztor}: inverse of Fisher's r-to-z transformation. \item \code{transf.logit}: logit (log odds) transformation for proportions. \item \code{transf.ilogit}: inverse of the logit transformation. \item \code{transf.arcsin}: arcsine square root transformation for proportions. \item \code{transf.iarcsin}: inverse of the arcsine transformation. \item \code{transf.pft}: Freeman-Tukey (double arcsine) transformation for proportions. See Freeman & Tukey (1950). The \code{xi} argument is used to specify the proportions and the \code{ni} argument the corresponding sample sizes. \item \code{transf.ipft}: inverse of the Freeman-Tukey (double arcsine) transformation for proportions. See Miller (1978). \item \code{transf.ipft.hm}: inverse of the Freeman-Tukey (double arcsine) transformation for proportions using the harmonic mean of the sample sizes for the back-transformation. See Miller (1978). The sample sizes are specified via the \code{targs} argument (the list element should be called \code{ni}). \item \code{transf.isqrt}: inverse of the square root transformation (i.e., function to square a number). \item \code{transf.irft}: Freeman-Tukey transformation for incidence rates. See Freeman & Tukey (1950). The \code{xi} argument is used to specify the incidence rates and the \code{ti} argument the corresponding person-times at risk. \item \code{transf.iirft}: inverse of the Freeman-Tukey transformation for incidence rates. \item \code{transf.ahw}: transformation of coefficient alpha as suggested by Hakstian & Whalen (1976). \item \code{transf.iahw}: inverse of the transformation of coefficient alpha as suggested by Hakstian & Whalen (1976). \item \code{transf.abt}: transformation of coefficient alpha as suggested by Bonett (2002). \item \code{transf.iabt}: inverse of the transformation of coefficient alpha as suggested by Bonett (2002). \item \code{transf.ztor.int}: integral transformation method for the z-to-r transformation. \item \code{transf.exp.int}: integral transformation method for the exponential transformation. \item \code{transf.ilogit.int}: integral transformation method for the inverse of the logit transformation. \item \code{transf.dtou1}: transformation of standardized mean differences to Cohen's U1 values (Cohen, 1988). \item \code{transf.dtou2}: transformation of standardized mean differences to Cohen's U2 values (Cohen, 1988). \item \code{transf.dtou3}: transformation of standardized mean differences to Cohen's U3 values (Cohen, 1988). \item \code{transf.dtocles}: transformation of standardized mean differences to common language effect size values (McGraw & Wong, 1992). \item \code{transf.dtorpb}: transformation of standardized mean differences to point-biserial correlations. If \code{n1i} and \code{n2i} are not specified, the function assumes \code{n1i=n2i} and uses an approximate formula. If \code{n1i} and \code{n2i} are specified, the exact transformation formula is used. \item \code{transf.dtobesd}: transformation of standardized mean differences to binomial effect size display values (Rosenthal & Rubin, 1982). Note that the function only provides the proportion in the first group scoring above the median (the proportion in the second group scoring above the median is simply one minus the proportion in the first group scoring above the median). \item \code{transf.dtomd}: transformation of standardized mean differences to mean differences given a known standard deviation, which must be specified via the \code{targs} argument. 
\item \code{transf.logortord}: transformation of log odds ratios to risk differences, assuming a particular value for the control group risk (which needs to be specified via the \code{pc} argument). \item \code{transf.logortorr}: transformation of log odds ratios to risk ratios, assuming a particular value for the control group risk (which needs to be specified via the \code{pc} argument). } } \value{ A vector with the transformed values. } \note{ The integral transformation method for a transformation function \mjseqn{h(z)} integrates \mjseqn{h(z) f(z)} over \mjseqn{z} using the limits \code{targs$lower} and \code{targs$upper}, where \mjseqn{f(z)} is the density of a normal distribution with mean equal to \code{xi} and variance equal to \code{targs$tau2}. An example is provided below. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Bonett, D. G. (2002). Sample size requirements for testing and estimating coefficient alpha. \emph{Journal of Educational and Behavioral Statistics}, \bold{27}(4), 335--340. \verb{https://doi.org/10.3102/10769986027004335} Cohen, J. (1988). \emph{Statistical power analysis for the behavioral sciences} (2nd ed.). Hillsdale, NJ: Lawrence Erlbaum Associates. Fisher, R. A. (1921). On the \dQuote{probable error} of a coefficient of correlation deduced from a small sample. \emph{Metron}, \bold{1}, 1--32. \verb{http://hdl.handle.net/2440/15169} Freeman, M. F., & Tukey, J. W. (1950). Transformations related to the angular and the square root. \emph{Annals of Mathematical Statistics}, \bold{21}(4), 607--611. \verb{https://doi.org/10.1214/aoms/1177729756} Hakstian, A. R., & Whalen, T. E. (1976). A k-sample significance test for independent alpha coefficients. \emph{Psychometrika}, \bold{41}(2), 219--231. \verb{https://doi.org/10.1007/BF02291840} McGraw, K. O., & Wong, S. P. (1992). A common language effect size statistic. \emph{Psychological Bulletin}, \bold{111}(2), 361--365. \verb{https://doi.org/10.1037/0033-2909.111.2.361} Miller, J. J. (1978). The inverse of the Freeman-Tukey double arcsine transformation. \emph{American Statistician}, \bold{32}(4), 138. \verb{https://doi.org/10.1080/00031305.1978.10479283} Rosenthal, R., & Rubin, D. B. (1982). A simple, general purpose display of magnitude of experimental effect. \emph{Journal of Educational Psychology}, \bold{74}(2), 166--169. \verb{https://doi.org/10.1037/0022-0663.74.2.166} Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res <- rma(yi, vi, data=dat) ### average risk ratio with 95\% CI (but technically, this provides an ### estimate of the median risk ratio, not the mean risk ratio!) predict(res, transf=exp) ### average risk ratio with 95\% CI using the integral transformation predict(res, transf=transf.exp.int, targs=list(tau2=res$tau2, lower=-4, upper=4)) } \keyword{manip} metafor/man/selmodel.Rd0000644000176200001440000010601214055736403014564 0ustar liggesusers\name{selmodel} \alias{selmodel} \alias{selmodel.rma.uni} \title{Selection Models} \description{ Function to fit selection models. 
\loadmathjax } \usage{ selmodel(x, \dots) \method{selmodel}{rma.uni}(x, type, alternative="greater", prec, delta, steps, verbose=FALSE, digits, control, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}.} \item{type}{character string to specify the type of selection model. Possible options are \code{"beta"}, \code{"halfnorm"}, \code{"negexp"}, \code{"logistic"}, \code{"power"}, \code{"negexppow"}, or \code{"stepfun"}. Can be abbreviated. See \sQuote{Details}.} \item{alternative}{character string to specify the sidedness of the hypothesis when testing the observed outcomes. Possible options are \code{"greater"} (the default), \code{"less"}, or \code{"two.sided"}. Can be abbreviated.} \item{prec}{optional character string to specify the measure of precision (only relevant for selection models that can incorporate this into the selection function). Possible options are \code{"sei"}, \code{"vi"}, \code{"ninv"}, or \code{"sqrtninv"}. See \sQuote{Details}.} \item{delta}{optional numeric vector (of the same length as the number of selection model parameters) to fix the corresponding \mjseqn{\delta} value(s). A specific \mjseqn{\delta} value can be fixed by setting the corresponding element of this argument to the desired value. A specific \mjseqn{\delta} value will be estimated if the corresponding element is set equal to \code{NA}. See \sQuote{Details}.} \item{steps}{numeric vector of one or more values between 0 and 1 that can or must be specified for certain selection functions. See \sQuote{Details}.} \item{verbose}{logical to specify whether output should be generated on the progress of the model fitting (the default is \code{FALSE}). Can also be an integer. Values > 1 generate more verbose output. See \sQuote{Note}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{control}{optional list of control values for the estimation algorithm. If unspecified, default values are defined inside the function. See \sQuote{Note}.} \item{\dots}{other arguments.} } \details{ Selection models are a general class of models that attempt to model the process by which the studies included in a meta-analysis may have been influenced by some form of publication bias. If a particular selection model is an adequate approximation for the underlying selection process, then the model provides estimates of the parameters of interest (e.g., the average true outcome and the amount of heterogeneity in the true outcomes) that are \sQuote{corrected} for this selection process (i.e., they are estimates of the parameters in the population of studies before any selection has taken place). The present function fits a variety of such selection models. To do so, one should pass an object fitted with the \code{\link{rma.uni}} function to the first argument. The model that will then be fitted is of the same form as the original model combined with the specific selection model chosen (see below for possible options). For example, if the original model was a random-effects model, then a random-effects selection model will be fitted. Similarly, if the original model included moderators, then they will also be included in the selection model. Model fitting is done via maximum likelihood (ML) estimation over the fixed- and random-effects parameters (e.g., \mjseqn{\mu} and \mjseqn{\tau^2} in a random-effects model) and the selection model parameters. 
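For example, a minimal workflow could look as follows (an illustrative sketch only, assuming a data frame \code{dat} with the observed outcomes in \code{yi} and the corresponding sampling variances in \code{vi}): \preformatted{### fit a random-effects model and then a step function selection model
### with a single cutpoint at a (one-sided) p-value of .025
res <- rma(yi, vi, data=dat)
sel <- selmodel(res, type="stepfun", steps=.025)
sel}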
Argument \code{type} determines the specific type of selection model that should be fitted. All selection models that can be fitted are based on the idea that selection may have taken place based on the p-values of the studies. In particular, let \mjseqn{y_i} and \mjseqn{v_i} denote the observed outcome and the corresponding sampling variance of the \mjseqn{i}th study. Then \mjseqn{z_i = y_i / \sqrt{v_i}} is the (Wald-type) test statistic for testing the null hypothesis \mjeqn{\mbox{H}_0{:}\; \theta_i = 0}{H_0: \theta_i = 0} and \mjseqn{p_i = 1 - \Phi(z_i)} (if \code{alternative="greater"}), \mjseqn{p_i = \Phi(z_i)} (if \code{alternative="less"}), or \mjseqn{p_i = 2(1 - \Phi(|z_i|))} (if \code{alternative="two.sided"}) the corresponding (one- or two-sided) p-value, where \mjseqn{\Phi()} denotes the cumulative distribution function of a standard normal distribution. Finally, let \mjseqn{w(p_i)} denote some function that specifies the relative likelihood of selection given the p-value of a study. If \mjseqn{w(p_i) > w(p_{i'})} when \mjseqn{p_i < p_{i'}} (i.e., \mjseqn{w(p_i)} is larger for smaller p-values), then \code{alternative="greater"} implies selection in favor of increasingly significant positive outcomes, \code{alternative="less"} implies selection in favor of increasingly significant negative outcomes, and \code{alternative="two.sided"} implies selection in favor of increasingly significant outcomes regardless of their direction. \subsection{Beta Selection Model}{ When \code{type="beta"}, the function can be used to fit the \sQuote{beta selection model} by Citkowicz and Vevea (2017). For this model, the selection function is given by \mjsdeqn{w(p_i) = p_i^{\delta_1 - 1} \times (1 - p_i)^{\delta_2 - 1}} where \mjseqn{\delta_1 > 0} and \mjseqn{\delta_2 > 0}. The null hypothesis \mjeqn{\mbox{H}_0{:}\; \delta_1 = \delta_2 = 1}{H_0: \delta_1 = \delta_2 = 1} represents the case where there is no selection (at least not depending on the p-values). \if{html,latex}{The figure below illustrates with some examples how the relative likelihood of selection can depend on the p-value for various combinations of \mjseqn{\delta_1} and \mjseqn{\delta_2}.} Note that the model allows for a non-monotonic selection function. \if{html}{\figure{selmodel-beta.png}{options: width=600}} \if{latex}{\figure{selmodel-beta.pdf}{options: width=4in}} } \subsection{Half-Normal, Negative-Exponential, Logistic, and Power Selection Models}{ Preston et al. (2004) suggested the first three of the following selection functions: \tabular{lllll}{ \bold{name} \tab \ics \tab \bold{\code{type}} \tab \ics \tab \bold{selection function} \cr half-normal \tab \ics \tab \code{"halfnorm"} \tab \ics \tab \mjseqn{w(p_i) = \exp(-\delta \times p_i^2)} \cr negative-exponential \tab \ics \tab \code{"negexp"} \tab \ics \tab \mjseqn{w(p_i) = \exp(-\delta \times p_i)} \cr logistic \tab \ics \tab \code{"logistic"} \tab \ics \tab \mjseqn{w(p_i) = 2 \times \exp(-\delta \times p_i) / (1 + \exp(-\delta \times p_i))} \cr power \tab \ics \tab \code{"power"} \tab \ics \tab \mjseqn{w(p_i) = (1-p_i)^\delta} } The power selection model is added here as it has similar properties to the models suggested by Preston et al. (2004). For all models, assume \mjseqn{\delta \ge 0}, so that all functions imply a monotonically decreasing relationship between the p-value and the selection probability. For all functions, \mjeqn{\mbox{H}_0{:}\; \delta = 0}{H_0: \delta = 0} implies no selection.
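Since these selection functions are simple functions of the p-value, they can easily be plotted directly for chosen values of \mjseqn{\delta} (a small base R sketch for illustration, not part of the package itself): \preformatted{### relative likelihood of selection for the four functions when delta = 6
p     <- seq(0.001, 1, length=200)
delta <- 6
plot(p, exp(-delta * p^2), type="l", ylim=c(0,1), ylab="w(p)") # half-normal
lines(p, exp(-delta * p), lty=2)                               # negative-exponential
lines(p, 2 * exp(-delta * p) / (1 + exp(-delta * p)), lty=3)   # logistic
lines(p, (1 - p)^delta, lty=4)                                 # power}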
\if{html,latex}{The figure below shows the relative likelihood of selection as a function of the p-value for \mjseqn{\delta = 0} and for the various selection functions when \mjseqn{\delta = 6}.} \if{html}{\figure{selmodel-preston.png}{options: width=600}} \if{latex}{\figure{selmodel-preston.pdf}{options: width=4in}} Here, these functions are extended to allow for the possibility that \mjseqn{w(p_i) = 1} for p-values below a certain significance threshold denoted by \mjseqn{\alpha} (e.g., to model the case that the relative likelihood of selection is equally high for all significant studies but decreases monotonically for p-values above the significance threshold). To fit such a selection model, one should specify the \mjseqn{\alpha} value (with \mjseqn{0 < \alpha < 1}) via the \code{steps} argument. There must be at least one observed p-value below and above the chosen threshold to fit these models. \if{html,latex}{The figure below shows some examples of the relative likelihood of selection when \code{steps=.05}.} \if{html}{\figure{selmodel-preston-step.png}{options: width=600}} \if{latex}{\figure{selmodel-preston-step.pdf}{options: width=4in}} Preston et al. (2004) also suggested selection functions where the relative likelihood of selection depends not only on the p-value, but also on the precision (e.g., standard error) of the estimate (if two studies have similar p-values, it may be plausible to assume that the larger / more precise study has a higher probability of selection). These selection functions, plus the corresponding power function, are given by: \tabular{lllll}{ \bold{name} \tab \ics \tab \bold{\code{type}} \tab \ics \tab \bold{selection function} \cr half-normal \tab \ics \tab \code{"halfnorm"} \tab \ics \tab \mjseqn{w(p_i) = \exp(-\delta \times \mathrm{prec}_i \times p_i^2)} \cr negative-exponential \tab \ics \tab \code{"negexp"} \tab \ics \tab \mjseqn{w(p_i) = \exp(-\delta \times \mathrm{prec}_i \times p_i)} \cr logistic \tab \ics \tab \code{"logistic"} \tab \ics \tab \mjseqn{w(p_i) = 2 \times \exp(-\delta \times \mathrm{prec}_i \times p_i) / (1 + \exp(-\delta \times \mathrm{prec}_i \times p_i))} \cr power \tab \ics \tab \code{"power"} \tab \ics \tab \mjseqn{w(p_i) = (1-p_i)^{\delta \times \mathrm{prec}_i}} } where \mjseqn{\mathrm{prec}_i = \sqrt{v_i}} (i.e., the standard error of the \mjseqn{i}th study) according to Preston et al. (2004). Here, this idea is generalized to allow the user to specify the specific measure of precision to use (via the \code{prec} argument). Possible options are: \itemize{ \item \code{prec="sei"} for the standard errors, \item \code{prec="vi"} for the sampling variances, \item \code{prec="ninv"} for the inverse of the sample sizes, \item \code{prec="sqrtninv"} for the inverse square root of the sample sizes. } Using some function of the sample sizes as a measure of precision is only possible when information about the sample sizes is actually stored within the object passed to the \code{selmodel} function. See \sQuote{Note}. Note that \mjseqn{\mathrm{prec}_i} is really a measure of imprecision (with higher values corresponding to lower precision).
Also, regardless of the specific measure chosen, the values are actually rescaled with \mjseqn{\mathrm{prec}_i = \mathrm{prec}_i / \max(\mathrm{prec}_i)} inside of the function, such that \mjseqn{\mathrm{prec}_i = 1} for the least precise study and \mjseqn{\mathrm{prec}_i < 1} for the remaining studies (the rescaling does not actually change the fit of the model, it only helps to improve the stability of model fitting algorithm). \if{html,latex}{The figure below shows some examples of the relative likelihood of selection using these selection functions for two different precision values.} \if{html}{\figure{selmodel-preston-prec.png}{options: width=600}} \if{latex}{\figure{selmodel-preston-prec.pdf}{options: width=4in}} One can also use the \code{steps} argument as described above in combination with these selection functions (studies with p-values below the chosen threshold then have \mjseqn{w(p_i) = 1} regardless of their exact p-value or precision). } \subsection{Negative Exponential Power Selection Model}{ As an extension of the half-normal and negative-exponential models, one can also choose \code{type="negexppow"} for a \sQuote{negative exponential power selection model}. The selection function is then given by \mjsdeqn{w(p_i) = \exp(-\delta_1 \times p_i^{1/\delta_2})} where \mjseqn{\delta_1 \ge 0} and \mjseqn{\delta_2 \ge 0} (see Begg & Mazumdar, 1994, although here a different parameterization is used, such that increasing \mjseqn{\delta_2} leads to more severe selection). \if{html,latex}{The figure below shows some examples of this selection function when holding \mjseqn{\delta_1} constant while increasing \mjseqn{\delta_2}.} \if{html}{\figure{selmodel-negexppow.png}{options: width=600}} \if{latex}{\figure{selmodel-negexppow.pdf}{options: width=4in}} This model affords greater flexibility in the shape of the selection function, but requires the estimation of the additional power parameter (the half-normal and negative-exponential models are therefore special cases when fixing \mjseqn{\delta_2} to 0.5 or 1, respectively). \mjeqn{\mbox{H}_0{:}\; \delta_1 = 0}{H_0: \delta_1 = 0} again implies no selection, but so does \mjeqn{\mbox{H}_0{:}\; \delta_2 = 0}{H_0: \delta_2 = 0}. One can again use the \code{steps} argument to specify a single significance threshold, \mjseqn{\alpha}, so that \mjseqn{w(p_i) = 1} for p-values below this threshold and otherwise \mjseqn{w(p_i)} follows the selection function as given above. One can also use the \code{prec} argument to specify a measure of precision in combination with this model, which leads to the selection function \mjsdeqn{w(p_i) = \exp(-\delta_1 \times \mathrm{prec}_i \times p_i^{1/\delta_2})} and hence is the logical extension of the negative exponential power selection model that also incorporates some measure of precision into the selection process. } \subsection{Step Function Selection Models}{ When \code{type="stepfun"}, the function can be used to fit \sQuote{step function models} as described by Iyengar and Greenhouse (1988), Hedges (1992), Vevea and Hedges (1995), and Vevea and Woods (2005). For these models, one must specify one or multiple values via the \code{steps} argument, which define intervals in which the relative likelihood of selection is constant. 
Let \mjsdeqn{\alpha_1 < \alpha_2 < \ldots < \alpha_c} denote these cutpoints sorted in increasing order, with the constraint that \mjseqn{\alpha_c = 1} (if the highest value specified via \code{steps} is not 1, the function will automatically add this cutpoint), and define \mjseqn{\alpha_0 = 0}. The selection function is then given by \mjseqn{w(p_i) = \delta_j} if \mjseqn{\alpha_{j-1} < p_i \le \alpha_j}. To make the model identifiable, we set \mjseqn{\delta_1 = 1}. The \mjseqn{\delta_j} values therefore denote the likelihood of selection in the various intervals relative to the interval for p-values between 0 and \mjseqn{\alpha_1}. Hence, the null hypothesis \mjeqn{\mbox{H}_0{:}\; \delta_j = 1}{H_0: \delta_j = 1} for \mjseqn{j = 1, \ldots, c} implies no selection. For example, if \code{steps=c(.05, .10, .50, 1)}, then \mjseqn{\delta_2} is the likelihood of selection for p-values between .05 and .10, \mjseqn{\delta_3} is the likelihood of selection for p-values between .10 and .50, and \mjseqn{\delta_4} is the likelihood of selection for p-values between .50 and 1 relative to the likelihood of selection for p-values between 0 and .05. \if{html,latex}{The figure below shows the corresponding selection function for some arbitrarily chosen \mjseqn{\delta_j} values.} \if{html}{\figure{selmodel-stepfun.png}{options: width=600}} \if{latex}{\figure{selmodel-stepfun.pdf}{options: width=4in}} There must be at least one observed p-value within each interval to fit this model. If this is not the case, an error will be issued (setting \code{verbose=TRUE} provides information about the number of p-values falling into each interval). When specifying a single cutpoint in the context of a random-effects model, this model is sometimes called the \sQuote{three-parameter selection model} (3PSM), corresponding to the parameters \mjseqn{\mu}, \mjseqn{\tau^2}, and \mjseqn{\delta_2} (e.g., Carter et al., 2019; McShane et al., 2016; Pustejovsky & Rodgers, 2019). The same idea but in the context of a fixed-effects model was also described by Iyengar and Greenhouse (1988). Note that when \code{alternative="greater"} or \code{alternative="less"} (i.e., when we assume that the relative likelihood of selection is not only related to the p-values of the studies, but also the directionality of the outcomes), then it would usually make sense to divide conventional levels of significance (e.g., .05) by 2 before passing these values to the \code{steps} argument. For example, if we think that studies were selected for positive outcomes that are significant at two-tailed \mjseqn{\alpha = .05}, then we should use \code{alternative="greater"} in combination with \code{steps=c(.025, 1)}. One of the challenges when fitting this model with many cutpoints is the large number of parameters that need to be estimated (which is especially problematic when the number of studies is small). An alternative approach suggested by Vevea and Woods (2005) is to fix the \mjseqn{\delta_j} values to some a priori chosen values instead of estimating them. One can then conduct a sensitivity analysis by examining the results (e.g., the estimates of \mjseqn{\mu} and \mjseqn{\tau^2} in a random-effects model) for a variety of different sets of \mjseqn{\delta_j} values (reflecting more or less severe forms of selection). This can be done by specifying the \mjseqn{\delta_j} values via the \code{delta} argument. 
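For example, a sensitivity analysis along these lines could be sketched as follows (assuming \code{res} is a fitted \code{"rma.uni"} object and one-sided selection with cutpoints at .025, .10, .50, and 1; the fixed \mjseqn{\delta_j} values below are arbitrary and purely illustrative): \preformatted{### step function model with the delta values fixed a priori
sel <- selmodel(res, type="stepfun", alternative="greater",
                steps=c(.025, .10, .50, 1),
                delta=c(1, 0.8, 0.6, 0.5))
sel}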
Table 1 in Vevea and Woods (2005) provides some illustrative examples of moderate and severe selection functions for one- and two-tailed selection. The code below creates a data frame that contains these functions. \preformatted{tab <- data.frame( steps = c(0.005, 0.01, 0.05, 0.10, 0.25, 0.35, 0.50, 0.65, 0.75, 0.90, 0.95, 0.99, 0.995, 1), delta.mod.1 = c(1, 0.99, 0.95, 0.80, 0.75, 0.65, 0.60, 0.55, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50), delta.sev.1 = c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.10, 0.10, 0.10, 0.10), delta.mod.2 = c(1, 0.99, 0.95, 0.90, 0.80, 0.75, 0.60, 0.60, 0.75, 0.80, 0.90, 0.95, 0.99, 1.00), delta.sev.2 = c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.25, 0.25, 0.50, 0.60, 0.75, 0.90, 0.99, 1.00))} \if{html,latex}{The figure below shows the corresponding selection functions.} \if{html}{\figure{selmodel-stepfun-fixed.png}{options: width=600}} \if{latex}{\figure{selmodel-stepfun-fixed.pdf}{options: width=4in}} These four functions are \dQuote{merely examples and should not be regarded as canonical} (Vevea & Woods, 2005). } } \value{ An object of class \code{c("rma.uni","rma")}. The object is a list containing the same components as a regular \code{c("rma.uni","rma")} object, but the parameter estimates are based on the selection model. Most importantly, the following elements are modified based on the selection model: \item{beta}{estimated coefficients of the model.} \item{se}{standard errors of the coefficients.} \item{zval}{test statistics of the coefficients.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bound of the confidence intervals for the coefficients.} \item{ci.ub}{upper bound of the confidence intervals for the coefficients.} \item{vb}{variance-covariance matrix of the estimated coefficients.} \item{tau2}{estimated amount of (residual) heterogeneity. Always \code{0} when \code{method="FE"}.} \item{se.tau2}{standard error of the estimated amount of (residual) heterogeneity.} In addition, the object contains the following additional elements: \item{delta}{estimated selection model parameter(s).} \item{se.delta}{corresponding standard error(s).} \item{zval.delta}{corresponding test statistic(s).} \item{pval.delta}{corresponding p-value(s).} \item{ci.lb.delta}{lower bound of the confidence intervals for the parameter(s).} \item{ci.ub.delta}{upper bound of the confidence intervals for the parameter(s).} \item{LRT}{test statistic of the likelihood ratio test for the selection model parameter(s).} \item{LRTdf}{degrees of freedom for the likelihood ratio test.} \item{LRTp}{p-value for the likelihood ratio test.} \item{LRT.tau2}{test statistic of the likelihood ratio test for testing \mjeqn{\mbox{H}_0{:}\; \tau^2 = 0}{H_0: \tau^2 = 0} (\code{NA} when fitting a fixed-effects model).} \item{LRTp.tau2}{p-value for the likelihood ratio test.} \item{\dots}{some additional elements/values.} } \section{Methods}{ The results of the fitted model are formatted and printed with the \code{\link{print.rma.uni}} function. The estimated selection function can be drawn with \code{\link{plot.rma.uni.selmodel}}. The \code{\link{profile.rma.uni.selmodel}} function can be used to obtain a plot of the log-likelihood as a function of \mjseqn{\tau^2} and/or the selection model parameter(s) of the model. Corresponding confidence intervals can be obtained with the \code{\link{confint.rma.uni.selmodel}} function. } \note{ Model fitting is done via numerical optimization over the model parameters. By default, \code{\link{optim}} is used for the optimization. 
One can also choose a different optimizer via the \code{control} argument (e.g., \code{control=list(optimizer="nlminb")}). When using \code{\link{optim}}, one can set the particular method via the \code{optmethod} argument (e.g., \code{control=list(optimizer="optim", optmethod="BFGS")}, which is the default). Besides \code{\link{optim}} and \code{\link{nlminb}}, one can also choose one of the optimizers from the \code{minqa} package (i.e., \code{\link[minqa]{uobyqa}}, \code{\link[minqa]{newuoa}}, or \code{\link[minqa]{bobyqa}}), one of the (derivative-free) algorithms from the \code{\link[nloptr]{nloptr}} package, the Newton-type algorithm implemented in \code{\link{nlm}}, the various algorithms implemented in the \code{dfoptim} package (\code{\link[dfoptim]{hjk}} for the Hooke-Jeeves, \code{\link[dfoptim]{nmk}} for the Nelder-Mead, and \code{\link[dfoptim]{mads}} for the Mesh Adaptive Direct Searches (MADS) algorithm), the quasi-Newton type optimizer \code{\link[ucminf]{ucminf}} from the package of the same name, or the parallelized version of the L-BFGS-B algorithm implemented in \code{\link[optimParallel]{optimParallel}} from the package of the same name. The optimizer name must be given as a character string (i.e., in quotes). Additional control parameters can be specified via the \code{control} argument (e.g., \code{control=list(maxit=1000, reltol=1e-8)}). For \code{\link[nloptr]{nloptr}}, the default is to use the BOBYQA implementation from that package with a relative convergence criterion of \code{1e-8} on the function value (i.e., log-likelihood), but this can be changed via the \code{algorithm} and \code{ftol_rel} arguments (e.g., \code{control=list(optimizer="nloptr", algorithm="NLOPT_LN_SBPLX", ftol_rel=1e-6)}). For \code{\link[optimParallel]{optimParallel}}, the control argument \code{ncpus} can be used to specify the number of cores to use for the parallelization (e.g., \code{control=list(optimizer="optimParallel", ncpus=2)}). With \code{parallel::detectCores()}, one can check on the number of available cores on the local machine. All selection models (except for \code{type="stepfun"}) require repeated evaluations of an integral, which is done via adaptive quadrature as implemented in the \code{\link{integrate}} function. One can adjust the arguments of the \code{integrate} function via the control element \code{intCtrl}, which is a list of named arguments (e.g., \code{control = list(intCtrl = list(rel.tol=1e-4, subdivisions=100))}). The starting values for the fixed effects, the \mjseqn{\tau^2} value (only relevant in random/mixed-effects models), and the \mjseqn{\delta} parameter(s) are chosen automatically by the function, but one can also set the starting values manually via the \code{control} argument by specifying a vector of the appropriate length for \code{beta.init}, a single value for \code{tau2.init}, and a vector of the appropriate length for \code{delta.init}. By default, the \mjseqn{\delta} parameter(s) are constrained to a certain range, which improves the stability of the optimization algorithm. For all models, the maximum is set to \code{100} and the minimum to \code{0} (except for \code{type="beta"}, where the minimum for both parameters is \code{1e-05}). These defaults can be changed via the \code{control} argument by specifying a vector of the appropriate length for \code{delta.min} and/or \code{delta.max}. A difficulty with fitting the beta selection model (i.e., \code{type="beta"}) is the behavior of \mjseqn{w(p_i)} when \mjseqn{p_i = 0} or \mjseqn{p_i = 1}.
When \mjseqn{\delta_1 < 1} or \mjseqn{\delta_2 < 1}, then this leads to selection weights equal to infinity, which causes problems when computing the likelihood function. Following Citkowicz and Vevea (2017), this problem can be avoided by censoring p-values too close to 0 or 1. The specific censoring point can be set via the \code{pval.min} element of the \code{control} argument. The default for this selection model is \code{control=list(pval.min=1e-5)}. A similar issue arises for the power selection model (i.e., \code{type="power"}) when \mjseqn{p_i = 1}. Again, \code{pval.min=1e-5} is used to circumvent this issue. For all other selection models, the default is \code{pval.min=0}. The variance-covariance matrix corresponding to the estimates of the fixed effects, the \mjseqn{\tau^2} value (only relevant in random/mixed-effects models), and the \mjseqn{\delta} parameter(s) is obtained by inverting the Hessian, which is numerically approximated using the \code{\link[numDeriv]{hessian}} function. This may fail, leading to \code{NA} values for the standard errors and hence test statistics, p-values, and confidence interval bounds. One can set the control argument \code{hessianCtrl} to a list of named arguments to be passed on to the \code{method.args} argument of the \code{\link[numDeriv]{hessian}} function (the default is \code{control=list(hessianCtrl=list(r=6))}). Information on the progress of the optimization algorithm can be obtained by setting \code{verbose=TRUE} (this won't work when using parallelization). This option is useful to determine how the model fitting is progressing. One can also set \code{verbose} to an integer (\code{verbose=2} yields even more information and \code{verbose=3} also shows the progress visually by drawing the selection function as the optimization proceeds). For selection functions where the \code{prec} argument is relevant, using (a function of) the sample sizes as the measure of precision (i.e., \code{prec="ninv"} or \code{prec="sqrtninv"}) is only possible when information about the sample sizes is actually stored within the object passed to the \code{selmodel} function. That should automatically be the case when the observed effect sizes or outcomes were computed with the \code{\link{escalc}} function or when the observed effect sizes or outcomes were computed within the model fitting function. On the other hand, this will not be the case when \code{\link{rma.uni}} was used together with the \code{yi} and \code{vi} arguments and the \code{yi} and \code{vi} values were \emph{not} computed with \code{\link{escalc}}. In that case, it is still possible to pass information about the sample sizes to the \code{\link{rma.uni}} function (e.g., use \code{rma.uni(yi, vi, ni=ni, data=dat)}, where data frame \code{dat} includes a variable called \code{ni} with the sample sizes). Finally, the automatic rescaling of the chosen precision measure can be switched off by setting \code{scaleprec=FALSE}. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Begg, C. B., & Mazumdar, M. (1994). Operating characteristics of a rank correlation test for publication bias. \emph{Biometrics}, \bold{50}(4), 1088--1101. \verb{https://doi.org/10.2307/2533446} Carter, E. C., \enc{Schönbrodt}{Schoenbrodt}, F. D., Gervais, W. M., & Hilgard, J. (2019). Correcting for bias in psychology: A comparison of meta-analytic methods. \emph{Advances in Methods and Practices in Psychological Science}, \bold{2}(2), 115--144.
\verb{https://doi.org/10.1177/2515245919847196} Citkowicz, M., & Vevea, J. L. (2017). A parsimonious weight function for modeling publication bias. \emph{Psychological Methods}, \bold{22}(1), 28--41. \verb{https://doi.org/10.1037/met0000119} Hedges, L. V. (1992). Modeling publication selection effects in meta-analysis. \emph{Statistical Science}, \bold{7}(2), 246--255. \verb{https://doi.org/10.1214/ss/1177011364} Iyengar, S., & Greenhouse, J. B. (1988). Selection models and the file drawer problem. \emph{Statistical Science}, \bold{3}(1), 109--117. \verb{https://doi.org/10.1214/ss/1177013012} McShane, B. B., Bockenholt, U., & Hansen, K. T. (2016). Adjusting for publication bias in meta-analysis: An evaluation of selection methods and some cautionary notes. \emph{Perspectives on Psychological Science}, \bold{11}(5), 730--749. \verb{https://doi.org/10.1177/1745691616662243} Preston, C., Ashby, D., & Smyth, R. (2004). Adjusting for publication bias: Modelling the selection process. \emph{Journal of Evaluation in Clinical Practice}, \bold{10}(2), 313--322. \verb{https://doi.org/10.1111/j.1365-2753.2003.00457.x} Pustejovsky, J. E., & Rodgers, M. A. (2019). Testing for funnel plot asymmetry of standardized mean differences. \emph{Research Synthesis Methods}, \bold{10}(1), 57--71. \verb{https://doi.org/10.1002/jrsm.1332} Vevea, J. L., & Hedges, L. V. (1995). A general linear model for estimating effect size in the presence of publication bias. \emph{Psychometrika}, \bold{60}(3), 419--435. \verb{https://doi.org/10.1007/BF02294384} Vevea, J. L., & Woods, C. M. (2005). Publication bias in research synthesis: Sensitivity analysis using a priori weight functions. \emph{Psychological Methods}, \bold{10}(4), 428--443. \verb{https://doi.org/10.1037/1082-989X.10.4.428} } \seealso{ \code{\link{rma.uni}} } \examples{ ############################################################################ ### example from Citkowicz and Vevea (2017) for beta selection model # copy data into 'dat' and examine data dat <- dat.baskerville2012 dat # fit random-effects model res <- rma(smd, se^2, data=dat, method="ML", digits=3) res # funnel plot funnel(res, ylim=c(0,0.6), xlab="Standardized Mean Difference") # fit beta selection model \dontrun{ sel <- selmodel(res, type="beta") sel # plot selection function plot(sel, ylim=c(0,40))} # fit mixed-effects meta-regression model with 'blind' dummy variable as moderator res <- rma(smd, se^2, data=dat, mods = ~ blind, method="ML", digits=3) res # predicted average effect for studies that do not and that do use blinding predict(res, newmods=c(0,1)) # fit beta selection model \dontrun{ sel <- selmodel(res, type="beta") sel predict(sel, newmods=c(0,1))} ############################################################################ ### example from Preston et al. 
(2004) # copy data into 'dat' and examine data dat <- dat.hahn2001 dat ### meta-analysis of (log) odds ratios using the Mantel-Haenszel method res <- rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, digits=2, slab=study) res # calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, drop00=TRUE) dat # fit fixed-effects model res <- rma(yi, vi, data=dat, method="FE") # predicted odds ratio (with 95\% CI) predict(res, transf=exp, digits=2) # funnel plot funnel(res, atransf=exp, at=log(c(0.01,0.1,1,10,100)), ylim=c(0,2)) # fit half-normal, negative-exponential, logistic, and power selection models \dontrun{ sel1 <- selmodel(res, type="halfnorm", alternative="less") sel2 <- selmodel(res, type="negexp", alternative="less") sel3 <- selmodel(res, type="logistic", alternative="less") sel4 <- selmodel(res, type="power", alternative="less") # plot selection functions plot(sel1) plot(sel2, add=TRUE, col="blue") plot(sel3, add=TRUE, col="red") plot(sel4, add=TRUE, col="green") # show estimates of delta (and corresponding SEs) tab <- data.frame(delta = c(sel1$delta, sel2$delta, sel3$delta, sel4$delta), se = c(sel1$se.delta, sel2$se.delta, sel3$se.delta, sel4$se.delta)) rownames(tab) <- c("Half-normal", "Negative-exponential", "Logistic", "Power") round(tab, 2) # predicted odds ratios (with 95\% CI) predict(res, transf=exp, digits=2) predict(sel1, transf=exp, digits=2) predict(sel2, transf=exp, digits=2) predict(sel3, transf=exp, digits=2) predict(sel4, transf=exp, digits=2)} # fit selection models including standard error as precision measure (note: using # scaleprec=FALSE here since Preston et al. (2004) did not use the rescaling) \dontrun{ sel1 <- selmodel(res, type="halfnorm", prec="sei", alternative="less", scaleprec=FALSE) sel2 <- selmodel(res, type="negexp", prec="sei", alternative="less", scaleprec=FALSE) sel3 <- selmodel(res, type="logistic", prec="sei", alternative="less", scaleprec=FALSE) sel4 <- selmodel(res, type="power", prec="sei", alternative="less", scaleprec=FALSE) # show estimates of delta (and corresponding SEs) tab <- data.frame(delta = c(sel1$delta, sel2$delta, sel3$delta, sel4$delta), se = c(sel1$se.delta, sel2$se.delta, sel3$se.delta, sel4$se.delta)) rownames(tab) <- c("Half-normal", "Negative-exponential", "Logistic", "Power") round(tab, 2) # predicted odds ratio (with 95\% CI) predict(res, transf=exp, digits=2) predict(sel1, transf=exp, digits=2) predict(sel2, transf=exp, digits=2) predict(sel3, transf=exp, digits=2) predict(sel4, transf=exp, digits=2)} ############################################################################ ### meta-analysis on the effect of environmental tobacco smoke on lung cancer risk # copy data into 'dat' and examine data dat <- dat.hackshaw1998 dat # fit random-effects model res <- rma(yi, vi, data=dat, method="ML") res # funnel plot funnel(res, atransf=exp, at=log(c(0.25,0.5,1,2,4,8)), ylim=c(0,0.8)) # step function selection model \dontrun{ sel <- selmodel(res, type="stepfun", alternative="greater", steps=c(.025,.10,.50,1)) sel # plot selection function plot(sel)} ############################################################################ ### validity of student ratings example from Vevea & Woods (2005) # copy data into 'dat' and examine data dat <- dat.cohen1981 dat # calculate r-to-z transformed correlations and corresponding sampling variances dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat[c(1,4,5)]) dat # fit random-effects model res <- rma(yi,
vi, data=dat, method="ML", digits=3) res # predicted average correlation (with 95\% CI) predict(res, transf=transf.ztor) # funnel plot funnel(res, ylim=c(0,0.4)) # selection functions from Vevea & Woods (2005) tab <- data.frame( steps = c(0.005, 0.01, 0.05, 0.10, 0.25, 0.35, 0.50, 0.65, 0.75, 0.90, 0.95, 0.99, 0.995, 1), delta.mod.1 = c(1, 0.99, 0.95, 0.80, 0.75, 0.65, 0.60, 0.55, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50), delta.sev.1 = c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.10, 0.10, 0.10, 0.10), delta.mod.2 = c(1, 0.99, 0.95, 0.90, 0.80, 0.75, 0.60, 0.60, 0.75, 0.80, 0.90, 0.95, 0.99, 1.00), delta.sev.2 = c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.25, 0.25, 0.50, 0.60, 0.75, 0.90, 0.99, 1.00)) # apply step function model with a priori chosen selection weights \dontrun{ sel <- lapply(tab[-1], function(delta) selmodel(res, type="stepfun", steps=tab$steps, delta=delta)) # estimates (transformed correlation) and tau^2 values sav <- data.frame(estimate = round(c(res$beta, sapply(sel, function(x) x$beta)), 2), varcomp = round(c(res$tau2, sapply(sel, function(x) x$tau2)), 3)) sav} ############################################################################ } \keyword{models} metafor/man/tes.Rd0000644000176200001440000002432114055736403013555 0ustar liggesusers\name{tes} \alias{tes} \alias{tes.rma} \alias{tes.default} \alias{print.tes} \title{Test of Excess Significance} \description{ Function to conduct the test of excess significance. \loadmathjax } \usage{ tes(x, \dots) \method{tes}{rma}(x, H0=0, alternative="two.sided", alpha=.05, test, tes.alternative="greater", progbar=TRUE, tes.alpha=.10, digits, \dots) \method{tes}{default}(x, vi, sei, subset, H0=0, alternative="two.sided", alpha=.05, theta, tau2, test, tes.alternative="greater", progbar=TRUE, tes.alpha=.10, digits, \dots) \method{print}{tes}(x, digits=x$digits, \dots) } \arguments{ \emph{These arguments pertain to data input:} \item{x}{an object of class \code{"rma"} or a vector with the observed effect sizes or outcomes.} \item{vi}{vector with the corresponding sampling variances.} \item{sei}{vector with the corresponding standard errors (note: only one of the two, \code{vi} or \code{sei}, needs to be specified).} \item{subset}{optional (logical or numeric) vector to specify the subset of studies that should be included.} \emph{These arguments pertain to the tests of the observed effect sizes or outcomes:} \item{H0}{numeric value to specify the value of the effect size or outcome under the null hypothesis (the default is 0).} \item{alternative}{character string to specify the sidedness of the hypothesis when testing the observed effect sizes or outcomes. Possible options are \code{"two.sided"} (the default), \code{"greater"}, or \code{"less"}. Can be abbreviated.} \item{alpha}{alpha level for testing the observed effect sizes or outcomes (the default is .05).} \emph{These arguments pertain to the power of the tests:} \item{theta}{numeric value to specify the value of the true effect size or outcome under the alternative hypothesis. If unspecified, it will be estimated based on the data or the value is taken from the \code{"rma"} object.} \item{tau2}{numeric value to specify the amount of heterogeneity in the true effect sizes or outcomes. 
If unspecified, the true effect sizes or outcomes are assumed to be homogeneous or the value is taken from the \code{"rma"} object.} \emph{These arguments pertain to the test of excess significance:} \item{test}{character string to specify the type of test to use for conducting the test of excess significance. Possible options are \code{"chi2"}, \code{"binom"}, or \code{"exact"}. Can be abbreviated. If unspecified, the function chooses the type of test based on the data.} \item{tes.alternative}{character string to specify the sidedness of the hypothesis for the test of excess significance. Possible options are \code{"greater"} (the default), \code{"two.sided"}, or \code{"less"}. Can be abbreviated.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{TRUE}). Only relevant when conducting an exact test.} \item{tes.alpha}{alpha level for the test of excess significance (the default is .10). Only relevant for finding the \sQuote{limit estimate}.} \emph{Miscellaneous arguments:} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is 4.} \item{\dots}{other arguments.} } \details{ The function carries out the test of excess significance described by Ioannidis and Trikalinos (2007). The test can be used to examine whether the observed number of significant findings is greater than the number of significant findings expected given the power of the tests. An overabundance of significant tests may suggest that the collection of studies is not representative of all studies conducted on a particular topic. One can either pass an object of class \code{"rma"} to the function or a vector with the observed effect sizes or outcomes (via \code{x}) and the corresponding sampling variances via \code{vi} (or the standard errors via \code{sei}). The observed effect sizes or outcomes are tested for significance based on a standard Wald-type test, that is, by comparing \mjdeqn{z_i = \frac{y_i - \mbox{H}_0}{\sqrt{v_i}}}{z_i = (y_i - H_0) / sqrt(v_i)} against the appropriate critical value(s) of a standard normal distribution (e.g., \mjseqn{\pm 1.96} for \code{alternative="two.sided"} and \code{alpha=.05}, which are the defaults). Let \mjseqn{O} denote the observed number of significant tests. Given a particular value for the true effect or outcome denoted by \mjseqn{\theta} (which, if it is unspecified, is determined by computing the inverse-variance weighted average of the observed effect sizes or outcomes or the value is taken from the model object), let \mjseqn{1-\beta_i} denote the power of the \mjseqn{i}th test (where \mjseqn{\beta_i} denotes the Type II error probability). If \mjseqn{\tau^2 > 0}, let \mjseqn{1-\beta_i} denote the expected power (computed based on integrating the power over a normal distribution with mean \mjseqn{\theta} and variance \mjseqn{\tau^2}). Let \mjseqn{E = \sum_{i=1}^k (1-\beta_i)} denote the expected number of significant tests. The test of excess significance then tests if \mjseqn{O} is significantly greater (if \code{tes.alternative="greater"}) than \mjseqn{E}. This can be done using Pearson's chi-squared test (if \code{test="chi2"}), a binomial test (if \code{test="binom"}), or an exact test (if \code{test="exact"}). The latter is described in Francis (2013).
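As a rough sketch of this logic (not the actual implementation; assuming a vector \code{yi} of observed outcomes with sampling variances \code{vi}, the default two-sided tests with \code{alpha=.05} and \code{H0=0}, and no heterogeneity), \mjseqn{O} and \mjseqn{E} could be computed manually with: \preformatted{zi    <- yi / sqrt(vi)                     # Wald-type test statistics under H0: theta = 0
O     <- sum(abs(zi) >= qnorm(.975))       # observed number of significant tests
theta <- sum(yi/vi) / sum(1/vi)            # inverse-variance weighted average
pow   <- pnorm(qnorm(.025), mean=theta/sqrt(vi)) +
         pnorm(qnorm(.975), mean=theta/sqrt(vi), lower.tail=FALSE)
E     <- sum(pow)                          # expected number of significant tests}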
If argument \code{test} is unspecified, the default is to do an exact test if the number of elements in the sum that needs to be computed is less than or equal to \code{10^6} and to do a chi-square test otherwise. One can also iteratively find the value of \mjseqn{\theta} such that the p-value of the test of excess significance is equal to \code{tes.alpha} (which is \code{.10} by default). The resulting value is called the \sQuote{limit estimate} and is denoted \mjeqn{\theta_{lim}}{\theta_lim} by Ioannidis and Trikalinos (2007). Note that the limit estimate is not computable if the p-value is larger than \code{tes.alpha} even if \mjeqn{\theta = \mbox{H}_0}{\theta = H_0}. } \value{ An object of class \code{"tes"}. The object is a list containing the following components: \item{k}{the number of studies included in the analysis.} \item{O}{the observed number of significant tests.} \item{E}{the expected number of significant tests.} \item{OEratio}{the ratio of O over E.} \item{test}{the type of test conducted.} \item{pval}{the p-value of the test of excess significance.} \item{power}{the (estimated) power of the tests.} \item{sig}{logical vector indicating which tests were significant.} \item{theta}{the value of \mjseqn{\theta} used for computing the power of the tests.} \item{theta.lim}{the \sQuote{limit estimate} (i.e., \mjeqn{\theta_{lim}}{\theta_lim}).} \item{\dots}{some additional elements/values.} The results are formatted and printed with the \code{print.tes} function. } \note{ When \code{tes.alternative="greater"} (the default), then the function tests if \mjseqn{O} is significantly greater than \mjseqn{E} and hence this is indeed a test of excess significance. When \code{tes.alternative="two.sided"}, then the function tests if \mjseqn{O} differs significantly from \mjseqn{E} in either direction and hence it would be more apt to describe this as a test of (in)consistency (between \mjseqn{O} and \mjseqn{E}). Finally, one can also set \code{tes.alternative="less"}, in which case the function tests if \mjseqn{O} is significantly lower than \mjseqn{E}, which could be considered a test of excess non-significance. When \code{tes.alternative="two.sided"}, one can actually compute two limit estimates. The function attempts to compute both. The function computes the significance and power of the studies based on Wald-type tests regardless of the effect size or outcome measure used as input. This works as an adequate approximation as long as the within-study sample sizes are not too small. Note that the test is not a test for publication bias but a test whether the set of studies includes an unusual number of significant findings given the power of the studies. The general usefulness of the test and its usefulness under particular circumstances (e.g., when there is substantial heterogeneity in the true effect sizes or outcomes) has been the subject of considerable debate. See Francis (2013) and the commentaries on this article in the same issue of the journal. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Francis, G. (2013). Replication, statistical consistency, and publication bias. \emph{Journal of Mathematical Psychology}, \bold{57}(5), 153--169. \verb{https://doi.org/10.1016/j.jmp.2013.02.003} Ioannidis, J. P. A., & Trikalinos, T. A. (2007). An exploratory test for an excess of significant findings. \emph{Clinical Trials}, \bold{4}(3), 245--253. \verb{https://doi.org/10.1177/1740774507079441} Viechtbauer, W. (2010). 
Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{ranktest}}, \code{\link{regtest}}, \code{\link{trimfill}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=x.a, n1i=n.a, ci=x.p, n2i=n.p, data=dat.dorn2007) ### conduct test of excess significance (using test="chi2" to speed things up) tes(dat$yi, dat$vi, test="chi2") ### same as fitting a FE model and then passing the object to the function res <- rma(yi, vi, data=dat, method="FE") tes(res, test="chi2") ### illustrate limit estimate (value of theta where p-value of test is equal to tes.alpha) thetas <- seq(0,1,length=101) pvals <- sapply(thetas, function(theta) tes(dat$yi, dat$vi, test="chi2", theta=theta)$pval) plot(thetas, pvals, type="o", pch=19, ylim=c(0,1)) sav <- tes(dat$yi, dat$vi, test="chi2") abline(h=sav$tes.alpha, lty="dotted") abline(v=sav$theta.lim, lty="dotted") ### examine significance of test as a function of alpha (to examine 'significance chasing') alphas <- seq(.01,.99,length=101) pvals <- sapply(alphas, function(alpha) tes(dat$yi, dat$vi, test="chi2", alpha=alpha)$pval) plot(alphas, pvals, type="o", pch=19, ylim=c(0,1)) abline(v=.05, lty="dotted") abline(h=.10, lty="dotted") } \keyword{htest} metafor/man/plot.rma.Rd0000644000176200001440000000461214055736403014517 0ustar liggesusers\name{plot.rma} \alias{plot.rma} \alias{plot.rma.uni} \alias{plot.rma.mh} \alias{plot.rma.peto} \alias{plot.rma.glmm} \title{Plot Method for 'rma' Objects} \description{ Plot method for objects of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, and \code{"rma.glmm"}. } \usage{ \method{plot}{rma.uni}(x, qqplot=FALSE, \dots) \method{plot}{rma.mh}(x, qqplot=FALSE, \dots) \method{plot}{rma.peto}(x, qqplot=FALSE, \dots) \method{plot}{rma.glmm}(x, qqplot=FALSE, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}, \code{"rma.mh"}, or \code{"rma.peto"}. The method is not yet implemented for objects of class \code{"rma.glmm"}.} \item{qqplot}{logical to specify whether a normal QQ plot should be drawn (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ Four plots are produced. If the model does not contain any moderators, then a forest plot, funnel plot, radial plot, and a plot of the standardized residuals is provided. If \code{qqplot=TRUE}, the last plot is replaced by a normal QQ plot of the standardized residuals. If the model contains moderators, then a forest plot, funnel plot, plot of the standardized residuals against the fitted values, and a plot of the standardized residuals is provided. If \code{qqplot=TRUE}, the last plot is replaced by a normal QQ plot of the standardized residuals. } \note{ If the number of studies is large, the forest plot may become difficult to read due to the small font size. Stretching the plotting device vertically should provide more space. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{forest}}, \code{\link{funnel}}, \code{\link{radial}}, \code{\link{qqnorm.rma.uni}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res <- rma(yi, vi, data=dat) ### plot results plot(res, qqplot=TRUE) ### fit mixed-effects model with absolute latitude and publication year as moderators res <- rma(yi, vi, mods = ~ ablat + year, data=dat) ### plot results plot(res, qqplot=TRUE) } \keyword{hplot} metafor/man/leave1out.Rd0000644000176200001440000000752114055736403014672 0ustar liggesusers\name{leave1out} \alias{leave1out} \alias{leave1out.rma.uni} \alias{leave1out.rma.mh} \alias{leave1out.rma.peto} \title{Leave-One-Out Diagnostics for 'rma' Objects} \description{ The functions repeatedly fit the specified model, leaving out one observation/study at a time. \loadmathjax } \usage{ leave1out(x, \dots) \method{leave1out}{rma.uni}(x, digits, transf, targs, progbar=FALSE, \dots) \method{leave1out}{rma.mh}(x, digits, transf, targs, progbar=FALSE, \dots) \method{leave1out}{rma.peto}(x, digits, transf, targs, progbar=FALSE, \dots) } \arguments{ \item{x}{an object of class \code{"rma.mh"}, \code{"rma.peto"}, or \code{"rma.uni"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{transf}{an optional argument to specify a function that should be used to transform the model coefficients and interval bounds (e.g., \code{transf=exp}; see also \link{transf}). If unspecified, no transformation is used.} \item{targs}{optional arguments needed by the function specified under \code{transf}.} \item{progbar}{logical to specify whether a progress bar should be shown (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ For \code{"rma.uni"} objects, the model specified via \code{x} must be a model without moderators (i.e., either a fixed- or a random-effects model). } \value{ An object of class \code{"list.rma"}. The object is a list containing the following components: \item{estimate}{estimated (average) outcomes.} \item{se}{corresponding standard errors.} \item{zval}{corresponding test statistics.} \item{pval}{corresponding p-values.} \item{ci.lb}{lower bounds of the confidence intervals.} \item{ci.ub}{upper bounds of the confidence intervals.} \item{Q}{test statistics for the test of heterogeneity.} \item{Qp}{corresponding p-values.} \item{tau2}{estimated amount of heterogeneity (only for random-effects models).} \item{I2}{values of \mjseqn{I^2}.} \item{H2}{values of \mjseqn{H^2}.} When the model was fitted with \code{test="t"} or \code{test="knha"}, then \code{zval} is called \code{tval} in the object that is returned by the function. The object is formatted and printed with \code{\link{print.list.rma}}. } \note{ When using the \code{transf} option, the transformation is applied to the estimated coefficients and the corresponding interval bounds. The standard errors are then set equal to \code{NA} and are omitted from the printed output. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. 
\verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W., & Cheung, M. W.-L. (2010). Outlier and influence diagnostics for meta-analysis. \emph{Research Synthesis Methods}, \bold{1}(2), 112--125. \verb{https://doi.org/10.1002/jrsm.11} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### random-effects model res <- rma(yi, vi, data=dat) ### leave-one-out analysis leave1out(res) leave1out(res, transf=exp) ### meta-analysis of the (log) risk ratios using the Mantel-Haenszel method res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### leave-one-out analysis leave1out(res) leave1out(res, transf=exp) ### meta-analysis of the (log) odds ratios using Peto's method res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### leave-one-out analysis leave1out(res) leave1out(res, transf=exp) } \keyword{methods} metafor/man/dfround.Rd0000644000176200001440000000244614055736403014427 0ustar liggesusers\name{dfround} \alias{dfround} \title{Round Variables in a Data Frame} \description{ Function to round the numeric variables in a data frame. } \usage{ dfround(x, digits) } \arguments{ \item{x}{a data frame.} \item{digits}{either a single integer or a numeric vector of the same length as there are columns in \code{x}.} } \details{ A simple convenience function to round the numeric variables in a data frame, possibly to different numbers of digits. Hence, \code{digits} can either be a single integer (which will then be used to round all numeric variables to the specified number of digits) or a numeric vector (of the same length as there are columns in \code{x}) to specify the number of digits to which each variable should be rounded. Non-numeric variables are skipped. If \code{digits} is a vector, some arbitrary value (or \code{NA}) can be specified for those variables. } \value{ Returns the data frame with variables rounded as specified. 
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \examples{ dat <- dat.bcg dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) res <- rma(yi, vi, mods = ~ ablat + year, data=dat) coef(summary(res)) dfround(coef(summary(res)), digits=c(2,3,2,3,2,2)) } \keyword{manip} metafor/man/dat.kearon1998.Rd0000644000176200001440000001165314055736403015347 0ustar liggesusers\name{dat.kearon1998} \docType{data} \alias{dat.kearon1998} \title{Studies on the Accuracy of Venous Ultrasonography for the Diagnosis of Deep Venous Thrombosis} \description{Results from diagnostic accuracy studies examining the accuracy of venous ultrasonography for the diagnosis of deep venous thrombosis.} \usage{ dat.kearon1998 } \format{The data frame contains the following columns: \tabular{lll}{ \bold{id} \tab \code{numeric} \tab study id \cr \bold{author} \tab \code{character} \tab study author(s) \cr \bold{year} \tab \code{numeric} \tab publication year \cr \bold{patients} \tab \code{character} \tab patient group (either symptomatic or asymptomatic patients) \cr \bold{tp} \tab \code{numeric} \tab number of true positives \cr \bold{np} \tab \code{numeric} \tab number of positive patients (cases) \cr \bold{tn} \tab \code{numeric} \tab number of true negatives \cr \bold{nn} \tab \code{numeric} \tab number of negative patients (non-cases) } } \details{ The studies included in the dataset examined the accuracy of venous ultrasonography for the diagnosis of a first deep venous thrombosis in symptomatic and asymptomatic patients. Cases and non-cases were determined based on contrast venography. Venous ultrasonography was then used to make a diagnosis, leading to a given number of true positives and negatives. A subset of this dataset (using only the studies with asymptomatic patients) was used by Deeks et al. (2005) to illustrate methods for detecting publication bias (or small-study effects) in meta-analyses of diagnostic accuracy studies. } \source{ Kearon, C., Julian, J. A., Math, M., Newman, T. E., & Ginsberg, J. S. (1998). Noninvasive diagnosis of deep venous thrombosis. \emph{Annals of Internal Medicine}, \bold{128}(8), 663--677. \verb{https://doi.org/10.7326/0003-4819-128-8-199804150-00011} } \references{ Deeks, J. J., Macaskill, P., & Irwig, L. (2005). The performance of tests of publication bias and other sample size effects in systematic reviews of diagnostic test accuracy was assessed. \emph{Journal of Clinical Epidemiology}, \bold{58}(9), 882--893.
\verb{https://doi.org/10.1016/j.jclinepi.2005.01.016} } \examples{ ### copy data into 'dat' dat <- dat.kearon1998 ### calculate diagnostic log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=tp, n1i=np, ci=nn-tn, n2i=nn, data=dat, add=1/2, to="all") dat ### fit random-effects model for the symptomatic patients res <- rma(yi, vi, data=dat, subset=patients=="symptomatic") res ### fit random-effects model for the asymptomatic patients res <- rma(yi, vi, data=dat, subset=patients=="asymptomatic") res ### estimated average diagnostic odds ratio (with 95% CI) predict(res, transf=exp, digits=2) ### regression test for funnel plot asymmetry using SE as predictor reg <- regtest(res, model="lm") reg ### corresponding funnel plot funnel(res, atransf=exp, xlim=c(0,7), at=log(c(1,10,100,1000)), ylim=c(0,1.5), steps=4) ys <- seq(0, 2, length=100) lines(coef(reg$fit)[1] + coef(reg$fit)[2]*ys, ys, lwd=2, lty=3) ### regression test for funnel plot asymmetry using total sample size as predictor reg <- regtest(res, model="lm", predictor="ni") reg ### corresponding funnel plot funnel(res, yaxis="ni", atransf=exp, xlim=c(0,7), at=log(c(1,10,100,1000)), ylim=c(0,300), steps=4) ys <- seq(0, 300, length=100) lines(coef(reg$fit)[1] + coef(reg$fit)[2]*ys, ys, lwd=2, lty=3) ### regression test for funnel plot asymmetry using 1/sqrt(ESS) as predictor (Deeks et al., 2005) dat$invessi <- 1/(4*dat$np) + 1/(4*dat$nn) tmp <- rma(yi, invessi, data=dat, subset=patients=="asymptomatic") reg <- regtest(tmp, model="lm") reg ### corresponding funnel plot funnel(tmp, atransf=exp, xlim=c(0,7), at=log(c(1,10,100,1000)), ylim=c(0,.15), steps=4, refline=coef(res), level=0, ylab="1/root(ess)") ys <- seq(0, .20, length=100) lines(coef(reg$fit)[1] + coef(reg$fit)[2]*ys, ys, lwd=2, lty=3) ### convert data to long format dat <- to.long(measure="OR", ai=tp, n1i=np, ci=tn, n2i=nn, data=dat.kearon1998, subset=patients=="asymptomatic") dat <- dat[9:12] levels(dat$group) <- c("sensitivity", "specificity") dat ### calculate logit-transformed sensitivities dat <- escalc(measure="PLO", xi=out1, mi=out2, data=dat, add=1/2, to="all", include=group=="sensitivity") dat ### calculate logit-transformed specificities dat <- escalc(measure="PLO", xi=out1, mi=out2, data=dat, add=1/2, to="all", include=group=="specificity") dat ### bivariate random-effects model for logit sensitivity and specificity res <- rma.mv(yi, vi, mods = ~ group - 1, random = ~ group | study, struct="UN", data=dat) res ### estimated average sensitivity and specificity based on the model predict(res, newmods = rbind(c(1,0),c(0,1)), transf=transf.ilogit, tau2.levels=c(1,2), digits=2) ### estimated average diagnostic odds ratio based on the model predict(res, newmods = c(1,1), transf=exp, digits=2) } \keyword{datasets} metafor/man/residuals.rma.Rd0000644000176200001440000002412414055736403015534 0ustar liggesusers\name{residuals.rma} \alias{residuals} \alias{rstandard} \alias{rstudent} \alias{residuals.rma} \alias{rstandard.rma.uni} \alias{rstandard.rma.mh} \alias{rstandard.rma.mv} \alias{rstandard.rma.peto} \alias{rstudent.rma.uni} \alias{rstudent.rma.mh} \alias{rstudent.rma.mv} \alias{rstudent.rma.peto} \title{Residual Values based on 'rma' Objects} \description{ The \code{residuals}, \code{rstandard}, and \code{rstudent} functions compute residuals, corresponding standard errors, and standardized residuals for models fitted with the \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, and \code{\link{rma.mv}} functions. 
\loadmathjax } \usage{ \method{residuals}{rma}(object, type="response", \dots) \method{rstandard}{rma.uni}(model, digits, type="marginal", \dots) \method{rstandard}{rma.mh}(model, digits, \dots) \method{rstandard}{rma.peto}(model, digits, \dots) \method{rstandard}{rma.mv}(model, digits, cluster, \dots) \method{rstudent}{rma.uni}(model, digits, progbar=FALSE, \dots) \method{rstudent}{rma.mh}(model, digits, progbar=FALSE, \dots) \method{rstudent}{rma.peto}(model, digits, progbar=FALSE, \dots) \method{rstudent}{rma.mv}(model, digits, progbar=FALSE, cluster, reestimate=TRUE, parallel="no", ncpus=1, cl=NULL, \dots) } \arguments{ \item{object}{an object of class \code{"rma"} (for \code{residuals}).} \item{type}{the type of residuals which should be returned. For \code{residuals}, the alternatives are: \code{"response"} (default), \code{"rstandard"}, \code{"rstudent"}, and \code{"pearson"}. For \code{rstandard.rma.uni}, the alternatives are: \code{"marginal"} (default) and \code{"conditional"}. See \sQuote{Details}.} \item{model}{an object of class \code{"rma"} (for \code{residuals}) or an object of class \code{"rma.uni"}, \code{"rma.mh"}, \code{"rma.peto"}, or \code{"rma.mv"} (for \code{rstandard} and \code{rstudent}).} \item{cluster}{optional vector to specify a clustering variable to use for computing cluster-level multivariate standardized residuals (only for \code{"rma.mv"} objects).} \item{reestimate}{logical to specify whether variance/correlation components should be re-estimated after deletion of the \mjseqn{i}th case when computing externally standardized residuals for \code{"rma.mv"} objects (the default is \code{TRUE}).} \item{parallel}{character string to specify whether parallel processing should be used (the default is \code{"no"}). For parallel processing, set to either \code{"snow"} or \code{"multicore"}. See \sQuote{Details}.} \item{ncpus}{integer to specify the number of processes to use in the parallel processing.} \item{cl}{optional cluster to use if \code{parallel="snow"}. If not supplied, a cluster on the local machine is created for the duration of the call.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded. If unspecified, the default is to take the value from the object.} \item{progbar}{logical to specify whether a progress bar should be shown (only for \code{rstudent}) (the default is \code{FALSE}).} \item{\dots}{other arguments.} } \details{ The observed residuals (obtained with \code{residuals}) are simply equal to the \sQuote{observed - fitted} values. These can be obtained with \code{residuals(object)} (using the default \code{type="response"}). Dividing the observed residuals by the model-implied standard errors of the observed effect sizes or outcomes yields Pearson (or semi-standardized) residuals. These can be obtained with \code{residuals(object, type="pearson")}. Dividing the observed residuals by their corresponding standard errors yields (internally) standardized residuals. These can be obtained with \code{rstandard(model)} or \code{residuals(object, type="rstandard")}. With \code{rstudent(model)} (or \code{residuals(object, type="rstudent")}), one can obtain the externally standardized residuals (also called standardized deleted residuals or (externally) studentized residuals). 
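To illustrate the different types with a minimal sketch (where \code{dat} with \code{yi} and \code{vi} is a placeholder for an actual dataset): \preformatted{res <- rma(yi, vi, data=dat)      # fit a random-effects model

residuals(res)                    # observed (response) residuals
residuals(res, type="pearson")    # Pearson (semi-standardized) residuals
rstandard(res)                    # internally standardized residuals
rstudent(res)                     # externally standardized (deleted) residuals}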
The externally standardized residual for the \mjseqn{i}th case is obtained by deleting the \mjseqn{i}th case from the dataset, fitting the model based on the remaining cases, calculating the predicted value for the \mjseqn{i}th case based on the fitted model, taking the difference between the observed and the predicted value for the \mjseqn{i}th case (which yields the deleted residual), and then standardizing the deleted residual based on its standard error. If a particular case fits the model, its standardized residual follows (asymptotically) a standard normal distribution. A large standardized residual for a case therefore may suggest that the case does not fit the assumed model (i.e., it may be an outlier). For \code{"rma.uni"} objects, \code{rstandard(model, type="conditional")} computes conditional residuals, which are the deviations of the observed effect sizes or outcomes from the best linear unbiased predictions (BLUPs) of the study-specific true effect sizes or outcomes (see \code{\link{blup.rma.uni}}). For \code{"rma.mv"} objects, one can specify a clustering variable (via the \code{cluster} argument). If specified, \code{rstandard(model)} and \code{rstudent(model)} also compute cluster-level multivariate (internally or externally) standardized residuals. If all outcomes within a cluster fit the model, then the multivariate standardized residual for the cluster follows (asymptotically) a chi-square distribution with \mjseqn{k_i} degrees of freedom (where \mjseqn{k_i} denotes the number of outcomes within the cluster). See also \code{\link{influence.rma.uni}} and \code{\link{influence.rma.mv}} for other leave-one-out diagnostics that are useful for detecting influential cases in models fitted with the \code{\link{rma.uni}} and \code{\link{rma.mv}} functions. } \value{ Either a vector with the residuals of the requested type (for \code{residuals}) or an object of class \code{"list.rma"}, which is a list containing the following components: \item{resid}{observed residuals (for \code{rstandard}) or deleted residuals (for \code{rstudent}).} \item{se}{corresponding standard errors.} \item{z}{standardized residuals (internally standardized for \code{rstandard} or externally standardized for \code{rstudent}).} When a clustering variable is specified for \code{"rma.mv"} objects, the returned object is a list with the first element (named \code{obs}) as described above and a second element (named \code{cluster} of class \code{"list.rma"} with: \item{X2}{cluster-level multivariate standardized residuals.} \item{k}{number of observed effect sizes or outcomes within the clusters.} The object is formatted and printed with \code{\link{print.list.rma}}. } \note{ Right now, the externally standardized residuals (obtained with \code{rstudent}) are calculated by refitting the model \mjseqn{k} times (where \mjseqn{k} is the number of cases). Depending on how large \mjseqn{k} is, it may take a few moments to finish the calculations. For complex models fitted with \code{\link{rma.mv}}, this can become computationally expensive. On machines with multiple cores, one can usually speed things up by delegating the model fitting to separate worker processes, that is, by setting \code{parallel="snow"} or \code{parallel="multicore"} and \code{ncpus} to some value larger than 1 (only for objects of class \code{"rma.mv"}). 
Parallel processing makes use of the \code{\link[parallel]{parallel}} package, using the \code{\link[parallel]{makePSOCKcluster}} and \code{\link[parallel]{parLapply}} functions when \code{parallel="snow"} or using \code{\link[parallel]{mclapply}} when \code{parallel="multicore"} (the latter only works on Unix/Linux-alikes). With \code{parallel::detectCores()}, one can check on the number of available cores on the local machine. Alternatively (or in addition to using parallel processing), one can also set \code{reestimate=FALSE}, in which case any variance/correlation components in the model are not re-estimated after deleting the \mjseqn{i}th case from the dataset. Doing so only yields an approximation to the externally standardized residuals (and the cluster-level multivariate standardized residuals) that ignores the influence of the \mjseqn{i}th case on the variance/correlation components, but is considerably faster (and often yields similar results). It may not be possible to fit the model after deletion of the \mjseqn{i}th case from the dataset. This will result in \code{NA} values for that case when calling \code{rstudent}. Also, for \code{"rma.mv"} objects with a clustering variable specified, it may not be possible to compute the cluster-level multivariate standardized residual for a particular cluster (if the var-cov matrix of the residuals within a cluster is not of full rank). This will result in \code{NA} for that cluster. For objects of class \code{"rma.mh"} and \code{"rma.peto"}, \code{rstandard} actually computes Pearson (or semi-standardized) residuals. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis}. San Diego, CA: Academic Press. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} Viechtbauer, W., & Cheung, M. W.-L. (2010). Outlier and influence diagnostics for meta-analysis. \emph{Research Synthesis Methods}, \bold{1}(2), 112--125. \verb{https://doi.org/10.1002/jrsm.11} } \seealso{ \code{\link{rma.uni}}, \code{\link{rma.mh}}, \code{\link{rma.peto}}, \code{\link{rma.glmm}}, \code{\link{rma.mv}}, \code{\link{influence.rma.uni}}, \code{\link{influence.rma.mv}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects model res <- rma(yi, vi, data=dat) ### compute the studentized residuals rstudent(res) ### fit mixed-effects model with absolute latitude as moderator res <- rma(yi, vi, mods = ~ ablat, data=dat) ### compute the studentized residuals rstudent(res) } \keyword{models} metafor/man/trimfill.Rd0000644000176200001440000001531114055736403014603 0ustar liggesusers\name{trimfill} \alias{trimfill} \alias{trimfill.rma.uni} \title{Trim and Fill Analysis for 'rma.uni' Objects} \description{ Carry out a trim and fill analysis for objects of class \code{"rma.uni"}. \loadmathjax } \usage{ trimfill(x, \dots) \method{trimfill}{rma.uni}(x, side, estimator="L0", maxiter=100, verbose=FALSE, ilim, \dots) } \arguments{ \item{x}{an object of class \code{"rma.uni"}.} \item{side}{optional character string (either \code{"left"} or \code{"right"}) to specify on which side of the funnel plot the missing studies should be imputed. 
If left unspecified, the side is chosen within the function depending on the results of Egger's regression test (see \code{\link{regtest}} for details on this test).} \item{estimator}{character string (either \code{"L0"}, \code{"R0"}, or \code{"Q0"}) to specify the estimator to use for estimating the number of missing studies (the default is \code{"L0"}).} \item{maxiter}{integer to specify the maximum number of iterations to use for the trim and fill method (the default is \code{100}).} \item{verbose}{logical to specify whether output should be generated on the progress of the iterative algorithm used as part of the trim and fill method (the default is \code{FALSE}).} \item{ilim}{limits for the imputed values. If unspecified, no limits are used.} \item{\dots}{other arguments.} } \details{ The trim and fill method is a nonparametric (rank-based) data augmentation technique proposed by Duval and Tweedie (2000a, 2000b; see also Duval, 2005). The method can be used to estimate the number of studies missing from a meta-analysis due to suppression of the most extreme results on one side of the funnel plot. The method then augments the observed data so that the funnel plot is more symmetric and recomputes the summary estimate based on the complete data. The trim and fill method can only be used in the context of a fixed- or random-effects model (i.e., in models without moderators). The method should not be regarded as a way of yielding a more \sQuote{valid} estimate of the overall effect or outcome, but as a way of examining the sensitivity of the results to one particular selection mechanism (i.e., one particular form of publication bias). } \value{ An object of class \code{c("rma.uni.trimfill","rma.uni","rma")}. The object is a list containing the same components as objects created by \code{\link{rma.uni}}, except that the data are augmented by the trim and fill method. The following components are also added: \item{k0}{estimated number of missing studies.} \item{side}{either \code{"left"} or \code{"right"}, indicating on which side of the funnel plot the missing studies (if any) were imputed.} \item{se.k0}{standard error of k0.} \item{p.k0}{p-value for the test of \mjeqn{\mbox{H}_0}{H_0}: no missing studies on the chosen side (only when \code{estimator="R0"}; \code{NA} otherwise).} \item{yi}{the observed effect sizes or outcomes plus the augmented values (if there are any).} \item{vi}{the corresponding sampling variances} \item{fill}{a logical vector indicating which of the values in \code{yi} are the observed (\code{FALSE}) and the augmented (\code{TRUE}) data.} The results of the fitted model after the data augmentation are printed with the \code{\link{print.rma.uni}} function. Calling \code{\link{funnel.rma}} on the object provides a funnel plot of the observed and augmented data. } \note{ Three different estimators for the number of missing studies were proposed by Duval and Tweedie (2000a, 2000b). Based on these articles and Duval (2005), \code{"R0"} and \code{"L0"} are recommended. An advantage of estimator \code{"R0"} is that it provides a test of the null hypothesis that the number of missing studies (on the chosen side) is zero. If the outcome measure used for the analysis is bounded (e.g., correlations are bounded between -1 and +1, proportions are bounded between 0 and 1), one can use the \code{ilim} argument to enforce those limits when imputing values (imputed values cannot exceed those bounds then). 
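For instance, if \code{res} were a model fitted to outcomes bounded between 0 and 1 (e.g., proportions), a sketch of such a call would be: \preformatted{# constrain any imputed values to the 0 to 1 range
trimfill(res, ilim=c(0,1))}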
The model used during the trim and fill procedure is the same as used by the original model object. Hence, if a fixed-effects model is passed to the function, then a fixed-effects model is also used during the trim and fill procedure and the results provided are also based on a fixed-effects model. This would be a \sQuote{fixed-fixed} approach. Similarly, if a random-effects model is passed to the function, then the same model is used as part of the trim and fill procedure and for the final analysis. This would be a \sQuote{random-random} approach. However, one can also easily fit a different model for the final analysis than was used for the trim and fill procedure. See \sQuote{Examples} for an illustration of a \sQuote{fixed-random} approach. } \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Duval, S. J., & Tweedie, R. L. (2000a). Trim and fill: A simple funnel-plot-based method of testing and adjusting for publication bias in meta-analysis. \emph{Biometrics}, \bold{56}(2), 455--463. \verb{https://doi.org/10.1111/j.0006-341x.2000.00455.x} Duval, S. J., & Tweedie, R. L. (2000b). A nonparametric "trim and fill" method of accounting for publication bias in meta-analysis. \emph{Journal of the American Statistical Association}, \bold{95}(449), 89--98. \verb{https://doi.org/10.1080/01621459.2000.10473905} Duval, S. J. (2005). The trim and fill method. In H. R. Rothstein, A. J. Sutton, & M. Borenstein (Eds.) \emph{Publication bias in meta-analysis: Prevention, assessment, and adjustments} (pp. 127--144). Chichester, England: Wiley. Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{funnel.rma}} } \examples{ ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### meta-analysis of the log risk ratios using a fixed-effects model res <- rma(yi, vi, data=dat, method="FE") res.tf <- trimfill(res) res.tf funnel(res.tf, legend=TRUE, cex=1.2) ### estimator "R0" also provides test res.tf <- trimfill(res, estimator="R0") res.tf ### meta-analysis of the log risk ratios using a random-effects model res <- rma(yi, vi, data=dat) res.tf <- trimfill(res) res.tf funnel(res.tf, legend=TRUE, cex=1.2) ### the examples above are fixed-fixed and random-random approaches ### illustration of a fixed-random approach res <- rma(yi, vi, data=dat, method="FE") res.tf <- trimfill(res) filled <- data.frame(yi = res.tf$yi, vi = res.tf$vi, fill = res.tf$fill) filled rma(yi, vi, data=filled) } \keyword{models} metafor/man/print.ranktest.rma.Rd0000644000176200001440000000213314055736403016523 0ustar liggesusers\name{print.ranktest} \alias{print.ranktest} \title{Print Method for 'ranktest' Objects} \description{ Print method for objects of class \code{"ranktest"}. } \usage{ \method{print}{ranktest}(x, digits=x$digits, \dots) } \arguments{ \item{x}{an object of class \code{"ranktest"}.} \item{digits}{integer to specify the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).} \item{\dots}{other arguments.} } \details{ The output includes: \itemize{ \item the estimated value of Kendall's tau rank correlation coefficient \item the corresponding p-value for the test that the true tau is equal to zero } } \value{ The function does not return an object. 
} \author{ Wolfgang Viechtbauer \email{wvb@metafor-project.org} \url{https://www.metafor-project.org} } \references{ Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \verb{https://doi.org/10.18637/jss.v036.i03} } \seealso{ \code{\link{ranktest}} } \keyword{print} metafor/man/dat.cannon2006.Rd0000644000176200001440000000726114055736403015321 0ustar liggesusers\name{dat.cannon2006} \docType{data} \alias{dat.cannon2006} \title{Studies on the Effectiveness of Intensive Versus Moderate Statin Therapy for Preventing Coronary Death or Myocardial Infarction} \description{Results from 4 trials examining the effectiveness of intensive (high dose) versus moderate (standard dose) statin therapy for preventing coronary death or myocardial infarction.} \usage{dat.cannon2006} \format{The data frame contains the following columns: \tabular{lll}{ \bold{trial} \tab \code{character} \tab trial name \cr \bold{pop} \tab \code{character} \tab study population (post-ACS: post acute coronary syndrome; stable CAD: stable coronary artery disease) \cr \bold{nt} \tab \code{numeric} \tab number of patients in the high dose group \cr \bold{nc} \tab \code{numeric} \tab number of patients in the standard dose group \cr \bold{ep1t} \tab \code{numeric} \tab number of events in the high dose group for end point 1: coronary death or non-fatal myocardial infarction \cr \bold{ep1c} \tab \code{numeric} \tab number of events in the standard dose group for end point 1: coronary death or non-fatal myocardial infarction \cr \bold{ep2t} \tab \code{numeric} \tab number of events in the high dose group for end point 2: coronary death or any cardiovascular event (MI, stroke, hospitalization for unstable angina, or revascularization) \cr \bold{ep2c} \tab \code{numeric} \tab number of events in the standard dose group for end point 2: coronary death or any cardiovascular event (MI, stroke, hospitalization for unstable angina, or revascularization) \cr \bold{ep3t} \tab \code{numeric} \tab number of events in the high dose group for end point 3: cardiovascular death \cr \bold{ep3c} \tab \code{numeric} \tab number of events in the standard dose group for end point 3: cardiovascular death \cr \bold{ep4t} \tab \code{numeric} \tab number of events in the high dose group for end point 4: non-cardiovascular death \cr \bold{ep4c} \tab \code{numeric} \tab number of events in the standard dose group for end point 4: non-cardiovascular death \cr \bold{ep5t} \tab \code{numeric} \tab number of events in the high dose group for end point 5: deaths (all-cause mortality) \cr \bold{ep5c} \tab \code{numeric} \tab number of events in the standard dose group for end point 5: deaths (all-cause mortality) \cr \bold{ep6t} \tab \code{numeric} \tab number of events in the high dose group for end point 6: stroke \cr \bold{ep6c} \tab \code{numeric} \tab number of events in the standard dose group for end point 6: stroke } } \details{ The data were obtained from Figures 2, 3, 4, and 5 in Cannon et al. (2006). The authors used the Mantel-Haenszel method for combining the results from the 4 trials. This approach is implemented in the \code{\link{rma.mh}} function. } \source{ Cannon, C. P., Steinberg, B. A., Murphy, S. A., Mega, J. L., & Braunwald, E. (2006). Meta-analysis of cardiovascular outcomes trials comparing intensive versus moderate statin therapy. \emph{Journal of the American College of Cardiology}, \bold{48}(3), 438--445. 
\keyword{print}
metafor/man/dat.cannon2006.Rd0000644000176200001440000000726114055736403015321 0ustar liggesusers\name{dat.cannon2006}
\docType{data}
\alias{dat.cannon2006}
\title{Studies on the Effectiveness of Intensive Versus Moderate Statin Therapy for Preventing Coronary Death or Myocardial Infarction}
\description{Results from 4 trials examining the effectiveness of intensive (high dose) versus moderate (standard dose) statin therapy for preventing coronary death or myocardial infarction.}
\usage{dat.cannon2006}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{trial} \tab \code{character} \tab trial name \cr
\bold{pop}   \tab \code{character} \tab study population (post-ACS: post acute coronary syndrome; stable CAD: stable coronary artery disease) \cr
\bold{nt}    \tab \code{numeric}   \tab number of patients in the high dose group \cr
\bold{nc}    \tab \code{numeric}   \tab number of patients in the standard dose group \cr
\bold{ep1t}  \tab \code{numeric}   \tab number of events in the high dose group for end point 1: coronary death or non-fatal myocardial infarction \cr
\bold{ep1c}  \tab \code{numeric}   \tab number of events in the standard dose group for end point 1: coronary death or non-fatal myocardial infarction \cr
\bold{ep2t}  \tab \code{numeric}   \tab number of events in the high dose group for end point 2: coronary death or any cardiovascular event (MI, stroke, hospitalization for unstable angina, or revascularization) \cr
\bold{ep2c}  \tab \code{numeric}   \tab number of events in the standard dose group for end point 2: coronary death or any cardiovascular event (MI, stroke, hospitalization for unstable angina, or revascularization) \cr
\bold{ep3t}  \tab \code{numeric}   \tab number of events in the high dose group for end point 3: cardiovascular death \cr
\bold{ep3c}  \tab \code{numeric}   \tab number of events in the standard dose group for end point 3: cardiovascular death \cr
\bold{ep4t}  \tab \code{numeric}   \tab number of events in the high dose group for end point 4: non-cardiovascular death \cr
\bold{ep4c}  \tab \code{numeric}   \tab number of events in the standard dose group for end point 4: non-cardiovascular death \cr
\bold{ep5t}  \tab \code{numeric}   \tab number of events in the high dose group for end point 5: deaths (all-cause mortality) \cr
\bold{ep5c}  \tab \code{numeric}   \tab number of events in the standard dose group for end point 5: deaths (all-cause mortality) \cr
\bold{ep6t}  \tab \code{numeric}   \tab number of events in the high dose group for end point 6: stroke \cr
\bold{ep6c}  \tab \code{numeric}   \tab number of events in the standard dose group for end point 6: stroke
}
}
\details{
   The data were obtained from Figures 2, 3, 4, and 5 in Cannon et al. (2006). The authors used the Mantel-Haenszel method to combine the results from the 4 trials. This approach is implemented in the \code{\link{rma.mh}} function.
}
\source{
   Cannon, C. P., Steinberg, B. A., Murphy, S. A., Mega, J. L., & Braunwald, E. (2006). Meta-analysis of cardiovascular outcomes trials comparing intensive versus moderate statin therapy. \emph{Journal of the American College of Cardiology}, \bold{48}(3), 438--445. \verb{https://doi.org/10.1016/j.jacc.2006.04.070}
}
\examples{
### copy data into 'dat' and examine the data
dat <- dat.cannon2006
dat

### meta-analysis of log odds ratios using the Mantel-Haenszel method for end point 1
res <- rma.mh(measure="OR", ai=ep1t, n1i=nt, ci=ep1c, n2i=nc, data=dat, slab=trial)
print(res, digits=2)

### forest plot
forest(res, xlim=c(-.8,.8), atransf=exp, at=log(c(2/3, 1, 3/2)),
       header=TRUE, top=2, cex=1.2, xlab="Odds Ratio")
mtext("(high dose better)", side=1, line=par("mgp")[1]-0.5, at=log(2/3), cex=1.2, font=3)
mtext("(standard dose better)", side=1, line=par("mgp")[1]-0.5, at=log(3/2), cex=1.2, font=3)
}
\keyword{datasets}
metafor/DESCRIPTION0000644000176200001440000000453714060150152013420 0ustar liggesusersPackage: metafor
Version: 3.0-2
Date: 2021-06-09
Title: Meta-Analysis Package for R
Authors@R: person(given = "Wolfgang", family = "Viechtbauer", role = c("aut","cre"), email = "wvb@metafor-project.org", comment = c(ORCID = "0000-0003-3463-4063"))
Depends: R (>= 3.5.0), methods, Matrix
Imports: stats, utils, graphics, grDevices, nlme, mathjaxr, pbapply
Suggests: lme4, numDeriv, minqa, nloptr, dfoptim, ucminf, optimParallel, CompQuadForm, mvtnorm, BiasedUrn, Epi, survival, GLMMadaptive, multcomp, gsl, sp, ape, boot, crayon, R.rsp, testthat, rmarkdown
Description: A comprehensive collection of functions for conducting meta-analyses in R. The package includes functions to calculate various effect sizes or outcome measures, fit fixed-, random-, and mixed-effects models to such data, carry out moderator and meta-regression analyses, and create various types of meta-analytical plots (e.g., forest, funnel, radial, L'Abbe, Baujat, bubble, and GOSH plots). For meta-analyses of binomial and person-time data, the package also provides functions that implement specialized methods, including the Mantel-Haenszel method, Peto's method, and a variety of suitable generalized linear (mixed-effects) models (i.e., mixed-effects logistic and Poisson regression models). Finally, the package provides functionality for fitting meta-analytic multivariate/multilevel models that account for non-independent sampling errors and/or true effects (e.g., due to the inclusion of multiple treatment studies, multiple endpoints, or other forms of clustering). Network meta-analyses and meta-analyses accounting for known correlation structures (e.g., due to phylogenetic relatedness) can also be conducted. An introduction to the package can be found in Viechtbauer (2010) <doi:10.18637/jss.v036.i03>.
License: GPL (>= 2)
ByteCompile: TRUE
LazyData: TRUE
Encoding: UTF-8
RdMacros: mathjaxr
VignetteBuilder: R.rsp
URL: https://www.metafor-project.org https://github.com/wviechtb/metafor https://wviechtb.github.io/metafor/ https://www.wvbauer.com
BugReports: https://github.com/wviechtb/metafor/issues
NeedsCompilation: no
Packaged: 2021-06-09 12:35:22 UTC; wviechtb
Author: Wolfgang Viechtbauer [aut, cre] (<https://orcid.org/0000-0003-3463-4063>)
Maintainer: Wolfgang Viechtbauer <wvb@metafor-project.org>
Repository: CRAN
Date/Publication: 2021-06-09 14:30:02 UTC
metafor/build/0000755000176200001440000000000014060132612013001 5ustar liggesusersmetafor/build/vignette.rds0000644000176200001440000000040514060132612015337 0ustar liggesusers
-zEʒOzᐲha`7636{Ga2,z=A8f/M矮ߟG2"?3chr[*yG̞c X Hmh:,~̅;k\!ʑQ'i(uBD "[֑Rq?2p?"qs)ܫUTSi-b%ۢHC/M'G m?Xoş[FzMϢʋER\궨@kY,g}Ƃ"`~,lV,J= A5E>qzUp`b0JZ$Ūri%k֮zG"cȬhZZgϯ)UU"jcum fҽ6wC`̅׋*f޺d  X"> ,1z@85N _F-Xs1:ϱ 7EEX!-4C(̹nUF5Vl!X@m;pVgDZ0n@3 p4X]ջlTnin04,C fh+*NC21 g7y'4Q' tYm%vGc>:m#"NV ¡<{Ec?HagV0F6f`I]) ,d%˃.sLN|Y:yN̮f|QuadG1R5BzK3Mh3<+-/SӴӔdb)0b]Lw`NICM88\R2-,d[ R;;3˜SyRUJ9O!#umߧacǜ3ˍQؿ,»BL!]]XC&"4M])PKb{V&fŘH:'cc i:-15'AQ8 #D׶mi^~mnbsʙy2,~RS8gKVFWI›Rcb h{}+v靥A'D3{˼NJjn: ^~Bd>VF铩D#ʧ9seR֜o 5G!Rm!), @]5sly4`^Ҏ>@S0;?![8D R.ѕ 29@OeRH-r6/;%@ht@i&+47X0qęw{R[5Uxv/J[}WCVv)?@ ;#(Ujr i >2&3[MGbn+-.*G\>F &84.n!ѕ"9Ft_܁NjW'O\Ƨcwa\PC)#6I Yڿ8R80Kgj.$\/ '~ Ha \bg bLw*zE߱$ v=ZVUnB٦SWD ކξ f| I,ދSks$N)N𪅳)ㅋ[̏S]ԏ{'npDaV "E OXʠbO_cWC:?wkqR}-N!{ d#oԤU2iG`mB0g8*)= 3ek;=Ϡٲ,m-`C]¼ߌxq"pkRT/\u8l =˯-Wxl?<>3~WtM.OFw[ mHGxDc+caP^/'fӽ,le\Ab3N}Lg׏~iF endstream endobj 1841 0 obj << /Length 1664 /Filter /FlateDecode >> stream xX[o6~ϯ0lhQ[bz؊$P4C=AKE=rmo/2iX"^9<Ε \psrv1 1 ?O0bp n_fH9.Vb槳?KeP$'tvǃ(NJxZlXWInd9CrhDo(U,`d)6|mQsv07g/簖'߿x.%JQ( nL a90z*Z@-JG\l:+&uȎd5*>zKJMshn6IT,~ZC] $^=5Ii-Yj3}"Zs@?p2HqŔ%^K+?zpaåkr9wxa?2uT9+Q/¹ei] UPOe܌֔|(ݘuh}%c;z%Ff8~~e'ɞ"Ie(˒,23P"nóWd1VG-D cfYa@zTessL >a$&+L='Y!U&9ۺ '>2%0V)3\aZ[3IlCJta(6&m[+"lҳ ic68=^[4j: AO,Ux5o|UMV[QԚexƩu#2|9B&lC!L6"͆2Rra`q3tNEFĖ7VUz5+_ mȡVۍIţ:Xu'iϦ>hwH#/(\7t%DInvn6Kḛv!N( mJ"-[Hffl+b*͡O , Il'%X$%Q̸,red&oٝcIqdT;4xz@oE-ck,]N\6ɗ?-`9Ovަϣ6vYJɎ l}w~i@L^ϻIBͯUTwA}3H{G F$] "WM%v1PzwF\jV/ĬNEm8U$en!lI["q΂>󒷱wmeGl 錒[ɧOof_Q喙M9HvS}KZWփzj Wzm'{mSE>Ws4]mX:UNR> stream xVKo6vI=EEm䒦-QDG)i~{"Z+ uh73 ^-..Pػ*=`byWvwW?]\FHF03L9h/R/S|$!u -kߎwo1|,rn) ΟsoVR;3Z)Yέ̞w/>v @(ÉuvvfUYQZ+iVu:` LnTϴ?;g`O!=9Cm>8\3L:YQ߉z;= 'r(VMvVM7ty#/f_sҬ ^Jk}r;aUΛҎ[j6B;e΂LW|7o)5u7Zn%L;|r';t\\v.pSb{[=A'b1>:D&c®! m^x i/D4iŴhDQAtU׻y峥ܙ1oM@@5kcݚܫӺ)>NJG:YLI F.^[2 s?I9(0I 0Ix2P?YM 4˂wbS!vU2ҳHmJ/Z@y} G˫wW&D,n &!^-~7qhb E4~FΎ RE$?r u5C[#B2eG]jeg !}[ '?HP5y,3  .brz=mTӷFL ËO늛p xoFz<hL endstream endobj 1853 0 obj << /Length 1803 /Filter /FlateDecode >> stream xڝ]s#ݿBivg,~qgzM.IfҳKY¥츿A+|AA|䫻_}{]֔\lVsYV?'r[u/9_ny.r.XUGO$9-@2:vѹ]5uMG3NR6v։Yx|M*Nco脢eשHԨ6vZ-5c4EpSӾN5qqk- MQ.;;i@֝rF.=AK;u;L۽ 7Op@#Kٌ/,/*Y`;h:=zy"&Tom[⁉;x,Oy6whgUhMvښ)6^M9e}$J%ORQ%đ+A,ȝ@LWp6dvGgl_zFבΎ>(ՠ% ~S 5$17.(D!7, x'@ĢUY# #UP6(Zn1 < ඃW`ӾRY@@wRl.@S9n* RGǨ#!d`,@eG7M$F+Xv'Tـ'B.y[o,vR2Y.:@umԃ [h@_7i t8ف6TW朶; h0f&%VZQTs:P4x%Xt*(:i';H'*K%OgD| @HF"(_,ʚɪ˙~qGA|(2wźq-%"2??a<>8P2*%U|̊wl͗dxw!8A^D7՚ˢRQ_f_Ke`dv ev\>'k-OXqy@NSEr.`~JIDxcA9Qa]+ʡ:>-@CB NBRf{#֧Z٢_eQا?44Q//_iV%-f<{l{cdqM3埊@ 5y~!))CG)&Ǥ`-seC9yQr!>'M p8l\!an$z 4w9@%_ &jM%Qݒ>x`"2\NX 1!rS?=HiXS-cK_* 5}`6 M,\;d|M=(ce)Qa0W_sB\/$d)swk?YJPYU H L1~>^=k2P SSfO4 4oSw[p3G%rg?W ˚zIU9\?gGbJ~Y ("'8@<6* wˢ2#ᮝd Z|!^xE8{`Lsa:ũE[ԟ0hX]?bVo ,~|vb|}eزL W 5OCE1lZ L1Lο4ߛ?s6mE9;;]!Ў+xH endstream endobj 1864 0 obj << /Length 2118 /Filter /FlateDecode >> stream xnF_1 N`"{4 I)ߪj9fجj_ݯ/}q6JW'"YV{a 0_ݖsq8 :^>OLC29u"`HnDnZ3)6ȼ$÷{يG!$vXM/ucwZ{Y uc%mgR /CYx35P 3% 8YdѫiN;Y0?EgzX$})iqT[q^a/c"z ai7C:]Q&׊lzܩN5\i!#NgP>~D9e(g;Z˻#y܏ws:_x?¯^;dԳj-T_l_Ss[Co4u[4usjƶο("Yp74E۩%e,"DkSKe1]+uu  L**Zjݠ. i=mjC@S uD];3nyl} YUG+ޝ|ҝbښ1EAk PHA`Ayp")0:`6CPMڋT(czlkQG]2̢XeBTw=<]Cֆ9fԶf]f1M f1w,TD=T=Cw][*J\T vuc+3UKRR e!H !5zA;PTjpªNH}h V<$B0[I346dcjY$z,0@޳7 ƜY6;`q2 !Y mTCoe#,C8L,Imr@ac^1ObqJOLȉ@6y3v۝ڵcrQj1R0Vcˇ ȍF&ܥT7lQpFXHjUw8>46=7RgB Tgt9c튉=]C``p <2miЖޫFu\r`'!xXd$i i.wd=g!. 
D6&6w1[!.Z{'Ǖ+>"+2 [(p81^gc9>& Hcv#^dk2ɶ 9OJ5rR ^s^щY!-&}H"|sXҀ v5$$ )擠'QY~glo1$jV eeNbG<);6*Lֹޡr@w%V,\&eNCw”DH(ڡTSעvf3gYÅB2C-9uH GDE@#A9oaͼj,AGQlfgʻviF70Rʧ'&9Kd0H$G9Zt˲u.Y&gbu=x=fI&Θ~局Ya@Em}?ּٷoP; ɡ}!O^NM EϳGf5KpƮll4LҠ<eсg澵$0@j^pN]z\lZU{`[1\DiSg\lp3ɣc{0UM.]ka/?TMe.{ٵg(9 W[?Iάa<60,ӮƳu%in@I:v:,]Sݏ".yy6RHP<G!8=`*[t |/*H#?MO,t1xGo~O> stream xW[o6~7[u![][-6QIT)ڱa}7r-!yxH~cY9sYu8~̖<7c'AȍԙA i<:Lr @<˫ p&w짡P |OմۋLG(MFqyeF(lxr14:_j0_k}xN\Z^6bG]DS;y=8pIuΞu|z>rS^xpR-6 q7%ejtwY nڸr!pJ@ srfd&6!/7E[΢`+V@;ݾb[߫:һgOu\ѹi--.626@ $^m@Lp1< u]夦.498{_ʥi1mڛtP 6`3 Pn>ҖT' G.NL~nFddͻ+-ehFv[4hBw6(LmD*pMuD{#[ (Jr"Ӟi1D`OQk2/[g)JMiaĴ_AW']hY*^_~ֈnWŴ? }3WorI82b=E;wCs2EFVXLbޟQztdXo)~)88!Qn>cM!(rM@H!eOMMO27Ds6{xR^Qoo3|Ԃ5Sl ejоf:UnOw*&q}P\GGx[?>jA2 Ͷ+*o'z{Hj;axvt+ KڅsN : @CЍTG^tn:A.fqYsҐn"C0y̆tON!4IQ1cIje!]v>&kɣ$C|ʼnά&uBPz4F(xUs%+ &woEbSV변}} [K\{vR7զ$\I Q 5TNPC;ֆJkDvleBzMPz~;i nFTŴ\wUP_n|2ơY%&(kZA*?U1? endstream endobj 1881 0 obj << /Length 2007 /Filter /FlateDecode >> stream xXm "-zx,˯Eavhw;g3䗬ޡSE)J8,oj}.r?Od}Z y,R~Go'2g GQR,&hspS$*:OS?;N- '~t mƁ<ѷhDq,;ч ?c!]垆;;XljQvTu7}. MGa" rCY&̮[86'dhGF(Uꓬ[x2@Z*r#{؎ OO$FnTVTEmKbZNOBϱ+Lj]W"#-Es:TM=YЏr;,#~#(+5I 0IA,[ГO֓,UŞ27ۀ'qi'{,M%BK"1oX=r'emuCe^ũ lvŸ't(* Tr9\yȫBIMHc K<@/@(vocORv[ije]H@<gL'QcVAF%, l5'&uBIud6ݙH;SnݙD `̽qGRN;PDF9,i#\hi0d5Y)4Gq9]+{^sky8g:R~z؇Nc;@LCbg?7l a䰑B;Jf/[ֹt+}@f b$Ϡi8*0&e^òH%Dvl>~zФ ;؝:ҜUc1,N]FR[{8hgtCUvg˰a(HMJ>+6b }FPܦX[l-XI>ٌ; ݉ШSŠg+|.J}U҆?A:WR}E8~_̄$e~@XTzAo'~Nڣe2=K_GA{hUz8åҮ&84 JEkUD8m ,E(x_ԥ@6x!eލu ?@6^lFĄq. $L @hu~qoJ1<ܳgOᛅ<3]?)G$hO#kBkoR/Q4 Qt*kF$b |#򣌏 ¸'c (DŒn\^Bƣ,> !h"ktYAς]ʯ09 n6{6=ݷT}pʞ|TpDQ4Њb3yezabSb~Vܬ~@09^Jz_֊c.tk2>RTVܗW 18l V($ZWъ.7'F#k>Do $z,R8nXnVb#iåwbxYGxe1s1ckUȦK?QE:īܕv#3*6Tw V;]nhx }cu6˜A n_Gpn?x"3?gЕs(>cou8ҥݖoSNwڣͩ>&%aVl endstream endobj 1889 0 obj << /Length 1105 /Filter /FlateDecode >> stream xWK6W[K+ꭠ>l]Hsʔ%Tco!۔em9Ǯ\}[F^dr #D~jsr3ڠ4MfV?f"'NTIC!2q{߷!k/m/3_2\e3;twZxI_+(|"b! BF:кaȸ4&Lh EE /.2Z s5=.M5$ Ofy¡j'y6TGP$/^V/VǷXTa(z˞ U7[9BK :KcpyF*ʌJ/ʬ/n Q\6u%GU_yfjʍvv58A`ҵqRQ$ibeZ%xu|8K `08 W98aD(x6煱I5=>x^LiV+(ɲP8Aj}!}liC`QBkʷ-jR#1PLabkKa;T}Zwlt͂pJg*ԮRŁx_FRLE{Gڌ[1x0fs6JH7%idh3Qʹ-/3u!8D{$벑f^=*a8뱄sI7 iH3m`кm}esߗsm9~2 %f͘`_V5L jWPXrk}}9**R}=> stream xX[o6~ϯ0Rl!u0?t]Z[ hL $~E'Mv<<<<׏G^|yro*Ų,3'ģË%$dl'@ Y{=aJ|kT D(&I8|Gt:UQD4 #YS6UsL$,u oxމ1jiD[:Z=5uUʜwRF pPN"2k3E_ m@^mU[ ֎QcrjW+3vM/vU+;9#z"OW1rMJ[HߪȷJyGjwwU{;HHDYj.Et\GH<dd/7KD$M/˾[scgO|+|P@Bh^ؗ]JUכš-h'6V+x?=~vfQQX 6M)h7SWk˅!rCā+`r1 `j;WdɁ%3Tgޅq,3[1 ǻhU&w4PHٞu)x!ږp5]þSU#N` 1 bkS8Nkh@̍@6:2d=XXJ;åU sx e`oxi}`M؁jg(u jSY#˲T`PYnt,=y7mM#r-/Ũ+ D9;T|/wAC;\mF `(v3H}W͜^.9Etѓ{՗;+ ^nc_;néyz䅓Y#{rt5{S)pwH*N#83T]`1xzy +zx R٬^B~G]'uv$!c`~zLJH :L{ J}/IGME%Ie|z$SJ(AVnpyaʲhH.cҷbe-T.xa3%-5#]_ cʁF-o(l3k12:4ùNԐ=!')ogw^ K$P)Fbd=yRRhꝙ3ofRkHFÞ!$a: 0_+a #O!^Š-8 kćdaLeޗ'GIZIU`y;׍Tӈ吤koX]FrUb Gp-<nˮ>3ɥrj).0KdŒvКc8q%;UTۅXYpAڶ*Di‰}?шvg@RNY'Z}naNry:N}2݇%8/AϯEwA٘)*oz\ewf] 6>uФvjg\KwRnɿA endstream endobj 1905 0 obj << /Length 1041 /Filter /FlateDecode >> stream xWo6_!(}p:[&%i_%)#.TEʍ)Kۂ@w~)  ji.E(-%"Iu܌r, Fל4]]ܮ^͖=rtM΀hz$J.fvm(cr~~~10ɶ&BP\!Han,[;me[+^_z SfRSY 'DWo~t ނ9P9)PXSq:Q xU]u$&e P9CAKi+0^ ;KT֛E8@_zm/!I%[1O6n,wڍ*(#ƞxbCu% a~h>6o_N(>KA(n z~BJ.3 d<#S1QM$IF-~$C>k1>[/٥"n9}Fe jehzE8küksp!:{J|"r%QSwR&tNEeupJZ v[ N*,)aR j*KɞlnJ9dDHOãoBk#z)@ C!֜U=ɱbu+.rdߝ\ޑ[ O\!*GOg\,` 1? Z`ԋeV4w.HMNE)na|;80P'ۚ7G0TaJ8"Lx..rmkErOS;%JH<?P/Vif9S(*GQq\̇PMNAS\vG<#|PN@uO;5R弜͆dR'qW8=޾daգzur׵_yA%ᬑ>a92X=cElv$ ߼ tObZ;UOV0ޒAϖ ר_/g9(MD endstream endobj 1911 0 obj << /Length 1688 /Filter /FlateDecode >> stream xXKs8 Why&VD,u&6=l[d9%W~ؙd;C/&$A ff+"2oXqyKƂ,.U}|yu$4 0}Lu}%B{u{9\,\DK`ƴ|߉JSPsFJqECw+ Ja! 
C0HXwQ+Y'GA-DVhe%AT]Z,",'}_z5_PZNt|#$4-Dk+mh[X/jw< "-qJBksٝ"/S[goW3g""`">~ >ZA\޽y 2a^{fx8,a^Ȟt, PJ COq|#sk/ H+w͛F6Ks[ݎTC9gn'h_ AȤ)h4>Dɜ-mt7ORx¸?A鞝I4 0F{f%#; hgݵ;~̈8H7xdUS"N#\Ǐ#CbCYn?j!c,.( Ȣ?S7NS`ƓktCh+8h+iNecݣ캭O1uk3 GQ3޶ OwKD.-xf2_:I.Ov0 8SZK2;AiJ}[˒t2ݟYR#+ax>]% NpipX 5*OZR(}TF@ݡ!SкQ \}ЫQ_LzFvm}7ݥxT_|xeۿ.!TJ[i3]?S;/ "mwwm~fBCȦ J7c[ȼ^Ճn YCF @(-C,<2!I3(51&CvĚy0(nŰd)Yx ;r~4}r']YS[h{EM[[ɉmDU ݴR3}LO Isw`;9 Jd 7A=-`W4|S}g;hOÖpdI39u* bvgKPȐk ʱ 0aɃ"A[74;)Cչjv~b- =X%],rL2Ia'&6QF[^P.v`*I.Q.`3D([K^L{< kQ$:񵗔e;G=O!OٿQ endstream endobj 1923 0 obj << /Length 1404 /Filter /FlateDecode >> stream xWY6~_alV*ZR~h"]l>mE![^mAb99(rDF?}{sv2KFETq>Y(!Q QjkP6zװ&&tvtrt)%4LA. 9#hL<Xoe qrav5z˵k*' ZJxZ7hvǕJ_&VtfJnkwc]HIg9hb!F"l4g$` Z3|D4Vt_O1h{/) " s^ڐpLĖ0)嗰 ?q٬j*rB?V+;{|KX]b`;EMH) - XYoj4&[(j!U`<5%$Y1^nV7YěV?$5oاŲLjf%$J';KUb ۺ[.k*KPyd=_˥b(IHMAeJy}d<"Iޗ!͂%fi׼h@oYD(4Hҡ.L: )Ql2;hq!1jۍ~~yYIISKJ" py 'qA'tBIbc͋{t CM󰉒d4y5x]O4<!e1ton+פR7047V;Qw|8pD0"9gv..n%Lj[!jˆJT^ ,֞C{'kL 12q\JJcF< +viGxeZ_GBƚ+g~搡~j@c4P@q9?55mJw&91gL<li NS(y7ylWJY=VW}6x 2y-`8xg_= և(`F#|Kzy;PW㨛|5[ͽ&8%;1ggGӹs^e-Wc%@XMW>Rޮd傚3%ٜ8&F;B],B{p3~i$нy#[Tyax<Һ'>,>~zplwħTۇ2W)vo.#l]^⑙lg<%}m!x$F}xXͫwjgѕXŸn;o܇?M;( endstream endobj 1830 0 obj << /Type /ObjStm /N 100 /First 968 /Length 1423 /Filter /FlateDecode >> stream xX]k\7}cHb 4BIRhkLHmjv!-u]ģkHj%P`1di.T]h%`kqCBmtL :X+w0ZSh̙5W*xAr dL&I΅%,s&H ܺkUT&`kT6Ս\[[X!9uɟڔ V$0W(bX *>gr\J!š\#& >5pң\1?ǣR/u'`5H$dC$P{G|6K1I DHc :b,H!C"A/䊳ļAUEDke HefrK,1GRDsoːoCjr%rra(ܳos-T?`Ш;!y UH ERx* ?h\ 5h͝ uPU6Z5gA_iƒ ZH*GGӓpD E~wPv)N7x@!5f=sTnԶWD5=ѹqa0KT4[drSez.oLV®eI7R[=s]CENRK,?&:u͡([,}us\[]qp5fU٭gR؛C<[͡Μ` A㯵RX-˽kvypٕ[v/"{]kk)6ʭ ܅Ѭ 1DZD3{LSc\k'uhkXcNNr[}df>^ -Q ^:Y1g)rc/.^|||Z^/.Ì&T_G"%~/{5C'AϤ678ΣSMF32wC1&pA0W6JMcQ[0ޝKj͏K,*g"gs?}h endstream endobj 1929 0 obj << /Length 1811 /Filter /FlateDecode >> stream xڭK{6_qb [&~=4vOI^@$l}!y'4Ѽg ◛q~ԻyaQzYiTx7n7o^'ь4"ʁե?n`zmި[Ex"otx\%~d^]x5 x*rH;/8/S44 ?C/Mr?+f~CRwaPEz&\n^&q l^67;Ycğn5_W0jle_]/ޠ %:!IX̥|%{"@9\ҕpI/&L"y2r"S{cI c(IyAJG@"q'Ħbc#\Yc1д)+4+% {\2"ܢ6NZUAWQlTFЕ'q, 'eg+՛vmzw^?_8K}Q cH/Vfh* b4tU; \8sO1$ipVkFS?ۋ1Phg>HN8[`SK͓IRbͿ.;YNV`}%WO%ZA)ui+d9kd9E` 1Fl_[a`0m/cl k)Q"7r+k,ԩҲ<`NH:Mtq nF~EiFV 5ƜhQC0Tw>({)9I` RYy:k\ 7L~k^Q}JFl*s1l(фTs h1SƳaP_讲=Z~8ܗ>9l|BY6Zdky7 XJ%#]ӻj d-zvbZ@^UnS^ǩ.RkMa-3 ; pgn.g{ǂMaLɑ0c%g/EqI.͆9a[\inŬaK9up8r̀iig~l"hʪe=NW}d4R o5lh>=%,jp_;4 =TQlTF3_jr_t 4qIJ20!.F4Ii7FSR$br2~gs"xBp&J˜cM5V8!T7UinOe`+@gqzrdH5xeCΣ |٢"4?x(,r+Pg%;5S;G,G9B3FS4]da㎢_YmJ[0ATSo^jIlbCi{kn,&jɎepX10j}Ĥl1ڪR8+D;a]K𔺓l=_>V[+3}_s1U'G:q퟾B]CuX?eRw^zXAB&:<7.ۘl.4~SX>,{ 4{Uo) lG1%b{lN O{!,ޘ~~uUW黫0C(vW HXu` endstream endobj 1940 0 obj << /Length 1282 /Filter /FlateDecode >> stream xڽWo6_! &5MְavkވNh}*>HxLw;/:Q5&saOc0qoc+#7]qsG 9-mlN9x4jڵ9b # kexBoSk^ܘ>9EBnVG e.ݹYj_bay5 J؄8H/wV  ufÃld s` ~Z7pK|7p~e42Mx˽h*G/^S|eEdBr/M@" YQ1[ѨCe|rka 0ɢ8pQ?,(:Ps +-Fl4Pi^jH{P{w Y c7/}CȔPCmV6XkYn|=` endstream endobj 1948 0 obj << /Length 193 /Filter /FlateDecode >> stream xm0<@ݶ&b7ʏ$&  $v'3 B ःCL2 Hx"!H.Rd]_+gpFT}>*KŔ p)gR1:ߙa0v4UD9_e33_7">LcVoM֫}w?pN$ endstream endobj 1953 0 obj << /Length 1669 /Filter /FlateDecode >> stream xZ6 &J`omNjZ&m&p$NmwsB R8;?[ųxގy_懫[V@ ![`4-3IThmٖqM:3JE=o|f0"*ʵjr14oED6Ps^D 30#dAX6R,?ReDqIR*^jfϵVMdUuoMJaϕK@h)%Q5XR0Y3O'W)!uh7rf} wdUj'}! 
≠ \S3J*ܥ]rX>ëRjguuƈbG/U+5=]ST$Mm6,Qv!=GD"}p K>ٻA$΅"vt83ycю>h"*9tJhY|7;n ۞v&>b,GOG"lI =}[W2Ϫ'TcQ\Jqvx"RZBDɵX |Q@z ,]ˆ0\ #cF!.l04Zh`cT`e?; 5nBRd#&K~Vh $g;>9gNH?{TҘ$-J L>؟/( KOĀ\ rgBu 陆BXD)8bJu0nP=X@#̀k"E#ť'{ez43%Tv2 |A , HƍX]WK JMT?RNJuGth2߃CT{+*@kNvDiPdŎ0bx?{}.f?ׁڰ V7j +{fM.[!~h:m~ۇm3VjNg'M"uP^<`nT,SzSBꍈA.NS͖qm@LO0@$$+^zE0,wrAcsӺj ,[͗4Ed 6~zG).nv&0z єArpBSd$B`r>bZLODխQmTAoqnybz#Ni;5Bt+@]m!Kh\ވAm5=xnEHbaFs+;˴ 72<-\cxi< 뽣Qa}a%ez(JO&1s'ċ/F[~l# [C6Ynb;@WX9oԽ gw:^8/XꖨgW{!x5^@|9~Bzύm ~pyX0SoQx^Ir߯Z endstream endobj 1962 0 obj << /Length 1584 /Filter /FlateDecode >> stream xr6$Ӗ( H1S7$6NI0ɘKPrC(S.c',}HM@o'/yyq,%$bIdF)˃*xRL?,_92G,Fh%J:& PΌľ c#[UɺtsUպSJީqG4J^O((_О@p>$K)(Kk̳|U)]9%8,J<g't`F)F<jR)Ga;Ah4#߈z%+dG==Ш/=k'Ġ1PI—iõ,:hڝԪSu ٕӘ;[ R5Yk ;;7!IwYėx^0Xٚ'Qdi|LT͕,˽d%:UuNƌ% KOg pyP3)zSġMHgKB26/L;<0Q: E<Dq<4Gu9?g,X4/ > lKӧ',N;R2ő<a eQ?B=ԀSIM}A^~+ג}=;Ckn燜y{KPuT\uQ zQEum9 [ptӇ܋![+LL}B\W38m%Ю:q])b9-n9}J?i:G;يηn[ze#ဳW^Ҵr Tcv0SPՙAՏZV8_,׽):j1JޯMO}#/[G;xB[6kPTE /Ԗ. yNcc]4V&=O %~<*&/_'دI@X<(ɻ$XG'b0ho,i$0ȳx\N~wK4)?g3Ď>.JayڝݵD` Vl j'kp[oʼnvl4p;eY)oKoҌ.7jq޻uѳBd9ԫYp0``=!NoAg ߖ]rDΞjwڠ `yt{= N8l=GZ=AH>̋0kXG%`Ժ`%&VXզv~J<,pAN@m{a-0W޽ҽ, Ys$BkH\f¾x.3]Zr[٨°1(ú]lk7e$I#ʲ"ۑzV8P:1o /a8*Ũ* a ?Ń4۫RVu)o r(A9Fۅ*v`&}i@36 5 ze/xÖme$D endstream endobj 1971 0 obj << /Length 1668 /Filter /FlateDecode >> stream xXYo6~_!$h[Eh@kOiKDUJZ{;tl R x f> sneu8b?vw<` s [Y]v:L&K,FIJ{Zy 'қ MQ8?@}7aٷ+*X@DnKzGwېѺkun7fLPBwnY-RcҮb:Q 4h(A(: |O;=(T')- }iMaa{V֏FP0rƑy^xE.5oY;w:b 9l77,8&YDtGr*p"7j} ͅDo%;e#u'xQNjDž˨RyBk *Z#lg!yHQ?@!M]ec~'߷tחSCC$ِOrT+7Bw][y|i֛ ?oHlg-R8pP'Bl\@WB- $=/f=EEP7=ry9f5fgz9;χf|ۓv9nXמJYNTg`}oZ2teaR`Vt :ׄ=Eh+ ~x; ʊ\mzG{SO 'F,@|.B7u .[Ynxt#hr H5Vիտ+ =@PxcKyzs Pέʫ_:W JgYQA8ĎNjrLB,w˟Ɗjj&_%yo&`,H\t=UfQ#ω.GћzrTn8D ) Ⱦ$\f8Ebp9TGTGA#0yAXT - r-TSk\p4 ֦/5!\zB"01<9 =Sm)5~sO:R;yvWʻ乻SՓP}Twk~Bz\B껏 NK endstream endobj 1980 0 obj << /Length 1557 /Filter /FlateDecode >> stream xXn6}W,-Rиv[NdP$JK\/$wYZ S4ȃp8<9-◃냓 ^$XXoDRb/;ԋ'?2q Q;R)CDU]^|dS5;M֎|L@& &$JV,]zkiQfUqY(*VֲweJu:d$JCH`v mO6*ٖ7c;af9㻺K-dU.Nm!)pSU[Ҵt UO-Z.%<H wU,idHh/ & bnK:g/Υ;Q,n8|&g8, K#[sWׅPPߔ7 ãÄ",LMcnx<#迒g 6A`m0)7P$ࢰ0;v]ݾ:9+I*ɛK̖36LNÎxeS (ZĔ0j =|t,l(L7򑄄>N=Z x %YE$G 20JGw0wv5Jd/B{UxKp,ks3`hU EUQu9ۂ_FNAlև]b%*|^l)GKm3ஐU:0=NuSGl7+qWmYSsFFo<"&;<j33A_#vFCl9ߝSGv#p7 @K6}~F̱v&-Ly09vO:Q~`' {ʁ8 <&#oLc'; RYQ21# !) 0T:nKu# '{Ë^zz0{_ݖZ(1-<(T #m_NhDJe?CWife":rjh4ZgWÙ*IBx"6hY?o>o endstream endobj 1989 0 obj << /Length 1412 /Filter /FlateDecode >> stream xWKs6Wph|g;ɤVOI0IUŃ(1l@bb~^.^ǡyBo0BA%^qZ{5UAA(ZxG[< <FT ͐;z5sa<woD+/ (y~ƇpFAa/ va $3r5gr 1ڎE6VĎW]I6pb>j]vQc4FQo`[`RZ,o`@ggA7LvP#JR_XeZڸԎYA߰Bq=EYͤlx/TvĻP`_O5#e?8-<-8zFHմLc*pBCC &Nn$sv>6⯵ɍzBUZ僵b'{\\SQU oEӵ9ը>s_#wzJLop]= Գ<Q7U|B~֓GЧ.~yOMUbm ; %;1Ǚebo1<$"}C'u_H|ZBmϜ;EUV<бy-;j;,cN-C(M!ͱ.^ >ALO|evH鐒iңΨYz1#lʼnU'*f9O9ӱgiHM5ʯ6dz{y$PDR20AS@_5u.sG#ͮ`İјkDq\SJJ;J-xz232B2!Wqp(218TWM5Ok 2r۽T $[~c_/ rcxC~1$'TD// ImZCp)ʗ-fQ ށ#Ikm|ʒhHPYdu=|,3H#Y? 
{'JpD0^d6*Ď,51Ү!b?-㗋IyL 8;A.S%vI3LL=NBfёN`rl"ǖnn_(_R~|Y _|!/J2`rjjšz K Z͒}i{|˕\^Y]f꒓ endstream endobj 2002 0 obj << /Length 1389 /Filter /FlateDecode >> stream xWK6W@%)^fSl^HM@Wl_ʎ6q Ï& v ~[rz'AF2ep $e0FdwE6d4]{yBD3("+TS#V4`v"h{2, h #vzw+Ds\Q"cH},en0[o"*¦vkHzfiyo>Zw֭7u߮l6|֌;6CXvCq( 4jQF :`|xesG1#_"C.D1I#0=]ޚtwΣ++Dot7 mcMBo~W-VMsb]s@:(+%8p:Uy?};Jy>~ߴ-%>Z_zÇ%9NڿF wG#fm7&Y ~f8\f E[@ϿɱM5gR4Bc96S}./=)Uӽ0u^N0Տ2võ]mUMeхKh )ӨIyty< ljWƼW[9\3߫ث?*ɳ,ߝA'w`qx{$Йʔ 'qƓ4@Xg_~$ ]0o>iB(Ct '#c)r3罱J]SyL]ivq }:a7[(+<{؁@ף6L)O4tx|IK}t6CCw&G~7pm% ܫz癯'4|52%Jk46<qwTP`ef)yTu8|20 s&#ٯMHURQ,s ckD76?w uɄĒ=ا7.__ha_t ?tOƐ {6W,_+1Ypi_]L}A~NQ]kRwp(1N{ T,pt}33,?~*y)ZP"(` BҮ̐tWo'f=flҠxvrg oTcUݲ0+pUe鈱9>x77x_.c* endstream endobj 2013 0 obj << /Length 1508 /Filter /FlateDecode >> stream xXKs6Wp nGiB[~h΀1CH o]nڦ+&^~a0}d\W,t#Pin`GsY= H!׹Ơ4A,T s.z Tn1OO|>cv[ .tac[@g{iY͕Uwj[rWl>P60#*F0mxRtiI8}fDEBz=?|$("XUg(Kiu[]I=vcrB/6MU5kƄhZFn{/Us%DZΝ>g[՗`rɏYX9gS&=%<ˑݓ( u 8bŧ 7\:S @)±X ۓ |0eÙT {y,QVxQ8fvN%$‹-kYF>*ȕJˀ^AYxԎb=hk?V k0* 8wEGF0äV*]wQGz*O4ɘw֢^ooSknȕ@yA *6e_L,})NQiʥs$QO " Pk2I>s48U~QsjW+=5WS4̜1>7EtRy1 (K/0ۢ+R&:aIqaax^M$Dtsf!WG{;~{ex ~$IjNy?Q;zɆS~KCg3,CWo'f=ocK endstream endobj 2024 0 obj << /Length 1585 /Filter /FlateDecode >> stream xڥWY6~_a$A*+.II6&Aݾ)DLt8:uPwxHc~!=p' :{qwv'1e` > El6tF1b?Yg :ۂRJ^#)AFCc$p$e5qJChiDA`*QJF;4p6Yhdbl%W"ݱ,ME3y/][y@ES# BD|fqy[ՔDN"vwyJDEFUcAFY­($xc,ǻ㹕_BħoSީqUIJStHtK&brJg-Ӗg+zFĪ*M*n6ierSB>&e6`'lXz]fm#b^BRvR8+E#:b,^Bwn̤f)eԃDN]lU}yq ߅0aЗPgc!/Ⱥs"@k~Ę,UGYȜ_%<7@ny4@R4{YmŸ\42UC[@M|ū{9L۶;vU>BeQx3Uec e*2˥/E[ o2oQV2QklOFg"tBIشP^HSb( qg!ν:~+(O 1$&z0}'> stream xڵXMo7W^ -8>5rp5H*HHntfwא j9 A/(pV8p%rl.HX|+up *s0*x!ԍAP\Hh&ZR6( uOPVW9!MqT)wt9·8R wDI? $Bs EìKRu3RB2Mٕ$TfH#\z _Kup8A۲q[[[h-=jT/ʐ̍U/&;\S-] ZmA%^ya}%#ZՉڨ!Yt0~\L׫Rjp.oom|SJxQ5~[0mOt /m01;sY߿~L)ut146$ch-)znѤDz@1t%2fT"ւs<P;G2|6lnL_,4=w^ޮo^-qן~k1=[߾Yv+;vx1Q(-! u@Us\۷[tw>Yw{J /w7aFaъ&0ͨi HEsmQytMri17[Vj49Y`bv\G߈rl4ʨ8VQ| `첌pqm" E# endstream endobj 2032 0 obj << /Length 1567 /Filter /FlateDecode >> stream xXK6WRD 6X@u&@mիCʖ ANRCrgHC?oɼ,cgq(!^NB.{`ҫʦ>!M, K RŴ 7? $qx@D^D'S[~eYk'0rD J8N^1%A2Y{YJ6wr(J/W>MܶQԕ;o6Knx.Gm{'* Ľ]Wːe滭8ڴ2\ 3Y6[6}(iL,+˭IDBV7R<-)Q&j ̢Ub4TD FL"Rf=Y.xCڛ"`N/뻶/(Oߔ{ 5s3%^^$?\5&YuVCVCw Ae"T,oՁ8HU JaeaLJG4gxU24Ӌή3h#IZ"j(oI^RײiڥT3v+KhUzӍG}Lۦ}'v0tI31I]cr5 T@0:`]Vrûc)?YVX#XnVIXL421Llt;)؋C^P@Tt2VzjVgYcon,?vI`'"S:]hhi,hj6Wrd.!X=HmK PZ(Ҟ $ 1,$Ea=rm;Q뮬ʮ7i422am [J,ĀFuUk,TEזևFe/lzqLg!Xc#1Uh&{mCxo0M=dv^ObRMa2I<ªr}(O1JedLʠ{~ʡ^蛧M=*"1&Ʉ|18Y*zCFi2H{)`G?!ެ/7 endstream endobj 2043 0 obj << /Length 1545 /Filter /FlateDecode >> stream xڵWo6_a*LR-u-0-0ZevhHoߑG:vͰ><'2Fdu2Q].G(NhFi|tY>˷i| YRQR2BR~F?R,f<l1aJH,!KIDr1f$8cעpR~"5W^DV+7e99MF!ES4-4B(g#z%'Ttaά嘥1q?itP0A@-EȿbZoZm̟,u~ebFynMX~.(.Bl.uDutV㾵}QS8yn] X1WU:eڙV.΃B`F7eg׸3[DP=3%㒨q\aJ a*$, PZ HFR*\ùp,t/R׵6_uNq1c:n-Z(ԝO0^i]q-'àekюiTYy8JfBh~\\!=,bzku[3@ !Ӧeۀ' {!ڠ08x[1?¤g'K<ɂ={Wq Wc If&stB.#cACiŪ_ 1Y`yOEMʰp]^TvJ  {)C5C["}{ 2~ 2Ψǵ ܦ s'!NZ'Y"}R78,  dCL̅~Q4$ dԵ qEY}$l6%$I@#C5:k.Jc@Oa󉿳7*b[^vC@ݨ@1x;vUKӆ[o[9?88j. 5/' Li1!V w]u*2wVv1p۾Nt9?>ۛk^]& P7[mq wؘ챎::M[sƦ ~ '~@AOV^J™jixQxrxbKgDŽL!LId5&\^o|Toy*^] 6ey endstream endobj 2057 0 obj << /Length 1328 /Filter /FlateDecode >> stream xڽXK8Wp[S3OU{lm7iՀUD$1GOIپX꯿~i=z[ݼK"/4L]A(N roWzwo(O,z;ڴV ) ͒ W4+=WƋ%,>ΛZhAC/MA3%%0ȷiA%߄ X֌m… 띙T1?r+)ĩ$̓|5S( ͌Bo0/?sf\6pFym,K[j}X&85W[-oE oxy|PjLPC>@-\ir#˓z456#*u1EJIh/ݩN]eyb_y2؇m"-y|E*ڨfg7-}p0 b x0d*f '1lw*; d\[kb1baV)$SOLE;FX k ;SU~OJl+9 oZg[O14;y,mɴn3R< zhY;>AIl X)]?gCUP&Ѷ5+e'#PQl 8vx. 
eW!lbl>3KioK1ԃŒ\h!Qq 2䚧t#/7&<}O`{q8&Shk0UA8p YTJ-xeq<~yӔ}!~BSwFOl@ݞEZXݩRĹ %s\ZǗ]Uy}rj?͋%1uǏŁbUϚ@|U@ -ʾ\X#w5!w2ٜJWUQ#.&R׮a~_T]SqUfJ5ΞJnhK!.o?^YGt2lVV|׽CSM5bQN~xViȨrP Mܺɜ܅˃,m6<snsW0W5SW5Y vr`s@K|<ӆl f vD74B7Ԛ5d7i fs})+‰hG'vƸB5H_v`ci8uN; [n7 fjyiO?I_n^ endstream endobj 2067 0 obj << /Length 1875 /Filter /FlateDecode >> stream xڭWmo8_aŮ ؊W+8&{E^hh[JԊTﯿl9j7܇D4g8oyfLv`닟/.nlyvt!Q>YOBMFI'Y 4SxC +gQQOaO$ax%XV!W0Y_FI@}1z@Ʈ& V; B@Cھ"4s cQOavK9š{0: ~yN l _]I=- 7-)x[z8$adPaG~lłWM'O]+aiܒ,*L|NIݏ6+:᪳B-FLGÂ%niPav8& `^\{BT "6b1*Ahm{ñky(K!$'Weܫ :b-u +L$K?am6hv֤CH^B0SCK@ԇikgh9#0/0t +־9?-I]@4l~-MDKﵯwۍ_, B(n-3fa=}ߵŞ  ev2f׽YBQFJVŹqlvڡt`PjqYUOT=PW:¿>d#tZ2C sptUL?5|^l@,e4Q؁Pu Ԫ>'w0d5_Հa}o]7Kcq0̞8I*:f8:tgDjWC qԕ_rgνέP[ :oK&#'~:^reo K[O4J}W'/EEJ+!`+WmPWyԖm-Oq![5 ^\Hފ՛c(Gn =J {JVŸyiX/JÿQZO% I5>(^ua OK脼۶Eܙ.Pc-xi de>ӆubj߮Qvgp R_t=Oh)kͯ2:/BSZV3|\ &;t!P[ӏv;zj9-w=e}[<`,(?3mOT^՛{Ҙ{J xĚg7 ՔC2t~eZx CZhp ٠;JvQ`ԒmWȮ%mduSZeH;7FT\wm.]KW30`dzONSA_=0 9cᄏ\zv?jb!c:\8 n;zۈu^|^]_ @ endstream endobj 2080 0 obj << /Length 1462 /Filter /FlateDecode >> stream xɮ6lIO$*CEmD?H%ڼ$iϽpC׺\vxVD3:V3+$ęyJI'c%ۥhQ,:!"\#vyg>b^8W6 2\w! IfC q6q("ӊTo4a'"sѣG+fi\Lr+fg`yh c֩Lf> stream xY[o6~ϯ4EWhRԵ#IL4FeDYϨHgB$.6awumH#xEy(B$%Y qF q^JJ`8 E焩U& %ODMxa<;)b NZTurVulEׅo$]%'BPmUl`Ͻ28F4SW%wNua }+ZG j^Vu A 7S)} Ufaq7]tk4< .qC6yGvJNY иnt\qӫ0N,R/w _w;mIy3~bY358O4#=(4c'PSQ@o.#.?Qy`mHÈLJѹػY7͠M/?/ܣ N4G]k4 sjŠդřxQF5xd(QJ8o1Jr? 2|E [ղ4 qR,Z $Q@SD߈aIk,‡~Ps^7Fg#72< |=ۡoD̃Jy8seG=YmҙqiCZĆhK*IHO6,p'-Z~q%eᜍĤȧ509b&zŸGV< ҋ7`(dih!J"i8PykG@!hȁ#pAh7ϟoÄbٹ=kypTwT)ɨAL<}s]@$ I wt͖D_,gpeN6]n'ssد0ѵPۯn5z? endstream endobj 2097 0 obj << /Length 1437 /Filter /FlateDecode >> stream xڽWێ6}WX+u PIAAK.)ؠȷwdn.!9sj߹u|&Neq;C|ߣa$x1͜Uu zU^ZdY8zq}&s4$V}f}R'8D룈8 "aKx|-j%ݏb˛MٳD-%Q }g'^΂^Ys$W>2ՕHVMe|yǦfD- d-ehjK>v+꡴}Z*R0W6F#d4,'1˟fb'3:s-}.Oo=fL?>R|6MMl +1)[rcgهYV^䃓<OF|ښ]\DД23uR|zRəR&ā)rkN`ŇLV3 R!ǥY=?_Q1jN |Aka!} "+qj=QVN* )Ke_W]L騘qH0&ARXmw'\5mNqY) YYV,w[n<>'{Tfk ˎ̋MOʥiw81.S?b ڗ4Lݍu0L\:CW5j0 5$y{܊qjN|+ )ŕBeꏜͺRȟ(y%dC{QZA FbWS\b51V鷭7h(*笺>`v*KĊD΂wӔ:« ,UPʧrQA3Kz_q)%%M;9s-z@2NNŔFu`%>qF @=W׬^t:jody;~T/6f4 $Bk̋6w.6es+rVvK6kyc/G^{} H |, ;n@OH]A* .nQX1S_Pk 2$LM.W}8Ux PwTQ ].Feگ <Ŧ# sNE;h,Bhk#>D7Mu{.=|̊Ǘ((SJD4ds!.3۹ɟ^bn䭽F:а͘Q E |^4ޅ5gQu.ʰNDYUÔMZ 0 aFSo={#eH|oĘ>Z<1 vE2\"D{qn+%pY-;FNtn%N"uZJ]f[ݵA袁}q%>TG6wrf;u+& f endstream endobj 2106 0 obj << /Length 2033 /Filter /FlateDecode >> stream x]۸=HЫ \} {h/\dzZ-)9PZɫ>Ťf=áa/gWo"cY"}b,m wWohD>KSpÎyk) bs^JdΫ^I 㝞Ӗ[_t9)3kڠ!Q~~ƣ=kԢ.zr8(Vkh4ډ]^:~|'+̀4%$UP{ڴ`WgO@JDJ<ѵB/B r'ZӍ%-ɌA*ܞK9mR2'ccQMӪk Ed_Yz0~G&a jX~+HSՏIOxVN}P@ȡSSrwPIlzen*ݡs8e?6+LOH&b}tU>4%}0PQHhN J~5fѹ.&w;`tV+6ùm)f7`[A,v{B֞;(,{L0|͡-5Aw($ΏsZ1@TNTׅhAЭLda/NpaT ePDs(RG:w2xBPsl&q!qwdqdm(E(xsa%jJI`qZ;3,kbjv ?cƁQն;oO1 5R:>~ OJ# =2 'ez7ܻN;Q4Rcn& 5qD m7 jUhdu.sAYĒx3fFNf'^(f!lRE["vLzL|"e;܇ن51\}#sǍþe{ЬA f'K6 hsZ|d,fpvw sonGa^CG4zy./^@؎3ܳJm‚?}"fvaD,N/ qAY}n4!H^LٮZ=j#+&9M_9-u+ݾu8]&crrń 2R|W:=[`-{GuTħh@W{3Ojo{nk^~-?vNvY'ڼaN B樅Wc?]¾Վw]sNPv$;.[{ܩ=Aޜ,x ~wÄc-^¾G"X,3h룏E;r#-> stream xڽXY6~_!$E!Vi݇4G idۗ4(hHKJq_Jm}9g>E6ﯾ~AUERqfEPi6Cˮ $&Yy3*kR!U쎺~ i2ˤbj7=~x8Zf;}[-igI\ L}l|=uQ|r/e L]dߒQWZLzk\*ʌ;kǀWaǞ}8;a쇧7hv/.!QE z6%;hTUtoTutRx\4[;Y4_͆Ճeú9cs.m=yǷ=(Ƃ~4w*8HUe^.+q@&WeJA@V`oX .]/& ҳ(5y ("Ĺl{U+C06K$uE$]ZaϘ`L˨N \e~K;xXC -C&j7fXSlArREq{tht \BV` bwez?і~'d/N&xxlAo=n$xjN()*[6k]ٓݿ\$9[Gb\ 7@{e,g7B30)bf$QbJ-E*TTLʡJωqpw*;EYu;,ue2*!ƞ)^Qݸ<0I6?]V۴S=pɵ¼ yȚ{O}CՂB/jvoI =*^CA0x60ͷUN?%^j)6Udz.]\*:suihˇ7,ݰ4[{OGlZ46KvR Hw$rͦԪ;hr=HTgk9cb6«mF;ⲣI2Ri;txc]Gpg$ivBp=Is@ͥN2;Ű~%[ag_NI<3A%VI f@n $Z]g˦:k@vK&ч vCNMqej ,F`K2u-iWVv׍ʑY&,_L eGHB;X"dS ]=IfsHO̘mIfJОE Œ&֎<cs^Pcg!y[XY1d w\z"1K9S [QdJf]L@=gJ-jå8Pbbs~H'1 
%'?+LrN,$/͹.;h!o9s^?mYu{;88N]}9+R-ʿqb_٩g$|nS0D/dmw_.R5[CD>7|zs}|j6D> stream xڝX_s68s 5#3szHrNLSl,BZP ʉ黋ʦ3=X\,v .۳W˳7iT !c# Ua"\=;ڏVy6)õ[^*4ioLxVnD5E1a>BQBLTܝz+uO$1h{4: ^o*1(ëeׁLue._ (gb\zQ5" ez"W,mMK! cYoBծ ,yrnrXHBlG0YIK+1Jm但cS`G{JSeB1K}KYƏi1rK f,@Us )7}/..j!/gpV",qJ0.4JL]5.lRy EQW@W~C̛!G^|H t^;$(hj9+'^~2^`}+ouU'Dɇr@`N\.iNG(-kI{FߗAu.u5LX4@0I Q-8y 3o[c~#5^!KUy#h}:v4<%#Vf"|8=v; }y}u}3ӋN{ՋvS6v"PW"ݓVsU{1,On,ba:,7OG|l#^C9R[SfJ3J(d{;bPSTMmNF+88ئto| ۂ>3ѓ֭{kL?5ɜ,-Z,uY]p5?;ǶB$%er7Q#eFIL/]xGȀ#`ddm u,r{v< "+Ig?ƒ&AK2DA Sbl>Ysaw Sج> stream xXMo7WPOCV Hshk`BQ 7tbKѪTE]>pr TnI=9uƿPbT%!Ā`MZ0,i0<35Y oҒ5ZRQ#rjW cÓDD IdWO_XV`Y0cq"n D*1C6f £Na_G¯Gku$ c%x(%S@^Y(aYfP2)1ai2iFAM0V$!˗i(,Ga<\ ю=\VS^DD5|y}.담z|5X~"w<ͧ뻇2ϫn>˸ᠯv~@WID>. 9&1r/+=J4=K]6Q(shvb2&\ I4KnjT%-ntK=Z*T:^r.٥Ϣk>v0Vl]JI5nOiiߊL ͢%4MVw5њƮ6M3%^k{Zl9ĺ/(= J[Df4֚Ypn6ntxC̊u$4:9vʮHj4'f;"n7//f4fDO ]zOoл ~ZBMhђI47۽qw?R}av</u@ug@ h<q肽OOQPCܘcZd^:",Oc&}olGB{oa[/F-9tY4L["2?̻'nؙim2kDkܖ9Ү?# endstream endobj 2131 0 obj << /Length 1703 /Filter /FlateDecode >> stream xXK60"&)QM춗Zmwly]o)r"E g> ' >{yu6`$AIHrB0F~N"BP'l8ŔLz3I~(I@'V U@:ܓu3o'QL;/x2#1 Xky"Fp1SUzZLg>ŞZ|ic-M.k#Uee)@ /”!G3LD|4k>D6spcKU /1><,I3f=e6W+QFeD%P=V=adCЅz>X 1ufi=$&" tz5xڈjnuЄ0H,To(W)BT`Јv0PC <\`}C86GĴ1ü+Yj6 ]M}VK۾K7RDφU*ܿkpv>^*mW(xL'>RCV*@܉Cn̯'Qr2;?xગ ճzӭ>[QvFV%sZ׶ U.,zld:VO@-ta}yѭR@[l4N9TJ'*x+:=RG8쯔F z0NBihs#8N>S `1:M癒Hƭ#p&\}>L8$z<(Dw vBchmzxj,Qnt3,ivc(dߖE!{Fҙ4/["@F=wq\uXm5H}ܽV@$#Qw}3aǾ{N捜ٻPȝȲUtRy`G}pޘW*lk:^jkG^1yH.^wIYp9/ 25& a˶, 5;/!3 -=w Ryܠk_1v afuݚ}"̆csЬXiۅNѴZrLSCO4n;)G8:iԈ'@ q4 c:S`lFWăӟca۴Nɓa]4lgcԢ 񞎘!3ԯ}PdJv 4${CjOaފ6wodG^ϯ(' endstream endobj 2143 0 obj << /Length 1299 /Filter /FlateDecode >> stream xWKoFWqRHsXTǵЇ45&̒#o g) ea_3fvYwgYMίùIjm1s0挹qXzo3ߟ~Xt~A([JOg? Y 6mA`-? | p9l$^//@  [ԉNڈ xN, ]Шn=40G j{>{v̦屙C ~ils{!-SPŗꤪF:%INlE~;Q hڧ1.lMl/kEDAcCQ]TڕA^DZӻ`<8V4[@d.6P$O8^|RЏh5k.[t.EedF)OS'{IC.k,?gZ2nl h9J#k6n- {}A@@&МeiyAl5֡:F{ gE )-ƺ!,JA#Ji:J)zVp /jYm-<͹F98a% ̷.Z"2tǣoM{$t3ax eRH(}7B;^esZS 2Ez6ױ'(5[/]wD4JL]&x$@Jʲk+̛ţ[қ)|4Dq].ipA&dS/uEr=j58aYl׏20;-&?xV ؑV-<6%M~۵c<!|/t S|4m`,X"$Xxa#8/F:?)!;F8)T2= #٥(=e52,jK/z4-rJZQDZm@r~D;ag;741 NJ-<+G~Xg |~ -Ίh2'1nD Ɍl/5X`'t=TI~zCރHqk> stream xXYo7~@[lOcq䀤 Kus41 ْǿ>U4Vqo^]%z;PU *PƇ0a#(ir|$ Qcܠ Z/_G_"yR0V -okjh~TN35[8 aPԮzw2=%QDǍ8({'#וf롅 Vh(wʇl똱bG3);Ci B'J? F~Tf!,nw_Qz@'#,z0Sm7v?j  ˣ+w|Z=`>INl.MZbC(C.G(bT%%ѝTAURuZl݌vsM;&5P8qm@Lkc[>Ydz Kj&'g]ˡaxFI -$#YyE yy>h*Xd3K@sxHcw3 =*U#UUfz9PdIVEu IYɰ@`$v AS4 A@#K<M3.MWiug0r {x@p>Łûg5> stream xXn6}߯-ꮠNMS{>$JK\/]eg!)YrE m8Μk]Yt8yVJȋƢK bJI:^ o/NHSy(RB9XR׾&mᚯ<}+QߣJ0k5 oNwn,߸[rv _r~tX1a+3LaŃlaYX=th5XPD^]\dw6ɆUf?ZCZ=rgU^-EFKbYYxֵrXOFVQuuVQ@IFS 8AH>ޏM'j6Ϥzvc3s-WYfRR՝Q-ApMݯ:gp>ڪk,/tzA9/!rZuJc_'~4ñ T3bs70t'6kNbۨL?$$2y=Ky6ML; Y]dɕFuK؁M@4T ;OJ/t-~i2B1ۖ^h_#0:8鹦ExตYcV{̢ vd-`؅2xӏN6b#e !(.9v/"gp[ˮl>4/> VCݕ_5ώE8aJ2Q^vSE_QђEWVY=z*.Bq' bB]2UWFdFaD$6:gK!Ci`M;gI@"( ٖ5,2Qc:Κ{y_ve! 
?0_ia>?xn%>ST5 ϸ XշsucZ?)B+tTv=fnlccrI?rJOJH{eh|ďئiӀ7 {N 4qS#vzkYd[vG~B-peB-PLjZ:k*dsdf+-LDV!ڳ::n.E.^OC\t 8X& 3oDWpexv3H<`9UY;([@r*iVkۦx 8b[)A/8,DkrFLE=ncs*XoB' ArKRqrDRq8aAMiRK|؄]!fk>t פS j=5/M ly-Eq:mVene6+sK܊\A:"Qjܠ aX-r=r ZobY^O^HFר@ľu5ZEq.F5@_ {+t7=P|PA5f0}S- :&9T 5v17r5#NhoL;jEw~$$ /R1 ``ڧ^QIa^ƁM-WAM紩{ah~aq"]v{oœ{kƛ,OR6ˡ;r<1Y, [C]jnF/1> JB=bMQ8Ǻ 4O&~!>~H:|xV(ё ) x0Jk u=HwVTI"l a endstream endobj 2169 0 obj << /Length 1378 /Filter /FlateDecode >> stream xWKs6Wp ofK[n'iS[=%9$$!Ab;H W3`.>EGf߯f'q$O[=F) K#I{{W&BV,/'Qzp-95X#ӌw;9^ ^kYsYr5 Uy $nKry>_4W4لUѼ-F"ooD7猱%i@/(km,Ay=h.,%Q/9Վep]8 ,I[nh9~۽J?pɻoyej!yYFi+^зvUl( -Z^-:垁)xqJx q/ā#kkLg%h;nQJBRaE$`*dͪ0ѭF HB@z/U+Hkud@K%шNh( b=!1{EODQ^hͣG拘Rlv+!ukJ职,&,cZ+: !,$Q>f3@X8؎nkjbP4B( en1)x}Džِ,GqqŲ#]Ω X^):Q@@Τ]V͓;BD /Ԯg?__[=vvz@ C.QqBm.GA.W0ӎ.i\/v,%8bMle%=x Z@{.||0 ɦn)w{ mqF˧g_K໔J:D2\a%&W6W0ϼ; #inf,^fG#D Qtz8"vzxƴ2$U[1q=aL0~[|Q4ku0H&}nQ\b }dtծ\ ݯ;Wp'z3DA\AC\;@@yȨt7mdaK)H+nr2m=. Nl4e̲qʴ\([?mxL5n4m'1=NO?'8)O]rlsأ !Qۺn̈́)wT'4f1 Qf_Űz 5Āo,4\\?T 8'U3Z't s(AC))o( =ڀВF?|Rj88F(UFyGvQ|A)Z]Oh5e;?}kY(l ilg7K)(FeS4n4>"U@4?P7 endstream endobj 2179 0 obj << /Length 1449 /Filter /FlateDecode >> stream xWKs6 $jg,H=dwڙt=%9ˉ^%q6I=^N.=A`zjsg^)MG(dqe)+?]LB% RU|%EK*rba0 hLf׿>ZPpdd$ܮމurzխI`'SAIw[U6;Z]{9z4,cD6?hɱ =D@/ĺ:Zu/JkMTQr%숷kqA(@Z^oTdVmb=8[p$qmgxrr Ms@p_ oh 4=i!#\u{&_8.t `g+, ,sE,VD;EsॱemoBpy"Fnd; <Vp'KY4*u`slj'K6c(%Bm]jOiɨqDl aNc:QQҔ%b 송Mɰ3Aݐ(K7XDED4`D$f"?g@9"KÂ{*(,i?'kq?2ƼedYlu|JFȖ^1(:$$9JqW %sHOHBBb^Ŭ* [{VP}u@XFnm2L[Xԝk+|҈D}JrpDek+4ڀ/޽yqf'"/(]nl' KgYNUKز{5˪bYXIp'\:)@kच)Ԩg 4q;Wq{jɬEh+*{?Ore3(2XL1SX9S)hQ\@I$Aw\_ [)&SBRΎȺSp +pיͅ9KAvjnП'YLE;b'84^+͕qָ#S͠"EfSU0 Ƚ;#x1,W{W-˦gF0G{F~tBbWcwmF Em:ÝN^@=ԟj2_r%51=OzC$l|Rpa7Vut~ 醎̈́ ,nQ-fvp4JhM#=7sQw[b nj#ҫ9O>xdV'jw3UJ>  ^* endstream endobj 2189 0 obj << /Length 1850 /Filter /FlateDecode >> stream xڽXm6_a8UF-.W+HMq $[܇hJN/|o IɒWɦ^d5$gFaA?<{uu/D^/Edq-~u2(=Jo7?^.?H$ S3/Fgш-\/Ilj.;QzeWZ3B{>#BrO0$d႞IMk#yڊQhHQM $3x)V{m#+; atULMH<~ A }|75[m%[m~" ^Y~f֢%)5RCMʦk:uC+z} ^?͇6;Vw͔MҋÇc7dW4]Yaz(|*(-;i/Bh*G}W#}>ߟ{McwThvd]j}U_`WCщuouXdLÒvs</v &,LzpuHB['mBjiRqdc5zbrUzܲĞ[>_i@AM_5D-gͳ>| endstream endobj 2198 0 obj << /Length 1379 /Filter /FlateDecode >> stream xڽWI6W[[CjTTRLyPB(H^F3Ѓ%ogm<2z]D /$Hea0J~fr ~\v}'G(c shMߐ gD e!& XHC(@$u[ț0 sbT臏13< fD6?8Y全o߈6G$M:^m7+c0 #x;+;FhRHqjGDVcG*En~SʞcVmW+*ZRxW؁%di'8z`4oa?vm͎Q@Gb腙 (;:'U2IK[gIdJIFkVo,OzϽmA2J_Bg1},WB~āx J E%::d;HiV0iY.xv`Sfncg8>bP2grqiEn mXw*tO*V_撤oYc)+(jʴSJtmzS𨝵 e@Yi;]}Nչhם9߮Jt^odH^XDג9QaÅ:v.>:ʞЙg0F3͸/нnp8zso]UE_a<7էxhaIrnA׶#B9;-+9v:c> 9 `Khv4luwٙ &o$9dhX xAy|e',[r[2t2z}a<~F^QD6AfwgV^" aͽ?Am&t`ra/L#?铆l[tB0]A m*[/^SNJZ(5;Wqr.&%.q<1DJT嘘Iy+Z>`gA;%\~cpI֘[#XR?q8-m'D٤݄MRIvӫ͍l5fEU/vCWc+}%(>OJl^RTI}`K!/A#=,v`8v9*!4 7NWit/"2pOmn8܏ye(휿 wHeW Ra6QE1shoqHP PF38+(JVT<"NEιDu=Y?i'q)SسoTܪ+jchsڬ$n &S endstream endobj 2206 0 obj << /Length 1833 /Filter /FlateDecode >> stream xWY6~ϯ0ѵVԭ~"M }-6YHio Yjm>瞏`_o_|}M- Htq[ 8]diT,/^E;S)HQ_|g{QYQT}2"NGkǾ3 FkQ^ő:/֤^>: C4*֙ oʺ2HԝG\E^cTgٗ館k$V$7ٟaUx;VvUCG5HH#ɽ=/i 9jd;FۜGNH{ul1tPoA(Ugۚ؛c KKN❴`XJ.A(:2=vzw q FB1ʝp o!m-ʏ+xL hH%T)Z^(J I=jN: a fğz*r|*-ʜк6 s'p*i=ZcK(LV~: F7.\F#emsLSmӆeۄ@>5!lT[CpK QEoii>H? <^׽$Yw2!@PaOͧaɥ8ܦkܐ. וS8Z>+'88~|K)(gమ{EKGַ7QJ(X*=߬ԻrR޲d||K)Acx'$NX)W+J 7QB, ?[E -qax<$Lw4/SὟD`Հ fk2-ze2j^s9!E04"~af++FL2^5#7<,yfnB?R'bc? qE;* 4 tzuzuyY %XEazAdbF2IlMIlYfsfAX V˗/W$o άxؤ Jto! f蠫y63<3gf[k={3l70*j<>ՙ` 5MO.|! 
ũ_1ϴųip-8 w1$FS`1(>DM# -(s?бY6[uVTKu[c36.i>ҏ..kn,?ٞC(ʑ֨Μe, gDz4+=m)mW֎Rl F=$cL4Fa?ՃؐBlGBژ0IDz5_=0ypPK8v/JdV9&Nx> stream xڭXK6ϯ` 5|i;]nbQDEbbQV;YR*+R,,qQ4Hiq7KDʸ,F٨vicNy57 rXƸp-%w+"]7Ui?z^V?5f3)E #"'L۫vؾ_:To8n~` t4QA\}TON bpLKs<,Jc2Lk,2\DEGwæjKFDnW<գn0mqÏ4Jf^,UU5Iܯ$)eZbwa@@P$)YnZOB}]E-li!J.W՘ð~XS_Vi7BHxWBEٞ.d{oyLg^e_u21.%y'=j 87 Q./ڪ1Uueg_ U4vVSeDA4ڪ|I:RNJ~a, _RK {]j%YrqwtN5.C(r6Fu+¯$u?RK tx 6f^cD%¯yP;˨ϋy2~VM7Ivu'\,MkUeG٭kZ(N>4z`1!h^m!'dx6<=BFY>u GX `JBfs})`Nɨܫ^vy{h;ʞH;joZs?1s+>`{.3L~b,J O>}b\+pw9t/u/kptLYR/gI*[59cνz0r}mkb@͂@,/hCE=^ =E$xv=iv3H0}54v:+ jCm ~跦D|j8RڢS"mq[PrH$&"L6D8o"H/ `=.ְ;uµd:ǎCi2m[ԻV 0M]opcу_G`j*qU gKfe#p!BY#8N9㐸>?) N˷=8ڕd`}"|zPVOK͎C^Ah ڥ%r<z/ ..')SG? 0#q+fanPMW/~ҽ +vQsS%ا7~61Q9pf|W1o)PmKzi.6Iyi^L_v z[CGt[ on6K g> stream xVM4Wv8Ud"`Ņ {YD-IɬӲdxwErRj~~X}[nc(OU(RBPBsoWz)a>#X߬:*!:g될dn͍3-X]kf۽#S3NfrTjt)dt(/|Ջ9aÿNsswTd"8=,8`, 7l${-Pck'$[ ]/U h{1=M@cKЖWۻW 7bS˴w0Px=fRW{mVvĤy^s٫n!_r%Vm#̗ȭXV9x(!W4wWO8<Ƌ` ެ~9w90x4 Q)h@loL9Os:419p/8qdWǾ*_5pwPg]̪V9aı?l00`2LMc;;MoBt;N3G^tLS s5 .`gS cM2WW"j+QDN,sȂU*c+,1c6͆5;orNUź`wC2?jLUpSQl1 h|> stream xXn7W d?I@!1t `8>$|0E[O5׶YIzfj>*y**$\R5 KChRð$axa70z`8B\Rp)0: R[>תTI  )p,1|Gʩ g ҂Q1F,eŻ#^w;\\Xv WPຌlL:|T#IDgb;~11*D0`sY i GLA10 ȧ$êbT4(B;Sx/g)`i  Q1k#~vwh%p 7 6iJ"9 B~ȰPȏ ʡ?2Lf$\G$NL ؑNBb*p .U07|֢ 0ݡ2H$ 0@iR &Hz $Y!<'ͺX&Zsh$]r=;2 nDc vs $|$+Ihn(3)ry`oܪi TT"^Dp{Dc?ɴحKhn-, }Ld/ϼ It!i[!&dғjc$=ˍP6'mU3D㈜;_bقB5BfZDe6YM3o27nYfqȸI͢]^vz|Av*-N$w|̅3Q4cY4&0gbʮ-Ȯkkdeǐ]D+qE\'5BE}|5pR3? 5I0ugѸJ$p?KNC|9?E6^}ޫ<]wrE -t4,ٸ20KGE4[p$5KwEpŎ_fx$t4ms[<o_9a endstream endobj 2233 0 obj << /Length 1665 /Filter /FlateDecode >> stream xWo6pM"Ez@ɖbY+@KM\RJÖi~xOwG,ӣh<ȼ,!`:Q2`Z q>9Îh^0 xkOAQ7^EsK +P=Ŏ^ 2āV}.Gn8\+Wz{YHk4ޏ}758;(v6k&MEKkf]hS͙0~:,zfVI^PhU` fq#;ӑKHhmɐ:l*(*m$DnuSRس'vycZ2cs&e/DQ!K,Gdp68>{' +^񜮌p5&{?dUc*㈉hZkEm弑PKZcFdCY*8ĹD0TQT-0ŨviEW%EC*YcU!e9ݼ%mTqojZG y !U4( BS [D=1Dݨ rk!vA- V/+?cM|5:!6eWU2pi656Ŧ `ֿڈȝa|lKbô?qԃExUbNξj"#!;y_UayXJ|oq֥慑L`E0 /Y 纷r\-42:uб8}llLΤ-\@W T|^琳Ak 8  NlnS% I$p@}4Z5j'KNP~6v}n? =\ a8^_PtRØX5³CH ]8LƦpB?p0vlG)H@ 6ρۻ4K__RYj U -de-P29]LBXfoB mӊw}z$E0%p.PGK5!VCExO?_Y=VcM!1\8W/2--׫{nY I-|'O^o4xJ 5%y( ;F EYU#q"}|(lW=sw*= &PFmXo_,fjZ_* z-#ZF֔V&͋? 
*GS%Pl2|nh-7t' -->v Gq,׬֟nqr@7d^a7QQ43|?[?罀 J%9~tiZU!d;tlM#8i;r`gDZpC,J63u o&5 2sZT6ŵ%ή]J φ{ `fX]l>gy#?7?Z഑@0r/XY[s $PN ۛ+ݬ'zAxK~ˊ>|#[ 4_X71}dwCgӣup"f endstream endobj 2241 0 obj << /Length 2173 /Filter /FlateDecode >> stream xڽɒ6_Tp_橱'eT"!1I0N[鸓E߾ ؝w_wMJ̢lwڅAIrw_~j1ZL(&s "|7I%Ц{a#]`?׻.婟~9pvGe{ n3yúxLĉW℁%.K ?T$3PHe:~fS=2Fgގɋק}Xx'Y‰eIQԻHmSo7 Mo'f5dh4W?u,S9KOi* JWW ,vo Ԣ NZu dq*ōD&'u"E{3/K qTh/ѼP=]ē+4PQ̺I\UOD~>ek|KPQ槑%#rSsjrkMng7ȯҝ3÷NZtX~OmjsPT}<.MOW'u$(wOMRv`I؀+G 9h,14^#x_]O>.H?Y|܇L=U9tl‡{YAR~0 S[eJrj͠ezJVM_OWOqDg}>[?\8'QWarjz满ԐxZ+5&D_S/JWʳM >*',O6hFtCk1O馓}XOe61egdj椓¦9Cwm}?>sZAAdbD*w?¤ *teZ"(|[TLYrR bT=;r>=Ռr2K:տhUV`Ym佋>:q5H+ X#e){ Qui, Xf+gJZ[|l%ŒFEi97i!>gKP&CG99`7B[3(|-xl!YܱJ PME{U:5J ;{uŒI6[¶n^6> ׸iν PPEzKFفňo@˥iSk3imkAPYӏߒR aƨ'TΎɨc P-AqZZh|ރp6OH3cO[:H\w3?qFH` KK\rXE 7bS(-j ͬ-XA ,Vžp> stream xڥX_6Oa U\()%ht]Eڢm"lڤp8_ݛ$[PR-ֻʼnZdB0u%qmݛ4ix"@g*uZ[H<-7nLKfG#1!Z۴mGw -V"A&iJtFoy#]8)"םKSw15UCǓvzrvnil' K#DM ^FЇL%*hޖh<몺,UR{F˗{[eLʼH+SI>oJ\d[_6bPp h<ɔRbgS=Lws$.ku`Rbr3-`T0G4WG"GS۠p ;J.7 &ۃ_.hLUlD95` BSA fM3OGQX4G0 ̚MmՇ]i5{u{yO_yE0 1yg JRDn< ugٟds xR)fOL`‰jO R< |ߜۥȣTg3H~T XcOBFa>UmI(qPKsIwh]]x:/NY;"i؝-":(T_()Vk)QLKdF_]j0R~\BNřȧ&x5(C->1|a䦯A]$}.>j0C/3-C@-aE.{LYظ%- TE%]wr/Ʋƣu'8܃Z_O2R#G_]hލ\XY7 Aϱȯa#xu[q I?Q?x>9r_F?>X0f(ršg\R9+d1I[?㩢$i=MbARɐĠCŋ*PӅfTp!S&MP9"bgjU2pw}i<8fyAxgj`g-"xHWYD~eti+ )iǦ4՜ָP1{K O8g?z~ZνrnXvptMMgiz`/l;ݙ|BaӱgcGєsh0J,zϷ+A{{8ʽν?`^ØE"_DZ}OP߫4aI.PLan#?"l9ӖM]Fd_0f f8Aq֞|c7HΕM'q,Q`A D VmMάv1A޹@O`tWDX=mNPLs|8tX,t}Tr_$C36Q|O*N͟/: ?sߡox|?Aëѿm4vW]SAS >=gu> G endstream endobj 2260 0 obj << /Length 2352 /Filter /FlateDecode >> stream xYmo8_a@WbEԫw^}PlfW Q)Kh#rf8<l3 f?={u]r?Otv 8eBiVzU3x0ݽxgQYí"v&Xy"yA vA% \WN6j9Cco0 hȽlQ",~$<&hP`[hVnVܬl vYlGUN[k+-ݾTA(?xd;m]7/}SyƙY'omt[T~kdW摽|jnwr[4ŲG{׷ܾS0tx(7cWJBrz &k/YdϸAv(9,v1/ȡݚK交#`H` *9 jG:+fث4#%.8^h7}I`z*[]s&R/Ñâ#z7Tɷۮmv^7NC9|AǂZLYG%8ɽ[YLW\}6U=o蚴C@ҊTNZWGΖU{7tpyk0RZ3CwTu*r=j-QY+8J]YTtHB=_Z=2hjf&e ? {؋}$3H ',@s:MJ r\aal@׃/.Vkͅ@!i%Ƚ ~ oa6+8$ӝnZ$Cp*:i Eo c.;}/ kkFHw%DU*9dC9[$"x_p ͥ!5n{SCĀF O2䊦>TT8[7D@&-AhXl -D z.DeLl _/a U̝- R3:0PB8-4ϢǑ& 0IL~kb4L *nZņ#8,$Tt W+ms(:^xWJ*5o8#9ɸlwܥXnr{kk2ϼm>8aHm8 w{egHi;N `c<OeQw ב%d[ʍ܎-J;r8,2O$#:di8G]l"|"JT~br}jgHm{}xot1-SC~TM\pcChY!"?NaZ"Yl(_"&nH 뼷7_]D8a!`EG]%77MG0uUlbFqx$@9T~\%8ewe{IF]^9f}nibx_]>v<Є[1tKYwᩍje8tn64zom4|{?c endstream endobj 2269 0 obj << /Length 1892 /Filter /FlateDecode >> stream xڭXK6ϯiCU8xl֣lq!!>o PxfS>j_n+zw6We\f,[mw+JH̓lSg\m/7)򴈋AN&ֲJ_-eݶ q6,IvlPdTI H\7)!Q׻o姣}c ĨW#+:% LZU/U kp]b$UF ~he`S,((%t4i," sⲠ{scR = vEi>^ ]{-7?R5g_הD7 P'[OZ z#ui) g8,Eԧ83n5tcF5rM[^`mf)f;_+ǯ"{7=Gacc֧Lu#A_ֳ,NY[~p"lqN9d|[k:a϶pNOVCoD f֣>SW 0EXi ϰH2< T|.yq!LibSJaPyB_D m goqzeStUZzLuYF սXQlt@gu~9F3J+}r}eT؟Ts=e=JXG|[E{ g op2`D𙚟jZ|$īڦϣPIuՠwP(v-S)csCxLCFBDjC] L2uXAWvޅ 2jLf0.KfEz8o#ce^d D5=U34N J 9C،u#`ZxN^ƾ3q,,L,|.lzβY9fx0D?Arb A܍y0Z V7QD6>.i=N5pg^{>MykLn z\”&Wv}Ek\2Hw׍'m'OzXtBf q&A k'gZo(E%*EO6 ٳQNT1Mźf6fhX|pб|ܠX#`[/ѻdشu '<k!Ac4AI/sFzAΒ yi~F"hYC㪆<A2s^._ &YP\r A':d_Ixr M괔9GYbLVkw<ސnjN+W2_r`ve_zK'fh;%饷]@?htcqI:F^2^00? c͸؂"Tg-Cb#zy>86~[w&쟖J3e1 l ;[cM=4( "I¯H6)ZqB;Oa8:<]EĚA_lwbg&K8R^u_+'4֮.hb{ S9S LY=K`{^ZRJt=-wƁU'9._{ _%S2 0r!7AYelX7<(M8Td1`^qqq endstream endobj 2278 0 obj << /Length 1616 /Filter /FlateDecode >> stream xڭYo6=p]lU C$:(-Q6.I~>^(IV!'omG7w`9_s]'9 ѧW)e^7wCD gEQb{R ݸZ`/:3>p(*9f盻 xJW+tܹŋ׳JYA`R)ylX`a+ùf*Q.c:G H{DȪ~ts[fv;MץmYL=E; (j 7 ̬q [ iڥN)SM,}ʑ [,Dom:GqG]ϼNm',fNcfB+JEJmA0@E21n1;|NՕ3H08x{ D,%1鷒%i}btkZT\:ݼ'lg}Q_&=z%016X륓ւ͢U&m12Io-#̌MGkn n=m\%*&n#Ʊ+ztXS0\f. 
9g&5 @T*,DR;U9t '\!+dI<VnpcNU>OZ 8SČTX=1&xռvdV14q{ӗ,>m+ڌnf{&`iZ%Ȭ;d #ےd$A%RWjháy6b,Q*F'8 _X~G9\-r =th޼qgu#_n7q' endstream endobj 2283 0 obj << /Length 1446 /Filter /FlateDecode >> stream xڭn6=_a8yH0I%[a6[,hR"R۷CYr,V&y7%mfoϮ?,nfh%쾚8E6A(쾜=\t.oUYHΓ,!tvɲ! ElFzKq[pǗ?]7 6d\p2e^Y^ bZ6f!L,^ˣOAZb06 #;¤PQpe$=ѬVBE$(Z['*\,!昽H*QrI7sX:\cIJIUBI`$Jg{O#UW3i^_*RV[$pݧ.HŜ*bKעE-Q U -3kX-DF-:j; F 47\d_MqatDpJcM%h96Jgxy"1ʳ2W<<S܈TnG#pLKX1Hᴢ\1iyo)uZ7LNxķZ56ׁF^$H+;€*3``ZRLvdb=rIƬBD'mBKCrٕdJq] (,(L%X]= PI'9h:SK/z&jSd*t)*8[9WǖN Sf 1h C'Gab>vsŖyXjYn5S:=ή_PyJL;QZ л\(;k(B٠wvM =C"K- y9sSz=eIԛ|`;`P !&R*X#vAӍ3\8Z+{5MnBu-KKOMj 5<$wM3v8>@],eWL h)^3_m/D*=C+ .›W 0:2_8OY~v8ìV51CٕTϙ-ᅟyǹVG'KPO׏ÿ:)t|>ؐieN|wwyM#԰,\܂FJ*ɫ!9$?0hv;> stream xXo6BH ,FG|+]m֬f-6ITI*)b_G{xw|7T)w? K.Mi1\f6$mIM:6[8"ނU TZ&F'Kh{M b04.H'솔S|mtE?5WU7ʶ=]#g5 sü"l'Ӿ!0xnK*Y9PfE+|?GiM`X"`Vur}^d~Cxk8zu>3mUcFo ]63pV_ ~t8hUI$>*bMףTЋP]=̰P,#b%Cchpq0cN^^gmP1iB-D}] J̓b6JP_‰=݆ctuoE{M'SflEɚ\vVe6@T֋<%^\]rv[z\ z0\7To 35h[FBmXq b|keBx~rWPAQjtl^!*ߚYS 3UC55I O6_p ʦZnvJQ4Eṫxo"?@Q:E55,BȠ0Ϝ[ZHղy>ﺦTw}3ȇ-Fq>u{F.NGg~W~To{sV5RVP,iҗy5n { *ýYB Æ8Cq" Ъ"<銡~[f~!³*8( C1F@pi 'Db*Sy𫀫HmđWul)H@/:TBv]bqZL`ް4lZ<9bpT=h [!!=I/ܱ&m;q#*Ҭb$Efִ EoT2wܮ!&uV>ߪ3_=nhd^J`⃼?o3duI2qS̿~(2.˺k _)޻p3N KLtCv]gCuq\Ï,)cxy\41*E,ī Z endstream endobj 2297 0 obj << /Length 1766 /Filter /FlateDecode >> stream xr68hofC$3m3c{rIrI„"8|w RLNCۃ`w],Ept{rƏn]/؞."K|gśz>!0Do ,^n%IO#H؊hr#zՇ Rl%-Y`効sDN@bŶ IRTiMe#2vI@o - |}*ENvka"hMA rFM/4K ʵ+^bȆpKXZ"vW(9ղB BL,"m* ZFh0X t)u5t~@=p (LQވ^bi7PE2'HyC0Fj,!: :dT]ּ5 v_.ʒҷET:]AnWjVۀ"JՒ֪1ˤeFPVU#~*%0X* NA*Nsy^Wߊ#w C82*@aYз/W,A,;*.5!Vdm1| 'ʹNd?{vEAy " @AMmo5T︶Ԙu-!()D CΘe= \" ,|%sdF.ob&㾫PhR̬L7S)&P~9"\W c/E!ž¿ivv$7r[5<[PE5W2pޱg׍%TL^G^Sbx*ؤ*o]1+ZW "dz~ t 3 b "׺:R/DZl 'y`x6Z&ς^dQ?WK | YXPp@p}E紼zu32?xxgC93LFOc;rݐkӶU"SV:slOI9,!GQdkdtsl7sI?Co6Qoc8Hu+̆67jq5W#J0SVj^z9SgyF)`$H8\1ǵ#Eƞn(ݘݧg݁~f\BfȪx$10 !"RzSUi`|STxābֻle l?3+x696ZՠϢ' # >ˇ18kѢ-qISPUW9*މLo^_87XY_d2msJjgx^'cSjC$0~2ʆц1`+Gϱ}eb\^{y/pZ~<̓bcr5=?  ɃڮGI㌭D1g^?ym{i#fЍ.O_z+`8z}{A1 endstream endobj 2305 0 obj << /Length 1402 /Filter /FlateDecode >> stream xXo6_!&1-,fڡ[>(mqDMx--Үú"w;xk/~XN2$+a%f޲egEn I@e,FI`ÄCC4 YKJPShI`ZI 3,r t=+y~(Χ(|\-CJJ.A (é3Khflbv%v.77^lZ^[C nr; /}6LU\}|wdjQ䊭E],*f=hqvhڗPʿjOeVrFt:W\⤿c_ڢ awW3B#h&^ pY &!y]O~:ub/) Bpd$,C"_s Žz#U?őڵӋS]MXHR`1B.#Cɢq{Gy (vIM !Ecg$c5m'T/.b.jwU d}ޮ[MINI*Vv#MC"kJ΁=+R4y%, pL#Bb~1Ctز]j4d\V4l hU!ڕkO!WzӴI  ݱvpMz^(aRtVXn +"IP0hS"JSqML> stream xڽWQo6~ϯ H1K([q@Æ4%ҶJ$:^R$eɕlEY^YMVF!= 5}7 "kf5 :[sBj6aݧ;=|tiUX Nqw1 @3*":w#.//tO'ʤl Z*3ǨڍUKV5KZOP>Q:XxפWb'ҕggj$s=#0 :XMqQH"e506sw C{j+w{8_, M今"q[:o~9tO@I (QRT&Ӊ?ʘձe}&s&BO ͙NB `",~URrS$ ub(J}gCcPt;3*PQz{aDδBryC{FUpC_ɁR{- OSg54{6|r_ŕjtGKL ޘy^g$% cj}/k-O&`zʕtAgwz{65..9)Ic5ڒsҼcA]xjZ^#FQ笼Tk zG݉(8 Y`MMO5 ȑzsUg- V5n4'=W_=c,y endstream endobj 2320 0 obj << /Length 1031 /Filter /FlateDecode >> stream xWKs6WpHOrNfld! 1I($J鴿 (czL' v!d--d9{5=+q؏r0AbMSEJJHMK/W7ᰣ$;L0(ya ΐuuPJ;pԈ;,R:??t".jj+& EJʔ}ZIiF(iҚd\@vF&s0TI=7F D;Я/Nd7ضN8;̰}'tkj{..٤xﻡ[$p7d#]H͹+} y ::8yb# {i3lX%A=G~Tg7әeRkJ\lrGb餻5gh@JPzirFCc567XWKǜg^d;Ds)-DO` _LVxŝhLb(vt &lCknpO'\y_3.Jz@kZė;mw&!{JʊG;`6Db!NϢfף߽C!|niC{^9+j^'L rOK̈́yHvu)Hls~XU5g DW~rԸjP"> O2UeC|WtQ7KJ+%Q g> stream xڭXKs6WhDDI8Mqsq}HH$dRۻx MZX~-hox_._\/ܝ~8_8Lr`>OWh<~xrXAtP{HI+'HlY `w8G?@)y2hr5ӄdB!(ȝRvq~g㿼wJj|0/H=V2~R59-O58?F![D[GϬ8bE;ڎL[sNJ5eT#zYg\[̷cz{`am?fibrT ;^6Eq%'g8[X6ڕ$4hK YlJ| =GO%&LRߒj늟"'3*v&k&Ds_O{"K:}a:DŽNa,q@9-;yiG":Zbr| PlѴ.˪*2. )oΊrN2l05:T D&)V'R$zCL? Z6D8:̂"%䪀JCCJw+l܌ѸbPY8Vfs; ޒkZ*(YuE "1"fI&{.&xzD `R)T2OHQ'i9lj8139V,d= b"[DLeM#lNBE)9d0SJꝬ$杈3|hr\u a Ak$oozLLX(*Hyw=X+\؇řk3ֺ -prPJT6IZ"H}GE.k0)yꉏ+FʃJ iJR6cR7i'e,v{s|l*l$.=󍏁fU_}eo67[ܣF>yƝ@?S\&ya&-{H&H$cHYR€Y;(U1? 
endstream endobj 2328 0 obj << /Length 1303 /Filter /FlateDecode >> stream xWKs6WpTjD ;塩Ӌ$haB)T]Rb p_oޭx/'ϯ&g/be(KĻ<DbyWw=+DT8[wWҁf8LZ9{A$R>I'FnAzÄtL7@':!7%kO]qf'C%}/PɅM:p CqF"W>A0kzO_--3kJ+*w7;-q VKڕ+9tLrC";|L 6&F/BhEh?KԼXYQ YZiJmX^CCx) ׏/d,S"_kP-<j 5']8(@x`EBI6O<1 =j+3ʕ3l P̐,d3#F(Z`cUX{sau[_! bC;! \N˲ݱ|N]Eq|{ MDXq8Ksϙ1=j㧡z>$(C!:A11cft0!JW{3|xA`w0TЕcG@?P,&I2*{@k8EI w F9pG4yYCFm MHv \LPKZsGʕ!v՝k >r耎4*Vp!GkFe rg\Ⱦv\;*y+q"; Q0h Ԏ ܷ[ZxoՎ'.8G"_OQ9=Hsm׼gϪR 3=tt׆qˆqRr:XU}+e?I0p̧]F0Tj]\irq50O* ^\ LjdK\{H]jru8`IpΦa#eKKWhalڐ3I-U< fNN[MǛ[CCe !ڂvsuRKKSӜNᩙDŽO D|∠0vϙ(:~77/ m S+tVuP{ 7&>VxUpx+㍉2@mUѱrSMNRShDNgo2 endstream endobj 2230 0 obj << /Type /ObjStm /N 100 /First 967 /Length 1273 /Filter /FlateDecode >> stream xXMo7WP 9o4AA*p }C9T[J_zZ=TkmI,k`\!m<$ ÓZmzsF;ōV@0";=!pai,Вy$k’22, wCZ)ZGE|" #%pYQ$`!D=e4HT#bXb@8U#f `mR4Dp`ͭDT4Fn[Q**COO@CMF)\jVeQE$B$GcP%86@AMehDyD@T2!V%,+P+a$>V(NܫdЧ]>Yl]]B,uַ]<lcl}xc{\B,s4=Os^yеe:Vm]-f1N94kuA;rv]2M{̡VBde񭱄Dm7 )I +:v1:K;Ϣk02 *6ev1jH4TCOTɓA|>Z14Y4Tt-4VϡY{2?1y<ۡr>YgGlcyI6d[ި69)t.BoFUP͆ixxZDג$bҎԓhoI4Xgη#e}@l\y gH#nnj/KhLZf.iOV/yqygD5wgAi:LYx]ty:ZDwItu΢+~MB?ۧ'R)١JINW)zQeP_;~l}S<^ַi~#~&'nbW /}0ĭ;!@ endstream endobj 2336 0 obj << /Length 1755 /Filter /FlateDecode >> stream x]8}EVں'p|= XN:m-Ҥo&ci CwBK| &I0:J&KE9:ɜ,N?ƹt1Bҁ뙐ӪީfTNll.dbAZ2FҪ(*zgʍ,]iFBF!,XrۨB/Bilj5v\vȘ%i)_In'K&Rsy&"`R~Qi|CbC*V5}mC 6z*U%p]*:fkZ5[)3Yc*tXN΂M]Pk^5474].~PW8\\¨vԍ':_a.e4!GV7\B07 zE쮪6́:mP; kqEo MU{6:n*{յYQ4]Q: FoiUjBiP&Z3-kJPk?"d^Y89-D p6>hwBD6b]QޟS}6nᇫB5 U/Çs֡RT[dbvLG:Cŀf\N739bU]hTєΉDqt;8[f5;O@n_fRNtmVpr[9J2.:nԤqD,⡟c(x4t,YrO7OLvia%Aٿ3h*ga2ue|轲Y[5R<_b.'\r&Yd۲{E)/P a(h w%mZ,ʰ2A΃XȆ"'M/ 8DÇgsEy ~@ȔT8bN)ɥW&> stream xڝWKs6Wpr"&LNԩ;搙:Nzpr)Bˇ }oE=Xogg_v:+CYž0Qdƺ7[~*L&ۃ,FI2شw7VD£8QVQn{?ZAn'are}^1{A 4E*ՏԧqǔI*"ud*YbBgv$$Gc:e(bvZAxzN4$:,i—\G2?],8%`0 VΩJee}0gלkB^~u2_E?nkܰ{ޣwY'!$];݄*ErDc&O'N6irc^Z t G^l_kx#wۿl/!6?Z~n]ﻟ2D_fO> stream xXK6WT">ͦ@h9%JK-@:×,fhNG)_g?gYe$z`Ga8)( κp޻8o.^4R18RFʥ7/׳O3 s( S'g?NoZf΍2Oʹ1T0Qa'3- LP-3l}E]xA컗-ͅ_)"i -ދ Q{SXZ-{(A "tJpbl eLPOs9PD^\9Qꍙ9-h&EmϵJa aĖiv "Қ|c_.A-m O lv:)]5lh)XŞ$f mS`lx!ՁS>ȓ J`q's?Q궉Dž-i1DXxFX!BaR`Aw;ĠR'#eI=XÛG`h6,p4+p=S? Xlo8fP6y(V",ndm/^]ڈġ۫IׄԔTrb~]e2OEu)V?ٕ*ӱ(wfRE~:@eSi/i#5PII輁D\P" 7.LV72kqb"b0DCj۾*`yr\^-+*͓?V- Br^n*cժ@4 FtAVJ jl $]/htؾ5!Hj@R,;$m-AUR7vVi'U♜{89Izc؀6aO{6peI@hN#w"i(J`i~i @$%U%7<,Fs#"7?#eLW&+݈,=A&@vg(HjUɸ0_yLwӼ}O|lzcݚ_{$ເs/IC#ΗUVZs'ʚ!#"v( AҧA3۲<{?.0л$9&a|[gMݞ ˑmŤOmeGG # J mկîB#'ă/+A G TzRψ4S1BȄcgsqoo!` endstream endobj 2364 0 obj << /Length 2187 /Filter /FlateDecode >> stream xY[o~?bYZ$%JzC)H!)U K[]83CR7˵i$ ùq#lN`_}MR%Ad61Ltso~ܚ6en?[i )2}x(keд(6/fKŊ+=x5YW7mWg{1Yq|rijqNWnrjbѹn=Y$]BQr.Ada,GLh%BLqeyJ]G^yt cD۩էQyd*և4vK$sDWADeD} 1:M?| i ]ehޚ곽mn9ם~K.]=vliL{(} *Hjanv<ٞ{Sun>8{.J>\k2hEwo ԔNӱyqUJԍ Td@w6Ef7c6"Aʊ<:5sj7XnzPt6"pQeeRT ǭXcr*C/Nk-Eb٦&Un fZ bQ:/D_ Pxg 7R^ɂD}fe]oĞ )X$C5A|TT)K">J8D8EF06$lXL?!r(oO`rjnjZ䪶L5X9^̑ M]]&?H>j%x8D1h}M-C$KlCSlMXlC~BzX%y.}7 bFc%! /҆F~ V#59xg|.phZ`_qnX}?)}{AjK6q0j7qXo"8oi! 
dK,0iC§;Y8XU^y0 V$)CyXUDRTX=K+o M8H/hK[0ܷd^=G"J'aOߑdQ";9=-V; tI0zRt cKOpڧ L47)m`(ZK506ZH-MX nމ`Dǰ<-+K^;|{ Gԙ7xɂ{|C 泃Z~2[piA,T , Xa8O(\Kh # =T`5b&ňQ< i'7458%SU|1]Sjaectd uhx>RjHSjLTғ;ZPhj9CT{ǵ`.|'ڹ({u:tip~@Cځc_lhxVP(9Zj_?09EL08ky( NʷuX-DE7[BE\L●/[uTxhnHG1׮9._}/!2 endstream endobj 2368 0 obj << /Length 2733 /Filter /FlateDecode >> stream xڥ˒_#jDGYSe*o I(SL;}u h4~.Ӈ^>|1Mܔ<iY^4ˬ9_H>58O DHRw!އĐn8 Q.qGc>:.kO;.LrzC< F0[{ H, Ir5ejcD#@@'VӬU<#,cZPdY nV'f"UR{YCip@V$љNs oB `p2h3+^rݨQCE\͗3gC;cjBp:N"nq^Mc:PuSQ.m@I`EZ$pXYq*"F pۛ Y%ZL8-@e4Ҟ.s=X6 @Dm\MNF!❠#dǀ;.V;4hNw IwZ IaU( 9pr)M}?G5qX%y%M iabEmcyceK 7146P∙1Dvd;1٫𠳥)3"8TlC)G"m ]c}ْ4F_D;裂l<=I{2c,3I!4d+sMXA\Ɨ滐FB̜W4yBEZWZPI+\KJe؟JOVX4N"ԌhJ]'3aūWw0"c٧ Xl% xzF0N$wjU>T~YZ/Rxt! `9:ƪpq{+!ބ (|_EP&[m/pxFγlv2!4-TrVhZ|YnghlAu#tz殟}.}S{p^Ͳ3UȾkZ'FO5Mv[g{F) .P+؁}[87#N7Oq"NJ6rwN?[ɯXwfuoլwaQjI8/ȯۄTbT%gitŽq-8);nP 4oFp샿Y.rdzB:bM@YPku28̛rBF}%R`ɔG[a#^Q O+ӝ묟0~.QƧ\_:dD_L=n ;k W"۪X^􏊞~:S${ϡw*Cz:Ip58GWx/v ۡdO5Ŷs1eE3C4ne~Ec%Y-^U)[C33}o6Yd ;ܳ. ^7-K-V+rV9T@!Tdzg%cgiizڨ/x!0*+웚HQ>$<㝈m& H( p?E\VwvjqQ "kUȗrg{U,}-=q}M8? Ƶp> _asz0DDYvN>,i7Qlq]94{7S۰ /~)1 endstream endobj 2385 0 obj << /Length 3865 /Filter /FlateDecode >> stream xڵێܶ=_C<2)R~pi&EQ;l4D|}9i'v0rsPJ\峯n>{:UWe\fIvusw%ήr)LW7տ#3U[_o_|ZY%d*qg_mTcUW$Mi,Ud/nߚgכDhFG鿻쯦uk; ٱU;04.hǻfS 5ꁦzsgvvkhnM5L7"T)!'e۪6yw- bJpİ7h4)`ڇ,x 4sp64möw"&![."8zmGݵQeC-ȍJJO&ln;5HNn62U$-ù#80qڝmH̤ȼ<%@yJ ;69ܺdžXŧȃ8 TBCq:\U<~^! * 940ۊfpnm-5:UOݍ 9#$yU4? tVN/ifں$6WhT]4Q"jt MOwWsm7xkV%<Y43ڊ+rm L@HDHt ]w.]]:߲Ьj [ gc%-|)(o z,$LIG PptRP4q[`D{7z^  Z63ݵFq]b{;H=pjc{̐? 56 Š&y}V~j[Rx7D;e*X\9=XwB$&e-x-k@Pz>e/Xip$M+V9E>[UnhgjS= v!^ xo`=w8#y-G4,yC@ gK 2,@="EITZmpE[-TSfW)a'fG!Gw; .-RBIۮnƞ͸tPuQ&@@dV '{h-xMW5 \Ĺ(S\Qq19u*X0# Х6r -L B<%d.G )g`تKĺĥ D_H;|=1O3cT/ (Λ/MC[ckhpXX_bT}G@{{ Op򤬊skn Z8]'p|(e)gEaQlM5VؽQ{+%t@bi8w{OPݡ)N"v#e]z_.i/= Qb K{qUQA@[ 2SQ껗j\G蜭UzyM 8Ll¾EyIsYN͇,VZ7pW*Mܑ$ڷv82)$BwrGXJ{7.T*"+ċ7c `}e͚h{fThi۩53]֗8oE"7gYbY}!tKxCE=L}𽇽>` }_& ee}ldzΎoETt$S܁ԮmA(vU;V.'>9qEp7}"OGFv҇-8w`)h[`M;($J1z(.*K0JJU,z|<}ԳW[JY0m!O@{C;3%#\ďO LyC1'RhL|N endstream endobj 2391 0 obj << /Length 2991 /Filter /FlateDecode >> stream xZK۸W|RUS{o=U9dS3ìD*$5cO7FR> 4@?nW|ݼx>V9SnVst Rn"o}FJg, 0D-ҽF)x62IEn-δޚWfqWX솟LfǨh˟>w^'QpTx]odʣfyz]$*vMIhD},Lpjv55]_JvE]mYu*F<rOrNk~EwX؟-( ApdE zIt  q0mՔ4 _';EvdG${*Zǹw  h#7з6-*BaSx22y eca+!rŀ"(yT7!zﱉ= \ODYTM$at+6 Onzx58!!ql> U; P "9: 24 ̡>;!Yq@ !IJ9KmB61b YCCg޸)t{ѩ")yO"W|.r-#K]Wڪ!Ow߉1ۼiJU 7Wb*+'S67ޫ ƞ0.+EW Orf3Kef؀,7IPF>x)L(&ڃW. U8NOTy%Ǧ4MȽT}ВCV̖B*,€,)gW)8 ZG ډjtpWxT4GlL5. ㉊R%%Af]$)05kD ӹ=`_^,~f f-c|mI2t~\O;-v@֯CFr{)ÈLp}{4FT;2LMxޣ!S(æ |z5֝:㦂a‡7L p:SW)"SΝ C^#ns(bw41)$4=_'(&n|Zm3ݗߗi(g]Q.݈SNzm3|y (n?.j6؏,%[ߊ-{NMSevP+.|[iBzRm50@gomhN KkA2av[8[G? 
6x@p]8usAf҉aß8h`/l0/+4|Ԇ i ujÚWIbQ5EhrR|w':D4"PڐPfTP@h%PrNWo)$kt 8pyXw o\Tni\LEXUT(ET8A(G+::z(P?\s>s#͝O㠥0[jK֩c|lny-gzN,f)Ly7QR8g2QWm2EX(|.pNX3-_va+" "dODu/"taO |YAJoݩ\c͚^w ҫzuLk9QV0pš9-QCRGdw4z"'t=J0rl;+s79g ݯS0q?Ǽ 1TLKVCyȃv`"KFf-_FZz(R l/ϝ LZ(n(SԳ"ȠF_庿<\X?ͣw`ȓ9# 3Q# >hWfqI^ү߼a s endstream endobj 2398 0 obj << /Length 3328 /Filter /FlateDecode >> stream xڭZY~ׯ`%*-<8|()9k!H̒H@ơ=8 ʪTř{>bs؈>Hol`co7R@Hn?٧LfkZϓIoqhlE.mEY~퇼ɫ( ]s3GؕvX/+IyCU}W7ԸvMWyyJZty_Ðtf2ޕy ;-)(vfWh45{]ݞפeFgܡ5:ǔMurm~;ZW+A@1WB).qYUr[ 6|VQBƻ _]y( |` QYp@)/ jdUf0.o&x#9a(e[π}״)/4ۊ;W5޻M=#iӸ67|3Kb\x~Vy lLkߓ gR2,vpVQV3BmRNsB@3):Y(o$XSa{/bDЇyH!6̓+H idp2 d،db%MW΄89L|f$b-aR ^?ٍ"N,]o( ĸFx 3}]2C 9# >$G٣~`0ɵ??@bi^y K$kTi-@o]0-o= d7K>"эN;WSP p ʭK O%)\_@5 2(5':3ңf`A !HLj96- .m#I.8(wcp+F*ۺ*&)yHXjآ:h5?;&?HX]MWŮOaJ]qGs`ViI`4oMHyaNQ^O4wrNjW&NΈB@c2qa䲨L5,\F½jj5zмj=( 71 L##ÛY0]|lӁ9צi#"ypG"7 !7!$ M Ѐ|ﵒ;Wj>t:<\hT7_co8"5[%1vը- h$ppʕc6T+VM aQu^/@GYRX੬#bVd-3ÑHNeװDMD >'\5[VdəD|2.$%!҇VQ2b'EӸѵR1hsu fqL M\E+APS$m|UvT}%Ige-*d Ȣ{Ɋy081=S _|/ʤE0h?+1-(%\z)k!6W3~k@7dq_,4lP fmJEU鸗Խ,,11g{H3ĩ@(~/kk M m/+Y!IObeNmǩ 'm~j|4 XA-rGUc^J<>-!RA{>BX ٚ78ٚH* ޱ:&ojCMbKCꡮބ=7M4 @^D`HBfX nφ:!?g̶ZuyCev4D}iF$eR I^Ej;R44: }ӺP,/jڊ[!mQCBk_֥"I x]\ $M$"0fEkNP_>~$n2Li'`ПlYLfk{yro&ޛbl]}OCF g=$_e?zDI'L4LԐ0?ȵ于Kc0{'|i',4f? ooɔ +QMПwr Pffq9<.Q )RD3Gң4 R!N'O˅@uUv6cy`cGƱNrl YoFQC$\do~s{{`%?“GP`$O\9Fw@&^lB;}x ,yҟW$`s q?xM]/pHPȆZuG^GwK KEvX$4IҏU咕wvK≆') ğt33E> stream xZY۸~Pah๩}zةx'CP4b-E*$ק)GyKM*Xݮ՟t0Ye~xu_ U!Xe?oY*M0`#" 0ި( \mdם|V9˦VZxҘࢤf=׻[ݙY^֛PJOdY.#DX e8yߟ}s'LDʏN>h3PJ _*]m@Y.?]oJkyy[uK{yםÄCslB1E@uEۻ Eޝ'pe:/{6L)ϋHz]ޕ{݂T٥P^k,A3RWx Vl0x(BhE]到,6|3B Am̠$[g\еoZl5lAP7dsaȐݩwvD!Q4r(2Pvћ/;TľL&g/;82ԑq'j,߼ -͌c{ךd ˼m+ۼஏz`)owG+u^smW 錦&]vJۙ['S h&{|ZWp/{ 62U$ [Rp:EfT(?n^"Ɋ1ޚ,n)意["{x3@ZI"E`sDkzh S&D!/e[g 6+xˇ?o^=\&+;xUBFdx`Y:L[|FNBkklo57` -%gr>xCdskBd$StFPga^oKBG=vz9*ʮoLj2XlX|E{EMWhllDc]FAViC(q@q0Zq-Nr2'IA/ e8{?^{"1fއfKwMmIYoK66q+$il4TҥL%H}QN\Y!R |I+x"6 1" % \ xV&$^4 g#¿ 0-@?`smR cgJԂr:(8x gn8,N3+~ظnDNQ`$7Q͔`__x@+>iW՜&v$>6`шlyR\ poיdר2 t]S0\/]X07uگD V}G%got5 z3>9SF=hWPF;DX% W# 2U:j-Hk0e .uIԻǔʢQb\G/8DAmJz/h0$TFY ܌b98l=ϢP $ܕUfΤvwgRJq;Qb(|AtnOMX kC!+`ewiF:g6&⑌vh2yq1pU jZ0 u3 /_xsB8 @cs]2Ǹ6x.#]^ݱ@\X?m 5q (H(uzw2|-| BsD&ѿ肠3Sso >8O}qYnhc@66 YD' +`n1Px`x1YVD]:dT 8o%- w0lpWvND^I"Ptn&i 0qL\K10Yvx? j׿gn_{npZ6}8ssg,CA0NR#cF3e)&S>C܂H RM5'Xt3‚Bz'B !tsb`|1 b?M,5&W R0:67vפ$xYj$aVƙ/ tw0\PV6{ LN/X7xm endstream endobj 2414 0 obj << /Length 3387 /Filter /FlateDecode >> stream xڵr}B1X1۩} .쭔8n?z{пI4Í<X)7ӛͿYo}OoxO#7NSETW7[? Կg?$n _zqb= kT*/kn7A4li7[zNў/5:Yʓ30fǎ)+ 48?{ Su+Tj`kBK^ HƣR`BX~|UxwqKEn({dmߘ?µ(v`ZV0Ʈ9;`|w e{SW.*'ϐ[)pX׭tTkg{ۏkڍkǗ/;,3=׋C`r0XNYɰ2kffs) ?'+ 橼>,0:0F|5EVH{" s'4H'G}wE֚]*e8x֖EW.߷uc3EtzN7+k/۵wٻGnQi;|b"L !PSxvX,lf0-LRڇpA\"qLx_r1 `&^ޭ5eMR7 џtEaTۥa8o0| TL͙f*PnXtG`/:!RP'3Gme۶E S4 {&er4X LC벬=\аO((QVQ'e۬ʧ-Fd+{9#),6]$zhA ,b4+zb-15nA,CTW "-FDR@;Umy}ȑMǺOu/K(ȴJ~b~GXSPX37 ))|e)቏řl1,"\+ͺ[{Ѿ3/q&ˏ.ۙDž?gMW=DDd{Vl dCM$h1+ώoֈ2NJ[^>wZz;e7Gڹ1ryK H_;^\0!sOΰ/'0tdZ%[4=w`ں Gh3@c,c. 
yRN'lm>OhB !sJgO>MY2衁LTpӖl*԰BjLe+R]{(~^"\O fjPz.+#acLiw%O\=M ෫fW,E:U!Knpa(*ʧM\*X]Kl0]/Yg[&| Im[NZpJ٬pMR7;˻@Czh_tVЉECM=OAJD!R3YQբm.߬k.%2/Ua.IO{ ~wI5Ǽc5 8uf09-MBjj}c޿]~tYꏹw_]mҦKt1!e!2ՋU!2NF} TRuΩ_LSK٥[Ø8G%)$U$%Dʹ\o4t.WHqO~l-B37r[Ő@+|atIOd4҈qU,Bc0\ױ= RŶ#N>!!|> stream xڵko6{~=-UHE?^^4'KV7á&M X g>~$LPW׷WPFJs*\]Wxv㗱JÈŀ6K 1~{/8d, & /u DMޔ+iiY>l:ǃPns|q`+o Ÿmcc]֣RWmǢCր=H&c">Ki=gi>mRwv4OÎٹZZ˲o<dTusHKɋk`A߁@.趸#q -[GXmH׶]q- Lgu2F=X[&a0- AwnPI~~ɱgDVتi<6mںzA3g$mx*A "ې]4siyqSZTf?/6 Rzص7Yw^F &W& &h;.mrZϭ3YDN<9G2ΰ:wFg81Vxypc∎ 9QabF}=a7[2L/6+l_™5D8.-mkcVٽcp}^`ZM=)>$A Qp%k =ٷ6Cm@%EEhCyPeWT"%dOBz8if*!wa)q)Qʄ)eᛀ+Ze-0G[dM0 æK*׫|LOsqSo(Cc./826S%[pSw!٣*gfx<#臾ꁆ8DXpǁo1A Fs8pL<>[oQXw0r\>eIC94.)}\ΎT:7} f:$傔 PMR gi8؆ѱ {'Gh=H ? d }F"¸/{Z xB.8&\qSF`.m=)E Ģ ʡ p#zwk6m 8hN> ۽^U籣# ?PZ6]xJL9/sβ {cpRaYcG]T {SNh/8݁>%N3;K8Pz `s&זd)ѡu5gۣr <}̒DR|QC@?QJpҐ_ : ]gb9$4K$zWXh*UuGمŚW=8̭N7rlfO4ƈk.y u- 0;Oj֜&SaLP#w*v҉ؠ,8v6]G;⠣}lW;& t g'bRo6u&)AzM:+! ,Sasvox~b!@b 8?L!gÓWːVD8˯|qCbMcnučꌅr{R^@YcZ@qbE93a2 Z:_װ]t8{f"]^u"_pWD}\>sF.bb"yB 2 dP eKdRwz*7,7Q[Nb9{ח@|wI+I(Cf$Rͤg)Y(f]$>{H5>ٝd#*{L'Y'/X(4:e 5m|8JPzyb,`N@c]y # |8M*QǣͨoƒOJ1jJ',B6E?{^W#;ӕKllZH%C 0B@߀I5$]oGJꗄLʒ85Q(ڭSW,6{a_M|+T<t3reVw0 ]e|2ϣw)3ṉ ytIA~S~`gs}=~VuxRXBe5L7OwͽhPz9'y9E2|q?@qޔ GT#) ?֌:Oάȷb:l./^[nKk%p&lN|Mԅp/.\$xmfpz3ys͓j(A!]]Bd+FoTqp|PO>mWmzX,|}T׷p`u#QCcH*|Ht8R=e=_o'n7!Vag%C]@TpM9\-]oG :_ʍ1wǃu7AaP {U/a㷷DDβiG<2n-'C 0uPPlzS=A^}~IgɞѠlW v-龠ԖdNF,n&2k`̇/4W &4$O':BB=hZAMɩGSB<|NG@Cwy; Y](Lt)5<d]ڞb;CY{zCVy!T"l䣠%_zhp88uDV=O5Q> stream xZKWlU"`\>V6V*Vk9D,9\"&ɫ_jeI)8watuw]ѷ7^+yJDQ(} j_lfcn:IGe4a/7O'=x{{JZ)4Mr|,Lt?>^@~ҮWXEA4YM&ga~r*0hAQn-7>i ' x2Z$FǓ۫%Be4$BJD/ 8^S_?kU:4^t<$_;Sd%*0[EC8 EB\|a^)Gє~A-+_3 ٛHE׵;~nPp~э\EN=0ZYL5MWۆ.0c,;h)  *)?D#kщ94tpkk7mڔp(hi>ip&Ԧ-#D\+4\_ RxdZFQP6~wPomͣ;곫X4l+n.N3:Qyt$FSڽ;71Z`" (3 ѻߊED>N{qA,{h6bwϝ{7 M\cI5p-Yi*)Q TݾGl؇7dӓa%SpIrLv8xo`y'M*PEYX0 ,!l3f6=Dzwݒˀ-GRϦ*4&zh;sZ5XnCδ+>| +h" ~\7qU%Pq'cHu[phCFFA?)3?1,1풋 =xpV{~C! Й9_*k錟_U I= )d!^MS>u~(Ns*nZ{WJH慭%TE@椲>̠/ p$jq +|.*$mH.T0ݞȂ%'ՇéTBZ"T9r4wqJ%B"X 5ɱ&N΍AF"xmBXDx,Z&0,%h*NG&ZN)ŀSTA͑z1 ^ULI4 f߮i"&e}>^֚cGR%dK0"NFu-9ָlMk$AGsO[j9` Q幬*wz+7aGn@fM1 R@U[=IGr؆W>]lW(wI克Y>@2LRLe|>gاhe~Z Na4.6 *J+V4[z6fVs!fojH]O;ړYN% g4Jt9) gl2,j M0McusK?=.?%Јl;O4h[ Vk/9Ê9*˒w`q<@[VdQ}x+.`#!SUcZ@Ɋ5>qb :rZZ҄[fz\wRaD5uy|JGcHZpieڊ>^(LXp,' [BPCq[<YK qshI5{eeç~`;זQ&\M,A~1Kq7k f !t :@._rWjkr@+rh:3[|OԟV-֒w3%-gtl*Bo(wýmQ .B¯4dSյuSuqqڭBQ'D)WsrfqH<~<RZ7t[/Hh;4:i> stream xe1o0w~G#lc0+JLTPA:ݓ#TѾLsR":L3s2Y%+URɕUꜥ|d>5`NR %Ƚ(\sE1N$紵M?<\'bh=dDF]oB]tOރ2tAr3bv9inayhA1HΤ <]Yrb1|)<)dCO3`;CR{О_YT0dA`}Ãv>l* & endstream endobj 2434 0 obj << /Length 2653 /Filter /FlateDecode >> stream xYY~ׯ``9 a%K>*k)VR(U! 
h =8wvS弐A_VUɋ'^pY,v%xa)W)CO^GɄ;b?2X !ӓ_]IiDBbn}%UIk]5=ALY]%W*Y]Y3 Vp*qi"*im*W;{w,pGRt(OBe5f)͹V;VeD f'A7]!&ݻ&.]I_SEB'^Dpm;뺨Wax?OMWD"8(y"bs^K"4'=Sջ]ۜO4liȜ73<(!]% 5 eD޷Ӡ`.,Lcw4nDCXyZ' v-StzP0!Pw>(@ۦe6V7NĞZλ#, ZOޏu?m-͝KP^w̶mg%\Nks"ذ_Q6w 5DcޮE:?Ho< <ۣHA7?Jȉ.D/n/] kpyX%qv#Oq!?gL!oOҔ D~8=F!2^ - O.W@合B}N{!g*~Ec!@x/Cq3C?PDs!:kѤy )'֚8igDwg(ﵵNTiEDċ0m(}mXd2gY-6Ftf%2[0xL>L{sY,ݚ^=rHUV8Cؿ_Lyj'10[ "c^;$Fym )A졆D x8X^!P`|ԭ0v"]|1I yDY"}]BD@'W7o\rG5c=VݞkZ+q}S2ZEлp=!9o#N_ռ9y QM_ヹF4!TdVǁ ʰ}í#aVQ6G[zj=l:y[V״lF:l(5[aED94;`uK{>b%^#Ùa}‰t;іHlٚ7ay" ZE #W j!ĆzۢJ8Y.$I Yǚb6y"ylTC_vDq*L}HY׶Ե(2ԥL,c*CXL|ėQn}~6c?ܱ6qM|`mZrMb^wmL\WR.V701̐E_6У{g&ī˃0ݛcwar/Y G+vB:^ T< .R "_ġB!E90{4-rG.,?AY6ݞm]C ǂdž|Ӯ49PY<[ : J )`6b6*)}SQQͶ{Y+Rƫy#:p [,ı- `R"g}W=K2Ny 2T`ED"H-z2-z8^=w:no endstream endobj 2441 0 obj << /Length 3467 /Filter /FlateDecode >> stream xZܶ3A?F]9$jyBp;JVHEኋ qo_zgET*BD:N/2)T׻ld*.yWfMqT᎜^,Zs{A p_ },) ӫ?Tu,8ut:oWϦ(lԦY<`#LmwW~&2&.~R/'牜K|#e9z[%Ƴ|E1D&+QԽ38KͪAf6cҫz_["'ɛp ",G1ΣL'4@dj!"h!>skp-ʚO{:M~\:Xn]@Im4 5E:C,:+4ߨUN)"{Js/X` ӳX7i${ttzgNN:69UiDْ82Y\g)%!lKk!XVQ&OI@R⍩>P,6l }/)*!@v|'RdD %q>`,OaPAES1C米o`bݩ-8Y[a#\ "ZLE a,tEQ.ue`qc7L@ J UsLx2eoqxl6~PX=O[߶Ҷ8pdF ʁX(aZ>=@{gZ:GsA?z:4To:Yy>`r{`1i8C``z% 1[ J:OXS <r[7+8͆TW ^IV38Q fSGWUSuvipEh*44b=8M{8/q:.t| }ZRՈ CQm9"!)'W4:xQ V|Aؐ|0utG`ܿe%q˸95kd`I,?]Р5"22S| :Kfmv0m=;q2Q lix=l9~,u:Y=nn;'WU&Xkؿ5kA:' bdL?@g8$țrVe W$hGXQ=_MvHs$H<ƥ]QBIn QZ"z$OwBwdc2V ®Z=CnV,3:}WE5k SOI- Ap~dS #0pӡ}watS)8b͗0Z q@_1,[3[ Q"eV$. U2QzS(6mf#$Lb"s`$%.n [zG/.\HCS 7VA"0??vL?{ ,x` ԗ5b Xϗt1d"'(ΐԮzMH/D';xp1k-P*dǽi V<;6HPOÒg)L2lw\vOEt;3WF[c[1s1wb^F Hf2Z$c@23,D` {(yutl%z`,+#U]€#v}=}aypfo +H,QõӞ8 MN2RV[&eo9Eu} =aep/^ 2C* daPWp@;02ނ^nBCz۞$1g04w'SNƚݴS3f5`kR1jtR M. $%O)Ͽ nM n.km>?_J)7p}YZreQ 2S` > Q"`D/πlkOOÓ@7 KQ)UMܸ1645=z@s8=yu7\*(%z07>Kq*}*x; *fRJJGuM ?D-6=SsvPpGKF{jؼ `C8:c"8FZ7]߀ê+SKLe6 `߅v_zs+1rw]xsu$ԸzRsε ؃ J' mo)3ys'Ben|Ie1?3/ӚX R 4%(p7lS WX%y D!.dz#74HFxd"#9Md!mWdS8LH5WEH@2gB~2n}]v")'c*;-IfzU3q-XA-Qg\HK-} |i)դK(i1C^rͮ+\W}QǩݬEeyThy.ũ'RxU3tzAqXϜ//VwS endstream endobj 2449 0 obj << /Length 3060 /Filter /FlateDecode >> stream xڭYoܸ_Kv"EC.(@{h[Wdڸ_!g' a)~ g~ps 7>˄o Llo6Q<4uV"ݿ2N'y&4`/;/,7{.grg),8NF.JҺj,Zuc0dEڪa $HҀG11LiQ:? 
;uY)cReԝq2: .'K4d{ߩ*]Af1y }ǶT5оMc"MhyH)Ld(#;',"qpFAri& DD,2ϑi)dɨK^]'?=LBA*㹨URioQT%>ܩ'Pu :\:o?q)sӁ(0Wlz=0C̴t)>N nH62`ͫ3eaD3-) b1Ś08J*Z#t2A:>*qAvݎ[`;ooPEs?\}|{sj FHНma@O0b ?1BCTȌ9&ĶWk 0v]ATi}1x0R^})3Vhj%kN!_093a8Ʉ"@gAաOLi-2ZiέP#pz D!)___G9a}k$J)+|x\S=D 0t}ج]I|xvqc1#!>+dDr8|=` ~ʻɍ:';@9 /S^Ѕbp(Q8ۦ.M;2 x ٟ݇A5s „_x &VoT׮tMF_cI@OuoKpSc-K`{N& x#N# Tմ$h?2Mhpff8s.΀(~J><=\=l>OYgmS6jH:} 2He]U]c@O0nRhclBA§U*h*gA/◉*|蒸ަWZ7%R)Utp,h[杷1͚ b;nIKsG6 yؓ3FӰ'EcתAiTwp7ٯjT=fY?ɢЩaaqkZAo)ၾ;A*BPAfk&p*JQ-Bvb'D̕Bx(b5cVJeS:c +ݵl/-;8B}x{f`)ӎO`3oRԋ(nZ#Lͫ3H]n- 0l:A>s-$pu#wO|D)`NJ`䮐J=jS{0]pc` z&w}zqU)*i|`7%2[&Nz!%[^qÚO3{!nm];࿧#ʾ^9ȌRjd?g=(AWd+J]/m #6nj J-N*@/4^adtjZuOk6~Es'B=SWxq!9ģi"z"@?B VP0PYWhDPjp9m Հ"; h`(ٿnٲT{.yIۘi1y1,rf"ɘ/Ppz W=iBy] C4BÓM݀%}*H 9:Dfcy>5^?3qUMMh'L]C h}dui6ס_CM9`E&j[c( \/cAܠq&GxƄ٧6snB;>DhX$p>Ey (#u)R> stream xZMo#W{U$+0$ $3sڐd$[doKn r0\]M>V [!d 1Mz' P*$= '}I.M82$U9BU$2d=ŝ){)l{!xrsD$o` *&K0+DQb)1,aRɊ_|P] ^XH冘񀋎eb] (P)^Hs.p٨y͖4X%s\` ]]SUWJ|ǫ+O`K z~5̻vi.Lw4Gxz5?D;[.4uh95OzV^M?v I]$#&*٬h,xL[FetQc7jupksӅgJhY g Y|ot:Ӽ5ߝ籽mj5fjFj́,Xffw77UU$lSURv6l5FڴYOp?P[OxhyZv(9z14 Gb,d5WlƲQS0?E Yy3YNYݯM]?u?λ`ͯuJp}[:xjDT,>\j#vي)zk{䟍ce78NK|/98F;K/4M֓M|xVgSюOw|0PJܨh]x|2 Yw:Q~/۔^/b1N Sǝ򰈂nǷy\s -z2-v/v>] CHz4XupTS0(oʇHaQ"Ћ5]7oWĢc p:=;c>(= ?ohOrst<сfCttٰY\p;ɐu2|| Jʮw7?8ƻJCY@!HY pgzV  O@-m,L endstream endobj 2457 0 obj << /Length 2965 /Filter /FlateDecode >> stream xڽZmoF_OT}%8;uH}ŵD"u$7Hw RK33 O'?2DpΔO!Xu~{$b9OF DU0 l\ {w[xmF\|]fs%ttE_e\hT9 DJqmQ`PY3ͣ/3amuUnimVEΤn)tyyT<+}S4íDڭ9N a:>",XP :ffo+|jW1x}JT/&L/ZW3=m>VL]ZS3U' E3u'h)X]ݣ+ by x<姜K) lXj@y~61E2BJ / ݀Eb$#zРijj^֮i ްh(hdb!ri1pMbkXpec !LJ׫jmҏS 2MalW;DcA;0Q,xυ0nX#1}1ZȘb=2GR{%}q'pMܦ +fA d$2Q,MPuK*X5vILS-/7}#d䷐j>¢VoM}5Rzϔ*=œ"f>}̎’%X}(N_}&퉪QT 2dp)WAAni\)wTp![ (Kc(ШƊ}Rs尧R(R ")i]=~\b/rEKyEiwR_@fU0A/[țWUi>]pKbAKx _Ug8R8\?e0RC<$i:*pP>/e8r`C@)0x3/>4:0v~.e6vF c$c|=Igq>fL!Fѽy*JČBbMc(i)z=/npDp<+ZxYmuOkHQota>cz_ؕg[5^c)0*{< iX")!=VBudΡpl'Lq=Ϩ67Iq&IT+tB׸lJÄQ|[a{u_`pdi&|8tFx2YM^794|ufP.<^d 8olS*L:~ IgXl02xp .Ueǔ-aC?e~c n@ub:T仪̫2WlAgM^wMqp wu!05/iɻ,6eރtcʜj[[[ ݆"$F- Le (զٶ}=Vj}xR0G,O:?$._P endstream endobj 2463 0 obj << /Length 3132 /Filter /FlateDecode >> stream xڵZmo8_O9H;ФK]4+{AY[r쯿)KPTpHY3 ÎGWRydb:ze2c%6|5;]Qqf &2Na"2$I],m}D$S{XʸD ϢKRi!7u5ͧ+KC )T_ISwVٯ'fbVزQẶ-*ogvN_j=XY~+|ʤ)}[7+^f2lյmaW#*ti$Ѽ% )8lvTr-vsvze^G=INyʕq_7D` C+Vh\ mt)czU{JhXFE*_ ²) ijj̋ٲjumHi$U9 &iv_c`(ys=.Qs.MzJ`[纰nh Od'1;H#C n M$_6ez7&Bs+ɢ_ D/ gâ;W6EU}wF~y4Zm9wgl}]g$ƍO%L,xtw':>4*W~ޗ#"&@I;),!a/[Ԑwa"!4r]u0k߮`M lh/:Wl׀`\&K!C JBn^ y=yUpL\4)Y '&TZ}1Kd\𲷱)dpe0qPj~Z~v/rW~kON`wO[Kѵ}T)J(GP(Iغrń T<%~=ju_ h5̪&6h̍aWm]Kc iIT'κr'ɾ'II>Yd. 
xý{½<Vk$7%;FP|DDۓ,qO T@ҸP'Xy96-R$QQWeNcP&cQQ?/Ui(tT.OjbEgcZ0cB恂i oN *U>wфh{hMmzQd.kӋf)2db( 83H;(&Ƽegh/XVC;z_UoW+CBn3zU6_B1ayslLtm푉dI.T[ώU@ȹA~VI F,kwۊ>ң+hAeA-<N RC3)r񂁇^#xʀfʪT8Z++56 SAߛ Z>%V(}3p'J;@xV_UHF׈Y``d!do04mGήaOTLpL+w:p8N8+T;wEMCܮh̝l`T|(xvXx?ko(^#v)IP sXC m:F m9-r%Y8i )(*)_$=Z6GżhHUg[i6d)O Jc?AȚ%?B1S~$YvY TY)t٣" .;/k^ W]lhWXkĤyJ$K#] !EK74ɹvLP/Jxxt{ݬIkρoTw~-]AL@v]BvUOXcn]y.C7l#),6A0-\\̯S 6ܾdƏ.g d@!:R*t(}xƧw-m ~cU/T8XfqqY { E%aC@i`&䅆'h$&tIFc bjɢڣv9"gm_o&k|9%8ИyH݅D" ݖWղl\jjwQ39CU⁁)oh3+ ըGm7.*Oǁ̱胗Evv̲\UZ983M{l*ϗ3wχ\CZHpA]\"cLt,$qLt]b#3:quggo>|VR]z~92*tH0,ܥ;-#:@74n^!Zk{T@߂ƣ+}dw?af;0ȓF]kY"K/9``x_#P{4"!p@^|@Q7yT;DU•]phbXLj oyރo<8oab~Zjkb$)zZڸ٫ _(޺Ҡ5 >ba 58XQ .D\zD#!Lc'Pz>'LD;By>KQf"Kp OFﱻ'`u  endstream endobj 2475 0 obj << /Length 3350 /Filter /FlateDecode >> stream xڝZs8_B&6璹1XKO__q7_>S@e'm?"KKD8@}q`mf=1ڬRye֔m35b#5>)K2\zw$2: }΄ep}3V"V_G4 PLiHKB׫rQI >6HcuB!pmmw7r[Q#ogYh -%|,,]*T}UK?vYZ!x5:[ͤ I^3+j78j2+x;rhYu]ٶ iɿld $MmMC'fPK؇%Sd$#4Y xc.%I(t _;K;X5%9("bXAa6F=i[ޔyf*Hϗa$IG n]~YdU!lewƋQ9ʦ*4xK4̍) Feu(ufQ[0IeD\kl,Z>ηb-fa:+޷ԞL5T [l ]E`PȬh9U-uC(TW⫧_e;@YL{OJ`ꉯo{G#h}Dʃ?2DwLC#I% ۓkeQIr幘s.u2-p[Ìti[=ҹߪ?.&EVAJ(ÌGc|GtEY=;Mq2AF ) jeqhrMn`D6~%6o?>vEBpr!dD*Tʰp-]x1~>:d|Erȵ^ wL^3pPŻ.Be^/209VeEV<_Mdu:8KXЉ`bR59NϾv˕AJo z<3FG\dFja'S|U3]CX˖G&3czwڗc1e9BΔxU}0bG b{%*RpDta z 0חLj{t?9/gͣ޷ƄxEuz]JJ.$%j(6MCh~ ;^~==y}Gb,\&*U$GlsPPd˯[SXXMu Uwp[_\f䜭?g:j|UuE^빼ϛsU$x8}B@I ˼f> $ޢq5T+<@4;3ZCqAD7Ld%S66)E265,j)\vet͢x(&+GFvܡ>MUHy{"/=$8hFF!zFUrlv J?xcze|i{YĤyQMg B_Uu+42TlX$ suz\+o"ђ2ݫ:@?ecxK8S>v^ڵKĥ~@\ojwQW+HxÀуE`肼vR2O8|R:Õ!/sNM+y2h}1Lig03`e.6fLXK[ߌAq(&3SnrZlSG6,}|YIT0W,O uA=֩/}dexH5n.M[ǫڶZmczmݪ&v +~i\+X^Lbdo< J*3yTOJb+WUW^城$tȞs%؞(zȚ|jBw((ނė$W29gip0 &Y|j? BW6F4#TTSW2vSNU\¼A AOJc4P&!/olv吼ot kܧpsBeT|p":d( #u^X)=Sמݼfy endstream endobj 2479 0 obj << /Length 1543 /Filter /FlateDecode >> stream xn7]_p$\VNb)\!zX.Wr{ZA€93;ጰյKYwQdz|BGC.>خDUbW׎ACa/\4Fp{y>} Ritn2Eo{ Riy)y^!&(6u=pC^̰Aa`6ˢr>+z_Ehc[! oφUQ@KUeGS;~l}Sg(ۤL\ @4KhY/^]eZnӨbMJ. Ul+ʒM'<7;vQɣ<ւEsQO\?GEb[(jSbo `7-W , [K B`݀xj]P6l2,m7f vPykP#ucnҒEބ|pr\VF6>enJ ̋c~m%ۤQח7˫@hTvQeb6*cI Ğ|Aȧ~z9Μ T1 B[dmKMvɮ0Y%?Oӭʦ8$ၕ nuoՓL9{[n\9v, '3y/Xu@Կ <*GBs&5TPdcxۄD -ߘ5Mmr A]ώB&# X)^M9qXYjmVmM(E][&?{r6Wf _,L&M铼 ƭ" fJzCgy9l][}T7ţ[=HWN^/ IF:Łz?4`m҂ Woȥ|+imo|LЗ6E44@$ HE%Vƭms"o|بIom"S!Q;<*0&[:iGFCPBݬӬ{U?H6X.x>~ZA^Z*T9RRR^j[Y =_칁w|,QFM$*uoΖs_޽e\X%x% ny49Xq6 [ |X3ˁwdiKAĢ ̀3x~oݝ}{+=/a38 ^Pr29wBc-˩qFS%V(*D*Pm[ȘoŇƳ 9Mzr/)q?CWÚiƴyF"2 ݛL}1Nn4#„U#zHk= /jt>dD  ؊7!u;:sb5%yK׌U")@нr}Y ~DȜPh^Beo t{>&7,7 endstream endobj 2503 0 obj << /Length 2032 /Filter /FlateDecode >> stream xXKsFWC*s4/ujڔ8{Hb{z XV|K0yt|zg?{uy,efv Ιff`Fc$X$fV `6}\M }Žk?PƺS iaRロ/bΣ[Eh7ՒUUo!= ,4me U46Իl 9& 4өB4ix,}^|1,5{d@ `!K?oӓ;E=Fz*v~]q! x$7[]ExiDz? Ah[ pl?_ȘsDh=iSg/ ž @rvgI aRj_:e϶Ϥ \>je_RaD\m. 
e*&jE^>i^^~Ǧ-!_дuӕ`~[>rSU~nu[r_Lݱqܒ4L fv."N"1JٴeTl=.:J^mQ/*l+\TAex>%$pd9Ă[& fݔKѝ-qw~؅߈,|C=Neb$.7SU_N1%SX\v\ÞʦJXs a<'[FmQx(ǒYygznҀsViչ+ä, ;l;EW =9&R>ʟ Yg`+^5ʄdv2\ g|JH"'ΐ#jRM||iXQ"\f"ׁ9<,k DˬuT0tQw$s"p*={k:u婄J N!(nbz6]h Yy Bk| x=?~l}0P$`-]71GhR7@]\/W$ oX.yS/ROriǀv&R0bYפeʇ0CX8fTAz\&: )IùDR>1͡ܔKϼcՋ .ǙiBWy>и3d6QQ tFmGE>1*0E)SkTy)#XsNPr`1:!?hwrxl#Bk!ɩ}_z&=*A,n:4ř=gjeؑS#v *dȩW؋LT8aƆ%!U*j: Qe5&bȄLPuQJ9`kNO!PLiegЂX3bFx:< /pޮS V]p\PZӳl/M;,Yrn]d,p]X' (74-%=i\v01 Н'"xSWEJ9vFFoMV!!VICo TT28jai(ih1'ʗDTsSDJpH+eZ-Yw%?𘩶|(k^8Eic1UR O.uz7`AǶ VI!fcx"`щڴt:zlƁ0Epbj 72F@st^6q Jr}YX 1ꡬ~ >7)瀰tCar/~ ȡC.kădݰ:.C-<'@z[<) Kۈ:VT0A_')NbqeU+Iח`qe/lȕa%WF~w|P endstream endobj 2515 0 obj << /Length 1044 /Filter /FlateDecode >> stream xڽV[8~ϯ@IbvfժΤ}i@IIۗ=Ɔ"YuH;l l1]^;#ߥXcdٮA41-K< _^۬%c.b+n24p-nf7)z|jROnBtO!%Rq3L!SZ_;m|rN9e_agKq2 _aiﱔL샺'% QcD6ő>+z8HMZѦ"ɐ: }sB)/7"|?>RѐG[\n4>B$&r^¶e2zi)d830fPUѤNkX bm(!ڢpM22NmVFy61$f?$z`y?rѳ޼}=kNWWo_ =3(瞵NX)=int3"ո&͞a a3t 6"z{Ʒ55lXEm91G2;&eak꯵ʴƶ:}yOmgR -2QW/V"W)ud^+j.= *εU+>AD_4o`;CVmNV,TKRa˚^ a#4XiIPj$Z ǀx`pfC{{~`/ЏU7z" =[1Se6rXSe0w!-cۧ\wqڝ [V%F7-B>267z<Q?2p]]P#: r:u 6b]ҭ+j$ط+>:?tMݬęuFVaY~W endstream endobj 2528 0 obj << /Length 1636 /Filter /FlateDecode >> stream xڝXo6_!D}Ͱvk%>hG,z"${߾;eKEHw?9Ks~=yy}r:Leq; YqoJQN<}N.9! Yʃa kGk仭ԤRBl6uDk3kbY7,X;˖7 ˼ Z{ c2lC bK6IuM/f(>5KrdI9AQ"Ewຳyq8\MJ|g on#)Жvޛn^ooo a6'+{F 6bjP},^e0xGN'66o氫ni>&eer5̮r滟nprnK=w-:+35MJV#׾ } )'8Iᬳ^,:~#zgt!0ExoT7\mi̓OJcV{`'bW ?ucȒ,wqPJಒ$._G"a)Nh={ %!oʾGF.jRfRm&;h/&XpJv&|_6D֒ z>P:J=k쨀Q+vTnn N/+`QOpXVU}'Xo(#In*l-n+Ӻ/Pjq\g$*!:H L5r3 Dz^Af@{}[cMIn k?' ^Ȣ,tS"cgKd(^^+zqs1oRK턀N(Ko93{WX }R 6P: 䁨wo#8Lmp߿@W7;wx H YܘL;sK}7 endstream endobj 2552 0 obj << /Length 1692 /Filter /FlateDecode >> stream xX[o6~ϯa!ŋbvͰŶ$]>(2%Ocg~"us粗!@th^}9<=zS/Fwƈ2ᅄ Acr}e%FfQJ&/ߜ7q 3͠#h=mO~v/R'Y{ ݑN;զdۉdp쯤^serB˔Mx!ke%3oҍ.2ItV MF$DBEqP>&J/T[h혡 v#(MIMk97-ʹmsgwQڋ\u;(ʀls`DphvM;C[?G,~VCnEQ>YZ)EqB (ǦHL`r4K.%3ըPuD@5mtay`EZ%Y9OJSuhNs-d? cFvquHC9̀4)rdO֍T! ?o3=啒>0\8s ;&m4Ā40"5u`ሁR@a@Kb\$\"L(a6i,+wS#D)9DeHk̳i7M10z\ )9g3GOH4 qLuw8p`$tc~LaH[')SΠ"k!@@FtL?&7X?$ :8AK}ld5v07W? t]FBa ϙvz^l[Ԭ0BGo!ފUJr&/T>`"{+Mf蘅 `:YI7cb0ñfRr-3 ~ukASek'0[YZI1P)>6N4tv;9$ٳ87r'^>V2A Q+n#@Ǭ0]JX AX]47HP}}J w*̵!Iãa_ ŀ80r( ҋI 4n9`ͬ]aiQ3L+m:I'\Q3j>)RP[ ݸ}ZQ&\mRMh˱2$DI#kG)id=TF'_B7 a:MC9 rr4${ҙuPDΫחGz)^)祫7N"G޶0dFwq [y`U0QAg ^ pB(aҰ5@j{]u861 ,-O*kp؟8*hپxϺç{sR4X1RS=%(ywlwE _.߿v-e2:Yvyj̲3o;Smkg(-jƏۯBsy#WrgaמgיVko^WV+7(h.U]7G6P-TU9:7j.g7 sn2D7:+t.BݫVTcuې_m# endstream endobj 2453 0 obj << /Type /ObjStm /N 100 /First 980 /Length 2048 /Filter /FlateDecode >> stream xZn}WCz hm81I ?peZD"Z}N59nȻ|V(".($BP\b9f6!:tv'9lwK9ٝ 8ͣfP$qDmDb_BL6͚R8Rg'c'dG%Y0,aK%H?Ylhf,P1_.PKpC . ˄C h Pǡ!c8$[EɎڈIۈ8Q$`#X0BVHZcCOZ )9!&HԤsM U3\{䄃 "f,^,y}b @W4B9cA*xm32[ Ba 1d;9IV#9- #bR٥@m,Dfj̉׳K泐ĥDfjHݩTLmHDexD`(n0Vx~ ౌuR2kB,N ӛ_YSM2)d3~.gk؁f M"WbiRtE,2h(bI0kEZ۽*iM]\w?SBhhh@] 0-p`d6션l. XesX_Aݫ {W_ו{M\:FXtіo7ѯV, !7a Vs 6;>:jOVgyW˫?wݧOl5Xr8=roT'ڒ}Inml;s1}!@_Hk_le6,6؛QNF(P7B@ȴ "K9L`\.WA~BѠby4OUN}7 =gy1T\@8Zy<xH7h,G F{D+Cr٧>[6L,ȓh탵3yBp&>CfOh(HKiϥ/?ָ){AƬGr(ʅ YU;Sg##a/j'g;A/bpuPJ>sv3擵yKEu- 5L;TJCĆ͌{H}plNmCյյuCX I4)~{H[RcoMF3#w)HrYw|f΄VęI +lq$v|ev:> Zu6 "nKCmAсQs#[M=oLwf=98r8tpq Oh/A6Zzrv[ ijq/XxΝHJNÖ 7GvdwrDb-/Ԅ$֧k`g{g/W<<cotc`vH7OikI<>-* .@m2OShq60tnJG{/t Rjy= }ڗ>KOzYYNkn ؖaGlQ] z;Uq4Ƙ݂dal# UNy¾, ޾IHIʻͮFOŃ" }l%}AhcP>>NWc iKp4||ez4"ͩ{u[GңX}Ɇy/V#9vhJ~u`|Tv?* K8(VB4.+?ç endstream endobj 2565 0 obj << /Length 2214 /Filter /FlateDecode >> stream xڵYK6ϯ0rY5%R"$AI-VF =뷊U(Y=n`.&Y$Kůql~χty"a/d|Pl~4OtHI#дp*=+!Wg݊8|l;g]l$]Il s? 1zZ~=ay&Ԫۊ8_zs@R]G}. 
}w㉿ő]/݊@agI Luӫ^HUs,3H E3 O?@/=5CUPUE9G@ .U ,BPjx\2ӆ~$~aEPn.F3ב yҪ3LSsh&HasM}TY;x -9N O\Ox" 2/+x(䡟>5tEdКkx C`#UZEZ ]{GMK=`EtPV«N[O96I0K槶~]Ǚvݟ{ `s٣:D1RGp%0b-jA t=Є30{t?Zڷ :O\WZ[гrc-]suW*}jзp ] i Fh?mSQF#LK iG6 n [>ٷD 4p`B;EzX:ݒ=D" f ^ {k#lڮhgehN;$Ag֦#x+Bt8r \9lY8xg8y'vj;Q YV@ZꎉԹ5A@vi#p"!l;p)}̄\QqTB>[c* ؿjy"Ԧ%c/i{=X z14]i(a+.h1|_?S-;hLԄ *zr'#ڬ Gq'Z&OH5#8bK2be䋇Oݓ@z}n Iygm]@Cט"vƭ?Zd0kWŜR%QtN*hzb#(1,դ<0P%cG̱A}g8kUKg"UޮPn=hb?jԐ/$D1)ӄYLh3{{1V~)UZ3. qCۜ+1$1>Te\6Nb$ Щ!c{Nӄcs% pE 9fng0΁S`β;2a{#[OwQB9,"^; +#X)2JRX794mS ?* IWaHFlK)snrbŗKhU@WTnV賋 ca k-0TZ 7<`E^1CGX2_^P\eeSĊ%b~^2SԸEY X'? v: ShhZ:*WqDwߋꊤL/x4܂tFĤg NKyхUt׸Wd ?٫fj*+2mф:f <I{y|\.pnR Zp-Iq 1#`LƉWwװ hbbN*a#W_)EieџU3 +8zPUL#U5o4>lho9O HBLӎJno˾2NY9 =M$\"!.j_dĵ/-׾ .Җ`M*a#~yԍQࡵ  +sh)Z C_J.IaP?۴욡*:)N67x'onizxIf7ɟWr!Rؠo Az })CA8[zrJkTk?O %cV8.RtrYfҏ½l^Ld:voR ]%KQ`^$6<M[*]/ǚlh`db ҺËJY\g g菱F, jm+\F,pf弒ĽL4e W羐 endstream endobj 2571 0 obj << /Length 2043 /Filter /FlateDecode >> stream xYM60rF $;Aݾ%{eVF4ί*VQi,A2Y$_*갊V?=|CX=Wqi&VE"V/^ a=6Orǿ޸aQU`Վ;=D5r1-ag󴰔~ 8KS1M(t|">lsc#{gS{Y26hAy~! '{3P~#<]^d׾ZWᎦY`iGBqo+O"OpNC=LKeQ@팰 J 4u9^[ SםO!};3~|{~-%Wp,B<3`QLUGCD` cs[%PWE~nF {5R/u%^m+,cs읗7 yY b琧7.wE(8BGBt, 3KE~4jSN U %ɠj,҉6Fn6i)-pu'ABEϽ_/)JOt~BduOiy 8뼟8M0.$`Oޘ_(o؅@Uܕt`(nt)Pzn` ALNg; D,*zgI"Yn p+M HؾmH*HSwf1SdtdO{̒tV7dIbԷ; GS $XwT~ i #Csf> C!Ĭxd6阛;th{o0 ߀v3{f lUF%E Q i 5%, -% . ;ji(׉+{yPЄ?NOI؇$jԺ9j._4P#y5[s-`!fkVSOg[De{12?> stream xڝYoBV\.iT44F |HG%u y䉲o,^.܋_t]$" n{!]W(?J.y޽z YmjӉRU'^|-kڋ`Q^Nl:늦Yu}Z4ku׷i*>RpNNmrny?ەt/;3Xs_b#VE}aP5Pk 6J d7zۗ4Ny-ӥ-q١{h5[QHͧO*]wfNf,ʦtpCI/dJ}-My M /:>SJN_|+mltitҺn:ktiզ)q>`3FerKX4Gso}bc{ӭCYcсcQ ?~%Gk!X\E&ith$/r eRȄ$)ʷz"f<1/s4"s8KwI ;o;VܰB5=>V|"ȧ6NY_gꄤrA(2 +,-EK9,M;?A *5j52LDd6P?a"@Fqtc¨{D)ጥ0D\FHQ|#EJy O=v ]gR~oKSwiI<VV};.Њ46!`UZ΂U0(|[O)w|"WR0" h̀D@y5=6;j\^KeeޖĆ5 Fv¼aww$zYG" f2 zq"TNn#S %a 9'| Kq:F1^7gГ55\יãPҒ3=pTZw")j [&HьeO1@IR 6dvowĞR"!|Mk;0GjgT=5ȵh^y&(+sg)h0TDҸy$ޓ{lq *CHKXVjCaX@yw 6Q(\깿^"<T852o\6hܓw6eZ]NIڍ,#KpA8׏+|@h3%̋PXD ;U 9Rnms; A MCTăM~Jw)dhI"={т@<}WWQ Ia;3@z- .7tkVQܭ"ߩ^״Ȁ꧆>nNGx:ox>r?h^,Bvac- 9VYJħF#>M1(ޟ%"ȶo/gs&hf9~ zU0<ⱽw &1v 2{UF7\CMQb>^z|>09ȁ>{پ*r; pa9'1gQ#r#!Rsn-0 i5F}˜ j}csšV EmaѷW2mi]KA&!p xKӷ5xfW@hqv X@EZ E'/0I쇮I CAV|.\^q}8J7\Y~8E|* Ӈ'& hƾKc$A}e?!I`ˡ ZjW3k=%jrQ8ZEIȱ,{:4@ g&B/Fy( G?v *T$^ >ΫK@..4DgMO!-nOԆ!`}Nwz8Hb"eY';-%֞mwa7ܩKRYMo2UtqPZj8o>jb -:66< 4o,>Ctɤ/jV>9~}MoRtLO;(^xmP4Eכ7o\a4- |~H04d's Kg)¡BRr| k $ׯoo__uwh 60gYqrl7~IHڧo=B37P`ר7ȖN=ˡmǶsm,M 9gj O> "^o_?]Y{COrݫ@ endstream endobj 2588 0 obj << /Length 1682 /Filter /FlateDecode >> stream xXێ6}WIEQ~Hl")d(-6QYrEijw}*ᙣ}oϋ/.)RFA]<c"zWij. Kկ/.x58&j·[$K`vk'z`Eϟ?_/akVV iy6n} Be[]JQ'K߱Kݞ*WNk죔!z3凵dxP9m7޿va^2iN-JLl]]`H(Ydkk3הq,h<} " H{(4\ ƣJ2l+/54$U=NC"JM:NXzW)(\f8A `G8%-3VT՜5ܴ Uæc/V]^0]rkMrL z|CVȆWA0 (ٞφs71\x]v:P~^vIZt.:iAŶo[!$DIlxXYV 9T3C#7~x3N);4$y7!nrvnIn_bR;B|wo"ox6oqҬ;sZŜv r֬.aauxmOR|݉FtU1ds{ᄼld} hn"ur2h Uj}{It_$2V͑fD}8Qp_rGA5S(M;Q> b$`uf܌@7Zj:rVJC9JUGGy-pdU u\_:~v޵Ղٓ{*>C$'@AX:7=^U-d jMp:Zʾbq7ghՅqvJ)Jڡ!f#ݑP3V|']}o`jꪰ3saκ~u[D*` ,BH|.D!P;*)]Q)u"JA!Hal!SA.: n'Bb7s$E{@clQH:6y̑"<@p1E3[!x><=ƒ 8< t{;2@<=Ipb*oAQE%Ĩ> stream xڽYK6-20H^F}M"-Ѷvdc;~XEhIb`sSn> QIO~Xl~4bL̤c }܆5oeۏd'pىx=Wez!elܴMݫjAﰿ#&µjz"zީljHS igqԾ-u;4Dt?m g)w}J+ž ۮh+}يSՠ;zYS03.횓4TFh?sQT"qC&I:4݆ZCCKn76aY@5wi½myk<`Bkt|i͡-Ɉ?6CID0Yf ި͸`怣2tC!yߍ$-)H7 8LdWJ*t"3" ,+Jb\ Dx Ɠzö*!}/̄\Qq1 XāO>Z#3(ZmCr ý5 dKjudoJE ;e#IG(?SMPuAiN65I: 8(xÁNLNI5kp-Ɉ=/>eѢ.ЫA7@$j3. ppΊɿvIy?jh x:zbQ hu :,dTt ȿԪVeߡ3 Wg"'N[܅NF;rNäIs@I*o" s-X+ơz묳^FPz0_Q2KncB@Q?y%&LM|&sso)z)D! 
BE13B[ċ1|\+ct.3,/s`xʼnI9T>Gg0nzH UAgZ-dfℬ1GVauŠ ~ou)AGn jꌳ`1?/$5*yo򕘍 m1jq z'jihl>Ʀ{;2֫护k x4eA}"[Fb3#˼] H|JN`b+M9iM ײjɌ*+B zh@{Ni:c^#ms*eU%9?.kY_q={U]q4:XU.ut_VWn^PQ׉( ʢ?rzPUB_`ɾTLLcjJv gp;RӲ :+caVqG6KESيleaeKkE]/g D∽cn`D^ 10~(Jmnr8r5ƛ 1t9 55͕_*SNɓMvoJ~ö0{AN/AzAxxZS!s? +Zsl: }ݡ.69 fiBj\~m汰5GSӳԽJ|OP2_P5q2Z4L 2ś>>ʊ/,TA?zrj *'pdً̓'i7ŅXqeLq-d'Q8lvEfMw׹&]M˔2JyR\E[mѴ8S<)>j4 -mu/W IjAL/Z my:|h^=ldsnƣ`I\G.H|ئ- |~V)'`2AItxp( 9BH$]H`b 'j|=<]_L?U5GWfo|꾻{Q[5'~8.7f21Wf-$h\e%UWRy5Msn eHߥ:,7 8(8Vw$#uo sHIRHx#zL^sX6JueMx'&R~¼ZLh,<k1Vl6R9vgMHٖK;/2r"4n'csao}&}S endstream endobj 2602 0 obj << /Length 2440 /Filter /FlateDecode >> stream xY_OaKd`(PI{YI6m 'K(g3)YiQ,(~!?>|S/PR-vH0N"Pi9ص6}ջe$\=$ fEo,IrϏbÙeߕٍlȥۭ=U۔r%((eQ7=qeDTWIݹE֪+ς`NU9>)PJ.N]K&A9K܉̭潍iPdԮ}S_QMX(# v)\. Fwe].U<"/q vկmӻO_yn T}_5{т@~SEP6ķc5,a`Y_Q>/.;4@YـBF 8*hwm3= X&%N˪!.s N 7̚;!s=86ࢌ3XhL"&`i算m|ѵ}Qf Eͷ{}gw̠r'VvH*db r 番uBSZuYй<4ISxelD1DlAg\W_JI|vּ~IQti]4Zwm f1IXMhy;G&rBmM4S t8|s`N6hޖ9@C.؞Q, ?go+30c?iF0*$em yoPK ;&Ç!#CőavNBgdn"~GQ91TjUjC+YaC=f7y@>a윒n%@SEv^:~׎ b5rL[l ߮~Ic{bnxV444\zNA8xo>La\D':HWXnVśYP!ƤBU2d)BY c-W{*8gݿ*왉 myC FE a=1Ea؟5q67w};wcLPCij{Æ3Ft];;'ȝa837s…rqs2]yc5"~8|,ĕ#Y9j&gI23_|sn FKpKگ{/p1v#µJI)d(TZU32TiPC 8{pHw]@iw[rĉT\5+G&qtK m L d.oXZsl^ȝΖcsWs+扷Xq(m^FNU l9,^L&RDalSF:gKy7WbH1Y&~KLj.ؚ\n\/[q:w ԼpD\"I"PS}e l.xzsB ؔ+3> stream xڭZKndZKzl'9Avn%δbؖ;3z},Uӝw>wy':{x Lܥa$&{~ޅil@) ѠG~*ծ6d>4Sh4k`T+ݵ﮽|8 rho˅.u88u[pq'nk8YqUsSW !Jݷ`Zp}lO= 2ޕEB`v7ctvooT?T 8I( q>vO׳mYɗpy P=766R.Z7[EO:JE+o[L²!0ؘhZIUtɸHC~p3\B6J(= r;<^l m)!$a8dKee[pšh浯QbXAy|i,^zXVZ7͵"uTeK"} CW 5һd)UTaulŕΝ4/w4Ձ ;#e(b8VPV>KdM^DuJ턥$a84H&mKwT(wDA߷k նa4 bTxLX%8&ܠ= ,\QZ;&ӵsm69b/h:@p˓lsPL/hQ"V+hsEWdpeÂ,Hћ S3yev^ڜBxδWsAEDIᛯܱkͰRT2 s_W65C Ʋ_-1 dF~KgͣA&՝B{JkQ.Ҋ1vuyHXAxr?6lTZ'[[hxltϽ;vT"߱jp\L~y(Y4R"]oDۮs;yW7AtA-ÉMyΎ<<1`yAWE\1%jLzzFhK@(1NPd=O 깵όOl}I\e$ !NgK!E7NA3|!&}@\ʓ\4a3x*\u|C]zZe~˥Kp纒 Y!=] ˆ*I=Aԫw0!P+`jqHMtW6B_hƒϢ;7231%(`<]C2@ aG_Qڗ /F+^?Wϟ=aNw KC;$zIbDưI,{wa0lh%;zzT߲{^η聥?R Z35,qe3nZ钿q쪿:zKD:?n{즂HbcjEc -3yx1c39_8I~8˷Jhw?><3yL8MIa5Gdoʃ1(`/ZX)rl"jz[s8׌P# >Tjó7y>RgWx)m(dQE֦C 0z_XOosҠM?,oBk;͋h<[;"Wg거M6L=B/_wF {Rv|zο_w endstream endobj 2618 0 obj << /Length 1522 /Filter /FlateDecode >> stream xXK60&VMh\nsHz%VV !)[*6@e! 
㥃~y AB ՀQJ%o("φ} -Fwd0l7:#EZa5Qæ=eme=.ӦRhܓlҤ1(*p"7QՖA(Ϧ@Z }Vsun84+(S!QZzȱ_~U;as`2ģQ֭Y]bNm nl4*3/sU !m1Ssˍz'2~YƋKP5uiVx}Ĭe\TLȣv-,GG S);MVP"͘B%0KP AY^@I9=ubQ:h+-ZHC8yͳIyҝpQ i琫90 ;sעC' hU']@xJgM>o:KE#w,y\6[fA&:GZ{B1q1cW?ǝ3Rj\ZIusx>yƝD͆g~6W.:y,~ۑ \f覄U;^ < endstream endobj 2626 0 obj << /Length 1657 /Filter /FlateDecode >> stream xڵXY6~bFu>!mE=v,Ѷ YrE*ͯ ^e޶OÙoR9?/~Y b'%iGơGX91$bsS8]'o޾ HŔxqЦig(on/(C{Q=ŷ,sDNrNGEG:QZ.M>Ij]^6%]}Cj'/sk =C4qq.u N!`E3(#g|EޖY6i $ΒgAsW"=-$9 p43U&Ni҄oc `|&VI>cJB}%?lgaԮX?\^3=M" gRya2++q AOǧ] Fق̡PB!"릫 фҾ`%oM|5UT5OGi6{φTy%|z1_nq=iF/'5Ĵd qLlKN;e2ꔰX +oVɶ ~oALuyѬ7ɼ٫*Yѵe=٤]G"}q1o?Jj}ZQ)=])#40q~>ǦѨǣ^Gݾ4 vtx落j!A^W,3" cC~>-'|00I,]fT N\k$ Цp+1gu_?3{~K5m^*^R0UyFKmr I"]}Dz1s de`Cُܺ`r>ò֠ ^I;G|U0Xm1S8*K:%uY=[E8U endstream endobj 2634 0 obj << /Length 2674 /Filter /FlateDecode >> stream xڭ]o}NbD(Я=\Q ЇG[[Y2$yܯ gHQ"@Dhghu\E?/\mm&aGQ(lq\NC؝%ɽrv 8O_aH sl#rJ:_FVeҭ\v Dicʲj4>iUn puDz8î42@]UOOI]4x2æ\fm$R&y|ijfqf8!<qy<\WU ) o;PKoTpImӁצ}K ]g ,<\=lJ3k-ш#F-D G{[WgP2Ȉ~ЗNMY4Nw46@$2Fq:9)(])&Kc>nF bq$Fo+Rxk.ܲwƩX x0[Epu3-m"Frq/D2N`'mk=`,APi8sxU @m*ugH h3h#'V&+^_2xzjkϳ7>:}F"Sb~1'Lv`מ2B"W Nw(RjNy>{jF &aQIhygQF^/Y^.5{k1 1F3Q`o K$qHix[R̳PX w89TmHR$%8pD1WFOU8|^JJR!pĕ73R؞Qo*ZM|SV ļ@MX"(p+);ֵZ)sw; =Ӳh._ n7-u"0J/"@2w^j&`lKȶa(i׵Oc)Ib~Fpy> stream xYKϯPeQĈ 8}c>yu"(IxPnwfeu8: 6Mˇ?|zM2& _r/Ey6,Li̤C׫A>?=Hz:RYdw O.fSyPw-0Rw=-QvAU-qV6J'^a]73^j;egU.sm4^ =C^]:\ pvKqý@ vQ= z_h|MР7 ~48L 73P#1/rA|s+j؆tm5E g6A)z3f)fػΗtsxNjZJ@pS/gO$gs*.~Cכ鴮#*sk"6O0"Ĝn?M"s$3z݈"FdokڰOJ5goK? x@/$nɋ76aH9Q9ڳ z̔3a4|v#0 % Q¦34`hFyQe}`dO~d9x%I? >VܚmQ7T}5hH76HpmKMaF<9wj|ŔDruH@ǕWGja^/Iq8?qbY="z s>Ĵg0@$A}l!tR-e{EI<ͪ&] j3`H_r9+%t\.f][SCOE6K1cU1AUwozKǀ'0Nb'Af/-U~v^c"aL}<~Q 1ΏEO4RG]"HxտO'i!It^Zrs?C-$?@Z\xlw ( u-`k|iI8^UZ`JmUgKp endstream endobj 2651 0 obj << /Length 3230 /Filter /FlateDecode >> stream xڽZKh,p"%=or'`MK=D-vؙɯOH=F&>"*V*vE?7x\+=d8;#q-w?OAt]bf#\ 0DW/;&se`PLdnd!zmiχt:=mY5n@B6/CQ*7v퉿i *J; UIK6u[!m b{⻃3iC}|8mYEb3B/QKg3tç;;%SQy}jXPuQE3z6ZuՆȣ%?Mj6 F 7#i%mn=HPt\<]@7XK= ?ԍL@]E#ZW8Dga}t|-dB'cSDxY{7"4髊 ~"uh]TᏯ֐ pZfa3Ee$XT;P_)gu*R8!""7q,N$Y,)t&<w1QZUF+//ŚDkTH򉮻~2mxD[>ygWti,5oD\}@rbKBX: =MޣK6Ė'F$r7`|2T}ltcLR9}<0 y$s tTy{*,?`IJ.PqCsuiBw Z#$e D'pY6LHμÀ_H`yKTcfꇱ|xM>ꛈ#`iKcp.|ߡLTt`l5v=֥~I:=}mb !]уeʊ^wBKS' />!qfQgUSYoz xF/MؤŬ8=*,*v1Kf&+c!޿: iBSpUmHDl:`|˛!JUdSQ(c?pH2Ĺllu+'߲z8KO2~c'я>8gnBy.INۿqb#_*=)#2EɭBs7>ځe]7”t r Qqp7B4hu +rh>UTQ41JP63: 롸kxilݒQaQT΍|cDrŖn#Xq RmEG]y[~@f"6.[j: jH7l᎑철&:P7N5*(G6 $w!Mg-̚3|3keOot<+ky<TFg؉b2E:Cq ؙ`'\ƫ 7?Ow )CEe굷mpI*qD]r4$%6X#m5e T:ܔcōp,"+8q4 ]Bn(Cq`Όjp~ʂs5-v%Ι1LC"e,4p:N01r3&D5sڄ D0rvypιN8u\m_;Q}jQ e;4Zo M b( A$F.Mqt2n&,;,5p -spG>Þ+8|^OZUFHdZV&ǔIED-f!tj[ڦmgl6fQp^50H^/Tqʕ9 +T?X^/$(8f* dkٸ"#V8|~JZhLt6.6Dg-y 5[Ͱ)JW c0f >-Y1Xt5_Yw_@guǣ DxpK2TU[i8/OJ}֌8G!3ElÈ~+@S~bT~9wz_}؋mBb˰ZGٌDźR3P@ Zl8e wsi2ЕY\AL+J`c0GKXrbr)x ¶X}AտGkf/.,WvqKyD+3D f%ˠx=}=c̑5eYc xɣ)'"z# :B4<Gy7yȶ|Z[ixCpHmCit0*$}i?}e„G[iı3_bE06&-6ZDjsoPYq>6Qf,ng{k5oϬwDl8։(c9v0`Oo[Fp HTtW k9WD,#([Ӎ^&uBOC쓨ݣCvfƭHֻۥW!=((v2y ?CW?߬VO4*Gx? 鬄?5=ڮ#{jeJ~0D^D.`Iɐ@mSiՉ8ǩ-Ϸ-~:X=8+Y,bZ#T6iB(J&z> stream xڝYݓܶ _qT;DRI4&o$3J]Zi+J^_dҗ?@~nx o}WE~w#PDqziW72S{D3HK9];X %[u4 FֿUyFhLEöp(E;uuȕkKGsU'^SSBH,Vh+/ KFf $C.t!Z!Y{:\ [0V$44(fxHuǛb7* okh ~ FRKaaLaUӵ{ӘAFrhhzԡƐ`EIp60X:fxAXl`?\-uf0} $oKXgd"5{)I`\1JQ MF̼j&τس.t +d붝cҭ_;eX'860"lb8N 8qq>b@=x6SN;Q@-S@1Z Z5#0iUk{(]Ee2miZ{DwHS΃M [e29j -2u:3skW0ct >AGh"Ρ(=n0,#ٔ H3ZvTb7Pw-v"FqBhPkk2'H_df~;sxXTT[uD[P-߭ގ`&SxCОS򙞰H8^oo-uHMتbj(dwЁ:]F8kg}_'O=`e* ~5aW,! 
&;Jv\E1L|8  3UcF;u5@SMD ?1L,} 80,V9|H_"MܸII0w.`NUr> ̵H~ "C8a:rg S%+92ƓFo+uf_KTSCWekkD[%g  e]M0{>\yAuef U (u0HImwG_6lG% e.((*VL[4ú#NodpN4 WNOxp)_;JNRV4NN> DAƿ3UhVWa#^ -İL>+8^cvǞ=Y|LwxRyt9jpb~B2:_OD s" }#i6?R ?RS&iu_1YZ]E޲8΋c9fD< 'p)Pa a.\UũP2}H4>UP?fJBM aϔa[_sHh=ht [H#>˅7=>mu|> ك?)2t,t 6C-2ܭ]ueq|Ag&Ar iH0_?H8 /q. 7zq,nЍUԣh>?@|dHL롶Z,<">}zƾ-2 z[ vΦ:$RE8*PGa@A:g!W(6UWBܵy@2JǷvEZ:^x]7.G%R:x-(&W"Y)| XK*L&4A/:|WͪC¶q69\n))[ v2HfE20[ڋΉg1`F}՘- endstream endobj 2560 0 obj << /Type /ObjStm /N 100 /First 977 /Length 2002 /Filter /FlateDecode >> stream xZmo_yٷBP JT&g"rGvWB*9!j+$HmjĨ$Yד(fq/)`l6Jm⤊Sqr@RDfv _b/|jDKp1$C!KJnzb`G"T X2j &$#5؎&6H,m+\Vc*4x[-

uKw7矦{1-oDop-.czyuWl9|shH:wﻶQ[x`׎ޚL{ۚUb­=诅҄qr Ub)?&oeP*R\\n?4g ,/0`ިb ٱEdN)'pn_;5~|H~m8<{l%Aqt9ᏺ#q|-u|}rys<]~sU|ѐIٷ}#]9sck3'Xf4αuHt=UUOKbOmCPmðaIWx=gEye0'S[ ¾ƞh;0O˫[Dlh]ԧ]WW=>Vg0zhGUxƲio(9~8#xb-OYC/Ow=!WG~DK*m]yA}@Fou.EOu cjKwƅ, 8T up2mBI3u AzB$xBυ4B)q~yy3mIeʞaH*4^F…EG;y<oFЍG! yO%{Ng>1'poLL94n)ߟ Q[-/=,a_۱_ 㕟QĻr9pC{eEhsz* frOmm}Nb$l uxa,}S;\hp nx4Fv6In`g3%(F}0F%N Nlë=OS=r# ooQv7]ւnƍ6FYGm_ټ ڍѯ0 _[Ri;@cHV." PiH̵gà[`/fUbZD :1D U>Uh.ʔQ|ۯ)}̡}!r7/VQ" :@1a2c]sL~ 7}?w<{NAxoΎiOvOȎq9in0Rm~L=?L[ endstream endobj 2669 0 obj << /Length 1179 /Filter /FlateDecode >> stream xW[s6~ϯ`>$S"nlyd;DW$N' 9G߹r_~^]}Vȋڂ OUn}YSDV_>|#?2Iƾ"r:Q;1ͧ  v%ܟ-&`1C8c^86mMĞ5"y[ro* %jw[8fMNGdS }7fm tVs R>=ӏFQ^0t$x(z"q$AީEeJp, yWQ<\SYNyhL HDsԛa Su~TJ-] 0:F S x:"lּuff^ٖr\͝83v)=LϘɘMwEp2>I|iJ\ .xkB9Lݕ-qYHA4SoV<%]c;Flcј"ljā)3UG3.v! gm oؚio2Q Ok/Jʂt~w}F '!&kt)&DG=;jzV*덾LB )|G? }rhN;9(V 6Bj`{]RM&BqACA>.~#U1fCw~$=FS GR;=#]*iۑܔ u sL2<%FϔI6<%]4=gp{TEG-#{$\eFךҾʎ-g8=> ֡K@hs" @x7p8OHqahayd:` endstream endobj 2675 0 obj << /Length 1414 /Filter /FlateDecode >> stream xXM60TҊVM-zn큖5ZHk^RP-i ș̼7#Q8d6Zi46#AG37gyO~,3LŒ`x*:2 p"6*rP/?I3Vic~g_7>M'AZZ7eN};UM-nP@fË/q;z O6)ixNX3|jیZrV)VTUѢy-JK?$?>=1MUɥ Of\wZՑ6h6jb-˱© H KER[.#0L]lz1AO,f\:%{[ ;P`qY:F1[?qFwMz\Dc-Qin'Ըz曎c :4?8* Iɡt+)OAx&>oN[k;@vu?Ȍ  0t&:y=sZFQfGIm& @mSjnWvEwC8:(85㻒 ;d  'i#OՎh}C 6dSC/5-}Z+^ҽ;rGk3eIl\׼*ioHAd[LdYky߻@ウ=miK ̿鯏 j*2Y^7nkPWr Qi[C\7/@Pz)GL IMRuY_a׷7Ie feviަ`LwȔ=[er젫5X=Wa@1KkHşV@z*+dQ Mw!c!}S$+m,/5ָBz%5 endstream endobj 2682 0 obj << /Length 1454 /Filter /FlateDecode >> stream xڥW[8~﯈$ZѸv KѲ !V;S؇Oⶁ)Lj_%tV\sg{ {oKF%QyG0F4#fx]7:+Wf>5?f f Y^7U6:]eawgS+/(@g|fά 40*v6 $ (yyEҋOonWڭ{lDGX4V<%c|..CY1VY*1"tvMW,T]WصZWFsm_:7&}:ئV:2a*"+Ff7?GɢE$2x "Vw=sߋ?c ƒ?jmƝJbe&0<̆F4KLyLLn)eHji@6Ҕ\!;"GdIϜP}omRYw$X1/+h]ɵ- hVZ z*{D|#^QmFY%`(Vc_[. ~Y_}]kň,F2]6m$@8pVTb/TV#_CvzvhS˦&v: GjN!ΕKVNe3BXSAH}$z0b޼*Y w %\kQjviWWfә7&=ju#/p۪ps̆6}i%(8\v H¡Vŵmb]vzn9 Kfg"p endstream endobj 2694 0 obj << /Length 1649 /Filter /FlateDecode >> stream xڭˎ66JQԇ-nOimEWq9$-)f7p8{Fq|M9Waܭ(N,4*0t7I#K)h$׼qڣv fқOdܼ"'iD7^~={pߟ|Bo'et!t5K>jI^GNr!n^^gfJ\+*7pscI]ji kAeO q0i*cbϛ5{2Ҕ R!O%WQ\ v ]ɖLzk J44!jZ|-oz _^]@spW6F6ޱOrzMNrG;tF7+ZO jwDJ|bZ{ MmUml5/G_vA"Q7&Li֧TFx2ȇBT M0QC,vХA im~b7wf\B>| sT;'RŲ׋S_2'b/ȋKu<4 <0ƾYd vޒ50pyِ$Tf'sVk"G`x9ǥ=./ u'z ūeNT3I|r//tRU@WQkwCX/_LmW-m6Z!L V-$=z$,eM }|罬5Y$ AjN)l;Z-o^t{KMvWԺ ^Җlt5a &v6VɌ 8QͶܖD=^rKZRvD^D83)JR>\vDn>yBibد̀ӫeL22۴|ɼ(V#T>%1K?GL^~IPө~Mp(Ic/+ljy8Fpa6[cejA㣬QNe(R}]G#5T8Sa&`5?i` 0PnMϣSfJ\zQ_i8a,aZ9B2a }!e#bFreJ@)@Sʓƨ(EMi}Z_m** pC)3#1T@OLk"j5͑5B+(@Z綽0L8=`ugV'\'2,5߰R d9b {~~HS[Pl_P+›tQڱE}u 1 j0Q!Z endstream endobj 2704 0 obj << /Length 2661 /Filter /FlateDecode >> stream xڭYߏ~¸hVpEE}iDd-t[ g(S2woZApHg>jV}wXPIzܭ( E8s!W_Nw=O{OJVQZj־uQ|vUq]Ȅ2mw.E <`=Ǘ. 
\ksL s Ufh&LDK1?rR4 cޮ#\@?|<)Ⰼ[NB'ū4< U;(@zq 6_;KJh|+JD tx#&a2`:&0/B.Hf+eJ͑ >M g]+j&J'(5Be)=!#_i@Dz"5X.Ϭqt;d쥁*,LaɅx‰a'e7,DU,2q^5CM!RնB UFQGܰ @Qp'/|Fs?htO-PY۝YD8L;(fjYєNtD)3?k0=sq&wG':!ja"U `ˋ K#"EZ.9ƳCiLfdrNӎEad$Qv^Sh_Lpثק9/j`zCV_؅n"g|sHI 6NSs6/d^Jy敥KidWa0x( FZ0xT}4NLR%,YI243&!)SR.s4vZܼl!$C I;tbXY<3h1%%ey~7"Ly'sP5+LriiZOi}+T9 E޿Tox?dBc}ÿ*ny]3ej{H8~JPD)XM>Gj)-6S,vY.l<||ßox_mg)ݚ> stream xڥX[o6~0 HfX7$ l}h ۴V QP(e y:Z͋_^LotL01["J (""x6[>Noʳ$"ܦ)ݶ?rc!Ka&ת99X*H&Y+٨IjN"n|?aI{EPB~87UʛUF&/p\28UͦZ(A(M 2kkeEcۼiLخe7{$"Dj $vgxdL$Kڗ+9g)wE# Fc!Q,Pܮ!vqvV۴v};(x*Jʱ1 ~Ԧ|hhGP]FKG?Hxޫgr׽EzLGa/ +!'j~ P.=4jit|#Ttp?]]}RTgԈΘM.iy90 tל).u3S+V e~.X71L IgQfS#]~skÍ=Bm8rvPuZE-)E=_Q$}9OIw4iV*ID/<1_Ajt9K9 ٗCL R ܑwXܸۺ#cBE aYiܹ(OrfD !I|U,H D( qe,x[Nr\^VV\W ],P+V`86_SZ 2/ AXVx\hrm(inF/`ړ$cs\<>OLmb=΢$EI NnH'|vʂXĖXH=O,L?ADVI'xq(fNxɗ֣y"d(O55HDX*}!%S41d`db tc2(a'7cD;GojNiJSH|s#GJ'c=h +$Q*_/.L5"@F@\E!$ v&Z*ugS+]C![=_x 䃄4xqqa3zqXTk|= ]U53;pMvX}^-\(cv7U2Vk[}Ƴ Wͮ2~4Q~WF5_2_۲j8tveVBVKU >AKgzHsBVlcZ}㊉ADyMr:9knpظr'KUnw[oZ_ͱI~~xj _Ez݋E endstream endobj 2728 0 obj << /Length 1500 /Filter /FlateDecode >> stream xXo6_!1KJ"%C6Êې-0ZlnrbC0iR{y}A8ΰn^z iԏFXLFs퇋`t2,%Wӭ])U=vKPxB]Bg< nr^~ihGW`޵V]gzn>k~0a_#/wn]r%EǪ::La2t-2}^:;٭]C[^#%ZWX(/B:+=W(4.o6v5' E.YV>i#>@ʛbaԅWLͶrd-΍s0,=a.te RYaܾëd!&Gq :Gwo$a=$IL9$p76 ڶi=1X4x{1v25&K0t6>2Jj0&4_ Xsiѥƃ"1Eu2x}'oE 鏉c$;6цl jj+&W{wkQ-A]Mv4ݱZ7N.dW/(>  bz jHcBF*ưA H zUCrׁA]K}9>A̭W YH.o}Pʲf(Mӧ˩_ĬQ_$ +(=& J崪$vKb1EсI(,CL~PvWVz鸷EaM3eD/suPHXϮK|֏踬g +鉽7*ҖԿhϥ%~ "7hrw]kjYjWlf*!W/ wnk/3OJ.+ї Bp3gz endstream endobj 2741 0 obj << /Length 2517 /Filter /FlateDecode >> stream xZKoFWF滉 Mv&"l ! 0ْHb+>j>dG^>tWU%nV}x}R7x]yJAs ]=_/U=(t t:Tצ~wJt:5\ lΒMoW;{Xv``w:"- Ouw4yR)ހ,~oeSWߖF,S3q|^>yv`5F3uWn*ȏǃ9Yu2q y^`\e1 |6-W<~82fc >e,mEhu[Qȫl@ :0Jk6oBO3oa1o&N^W.f79hEHيgMM_~"ƉB3Xmrʵ*7z~1V揋U5*.nLdL R @|:LL:+p$^{ؙSp8Q:_d9C+vZ4%!O@!'G~43wP{бvfEg'zV̭ͮ'^R(@HHlzm0XBG6;S ,=H<38^Rr85) z"g2zN>3~C1:їDD/AǮ c %'ssI\ < aW7-i Ρ)PQVyu*A/lh3dOU/k WU&&-n%Gs=MIZ@Xemdc"I:AZM.3 K˄o@[n˰i|"S{ta3M<%h"-RzHSHkev b->!cfP lȠophy5"&& WC^$8S7؁- | F\ҋv"]otrwmvlLdH7 .nfܼ}(, 7lm I`x(Xfsd"~@-[[P;5 s!Jo?-Z $ՉFNM y+''/[']c8nQnQ5-g_̝g5W#Vs L h~ZQXM/vWemr?i?}k\y!6|yy^q\G]S 20`ø/ ZF= 0g>NՀ76x,̧I3ّCɇͯd)8`5} U\V O Mqnma*b2Rqݑ & 7YL ȆQJ3ATi([9aˌׂ.!eIy1#/fѐc*o Rz'ΟKD]Ԛ2 η|ovY6)(/[:OޛAI >RmkSBCN2sf$lL_Y.kqb!ܢANH" |[Z]$6&tƼ.4rxI$k_K89v0>.nMAD:mpMɾ0EgH qÇMٷBpM@qqκ#<4? |4N*B#*H.tF]Ya|{f2AQ ߪ+O'/,Uʦ MMM T#7\Y|9q7F26xx,D endstream endobj 2749 0 obj << /Length 2810 /Filter /FlateDecode >> stream xڽYn}W  񴻛 Y+ȃA8d,%9'_6lK 4b_OUjFo|՛IUhsD|v/7y>g~8Ma,3A+-o9~k[痏ڗUb8RibUe}{uî7{7M+=w"iN}\OU ~*kݝ/^.orP[_'g u LÐ\^vנ/klaXBx3wl㿬Y?Y\޳,o]^c'M͟Ŧ,_;pt/Z]ĉ1A#TI 3 q ǏC/,]NЅrf:wٳ$цs؏=0Ǧn tٿoa/vUj %GIw>XsGܠ#]K2XޟEНU]#C:@&`e7207hmFv_ƀۗ ˯7%Ma@@Br~3E;dcm389s $Rޭ!VI,G+wf\rbxnh2_Qv}VsT/>6q̙w1B޴M]!pd5s|AܭIUs[s-i!NQw,t o!c H44TI{'eEAN 40Np'}XkB!(\1*& Ծ q$EckKٴw?3W<54MWb<'NԽ0! ACX`&fcX&rXسot`(Dtg]w: (Wxv8G]6 WoTGA~e2 qQɾk,* 4nFX KAWUfU85h:Z*)qEͳOT:A0.$y2U"r¯ _W6ƀO|e>`tLlN_:ٳւ#ew7W]fc#l|U燫MD[H;biXm>\ks t48,@ΏFn-k I)7b,hp?x@"B}iQrG=_,$vmfpe殬!mW`t<焒o8K3GTS ON@3P` tJY%eq.X YR'f1X^{Wu c3s8Ƈ1jn}3ME*ORz)2Ecџn|`۔Gӥ7Qdt 2×*(V6h,UM+,Q[€۱"հG5 D0S2~¶>zݳʇn#y1h)gq븍ƀvlN*i)x5;}iA 9דj#s1J)"m !h[iV ŭ>sT')*jLl$l"%k(7O{#7ot.B` ДrTR@0#Z^z ޷2NӉ۬ΪsWv'.VY.ex I߫v*.Tfr9<G">p]J_^/+"QDZdogJhdz;CI!7' tJ+y~xSO :FW,se}VJ&k sKN;- J ewc/;ſ{'~61 sCA4T8 endstream endobj 2761 0 obj << /Length 2772 /Filter /FlateDecode >> stream xˎ_!Izgl`7q`؁=N(5C2ISUP3;cFfuw_\__|w݇ ^"tۯ*VJD&]UoswB3B5I$tAi*[""7:EC?jNv`%"%\߼["mrC➲P%ntµmZ^36;J_l{;oMڨH:|=MWKN3~^I"E'JICzR,S`0bW.)묕k Ba<,ulqZ[F n+E2-otxuj2XUy٨Ԭ"[]&6(CsQr.CQ;zWWԹv /ꊞٍI{u6'(BPCR" -rIPѧ#Cv>݁&CSYT412^[58`:S欿g9&o:$3êd! 
pـn`=L޳F@Dj0^󣰡e(л8хfď8r܍sǑ=X{b`TY~pLPhhC3B{2pۮ p}on/m]N7[-)q3#+ϖm w~*G[!D}>kxAYshsJYby\RJ4.Efа sA$Q!Ro[^N ꃄ]B9=\E?G= iod"Dҏb6Gn۞*'[H8O tZGS@njI Y(ΚMS7>}I*°/!?/ܧ@>Ap!\yRGR3J(hT_^Uטݐ&C뜨(:Uwv+e%vo]+*_ֶ^>gcE Tc7lmO+`%u/ۅSBAՇ, hP^giED7\p"B]J^Cբt6U@3=N-=J3՜\z}b3V(f'c.Z7^eK\G"Y ߮0Ztܦ.JS]9e%6'u\$: CQnyR1PcKǍ䌁B0"IfGz,u$ aL_:! DB@NGBRtQ*R"k OFFK]JN.gaB'`c7͵\im̷0|Q^nH\Zy'0tjbGk}\lrkm֘ȵ—R PQu)¬*.FC]z@z8 R>W±n;Z^k҂׶;P˜ַEOȭc.&؜Z ֱPC.1SK񕣞M7[6PmP `"DY";I|bOs<| %5+EWq"Pb, x;U,_mFnn[:Dѽp+9(Hz,  WY۹fS^`_})I=+C/2DcsV.'4T@{e{}|d;mXќY4j31Mzf ֺydoMzr|~S OaJ&_?Ho^A}۝TX+ǺsgX2%/vp(^Ŗ_bOM IѤ _ "DDm8V7C!,2չŻjDh69rK 4H!atNgӳu"8ؤOwaf@+1(8%z,36&&OG㔌5N|懕SݢZz4bV9f<-Q;u.R 0[ո+) WAKtTSUPK'N#sPHaBuL%k\HB&ivj֎9fOZHʓ0#32`MΝ?O˦ʳmSQ+9W~:qfA5px:&Ĺp`PKuNԜc&]~ a{п%I6W}ʸgBe KyQ0'8ApNb>G2R@Yǃ )贗}kK0$/qD"\=ST{?= endstream endobj 2775 0 obj << /Length 2321 /Filter /FlateDecode >> stream xڽXm۶_3)5s_=NώxΗC IpV %3mEb>t]ś'/\"%iG͂QJx-bH}U%^3iF$NSeXQ u_s+'F|0GңګJT*g3; A8|\qNN,pmmZQJbr8%&!4eT}.~VuDQ|fGFC|PW9)DYQbJiQ6ROߥFdDXXd ^jMAS);UN6dgnOFgoә0nVvXNڝWnu 'ZJڛ|UҩiYJ\2늣5B[`Gؿԭ[c oZvDnXfN.a/tB|e-bZx5O Ņw:;ym 3ugSutf;L66;1R-D+6MCl38s7}sӍh\o؀wfCA2\vC 8](@3OTx@?J3 SCS}'gHo к/Q+dq#1) 0uza96MC Od#d"26bE1&ex,f2K魾 }p0~mʑV#`в38(n+ ŝجr;ʲF9Nϕq*HZR9abܑW.[+G>0]6M48Ka.@ eWi9D gXbj%LKXw ЋzT\dsTERY,P.Re PQ2uk˹PL}B18+e'OVM[InjOD0E rͻ7Ov]ׇÁ +乓.Ls'7XTVŷ!-#CIu8{K,aznG#z/ K`E֜lGc|tsdػ7m:Sx!;m6a;씖42$%KQ[o s aߡeZL9VԦeѐI36OR0MיZ goݬוNG7y7Lޮfaf0gtw 9?ıUv«ۥOv93 y%*Qr3PbWwnx'J#J;kuc)%J ںt$DLz7ˍP4c i(@c("XE 7twDPYn#꽄:^5* `mmsc!4("& B2Z7F[KS3&n1{ (?Oҍ"tF@?dR [pK&W+dcaOX6$\3c9Jr$t(eDC47=8dRu^vʆBPސŗ18_~EHGD1Ahh824MOؐ&Sԛ \z^4f"ѷ8a $?A~SEl%<ӊtmقJS݃Ce2tYWQ2ojbPܧ=䄆fFvBi[grۮkUOF䶓{#x$4 ^}6#}W?={ h[/^2HdcR!'9]2gN"PB tXJI9dmkB(aQomU7šP۶y"[KP]7(L!Uti:`WOo endstream endobj 2666 0 obj << /Type /ObjStm /N 100 /First 978 /Length 2069 /Filter /FlateDecode >> stream xZQo# ~C5HRwaw M\zNGMfv2NfyH"?RCJd ))c1D_dM2D1 Ioě~A09b ~b]W!0xE 8ge#WP&eh|2f(M/U 22Tե)s+`H($D=X9FQa0RT#2 * gKүpM Q$j8dȈ$IRI.kGt3>FuF2X,fV)KȀ E$3 FYeeN!~I=Xٙ+bb Nsɤќ!i#򜅷^daq7|pu[:.srbS5vi \ | k~^-?64?xj6>Lf-6kş*fҼwٺŤﳋ̙.'E5 hl8j -Kp;kQP(x?uH:e|Ϧ>z~8kSÙ9

gϻ?H9Y  PV8L; t\8lVx!/ep.%wDdgP*DuH`4 y(օ򩏍ͻ*yw^.?ܮ4?7boG<= ͢ .W\:w[\Gb#^Z4>v?#ϣ)[/C W{e/5' eͪ`bèQ~CH/2! Lpt<>(DjG0U8UxJx9:£Ohx)~jKHME,cRSOMCob8r2N8Y=Ȉ TJ)D|:?Ľ`ga8#}św0=Y|[ -v]7>0#`=n qHk#ܮ{^,Q**:bxEQzcf5]Xi{MsyMfW*=Q)sYҳlV7x>H{ O?ZKH&R Hww.n{Am~)|j|jhul#9oRD噒}: dԼ-ڮF КUbtȋO!tauH2B"n=OiՈ:@h #-#ٹ:DAxvo$YW8}Io)r\ؒczW $ yGqጪ$[%.x;]##C{-5 JY*剪h}}q74bԔZ(q$/0Gރ3x4ީ9E!Sj}P co"+K'WdiⴻrHƽî^q)wb73нo 3"T x=2쐠]"> stream xXK60ݤ+EV9( Z-ve!)RIjFCk7䆓$|b!zԪ`\ :]-h\NtkrD} : =cwӇ%?#ʬ&X;.)/M9mRo3L UQ)(HQG>碗$uE@ v]i yQ%)h=LS}6g뙶NQ pk[nSH>ضl$AUka [Wy7o29 [l^|rϩ{5nFUvVQ^ZȦ^Й1/; )rKLsN@^cP49d > ' l|.LlliF2q^A7Qx1ch6͗mGN3Jo1яVmc$"P^xuۡ a@#g#" YТf=ȍ;ilrZX)/A'|.>#!_oف>](]mE{`ڝzJ⮀u]lHڍ]©f_QI96S{scEM쌊mBQ'TGn5 O6cDZp5YAT:kKs6P(*\/ի;EQ_h`k{BÑ[֣x_# sلwK\+/dzSlh\ȭq۔}XYTs \^!ԯ7_0JZ1yH>rr4maS:uadf8rP@, ym=L Fݦ@^2H,r; gܐfhGDk=}{ADd4vK[UGFD7kv PKʦm}4 IjI3~z[2ɜYQB+EyÕL28*>#z #-,("94&_Ԑ8sγZdVwWAr8aӝ 3j7l",_4f)5:&5Prhvݾ˭㜦m [ӰKſXd$"X:!MaX 8w˰aIiP8ڢoeՙO |:7\ck_CX}2[ LLY`f<E˙ Wi1>90r7}0'B(U77KfQhdlLŭQgYʥ"Zٌ:*#&~ |)(ճ'? T Rulrd9ʧ)e1M08$d-~Dϱp~ FNax7;n:SCYȭ 9=h*I/r4omOZ#mmz؎afTsXfM 4Oh W M|CbPk."D,npC_?*8~Zk:7@gv͌Vxm Iˉx(4[|<"S=!5$p:=dPpd !PЈ8!'%]X ޖ_4Q}SkRW01؈WOAT[ѫ6=m:靕'UoxJB$kݠP+v .UtoRgo6u w \#PT/ fUV/.hve@ðpD8HRڷ7P?&SY$8RRߚ(Qn]}U4 R(cwP"EMe0nC Q*u3Q₭v ra17sȲzlmkSP.4wel5}MA򮂳Ev߉5uW=MSZo6j&&p@C῭RZCƗi˭O>j>cNv>]G֙{3 MS|#KѮ`c4}hZV78sS=૳w9ydM݇Cq&7:C `$VO /jչ=jLU=Hbs~n39R].P Ɗ Ac[0tlة% I"L. %c}U '7Qs]] -u(#.7Ojo@<.7E <\,WWY endstream endobj 2816 0 obj << /Length 3265 /Filter /FlateDecode >> stream xڥZYs~ׯ`1S˒cG*<`XX`C+ק{q.HNas1t/n/܋_|/b*]H.B)E7ۋtFSXXiN-=z泝^ ӣUIӎ)&MRy%}&۬Ls |G֞;7{X'8[|ˆZiRPcSUml˽몪n1-}&C{:wY  }<,Fs2[t쳊zŬ?]TAj^!#1߁t0۫C"eƷ]GKNcWc)FP:zB,OQX79ϨMl058|6)Ǵy(LQC;)[*KLmUx',: :GNj348D(HE)˘֙bcІֱm,v@MwW $Ϩ`2q$&ςA8LC=8Y͌ 'h&AjԅF | V|Qq[7v1BxYhGA]sP/)}xv&;vXMX VkWG1-d݉.=wQrD[Z1 l܍|gH~8eY%TPn [Xϱ7Y$ƣ~D3942`qC‘jzAbaSR3 C}cWKRÊ.᲏᪁y唉 @ 7b걛FJx`Ė1/!DjY@ Wϲ = u_}";}ڳ3ͿJNDґ(RdFن Ze^F{ 8mw_r)/$7l&T)u~R B½]4T0۬ :^Ƹ H-pi4u.yDcbVe\$ ńMIm2MRL0X@4vzbE5VH8 wB-K ,E4DDdo:zLJcMǰI7֮s@1@rESyH8(r<ՏW+J9ۤ-*zR l]aKK., "SLC7)[^3/LZk0WهA_8{8}݃c>`إTй4﬊7ǹN\inMg2R{0:g tRn!pڞgԗ}l=ptLB>{DA)]>X3c  ?Jbo@) tCo(1n[>kxv}Jyw++o-Ooxv6Tzq1ĽCrY\!DeuW-F^NoMs__^N'q댶3r*7sAqqV3|[>w|$R{'dSe{A?e?uA.B$e7P;;A0JPRqdEw4T~n7|skcOj!"0^/%miT4Dɽf_euY4 \K9_E`%qZI!xXrABQd u &Y7/5 endstream endobj 2830 0 obj << /Length 1501 /Filter /FlateDecode >> stream xڭXKs6WpC !f[Nii ؐJ Iӊ7m[{˶,~[]+&q]fQ&X!$pc.>.ϯop (ȴh}터C[WkǷW~Hd85HR-.i|jzDag|*ΡpN(=Gވ&WMy-N^#ATN8q\_e=yc3 Zmx}@zj2aEzZ&DJMլɅYc<*+sBu9(k5I웋Fs}#hoɶ曋..|rCW4%&wqh`C~kǶ)-%1 '(y֬bţ̍"3&p0yvi2͵Q"R^̅9 ~eoA'.F3G&&0!۝䍱 |RPгN(9F! 
y@U77WlSrE!y_oo7k9V L^uD=.>=;'I 0P!6}$ Njib~.< 4m ~](gnׂ*} "\|d[)lpƑE]6B kYT}zg$a@8C0qvXHJ*HS=-\$Qrljď&QAğ_ͶI;"u7Y[%J%Lrg[4zpfE fF.T7V1eZLөa+qEf{m6(>­|-|_{(SICZ\r \qmZo2=Ԡ@k![ipYLoU]$VvEtS 2 K܋'   <;leK &!G#~_o4ǠyM$6 ZS#CQ5y5Af݄njE4>_KUےoӤ>H X34qΐ O"ĬI&h /c6UUFucG#ȓ<{4 arq*1'ǂKw$!OA5zym" 0&%u{L2PPw"1zaF/RFgd09rf(O3p|LFii')p"spUnNE\dzԌp_O݅W184-ui[z._9^l}ɷޡIo[ja=e4x0pgKp/0k}^<͙n8XD0`Ŵ\qŘ> stream xY[۸~ϯ0I]bIӦ@`ۢDHW=ᐲdCx &ŷw/^MD$Æ!QI9g7wϳ?},gSP0)3N:8Erh'Ȯډ:%/_&,τ?ʜmڢDEYp=uJU"z@pRDƶ՟U Cm:w10tEhނU,®RF$X$͎'L$:S:j۝Y`-ǧUOa5TW }8pE}4N ( >vWeA[w1ͩ X.:wmZ|R\!_͠~ f=րnИd~Zi`Q ]8-x[Ĵ^PTM^e7kd2U9X,剟֌֒;Cf ܃Eq0c P3*"ЕҤNȪ iʎ˔Go޾)UoFka~J+^Q41;kl5YIhDcI>sT- TѫMƒ7neT <vw{u'aP#Y5W(=W,U9u.C=끱-_jZsޒ;cD(}C]QD]̥~0g9I.k>H5gQ2XФ(IRkЇԉ7IWG@H1ZCKVnGϟ .* =+M)Ehnk)(;Hͣ}Y.O4Rjm@Fb(>"HPH t ": ُ #!/ miaȹCːCz׍iA-R讚߭8yɄy.Mw6CVjKpR *ul$o1y&="or2mA]iDJ`qA0lhI`ͭAPtКb:|,@ھHX78P[z0?_ҳR {~6ثgi ~0EbH3pBYFOs= R- qx@Le9BrVG@" fV^"0ixC|wEyҭMe TLB.D;fl# ڱ[tRMw+)7=< 5^2v>Pu;xĢ<.GMjLTѽ)by4A@ƉS^4WN* څP+~KS\ kewc*Lyr^^ԩ}u>֫XFׅK ֗|$'+WN eE]*"\(d=F%UY0I_#飶ɫQXUEti\"HΫ"rhJlAb_(w;Eù}-9t _o*uXղҰWGb)oTE18W,"y%B8?®sQ~^xcnC}Q'Y_ ?9ѷ>,ws%ӿX2qѼ޺%6gaԇxmXGa_)=I:w?ׯvDbLw|]ܘ^u= endstream endobj 2853 0 obj << /Length 1542 /Filter /FlateDecode >> stream xWIs6WpF1!)Yffj;!"! >,Eć^zmx'm=:{y9[,&w c/ ̻, |)8jwarI%YvNeZh+A' ,Rt7[B#;^ũ5ojqI-> 7 >ixtKKd`ICBQ^h*i6mgkʶc2^(i\15)Sl}}N8)Ϣd$xKp Qu% D؅`قD( r@G*Q8{©(D'J(`I0H6a#3u&͙N&g_IRW+vժljM3lWFИd%EBl@р@"_Ƌ+f=R!n?eF&qc؁r}D"a͠Θ:솺J%ٖO0Mxvڍ= 7j}!4ݨ-ib@βǫ @"]0չ N/i&%4FӉ@?H>Q9:zQv0(:=s  d}#)b"q:onYN C)ý܇ֳgϖ~XSNvkmYJ;mwcF:.wжgg׬lQ/dJ/:]ښ3w<<9"g@fȋ28'aNE ty<ҚIAMzAnkOM+;U9s ޼ZG5T։p pJ %ܭu|s淕sSr (7PtFQt%^ {`Q@#Y{!<I.f*Ch aQ$MuL~gc*; Y*1ftpZ%<(ۦuE/ZѴa<+ :/)9ۮt?~xpN endstream endobj 2871 0 obj << /Length 2399 /Filter /FlateDecode >> stream xڥYYo~h_Z7 aR??}Ԟmws:Yiږ92a,zҒŭHqD $I; TRaH Oj޶s[#*/]]gPcK%tf/}?۪k\]+: U+,G!5 =GqayvĦ.< 7T{>VYˑVVuG+{ڔru?in6/MKM͇sl:C f\)N]cg*T،[򐵲BO#îGP`>Ѡ@za8C |Y#yIa,SX<%uI^8BrZ,3tBg;MCpbY8cY׫s袛 cD&NɔoMomwom@ؐB+SM@B^dDm}^mt*=#<^QsIz :#$9dHU f{&,g{ t3{Dm1 ~ Ll3pupp&o9nB溁)tì6d7㊓jrFNlÓ9 }hHգza N_T9)}[ڈAfT}#|gJŽ}Q!%P3(AS`n Pƴ.$i@HccY"@2e ~?xLg9RN"]PAF&0ЙBNwFi`FG)fjL Q.Tܝ(L n\:R"IN_ @+MXG a;~++о/T!D\G5[XyjNRȠW:28/\!V9g@Gexn,9؇t#}Xᄘ-я:K#Cюt P88WbVE{ٓIG"|0"K% R/!s;!LtVI "50 5%"?- c̲_ln-'P|{6q[LUB{3 ԥd+Z Tg4|FLYk}c/KĂ!`Lh |%MPC|w(ԁca; b])G;lLWTm?_ag o"Ǜ7B endstream endobj 2882 0 obj << /Length 2356 /Filter /FlateDecode >> stream xXm۸_al ĴH碛}i{]mť(2m++K(mp3R9|(P)r8+鎖#wo_Loo$~1@=}q?9ӥ|g(`GVZos; A{YѪp`^δ?P17 `yn6D,nYeUHJ>cB<9Evj{1nQ|u]pL8⫊e='o3 \u[4yVrH-v>2 Ӡ5] U{M"7a0|eNެTׯ粑Ys#' i}t =X}ܪ4dXLi(?.7 $H E<æ rfLl!vYe 3DE,Q#s?0tLp Fn_Hp޿οLB?ym(seD!oifi2յT1]si5$*_CDgOt$$n*H ` YH  $MP IAс6H˃1jg>j4z:nrvu؜v@=h8EaA.nNKY(pz]U@&%3oqE2;G.C<$vUO$`Au>$6IzEVkKKM](,w|0`O=rK<9ԷԏAL!cO#XNT"WdzGBEZ$$ SЎ~u&j>o f}6& Tr^UY+v4z{a  }lkT $iz93Lv>}m@lg7<)WGZe:тXAF^ͬ@eDmj1|r^~>xᐫ뗁g[Z8" !mW9S&<hI)z!]T׭~[>lGbj%R~:?2wfxbs/mS亠u WPIr5dnK21޾R]QB홫`U;z 4&f EKB2T5|r1:q&w\mgt<_Pjմsh Εg{me\5q=`.]o3R r!a9k^|͞(lʈ 56U[T&M4pV-CP:Z S@&K<_;InAr`e~ҩ5_a =4kj=tA jm99@ߓ% #zM=Y8=@Rwg*^L7ly*ʦ ݒ/Fͼ-#=y|xmA|s+Վ RՃm2|!|Ng b9TvPFjgpj9.pf<1]>.$~O{ "׿[A6]tA؎?Lh}tjf46<φ^C6'fvQԇ~E>R&Y$fMwZTݙ@tRv-A+\Pkz+`Y_1 endstream endobj 2782 0 obj << /Type /ObjStm /N 100 /First 980 /Length 2103 /Filter /FlateDecode >> stream xZmo7_r2F4{pFwYjeɐKʫHTu.HYgyY8sBQ#)MU1u4Agb$u$:>&H4ʆK1D!+%^vtr.&KX(Z +_)tifk5نky֙!y&aH ؊.@CA'j 7lڜ5I}s\.kϟ?ۛv9r;Ml~vݖJO^ס/Sq ˄=)<P{>Pm=;q ˳S<">QGtuC&WJAj^!wyzz1d@s'(4;9,I@P4Y䮝^ oِBv0qUd˟4FÊLjd @On+:ǭu UCIvW+j732a/V[VO.s57D+X#=XCWJG2FQ#yG)lFP,m|ŚyKgTE3F %҆1R?R!UDj~j |6{C.] 
kL~!cԓG佥&FyIц_>K=M?/>^!IIAfe)dD'9W J)1h'79p܉$%}Cp#՘=vNn0m?TLq`^_PxU8Ж#o ޶ɼ?;{r{Vs =IM1ӀߺF}l*q5kPsUn$GmYwH.3ۑ,ZC;r<_#_yHWlGvtM9ԎoKu-x~ )M$KkL?_b=WOg}H=%oh9 endstream endobj 2894 0 obj << /Length 1484 /Filter /FlateDecode >> stream xXK6W+>DI&$(qOIDjpǿ IɒW6A|9;s.~\/oDD$L:C=p!R"yS<~}*xCZ)+?{T(R1 -< N+΄^0Ș1ٳޭC#|8W%O3N STL*ng}b$^ў IQ2~$tӋ=M++.|9yWۓ2q%H1ΎsZߚxf#qNA:>-TR#kn0&4<(b4Gi蒡y1u0..;mͫ?c>8a@ q P4HyEL'xa ^ÿϕ6$͚6!826"nT7 _0O0 @h̪֕^W@I\Lҳ5dK+͕fi%EiM[7ʈ΀7v Z1BLdZBNFF0Oe4$==weE$|R+$); W(|CxCԬs{>! :PKL :1/\+w-(c¾!ưOA( endstream endobj 2909 0 obj << /Length 2590 /Filter /FlateDecode >> stream xڽَܸ}_Z` ^`-v73:f%'_*)QjnUźhsDt]"6EX<6,B0| t)Y5{v ύ?řwUi6x$Mdq;uǷ&􄂻!yIXt< %nmn/qۀRV<\nK?Vx,mXԪdǒY z Ipq xrUqmb棘VUiIG;"Oleho)ZE5W C`iK!4> Dmsg8KMEEYz;%X5lD>t+Utl/,nQ}*Y%S`.A4$hI./"̀@ȲE4̖ۮnKw򕭖D؇*i\fm1aQ) ߨzT?8^D }\+4GŁ㏗+LWs=֜Pg㙇Ud)̗{3f1!蔊{Fݪa³y%N 4z4rf'lԂO%zV .(q4YZ8xDiOY.7`35~trS3a?RLg e)xԋҸcLmrK`o:M2O:YB Dɗ> stream xڥXmo6_臜diQkp8ǎ`-}Js}g8V圛;|>3̐jV߿xs&W)K#?ZoWU9D/V9&"CX RGGY5k^xv/(^~ BA⦾UG9:ZbkA݄]'wo^emKC!𝪷Uw4|eP_ڧR%5UKߌ>jLժﮔҮ:/eݵgNT_@$  vX8bigX:{DD,ē1<""V3?'Օ4.vzζ"k ,HVǬj_Ѭ5nE8kͻ;w-3DΕ֟↰ (nF}hZ5Y4 Jf(*`>vtxG#0K:GO W`X$!9_1a (vjucSCDcc%zP R!NRu4vSR9!vLPFI]}MBY>$8G3  rPj:v&!W zx|r _Ӗ#-*hHn4zP%S^ƕ |M'Ʈy|D^$!zCEef *""t62i?SHtn1DQUE`!Q k/l# 9IB*j?Hd A4D0Wf0[l{s42i6g~j5iAT+3XQiz@3v&CdU_vB Ct.`CM6z?S:=nnTea!2/pWRrAYH.A8h;lS7}Jκsb( ֺ `l RZmdwqGAԣ@@G*dFqN+p[ TgTѹ4ǟTXU[el^' f&X0y/i 3:zIk`n$qyoՆwTFB]\Qe3@$s0 S9`}2T+\N,kg[2{r\Iӷ_2|jY,H* @gAWW۝d/`%dG8BʋBo/ h3NPXG&QVeӉ+kS|9C0*N_-y J߂˒7C{ɬ`ޞ=鋦CH&hpzoxήqE W]GeXa |Pr'ѹ4aA`$+nFD,_0hgG^bƞs7E xg$F+cT/g0{ `{]\aqk& %~vfo_?bG7gb?Y) 곪h DKDGC/!J]#I- ]&b?F.w}5u\Y!@|1_O*gNE`@X?Rp7Jĉu=W`cN妈0, 캥ݸd~,$'(=KC=΂3ُaؘk2nbM` CoSf}AWi#4"Zb;\=WZ-(I$O`/rG9t}Rk1$aFB~oL/ |*e2rԗEИRyWP0^i ,YyV?IR!`mjW?٘[+(6S?z$\ .>5qXj/V/g=_+Wִݱ svS{w8>.n8+ endstream endobj 2933 0 obj << /Length 1214 /Filter /FlateDecode >> stream xWKo6WIhH$C-ݸ%aWTQ=w(8h{1gpGs֎|7y\\IPY> I,24g%}ZpqM1IB% jͰ(guسv#$zՎmʧƋkB 6v\?Bal|)mռ\kX#JeVdfYY\Ve¬*N{``E\CI`PXqȥ gr[ɇ'fS1oRm ZFK<=|@;|]~2]kF}CV„;|icޔϵE4&,ml3>.s* =66=qYE @"Uoi/LV?t?mEC #^'-7Uls-l]ECU hQ4U^6Px>2VӷmY~֭zV|v4E4@ZdAm?QsF!~00@%;mZ`onr>Q{Q R'L>~ >GDҚn ,QrN~~Z%B;RxGxA4XQwt3#R-rqMZ\f8PutOޒu݀;gA3UK~t02w-RM9*cZ|3Kp sm#IU! 
셈Oɓ#7!("k4AZ!A-l_=N[:wBb @ %mrK 4٢Ԓ ~ RDG`B5 6> 0Nfz.#Y%*p8cY6}ƺ)AmJլ+z-;Z99Y%Ѽ}x(8'(P'rEZ%`/u-학țWt"g>nxvДXQaUUi##<9 endstream endobj 2951 0 obj << /Length 2661 /Filter /FlateDecode >> stream x]۸=b/,}Ї)R-]m %WpHwbL|nw7>D]D&w;9SQr =w$O| 3DnfcpwT݃JAT.g| 3i}6f c{3@Sݔ4nCi=TmCv;!Dz xe0}J28ض ms3sL2A$, _j/pbtoo]{D>tV[l"BB5ؕhjD@M8b׃۾oOuIcpթ7BHcLIYB{`zLWcoqgI˄?!~3!D9T~O:ޚu^_Q.O0<_}ScҔov E%RͥPd(f<7 Xu*1qεy:sugw'hJ-hOS؝{ƪev]*,R,80TXOUtش5шeB@n+{G_\ZXO֐dPRk+'UW>-?f[^:4|]xee _M P*U:Fn#7 4?؇xlnǾݼ*h2V*H) ݽnp7tp_h,><,s Q>y'0O 4af=C[[{#dau_`o71vN Ĝi6nLҏ5bN7͍GًI¦j@/ mo `c-<ƥL4W KI>qZ| r<^,ޫ `{b@ǶEHKTa]A6`zBvۺ""LҭbŔcŗ%YjGec}#̈zT`G[6).ކmq3Fp@\O_^@lv2D͌$LV{0pF%trـZv]{?FK[?hf1=b>j/w3,2~E{,MҙN ~gjKԅ1D2u]rJ桃2_'-[CWƵ |r GyA x*\ r_B *GMĮ >B*kV*0鮫^m*lW +L`EҠAUn*01fذ@ho*~BZ tre~d/ (IC?ƒuPaP|?9@qhhq;~wy٪3[fP(C J؎J)-[`X`1C vUM 0`:-n~7}FSЇOэ2deC[',ݗŐJŨIZ 3?9SIYN49'ek:珽A(c/C5\Of@,"{&qL!җ֤ x=kKkd<Agcl?mMMӑ4o0Ӣ-a` \T$2Fl6d=BW ԖέkrCcrr=s{^vߧ وjfF,K>"QHpb~9w ,=H2yHqWsiof4[-n [20&&|n4e223wZ646­9-xZ1zG2?uf{b S}%3[1INɧKAS*8h TҮrcknt)Pt;ARB)qsrP!s>?# qyZޖe TOϟ_HGwEOLzPa]k<K n>7({3*IY/U5EU*@䊜i^- le03}ʖ񪲬d`Y ־)XM#]%\ҩ^:m;<QSx D1g7,Z`|il]}E=X*fZX3]5"?HJW FX bO+TJ^-dž rh`5LHN| ]{l;0I@uZ r$# rv\83+ī+ft5O+9jr$쭬۷(̱8r" d3Rg|8HiřN>MP󈣀G;3ϧ=I̓3.QpѥSN@k pL\%;l$B;Vfcɮ<\t$q$صIhR#JھVa_1/=7/)g" ]^#FB/i _-!|,Hn`q> stream xYݓ۶_p LK[ItƾI<@$tb AYVb93= o W|^ܼ*gy"nrd0dWwmPVUNe4a+K&xD/}if 6"Enz#<()jK&<8ifkGF]=7l#SjԒ3r$gU3aU,JRO;Xgi;u)ʩ48Cmkd16^my^rJvA¡Pt6 I۲IQF5]k4/Nv 9޼ʦKd4Qф$e2.!ga|)KdP`4'\eք2,LFi%"OG y }`yLEyݙFK[GmX.*]5t)q7-D&~ǩCWo=48oq9\}15Ak>{c R\xMWṆӈ8ۓyA-HT6CըAu}bLTvMj2P#N>8%\0`Yzo! ) *L ӵ-\, $֒/4Z*{|h(08t=j&Zf!5z7T[lOt6- m9屠IW{J"V|d}EwKtI<lr6C飝.7@Ӆ%M$e'O_`8Gc9x$ h:f&.ӮN&pFE,$n|%mRIGxh?a;%'DD"D,KƢZ=En Хq<'!KY㝒Fӄ%P6֞2^4fQf`ȋCD$ #(ÈaGSSc]"GwAJh@IYDC$IеnؖfvMHf'zU"*9HF'&CyI~1pZf/!}RƑ)lJ5+4::AQa,@8,k{"sKm,f@LB!:SckS>WKQ8JoA ĵ?'< O(FOk(?O.>O9ŧVh㲓1 k/ ɃQ@ гn{BW ؘ{RW' *5RfsF3S ل  1?GbmiNwd/3 qW T%zړz֑**5swǃ!2]FC6ѬnY£$ Y}זr:$_G0~Je~ЪlS_ [5^ky>/(I2& $ *5cQ^>€CUW㠨:7jػa׀۫׋9Yc/i16Q+Z^l>vP9te q` pg3$?%wWB?>T/3آ97ۮ6йNgؠ\3nUM[~4`ncyv oVϰD $ :FK^^Y>{=RwˮL|˫k|3_):tG[`<K?>lA~s@a]ߢ]/Vo-0̷77Ӊek7͂Bq \ onw> b[(|,~eO7ct=Pڪ"̳<~SlT곱:NyWH{Ѫ/2^c磗n[m߽ɬ5zM`IC)tȒ$QAԌ!rmyy̅{DBDH@fWx]ذE @ "MXm,Βk5'98٥C|ߠ<t0Ծk{_٧=&kPѿ697pʿx$kl8]NIt3/opsѭp*]Јr]e{4f e*.">.ro˷5D焾QVA,ifD>ͣ|zU8nO{?Đ9 endstream endobj 2970 0 obj << /Length 1562 /Filter /FlateDecode >> stream xXKo8W1(]hR@{j{%V(%ο%UqzX)|3Z;Z׷^`E$]Zo-R< ,։y[,`B.]_:|)qɂ`nL9I5ZT{}˘bk - eիKSe-lQQJO[{;Yӝ*ULOiiF@t_mʸ6Ry Ml*i$~\|R+>]ՇRFܙQӷFo #x !LIV4Yf#J;D) 8Ia{H{5e1qΣ-IήJL70]vtӝmq^p@> 8$_G؎(Z RA?>.~ma9`9g!i؇\Q:.B P]NoSq1Ri䶬tˑ柖O1tjgRq&JWbiz( 52=YSĨ)jdzRSwKA)~hJd=Nea֗%{3Eq&1 #+̖i.N$MN<=JO!!> stream xX[6~_P<&QUe<3$ &`v=ƆNw6yc\sί?__^IP)w7;!Ĺ^;BGBW*׫A4Z$m%'Zۓ@^ܧL2轗P7-Z1Y:b| a1A|bW|̊illm ^'E(T'  IЋ"g;qм1ClmZ|J7wEZTNOrTJ U۪&jpӗzQ`'@M՝C|UoxȽ/6Z k\lk^ai]!dCca䕑(ba[RB0GB?:t$X}XOZcm4v?wBS|.AN3е@O-0(xk6(aӭ>ETkʅmi`{lCJQ^(>Ңߓ>z]ubA(`LB,Kg>o朩-3 O{AEr*}j젇P:ս"dt;L$%GRi:!ʠ >c(L|+գ8M$tT~9GP,~b:YV:$Q >[ܹh:L*yԨ,qH&M}~c' "Q2E٬1L'} ?@I٧iڊt9ir_~[0n!_cx{b5tNY641D0X@;0U#Vdl7D/c!MES7i+fBO{R_R:Gՙk_!s7(U5V<˰+U1α+IjZv~h˭I#fxC]vo|7^r}a@Z鶔$ @f)x+L'+=M[ f*FHH88j6{:8VfY,xxV [6/F.x,ۡ#O18xaGM|N9>&לS o2_/ endstream endobj 2890 0 obj << /Type /ObjStm /N 100 /First 973 /Length 1930 /Filter /FlateDecode >> stream xYKoWq~]8Y $|PdV"IŻ>_59)֐Ě꯻=䂋d'~]6Pɱ+ruXBơ.lDr)DvFZ5W$AdBvAېm)O0VE.69b5a`6bcQ`;2"o@#/k"XRq,10gRr1IiAert`LXq &39vmAYMr2 QX܈@bK`&BEa' % EKj<mIh4$씂mC` /4J[8`RTjSukvNCKc;Ġ`$.qÜ`jP%ؚ̀}%@1P_%F9L-+JX\!`̠kM@taر ذCՙcÎ1n Y.+96\ b%R+^ H3j^e4|WჯdN~庿Y{*f7o'}7%or>[׽νM{ I\ pF3 4_?@PX3',ܩ~LY[o~هD[tҽ.7r4_.ξNǷXl`Lf|1!t Ͻeä{}U{?tфݟN=sϥ&P1ǜkifo^˛|Wgl Tx{+:Q_7N1gR߉;`w>_C}y+(Fy 
.)96$_uru.;eN|U}b:DT'c="r/O!4qzz?zr+BPٜ`Dr!I`e+!5C#ӰXeWτf~ZqKB7prJt9OQrGe|8b.1$$0Cαn) bjbCyX<,}q0jȥ"\I잀, xa*\Gd IB_gqXO](K!EϔG Toi b_`ZڟOV-ųzZG;8H JCovWW!BJ.-vU _yz3-_h8 n37oe3Hz$32M >;h{&B3s둸MZ MZ&T|؜!o0IP(4p/FoߣٷoQO7ZF}pbwC-CTz$Hq wcC#|@nYH/{o}8:GR9>QiAپ!go}'-+s !KǸU۵FX,| u v!T endstream endobj 2997 0 obj << /Length 1234 /Filter /FlateDecode >> stream xWo6_!4%`~dCaY>FKM=Qv;~ɒa}Hx<{k{L~ZL.+PXy(S/ Fw07I4 2!-NM5Ӂ40fdHT4^^Grkos2O0;N:j\ 2RcB4UmKV4kT=iiJ-x SE:#oV*qkJ䮥/^Cټ iwK5tmw%o2hYt< T@Ƶ?v])j hAqS;IQωy3"if;k~-B5Nw9Lr}kJGadք9 M+?ExXYJvYS[ y<DW0q8^B9F~M0 (k,U>ưU!]u0ddz(-o%YӧoSyuTp,GR"tq*ijBSsvKI;o+(Fy6=r mcyଶH>p=^ct`( \y,,+xҹs!)lIo2DVzWӦ;=KwWNcنUkU[ZlV oX2*Fti( )xP k2"FUTLFxe%5k֯*dzG헿^[ql@lɭM (b#hs1~邶tP8joeuLӱ5E(mNkwab Vڱgxo'Z@S`CwIE5;H7Ҟ$[a`7w endstream endobj 3005 0 obj << /Length 1980 /Filter /FlateDecode >> stream xYϓ_BjO>pU07UJ±-LVl+Y&=%?}n)jW_ͻIʽ\bu[EXA(_ݖu}[}o߽(^ˌ Y}>Lۄ)#,+gJ, Zn6auKIƒ",b_Ӏ yb|Xpt4VmȤ]~TST'?Iij3.@q qˬuW=?ɺ ҲWf/4}'Pf=s;*ҲpvyM62+z٫mWsZ>5-o2]봬_|VʮB²? e]6NMQ5mSh.D?l{_tW xנm C 6M6@q;0nw,CI:CP$wvK@' '`ƭjz(Ui.`Ślړ? \y:;qP`SG#&g7<=u WM}&io[Ҕҁ`c@ 6BJfG⟿Tz,? I'By0xD@7@z N1ek`/:@hZy xж%Gp`~]RYTjj|6q20Xv+ّph VTd ws7$pKw{o6i,xiIٺo=UWczWatct~FkcP1ҕE0 'xPh839hAfK_ABR@1-1BUU\;@$qgͣGuGYH^[0. 3/ck/ WK`aA߰S5M^'ïsS`p,+M nzZQ״d`Yn bTߗr$I,Gu_`-\@$I>&HZIp'I^]ˑ#)~đD9xyđ0M#"pt3j1^O=^ښBs g |>o؇ VqTkz.*ԆvE[_4Yۮ4xdp|EfG:L)H8ʽ4ptI4@#K?dRto#,m*+Z$|Χ͚sLA1%}Y8;rrca@=T M b7H|<{PѾ{.'AV ΂TJC;Pk}S;c;&Vs`hkw[HEpS7LS H~T|`Kರ?%F{k10P$~<<[~߄ԣgU o,!Mrv!H#}ќiZa<?[8\Bzl/DPxarߒ=DF3?Go/̏aI " 䣒Щ>>YFq6-i4)%} T8˜Mh Xŝĸ0^n{ŷ0àS50yv;sC81ų[S.Yyx`] er66uNgj*J4s"#։榜befe&OWo~K X\ endstream endobj 3015 0 obj << /Length 2386 /Filter /FlateDecode >> stream xڥYm6HB\z?ԇ˥ ڦKAh,l7á$˫M+X߆g^)ĝ|ǫ;?$, E8y\N2',c>y.?psx'KgHmҦS8+\{#ϼ04f"AMZiOg"*]DUJD$w*CYŭTfxAЮI)n#p6}G`oYʬQNԶM݋SS:Pks: r*g)l5q7am' VW(]鍤"e$TPJMMZTn*W4b*\gGуjE-=jc}zdZSR(><xa8yqO0xDCxjZ5a{& { XK=i&nvq#Q#AGV;v>ֻJ_rbD O@ QVfjyΩacG| ĸdwS&s;!53VB0wΟ?L1;4rrYJ D&YV,goFLC M1U|"2m)OVDo߻xʂ?a1l"?i4颐5Fs^ʴ9˜2Bֶ@,E/z#Qyf .fQQYꇻ>kK~gJ:7샔6'f="1 X_JYZd_J`aD2tY@idXg+ ݠ\6*BmpAGi)pgӮ-rQ3t4Ti"N2.g S}g= VR eԧ 7PiğzTN{3^/x/1~ 9brPyRtI(Z^,68'""hva8Jjڤ}Vmk)GFRj^\ DTv#%$q.]@}A$Y\ܘ}S+ݢY n*Plt p#rI6ME>Ŀ7 389,i 1RU)PȴM>0kE{E7Ւ9E5 C$I*K*t im$ p%]d&{FᡡHjCZءϲDezW6}n5o%P`ԜX kL8ChlVT%:v4z!~v#ae8XQj<Yͅ"``"#֫iAl+ rL@6mw1,2pD*Gܵ a.F 0pI0uO}FDti1]Q~h zzA[jnN@,9;Ћ1eFP͸K蒣"+jC/Plz!S7acmzUz/*bx7ەꋅ80 wQ@U|1P-EO-x41 bx|ðcW]S#Q0qDh骒|8_i.V`Cb5vF-ЎO"/jlum{ re{g\~NI-U>Qã3;X.^6ϭG5?*K|dϹ+Ī`+;Pc] +٩# Yܩծ3 ["K  |CMްצ aT )kWzlI9,H\$R3/gR\"ǫ?q}phlsww$ $ćP+տuX wY OWLT¹8 6wS?pPlK+ uMdsYDۇNg7[Pylf$eaorƑs$?HHqf Tj~!]+M-> "oUmZ> i+;N_FfO?ko,|=~<'<{u2-t#=d{ۖͶޓ>xzY[sd)_ < l7W+<3 endstream endobj 3027 0 obj << /Length 2888 /Filter /FlateDecode >> stream xڭZoܸ_o]Y@WEMrTޕmdi+i8}g8Eiuz0`Qg~嫻___`*gy"JpΔNV,Qz~(f}w|i9giPH%: 7#FЩwݕC 5c7[mDDDO˵CNӄ׫4|s/]aض˕4뢫d5f m"0&qUm] Cs+7Zmۮ+bXHڲp@YzmI:Wj##}OKů4wpjaW5w`qJ]UܵMQLe4_7l|}S:QV|ԴF.Imw,yC8Y:MrrA|\!k3ܒ`n (=W P9# A+3y_gL&cZ;dƦ}{wtN7}~\Ӵ#i{_JGʡOW$ mm\~u{WmR0{D/3ܰ$S}<'ʡ`1pa̤PF`+oѫIc\i`~x/#IrcJ$ Sz(\5-rz}I؏(||zN;6h)D3^Xdo`D%a3P/l4K?|y{q\0/$m {*Yy)NSʚArRY3&u*+XJ B<+כ|sLupLm`]tE`P29Wp8*CTJ}7|G:˦b$YF-I&YR2W;)ݩ%R3G !.ˢrfR39U^DzUPq*93,wJ2-=ٺ ظڇ,# <>[}#«]vJDzg<6qLѪ=v!B = ɧMa0{?(LbЅ.`[4t\BXGlpoL!1w嗲\Hpj1hݔc"1N};YŌ w63Jjs+(F[jzWUC|hƄ$ svZ @$|3RZЅe_6 p}++{@P,x䖣蒸CsԝUS3g$:$Y̡s:|fK Zd?CbSLynȓS~+.cq'}2O*rP}`~( Zy0E=ɽkw@NWFna$+U”Ir/ÅQ/ݯQMχ2lڂ׿-Sb,,SkLC&U<83g SͤR/BOD^2K_^L96SUN:2m\ӬÍNXY,+`J]~6FacBu7DfS6nF$K )̈́P8ѥY¤IlL‡,`[c :V)Ss,FVejF5ݮf#'"3QSD.˱^tX-x 0%DmEGڕr}Y3۾snL:KaʂVWiJ*@u:ywx0"Cp6놛ﭛZxXVZѥ2)|Ka%}ipeCa@1_rϠ=RAOeۡGnielmAE%HcG>aҩ; h۶BRUtW"swz 0ũ0d"%m܂g͘3K!@3dsVDh}|DOWB 
x&^=#6og#/"IXc?b"$+a9iv26X8T%N7HvߒeD\6r*L;:ZL6x\I(?sb9g./ 'Ie:Y[ʙXEP ̡?c̘Iᘓ 2,KŊ!'F΍$BK ֠v4|+ 8|Uy1@!Hˮk;8_j!~mF!0yqr6pbd3c?6mL}i5udppM$5>vZ?/#Up\_|]nSp&CP5Ш rs/!c6<V!PQ!IĒe~c/Ihg!٨z¨勺rT:qxпPH=S|+XW<-I 7lVuF!Pa:hDbfW ̳%+;ء+l g*~{W1d:b:k e`s1XnX6eZkLWfk7Ca `hJ@_IfFRs[Nڟ\$)E|#\jXC OW!9uJ.EH%NImR okPlFHu0۱+*qYt-DI2& s\-ALLrrUq}6拝PG$%𐀢v~Iσ| +0 YHhR0{vJ38;xubHO.B4H۟ H3!h_MD6ѩa#jq "tp "2Cfiq1=B7U_|uяl~UtU/^r/bySSRGd^N" J:zbJGtx2W endstream endobj 3039 0 obj << /Length 2401 /Filter /FlateDecode >> stream xYKܶWLĩ`NR)eU%Nk rfs ~r_X@u7+ۋ/ކ*cY,~%8g*W,VX} ׿\zTLBӤS޷ZDvcj#T?zx-jUy*_qh#aix:Zb%* ! h Q>%J v'ml5e#ä~']DUʯk;:F,bAgjAEhT0pβSX$~X2.Jݱ*+pk)(EXfƢˢ4s9wd&f^Z* ).XE4o g7R&'j7{vv,3h'>.;˦F׃_ʂzl|hˮMm( ^NK;⚕ *s !@w}\4KM.LbTVUe5xNPz ..JYt^!rEZukrZl/w*L^0t>S֤DtjP!ѣ?6* z*WٶMQ4f2}~LE @X%g 9QMSt<~]R1 &DzE_WapN ]Oǃ".u;]~ˆV# `4ZF]õ`sS::oPPvvR^e{/]@@Su "8:IpW"M.==nc_ WGMn:UkɃMlXuoړs sU?ʘ8& h*cz$b| n3{jaCc -&pA)D2D Fav]gip;f"ɗix7K)YROCe9[ŎmumXp#Rβ4VR,SYc$WR98YBO",un(rqA,Uˤ!IzO* PWvfrQwq@)/N2^&"GCvo-9t;J\מO/TUpǐ>ހogD}@Ik֙>>T'؀911P< m~*MJ-K& i$0@JA#w9 EF096 )Rm&5+/hf.y(]Es>mv?gl e{5ϥI2m8l؃6Dg[[K% }ơCcꚓU*/ $epWQ79wQ7[z޵%?pJRgxf-#*Y`X2yGdHG)GG 7G*y{ѥ A_%Li [=c\4,.q ݽ(&,;$8a"9LWH>-!K*jSdFۚL_q-M0J[]U294Ugw3yi&#I!ž˼_K"9[;.,dK25{DUڨ,B.uÏ{^ \{qȧHc*}FbM&Z4p2k(u;|(gɤ1noƊBL(Y`cߟonooޘ(A> .Ɨܗ13z4I~WM+r^p#Q7D;J| ]v4^.JЈk: h>QzCj!>Ҽ .g*?"L*'7 VZɂn_C@g)c %'Eȯ[p $Π"KcH&<]M*֮/`9ϒ4xi-}bM[דddq,mf</p0Z(qaHl.[ba6[NItV#x ?ac =A؏`Ҷ콵(eI]!0mvapIa LR-M[Ŕ gL8h`ͺmKyZҤHp|j~I㹢|V-@ 7]w7CrϜ*$u'߰t0ZF$%mrFkni]oqvG\ĐSAJ-23Nz֔ecp4 8i;єdT3]9#lnH::9<k1׺iє7Z^bI M#+TnFE =ZNZK /j0h뱞y)mN%||l"I5á=xR0`ϼxJPxG<äu1mn~[;`.4U6G]T瘝D8Y m1#>TK)X*͌c>gS'LujՆܔmgεYR9I{]xvEIsE0M*Y%) ~k/Yq\C06r %|OeSi]Rň9p@KW'oA9hoa Ǹry{-ȟ7;5\Tr*Ǩ2(Dwߛj5ԽX P8׵^mCB&Bjӆ{ f~*p-5Б԰ii[oۣۑO?Abv ҿT=F bHv,o>F9>8"PwRjΣx?ay@5w ?((>< i5)/!z$/;1._d~FPөL>'SLV⇞6n^ endstream endobj 3056 0 obj << /Length 1136 /Filter /FlateDecode >> stream xڭVmo6_!"mX& #Ӷ2[$JH%{}aoaO*DS{ęfs֧gW1뉲Dc Ц5ڨQQFc[GTN7^2g᭮2Y{F&^H$Ӿj4~M~aLڝ{+z?xq6*ߧk02`*S-a@AqB!DĈAiաcccȐ^ErTw8OF8-e1C"-(JrovUN=RmtRn YY/A8U*+]9*ڮ6eES,t3tH@H"xBIvfS7(UQVS@2 ctU߫mmY7b}־MAd兤 ^~>qd5VtEPv䷣9}a8 D13gp.>)UWVߝ-0LޛPH+|In)#KM߶һ!mjǺT&> stream xڝWKs6Wp S7q2t$3HPbB*Z򥿽ER'b~Y.ԊQ-1^`2>;Ʒ`Z|ㅃ4PǠN$B3lv` t Ջ>[G>ddl5,D(2_};sl,݈U2@']n$g TO  8c7ogF]~G5ObCV8nHjnXffUʚs16;4l P3*kSQH(iLNR%}UEno_/ .I$k`[/:7]vz\.9 žoE"FsO6l"8ׅCqD:L}DJ.^$%=1H_/$ hxi*00DBjReLotU6$'$51SMF2eD,C|@vTxL5lr6 ݒH7p*>b:PW㳧41T7=pvliBm endstream endobj 3084 0 obj << /Length 3151 /Filter /FlateDecode >> stream xڥn>_"cIxNA dDʪ%q>UDj7{Y|]EɫӕûݾWNLgWWJJ*WJd=^3Ѫ?|i45͵0VF~c՟FN cIot]ٯ6g:tP67d;Rt(*CRWxXMnM鞠SQAJ]S+֌B@=Зpo#NQ f}ߝ;vw|cvڱ_Aoa ߙV=-:?}=xA1Ppⲑ`Qd.bڷl &/7RECnFJNHע0yfF|iULaąv6K=Vre62ISס0sI6髦Bۑ`= 8=悩Ǘ6\h F}ErqB]OתHN 8C 39>Xpo8 "Oshnhc ~G%A9;T[FD^<=-a ࡐɬn:-uZLVSCPTen:)xfcGs 4v녔mvرNn݆݆0KFJPԔ #[qӡXvL6h+H[ ̓<2)[JegYeI*[5"^avY,tI2;DpЩx;fM %׊+ v`x9ݖnˠ۰AnYq's ̅T3 as˩nĵ&9,mXٰi g>lٗ9gu|@5mkYR2Rk~Wi#ٖ#̑,El^n2>j~}o j<22ZJCZ'~A&YƻrvPqY6x :/!Q/Kj=G ť8Hq1¼ ,=@Ou}ю?,/<0x09 ooFd8#}u9ބC2EaMJL| C}5ڃ"cq_Cȉ$͂ʖLܯ#1ёh%HX{rr|#L؄˝J<7 sJ:HҾrmL|蜾0#K \ꊻyqH9}}!%:"Y7+<0Fd6]+L JA_1seO `dEWuJ) gKN2ܫDŽ TKD o `I!P ~auZwiWu|L4&CLb?©qS)9:f50D>zP#g`B[%\6*(&y캠{D84b+j8j3 ú]5 3OS'29y(K/aMkLs 0>6G>sx 2$!jmr{C.ki:0-sQaEkr@=3m1nxy@YN(QrtdˊeѡKՑ8S󄂌L&氚q0}OSَ52{I Qwv7q endstream endobj 3091 0 obj << /Length 2822 /Filter /FlateDecode >> stream xَ]_ifVqc4'i 3/Q4WQwC9">~_m_BeUH基5=7 \0Dﱬy)*hal̲C%"xK(2O}yK)m fΕR;VJ(de=nC]{{3:owyWy8 F}Sȸ-IT\0,ie)`ᐈFxCS#v=ǞuEKs)ȱ* =  9xy`.fh5~jPlqD޷ʼnGz/i8 /&+/^'wC< 7k SÎmY*hMǚә+o~a^|U/'EDEmr<3}陁tdU-îGp,h+ii"K)9AUAPW+*eK]#g>t=eok_*;ȆVgןRHo-H T'  #Q0vtW e1ZCX`e{*zP0 _w({2%P 0ڱq e>ͻ5 ԟ[ah /}Ѓ6^nH:ؓ/ ZE8^.Ht!#y2 3nN Ƙ 
YysdGubq}:ֲFM 7~;MUs9pPʺ]1[$"(љU((YG՝-$:K>/#ijwc1&WݔEow'CwUs2~+]w` 4E.`/؁Pu@]' BqܶlFZ +e&|A ;meqHQ]Xh/ɾ{!GhkC8̷2Q nPKT$R^!Ȧ Z꧟)y { JVJi2Q)$%qHI zMu)7-! ].yD@^GIՁuL>OYe-D.ePv8DUI]HapIUsMN GTe*rr"PR5 AA 䙹83Y#1h 'lx/Ny"QBDz3iTj"f)RN(BSFI܏DSbF%FERJ巇'.һk!]dw,8B>cq g_QzwSsEi8- PѶ.)v.f1( tkMt/zҙ$!$$2=i)ԧKW03wpl}* X*Chܳw_ +V[5|@ȼz Tdmz66tn`5L$Lr5]fNUKgO ( 7@`)jH;zJUn^Z CNWC$7aX[0t*:C57ě nG\D 'U[ ?Ov%rXN.kBTBBO~IxN$SrJ ۋ}޸F~GZ&ZJΘt5%뱬0r;D/K t#s#n}Yc3c"g0`LsNOւƓd-pZ=֬1DykM5Rw~B3+\B%ua&{)ڈZ6 gpyF`oA |%^oˆZv׭II9Y °4rMmtz$f%} 36U0+CRoS3 F/~O`D ȓW霧{1"_n W#ǡW 0]ZL $x 2!QΎ9FKYLK2OY`zQǞaSa"$fy=K6Fb5S py endstream endobj 3102 0 obj << /Length 2434 /Filter /FlateDecode >> stream xڕXmo6_a4B xiJD)K$נ.\U7áZX5>3̐dsO޿ٻP,z}sDHgHO? ^8{T!F>u2-N} G" G@ OV-|\eY,@x[nUh̊ҴeN/G4)hZ'4NZw6jnN7nh (jzK,lt pQD!ut@#.{Q W5l.X{?`s35eDS`c3b8i)46l< 4m#[ܻ]}WڹAa2A] W J)uM۲ݺֶ]7Un@QuKݺۭ`)|;8r²+ՔyV͙,XM? DXDI?$DІF`tp4*aremZN0MqҫhnP'.P0R>Vd?FVSC-7^^c޽AR'Lq}ﷳ47l2[ltrrn!.dUf(Ot=Ё6ތ`hvYr`%&>8d<F믦)-0"iQP^(IpT& PN4rLZHal)b(󮞲(_`֏ 7鰭.*]:mR~6EОl 9 p4][8pvّڽj `qoX9 Y3m&~[`8P Dq$0E(b3kŜZpxu.gcؓ >jO,LB ~@:lMKĔE a jYUY/b*߶SsV =;fk,FfiĤdv8.m۽yuvv{{z e #&P*O ]fUPoE܅j5K@Iav ~}lɡv4zy wdޥ} XV┲uKO( 6|D XˬΪ{S6xGG܏O33C$Cx-'/SuiRSRHD?ؽ%FXp>3y 1D=#Brk]y9̗/[uQMׁ?bL2G !ݠ#? fiQ<zK{3j MA9 ͖$?a+W0Zj@1EJ*7>q[i}xfg#zލm5^:C0ODXLRYԇ (P"ORwGsu-3#ƞx7pR֜zr(G!-]y2 ?c$b2ώ@<'z|>~ Lcr?0,>cE2BbK&~>w د4D )뮩me&?1ӼGE@J[b8#QgQJ>0L0` yvEJ.hq>a ]O"{ep%4[[zl6w5DKh5H|:߉Ibe (fc-ݷq#֙ph -u򎮨LSOi ͭXt?{[ouP.T ']( 3W*|PP)/d8 5b JٺYg*e O&1Ye/{d d.G*χ((82/_b=w UU.Prmabm nڦM2лɚ2d<ݕYK^/ ^S;)qOOi.ӱBO &[}x۳n}|VWYhG endstream endobj 2994 0 obj << /Type /ObjStm /N 100 /First 973 /Length 1901 /Filter /FlateDecode >> stream xZ]o[G}ׯa9|,i~*Jm,=}TJ}0{!;)kǘ +O(>R#"'뵴t/.h~3t/o~u?~t⧳Xxv'TgR(3iv;: ͞Qnv`Y+Îno7 i٪g ؐ=с-9wnQǟM/VG~nVpMu:rĆW(pl=4hH }2g^|E^B/jWx;ҭF!vfFog7=М|DOzB.;ޢ|tUyk> $tG^<e%JH?'t;dmFA z]'\Q֊# wMkh݄pBqmB(T8 w7g{ ڊ\[_-vXWuK)Ğ'ўh.ҒcnX?4I7[Ȟ))T}hf7jv#rz)Wzփ(]]JhQjQszD2˷.XJTʞғ.#h 2z)/:Z퍂[Z }!o쇎8*S~0*]muD̉}-} wzW j;w2嶺zB;JW wIWbŠ?gjPS,E>O֫4jQ/稽h >G}VD v:x t,α s>g3sPd7;>.;l:#8 ެv+mp7~D=ݒyC/jYҮ}w>tkփynv3a>?{I#X8'XK9hh:~yj85Oc ه\ +?v=ީ䥰4`)8uZVF?e endstream endobj 3111 0 obj << /Length 1432 /Filter /FlateDecode >> stream xڽW[o6~q0%uW7=thuإ[ܽFKUWR;IYe#$s΅_6ŝk! =3+`l3|Bg"1f۬QM|X{Zv!? f+oYDMrζ=GHæ-*oL|t."/%ǿA[3Z)4?`LQj"($ܾ[VMM,F͸r&P*wiNk&7"'lGc%ZjbˌJߴB/ \'`_UҚUTcQZ&)şn%G; )U3yΪtvVe ˬbu1@ׄ KJEM3e -KU.˒Qk<ҬaC䡾\u_DRiq^vEBDWzZKXzC׷U;" 08˕V'eO!O4U`hŨ\5Eªgu5Hq>5ݱݰ"f#ʽDm=r`v H#0>(KHGKU>Ew~U,ͣX1ݹ+|#G~(, 9BiZDӷY~G_t={^eMxCl$#; ϭhn8@|džufOgK%xrD%@Ba@4x,~eL亽:⺳y-7~_⚟]`=#6FP=K<דP_3WVՓT'IBd@@K|YgQ%/C_p%i:B\HeGkZ Zwh ]|o+P^t\d8v:b[ۺd$C=8;=.Ae;(iGWE kVõ%:apybQ+PrVL g,Ut~e# wL%سIֶ疈˝ *),ߖJzј=AĞ{-!@bܖoB gL߯reUda-k P2@q;{M,NclDd$WMK(ހ1e%ZmhðuxCfzJ9 uqt_YC9rsɑ~_UΉJ endstream endobj 3122 0 obj << /Length 2152 /Filter /FlateDecode >> stream xYݏ߿Ke̥H}&MѤch-f#$;!eYFod-oG"gy"vrd,0dO@oc91gY"@:Tgc}X[(2^VJ*W';Lgê+\I!i#U;\" ~j\fk8EV# =;ՓCk.zW`s/̑W(`:v&i/hrW#)1miJ*kN9ycN)9gb\so\KzZZvHiCV^[u>,EQuw܁hpEHha쎵jwRI#r31 <%EdXD;8Qv5Q5xoG?+c(^'!Kx=@Wrҋ~E;(8{eJ߿Xdf`!C E8!NndeͰ af³[_jPhO D]\IC@B\%X WVyUhU*R8WDqW#CrݺU3崮}Wv}cGqh*^':Lಣ.f.Q8 T#Ს ϥ;i rTfT^RtUuu;j1]TG9ě"gF6z%͖%^PCʦmr Ի!HsJzRk;Cm"<&%*JxcP H$ñpyԦ -gZBvsFf/,3C$7d ȤUmЄ`n 9! s-*r3IzY6=Ư78Af>sV")g%3-uQdRٔ,CCxAiWto}6ExjXp vb"sHNf92`-JPk;cGA  \Gݔ{:Hz4'Zf8/^0ѧx@gw~.lo|;fMaۨ! 
m;/N5딠fh X %4uy7D5oOePB|CsN^}Q9wE}TY@%~|y/h୩%t&,hf߯cOƲiuhMvR[;}}||> stream xڝW[o6~ϯEa~ht[aknmm,WfKf}H<7~;G[{囐y)J#y+`Xy1!(bw{]Yh] j*>tM4BqHR6]`vGs}Nc̡k͔$fBě!1F/DiYgySiJ}48?SL0 |`zwY[ljKWsA%T5Ku_nЪnϾ@&V V6HSu׀6#^C$o}m4Mjy3e/F.&te(M#CE͌~(HTl 7m?^^uj}KI"_~aCO@`Q+yUz%^fxHɺ+Q-:6ڕ!^ PEъt/^C}ͬ+y+̴fҌ6rU8M]6EspZOrޚ]?[HeJp5b1̬bjigKmβbr@ _-!84Z]j%z[չ(oש?V]qf=(X@ExʔjS_ &1Õye Y f7o]n6D'-Gb*~8=[ECMdշEu`-Нv1'e28Mc$x4ʺr6,ީ8ohq9}!ߩ,5Au}T. @Ubv ʡCacD)(E"t)" Ƣɺb;Me'Ո|$kzA4cmfq).>|^Ļ[+/Kݣ!WQ@N% !_OWn܌uTNK3sY'[7,A-fy_f2R:8?M؅pb4)AhBE TzWv|ÎWcF0>bH,ɰbIIs a;cos#>s͂D̎<݆;lPHOv@s~?l6`e#9;bO>?}Dnėt%!x55&@ 9쯻Jlۧ۟ofp ~=/bh>l>9y +|rxy3ن7> stream xYKБ,.!bOCK$f)R!)߻^lqb 0guuW.E*Z>|2,38Bd~JTYX&%Ec7lpHNߏuA6*Adv9Y({p;ofQiT8 ^.:~A8{όiwt6Mu;CǪǁ,;.ծ>dTh{=ocF6׽[nF<V,tcՎ.t{u-Ӕy+Kf(Ru;TӻM>fu^{xn[l}X48T=0nk@c E*nȍ;.i`G<"v'HELL  8H֜+"$\Hr5u\:?Eir^)KPPK{@{h6 ڄmcv8\o(uZwB$Lc$6Ay]S pU8IE PEk :+hD3 zH8LG}P$g‚^!|Mƶ ; ?C/Ms9pfR؃4 ~A{6+,d&UBx\5=\6?cE;G*(ovta]F |A2p"hMbжNӶU4p9 Za `-t@Zgx1ȧ@7c0ÒرyQݱ7 _{(``J]T/B*.Xp& ppnrV#LN=1j" \˖.o'Ɩd; g;.2?BjcCKX{t#>Uc7m0 B2yF]r'tI"ܾTڌK/XfOsɺchBKv,=Hȷ߫O\ӫw%SBS ԧ)x&2ʜ:B4NU}@y?R_@b1*B]:P*̕+HBCu<*ͥb' ">PS|b.gbk0Gz tNda3sAu}?CF8SFc=$VLޏ'粎Wg$0(#aez%.!Wij|6; kDvҪȦЧSsɹ{=y;{UrD%5='U@! ({+=|ḳ6 0|,:ߥF5(IM$t&n<L-<9O)ѧoޠCmL.e"M\W/dg+hkipo9Sqgq}1arH%$OL Kt8Ÿ(30}F00PZ3\',@B5s>j^z6 #U'^,vxT z endstream endobj 3154 0 obj << /Length 1676 /Filter /FlateDecode >> stream xڝX[s8~ϯpfE_PlAqԱGة>4\sQz] QQϚ,1bg |i}i^$1Cņ] ߏ.qW(uա68=L_:ޕq^|.CZDĚI` IBe`;OWkl=R v"\(|HZWşQU^ ECD%ʋZ"Al N,hQMa0GCDQaM &-H߱ \7Iv} q5e_߳|>`]$7:hyŘ2יfM󍂼)#yQ X#t0#C:)B%ja7v.lD}[y3I`u+TI ^膯EQk"SPtg%/Y,)or,:uz4*s=C<O80?!%C 6NT 18cʄ̱XD7ڛpr|kBXci] CDfQIwԁYDfvWfsKJR"mpCz~dz"o]+ԷϤ;r1DvV.uP~# =(e5Ϟ=SU. jk=ȗ~Q18\ RTh$t-/^ y&aK}PTr(HlBaybIgfQ2;vh=| ?HעQPHmj#i%($QlN\N *tm@]jgT=WI%V-?uK,R [HQ`2AE675.{uP۶ T(V`,4q/Ɨf'=lU4\+IJ@*WE!ʸFF8 E-Z}`a:35$+U?cڇܣQu9ߕc Fyʿl4|xDL{ʏС>E> h[K∅n,:Zf5N .zM[2{ `hNh?^zsGl0xaJV8}DȩEܝwQMQLOV\ʎhז7=RM;#k_V3L'AP-8yaVw{ r.4uQ"FOz:ϧ'j!u6 ,dbZ oU(h6Cx;B07dHwc\s17qӤե.UĎ Sq?Mq ػz!?. 2] endstream endobj 3165 0 obj << /Length 2689 /Filter /FlateDecode >> stream xڭYoP %y?$8gpP}gvOӮlo{׫wWoT%,B{w;J{LĻz_|! hTjY^~ cNewG򵈀(it4eLϿo{k3>?GiĢ;-Wk~Z7+ҦON81vx>9P',Yld絀[@mBs=REϳXe(¯{3=8j|_O:5&K7}Y5m5DhթR{ch:2G \_k@TP@Gϗ,U/yAG!m,4ҢlZ }xҫV=ӷ˄;9H;<;Pa/ÄQj̶u>tXK̭14'ӦyѼ 2lBVumMf &:FyT:t=vi^@*hJ B%P?||JF 44"mܢ@hG["U%( ŒpPԼ",B o3' "UpOXd`PQ rod)a4 o^ȽMi^Ac D<348;Ѐ7q4+S$Z$d#z >OJ=ßA ,bs𐱾}}{$59B/"=iz3QݙNۼj6NA޸Uc*xq ѽP^*0bt /= Hթ\=™/ /S~8 贎8r^):+>YU_«n9 A4USm^nP"ݰ=/ȶ‹dZ85h{DΥ؏u#o iV %NnLM'`(v 3$iw%"P\yp n6q}#Ht'bӮDp S{ iU0rwK3wc$E5Rs,g8>/fK#_ܷ> `C҇FIH`vKLmK Rq!OmMkcWۣD ~܆Rla(PN-Ue@*jIScviњ>@H yC_bFZq;{\$cO %ີiCb0DZi"v6*ɭ r?GgG ]₯(iY^gvmn/ŽJ\}-`Qšp~e}őAJLU@f1S1Dd9K&F ^ׅpBhl4T*`<呄1I2fϰ$rA2Nr^P-O5)6O_Hz#i)& >D*)3/  ҷy4Aj%{$w\yq.~RRČRX+X 1GN27b*M׋(nCpXSt%Xc]O'*Y8ԎRѬ0E2`*Wn&1- 2M!)<"9eW}Fq9 7u73ĖZ 6EaP--Nz؉KcQIc 19ޡyEN 4.C(IF;r3*}%TlR=d =p_x#f%b6&X~Cav Y:Nv,G mX쪢:Ls家'ęx\2%$X$O/cF$&^% E4;pbCE/%w )+\p1~pGCFA:0Og|R= R 4zqWj.Fzؒ4RC]7;9.mR'!~_W=.Tܪue#^v@VG Z˸)1: x_H5.lj&ڹ0#=@D a ,yœ-.0C/-Nb&` endstream endobj 3173 0 obj << /Length 2130 /Filter /FlateDecode >> stream xڝXKs6ϯPTZ &TάdSe;!$P8t8q* F?nt7/ 7_?E2f}X$3frwTCsjodt4K CA rU(b-VAC"~*}^{Yu2 GZx^W%OliwyMc[cβxP,厀!b ١xǝ*v4[MݩR ²Vye)#VvYhZ֍e`I!J̻u:X(TEI֖4`bES51TAŅM3`) p7iD:[M5$ #^1> 0Rn҃{/%g zR}O3 e) Zx]3! 
D4ŊEjh0{c/,ATI0h?{|> IxR/yGE 0)"u4VY#3ghza(./ز;s*s=G޽Ckq ˥Xpc EDLcTp R?Ji"2(i<8ajFK> F8q\3 "SȃPNO>vn7]Fs5sWƹѺk7UĐn\a=kZ!3+:S;}}:о;.8hZz_;ם0(2{UShĮ0ٶ &LBCgbC'F­[)\Z0'np>c"#+&~ZG.!5?.X"^l:ʐ:>:W5k?H!_>i}込>qX«u $ITe&ql|_uHR ӧ^w%`=&0 :_P^rWIb3;d,:OVڰ'>Hҡ|w鱿$VCc&!sv&y>“!L<9eNsᑙDQjQs ]ǞP06{,IDIG~h'ÿ{}5]}hWXxR60f!RۺA;e9OIK"W_j|wrC͵<`Y _}Ln:+yMy,=J,^m0p$;| ?`»HǴqΙRwPgReMc& ݻwK]eW96E8- Z}#MGcxشAĥfyrMl#^bT__bwPoPuMggkrkg)N{%N{ %.r&D9Op(^"˕l0XmCJFuTRt>sVa\x;E4#!e>A^5 x~szVt8S':#=yq'+m9yE 9"tf7h,Or- GV'~5o}uwg7 ކ endstream endobj 3182 0 obj << /Length 1432 /Filter /FlateDecode >> stream xXI6Wad 戤DRE[4E2Anq4YmZ.%U6'>no"yw3;! ar`Cg1:Ĺt g\ Heef<@v|vsN(>;>} t\ƣC 8 C4f8&(72_sͶL .+CI3uZVq >A:]@C g `g>gi3,kxg` st Pҝ|5o|;ʼnH1F;ٔB+yie"`J0*\7YʕvG=2ȧ̜TG9x膑_/s/;G+Z;Z=2o> ~'|.F ' 8^}ye$R+ߝ]B X(Ϸ`@uWQ4Wa5w_]i?N(*sj=w沰Uꉹ˨2g@%Dѡ v[kQPQ1QD{x!JЂAڬЍl4a iˤroOAZn ) 2 n܇&+7ie`4](zw2N7f߂e&čZQl+i.&U4ǁ/̲ f+$k0QYstlNX@Q!{uX&,yt/ȟl>Bi DiV?n>z|J@ Q<}X xF_F>%3q A]JM9vpj&ryDa2FE~tcW2.s$]+<)źFI+[G٢v VU9 ES&ަf%fsXN:d]ַp?.ʪ0x }X~яuslS9x۬VN!A;ǷFh9TVDDdɏfΥ~j4~:b  endstream endobj 3200 0 obj << /Length 1456 /Filter /FlateDecode >> stream x6WpDĂ7 'YgI&:)l Iyh p%5<\Pkшû/[k [?L}ܼ}+EiEr1F~YOzgoˆ" hY59-gܼe?P@Z^T MQ1W芶37iQ6ߣ&&@)V~E.hKcz69~q=b仦λZvE9qHMI䭢VGw+F =]Z5nI)R A[yt-pStFx8$ C} //2ؾoV|Gڙt1q@k1PFhGCX+{ϋD-&o Hgo\$㛏'G 'D ĤE(1QqObdhP`(ZU;*L(AKQr\^J3V]]Gג Ǻi+SMrP;oږ$@1 >Q]Eŋ3'؆dȺpeV` ajM\_kZ-L|bB1$r @PƜp 9D~Tt m-aWӵe";;"$u뼩ZьkR6T+8ZL "چEYWQ $P4G[<ܽ^ת|LUTuZz`ɚ;rznYBm`@*qUzcP?N2w>L',qJaYY5y[9y䧉@ߊ8J~,ȰXQZ~!Ezbg]W_ED'MlbrKu򣌮zᗘC=GbPHI=Tʀ(:WSzJ}0+^ ƞQMG PzA\FYhe0B]).w{=m YH=#|Y#qy@2g):n"O C$^,*<" ͶbZDKW?! FВ4!['vkY#Ls>A@(I{"7¾+VCE$0 !ڼQ`ꠀa M#wjSA50Ĺ=t@c#6bOViԑ8̚j sv4xNWB8WVXՂ/Tu endstream endobj 3108 0 obj << /Type /ObjStm /N 100 /First 978 /Length 1976 /Filter /FlateDecode >> stream xZmo7_ Y+pIk:GItgK^3ر6Y"ASPy٤PBUڢ8_,\!\-I NTl!NlN"$\芐!V]*E{^lbr.E  .jTmU:2؛F[4XF;#S{+$%HieD*jG$d01VlIRh Dm`TN$O3iۥc[Qq8=Sfjp3I c@ j`c%g%z.@Tހ+Y- ! q(eVjL3733 Ta. $€33DŎZLR4BNn*P$QaJFYf]H.HYJK ʥ֓JrjmFv)[.ݏ?{:!O'' ϔ<" .İ>]Ugzw~a^u?~Wb8/ahI!xx<[ħrZ^MLVr:BAZ |˫zZ9t^_ ѳ0dX@t>kFQ$_awg aTQڶ3Om v?@Ȃs׽X..^M׽x~_LOOqt^Yh:Wb&ӷO] |1ཛྷ}4rCf C&*RmzhF?ӵXR]eS8NP"2~ZA[~Qlj7d):>4^kzX/'ջ^+oQ<oQhټA&.s DՈI X d+JfWjs9BΎGl؜GI?P٣FL5a9$Rk>+'c]$>4nQG+◍t‰Qz_7{x1ݚM="}8sѹFo0tӠ yCe H>+>sQ 5.anGD^E!y02m?y0/fyΪ[SXTo0Sk/olǘÚ- T*c2[Ƭ> HEEp6O1noܕϒ˓,ýl ߧK'6]Keovȑ zXYι?I_>Xƾgc9ac_Oь3TmTDsA͛i 5cy4$a#Z=.M7ͣI9&iR΁L\MYQ/=(t!,걕qCho=UwVw (>b5ݷX}=aÊJMʱ-})'ee=2plߚgwb;V:°oo`;r"= yɺ2ˁg  ğ=*Lʁ9 SOM//8ėѹ btmT"|JjM >OLP,J ;"D\>-l|!Ҡ endstream endobj 3215 0 obj << /Length 2683 /Filter /FlateDecode >> stream xZK۸W|Yj>STga@$q"<>h|N*g_|㛿<I,v 9QH`ůA(?h #EXsT\{A$O U ]4MXYˆ >26ըM׏]ST{j1է^8<y{T_ҲU;!X0)DO%gY TaP7޷8KGq56l1ˣhVWn ,yeƁ*Ϛ-WR4Ne{*%+xgt@Q~p~]_')vO4x9h$eCZmR{iMնӣu%_SWFU{J vzԹh n&XgxO?<+os@d%,aS0ucEM]}<5D-EPSs`td="0:2Œ>GҵhG& f0t^[^6D+V+d,*NJYA|.lWy)ᡌ?jl" I<„E$Ȑ! b/kA FN[TAl w=fAWXr}Z9P ַ)UUႡ޴5fQ32|"Y޿0>iqMݜj𾺥yEȲscIr#e­C9Yئ+4Rv΅fSUK,s'ѝ& ip.3`c5S$AB1zvɩʉxa(bq\# `rA dx݊-ƹER*ыR.b-9ϫ,D۝Ot <8},4޻f :cj"2\d 5-1/LOP3hQ, aZW!Y6 jE5F-fy>KJ< /OrF]%/]uݒ'KXsz*I2KQ"KLs#?NjS|`he2,*̆_YijjI '4-O$0ig\yUd5+[ϐiR3CԗU[`*ѝCCBrE#A\n7Θj4mqD &/T{'%SYZ<7Jbl(=3ОH=/d:v7pw&.fPN$&~M&M]Td/00a Ep<ԠiC*` SBY24y8'ЯD-_bCQu-߹O铯VYGq]Qb ?+eWq%ϲ+̓+i]1 q4+[UݡnQf+J>F0r_bЭY~CƇ/?bujjڰ.%E͏?Yj:tݩra=wd浳dS 7W!loNO*Jl'Q5Kf)±J`=ߤioZJO-klHCT)auAvXw|(Xi۔E*#c>?1Ft%'0~e,qm%2m.Vpeo endstream endobj 3222 0 obj << /Length 1058 /Filter /FlateDecode >> stream xV[o6~؀͒"u!0?]2l谭qAh[ne;ٯ!)ʒk{)PR߹߈&V>u88C0FNH (wV~Z7"͒5E<= ,(jGMp%5LB/sY؏"QGCUbģЈ"/Tr+)fJa{:9_mHf ! 
#f1&Ud4ncCee[x # A0"j BДejL{V .aGâm-piV !C\Xgwb!kr'rcvٕWMJiJZC3!fBI|>~漷rw c!m._߸'d?IيD~fU9hݶ񠸳}U׻ҝeaň-Iʿڙę:ڳ>bs:bRf(wqIT^_)6཰EZW(,ο8 JAViz=.FfuR1m_:{}:|s;Y) +PY5럟$[8O{&w?g6 sb#vRsaB[ vgG?Gl}RT}(>A9AXE38$~ CozmSYOHKJ) endstream endobj 3235 0 obj << /Length 2138 /Filter /FlateDecode >> stream xYKϯ0Xsv`}IYJdɐb;kasX X,zod,3ƛDlXn~ VܲLhVx9f5WBX%$[RetZv'#E=mf"J lSjAs9Dbsĝ1 $ š_Omrj+Hxpm~2\+~SF}aNo\ҸLጚȔM\dt.Ajh=޳_jiاm, K}zǏmB ǎϋE|OSEk@9^d*Pv݊BNu XǛ[XoIN5}0;)ҿۯKV$SS=|?y5ڻ(d2|2nujᑅ B¶t,X2MӜHsHMDp1-DÂP z1Mjp<ǰG7Q)1'Nc)r/(2(3ߎ%tQ1yP'MUCD+ z=KkH/p8\ʊZbD}l!ۼ^(.'$gO[Nwi;]G}VjvSv%(Wi,\T젌w3/\]DNMmv\4;Y@-ۓ@@)XD7#s үyG!t ig;ܞ͟) 8KmckC'o3"bDʻ({Wwڬ 㜆L"pt7%քK%mB6\PeS~R!PQB ~UD#jJ2gezqO 0r. endstream endobj 3244 0 obj << /Length 3324 /Filter /FlateDecode >> stream xZKh,X x|HuA$9(VKmIbV#6)bW_7w͗o}2\nLz ɱ+n`ݡw<ꢬf0k,Focsr\:)ja+W˺DzLfWUsGڦmWNbhsZ C#x{ҕnzЫ+S4pRCu(rO8f<{%ݕapس,Ma Pe4l{vq-n 2K^&"1NٻI\SQm0nBD)gʰd´NX! -hz/0ӹўvM(v]&X:0\OqNδj'd۫L[jg55&\ "ڡvq@H_ߞj߫iv*?jW _hdo27ɩhVν>d)3zlh@ = FsR)xߗ[T^hP<-6Uʙtw釯ְ5W}7j@h&zy@'[@jq3+|89k389] ]nEReK9W=J=5jTS) ŰK^K GMA^yl'YRQ["dyhafb3sG'|8;WQRt.4xSYFC|(Oٷw'd+~} S r(aT:UV@,z/ _+jwL P;՞*rWua =9`BxήLhO nM7ˏ`%*" xQeGc"ĩ;~QUzyۈb-s]L#J \*g@*#؝ >; ;UpP+HBrEXp{R'7ti# }XX#o0;i#Ýݸ>:; @+ge,Y@~ΈbKdz )C!t&OP(̦øfw?*]v8G&W. ^QWr3$&ud5Q [m֡1nψ]uBm%k\+>ĩ\Etc„*GϜs{XihPQ+6O.FE[҂)"L&mxX !zʅ"[ܧta .̑7e- PwS5AϴLd aBXC~d|= .Ssbr>cl) Lj>wP<1" ,,u{fx &%;f:dl1ypޖfPiD߈-L/zt_ ) 2i.D&l,{' %"5J!cGm_Cai*^A%Sݿ' ]AU+F&%k(Y:P:|"/ӭ`ÑXk[8"`7St:rPA:!cIt)UA^f.0Hܸ{8da%6#YAD ta=.0VsبOR Qj<_VLf c2cY.|!T9UE9}EDF"p[! ˴U]jp^N yxJv< a#!'2)2ḅ۠k.8BklBSBa~&M5]*æ#NCU{J~jLrOҥ΃ `d:<Ԟ]J8W/3L0j{%K4髧q'BZ@Hյe\VݑC*7~Mu{w5tEӃʔMgLe ;{@ٻ J C ym–M@%%G7S qM,3SmH hDt+Kb)Kb+B{)%|s=}r߽“nt]f SAa܇pKtolvuѯ]砲Sz!KG2j7#Btxbz w |\>2 yeSS*pא` ;9(_ۂGZc4 &u濘c} endstream endobj 3255 0 obj << /Length 2880 /Filter /FlateDecode >> stream xYmܶ_@ xy")Q[8}-]qwek;C%Q'; ^yy\:]?z%eJ~ÐHΙXyy,'KR,bd[]?깜(57"AIBJvzgKRy9uވ, ͥ.l tm{ެEջ0;)6uJ7$9]ʏk=n]h]2R.V,!EJ7xPi<ލ9tK6_ZFjHۯy\&Y7 MŅʻzw?!8iN\nڕu" ۲?/.U_Vf S %Y$]٭B T,JJ2iX8,aE)}KD[Nm-ydK(c>- ej+3676p ӅmR=l42n^2c1;l^\%;F,õ~[mQB2|,@LKҰJÕ#IpJ)f2;tcl|NosܼRqЧeUڸ.6 !ge_]s/Jnfnjڞӧ r 1Z1շ qRs+5np< v4o5J[Novq([.3 8VF>&nנsh/ \1vn Xj;mJ$#I zgΑ?c┑)/>0Qb?7 rj^Q}M6:Yfo>qb>;$oiӀ^ospm7vܡcͰo ]'bh GAo^ x졣06X2ѫy9C'_PUmX o?=+@D ~ew\YCɞd04I5h:!KDحa\G`H0>dQ'E>%Y:(yrBFO1_ hIܣjaui0l ai?L &3 ',F;Zw>ahb2zx Źsfӿh~l8?.8.[,bvD}@ܪŊI5Mqoأ·zoUx!_KH+4Km.?XhDəSftd=TrLs2W!# LL.?kP&Q *<ؙLegT*d\JEGRyDZ i/dɾ)0LmBL3H"rf tpw&&+=c/n3L*wt]641 <™:2tR9/8 ͦpbOQ ~4ҺU~wzg/ Xd@e+;7uan΁ d'6K#XW0t6mgD ;=A"Y|+O+cGh ;eߧqkL<ISC뜀Je {>xL2 Q<ctgM;}l'B٩`i*xn;#Fp6Jw~,_9f(p뿡Z7ae]SMaQ wp)RH*3k3vG)Au%(b^/{NE,fE"2SéhJC1 @8xRm9(ODi߮7)?$+W)I,^[ nqv=A/D—9U:π~4HLD1o WDʡ /0U2zqx|\ l endstream endobj 3262 0 obj << /Length 1119 /Filter /FlateDecode >> stream xڵXˎ6+ X!Tԋ-P4 Jmz"%7^,93=edm,d}{VdGX]/ہY4U6)Įr2{osw^Dt::/M^i>wP*掏?dCEeA&5Ǒ` ASߋ{nzA4ώBlHv]p }ҏl/wd>|(B0CYSyss3M%uFUŬ(bmJlRڭ*weB{p+5bHZ@4Ȏ|-zS.q|S.߿ARJK1 ѥ-Jo,a*0izM˔fcD`le14Y QP枉"e]ESȐj %}]Ih:F 2;-dQDSY OMAџJ拦GB]ذIhHVO3ceB>,W޽f.x$K)^ֲZ1[JuzȏZ]d+K/і%Բ4kevdåtM{muuɲ2΋Z frܖ{Mɖ#̻#5<#Hj3f27JJ'/O!ì;6aKl+:BQF񱭴CV~w[R x.K> stream xX[oF~X%U ڪNT(! 
l"e^8I")Rr.RCnu/r7-^r{g`yl;ac"c% NgPΘkHʢϩo0IyZǏ"=zv6OE ;<[Ylb`e|j`02U:V´uhLw4J>&R>65C8 Y'Dk_I0Z_> stream xZKoϯl> daddshQ1/nR{v '誯ږJnbE~ *JViD?|{*ODpYdI>Nި437a DEvwtռH"ƙefFr]5UXG/i4y+ox}ʲ'BH?bܣsPcyөݕ5KЖ0^(eXTe3| u`ng㘴BJD F"LX& ߝn9@}Y0'ǘL 2[Mhb0ODERB#<^o<oDz]pq?3£C93fqx[Z(cYR >@]F 9u&7,:ϿS| #q, RY*/evK DlN+c Z*%2Wˁ^QBMK_l;l3YHp&Y #iÊ5#M$/! ")UOr f e4 D]GY&2w]DKno xFd j@ 4hej=M8WMWEQK;֠ǯ1$<%z9qd9_h jD!,e2ecGkɧѴƌ=`NCVSхH#c;;`,úLĊ|Ax#!gXHwm=Mdrx.Xv]˻;=JIJ^p<^$(-roH$-t!5/h(F/Xd//DzmrS[x6rKh6| 5dkS 쪂~@%ؚ4}ՙ6-=mtKҲ>Y.z. e]9-O%Kq-u}{3Z zvGdR f5 'n2Gהʒٕ] l4wp'QxQb ԡljx ׌?Q`EdGd7$XBRB7 }j-d#ACtpTt3($phZ.mW y4M;]{ mGA4?g/F:$Mzܙ0K:e6pY?-󔚱*/baC0ۭj aKa`~-,,:G/ 満P_%99JY+UG]R 3;Nk2Ndd@N]- =`x!א88߾\if w%p7egS8}<*}CpS[T\ yrI}dҋ]oձzZ*OE֕>zLRlRZ1AMzOo?EՏZ{k5}ʖ,^]ruVpsg ps{g"3b1oA_Z&T̽fae Vs:UK1#,R0aחu\ш/t|W\rUlv 27SbE%ٳ3ۮpO5ONJBi_7װNnUxPtܺ")t-JgT"WJ$ z+ʭ"E9z׺> DI z|+EkEcAB(^@+z$B-?ӛc) ?cFx_ˣ0}-Af2>>_z]E{O},݋Ӭ7?u sԍY?RJ@׺Ss]LVQV|-aSfO>mB+1s endstream endobj 4152 0 obj << /Length 2524 /Filter /FlateDecode >> stream xYK ls$Aw 6N29%4jnjǿ>U^3ޑ6p>4Y"Yd=*OoݽToN7 o8Ho/Ϸ`24H|K&.8˻t!; gsRHxuy:v&n}1< 5~"ѭ)OԳtSJiG; G6qTa/]Vz#W_*sKo׷KyMM37U'|Z7yV:e, ;3 uκYH3 *7L/`3nO-*pA~y4naPbp 1YC^Sͻd]c~Eaǰ Pr-s{XvUlځg ^f'{S^B(v V(,]}luCa.+@[oo`S ƴM~Ner2@h&#qhB[T|Ҝ``eä>ֻ5>2 B?_ ]L]9tХko{Ґ0T ?~E;t!<򝵔 ށC% :lG@_x֩r8}"# "gDYmKYxZ:S^- njx9f]m'wIeц1AP(mêEler {I}֘f|, Ԉv* F)0&|տRĿ :EQkp hP0[й] "hJV R4|0Hݖt ߞ_!h!F)VLOyrIn3&)E @fW*0`Y~6 dHyHSk>^uOօR Ap?W N4 tA)(n)0E0}U@(ŽI(r5 8uxKy&;cC@SF.ĝ -9z] ]S1  0w~")ǻ4Z/h`NyI.|3?*y4Uop(ubI@rjݲOh$,hDX@Yiep"Xʌ:ؐ1pQ_n/pAwx@:+CmIbDkSL?I_V$&zN2x'ӨHײsUG y\PipI5U/쉞IJd)U'۫΍MU?wOݠhU%Ol #WsU\ &&>P` iK$ƨz_'鐸GZG㥝1(=K=ƪMeO LD R^"oZb۫ =+Q{ԺZ0/iY WǕ$gR;ȝP˪-"'G80Tp(6 ;ث jE 8N1y +4_Y6'm!x#0'_hh@ᴀyκ*NL'f2T_ק>?w`=.R.J׺ӛNc% x; YsY,WN3x)+Nbz&5E9[*鳗tR#襀SL<@AhG:}Ȭp`(zJ~0^:LSbPacQ0y81Sٳa<۹5!Y1COޏÓ;+ʖ| RҸAP3?BgO&Y <Ȼ'fїgP:U aEpXd}J[6XH^8/?-0l %v0״ƒ$a 5i,@]S/j U9PAE{oo\qBPNxEl&+-0M!r%OMVqVwl2kO%Sj^#Vϕ endstream endobj 4030 0 obj << /Type /ObjStm /N 100 /First 998 /Length 2728 /Filter /FlateDecode >> stream xڽZMo9W{7 dA9mHw~_QlYܶ\n|dU"i9Uu?Ƴ@BG`0ڲ ~ƈr+bRgйON-z*H.F 6 H,vh@=hCw n=^Ͻ$A:/d( x2NCr< oAvQP<1h" LQykF&aR<R?4` xf .,wa :mx GDE-|c@Yz Ϋ 63ŬhWFh3,'ͨ05ۼ~0I [EUD˳\RASj!1> d)`$xDJElZ9baP"$ RQ>4zH.+|ngF+l7@r"i6\H5JI6Ff:TKDbUlaJ6  Flm-J΁in5|Nc0aBRa"F.S8ϔ7oFsUa"|_~3̭a==Az35A}oވ[n6nlxK4cu0D 4}ُ?kq.FA×->4=zĖ;>}jWrwadּ /fåY}anzvUƓiu+"""?Ę{;}Xf]ˌD}}q\?ƴa;];fuV0$ f|_zӴE#o_y pA`F/"_522@rݮHޭ %'= Sⶢn66bL]II&Xf/I&ܨ"daJTQ:+_d$(,@Z̛@2/!ΏHďp}[D*U"1քYrjǒ$!C&fd83?D~SƀUOw!&0d^fx b)zU]&9Lxpmi.D:""cI\"$B9Tj}ȤY˫[n_7~TLWLdU:E;`Dݢ:R*Ca "w@!/zx,Lwx <xNZ(L7?#a|0":F(捨y#/ oF(uZ(uZ(uZ(uZ(uZ[ %FlP eM˫;BSY$!idNHiV+DscxF['Jk{uޫPiu7 Ҷ歠@Qd! xȸ\FNkmqu$u<6uUߋFs* : ?GlZ͌#ԆUFA~x|Xf oL {ﶗ7k&{{k p5zjdP6EEpEEEE(DHLg*=SJTz3M\+Z. tV˵mJϛ (рaQ]dz9[c*4o~pѬnfT!Fv`,*q0zn֫ht2_TBj"*5%֗1ktmS y/v ˷ O\{/nV`4 I+"'kHm &{0jr9> Fn'AC`Ȗ/L"~eӵwTe1P81B7Ѵ䦙j  cԏCIT_ @+@nId& ?2? &ؓGn:iqj,|񶢷Φx CgpNo%I{UfS|Lڕ| ټ}:& "O IG阷 1H$ kqd Um(vX!2ǐ܎H]^H8X8y.P;,>y ˤ5[& >4o?!3#:B~u ǻ8w~ 2QوO֬oĀU}rqQr2bvvm~p endstream endobj 4166 0 obj << /Length 3953 /Filter /FlateDecode >> stream xڥZYF~j&<̃ |dYd"h-1l+~y 5bQ"Ww^V*HDWw=FWqxL_VM~|:GeyqXSl2NlfXr2TcWPg 'p-us$4Uw\+x{,QAnk{yik[~ޯ?^C`aIB^n\lݬU^5b/pRԮ3\5xrT_|UyzAU bOD6D~yo"MWue௶BtTjjzGThy Tun+tzIyBT3:;)P9bOLS_2(yp}3јw1z+SQiÒH+iMs]W{*:H(Twe)} ܙ.xHb\2Y8h/*Q9T>p0q票H.sA~x*s؎W꯷7: :@˃4ܒTj^4uge0#$h7|M%MT'싏+\DrnBdSmĶ U-u{*Oqtsw]? 
іdO "'^dkFy9;&A[d1{>Aza2; fQ q.ؑP!u,eKF>*\Lywotۛw}|ӛ_?5FJ`V1C5=m A|䁊K6p0A.~ Qq䍝.A0Hb~-T/:Fߜi:NԙxqۮnEOƕn^^ZkN}{j:}8LH衚C}i$ |T=4ZMOnj˿کbZ = ן_ܳ'&l|ʘdh k$uJG@U + — ~@geh93JXU<#5H24q)bZorSc\$1itҮS48l1GT ~r WǦv\TM$ bPnI,=/=%P i6jn>"7,:rҹ"BGC78XYa&b#z?0pLL,.pҰVm=ӰDa TjG:腓 ܝ^XR+& &%Hx腝9]fhp+bcؿBB7K#1LWT.]JUgloiy`b `!YzMN9^z9N  H#/,gx19%_'8bMu\`ȱ` < 8A%),|0DF5.ߞ m圃pAm=!Xp7uIH_u6%FebZ?m.Ē)p% )*JfRSJCڒ8(uihsٰ@}t&d6o ti<9jbyzpꚆ},纜B G\ǼjOpۜZ<eߋlR$$h)QTZ`!Qg1pHVn =h}j<ќ'G™np &A >m 7qG@8> ұo*aOEȮa gzrؤ",+il1Phz>64E=iz-}&:q#ԧ*|Ň޾Yш%_ iW8j B],mʩw¯/c8! E n< #?)å@i9^5@?;q8a c CFn4k-k'oݴ[nF)JƃBt@ h0:jWZ+;h%6 >jCG%Z̷,lRP< G >i}ߣ5=髊lg> ZE0So*zEtxjWF\bSZQd(KVԏKH ;VU$z'3wwPL! ;\-YLCHGzbΆHحDmS VLb; .ߎB8Xa;ͤ~ Q)/W{)W`Xj_~QajΡgH&~/wQI=_0_a%5A&d"}u勅a ( 8OAcᏱ`mj=TlT"GBTQiMC&dP+|7i6nP;}Wp[œKQ-z[jS/¦JeÏVYFq սl'}{'!]!'qf.|g&tz9m/@\VKN#4gC $mZ3ij_F`^8@Aҏ*f 7g>r)ӂ. ];4&[V۷h 5( P^o~g ]t}l3X;"_l4[l<hkiZE t\M8FcV\8 Qzd/^w*X-Eqd0^|gBlZ㜫;5K~Dwwkw endstream endobj 4190 0 obj << /Length 2821 /Filter /FlateDecode >> stream xڭZYs8~ϯPMRTU қLq')ۻ[3@Ę"U<8~P"%ZV"\_wň>yu䍯F#F)~0R@Dd'):"dr-==:& W)XRE'ca(a! в}R拱|1p`q@TϖuBtqB=Ëtϋr./zV\lwDQ rՋOFuNONHa<5[ˡ ]9lg{y˛13ݏ=a@IwI\ǻ"^diZ4gɫŪjZu}jLK1-$ηn+QX IntLJ#MHxqgUZ'BX2oduδmYҎҔF]Z/h'KoBsňա:ӪNgn4u\[=0j tw'-By枷Z)(,*P# [F#5\ S_ 8M&UYJf?x_nMg@|jG>@\3[|nB14r$hb)fgٽ%KZk;{+g;\t妿m;XlO0yNz>thܽ`5B3l uiΒl乣ٜo@j-AƮXQ+ `iG^jcL؄v&dch9 W <q[*n4Z Y*h%8FYD/Xtysj/ıBQЇ_,N֙P?9cZKGla7/4#A5#WЄ;GCE3[u?$-Y1b+s=ޛ'Stɹp!% KIF!rkI0!8udE3c\E~D7" \-\c΁^ZO2mŏ9b\ُq+&tpޮ_?$h ᘁE2G%N*F0c ^NZs/uj="Y ʨ _zM@p "._u2v ހnձ`,|!{DƳ+qqJAZm"];WZM$-05c"tŸmLZc.oeoiRg\2:؄njLC3ys H9Knȗ6fGx"8x3"`^8wDU@6g{kTBx.i^ng4-ԍcl}yn';ؾp@bYċd(~,s]U,v XA2ĊCZ0U&.%8Z=7d! "qtr !\<Up!)<ۇpv.|a_y0)ӛ"AN^)\t ~s/"Dg7w*k%z] ;BmHH֩KE]O'>}Wn0jRz" m{īO}Y۔Cs .݅'Qe(aj|Jj'wx|X7>L4&>=7wִ">VE]F JqaKU[*R1u_cy=WM|o 擃^?~itkCON>TBe8ǭ6?u]<$;9&B;zI΀Ck"[s"͚. VSv6ԼgdK@j7 n&P|r}md&fV,lH0ZLÜZ#7]nӺu/Q&ТFĖ}[jJO?=w/뵉4u \/\kmhm`1٢;XZu[Mw ή/( endstream endobj 4205 0 obj << /Length 1631 /Filter /FlateDecode >> stream xX[o6~Ee:)0tkצOmEWf(wx%q֠/C|e==r=̋Q^yc A{;7)ڊV_vy=}(cMk2B."|`a )5=z4_pg5BV,j%1c҈VLaO .F1GW?)֛/j:Ee8> 7.M7VJTs[jok/n +~Fw #'bb̺HK&O4PbݤZ@TJ#S:tya͑zPiE2}jj LЫ"s9s1GE=cFF(^ʴZFܧe)LOY<SML.6jrfYQ2LVC}hkY^VZB1PXۧa:lDgCq+ڞːXZT\ͪ^vEwFr#De+!XW2-`Ue]f+#۲ĂchԾ|S]Klo{t M nYh+ߒ 8ΩvD3,fb|1 .z&Xٌ4.prd揩3XcU9yz=0!0=ҕ(Q/L޾^bq}Ԫχ* {=+KVA6`Fg^`!EqD\A]I*%+^OrZܚ1пI0sȳ#JW~]lBV~ ?yW*K!LjaLۃiNL93o5e:sόXp!f=QSPG F7Shm=LYTN^q>s贡"d쵰 ӏiK ဂ -rt1TP%KvD%ȫמ_=g9  ,Lue=glMӵAyScFH9YVNSPm[P 9pc'7آmSsi=1EBE:R(b^SK'P[/:-Hzc@`!F{|#LPDïOmC>كYa}awra<>{B1S a}q6 Dw?3  Pg#)KBJP@sVzYޑ"m"mܷYSc~>QV&iݴ}\Naa'g>p-|W i endstream endobj 4214 0 obj << /Length 2440 /Filter /FlateDecode >> stream xYKϯh%60Vmv [6Ym!Hrwz֋z3Y`bX"*ypwF:{<)LJyIm?!4VG^ s]l _p33aBsw:a .grirO[nl7vg ӷؼ<dlmsCVg'*aM]pQy]VSL;c3CS4l>˦fRs/%w-ڂ?vyVohĉ?\|ƹiʪ"԰EİK\d}~_6"dQY4*uYG3-Rf6fD^*^exf?(5=,s4UEi5=?r^S2lMMݖ99|_8Pao/ff>9 tyj<`ŨM7iM^?e2ܝkzr;$3S#ݼteէ$+ۭn]m-E_"vQp1Zlo[nqوx\~ΉVZShLDݭW[lHmOQ# s` t3#3h4ಖ7U0=>x(!Z']9eՑ?,mUZ^t;Ǜﳚɀ 0 >VoUw+d@ ZAm׳0Pv7UJ@$uq ؕ)v}]0:`W``PؕnYpi 9WʟpWf di3lLTpJnYNJ 21N\d<) ZL߭ UJP`*z P!i/kfY: }H#Ǥ f~]"\rI"prui)s2˴ԘiV&98B B1Jkw5%pIOpJp "Xˁ2G-C/3?01P *dxS!].ygr*?d^TPVr1W*xf)uVbʎjLn8p_kճJ*!_w?dPDzo2=e)3M5[R^Eئ+ϴ}\,'z>cSAW. 
6xM\ R_;`_kRdOB-;_]+x7KMyc3FJ\{ryyhkfl;dEI/msv2|W{n +.4\5:whoo%(@x'2hu.Qj/ )u G`z4]2 S% T?0V9AtK3{u).g{tcDZ*WׁJ)xSY*̀TnH^IdlԇwiYR8_BE3>W{P@r3gIS 5˙B}NB?XPh\X+2-H!*14re;ukurNQR]TW݀S5MV x+{BBVc"wb+ܒd& #RÉT6\,*J*0e410qCPfs9^ #bl& eKe% ׉rQ!|C`Jn%|[п9C\0C^<a1p{a9呼95hjD|1L9g/= -` s}kភzMo{mh XOҞ @p9NQ=>H8\NxGѣS'VⶕL5I؜ #pHJ*I7?2?r&۫M ރDmx!]\$7c_> stream xYݏܶ_Cxeg ?8@4h>$b%_}g8C}l7hnofpwڅ}uX Od>D*JvAuǾ;Ex6Jg*O4a5ǩBg!o;4s KK_\d2bivך3SھJMGE[ }9KiM{/11A$X$ԛ^toi?T*FZKgEuөsҜ}/֖(Ev8uEs7D-F?qh5+!~Ñ&uO {g1"ܱD1E3xu-Q 9u}u!E +9QU\24JwM~ C >^Iв.{~l*p0!G';U 8Qu94ELQ}/N w71탓L cbU.V #8HI8g' iP橯5_UJ0|,v zZ( l͖LHx\o? g=*5. ڋ.=@ P)8D$j(z  1궬}k_;uWgtR?1=Z{rsJB+IHhD5;lCOFш,/6\< n4-)lS.9S`u*}K.\[}c&AHk4g(-̲޵ؖ؝c@PKYW W 4^ Zl֮ f!]:A#X%)KB.ԬX-;h-j+FeЍǙPd 3a @vEhK _d#ҸX8+Ҹ%Gm<f:٪S\QJo4]._K׍_:6~0 ldw~@'&$7βHJ9{v@wkDA*ħQ@qy6qyLEˤ^7 CbLtùjbIϻCt} Җ3 2{ +]Q̯:~Lׯզ oUAuMzb)5& "6HT~1.ТetL$ 5>SﰤkIi<`rcvw ͋1BCli>iP` J *s B[܅r}ISbB~"8 e<ݿP®mdv_Q˪PŔ|g|!L*o%j_nbe @4N(3evFI fI 7O^b{_KW=Ƭ؇m\}v 2),X\Am^Y>Ra`^IU7?TC)N6Ij}?=FbQD,%l ZH+,Ҽn 4MC58WŴ*GmďPCx\O `Z}mxyݛC:^$U?_lnctWᏈ=j?KڻZdžVZcru|>ЦzDS%~] eŐ?_nRtI*%Oo~="9~Ӻ ϊu"xjvky[Fc4LV6xIWm6No2 endstream endobj 4256 0 obj << /Length 2685 /Filter /FlateDecode >> stream xڵZmo8_O{2P3")Rq{]lbM}-V+Y$7ád&vEQ")r83|Q³YxëW"}4ꌇ!Q|9gLn_ɣo)9*uPBnR؝i-| wr\Ľ;:%Xͅ bȞm~vcK3lmMvEE3`ѼJ+OZuַ*ֵi,,Q7l{WOrVY4 ۓzzd/, *ZִYl6BeѴz( *ܠ]ˌ)9(VF" iu]8I"A TŲ5ZFح, 0z{)Tms;,# 1!H$|oڍ͛?pK@(Wo"jʶt_mOa(Z2榤3V`?E{+B.׻_ą,9QZj~JB &+-^}K$V]D<yyܥ8;0¥;_,Q9^]XV6zztT9\I8l2xLf#*t>*D(Bps'I*bF頹uH`:NMr^QU{[g1T NpGWd 8(~(jPSpz5ʳ:?]q\3q7"Yh.7GޕD;!n21tFJi4UP G\l& :ZٺUXx 6+~i0ǫ+`)Mvo8}wwf{i}xЦJv+S:C׳TX,>I}aTe}2JJatTDݕ.N'xro\ %I?jE` ba)b#i|"'æ 2ne$i(+ %W q!Z3!C$, u ?AG+]cz8/D֡cH39$W?U~6c=v}b2+SL3.âG(8ޟ'3 飬H% eCYw&Λb;&hI6*apm!]&ZPizi\YCN]KĤ r5wDʕZyYKЋ #5}AѰZFOeOd)KFZ`zh #H.GZ0I!N: Ԑ7ƴ?g&`+;ndĄ6<_gۅ[a9Է~A;V~8>T$`h=wz_Tn\ݭ?Te͏}ߵهW[[nG%M/;d-| 6Ei_׭vuhZ"}Wc4Zǃvn .apQ %.zҏp.@UnBuR(S~g`] E߷ 7>k4}ј9@% _1aI`MJF|x|ބ0@B0\(XqF!2(L?N<@>00Vyl?oq#0@NK.Xa] u]24sAkI(1Zh&`MC"ٲTrʖo/_`}Y>3Bc~UљIHϺnM~<v6۽}7=Ad nŨK;2{(\ChAgK 54v5Dɐ{;.NBLtG!+ޏq'WbbEM֑,/Ǽ?ϏӣpHxw OOvI;!4 endstream endobj 4157 0 obj << /Type /ObjStm /N 100 /First 1003 /Length 2790 /Filter /FlateDecode >> stream xZn}W!ڗ@ 0$1ТZ--s9Ȣ$k(RRK ` [w9n9mPiE&j NX]>CU$E)к&sgNh8BG]:YG hr(o$a"B1+lX#(a 3¤.Dar|E )e*YZ.GJ6XbIJV m4@ 5Me@8GYw:r6iV<Ykfb$h踢T~aYaru?~__ᬙ_3lp /G7X x2j:ulcVcJ珡RX^,/U*KHVD2U"*LJ(`0r/,` (}&'9nXƋY*cs]vHYNG}f(y2BO7 P\.x+# {##ҙ}l<GfRՁx,E7/颻n=>fUi/\|ގ͉9y̠v$X۳3G4Iı \]?U Y2W<JhO0ň̑J;ь^R'Q9tfƣFN1OlG@o0k(W&L'kAyLdy4b<.t|m5 G'\a^ F>fv:·x#$4.j#烱-TuIaF5j@7q~axycpRpZ)+u`_Km+qٵxSMWnG]muh]c7Ʈ&D3}no`bi[%w\:=ݯ.)Y<DRےx2T3%*{(=LFzmL!=wlI?޼:gjɎȬ^lQ7 W y=iۯăwnB5r3b-**԰mkض 5[[W:3:s6lj>}`SM6ljwBיӲmHVd@5.gֿtI- y!FD|/'fz< U0ΆBv9-f10c )%spdٌ{ 4&'.wYI3;y=;TOg`Hlv63nOȌpxJYv`J~-Y=,vܳΐF͢yy* e -ޏ\c1tM;g?M!s$k"؉Y~nz6$&8ӕVlf*˴M"KB ʽ^Ul=O;įr8C3 nsBNg,O8rb(+[ g_VFbr]H=M@u$ɸ;}t3> stream xWKo8W6P3$>EER6[`evJW~OKN.#@4&7g8x{?m9 }/EiD#o"/&E~- ˤ3k凋 i4͔OB4fЎz3>1 z>+^\(P&ތ(I^^B'5,k+;=+n!>hm3Qr#ﺲhw&F!=+;^k *t[^He$>xQp‘Ь-+հ0W\l*מJ#`GW04j8AFI E8D)|y pցQ ]˰ua!L WeòVzJɚӁ׼`U<aꃟWP.6|wr jRNzҙ pq, InKQ) 'VʚB -HK;SjZ#Jq%Y9wN)1x@ӡv!U9)T&rR{=[[tD1иN&w5kDf"[:k7?=V4 T $pڏ'm68kn/zYVe++VÝ#sbN.5Ǽ=h۫{^mE dbF2 q0DF_t2rdB+կJ%@f=ӲI4v:wu| ᆖ\E>>TcVhȤxQ00WFVɊA#iG3ݖEߵ>nل rי`WiTAL*t$ |T_VO@gb̺?UC5GH?fɊ2اȏk8ʓ6VD۩a;E ɸ:kq{ ݊3l/ 3n/q{ ng=~?jjZ endstream endobj 4300 0 obj << /Length 2045 /Filter /FlateDecode >> stream xYKo8(UZ[C:=,YagulWbˮNg8wn]*HH)|u|/XE,Rnv+9S^B0MZ |5bU`\ 2}2G7o?ʭ.|GEvl>_fE~eJ1U*TX21 ?>QZh|;:%d*:][&`_NϬfG":bV['ɍu~q[pK-%ޖ%j^"nQ%䖅ܗAm\mYYmY^颀+/n[o*\u@5bU\f#Rĺh&/M3Q=3qS[܏ ~G:/ {j+|cEw9יk'% l9_,UGW;iOM ;&%#Qh"1k#z@'g<-2șDwE|q7F '8?2w0ㄍ`*uATE_QGե|C'0! 
ώX> @ ;0%@<0ƙ]b-)H̱q XТ [䮒uk^>6qz 0 reBn|<inE{89XN+.`vmFXta74m1UKRBDPUoPڥD~1"8ZfmTjIeLV22'xPM=8~"108`n`ؤ4ָ-ܜ-9H}T[JOG"qk1-=͌ M!}$`h椉RxưPeH, n 09̋^4JHV9) c9̲c!:@%N,Ca $,FQxʵ88k%QoV(Ma 6$P`v{HNƸ%mRcf𠑭ڄɶL3C3G=9vՈ,Ug4q!6B^C$>=$]YƢKd ?: Ɯ`W)vFӪIvhNV+Eu'X%vIaF3-pPA4>%a@meG飪3y=4/]7y=0zT0s?ݕUT` .`G;Bqڝg>DA7iGl}C7 . 0/mޛ{֞qh>^u.5f|ͳ;YQݳ<h{ wMΝk=}KkoP|2 'J@'Կ aP%Fv'Tʎ>Ď3v.gft*u1Z(?@ýWeThOzw)Bοlo_s ը^^VZQS 28/tW5e0= kߛl.;7MB/{7PR8ϩH^~DwZ^O7S6B&GЧ=1_ ==>\Ͻ-pQߡ'nYWJS2^kb@ s+8tU28zi CopD02% g$PEUN&]DзW:٩?}g'W~^,dʯAȈaxiR&?-s7~.)\\_&|;ς?|ig˕a:!WǙOVV endstream endobj 4307 0 obj << /Length 2590 /Filter /FlateDecode >> stream xڭYI0260VICIgJ YmedR {+iuX?Xr/Odu_(YA%*_WS OwtBK6#BHΗ2٪4e0Aŋu}8p/M< =elO}_{epP?+OB>mTukѱ^y\;ن, _|?e(Tҍws9^rmh y=R芲'$Rm6lAnxbE T([G)5=.Ecyx)l ;kB-֠Xeoq w$ ߲=PFq¤2-xm) >}h^h/qϛ 5ն5sm,E0̼8 'E6ͣ0 $׻ dIӂ67} 2ь4?YƦY`.}'U=gẶhtz7Iwu 4 ht9?0 )G(2`3q@>0ue'@1YՇՈ(d24lP\.\vsF9W鴅%|ӵF33$2,< svoHBǤA㵩j )o䄃!2{ei[P $|mf\=@{2!ԇk FCc۹r l;-Y )EL)'V Dž|@ ǡ&\Rθ[6Ke~9BLp~">]Pd+hڒ9`K@A&:K+@Ѡ6io!P4R>P=h^v!q%;i djֶB One&׀~s:Z ;Ctr/ZKpͧJs{.B )YZg?RJ܁  ؋k]"&JWTeTxQVK3\(X" RDBpJռؐDP} A\rm ѾkOy苗-#WtkKc*N3|&FiB% ;Cȍl|3uzz 9x,h&B ;c]ۿ|qAai[L$c^K$aL`n3ٛe3u=lϏ[: Ga6^RǓ@SȔP]|V 릦 F܆2DZHuP"5]*CDIOԃ %sOʾxXtc sUuo/ ^Q$&y2eGi,MVA^$H~p-?@Z*ZI6^YHc'rV1=wl[,ΩR Vkr澭?i"WTe{']ء#p"- Dx.td9$I!jW@o|[UvaԻWVÛ# <FKO$m6Bt>,0)'q+eAi,˦ jX!Ńl-$1` qo^=%SB l_||@>\ĕ@[R ܫ_hdeчd'Fge;$Wc|ޙ~5H1ň ZckW Y/0y |Nz/fgpIx|Mc#w 4 _dKͻslvכ{.G4:SbOwܧ}-tg%hwmۏ'`)1Nw yMρ^oV/Ÿz*~~LGWVnIvL|PXk/eFz$FH p`" &— z=g2=_':SJvPj9T 8'bL48xM4Cs:0zeW0f=#97KQS]gi  53 %1*}(׳ͫIf/x> stream xڽksܶT |uZi;MgΘ";V<ʇeC{׉gMƣX,žBjl vV<1#^uOt͚ eƜ .-JOK k?7>kv]r80tRVXussHtk?%vcly`v\bQs}mIܾ $*WmԤX!0z5i @nGRNc`#/8q^Gk>S| ~GS(PP*dr痵)Y:O:ZR̤HH?8iw;zɈlv+f*m fC5AKNj?/\@&Aq/xO4\yбӲVR|Qn!zbJtaw]CWBQc#=g\%G03q|y0Sha8 ș|I Z3p@+/D`ScIPɮ%.HR1Jbe6/vC!m{@=h/y:*b=rkg_SOU"x5FgvL.N6\V/4*xlVc')?A;>p0`[`< AWv-r Ylf/G.FQnQjX>h0@/ưxiywWzLE"4W3qF#~> $BAxB)$1̯!Uc | cujA)(qhƍZ^h',39TTCԘF[߃Wb:ٸ (mjaJ-A.>Vn֞n0eH$E@mYgס B\ u_vŁ.kpR HA`<0!IpnF9`wZbE\ZƊWT)]fȥAWTa5hC)rCmQHaV^R`֙JX#/"J+PNrZI]7r)fWE#QTU$Ἰ-q}oL*('^⟢D%|zdtD'$K/IJ;s@cﭒXmCnQIBfR;(<Cr &HG1@ׁ7^3vUڝ##'*>-Iʦ%Ъed /r֑v-A NtiQA0c}_ܥcן> stream xڽZ[ܶ~X#.T ?8Aܸm] Mh553T>ܨpwm x(<<;Q7zsy7Th&:LM~sGНԛ8< <fDhtD/̙oMf|eix7(Qin}=vDcJg+ZeTfk 70?P JW3[43aj (4)T5/WB}ߴ]΢M2YS/}6N 9)f ' Yu3T]Y,7IPԍ [е#mV\sUlU(mwPNN`GIhw㩩6 .& 'L2ePmiuvus? 
1OOcr0ӵ=!@=?mDI(kK]/qw)J_ޖMɅYpWK3ʠq hʳ#'ޱUwkyi` ΢kJeV -̒p> /.&@K~"_(\ÿ9oҝ;C}K4 t l!ISn~/2|]]!cVg˕ XW|i5FeY^(clMbBR4F6/N#!k-Mo^ŏ{V7ò f"5篻7eap^:u^i wT6r~wBQ7l[% 3(f_\ͣ chq$>dn^ƙ ?r 7Rp|-'sZo*vd!(^ĤۆGg|Z@q,+$q_ud}Ey%VB/.ލJ0]OApva(\߶ 5Xj)m4`?{ߞN Dlj2p$AK9>L0瀉e:KoLea/B:s)eBt0(4T6,yI1b%5= $0$`j(Ss;Hը{2?9EȊl/\_cG'[bH }\qʵ RH|=H UNE%)R90E_y~W*kz=JMĹ&5',8;WtI#\Z,IoAP~`SFcW_7 ] /+:w\pH:I=x.~+B88 ^IQ Տj~jzϾV7TʌT+D`H'tp7rniB,̮[s@,eNRu|:|;UxYc7c9F0ks4iTu_W"FF̚'ʻ42#̝XdJdOϖږT2-I鼈^)^˭;!%#؃=po?95K #y!}QOs)l0 եX:|diχVTVKҳU:4Gi&sy ;d,[yֺ 8w_͛CDm ]K c[էuzsS4h~Ҍt3*z>x38y3]ল'4odʼ~V:flZ&Y|վ[_`3> stream xZo_!IF~/yER]q)Мq8E6THʎ{gH%Z0of~3]/|Gf)J%1.g$Y:X1g> 60M`"3]gh[z+ JJq 7p̾s<(U,#RQ'I> 4X`$͎H&RS`c$($:[Ux#B\4"(GDB@ǩdT(-Dc(Q|"Y,K z`{=ETIɏlo1RH1aVV"EDQӘgf<DπJc)DD2JGؾt1F_W" *LƼ>q8NW=t=2!oE?F9ƟHMI r'h77iCDi 4pɝH&)La$7(XHM4QI E[D愤(ՐGӢLC@LYzQa@DL0x):43ymn$*Y-:\v= R8xX&AD'q&6m*iV̖L s٦S-/VNfT4tNW d~"bI;N3<;'b^ٕkۭk}ެ݃NZUe} rRvК<lk&5,I3]5z?kUd^u{ MqcEwYQK[ťu**m< /v6μ4y>ښL -&$tilU3hMC1a(,d9( 8hb ro( 7˳*OV!%)!%$)AUnuݫF]^}6WE]LGI𞣟N{?0§.B{"c'<@at$T 6ؓd %lŃm{2:e㬞\L?DGntk䌆ERkit U̻MTvƂL %$]Dp{eIWua:ت\ڵyllS@WmLΑ`"2 7 {fh67!j LN_Q;4Y!/ݘwq#JN/m:<N &kC8*M}V$j9<0%tUsoؼ*uiK]L& ]l (ۼ,ۢwyvNJmYcU CJ>eKd 1!84(7C$(|YYYGe6Q<^ԅ٫3/5n6MhyחkȐcbyI Ⱥyq )E\GEMlQLYYYg۲`kSB:^kQ_Uy4*R뢿nV_uca: ]Ȋ\T%`b'i2jWqQ P;䅵%X i2Pێf]KvݻKbn|P](oցY}@cPK(Dt qm#-e}a%p-Lm9+4X26m.kjĵxwJ BK]laj"ZpN=K)ӃgH*.# 忊wWYEW E6kE[}z<iKMqa9 ~sui~pIxrV ܱіu7Mmǚ@#p1؍3/ d;o(zu\8mm Uttnݧ8^Z'YѶM{H}O͟G@pz`mCpۜ$\޴Bv,}%ǯ_Yg9xfuZ[ܵ}Ajڞv_UYm\.M.3PA8"}.2V> stream xZYܸ~%j`Cg l`-g䨥>U,RGg嵁TQu|5_ۻW7Xn%8g*!X?A{l~s&U$eF%ߛ[5_d> =/{8Y0YJlTAUv=>&mUo#x`ikePR׹"/[7 a ԛ {r=LGC]vZvznYCݚo˼76@np:TdiNg-܎]s0N(uEצ2SʹYiQ nD4->sv2"6&驲s=]:ŦE&X~{UX9q&xJO`"W u^ qxW_S“ j4L7cS躬qH]MkЦ?碀/]ヾ< ʃ]1{{e^N"6`}>P;V QumڧtZ *%eCTՎߠa޴9I2S^jV ;% vj4Qa}bu*;DlmXxn|\Rr;HD4TO`[z=\;AɃelM1;~O|\z8@~/2jiP@ضMRn -eAF m16`%ȑC뚺9Z<8ryghl0efs]l3z]V(ؿQƄ Χy/Y8U#`,&0-!E;Zڢ8YD-l[&u=M9޺XpVrqkleHYR &S Q-h!O_C]#wh5mWtTmsjd::h7_sitx'wr@ E ENw~6-dߖ0hW].]U'*Q j̈́,LF6e3LKB%UŞ* ˓%_fWo+ɘLY.*j~VbAhP[YЅ1+#@`=w!? e.QYVF<\Й% "bW Nj"/Z1dIĢ@]yOѧb~/^Xph %uB_.N,hL`ǪĔ*,)vL-d"E)Zw<=::bB@Y-*ۙuͭJۋGfA"LEj~B#=2Z'\ (BB`*V8r@ڝD؝1>~pQBK3:RKao%ڡwHDA=G l!3K`@uMagt#BTҞ!j6[NQ_;+!NT)*;L'G.%{hm<ٮmeuK<<]b3qa3 X+h1iH`MoHP^[ݟwx'k'DKc{!pxUHw%EXT %MDX.s6,ÉɑhAVة5>/EΔq9uN?Q|^A endstream endobj 4262 0 obj << /Type /ObjStm /N 100 /First 999 /Length 2565 /Filter /FlateDecode >> stream x[n9}Wqb7 `f. Iv7xG<S(H Q&OO]8#>$CUȆrR !:SQ ?wh$&:FL&w!":8FC.5M86IJ&&ź.RVT k@%)'t/v&3 䘫qB@BUWز+NpٔbJ}řICȔrTJNuYY>P5,j+@&w9yXbd޼1fǩ5y=W~]Η03S4o'5ޛѿժ`x\ԇ{m6SєȂO'sl{t<| am/D00pү`ѻC0woclZms6\}Yu첟eWp{DACzONP7:Gv\;8򸗨E0߄ SOyS ܂dnA2 Y\ڢ\̥\̥\Ґ1g湱X4K'Q)ژ^F`붴 OhCE|zk8؃8\o %8mK[8WH!gCN#"-W!2q< r+@- Rq89=88 'ohK8Mu?P.p@i琘I (VxPj\ Em N<qPV½>0KsRמJx=nK^B!?d((<1XTp`}J9BA'<{n!a%>hE9TK1_P_ /7dbXS#[݁] CDw:|C0q<ơ8(XKzHHdڈ gJjU6J"곴Q^w1cssy@]5.y̖/bypZ`-.oE^N8߂&Ȑ`p@VsڄbJ.f7vv9m-z"fESjpe`P`rχS-?'xooɳ=nky @_lGQQ/#^\ ͲY؄Ԅ܄uC,mfi3KYf6,mfmfmf^u*i.l5xrB)gm4KLyt,;L+VKIvDF3_vg.iERObYb>,HRW;4m |;tn`$بE7Eq=G¹` kÛjjKT 8=lz<  ٪SJTH+n_û\ McDVF-E'NjjX,8rp=a7FdXCۂY7鯇C,6! DMk:qwi07Wn<zؖ6SB7fW+N5 .}|U`j)@}騪J6 Iz'q\b+ć,s iw_z.ZkUN&y7/7;8r\5HkĻtr}TGв,rX?).)7 :  ,ɤ_ۧif}Pfj$Z9nxCsOBwe8= [[ yq VO#"%ݝd:=qX)?־;*=}7vys>?y> stream xڽZYs~ׯC*Hଔ,GXL!N*Xpw,kKk KRJUj0sun^|&. H" _E~?=Ey՛D9]UADԩoJ"O&V!QDG_^Gan6Q7ZC ˉ7KܛvǝǽA4eδe [z]ao5u?C@_dW}Kz#YU('1" ( Q?ATזX.@~8پzٓOID}ugzpvmZߢ6܂J3'*iD/nJg.6]ئ t︇h aeC*B=ޔtX| AʛmcMw(Q (Td͈𼙇 ,RyUz0]kvAd*'M⃭3F5/I`O)LzB.&a} [-\Ϭ}/\ӏCYC$"I*[vF\|I}f3糙]3g"=KTX: ij| [лk5dֿ@k9:KXFYOq>ď"J:s K `3:dҋ|WXƾ"nB=mQџk`S;?+;񩵀yp[}gИ=  ?d_I~_SGIԜW[1cȧOxWG6OOh^kۏZ:5Z Rf0 a"ݨӞ>D+x?™<l#{H{)Jo_7.6 lc g sbDIAi(N¿%|rzE` moUw@ХaGwWfzRfPa^Poaa׍L&gFq/. 
Q*Dt<=YK44n,Kޮ9.$7f zP)}®lMaH*cُӔhQe b`XW*8QG8~=#[9ze&ō<^mOBP6k)Q©(l-x$^^JKZemoh,oZ3$5H8<\dT(?/ :ߤp%-G nJynwnA*if'| #6"i#S_bA\ܦ<6c?<&`G9bI;X]WCb v}4]$U,1WD@xw a]L$mN>aQPEKes(snZ&_A^|H( tJV # W{j^aڭae(u9L#f݊1TL;τucMǚ@   O$Q~^.I \pRNj&8'yG[- '0bsy( :(9\WW1J"RA4]0H;()K(l]7>\rUIɓEPi-/'Egc_pPU8c5~ s\VPji a펛Z0X(9ġX1>$5RV䄁 x9;tx>PkŚ^_yΓ,`(/Ji/{1/ְ<񎇃=FlspW1}͔A}[4\(D5.ay$PᗄJC (:Ju`#ꖝNCY E {J l2܇pPYue8@#m5+ħk_j`\69a!3X#jn{wIOFvӲ=^ـDg# w @f{48IG񊍭b<_E~~8]'lOфm, d ޡF_tB6J,}6WmH~BoP`+ % YdyνO-0؃v._KNGR@'Fd1+U`yMp`=J D@;Re}'|-A;aUAa9ަa(tAkiep{,Ym jlXR~D* )tD0HNrtMO|6M>ؑȎ (L|XEK *voӗDg$s2ҙ^oABQs3!(_;H~'㋍$&V-"rd4`po/]c:{o9* -x^ ̴jlHIƴOg΋2[aAmX.z2A */pCJ,Ggdjߘt0ځ8E4 B9NO#]lg@3f8,@$2lhT"Oq4 [kWC:qReh`W߶_߼/ endstream endobj 4389 0 obj << /Length 3144 /Filter /FlateDecode >> stream xڵZKﯘS5$aAN֎Àw-4=j#J3;S/%6H$>E_;ynyՇ7wtrSeg7oTIJYR|o~sww|FY%0#$NM$oI^>aQdow̃ӚjS:0]=u{{諶{@gX{supo6n`b(o"\~Go 7܃JV= ؝ wΦrc/ ~k㎽=Z[}AOáp%QEGW VK M뉇Gڸ=_s6GuLo‰RZ+pI}׵塹Ҡ놪'5@'leA~b^Fa *KLda'0-$̔*ް4~Ft ۦQ|GaK]tSU()\Xh3-2(R7n$JFx2AR_i<n0p ρ^r~]kxNAjpiɺfxhh.z2 4t;h|/T;zB7Q<يuXGaoǮn@a×OȉALB >P`G#ICsvxjn8WcMKjI /}w RfgKx sP4iJ{ i` 4؇mp`Xx\v* ӫԆaYĞ,p^hC?yyN}%C>LN!2]of9Mn=hx⸿W t%rJTA('_y<%,']׌ `Wqo;gJ~wIwMB0M)ގZY^7| QUcZmP-'F-0qhTȰҤ0sWṟx`}e",x0꟫NeH]))…q 2  ‫I6M?&csq3O./0nyr}Yнe 7Ⱦ@-/]yD ~qL"`L,A'P،5{yaji}%KV CpŲQ5>r׳`K Fyʆ&DqNߊ? DMCƝG y?Lˡ1E[ %I )měj:Jld$S-H͎Gr(!R&Jr KVDuxZ١p}{Dw  h. d ={R4%NkFM/Ϧ1:t=F0;vwUpA "T1ܩ+96;ےYd:lZ4"`[)L I'WDRh^x:B/PGVsr3:O94q֫DrsmȰ c-;qg?(P,Ait>3ËoX~h@0fs( G=`GOn># {w|5@vpP;)j޺G ^LwA$'g'5-󸇀 o[8΍1 ^RO0VG+K[w­|„mƾ8@U-Sp8Рw*$tsc#ߎs:*wh ?"a:w8|dpT*_Q|@DU.-_kG%1PI q…jiʇWlsMz.KLtc^CDϠ9v5nARۇ-XG :\:؍ӯ^MZsQ%lJHH$s4> S4/d[4 7g $,3AUN]bq>O15/"̤iWlI!jY}]]qeO+"ïp s nz84K}qwzwZk(QX6"Q5/L/8zy^ɳP,9K3M+ L!)w(sO;YA>7Ve!N1=\ǭ4ĤW/:D[Y@iˉ통AtQ,Q$ke`PvQ$ 8RڛXY&w`4p3ܧTQi(_(d endstream endobj 4395 0 obj << /Length 3126 /Filter /FlateDecode >> stream xڽZrH}WiFu I{|8$llKj:Gɬ,hh!/jP',vWo9I4 g,*91ljLOW'P^8}hf<8]ILJ0(..D4Cz吊L‹r+Coî=xGtB_p+p9U3fлr{y{y9ZR<11"F& HK] W*PјXrZ{U .oltM6Z]M_ nS$ti}Xa&7nx$Dr U2|zT=h_!Y ,g:x%xڶ z[:Bm )Y>LJiOӤD))ȁf?>JZ f` Ѱ6]O>A4-ϋ|U->J75] fX|shjIa@*/~JN)%1)CxE s[H(("9 /> iVXj)U& >}.Ho2II4ϥtR=JAj|BkH|64-dʃKWoԉ9adιaqzKVq?6Qr[L³;Z'˰ }/Z@&)p嶭0sH LhHwwߺZ^fuK neK g+T`H^V0w#g?MZX>T# 08NDl@[Ohc2p v Uw3C^qH%<ܘa}{9Dp0lT't|9ag! \I:6TuyulW]"Iga Y seC Yo1]xR>08I0np'r/ JКTZQLu4PnL(([Tr~' MhѨȪUAZ\'(++98˱: ,vX ;S^=2Bo{2jEx*cUk_ЗT,&Y31a12ہ(tmjb( ux>C%TNAATIḖ̲}E;\v/~ /3TOB˄$~/^dC eH gD*˗q5|M]]_zBAOPQa Q챁㽑EL'ڂMnՈԗ`HEo05u^m6{mHWo{xŠSbUّr\RW%(f}\`o:Vbm R:)W#[F6h +[EUQn7U> M /}8e.Sk& !%`rW z/, E/X^ju iU$X I_@.&fa`MIb\mgA` 5f fC gԃ`sE'q`ه:d 4>)YZ LİtT0nU̗C!,"cF:>JSFX$(WZf]n!贿WX _|X}!ާ;@pq49F˾;, x35be_tcb=]hRtliNm.'Q?p?W endstream endobj 4403 0 obj << /Length 1591 /Filter /FlateDecode >> stream xXK606E+k-%JԇHYH-6QIT%CO^oMhٻnxQA^0[:hv~ʌuܿ}-{8 e3E]_]!P,%šy,FXsY79oȵl\п쿷a ]uC{NƤ%3%9H}?_DYp\7KbAםE6':u  ,E lLF~TyV|pVʥ~|Ճ";Kv~#]Iw? ~NI ri=) ʯ$O.:!g%6^ k drfJBO /`V/7u (^D aL"Չ<ʴ8^!!ܔԋ/ ! */S"^|'%43zVz.ݩjy,[Ԯ]IHkmd!(h ꧅i%ŗQRr?~1BJ25r5[H!+^[Yҍ&1ÆHcu 3єӺj-h?wkY^b[9QEA"x݇ks SaF&uj4J)tÇOke!t`}3+SR-Ku}q$Zy#:IJ:At l(~qΜՕdt"m4B< qCPQ6f 0UuQ7OF9֒6P;qmwRTA;ج(6h5;d6uLSsjSҿjnj^iK"Qޑk>Ui#ȯLx@!d]+:1+)t'ˁB>΅ *^<тv͗Ƹ*h6Ge?@VJ}zcg巳ex5vg$1"ܵ]tv}c6)NeFRA\FΞ9:TfxON65)r2rq|I`\сzb_z"aSǗ!v/@N?D_GfI7 O) FQhx XAktMq+#=qeg;I<\Bi&b:VS+bx(:_|sv)s4g5О у!u[|ohpՋHtAL-NJ"W)McDIxGJ8+QҪZ̡Jj&*`B3t;M&ys/ endstream endobj 4410 0 obj << /Length 1078 /Filter /FlateDecode >> stream xXs6`i2 ,a[.ILd Wҿ0`nvH>{k,"' 恵Z:9ujq_ouC>D T%/\+/%lZĞ/"ht\ ׽z6Lb5$Qp@v&\I!qpaғ ?D^W1fž!/!+-LS /gj XYF :ҁw9g$UsTpNfh0,W)'h ' )MK"s`'K(m 9@:BgE:?j߉ 'T)R۫7B= BR\ S$>_tYU.ϸ´K9. 
RrP{J9%h{73 ˔G8?ݛڰX2?Q<[WO03^j2c~6ܝub`NVmvT:'0!*uIb!GMQvZT,SH5RX`HGIֽkLk*bhW+UM.wtNyjLi1-.8 ?"U 26i'}k,PwNHsHw `C&*Ly' pO_QWGf 7q;枰C̦sٛl9pno|g$:yFzf^Q }C:Y=u:C㚈g[#bXc?FI8kQtuS)1ۺ* }T {[ٽi*A I:7T'0O*<ԑW%#폼:>GW{aCiu̸7Hܖ=E}Ϫ^4ͦ>ۢi~6&_o/V6n-zLa~0<"w&E8,V3+O MYQiM~rX>]s PeV[l-|?}{ L??l&^E8f[_^E endstream endobj 4414 0 obj << /Length 1332 /Filter /FlateDecode >> stream xڽWKs6WpX0@Цq't($HPb‡ʇK"%ږ'u/X,v} ;k;~^ήo}(YfQ8!!(L^-<]xM6j= 2]D奾FK*Mw lY&մNEaebߚ+ڱ6M?b;jS2$tFm^zڤvdZQ'm^̘y&|Z̓y wfՍޔ.Fy"UŠ1;e䍵hq4!2c\CP{ḑ,g,߾m[λvnB(Džň$BfA|ݗ* C+ }ʾrW%yyDp_@e"Q˩KȱQ<V]LsAOFu!8\p bیӞ T ,4{[WM4捼ZkM s9 !dZm]kPwy.3mfiݧ& H 4|?Ԣ^)5l%w(r'\ڒkvyM#l/R}Ʒ"*$FlRBf';$Q|T}Úc`Ͷ,vLcAF L}ѵSX#5MWH }6ij_)0!R0G"13z:ʿHþgl.(h/kd3 endstream endobj 4423 0 obj << /Length 2330 /Filter /FlateDecode >> stream xَܸ}I7IJjI!g8NaK::v>U,Rרp@ģX*-wWz}ƛyܜ6sM,;& d`$1Y>$mW]`}pH齌a1 q񁥉ݝnv{jv"4GB_tL^ w.d ;$tNũ؞*L],kki+xV`tCAqٹn;SOE;FUgND%|CY@^=nELVsnd{TqoA\R4mU2ݾrִȯZ+Zw!3! ᖩ@f'@qib%poQJ1:7x*hoURKX͔ M/`=v܀&RR0d;n!$A,H+Xg0@ggEhŁOQom6MH=R^41n_am?ݙBӪ}?{Fm:DDU^_= BX!},P!AcX4I}d g zE\.4gv8tP-n!sA՗G`ڹ!i_UwzykWwhxHsfR6PꖖFX3sӓCqnt ˣqAg`飪'`J8kG%Іt$H{L[+4ڪ}޲1o߬,}DܑN;a/ªV,,Rc4OXb,0I@9=a80d>THWP.%d(`8֚,cqCKB)%bJXyZ$dO;E1m%a)]ط<NxHyv:]KWӦ)ύ{ML!i0w us?;_I4twW4d`1̸enդ\Qa~hˀ k|{]\B;J%T$#~²9wu!<"qVkG>mж }wb~?Tqd/3г>[Kh2]|qh/u;V <$.<"9<\Y#D#(D+g+W\2!a.LWTtV?[jLD3#f ,SE bbZ>i;xWPXUEFqF*+?,*y(},ЗDXz~+Ryp1`R.]u6US*WT7M\B"R}K#2߄8Eeu9T>y^l2ÊwұpŢyH&L/`?_rJrzc{F'i OzOo+@jJ?Eo`#竈>,*a[N$* BۤiL.4k1`~!APt ZF~!w4fUnD5 (E"HYz`zA=%NҒrcVڳ  @z2q.^bK-{t$P H*ݨխ}$([TU$ [ !ᘴ>Ioʬ m:l>Mhl+!~ S?PX Fˉ-hѶT`BҴnGs)PKWao-Mٗ0\KQ242J]+hUQ`&s +sFRe"dDT1!{EXDַE\R~k-q?+rW;g 4V1}LUk4FemjM7Vf 6?^ ߙ]I` ot==w 56pVWaW&Vjg|6ef0 ٹ.<C 2l# ./zh&~KGvn[F~ߪ쯦l+?H9RE@*{^L;*E*_eV` H8VqzNغn꺣%GoJ&C߅p(*XrU(> stream xڵYKsW褀UpJbkekˑa7#zק{z@4D[WOׯ!?=Gyf!RG)KGW7GsB} J߃Iln﫳Ň0Pfq~n)NzGs'n\ưH43Qؙ35LUPcͽ58#aPЌ|:\4ݲ.Z6!.ma@V8oeVўKΣaM $A3m5uMYpGĔFɟ ҷ[jh6ϪlٜHy]u^Djp5墴N*ܠTnLʺ0+s?r gf${{Ӑh*<\׭ݔ g 8q\zTCLng:+=MT0FCQQKdo@Y rœݜd+ `lֳH =BAl|hCaUs7`\0e TpJ "ヤu{4Mݶ- :pFLB U4dTdpF/b 0Pd]6DP)W6r9'$#9dT轡wA\X`<"p x-彖dBLyWi&y4YH;4xJDD[ 8dBrJ1SrfaG.WR1 b{Y _083(S+ReV^UzD>W~1p6g>$6(>puGpWCENz7$o,dbY5~ !N Ƃ A%HA5` :? Uh cC_CFsOry3o{Rۧ9OtN _oubIc#UMAX=W#Ü g؊Fq%TS`'YSz@>C9e֔Yn6}Çéu(P?՟9ZS ^BRJɠW11t:yLY. I ` ys\4ʛ\NA&-_^/Lz'utY_ws`>?CFCY{rD/%gXCSa1I2걆{[hg8r rNNɾ̦} ۸J0gqQS5wz[dʁ"5I_zjƚ<=旊a(e>4]lVX*WYR{˔^eayjl9RV}‍aMcVP4; Q/+p'}bB?o#:tɟBp??t#!.gφ\ y L{pajMNْ'W(/\:,N?ñaLPRMu.D/^!̄6b<}2(5Шs ';r5Gj ̥oAVm= Y~ Yf;2 T{-%z,ם5rcxPy =z?NM endstream endobj 4440 0 obj << /Length 958 /Filter /FlateDecode >> stream xWmo6_!85C^Kd@K D'B(%_dKlwZKDsG=`ޯUy JB?k`hz!(̼sJ˻廋9mEa &E!3. v@2bf~F^~bFp5Oz֕,,UZnd#Į2qP`OvVvHBQ0?c1  1J.ft4 |(Ұ8%)Q炷6\2Ō<]Y]P*Pe|_٥slf/bgG'WuJv=\ӚPCvWLqn2=\aHx1(̅X49r ү6,}du--S^8$ ])_cW\8ԙT5߯Ov1O fc.LgC>YWܽ.+h$I (_wZ V8R(Hդ}+=sӲ*gegIڠɕU?:ԩ,nwݿ7Cm6YHmDn0M3mI]iS»LE24>}}ږ艐cd^)M7X Qߜ<| vWm@mT[/$nš/ָNOD}E:.:S/^GǓj}O.=alWD#/-&w%$G4'5ZK'7͈Ls/a`;`&>Jx YNX7x[ pV@E; Q`ïG~_[W :)4z0!F# endstream endobj 4446 0 obj << /Length 2441 /Filter /FlateDecode >> stream xڭY[~_aL\Q!mhPEw@dʒοQv֙ yC\sH{[͟߼UȏV}'cXt!犿v]H8]?4V]/oq6񝮳jCo]kNn'vkeFމYCT+XWNiV4&țI0\STGw]l^ddP$)z裨+&/Smm[J\-Yc}nglI[~$qn1B ?e塪A6˪~-JB}>mWĩ.#Dҡfq灸%a,18 l{PhCd8Սh" 6pP` '+ӿܧ(۷/D,&xp*? (hT8{,@LOptn yOu>YWP6E\ CuWm4,Y{ wVD91ӚGKͳZE(ئS?{sd5qRW{ ZR? 4& Du`X`#βօD?x|aD+ůN>08_2ߏv0܆X$́=*[)\.\=Ei+Nb9}hIs^?eɽXpRHgL*;2meubDI/M &zrdLjVn Ҡ@HñŹI-ʳ^d=7q| MfDjk1g*s,zhX?^;"[3tM*ٳ78\Eug_?sL?D\(P!-C%29hGQ"?] 
4 p(0|Gӄ}ڦ=]<0X}5bm (Gm?da$= yhȱL&rĂuYAp0&\.k)NjuSt3bJk%n/fgp%?uUy /xwGd3oP5'zhZl=n L!O]Lp0wtqw=_:faf3u2wCfƥ}Ang\SmH lG?k eO9e!'\8=*ww ~iV]] iST z -"ˬazz'Au .c͇7k.cG!5<%\m8[_"1C*Ț'>[<ީ >G1[uTxq~ IJ^z&~0]ggHL`'1Ƚ`jX*y~!3Q@F~>g> stream x]}q/5O z"&h hr*[]Jm~}gHʦdz{w-bQh83.tnz'"#YQJL1lqS,~I_o~Њ" ԗ-!`}%߬Iᾬg"io)t}-mm\=Lڮvӎ;ܴuՔ{ٗT(]Yj*l۾}Y" )Yu*yH"nkZ׸I=zr%I-JNJ|owuyf[exQ'!,ws]vU"e`|w̍c|EQ(>h]'yYfiVuOT% O' >fS ZC1=sZ '|c#0}FZxd|o!j)#T~T´cj<=)އ*vu/u/TcYWm[8Bh@E&edPmsOLhW dG<,!YJrK^U4\q(~DhIȵ >5<j I0)" GH)ɴ04b!Hʼhxd/M,(Pd,(u4T¹OݷV^z٩88=- HFJMԈnLrߖaFy O2+$MTf[7= & :ra䃇ouk}F%mHԬ6:q aoueobڢ@* Xhf%}O5g$ %K_R鋢IO~"M% gr0jO$ ޺7^Ms$]pWNEJmr]9~ʞ:")ș<#zl;8 c<>7k+&{%">~X,,; c^U$dZ=O9AYu除?AjI G/yj D5fWfrA˱E(14H$FYTTLDsnJH^lLϐl.!> stream xX[o6~$c;}֬Ai[,y4ɒM7 Ds%1N N~\|Z\] h%j2Q tZ'T?W?_]s53-9$ĉ8n 0 IH^pf~ E8w- =K~h}l8#t&:RDx&дz̨-rIpzoCS/G/Ͷ6Lz`ZOT/OD,A 4Ͻ\[.Ӈ*smIOӶjkٜ7MU5K*҇s7wOiH2JѠB]GY(C80X1#9@d-wɵsrL \<^YXB.tqLur!p"V,0:(P#*`B,<}?a"Bb☘`"#H:`F'm#@vX_4G\" @!T͒tM}H{;7#S-:_^蜩 :;p?{FE9+?qBD#7 !#=4j6vK|ekxJ8FvR)K=0wROޡKO<v^"}G}i\\ BF8;r]3Ô*=X[);O*kWig@}Ym8mK@pⴠG'X3\=%Wtm υny`)_<i~Pqc1p)'IKދz 7uv*3gČ q>-{N5E*]y?cD=Nٽ Й#dJh7MYRԶW,?zZX(?lpEfQη ӺF`NԐҽۻ6nLhîf;XT{jU5\OjSoz Ȑc|Ra= 춱7)u;+$Q;+ȡO=΋֖]Ovɮr.m" G^h'/hi endstream endobj 4452 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/Rtmpv18u2b/Rbuilda74ca4fa19c9a/metafor/man/figures/selmodel-beta.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 4464 0 R /BBox [0 0 504 504] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 4465 0 R/F6 4466 0 R>> /ExtGState << >>/ColorSpace << /sRGB 4467 0 R >>>> /Length 22833 /Filter /FlateDecode >> stream xluO7Ҩ ˀPU!aP,UR*W3"w5Hr弌/f__}4,?*+?}ӷ_ïo?oҿǷۯw_e_ǷYͩS_tdzSxq[jV󶚷պm~Z!O!}߲ieUmUnz[۪VjUm5nq[jVZպ{y~߲ieUmUnz[۪VjUm5nq[jVZպ{}~9?[3}W_ ~5ׂ_ ~-׃~#7 ~+F{w9Omݠ[З_η[~%W_ ~5ׂ_ ~=7~#7 ~+џzۯߥSw=nA_~%~o}W_ ~5ׂ_ ~-׃~#7 ~+xw=O]}|W_ ~%W_ ~-׃_~#7 ~3ෂ߻z^J+jkzoFoV{c<Aϧ~Ͽs==A~%W_ ~5ׂ_ ~=7~#7 ~+1]S9נoJ+ZkzofoXOGt{kз_ ~%W_ ~-ׂ_~=7~3ෂ ~z~#w}5ۯJZkFof[o}̚nA?y{]η[oJZkzofouؖ;݂~})W_ ~5W_ ~-׃_~#7 ~3෮[ύ/;݂J+jkzofoֵ[t ?\tLoJZkzofou;݂~})W_ ~5W_ ~-׃_~#7 ~3෮[/;݂J+jkzofoֵ[t ?\[з_ ~%W_ ~-׃_~=7 ~3ෂߺs{nA?{]t +jzFoV[~n}ϿS-p}ϿKЗ{nA~%W_ ~5ׂ_~=7~37 ~ϭw w {ϿS-ۯjZFof[o]=A=A_~w}W_ ~5ׂ_ ~=׃~#7 ~+k?9s5=AoJZkzofou;݃~|}9W_ ~5W_ ~-׃_~#7 ~3෮{נg}W_ ~5ׂ_ ~=׃~#7 ~+k?9s5=AoJZkzofou;݃~|}9W_ ~5W_ ~-׃_~#7 ~3෮{נ/;݃J+jkzofoֵt?{з_ ~%W_ ~-׃_~=7 ~3ෂߺs{A?{>^t+jzFoV[~n{Ͽs=p{kЗ{A~%W_ ~5ׂ_~=7~37 ~mw ?fM/}W_ ~57FtrOt|}&j8nug߭?wmw?ȇӷ_e_?Wvtzs O?~?`G_hM nѶ=_ӟw\AQ}'<_Oh\':!& 1ɝfIn7CLrcbuBL2[ 1NI.7CL2\ 1s)$g֛!&9 1v)$'֛!&9 1Iy)$y/넘z3$-֛!&/z3$M֛!&0$_Kz4Xo$b8SI7CLRcb8SIj7CLReb8SIrVuBLNI*7CLc 1IfIJ7CLGd 1Ibb{7BL栌!&Qo@lFi!&z#"3qlnYo9Ï3;_cbbF/.NU!&6{EbAݠQoXӨ7BL*XBLLޘFbbӟ!&QoXoӨ7BL*XBLWFi!&_X/Ө7BLlh::b*A1^FbbFI!4,PoFbb:E]f7BLL1Fi!&QoFi!&Qo, FIR}MQoFbbF}QoFbb!&^Ш7n\v[BbbFi!&vCbbF6QoFbRk1r^Ш7BL^4Y7Yog)Q3z#4ꍅrY[zA{bbz3Ĵכ!&fi7CLL{wCLgӵSkhnbb:ojx!&!&S6CLLAm~m 11uN&0 > :e 1a^oذ^o0Ө7BLL1aX F QoFbbF QoL L1)`-^ 11z#4611z#41)V|)Ħ57BLL1i?BL|ڔ4vUz#ĄӶBL|Z7o04&11z#Ħ7BLL1)54Fi!&6-Fbb|"oFMQoCLlڎz#4Ө7BLl`ez#4[f@FٟR-S11z#ė9U!&LRݯ!&\fy)ėaq_ F-11]*z#ĤdbbF-K11z#ĖPoFbbFI1Ĵo0ĖPoFbbF-11z#4[6~Fbbˎ7BLLM=QofnШ7BL,R/vz#ė]uFbbF-1)Yy)ĖQoFbbuPn̨7BLL1ek`i!&K!& z#4[VGbbFi`-ӣ11z#Ĥ$bb7BLL1m!&Qo6D~6E^hcFb$mA/z#4aK!&Qo6ꍍ-Ө7BLL1m#!&Qo6BLL1m+7BLl FiӨ7BLl FI!tj6Ө7BLl[FSi!&ibے7bۢ~bۦzB1mVѦQoض-=*7bbFm11z#$#Ө7BLL1mj!&QoLmF龩i!&-/FbbFmӣ11z#Ķ_!&QoQ bmzB16FݡQox_xâШ7GLL~Fi!&ֆz#4Ө7BLFIeR11zcatn,!&QoX  o{IBLm/BLQ_gbk1y΢Ш7BLL1 ai!&Vz#t ^ֶz#$bbFA11z#ڪޯM11z#ڴ 11f}ybb^oXכ!& 1`_ 11f%jx!&W'twqoWPoxަ?Qo8^cc>_8^c閨7כ!&fi7CLM^oCL7bbFq11z#4k EbbF11z#R~ 1V z#b;FbmB!&s~ߘmF#i!&z#$cӨ7BLFi!&QoX2ӥQ_gb1{)4kFbbFe11~*kFb+k 1q!&QoLԽBh:YikxgNx'SMh"4Lh\`D 
dBo׍DB㕫ф+i׎Baix- 4^3%="Hxa;,#4nkC%#;"HO@u4nk}Qhz=~ ?Aƀk߂V@ Z(ׇ zmmq xB㶗p5oϛ ?@㾗ͯqSиHZ h4E='44^)Mhچ sBݾ;{}ThܛĥOP%h^|@7 HZtJhbиnUD h Խ) qoN[ q+_7@ޔxNhܚy&~; 4!?nZ/ex /и5Qи5I и5EhtDn~U!ƭ %@Դij"4MLEh@EƝIWh4@d$ {Ѣv?/E0ƭ q& iSHqӸ[Sߟڝ nCoiTEhܚrp4^@`M7/ZT#4^&L~~7_৐!@4#HB#dи54~b[LO qoz&1;;4!;qkRи7>?AƝ]ֲ|_{ fݰ|} K| '4OHqgݏF[7@&4U ;<74/qk xƌ%hܚMA)Eh_FMޥG lx>@1;Pt_xi 4eTW"2;t7ޏ4YC@㦓 qcSqZ4n_htD~ mt[mBzB(6y@h4Iۖ Mhڡqo$$ޡ i;}cU4v?67Mȼo*4n:t 4Hh~vhtT~σƽ~ ~TAYMJmƽAmMݑ+jڰ*|X?XcPCCM ~qobiz?AÀ3z _mEï z4n OA~E/ ]mSޡqoZ4n`|OӴ@h4-AErv36MB/mи7-nc(qoZXvD>}4n:'jx?&4n:=qF AqmA)Ѐjs4nw "4nO@3 Mw%hܛWBO+@6o~C~]P_>$^7xV"\Dh<@L3t}@!7ݤ't~HC4n3.D[ق'4 -@6Oh< 4eqo=PBq4Mx cq_ϡq[Fۂ+q<2 и隨't¿/ h7t$~geEݡ}4Mx?nu7 и/ "w%=~/]xAhܛ:wh и7y;m˼zB4‰Ch4~4ehsY%h4!eW A~{ӈ 5q_Fw x|иi~eD~ E4nG@t[ hܛP{D: {SJ^е8L wh< @mи)htqSzABo˂?h4G}'Q"(~?m#иo#wh4!|iLhܛj*uD qoи!|@4n@и7~/qoU 8^s:Pvߐ?Țhܷ2Kݯ R|&Dht{@{h4@ -%4nπƽSh|^MAxиm*/cpB9 hܛ65иi>_ 4mqC#Eh<@թ,|BV{Ԡ~4n@Yܘmp@d:gjиoMa #';&T{+✚b9Ŏ/bڋ>@ڋ{dսLD{mՕV:^l= 48hF+\ހϒ ֦Vvًfɀe/L^E`g[DHd/Z^Ad]{6oFc/f^'|{qM a+Z>(@kux1>D:tZgDOZV`^K(Eb0%k5^JuP~B׋ۉd"\/&ē^KqJ+ {[%ڹ !`Eb4#Q>5p19ŘgbKgP`_S=i $V'jn@z-^퀫ۺV%9 ?"  Vů꓂z) # 2ՋqD%`@RxꥹpDG{{^$#R{;IR/s^Sd0ņ&R@[Cz1R"3֤OYhK@O/^#;-N!SvY%JX j)a5'^SD䒕X\".Pn+Ye% +rЙVxPҋOVr?e.FoI+CV唕$*~%+w#}Rh~HO92JV#$SVYI[J|s|.+~jVVM`se~E#w )i7Jr?eeUoz[۪VUm5d%y>唕d~u[#e~~i)-/r[۪VUmo~[jV㶚ռmn+oHr >V~v˧i凼mUnr[_=8me=Oʇ'&/ʶ{EԠA@+uie#} Z7he^'o/ʾ?D+ƾO@+{jslm 8y'_"}V>hn-AܾD+Ƒ$heo:\A+[sO!VllF&ZsmvZ4S@+[ەO9A+[ 'mOXeoׄ{0chCJ`YWy+Xv*:S-03]t he-u }ACHh&/9A+{ yK5heGmn%ie~M_KJ֯:&qjS׿|FFx٭ZyLI "H+[S3A+[4iZ_&jtV6Mطy&6#-H+[tW{)EZ4_I+ #V} Gl{G@j[:it˃V6G{O#ieӤVv@4rjl|whe0D'w?H+;=P7h)_t\m옰C'F6y7i#H+;ww r֑ M~l@+s"zݿ4ʄ^=SֽOzieӤ{"I+g@Z9#A+{7~ߩ4.M6ì#H+[>nZ١4}A~V۳Cr4_{P_g4ie&uArA+ultheAfF/i~@+gVn}п~UOt+he|A qAZٻ΢ͯ = ZٻU6V:A+{w>=rpR,@+e%zy@:w;R/s#+x$ie ޝzO;oO4V:ҁ}{~N+'4i[y~em3#E+ktV>4:ih#I+g'Vκ^I+띴&3# E+&rn|wʙfuDh̎3:RE+gFV>4=iWʙfhkʹBZ91 Z9gȠ3#yE+g'I+[Yiuh,rN:rfĿh̖YGԓVNVNVNVNUtd:iZ9)"5E+'v?VNlC Z9#t$!ih-Fʉ[ԑE~Y2~CG`V> mAZ9 ՠ#E+#DI+'~VN]GTVNlDwVN:rz)?kS ZY#E+'3VN:rCZ95QGr".ZS Z9N$E*Z9HnIG V>GVNI+ʇ HTIyG,ob6@7"9, rÛ Ćwe[i5а"3odxkLbx+00͗x}ΰUZxgVOVQ͸i›'ANx%b;: >'i]de\&!L+ܯ$Њn9AV<ҊUWZ "h\XVjPGB8 k&NZFg1āho[69۷y6QOB{deUnr[۪VjUmo~[jV󶚷ռm%ΗR!{pG[^V*UmUov[۪Vոm5oy[jVbW?dŮ]M7o 6lmUnz[۪VjUmo|7uD;x/?z{$zjkȾzqs 9o뜯u3f 0 _ k/75z×_:Aג+O _ | DCpLխ  w3|*~լ 1A 1+7פ/̂z;€=|=}%&)Vzt+VjRF|!6PU|zߴ}8zjN$u*I]t[׼oZ盘&7M盦bpIM\M<;[So'C7 q|N$tqJA:9#rI"M]08ŭqE7Vrp|Sg 7#TtqooE75֟on&q||S%G@W)|ϲ盪%? W}|S 7 7q 9 rZC7&q|,o&qk|;L&q|SO7{Io8Ė$qvq)sXrA|NI'>jqY|958_p{_f !߽:u w+|iÍVsn#{uMW d{79n愒:V`^z oz[Gl2 ӻ:D^a5pnܙ ū~nBFy };*w!9<%noһ'ޯ' "_ <6nWvv~nfw J+\i:x]􍽄n=HZWuo[(Pݭ$Xq<> 9@ '`]wmG@wwswmx֯,\ݝc[?@sՒN2w+`3\uU>s:\5gӁ $hݙ{Ά=Ů:;uY@rv3,,ЊTjH@k+dfdvU9cݤIZEVGn=aa# )r'T%"X⯂s !*8⯂s~U#s !*8bV}9P:vg(KD_+!i:J4, z&LI+VK4l%0"ʼnꔴ[XxN!_r}*ކ둙Vm !zd"j>d#&FI+а]V*)2x)+ѰCE+P1_* CQ=_r|sT#*x*Bn*SJ햰O!SNZ0!C_wm\IU#$GYIq;_r=eUOe%ILrJr<唕xvKVc<$)6C$)eEu\=GYIl&x.+vռm5ou[-"H!zG[^V*UmUov[۪Vո=r=#tR =H'O#c9R둰7o_U\O)HZ;zdzJ<1WknI+) wn g =V~mvKZ둴++Hr=_3xʃvˋY7 NyrO௒dV~)a5[jj=V~1_byWJu+并_cn5>+ 0<@W+W]7 @\"5ை8+E_pW& E9 [d7cnpMOU!K"B0t-'Et18"s9s(i']<1-OU![cn6r-x^ss-rL1*sТs-T*lYbJu!:b*\ e_&kn_yW. ZW :W.* eW.H W ]:_ӊgZ\A+;it'xO]Ȿ_t~0W? eHW.e BW W. eΔW& _/{_% _%'6W&v 厉WnWeW <ʔ+n> ;S_*\_#&k_&;q_I妞W v׍ y$n_^GU;_=Ju'cnfUWՆ/W w3h&]i&ufG1W?&ylWv?WU#םXjQ 3?v௛_Iug~ˀndݙ,WEUݙ;tJ_w9~lL_73nU WUWξ*N.*.&%⯻`_JUW_լEuL ⯻ NZ_7O"$ e_FuW~qnA_nGu"#+B⯛Wu*ݕ/f&=#U*w67%u7 SͣnCIuw ]k:~[z"o!_LU_ u!' 
uOU:1?ͽ'JB'55*IWŀ%u30zB{2I:+274~,(Xgs A-`Mmf<$T@av?3A4#Nt yX?P߬ ~Wݵy~Oߒ0Co+}V6W[r|ϛ4˦חQosğT~<{DF|?t8h<G<#4|y=?>__?W2Sy66_eX6g{ڗz6yoyă'~_]܅WywxRs(Umhty6WټI/y].|S<T> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 4473 0 obj << /Length 888 /Filter /FlateDecode >> stream xڅVM8h6`)DIvo+m ;-cC4RxU.7I. I>O_xH,,ON %3'8g295ɿ18Ey &U4<(,HB ,',~((bT(xʓ?ͪl2SDђaNbNԠ>Ǭȵ*X=}mVC S} g"L  !,hX܌;ɶrNJWQfyLtHB=߉ ;V. >cn@˂ 0 \ҏf4OVF# }h8f FY=29ph)QW6 /NHVW}iToT"Нvеw)IEL5 _o)CW K`PrP]ŷSvgJ Rni%m 1@> /ExtGState << >>/ColorSpace << /sRGB 4478 0 R >>>> /Length 23468 /Filter /FlateDecode >> stream xM-UW4ROD؎ lG b$w}2sw{wWWW'Wӏ}~c)}?o?O~-}_>~'[~?~H-oS{v;=d,2 eUZ&RU*nUoVGvA Vꐍ]l&9䠴ϼ AIj]YV5AҪ,q$[eUg^Vr5X}B6 iUЭ. iy XeU>јVwO#*}rs<C6{N,A&J2A9 VG*[9qd7䀬vw<ҭ&κ#$>Fմr@fa?A  8UdU]ua0پ 9 )a1sH/^]gav?9O Lj;g+xenK ~I ~ïп٩tjwo?JuM=}@.oG7薨ͯNt|=;7촇v ,U_eO烙01\и^=L'@s?/4K}n~0~>\6^Cfh| ~7|^vq}/z9zAc;?ͩ~/;ן(M}~Ӹ^ΉVLclZ9QOL7Ӻh+ӉF>nƛ0LOG7h870 n=醉i ~g@z~uoK7jk~Fo\4Dam7V8LL!oӓ&FLO{qGߣݯ;?˘ҚnߏvșVߙMc=}ho ^]g¯qfF8vWoq>X5u|]u$/>k=?w{x1灌Gj= =dv P99~l)9|;s߹a~p sfh/vÑitL'p|92Gch̯l>G3XLx.oyda= ~~ݯrg%4ߣρg>X~Ӊ ODMc>p3ߙ/m~Yӹ_p5·v_~ޯv_ZiKGoh|s#368:7BK؍jei5>/c/OM<7j:GDMs5MOr*t9)Cp}v볤 Ҵ%5дӌ~wNS~>_(4淆>߀_~ Ւg Ggbt؉uғnzz1}>QΉj~VOrNlL_|x ?.@v#7eLwh~M_| llS߹N~ >3/@_Y㒰i|y2Y rl:ad { /o3Uh6>1{2=x}z_|U~ޣ74¯`>m26eh~xlz}ش l E?B94~/[H7663tv&eim{0i$lډ}>W`X|_{psn<4߹qehi8~(-B~\iݏvk>mt o[J߀LF_q~\ϱ%}Ͷ҇=|0독z&&}m4>l;_u^o(|vcM~0+'!c:-.vqtnt-8} q}m~C a,'-ls#762 ?2M^;Kl[W?N h\Pf&ݯp* oG_wWO ,cŒi\/:lYorLhvajMMo0i`S1=ʇ-c|_>3] u3/  <N~=%h\;ïbgq/s~wnx}6 >mi[st4F~\7=1ڍ|ݥ4t>Gƴ´ ض^ wD̴_{P/94=zi]W /0_6~g :-jök6p}^7ME~-$>t)7v?~_W_Wy2AgAM~77+?B%??Btp?\mAAݠ珉ϳrY:^hۼKAw󩊍I6Jק|aSh[,~6=}~c p2ݠǢ^ ~,`3~$Zq-ܺ_bz@|-~u|&2O{0qy5 ?!+^`Kl8оfφdyt*0^=qυ1 ?Mu.= x.t*7p&HH8ߎv^[߫?z ߶m zNBi|eU>G7h^>?]?FԃnCzA|tиpʎt /s|h~Yo/F4Χ4[3q 狾q4ƣoe[o!h_9|ޭHoa?7˴z~/Xu& 88_K蹨4'_kAxMy?:q= 'qDiG輬/QknS_иZ!"4=x@؃ 4Fsw+"'&a_$|jCSª}'ZUZ]6/tewk&7ͩJ od.vwa+o_+bV~љ͔!%[*dU_aUA+ NZM )a֮Ny:V۞wֱCcU; Jy+dV^^u.qJN$̔jxJV(aU%p$s1|P9gXB (akV j '!v5Xa'4\! mF[xl2Sn8c (a3]J +WЪOJXaRNX0 +(G yUKµUN+ ʟV]µ`wVj nn !e̔g?Wpk~瓿ڪ' O'O\?|«կ|a4 4swKc#͏g~_|k^G/=$Xُݏtw/WyQ:M?=X0@iz=y5'c$ ѹ@}zt |eGc=:~ h %=*3 =*Q n |bGb)=Lv N/= @<葿=*Ǝz_]h ) :?=?Θ{ښ!>zm2c'=}JVP}@_e܇Y-\ʁ/ z8tVV A-dNL2ì&?a f5 Va*ER?a;6+УĢb@* yK"9a[|v9_1 ;Oʀ _J} }\`x@۷ @ 0*` mjU! ~QMzX-z807ͯ0Ca=cj =eK_~B<^wz5GGU9=NbuEauKuO@ S= 7=L Hz(GձC- yx"$##>܏n (3Mm~K@$=$E p "&L@+У,:YZ˝7;zX0zEB1У(^,m~\ :j :,Pa׉="~ \@ձWսW+# h6P|@q&+861{CqU6pe Fs@HP@P? ÞSƠ^;/zx>ဓx"ùWGyfz{^&(*Hd019.x~_zĀp`I6c`q@Q &a@PGĀLE,z'iHwթ4AR$~ ?0=L#P|Csz,zz@0"#PL3ws?AGQ= (##ADF!У`%qzEE @I @QUG!P4"߂ߐ]a;= B@=CG@zF kz0 >"zCG==O`=Wiܿa1'@:OzИ >~Ozxɿt~ N/Pxi̇?[p?@}*N#O+#cSqF,C ?=L@_Vz G>oMQ<:40 DOz07;tS.1FzB3P“0|ԝ@&P~xzmPwhi@ u =o6;4o0@:|F 0uئ==2?aq<^- giߨi+ç xDu)*4=zd2æ1a=|@=l43uf@G_%@v`GjnwOup`y:>Jݡ؃|i(M|b!=r}CBi?8Ԩ'xz:IoI>i$? 
Qǀ%zcTG&T `bL@ͻOz@~~'=hzz.@Oz'+t~z<=pBz@a @%z8 0 uд W-o&Mzc>64a=߀ @LWdž@{gȄFZHU +Ѓ p` ^S p`jQ_@r X@h&z=|W( lS'4LX@Kz2􄟀Xz<=GP@[bl \T@6@U~ A=1!Á-|th|z/ N pPwh /eD=h-vdh ÁMmY@6&+ˤC|=|!P/=l'=LcEG O =a ec #~@\x~3#(C{S Eg/=2zt?@ydGf@=rc2zd$ WG&=2* P*#Q% H,=)#U^O =g葪:40~1UOGb =葊A葊/42#fd|ЏY~ H@D`W=葸~@D@I}zSG"*@D8@8=oz$0C@='ؐ@X@@qz$W(# 0fGJ h4z$2co{8mY8͛<6Oeyh'QLPP8<6Y-) ʌ@~"Be+ {y5;R/mn~3c/KVݱdDp2s;hC$.36!03WFglv^`r9en]f R36 E V!2CP/3[LZ1>dҊiV ,c8Y" I(7f1'cg^̔9@Hffd[ ;Hجa@@>`bcln3Cl11vV|EPT+O0Ɠ1h,cd'0MnѱhdX؃Ycd`܈D+01XY` %1u w0bMcpMJ쩢` >( <Ecp5Z,R0SAZZ c0` 01C1, `ٸ1B`L X` 0#)ܢj(V#c$ X dJ`+& Zf0nc(sDEcp?T@W0FR>10`13#e0ppc08PXc13#e0qc$ J`N+0 氂13#)bҊqVHh`0Y>S S91*jZ`RG0ڸw3aWN%@#c 1' X)b0ڊhJxN+X*`  X@018` $cM>q xyc,3!ʎX`G0b1c`x{c,2O x(|c,.3c1k+TѪ17 X,Me0 1;2:&#c1kk+aG0b51 xB&2r2`QOT1w6SOr1W c+1\c4N$ Z5JZ!bJ9ʔRI`;c<x" ` _O0 ` Jc$dЪP 1 X`c(J;`0R>1~`őB`0K r*S rjmJX1c &1^rz`vXc0QJY`0R,w02 cpS€` %40 xIZ!͢J9*-2%}+ xIX;c`GW2dcerן |Ep He-;Heǖ>v+'[f Helwf٘+-.; Re&TC=uf&.s|le~>TnڽݍLJH妰XXJpH[29Tl%Ry;I!oA*oZ&=tHi:E*/$`>I V$W!ERyeVT^"I*ϭRy. '[T$IdSdIYyA*O "H:ʃϹ"Y"ZT~TJR!SI**`8I:?Vl$Tʛ }6>}5N~H*HʶE._"R/He39RvXҝ'Oŵ2He ԭ{]\? ZTjBR@T0oʶNSUNRٷ@72.j+~ Heo ;g*m~e}i+"m3爐HZԚo:5ʶ zR6AZTmz¾{TyJRuIe+> I\Ǎ^琩OHCV$2ZTR+ pHe/Bi'2 ~"{H*+.[^:> UJ!RχT޻Hem7TI*ksO."A*o:Y$RYKL"#RyM$+E*!^HD T^rFRy% ARy/29Ag7H9xT,Al@T."I*oH*"$3D*gT|>Y7&R9=InR9sNrfՃH嬤"yTJ!"3D*gTκ_TD*g]/$ OI$EYIIORY'"'""R97YqʹqNRY3"sGrH"II=II\u@*g=oT΄D*g6y}T\H*+C8D*gT΂E@*.ޯO]*B rB+6 ;w&x?rlR_d6-rjrYe=Crn"4}쭡'qo i04 szx"?,8q<#Q (VҨdF=Ad2~8`LaP/W)z:S/hб=cS'OW+j0͞ѩ4, Ě C u~͞ԑ|̱͞t{͞!~`7{2fBz!ZzAII+C[]^#}ȓ:y}#6`gO^Фr" ̳'uj$u{~:={R?|eEGʺˏ\I!?lIij1Y'uj ˢE7h0LZsEM? Zu>B{R?g ~~CI|M~]4Z'$M*ړ:y0^-d4a[4hR0"If!h '.ќ&R!2h ")2"Y/2z2 ͂m ѺVphnVphw枍hw^bAFOA ''jWN2O"'oF$սd4CEFO~_ѓߗd4DF3pTd4EFsSEdZN2z% [у_d=AFs1VdfhhP-C$8h)*hn2f)h( dddt 2 ypHF34Pd43EFw\$;/vN2"#2ZIFwE*Ph߮c2ɞ"ռdthG2E"&2ZS]1CG2#nFHF3^Ld0$Y!2Z}IF+ǃdr b MBdt 2AN"tidtViԎd4WDFr$!2j#,[D'DF3I]dt! F2Sܜ{"9u͞)"p 2ZхhVޜެ&EB2z2 77HFBdfѪ$ Pћw$w! 2z 7'$7IFB d,d" Kh HpY路\oܾ%hn+Y;v2z33d&C2zs_d6$c;."U]hEҊtJ 蝅B7Zv"iEӊA+˓VU4p"ޙ1"ћ{t$7HFoVؑY(փd΂3PhX4dfi@$A2Z$UC2zsdN@Fo֌HFof[THFosމ$Ͷ$7boHF+փdb=HF+փdb=HF+փd9($w" 2Z$A2Z$w 2Z$w"2Z$A2ZX$7W2IF? Xћ 'd2zd2z~b=@F? 7sIF? Xћ$X;Jz~b=@F? 
XO'փdb=HF+փdb=HF+փdb=HF+փdb=HF+փdb=HF+փdb=HFo1 A2Z$A2Z$A2Z$A2"A2Z$ٕKdb=HF+փdb=HF+փdb=HF+փdb=HF+փdb=HF+փdb=HF+փdb=HF+փd} L}UU޷ #hz8}y!i=>DÙ(4{8eV_=ǻï΅=Ѵ=.~~pેs#(~CWDP/o4fgV93NÙH=F2Z Xpfg@pfyz83dt"Ό~UgYs3l %дÙk\s^̱w3+7{8 u3YMÙA^='Ѫ6yl4#z΃E2M=AF?$:dtBhfil3"A"2:|%Sd*#DF'%7N:$zҒNchDJVn\fbT t}v,S{b}IF;`&M2:kdtr h'&Y($HFdi  2II~"?ytcUEv,L=Wi!y1U=z"yjeg~>='Qy|dpSɞx5z8O<&΃q٫sH_p0PEsW {8?cpŠOr {8wP΍'2:$@F'r$$G2:RI$/hUڋNK2yI2Z"SWdOOKщ-DF'&$DNl$2:Kdtj7D֑V%TEzNK2: %z8@p{~5h.NjނA h-TV1 hU y^lGy?/l.@}V 0筠[\!R:@=+г:y EHWZp5*wV-qgגvV=-agВuV,QgȒtVo;jfGy+&Cy7kvy.݄Hf pލ|nyoFp<͛武@C&WZqwUN+ J0[H>]IhV&fgV5&q]x>cBU`IYш2If>#Ȭfg䘷&vP̛!f5,#üAY$Uu#_V1jFxy-"Mty@A+v`j”W/REhY|dUGdYz$fjWV#-*#vʛrd"A**)ԍH)z(9qN& VOOZ::1[2``L4Y-H&`2KOcqUE*YYUsE&YEVDuI$+@ #/.GVid;FVuYd/E^OcN+ϝV]1CVY)dB^C^q!{f:D.b T0OY%JXQ-Ix-~X7/y&^K,?*œ( IƙV@E/ν%J PmC0ċ GDUExq[/%tR rËbᵈ^"A/nO^" %رr)RkeW<%KXF / ^Sm^SKidoJK&Xᥖ@ߒVw2%^𚢐;0nVK0^ɴzoI+|% %}\O/5 H‹1.~KXʹbwBG l$%@vYIҪP/YzKZᷤU\oYd%I+!閴-$iJݒVz$[NY%vKVYS~~ ᷜoe@l_Jreՠod%9߲˪S[)+~KV>g[SQr$ᷜo/+‹d|*+~˪.+~ռm5e%"c_/ -V޲Vm 9߲Umo~[jV㶚Քբod%yn._X޲rIo92eUnr[۪VjUmo~[jVSVr咕~$Z'd~IN~ɷ['deUnr[۪VjUmo~[jV󶚷ռmɏD//b췼mUnz[۪VjUmoq[jV󶚷պ:h%:%VlUmUoz[۪VUm5nq[jVZZ'?_r$['ۊ_*UmUoz[۪VUm5ny[jVBGuK[d|[uK[^V*UmUov[۪Vոm5oy[jV @%-K~ɷ췼mUnz[۪VjUmoq[jV󶚷պ?K$ o+/yY۪V嶪U}[ ?@Dj%[~ոm5ou[ #xI/n" UmUnz[۪VjUm5nq[jVZպ:h%:%-VlUmUnz[۪VjUm5nq[jVZպ:h% _./;/]_ ~%W_ ~-׃_~#7 ~3ෂo—o @-oJ+Zkzofo~i/=[ЗK~%W_ ~5ׂ_ ~=7~#7 ~+>K{[t  |ۯJZkFof[o?_K |@ҷ_ ~%W_ ~-ׂ_~=7~3ෂ ~^_z5K/?—J+jkzoFoVs@jA^}W_ ~%W_ ~-׃_~#7 ~3ෂWqo @=oJ+Zkzofo~i/2|k—A_~/}W_ ~5ׂ_ ~-׃~#7 ~+E/[t |ۯJZkFof[o?'^Kϗ& |tt ~%W_ ~-ׂ_~=7~3ෂ ~^#^#}/݃_+jZFoV[v2_}W_ ~%W_ ~-׃_~#7 ~3ෂ#/[t |ۯJZkFof[o?G^KϷ |@ҷ_ ~%W_ ~-ׂ_~=7~3ෂ ~C—o L=oJ+Zkzofozi/={ЗK~%W_ ~5ׂ_ ~=7~#7 ~+kgXۼ/=.o?ŗJ+jkzofoֵcK-/K_~`/݂J+jkzofoֵK-0QK_~/݂J+jkzofoֵK@-0KKˏoJZkzofou\t ?LxҗKoJZkzofouit ?LҗKoJZkzofouut ?LҗKoJZkzofout ?LҗHKoJZkzofou璎t ?L@җKoJZkzofout ?LrҗKoJZkzofou璧t?LҗKoJZkzofout?LҗhKoJZkzofout?Lҗ0KoJZkzofou;t?L:#}ۯjZFof[o]d/݃~۾p=ۯjZFof[o]/݃~=ۯjZFof[o]/݃~=ۯjZFof[o]Ŀ/݃~=ۯjZFof[o]/݃~ 8=ۯjZFof[o]/݃~j=ۯjZFof[o]$/&K~ҷ_ ~%W_~gH?~?~Hv.M#PoOSiџPfy7u鴯AuQdg]:Px_/[I0\KN~ݹ:{J{?j{/~ܾ?#MTH7?f/z^\7l|8wn^fo_/7|b~oϿt΀ß4n?g N7I;}_f k~q&?_#G/ endstream endobj 4367 0 obj << /Type /ObjStm /N 100 /First 987 /Length 2374 /Filter /FlateDecode >> stream xZr+zi-cJ*Yg\5$.- snI@ %ps_MhuL{MgVs֒Y& I=II,ikG)4Qu9QLz$daJ[dcJ3eA2AALyA#dtL4o<VJr4LI"oY5#cF8a KIY17A*:#0&c4y]#q A[0Z#hDbj+ʬB81IvJ,$ieE  ,>Z d/lLgN4BBJ;s2$=F+ HyfxdFM,E;悡а"ͫa9j[lƌ6KC1,` QYcVd ^Uh 3 p6=l l@' &+@">, HV4AFZ768(Czp`v=Ɋa:0ohBGP6Ga͢$b'PLa@mZEgt-;=e;?3Z'YQ{Se>w,X~mhڏޕxOQv]1 ?>z%=ӳ_qO@Q&"H>BIj-NkNNw:t'NWs].jyq٤5}vI-6TJ)g$}U\t,X삽p3M>ܖ7h0@F"h`i& xKTj:%0|9G\fNk@fNq(8͕'W\QKtğûS.=%%47YQ b8J8tV@yNAU9;sG-_*«( I|2hf`'DT0 ?侨FBb:$(x*W@2'Tr7#rSX}ϋr`4^sOÝDs|EKKk[dx;8xBQFۼϧUءoQL>@҈T=KYMnW:;I qWzi$(A@u "Y[~lx2atzuPjҶ oi+HjЭ4{XTSBl_ZJ5W=R";[tL~ ,HD-Fd.-i!;+ڪg?>onnC}650 wl*w*_P` /I-ZJꅌv1Qǘqm5B,.5z-j#v[zN+Wn$P"vjuAi#.l!<υHY Lg!+d$R HDe:PD=E>>p"C:ƿi8ڈ8\z՚JJ}6;)?C_C[GǑ[{*6ҖK ٢aHjOSutѾlJ&4(]GJ,+5|5a䈧`v-|>R'D8}c5W h]._5ռM}ܽ_LLS~/;.Ԧ uOmT^)B؋f~бs-i, Ck  <ѦtW*RR%h3mAw#DDǞrKrzic =m@ހwӖFqoYp=6@Y/(t< ׸hQj˞2"i:6hB_mcG~@vOۘmw.kb0ے)C P40}ͷ1{7^mT~oչO[)X|HݽNv~mr=% ܝ⇻5N-=oʜꇋ-ٛ@.BIkث})|`?5mU,onٞuE[Aד\5=:^ ՆC9蝂2gs9/۪GE=կM6æ۴y캞Osz1&iC6Lv3?̟շMA٬/,˺-MK>ؿŢ&M~_fylˬb9_l 름EXT#R˛ $+piv9r6͗mVOy9+r@KTnǘ/LM処"Y;d@r--O}]Sϸ^~~O'q?~^~Ѡ) endstream endobj 4481 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} 
G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 4484 0 obj << /Length 1713 /Filter /FlateDecode >> stream xYK6r\>E*^6}ٞڴVIwLr4({И ~3"gqNpKrg4KW 1%<^&Vzٛ.s0Z,H_4j68Kug2-JB.?cOUR_]Lojܯ+mtL Bˢ33*.ni ͦzu "R?i]9[ -NW˶O._{Ԏf`&:z ӚޒFkt]Q-f'uM]}ud gEgܙ2ʝ @=cFC tSX>zvٚht^ Om{/܄weoMpWm~lSt~Թ`RDkVۺ >^=8~?ԦqAӍYoIn-7ō)Mwls 1U(^o[,&#>:>Hu]mwP&@ @o+?ts,Ԍ,9,-G\t٨}}wz¢H^'NHÝ(^sǛRUlr H/j['^sǤy0a!%y 8P. aB9]!Fd*n!`)tH6aHHSNp(Dsuǣՠ3DJGKzy_HC'0A@:,46` kT1w suՙko\?=٣z`g'@1zmxzկotb(GlĴ3 TΓ>8i# }>iY6iC(pFrT~7cDVYK g B-a^oL5epA\@ w5?.zMRjC)rGh;'%RSms~%L 1S&SFL^]͐e0"Xo'\Hs(Ut2H#w&ҟq~]{V$) ;$A闃+wH6Y,n?M ą5 '/#lkqREZW)J3#{<|>/ om; lwzaMT݄ͤEo¤ rFmFa )=z؟QMfe|"[F;ddH:oZE݆>]ލ[ FmEx@vzQ:>aUpS1ԫ'6O& HrT} "ڧG3/i?P#heXlw2 kLQ- N!gLu;J瑚jV^S/2B}t< y#kjצ}{)>&V*Ӷ޺:ܸo*lCy( 9*_th4+8 iK];0ʶSu%@U@ʛ5AhAZ9фgP.BW7|gVG꛿-]Ѷ+}AqA'BF-sPp7"60{JߡE jY? endstream endobj 4470 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/Rtmpv18u2b/Rbuilda74ca4fa19c9a/metafor/man/figures/selmodel-preston-step.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 4487 0 R /BBox [0 0 504 504] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 4488 0 R/F6 4489 0 R>> /ExtGState << >>/ColorSpace << /sRGB 4490 0 R >>>> /Length 22617 /Filter /FlateDecode >> stream xKmu?b7ɆhTm@2`+MRUs{(U5u"gŚk>r닑?#?GG{J⿲Ǐ~-}_>~ӷW>[-'3jNm~O-rոm5ou[j}_%r[[VmUnr[۪VjUmoq[jV󶚷պmuN{yso9^r~v˷ΗՑUmUnz[Ǫl׆lC.saUVX~v| rA{'!d&Yؐ \ O:U7sޏ>ݠ.uvL\eP+|}ߐ߰A_q<~PA7Vt'aM>s3u\gs"Mg\ft 'svy|Gǟutw!O<t߰ݠvߗ;:ڨgkv]ۤn?lW)|~nuQ_ǓK_Q*xv}&tkȡt2﷣/ %t= rxݠq~5?z@W}5فzAݠv2 u~n8_9 ;z@/6s8G7hvWs?3mn;1ϗ74}:7BF{yyPyо~C{pn<ƸF4=xnЩP_Lh>F9N/yl~ٺxtN۫/fMm~54 5$;֒7x=L}< Y2] L=%}tj'Kǿtt~:AF}tq?ًta/bӧ}u]nиv˼vH$hoG9ΐRx :2uYG׭+~jONGt}xO G_hK|Xh.5EQ?&}zl|V=vW*teM'>G߹ @ƻ(;i<_60=|~G_c\(~۹ﳣ/~siVоۍcz|+C/kqm?f7i AOٍl~\֧2G_?YdRwhܟ;0~8$ϣͯjqdShk|7G_p|vW< Owh?Oe~E'kL޿GB={Catl}|t7]>:i S{}S#F5/w;5k6_w8yz}t~~e4OLOӜ)ɦ c1~,%/}7m~}s :7O#JӃ z:æݯ}2~iݏoiO93m~6Ok[a~ikߎ>Fyr=QOӍn4L"tGb OA_]l?|m~~.>;u6uOf#`fhq0i iK]6 0]!ڥ ik~ ;]8+i|[_B״Q2tMw/6ӌl|_|[`{wnT~]~¯ydzWx?m"pv50u̢Ƌ74޿Q7=y64=4G_ӍGk 쵎s -ݯ`>ǴeߧaйS֭{4ڃ upM"n70?jCsa}"t>ѳn&]ݏG@l"tf{n/ӉQ؞Bvw4ϣo?p^|w3߆Ong=:1}~M7<UGWE~Bi?-Mqt3w$q|0A}MAV@!sN%>o'l91:KѦImtn4υ).4oeu>+ï~shUwnn<Ӆ ~\)5=ѾfA8:6P(-Nl>'Q7h }dͧ%m iOn;@}i4&ӕ!b>۩4ڗ]W؟==h~__br7~6mh?^h_O48ߣ4Χ TMO=[n/&mI7hM`b5=t _Act&/a>MD$ܨHp5fl}kv?Ϧ4wυeopnE_o7?oD0 ڟwN &܏i _W_Wt< ~[y?>_tG(+8yϛM'n=^i4L;{2t?ݥ4(qt MDhvM\&hnxJ{|0>P1peը4W'bg¯|  wY:^oMOt :Oe:y:7F﵁)/6 =y(eM7h~& gLSD|~^>>q&4:7x/}4gTllm-8~pL|=0adp=//d@}i~6ݠ}|h8a %#/[XIEAF~1PS!tnI2la@M &2{ћ-Dh܏m MAG÷Y-slQ)_2=q=NG'AxtF3Vgz@XnAx{ڻ7<u nx|?_$4ϣ4'&JMA}:yn\ۖJ^8? w4[?~Iw?@Өk<_X6]w}d۶}[n>΅j!?\,lFpLϱ*;8ui8}[&o\1] uFfx:g5mиgdz;4~n`be 386p34~Ch=C~oLNwv%^.6+mM3Z3yA 0LF%'62ImHv9%Ktq.qGԍ& +7xVEVYV,.KZ⢜.j9'SąZ>}DV +U(Vh֤ٚ#zܰdll% n6=x; yְ­3pg +xw.B3|հ>gr;KuGN~_ܐ堕OJ?yVPª JX5|UmŽ9!+ew'ʾ($-fU. 
E&_}&a5WXrF)a;ZU_aUA2(aUI+ G*믰J8E+ߵ#phi\rC_ث۷By.Vx4JLWl/gV_Ti-IXF +Wg +I[0{mV:JUՠNlʰJVQ*.niAî)Ř쐃rCV2cMm8pKocMjZ*B+<εҪwJX~V(ѪuJXVuQªvJXzhn3ЅZ:iKV'Pdh`5iVOZVhd :[L&b2*Cv(7;kL |фIXy$gH|al-ޣ ّ>*pA+b_2-t2Qj*}^I+sƤ1e/ZJgN$B._4=Yܥ#d̲ʔX#aHX1+ ^o;/ CG{Ϗ_?Cu<uϔ_]LS~3^|}OOo'NoG>{?`*D_?ㇿp?]埶U_i|zOϧ׿-u-|w +}.X32{q}uwQVS~W)_Ow%ﯣ\T '|OӶ=^~TO]'O4?y?~5_O][-b~`g_g>g/=$X叟Oݏo?~>~珿7D׹6%sω?'zDN =EL#Y&z ·L\Kam&zt1ѣsc =>s/LhR1ѣMtՙ -)Qu)aJ\RG-$ʘQE42ѣ, ٔQ8sDpJ(E #H(I HKGG1Dܘ(D\DH1#-&0$zD$z$N/*#q=l=_q{մ7= "60uk o H&xx+D[MJ0?4JpwRv2@"o,%~V[Vk[@-"a˄?Pb//j듉Hl%|_D_h_ԣp#=l#'|n6*a| ѣ"xSD*=|cG6aO[/l;7EGfV'z!&xm_%*FB{G$zFF~OE_D۸DOHD8p"[ l*?|8e__BGL3=aq>aў"623"ʓ@D8$zkH Αa!63!o$ 7+f'QD(+,ۘ=|6yA `= > >?Wy#@: ?% ѣt-LpbpP_QNf걜,Duw$z8(~1D@aғa~m*Q=ʓHZAJ0 +$ !@2&tyCkObUoL062J0?f{G$zaC\H#`?ᠣ<΍n^Q% ѣL0"o$zѓѠQx}aD:*hD[~+cAD$ <FD/ %rb=k a,1 WjD az%zfE_!1D(B3/ CK:?n`G~;'L,6|iHȚ7a_J<>'MnE|ۻzB$f.릀0D*~U ~a~=74>$zx7ROh&r4qFF7247=|X^;4b>N- ď?% #ע?[LpPCx㖫o$zd0 ??$z87;4o A1ç]2DY W>_ W-}d{DF+=rqD'_DjAHi0HE =,=LAO5 5''qDx^! @D'7q>AH#уӐJpQwh&4qD攞M = d*ѣ$% ѣp7=;&z$?$z$zd/葕DD8D &zdLJ aG%zdu3C=2,%zy'zdLȓ_+#/=&zD &zdrJJl`GV=reGL|`JJ`G~E葹ZDx?0#7%Z #s=r3#+-%zd%0#}D\_aGJAG~;@D̄w%zdl%zd%@1#%L= Ke^o&zd&+#+=2(#?Dy葹-ZJJLcG~4&zdVǓD&z$nUGD8D=w*CJHQG>J%z$%"2C JHDxD%z< NLHLWGJ@G~&z$/葦2!SQ%z~ &z$%1#)IJ=J_Qp>~fGbB=LHJtbǓpDuul!C@SwҪ:0YDtlnaL+tl" !ѱY$tlv]؊S@HX$';Ik+Hd+hQ2hPwDRĝ %itkw4?Iy4t?IN$ia4R$ X1BIy4a$v'i I@$iTl 'I œI@C$i;IW5%i$i0+CIhU4:弣3hU(d[4$ Ɠ1)$ 4$ HOFVt2ʱNx2:a%i<H$4$ TKҪP YEV`3+Iwƾ4>Ih $G^Q!J@;$i09DILQ'IA"Jx$2+$i0fDIKY@$i uIXwƺ4֝1$y'iP*I mV;̓[6y mG# 7.^@ ,ݍi/)FLو<@*W'?@(lD7416фht ]v?6;q/l_ͦ g/6;Q6ͯ m6Mtf#mvNCe/# m6/f'\m.B[6 hsyl6;a(2g6H4p'@ QBݯɿOQW@l(OQX f*g@I~ m.ښOXgf#h mv3 mv/S_hiU#Qh$Tlh׃:w"~\'l:IP͛s)y/歭D76k<'y'FBcͻ6o&ڼ+#y@տڬ|Jڼ"y ڬz͊Uڌm^*NYQ B1͋'B7 m^MhUnm^*nJy !ڼj͋?6?Ŋ6/DC6<m*Myx)fE) m,(y>͓6O[Dgو mMh5Ym,-"yؔYRmEh6Bm*Lyr]h@6zm_m6wDThB6n<:?Ch:шm:)y4}bD|!hJFypGhPGypOBGz y$ڬo/m m('ʁhsB6AhsB6w6wDވRm*>M $ܫhs7܋Pbͽe܅m*Mg_D;6ڬ(/Od͍B =(7涄>mn/ M#ܸ^hsD67 mnChsS 槸2>%T͍B'ƭBPMʼn6"4hs+|67F mn ܄fmnF[K%FmV1f͍UW6W\mܙ r[hse%u \UhsO:BmSn\Ar~BhsUhsmF͕6WD͕}Bk /ԹA"ڌfAkhszO}BhAy6EYş6פmmIlDm.[2hsYB6Fo m.6O@L@7|>hsJe%\( \( @DI6.hs!!m.DE6B '6b BUlZhB Ƅ6ݯDKNpgR!\#pwRt6 m.Y2':hAp E7#B3 m,$9 -'ڜʜ#ڜGhs͙|x}6g mj6gڜ'D3;6gE1m~_m'h0P@=zB~CgL~mV"WQ ~D{"ܺÏR=3Gz$%h M? ~#h~ AJ~IM?Dvb=zPGO jy'MLPmxkK8.?2]~]4m}`ڣ&p4w$p M/Vxe5A%]Vo`cHhzrY4WM鄦U4[MhZpBӃMBCrnBC1iK f] A\k4ͩ}A D.*| ;. |ҪiyMi%Q|vAmf.i%QlM+д(M+t}EhZ0BVi%Qn_ 5p& MVivM+д(M+д(McDhZI9L4UzA̗4D+gZOδ" hID!hׂD!hqMsS434L0Aӕ\!ikM@i' i. 
[Binary PDF stream data omitted: embedded compressed figure objects from metafor/man/figures — selmodel-preston-prec.pdf, selmodel-negexppow.pdf, selmodel-stepfun.pdf, selmodel-stepfun-fixed.pdf — no human-readable content survives beyond these file references.]
5883 0 obj << /Length 2332 /Filter /FlateDecode >> stream x]Ko7W5MעIz(|(kGJ6_JZId%!JKp83<ųr'Ngibv0##Lճ?OLZ z.GDC_i/=eۻ2HJqr|Y~6^!Q-s~MB9[eQCqlj}T\F vrQ:0Rܪv6?{`SVN#o:F]TG c$=$a_Jڹ1.INH)( lhcaWvfojĤlj(><{c^ X,_=Bhn`0`BZsb`SyX,3H6ċ3tH̓ϒ{N36Fd}+!.N6^q+G٣"aHcHʑG)38Nz\V<=il5Z Z+^F {vhS[/a,zZ?Ť@B?8|hA{@VxgBAVe\H(!h>NsaiBm6kxpHEHdpXS"]*)Ezz:[[V ͝@9VfR+Ԅ((tx{>fE; ]"-SG}Nrq¼E2" lprϖFCDG1K߾7Qb(H?EVMYp@my)ߑnټp-P). o!ps>*!Gbn ,'Oo<^3KbfY CG~S7h==#0."KQacRS%HM]tiK%1dE ^`?'L\ЍQo0W}VΩ#J[{OЎ(e}e`/+U $9K/y~''%9E9o܎ޫP;<&' b:0Xd5;҄ opC3NRtGO:AJKr$!EMQz/q^I)i "K`{ae$W>JIʂ#KQp<2q"Jr@˿n9|edRu_mSשAK\/=]70նuow-> stream xڽK$WdgF>"ma=B.FX'TK uEaȾUy:*GSHC.7J){V7Zݼу%Q7,]!K$IP`- 'GJȺ]!RZ4$OˢA6AbA6E2RGIP;Ղo0e|f0Ud Dy|,=yoB@5ٚCjPe>[:5Զ]mvu-jɵT uRᦤ>JR`TῪD{'noȶx)[=oj]vu'>[ϡ[v]B4?kPneڃYEkSxB Lg}DhaH$.ys&MŨ2\ n5O `مUuXCSfnP4eyq?Лlp8 i0[s V8[ 譴9Rz+  &r:Ezٓ1GdV2-\';FRӓ=6 zkuN?a^Ħ}^ /O_?O?}(R>RzG(mDLEsWcy*:T$PATJ^G3XhOZ動* *Gq2z=!)bqPA\Ek UFq2B34B E%zF fYCbPQ*֎ r!X;(Dq$sw"uXQVPu \)|#tH)bLꊅ3CC3!y%K1: ٳw`Y-\ HJKe"DzJ"19yc YQ VqO /U!f~+U5^I/u?|\Tp1tWQt8Gd\9-@@亾Ej $*兂:pɁHnw+Xs¬WfxT& GY3Fǰ‰)f,C}uhSYǨ\E[BL!",PA>NU\(8DӡV WB="%=R֯dܮMKPgsmeif˞*l| \|TE_N+T*U4Z ].P]_3`l/ސ"|2KG!=Tx%zKDw@zi @ E9Y?Cs>RÞNO5`!RŞNUX^N)bOc<)U\EwHdp&B렔*UٮHdg*JG" &SK`IQ͈ypb|\]7}ϓ"NEBE0d(w %_.yg Ѫ7r}C\> wK_Az _7Wpkw7S[ CTyE`w_=B{u|x?f6o/{~k}q^˼vtxop=pFfC({x=vShlt6 n}A˃-Z撱[2T0*D)B2"/y4_)!cU%ߖb8J$[dy"HǩCǹ'E}PQB!,9 7)<\E]xPa)NE>X.ѧ! Vn/t4uf_|?HCYֺjE`Uoūbgd .؟X-͢/]*1ֹ2 c"yD37^-3߫H7Ͻ;TyT2\Eo2"@fӜ}=Bb~{smTLjj7n[>2j,62q+xYko~!r}sᇊVP5 $Y릥BjD N'^o^R[G !E=45by"y[׼=Wq=v endstream endobj 5886 0 obj << /Type /ObjStm /N 100 /First 1015 /Length 2135 /Filter /FlateDecode >> stream xڽϫ$1hTJI ؋1]c6Vy@4֧K~R)J(J|P$P! 8)X.>@8aq|| Z}T3@ڏj֏S9hH#gL  T_/kȜG-N-d=I!w*;pȠCT"ApK>*A_:hn(iA:XNUR~I2(h~ :iq(~~.#%X*l5XgpW>[k&Q3O*tQB59ΨA-[,N#`r|T?Sjӆ9430Q\\qe1/.ՊWi~^ՒRh&~F8'-s9Z Vm5hlԍ n-KvsoS7é4n2 n3S481ufn6x )\)CW 0l|ć5a <BaX0[bv XbX+ ٲÇ)ܾ׷wo?}O~L _~G:~y_Gj/ѥ ha1m!ܾ ~K%)r?oyÿ%L N6RGI*Q\,ZrlGErw-n!ʀ EAbJ1A0 M{ ZSșbry-P&2Frq-&WA(d;`=Ƅ+ϐs,ؓAR^ V 1VVk3ZqFlw ?a;}B(3"!2sf=> stream xڽM5+|KOi)H+Q{r9M)"L\ۤBK#fi$rFvKQ]EQ9QkX$7ttԪAZQ=.]=* K- 1G-tP{c2BLg|zL.s^rp9QFx|"~oN.3É`7WQ5WN]9 rR +'U xIT%zU)7scU0JgFK%|@=b$ PQXR^jvP8OkimD2ͣDi2{Hm,s:JW  > c&9g !0ˈ4D(3VLa\*T/&7C:d:dXG4̾|\Orӟϟ{ ߾+dp8 {랞~ˋ~_ݛ7Y0"Ń2,H 1C!J<<& A9-(2c9 3?riGꙂ?"[m!D Ge@IWhyx{# zxI Q6H: R;(1\)01{z3 01e +ҧׁhhLh Tt"zE':jRdQaB-5)*tR6M'ԄXtO'jS |47+<C; MOԆRyC; GĩPVʁ;Y!s<";"7ҺrD#c#ˑDb!Dm>#\ ԗM]Lay U&Uq$y2D+šLxI(1d@l)V,=W'Ccø,޿[%#W !l!s0)fJ(fE~CFK:M#b& BT jǗfGjjC􄘖;yB!GPf j c+zTEQ7(IQ6TbJjR[R6ŎR&ҴB5U 6x*C)!AzRxTjz: TS"JOi 6X: TR"t@6D Kˇ`PCmPS.t6(M7=""AQxLSʠUDa8 h1 b;~͠ZH B+&D^_IT-%Z k?ZK4XZ‘DbeASmYjy]VBmCՀ˄ Xi}bX+ŰX)&V6Ćl^+{5="UT)6bttP6J;(KbhjS 5IqzzObjS ~|s`xZTU(QOjjA,}hjzBLKm`PIMc+ 7}CQnPt0&D'>"*D|zc7+37"ر> qɸ-Eq9 d#,}g9R/++8smn endstream endobj 5974 0 obj << /Length 1086 /Filter /FlateDecode >> stream xZKo8W6 9f=PPAڲҶX2%Z!!A7/#4yJh#W!FL&QJD1F$d2K>AM:y((a,á>?<Ӆ{PG<^db5G5Y-؉m!:YY~s91|RÜ$|A(!J*QIEJVk.Prp,:QcrAN3䄛bHˬ H`l/TTV:9`~/+,A56эEP.h!-tdi:P_br8&D~َA~|YӻTjOi mDB ov0zdt &W JyӊMs"'h z)` /Wr*DJϳ|Rphnߨ Κl3Б`uK¸C{Rb| _ `sH_X$em w︢w,̃LV Pu,"tB0 >8 P@Bx g› {G b>m.#4W40Udjc0#tAk`# zc$Da':/uZLo@MAacf*lLs"jvZւWqԌ0ma8(֫UD'lA>0 /ݩw{@P3GRmc}w(+Z ̲y]m+p{DS8Es `-}嶺x^=OyBpFjbh!X3#D%^1={ *_Nb|MHj3B6.G x_o,@jh޷; /,ܸ"+Vv~q$ /zmmVŮ^ ~œ//~Cnr5 Ү endstream endobj 5888 0 obj << /Type /ObjStm /N 100 /First 1019 /Length 2416 /Filter /FlateDecode >> stream xڽO)xL.VX$mAIl:("0b8>]=rs` h;ӏ&U9(J# %54䞭QChAZ:B/1J5@Z˫RUnx-@4)0eqnZ0A[M/PwPeBʹLBΣCbzB^B^CT"A*)Ax+d HIRǵ%C4-x3Ghf+x/cLǸ5R}\у-W-CsPU#Z*AwPP1 jl+j U]Q)ԚM2A9*ez̶U k56^mt2zIt]LM)5cLYL#,o@4&%#S-6+@J-b{QAI %H=7*6}7KMio@3 l.{Boڸ 
1+h7K0CqMQ'6ǎ0n ER``,:nZw exŋ˛.>==~_??mBzH.[<\~x9;wr DBeͱw}^Ooeï1|M@(5aE19MDgEh~@Zh #ʱ EErU EQk{h=J t"DmWI#MebD( =C:"#?WC!g[(CF'$ @h|PQCr( 2OJ¥VcQALX.v HpiFB.EqrL0\ڶt(!8M̚[)BT I(wiaz0s68pBk.Wri u !̢bJ &u w1E9N6=lO!4SpSaTP_}J%v=9+l@l.=h|KW71`=z qGWբ cY4IL'fxnэ=zDyGb3!&(ܥ'tpS6(VB %F=a-ղm\toO0SF n( FuըzQb5 '׆-8*U8T.w"g,IJ!cwōb)&;dMklf ,e-Y&D{236ݕ0L-{kH(lQ_xޖJp?lA#*F ԟ;CUAK_z%5h/(5 y?>>\^|.o~@^=zz|Ɏ 1O_>~xo{=P)ʉڭy#HeyOxT2qNim_7 FFƪly!(PoTo4o22222222222222222222rvٕ+gWή]9rvŕ+W.\\rqŕ+++++++++++WW9gBl³1E;!}lt;MI7\}bA&DoCR̉i̙Yf@ 2g6YۋgBtEsԻB)Ym{{A33mT vTdN{?[BAKWlo|5#pNJ{ԙ<okbc}]dLF/D fwcgXK;Sx\$T=&ҧta_1WntD3̳ b#EN5ӦU ޝ#t#)V endstream endobj 5976 0 obj << /Type /ObjStm /N 100 /First 990 /Length 1507 /Filter /FlateDecode >> stream xM7+tL.jՇT,'f@!C,V4aZS*ܨrHE\ bn.H(\`M]ȡquJenoC њxk`BBhv4D,C  ttcAzȾ!B;)HX^:'FlVm`xlDhZϡi Ŧ)@Ar tNvύBr-8 ZlڱKQRdj~d Cv"FcZ8vBB8݇h.DD.[CtA aAB݃<'T=qtGV݅"p~B>6pdGD};H՘pR#}3!gk;fk}d 12L q'|d8tb="@!~r"SLIqB{)8oP\b[iGh px`||\"B[{Jf#dnKgįbݔeFRsR;fv OHhB))LEK,8|49EKϲkE,KOH'v$kX=y&[z{.kXS]fϰ6%e闬`H Q{#A=@^ ` ]Dkz#A52_ωӤOl nX#m86nOmF^WS0RM ɌɓKƩ3l"KyכBĒT"A;VN)SwblO;,2I[!M.؋p ~_~-O1cSZ(imu ,?MkAbz[0!̽;x?@8{ endstream endobj 5998 0 obj << /Length1 3107 /Length2 22640 /Length3 0 /Length 24185 /Filter /FlateDecode >> stream xڜP]%w'AACp {pHM*c)c5^ %Uza3{ =3@ˊ^hjc1SPY'/x N@cE {̜<,<&f8;L-6UKc' < G)5(BP1vݬv2rrv32 dakle`joKM%df+?zـ<+W(#_[P:AٌhJ79+S3 jgtJArHN )&_&U @e+9Ѕ$- `fe0ZX3J 0]<L LL_??郤0 L?t""o&=3''gR%cb(mgny3WlPL`@ؙLA6{~&Ouw#:|{~0)BkmfsF Wu! }!alkeŀBS)ꟍ*b b%lgaYhdbj 07qeW+;կ%h$ؙkM ؛YYX9NNƞLafagx3@<@PmF;{P)`0 28"'Q70FF'Q7b0JF,F߈(e~#E7q@\~#ſoF .ohF .o7q@q#VPe :Կ}@,MLL]m3p{ dlp~%qT7EZ;lN@8m.mknֿohjo7bk[0f&b bfoc'g%[@0Q׺+w?x!5H!Y@VnlgY"u_5?]@6HDKOK D;@^7[:H?v7b]~*mjk*3H.ߤA9bfm2o?ODY>c66ldq=A g?`%q}MFt1],4 Hw?@9\O濮4gS{?eD%zAG3(/6L]@g)Нln@)꒽)o·ZSv׾Rdn~rXk1GDŵ< gh#?zkTOm~HL̗}o\lLlBv4y #PdROź5+*ϚNZ5 -\!kYk8tĮzES˫D?ΧrE"@shE},RL U/I?=(.brjN;Ւ3 >CTY|I.ن.aavVz9]/(-t qRγTh>J*Ivqf nEi^ٞ6zr#[:5}N{^+QD&PM{pM9^E $ٵae:%}Q&3ڏt /i$/Vߋ0OU*CCsTy"on]f|Iw+"|cq([5J񦆶g|Үȯ <,T$z1=v b<: _=E&]>Z(rw!MTθPHa7rl>OgGZX+-2$@;́)[FMKLi&0oIK#:i4ټmrVeoX_^yQuC8/n>/.+(-zW%YY`dU OP "<xE$LN5괢פ+qM}dJۓRq!Qz4G@ ݶgl+q=#:{$ΆzQ!>paHϲTV^ڭO`~bl6 &Zz5˒6dYۇ}4{o~Hƨ :1xҞda}^|3 $M\F#Ʊy赔̡4ʠ&K* ?8R_ EcY+>G([_Ek\\}P$$/˽/LNKluug#8yYؼJq|lؒoYLJtzPb/|,XK>Y81Bj:zybMOGm]#8۸GpluXlAoCxm|8,k`&E|P GrhyLlo|USXTrؚ.\߭!+׋8j89e;H,US[L(:ڪ6!PD mZEvC4}.&DЁgVipn_śٌ>1h4æzߡSse!nIɡY]wd7ye:ON Z[*FBĜߛg2Μh~! иu;#l -A("Ϙ" >f=; ):kN^vRl)fCA_'hlh?& u 1Ϗ  lNpQբy/P-R\)5¡pQcCw%jn$RÇ5e}_ri+:qq"ݼg'aX>#JwJ?QKLv# \yp'q^,SqoG#1qաF1޺eX~~Ys6EJFakV<;DG?z#uSB(O^[vGVL˅{ݗE_dm /N"wg>Wz%t>&]`vA{o mc51\V4=%eOۋH؛eI?K He S׺v^$,Y;1L}P0Jq켡֊X?Gn`cA'<1^pzV okq !>+rQe5zB4ɣ˶b<} Kղ o83VGB4>mjqc`KHkX1мIRi3To - m N.cR*OjkeH_oA~YYpnm~gp .a~*!٭HR;N9%h5b~7VcOC`"OB*V͠aO7` \r~HB7{8PeȗSeզ~j-F uO{=6ʐ'7k6rfT撗Mzr՛YBcbwc| M7N_Z}s%nF)b,gp'\RHu 'ǧ cyg ܏aiOM6ӵޢx,܁Ӛ-  UB- ԣTRhU,,k?2xJNZ*,3o wM1j M^jd#JM:-劻eWdaXK*{kĽ"#hOm3-F _Dl6Dr Eh!*:'o|FzhOݫF8hIa~e Oa-ܶȥi^a2#ʦd6:"f=+6'7>D%F;FE!O'V֕zQxX时(I|fiѴ!*tJHUiKN&ٛw:U6KM"4–uKY`klJ(욅"1ê L[fEigL6`!48_ . 
SDhfvXԊVѺS V(x#bFhq!ho8zxSN_)uj-OkoԔvܒ'wׅ=VySVEdXOԫPPnBD'P1rIXa݄S~V9r[2Ch0eߒw']?6TUj҅+12*72<+w Xc?l_|&Qu5{%jFeP}Rn=2mȡTpea{Gt˫ۮԷ೦yQВjX|ɼ,du[S2[q].$J(~W8k=vHN Wb1**[AMrΩ%'5s:rg(ax|y [zvsALgE9Se`Нu@T QwE\4fh)&̷ 3Z>'=?D$zKY1'ކi%:uҪg':q 8 JQO`e\ǎh0\dN :6@I̡;l@TiБW,|$^pT-T`A 3ŢZ )1F8^PW(O}]nr㐻G:jǓ(Ԧ"^;sZ4'Rv˧Xa!5 [X nJMrd\!Fn 2( %QT񎨨>J,mUR`v.|,%KO@{'\ۻ#YXPI:QZU.zq#j p"6Zp"Kw&kfxf5XfE&ꓐO4UHdѼ.ǦЛflO肫Rmr-Q:ݍţU[K 'U앞C̲d-iCa2ϸE\vI D \.2h6olSޯ尅EզΛlw{v2ܐ@{ ډlw"~8Ԕ\Aᜢ+}y؁kl| j%u -ޒsN$xyf MY21kD?_H> yUK$ ޘĥv.VqbԠawU?B8!kgZg/z-Vk] Pfe}e$4eƿ @IڈEA|šB%J [/qWk~4x'㎺ %`Rl_6׃.zբLRQ!sES1F1cyfgmW߮W몇}'فV+tїq{45%A{pLo/t&6ָvɓdo=N2)%e{N4)}{ϽYC^`) "*|zJqO\ΰI;&)gO.qQÝp7 tP|,L'fZa).V̳>9f'{EVqU/}20eߔUYNEYCMLeEj"SYo(Q9sp72&?m 62hO&g#ҜrjӶp"Nh/g_' 3Jcp!PԦ1~_rJ53|>p0RrӂrF wC|s̨b̴l΀+6|Щx WBd9 簉(b͏rz: ɘ1E\_em 3w>lnQ]$_دPDe7'75u4d齾nɧ$FAl@{YyrI2*p(f""S` S5r1m ׁTl;ۧBhRbٟpF=AQ}1Z|{}s(z $g+<63u d$X4,NJV/ku뿕vc 7'{}"BEIjZ)r!F= !5%a{pyMփ;H Qo-5MP[0} 7y$^"yWXrccw*4B~ydvA{7+琲ۥ;aSve"C/ OHIlR}%_9Ee?tki B?x;z.pQ&R0w%##$*j:];j3h;fQjյd^'҉,|a`BPb^66 ~t< <@:A:CƢ^#P1^4R;緩e)Đ.D#:]Uu*WLBPS yVd%IgԘ/=ZU`S9= $|)H̙RL=1Wn68>b߯q^XE ޮ`/ e_IE-W ;lj e8vݡir8b}7K@cakǑ3U >ݗ{ dwϦ-kcWl2g˪L=y?e9-&~̉ 17ZEa6M.9z3~wQ lW)g,UsKIxE( n72"]g+}Zѯ:Ujx+ъ,AxTvOZզƂ-H-w r=Mu9`ԞBBc hTش:9Ww:r67Aω}IXh"IN*.v+iyW`f) !zn#O0Xh8|"=3ȶi`k{8fKQx,bBBA d%v=RNit1.%a4QY .D3|2$P}-;Orܔji4gA͖!VNhc_a@1ɬ|l3͈KOd>U__@couIFǚ5Q b~ݞDL_A)[f[(g.#r J1Dcɲw4*3D|^놓rKid䨥fȑyUCə>߼?Ks.;~BTS!Fր{$x'M?/,'Vf@?PÊCRLb VS3uΡn*c/:%E]ْ[h?~E{M.M3^ G|dMzN|{W=z$g}NkndzR;Ewe۰[,śW^tjyD}ԥw%LS~hv@|YfSyҟT3ܛ3 pA}FPwfܿcZ[/) +9.} 0`{+^V\ SBSw:CkdѷZSV /+O]"8_ QqGCq۾ Vg.zZUyN7RI&U@&.^%o59N`'vcoɦ9%@ˏ~ hdYhqZ}v j?n?< lwa`ln}orm>5ǟ`j댼г!Q‘?KNJºBF #錭]B&S ! t79CT*F,d>>q}:!-.>m{(ӆw/j{k}v; ޠ:,2&OS>ڔvc{-^Vһ -|Gm6 9{ժxЭg'րum_&n8ѩyMo2Ng8bHu6T>`NÍ/,Wps2+k\LHwM=uD S޲X@ I͗%4`iy'lXhង(^5u:M&=Cdx`f>"\mA䥯oTKʮ"[>]d٬f$f%2iM٬z B$׭$^zW]ԱV/TKuP2D8_&}>zp!Cf[NM?L> G5D3iЏ1t++OS\֚>[q&?[t^*|nC#څ|6`M(x\t{CAVEj`63VRN2J,t)iݚ<0JZTNzϖj)vgu<Ά;)}< d+Xnz0748 AH*Y"/ct?l))qh9)1;.LF86?%xT#w:!Tf9Q*ɺjkW[mO풋Ɓs-b2PT-%{O0+ "VhJCB%̱-bGZ?XFk b([MxNy\BmI-` y&|Gi0qcӟ޹e7Ru{ ?n1k΂p^47gR)dvemg3 Eؑ|~:˲f4ƈWS֦Z*O Avb̈^ԴFGq~mpU-6Nq+PZ}:"\* ޸| zCrUp ϐײW^ǛgYpIlu՞#*h `6}cjl w_%F lPa JF|E OfRnDӎ+N"~t2y)!sti*4n"s%͗Ԏu~#)(&Ο#6^n}kX<ǥ:dw\!;jQE#!Iꚏ?Q'ӫ9&Aaru zVTj,{ ܨbS?pF4ΒvJZLꋨGFrL9 C.3^JO 9'I@7K`Nt+^#I>GԿXTWԃīۄO;fzuٻHpn'(t.:KTN#35vŨa!^|[iBKg@J|S`/GgxɮzG88^T ~pzNIh} /}؄6O`}'=X[vSFkZSI)U{7'-?%.9 =RFm& qgϓFzEN[Q*z4٦-ȴY$ϾT .Cbh9/h5L6鍤E]N3U$r&>K /Mj~䉳ګ@CO)Op[+veɯ"Ft`33X?G7|wp!>Od -mN3}0zƉ5Θa:T%P-y:Übc.Uܸ=o_O ac C} qH_쁃GM37\riYxi&@P{%8,l D9E;l\iQ$XR`ׄKP8[bM!dH׫>0s Ud)Nҙ2Բ(p)uuI۪},`Yqy֙KKaݧ2LLĦ vg3}mV$?0CS/؁7#;LÜ ﰩr3-כM5Dr[W)}&显fI4>h%9ifA}N?!&b)<[Ę;JҘDvm=.//-Zpzv<(\-ŬIv@H{ |҄i=*3kiZiqXU)U7 jCq>|\vS.WHN;^v_0潮ۉT8`mo}<[}t#<5%=%ˍK"Yتr3@2Srs}!.mwb6 f8׸a*||ݷ*x{y75P~ @:6Zcؑt!Q sROW tk%l/s[20{@Hn1Ɍ䦱EHH!tc+W{k~{ }Nykv|Vo:{:vb "$,+"H&P28oדR1"֚lUBy &Yw bO˼4o}K#1;$⍪"G'vR+8:/ D)ϴ}#!w$쿇 h/RԎS, {ƭ&ȄJW 8[.hmEh$/I_mNB=bXziE/R%L"̤y5ֶ鍖;] '`W6=<*$ F)Z;^IZܾ4úӵ| m`a2$Y c4}[+wd1M#T&)Þr>$|+qZCC=j\*= v{Q"L3Kr_ n*c]1y×B/7I;Γ #Z .{x#Q" ?bkMa73tqL̯-`^mV1=3e=qgSNݞ%hT-XF!z]ɢijfzz.ƌPgsM%$N㞑wR<ݶ9j\!!6]g_q%xPCfa؞ʗ0$@i]ktHTkoZ׵W&@(NȗsjԡPд0Cg:E;L{Vtnayb`?XiY?vω,LФ}폄H )?FsmN YX icUnF_a9PF=e)xڛ3*F8 iB^R`uN '^ ^?OԵ`=d!nSW A4.EUw;<5M 2LPd>r7Y[~p+ߊwt4NQ;oogݑ--j/D0,+s]YDԯ"}Fc]YJ"׸Z 4G)ʽ*3M ',1 =_)( g k⶘T;KO_upuwrLX}!Y=IlL=H;32"B0z~Aap/HwCgqTÕ60/#K]H Y oȮAjK*[LAL?\ >0;nZ\5x Xe舃7Y;_Ձ) &Q[ mѣUf=`ו VW4 #H+0ĀLU1Gv`(Rp?!S7N4cK93_Y#)i u]uOjvw.{~% BcnU곐Q=6PM~>@nt) ӽ6 'l.'sqգAhawjn0,4NP'\P$L@ )Kk̷I$󈝸\i!sidൗZ:)zzp,GoiJ:7_-[}- T[_/e70WjˠjTE}&<.|b01$&]EJ:-y4Jľpx6HU#5JrK8;3etm#L`;pe@)ჲd&Y>9X/VdR: ͼ"H`BmY5~ŞpVS%=TC6{u;L! 
ƚ,{&(?Ts 0zt>NCInP%{(=.SVmɯ½[|( ԡo㤃jj{_SUgSiD ѵV lZoЏi#rADd.=Wnd9`MIYkGgqƾ|@G|iT7W&a"?.~O78D<-`3>ɶRXVYkhcpDIӟPJ%6S0P*gb:WP }-ie'j"AHh9H6UX –?0 NQN $wB].C)\-Q8n-?/ q)Ƶ]@3+U x. lp]Hh6k C.fhOJm[mc!!LF).Y'Ցз\dfrHIVE) Qg*%-\߀@P.j'/خ\[IUBq>ʇM^fg2GkۺȐ#Au26;>"XJ {'񘒺t3n}[ŤPQ*-cㅝ*2ԘBFՉ ROIc{dhELx-1aT;(a:TG)h!7-܋2o2oLp#`7} 9^a1艃/Uc]26 x%{w"\/&z GP0J'$GxZ0!T 8KX)}-Xd-ɩ.vI&俍gvnܶ^C&{a"zZ? HYg]HN涎\CkFC 'v Vw=?˒(S{Aݗa4[HNeWiْ9$/w~:4[.X`Sӈ_$Gް i"=f|]ΫG:nŁ|+Utα]/DrJ;ҁټ\£E:ihdaN{Y22Sg{Dr+j}J*zja/4ݹSb Mh [v1tyZnσŜ  g\:iǖEh>}S9_!l7g:bD)>?\hCAd\j%Ϟ4g|'`苋F׏s)cb %='o_}ӿ {M3n BՌ򀱴Ej RD&s&I!"KbPЍm!uqJ@=䌚C%6ΙF<49\{yQP; ׫7[%W-b!M<\;ӝאَl(AL 9dn#J,m?TɃjH&w+rW}MݷHD4ZSK`-,P4i:c^*3+)ܨ-~bOOܾy#hs^+F!镅I<#x45};km`@ZGVp؇O@FѬ[H!Q.qR˴h9MN.}HGZyeiK89w|T?D _(ѳv'(UiA^nWYV;RLx:Ί{I,T[|tS@Tq bE*j)'"SRlL<{!һ1#C~Hv+ӈv(].k?p&ChEOUv˨.|(g lUA΋`97 "h塼I$arVR|Byh6mJP2?޳{ >"@nurDu3O'BxZ5G#)a~ !ϳ[`3V95yb;!BBy&*ғ(8`r{WWACNrX`T`!8Y[Ob0y(hσ? 78 #o:ɨQ޺pj:*.NAYqVָ.08raՆ 8%~#1~\M]+r-4L;L6B%4#Y[tZb+t Ku?0}x^aЀ] @T&EuDm$ɝ]B]RyT1R(lGŘZ_K`S2PʧאØeR qC{ov (pŘv3|O 6NKY`U.7,!Q^;H&m(Ǔ[QSYhge%$?RrA;T/Ն@ /trYC*ԝc[Y_%OyH,,& dsj=DzWQŖU#\NC s?D +BY/U>?zky åW15/tͱ(SuL/ 惴v?~̔tgR>\nbB9Z7^]upR8a{uo(blm=}vc #e‘V9L "u3}ᵺrW2ⵜJQJ=\5Oob}RУĄ #OYBxIS|Ox"AmR#gyϿ@'ȓ)0y^WPn 02;`RCsY<Į~C $Fo(B$eǶVoNχqM[ !{V v }8So© 歩kx sI_± AIY0)2дG e Ҷ;\WTl_˷0Qg GM֞\@H!x6)h(ICg}BS>hq5ئfs=MuӼ`5uE Rp06xeŔU3pXqV[*WtThF+rvR^}dMXI7J+]O*ti}En3P_R[}!(\}ݺOk(4IW J]K҃sS@!5YU:mEY&qsɯS#uewTfču7#cv$$Vh3XDrJ.0->8yCtuo2!{8aW҂|rlKQS nƮk̑1nKa(NL0 Z<m5tUBtT2>oEVHCi̴6/R*Nr*w-ɓ>G y;S9m.,+n`R_zOH/hM䊢m7M |<ҢXrt_W$wWv+08^} DɎ(ZV7I]L/[ق\0OE;9GCM|m/Q}mG4ԄCWZX54kw*~6@bI A(kZ'niwe"'t ={$c˹{ e\/T6 i<[l8'k>~t؉?mjm&H 10P(ɼz]4_Z[[mOB_k7X.A!3#ۊO4J5}KyxKLa fHq \&͹UTY޿#f;kv@C&eT_S( -}{qQn-B+B` 3ܸ Blņx'DEDﰩ CW "j,JZ^ziXLDw4\DΦ'NJ[}6~NGzE+yԨ_^(8`uQ>.@lKΆ.ӇfIL88LWA U< /Q= PB{*φN%z"jn9!͇{;*POs-WNr|>AvLd h(;m(&!}@j>ڰPם& 5' Я^k@e](X1 c%r|zMȎ&2[SKsy^ܛ܎iRj (},kI;.kJn_-էߗ5tM 4NtZEsjAUOQ_o{$alxrH?^T!Qpg LmpjrϨeԘC\5Q$"> stream xڍtT]6]ҍ ]3tww70 0 C "t+% HKJ()( H}Z߷>>{sb`̯p!(~P j@a Prm'b7"a0P0ƦFup;$ Iĥ@(7}a]&bWFx a.}pA IIqE(` fG`qA<7rAH_#W=OiD_1 FB7w q;Bƚ:}O(/_>@+  <<stP(>vF}0w w`!|S!H'[F_anڬ wTFxx@(o_PMGr~)h y@5UpnLD9CQQC\m` ~ojF{"<N7e@aNЛ >`\@Gp:D~c:9$` Fa{鿏XPKGDYO4_HR Ņ dI?5N_4}τpKq#](J!7/Y]2Ug`{ƍt}P7cSͮ͡.ߨ& |3pg5:P׬P7 oz3?RA84!Q1JL#ppS^0 $u @ v|/]B sy{ DAQA"ovn{6BiD:ʵ!Nƨ G9dORL܈eb k\ ̗&ضAv7ډfhޗ(637Q 2 wnb/ 5(:So.fzpVLEiMx${C=+lekfD¥h*!n{ Vǔ9J_fKO'YRзkJJ`;3to~;wubpX|H<:TECy#K' Jq*jA)n^%I/fŭ-a`cR _'}$Ies1ӉUԼ*报+˓L{SyOmy %+)E5+a{!N?Dv$fоjGŧ^T}M{[ip2ԼsշAev 'zj_DD?`=lt_ϵUf/bURb3 #_̪ClHsNM񽌟]/}4R(f;#vYl:5Bq/vmf|?Pݍthl4Mwu( A[)m<:[-uo6 T/ ;ű#h4jsUQF ohX3F{Mw]G$s?E$7? SvMu?:8a! 
Lؒݑub}~HMsFryoA g+6NX.׺а.fSi$ŎBφC?}GsܯT亿kM |cTnjpp\yZ;r niS5֙%ݙ0dDyS[>uyխhWڋΟa>_fBFdRV=cU5.MYQf7f~i[ZsNs3K2+t5PDž/;zH1ƷɅn%vh 躟YVc Ь 7ItN&]C #@r%zIF'IVͺ9@Avx*Σ4Yr7u_vyEҔ`" /lTbGch,_:;<9ωN06{RėZ2'O) QCe>/֊1O1-R6jgnGD2&}+<ڟF[qƅ^*E)go& ?}#EIWK{IfHHЮd#T7WJkbKdِ0X*}6*J$M:&k!iS?z1#+&|Un23E.o꓉Wz*-(t{vI"(BPx!I}_ Ԭc,k)،n_F[I"Fd5bt:4 {[m2yNGӁ "ecؗGAgwz)pa@b́ GwJSjcOU WVl#K^Žͅ{ EqIobPIٚL/d VQupUQf|b ة'>q*w}Κtc}@z Ѡcd:×2-.sM}A&BJu %&SRqbE~&mH㾪blI԰$x`3EFOk}dUBPI{G\dP={yjDڗbQ#s<Ӵ:o(v?EB@Im5!1h,OXa#RPJNm [VB^ױOT+yBvI+vsNHH{wNFi~6%wIn1l=Bvp/5^W6] hf/Ú~ h|H7GpI,ӄbIY`{ S \6fC?>O\'y2Ca8O_ÇTFl%Zq> 2թ-׎.ھ7baģͭ0A}-Q&C4?^ֳ@[u#fjj}upLn=c첡XKcZm50|  f؄Q@5"eꖙ㴝QG넫ÅwTc&%Ӧh k)L|Ŭ c%NF yC fI4&6Ҧv=:١\msΌ}H<%A]O3å `\N+T.V*Ymֽvby| ˝b#37[ dO1M9\ɩ.aֱ>7jmxhaCr㫘3!0?iOwAl盇#)e_lg}|סFmүb+vqu`aE-Rד*6EU׆VEWoqkNE0u,;g3{^PnW?MFF^X >D!+N1Wu%?o4^M]IpXRj!(\]qOi l~<Q > #rC?|ИOߢl"4$zq3*A͇5+gvJmgV;6]XJ4LѢ.yȖ2էQ4*^'`_x񏅈Yli[% 0mFrߍd,e,:`,Ļֲ\Rf ڴrOMqc$#m׮Ĺf'YBDDM\j#?DIƤ 2  woK1H:%!Ote .~ר* Ko5*k&k=vȗٴ%Dw:m69>j~?@qktz9j4_;n|*rEȞ$YxWGd5osmDb_rau5hU|ҮgEF<'2}ƚKQw\죕1fuIo<޴AIrQ&G޸N>å}!ENLꇺcFB}r5B8/#GM2q{;Z1$bxdx7YSv l \R*ATzn"-tS? &RnY]oT\R߶ėPsy:I!m%)$w* "gIi۽vѯ]'osZK4VOׯ&MS2*B^ J!8eiv$9+c*놜C:' } !ʚʎxmk?[N:UZ4&R8t w^FA֯pm+gqj{:7{-b H>ZKM%?Hyur? >HN2,0Mz5l_EE.cyfz4HI<7ȺXS^mIwOV#K(DH/Y~AH&$oC\X~F1C-q>'E'w~ )<^"{%[e3-`6Os;\:e-@sG. w$^d$lf:Vq_mc}pIFP /v-ũNCQ!lŒ(wSl>&U++.C0*fe,D,P@Uku2Y8q-LӰ(H춼̹=Uؚ(:sKEA֝͹MB@TgBH?qZj edJQ{[;YC5H?-JX)h%n-܅%h= giZ1O >{HfR>TUSM m>H!֌rtKx9r,Yd.T%aqXw~sNv>䅔۽^TM-X d'W԰Q%-f?Rn–@y#SW"'v#EmQ9\ :fWtExb2\ xRqlSbAJ}_FIj #Tž=˳uٱ~3ycÃX*͌:5mOf [˨eoK2K©$١%q+ݯ+ 9pf0 r f^Uf4 G'hc. H]e}$怮mx3yת le<2)u%=P9L^Ad1Vfv]x:𡼶,i$[_ǞÏB) fAlc9K]bE_D .:Sٜ*ԤD.|~(od >ܲlw6G/_e,ʒ8ln{OtdY*T&cВJb'dXRZVe3W"C y݇#pd)0|*h"lA.!gK"ߌdk'62#,OFڅ:)Yz6|$@յ7x"7?vD.Sl07;C ~8+d/5;n//;Di1n4ẇSUy(i:@ULVG¢.Q\"U!Q.$֚Em S@]ݝ4W RkC [u][C*WɦA0AZ{RALJmϿUε**;0#B4^9cS*wMUV|01 HHjUz ĹPu|gri?Fp;iz̤Pg*\,ʒ^>"_**@a/6HLLk&Ɗ f(ś4p},]}T yk;ͣMx&b%Rj*QU*[>+0uc].V=N[2E"']}6Kd7/%ts endstream endobj 6002 0 obj << /Length1 2160 /Length2 16094 /Length3 0 /Length 17405 /Filter /FlateDecode >> stream xڌP]b݂ Hr-`1{^ 2"eZAΙ ,++```c``%#St1LhWM=Phl`d0q1s108tXdR@;S'X2a9yPS99iIښ:Zd -LmO46(-MٞՕ։hGIpt(::~Kښ[,@_e)`cilj~:@YR ooj`#dCcc9 /&CL03hh|7t114ztC"]9;Z;;9YH͢v&@[[S;g'X:ݝߗkmt231-=gSIǼ`M STMq26kez[t2t18;~6`bi 025n65~GK761~I}Lv6bzUiaYKSHef22X8ˣ`h:ʕ3~QC@ /}tM&]y'ߘf[g Zڸ;}t?;,}ovWJ:H21n$ffj`ll]Xڙ*,b޳}ˌ_#N/9sRov@0tt4t}w d|_SS@Ogt~Oka_4'^;^A,Ez?SbЫA;[752u^{1oOӛ,/BFV hWf{ e{:֟wr;_)VXe/+/dz?_]| 4}7oW+t +#rG _wejczWdiwwޯf0s8Iy//{;\w=nVvt|_^+vqhdU~_#J;,^v@y@tZ3^ktb?U}+ϝzw[ZBsB[;cn4E_r# (҉zcӈ=0S)<ӍoD6mUp= =yՆcCc%޾:;݊IQ K^J!"+ :t <7FT55c̺à.cc42y0]oؑYkϹR52k[R]4mݏֆ28+* h8v9_ĖT0NVkd/5JU\TDsJoCޒ_hU`|RpO p {"=$i8a_WVE&-f.͸YaZi tv6ũfldCmU$_3MUߞ!V2#̄^jݴ=\VU*$D9UF:zN0)IУ 1DҢN?zi0pfln90u޵TdFI Ϛ@&:TpFJmYjRMԄøֆ9ژ+cf~)?$VͰydQ1g(/Jʪ)>tjEadKJP9*6+`xZ`*ɿc>r%SǔQk('\,{;gy6ė28q {LӊDA$eQXXD+L/b$iS[r0]6V<28'Mr9[G}WܷRx~e b TMRyfuWN9Zjr*29 VXd^ViӮl|c6xqys TUS/˸GӤu>HK=c:wT ˣhӿj(}/$ Ds _Vլ)(3B1M+򐛌ȏ.Ԉ&ߗ"I%hLLA=&1e˕貉('G:5p+QRZăǪHGӤBep2`\-B7~Y㌟t a㯓(j1Y׋)υ^hQۿf &[n^?}&\X/[mTvVD2;g<Rc%AVcf?¤Sx' r*~6[X/An2K7@ [pnv`ӊ#V}۫j#UfRѮB=U_ks0eɚeUTlV,Cp`@UwNA=.i6|xOn5nċW/gqM=×0b%_aTx-xZ- dbRѲx1h[k*וv-3lHܬ^Ӹ8iy[L E+X8h0px&Ft%yjU->@Cjx\dؐ_cgl[= 5:r#y@;MpYn d 4pls4h21[Ui6'<Y[:ߪ>d2/p|X0bQWv'  2JԜ|RĤ;/uh?k5Qks0bG'0R?^#[E-f1hUI_8#R:2 *)Jp'H]՟pzTChEpvTVXw;;O 7;WfNzM$ cnK;cp/ʦ0=XR[宾[myH{()8dd"N-$/[bsf#n`zkHQLJv9ʌV0{s}%ͥ+$?5qeTyĉSw[*ߦuD:֙=POHRʋ:T"0c<-( I!8Jxf G^Rcec\V6%`&nxˆ+'l^sUP >fpzg6Rvq/qx9Io.H3n-naM+FJL|_DS7a@ w ,k"Z+ @]uu2WlO_ o=onTZYC{cf7 ;7M6z$-Dy uu*mO2VH C!^ajR`?*[oǾG8[ÿ+$ȫ]Ca6GccmӥK3UE12HZN<>m#|b%gip@)BՒQ<ُ,K3H> 7ھf&,zxMq,]oU?(䄎״$`"CNy(xi#*]6=aȃtU)KLJ 
&ծ49Sܯ 'wpzѩ( >oҫo{ҸW:L0?;A4Us+?4R8J*T9TNx6FvWcJqQvYL!՘AMTI9p6E:]GfMa%dUJK OԠTr;[?wa[(!n>^d3niŔ*i9/rp^mx|˞ ~,o^EAVwz)">7:joxڱ fH7qOQciZ%PK">ڴ&Y;Py|ЁDIvQM>-x)8Mv_v(>9ғ9]@Rv=$^߫$׀ g؇8gq˼HVPl0jn^ e__dveG{:p뛝ЍE~aIFLReB9jRF¼&f: (&wŒay >D.!, % PTOszam!G~ӽ&j%+[4Hkw.(["}xH÷d/WJI >KCC/AZ:*Â{0f!!1S8pnpIW7cTMgR]a6' 6}{Rhke!T PO=|QP2peqBg +ũGj#x[U9||CcIR7lz91:Z,zr(ߌv0ypiPlj~8񨵹T 1N'Rƈa^Ж-?zY WM*E}F  }eZVi綗 ALU 0y צkU BYHQ.k`TsH2=F- ۩/!RۻU \KBU*:-.eȹ5#e2&|Q`POs:r\!֡U(wfȦQ~|lC>0C?G7J(LI֤;Z6nzdo$S*dJ9|T0%'Lagm^t{ ( .JS)XXׂe׫aЬT?dK6^̷w-فt2"T?FՖuoqin"6vH~;(4(zvO$C:z*loٺfleC+ַڋ,_pHJ2M$ULF n!2S:%`L0v6Dr5K0Y0mKR=MZ/R /a;C W(L+c(PI);hn_;!Ӣ,vXM6ıٳ.XJHY `j6x؆z}dd_Օ.8Wޤٯi]3gDc+~O~#{ !Ee _t)K6DoO/1>-ʳhS3X&Ş(/r~—zMZtLQz ]tS\ft{]i:oFtȅbz;Nee.}zˏz2&'+1H m\pzL/#-\pۧo/VݷjrM{LA5sn\-xi )Ew=T%,]klm\d0jN~&G9rПW+0܎5V@|i0 (lO0&k9sXIm/Vb.\sTɷ ]>{䦶uOīCQqZ&BDD$lc vK3>CibcEAJXʜM|,,h1zD3uF GRY+D vrKe7A(A''_\Y66e=sCCn]'~g"JݥS!,[p_,Z\6繕&GOt*XAC^))Iy/T}n4 ?MTK AU^o/dכb"kC,oh~pAj$u" r-Zľp Z-!Oss-E7-H`!sO5B2b(}m9B ٤ 6/E h%>mڵ7󾁻{$tsp$=zS2wEi~NCF,=ΜUm&L]IkKP_ڥ~P )W@aBh ߢu!Ռ32|+eZt錾9 ف}𚖖]nUHywaEÞi :܉6s+z`᫤D~+Ug}tvlH8}pPYۥ]R9r9_9lJ:4UK7a߳v􊇖}h\Sv{@_裂|c}G)􁅐Uq$}s%,P ~Ƽ^3Qd=^,bBn>_:: =PҐ@t=?}AS3\ɭ!]o@O|ݞVEyELH+b+X.-qa#tNtAF/#)Om?)]4zcd?oiozY=IXS~uT$z]>fX'Mp W:DYbvWˮsWlѮ/9B]e wmqK8{]ԧ @'^P/=#24}hpa>ks\AC`L8|Ls .tS:p.'+-Z.Fy0 (F10/߈dÐtIw{"-w RcKoȃ- *5i倚zz?Wk J[AWF`gZ?viȉU' XUh~<歹fE2ϼ@l݇泮X#2iGa|W tF!rX۱_,`!25KwmׄGD7RLF=~hh6ZРfˑڔM!SoO t'!Xp@x\1rս[kejCp3:kkdvr>s=S% R|#D1 hh_iy-Ou<͆Fqr[$4KzfтȧE^>]Ɇ0RQWl80'40Ae^O+[0:k`bk﵍XpS=rlv<˲L +nsPZo;D0.6k?~I?*@_\Á.::9*>^7d'vg *X>Β+I73b)Ă˂mSJDib]prH>KQHֽI0;U1ӄf=e4RޮIu[kFJPÊߢpz3)ߍn4HUW6?V\ Fmz (YQٔFj=1kY ЈïҾTwz>Sx GvRɇ L?˼B!,! 'D?CFɵp?,צ!fҐ%jdhO354Js`r7[]ZC{89/64nm=gG=XPr>nu~C^~f8ӡ5Aq2+Khe1u9 *ρVq˪v_o/wC"Ƞ>˂F~fU:[C0490w.OM,֛ (^xmT\]}$;7aFP&1jW(o1Ϙ\ f|OɡzAhIU f}+nEt)KF:^2}%3\==KD#sʽ$8=O<\0¹0|nJ͎xouڄjWr}STO.0  gjDњ{d.1l3v #b,~. +l[0T$|mN>up`;xmhaH1Q?nf"-db^ ApMꞒHG4a7~ҖշvxCQMTI+LOrVQȇ\zܳ_Ї6f))?҈ mYwMy]pC)MH,8Ai?&k!(kE4GL>kr@\[K!Zb C0PscS8R֝_uоOxn' .}kM?S'+?D6D6^(V>²xT.P"m^젮Y"F6ݟ GcN =[SXH 'c(0Fc0owCtuYrYZ+RD`|R=w@d ʦ7#F-̭#hw|pv۱YHk(ۜ: 񁆍5EWiy:412scq 2ɾU'pLӹojڽ[&6r E!LʷK֯!{VxDM@st\܏7"6I(!'b!`i겴h֣H2舞3Œ6>uM*ڛ܆~,njɸ!Q~f&v >,>~0w }(eCqˠ$v>eKp/ ~a_kɒHfDS%y)K苶xCG#MT#l!\k%;uaܭNmZ7ŏTZXoڴ盿Uy,l-BT,^Y.p O zݭe~)-Xwn%ac lA0aT#FJDiW؋ m@VY4ut ]ݪ1l\jJw1\E7ܷ8cEa;4B 95sxfmgƈ,ѯkx\{å A|J9۹EsAMҞˊ/(9^cq^襋[ILޏZz=IQY:PI݁Xo`g(g=aуz>eĢT-%~\/ZFH9+{ҒVbˡ綅 ̉~>H㷮9͒iNB2$C)/zyZ-jIytqG Ηjyb\7תO_:L1 MLa ]6&Ru1FYx@#'Jpu#dH3s:U$0c.tLJ*X7oez 8"[G>p6HcORIHqzd)ixƒTv\ T˒ĴNw -. 
r~SrW oNW=: @  gi Lͻ'W"L&7KWQˈWhkʭ}\YX+[3Ju$YB:;;zZH"Eb$06;@#/.ʛQv88ș `kh/&'l h5Qhb-81L"!bu c ]:~Ns\DŹ8NPKc9NUW 2YGepN $$Zm NQv7&/ĪnR?U6UW7^u_/k0`V.,QZc;˕ҋɋ+1f/bFuy $6tGCqKr*kqi2mg.L;~ \pT-Gj=O71Gs# _?9*h߮&)EY>@}?PZqmdw)/Fa*iASU))y xtJvQ*G> |%A?)' Ns3o~'mTW}:</HGU}.u*x`^JP kk2KkM׻Yo|4a me tlt"W-ŀQۍݡ# e|g,6aE뭓֏X['WǣЁ$@c1 r+ل U¾VF;1# " oܥ<մi*seE &`Vޔ2DR튊Up[jž6|kp@N3[9?* J ظھjgUO g Q鄱aNv5Z8~/ T N~H%!mizfF)̸ ·l'?Aa[[c-5QEIL0p4:] hrpUf¨8l A0<)!r,FZz^!á'JlFgQЩetK9'Pi}\S0m_9+ |9HAԖ^iM$kp׃(%*2~s݂bD)% Z/ 4V}Ji)ιFfn-/rE6NSt~\c{^ݺsdq )ӃSZ!ǟl7a9Gϓ] |8Wla_vx!UƋ\Y5{OrŴZ^ c ̙OxKw ]ݒz+{ݱ{Mb'8:|x%$l# +gO6Ol4b{qb vƼъڌ1Ȍ sotnį^.8kAGE6vHzߵA-+|2\ZfUeoEnq+ɲ9Ƚa\=\fCfUkj9r_&RaL*%˓I)ornV~OЗ΀NyMѠp7TƢS*_%> t @.,Wsag]јbАq~!o M3JIj^`cdL ϔ{J*)~BodCsc^ fԪ2RDJɹSS&ĊD|.ote ʍMj@ uh!I}]{Vש(5T=zԱw&ా}=\mGy$!I܇f,̫(QIsՠ1 sLz.*wX޸E5/P;~L"5z`+POCWpRi<m=ehn1 D 6ޮ"hkD[rc9*siI 2jSd~W=-03h;u,&g-Se^%n`2Rhrlt@"P!H"w3O紕39[.q;€=tAhK%X?/ҺWJ)ɬLv†Y]>=0,)v\zש+SgE!lU֗*E6^"t(^+`CT~{#>}^PWEnX`hy G]ɤv'"1ia$iYo{:GJw&Ory`cRN!~vMmZe)xS?]L ŕŦkpWtYpKVZ_A6*{\I$Irbƕ3~cv:s%JOSjXlm΁Rd Y=vKXE F͗t[bѺHj*ca$E^oy5k.DV!9*dXdd/>rYww)^תĎM=DY;/?o$bPБY>T |#̜m$IٱWE=?m'Klj7ރ/]}GWE ]D"\AFy֛&=Trhɴ QdDXoOPu?٫ &,kԗ|!Ÿ9,&:gE/?FpF(t2 w۩0_jۃ_LT%a? PUBPNP2c~kqȆyxd,0&carUZ%C&{6> stream xڍtTT6)]JI@SI`A!AiiTI鐔P?ƽ߷Y{?{{y# 9yxĀ*?6#>f4A`Px >yӀAUwGxxD   `7l0goW=q̿^kVWTTw: vXnvBh r`0%X$pg1nnOOO.N]l9LƅC`pO+@8!`"jvz*ꀖ3f*NY[ÜAPo8-Eu.Am~An0D>qY!;:1ܬ]!p7.7Aܲ1 aO F\7>< [6P;XE/ yDEx U^;ˍ "Cl?l_7}w6//`V`;?n|W`ʃ/7sl`PG[W^]D_~aOAWQ@* .x<E\ӿ:K`YL`-`xyoN1W!EwGa# r8z HG@F?@ܝ;! CWGhC(oKj(XmAdW /gB 7#>0_@ ol ˋ 7n.( H30W_k!|M!c]]}fٿ{g`!5!UO8 ,0 cOW^S{8ūhQS"{qZ-m'UD9A#RQXk76u4',in=F?ΙdO&ⷊª>U}NJ%jMdէ.9S|jYdf&ćK$Jy.AGD }Ty79}t.]hدӱte7wa+%=̍OJh6U]$( D'@yu,f]}=t@Ȑ -[xo *OEG71]J'V$W%@LgwbS {.DvvMud,|}7lf$Ok;0YQ0ltT 7_?&r:m,NT5 -gStTX'\L8U=nN?< A*?"KiPQ)Z>Fbo+;ֺ)uc%}J|IvfDr0ij+pIRqUaɼ2ŗ;_|/4|)~k[Q:@)Zd|wUȈ&4~z6~LU.+.f@ ¾#te_m6"K oB)^71Ut4Us?(e ~TOL~kF"W R`U9tXasFZXa.PcCk]物F{/`W-r˹`}&,E+d#Z#|IOB~rQXodgq>7Ƒ7!SLnx }P.R@Ӧ~Lʢ` [v 'zJ72֑TxXOD:5՝ ؏Y<K~i\b{k ] gejQve`d*x:F7Ah88ěR>*LxCckʥl3S螲{U!)m5s<vk^Rp5LI*\Z_\p0d}{ը/`0`ltJHnCh"([ _յͮk`[S`u5>Z f ;8cm},|U-7nf6;7Pu=HԴOan88ktFtXW fkگE;6 u3Fkȯ27f%F :[5% kx%N Ҵ _uhMO=H7VG-<GPO|J%eAMFT=n襅DBV78(9Kb[oReB`-|m+dn}0( ݄+}+?= {3y݃01QB 8|D<5#tw6̧#fD/7h@߼>zV LrUj$'h>lS7f=0jQo0ߌ]b oxCS~ Cx}b^FܬoiV4Sa>5&N"lwN+G6]e4! )b*6FSb#E!H$}<-Phoڛ @W+pՆhtXiZ9(L1kڣ֧,*Ta `Ӳ'_Ln.oc&/;k^*Or^X$HThLna{ N\4#&Oq|zD=kٚW3M2یfH{,nc󪬘 >dUYKW+>\`ļF'I4xڒO\5_YbťɅ~=le{u)B Q+mf{S]@z$:ˈJB.m'0ޏ>yZ};g6};Ndb:+22/98nfyLbqdϙag9{/y5ew8%WAH4MSȁyR lXQ+Q <<=N;V?Ҽ5 O*v[?=.# zw߂4wX?z;BTZ'd=w >HQ) lt92~P˶h+kH0JűBwhrqg֋P:VNen4!W[#{ٙ=슻#}N0K&}+b4<_\GJ9*8W@۫!`CKoZ]AYMœy6i7zSg4Ïۯq|Ceⰵ9O2DcͭVg ֙ ]J? V<᪦T7)>vJemOu*z[ ޣG#+r#qzDJf|v)O=۶YYK&*ZvsɳC3âA̫EXS +^N{֟r7p8; 'ե1 W2Y)?@]#aʉ')UI19f7|bxG>h=4 ҫZר@Y[]l+V€# I4U mXӑ}g0&֥LB.it=:T_c= 3m>"WQELd'^,_S<h! 
DWۦ_>!\+ c Bar1N+;3H?gGfrdwd56۴Y58<6{"'$ٗ`Y<%rρo F  & Bcj_|p‚+u-i31"BJ:E,d"Oj3AGa),7 NV|Vqgkb5mnsqɃ籤q8._D5\ՓyD^=rzp6ik~IEEHfNs4GUI4Պ8 lVTgXۜWJ*..kY1H7|h`,;mL IXh-Bئٶx%vjd%C"WeOwzX6"P/X_ߝy~yk=)&0,8@9M E+»/)w,ea: n{&'.V-(6#*OU?5G lX35f&r~\ǀi׌p$*t@:_dzy ,dm;>^JSdl<#;£W49[~+oЌTs=aROQDY kI+R^!|bNbeeՅrA:űx{χ֘iS}d;˧ [1>q].mBm'㜺cǘ]1}PdүȞd+?P[/fͩXTm OuXdŲQaEDby{v1x;:< C>ˊSM֭4},P~HQ>Kdٜͮtʣo7[" X$>i b#glJWq+|o,Eq3lޥn,q{\/ ʮkcUq/罙OcK}Wb9ƅO;~t, +DM=|r,1n9d:zTayC(U߃ #?d6dJdÖ^jFm _?܋K9Yss+x Q >qEe:ZƶgoΪ-7 d!ǟ(fQ(wBx/kϵ`X/ct"YW.ٚjz{&G,&Fn aVESjx}H |{)r՗`@ y`-Yg_*irnr͉5:I(oxvjOJ~@NdX_p':.}G)Cf BJ-p#VkR[=[L9ٯqBX}┴1oO(.]61$9\Րpґ?ǗdFy]A%a~ bL@oBxGM?eHWRf OJLݳ.\vo1T4J <CՊ:ʫ'#1kRИ۶K bePоAw%4jf6 endstream endobj 6006 0 obj << /Length1 1690 /Length2 10439 /Length3 0 /Length 11529 /Filter /FlateDecode >> stream xڍP-Bpwwwwi  w;w'H`AK;3_^uUYkjjrU fq 3&TRqq QSk vH@gX_@SȋLbȻ9<ll66:8 L߃,J,y0Zde yI#Μ;@ 27L!@v s_!脬!GVV777S{g+z&b P-(i rS` q3u^v s lt$h)T?4`u7v;)XE; ` mhjodgjbGq5Kb r~;-K-$` @@k`s`7_݄# E prqN5?/x9:8,_,/?H^.+ߊFH 9`"Z_ rc{; ,v1_Veq%- u?;['!bf0spy/j \v?}T/Z q|?S?#$jgGmjPrxY\Z%rӗE[O9EPA̭̟r߫fU\@ߖ/6ѽ엹?U./c1ei:;z q_,`ȋ g31X@VZ_,FܿN-aZ / ` Xm_r X__ |_7y_/!/]_r9^r  ]_&L?t#-9 V1oOr0O!B7423Jeel:%nV/=d~Z}u =uCL1 $A*PJZª_͛w!ލXW>M8C_pSb6}Oy>&d4 ;8*>ۧT i!b).*\E0bIҿ/ޯsV5bq5O1C{p= cq]E%ٯ|؀Wm1bYmlUv}eMba{0ey2O-h*AC #]rwQV߫ "5-D}#$mgE$GCƘsZ^naWTo"{cwfW|iaiBZx ;Yý v],JvI+mO:ĴOz?K;JD\f?׭L&&P}x3YA[ 딥zVJ":!tcs.4(ԧj F$ by%SU:Z뉈ND1;C(<%e~;rn̅Rn%e @.NPgQ^j԰% EY5}|lٮ{F7ʏHoI_5F yg=#q<1P ^8q_BgޥRp\սnTN]̛z2Ê>SKYx{r8'Qۚ5JΊE 3kX-?Gu=P. W ڍۚxQDyyQtkR81&ܧ`D>׌郉K~E12֔XLcOP3RtޏP*EIəVkX2֐ClCXQ摭UBhV_{ ]^w%\CY:fʏrOYkP"u-4׍m`naƇ;Y:P7ƹYCrK܎vTm[a:aO XFֆ•z-&i,SUrfn:hoP\u>P'H(LO|KS6t~CFΡfk3HyQFʲ&wی+DՇ V hv1J ZAR!_O-y6áH|X:)jep?߁S`ul"0D%7SZO`Oݢ6x!\ * ,=<,67! ҧ;cE“YPnI"Ɂ<}. 8$EPa.ʩ|UTs;NUu&/(#+ĈM\Z;s5$G\VC/e¾N~\, FHe(QjmFLH-lX2`8|=+JCFvjߎT㈺-Qq6(:̷& `0yCU\$C *-h a.~vXң=ɾ @M[:!'TUIRɒt1sȍC wED{MƬ9BB ]Aa6^} 7]$zP ͭ%0\ڦJhvBȔ2/H|**̪oevH`7-Gwp st:?U}=cDb]]aBcR/VhzX@T".dlW@x觕RT/8G\GwNjrO Ip1J}1wn:( 'EM;4uFpǼJ=}DE؍\-=g&l.LkoHMҳ 1zs(peӉ%!¼ul-yΪۈݺVQBGrNPСWc5T2VtF5|! b?/CH 4Qg*F2剶ޑC Ɩ);At. 򮊘86Qsd~RRZImֳ{ ~J ׈̬vސg< Mdc5賏am+$aÒ 5vuS--[IJC!rI{p[~:JnA>q=C]vi<$ؼ0]`AW&.:?cM hF7eMZI4Y.CUR}\6wb3RmJV*&hIAǔD}isu-%qF!⁗C:5ss%ꜟdgژӫY%zmhwN!G@/An)r $LA:Cso4LWmʰPTkHwԯ Ŀh󷆆p_"|O{z(xP'\~P;gB&v-{{*}]K}S"Z)ӣ 5gEoMωS[nLF7qRک8]JؑHB EUD& ~R&lQd%l1J];N)Y8񞾴-`p}3 I|E9{[E5P E9z28H#Lhf2)4LL\MuMڳf/;{V5PӉ/>_S8dk{9f/(&D? [ HTPcPg5/^ZD [@fԗt:5F(⑸;btg\u9+2˯#teiQ<4sѭAa 1Η9b&Z܄6H}OdV9 +3vDjL]yP˯1dӢG[.H7Ion*#0 ?].ޠpX`6qSV|z*!(azݢGgEEobFfb''2ܫח_|l*h+g+*pN.ڑ 0{e#Qҕ%RdIHT(D2.nKy 僧Dy\Ed Q ~X\y${:w\\NM~N#lƬEK-G!bUv>G&EP)GЦ]mR3Psa1lEj9X^w=n|ClzUH=gWk5+ҘO c!Q6Oj:FfQj=֬ *Y?LM ]NIY<^# [ ACr 5 'nL&޽y0PRp?>) ^+ӹ+q?fm^hJ|xbx6o|9aFԱŒY6Ch}36_`)eʱvnaJf^zŷ}{(`vq!eOp&͘Ĵgz̩AAkxoR`vG,u#%q<HuYNO1 ȱX_jW}^̃kMUuffjĵǦՂH-$Lx/)=ΩB'@ͨs|jFN0̅U*~ hNS3GXEVYK&e!>l2/C'*fkw))ވPz?7 $u&{ivs0_4Fq#H?_@.qS,{ybާ>)x,?Q~E'+ﮛ>JKtz^ k*Pȶ5S#+#K(`ґ?S e55wi(n~Oǃ1]VWPe)3vv2Ix[# 4;Gqq~x7M_~ħKR}mLu2/Ƒ8xLK˿نD^Jei(b\^._PQr)eK̪(|KKU{gSr_D6z[R6[~b7*ĩwA1)_c`T^`țA@ZD1ʹ&8\ =QJr D+9L 7`B 5= 79>_7(IP}::Xr@؉e>ՔD/iKRQj!X̮sv~:(|對mR؁CV=]tw{,L0Q1ꩃ&UhCv&l|=TFVh^S|GصmД{v+hUD(8m^oY"J3wRWP'7 % 5wQ_"gcTB*v|wԁUu˭|GsA:Vb/(N{eEәv),)0:] j͓6XX(#`3('d Ȩ<_(>7@AK)JИ:ih*AEq"h+mǹ8zj^_5E3@ИeȿP38R {64,KNA_YĔT)I|r*VىLPA(Q3F!ZQ)Wҝ1ҁ{DsGyo޲y1>9%fxCmJ?MLu˒Z4,I8/uQa#9f8 %.:&WB@hC3MzA ]fX8j%9 B@V.ŒBJ\ݓ+aD$}VKutwC^o(KDO.7ٱ F{vI1a T4~[CB ]-#:242,'JHH #ԏ(t gU@^BV y^}vR$Y+wo%R>hO! 
5 ]a1] /863kF6M"GF5-?-4ޯƒ},2$!5Ck2 +7?r:T}'/:r!R(3qgQ.qosXUp sl¦yTo03h{NFĺWt"DW5WY\ϒn9'OߩrR qWM.ǎ5vE'[__ E75Lvަ&hsTYdJN)\5 XnpJW:\s|j[khUp1q:Yiݵەst"r07/S_n5G%nRÝuԈЬvӎ˲ЎSygmyU8w=b,>7+x UҎWE"f/=I   w "*47=K_>< lcnɾ$bapX!v57Zio05˓HcM+g +>a.)jʰu=Pot>⏵ǖKL U-93 Xd?ӓގQG#}A ]F"$/u Y,.IK,x ,'p4Uy-6T l5wI0]+ e[K11ELjhVt~HE0(]?8"2-(NoGΊVoLx+G HRabUH، J9JWXL ‬3hmYAX֋cs#]\yɤ( ,"C,g"o8Ὥϣk,[+Nft)8&<#B#T -%t$zl;sA `F39,7o1pJsaMG\do 3=7ra̫T.WYw\:No>jf Sˆ|v7'lZ;*JtU&>:@Xb` SX3n3T٠J[D`a[7rK}qCKik̐c",؈Ð=дzWRC+~-Ty-6.ghkVdFH^Lt)hLU[2Hܵۀ˪q6'}(cym̐~Ǟ>Հ%zH϶0ɰ2DN鹺`8=JMlKW3\^LC6d6xxv RqNq^䲆8>'Nk"(pǰgCX;u`pT7PJeWY^8 5^GIO\ >g3@)xT}S0EEBfnImMs =ıF Q0>l*/N8^K@ˊ%HJ'Y e\z*7#b$l}<[}(2-)mJP_B$4޷EA`C].!{壛q0(18LKDNU+KCN՘ϵܫ5@齔T7$'cޞL*n㖘bfGP(%:,qHډ$[D#[ĵM5=t5n\:1Db. ñ\p 7ޚ+rpO/f-$}("L"LKߏ[K#+ c.#9RB=>5ޟZX' ^ UԫׁCgKɎFCiCLtO%FW.4oȊ, .nH endstream endobj 6008 0 obj << /Length1 1893 /Length2 13007 /Length3 0 /Length 14205 /Filter /FlateDecode >> stream xڍP\ !4k1.~w@La ^sHZޒa_x=lGВ,Asre~oP E[|{ݠw?i\1 po={z˗Fgw4vkxP˧XbtJfh-R!@XȐya\]O䎿P(&0psYt !2$$OU$--^j.EgN[fb̬iFk=F9R$}Ieo\jZ͢p!\d6+m׺6iSu/tN߇z>ơN?^1R[O2vFKdiQ=,EGx0as{b"vvC~n~U g3s8&!/!/?Ίd̳3e:?k}6̄WJyS('n̟s+uUF{ VjPnE7 H\[o^WTH8ޝUyFHnhl pSK|ْX[҃ұ1SO[~k's4-}Jҁ &D_?^5r~vKKm}QXDKg{njtrxrסʸ @G9q&lٴ)QHZ9\;FFtHPTF\e6)~'["I+ h5i_)S^r HmP lr$2'AQNFh0`$}($-oƥksSbc?OH`(bְ/|RRCu!֢PNtOtItV6!;gI%˓yA%ICxlo, up69{{ƻAռе@/_2.RQFR/ԅ9~]@o1хxӛ\92tA򐪯~ބVk.--jFf7 -_Njv*.TTr +G<|ygo0.T\D"7SJJ; 䆔|-" %ry/1`D*чwsOU*8<W6$uGnF8im-k|>(3;Tr'}}3zoz5Bd|+F 3\HXVx06w4w7=U?&<7V78MʻfOb vØ' HHe޼72YPBe0օ']9R_" #Om vj8OV?A ;T~97 ",r\e_<'?i-;; g 2ۮp9 nTDMPp*u˼>m4bZU-g_1Krf+b,.N%~${o)]7(ց3{r*figa㍧; n /# M⦒b@7L0OB'bU^,rI,Y+jdqAyt{2.Ibޖd A9NVBX yP}H-VBp#<)`uʡn`g~a Joqb'=H LB-L#[wIHFf0j\pHWg9KqjsZz&3Y eE'@bB;R鑯+TBL|E^D8r(+<2գIx8fͩr 7g h'L'[ TBg>(N5ky^܂gp8dOfώ 1 ^W ؓT&>i%d 󽥬iMRUR=rkqR~wf@M vIrg@@V -Ql\p X-Rs~V/n(n9[g8t)0݃=xIcN{6'ChSMrqݨ Ƹ |7ߪ"G?r]ԻTM.>pOnG A2%[ )sAݥ;w&`RC9whT, gshZdc.ٷ m}SUYyHQvj_\ f쑑kqU/ Яv8fo1Yhi_D 50!/YF|=tyvclZ%o3kl`3p r>>*C8׽ U2_%ن3w]_5&HKE:ͽ8k-I9>:J껼Mo)r:s|c)=D)%EK`Ī`|nfɚefl&NYN.fz{oL6c>ޥ!f_%}>iCBuv5[ڢg#JB[JD[Ydqo-\;'ك$dlJH`25 `[u/@<녖g`RqIY|ܽݐ70BHK?Q(wSl|lpc=f O"jT?$$HI#`.ѻ@1x`uWܣ)iJ)iЫ,o [+JI қHD*6ޅ|JUCi:=Y f=BQUrOsy0^k*#rPzDBE&oojd&?/N(Ɯq3^`Ϸe&} ӌ߲/bL!pxUԅ0 -iĖ'Ӧo>w!oA8{RZLA-QT Tš$*0=cE|jպat {ߏޞߥoté)G(gf vJshej2E$FLH8nԗ-v#Fv,;Z<uBp1&pePQa<+k (Y ! 
\񂽖SQvYD'fkjߥU/:ѥP -f|3Lnd77l)XAkӥ %=PAٲ[\Um[Lor( S-Ug?nqfߢ+KŮڣQqDy{nhl2i1E o?RA0%E@¦bG> ejr%z); tP-'/IU,o%"U.\K&K\`<Ȫ[k)wջ| dj>9[웢5no:,t-SP r*Qe#f r Z>yz4cu@&}u-hMY H`3al(/l0)qA#PJ$ V*ЧLb ER fţ\Raގ&GzWg+޳ S^xQoǏ`I?8 {# >E^s|F5cXS#29]ĝG$h> eP?/{;~^zwb?bb8\F D6S8(O >*KiS NV~ x ?~|71Qjiƴ־DP3yivb;6޽^6o2]W{rm̀ >IW#ƾtyp|LKiՁt0'Iw m ˲&a7_Ɵp>b ڠzŇѱh@^DjNF7 T gG*U5G%j=Lq룛Yk]9Yf=|7JuS2nkh^ʻ;CJP28 '#3w_e^\ԞQX|[-Be`~+}ZK>dkA> ̧du?!H6iF ٛ{=ә$X2bXTt4=3/O5,b).)F-h lCj/'YdA]D^&_D7Os;L`yڱ'dUJqAeƵ 3'(:ĴeO T_P;Wk+[fs4J7-i0jO=Q# Ud&ܠh|fϸڦQC 6xd2:cB`urG'6ԙU@5H[}5{Yt1<)΢Ol BQؕZL|?fNqֽ'c-n0ˌີ=uqs2^K?jqV:Kx"_U{0; a~=?p#m:tIp͞ir%Y7'=*f/W^[->aGsLeJ>GUwLV?(zt1+QFO]BZo*ө/H $ftM^|ں,#~YSC=ۓUg}жmjY׋- v/2:i+*kUOpq8k9[D =xs)2Kh ܊^֎zhD}H~=AԈ):_'A WA9ɏ48P&眴ͻv[#G_ }?;{NAA(\Fu|>~Z&"f[AmŪF%7jNEOzj}U{si{JRBٰV6]-&>K#:wqԎzKlukz̖"EL*m-Y|V3OY}p/Ca_VGqIO+ͧZ+ բ⟎A:lq /lnl:TD⍫MϏROΟG5fZ~* 'O/^.uk!F7A4$QuOD\ 0rVK j5HnIFng.n-슍)1`8K͹#&,bP[n~Fӓ¤ oϠ-ѰM>.B"MBG.W+Z!p+ 6%<9خn@&,@ Dw>GO)kv%ҽ}a!΂Nޕ ^/6No&obI,;U${_HAߋ}7|?J`)"R# KOFqޙ}?6xeYx4G!dPsӰ_AY B!t6a;OmQmb?i$g;Cў쯱 fG/-XŸKvx6P  P<>pLxzLwI'XM7XƟe ˡ23[6tuԍ*9u;ymG8FB{m{C;W^JX2Wvqj݃nG]X t@gf-" 8/wͩy+E>Ws8= ]-^0Nf-m q˥-N)tQkz`xQq,n2߲mCDIWQ34ͷڢZɛ/Z|5`GÓTA 4ŚAJv\8+/>/?nT~6&QmrLD[7$n{$0~8y/FY0^p j֣Tw1izl9"7 >՞ J7l+ v35(=tr[ae>pWEFoUWMa .ܥ ^Y,Н-FHt&sv鿡W+ MϜN}&sNJLEs"i6l4|VꘈJkEz=y;񾘻l87c!jθDbVj Q4?G-hI=SXbDl O+?]s>GeMwIzQy mSӨo ڪ;tue<(rT[I `+aO-' yXwtiѩ&}TФU*c%.y> "i tVI /17W;Y5hF7 OG/,K^GLFg_Z`% 5OYa9'"Jg4~C֙@T + bBҫ+w賱6e'Pws[Ca/GǪ+pt+EBbEeͬ_F"s"c+{1$RsBEK'w`Jb$Eq9ɩ;8V & nOD\Q) q_B_'LCM0yǛU{-_ ["oGЦb+^? ӿ$"mdK>τh2W^Ma2MgySNN3n#3Sl0-Cż.YwlZLco5'.gBZgҋ:b7 EHM[ c) O88gp!m +悾ynʳ2WZLb:'@.Jo)zη4 <]pW2jUh+*+,ߋ&r}!;Wœ,h0O*$~wG2R8Gdcyk? "qii67G-*~j!gCu :29UO2o ( +g:>r!c3; C<tݾaf5`7h Os}&zD <[ uc[uKmk1p3%4AaQg73d~KLFu =7$/xgC=dd6ҺPn-L*U G-\a>O.Prh #"aٸ j zx\x9)larG-MˮBO~Y䭠I8WK~i֐jc: F\AkF]/D^IʟcR}F:H<>*K "~}&,tISҠ'ɔu:^XklަkrL~c`xlXhC+֩l[Y,5ZAVQ% }slU 2Ϯ5"$ `L ftƂoG+s8q%tj 1.g8S#Dː50-,X'[*gM;{'OjG$ y!EF鍿n%&'f-hճ1zIc7(/=4C VH$>TKd}`FD&2VOynS2ທ2N ~L,5gʒh ]zcq@+oL*|F3` i~yq3ݜK %7r:!zf-a+ny)_@Gׁ~9?!1Vm 84x`ڈ'22Ђ&ϐ!G7ESeT ;j1GʦT5Dʋ ,˃̚Kk.ߥev!_g?Kt[b.37=\:Uo'rSgo #fG +A}Ld,qH*;&ΛW =lA..G8dvJ|0ɱ SO\gcS"S7.Ў%ը+{̳/my\ *1shl[q K D#1jY1d~tOakj]c j6 .ںR#"s<%{>Ro1tVRFE:{s([AefNfB#YN~Fvsꌚ'L7pW[Q ؁9 *)If6ҁ:1%S]~txhDvdJExCeX}rZl%2VDw͏wH߉t<>dJQ̙TqRN  ΢gGwQ ?l5PP өЊ}+WѨ&i$MgA+~x 2P|oVmNgUw"&8A޹}5j$"!EКton+p+&@e\Str;X{JAA`?v;Ķ 6 TckQo8l/χ\|lTcGA_h"B8Å cwp QP[qU:@(6T/6* NCYt[ S7 Vs]J%R􀛸ďc#kOp%VyƸgOvBlC+5~HrB $fX9V诏Vm]x&'i? m"#P_8nA&5 quV+9k}]W`wnf8NUw8R)kVket 3c[x 2PY2Y ~go ?۱n0txwpvP!{8zIJ d]O2~>/|esD@z%@$,Z gAR-d#z"Dn,NK gӧ46%, W(n C:Ŧq4G}o`oXy l>Rנ>g?@+U{۔Gft7y\jM}\,.pEGkbCc纞%l>G~=' K77K^Y{B. wβޭUo$y+d\z¸=ɣ1"i{(_H6[pj"5|r{qy,:=YI\>|1>`cpXdRk AW\b}z>y&Fb)l2l9f}gj= \fbMulK\?B*V$m|pHޥh~k+mFiX3J8 On'gQ+t,P;PJIFU'xf0GEI-'_"ݻ;rn ~&LU-Z 3_4ŝf#*B*Fx.Q/)5g%8()BpԌof)/tgi[3(j7Bect>ީ̜Xq. 
bApjnx.^&nLEe2 3hZ;@ z| k%áHW1O~{Ѡ8$}:-W/ҽW#FgȡG;QI~5j7+ShfL) >YhioT(G$DW6=# Oœ Tm̳+DVj"0?JxycN5Fk ƙ ,W|/8,Hg\]׸/mYki4wsQ0ř;n* lr9ŧn§11|˜I/ 6Јoت#kbY`~0eov[W!{zr}T{T;&DE&խ/[ѣX-~khpiZ&Rŝ}5ybyVBy0?W;O-Z3 V6sd9DTh,V EVymUOr˵SI 'yy,R&[1$f>o&$sjIFgnБVW,+[0+:s4F2,9~%ו+2wʅK+Ӿ_xBckkNM`WܠB~r\UAՑYRR&_s&6,'*+&xD!_{(T{ΘÄ"2T;}vȭ3Zn -ƛ6憥P qv:d57價8'<13:1- 0=xꇹz[=z~f._x%~#O?j9QDCԟ  J޽#̯:QkXh^ה)E˧#>Oaf  %~TG予!'`9T)Og߹3+S9qcMЩǬ>2u[ a pڍKa,s Gq]V$1R3}M6ZVA"ήHnl.k1rDNB&v8i4kK/1| 5~ i#w寠RCa_Vgfl ("Nf0xLۑ[4#-AHsA- aET߼)(Zퟬw%NPa˂WvWpJ$bɊJɛiKIxK#y}:,~e0=Ib+C/={i<7"quS(w-8)GMǛ퉦+o檨 M*r>s|P +uM)^VNv|`eGj[K"7cu{.$XšWRӻjX왼':g%pe8Mlg(6ߦHG__ O?~~4ۢ]cE3hŅz_fۨnӸlh‹/ꦕUz!oвS'P70%&8[]!fZ `!h5_7dTG3T Cyth9߬;} 5# @yRX=>"Uz2 ߭*gh,v!#.1R3\lua#uE͆fR2Բ|\&_:AX$M4b<.!$hJ=S3XڱDqI2lC endstream endobj 6010 0 obj << /Length1 1373 /Length2 6101 /Length3 0 /Length 7047 /Filter /FlateDecode >> stream xڍvT.!RCҍtH# 3 tH4ҍt !%J+]sֽܻk֚ywyװ3)!l*8O( x/@!BvvC(솄" BuJ Mh;b@ @#M8IuUupr%$x``7- `-`QH% A\$<==A0$?A EA`$l=. 3?!;E6@أ2MPWUsm_ _멬XtU4[DU ;7NwQbE |)Z+/{0 ן@Oܽ0yݣ4FBA伛8磣QCQ%0u_ "zY<lu&gG:pk5Q?:FQQanTxu+Jb⤑DIFtewhay- kHRCN9?x;9ڏ(g ~%~ׂ+H{.evb?( :zyLWl]@:csUY ?]r o/pp 4O6Ȳ/V|g97"{mF^}}9!D S:X76ODI3FSY)g)UIL<ߙ$ZWSw8˼oTУ?=~7dp|zv6U_o\Kg쮭9"/!xxZ2%:R 4VME=Smi-Kdc`0C̑R5|JONdr}s/)߀4cFqLMB `roҡ[ T k5!wFNxVfy8ZUIpN5b[%|W54 C:λ O\%Fમ0b}'޹]c;+[?=)yjio[/n!]7n=b;I ,wiYޘvzDajrW19Òi=v>P>D{y;z;SY 9.X=zܢ2 _h) ˸H=a$>N3+a e#QX1w_4XZƹFjD?{tyRvnk#Am#+bcu'^gM(iTUHipT* 7^E@]rSrݵ7CYe*0nK;%d?]yS2G彚'4Y>ء2!QGbɼ .HDi쯡>e8K=)sXW2\-70bԾuWMҲY 1OEȊ̘P b i7,[in2Il3(=vaP@`Rܕ4VUz{Ma_V<[IBx]e#h:@f̞y6VI%ݡپ5\:qB>^ބSh<:Me*/hH&75uGd#v|T(lŋIQbiLQrLڟ<՗Գ:{Qx9yn }_=A'i~sHX=#yUľ / Ԧ7ꫝ~E%9,ܻA Ӊ޿`X#I/e#qF\_:y]X)Q$9I|jX/J}0+?3(9k0 "~'+e2-O~cSS4)ג,Md'V ?,*F->W٢~Qt;*0te W.p֟.\V *h<XDEF\PʏrsTZkq#n)޲fI ǻzм3 4e5߁i mm| .UAzƖ{2r>)D{S5Z8&h"G̉էBd3|lIϞO-Ѽ['R ?5AX&4MZ<5tpʺlD4ʂލoq2V?̐.joXZ5mدN(8eu~)C/p BtvsPpEKbf>fb0DU7g ?e1BDywa˟l_ kĦUM+Ip_D!%\PqVOqT{to]S{sQ^,0x=Vezsw= E CMr :a5d8Ě;luΜpRoN]qKjrגt|R%Cul8cڹ~m8i"dQݧRG2xM٤nfx~_ltw{G}t=9\S8m.V597n?59w rvfN̠,w+]][̫*(G cwiM =2۾L\ʢk]:ɋ  InZx~iG rʔd˵?edPjPNWyL1C65q?RY噵"K!"jLd ,6TیPȲ4:Vd?50>dN CXzZD!{횣a䷧|jپf]q1]јE!ZKxLef(Dc's X-|#e f%-4273fka>i|Κ{¼%k(J8Z[#$:g} AK}UKNSKS^UTUc'q.fH~Řcؚ-rS ^RmI5ޭ 0F)~mLW!=8Uom>r+ZI2'i<̅ܙf&iVZHd^.l┼~6Vk})s.$pz/%y[#KIQ6JTo bb| endstream endobj 6012 0 obj << /Length1 1553 /Length2 7855 /Length3 0 /Length 8889 /Filter /FlateDecode >> stream xڍTl7NIQl#VBjۈэ" (H4" H" )}ys~W_x8 M0u$G"K`10Xc qA>(O-r}\0o/]>16A:A`@ w!1h# 4`?ƺ /_P #Npw@]WGE82{#1P_(j1]8db;o/8[CЯ0!!U07W}p/f@!G_-8Q;'9|`0XJ ya._MPJ/SH :aZ`@7BB[o >@{3':917c}>YcDcrAzZw7JY  J(P swC(*j!@?b}}XH ca@n;` 4_Q/z|-WzH g]`pj|PB8=D:hqpC?_ G _ PUnG*fiR t]@4b$ `@qb :!SZr ~`?Dc.o1 _NǤ|$`,P%AXK`$o 1}\`AĿ¼ 4&/frC0?0;t}ߵ%f?Q u ~Y69A*˜EnOj|NJsWA[Z//m߯fގ=Vc%b6U یݡ͓EfXH}׫_W1?=nRRpxIgS >¬T'TycI %gS Uޝ܌ To)oi *+\4i.ԉe:B$%OJacyN!FL\+ʰj%8o b/hi}&dl61o ף9_v9v#ƴjh):^}hȃY/^ޕ '+$rLҶlẍ't0fiSܤg Lt R>jsPBx6dIYL2nr#vNQVTM)='&Ti# ak)f})wյ][.0#rq~3jN I$vnI\:*# 6*;=|e8h?Rj˽ ZՑ&zFI2SZd+̿{eyϼx eT) fy˭0 i5y(-SC-:a=䱭nd};LP[.jKXsQYƗJDO|ڭ؏ v XɃ[7 _*1:a-6:B.hd@>v*:{VX& %~N)k|M}k|-N-7X>6zO֌>}I3}Uq ͗ӾJuoH,;MZJ8 OϩtWtlKNjJ9+LrF3[<‡{^\H(Dk8vu']j'̳+YB% apA~SF]bruFŤ 8HP_!O~,&W.?_Kd=P˩]ιVoOZ`< hQ}D|k+>?OOaTL'4^Ly\AJUd7]w\O~T?T+EAqZѷۮF&$.ʆ:B:gk2AH[p3_T4~O'#͢钼GP2`:g}h8:YO;!ȋx4$kCREi(cu{XjPim̼( ARNsa}Wk7 5%2ta~% Iz2&6stӗ8dAhF][OkH2V9+jbxڬxɲ k g'ݲ15 < Q:Nn\~vrPK-Ȉ4 Kz7?^$m,+,?fw1EnGƲVcz ~0u;@Ĵ%?`ڦl29SͬIyE6Ѷ=DkwykTrgWP[`cN ݉]RfsZ&Ao{/L*̩m^'YS\1Nj҅lZ+F✌H43T1Z9}YGk;d$[{Hn pz흦5u߶/['..sT.ȩO` ~hh2s21/(WSqZ,p|I0' l-n(q]TC-BƧxhNُh-N=6WsZ;ҥ4E[Ei0:|6z&elQf8ZK5d%!#m~=58{љz̳!guYjkxVbb"_~gn,v+`JUtPHJX@fExi 
Ӽ)`K-kKǪpC}6bu#hxbeL~tW$:ycSdmS\O_tj[VsGf68!fxa5QE̜?C?s{o|=.¿P,;k(8/ߢh j{<#O=pc39RŮ$*ש|W8l;(xSD-@Jކ+G-vennLu 96M)-^8JdՆgDxnHu6xRnf$!W%@vOYѨ2)^# Fĥ&:T7b&IJ0 (n2ɧL"tOd2 xc95¶a1O1-K ,3Ng/b&3U##ޟo!]]&MKg*@d4[KrExxPYo2Qk)J5ȝ߿NF“̊;oyq8%X|di]WG|_J="WNJhc:>K'<Ρ9-mu12VۑEh0NWhc\q\rb;s~%A$Ezլ\tmx8g=#~㢡+A7wDns1Hӡ#8W~c 3)kln[wT@t*_&i qq6}vFxril[] |OkalbI")'hka`K@/-$le}4Z1}*uLB2d!RS 8F%n>'JzJyA fK<)4c?>\kЫt{pr\.Dq)1]xǟl V4=?-F ?"X83^ny12XOgc+u3mތXIS4^,GiROغןQCWo)+~P(﹟jb/Sja7=Ys$N`IɼutOd*&(顉k>Q!bMm}M+jW }49x"M a3@Q 4 d? MCC#|  (6r*vwoT~ŊZh.Q$jpokO&4F![xllg3$h#‹)4%! ρ ^[Q'iq4K^9Й?Ck9)+m2@^xKQiPMw\Igu-蟼**({mZ{uZ ŠW“&VHEv뫄P~?FiNqXvc7x."IЖzx*?Aq:a3ir}]~'"}i1 NC⾥e쉟;gwĦ|;OoG03rfn':|ats^6/ZV%Eaq}|Uz2o@MqE?6A]#WB#{_?fAsH0ֶ|g,lV Kk4Hѫ^gwْZΡ|3OӦ \Vl"#>˅#s;C;e>4q= !e ob1W37=>tWJX2.ChI!K' *J DذJqf wc\IUOn"K=$zDJt̠3,Q\FY2yAΈB-pN־vJ= p U-?uسyxr^\dUY)jVD.f9x޼&.p<!_ș{޹7P+$1D`YPTpOYV545d͵dՊ ץ{WbM')ZFӗ .[aet:UR;RB7ız;Y!Iw1Mm+:bZgn2+gi[$+Ns4h"zj#_3 :Pԥl;|G`pV36B'gQnDu|M]p$\naz>Ckw3{o5:w۷2*+ΉW Un%ʒ+ueTx<ƴ:yO,Ky@Gvxݰݴdը9QHܫymx"R-n*%,Ȍc,GU9SgduZs82o)bb~VU[h|5-׼NA$y<#ZK^DuOy%!mʁ_2ms?fmvo Onm@q>e(QzZX.4`}UԋtSSIKoD}>by9sV@:Xژ^B)В$,ϗ/mˆy+wom{'?2|gPbq} #^'%'D@] ߺ&HF /bA'咃k+A7I)I.j45Uy}-C ?ȑ&$lmĤog Legz.Ͷ1έWV{sN`Wp[4Hfc.F7ۼ~ ߷zEQҮn];0Aeh@k%VͭN$³K8R2ΉwkSg}OϱLƪ\[̿MS6=B v-^?P\?G[KxXiRg.D|y1LjU g .$߈G|a؛|q: 1Cګ~ǝa{nŒOLm,sgbq '_j'rq>)FZn+ iۢF$87}$E3y4Foo3WP)ưΟ!v!Ժi֗}G<;oRVzfsUٸ-*s24)QUTRQH^ûl((`Xj[.5!֍Z}x =lcws&a]@JNkYkU.kHnW-acma!9Su;P6lN8L)~5^rmzEۀ*"3fܵ]rDN̓PB+OޑPJTq "mGs:9pJq=ڏ6*m[yMې8lïϖn_Oפr+U%Aa؆3gzŨ+K"->%ܚegyvU^`QlгM\ ~3Q>;Tin Q՗(5Jc,Wi2,^Tu ]≑2x.ex7Zh4z#/-}x ed􊟸9e;hmx:,/ aZ8|H 6~|k&rwc>C짴Rk47')SK]4q([&{xg5 1|4U!&b *O=qV.7h&6E8H"Z0]ϰ!AҝDU9g75]Gvݢ{Kge9%j>ڱ>:'L}cGS7PkFAUKI&IQ.66}6ta %(GxiƓ{4sKO.wS=!!@N[YoPܵ-" ^ù:~4wq֏{_iM+x?ڗ|)C,ʜ8," 6`eazbKǁrTS=ZZ)Eoz ΄F,~3tnԊ)G&dlA5XoLEL#ghΕ )'8@ZqV4UM& 0lV?˃ؔ9 jުUQG'L>7r yQȹh*C9};:ӧu (.Z`6}&%TnG"+l7ˣ4W\~丁%0–U}tw;1蟣j7X/CwٲX%pN,Vx??>}U2jY%)LNQ¾B]L|o>@g;)x; ^ 4w+^TMg #Umzw4I}Q`@Zkf%orB+"HG#^mht4c]rМWs>G]~W D?g<*S;EG=u4P)D-<3k#tӶ*Ti Vjp'zHz4RB}M'd V~o܎rmo "#_լB5p ix6f oUը>(-yqyLDZ`Z2sh_aF?4:c-mZ`V;dѝް"j6{Sy p B%mG ?eTzFŚ íHd E !oGP YΫ-^JIs9Q.4fxĩ18 UyFwC6\|7aiT廵yg*~HHyBg]3rXnH0xV= _[A ~-l&# endstream endobj 6014 0 obj << /Length1 1573 /Length2 7288 /Length3 0 /Length 8351 /Filter /FlateDecode >> stream xڍxT[."J5 PEzcB$tQzAzH*H)x޵]Y+~3{oVY vP$|P64 ٍ`h8oꎂ!C Fc0%0CF"pH{ (tP{ $"dWDИ} p@|PwhNPW̎0`hrB^^^`WQC; ('d4Bv C@(0TA"$O?޿ !08%F_D0=08C:"c*S sCP0`Yatu"(_)ܡ̹4Br!~a4Fy@Օp0?# PoF>n0?7S4|P ^@{ :DP֘ü^fQ=@EEm} c?%Ǩ I% H .. w=0OWH.N 7߱tB\ oE 3R۹"v+ Q3H, j kt0iG8b T`P{=j!Q_7 KPl!`nFMP {_eik؄D`ww!ט(J{o1$p@j0Faj\0}pe@;?8}áLƅ@㯻1c Ǥoha l@a`ZaZ' sQ1^Po(pv s~vV-ſoa#:7J1I G wfJ#ƧxvFveϧ墱{~u9B{LɦSk\v,$24-XsECFƣر mqt?|-f~.7]0_J2W%g(:1[š zXJW1c+>s"5_أQG1ZtN!J 񝦤h't6*{*axn. 
3ӵ\ {!q;gO%RTӃ3 9]H()qtX= | +78^oz6de`lfbGre1t4ӆ;>'~" 7WCUBNbf5D4ψ|Hk +Cp 2JRWsb/CS >4CI;f~ v@!k 'bN Ha+b]C|•q eㇹO~>v\k&_eNlrݢ)cƛv5o}JAax/}Ɩ}`&aD SI`7z=e3˕A~DQ&JSCXN +pY9-KئsU/-5@~_)NazŸt#;kvoY>bP nƋ2Y(>h"77S׺Zj]\TK#zloEe2xrX?}#e9~Wa9JAQho$X PT a0z'[{knm)\3ź[ihPͪ5b#8LLwSq'^\݀=a*9|!u 3\L9qE`U8Nj}o $5S%Z9+bŖAPdm'ϖ$ڔ)5Ce;hH}F;ns@1u7[_ɉdBΆ/f(݅扝|M^L6ke T8gs@ᠱzH{}8{󪶼gWe^&-vz&Nձ:kn.&L{LJUI tDSVk;D}"Cu+'}`' JTpda/r̚\J_fBTRMWrV5zGtnhF]J\MJ>!ci`!'xc9ǂ}z82>b=zthE~҈j &,-Ys?c1Z}&>^؝] n7d͓_O2&=S&iLP_3>~ZJi{57 ; FrqC |Z:WFSĎQj^) YY0[>:^#9C3uWY\п"?x d~I5]VEQCwwG9ӟ >[ snRh] R]2pg֌ K HeԘQRr=u172#PR`knrZhL[r5+ntx]*BB]?9~.㄂\)_$imM+Oh38jgzu:kbDEĻ9VFϞԛK_{T<8Hx7 ߛFQ-7L<;q}yH*Jo[|xx@SzԳJZ{8 zqa;Ԙy<#QnyP|gX&vrבe,%ksG~; W"jz;~y$ z>ҽUCeř+1>* [G,4WvNA.1_ e>noMxIVU>H(T)a`hOCd/` 2zA:I^@W\w5"ur5Ki67:MK($8KZןyݬ!>-Tuy-_RN݅7"1̑ cWzw>ު82;^n½2KWOh;OUٮs 7GGn4~Pyy"}ˁ]2<h-\r|!Mr=:9_^ʘsM ʍiHLY?(=хv2Y18WoSW6 .E^*V40۵y,> ^f$G SD ҝ ނqj2IePTcNmTFMv~",պś2$ؽ%8[Q d4|egvuo!=3aϳ1ޥG stM|fUDΐ@eO9z'+xn1\dq͉}f%UEqh;"kU5M;:)$.\Eh\jэFɏ:T\Mʓ'u"(?_j>𕔚 v=\me<䡹|ddtqbs9Ȗ q :7W..Ze+c +fig ry+&c,sO_2-i/' &.O#ٜdbPc(̑TqsݭD|BNO G1SwT9OqVr]e}F]3|UX[[6Q'GCªVH/2Z}486FkHDoy8`9W ̞V D:B]IIocj^X4Wc%/5yX?##|R1MMiЄ^gNnB8YM%ɛa wnEQ -KDn7hqNV^&I\ݾL{񑮊YSxlM܎ewɃs6^xx|:;!FbxJj:?alfݨ]qlFh+0IUXZPf D DVjc~'a8]2u!E 6'5(Jo4TZ[A^XQI<Tid|8E Uw%cdlq1SI$Ϋ\]rE./F;YV#Væwug\I^>ܿe`A;ԑr3dmrd]ځ!S\f5'˔"̚QݥkrqF)IN +{fwˇJ$GAoK\N{*@x;yeD8fz2c[*kd)3 xƺ#>g!EW¯2..CfUwhI_O|w&&!ʼ@64KUY#ȴkuIw,u)u=']9vy]/F&K[*;oH%+YbǍ>=T֏-Mɣp[6ъָGykHx7͂mH>mv-4(Gv`Cu fm"[6BRgxqɶьeՖ5QtyWrډ!OЈɇ@CmAA^U!S FۆcatdCuKsYDQ?kqݷ݄)K \Ե3HQqꅫ{jI*GQU(B)Mșwo7[ rw27'ѸX}:(]Kgy-^ LWtAo%̫f,ZY0Es^8zlzu+ >.I7VAȴ@*jgRqh+x9-[Ab u1Z4KEWsW*σ^dW:?Jl)\i>&cְd D?UY@B1O|bўޓ<i<,|l^ٳ&]ҍWy:\3{H֩ɬO(ePd9mx+cٚT^سVn*DFMC_dݰMdÅ*>Lor%H1܏Z9OZ{lxN/5~_ھx);e\iǽT~T5^M(' xB8536<)x{f6?Pa24Il5␓{5(Նp\@/'|cvJM߇:Ty1ak˧4ʛ|PڴHJn*` Hׂgƛd@QYI;M[^^~O"i:rs\,|ޔH1YwrnO^1 ̖\$tW$hϗj᪢P&; tC:_5ՂocT>씵^+h[/7ȨeZǩ}O>,ֽZxJ_>đ5Qatٛ3'ed.ze55wR{.Ӯz<ߨhJdTV GUy߄e57)4:B* ^M+s|a%ebmWڨ'쬭+ 4 x٨eZNDn3ƺD12gIW2~5H""_,V&Tp]!2PcJy<"89&_bWH|1<7e*㖍$ɊSx_w.{n@mfTZHZAetjt3`="W]b{SCnHwYŻncGtmc?.~ɻLT(yg'mͦft$&n|Wcq !qagHs.l5W-`2XM o^wmޜhr2qki*)ݛwkw2JHOoo!7gkUZKC BؼDwq9M:(U?U:2vr(Z)KzgbW QaҢUږ Lρ?j6`nľ²9W1ftoROf;Ҫ7) a 7@* t`hC&H-y cдÚ82~qmյL2zGV9c'ިf].y{_3HۈBcUNJjO/^}cF>LZ .Z7c꺇opXioOQ@my; o$L#gK,slK .}"4Vex)>7:?%"#҃I&\0-Ӓ#MxrRX¢~S)")Cv55]~_MOY<ⷓi~Bg)o^E{Wm 4r%vR^≾ +~$ND,UPh]X>0[~9hČ&o5bx,24^µchM3|qs Uˏ M E<QuA3 endstream endobj 6016 0 obj << /Length1 1371 /Length2 5926 /Length3 0 /Length 6871 /Filter /FlateDecode >> stream xڍvTݶ-H D>CEQz IPJ)MT7Qz"R+H"As㽑1Z{^.] H J]d%-q`" ^^C4 CEPrCqxL@ %d%e`0@&(=H@ h`1(w mgo?BNQnhhq(g`EQ8\dpgw(B};~ hÝQ&xC{k󂻡P|]X U(.?_NsBcPXwG > ^G/!=W"LD\}@0D?a(B,g ln_׊W?>4 o;P($q#ܡ&J bkTIw8o*Yڨ54G& ywd̥t/+n_{2f7,L-QLcmFlv5h,BStbُV5SM(+Wۤ^V,W|J) WX YKΝ4%zi+PxFB_%3aPi ' S() 譤Q)19J+ -4"=@{O)և-=J6 ҼiγA Y^)Xy8tzԻ%m%D;&FԻFf3^uVrYj˭/ߘ|N*cҏmxPTcآs_^081`ʴRBrVȨ:uk)Ac&[34U<ĦRnw;G^JndSf'֮@+yŪyPtX&:l :^p'y;~7&üd'swZa:AmxMxnۍ3Fzqb\ɱ_0@(_`)va9ax例J#6 #QO[9)˭9R#ېvUf;S+[iD-g)){0{ubX ~g=,C#l-* =ԙVlTN=Iֿ/1* lA{ L|18)C~('.g#Y $VEeqYy~* ԻM%VR< QlCSc0'G#E믤tw+v4Qz.crcu="{Ō>{g[^ VxEpiYaRō:*_ 03Gbf (Nρ6Jïզ.W^Hٰy{d?b>|ypOVi^[3่Z{EXٖX#VUyj jtЬcAۈYsUuUoIB˜eNƁ!I uhfI6'#H_[OO#s璓z*ω!H#@33H~;bx^2LrD^l$[?,α&\|:6DNezoIj.o](\ٟ}cΡėtk.a>C:4M_ԎWqe;O=#̛^u xHQ.?/K( TMT&5XOb[shHvZOl\N)݋#I'U$ұd c~;Pa;Аi8v 'j\ߐJ=) E,ʭɞVUQr.*)~*(ZiO FY_֘ |SBzL6}zmN3N[WpqaޑG>Vҭcɫ>sNݰ}VҫgC|7CUHE*# Fm3Lzbuu1\D;,Υx@L,iG`eEbS]Ibսީ5.5"i9-1wUYSϓeg b;}5*4wfABzcĒ\3mG$:f'A__֨U,M'-M!mAa:\D7[9 Qm = VƏ޹|%3y 9@:]=#A2>J,C? MVQ^/ꝳLQ{9 + n*{'҈y1E0Lč쀩8ΖrzX`4ֶm9y? fۭ!Roa7j?6b+ w=nG$EYټfk!!6bjHv+@n#ͥ0Z54Ǹ>fzJӴqoAք (GTFJ(! 
PQt _\Mk.yjIZH t2+Y_#Yʠ"#l.d&ԄS$tfɢϋld Y'(OOlȷy0C|b!]FF3%haHꤡO8ۏ/HĪ Jw_ L> a67 !'tz4;'u \T)#).ᄈӔ.2~Juy/Mvbr?ʌKtY4x]qkv^ep'Nn˫KW{>tM4N.e:K23QHF/+:QAqweiVmò Xʀ*SqlBO}(G>{Gt˾NV儹| e9߼g0jC+]?+]<Nt ϴ>TJ%lxSr̾2eQgLLm5O敇vTTdz!H"ͫLyԢmByh'_Y=KffhKM:#j^1Znn:w.8-L4bJ宝ݲ UH`Ӎ D89^|dgJQӨAu pGaKy'ʒCD XwIp*|^{!L(00`͊= To1yp=+bgGSD;+dzZ`yl΍rVN3y5EqG^쭗/{2t9$S,u-_{J祋C۫mEޟoEo7}("dkHpp(kC1t=^X-e-J Ñe6vg`E= 9wgHd`.GȡLIWaLx(ug#yql͡ɶr;G׌ˎW4D&yޤYSͧ~UZcw߉#I nE 4EsTx|ccRjg5Y@,J`k!_"~@cDt ed~b'"v5+.).԰6L1th,u&zR|n2 򱫤{W;g,=3X2?aGPU'2-÷)1$D/tڔP]Wk'REt{ZY&ڳ$:o hBEkh h"]C8ղC[JیMFzNd5z[0mcl}`r@3EjH7m{U RU MRP9rq w|/&O Ʈ)J+9xdVdrd#E|+͙pW49\mrY=8NT]~|e&߅YjW"7vj!fK9& +{JJ2B]w3 IKM(3"ewے7 T滚J>QiէmԮb2Tr|{Wk B-.,9~/sZ*+M-G_xde 6E,<=:^GF Q2hrhrw0vVQa;ct'ߞ_ "2< endstream endobj 6018 0 obj << /Length1 1426 /Length2 6217 /Length3 0 /Length 7198 /Filter /FlateDecode >> stream xڍtTk6(ࠀ03HHw#-0  1ݍtJ "% %ߨ;kz湯}}}p aJH,kꛈ@ !HvpPnp$BQ0)@&Psw`Q MD$  @SDH8.(=߯n(|ؽYg  =#GB0JpKڣ.@Mz07f50@ 3 'Gڢ=!(80&aC0U5.0=_g S_(Axv[ !B@6'7$&;A1ߝCJfs.h77ӯ`NYa#tv!n$S`P̱{ܬ#ka Gwu*E@$`v04@t_TTXs_ ]`_0f_ 3n A<`4^8 $T0?k^3F{`?o1A"_ߟCz|""`XWrUH?b=_ZD@P-)77;;!p'Ѻ1Dbloi5a6pw!#"0b p7%F/90׷W /# c靖"m~LPDA $ ED`!m`^ hL 3?"u`(h AB pw ܝp̎c]Pp̡Bn0`Pq$ACUX YfOD(43_{ ,W9+rt/#yzϥ䣱; >lLvCb˔I7 kZVtnz (FƌkƉ5aXDS$P2 T ]Ͻ2{gg{r:Ӆ-fQcKfMQs&256css# (eq蓅Ga'o 44 rH&x)5pe ˹ޥIk͇5A6n9XZ1 뭙. RǷt߈( NOkT⊾3>bth6|<.|JȲm.٢兊QT_K>(K޼ugʖ\> J`38 Ќp3s$0 3tڮu^N E*R}xTxBGQÒLwj7f_J||\ѩLlZ"ƶ>w R*C)[K1sf<ݭ kRBo*@\P-}s${Ѯ 9>efӛ<*BR3d>= Xy9xt&e[]n4GNl2QMxCn?7JTFyؽHP||yxwRM?[CIdч"+w%fs#;kN8Uu<  ͸7 bs"4AK/uh /~sbrН`p|]/oXH kuӶl3IV^x>}Nd3f¼f*ӠfGqίxsw2dA(9.NK⩄ӽNȮ^Q.x yF2e9Qe&d29UCc\z)ZGŭ  m[>&Qz~.X_Hc*eZ4jYN1Pg k,i.͕3ARQ""1%G LwkX3O ;e鑪CK*2ɻ7}pO{CU*^%w?M$0#VeD=^T -q7~}.|&aOd$1F L]/ E4GY-el79GWϛ=P((3tVB/sk7n'!ݘB:c[1ԂrЭT;Q`_lY|!^WcN,-?APS|[o_nF>c^ ~=Mҿ>n|_T&[V3^Z3ŀ'0`n yՃxC[jiB͚e}iUg$H^=Z_CjjڎЯB`qq?I`FHvD|WA[l6;BM@捍u@<~+9r{*Rw.QuE$}g-KX\2IDhK] !2J> yոa(R}H_{+UY 376kgiHf}rdYHhB,u[>}{+Kn! b#*/8լk,e^G%}6̅'!|Q7a5 u}>7<;]2d}gF^xs<Ë[&B:N.eN["%z`O8esk#L}>WE̩eEv[.9[ nXih~C : )/ƅM³C(5ة12&l!v7 pzeDEu@h e~LRIލ@Aw>ᾡT‰w>,-p<'~WRDz/s<_җIɎI?d)┬ qsM Owش ͿyRJu䛲^;%o [1r)YWKr5C:ng[ĭiuͯן[u6VZۜ# DiF;yLE?o<3uFo] 6Q%43' kr)[.Df8+Z?80x9u%nJ/1\r@٫l'ۦciVjJ}+WR(^u!au`nl^׶}3˹T>i0%oYH'eezsQ wrm{~-R#%7f-•!z'EX" A<#XNT"/.rGpH,):"XmktjA}S8U7RVwo ,7*b/ds9O;- M8qjD>-EZ4ئE]U3|Ćno_u}<+ߠvz\BǢF3" Ӻ7GZl_pxT2e VWM{zCZdy)ԋclej,dv, Zڗ[ՍKvl}|2~%Dr}n|xjٵ168i@khxR% ߓSw^#IK~A-lrk'Vf1׵bd1v#"q.O4ӞcQGگ7l߽g}l}Ōª2Gl+bŴjTmW_؂[^RTWkd|, zŏ[s_<.QU 38Z|.%a%vI_rWyZ~[ 0'߽y HG>m}K#~ eA}87es GX/b,z#v̀,򁂳dUGĚ ._XʾRexFKF?Qe9WՒiɈ;%4TOAͩ?7ouLul: jy,1Yװf;ejdI+æ[:ʵQ1I]OpdttBW/3k5M`3VYq˴.bTS{F|0|v:bKt4A eZ|T-d=AFwG* D@uT,@C`7oQl;p#c@J)>?={ 2ߋUMuF~щ]"m:lKmf>wQ,>)Ѵ@sxZLV E'ǥ"'Os#iA;Q| IqԖn!}7<'lV F, 2qh/wb7 9?塇E !KMg97H'ȍ4yHy!|i "jU᱓h>.bA`mqݚ+ۉ{d2Bă95slGW>^zPn⻍S'S-~?fE'~`*I[n@ń~sv㸊kUW2.L+}Rc%&m~-s|lJ%ܦԖ@ݎ+䋬NEe?{?t"b٧ {ߠsNPY,zµz#)>s Q ЇϓLe^/? 
f lZ/).h2iؐ DSP?7+qjܲNQ7Dֳ%tlӦlԉ!ba&"g3,iLw"%.rWM<&%ґj:-]SӰJ%ze?S?Z h_]bJڢ iImOX"SȾ?hs#ǸV\H~G&iȘSc~au&yZ~8|srzkBㇲrچ}ܜnS˃#LIa~D2%/$OyRu83J;U4 R8J թ7G{^SI)(sa`lH 3;Y$rD7;_d}1X)E 66o蔮;â>M >vjtT.^p-EKs%9ͯ\:Їq" { 4=AFmn & ʬG9>R 6O1Ů(d(un )[ (\& D7A%եcߍ3>+WWU#O|JB ?jcxZM=)|$ p(Xwx%aY̢嚻<.xB|tlOj}qԎGjRVc9s֌jrpS*uO|HFz$pb|+LB/DgHtOJv^嚕6tD0Fo XX@iYJ_.X(K_˜U^/NHF|y]^#>{/<>W=~.]{Y8 ?YǙn\|oZP ܒSyجMܬ㱏,fԨ"T@5ûA"kG3@ҺS$D)IMzRt )ݸ۞e4*(gm ~'כ~_\qpݥq{Fy}mP_Ls2|A>GؔvgcNt͔Sy B;?|koU/]0VJ G1 ݃I!)QwE 7%sOJ.WgerXKD>&(|v,oӃV)置=CJ;uP`.鍂z舗S _p(\I),rs |jT3\{PxO)v_>]=C`{S"{VƔۆsQBV5*KP!}ʙ*qSB^Jw֏3?-v\.aô/+oӔs#ICն}>Bt9& zhVXYqC"i2-Vld endstream endobj 6020 0 obj << /Length1 1144 /Length2 1528 /Length3 0 /Length 2250 /Filter /FlateDecode >> stream xuSyQa"AXHx\dDg"B+1+|&WY#]AĆ#t rt&TA>Z4s:¢gBvP#X4L,SB ]3i̜!>@͝[q?,fδ6Ptw'alPXp+c62@gH4Lx`Ѹp;џb B;E`B !@5|SGa5 V ku^(o>H0fn_T06x)"o1WB;Blľ  îWALd3Ep?5wO-47˝dq\xӽsiiWsYw! 10uL 2)5,fμ87 `px.1"`P @7C0sN0aB0 Q̯4xf.=eςAp+P/AIg'ϐc0nYXm,Zn+t^fD6r)m`9o9L{c" j湥i0=gCT~Ф5EkcϝWFWO;T&#񺓛Qz|%1͏(u#%[҅S.x^Ѡ[ꨂJvU}E*&6޼d(۴dzt̬]ӣ뫻5S^ّX}Dkm60dx0t~zli^Kɚv󶞆{k'֩#%ILf=?x$6wjVurhu(237k<]iu4Mтָ'" ^&?S^PZo#fn=q-ޞ'IS 6Ɖg'v5+:+E-%F#/7삯O$1w_H\W8PAݓҨ@BT9>2hZJ?U7[qf*L&\꺪#oXl-Aih\Fѹw)}ʭDءx5{b 2+: M%w:~uxe[ؤ=j*/ާ z:V]q[e"Y)sa@&YDtd[~Lwp[:eMY1uX|ƹڪ~9qluL,a$+o[{$mr>[4|x~p7>Qi\XZT< 0\8e@<2}llDUޭ\Q=D-)p#1ve9k|U\3)J)}AؾގWuЉ<گ4kli3[}!FW7=81&A[%E R9etI犓%?Hd)g֍{}:drވ>~s@ҞhReQ? {#nq69WxKKԇn7r겜p=*VmI.xu$ #c|?M>ՙe:Y`{Yt2C eͺiۍ{6i8U捞5 K֭^]%+ ڍ#VE\~E"Pk~%lLs+ęyoj UVHF`iͶ8QO 6kKZ$M sSC] ąhv~B1Ja:`:>LcKRa-4&w([nR(UK}5*a㧬'R4>o R:`4V̷(2語rnxjo \s͓T҅ اPPhy`#qRãvEjA fR[SiNuC%eNy՝թsG9޷h{cdE>!Gm,)hi|-M7Q21dՈDZêhEm 쩒\h endstream endobj 6022 0 obj << /Length1 1626 /Length2 15159 /Length3 0 /Length 16009 /Filter /FlateDecode >> stream xڭct$.v*muJ:m۶m갓t줻}sƨ5gg5T5,̀Ҏ &6fV~#"΅@E%48:H@h`g!P$<]lAZ-u:06V_܁vN@_D ,m U]9e@t1٘ḿ@: ?2s\6Ӏ@\'W @s;7[:` Ws'oUUIYj pihOK8@@?̀ W';SϿ91`L],쀮ab3uodlE/6 W%3ߚ栿lX9KGnNt׀h$L-<@KeGߒ;DoEyo}Ohi7;;eS 71u}g7WWFV?}r ӿ#s+ +3뿍66@ U5er8XYçimc:X'R<*qW%iz:?ZQr_`=Llܼ&Nw/!>nVCYb7+ۿ:?k2ui<4G\s45H A:7k9gDUՇ2tz;?O^R a8b1*GμЉYQ܃f>>QS7*{!py p/ |vB3OoAkh(>NL329>6=!/J/,ilǝ LkIO~S9Ѣ+Kɲ;60C{T q2I5K2P##` Yd^b[AP' <\, 5 Vg`LP  ^B\%f~\ofY3p~0+(, T3/UpwR-nx4y|[^f0It_jq'Jɋv~n{"lae}ya)D<htQCUXd*%D\刬4MgH6rVhYk~`C]x 0$R]!3Oaeƀ8hܙv Fs 8d.te>lxUAntun7?JqƘ)n8r-iC!}OLNCj5!/6wτw Z YA5ONEG!/fC 1@ yVYtٖ֘&eoC~D;m{9:>U"o-/58/KOf)Fڲl"~úI= Ҿdm*x͏d{d0f>@ UӦL%omrdYX:2n,o igZ5d|J]|UjxLTjpu}gD~5( ֫P㬑S+ƵF0'LةƔxa3 ]n8x_Pp!qQ_g[/H lo^gv0N7b~'9/stAһ%kS(ҩdаRz@s#}ѰrJbsl⑺vۮI۳)r{L+kf_jHhd?I+Dہ[ӰahJe!˩4=]|搪0c}MjM,6J"~$Ge4#Jby?IYp17)9 A{YQbg'Ӄu<.|;nc,b6&q9b|0<\=MΈeS:1UE-Uٌ+c՘~;tխƩ=0S^FPϬ/0 wRʎ!|i1|ߵI{Vrh2ʺdܞ#% .LҌ'ԂU|uqͼ7fԋ1b".pmMnRYɿ8`=+:Y>)Uq^}xw;ɄtN8撥paCIڳJ&QQ:a(p ւ-Ȋ*BH2pq5XJh:TIhO@XwR1_N(X1+[.B`0Q+B1Ίs+N.k.P8콁Bͣa(7Lsea,j&a^[9# u UOnR^2~l֮-F5Z~f*L2P(HHM\2d|W|!3n]Z֍{oK+}Ћ' nHAȕڙфFC "!HN+6MB>Q{zQܳjشQ" V Q1xlRB(Uz9nmm%v4ͭ`T/VІA̶׏>-ƺqx\+mXCpq6W7iAEjvRA֜LLtnEJoYqއb^]S̽SL/9_FOHy{;1WK glXzT aRCURSL!W B!gl+#Nܨ_ V,{G(mc[W'ArUդKL-BA% l{C=<aRW -Pg}ƒslL!VMnį^d 䴋귦x_;֪(aa\2ln>P53@v{~O4I/Tg:}IFesăj + -V+Txu}!?iCr$keǑ FSRU&ە6Ly~wcU] y?_Z38 * E܌CſOu̎+=Cuc=*j`~V~gO<#[28JKe)CcAk^?;<'0Tk.E^ok}qQ6FE7 r8e &6Jjxɽа!]Ad9X!!}:5yo$_16c\[x̔b '53:6uy|#"GϾ}BHwݮ E01fH㒝C]\3S.}ֻ}Yz%!|'MZsCk#ٚo11-A~Pn;u]EG L=-q9nAU~g#lb]]"g׽Ά(# + ]nA [RRԋn 슔U%O ]6-SOOrJj`Vcw.O9;<.Tf2lC{Cу\y'wm3_AAfW Y^g`4F ,B,nR,Q8J˩{x$qS߷Ղ`(/FJ'.5v4R0 @4@9tıu:?8H90BWh2FǑmJR;~D?lchB1ܥp31 x)Rֹ E_H۟5uE%Jp}Wt5u{#( 'E4[tH"nn%w`*0]_H%NfAud #1!=Sy(0'#|J}(՟f3TqāyL$v*n#/.}C 4~iwD:̦m-~ϴ`*Us jdkʰuY}Πd ֯Ԡ]ZC(fʨp@!G[O?LsRԨC٬m'8p~䠊7d[ptC" HN,a@\R]\YAzvaۚnH:)ghޮ^%8":+l:|ЕqFn ئ󠘶޾->y:|@LE4IrpƶI 35 58r Go`9di)p./ZR &h7X.;% 㓋 
H]'6k5+o\9u>g"eϖDž+h78.MTz$Qw: L'۪ֈ%S|dMH$twuE.[A9~ Uf7ue{H\ 8_8 1 g\㽙Nyƭs$:@;9{\s1`HQ,i}3eaS8fyu޲] O` wNM,X;Y4ET2-&!Bݍ>l;ގ ՜GFbm/s"f4IZ*T=su^r^! 5W"ah_1W׸6mVCL`]B B?ƊOGM,>?Ur8Z-~!*h53&sVF.ވ^6C 2Rm. ?H<՞_&A^zKĺl#b,ĉkmZzQ:F̻C 4҆eۈo=8;^3?E|Zcv.ٚb_.RNxW&R.=l' XHR0OƂ1i 7~=aU *žQPR ճ(2(,Ztca`T5M*}{/ښ$Urx~,h2Gs vDZ3qCֿ~· OsrȖ.'=7䰿mBs !36KE*S1P,Bٕ>Ѱ)3X}O\ç>l02-gKjU=a31jЬ݂Gg',Uhe" 3탠 9mX1jZIݚG,BoMҁD1W2Zg]ak?I~405d=$Pm@{Wq{{EҺj/Z1-I{c˩I H7I'=X,s :k13+T((2\* %c{ZӶUHf*ܴGGGR!1ew4ʚnY%!=0/#@*w~:pI+SO˜f(I54_6.v/'K༢x ,t&4xUbx509?r-kkC x8ü'h=;>W7g|wmW[#½kCȆH`(A@U^tc# סFhr.+62B5*W'W$ZʑA(9Nr"+) p 'ۛeᯰ%WnJ9U(Dr \bY2 $P`d2Zy4_H G ٶQaN? T< ;>"v֮x[_خB=feSP[< kڜU8΃(= @$!gFVV~O7,A9˸R LF N~i]+^IHB\Dt6aFR(-t#eA( EYK\!WIT-Z_)yQk#(E KR6GQ]cT@m#7ǂ[:F C{)lvUAOQgˮώta5ncRo}4 3N g=Yҋf.m31 0Ioܫ+;V1GzU~~ʃ[I,$J &w8̚w[eEL0(w2!+d%j 9--3fpEυ lɅ1&;w)7%db3ӁyÆ9jގ ykhQ̂G(Y_t,\㭩KAOމ/ E|94^-mr$I 63e6qmY=uG9fD(IDp{5T_'yqX,ljrS8w 8sg;rY6$>3a\QΧ?s_=8`{hHuǙ8q '`{ ixp2QyV;pQ﵉(*1EJVwf)مݪFrI4I' -*@_N$o_1X.kNG;t,fۭKBLRV$=]~X ?MHc }Q|IAl232} /'eX@x$:aj`]Qq&Z-ьuYT/ɴ9/yz|?~U~ɑz)]>Rbmm@%L1ʊ n $A\D)CIk!'!EM.qRj0Gߟ*Z:WD*a=g,'4hKVM6{'r0Ё `}A  ZH4#H`30X"Ӫr7dC;XN00Y4` +ug}kG'#U[QFl_QcbQ_:%OPA/6}0Ƌd7nȣ9hIN58$sS!]B"lyɃ葞5D#iXCjW ii?dU]Bls1k{䨣Ae+[u>mjWШ't 6v]9t#xwƬc(X `-ͨ#Emo 27Y^_Syd#P;05WkQt~\+p -g-; ~0Φ?KHŋTŶxT+Œ`ɚD H^h^G((7xX\}ŭq_)g͓&~45!2ba'l͹ R9&!ye] 0X@Tdie2M+mt0 N虺o0EHE3DUˮ.=fMoŇIP+/aq5r7@9ul?hW5is>Dz.h:lJ-MSMz/چUlA;/%! >D"7iAin1/i. Gp`rzft[JQZ_Мg}.3cXTu ^@ٴԼ0\OJ+̟'R~lE;gD2rec'zI[BJxMd*4/&"QzV'SM ߷ɹka6jւ|}Ge7>3j1R|_'g|51դp$!Y:Iޡ`s A^)k5/zƿo2q'V/T8`,cz8U70; vw| |&Z65cG xl6 '-ȞgB(a55`C^E+5|k57l2:wXSFDm~wl=fh9W1chb4b׭[+Sⲱjܭ-otDA#AQv&=gg{@4`] P5(hwǽ"REאo jqeM,]x zԌp9;xe@f!v=z؇dd'ԘO3R".U:ЖOʼz{ƣ%!0P@;c/`R:LiS)C=J56,7 $pr f&rrT*a#It[Ru.am:yoG*`]Kү w# 6[KkE(+bQZS"Weso+\yQi׀;c[r멄iޑB֘ 7!zPЦ TX $]~1)O]NC3Gr$ah֕0VVq;JĪ9]Aa&fGV maR0yKFelNZ[= @+XNV)*XI7;fL>)9>YK_[穀@7hS .+clTҬ_2DV8"z/G{ʳ?.p}ZiO1l=NLwzDu@]E 2}4yGr/T d nH}-T7ɤ. T-i=̗OgUNmW({Yp/,hѥr԰|![*dWGYI; Cϟcob5+M%Cb _'b{Lbf.`J6'T[;g׆\7Lq\#!/n]G0YZ%UhJ='' k9|YؾdyypC;!G:">YLX6tvM۾;TX;-C#Akhk79 ڧ.5᷒{@XCےɤ,F$/ވᄦ?}f3̕\MUEޭE/Q9>GtL/JlEB˚zW*E ^yܰIy\Mv)Q$w:a[N[. Li ժ(ALo^Xxt+SbeyЗց4j<DXu|/6H۾V,dyq9+/r`0-C5Q{س`{qM~(齀yw',VD6NFj(pg-YSC'ˉ% ͪgfYzcn#Q[[?AW!Y4Fb|6% g7du?Om AJ~>=UY*R E^;/gTCQ>"lvTچˀ!Ǩ/jV^Xw3]̚6kmdҩ9k~99"|Hk79?}ģAÂ}7ǻӨu2Ὸ^z`**wHf$xF*|ӷvA׍ gJQϊu$ 7@x+oe.'MNA.G$U$ܥ"I{88qg/l0&3W)VmbI}I?lEC <k LX>o02/<|]+W0zP<)pf]/l;GbC%ܱzj7@/FmiAGU'O@rv4QnBϡ Β,gxx=W)D *F]F1 38㕝 )x WYB828,(g<@}^⍹$ggbzT|k'PB!|lfV; r9 '*+jԉ3ܠ ~軟E>f\ 4+[["fy1Q21v7@chNlN%c@զPnca]3دoreQ<4Ȃy$ec)BLN_}&U6hӨș:c"DwiJلt'~¥'\7p>QJPpwd8ǚ?㬙֮YWY l8/l? 
WFZ*+r?1zL{erX\{6)e+U ;-?^]N3 .;*diBl& ?6<}J6Gr])-ғmL{^hPu4vfN!C/ @R==McF 事Dws e{UTf`{Wo=Ř'Ckʵ;@<5Jo4M~C ~7n2xW5`C]+曝 y&ݨ}@AE}FG .dAf+y 'aa-F ee_L`iMr# [#|ggc:p+rUЈo%up֛Q1ų{0uREVAzp㠻 2J ߫,-|崟ghkb9T=-Hbpd#RÀD}lSTo"Y7 |S=z~zp4kXGuݷj6ÊhwHc ۾J ߜE\?(qw녔8?rz!c;9/q[?BadZ\TK ^$&<1/d+50hJv0&/R@`-@\pX8YP'y̐=ve?*XzOzq55e[q exk%W7DFZ|P zw0 +>9n+Ƣ}* ~46*p#Ju0h g ;+&Ltwuqt9jzS%z9~}4(.~QW]2ˆBPLS`WpYߚe95-m9gn/%- BO;7&E%$&56d͉SwI oP!9g2iMM!z<ͯ%KUL 64GR"NF!-rT˲쨬jy#!SM0AOKzFQ%<9/yVacシVLqƫDtn./eKi_Lܦ^_$nGbu"Mn6~,cm\: W&㺜/ߗ/0i;MIٖISfٟC?289NDt~B:䰥\09hi^6˛Sc&Mƾ""ee#%װ}Jw0v KbOn!ӫA4U@@YxĪ˞3a8hE 8ȨK%w'0쥭8yU#:{s?njwuqi|ƊM@G-^|u#94g3;|PmB' OQq>ܴGsx*4my{4GHH#eMYU$>lxGGz\OC*HuОB_wᦚ4=Ei4D~S2' VoU8W^V'2TK=bm:Jyy, n挅, q5dW/?~%4eJ;E_p%&Q0[r CNzDb$a~ET.%D?w5 e$NN[Rz{bJun5[= cUoEAOJqXy\UYƭ— }>p#W\sX[LTiƑtGnUlKyN*Ifsv2>+*:F{\:1אZR65Ti=ӒB)P*G*~ CQ=lsyMSce (ڰ[R)ZP$GUG#ƶq}ԔGN =9%A [ԇ` endstream endobj 6024 0 obj << /Length1 1630 /Length2 20235 /Length3 0 /Length 21089 /Filter /FlateDecode >> stream xڬcto&vvlvl۶wl'Îm۶ӱm;y̙uޙ/3^wW}Uu]km %Ua3+ #3@EN[AhkguZ9؋y@3 `憣:8z9[YXU4iO?<oXY(~m!/WK UTҖVPK*$@gc[)@h;8l}:؛YS _,a1hjtrq rX8ۻ큫"7/+oV%1t4v'_7o?% t' `fhk7_0GgpsOg-/_umu_Q ֜oNS׿-i{s fns:A _f^39ߔ;DoEy/=Wh 7[[cd @Ϣ5v1?њ?Im_imr)YZ̍m_vu{3=j+,Lm.\Ϥ "NY T;j^jOw.+7+&忀X,ol [73˿<q{SFGO?nS7g"k?{ h `bZ;2);>XڨVTP]i^4tq(C{4ևeK՛'%/@ݢ; b2(ELЌ~(q7bPE8&̽ 4! 2jpoC|8X ^cls$W/#FOWwNVDuoe7b]toD_ B&qaE 3j2] /3P;#?幫a: 4H }i2z\X gf,Z۰ g7[bdΧO5tЪc{"Hq43?b}HK\j@DK%J~R"W<{;xOTYe:OU&Vprnv|ƽzH^d6N|dKB\*ϼ%T,YզdlL QbmI/lK§EԉZğޥ1n4;}qiVe%7G#Gb"%TS ~A)ZB-.HT;E6dEP)Wy5CI?wrmw#[l7oBUU}0Ő^?܁oÇŭ+#p: P}h-S0ROE-a$ Ltv@3g-=WąnBsV^.hwߵVL[yc39UCS#SߥLAt%ix V<%)|C뿠'9ѕtbV=s5&)'H݄ܩiFdoBf?Rt+b*:ț!ÃI ftF$܇.`xPC[X;}%6S}jG5A5n43eKpWxleaIT]yag$iG#VۻvuY -a@t9 ѐ"b؋߬LKX !45w4fnvJב{=l7ҡ:zNW0DNd饂u1oJ%x0r,0Pp~i$2r%J ߸-L&-:^ɜDD1.“gp Xr";ܰ3UiVɴL*CNJh iLsD˵6M9 Jk#[ʕiM:K^ϴx&iOob̙n.DŽ)Xk˜NE%:{dDZgKayf"ĽD54{ɦЪqP(QKZGص#ؾޙ$e[6(l8Rh|'ۆ9o4+Əc 3Y"2όH򟆾}ku/,p5ezIsG p^\vu fmE"PdFG4A "wlm/urH8>Pz& )͹'ƬH(=4d]qݨB:%E>=A&XM?zZ*f-#^bzz2{E6 [x.oDk-ŵ('x4Vʤ˃=tsrO@-$ m&d":)ѝU;pOKǽ wC69# oVqHXR?+~ZW߈XH9FT?^. 
metafor/tests/0000755000176200001440000000000013150625652013056 5ustar liggesusersmetafor/tests/testthat/0000755000176200001440000000000014060150152014703 5ustar liggesusersmetafor/tests/testthat/test_misc_metan_vs_rma.peto_with_dat.bcg.r0000644000176200001440000000207213465300323025204 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma.peto() against metan with 'dat.bcg'") source("tolerances.r") # read in tolerances test_that("results match (FE model, measure='OR').", { data(dat.bcg, package="metafor") ### compare results with: metan tpos tneg cpos cneg, peto nograph or log res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) expect_equivalent(res$beta, -0.4744, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.5541, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3948, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -11.6689, tolerance=.tol[["test"]]) ### 11.67 in Stata expect_equivalent(res$QE, 167.7302, tolerance=.tol[["test"]]) ### compare results with: metan tpos tneg cpos cneg, peto nograph sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.6222, tolerance=.tol[["pred"]]) expect_equivalent(sav$ci.lb, 0.5746, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.6738, tolerance=.tol[["ci"]]) }) metafor/tests/testthat/test_analysis_example_raudenbush1985.r0000644000176200001440000001301714052500405024237 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:raudenbush1985 context("Checking analysis example: raudenbush1985") source("tolerances.r") # read in tolerances ### load data dat <- dat.raudenbush1985 test_that("results are correct for the random-effects model.", { ### random-effects model res <- rma(yi, vi, data=dat, digits=3) ### compare with results on pages 83, 85, and 86 (in text) expect_equivalent(res$tau2, 0.0188, tolerance=.tol[["var"]]) expect_equivalent(coef(res), 0.0837, tolerance=.tol[["coef"]]) expect_equivalent(res$QE, 35.8295, tolerance=.tol[["test"]]) ### 35.85 in paper expect_equivalent(res$zval, 1.6208, tolerance=.tol[["test"]]) ### empirical Bayes estimates tmp <- blup(res) out <- capture.output(print(tmp)) ### so that print.list.rma() is run (at least once) ### compare with results in Figure 2 expect_equivalent(tmp$pred, c(0.0543, 0.1006, -0.0064, 0.2144, 0.1051, -0.0082, 0.0174, -0.0293, 0.1604, 0.2485, 0.1618, 0.1102, 0.0646, 0.1105, -0.0288, 0.0258, 0.1905, 0.0744, 0.0248), tolerance=.tol[["pred"]]) expect_equivalent(tmp$pi.lb, c(-0.1324, -0.1033, -0.2228, -0.0533, -0.1622, -0.1737, -0.1481, -0.2689, -0.0543, 0, -0.097, -0.1303, -0.192,
-0.1463, -0.2405, -0.1906, -0.0076, -0.0808, -0.1954), tolerance=.tol[["ci"]]) expect_equivalent(tmp$pi.ub, c(0.2411, 0.3045, 0.21, 0.4821, 0.3724, 0.1572, 0.1828, 0.2102, 0.3751, 0.497, 0.4206, 0.3507, 0.3212, 0.3672, 0.1829, 0.2422, 0.3886, 0.2295, 0.245), tolerance=.tol[["ci"]]) ### empirical Bayes estimates (just the random effects) tmp <- ranef(res) expect_equivalent(tmp$pred, c(-0.0294, 0.0169, -0.0901, 0.1307, 0.0214, -0.0919, -0.0664, -0.1131, 0.0767, 0.1648, 0.0781, 0.0265, -0.0191, 0.0268, -0.1125, -0.0579, 0.1068, -0.0093, -0.0589), tolerance=.tol[["pred"]]) expect_equivalent(tmp$pi.lb, c(-0.2187, -0.1852, -0.3019, -0.122, -0.231, -0.2659, -0.2403, -0.343, -0.1337, -0.0723, -0.1674, -0.2043, -0.2627, -0.217, -0.3207, -0.2697, -0.091, -0.1761, -0.2736), tolerance=.tol[["ci"]]) expect_equivalent(tmp$pi.ub, c(0.1599, 0.219, 0.1216, 0.3834, 0.2738, 0.082, 0.1076, 0.1169, 0.2871, 0.4019, 0.3235, 0.2572, 0.2246, 0.2706, 0.0956, 0.1539, 0.3046, 0.1574, 0.1558), tolerance=.tol[["ci"]]) skip_on_cran() ### profile tau^2 opar <- par(no.readonly=TRUE) profile(res, xlim=c(0,.20), progbar=FALSE) par(opar) ### profile tau^2 (without 'xlim' specified) opar <- par(no.readonly=TRUE) profile(res, progbar=FALSE) par(opar) ### profile tau^2 (with parallel processing) opar <- par(no.readonly=TRUE) profile(res, xlim=c(0,.20), progbar=FALSE, parallel="snow") par(opar) }) test_that("results are correct for the mixed-effects model.", { ### recode weeks variable dat$weeks.c <- ifelse(dat$weeks > 3, 3, dat$weeks) ### mixed-effects model res <- rma(yi, vi, mods = ~ weeks.c, data=dat, digits=3) ### compare with results on pages 90 and 92 (in text) expect_equivalent(res$tau2, 0.0000, tolerance=.tol[["var"]]) expect_equivalent(coef(res), c(0.4072, -0.1572), tolerance=.tol[["coef"]]) expect_equivalent(res$QE, 16.5708, tolerance=.tol[["test"]]) ### 16.58 in paper expect_equivalent(res$zval, c(4.6782, -4.3884), tolerance=.tol[["test"]]) ### empirical Bayes estimates tmp <- blup(res) ### (results for this not given in chapter) expect_equivalent(tmp$pred, c(0.0927, -0.0645, -0.0646, 0.4072, 0.4072, -0.0645, -0.0645, -0.0646, 0.4072, 0.2499, 0.4072, 0.4072, 0.2499, 0.0927, -0.0646, -0.0645, 0.2499, 0.0927, -0.0645), tolerance=.tol[["pred"]]) expect_equivalent(tmp$pi.lb, c(0.0198, -0.1552, -0.1552, 0.2366, 0.2366, -0.1552, -0.1552, -0.1552, 0.2366, 0.1391, 0.2366, 0.2366, 0.1391, 0.0198, -0.1552, -0.1552, 0.1391, 0.0198, -0.1552), tolerance=.tol[["ci"]]) expect_equivalent(tmp$pi.ub, c(0.1656, 0.0261, 0.0261, 0.5778, 0.5778, 0.0261, 0.0261, 0.0261, 0.5778, 0.3608, 0.5778, 0.5778, 0.3608, 0.1656, 0.0261, 0.0261, 0.3608, 0.1656, 0.0261), tolerance=.tol[["ci"]]) ### empirical Bayes estimates (just the random effects) tmp <- ranef(res) expect_equivalent(tmp$pred, c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), tolerance=.tol[["pred"]]) expect_equivalent(tmp$pi.lb, c(-0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016, -0.0016), tolerance=.tol[["ci"]]) expect_equivalent(tmp$pi.ub, c(0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016, 0.0016), tolerance=.tol[["ci"]]) ### predicted/fitted values tmp <- predict(res) ### (results for this not given in chapter) expect_equivalent(tmp$pred, c(0.0927, -0.0645, -0.0645, 0.4072, 0.4072, -0.0645, -0.0645, -0.0645, 0.4072, 0.2499, 0.4072, 0.4072, 0.2499, 0.0927, -0.0645, 
-0.0645, 0.2499, 0.0927, -0.0645), tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, c(0.0198, -0.1552, -0.1552, 0.2366, 0.2366, -0.1552, -0.1552, -0.1552, 0.2366, 0.1391, 0.2366, 0.2366, 0.1391, 0.0198, -0.1552, -0.1552, 0.1391, 0.0198, -0.1552), tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, c(0.1656, 0.0261, 0.0261, 0.5778, 0.5778, 0.0261, 0.0261, 0.0261, 0.5778, 0.3607, 0.5778, 0.5778, 0.3607, 0.1656, 0.0261, 0.0261, 0.3607, 0.1656, 0.0261), tolerance=.tol[["ci"]]) skip_on_cran() ### profile tau^2 opar <- par(no.readonly=TRUE) profile(res, xlim=c(0,.06), progbar=FALSE) par(opar) }) metafor/tests/testthat/test_analysis_example_morris2008.r0000644000176200001440000000736714052500377023416 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:morris2008 context("Checking analysis example: morris2008") source("tolerances.r") # read in tolerances ### create datasets datT <- data.frame( m_pre = c(30.6, 23.5, 0.5, 53.4, 35.6), m_post = c(38.5, 26.8, 0.7, 75.9, 36.0), sd_pre = c(15.0, 3.1, 0.1, 14.5, 4.7), sd_post = c(11.6, 4.1, 0.1, 4.4, 4.6), ni = c(20, 50, 9, 10, 14), ri = c(.47, .64, .77, .89, .44)) datC <- data.frame( m_pre = c(23.1, 24.9, 0.6, 55.7, 34.8), m_post = c(19.7, 25.3, 0.6, 60.7, 33.4), sd_pre = c(13.8, 4.1, 0.2, 17.3, 3.1), sd_post = c(14.8, 3.3, 0.2, 17.9, 6.9), ni = c(20, 42, 9, 11, 14), ri = c(.47, .64, .77, .89, .44)) test_that("calculations of escalc() are correct for measure='SMCR'.", { ### compute standardized mean changes using raw-score standardization datT <- escalc(measure="SMCR", m1i=m_post, m2i=m_pre, sd1i=sd_pre, ni=ni, ri=ri, data=datT) datC <- escalc(measure="SMCR", m1i=m_post, m2i=m_pre, sd1i=sd_pre, ni=ni, ri=ri, data=datC) ### (results for this not given in paper) expect_equivalent(datT$yi, c( 0.5056, 1.0481, 1.8054, 1.4181, 0.0801), tolerance=.tol[["est"]]) expect_equivalent(datT$vi, c( 0.0594, 0.0254, 0.2322, 0.1225, 0.0802), tolerance=.tol[["var"]]) expect_equivalent(datC$yi, c(-0.2365, 0.0958, 0.0000, 0.2667, -0.4250), tolerance=.tol[["est"]]) expect_equivalent(datC$vi, c( 0.0544, 0.0173, 0.0511, 0.0232, 0.0864), tolerance=.tol[["var"]]) ### compute difference between treatment and control groups dat <- data.frame(yi = datT$yi - datC$yi, vi = datT$vi + datC$vi) ### compare with results on page 382 (Table 5) expect_equivalent(dat$yi, c(0.7421, 0.9524, 1.8054, 1.1514, 0.5050), tolerance=.tol[["est"]]) ### (results for this not given in paper) expect_equivalent(dat$vi, c(0.1138, 0.0426, 0.2833, 0.1458, 0.1667), tolerance=.tol[["var"]]) ### use pooled pretest SDs sd_pool <- sqrt((with(datT, (ni-1)*sd_pre^2) + with(datC, (ni-1)*sd_pre^2)) / (datT$ni + datC$ni - 2)) dat <- data.frame(yi = metafor:::.cmicalc(datT$ni + datC$ni - 2) * (with(datT, m_post - m_pre) - with(datC, m_post - m_pre)) / sd_pool) dat$vi <- 2*(1-datT$ri) * (1/datT$ni + 1/datC$ni) + dat$yi^2 / (2*(datT$ni + datC$ni)) ### compare with results on page 382 (Table 5) expect_equivalent(dat$yi, c(0.7684, 0.8010, 1.2045, 1.0476, 0.4389), tolerance=.tol[["est"]]) ### (results for this not given in paper) expect_equivalent(dat$vi, c(0.1134, 0.0350, 0.1425, 0.0681, 0.1634), tolerance=.tol[["var"]]) }) test_that("calculations of escalc() are correct for measure='SMCC'.", { ### compute standardized mean changes using change-score standardization datT <- escalc(measure="SMCC", m1i=m_post, m2i=m_pre, sd1i=sd_post, sd2i=sd_pre, ni=ni, ri=ri, data=datT) datC <- escalc(measure="SMCC", m1i=m_post, 
m2i=m_pre, sd1i=sd_post, sd2i=sd_pre, ni=ni, ri=ri, data=datC) ### (results for this not given in paper) expect_equivalent(datT$yi, c( 0.5417, 1.0198, 2.6619, 1.9088, 0.0765), tolerance=.tol[["est"]]) expect_equivalent(datT$vi, c( 0.0573, 0.0304, 0.5048, 0.2822, 0.0716), tolerance=.tol[["var"]]) expect_equivalent(datC$yi, c(-0.2213, 0.1219, 0.0000, 0.5575, -0.2126), tolerance=.tol[["est"]]) expect_equivalent(datC$vi, c( 0.0512, 0.0240, 0.1111, 0.1050, 0.0730), tolerance=.tol[["var"]]) ### compute difference between treatment and control groups dat <- data.frame(yi = datT$yi - datC$yi, vi = datT$vi + datC$vi) ### (results for this not given in paper) expect_equivalent(dat$yi, c(0.7630, 0.8979, 2.6619, 1.3513, 0.2891), tolerance=.tol[["est"]]) expect_equivalent(dat$vi, c(0.1086, 0.0544, 0.6159, 0.3872, 0.1447), tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_dersimonian2007.r0000644000176200001440000000535514052500353024377 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:dersimonian2007 source("tolerances.r") # read in tolerances context("Checking analysis example: dersimonian2007") ### data for the CLASP example n1i <- c(156, 303, 565, 1570, 103, 4659) n2i <- c( 74, 303, 477, 1565, 105, 4650) ai <- c( 5, 5, 12, 69, 9, 313) ci <- c( 8, 17, 9, 94, 11, 352) test_that("results are correct for the CLASP example.", { skip_on_cran() ### calculate log(OR)s and corresponding sampling variances dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i) ### fit RE model with various tau^2 estimators res.PM <- rma(yi, vi, method="PM", data=dat) res.CA <- rma(yi, vi, method="HE", data=dat) res.DL <- rma(yi, vi, method="DL", data=dat) res.CA2 <- rma(yi, vi, method="GENQ", weights=1/(vi + res.CA$tau2), data=dat) res.DL2 <- rma(yi, vi, method="GENQ", weights=1/(vi + res.DL$tau2), data=dat) res.CA2 <- rma(yi, vi, tau2=res.CA2$tau2, data=dat) res.DL2 <- rma(yi, vi, tau2=res.DL2$tau2, data=dat) res.EB <- rma(yi, vi, method="EB", data=dat) res.ML <- rma(yi, vi, method="ML", data=dat) res.REML <- rma(yi, vi, method="REML", data=dat) res.HS <- rma(yi, vi, method="HS", data=dat) res.SJ <- rma(yi, vi, method="SJ", data=dat) res.SJ2 <- rma(yi, vi, method="SJ", data=dat, control=list(tau2.init=res.CA$tau2)) ### combine results into one long list of fitted models res.all <- list(res.PM, res.CA, res.DL, res.CA2, res.DL2, res.EB, res.ML, res.REML, res.HS, res.SJ, res.SJ2) ### create table with estimate of tau, mu, and standard error results <- rbind( tau = sapply(res.all, function(x) sqrt(x$tau2)), mu = sapply(res.all, coef), se = sapply(res.all, function(x) sqrt(vcov(x)))) colnames(results) <- c("PM", "CA", "DL", "CA2", "DL2", "EB", "ML", "REML", "HS", "SJ", "SJ2") tmp <- t(results) ### compare with results on page 111-112 (Tables 3 and 4) expected <- structure(c( 0.3681, 0.4410, 0.2323, 0.3831, 0.3254, 0.3681, 0.0023, 0.1843, 0.1330, 0.4572, 0.4084, -0.3811, -0.4035, -0.3240, -0.3861, -0.3655, -0.3811, -0.1974, -0.2980, -0.2666, -0.4079, -0.3941, 0.2060, 0.2327, 0.1540, 0.2115, 0.1901, 0.2060, 0.0694, 0.1343, 0.1125, 0.2386, 0.2208), .Dim = c(11L, 3L), .Dimnames = list(c("PM", "CA", "DL", "CA2", "DL2", "EB", "ML", "REML", "HS", "SJ", "SJ2"), c("tau", "mu", "se"))) expect_equivalent(tmp[,1], expected[,1], tolerance=.tol[["var"]]) expect_equivalent(tmp[,2], expected[,2], tolerance=.tol[["coef"]]) expect_equivalent(tmp[,3], expected[,3], tolerance=.tol[["se"]]) }) 
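### Editorial note (added; not part of the original test file): every test file in
### this suite starts with source("tolerances.r"), which defines the named vector
### .tol used in the tolerance=.tol[["..."]] arguments above and below. That helper
### (presumably tests/testthat/tolerances.r) is not reproduced in this section. The
### sketch below is only an illustration of what such a file could look like: the
### element names are the ones these tests actually reference, but the numeric
### values are assumptions for illustration, not the package's actual settings.
if (!exists(".tol")) {
   ### hypothetical tolerances; the real values come from tolerances.r
   .tol <- c(coef=1e-4, ci=1e-4, test=1e-4, pred=1e-4, var=1e-4, se=1e-4,
             est=1e-4, fit=1e-4, den=1e-4, inf=1e-4)
}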
metafor/tests/testthat/test_misc_rma_vs_lm.r0000644000176200001440000000316013465305760021136 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking tip: rma() results match up with those from lm()") source("tolerances.r") # read in tolerances test_that("results for rma() and lm() match.", { data(dat.molloy2014, package="metafor") dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.molloy2014) res1 <- rma(yi, 0, data=dat) res2 <- lm(yi ~ 1, data=dat) ### coefficients should be the same expect_equivalent(coef(res1), coef(res2)) ### standard errors should be the same expect_equivalent(res1$se, coef(summary(res2))[1,2]) }) test_that("results for rma.mv() and lm() match.", { data(dat.molloy2014, package="metafor") dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.molloy2014) dat$id <- 1:nrow(dat) res1 <- rma.mv(yi, 0, random = ~ 1 | id, data=dat) res2 <- lm(yi ~ 1, data=dat) ### coefficients should be the same expect_equivalent(coef(res1), coef(res2)) ### standard errors should be the same expect_equivalent(res1$se, coef(summary(res2))[1,2]) ### get profile likelihood CI for sigma^2 sav <- confint(res1) expect_equivalent(sav$random[1,2:3], c(.0111, .0474), tolerance=.tol[["var"]]) ### fit with sparse=TRUE res1 <- rma.mv(yi, 0, random = ~ 1 | id, data=dat, sparse=TRUE) ### coefficients should be the same expect_equivalent(coef(res1), coef(res2)) ### standard errors should be the same expect_equivalent(res1$se, coef(summary(res2))[1,2]) ### get profile likelihood CI for sigma^2 sav <- confint(res1) expect_equivalent(sav$random[1,2:3], c(.0111, .0474), tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_yusuf1985.r0000644000176200001440000000352714052500434023257 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:yusuf1985 context("Checking analysis example: yusuf1985") source("tolerances.r") # read in tolerances ### create dataset for example dat <- dat.yusuf1985 dat$grp_ratios <- round(dat$n1i / dat$n2i, 2) test_that("log likelihood plot can be drawn.", { skip_on_cran() opar <- par(no.readonly=TRUE) par(mfrow=c(1,2)) expect_warning(llplot(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(table=="6"), drop00=FALSE, lwd=1, xlim=c(-5,5))) expect_warning(llplot(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(table=="6"), drop00=FALSE, lwd=1, xlim=c(-5,5), scale=FALSE)) par(opar) }) test_that("results are correct for the analysis using Peto's method.", { expect_warning(res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(table=="6"))) out <- capture.output(print(res)) ### so that print.rma.peto() is run (at least once) out <- capture.output(print(summary(res))) ### so that print.rma.peto() is run (at least once) with showfit=TRUE sav <- predict(res, transf=exp) tmp <- c(sav$pred, sav$ci.lb, sav$ci.ub) ### compare with results on page 107 expect_equivalent(tmp, c(.9332, .7385, 1.1792), tolerance=.tol[["pred"]]) }) test_that("results are correct for the analysis using the inverse-variance method.", { expect_warning(dat <- escalc(measure="PETO", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(table=="6"), add=0)) expect_warning(res <- rma(yi, vi, data=dat, method="FE")) sav <- predict(res, transf=exp) tmp <- c(sav$pred, sav$ci.lb, sav$ci.ub) ### compare with results on page 107 expect_equivalent(tmp, c(.9332, .7385, 1.1792), tolerance=.tol[["pred"]]) }) 
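### Editorial note (added; not part of the original test file): the two preceding
### tests check the same pooled odds ratio twice, once via Peto's method directly
### (rma.peto()) and once by computing Peto (O-E)/V log odds ratios with
### escalc(measure="PETO") and pooling them with an equal-effects inverse-variance
### model via rma(..., method="FE"); both give about 0.93 with CI [0.74, 1.18].
### A minimal illustration of that equivalence, wrapped in if (FALSE) so that it
### is never executed as part of the test suite:
if (FALSE) {
   dat.iv <- escalc(measure="PETO", ai=ai, n1i=n1i, ci=ci, n2i=n2i,
                    data=dat, subset=(table=="6"), add=0)
   predict(rma(yi, vi, data=dat.iv, method="FE"), transf=exp)
   predict(rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, subset=(table=="6")), transf=exp)
}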
metafor/tests/testthat/test_plots_plot_of_influence_diagnostics.r0000644000176200001440000000160314052500476025440 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:plot_of_influence_diagnostics context("Checking plots example: plot of influence diagnostics") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### load validity of employment interviews data data(dat.mcdaniel1994, package="metafor") ### fit random-effects model with r-to-z transformed correlations res <- rma(ri=ri, ni=ni, measure="ZCOR", data=dat.mcdaniel1994) ### calculate influence diagnostics inf <- influence(res) out <- capture.output(print(inf)) ### so that print.infl.rma.uni() is run (at least once) ### plot the influence diagnostics plot(inf, layout=c(8,1)) plot(inf, plotinf=FALSE, plotdfbs=TRUE) par(opar) }) metafor/tests/testthat/test_misc_pdfs.r0000644000176200001440000000155513465301705020114 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: pdfs of various measures") source("tolerances.r") # read in tolerances test_that(".dsmd() works correctly.", { d <- metafor:::.dsmd(0.5, n1=15, n2=15, theta=0.8, correct=TRUE) expect_equivalent(d, 0.8208, tolerance=.tol[["den"]]) d <- metafor:::.dsmd(0.5, n1=15, n2=15, theta=0.8, correct=FALSE) expect_equivalent(d, 0.7757, tolerance=.tol[["den"]]) }) test_that(".dcor() works correctly.", { d <- metafor:::.dcor(0.5, n=15, rho=0.8) expect_equivalent(d, 0.2255, tolerance=.tol[["den"]]) }) test_that(".dzcor() works correctly.", { d <- metafor:::.dzcor(0.5, n=15, rho=0.8) expect_equivalent(d, 0.1183, tolerance=.tol[["den"]]) d <- metafor:::.dzcor(0.5, n=15, zrho=transf.rtoz(0.8)) expect_equivalent(d, 0.1183, tolerance=.tol[["den"]]) }) metafor/tests/testthat/test_misc_setlab.r0000644000176200001440000002100213634667161020430 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: .setlab() function") source("tolerances.r") # read in tolerances yi <- c(-.3, -.1, 0, .2, .2) vi <- rep(.02, length(yi)) test_that(".setlab() works correctly together with forest().", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message opar <- par(no.readonly=TRUE) par(mfrow=c(5,3), mar=c(5,6,0,4)) xlim <- c(-3,5) cex.lab <- .5 dat <- escalc(measure="GEN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="RR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="OR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="RD", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="AS", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="PHI", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="YUQ", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="YUY", yi=yi, vi=vi) 
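### Editorial comment (added): the long run of escalc()/forest() calls in this test
### repeats the same pattern for a long list of outcome measures, plotting each once
### with the default x-axis label and, where a transformation applies, once with
### transf= and once with atransf=, so that every labeling branch of the internal
### .setlab() helper is exercised at least once.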
forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="IRR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="IRD", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="IRSD", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="MD", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="SMD", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="ROM", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="CVR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="VR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="RPB", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="COR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="ZCOR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.ztor, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.ztor, header=TRUE) dat <- escalc(measure="PR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="PLN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="PLO", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.ilogit, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.ilogit, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="PAS", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.iarcsin, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.iarcsin, header=TRUE) dat <- escalc(measure="PFT", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.ipft.hm, targs=list(ni=rep(10,length(yi))), header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.ipft.hm, targs=list(ni=rep(10,length(yi))), header=TRUE) dat <- escalc(measure="IR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, 
header=TRUE) dat <- escalc(measure="IRLN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="IRS", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.isqrt, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.isqrt, header=TRUE) dat <- escalc(measure="IRFT", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="MN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="MNLN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="CVLN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="SDLN", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="MC", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="SMCC", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="ROMC", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=exp, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=exp, header=TRUE) dat <- escalc(measure="ARAW", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="AHW", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.iahw, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.iahw, header=TRUE) dat <- escalc(measure="ABT", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.iabt, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.iabt, header=TRUE) dat <- escalc(measure="PCOR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) dat <- escalc(measure="ZPCOR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, transf=transf.ztor, header=TRUE) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, atransf=transf.ztor, header=TRUE) dat <- escalc(measure="SPCOR", yi=yi, vi=vi) forest(dat$yi, dat$vi, xlim=xlim, cex.lab=cex.lab, header=TRUE) par(opar) }) metafor/tests/testthat/test_misc_dfround.r0000644000176200001440000000116713777147464020640 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: dfround() function") source("tolerances.r") # read in tolerances test_that("dfround() works correctly.", { dat <- as.data.frame(dat.raudenbush1985) dat$yi <- 
c(dat$yi) dat <- dfround(dat, c(rep(NA,8), 2, 3)) expect_identical(dat$yi, c(0.03, 0.12, -0.14, 1.18, 0.26, -0.06, -0.02, -0.32, 0.27, 0.8, 0.54, 0.18, -0.02, 0.23, -0.18, -0.06, 0.3, 0.07, -0.07)) expect_identical(dat$vi, c(0.016, 0.022, 0.028, 0.139, 0.136, 0.011, 0.011, 0.048, 0.027, 0.063, 0.091, 0.05, 0.084, 0.084, 0.025, 0.028, 0.019, 0.009, 0.03)) }) metafor/tests/testthat/test_analysis_example_viechtbauer2007a.r0000644000176200001440000001265514052500427024534 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:viechtbauer2007a context("Checking analysis example: viechtbauer2007a") source("tolerances.r") # read in tolerances ### load data data(dat.collins1985b, package="metafor") dat <- dat.collins1985b[,1:7] dat <- escalc(measure="OR", ai=pre.xti, n1i=pre.nti, ci=pre.xci, n2i=pre.nci, data=dat) ### note: results are compared with those in Table II on page 44 (but without rounding) test_that("the heterogeneity estimates are correct.", { ### fit model with different tau^2 estimators res.DL <- rma(yi, vi, data=dat, method="DL") res.ML <- rma(yi, vi, data=dat, method="ML") res.REML <- rma(yi, vi, data=dat, method="REML") res.SJ <- rma(yi, vi, data=dat, method="SJ") sav <- c(DL=res.DL$tau2, ML=res.ML$tau2, REML=res.REML$tau2, SJ=res.SJ$tau2) expect_equivalent(sav, c(.2297, .2386, .3008, .4563), tolerance=.tol[["var"]]) }) test_that("CI is correct for the Q-profile method.", { res.DL <- rma(yi, vi, data=dat, method="DL") sav <- confint(res.DL) sav <- c(sav$random["tau^2","ci.lb"], sav$random["tau^2","ci.ub"]) expect_equivalent(sav, c(.0723, 2.2027), tolerance=.tol[["var"]]) }) test_that("CI is correct for the Biggerstaff–Tweedie method.", { res.DL <- rma(yi, vi, data=dat, method="DL") CI.D.func <- function(tau2val, s1, s2, Q, k, lower.tail) { expQ <- (k-1) + s1*tau2val varQ <- 2*(k-1) + 4*s1*tau2val + 2*s2*tau2val^2 shape <- expQ^2/varQ scale <- varQ/expQ qtry <- Q/scale pgamma(qtry, shape = shape, scale = 1, lower.tail = lower.tail) - .025 } wi <- 1/dat$vi s1 <- sum(wi) - sum(wi^2)/sum(wi) s2 <- sum(wi^2) - 2*sum(wi^3)/sum(wi) + sum(wi^2)^2/sum(wi)^2 ci.lb <- uniroot(CI.D.func, interval=c(0,10), s1=s1, s2=s2, Q=res.DL$QE, k=res.DL$k, lower.tail=FALSE)$root ci.ub <- uniroot(CI.D.func, interval=c(0,10), s1=s1, s2=s2, Q=res.DL$QE, k=res.DL$k, lower.tail=TRUE)$root sav <- c(ci.lb=ci.lb, ci.ub=ci.ub) expect_equivalent(sav, c(.0481, 2.3551), tolerance=.tol[["var"]]) }) test_that("CI is correct for the profile likelihood method.", { res.ML <- rma.mv(yi, vi, random = ~ 1 | id, data=dat, method="ML") res.REML <- rma.mv(yi, vi, random = ~ 1 | id, data=dat, method="REML") sav <- confint(res.ML) sav <- c(sav$random["sigma^2","ci.lb"], sav$random["sigma^2","ci.ub"]) expect_equivalent(sav, c(.0266, 1.1308), tolerance=.tol[["var"]]) sav <- confint(res.REML) sav <- c(sav$random["sigma^2","ci.lb"], sav$random["sigma^2","ci.ub"]) expect_equivalent(sav, c(.0427, 1.4747), tolerance=.tol[["var"]]) skip_on_cran() profile(res.ML, xlim=c(0,1.2), steps=50, progbar=FALSE) abline(h=logLik(res.ML) - qchisq(.95,1)/2, lty="dotted") abline(v=c(0.027, 1.131), lty="dotted") profile(res.REML, xlim=c(0,1.2), steps=50, progbar=FALSE) abline(h=logLik(res.REML) - qchisq(.95,1)/2, lty="dotted") abline(v=c(0.043, 1.475), lty="dotted") }) test_that("CI is correct for the Wald-type method.", { res.ML <- rma(yi, vi, data=dat, method="ML") res.REML <- rma(yi, vi, data=dat, method="REML") sav <- c(ci.lb=res.ML$tau2 - 
1.96*res.ML$se.tau2, ci.ub=res.ML$tau2 + 1.96*res.ML$se.tau2) expect_equivalent(sav, c(-.1011, .5782), tolerance=.tol[["var"]]) sav <- c(ci.lb=res.REML$tau2 - 1.96*res.REML$se.tau2, ci.ub=res.REML$tau2 + 1.96*res.REML$se.tau2) expect_equivalent(sav, c(-.1306, .7322), tolerance=.tol[["var"]]) }) test_that("CI is correct for the Sidik-Jonkman method.", { res.SJ <- rma(yi, vi, data=dat, method="SJ") sav <- c(ci.lb=(res.SJ$k-1) * res.SJ$tau2 / qchisq(.975, df=res.SJ$k-1), ci.ub=(res.SJ$k-1) * res.SJ$tau2 / qchisq(.025, df=res.SJ$k-1)) expect_equivalent(sav, c(.2082, 1.6748), tolerance=.tol[["var"]]) }) test_that("CI is correct for the parametric bootstrap method.", { skip_on_cran() maj <- as.numeric(R.Version()$major) min <- as.numeric(R.Version()$minor) ### run test only on R versions 3.6.x (due to change in sampler) if (maj >= 3 && min >= 6) { library(boot) boot.func <- function(data.boot) { res <- rma(yi, vi, data=data.boot, method="DL") c(res$tau2, res$se.tau2^2) } data.gen <- function(dat, mle) { data.frame(yi=rnorm(nrow(dat), mle$mu, sqrt(mle$tau2 + dat$vi)), vi=dat$vi) } res.DL <- rma(yi, vi, data=dat, method="DL") set.seed(12345) sav <- boot(dat, boot.func, R=1000, sim="parametric", ran.gen=data.gen, mle=list(mu=coef(res.DL), tau2=res.DL$tau2)) sav <- boot.ci(sav, type=c("norm", "basic", "stud", "perc")) sav <- sav$percent[4:5] expect_equivalent(sav, c(0, .7171), tolerance=.tol[["var"]]) } else { expect_true(TRUE) } }) test_that("CI is correct for the non-parametric bootstrap method.", { skip_on_cran() maj <- as.numeric(R.Version()$major) min <- as.numeric(R.Version()$minor) ### run test only on R versions 3.6.x (due to change in sampler) if (maj >= 3 && min >= 6) { library(boot) boot.func <- function(dat, indices) { res <- rma(yi, vi, data=dat, subset=indices, method="DL") c(res$tau2, res$se.tau2^2) } set.seed(12345) sav <- boot(dat, boot.func, R=1000) sav <- boot.ci(sav) sav <- sav$percent[4:5] expect_equivalent(sav, c(.0218, .5143), tolerance=.tol[["var"]]) } else { expect_true(TRUE) } }) metafor/tests/testthat/test_misc_handling_nas.r0000644000176200001440000002037713470300034021576 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: handling of NAs") source("tolerances.r") # read in tolerances dat <- data.frame(yi = c(NA, 1, 3, 2, 5, 4, 6), vi = c(1, NA, 1, 1, 1, 1, 1), xi = c(0, 1, NA, 3, 4, 5, 6)) test_that("NAs are correctly handled by various method functions for rma.uni() intercept-only models.", { expect_warning(res <- rma(yi, vi, data=dat)) expect_equivalent(res$k, 5) options(na.action = "na.omit") expect_equivalent(fitted(res), c(4, 4, 4, 4, 4)) expect_equivalent(resid(res), c(-1, -2, 1, 0, 2)) expect_equivalent(predict(res)$pred, 4) expect_equivalent(blup(res)$pred, c(3.4, 2.8, 4.6, 4.0, 5.2)) expect_equivalent(cooks.distance(res), c(0.125, 0.5, 0.125, 0, 0.5)) expect_equivalent(dfbetas(res)[[1]], c(-0.3273, -0.8660, 0.3273, 0, 0.8660), tolerance=.tol[["inf"]]) expect_equivalent(hatvalues(res), c(0.2, 0.2, 0.2, 0.2, 0.2)) expect_equivalent(leave1out(res)$estimate, c(4.25, 4.5, 3.75, 4, 3.5)) expect_equivalent(ranef(res)$pred, c(-0.6, -1.2, 0.6, 0, 1.2)) expect_equivalent(rstandard(res)$resid, c(-1, -2, 1, 0, 2)) expect_equivalent(rstudent(res)$resid, c(-1.25, -2.5, 1.25, 0, 2.5)) expect_equivalent(length(simulate(res, seed=1234)[[1]]), 5) expect_equivalent(weights(res), c(20, 20, 20, 20, 20)) options(na.action = "na.pass") # note: all of these are of the same length as the original data (except for predict(), 
which gives a single value for intercept-only models) expect_equivalent(fitted(res), c(4, 4, 4, 4, 4, 4, 4)) # note: can compute fitted value even for the study with missing yi and the study with missing vi expect_equivalent(resid(res), c(NA, -3, -1, -2, 1, 0, 2)) # note: can compute residual value even for the study with missing vi expect_equivalent(predict(res)$pred, 4) expect_equivalent(blup(res)$pred, c(NA, NA, 3.4, 2.8, 4.6, 4.0, 5.2)) expect_equivalent(cooks.distance(res), c(NA, NA, 0.125, 0.5, 0.125, 0, 0.5)) expect_equivalent(dfbetas(res)[[1]], c(NA, NA, -0.3273, -0.8660, 0.3273, 0, 0.8660), tolerance=.tol[["inf"]]) expect_equivalent(hatvalues(res), c(NA, NA, 0.2, 0.2, 0.2, 0.2, 0.2)) expect_equivalent(leave1out(res)$estimate, c(NA, NA, 4.25, 4.5, 3.75, 4, 3.5)) expect_equivalent(ranef(res)$pred, c(NA, NA, -0.6, -1.2, 0.6, 0, 1.2)) expect_equivalent(rstandard(res)$resid, c(NA, NA, -1, -2, 1, 0, 2)) expect_equivalent(rstudent(res)$resid, c(NA, NA, -1.25, -2.5, 1.25, 0, 2.5)) expect_equivalent(length(simulate(res, seed=1234)[[1]]), 7) expect_equivalent(weights(res), c(NA, NA, 20, 20, 20, 20, 20)) options(na.action = "na.exclude") # note: all of these are of the same length as the original data, but are NA for studies 1 and 2 expect_equivalent(fitted(res), c(NA, NA, 4, 4, 4, 4, 4)) # note: all of these are of the same length as the original data, but are NA for studies 1 and 2 expect_equivalent(resid(res), c(NA, NA, -1, -2, 1, 0, 2)) expect_equivalent(predict(res)$pred, 4) expect_equivalent(blup(res)$pred, c(NA, NA, 3.4, 2.8, 4.6, 4.0, 5.2)) expect_equivalent(cooks.distance(res), c(NA, NA, 0.125, 0.5, 0.125, 0, 0.5)) expect_equivalent(dfbetas(res)[[1]], c(NA, NA, -0.3273, -0.8660, 0.3273, 0, 0.8660), tolerance=.tol[["inf"]]) expect_equivalent(hatvalues(res), c(NA, NA, 0.2, 0.2, 0.2, 0.2, 0.2)) expect_equivalent(leave1out(res)$estimate, c(NA, NA, 4.25, 4.5, 3.75, 4, 3.5)) expect_equivalent(ranef(res)$pred, c(NA, NA, -0.6, -1.2, 0.6, 0, 1.2)) expect_equivalent(rstandard(res)$resid, c(NA, NA, -1, -2, 1, 0, 2)) expect_equivalent(rstudent(res)$resid, c(NA, NA, -1.25, -2.5, 1.25, 0, 2.5)) expect_equivalent(length(simulate(res, seed=1234)[[1]]), 7) expect_equivalent(weights(res), c(NA, NA, 20, 20, 20, 20, 20)) }) test_that("NAs are correctly handled by various method functions for rma.uni() meta-regression models.", { expect_warning(res <- rma(yi, vi, mods = ~ xi, data=dat)) expect_equivalent(res$k, 4) options(na.action = "na.omit") expect_equivalent(fitted(res), c(2.6, 3.7, 4.8, 5.9)) expect_equivalent(resid(res), c(-0.6, 1.3, -0.8, 0.1)) expect_equivalent(predict(res)$pred, c(2.6, 3.7, 4.8, 5.9)) expect_equivalent(blup(res)$pred, c(2.4444, 4.0370, 4.5926, 5.9259), tolerance=.tol[["pred"]]) expect_equivalent(cooks.distance(res), c(2.0741, 0.7664, 0.2902, 0.0576), tolerance=.tol[["inf"]]) expect_equivalent(dfbetas(res)[[2]], c(1.0954, -0.4153, -0.1912, 0.1369), tolerance=.tol[["inf"]]) expect_equivalent(hatvalues(res), c(0.7, 0.3, 0.3, 0.7)) expect_equivalent(ranef(res)$pred, c(-0.1556, 0.3370, -0.2074, 0.0259), tolerance=.tol[["pred"]]) expect_equivalent(rstandard(res)$resid, c(-0.6, 1.3, -0.8, 0.1)) expect_equivalent(rstudent(res)$resid, c(-2, 1.8571, -1.1429, 0.3333), tolerance=.tol[["pred"]]) expect_equivalent(length(simulate(res, seed=1234)[[1]]), 4) expect_equivalent(weights(res), c(25, 25, 25, 25)) options(na.action = "na.pass") # note: all of these are of the same length as the original data expect_equivalent(fitted(res), c(-0.7, 0.4, NA, 2.6, 3.7, 4.8, 5.9)) # note: can compute 
fitted value even for the study with missing yi and the study with missing vi expect_equivalent(resid(res), c(NA, 0.6, NA, -0.6, 1.3, -0.8, 0.1)) # note: can compute residual value even for the study with missing vi expect_equivalent(predict(res)$pred, c(-0.7, 0.4, NA, 2.6, 3.7, 4.8, 5.9)) expect_equivalent(blup(res)$pred, c(NA, NA, NA, 2.4444, 4.0370, 4.5926, 5.9259), tolerance=.tol[["pred"]]) expect_equivalent(cooks.distance(res), c(NA, NA, NA, 2.0741, 0.7664, 0.2902, 0.0576), tolerance=.tol[["inf"]]) expect_equivalent(dfbetas(res)[[2]], c(NA, NA, NA, 1.0954, -0.4153, -0.1912, 0.1369), tolerance=.tol[["inf"]]) expect_equivalent(hatvalues(res), c(NA, NA, NA, 0.7, 0.3, 0.3, 0.7)) expect_equivalent(ranef(res)$pred, c(NA, NA, NA, -0.1556, 0.3370, -0.2074, 0.0259), tolerance=.tol[["pred"]]) expect_equivalent(rstandard(res)$resid, c(NA, NA, NA, -0.6, 1.3, -0.8, 0.1)) expect_equivalent(rstudent(res)$resid, c(NA, NA, NA, -2, 1.8571, -1.1429, 0.3333), tolerance=.tol[["pred"]]) expect_equivalent(length(simulate(res, seed=1234)[[1]]), 7) expect_equivalent(weights(res), c(NA, NA, NA, 25, 25, 25, 25)) options(na.action = "na.exclude") # note: all of these are of the same length as the original data, but are NA for studies 1, 2, and 3 expect_equivalent(fitted(res), c(NA, NA, NA, 2.6, 3.7, 4.8, 5.9)) expect_equivalent(resid(res), c(NA, NA, NA, -0.6, 1.3, -0.8, 0.1)) expect_equivalent(predict(res)$pred, c(NA, NA, NA, 2.6, 3.7, 4.8, 5.9)) expect_equivalent(blup(res)$pred, c(NA, NA, NA, 2.4444, 4.0370, 4.5926, 5.9259), tolerance=.tol[["pred"]]) expect_equivalent(cooks.distance(res), c(NA, NA, NA, 2.0741, 0.7664, 0.2902, 0.0576), tolerance=.tol[["inf"]]) expect_equivalent(dfbetas(res)[[2]], c(NA, NA, NA, 1.0954, -0.4153, -0.1912, 0.1369), tolerance=.tol[["inf"]]) expect_equivalent(hatvalues(res), c(NA, NA, NA, 0.7, 0.3, 0.3, 0.7)) expect_equivalent(ranef(res)$pred, c(NA, NA, NA, -0.1556, 0.3370, -0.2074, 0.0259), tolerance=.tol[["pred"]]) expect_equivalent(rstandard(res)$resid, c(NA, NA, NA, -0.6, 1.3, -0.8, 0.1)) expect_equivalent(rstudent(res)$resid, c(NA, NA, NA, -2, 1.8571, -1.1429, 0.3333), tolerance=.tol[["pred"]]) expect_equivalent(length(simulate(res, seed=1234)[[1]]), 7) expect_equivalent(weights(res), c(NA, NA, NA, 25, 25, 25, 25)) }) test_that("NAs are correctly handled by rma.mv() intercept-only models.", { dat <- get(data(dat.konstantopoulos2011, package="metafor")) res1 <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat) res2 <- rma.mv(yi, vi, random = ~ factor(study) | district, data=dat) expect_equivalent(logLik(res1), logLik(res2), tolerance=.tol[["fit"]]) dat$yi[1:2] <- NA expect_warning(res1 <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat)) expect_warning(res2 <- rma.mv(yi, vi, random = ~ factor(study) | district, data=dat)) expect_equivalent(logLik(res1), logLik(res2), tolerance=.tol[["fit"]]) dat$yi[1:4] <- NA # entire district 11 is missing expect_warning(res1 <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat)) expect_warning(res2 <- rma.mv(yi, vi, random = ~ factor(study) | district, data=dat)) expect_equivalent(logLik(res1), logLik(res2), tolerance=.tol[["fit"]]) }) metafor/tests/testthat/test_misc_to_long_table_wide.r0000644000176200001440000002011513765733035023001 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: to.long() function") source("tolerances.r") # read in tolerances test_that("to.long() works correctly for measure='MD'", { dat <- dat.normand1999 sav <- to.long(measure="MD", m1i=m1i, 
sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) sav <- sav[,c(1,10:13)] expected <- structure(list(study = c(1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L), group = structure(c(2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L), .Label = c("2", "1"), class = "factor"), mean = c(55L, 75L, 27L, 29L, 64L, 119L, 66L, 137L), sd = c(47L, 64L, 7L, 4L, 17L, 29L, 20L, 48L), n = c(155L, 156L, 31L, 32L, 75L, 71L, 18L, 18L)), class = "data.frame", row.names = c(NA, 8L)) expect_equivalent(sav, expected) }) test_that("to.table() works correctly for measure='MD'", { dat <- dat.normand1999 sav <- to.table(measure="MD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) expected <- structure(c(55L, 75L, 47L, 64L, 155L, 156L, 27L, 29L, 7L, 4L, 31L, 32L, 64L, 119L, 17L, 29L, 75L, 71L, 66L, 137L, 20L, 48L, 18L, 18L), .Dim = 2:4, .Dimnames = list(c("Grp1", "Grp2"), c("Mean", "SD", "n"), c("1", "2", "3", "4"))) expect_equivalent(sav, expected) }) test_that("to.long() works correctly for measure='COR'", { dat <- dat.molloy2014 sav <- to.long(measure="COR", ri=ri, ni=ni, data=dat, subset=1:4) sav <- sav[,c(11:13)] expected <- structure(list(study = structure(1:4, .Label = c("1", "2", "3", "4"), class = "factor"), r = c(0.187, 0.162, 0.34, 0.32), n = c(109L, 749L, 55L, 107L)), class = "data.frame", row.names = c(NA, 4L )) expect_equivalent(sav, expected) }) test_that("to.table() works correctly for measure='COR'", { dat <- dat.molloy2014 sav <- to.table(measure="COR", ri=ri, ni=ni, data=dat, subset=1:4) expected <- structure(c(0.187, 109, 0.162, 749, 0.34, 55, 0.32, 107), .Dim = c(1L, 2L, 4L), .Dimnames = list("Grp", c("r", "n"), c("1", "2", "3", "4"))) expect_equivalent(sav, expected) }) test_that("to.long() works correctly for measure='PR'", { dat <- dat.debruin2009 sav <- to.long(measure="PR", xi=xi, ni=ni, data=dat, subset=1:4) sav <- sav[,c(11:13)] expected <- structure(list(study = structure(1:4, .Label = c("1", "2", "3", "4"), class = "factor"), out1 = c(11L, 24L, 179L, 82L), out2 = c(18L, 9L, 147L, 158L)), class = "data.frame", row.names = c(NA, 4L)) expect_equivalent(sav, expected) }) test_that("to.table() works correctly for measure='PR'", { dat <- dat.debruin2009 sav <- to.table(measure="PR", xi=xi, ni=ni, data=dat, subset=1:4) expected <- structure(c(11, 18, 24, 9, 179, 147, 82, 158), .Dim = c(1, 2, 4), .Dimnames = list("Grp", c("Out1", "Out2"), c("1", "2", "3", "4"))) expect_equivalent(sav, expected) }) test_that("to.long() works correctly for measure='IR'", { dat <- dat.hart1999 sav <- to.long(measure="IR", xi=x1i, ti=t1i, data=dat, subset=1:4) sav <- sav[,c(1,14:15)] expected <- structure(list(trial = 1:4, events = c(9L, 8L, 3L, 6L), ptime = c(413L, 263L, 487L, 237L)), class = "data.frame", row.names = c(NA, 4L)) expect_equivalent(sav, expected) }) test_that("to.table() works correctly for measure='IR'", { dat <- dat.hart1999 sav <- to.table(measure="IR", xi=x1i, ti=t1i, data=dat, subset=1:4) expected <- structure(c(9, 413, 8, 263, 3, 487, 6, 237), .Dim = c(1, 2, 4), .Dimnames = list("Grp", c("Events", "Person-Time"), c("1", "2", "3", "4"))) expect_equivalent(sav, expected) }) test_that("to.long() works correctly for measure='MN'", { dat <- dat.normand1999 sav <- to.long(measure="MN", mi=m1i, sdi=sd1i, ni=n1i, data=dat, subset=1:4) sav <- sav[,c(1,10:12)] expected <- structure(list(study = 1:4, mean = c(55L, 27L, 64L, 66L), sd = c(47L, 7L, 17L, 20L), n = c(155L, 31L, 75L, 18L)), class = "data.frame", row.names = c(NA, 4L)) expect_equivalent(sav, expected) }) test_that("to.table() 
works correctly for measure='MN'", { dat <- dat.normand1999 sav <- to.table(measure="MN", mi=m1i, sdi=sd1i, ni=n1i, data=dat, subset=1:4) expected <- structure(c(55, 47, 155, 27, 7, 31, 64, 17, 75, 66, 20, 18), .Dim = c(1, 3, 4), .Dimnames = list("Grp", c("Mean", "SD", "n"), c("1", "2", "3", "4"))) expect_equivalent(sav, expected) }) ### create dataset (from Morris, 2008) datT <- data.frame( m_pre = c(30.6, 23.5, 0.5, 53.4, 35.6), m_post = c(38.5, 26.8, 0.7, 75.9, 36.0), sd_pre = c(15.0, 3.1, 0.1, 14.5, 4.7), sd_post = c(11.6, 4.1, 0.1, 4.4, 4.6), ni = c(20, 50, 9, 10, 14), ri = c(.47, .64, .77, .89, .44)) test_that("to.long() works correctly for measure='SMCR'", { sav <- to.long(measure="SMCR", m1i=m_post, m2i=m_pre, sd1i=sd_pre, ni=ni, ri=ri, data=datT, subset=2:4) sav <- sav[,c(7:12)] expected <- structure(list(study = structure(1:3, .Label = c("2", "3", "4"), class = "factor"), mean1 = c(26.8, 0.7, 75.9), mean2 = c(23.5, 0.5, 53.4), sd1 = c(3.1, 0.1, 14.5), n = c(50, 9, 10), r = c(0.64, 0.77, 0.89)), class = "data.frame", row.names = c(NA, 3L)) expect_equivalent(sav, expected) }) test_that("to.table() works correctly for measure='SMCR'", { sav <- to.table(measure="SMCR", m1i=m_post, m2i=m_pre, sd1i=sd_pre, ni=ni, ri=ri, data=datT, subset=2:4) expected <- structure(c(26.8, 23.5, 3.1, 50, 0.64, 0.7, 0.5, 0.1, 9, 0.77, 75.9, 53.4, 14.5, 10, 0.89), .Dim = c(1, 5, 3), .Dimnames = list("Grp", c("Mean1", "Mean2", "SD1", "n", "r"), c("2", "3", "4"))) expect_equivalent(sav, expected) }) test_that("to.long() works correctly for measure='ARAW'", { dat <- dat.bonett2010 sav <- to.long(measure="AHW", ai=ai, mi=mi, ni=ni, data=dat, subset=1:4) sav <- sav[,c(1,8:10)] expected <- structure(list(study = 1:4, alpha = c(0.93, 0.91, 0.94, 0.89), m = c(20L, 20L, 20L, 20L), n = c(103L, 64L, 118L, 401L)), class = "data.frame", row.names = c(NA, 4L)) expect_equivalent(sav, expected) }) test_that("to.table() works correctly for measure='ARAW'", { dat <- dat.bonett2010 sav <- to.table(measure="AHW", ai=ai, mi=mi, ni=ni, data=dat, subset=1:4) expected <- structure(c(0.93, 20, 103, 0.91, 20, 64, 0.94, 20, 118, 0.89, 20, 401), .Dim = c(1, 3, 4), .Dimnames = list("Grp", c("alpha", "m", "n"), c("1", "2", "3", "4"))) expect_equivalent(sav, expected) }) test_that("to.wide() works correctly.", { dat.l <- dat.hasselblad1998 dat.c <- to.wide(dat.l, study="study", grp="trt", ref="no_contact", grpvars=6:7) expect_equivalent(dat.c$xi.1, c(363, 10, 23, 9, 237, 9, 16, 31, 26, 29, 12, 17, 77, 21, 107, 20, 3, 32, 8, 34, 9, 19, 143, 36, 73, 54)) expect_equivalent(dat.c$xi.2, c(75, 9, 9, 2, 58, 0, 20, 3, 1, 11, 11, 6, 79, 18, 64, 12, 9, 7, 5, 20, 0, 8, 95, 15, 78, 69)) expect_equivalent(dat.c$comp, c("in-no", "gr-no", "in-no", "in-no", "in-no", "in-no", "in-se", "in-no", "in-no", "gr-se", "in-se", "in-no", "se-no", "se-no", "in-no", "gr-in", "gr-in", "gr-se", "in-no", "in-no", "gr-no", "se-no", "in-no", "in-no", "in-no", "in-no")) expect_equivalent(dat.c$design, c("in-no", "gr-in-no", "gr-in-no", "in-no", "in-no", "in-no", "in-se", "in-no", "in-no", "gr-in-se", "gr-in-se", "in-no", "se-no", "se-no", "in-no", "gr-in", "gr-in", "gr-se", "in-no", "in-no", "gr-no", "se-no", "in-no", "in-no", "in-no", "in-no")) dat.l$trt <- factor(dat.l$trt, levels=c("no_contact", "ind_counseling", "grp_counseling", "self_help")) dat.c <- to.wide(dat.l, study="study", grp="trt", grpvars=5:7, postfix=c(".T",".C"), minlen=1) expect_equivalent(dat.c$xi.T, c(363, 23, 10, 9, 237, 9, 16, 31, 26, 12, 29, 17, 77, 21, 107, 12, 9, 32, 8, 34, 9, 19, 143, 36, 73, 
54)) expect_equivalent(dat.c$xi.C, c(75, 9, 9, 2, 58, 0, 20, 3, 1, 11, 11, 6, 79, 18, 64, 20, 3, 7, 5, 20, 0, 8, 95, 15, 78, 69)) expect_equivalent(dat.c$comp, c("i-n", "i-n", "g-n", "i-n", "i-n", "i-n", "i-s", "i-n", "i-n", "i-s", "g-s", "i-n", "s-n", "s-n", "i-n", "i-g", "i-g", "g-s", "i-n", "i-n", "g-n", "s-n", "i-n", "i-n", "i-n", "i-n")) expect_equivalent(dat.c$design, c("i-n", "i-g-n", "i-g-n", "i-n", "i-n", "i-n", "i-s", "i-n", "i-n", "i-g-s", "i-g-s", "i-n", "s-n", "s-n", "i-n", "i-g", "i-g", "g-s", "i-n", "i-n", "g-n", "s-n", "i-n", "i-n", "i-n", "i-n")) }) metafor/tests/testthat/test_analysis_example_rothman2008.r0000644000176200001440000004350414052500412023532 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:rothman2008 context("Checking analysis example: rothman2008") source("tolerances.r") # read in tolerances ############################################################################ ### create dataset (Table 15-1) dat <- data.frame( age = c("Age <55", "Age 55+"), ai = c(8,22), bi = c(98,76), ci = c(5,16), di = c(115,69), stringsAsFactors=FALSE) test_that("the to.table() function works.", { tmp <- to.table(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", slab=age, rows=c("Tolbutamide", "Placebo"), cols=c("Dead", "Surviving")) expected <- structure(c(8, 5, 98, 115, 22, 16, 76, 69), .Dim = c(2L, 2L, 2L), .Dimnames = list(c("Tolbutamide", "Placebo"), c("Dead", "Surviving"), c("Age <55", "Age 55+"))) ### compare with data in Table 15-1 expect_equivalent(tmp, expected) }) test_that("the to.long() function works.", { tmp <- to.long(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", slab=age) expected <- structure(list(age = c("Age <55", "Age <55", "Age 55+", "Age 55+"), ai = c(8, 8, 22, 22), bi = c(98, 98, 76, 76), ci = c(5, 5, 16, 16), di = c(115, 115, 69, 69), study = structure(c(2L, 2L, 1L, 1L), .Label = c("Age 55+", "Age <55"), class = "factor"), group = structure(c(2L, 1L, 2L, 1L), .Label = c("2", "1"), class = "factor"), out1 = c(8, 5, 22, 16), out2 = c(98, 115, 76, 69)), class = "data.frame", row.names = c(NA, 4L)) expect_equivalent(tmp, expected) }) test_that("the stratum-specific and crude risk differences are computed correctly.", { ### stratum-specific risk differences tmp <- summary(escalc(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="RD", digits=3, append=FALSE)) tmp <- as.matrix(tmp[1:4]) expected <- structure(c(0.0338, 0.0363, 0.001, 0.0036, 0.0315, 0.0598, 1.0738, 0.6064), .Dim = c(2L, 4L), .Dimnames = list(NULL, c("yi", "vi", "sei", "zi"))) ### compare with data in Table 15-1 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) ### crude risk difference tmp <- summary(escalc(ai=sum(ai), bi=sum(bi), ci=sum(ci), di=sum(di), data=dat, measure="RD", digits=3, append=FALSE)) tmp <- as.matrix(tmp[1:4]) expected <- structure(c(0.0446, 0.0011, 0.0326, 1.3683), .Dim = c(1L, 4L), .Dimnames = list(NULL, c("yi", "vi", "sei", "zi"))) ### compare with data in Table 15-1 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) test_that("the stratum-specific and crude risk ratios are computed correctly.", { ### stratum-specific risk ratios tmp <- summary(escalc(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="RR", digits=2), transf=exp, append=FALSE) tmp <- as.matrix(tmp) expected <- structure(c(1.8113, 1.1926, 0.6112, 0.6713, 5.3679, 2.1188), .Dim = 2:3, .Dimnames = list(NULL, c("yi", "ci.lb", "ci.ub"))) ### compare with data in Table 15-1 expect_equivalent(tmp, 
expected, tolerance=.tol[["misc"]]) ### crude risk ratio tmp <- summary(escalc(ai=sum(ai), bi=sum(bi), ci=sum(ci), di=sum(di), data=dat, measure="RR", digits=2, append=FALSE), transf=exp) tmp <- as.matrix(tmp) expected <- structure(c(1.4356, 0.851, 2.4216), .Dim = c(1L, 3L), .Dimnames = list(NULL, c("yi", "ci.lb", "ci.ub"))) ### compare with data in Table 15-1 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) test_that("results are correct for Mantel-Haenszel method.", { ### Mantel-Haenszel method with risk differences res <- rma.mh(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="RD", digits=3, level=90) out <- capture.output(print(res)) ### so that print.rma.mh() is used expect_equivalent(coef(res), 0.0349, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.0176, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.0874, tolerance=.tol[["ci"]]) ### 0.088 in chapter expect_equivalent(res$QE, 0.0017, tolerance=.tol[["test"]]) ### 0.001 in chapter expect_equivalent(res$QEp, 0.9669, tolerance=.tol[["pval"]]) ### Mantel-Haenszel method with risk ratios res <- rma.mh(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="RR", digits=2, level=90) out <- capture.output(print(res)) ### so that print.rma.mh() is used expect_equivalent(coef(res), 0.2818, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.1442, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.7078, tolerance=.tol[["ci"]]) expect_equivalent(res$QE, 0.4472, tolerance=.tol[["test"]]) expect_equivalent(res$QEp, 0.5037, tolerance=.tol[["pval"]]) tmp <- c(confint(res, transf=exp)$fixed) expect_equivalent(tmp, c(1.3256, 0.8658, 2.0296), tolerance=.tol[["ci"]]) ### Mantel-Haenszel method with odds ratios res <- rma.mh(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", correct=FALSE, digits=2, level=90) out <- capture.output(print(res)) ### so that print.rma.mh() is used expect_equivalent(coef(res), 0.3387, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.1731, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.8505, tolerance=.tol[["ci"]]) expect_equivalent(res$QE, 0.3474, tolerance=.tol[["test"]]) expect_equivalent(res$QEp, 0.5556, tolerance=.tol[["pval"]]) expect_equivalent(res$CO, 1.1976, tolerance=.tol[["test"]]) expect_equivalent(res$COp, 0.2738, tolerance=.tol[["pval"]]) expect_equivalent(res$MH, 1.1914, tolerance=.tol[["test"]]) expect_equivalent(res$MHp, 0.2750, tolerance=.tol[["pval"]]) expect_equivalent(res$TA, 0.3489, tolerance=.tol[["test"]]) expect_equivalent(res$TAp, 0.5547, tolerance=.tol[["pval"]]) tmp <- c(confint(res, transf=exp)$fixed) expect_equivalent(tmp, c(1.4031, 0.8411, 2.3409), tolerance=.tol[["ci"]]) skip_on_cran() ### conditional MLE of the odds ratio res <- rma.glmm(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", model="CM.EL", method="FE") expect_equivalent(coef(res), 0.3381, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.2707, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.9468, tolerance=.tol[["ci"]]) expect_equivalent(res$QE.Wld, 0.3484, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.Wld, 0.5550, tolerance=.tol[["pval"]]) expect_equivalent(res$QE.LRT, 0.3502, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.LRT, 0.5540, tolerance=.tol[["pval"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 1.4022, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.7629, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 2.5774, tolerance=.tol[["ci"]]) }) ############################################################################ ### 
create dataset (Table 15-2) dat <- data.frame( age = c("35-44", "45-54", "55-64", "65-74", "75-84"), x1i = c(32, 104, 206, 186, 102), t1i = c(52407, 43248, 28612, 12663, 5317) / 10000, x2i = c(2, 12, 28, 28, 31), t2i = c(18790, 10673, 5710, 2585, 1462) / 10000, stringsAsFactors=FALSE) test_that("the to.table() function works.", { tmp <- to.table(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", slab=age, rows=c("Smokers", "Nonsmokers"), cols=c("Deaths", "Years")) expected <- structure(c(32, 2, 5.2407, 1.879, 104, 12, 4.3248, 1.0673, 206, 28, 2.8612, 0.571, 186, 28, 1.2663, 0.2585, 102, 31, 0.5317, 0.1462), .Dim = c(2L, 2L, 5L), .Dimnames = list(c("Smokers", "Nonsmokers"), c("Deaths", "Years"), c("35-44", "45-54", "55-64", "65-74", "75-84"))) ### compare with data in Table 15-2 expect_equivalent(tmp, expected) }) test_that("the to.long() function works.", { tmp <- to.long(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", slab=age) expected <- structure(list(age = c("35-44", "35-44", "45-54", "45-54", "55-64", "55-64", "65-74", "65-74", "75-84", "75-84"), x1i = c(32, 32, 104, 104, 206, 206, 186, 186, 102, 102), t1i = c(5.2407, 5.2407, 4.3248, 4.3248, 2.8612, 2.8612, 1.2663, 1.2663, 0.5317, 0.5317), x2i = c(2, 2, 12, 12, 28, 28, 28, 28, 31, 31), t2i = c(1.879, 1.879, 1.0673, 1.0673, 0.571, 0.571, 0.2585, 0.2585, 0.1462, 0.1462), study = structure(c(1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L), .Label = c("35-44", "45-54", "55-64", "65-74", "75-84"), class = "factor"), group = structure(c(2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L), .Label = c("2", "1"), class = "factor"), events = c(32, 2, 104, 12, 206, 28, 186, 28, 102, 31), ptime = c(5.2407, 1.879, 4.3248, 1.0673, 2.8612, 0.571, 1.2663, 0.2585, 0.5317, 0.1462)), class = "data.frame", row.names = c(NA, 10L)) expect_equivalent(tmp, expected) }) test_that("the stratum-specific and crude rate differences are computed correctly.", { ### stratum-specific rate differences tmp <- summary(escalc(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRD", digits=1, append=FALSE)) tmp <- as.matrix(tmp[1:4]) expected <- structure(c(5.0417, 12.804, 22.961, 38.5674, -20.2008, 1.7316, 16.0947, 111.0423, 535.0172, 1811.1307, 1.3159, 4.0118, 10.5377, 23.1304, 42.5574, 3.8313, 3.1916, 2.1789, 1.6674, -0.4747), .Dim = c(5L, 4L), .Dimnames = list(NULL, c("yi", "vi", "sei", "zi"))) ### compare with data in Table 15-2 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) ### crude rate difference tmp <- summary(escalc(x1i=sum(x1i), x2i=sum(x2i), t1i=sum(t1i), t2i=sum(t2i), data=dat, measure="IRD", digits=1, append=FALSE)) tmp <- as.matrix(tmp[1:4]) expected <- structure(c(18.537, 9.6796, 3.1112, 5.9581), .Dim = c(1L, 4L), .Dimnames = list(NULL, c("yi", "vi", "sei", "zi"))) ### compare with data in Table 15-2 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) test_that("the stratum-specific and crude rate ratios are computed correctly.", { ### stratum-specific rate ratios tmp <- summary(escalc(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", digits=1, append=FALSE), transf=exp) tmp <- as.matrix(tmp) expected <- structure(c(5.7366, 2.1388, 1.4682, 1.3561, 0.9047, 1.3748, 1.1767, 0.9894, 0.9115, 0.6053, 23.9371, 3.8876, 2.1789, 2.0176, 1.3524), .Dim = c(5L, 3L), .Dimnames = list(NULL, c("yi", "ci.lb", "ci.ub"))) ### compare with data in Table 15-2 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) ### crude rate ratio tmp <- summary(escalc(x1i=sum(x1i), x2i=sum(x2i), t1i=sum(t1i), t2i=sum(t2i), data=dat, 
measure="IRR", digits=1, append=FALSE), transf=exp) tmp <- as.matrix(tmp) expected <- structure(c(1.7198, 1.394, 2.1219), .Dim = c(1L, 3L), .Dimnames = list(NULL, c("yi", "ci.lb", "ci.ub"))) ### compare with data in Table 15-2 expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) test_that("results are correct for Mantel-Haenszel method.", { ### Mantel-Haenszel method with rate differences res <- rma.mh(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRD", digits=2, level=90) expect_equivalent(coef(res), 11.4392, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 6.3498, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 16.5286, tolerance=.tol[["ci"]]) expect_equivalent(res$QE, 26.8758, tolerance=.tol[["test"]]) expect_equivalent(res$QEp, 0.0000, tolerance=.tol[["pval"]]) ### Mantel-Haenszel method with rate ratios res <- rma.mh(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", digits=2, level=90) expect_equivalent(coef(res), 0.3539, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.1776, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.5303, tolerance=.tol[["ci"]]) expect_equivalent(res$QE, 10.4117, tolerance=.tol[["test"]]) expect_equivalent(res$QEp, 0.0340, tolerance=.tol[["pval"]]) expect_equivalent(res$MH, 10.7021, tolerance=.tol[["test"]]) expect_equivalent(res$MHp, 0.0011, tolerance=.tol[["pval"]]) tmp <- c(confint(res, transf=exp)$fixed) expect_equivalent(tmp, c(1.4247, 1.1944, 1.6994), tolerance=.tol[["ci"]]) ### Mantel-Haenszel test without continuity correction res <- rma.mh(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", level=90, correct=FALSE) expect_equivalent(res$MH, 11.0162, tolerance=.tol[["test"]]) expect_equivalent(res$MHp, 0.0009, tolerance=.tol[["pval"]]) skip_on_cran() ### unconditional MLE of the rate ratio res <- rma.glmm(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", digits=2, level=90, model="UM.FS", method="FE") expect_equivalent(coef(res), 0.3545, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.1779, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.5312, tolerance=.tol[["ci"]]) expect_equivalent(res$QE.Wld, 10.1991, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.Wld, 0.0372, tolerance=.tol[["pval"]]) expect_equivalent(res$QE.LRT, 12.1324, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.LRT, 0.0164, tolerance=.tol[["pval"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 1.4255, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 1.1947, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 1.7009, tolerance=.tol[["ci"]]) ### conditional MLE of the rate ratio res <- rma.glmm(x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, data=dat, measure="IRR", digits=2, level=90, model="CM.EL", method="FE") expect_equivalent(coef(res), 0.3545, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.1779, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.5312, tolerance=.tol[["ci"]]) expect_equivalent(res$QE.Wld, 10.1991, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.Wld, 0.0372, tolerance=.tol[["pval"]]) expect_equivalent(res$QE.LRT, 12.1324, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.LRT, 0.0164, tolerance=.tol[["pval"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 1.4255, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 1.1947, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 1.7009, tolerance=.tol[["ci"]]) }) ############################################################################ ### create dataset (Table 15-5) dat <- 
data.frame( age = c("<35", "35+"), ai = c(3,1), bi = c(9,3), ci = c(104,5), di = c(1059,86), stringsAsFactors=FALSE) test_that("the to.table() function works.", { tmp <- to.table(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", slab=age, rows=c("Down Syndrome", "Control"), cols=c("Spermicide Use", "No Spermicide")) expected <- structure(c(3, 104, 9, 1059, 1, 5, 3, 86), .Dim = c(2L, 2L, 2L), .Dimnames = list(c("Down Syndrome", "Control"), c("Spermicide Use", "No Spermicide"), c("<35", "35+"))) ### compare with data in Table 15-5 expect_equivalent(tmp, expected) }) test_that("the to.long() function works.", { tmp <- to.long(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", slab=age) expected <- structure(list(age = c("<35", "<35", "35+", "35+"), ai = c(3, 3, 1, 1), bi = c(9, 9, 3, 3), ci = c(104, 104, 5, 5), di = c(1059, 1059, 86, 86), study = structure(c(2L, 2L, 1L, 1L), .Label = c("35+", "<35"), class = "factor"), group = structure(c(1L, 2L, 1L, 2L), .Label = c("1", "2"), class = "factor"), out1 = c(3, 104, 1, 5), out2 = c(9, 1059, 3, 86)), class = "data.frame", row.names = c(NA, 4L)) expect_equivalent(tmp, expected) }) test_that("results are correct for Mantel-Haenszel method.", { ### Mantel-Haenszel method with odds ratios res <- rma.mh(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", digits=2, level=90, correct=FALSE) expect_equivalent(coef(res), 1.3300, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.3579, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 2.3021, tolerance=.tol[["ci"]]) expect_equivalent(res$QE, 0.1378, tolerance=.tol[["test"]]) expect_equivalent(res$QEp, 0.7105, tolerance=.tol[["pval"]]) expect_equivalent(res$CO, 5.8248, tolerance=.tol[["test"]]) expect_equivalent(res$COp, 0.0158, tolerance=.tol[["pval"]]) expect_equivalent(res$MH, 5.8092, tolerance=.tol[["test"]]) expect_equivalent(res$MHp, 0.0159, tolerance=.tol[["pval"]]) expect_equivalent(res$TA, 0.1391, tolerance=.tol[["test"]]) expect_equivalent(res$TAp, 0.7092, tolerance=.tol[["pval"]]) tmp <- c(confint(res, transf=exp)$fixed) expect_equivalent(tmp, c(3.7812, 1.4304, 9.9954), tolerance=.tol[["ci"]]) skip_on_cran() ### unconditional MLE of the odds ratio res <- rma.glmm(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", digits=2, level=90, model="UM.FS", method="FE") expect_equivalent(coef(res), 1.3318, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.3582, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 2.3053, tolerance=.tol[["ci"]]) expect_equivalent(res$QE.Wld, 0.1374, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.Wld, 0.7109, tolerance=.tol[["pval"]]) expect_equivalent(res$QE.LRT, 0.1324, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.LRT, 0.7160, tolerance=.tol[["pval"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 3.7878, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 1.4308, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 10.0276, tolerance=.tol[["ci"]]) ### conditional MLE of the odds ratio res <- rma.glmm(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR", digits=2, level=90, model="CM.EL", method="FE") expect_equivalent(coef(res), 1.3257, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.3551, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 2.2962, tolerance=.tol[["ci"]]) expect_equivalent(res$QE.Wld, 0.1237, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.Wld, 0.7250, tolerance=.tol[["pval"]]) expect_equivalent(res$QE.LRT, 0.1188, tolerance=.tol[["test"]]) expect_equivalent(res$QEp.LRT, 0.7304, 
tolerance=.tol[["pval"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 3.7647, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 1.4264, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 9.9361, tolerance=.tol[["ci"]]) }) ############################################################################ metafor/tests/testthat/test_misc_list_rma.r0000644000176200001440000000447513465317513021002 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: head.list.rma() and tail.list.rma() functions") source("tolerances.r") # read in tolerances test_that("head.list.rma() works correctly.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) res <- head(rstandard(res), 4) sav <- structure(list(resid = c(-0.1748, -0.8709, -0.6335, -0.727), se = c(0.7788, 0.6896, 0.8344, 0.5486), z = c(-0.2244, -1.2629, -0.7592, -1.3253), slab = 1:4, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4)), class = "list.rma") expect_equivalent(res, sav, tolerance=.tol[["misc"]]) }) test_that("tail.list.rma() works correctly.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) res <- tail(rstandard(res), 4) sav <- structure(list(resid = c(-0.6568, 0.3752, 1.1604, 0.6972), se = c(0.5949, 0.5416, 0.9019, 0.5936), z = c(-1.104, 0.6927, 1.2867, 1.1746 ), slab = 10:13, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4)), class = "list.rma") expect_equivalent(res, sav, tolerance=.tol[["misc"]]) }) test_that("as.data.frame.list.rma() works correctly.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat, data=dat) res <- predict(res) res <- as.data.frame(res) res <- res[1:3,1:2] sav <- structure(list(pred = c(-1.029, -1.3491, -0.9708), se = c(0.1404, 0.2011, 0.1315)), .Names = c("pred", "se"), row.names = c(NA, 3L), class = "data.frame") expect_equivalent(res, sav, tolerance=.tol[["misc"]]) }) test_that("as.matrix.list.rma() works correctly.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat, data=dat) res <- predict(res) res <- as.matrix(res) res <- res[1:3,1:2] sav <- structure(c(-1.029, -1.3491, -0.9708, 0.1404, 0.2011, 0.1315), .Dim = c(3L, 2L), .Dimnames = list(c("1", "2", "3"), c("pred", "se"))) expect_equivalent(res, sav, tolerance=.tol[["misc"]]) }) metafor/tests/testthat/test_misc_plot_rma.r0000644000176200001440000000171013465302555020772 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: plot() function") source("tolerances.r") # read in tolerances test_that("plot can be drawn for rma().", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) plot(res) res <- rma(yi ~ ablat, vi, data=dat) plot(res) }) test_that("plot can be drawn for rma.mh().", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message data(dat.bcg, package="metafor") res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) plot(res) }) test_that("plot can be drawn for rma.peto().", { 
expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message data(dat.bcg, package="metafor") res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) plot(res) }) metafor/tests/testthat/test_misc_fitstats.r0000644000176200001440000000711013465262202021010 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: computations of fit statistics") source("tolerances.r") # read in tolerances test_that("fit statistics are correct for rma.uni().", { ### load BCG dataset data(dat.bcg, package="metafor") ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random- and mixed-effects models (with ML estimation) res1 <- rma(yi, vi, data=dat, method="ML") res2 <- rma(yi ~ ablat, vi, data=dat, method="ML") tmp <- c(logLik(res1)) expect_equivalent(tmp, -12.6651, tolerance=.tol[["fit"]]) expect_equivalent(tmp, sum(dnorm(dat$yi, coef(res1), sqrt(dat$vi+res1$tau2), log=TRUE)), tolerance=.tol[["fit"]]) tmp <- deviance(res1) expect_equivalent(tmp, 37.1160, tolerance=.tol[["fit"]]) expect_equivalent(tmp, -2 * (sum(dnorm(dat$yi, coef(res1), sqrt(dat$vi+res1$tau2), log=TRUE)) - sum(dnorm(dat$yi, dat$yi, sqrt(dat$vi), log=TRUE))), tolerance=.tol[["fit"]]) tmp <- AIC(res1) expect_equivalent(tmp, 29.3302, tolerance=.tol[["fit"]]) expect_equivalent(tmp, -2 * sum(dnorm(dat$yi, coef(res1), sqrt(dat$vi+res1$tau2), log=TRUE)) + 2*2, tolerance=.tol[["fit"]]) tmp <- AIC(res1, res2) expect_equivalent(tmp, structure(list(df = c(2, 3), AIC = c(29.3302, 21.3713)), .Names = c("df", "AIC"), row.names = c("res1", "res2"), class = "data.frame"), tolerance=.tol[["fit"]]) tmp <- BIC(res1) expect_equivalent(tmp, 30.4601, tolerance=.tol[["fit"]]) expect_equivalent(tmp, -2 * sum(dnorm(dat$yi, coef(res1), sqrt(dat$vi+res1$tau2), log=TRUE)) + 2*log(res1$k), tolerance=.tol[["fit"]]) tmp <- BIC(res1, res2) expect_equivalent(tmp, structure(list(df = c(2, 3), BIC = c(30.4601, 23.0662)), .Names = c("df", "BIC"), row.names = c("res1", "res2"), class = "data.frame"), tolerance=.tol[["fit"]]) tmp <- c(fitstats(res1)) expect_equivalent(tmp, c(-12.6651, 37.1160, 29.3302, 30.4601, 30.5302), tolerance=.tol[["fit"]]) tmp <- fitstats(res1, res2) expect_equivalent(tmp, structure(list(res1 = c(-12.6651, 37.116, 29.3302, 30.4601, 30.5302), res2 = c(-7.6857, 27.1572, 21.3713, 23.0662, 24.038)), .Names = c("res1", "res2"), row.names = c("logLik:", "deviance:", "AIC:", "BIC:", "AICc:"), class = "data.frame"), tolerance=.tol[["fit"]]) tmp <- nobs(res1) expect_equivalent(tmp, 13) tmp <- df.residual(res1) expect_equivalent(tmp, 12) }) test_that("fit statistics are correct for rma.mv().", { ### load data dat <- dat.berkey1998 ### construct variance-covariance matrix of the observed outcomes V <- bldiag(lapply(split(dat[,c("v1i", "v2i")], dat$trial), as.matrix)) ### multiple outcomes random-effects model (with ML estimation) res <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") tmp <- c(logLik(res)) expect_equivalent(tmp, 5.8407, tolerance=.tol[["fit"]]) tmp <- deviance(res) expect_equivalent(tmp, 25.6621, tolerance=.tol[["fit"]]) tmp <- AIC(res) expect_equivalent(tmp, -1.6813, tolerance=.tol[["fit"]]) expect_equivalent(tmp, -2 * c(logLik(res)) + 2*5, tolerance=.tol[["fit"]]) tmp <- BIC(res) expect_equivalent(tmp, -0.1684, tolerance=.tol[["fit"]]) expect_equivalent(tmp, -2 * c(logLik(res)) + 5*log(res$k), tolerance=.tol[["fit"]]) tmp <- 
c(fitstats(res)) expect_equivalent(tmp, c(5.8407, 25.6621, -1.6813, -0.1684, 13.3187), tolerance=.tol[["fit"]]) tmp <- nobs(res) expect_equivalent(tmp, 10) tmp <- df.residual(res) expect_equivalent(tmp, 8) }) metafor/tests/testthat/test_misc_pub_bias.r0000644000176200001440000000270113675405412020741 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: regtest() and ranktest() functions") source("tolerances.r") # read in tolerances test_that("regtest() works correctly for 'rma.uni' objects.", { dat <- dat.egger2001 dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) res <- rma(yi, vi, data=dat) sav <- regtest(res) expect_equivalent(sav$zval, -4.6686, tolerance=.tol[["test"]]) out <- capture.output(print(sav)) ### so that print.regtest.rma() is run (at least once) sav <- regtest(dat$yi, dat$vi) expect_equivalent(sav$zval, -4.6686, tolerance=.tol[["test"]]) sav <- regtest(res, model="lm", predictor="sqrtninv") expect_equivalent(sav$zval, -5.6083, tolerance=.tol[["test"]]) sav <- regtest(dat$yi, dat$vi, model="lm", predictor="sqrtninv") expect_equivalent(sav$zval, -5.6083, tolerance=.tol[["test"]]) }) test_that("ranktest() works correctly for 'rma.uni' objects.", { dat <- dat.egger2001 dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) res <- rma(yi, vi, data=dat) sav <- ranktest(res) expect_equivalent(sav$tau, 0.15) expect_equivalent(sav$pval, 0.4503, tolerance=.tol[["pval"]]) sav <- ranktest(dat$yi, dat$vi) expect_equivalent(sav$tau, 0.15) expect_equivalent(sav$pval, 0.4503, tolerance=.tol[["pval"]]) out <- capture.output(print(sav)) ### so that print.ranktest.rma() is run (at least once) }) metafor/tests/testthat/test_misc_transf.r0000644000176200001440000000366713466272353020472 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: transformation functions") source("tolerances.r") # read in tolerances test_that("transformations work correctly.", { expect_equivalent(transf.rtoz(.5), 0.549306, tolerance=.tol[["est"]]) expect_equivalent(transf.ztor(transf.rtoz(.5)), .5) expect_equivalent(transf.logit(.1), -2.197225, tolerance=.tol[["est"]]) expect_equivalent(transf.ilogit(transf.logit(.1)), .1) expect_equivalent(transf.arcsin(.1), 0.321751, tolerance=.tol[["est"]]) expect_equivalent(transf.iarcsin(transf.arcsin(.1)), .1) expect_equivalent(transf.pft(.1,10), 0.373394, tolerance=.tol[["est"]]) expect_equivalent(transf.ipft(transf.pft(.1,10), 10), .1) expect_equivalent(transf.ipft.hm(transf.pft(.1,10), targs=list(ni=c(10))), .1) expect_equivalent(transf.isqrt(.1), 0.01) expect_equivalent(transf.irft(.1,10), 0.381721, tolerance=.tol[["est"]]) expect_equivalent(transf.iirft(transf.irft(.1,10), 10), .1) expect_equivalent(transf.ahw(.9), 0.535841, tolerance=.tol[["est"]]) expect_equivalent(transf.iahw(transf.ahw(.9)), .9) expect_equivalent(transf.abt(.9), 2.302585, tolerance=.tol[["est"]]) expect_equivalent(transf.iabt(transf.abt(.9)), .9) expect_equivalent(transf.ztor.int(transf.rtoz(.5), targs=list(tau2=0)), .5) expect_equivalent(transf.ztor.int(transf.rtoz(.5), targs=list(tau2=0.1)), 0.46663, tolerance=.tol[["est"]]) expect_equivalent(transf.exp.int(log(.5), targs=list(tau2=0)), .5) expect_equivalent(transf.exp.int(log(.5), targs=list(tau2=0.1)), 0.525635, tolerance=.tol[["est"]]) expect_equivalent(transf.exp.int(log(.5), targs=list(tau2=0.1, lower=-10, upper=10)), exp(log(.5) + 0.1/2), tolerance=.tol[["est"]]) 
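### Added illustrative note (not part of the original metafor test file): the
### transf.*.int() functions back-transform by integrating the naive inverse
### transformation over an assumed N(mu, tau^2) distribution of true effects.
### For the exponential transformation this integral has the closed form
### exp(mu + tau^2/2), which is exactly what the check above with wide
### integration limits reproduces. Minimal sanity check of that identity via
### integrate(); the *.tmp names are hypothetical helpers introduced here only
### for illustration and are not used elsewhere in this file.
mu.tmp <- log(.5); tau2.tmp <- 0.1
f.tmp <- function(x) exp(x) * dnorm(x, mean=mu.tmp, sd=sqrt(tau2.tmp))
expect_equivalent(integrate(f.tmp, lower=-Inf, upper=Inf)$value, exp(mu.tmp + tau2.tmp/2), tolerance=.tol[["est"]])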
expect_equivalent(transf.ilogit.int(transf.logit(.1), targs=list(tau2=0)), .1) expect_equivalent(transf.ilogit.int(transf.logit(.1), targs=list(tau2=0.1)), 0.103591, tolerance=.tol[["est"]]) }) metafor/tests/testthat/test_misc_predict.r0000644000176200001440000000545014024365531020607 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: predict() function") source("tolerances.r") # read in tolerances test_that("predict() correctly matches named vectors in 'newmods'", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) dat$alloc[dat$alloc == "systematic"] <- "system" res <- rma(yi ~ ablat + alloc, vi, data=dat) pred1 <- predict(res, newmods = c(30, 0, 1)) pred2 <- predict(res, newmods = c(abl = 30, ran = 0, sys = 1)) pred3 <- predict(res, newmods = c(abl = 30, sys = 1, ran = 0)) pred4 <- predict(res, newmods = c(ran = 0, abl = 30, sys = 1)) pred5 <- predict(res, newmods = c(sys = 1, abl = 30, ran = 0)) pred6 <- predict(res, newmods = c(ran = 0, sys = 1, abl = 30)) pred7 <- predict(res, newmods = c(sys = 1, ran = 0, abl = 30)) expect_equivalent(pred1, pred2) expect_equivalent(pred1, pred3) expect_equivalent(pred1, pred4) expect_equivalent(pred1, pred5) expect_equivalent(pred1, pred6) expect_equivalent(pred1, pred7) expect_error(predict(res, newmods = c(30, 0))) # not the right length expect_error(predict(res, newmods = c(30, 0, 0, 0))) # not the right length expect_error(predict(res, newmods = c(abl = 30, random = 0))) # not the right length expect_error(predict(res, newmods = c(abl = 30, alloc = 0, sys = 1))) # alloc matches up equally to allocrandom and allocsystem expect_error(predict(res, newmods = c(abl = 30, ran = 0, year = 1970))) # year not in the model expect_error(predict(res, newmods = c(abl = 30, ran = 0, sys = 1, ran = 1))) # ran used twice expect_error(predict(res, newmods = c(abl = 30, ran = 0, sys = 1, rand = 1))) # same issue res <- rma(yi ~ ablat * year, vi, data=dat) pred1 <- predict(res, newmods = c(30, 1970, 30*1970)) pred2 <- predict(res, newmods = c('ablat' = 30, 'year' = 1970, 'ablat:year' = 30*1970)) pred3 <- predict(res, newmods = c('ablat:year' = 30*1970, 'year' = 1970, 'ablat' = 30)) pred4 <- predict(res, newmods = c('ab' = 30, 'ye' = 1970, 'ablat:' = 30*1970)) pred5 <- predict(res, newmods = c('ablat:' = 30*1970, 'ye' = 1970, 'ab' = 30)) expect_equivalent(pred1, pred2) expect_equivalent(pred1, pred3) expect_equivalent(pred1, pred4) expect_equivalent(pred1, pred5) }) test_that("predict() gives correct results when vcov=TRUE", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) sav <- predict(res, vcov=TRUE) expect_equivalent(sav$pred$se, c(sqrt(sav$vcov)), tolerance=.tol[["se"]]) res <- rma(yi, vi, mods = ~ ablat, data=dat) sav <- predict(res, vcov=TRUE) expect_equivalent(sav$pred$se, c(sqrt(diag(sav$vcov))), tolerance=.tol[["se"]]) }) metafor/tests/testthat/test_analysis_example_konstantopoulos2011.r0000644000176200001440000002167014052500370025341 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:konstantopoulos2011 context("Checking analysis example: konstantopoulos2011") source("tolerances.r") # read in tolerances dat <- dat.konstantopoulos2011 test_that("results are correct for the two-level random-effects model fitted with rma().", { res <- rma(yi, vi, 
data=dat) ### compare with results on page 70 (Table 4) expect_equivalent(coef(res), 0.1279, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.0439, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.0884, tolerance=.tol[["var"]]) expect_equivalent(res$se.tau2, 0.0202, tolerance=.tol[["sevar"]]) ### CI for tau^2 based on the Q-profile method (CI in paper is based on a Satterthwaite approximation) tmp <- confint(res, digits=3) out <- capture.output(print(tmp)) ### so that print.confint.rma() is run (at least once) expect_equivalent(tmp$random[1,2], 0.0564, tolerance=.tol[["var"]]) expect_equivalent(tmp$random[1,3], 0.1388, tolerance=.tol[["var"]]) }) test_that("results are correct for the two-level mixed-effects model fitted with rma().", { res <- rma(yi, vi, mods = ~ I(year-mean(year)), data=dat) ### compare with results on page 70 (Table 4) expect_equivalent(coef(res), c(0.1258, 0.0052), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.0440, 0.0044), tolerance=.tol[["se"]]) ### 0.043 in paper expect_equivalent(res$tau2, 0.0889, tolerance=.tol[["var"]]) ### 0.088 in paper expect_equivalent(res$se.tau2, 0.0205, tolerance=.tol[["sevar"]]) ### CI for tau^2 based on the Q-profile method (CI in paper is based on a Satterthwaite approximation) tmp <- confint(res, digits=3) expect_equivalent(tmp$random[1,2], 0.0560, tolerance=.tol[["var"]]) expect_equivalent(tmp$random[1,3], 0.1376, tolerance=.tol[["var"]]) }) test_that("results are correct for the two-level random-effects model fitted with rma.mv().", { res <- rma.mv(yi, vi, random = ~ 1 | study, data=dat) ### compare with results on page 70 (Table 4) expect_equivalent(coef(res), 0.1279, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.0439, tolerance=.tol[["se"]]) expect_equivalent(res$sigma2, 0.0884, tolerance=.tol[["var"]]) }) test_that("results are correct for the three-level random-effects model fitted with rma.mv() using ML estimation.", { ### three-level model (ml = multilevel parameterization) res.ml <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat, method="ML") out <- capture.output(print(res.ml)) out <- capture.output(print(summary(res.ml))) ### compare with results on page 71 (Table 5) expect_equivalent(coef(res.ml), 0.1845, tolerance=.tol[["coef"]]) expect_equivalent(res.ml$se, 0.0805, tolerance=.tol[["se"]]) expect_equivalent(res.ml$sigma2, c(0.0577, 0.0329), tolerance=.tol[["var"]]) sav <- predict(res.ml) expect_equivalent(c(sav$pi.lb, sav$pi.ub), c(-0.4262, 0.7951), tolerance=.tol[["pred"]]) }) test_that("results are correct for the three-level mixed-effects model fitted with rma.mv() using ML estimation.", { ### three-level model (multilevel parameterization) res.ml <- rma.mv(yi, vi, mods = ~ I(year-mean(year)), random = ~ 1 | district/study, data=dat, method="ML") out <- capture.output(print(res.ml)) ### compare with results on page 71 (Table 5) expect_equivalent(coef(res.ml), c(0.1780, 0.0051), tolerance=.tol[["coef"]]) ### intercept is given as 0.183 in paper, but this seems to be a misprint expect_equivalent(res.ml$se, c(0.0805, 0.0085), tolerance=.tol[["se"]]) expect_equivalent(res.ml$sigma2, c(0.0565, 0.0329), tolerance=.tol[["var"]]) }) test_that("results are correct for the three-level random-effects model fitted with rma.mv() using REML estimation.", { ### three-level model (ml = multilevel parameterization) res.ml <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat) out <- capture.output(print(res.ml)) ### (results for this not given in paper) expect_equivalent(coef(res.ml), 0.1847, 
tolerance=.tol[["coef"]]) expect_equivalent(res.ml$se, 0.0846, tolerance=.tol[["se"]]) expect_equivalent(res.ml$sigma2, c(0.0651, 0.0327), tolerance=.tol[["var"]]) ### ICC expect_equivalent(res.ml$sigma2[1] / sum(res.ml$sigma2), 0.6653, tolerance=.tol[["cor"]]) ### total amount of heterogeneity expect_equivalent(sum(res.ml$sigma2), 0.0978, tolerance=.tol[["var"]]) ### log likelihood expect_equivalent(c(logLik(res.ml)), -7.9587, tolerance=.tol[["fit"]]) }) test_that("profiling works for the three-level random-effects model (multilevel parameterization).", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() ### three-level model (ml = multilevel parameterization) res.ml <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat) ### profile variance components opar <- par(no.readonly=TRUE) par(mfrow=c(2,1)) sav <- profile(res.ml, progbar=FALSE) out <- capture.output(print(sav)) par(opar) }) test_that("results are correct for the three-level random-effects model when using the multivariate parameterization.", { ### three-level model (mv = multivariate parameterization) res.mv <- rma.mv(yi, vi, random = ~ factor(study) | district, data=dat) ### (results for this not given in paper) expect_equivalent(coef(res.mv), 0.1847, tolerance=.tol[["coef"]]) expect_equivalent(res.mv$se, 0.0846, tolerance=.tol[["se"]]) expect_equivalent(res.mv$tau2, 0.0978, tolerance=.tol[["var"]]) expect_equivalent(res.mv$rho, 0.6653, tolerance=.tol[["cor"]]) ### log likelihood expect_equivalent(c(logLik(res.mv)), -7.9587, tolerance=.tol[["fit"]]) }) test_that("profiling works for the three-level random-effects model (multivariate parameterization).", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() ### three-level model (mv = multivariate parameterization) res.mv <- rma.mv(yi, vi, random = ~ factor(study) | district, data=dat) ### profile variance components opar <- par(no.readonly=TRUE) par(mfrow=c(2,1)) #profile(res.mv, progbar=FALSE) profile(res.mv, progbar=FALSE, parallel="snow") par(opar) }) test_that("BLUPs are calculated correctly for the three-level random-effects model (multilevel parameterization).", { skip_on_cran() ### three-level model (ml = multilevel parameterization) res.ml <- rma.mv(yi, vi, random = ~ 1 | district/study, data=dat) sav <- ranef(res.ml) expect_equivalent(sav[[1]]$intrcpt, c(-0.190, -0.085, 0.141, 0.241, -0.107, -0.237, 0.534, -0.200, 0.057, -0.142, -0.012), tolerance=.tol[["pred"]]) expect_equivalent(sav[[1]]$se, c( 0.167, 0.124, 0.137, 0.119, 0.119, 0.101, 0.130, 0.101, 0.111, 0.125, 0.150), tolerance=.tol[["se"]]) expect_equivalent(sav[[2]]$intrcpt, c(-0.038, -0.047, 0.044, -0.055, 0.021, -0.252, 0.062, 0.127, 0.073, 0.024, -0.026, -0.165, 0.200, -0.058, 0.144, 0.002, -0.031, 0.098, -0.122, -0.080, 0.033, 0.033, -0.136, 0.007, -0.151, 0.103, 0.043, 0.084, -0.023, -0.031, -0.287, 0.195, 0.361, -0.053, -0.033, 0.006, 0.035, -0.014, 0.015, 0.025, -0.082, 0.198, 0.313, -0.032, -0.19, -0.137, -0.123, -0.289, 0.337, -0.038, 0.118, -0.2, -0.014, 0.125, -0.044, -0.073), tolerance=.tol[["pred"]]) expect_equivalent(sav[[2]]$se, c( 0.164, 0.164, 0.166, 0.166, 0.122, 0.122, 0.123, 0.132, 0.137, 0.146, 0.129, 0.126, 0.103, 0.103, 0.109, 0.125, 0.109, 0.105, 0.103, 0.118, 0.115, 0.117, 0.121, 0.118, 0.119, 0.089, 0.092, 0.092, 0.092, 0.092, 0.127, 0.123, 0.122, 0.069, 0.069, 0.069, 0.069, 0.069, 0.069, 0.069, 0.069, 0.107, 0.109, 0.107, 0.106, 0.113, 0.113, 0.137, 0.137, 0.136, 0.136, 0.136, 0.159, 0.158, 0.155, 0.155), tolerance=.tol[["se"]]) }) 
test_that("results are correct when allowing for different tau^2 per district.", { skip_on_cran() ### shuffle up dat to make sure that this does not affect things set.seed(1234) dat <- dat[sample(nrow(dat)),] res <- rma.mv(yi, vi, random = list(~ 1 | district, ~ factor(district) | study), struct="DIAG", data=dat, control=list(optimizer="optim")) out <- capture.output(print(res, digits=4)) out <- capture.output(print(summary(res, digits=4))) expect_equivalent(coef(res), 0.1270, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.0588, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, c(0.0000, 0.0402, 0.0000, 0.0582, 0.0082, 0.0000, 0.5380, 0.0008, 0.0606, 0.1803, 0.0000), tolerance=.tol[["var"]]) ### check that output is also correct tau2 <- as.numeric(substr(out[grep("tau", out)], 13, 18)) expect_equivalent(res$tau2, c(0.0000, 0.0402, 0.0000, 0.0582, 0.0082, 0.0000, 0.5380, 0.0008, 0.0606, 0.1803, 0.0000), tolerance=.tol[["var"]]) k.lvl <- as.numeric(substr(out[grep("tau", out)], 32, 33)) expect_equivalent(k.lvl, c(4, 4, 3, 4, 4, 11, 3, 8, 6, 5, 4)) level <- as.numeric(substr(out[grep("tau", out)], 45, 47)) expect_equivalent(level, c(11, 12, 18, 27, 56, 58, 71, 86, 91, 108, 644)) }) metafor/tests/testthat/test_misc_matreg.r0000644000176200001440000000633413777126363020452 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: matreg() function") source("tolerances.r") # read in tolerances test_that("matreg() works correctly for 'dat.craft2003'.", { dat <- dat.craft2003 ### construct dataset and var-cov matrix of the correlations tmp <- rcalc(ri ~ var1 + var2 | study, ni=ni, data=dat) V <- tmp$V dat <- tmp$dat out <- capture.output(print(tmp)) sav <- structure(list(study = c("1", "1", "1", "1", "1", "1"), var1 = c("acog", "asom", "conf", "acog", "acog", "asom"), var2 = c("perf", "perf", "perf", "asom", "conf", "conf"), var1.var2 = c("acog.perf", "asom.perf", "conf.perf", "acog.asom", "acog.conf", "asom.conf"), yi = c(-0.55, -0.48, 0.66, 0.47, -0.38, -0.46), ni = c(142L, 142L, 142L, 142L, 142L, 142L)), row.names = c(NA, 6L), class = "data.frame") expect_equivalent(dat[1:6,], sav, tolerance=.tol[["coef"]]) sav <- structure(c(0.00345039893617021, 0.00132651489361702, -0.000554579787234042, -0.00139678475177305, 0.00250189539007092, 0.000932237234042553, 0.00132651489361702, 0.00420059687943262, -0.000952140709219857, -0.00194335914893617, 0.00126485617021277, 0.00251607829787234, -0.000554579787234042, -0.000952140709219857, 0.00225920113475177, 0.00057910914893617, -0.00153379787234043, -0.00106924595744681, -0.00139678475177305, -0.00194335914893617, 0.00057910914893617, 0.00430494191489362, -0.00180268914893617, -0.00120505595744681, 0.00250189539007092, 0.00126485617021277, -0.00153379787234043, -0.00180268914893617, 0.00519185361702128, 0.00188440468085106, 0.000932237234042553, 0.00251607829787234, -0.00106924595744681, -0.00120505595744681, 0.00188440468085106, 0.00440833021276596), .Dim = c(6L, 6L), .Dimnames = list(c("acog.perf", "asom.perf", "conf.perf", "acog.asom", "acog.conf", "asom.conf"), c("acog.perf", "asom.perf", "conf.perf", "acog.asom", "acog.conf", "asom.conf"))) expect_equivalent(V[1:6,1:6], sav, tolerance=.tol[["var"]]) ### turn var1.var2 into a factor with the desired order of levels dat$var1.var2 <- factor(dat$var1.var2, levels=c("acog.perf", "asom.perf", "conf.perf", "acog.asom", "acog.conf", "asom.conf")) ### multivariate random-effects model expect_warning(res <- rma.mv(yi, V, mods = ~ var1.var2 - 1, 
random = ~ var1.var2 | study, struct="UN", data=dat)) ### restructure estimated mean correlations into a 4x4 matrix R <- matrix(NA, nrow=4, ncol=4) R[lower.tri(R)] <- coef(res) rownames(R) <- colnames(R) <- c("perf", "acog", "asom", "conf") ### fit regression model with 'perf' as outcome and 'acog', 'asom', and 'conf' as predictors fit <- matreg(1, 2:4, R=R, V=vcov(res)) out <- capture.output(print(fit)) sav <- structure(list(estimate = c(0.14817903234559, -0.0536342615587582, 0.363679177420187), se = c(0.156551433378687, 0.0768472434859867, 0.0909539697381244), zval = c(0.946519805967891, -0.697933447262015, 3.99849702511387), pval = c(0.343883525131896, 0.485218815885662, 0.0000637459821320369), ci.lb = c(-0.158656138804758, -0.204252091102472, 0.185412672482517), ci.ub = c(0.455014203495939, 0.0969835679849561, 0.541945682357857)), class = "data.frame", row.names = c("acog", "asom", "conf")) expect_equivalent(fit$tab, sav, tolerance=.tol[["misc"]]) }) metafor/tests/testthat/test_analysis_example_berkey1998.r0000644000176200001440000000664214052500346023374 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:berkey1998 source("tolerances.r") # read in tolerances context("Checking analysis example: berkey1998") ### load data dat <- dat.berkey1998 ### construct variance-covariance matrix of the observed outcomes V <- bldiag(lapply(split(dat[,c("v1i", "v2i")], dat$trial), as.matrix)) test_that("results are correct for the multiple outcomes random-effects model.", { ### multiple outcomes random-effects model (with ML estimation) res <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") out <- capture.output(print(res)) ### so that print.rma.mv() is run (at least once) ### (results for this model not given in paper) expect_equivalent(coef(res), c(-0.3379, 0.3448), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.0798, 0.0495), tolerance=.tol[["se"]]) expect_equivalent(res$tau2, c(0.0261, 0.0070), tolerance=.tol[["var"]]) expect_equivalent(res$rho, 0.6992, tolerance=.tol[["cor"]]) }) test_that("results are correct for the multiple outcomes mixed-effects (meta-regression) model.", { ### multiple outcomes mixed-effects (meta-regression) model (with ML estimation) res <- rma.mv(yi, V, mods = ~ outcome + outcome:I(year - 1983) - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") ### compare with results on page 2545 (Table II) expect_equivalent(coef(res), c(-0.3351, 0.3479, -0.0108, 0.0010), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.0787, 0.0520, 0.0243, 0.0154), tolerance=.tol[["se"]]) expect_equivalent(res$tau2, c(0.0250, 0.0080), tolerance=.tol[["var"]]) expect_equivalent(res$rho, 0.6587, tolerance=.tol[["cor"]]) ### compute the covariance tmp <- res$rho*sqrt(res$tau2[1]*res$tau2[2]) expect_equivalent(tmp, 0.0093, tolerance=.tol[["cov"]]) ### test the difference in slopes res <- rma.mv(yi, V, mods = ~ outcome*I(year - 1983) - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") ### (results for this model not given in paper) expect_equivalent(coef(res), c(-0.3351, 0.3479, -0.0108, 0.0118), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.0787, 0.0520, 0.0243, 0.0199), tolerance=.tol[["se"]]) expect_equivalent(res$pval, c(0.0000, 0.0000, 0.6563, 0.5534), tolerance=.tol[["pval"]]) }) test_that("results are correct when testing var-cov structures against each other with LRTs.", { ### test 
whether the amount of heterogeneity is the same in the two outcomes res1 <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") res0 <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="CS", data=dat, method="ML") tmp <- anova(res0, res1) out <- capture.output(print(tmp)) ### so that print.anova.rma() is run (at least once) ### (results for this not given in paper) expect_equivalent(tmp$pval, 0.2597, tolerance=.tol[["pval"]]) ### test the correlation among the true effects res1 <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML") res0 <- rma.mv(yi, V, mods = ~ outcome - 1, random = ~ outcome | trial, struct="UN", data=dat, method="ML", rho=0) tmp <- anova(res0, res1) ### (results for this not given in paper) expect_equivalent(tmp$pval, 0.2452, tolerance=.tol[["pval"]]) }) metafor/tests/testthat/test_plots_caterpillar_plot.r0000644000176200001440000000301714052500444022713 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:caterpillar_plot context("Checking plots example: Caterpillar plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() ### simulate some data set.seed(5132) k <- 250 vi <- rchisq(k, df=1) * .03 yi <- rnorm(k, rnorm(k, 0.5, 0.4), sqrt(vi)) ### fit RE model res <- rma(yi, vi) opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(5,1,1,1)) ### create plot forest(yi, vi, xlim=c(-2.5,3.5), ### adjust horizontal plot region limits order=yi, ### order by size of yi slab=NA, annotate=FALSE, ### remove study labels and annotations efac=0, ### remove vertical bars at end of CIs pch=19, ### changing point symbol to filled circle col="gray40", ### change color of points/CIs psize=2, ### increase point size cex.lab=1, cex.axis=1, ### increase size of x-axis title/labels lty=c("solid","blank")) ### remove horizontal line at top of plot ### draw points one more time to make them easier to see points(sort(yi), k:1, pch=19, cex=0.5) ### add summary polygon at bottom and text addpoly(res, mlab="", annotate=FALSE, cex=1) text(-2, -2, "RE Model", pos=4, offset=0, cex=1) par(opar) }) metafor/tests/testthat/test_plots_normal_qq_plots.r0000644000176200001440000000437214052500470022571 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:normal_qq_plots context("Checking plots example: normal QQ plots") test_that("plot can be drawn for 'rma.uni' object.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### set up 2x2 array for plotting par(mfrow=c(2,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit fixed- and random-effects models res1 <- rma(yi, vi, data=dat, method="FE") res2 <- rma(yi, vi, data=dat) ### fit fixed- and random-effects models with absolute latitude moderator res3 <- rma(yi, vi, mods=~ablat, data=dat, method="FE") res4 <- rma(yi, vi, mods=~ablat, data=dat) ### normal QQ plots for the various models qqnorm(res1, main="Fixed-Effects Model") qqnorm(res2, main="Random-Effects Model") qqnorm(res3, main="Fixed-Effects with Moderators Model") qqnorm(res4, 
main="Mixed-Effects Model") par(opar) ### draw plot with studentized residuals and labels qqnorm(res2, type="rstudent", label=TRUE) }) test_that("plot can be drawn for 'rma.mh' object.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) data(dat.bcg, package="metafor") res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) qqnorm(res) qqnorm(res, type="rstudent", label=TRUE) par(opar) }) test_that("plot can be drawn for 'rma.peto' object.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) data(dat.bcg, package="metafor") res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) qqnorm(res) qqnorm(res, type="rstudent", label=TRUE) par(opar) }) test_that("plot cannot be drawn for 'rma.mv' object.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat) expect_error(qqnorm(res)) }) metafor/tests/testthat/test_misc_funnel.r0000644000176200001440000000357413465263227020460 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: funnel() functions") source("tolerances.r") # read in tolerances test_that("funnel() works correctly.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() ### simulate a large meta-analytic dataset (correlations with rho = 0.0) ### with no heterogeneity or publication bias; then try out different ### versions of the funnel plot gencor <- function(rhoi, ni) { x1 <- rnorm(ni, mean=0, sd=1) x2 <- rnorm(ni, mean=0, sd=1) x3 <- rhoi*x1 + sqrt(1-rhoi^2)*x2 cor(x1, x3) } set.seed(78123) k <- 200 ### number of studies to simulate ni <- round(rchisq(k, df=2) * 20 + 20) ### simulate sample sizes (skewed distribution) ri <- mapply(gencor, rep(0.0,k), ni) ### simulate correlations dat <- escalc(measure="ZCOR", ri=ri, ni=ni) ### compute r-to-z transformed correlations res <- rma(yi, vi, data=dat, method="FE") opar <- par(no.readonly=TRUE) par(mfrow=c(5,2), mar=c(5,4,1,1), cex=.5) funnel(res, yaxis="sei") funnel(res, yaxis="vi") funnel(res, yaxis="seinv") funnel(res, yaxis="vinv") funnel(res, yaxis="ni") funnel(res, yaxis="ninv") funnel(res, yaxis="sqrtni") funnel(res, yaxis="sqrtninv") funnel(res, yaxis="lni") funnel(res, yaxis="wi") par(opar) opar <- par(no.readonly=TRUE) par(mfrow=c(5,2), mar=c(5,4,1,1), cex=.5) funnel(dat$yi, dat$vi, yaxis="sei") funnel(dat$yi, dat$vi, yaxis="vi") funnel(dat$yi, dat$vi, yaxis="seinv") funnel(dat$yi, dat$vi, yaxis="vinv") funnel(dat$yi, dat$vi, yaxis="ni") funnel(dat$yi, dat$vi, yaxis="ninv") funnel(dat$yi, dat$vi, yaxis="sqrtni") funnel(dat$yi, dat$vi, yaxis="sqrtninv") funnel(dat$yi, dat$vi, yaxis="lni") funnel(dat$yi, dat$vi, yaxis="wi") par(opar) }) metafor/tests/testthat/test_misc_reporter.r0000644000176200001440000000065614056237056021027 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: reporter() function") source("tolerances.r") # read in tolerances test_that("reporter() works correctly for 'rma.uni' objects.", { skip_on_cran() data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) reporter(res, open=FALSE) }) metafor/tests/testthat/test_plots_forest_plot_with_subgroups.r0000644000176200001440000000753714052500452025071 0ustar liggesusers### library(metafor); 
library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:forest_plot_with_subgroups context("Checking plots example: forest plot with subgroups") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(4,4,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### fit random-effects model (use slab argument to define study labels) res <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR", slab=paste(author, year, sep=", "), method="REML") ### set up forest plot (with 2x2 table counts added; rows argument is used ### to specify exactly in which rows the outcomes will be plotted) forest(res, xlim=c(-16, 6), at=log(c(.05, .25, 1, 4)), atransf=exp, ilab=cbind(dat.bcg$tpos, dat.bcg$tneg, dat.bcg$cpos, dat.bcg$cneg), ilab.xpos=c(-9.5,-8,-6,-4.5), cex=.75, ylim=c(-1, 27), order=dat.bcg$alloc, rows=c(3:4,9:15,20:23), xlab="Risk Ratio", mlab="", psize=1, header="Author(s) and Year") ### add text with Q-value, dfs, p-value, and I^2 statistic text(-16, -1, pos=4, cex=0.75, bquote(paste("RE Model for All Studies (Q = ", .(formatC(res$QE, digits=2, format="f")), ", df = ", .(res$k - res$p), ", p = ", .(formatC(res$QEp, digits=2, format="f")), "; ", I^2, " = ", .(formatC(res$I2, digits=1, format="f")), "%)"))) ### set font expansion factor (as in forest() above) and use bold italic ### font and save original settings in object 'op' op <- par(cex=.75, font=4) ### add text for the subgroups text(-16, c(24,16,5), pos=4, c("Systematic Allocation", "Random Allocation", "Alternate Allocation")) ### switch to bold font par(font=2) ### add column headings to the plot text(c(-9.5,-8,-6,-4.5), 26, c("TB+", "TB-", "TB+", "TB-")) text(c(-8.75,-5.25), 27, c("Vaccinated", "Control")) ### set par back to the original settings par(op) ### fit random-effects model in the three subgroups res.s <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR", subset=(alloc=="systematic"), method="REML") res.r <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR", subset=(alloc=="random"), method="REML") res.a <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR", subset=(alloc=="alternate"), method="REML") ### add summary polygons for the three subgroups addpoly(res.s, row=18.5, cex=0.75, atransf=exp, mlab="") addpoly(res.r, row= 7.5, cex=0.75, atransf=exp, mlab="") addpoly(res.a, row= 1.5, cex=0.75, atransf=exp, mlab="") ### add text with Q-value, dfs, p-value, and I^2 statistic for subgroups text(-16, 18.5, pos=4, cex=0.75, bquote(paste("RE Model for Subgroup (Q = ", .(formatC(res.s$QE, digits=2, format="f")), ", df = ", .(res.s$k - res.s$p), ", p = ", .(formatC(res.s$QEp, digits=2, format="f")), "; ", I^2, " = ", .(formatC(res.s$I2, digits=1, format="f")), "%)"))) text(-16, 7.5, pos=4, cex=0.75, bquote(paste("RE Model for Subgroup (Q = ", .(formatC(res.r$QE, digits=2, format="f")), ", df = ", .(res.r$k - res.r$p), ", p = ", .(formatC(res.r$QEp, digits=2, format="f")), "; ", I^2, " = ", .(formatC(res.r$I2, digits=1, format="f")), "%)"))) text(-16, 1.5, pos=4, cex=0.75, bquote(paste("RE Model for Subgroup (Q = ", .(formatC(res.a$QE, digits=2, format="f")), ", df = ", .(res.a$k - res.a$p), ", p = ", .(formatC(res.a$QEp, digits=2, format="f")), "; ", I^2, " = ", .(formatC(res.a$I2, digits=1, format="f")), "%)"))) par(opar) }) 
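### the test above reproduces the full website example; below is a minimal
### illustrative sketch (an addition, with arbitrary row/ylim choices) of the same
### basic workflow: fit an overall model, place the studies via 'rows', and add a
### summary polygon for a single subgroup with addpoly()
test_that("a minimal forest plot with a single subgroup polygon can be drawn.", {

   expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message

   skip_on_cran()

   opar <- par(no.readonly=TRUE)

   ### load BCG vaccine data
   data(dat.bcg, package="metafor")

   ### fit random-effects model to all 13 trials
   res <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg,
              measure="RR", method="REML")

   ### forest plot with the studies in rows 3 to 15, leaving room at the bottom
   forest(res, atransf=exp, rows=3:15, ylim=c(-2,18), xlab="Risk Ratio")

   ### fit the model in the random-allocation subgroup and add its summary polygon
   res.r <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR",
                subset=(alloc=="random"), method="REML")
   addpoly(res.r, row=1, atransf=exp, mlab="RE Model for Random Allocation", cex=.75)

   par(opar)

})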
metafor/tests/testthat/test_misc_rma_error_handling.r0000644000176200001440000000131213465303533023004 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: proper handling of errors in rma()") source("tolerances.r") # read in tolerances test_that("rma() handles NAs correctly.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) dat$yi[1] <- NA dat$yi[2] <- NA expect_warning(res <- rma(yi, vi, data=dat, digits=3)) expect_equivalent(res$k, 11) expect_equivalent(res$k.f, 13) expect_equivalent(length(res$yi), 11) expect_equivalent(length(res$yi.f), 13) expect_equivalent(res$not.na, rep(c(FALSE,TRUE),times=c(2,11))) dat$ablat[3] <- NA ### TODO: complete this ... }) metafor/tests/testthat/test_misc_replmiss.r0000644000176200001440000000067013675407642021025 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: replmiss() function") source("tolerances.r") # read in tolerances test_that("replmiss() works correctly.", { var1 <- c(1:4,NA,6,NA,8:10) var2 <- as.numeric(1:10) expect_identical(replmiss(var1, 0), c(1, 2, 3, 4, 0, 6, 0, 8, 9, 10)) expect_identical(replmiss(var1, var2), as.numeric(1:10)) expect_error(replmiss(var1, 1:9)) }) metafor/tests/testthat/test_analysis_example_vanhouwelingen1993.r0000644000176200001440000000620614052500417025132 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:vanhouwelingen1993 context("Checking analysis example: vanhouwelingen1993") source("tolerances.r") # read in tolerances ### load data dat <- dat.collins1985a test_that("the log likelihood plot can be created.", { skip_on_cran() opar <- par(no.readonly=TRUE) expect_warning(llplot(measure="OR", ai=b.xci, n1i=nci, ci=b.xti, n2i=nti, data=dat, xlim=c(-4,4), lwd=1, col="black", refline=NA, drop00=FALSE)) par(opar) }) test_that("results of the fixed-effects conditional logistic model are correct.", { skip_on_cran() expect_warning(res <- rma.glmm(measure="OR", ai=b.xci, n1i=nci, ci=b.xti, n2i=nti, data=dat, model="CM.EL", method="FE")) ### compare with results on page 2275 (in text) expect_equivalent(coef(res), 0.1216, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.0993, tolerance=.tol[["se"]]) expect_equivalent(res$ci.lb, -0.0730, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.3162, tolerance=.tol[["ci"]]) ### 0.31 in paper (rounded a bit more heavily, so 32-bit and 64-bit versions give same result) expect_equivalent(c(logLik(res)), -53.6789, tolerance=.tol[["fit"]]) ### run with control(dnchgcalc="dnoncenhypergeom") expect_warning(res <- rma.glmm(measure="OR", ai=b.xci, n1i=nci, ci=b.xti, n2i=nti, data=dat, model="CM.EL", method="FE", control=list(dnchgcalc="dnoncenhypergeom"))) ### some very minor discrepancies expect_equivalent(coef(res), 0.1216, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.0996, tolerance=.tol[["se"]]) expect_equivalent(res$ci.lb, -0.0735, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.3167, tolerance=.tol[["ci"]]) expect_equivalent(c(logLik(res)), -53.6789, tolerance=.tol[["fit"]]) }) test_that("results of the random-effects conditional logistic model are correct.", { skip_on_cran() expect_warning(res <- rma.glmm(measure="OR", ai=b.xci, n1i=nci, ci=b.xti, n2i=nti, data=dat, model="CM.EL", method="ML")) ### compare with results on page 2277 (in text) expect_equivalent(coef(res), 0.1745, 
tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.1360, tolerance=.tol[["se"]]) expect_equivalent(res$ci.lb, -0.0919, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.4410, tolerance=.tol[["ci"]]) expect_equivalent(c(logLik(res)), -52.9890, tolerance=.tol[["fit"]]) expect_equivalent(res$tau2, 0.1195, tolerance=.tol[["var"]]) ### run with control(dnchgcalc="dnoncenhypergeom") expect_warning(res <- rma.glmm(measure="OR", ai=b.xci, n1i=nci, ci=b.xti, n2i=nti, data=dat, model="CM.EL", method="ML", control=list(dnchgcalc="dnoncenhypergeom"))) ### no discrepancies expect_equivalent(coef(res), 0.1745, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.1360, tolerance=.tol[["se"]]) expect_equivalent(res$ci.lb, -0.0921, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 0.4411, tolerance=.tol[["ci"]]) expect_equivalent(c(logLik(res)), -52.9890, tolerance=.tol[["fit"]]) expect_equivalent(res$tau2, 0.1195, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_misc_escalc.r0000644000176200001440000003656113701710333020411 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: escalc() function") source("tolerances.r") # read in tolerances test_that("escalc() works correctly for measure='RR'", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) expect_equivalent(dat$yi[1], -0.8893, tolerance=.tol[["est"]]) expect_equivalent(dat$vi[1], 0.3256, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='PHI/YUQ/YUY/RTET/PBIT/OR2D/OR2DN'", { ### see Table 13.4 (p. 242) in the Handbook of Research Synthesis and Meta-Analysis dat <- escalc(measure="PHI", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.1309, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.0789, tolerance=.tol[["var"]]) dat <- escalc(measure="YUQ", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.3846, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.1901, tolerance=.tol[["var"]]) dat <- escalc(measure="YUY", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.2000, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.1071, tolerance=.tol[["var"]]) dat <- escalc(measure="RTET", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.2603, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.1423, tolerance=.tol[["var"]]) dat <- escalc(measure="PBIT", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.4399, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.2456, tolerance=.tol[["var"]]) dat <- escalc(measure="OR2D", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.4471, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.2460, tolerance=.tol[["var"]]) dat <- escalc(measure="OR2DN", ai=135, bi=15, ci=40, di=10) sav <- summary(dat) expect_equivalent(sav$yi, 0.4915, tolerance=.tol[["est"]]) expect_equivalent(sav$sei, 0.2704, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='SMD/SMDH/ROM'", { dat <- dat.normand1999 sav <- escalc(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) expect_equivalent(sav$yi, c(-0.3552, -0.3479, -2.3176, -1.8880), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c( 0.0131, 0.0645, 0.0458, 0.1606), tolerance=.tol[["var"]]) sav <- escalc(measure="SMDH", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, 
subset=1:4) expect_equivalent(sav$yi, c(-0.3553, -0.3465, -2.3018, -1.8880), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c( 0.0131, 0.0657, 0.0509, 0.1874), tolerance=.tol[["var"]]) sav <- escalc(measure="ROM", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) expect_equivalent(sav$yi, c(-0.3102, -0.0715, -0.6202, -0.7303), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c( 0.0094, 0.0028, 0.0018, 0.0119), tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='CVR/VR'", { dat <- dat.normand1999 dat <- escalc(measure="CVR", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1) expect_equivalent(dat$yi[1], 0.0014, tolerance=.tol[["est"]]) expect_equivalent(dat$vi[1], 0.0159, tolerance=.tol[["var"]]) dat <- dat.normand1999 dat <- escalc(measure="VR", sd1i=sd1i, n1i=n1i, sd2i=sd2i, n2i=n2i, data=dat, subset=1) expect_equivalent(dat$yi[1], -0.3087, tolerance=.tol[["est"]]) expect_equivalent(dat$vi[1], 0.0065, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='RPB/RBIS'", { x <- c(20, 31, 18, 22, 30, 16, 28, 24, 23, 27, 1, 4, 8, 15, 9, 11, 11, 6, 8, 4) y <- c(3, 3, 4, 5, 6, 4, 7, 6, 5, 4, 3, 5, 1, 5, 2, 4, 6, 4, 2, 4) xb <- ifelse(x > median(x), 1, 0) sav <- escalc(measure="RPB", m1i=mean(y[xb==1]), sd1i=sd(y[xb==1]), n1i=sum(xb==1), m2i=mean(y[xb==0]), sd2i=sd(y[xb==0]), n2i=sum(xb==0)) expect_equivalent(sav$yi, 0.3685, tolerance=.tol[["est"]]) expect_equivalent(sav$vi, 0.0384, tolerance=.tol[["var"]]) sav <- escalc(measure="RBIS", m1i=mean(y[xb==1]), sd1i=sd(y[xb==1]), n1i=sum(xb==1), m2i=mean(y[xb==0]), sd2i=sd(y[xb==0]), n2i=sum(xb==0)) expect_equivalent(sav$yi, 0.4619, tolerance=.tol[["est"]]) expect_equivalent(sav$vi, 0.0570, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='D2ORL/D2ORN'", { dat <- dat.gibson2002 sav <- escalc(measure="D2ORL", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) expect_equivalent(sav$yi, c(-0.4315, -0.9285, 0.5932, -0.1890), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c( 0.1276, 0.0493, 0.3204, 0.0690), tolerance=.tol[["var"]]) sav <- escalc(measure="D2ORN", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) expect_equivalent(sav$yi, c(-0.3925, -0.8447, 0.5397, -0.1719), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c( 0.1056, 0.0408, 0.2651, 0.0571), tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='COR/UCOR/ZCOR'", { dat <- dat.mcdaniel1994 sav <- escalc(measure="COR", ri=ri, ni=ni, data=dat, subset=c(1,13,33,102)) expect_equivalent(sav$yi, c(0.0000, 0.6200, 0.9900, -0.1300), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c(0.0082, 0.0271, 0.0001, 0.0242), tolerance=.tol[["var"]]) sav <- escalc(measure="UCOR", ri=ri, ni=ni, data=dat, subset=c(1,13,33,102)) expect_equivalent(sav$yi, c(0.0000, 0.6363, 0.9925, -0.1317), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c(0.0082, 0.0253, 0.0000, 0.0241), tolerance=.tol[["var"]]) sav <- escalc(measure="UCOR", ri=ri, ni=ni, data=dat, vtype="UB", subset=c(1,13,33,102)) expect_equivalent(sav$vi, c(0.0084, 0.0283, 0.0000, 0.0261), tolerance=.tol[["var"]]) sav <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat, subset=c(1,13,33,102)) expect_equivalent(sav$yi, c(0.0000, 0.7250, 2.6467, -0.1307), tolerance=.tol[["est"]]) expect_equivalent(sav$vi, c(0.0083, 0.0833, 0.3333, 0.0263), tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='PCOR/ZPCOR/SPCOR'", { ### 
data from Aloe and Thompson (2013) dat <- data.frame(ti = c(4.61, 6.19, 4.07, -0.77, 1.16), ni = c(218, 232, 156, 382, 259), mi = c(4, 7, 6, 19, 15), r2i = c(.240, .455, .500, .327, .117)) dat <- escalc(measure="PCOR", ti=ti, ni=ni, mi=mi, data=dat) expect_equivalent(dat$yi[1], 0.3012, tolerance=.tol[["est"]]) expect_equivalent(dat$vi[1], 0.0039, tolerance=.tol[["var"]]) dat <- escalc(measure="ZPCOR", ti=ti, ni=ni, mi=mi, data=dat) expect_equivalent(dat$yi[1], 0.3108, tolerance=.tol[["est"]]) expect_equivalent(dat$vi[1], 0.0047, tolerance=.tol[["var"]]) dat <- escalc(measure="SPCOR", ti=ti, ni=ni, mi=mi, r2i=r2i, data=dat) expect_equivalent(dat$yi[1], 0.2754, tolerance=.tol[["est"]]) expect_equivalent(dat$vi[1], 0.0033, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='MC/SMCRH'", { dat <- escalc(measure="MC", m1i=26, m2i=22, sd1i=sqrt(30), sd2i=sqrt(20), ni=60, ri=0.7) expect_equivalent(dat$yi, 4.0000, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.2618, tolerance=.tol[["var"]]) dat <- escalc(measure="SMCRH", m1i=26, m2i=22, sd1i=sqrt(30), sd2i=sqrt(20), ni=60, ri=0.7) expect_equivalent(dat$yi, 0.7210, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0129, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='PAS'", { dat <- escalc(measure="PAS", xi=10, ni=20) expect_equivalent(dat$yi, 0.7854, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0125, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='IRS/IRFT'", { dat <- escalc(measure="IRS", xi=10, ti=20) expect_equivalent(dat$yi, 0.7071, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0125, tolerance=.tol[["var"]]) dat <- escalc(measure="IRFT", xi=10, ti=20) expect_equivalent(dat$yi, 0.7244, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0125, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='ROMC'", { dat <- escalc(measure="ROMC", m1i=26, m2i=22, sd1i=sqrt(30), sd2i=sqrt(20), ni=60, ri=0.7) expect_equivalent(dat$yi, 0.1671, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0004, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='MPRD'", { dat <- escalc(measure="MPRD", ai=20, bi=10, ci=5, di=20) expect_equivalent(dat$yi, 0.0909, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0048, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='MPRR'", { dat <- escalc(measure="MPRR", ai=20, bi=10, ci=5, di=20) expect_equivalent(dat$yi, 0.1823, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0200, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='MPOR'", { dat <- escalc(measure="MPOR", ai=20, bi=10, ci=5, di=20) expect_equivalent(dat$yi, 0.3646, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0782, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='MPORC'", { dat <- escalc(measure="MPORC", ai=20, bi=10, ci=5, di=20) expect_equivalent(dat$yi, 0.6931, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.3000, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='MPPETO'", { dat <- escalc(measure="MPPETO", ai=20, bi=10, ci=5, di=20) expect_equivalent(dat$yi, 0.6667, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.2667, tolerance=.tol[["var"]]) }) test_that("escalc() works correctly for measure='IRSD'", { dat <- escalc(measure="IRSD", x1i=10, x2i=6, t1i=20, t2i=20) expect_equivalent(dat$yi, 0.1594, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0250, tolerance=.tol[["var"]]) }) 
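### illustrative sketch (an addition to the checks above): for measure="RR" with no
### zero cells, escalc() returns the log risk ratio and its large-sample sampling
### variance, that is, yi = log((ai/(ai+bi)) / (ci/(ci+di))) and
### vi = 1/ai - 1/(ai+bi) + 1/ci - 1/(ci+di); the test below recomputes both by hand
test_that("escalc() works correctly for measure='RR' (hand computation)", {

   dat <- escalc(measure="RR", ai=135, bi=15, ci=40, di=10)

   ### hand computation for the single 2x2 table above
   yi.hand <- log((135/150) / (40/50))
   vi.hand <- 1/135 - 1/150 + 1/40 - 1/50

   expect_equivalent(dat$yi, yi.hand, tolerance=.tol[["est"]])
   expect_equivalent(dat$vi, vi.hand, tolerance=.tol[["var"]])

})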
test_that("escalc() works correctly for measure='MNLN/CVLN/SDLN'", { dat <- escalc(measure="MNLN", mi=10, sdi=2, ni=20) expect_equivalent(dat$yi, 2.3026, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0020, tolerance=.tol[["var"]]) dat <- escalc(measure="CVLN", mi=10, sdi=2, ni=20) expect_equivalent(dat$yi, -1.5831, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0283, tolerance=.tol[["var"]]) dat <- escalc(measure="SDLN", sdi=2, ni=20) expect_equivalent(dat$yi, 0.7195, tolerance=.tol[["est"]]) expect_equivalent(dat$vi, 0.0263, tolerance=.tol[["var"]]) }) test_that("'var.names' argument works correctly for 'escalc' objects.", { dat <- dat.bcg dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y1","v1"), slab=paste0(author, ", ", year)) dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y2","v2"), slab=paste0(author, ", ", year)) expect_identical(tail(names(dat), 4), c("y1","v1","y2","v2")) expect_identical(attributes(dat)$yi.names, c("y2","y1")) expect_identical(attributes(dat)$vi.names, c("v2","v1")) expect_identical(attr(dat$y1, "measure"), "RR") expect_identical(attr(dat$y2, "measure"), "OR") }) test_that("`[`, cbind(), and rbind() work correctly for 'escalc' objects.", { dat <- dat.bcg dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y1","v1"), slab=paste0(author, ", ", year)) dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y2","v2"), slab=paste0(author, ", ", year)) dat <- cbind(dat[,1:9], dat[,c(12:13,10:11)]) expect_identical(tail(names(dat), 4), c("y2","v2","y1","v1")) expect_identical(attributes(dat)$yi.names, c("y2","y1")) expect_identical(attributes(dat)$vi.names, c("v2","v1")) expect_identical(attr(dat$y1, "measure"), "RR") expect_identical(attr(dat$y2, "measure"), "OR") dat <- rbind(dat[13,], dat[1:12,]) expect_equivalent(attr(dat$y2, "ni"), rowSums(dat[,c("tpos", "tneg", "cpos", "cneg")])) expect_identical(attr(dat$y2, "slab"), paste0(dat$author, ", ", dat$year)) dat <- dat.bcg dat1 <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y1","v1"), slab=paste0(author, ", ", year)) dat2 <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y1","v1"), slab=paste0(author, ", ", year)) dat1 <- dat1[1:4,] dat2 <- dat2[4:1,] dat <- rbind(dat1, dat2) expect_equivalent(attr(dat$y1, "ni"), rowSums(dat[,c("tpos", "tneg", "cpos", "cneg")])) attr(dat1$y1, "ni") <- NULL dat <- rbind(dat1, dat2) expect_null(attr(dat$y1, "ni")) }) test_that("summary() of 'escalc' objects works correctly with the 'out.names' argument.", { dat <- dat.bcg dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y1","v1"), slab=paste0(author, ", ", year)) dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, var.names=c("y2","v2"), slab=paste0(author, ", ", year)) dat <- summary(dat, var.names=c("y1","v1"), out.names=c("sei1","zi1","pval1","ci.lb1","ci.ub1")) dat <- summary(dat, var.names=c("y2","v2"), out.names=c("sei2","zi2","pval2","ci.lb2","ci.ub2")) expect_equivalent(with(dat, c(zi1[1], sei1[1], ci.lb1[1], ci.ub1[1])), c(-1.5586, 0.5706, -2.0077, 0.2290), tolerance=.tol[["est"]]) expect_equivalent(with(dat, c(zi2[1], sei2[1], ci.lb2[1], ci.ub2[1])), c(-1.5708, 0.5976, -2.1100, 0.2326), tolerance=.tol[["est"]]) dat <- dat[,1:11] expect_identical(attr(dat, "yi.names"), "y1") expect_identical(attr(dat, "vi.names"), "v1") }) test_that("'subset' and 
'include' arguments work correctly in 'escalc'.", { all <- dat.bcg all$tpos[1] <- NA dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=all, subset=1:4) expect_equivalent(c(dat$yi), c(NA, -1.5854, -1.3481, -1.4416), tolerance=.tol[["est"]]) dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=all, subset=1:4, include=1:3) expect_equivalent(c(dat$yi), c(NA, -1.5854, -1.3481, NA), tolerance=.tol[["est"]]) expect_identical(attributes(dat$yi)$ni, c(NA, 609L, 451L, NA)) dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=all, subset=1:4, include=1:3, add.measure=TRUE) expect_identical(dat$measure, c("", "RR", "RR", "")) attributes(dat$yi)$ni[3] <- 1L dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, include=3:4, add.measure=TRUE) expect_equivalent(c(dat$yi), c(NA, -1.5854, -1.3863, -1.4564), tolerance=.tol[["est"]]) expect_identical(dat$measure, c("", "RR", "OR", "OR")) expect_identical(attributes(dat$yi)$ni, c(NA, 609L, 451L, 26465L)) dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=all, subset=1:4, include=1:3, add.measure=TRUE) attributes(dat$yi)$ni[3] <- 1L dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, include=3:4, replace=FALSE, add.measure=TRUE) expect_equivalent(c(dat$yi), c(NA, -1.5854, -1.3481, -1.4564), tolerance=.tol[["est"]]) expect_identical(dat$measure, c("", "RR", "RR", "OR")) expect_identical(attributes(dat$yi)$ni, c(NA, 609L, 1L, 26465L)) dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=all, subset=1:4, include=1:3, append=FALSE, add.measure=TRUE) expect_equivalent(c(dat$yi), c(NA, -1.5854, -1.3481, NA), tolerance=.tol[["est"]]) expect_identical(dat$measure, c("", "RR", "RR", "")) }) metafor/tests/testthat/test_misc_permutest.r0000644000176200001440000000702513675404302021206 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: permutest() function") source("tolerances.r") # read in tolerances ### load data dat <- dat.hine1989 ### calculate risk differences and corresponding sampling variances dat <- escalc(measure="RD", n1i=n1i, n2i=n2i, ai=ai, ci=ci, data=dat) test_that("permutest() gives correct results for a random-effects model.", { skip_on_cran() maj <- as.numeric(R.Version()$major) min <- as.numeric(R.Version()$minor) ### run test only on R versions 4.x.x (due to change in sampler) if (maj >= 4) { ### fit random-effects model res <- rma(yi, vi, data=dat) ### exact permutation test sav <- permutest(res, progbar=FALSE) expect_equivalent(sav$pval, 0.0625) out <- capture.output(print(sav)) ### so that print.permutest.rma.uni() is run (at least once) tmp <- coef(sav) expected <- structure(list(estimate = 0.029444, se = 0.013068, zval = 2.253107, pval = 0.0625, ci.lb = 0.003831, ci.ub = 0.055058), .Names = c("estimate", "se", "zval", "pval", "ci.lb", "ci.ub"), row.names = "intrcpt", class = "data.frame") expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) ### approximate permutation test set.seed(1234) sav <- permutest(res, iter=50, progbar=FALSE, control=list(p2defn="px2")) expect_equivalent(sav$pval, 0.08) set.seed(1234) sav <- permutest(res, iter=50, progbar=FALSE, control=list(p2defn="px2", stat="coef")) expect_equivalent(sav$pval, 0.08) } else { expect_true(TRUE) } }) test_that("permutest() gives correct results for a mixed-effects model.", { skip_on_cran() maj <- as.numeric(R.Version()$major) min <- as.numeric(R.Version()$minor) ### run test only on R versions 3.6.x 
(due to change in sampler) if (maj >= 3 && min >= 6) { ### add a fake moderator dat$mod <- c(3,1,2,2,4,5) ### fit mixed-effects model res <- rma(yi, vi, mods = ~ mod, data=dat) ### exact permutation test sav <- permutest(res, progbar=FALSE) expect_equivalent(sav$pval, c(1, 0.0028), tolerance=.tol[["pval"]]) ### approximate permutation test set.seed(1234) sav <- permutest(res, iter=50, progbar=FALSE, control=list(p2defn="px2")) expect_equivalent(sav$pval, c(.04, .04)) sav <- permutest(res, iter=50, progbar=FALSE, control=list(p2defn="px2", stat="coef")) expect_equivalent(sav$pval, c(.04, .04)) } else { expect_true(TRUE) } }) test_that("permutest() gives correct results for example in Follmann & Proschan (1999).", { skip_on_cran() maj <- as.numeric(R.Version()$major) min <- as.numeric(R.Version()$minor) ### run test only on R versions 4.x.x (due to change in sampler) if (maj >= 4) { ### data in Table 1 dat <- read.table(header=TRUE, text = " ai n1i ci n2i 173 5331 210 5296 157 1906 193 1900 131 4541 121 4516 56 2051 84 2030 52 424 65 422 36 1149 42 1129 62 6582 20 1663 2 88 2 30") dat <- escalc(measure="PETO", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) res <- rma(yi, vi, data=dat, method="DL") sav <- permutest(res, permci=TRUE, progbar=FALSE, retpermdist=TRUE, control=list(stat="coef")) expect_equivalent(sav$pval, 10/256) expect_equivalent(sav$ci.lb, -0.3677, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, -0.0020, tolerance=.tol[["ci"]]) } else { expect_true(TRUE) } }) metafor/tests/testthat/test_analysis_example_raudenbush2009.r0000644000176200001440000001426414052500410024222 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:raudenbush2009 context("Checking analysis example: raudenbush2009") source("tolerances.r") # read in tolerances ### load data dat <- dat.raudenbush1985 test_that("results are correct for the fixed-effects model.", { ### fixed-effects model res.FE <- rma(yi, vi, data=dat, digits=3, method="FE") ### compare with results on page 301 (Table 16.2) and page 302 expect_equivalent(coef(res.FE), 0.0604, tolerance=.tol[["coef"]]) expect_equivalent(res.FE$QE, 35.8295, tolerance=.tol[["test"]]) expect_equivalent(res.FE$zval, 1.6553, tolerance=.tol[["test"]]) ### 1.65 in chapter }) test_that("results are correct for the random-effects model.", { ### random-effects model res.RE <- rma(yi, vi, data=dat, digits=3) ### compare with results on page 301 (Table 16.2) and page 302 expect_equivalent(coef(res.RE), 0.0837, tolerance=.tol[["coef"]]) ### 0.083 in chapter expect_equivalent(res.RE$zval, 1.6208, tolerance=.tol[["test"]]) expect_equivalent(res.RE$tau2, 0.0188, tolerance=.tol[["var"]]) ### prediction interval tmp <- predict(res.RE) ### compare with results on page 301 (Table 16.2) and page 302 expect_equivalent(tmp$pi.lb, -0.2036, tolerance=.tol[["ci"]]) ### -0.19 in chapter but computed in a slightly different way expect_equivalent(tmp$pi.ub, 0.3711, tolerance=.tol[["ci"]]) ### 0.35 in chapter but computed in a slightly different way ### range of BLUPs tmp <- range(blup(res.RE)$pred) ### compare with results on page 301 (Table 16.2) expect_equivalent(tmp, c(-0.0293, 0.2485), tolerance=.tol[["pred"]]) }) test_that("results are correct for the mixed-effects model.", { ### recode weeks variable dat$weeks.c <- ifelse(dat$weeks > 3, 3, dat$weeks) ### mixed-effects model res.ME <- rma(yi, vi, mods = ~ weeks.c, data=dat, digits=3) ### compare with results on page 301 (Table 16.2) 
expect_equivalent(res.ME$tau2, 0.0000, tolerance=.tol[["var"]]) expect_equivalent(coef(res.ME), c(0.4072, -0.1572), tolerance=.tol[["coef"]]) expect_equivalent(res.ME$QE, 16.5708, tolerance=.tol[["test"]]) expect_equivalent(res.ME$zval, c(4.6782, -4.3884), tolerance=.tol[["test"]]) ### range of BLUPs tmp <- range(blup(res.ME)$pred) ### compare with results on page 301 (Table 16.2) expect_equivalent(tmp, c(-0.0646, 0.4072), tolerance=.tol[["pred"]]) ### -0.07 in chapter }) test_that("results are correct for the random-effects model (conventional approach).", { res.std <- list() res.std$FE <- rma(yi, vi, data=dat, digits=3, method="FE") res.std$ML <- rma(yi, vi, data=dat, digits=3, method="ML") res.std$REML <- rma(yi, vi, data=dat, digits=3, method="REML") res.std$DL <- rma(yi, vi, data=dat, digits=3, method="DL") res.std$HE <- rma(yi, vi, data=dat, digits=3, method="HE") tmp <- t(sapply(res.std, function(x) c(tau2=x$tau2, mu=x$beta, se=x$se, z=x$zval, ci.lb=x$ci.lb, ci.ub=x$ci.ub))) expected <- structure(c(0, 0.0126, 0.0188, 0.0259, 0.0804, 0.0604, 0.0777, 0.0837, 0.0893, 0.1143, 0.0365, 0.0475, 0.0516, 0.0558, 0.0792, 1.6553, 1.6368, 1.6208, 1.6009, 1.4432, -0.0111, -0.0153, -0.0175, -0.02, -0.0409, 0.1318, 0.1708, 0.1849, 0.1987, 0.2696), .Dim = 5:6, .Dimnames = list(c("FE", "ML", "REML", "DL", "HE"), c("tau2", "mu", "se", "z", "ci.lb", "ci.ub"))) ### compare with results on page 309 (Table 16.3) expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) test_that("results are correct for the random-effects model (Knapp & Hartung method).", { res.knha <- list() expect_warning(res.knha$FE <- rma(yi, vi, data=dat, digits=3, method="FE", test="knha")) res.knha$ML <- rma(yi, vi, data=dat, digits=3, method="ML", test="knha") res.knha$REML <- rma(yi, vi, data=dat, digits=3, method="REML", test="knha") res.knha$DL <- rma(yi, vi, data=dat, digits=3, method="DL", test="knha") res.knha$HE <- rma(yi, vi, data=dat, digits=3, method="HE", test="knha") tmp <- t(sapply(res.knha, function(x) c(tau2=x$tau2, mu=x$beta, se=x$se, z=x$zval, ci.lb=x$ci.lb, ci.ub=x$ci.ub))) expected <- structure(c(0, 0.0126, 0.0188, 0.0259, 0.0804, 0.0604, 0.0777, 0.0837, 0.0893, 0.1143, 0.0515, 0.0593, 0.0616, 0.0636, 0.0711, 1.1733, 1.311, 1.3593, 1.405, 1.6078, -0.0477, -0.0468, -0.0457, -0.0442, -0.0351, 0.1685, 0.2023, 0.2131, 0.2229, 0.2637), .Dim = 5:6, .Dimnames = list(c("FE", "ML", "REML", "DL", "HE"), c("tau2", "mu", "se", "z", "ci.lb", "ci.ub"))) ### compare with results on page 309 (Table 16.3) expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) test_that("results are correct for the random-effects model (Huber-White method).", { res.std <- list() res.std$FE <- rma(yi, vi, data=dat, digits=3, method="FE") res.std$ML <- rma(yi, vi, data=dat, digits=3, method="ML") res.std$REML <- rma(yi, vi, data=dat, digits=3, method="REML") res.std$DL <- rma(yi, vi, data=dat, digits=3, method="DL") res.std$HE <- rma(yi, vi, data=dat, digits=3, method="HE") res.hw <- list() res.hw$FE <- robust(res.std$FE, cluster=dat$study, adjust=FALSE) res.hw$ML <- robust(res.std$ML, cluster=dat$study, adjust=FALSE) res.hw$REML <- robust(res.std$REML, cluster=dat$study, adjust=FALSE) res.hw$DL <- robust(res.std$DL, cluster=dat$study, adjust=FALSE) res.hw$HE <- robust(res.std$HE, cluster=dat$study, adjust=FALSE) out <- capture.output(print(res.hw$REML)) ### so that print.robust.rma() is run (at least once) tmp <- t(sapply(res.hw, function(x) c(tau2=x$tau2, mu=x$beta, se=x$se, t=x$zval, ci.lb=x$ci.lb, ci.ub=x$ci.ub))) expected <- 
structure(c(0, 0.0126, 0.0188, 0.0259, 0.0804, 0.0604, 0.0777, 0.0837, 0.0893, 0.1143, 0.0398, 0.0475, 0.05, 0.0522, 0.0618, 1.5148, 1.6369, 1.6756, 1.7105, 1.8503, -0.0234, -0.022, -0.0213, -0.0204, -0.0155, 0.1441, 0.1775, 0.1887, 0.199, 0.2441), .Dim = 5:6, .Dimnames = list(c("FE", "ML", "REML", "DL", "HE"), c("tau2", "mu", "se", "t", "ci.lb", "ci.ub"))) ### compare with results on page 309 (Table 16.3) expect_equivalent(tmp, expected, tolerance=.tol[["misc"]]) }) metafor/tests/testthat/test_analysis_example_law2016.r0000644000176200001440000001742713675406145022673 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking analysis example: law2016") source("tolerances.r") # read in tolerances test_that("results are correct for example 1.", { skip_on_cran() ### example 1 EG1 <- read.table(header=TRUE, as.is=TRUE, text=" study y ref trt contr design 1 -0.16561092 C D CD CD 2 -0.13597406 C D CD CD 3 -0.08012604 C E CE CE 4 -0.14746890 C F CF CF 5 0.09316853 E F EF EF 6 -0.15859403 E F EF EF 7 -0.22314355 E F EF EF 8 -0.06744128 F G FG FG 9 -0.11888254 C H CH CH 10 -0.06899287 C H CH CH 11 0.26917860 B C BC BC 12 -0.33160986 A B AB AB 13 -0.26236426 A B AB AB 14 -0.39319502 F G FG FG 15 -0.11557703 A B AB AB 16 0.00000000 E F EF EF 17 -0.40987456 A E AE AE ") S1 <- structure(c(0.0294183340466069, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.147112449467866, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0780588660166125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.140361934247383, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0479709251030665, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0506583523716436, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.235695187165775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.04499494438827, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.17968120987923, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.735714285714286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.184889643463497, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0294022652280727, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.232478632478632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.857874134296899, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0219285638496459, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.168131868131868, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0826973577700322 ), .Dim = c(17, 17)) ### create contrast matrix X <- contrmat(EG1, grp1="trt", grp2="ref", append=FALSE, last=NA)[,-1] # remove 'A' to make it the reference level ### fit model assuming consistency (tau^2_omega=0) modC <- rma.mv(y, S1, mods=X, intercept=FALSE, random = ~ contr | study, rho=1/2, data=EG1) ci <- confint(modC) expect_equivalent(modC$tau2, 0.0000, tolerance=.tol[["var"]]) expect_equivalent(coef(modC), c(-0.2243, -0.1667, -0.3274, -0.3152, -0.3520, -0.6489, -0.2758), tolerance=.tol[["coef"]]) expect_equivalent(ci$random[1,2:3], c(0.0000, 0.0708), tolerance=.tol[["var"]]) ### fit Jackson's model modI <- rma.mv(y, S1, mods=X, intercept=FALSE, random = list(~ contr | study, ~ contr | design), rho=1/2, phi=1/2, data=EG1) ci <- confint(modI) out <- capture.output(print(ci)) expect_equivalent(modI$tau2, 0.0000, tolerance=.tol[["var"]]) expect_equivalent(modI$gamma2, 0.0000, tolerance=.tol[["var"]]) expect_equivalent(coef(modI), c(-0.2243, -0.1667, -0.3274, -0.3152, -0.3520, -0.6489, -0.2758), tolerance=.tol[["coef"]]) 
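   ### with two random components in the model, confint(modI) returns a list of CIs:
   ### ci[[1]] below pertains to tau^2 (the within-design heterogeneity) and ci[[2]]
   ### to gamma^2 (the between-design inconsistency component in Jackson's model)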
expect_equivalent(ci[[1]]$random[1,2:3], c(0.0000, 0.0708), tolerance=.tol[["var"]]) expect_equivalent(ci[[2]]$random[1,2:3], c(0.0000, 0.6153), tolerance=.tol[["var"]]) sav <- predict(modI, newmods=c(1,0,0,0,0,0,0), transf=exp) sav <- c(sav[[1]], sav[[3]], sav[[4]], sav[[5]], sav[[6]]) expect_equivalent(sav, c(0.7991, 0.6477, 0.9859, 0.6477, 0.9859), tolerance=.tol[["pred"]]) }) test_that("results are correct for example 2.", { skip_on_cran() ### example 2 EG2 <- read.table(header=TRUE, as.is=TRUE, text=" study y ref trt contr design 1 -3.61988658 A B AB AB 2 0.00000000 B C BC BC 3 0.19342045 B C BC BC 4 2.79320801 B C BC BC 5 0.24512246 B C BC BC 6 0.03748309 B C BC BC 7 0.86020127 B D BD BD 8 0.14310084 B D BD BD 9 0.07598591 C D CD CD 10 -0.99039870 C D CD CD 11 -1.74085310 A B AB ABD 11 0.34830670 A D AD ABD 12 0.40546510 B C BC BCD 12 1.91692260 B D BD BCD 13 -0.32850410 B C BC BCD 13 1.07329450 B D BD BCD ") S2 <- structure(c(0.9672619, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.24987648, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.61904762, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.27958937, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.23845689, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04321419, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.47692308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.18416468, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.61978022, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.12650164, 0.07397504, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07397504, 0.1583906, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.389881, 0.2857143, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2857143, 0.5151261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.4361111, 0.2111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2111111, 0.5380342 ), .Dim = c(16, 16)) ### create contrast matrix X <- contrmat(EG2, grp1="trt", grp2="ref", append=FALSE, last=NA)[,-1] # remove 'A' to make it the reference level ### fit model assuming consistency (tau^2_omega=0) modC <- rma.mv(y, S2, mods=X, intercept=FALSE, random = ~ contr | study, rho=1/2, data=EG2) ci <- confint(modC) expect_equivalent(modC$tau2, 0.5482, tolerance=.tol[["var"]]) expect_equivalent(coef(modC), c(-1.8847, -1.3366, -0.7402), tolerance=.tol[["coef"]]) expect_equivalent(ci$random[1,2:3], c(0.0788, 2.0156), tolerance=.tol[["var"]]) ### fit Jackson's model modI <- rma.mv(y, S2, mods=X, intercept=FALSE, random = list(~ contr | study, ~ contr | design), rho=1/2, phi=1/2, data=EG2) ci <- confint(modI) expect_equivalent(modI$tau2, 0.1036, tolerance=.tol[["var"]]) expect_equivalent(modI$gamma2, 0.5391, tolerance=.tol[["var"]]) expect_equivalent(coef(modI), c(-1.9735, -1.3957, -0.6572), tolerance=.tol[["coef"]]) expect_equivalent(ci[[1]]$random[1,2:3], c(0.0000, 1.6661), tolerance=.tol[["var"]]) expect_equivalent(ci[[2]]$random[1,2:3], c(0.0000, 3.9602), tolerance=.tol[["var"]]) sav <- predict(modI, newmods=c(1,0,0), transf=exp) sav <- c(sav[[1]], sav[[3]], sav[[4]], sav[[5]], sav[[6]]) expect_equivalent(sav, c(0.1390, 0.0369, 0.5230, 0.0178, 1.0856), tolerance=.tol[["pred"]]) sav <- ranef(modI) expect_equivalent(sav[[1]]$intrcpt, c(-0.106, -0.094, -0.078, 0.335, -0.058, -0.128, 0.026, -0.121, 0.013, -0.148, 0.029, 0.13, 0.027, 0.084, -0.101, -0.064), tolerance=.tol[["pred"]]) expect_equivalent(sav[[1]]$se, c(0.314, 0.293, 0.283, 0.301, 0.285, 0.282, 0.286, 0.297, 0.297, 0.304, 0.313, 0.315, 0.301, 0.304, 0.302, 0.305), 
tolerance=.tol[["se"]]) expect_equivalent(sav[[2]]$intrcpt, c(-0.551, 0.152, 0.675, -0.119, -0.383, 0.104, -0.493, -0.699), tolerance=.tol[["pred"]]) expect_equivalent(sav[[2]]$se, c(0.64, 0.619, 0.642, 0.518, 0.543, 0.53, 0.486, 0.54), tolerance=.tol[["se"]]) out <- capture.output(print(sav)) sav <- predict(modI) expect_equivalent(sav$pi.lb, c(-4.029, -1.2853, -1.2853, -1.2853, -1.2853, -1.2853, -0.4911, -0.4911, -1.137, -1.137, -4.029, -2.7699, -1.2853, -0.4911, -1.2853, -0.4911), tolerance=.tol[["pred"]]) }) metafor/tests/testthat/test_plots_llplot.r0000644000176200001440000000127714052500464020671 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:baujat_plot context("Checking plots example: Likelihood plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### adjust margins so the space is better used par(mar=c(5,4,2,2)) ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### create likelihood plot llplot(measure="GEN", yi=yi, vi=vi, data=dat, lwd=1, refline=NA, xlim=c(-3,2)) par(opar) }) metafor/tests/testthat/test_misc_tes.r0000644000176200001440000000222213777146704017757 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: tes() function") source("tolerances.r") # read in tolerances test_that("tes() works correctly for 'dat.dorn2007'.", { dat <- escalc(measure="RR", ai=x.a, n1i=n.a, ci=x.p, n2i=n.p, data=dat.dorn2007) sav <- tes(dat$yi, dat$vi, test="chi2") out <- capture.output(print(sav)) expect_identical(sav$O, 10L) expect_equivalent(sav$E, 4.923333, tolerance=.tol[["misc"]]) expect_equivalent(sav$X2, 7.065648, tolerance=.tol[["test"]]) expect_equivalent(sav$pval, 0.003928794, tolerance=.tol[["pval"]]) sav <- tes(dat$yi, dat$vi, test="binom") expect_equivalent(sav$pval, 0.01159554, tolerance=.tol[["pval"]]) sav <- tes(dat$yi, dat$vi, test="exact", progbar=FALSE) expect_equivalent(sav$pval, 0.007778529, tolerance=.tol[["pval"]]) res <- rma(yi, vi, data=dat, method="FE") sav <- tes(res, test="chi2") expect_identical(sav$O, 10L) expect_equivalent(sav$E, 4.923333, tolerance=.tol[["misc"]]) expect_equivalent(sav$X2, 7.065648, tolerance=.tol[["test"]]) expect_equivalent(sav$pval, 0.003928794, tolerance=.tol[["pval"]]) }) metafor/tests/testthat/test_tips_rma_vs_lm_and_lme.r0000644000176200001440000000563414052500510022630 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking tip: rma() results match up with those from lm() and lme()") source("tolerances.r") # read in tolerances ### this is essentially checking the equivalence of the results as explained here: ### https://www.metafor-project.org/doku.php/tips:rma_vs_lm_and_lme test_that("results for rma() and lm() match for method='FE'.", { data(dat.molloy2014, package="metafor") dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.molloy2014) res.fe <- rma(yi, vi, data=dat, method="FE") res.lm <- lm(yi ~ 1, weights = 1/vi, data=dat) ### coefficients should be the same expect_equivalent(coef(res.fe), coef(res.lm), tolerance=.tol[["coef"]]) ### standard errors should be the same after adjusting the 'lm' one for sigma expect_equivalent(res.fe$se, coef(summary(res.lm))[1,2] / summary(res.lm)$sigma, tolerance=.tol[["se"]]) ### fit the same model as is fitted 
by lm() with rma() function res.fe <- rma(yi, vi*summary(res.lm)$sigma^2, data=dat, method="FE") ### coefficients should still be the same expect_equivalent(coef(res.fe), coef(res.lm), tolerance=.tol[["coef"]]) ### standard errors should be the same expect_equivalent(res.fe$se, coef(summary(res.lm))[1,2], tolerance=.tol[["se"]]) }) test_that("results for rma() and lme() match for method='ML'.", { library("nlme") data(dat.molloy2014, package="metafor") dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.molloy2014) dat$study <- 1:nrow(dat) res.lme <- lme(yi ~ 1, random = ~ 1 | study, weights = varFixed(~ vi), data=dat, method="ML") res.re <- rma(yi, vi*res.lme$sigma^2, data=dat, method="ML") ### coefficients should be the same expect_equivalent(coef(res.re), fixef(res.lme), tolerance=.tol[["coef"]]) ### standard errors should be the same after adjusting the 'rma' one by the factor sqrt(k/(k-p)) expect_equivalent(res.re$se * sqrt(res.re$k / (res.re$k - res.re$p)), summary(res.lme)$tTable[1,2], tolerance=.tol[["se"]]) ### check that BLUPs are the same expect_equivalent(blup(res.re)$pred, coef(res.lme)$"(Intercept)", tolerance=.tol[["pred"]]) }) test_that("results for rma() and lme() match for method='REML'.", { library("nlme") data(dat.molloy2014, package="metafor") dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.molloy2014) dat$study <- 1:nrow(dat) res.lme <- lme(yi ~ 1, random = ~ 1 | study, weights = varFixed(~ vi), data=dat, method="REML") res.re <- rma(yi, vi*res.lme$sigma^2, data=dat, method="REML") ### coefficients should be the same expect_equivalent(coef(res.re), fixef(res.lme), tolerance=.tol[["coef"]]) ### standard errors should be the same expect_equivalent(res.re$se, summary(res.lme)$tTable[1,2], tolerance=.tol[["se"]]) ### check that BLUPs are the same expect_equivalent(blup(res.re)$pred, coef(res.lme)$"(Intercept)", tolerance=.tol[["pred"]]) }) metafor/tests/testthat/test_plots_contour-enhanced_funnel_plot.r0000644000176200001440000000156014052500446025217 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:contour_enhanced_funnel_plot context("Checking plots example: contour-enhanced funnel plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(5,4,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### fit random-effects model res <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR", slab=paste(author, year, sep=", "), method="REML") ### create contour enhanced funnel plot (with funnel centered at 0) funnel(res, level=c(90, 95, 99), shade=c("white", "gray55", "gray75"), refline=0, legend=TRUE) par(opar) }) metafor/tests/testthat/test_plots_gosh.r0000644000176200001440000000341314052500460020311 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:gosh_plot context("Checking plots example: GOSH plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### load data data(dat.egger2001, package="metafor") ### meta-analysis of all trials including ISIS-4 using a FE model res <- rma(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.egger2001, method="FE") ### fit FE model to all possible subsets sav <- gosh(res, 
progbar=FALSE) out <- capture.output(print(sav)) ### so that print.gosh.rma() is run (at least once) ### create GOSH plot ### red points for subsets that include and blue points ### for subsets that exclude study 16 (the ISIS-4 trial) plot(sav, out=16, breaks=100) ### fit FE model to random subsets (with parallel processing) sav <- gosh(res, progbar=FALSE, parallel="snow", subsets=1000) ### meta-analysis using MH method (using subset to speed things up) res <- rma.mh(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.egger2001, subset=c(1:7,16)) sav <- gosh(res, progbar=FALSE) plot(sav, out=8, breaks=40) ### fit FE model to all possible subsets (with parallel processing) sav <- gosh(res, progbar=FALSE, parallel="snow", subsets=1000) ### meta-analysis using Peto's method (using subset to speed things up) res <- rma.peto(ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.egger2001, subset=c(1:7,16)) sav <- gosh(res, progbar=FALSE) plot(sav, out=8, breaks=40) ### fit FE model to all possible subsets (with parallel processing) sav <- gosh(res, progbar=FALSE, parallel="snow", subsets=1000) par(opar) }) metafor/tests/testthat/test_analysis_example_vanhouwelingen2002.r0000644000176200001440000001651614052500421025110 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:vanhouwelingen2002 context("Checking analysis example: vanhouwelingen2002") source("tolerances.r") # read in tolerances ### load data dat <- dat.colditz1994 ### calculate log(OR)s and corresponding sampling variances dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) ### 'center' year variable dat$year <- dat$year - 1900 test_that("results for the fixed-effects model are correct.", { res <- rma(yi, vi, data=dat, method="FE") tmp <- predict(res, transf=exp, digits=3) ### compare with results on page 596 (in text) expect_equivalent(tmp$pred, .6465, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, .5951, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, .7024, tolerance=.tol[["ci"]]) ### .703 in paper }) test_that("results for the random-effects model are correct.", { res <- rma(yi, vi, data=dat, method="ML") tmp <- predict(res, transf=exp, digits=3) ### compare with results on page 597 (in text) expect_equivalent(tmp$pred, .4762, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, .3360, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, .6749, tolerance=.tol[["ci"]]) expect_equivalent(res$tau2, .3025, tolerance=.tol[["var"]]) ### CI for tau^2 (profile likelihood method) tmp <- confint(res, type="PL") expect_equivalent(tmp$random[1,2], 0.1151, tolerance=.tol[["var"]]) expect_equivalent(tmp$random[1,3], 0.8937, tolerance=.tol[["var"]]) ### CI for tau^2 (Q-profile method) tmp <- confint(res) expect_equivalent(tmp$random[1,2], 0.1302, tolerance=.tol[["var"]]) ### 0.1350 based on a Satterthwaite approximation (page 597) expect_equivalent(tmp$random[1,3], 1.1812, tolerance=.tol[["var"]]) ### 1.1810 based on a Satterthwaite approximation (page 597) ### CI for mu with Knapp & Hartung method res <- rma(yi, vi, data=dat, method="ML", test="knha") tmp <- predict(res, transf=exp, digits=3) ### (results for this not given in paper) expect_equivalent(tmp$ci.lb, .3175, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, .7141, tolerance=.tol[["ci"]]) }) test_that("profile plot for tau^2 can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() res <- rma(yi, vi, data=dat, method="ML") 
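   ### (note: the dotted reference lines added after the profile plot below, at
   ### 0.1151 and 0.8937, are the profile likelihood CI bounds for tau^2 verified in
   ### the test above; they could also be obtained here programmatically, e.g. via
   ### confint(res, type="PL")$random[1,2:3], rather than hard-coded)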
opar <- par(no.readonly=TRUE) profile(res, xlim=c(.01,2), steps=200, log="x", cex=0, lwd=2, cline=TRUE, progbar=FALSE) abline(v=c(0.1151, 0.8937), lty="dotted") par(opar) }) test_that("forest plot of observed log(OR)s and corresponding BLUPs can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() res <- rma(yi, vi, data=dat, method="ML") sav <- blup(res) opar <- par(no.readonly=TRUE) par(family="mono", mar=c(5,4,1,2)) forest(res, refline=res$beta, addpred=TRUE, xlim=c(-7,8), alim=c(-3,3), slab=1:13, psize=.8, ilab=paste0("(n = ", formatC(apply(dat[,c(4:7)], 1, sum), width=7, big.mark=","), ")"), ilab.xpos=-3.5, ilab.pos=2, rows=13:1+.15, header="Trial (total n)", lty="dashed") arrows(sav$pi.lb, 13:1 - .15, sav$pi.ub, 13:1 -.15, length=.05, angle=90, code=3) points(sav$pred, 13:1 - .15, pch=15, cex=.8) par(opar) }) test_that("the prediction interval is correct.", { res <- rma(yi, vi, data=dat, method="ML") ### computation as done in the paper tmp <- c(res$beta) + c(-1,+1) * qnorm(.975) * sqrt(res$tau2) ### compare with results on page 599 (in text) expect_equivalent(tmp, c(-1.8199, 0.3359), tolerance=.tol[["ci"]]) ### computation done with metafor tmp <- predict(res, digits=3) ### (results for this not given in paper) expect_equivalent(tmp$pi.lb, -1.875, tolerance=.tol[["ci"]]) expect_equivalent(tmp$pi.ub, 0.391, tolerance=.tol[["ci"]]) }) test_that("L'Abbe plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() res <- rma(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat, method="FE") opar <- par(no.readonly=TRUE) labbe(res, xlim=c(-7,-1), ylim=c(-7,-1), xlab="ln(odds) not-vaccinated group", ylab="ln(odds) vaccinated group") par(opar) }) ############################################################################ ### create dataset in long format dat.long <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.colditz1994) dat.long <- escalc(measure="PLO", xi=out1, mi=out2, data=dat.long) dat.long$tpos <- dat.long$tneg <- dat.long$cpos <- dat.long$cneg <- NULL levels(dat.long$group) <- c("CON", "EXP") test_that("results for the bivariate model are correct.", { res <- rma.mv(yi, vi, mods = ~ group - 1, random = ~ group | trial, struct="UN", data=dat.long, method="ML") ### compare with results on pages 604-605 (in text) expect_equivalent(coef(res), c(-4.0960, -4.8337), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, c(2.4073, 1.4314), tolerance=.tol[["var"]]) expect_equivalent(res$rho, .9467, tolerance=.tol[["cor"]]) res <- rma.mv(yi, vi, mods = ~ group, random = ~ group | trial, struct="UN", data=dat.long, method="ML") ### compare with results on pages 604-605 (in text) expect_equivalent(coef(res), c(-4.0960, -0.7378), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.4347, 0.1797), tolerance=.tol[["se"]]) ### estimated odds ratio tmp <- predict(res, newmods=1, intercept=FALSE, transf=exp, digits=3) expect_equivalent(tmp$pred, 0.4782, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.3362, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.6801, tolerance=.tol[["ci"]]) ### amount of heterogeneity in log odds ratios tmp <- res$tau2[1] + res$tau2[2] - 2*res$rho*sqrt(res$tau2[1]*res$tau2[2]) expect_equivalent(tmp, 0.3241, tolerance=.tol[["var"]]) }) ############################################################################ test_that("results for the meta-regression analyses are correct.", { res <- rma(yi, vi, mods = ~ ablat, data=dat, method="ML") ### compare with 
results on pages 608-609 (in text) expect_equivalent(coef(res), c(0.3710, -0.0327), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.1061, 0.0034), tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.0040, tolerance=.tol[["var"]]) expect_equivalent(res$R2, 98.6691, tolerance=.tol[["r2"]]) res <- rma.mv(yi, vi, mods = ~ group + group:I(ablat-33) - 1, random = ~ group | trial, struct="UN", data=dat.long, method="ML") ### compare with results on pages 612-613 (in text) expect_equivalent(coef(res), c(-4.1174, -4.8257, 0.0725, 0.0391), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.3061, 0.3129, 0.0219, 0.0224), tolerance=.tol[["se"]]) expect_equivalent(res$tau2, c(1.1819, 1.2262), tolerance=.tol[["var"]]) expect_equivalent(res$rho, 1.0000, tolerance=.tol[["cor"]]) res <- rma.mv(yi, vi, mods = ~ group*I(ablat-33), random = ~ group | trial, struct="UN", data=dat.long, method="ML") ### compare with results on pages 612-613 (in text) expect_equivalent(coef(res), c(-4.1174, -0.7083, 0.0725, -0.0333), tolerance=.tol[["coef"]]) expect_equivalent(res$se, c(0.3061, 0.0481, 0.0219, 0.0028), tolerance=.tol[["se"]]) expect_equivalent(res$tau2, c(1.1819, 1.2262), tolerance=.tol[["var"]]) expect_equivalent(res$rho, 1.0000, tolerance=.tol[["cor"]]) }) metafor/tests/testthat/test_plots_meta-analytic_scatterplot.r0000644000176200001440000000347714052500466024545 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:meta_analytic_scatterplot context("Checking plots example: meta-analytic scatterplot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### adjust margins so the space is better used par(mar=c(5,5,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit mixed-effects model with absolute latitude as predictor res <- rma(yi, vi, mods = ~ ablat, data=dat) ### calculate predicted risk ratios for 0 to 60 degrees absolute latitude preds <- predict(res, newmods=c(0:60), transf=exp) ### radius of points will be proportional to the inverse standard errors ### hence the area of the points will be proportional to inverse variances size <- 1 / sqrt(dat$vi) size <- size / max(size) ### set up plot (risk ratios on y-axis, absolute latitude on x-axis) plot(NA, NA, xlim=c(10,60), ylim=c(0.2,1.6), xlab="Absolute Latitude", ylab="Risk Ratio", las=1, bty="l", log="y") ### add points symbols(dat$ablat, exp(dat$yi), circles=size, inches=FALSE, add=TRUE, bg="black") ### add predicted values (and corresponding CI bounds) lines(0:60, preds$pred) lines(0:60, preds$ci.lb, lty="dashed") lines(0:60, preds$ci.ub, lty="dashed") ### dotted line at RR=1 (no difference between groups) abline(h=1, lty="dotted") ### labels some points in the plot ids <- c(4,7,12,13) pos <- c(3,3,1,1) text(dat$ablat[ids], exp(dat$yi)[ids], ids, cex=.9, pos=pos) par(opar) }) metafor/tests/testthat/test_analysis_example_normand1999.r0000644000176200001440000001123014052500402023530 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:normand1999 context("Checking analysis example: normand1999") source("tolerances.r") # read in tolerances test_that("results are correct for the first example (using 
dat.hine1989).", { ### load data data(dat.hine1989, package="metafor") ### calculate risk differences and corresponding sampling variances dat <- escalc(measure="RD", n1i=n1i, n2i=n2i, ai=ai, ci=ci, data=dat.hine1989) ### transform into percentage points dat$yi <- dat$yi * 100 dat$vi <- dat$vi * 100^2 out <- capture.output(print(dat)) ### so that print.escalc() is run (at least once) ### compare with results on page 330 (Table III) expect_equivalent(dat$yi, c(2.8026, 0.0000, 1.9711, 1.7961, 3.5334, 4.4031), tolerance=.tol[["est"]]) expect_equivalent(dat$vi, c(17.7575, 37.5657, 8.1323, 10.8998, 8.0114, 6.1320), tolerance=.tol[["var"]]) ### CIs for individual studies tmp <- summary(dat) ### compare with results on page 330 (Table III) expect_equivalent(tmp$ci.lb, c(-5.4566, -12.0128, -3.6182, -4.6747, -2.0141, -0.4503), tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, c(11.0618, 12.0128, 7.5604, 8.2669, 9.0810, 9.2566), tolerance=.tol[["ci"]]) ### fit fixed-effects model res <- rma(yi, vi, data=dat, method="FE", digits=2) ### compare with results on page 349 (Table VII) expect_equivalent(coef(res), 2.9444, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.3831, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 5.5058, tolerance=.tol[["ci"]]) ### fit random-effects model (REML estimator) res <- rma(yi, vi, data=dat, digits=2) ### compare with results on page 349 (Table VII) expect_equivalent(coef(res), 2.9444, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.3831, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 5.5058, tolerance=.tol[["ci"]]) expect_equivalent(res$tau2, 0.0000, tolerance=.tol[["var"]]) ### fit random-effects model (DL estimator) res <- rma(yi, vi, data=dat, method="DL", digits=2) ### compare with results on page 349 (Table VII) expect_equivalent(coef(res), 2.9444, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, 0.3831, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 5.5058, tolerance=.tol[["ci"]]) expect_equivalent(res$tau2, 0.0000, tolerance=.tol[["var"]]) }) test_that("results are correct for the second example (using dat.normand1999).", { ### load data data(dat.normand1999, package="metafor") ### compute pooled SD dat.normand1999$sdpi <- with(dat.normand1999, sqrt(((n1i-1)*sd1i^2 + (n2i-1)*sd2i^2)/(n1i+n2i-2))) ### calculate mean differences and corresponding sampling variances dat <- escalc(m1i=m1i, sd1i=sdpi, n1i=n1i, m2i=m2i, sd2i=sdpi, n2i=n2i, measure="MD", data=dat.normand1999, digits=2) ### compare with results on page 351 (Table VIII) expect_equivalent(dat$yi, c(-20, -2, -55, -71, -4, 1, 11, -10, 7)) expect_equivalent(dat$vi, c(40.5863, 2.0468, 15.2809, 150.2222, 20.1923, 1.2235, 95.3756, 8.0321, 20.6936), tolerance=.tol[["var"]]) ### CIs for individual studies tmp <- summary(dat) ### (results for this not given in paper) expect_equivalent(tmp$ci.lb, c(-32.4864, -4.8041, -62.6616, -95.0223, -12.8073, -1.168, -8.1411, -15.5547, -1.9159), tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, c(-7.5136, 0.8041, -47.3384, -46.9777, 4.8073, 3.168, 30.1411, -4.4453, 15.9159), tolerance=.tol[["ci"]]) ### fit fixed-effects model res <- rma(yi, vi, data=dat, method="FE", digits=2) ### compare with results on page 352 (Table IX) expect_equivalent(coef(res), -3.4939, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -5.0265, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -1.9613, tolerance=.tol[["ci"]]) ### fit random-effects model (DL estimator) res <- rma(yi, vi, data=dat, method="DL", digits=2) ### compare with results on 
page 352 (Table IX) expect_equivalent(coef(res), -14.0972, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -24.4454, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -3.7490, tolerance=.tol[["ci"]]) expect_equivalent(res$tau2, 218.7216, tolerance=.tol[["var"]]) ### fit random-effects model (REML estimator) res <- rma(yi, vi, data=dat, digits=2) ### compare with results on page 352 (Table IX) expect_equivalent(coef(res), -15.1217, tolerance=.tol[["est"]]) expect_equivalent(res$ci.lb, -32.6716, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, 2.4282, tolerance=.tol[["ci"]]) expect_equivalent(res$tau2, 685.1965, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_miller1978.r0000644000176200001440000000640114052500374023367 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:miller1978 context("Checking analysis example: miller1978") source("tolerances.r") # read in tolerances ### create dataset dat <- data.frame(xi=c(3, 6, 10, 1), ni=c(11, 17, 21, 6)) dat$pi <- with(dat, xi/ni) dat <- escalc(measure="PFT", xi=xi, ni=ni, data=dat) test_that("calculations of escalc() for measure='PFT' are correct.", { ### compare with results on page 138 expect_equivalent(dat$yi*2, c(1.1391, 1.2888, 1.5253, 0.9515), tolerance=.tol[["est"]]) ### need *2 factor due to difference in definition of measure expect_equivalent(dat$vi*4, c(0.0870, 0.0571, 0.0465, 0.1538), tolerance=.tol[["var"]]) }) test_that("results are correct for the fixed-effects model using unweighted estimation.", { res <- rma(yi, vi, method="FE", data=dat, weighted=FALSE) pred <- predict(res, transf=function(x) x*2) expect_equivalent(pred$pred, 1.2262, tolerance=.tol[["pred"]]) pred <- predict(res, transf=transf.ipft.hm, targs=list(ni=dat$ni)) expect_equivalent(pred$pred, 0.3164, tolerance=.tol[["pred"]]) }) test_that("results are correct for the fixed-effects model using weighted estimation.", { res <- rma(yi, vi, method="FE", data=dat) pred <- predict(res, transf=function(x) x*2) expect_equivalent(pred$pred, 1.3093, tolerance=.tol[["pred"]]) pred <- predict(res, transf=transf.ipft.hm, targs=list(ni=dat$ni)) expect_equivalent(pred$pred, 0.3595, tolerance=.tol[["pred"]]) }) test_that("results are correct when there are proportions of 0 and 1.", { ### create dataset dat <- data.frame(xi=c(0,10), ni=c(10,10)) dat$pi <- with(dat, xi/ni) dat <- escalc(measure="PFT", xi=xi, ni=ni, data=dat, add=0) ### back-transformation of the individual outcomes expect_equivalent(transf.ipft(dat$yi, dat$ni), c(0,1)) }) test_that("back-transformations work as intended for individual studies and the model estimate.", { ### create dataset dat <- data.frame(xi = c( 0, 4, 9, 16, 20), ni = c(10, 10, 15, 20, 20)) dat$pi <- with(dat, xi/ni) dat <- escalc(measure="PFT", xi=xi, ni=ni, data=dat, add=0) ### back-transformation of the individual outcomes expect_equivalent(transf.ipft(dat$yi, dat$ni), c(0.0, 0.4, 0.6, 0.8, 1.0)) ### back-transformation of the estimated average res <- rma(yi, vi, method="FE", data=dat) pred <- predict(res, transf=transf.ipft.hm, targs=list(ni=dat$ni)) expect_equivalent(pred$pred, 0.6886, tolerance=.tol[["pred"]]) expect_equivalent(pred$ci.lb, 0.5734, tolerance=.tol[["ci"]]) expect_equivalent(pred$ci.ub, 0.7943, tolerance=.tol[["ci"]]) ### calculate back-transformed CI bounds dat.back <- summary(dat, transf=transf.ipft, ni=dat$ni) skip_on_cran() ### create forest plot with CI bounds supplied and then add model 
estimate opar <- par(no.readonly=TRUE) forest(dat.back$yi, ci.lb=dat.back$ci.lb, ci.ub=dat.back$ci.ub, psize=1, xlim=c(-.5,1.8), alim=c(0,1), ylim=c(-1,8), refline=NA, digits=3, xlab="Proportion", header=c("Study", "Proportion [95% CI]")) addpoly(pred$pred, ci.lb=pred$ci.lb, ci.ub=pred$ci.ub, rows=-0.5, digits=3, mlab="FE Model", efac=1.3) abline(h=0.5) par(opar) }) metafor/tests/testthat/test_misc_aggregate.r0000644000176200001440000000513713777124506021116 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: aggregate() function") source("tolerances.r") # read in tolerances test_that("aggregate() works correctly for 'dat.konstantopoulos2011'.", { dat <- dat.konstantopoulos2011 agg <- aggregate(dat, cluster=district, struct="ID") expect_equivalent(c(agg$yi), c(-0.125687, 0.06654, 0.350303, 0.499691, 0.051008, -0.041842, 0.885529, -0.02875, 0.250475, 0.015033, 0.161917), tolerance=.tol[["est"]]) expect_equivalent(c(agg$vi), c(0.032427, 0.003981, 0.006664, 0.001443, 0.001549, 0.000962, 0.003882, 0.000125, 0.001799, 0.006078, 0.018678), tolerance=.tol[["var"]]) }) test_that("aggregate() works correctly for 'dat.assink2016'.", { dat <- dat.assink2016 dat <- escalc(yi=yi, vi=vi, data=dat) agg <- aggregate(dat, cluster=study, rho=0.6) expect_equivalent(c(agg$yi), c(0.162877, 0.406036, 1.079003, -0.0447, 1.549, -0.054978, 1.007244, 0.3695, 0.137862, 0.116737, 0.525765, 0.280461, 0.301829, 0.035593, 0.090821, 0.018099, -0.055203), tolerance=.tol[["est"]]) expect_equivalent(c(agg$vi), c(0.019697, 0.005572, 0.083174, 0.0331, 0.1384, 0.02139, 0.054485, 0.0199, 0.027057, 0.010729, 0.011432, 0.002814, 0.011, 0.001435, 0.126887, 0.016863, 0.007215), tolerance=.tol[["var"]]) }) test_that("aggregate() works correctly for 'dat.ishak2007'.", { dat <- dat.ishak2007 dat <- reshape(dat.ishak2007, direction="long", idvar="study", v.names=c("yi","vi"), varying=list(c(2,4,6,8), c(3,5,7,9))) dat <- dat[order(dat$study, dat$time),] is.miss <- is.na(dat$yi) dat <- dat[!is.miss,] rownames(dat) <- NULL agg <- aggregate(dat, cluster=study, struct="CAR", time=time, phi=0.9) expect_equivalent(c(agg$yi), c(-33.4, -28.137183, -21.1, -17.22908, -32.9, -26.342019, -31.37934, -25, -36, -21.275427, -8.6, -28.830656, -28.00566, -35.277625, -28.02381, -24.818713, -36.3, -29.4, -33.552998, -20.6, -33.9, -35.4, -34.9, -32.7, -26.471326, -32.753685, -18.412199, -29.2, -31.7, -32.46738, -31.7, -35.274832, -30.189494, -17.6, -22.9, -36, -22.5, -20.67624, -9.3, -25.52315, -16.7, -29.440786, -31.221009, -20.73355, -37.982183, -22.1), tolerance=.tol[["est"]]) expect_equivalent(c(agg$vi), c(14.3, 5.611511, 7.3, 4.562371, 125, 4.132918, 86.117899, 17, 5, 6.308605, 41, 20.229622, 7.743863, 5.632795, 3.438095, 12.975915, 27.3, 10.7, 1.895013, 25.3, 20.1, 21.2, 18, 16.3, 29.751824, 9.417499, 5.156788, 5.8, 12.4, 24.954806, 19.1, 17.528303, 8.508767, 28.4, 20, 27.7, 20.3, 1.379225, 85.2, 15.281948, 9.8, 179.802277, 3.317364, 15.082821, 20.888464, 40.8), tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_plots_cumulative_forest_plot.r0000644000176200001440000000510114052500450024142 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:cumulative_forest_plot context("Checking plots example: cumulative forest plot") test_that("plot can be drawn for 'rma.uni' object.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease 
margins so the full space is used par(mar=c(4,4,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects models res <- rma(yi, vi, data=dat, slab=paste(author, year, sep=", ")) ### cumulative meta-analysis (in the order of publication year) tmp <- cumul(res, order=dat$year) ### cumulative forest plot forest(tmp, xlim=c(-4,2), at=log(c(.125, .25, .5, 1, 2)), atransf=exp, digits=c(2,3), cex=.75, header="Author(s) and Year") par(opar) }) test_that("plot can be drawn for 'rma.mh' object.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(4,4,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### fit fixed-effects models using the Mantel-Haenszel method res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", ")) ### cumulative meta-analysis (in the order of publication year) tmp <- cumul(res, order=dat.bcg$year) ### cumulative forest plot forest(tmp, xlim=c(-4,2), at=log(c(.125, .25, .5, 1, 2)), atransf=exp, digits=c(2,3), cex=.75, header="Author(s) and Year") par(opar) }) test_that("plot can be drawn for 'rma.peto' object.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(4,4,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### fit fixed-effects models using Peto's method res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste(author, year, sep=", ")) ### cumulative meta-analysis (in the order of publication year) tmp <- cumul(res, order=dat.bcg$year) ### cumulative forest plot forest(tmp, xlim=c(-4,2), at=log(c(.125, .25, .5, 1, 2)), atransf=exp, digits=c(2,3), cex=.75, header="Author(s) and Year") par(opar) }) metafor/tests/testthat/test_misc_rma_uni.r0000644000176200001440000000673113675406246020624 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma() function") source("tolerances.r") # read in tolerances data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) test_that("rma() correctly handles a formula for the 'yi' argument", { res1 <- rma(yi ~ ablat, vi, data=dat) res2 <- rma(yi, vi, mods = ~ ablat, data=dat) expect_equivalent(coef(res1), coef(res2)) }) test_that("rma() correctly handles an 'escalc' object", { res1 <- rma(yi, vi, data=dat) res2 <- rma(dat) expect_equivalent(coef(res1), coef(res2)) }) test_that("rma() works with method='DLIT' and method='SJIT'", { res <- rma(yi, vi, data=dat, method="DLIT", control=list(maxiter=500)) expect_equivalent(res$tau2, 0.1576, tolerance=.tol[["var"]]) res <- rma(yi, vi, data=dat, method="SJIT") expect_equivalent(res$tau2, 0.3181, tolerance=.tol[["var"]]) }) test_that("rma() works directly with input for measure='SMD'", { dat <- dat.normand1999 dat <- escalc(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) res1 <- rma(yi, vi, data=dat) res2 <- rma(measure="SMD", m1i=m1i, sd1i=sd1i, n1i=n1i, m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat, subset=1:4) expect_equivalent(res1$tau2, 1.0090, tolerance=.tol[["var"]]) expect_equivalent(res2$tau2, 1.0090, tolerance=.tol[["var"]]) }) test_that("rma() 
works directly with input for measure='PCOR'", { ### data from Aloe and Thompson (2013) dat <- data.frame(ti = c(4.61, 6.19, 4.07, -0.77, 1.16), ni = c(218, 232, 156, 382, 259), mi = c(4, 7, 6, 19, 15), r2i = c(.240, .455, .500, .327, .117)) dat <- escalc(measure="PCOR", ti=ti, ni=ni, mi=mi, data=dat) res1 <- rma(yi, vi, data=dat) res2 <- rma(measure="PCOR", ti=ti, ni=ni, mi=mi, data=dat) expect_equivalent(res1$tau2, 0.0297, tolerance=.tol[["var"]]) expect_equivalent(res2$tau2, 0.0297, tolerance=.tol[["var"]]) }) test_that("rma() works directly with input for measure='MN'", { dat <- dat.normand1999 dat <- escalc(measure="MN", mi=m1i, sdi=sd1i, ni=n1i, data=dat) res1 <- rma(yi, vi, data=dat) res2 <- rma(measure="MN", mi=m1i, sdi=sd1i, ni=n1i, data=dat) expect_equivalent(res1$tau2, 408.9277, tolerance=.tol[["var"]]) expect_equivalent(res2$tau2, 408.9277, tolerance=.tol[["var"]]) }) test_that("rma() works directly with input for measure='SMCR'", { datT <- data.frame( m_pre = c(30.6, 23.5, 0.5, 53.4, 35.6), m_post = c(38.5, 26.8, 0.7, 75.9, 36.0), sd_pre = c(15.0, 3.1, 0.1, 14.5, 4.7), sd_post = c(11.6, 4.1, 0.1, 4.4, 4.6), ni = c(20, 50, 9, 10, 14), ri = c(.47, .64, .77, .89, .44)) dat <- escalc(measure="SMCR", m1i=m_post, m2i=m_pre, sd1i=sd_pre, ni=ni, ri=ri, data=datT) res1 <- rma(yi, vi, data=dat) res2 <- rma(measure="SMCR", m1i=m_post, m2i=m_pre, sd1i=sd_pre, ni=ni, ri=ri, data=datT) expect_equivalent(res1$tau2, 0.3164, tolerance=.tol[["var"]]) expect_equivalent(res2$tau2, 0.3164, tolerance=.tol[["var"]]) }) test_that("rma() works directly with input for measure='AHW'", { dat <- dat.bonett2010 dat <- escalc(measure="AHW", ai=ai, mi=mi, ni=ni, data=dat) res1 <- rma(yi, vi, data=dat) res2 <- rma(measure="AHW", ai=ai, mi=mi, ni=ni, data=dat) expect_equivalent(res1$tau2, 0.0011, tolerance=.tol[["var"]]) expect_equivalent(res2$tau2, 0.0011, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_misc_rma_handling_nas.r0000644000176200001440000001043413465304241022436 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: proper handling of missing values") source("tolerances.r") # read in tolerances test_that("rma.glmm() handles NAs correctly.", { skip_on_cran() dat <- data.frame(ni = rep(20, 10), xi = c(NA, 4, 0, 0, 2, 2, 3, 8, 9, 2), mod1 = c(0, NA, 0, 0, 0, 0, 0, 1, 1, 1), mod2 = c(0, 0, 0, 1, 0, 0, 0, 0, 0, 0)) ### 1) NA in table data for study 1 ### 2) NA for mod1 in study 2 ### 3) if add=0, then yi/vi pair will be NA/NA for study 3 ### 4) if add=0, then yi/vi pair will be NA/NA for study 4, which causes the X.yi matrix to be rank deficient after row 4 is removed ### note: even for the model fitting itself, study 4 is a problem, because the log(odds) for study 4 is -Inf, so the coefficient for ### mod2 is in essence also -Inf; on x86_64-w64-mingw32/x64 (64-bit) with lme4 version 1.1-7, this just barely converges, but ### may fail in other cases; so checks with both moderators included are skipped on CRAN expect_warning(res <- rma.glmm(measure="PLO", xi=xi, ni=ni, mods = ~ mod1, data=dat)) ### k, length of xi/mi, and number of rows in X must be equal to 8 (studies 1 and 2 removed due to NAs in table data) expect_equivalent(res$k, 8) expect_equivalent(length(res$xi), 8) expect_equivalent(length(res$mi), 8) expect_equivalent(nrow(res$X), 8) ### k.yi and length of yi/vi must be equal to 8 (studies 1 and 2 removed due to NAs in table data) expect_equivalent(res$k.yi, 8) expect_equivalent(length(res$yi), 8) 
expect_equivalent(length(res$vi), 8) ### full data saved in .f elements expect_equivalent(res$k.f, 10) expect_equivalent(length(res$xi.f), 10) expect_equivalent(length(res$mi.f), 10) expect_equivalent(nrow(res$X.f), 10) expect_equivalent(length(res$yi.f), 10) expect_equivalent(length(res$vi.f), 10) ### now use add=0, so that studies 3 and 4 have NA/NA for yi/vi expect_warning(res <- rma.glmm(measure="PLO", xi=xi, ni=ni, mods = ~ mod1, data=dat, add=0)) ### k, length of xi/mi, and number of rows in X must be equal to 8 (studies 1 and 2 removed due to NAs in table data, but studies 3 and 4 included in the model fitting) expect_equivalent(res$k, 8) expect_equivalent(length(res$xi), 8) expect_equivalent(length(res$mi), 8) expect_equivalent(nrow(res$X), 8) ### k.yi and length of yi/vi must be equal to 6 (studies 1 and 2 removed due to NAs in table data and studies 3 and 4 have NA/NA for yi/vi) expect_equivalent(res$k.yi, 6) expect_equivalent(length(res$yi), 6) expect_equivalent(length(res$vi), 6) ### full data saved in .f elements expect_equivalent(res$k.f, 10) expect_equivalent(length(res$xi.f), 10) expect_equivalent(length(res$mi.f), 10) expect_equivalent(nrow(res$X.f), 10) expect_equivalent(length(res$yi.f), 10) expect_equivalent(length(res$vi.f), 10) ### include both mod1 and mod2 in the model and use add=0, so that studies 3 and 4 have NA/NA for yi/vi ### as a result, the model matrix for X.yi is rank deficient, so that in essence mod2 needs to be removed for the I^2/H^2 computation ### also note that the coefficient for mod2 is technically -Inf (since xi=0 for the only study where mod2=1); glmer() therefore issues ### several warnings expect_warning(res <- rma.glmm(measure="PLO", xi=xi, ni=ni, mods = ~ mod1 + mod2, data=dat, add=0)) ### k, length of xi/mi, and number of rows in X must be equal to 8 (studies 1 and 2 removed due to NAs in table data, but studies 3 and 4 included in the model fitting) expect_equivalent(res$k, 8) expect_equivalent(length(res$xi), 8) expect_equivalent(length(res$mi), 8) expect_equivalent(nrow(res$X), 8) ### k.yi and length of yi/vi must be equal to 6 (studies 1 and 2 removed due to NAs in table data and studies 3 and 4 have NA/NA for yi/vi) expect_equivalent(res$k.yi, 6) expect_equivalent(length(res$yi), 6) expect_equivalent(length(res$vi), 6) ### full data saved in .f elements expect_equivalent(res$k.f, 10) expect_equivalent(length(res$xi.f), 10) expect_equivalent(length(res$mi.f), 10) expect_equivalent(nrow(res$X.f), 10) expect_equivalent(length(res$yi.f), 10) expect_equivalent(length(res$vi.f), 10) }) metafor/tests/testthat/test_misc_rma_uni_ls.r0000644000176200001440000000722713466272621021305 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma() function with location-scale models") source("tolerances.r") # read in tolerances test_that("location-scale model results are correct for an intercept-only model", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res1 <- rma(yi, vi, data=dat, test="t") res2 <- rma(yi, vi, scale = ~ 1, data=dat, test="t", control=list(optimizer="optim")) res3 <- suppressWarnings(rma(yi, vi, scale = ~ 1, link="identity", data=dat, test="t", control=list(optimizer="optim", optmethod="Nelder-Mead"))) expect_equivalent(res1$tau2, as.vector(exp(coef(res2)$alpha)), tolerance=.tol[["var"]]) expect_equivalent(res1$tau2, as.vector(coef(res3)$alpha), tolerance=.tol[["var"]]) }) test_that("location-scale model results 
are correct for a categorical predictor", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res1 <- rma(yi ~ alloc, vi, scale = ~ alloc - 1, data=dat) res2 <- rma(yi ~ alloc, vi, scale = ~ alloc - 1, link = "identity", data=dat) res3 <- rma.mv(yi ~ alloc, vi, random = ~ alloc | trial, struct="DIAG", data=dat) expect_equivalent(as.vector(exp(coef(res1)$alpha)), as.vector(coef(res2)$alpha), tolerance=.tol[["var"]]) expect_equivalent(as.vector(exp(coef(res1)$alpha)), res3$tau2, tolerance=.tol[["var"]]) }) test_that("location-scale model results are correct for a continuous predictor", { data(dat.laopaiboon2015, package="metafor") dat <- escalc(measure="RR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat.laopaiboon2015) dat$ni <- dat$n1i + dat$n2i dat$ni[dat$study == "Whitlock"] <- dat$ni[dat$study == "Whitlock"] + 2 res <- suppressWarnings(rma(yi, vi, scale = ~ I(1/ni) - 1, link="identity", data=dat, method="ML")) expect_equivalent(as.vector(coef(res)$alpha), 79.1084, tolerance=.tol[["var"]]) expect_equivalent(exp(c(res$beta, res$ci.lb, res$ci.ub)), c(0.8539, 0.5482, 1.3302), tolerance=.tol[["coef"]]) res <- rma(yi, vi, scale = ~ I(1/ni), link="identity", data=dat, method="ML") expect_equivalent(as.vector(coef(res)$alpha), c(0.2750, 31.5127), tolerance=.tol[["var"]]) expect_equivalent(exp(c(res$beta, res$ci.lb, res$ci.ub)), c(1.0163, 0.6215, 1.6618), tolerance=.tol[["coef"]]) res <- rma(yi, vi, scale = ~ I(1/ni) - 1, data=dat) expect_equivalent(as.vector(coef(res)$alpha), -34.5187, tolerance=.tol[["var"]]) expect_equivalent(exp(c(res$beta, res$ci.lb, res$ci.ub)), c(1.1251, 0.6381, 1.9839), tolerance=.tol[["coef"]]) res <- rma(yi, vi, scale = ~ I(1/ni), data=dat) expect_equivalent(as.vector(coef(res)$alpha), c(-0.8868, 42.4065), tolerance=.tol[["var"]]) expect_equivalent(exp(c(res$beta, res$ci.lb, res$ci.ub)), c(1.0474, 0.6242, 1.7577), tolerance=.tol[["coef"]]) sav <- coef(summary(res)) expected <- list(beta = structure(list(estimate = 0.0463, se = 0.2641, zval = 0.1755, pval = 0.8607, ci.lb = -0.4713, ci.ub = 0.564), row.names = "intrcpt", class = "data.frame"), alpha = structure(list(estimate = c(-0.8868, 42.4065), se = c(1.2392, 118.6932), zval = c(-0.7156, 0.3573), pval = c(0.4742, 0.7209 ), ci.lb = c(-3.3156, -190.228), ci.ub = c(1.542, 275.041 )), row.names = c("intrcpt", "I(1/ni)"), class = "data.frame")) expect_equivalent(sav, expected, tolerance=.tol[["misc"]]) sav <- model.matrix(res)$scale expect_equivalent(sav, cbind(1, 1/dat$ni)) sav <- fitted(res)$scale expect_equivalent(sav, c(-0.479, -0.588, -0.831, -0.711, -0.494, -0.254, -0.661, -0.458, -0.542, -0.039, -0.039, -0.13, -0.405, -0.764, -0.357), tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_gleser2009.r0000644000176200001440000002173614052500356023356 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:gleser2009 source("tolerances.r") # read in tolerances context("Checking analysis example: gleser2009") ############################################################################ ### create dataset dat <- data.frame(study=c(1,1,2,3,3,3), trt=c(1,2,1,1,2,3), ai=c( 40, 40, 10,150,150,150), n1i=c(1000,1000,200,2000,2000,2000), ci=c(100,150, 15, 40, 80, 50), n2i=c(4000,4000,400,1000,1000,1000)) dat$pti <- with(dat, ci / n2i) dat$pci <- with(dat, ai / n1i) test_that("results are correct for the multiple-treatment studies example with risk differences.", 
{ dat <- escalc(measure="RD", ai=ai, ci=ci, n1i=n1i, n2i=n2i, data=dat) ### compare with results on page 360 (Table 19.2) expect_equivalent(dat$yi, c(0.0150, 0.0025, 0.0125, 0.0350, -0.0050, 0.0250), tolerance=.tol[["est"]]) calc.v <- function(x) { v <- matrix(x$pci[1]*(1-x$pci[1])/x$n1i[1], nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) res <- rma.mv(yi, V, mods = ~ factor(trt) - 1, data=dat) ### compare with results on page 361 (eq. 19.6) expect_equivalent(coef(res), c(0.0200, 0.0043, 0.0211), tolerance=.tol[["coef"]]) ### compare with results on page 361 (eq. 19.7) tmp <- vcov(res) * 10^6 expected <- structure(c(24.612, 19.954, 13.323, 19.954, 28.538, 13.255, 13.323, 13.255, 69.806), .Dim = c(3L, 3L), .Dimnames = list(c("factor(trt)1", "factor(trt)2", "factor(trt)3"), c("factor(trt)1", "factor(trt)2", "factor(trt)3"))) expect_equivalent(tmp, expected, tolerance=.tol[["var"]]) ### compare with results on page 362 (eq. 19.8) expect_equivalent(res$QE, 7.1907, tolerance=.tol[["test"]]) }) test_that("results are correct for the multiple-treatment studies example with log odds ratios.", { dat <- escalc(measure="OR", ai=ai, ci=ci, n1i=n1i, n2i=n2i, data=dat) ### compare with results on page 362 expect_equivalent(dat$yi, c(0.4855, 0.0671, 0.3008, 0.6657, -0.0700, 0.4321), tolerance=.tol[["est"]]) calc.v <- function(x) { v <- matrix(1/(x$n1i[1]*x$pci[1]*(1-x$pci[1])), nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) res <- rma.mv(yi, V, mods = ~ factor(trt) - 1, data=dat) ### compare with results on page 363 expect_equivalent(coef(res), c(0.5099, 0.0044, 0.4301), tolerance=.tol[["coef"]]) ### compare with results on page 363 tmp <- vcov(res) expected <- structure(c(0.01412, 0.00712, 0.00425, 0.00712, 0.01178, 0.00455, 0.00425, 0.00455, 0.02703), .Dim = c(3L, 3L), .Dimnames = list(c("factor(trt)1", "factor(trt)2", "factor(trt)3"), c("factor(trt)1", "factor(trt)2", "factor(trt)3"))) expect_equivalent(tmp, expected, tolerance=.tol[["var"]]/10) ### compare with results on page 363 expect_equivalent(res$QE, 2.0563, tolerance=.tol[["test"]]) ### 2.057 in chapter }) test_that("results are correct for the multiple-treatment studies example with log risk ratios.", { dat <- escalc(measure="RR", ai=ai, ci=ci, n1i=n1i, n2i=n2i, data=dat) ### compare with results on page 364 expect_equivalent(dat$yi, c(0.4700, 0.0645, 0.2877, 0.6286, -0.0645, 0.4055), tolerance=.tol[["est"]]) calc.v <- function(x) { v <- matrix((1-x$pci[1])/(x$n1i[1]*x$pci[1]), nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) res <- rma.mv(yi, V, mods = ~ factor(trt) - 1, data=dat) ### compare with results on page 363 expect_equivalent(coef(res), c(0.4875, 0.0006, 0.4047), tolerance=.tol[["coef"]]) ### (results for this not given in chapter) tmp <- vcov(res) expected <- structure(c(0.01287, 0.00623, 0.00371, 0.00623, 0.01037, 0.00399, 0.00371, 0.00399, 0.02416), .Dim = c(3L, 3L), .Dimnames = list(c("factor(trt)1", "factor(trt)2", "factor(trt)3"), c("factor(trt)1", "factor(trt)2", "factor(trt)3"))) expect_equivalent(tmp, expected, tolerance=.tol[["var"]]/10) ### (results for this not given in chapter) expect_equivalent(res$QE, 1.8954, tolerance=.tol[["test"]]) }) test_that("results are correct for the multiple-treatment studies example with difference of arcsine transformed risks.", { dat <- escalc(measure="AS", ai=ai, ci=ci, n1i=n1i, n2i=n2i, data=dat) ### compare with results on page 364 
expect_equivalent(dat$yi*2, c(0.0852, 0.0130, 0.0613, 0.1521, -0.0187, 0.1038), tolerance=.tol[["est"]]) ### need *2 factor due to difference in definition of measure calc.v <- function(x) { v <- matrix(1/(4*x$n1i[1]), nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) res <- rma.mv(yi, V, mods = ~ factor(trt) - 1, data=dat) ### compare with results on page 365 expect_equivalent(coef(res)*2, c(0.1010, 0.0102, 0.0982), tolerance=.tol[["coef"]]) ### compare with results on page 365 tmp <- vcov(res)*2^2 expected <- structure(c(0.00058, 4e-04, 0.00024, 4e-04, 0.00061, 0.00025, 0.00024, 0.00025, 0.00137), .Dim = c(3L, 3L), .Dimnames = list(c("factor(trt)1", "factor(trt)2", "factor(trt)3"), c("factor(trt)1", "factor(trt)2", "factor(trt)3"))) expect_equivalent(tmp, expected, tolerance=.tol[["var"]]/10) ### compare with results on page 365 expect_equivalent(res$QE, 4.2634, tolerance=.tol[["test"]]) ### 4.264 in chapter }) ############################################################################ ### create dataset dat <- data.frame(study=c(1,1,2,3,4,4), trt=c(1,2,1,1,1,2), m1i=c(7.87, 4.35, 9.32, 8.08, 7.44, 5.34), m2i=c(-1.36, -1.36, 0.98, 1.17, 0.45, 0.45), sdpi=c(4.2593,4.2593,2.8831,3.1764,2.9344,2.9344), n1i=c(25,22,38,50,30,30), n2i=c(25,25,40,50,30,30)) test_that("results are correct for the multiple-treatment studies example with standardized mean differences.", { dat$Ni <- unlist(lapply(split(dat, dat$study), function(x) rep(sum(x$n1i) + x$n2i[1], each=nrow(x)))) dat$yi <- with(dat, (m1i-m2i)/sdpi) dat$vi <- with(dat, 1/n1i + 1/n2i + yi^2/(2*Ni)) ### compare with results on page 364 expect_equivalent(dat$yi, c(2.1670, 1.3406, 2.8927, 2.1754, 2.3821, 1.6664), tolerance=.tol[["est"]]) calc.v <- function(x) { v <- matrix(1/x$n2i[1] + outer(x$yi, x$yi, "*")/(2*x$Ni[1]), nrow=nrow(x), ncol=nrow(x)) diag(v) <- x$vi v } V <- bldiag(lapply(split(dat, dat$study), calc.v)) res <- rma.mv(yi, V, mods = ~ factor(trt) - 1, data=dat) ### compare with results on page 367 expect_equivalent(coef(res), c(2.3743, 1.5702), tolerance=.tol[["coef"]]) ### compare with results on page 367 tmp <- vcov(res) expected <- structure(c(0.02257, 0.01244, 0.01244, 0.03554), .Dim = c(2L, 2L), .Dimnames = list(c("factor(trt)1", "factor(trt)2"), c("factor(trt)1", "factor(trt)2"))) expect_equivalent(tmp, expected, tolerance=.tol[["var"]]/10) ### compare with results on page 367 expect_equivalent(res$QE, 3.9447, tolerance=.tol[["test"]]) }) ############################################################################ ### create dataset dat <- data.frame(school=c(1,1,2,2,3,3,4,4,5,5,6,6,7,7), outcome=rep(c("math", "reading"), times=7), m1i=c(2.3,2.5,2.4,1.3,2.5,2.4,3.3,1.7,1.1,2.0,2.8,2.1,1.7,0.6), m2i=c(10.3,6.6,9.7,3.1,8.7,3.7,7.5,8.5,2.2,2.1,3.8,1.4,1.8,3.9), sdpi=c(8.2,7.3,8.3,8.9,8.5,8.3,7.7,9.8,9.1,10.4,9.6,7.9,9.2,10.2), ri=rep(c(.55,.43,.57,.66,.51,.59,.49), each=2), n1i=rep(c(22,21,26,18,38,42,39), each=2), n2i=rep(c(24,21,23,18,36,42,38), each=2)) test_that("results are correct for the multiple-endpoint studies example with standardized mean differences.", { dat$yi <- round(with(dat, (m2i-m1i)/sdpi), 3) dat$vi <- round(with(dat, 1/n1i + 1/n2i + yi^2/(2*(n1i+n2i))), 4) dat$covi <- round(with(dat, (1/n1i + 1/n2i) * ri + (rep(sapply(split(dat$yi, dat$school), prod), each=2) / (2*(n1i+n2i))) * ri^2), 4) V <- bldiag(lapply(split(dat, dat$school), function(x) matrix(c(x$vi[1], x$covi[1], x$covi[2], x$vi[2]), nrow=2))) ### fit model res <- rma.mv(yi, V, mods = ~ outcome - 1, 
data=dat) ### (results for this not given in chapter) expect_equivalent(coef(res), c(0.3617, 0.2051), tolerance=.tol[["coef"]]) ### (results for this not given in chapter) tmp <- vcov(res) expected <- structure(c(0.01008, 0.00537, 0.00537, 0.00989), .Dim = c(2L, 2L), .Dimnames = list(c("outcomemath", "outcomereading"), c("outcomemath", "outcomereading"))) expect_equivalent(tmp, expected, tolerance=.tol[["var"]]/10) ### compare with results on page 371 expect_equivalent(res$QE, 19.6264, tolerance=.tol[["test"]]) ### 19.62 in chapter }) ############################################################################ metafor/tests/testthat/test_misc_robust.r0000644000176200001440000000261413675405440020477 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: robust() function") source("tolerances.r") # read in tolerances test_that("robust() works correctly for 'rma' objects.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) sav <- robust(res, cluster=dat$trial) expect_equivalent(c(vcov(sav)), 0.032106, tolerance=.tol[["var"]]) sav <- robust(res, cluster=dat$trial, adjust=FALSE) expect_equivalent(c(vcov(sav)), 0.029636, tolerance=.tol[["var"]]) res <- rma(yi, vi, weights=1, data=dat) sav <- robust(res, cluster=dat$trial) expect_equivalent(c(vcov(sav)), 0.037028, tolerance=.tol[["var"]]) }) test_that("robust() works correctly for 'rma.mv' objects.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat) sav <- robust(res, cluster=dat$trial) expect_equivalent(c(vcov(sav)), 0.032106, tolerance=.tol[["var"]]) sav <- robust(res, cluster=dat$trial, adjust=FALSE) expect_equivalent(c(vcov(sav)), 0.029636, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, W=1, random = ~ 1 | trial, data=dat) sav <- robust(res, cluster=dat$trial) expect_equivalent(c(vcov(sav)), 0.037028, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_berkey1995.r0000644000176200001440000000552714052500336023371 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:berkey1995 source("tolerances.r") # read in tolerances context("Checking analysis example: berkey1995") ### load BCG dataset data(dat.bcg, package="metafor") ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### calculate "smoothed" sampling variances dat$vi <- with(dat, sum(tneg/tpos)/(13*(tneg+tpos)) + sum(cneg/cpos)/(13*(cneg+cpos))) test_that("results are correct for the random-effects model.", { ### fit random-effects model using empirical Bayes method res.RE <- rma(yi, vi, data=dat, method="EB") out <- capture.output(print(res.RE)) ### so that print.rma.uni() is run (at least once) out <- capture.output(print(summary(res.RE))) ### so that print.summary.rma() is run (at least once) ### compare with results on page 408 expect_equivalent(coef(res.RE), -0.5429, tolerance=.tol[["coef"]]) expect_equivalent(res.RE$se, 0.1842, tolerance=.tol[["se"]]) expect_equivalent(res.RE$tau2, 0.2682, tolerance=.tol[["var"]]) }) test_that("results are correct for the mixed-effects meta-regression model.", { ### fit random-effects model using empirical Bayes method res.RE <- rma(yi, vi, data=dat, method="EB") ### fit 
mixed-effects model with absolute latitude as moderator res.ME <- rma(yi, vi, mods=~I(ablat-33.46), data=dat, method="EB") out <- capture.output(print(res.ME)) ### compare with results on page 408 expect_equivalent(coef(res.ME), c(-0.6303, -0.0268), tolerance=.tol[["coef"]]) ### -0.6304 in article expect_equivalent(res.ME$se, c(0.1591, 0.0110), tolerance=.tol[["se"]]) expect_equivalent(res.ME$tau2, 0.1572, tolerance=.tol[["var"]]) expect_equivalent(anova(res.RE, res.ME)$R2, 41.3844, tolerance=.tol[["r2"]]) ### predicted average risk ratios tmp <- predict(res.ME, newmods=c(33.46,42)-33.46, transf=exp, digits=2) ### compare with results on page 408 expect_equivalent(tmp$pred, c(0.5324, 0.4236), tolerance=.tol[["pred"]]) }) test_that("results are correct for the fixed-effects meta-regression model.", { ### fit fixed-effects model with absolute latitude as moderator res.FE <- rma(yi, vi, mods=~I(ablat-33.46), data=dat, method="FE") ### compare with results on page 408 expect_equivalent(coef(res.FE), c(-0.5949, -0.0282), tolerance=.tol[["coef"]]) ### -0.5950 in article expect_equivalent(res.FE$se, c(0.0696, 0.0040), tolerance=.tol[["se"]]) ### 0.0039 in article ### predicted risk ratios based on the fixed-effects model tmp <- predict(res.FE, newmods=c(33.46,42)-33.46, transf=exp, digits=2) ### compare with results on page 408 expect_equivalent(tmp$pred, c(0.5516, 0.4336), tolerance=.tol[["pred"]]) }) metafor/tests/testthat/test_misc_rma_mv.r0000644000176200001440000002006713465304701020437 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma.mv() function") source("tolerances.r") # read in tolerances data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) test_that("rma.mv() correctly handles a formula for the 'yi' argument", { res1 <- rma.mv(yi ~ ablat, vi, random = ~ 1 | trial, data=dat) res2 <- rma.mv(yi, vi, mods = ~ ablat, random = ~ 1 | trial, data=dat) expect_equivalent(coef(res1), coef(res2), tolerance=.tol[["coef"]]) }) test_that("rma.mv() works correctly when using user-defined weights", { res <- rma.mv(yi, vi, W=1, random = ~ 1 | trial, data=dat) expect_equivalent(coef(res), mean(dat$yi), tolerance=.tol[["coef"]]) expect_equivalent(c(vcov(res)), 0.0358, tolerance=.tol[["var"]]) }) test_that("rma.mv() correctly handles negative sampling variances", { dat$vi[1] <- -.01 expect_warning(res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat)) expect_equivalent(coef(res), -0.7220, tolerance=.tol[["coef"]]) expect_equivalent(c(vcov(res)), 0.0293, tolerance=.tol[["var"]]) }) test_that("rma.mv() correctly handles a missing value", { dat$vi[1] <- NA expect_warning(res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat)) expect_equivalent(coef(res), -0.7071, tolerance=.tol[["coef"]]) expect_equivalent(c(vcov(res)), 0.0361, tolerance=.tol[["var"]]) }) test_that("rma.mv() correctly handles the R argument", { P <- structure(c(1.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 1.000, 0.621, 0.621, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.000, 0.621, 1.000, 0.642, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.000, 0.621, 0.642, 1.000, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.128, 0.000, 0.128, 0.128, 0.128, 1.000, 0.266, 0.266, 0.221, 0.221, 0.221, 0.157, 0.157, 0.157, 0.157, 0.157, 0.000, 0.128, 0.128, 0.128, 0.266, 1.000, 
0.467, 0.221, 0.221, 0.221, 0.157, 0.157, 0.157, 0.157, 0.157, 0.000, 0.128, 0.128, 0.128, 0.266, 0.467, 1.000, 0.221, 0.221, 0.221, 0.157, 0.157, 0.157, 0.157, 0.157, 0.000, 0.128, 0.128, 0.128, 0.221, 0.221, 0.221, 1.000, 0.605, 0.296, 0.157, 0.157, 0.157, 0.157, 0.157, 0.000, 0.128, 0.128, 0.128, 0.221, 0.221, 0.221, 0.605, 1.000, 0.296, 0.157, 0.157, 0.157, 0.157, 0.157, 0.000, 0.128, 0.128, 0.128, 0.221, 0.221, 0.221, 0.296, 0.296, 1.000, 0.157, 0.157, 0.157, 0.157, 0.157, 0.000, 0.128, 0.128, 0.128, 0.157, 0.157, 0.157, 0.157, 0.157, 0.157, 1.000, 0.773, 0.390, 0.390, 0.390, 0.000, 0.128, 0.128, 0.128, 0.157, 0.157, 0.157, 0.157, 0.157, 0.157, 0.773, 1.000, 0.390, 0.390, 0.390, 0.000, 0.128, 0.128, 0.128, 0.157, 0.157, 0.157, 0.157, 0.157, 0.157, 0.390, 0.390, 1.000, 0.606, 0.606, 0.000, 0.128, 0.128, 0.128, 0.157, 0.157, 0.157, 0.157, 0.157, 0.157, 0.390, 0.390, 0.606, 1.000, 0.697, 0.000, 0.128, 0.128, 0.128, 0.157, 0.157, 0.157, 0.157, 0.157, 0.157, 0.390, 0.390, 0.606, 0.697, 1.000), .Dim = c(15L, 15L), .Dimnames = list(c("S11", "S15", "S06", "S10", "S08", "S02", "S07", "S14", "S09", "S01", "S12", "S05", "S13", "S04", "S03"), c("S11", "S15", "S06", "S10", "S08", "S02", "S07", "S14", "S09", "S01", "S12", "S05", "S13", "S04", "S03"))) dat <- structure(list(study = 1:44, species = c("S01", "S01", "S02", "S02", "S02", "S02", "S03", "S03", "S03", "S03", "S04", "S04", "S04", "S04", "S05", "S05", "S05", "S06", "S06", "S06", "S06", "S07", "S07", "S08", "S08", "S08", "S09", "S09", "S10", "S10", "S10", "S11", "S11", "S11", "S11", "S12", "S12", "S13", "S13", "S13", "S14", "S14", "S15", "S15"), phylogeny = c("S01", "S01", "S02", "S02", "S02", "S02", "S03", "S03", "S03", "S03", "S04", "S04", "S04", "S04", "S05", "S05", "S05", "S06", "S06", "S06", "S06", "S07", "S07", "S08", "S08", "S08", "S09", "S09", "S10", "S10", "S10", "S11", "S11", "S11", "S11", "S12", "S12", "S13", "S13", "S13", "S14", "S14", "S15", "S15"), yi = c(1.91, 1.67, -0.92, -0.1, -0.58, -1.29, 0.04, -1.33, 0.02, -1, 0.2, 1.75, -0.75, 1.36, 1.24, 0.64, 0.52, 1.93, 1.11, 1.12, 1.17, 0.25, 1.95, -0.06, -0.79, 0.39, 1.61, 1.96, 0.93, 0.5, 0.73, -0.7, 0.11, 0.84, 1.83, -0.59, 0.19, 0.14, 0.74, 0.55, 0.34, -1.16, 1.93, 1.85), vi = c(0.213, 0.387, 0.381, 0.467, 0.132, 0.603, 0.374, 0.2, 0.119, 0.092, 0.139, 0.449, 0.412, 0.398, 0.25, 0.168, 0.303, 0.125, 0.164, 0.229, 0.482, 0.059, 0.421, 0.111, 0.373, 0.032, 0.062, 0.126, 0.066, 0.155, 0.229, 0.276, 0.039, 0.409, 0.312, 0.304, 0.601, 0.096, 0.216, 0.181, 0.537, 0.16, 0.303, 0.281)), .Names = c("study", "species", "phylogeny", "yi", "vi"), row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44"), class = "data.frame") res <- rma.mv(yi, vi, random = list(~ 1 | study, ~ 1 | species, ~ 1 | phylogeny), R = list(phylogeny=P), data=dat) expect_equivalent(coef(res), .5504, tolerance=.tol[["coef"]]) expect_equivalent(res$sigma2, c(0.1763, 0.5125, 0.1062), tolerance=.tol[["var"]]) expect_equivalent(c(logLik(res)), -54.6272, tolerance=.tol[["fit"]]) }) data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) test_that("rma.mv() correctly computes the Hessian", { res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(hessian=TRUE)) expect_equivalent(c(sqrt(1/res$hessian)), 0.1678, tolerance=.tol[["se"]]) }) 
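### Added illustration (not part of the original test file): a minimal sketch of how
### the Hessian stored via control=list(hessian=TRUE) in the preceding test can be
### turned into an approximate standard error for the variance component; it assumes
### the 'dat' object created above from dat.bcg and uses only values that rma.mv()
### is known to return here (res$sigma2 and res$hessian).
res.h <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(hessian=TRUE))
se.sigma2 <- c(sqrt(1/res.h$hessian)) # 1x1 Hessian here, so its inverse is simply 1/hessian
round(c(sigma2 = res.h$sigma2, se.sigma2 = se.sigma2), 4) # roughly 0.3132 and 0.1678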
test_that("rma.mv() works correctly with test='t'", { res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, test="t") expect_equivalent(res$pval, 0.0018, tolerance=.tol[["pval"]]) }) test_that("rma.mv() works correctly with different optimizers", { res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="optim", optmethod="BFGS")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="optim", optmethod="L-BFGS-B")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="optim", optmethod="Nelder-Mead")) expect_equivalent(res$sigma2, 0.3133, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="nlminb")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="uobyqa")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="newuoa")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="bobyqa")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="nloptr")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="nlm")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="hjk")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="nmk")) expect_equivalent(res$sigma2, 0.3131, tolerance=.tol[["var"]]) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, control=list(optimizer="ucminf")) expect_equivalent(res$sigma2, 0.3132, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_misc_diagnostics_rma.mv.r0000644000176200001440000002714513465257342022761 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: model diagnostic functions for rma.mv()") source("tolerances.r") # read in tolerances dat1 <- dat.konstantopoulos2011 dat1 <- dat1[dat1$district %in% c(11, 12, 18, 71, 108, 644),] rownames(dat1) <- 1:nrow(dat1) dat1$yi[dat1$district %in% 12] <- NA ### all values for district 12 are missing dat1$yi[dat1$district %in% 18 & dat1$school == 2] <- NA ### second value for district 18 is missing dat1$yi[dat1$district %in% 108] <- dat1$yi[dat1$district %in% 108] + 1 ### increase district level variance dat1$district11 <- ifelse(dat1$district == 11, 1, 0) ### dummy for district 11 dat1$study53 <- ifelse(dat1$study == 53, 1, 0) ### dummies for studies in district 644 dat1$study54 <- ifelse(dat1$study == 54, 1, 0) ### dummies for studies in district 644 dat1$study55 <- ifelse(dat1$study == 55, 1, 0) ### dummies for studies in district 644 dat1$study56 <- ifelse(dat1$study == 56, 1, 0) ### dummies for studies in district 644 #set.seed(123214) #dat2 <- dat1[sample(nrow(dat1)),] ### reshuffled dataset dat2 <- dat1[c(23, 2, 6, 3, 19, 14, 20, 12, 21, 9, 13, 7, 11, 8, 10, 22, 18, 1, 5, 4, 17, 15, 16),] res1 <- suppressWarnings(rma.mv(yi, vi, mods = ~ district11 + study53 + study54 + study55 + study56, random = ~ 1 | 
district/school, data=dat1, slab=study)) res2 <- suppressWarnings(rma.mv(yi, vi, mods = ~ district11 + study53 + study54 + study55 + study56, random = ~ 1 | district/school, data=dat2, slab=study)) test_that("model diagnostic functions work with 'na.omit'.", { skip_on_cran() options(na.action="na.omit") sav1 <- rstandard(res1) sav2 <- rstandard(res2) sav2 <- sav2[match(sav1$slab, sav2$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$resid), rep(FALSE,18)) sav1 <- rstandard(res1, cluster=dat1$district) sav2 <- rstandard(res2, cluster=dat2$district) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), rep(FALSE,18)) expect_equivalent(is.na(sav1$cluster$X2), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- rstudent(res1) sav2 <- rstudent(res2) sav2 <- sav2[match(sav1$slab, sav2$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$resid), c(rep(FALSE,14), rep(TRUE,4))) sav1 <- rstudent(res1, cluster=dat1$district) sav2 <- rstudent(res2, cluster=dat2$district) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,4), rep(FALSE,10), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- rstudent(res1, cluster=dat1$district, parallel="snow") sav2 <- rstudent(res2, cluster=dat2$district, parallel="snow") sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,4), rep(FALSE,10), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- rstudent(res1, cluster=dat1$district, reestimate=FALSE) sav2 <- rstudent(res2, cluster=dat2$district, reestimate=FALSE) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,4), rep(FALSE,10), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- rstudent(res1, cluster=dat1$district, parallel="snow", reestimate=FALSE) sav2 <- rstudent(res2, cluster=dat2$district, parallel="snow", reestimate=FALSE) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,4), rep(FALSE,10), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1) sav2 <- cooks.distance(res2) sav2 <- sav2[match(names(sav1), names(sav2))] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(rep(FALSE,14), rep(TRUE,4))) sav1 <- cooks.distance(res1, cluster=dat1$district) sav2 <- cooks.distance(res2, cluster=dat2$district) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1, cluster=dat1$district, parallel="snow") sav2 <- cooks.distance(res2, cluster=dat2$district, parallel="snow") expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1, cluster=dat1$district, reestimate=FALSE) sav2 <- cooks.distance(res2, cluster=dat2$district, reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1, cluster=dat1$district, parallel="snow", reestimate=FALSE) sav2 <- cooks.distance(res2, cluster=dat2$district, parallel="snow", reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(TRUE, 
rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1) sav2 <- dfbetas(res2) sav2 <- sav2[match(rownames(sav1), rownames(sav2)),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(rep(FALSE,14), rep(TRUE,4))) sav1 <- dfbetas(res1, cluster=dat1$district) sav2 <- dfbetas(res2, cluster=dat2$district) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1, cluster=dat1$district, parallel="snow") sav2 <- dfbetas(res2, cluster=dat2$district, parallel="snow") expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1, cluster=dat1$district, reestimate=FALSE) sav2 <- dfbetas(res2, cluster=dat2$district, reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1, cluster=dat1$district, parallel="snow", reestimate=FALSE) sav2 <- dfbetas(res2, cluster=dat2$district, parallel="snow", reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(TRUE, rep(FALSE,3), TRUE)) sav1 <- ranef(res1) sav2 <- ranef(res2) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$district$intrcpt), rep(FALSE,5)) expect_equivalent(is.na(sav1$`district/school`$intrcpt), rep(FALSE,18)) }) test_that("model diagnostic functions work with 'na.pass'.", { skip_on_cran() options(na.action="na.pass") sav1 <- rstandard(res1) sav2 <- rstandard(res2) sav2 <- sav2[match(sav1$slab, sav2$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$resid), c(rep(FALSE,4), rep(TRUE,4), FALSE, TRUE, rep(FALSE,13))) sav1 <- rstandard(res1, cluster=dat1$district) sav2 <- rstandard(res2, cluster=dat2$district) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(FALSE,4), rep(TRUE,4), FALSE, TRUE, rep(FALSE,13))) expect_equivalent(is.na(sav1$cluster$X2), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- rstudent(res1) sav2 <- rstudent(res2) sav2 <- sav2[match(sav1$slab, sav2$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$resid), c(rep(FALSE,4), rep(TRUE,4), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) sav1 <- rstudent(res1, cluster=dat1$district) sav2 <- rstudent(res2, cluster=dat2$district) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,8), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- rstudent(res1, cluster=dat1$district, parallel="snow") sav2 <- rstudent(res2, cluster=dat2$district, parallel="snow") sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,8), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- rstudent(res1, cluster=dat1$district, reestimate=FALSE) sav2 <- rstudent(res2, cluster=dat2$district, reestimate=FALSE) sav2$obs <- sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,8), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- rstudent(res1, cluster=dat1$district, parallel="snow", reestimate=FALSE) sav2 <- rstudent(res2, cluster=dat2$district, parallel="snow", reestimate=FALSE) sav2$obs <- 
sav2$obs[match(sav1$obs$slab, sav2$obs$slab),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$obs$resid), c(rep(TRUE,8), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) expect_equivalent(is.na(sav1$cluster$X2), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1) sav2 <- cooks.distance(res2) sav2 <- sav2[match(names(sav1), names(sav2))] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(rep(FALSE,4), rep(TRUE,4), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) sav1 <- cooks.distance(res1, cluster=dat1$district) sav2 <- cooks.distance(res2, cluster=dat2$district) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1, cluster=dat1$district, parallel="snow") sav2 <- cooks.distance(res2, cluster=dat2$district, parallel="snow") expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1, cluster=dat1$district, reestimate=FALSE) sav2 <- cooks.distance(res2, cluster=dat2$district, reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- cooks.distance(res1, cluster=dat1$district, parallel="snow", reestimate=FALSE) sav2 <- cooks.distance(res2, cluster=dat2$district, parallel="snow", reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1) sav2 <- dfbetas(res2) sav2 <- sav2[match(rownames(sav1), rownames(sav2)),] expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(rep(FALSE,4), rep(TRUE,4), FALSE, TRUE, rep(FALSE,9), rep(TRUE,4))) sav1 <- dfbetas(res1, cluster=dat1$district) sav2 <- dfbetas(res2, cluster=dat2$district) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1, cluster=dat1$district, parallel="snow") sav2 <- dfbetas(res2, cluster=dat2$district, parallel="snow") expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1, cluster=dat1$district, reestimate=FALSE) sav2 <- dfbetas(res2, cluster=dat2$district, reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- dfbetas(res1, cluster=dat1$district, parallel="snow", reestimate=FALSE) sav2 <- dfbetas(res2, cluster=dat2$district, parallel="snow", reestimate=FALSE) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$intrcpt), c(rep(TRUE,2), rep(FALSE,3), TRUE)) sav1 <- ranef(res1) sav2 <- ranef(res2) expect_equivalent(sav1, sav2) expect_equivalent(is.na(sav1$district$intrcpt), c(FALSE, TRUE, rep(FALSE,4))) expect_equivalent(is.na(sav1$`district/school`$intrcpt), c(rep(FALSE,4), rep(TRUE,4), FALSE, TRUE, rep(FALSE,13))) }) metafor/tests/testthat/tolerances.r0000644000176200001440000000216013765624461017250 0ustar liggesusers.tol <- c(est = .01, # effect size estimates coef = .01, # model coefficients pred = .01, # predicted values, BLUPs, also residuals se = .01, # standard errors test = .01, # test statistics, standardized residuals pval = .01, # p-values ci = .01, # confidence/prediction interval bounds, CI for effects var = .01, # variance components (and CIs thereof), also if sqrt(), var-cov matrices, sampling variances cor = .01, # correlations, ICCs cov = .01, # covariances sevar = .01, # SEs of variance components fit = .01, # fit statistics r2 = .01, # R^2 type values het = .01, # heterogeneity 
statistics (and CIs thereof) inf = .01, # influence statistics, hat values den = .01, # density misc = .01) # miscellaneous, mix of values # to quickly set all tolerances to a common value #.tol[1:length(.tol)] <- .001 # note to self: search for "]]/10 or "]]*10 to find adjusted tolerances in tests # some hardcoded tolerances; search for: tolerance=.0 metafor/tests/testthat/test_misc_vif.r0000644000176200001440000000172713675405734017755 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: vif() function") source("tolerances.r") # read in tolerances test_that("vif() works correctly for 'rma.uni' objects.", { dat <- dat.bangertdrowns2004 dat <- dat[!apply(dat[,c("length", "wic", "feedback", "info", "pers", "imag", "meta")], 1, anyNA),] res <- rma(yi, vi, mods = ~ length + wic + feedback + info + pers + imag + meta, data=dat) sav <- vif(res) out <- capture.output(print(sav)) vifs <- c(length = 1.53710262575577, wic = 1.38604929927746, feedback = 1.64904565071108, info = 1.83396138431786, pers = 5.67803138275492, imag = 1.1553714953831, meta = 4.53327503733189) expect_equivalent(sav$vif, vifs) sav <- vif(res, table=TRUE) out <- capture.output(print(sav)) expect_equivalent(sav$vif$vif[-1], vifs) sav <- vif(res, btt=2:3) out <- capture.output(print(sav)) gvif <- 2.06507966959426 expect_equivalent(sav$gvif, gvif) }) metafor/tests/testthat/test_misc_fsn.r0000644000176200001440000000467313712636274017755 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: fsn() function") source("tolerances.r") # read in tolerances test_that("fsn() gives correct results for the 'expectancy data' in Becker (2005).", { data(dat.raudenbush1985, package="metafor") sav <- fsn(yi, vi, data=dat.raudenbush1985) expect_equivalent(sav$fsnum, 26) ### note: Becker uses p-values based on t-tests, which yields N =~ 23 out <- capture.output(print(sav)) ### so that print.fsn() is run (at least once) sav <- fsn(yi, data=dat.raudenbush1985, type="Orwin", target=.05) expect_equivalent(sav$fsnum, 44) ### note: Becker finds N = 4, but uses the FE model estimate with 1/vi weights for ### the average effect size, but Orwin's method is based on units weighting out <- capture.output(print(sav)) ### so that print.fsn() is run (at least once) with type="Orwin" sav <- fsn(yi, vi, data=dat.raudenbush1985, type="Rosenberg") expect_equivalent(sav$fsnum, 0) out <- capture.output(print(sav)) ### so that print.fsn() is run (at least once) with type="Rosenberg" }) test_that("fsn() gives correct results for the 'passive smoking data' in Becker (2005).", { data(dat.hackshaw1998, package="metafor") sav <- fsn(yi, vi, data=dat.hackshaw1998) expect_equivalent(sav$fsnum, 393) ### note: Becker finds N =~ 398 (due to rounding) sav <- fsn(yi, data=dat.hackshaw1998, type="Orwin", target=.049) expect_equivalent(sav$fsnum, 186) ### note: Becker finds N = 103, but uses the FE model estimate with 1/vi weights for ### the average effect size, but Orwin's method is based on units weighting sav <- fsn(yi, vi, data=dat.hackshaw1998, type="Rosenberg") expect_equivalent(sav$fsnum, 202) }) test_that("fsn() gives correct results for the 'interview data' in Becker (2005).", { data(dat.mcdaniel1994, package="metafor") dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat.mcdaniel1994) sav <- fsn(yi, vi, data=dat) expect_equivalent(sav$fsnum, 50364) ### note: Becker uses p-values based on t-tests, which yields N =~ 51226 sav <- fsn(yi, data=dat, 
type="Orwin", target=.15) expect_equivalent(sav$fsnum, 129) ### note: Becker finds N = 64, but uses the FE model estimate with 1/vi weights for ### the average effect size, but Orwin's methods is based on units weighting sav <- fsn(yi, vi, data=dat, type="Rosenberg") expect_equivalent(sav$fsnum, 45528) }) metafor/tests/testthat/test_analysis_example_stijnen2010.r0000644000176200001440000002135414052500415023527 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:stijnen2010 context("Checking analysis example: stijnen2010") source("tolerances.r") # read in tolerances ### load data dat <- dat.nielweise2007 test_that("results for the normal-normal model are correct (measure=='PLO')", { res <- rma(measure="PLO", xi=ci, ni=n2i, data=dat) ### compare with results on page 3050 (Table II) expect_equivalent(coef(res), -3.3018, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2378, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.6629, tolerance=.tol[["var"]]) tmp <- predict(res, transf=transf.ilogit) expect_equivalent(tmp$pred, 0.0355, tolerance=.tol[["pred"]]) ### 0.035 in paper expect_equivalent(tmp$ci.lb, 0.0226, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.0554, tolerance=.tol[["ci"]]) ### 0.056 in paper res <- rma(measure="PLO", xi=ai, ni=n1i, data=dat) ### compare with results on page 3050 (Table II) expect_equivalent(coef(res), -4.2604, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2589, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.3928, tolerance=.tol[["var"]]) tmp <- predict(res, transf=transf.ilogit) expect_equivalent(tmp$pred, 0.0139, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.0084, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.0229, tolerance=.tol[["ci"]]) }) test_that("results for the binomial-normal normal are correct (measure=='PLO')", { skip_on_cran() res <- rma.glmm(measure="PLO", xi=ci, ni=n2i, data=dat) ### compare with results on page 3050 (Table II) expect_equivalent(coef(res), -3.4964, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2570, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.8124, tolerance=.tol[["var"]]) tmp <- predict(res, transf=transf.ilogit) expect_equivalent(tmp$pred, 0.0294, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.0180, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.0478, tolerance=.tol[["ci"]]) res <- rma.glmm(measure="PLO", xi=ai, ni=n1i, data=dat) ### compare with results on page 3050 (Table II) expect_equivalent(coef(res), -4.8121, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.3555, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.8265, tolerance=.tol[["var"]]) tmp <- predict(res, transf=transf.ilogit) expect_equivalent(tmp$pred, 0.0081, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.0040, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.0161, tolerance=.tol[["ci"]]) }) test_that("results for the normal-normal model are correct (measure=='OR')", { expect_warning(res <- rma(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, drop00=TRUE)) ### compare with results on page 3052 (Table III) expect_equivalent(coef(res), -0.9804, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2435, tolerance=.tol[["se"]]) ### 0.244 in paper expect_equivalent(sqrt(res$tau2), 0.1886, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 0.3752, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.2328, 
tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.6046, tolerance=.tol[["ci"]]) ### 0.62 in paper }) test_that("results for the conditional logistic model with exact likelihood are correct (measure=='OR')", { skip_on_cran() expect_warning(res <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="CM.EL")) out <- capture.output(print(res)) ### so that print.rma.glmm() is run (at least once) ### compare with results on page 3052 (Table III) expect_equivalent(coef(res), -1.3532, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.3510, tolerance=.tol[["se"]]) expect_equivalent(sqrt(res$tau2), 0.8327, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 0.2584, tolerance=.tol[["pred"]]) ### 0.25 in paper expect_equivalent(tmp$ci.lb, 0.1299, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.5141, tolerance=.tol[["ci"]]) }) test_that("results for the conditional logistic model with approximate likelihood are correct (measure=='OR')", { skip_on_cran() expect_warning(res <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="CM.AL")) ### compare with results on page 3052 (Table III) expect_equivalent(coef(res), -1.3027, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.3386, tolerance=.tol[["se"]]) expect_equivalent(sqrt(res$tau2), 0.7750, tolerance=.tol[["var"]]) ### 0.77 in paper tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 0.2718, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.1400, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.5279, tolerance=.tol[["ci"]]) }) ############################################################################ ### load data dat <- dat.nielweise2008 ### incidence rates reflect the expected number of events per 1000 days dat$t1i <- dat$t1i/1000 dat$t2i <- dat$t2i/1000 test_that("results for the normal-normal model are correct (measure=='IRLN')", { res <- rma(measure="IRLN", xi=x2i, ti=t2i, data=dat) ### compare with results on page 3054 (Table VII) expect_equivalent(coef(res), 1.4676, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2425, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.3699, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 4.3389, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 2.6973, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 6.9795, tolerance=.tol[["ci"]]) ### 6.99 in paper res <- rma(measure="IRLN", xi=x1i, ti=t1i, data=dat) ### compare with results on page 3054 (Table VII) expect_equivalent(coef(res), 0.9808, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.3259, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.6393, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 2.6667, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 1.4078, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 5.0513, tolerance=.tol[["ci"]]) }) test_that("results for the Poisson-normal model are correct (measure=='IRLN')", { skip_on_cran() res <- rma.glmm(measure="IRLN", xi=x2i, ti=t2i, data=dat) ### compare with results on page 3054 (Table VII) expect_equivalent(coef(res), 1.4007, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2310, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.3165, tolerance=.tol[["var"]]) ### 0.316 in paper tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 4.0580, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 2.5803, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 6.3819, 
tolerance=.tol[["ci"]]) res <- rma.glmm(measure="IRLN", xi=x1i, ti=t1i, data=dat) ### compare with results on page 3054 (Table VII) expect_equivalent(coef(res), 0.8494, tolerance=.tol[["coef"]]) ### 0.850 in paper expect_equivalent(res$se, 0.3303, tolerance=.tol[["se"]]) expect_equivalent(res$tau2, 0.6543, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 2.3383, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 1.2240, tolerance=.tol[["ci"]]) ### 1.23 in paper expect_equivalent(tmp$ci.ub, 4.4670, tolerance=.tol[["ci"]]) }) test_that("results for the normal-normal model are correct (measure=='IRR')", { res <- rma(measure="IRR", x1i=x1i, t1i=t1i, x2i=x2i, t2i=t2i, data=dat) ### compare with results on page 3055 (Table VIII) expect_equivalent(coef(res), -0.3963, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2268, tolerance=.tol[["se"]]) ### 0.223 in paper expect_equivalent(sqrt(res$tau2), 0.3060, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 0.6728, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.4314, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 1.0494, tolerance=.tol[["ci"]]) ### 1.04 in paper }) test_that("results for the Poisson-normal model are correct (measure=='IRR')", { skip_on_cran() res <- rma.glmm(measure="IRR", x1i=x1i, t1i=t1i, x2i=x2i, t2i=t2i, data=dat, model="CM.EL") ### compare with results on page 3055 (Table VIII) expect_equivalent(coef(res), -0.4762, tolerance=.tol[["coef"]]) expect_equivalent(res$se, 0.2377, tolerance=.tol[["se"]]) expect_equivalent(sqrt(res$tau2), 0.3501, tolerance=.tol[["var"]]) tmp <- predict(res, transf=exp) expect_equivalent(tmp$pred, 0.6211, tolerance=.tol[["pred"]]) expect_equivalent(tmp$ci.lb, 0.3898, tolerance=.tol[["ci"]]) expect_equivalent(tmp$ci.ub, 0.9897, tolerance=.tol[["ci"]]) }) metafor/tests/testthat/test_analysis_example_lipsey2001.r0000644000176200001440000001161714052500372023365 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:lipsey2001 context("Checking analysis example: lipsey2001") source("tolerances.r") # read in tolerances ### create dataset dat <- data.frame( id = c(100, 308, 1596, 2479, 9021, 9028, 161, 172, 537, 7049), yi = c(-0.33, 0.32, 0.39, 0.31, 0.17, 0.64, -0.33, 0.15, -0.02, 0.00), vi = c(0.084, 0.035, 0.017, 0.034, 0.072, 0.117, 0.102, 0.093, 0.012, 0.067), random = c(0, 0, 0, 0, 0, 0, 1, 1, 1, 1), intensity = c(7, 3, 7, 5, 7, 7, 4, 4, 5, 6)) test_that("results are correct for the fixed-effects model.", { res <- rma(yi, vi, data=dat, method="FE") ### compare with results on page 133 (Exhibit 7.3) expect_equivalent(c(as.matrix(coef(summary(res)))), c(0.1549, 0.0609, 2.5450, 0.0109, 0.0356, 0.2742), tolerance=.tol[["misc"]]) expect_equivalent(res$QE, 14.7640, tolerance=.tol[["test"]]) expect_equivalent(res$QEp, 0.0976, tolerance=.tol[["pval"]]) }) test_that("results are correct for the random-effects model.", { res <- rma(yi, vi, data=dat, method="DL") ### compare with results on page 133 (Exhibit 7.3) expect_equivalent(c(as.matrix(coef(summary(res)))), c(0.1534, 0.0858, 1.7893, 0.0736, -0.0146, 0.3215), tolerance=.tol[["misc"]]) expect_equivalent(res$tau2, 0.025955, tolerance=.tol[["var"]]) }) test_that("results are correct for the ANOVA-type analysis.", { res <- rma(yi, vi, mods = ~ random, data=dat, method="FE") res0 <- rma(yi, vi, data=dat, method="FE", subset=random==0) res1 <- rma(yi, vi, data=dat, 
method="FE", subset=random==1) tmp <- predict(res, newmods=c(0,1)) tmp <- do.call(cbind, unclass(tmp)[1:4]) ### compare with results on page 138 (Exhibit 7.4) expect_equivalent(tmp[1,], c( 0.2984, 0.0813, 0.1390, 0.4578), tolerance=.tol[["pred"]]) expect_equivalent(tmp[2,], c(-0.0277, 0.0917, -0.2075, 0.1521), tolerance=.tol[["se"]]) expect_equivalent(res$QM, 7.0739, tolerance=.tol[["test"]]) ### 7.0738 in chapter expect_equivalent(res$QMp, 0.0078, tolerance=.tol[["pval"]]) expect_equivalent(res$QE, 7.6901, tolerance=.tol[["test"]]) ### 7.6902 in chapter expect_equivalent(res$QEp, 0.4643, tolerance=.tol[["pval"]]) expect_equivalent(res0$QE, 6.4382, tolerance=.tol[["test"]]) ### 6.4383 in chapter expect_equivalent(res0$QEp, 0.2659, tolerance=.tol[["pval"]]) expect_equivalent(res1$QE, 1.2519, tolerance=.tol[["test"]]) expect_equivalent(res1$QEp, 0.7406, tolerance=.tol[["pval"]]) }) test_that("results are correct for the meta-regression analysis (fixed-effects with moderators model).", { res <- rma(yi, vi, mods = ~ random + intensity, data=dat, method="FE") expected <- structure(list(estimate = c(0.3223, -0.3298, -0.0041), se = c(0.2998, 0.1304, 0.0493), zval = c(1.0752, -2.5286, -0.0829), pval = c(0.2823, 0.0115, 0.9339), ci.lb = c(-0.2652, -0.5854, -0.1007), ci.ub = c(0.9099, -0.0742, 0.0925)), .Names = c("estimate", "se", "zval", "pval", "ci.lb", "ci.ub"), row.names = c("intrcpt", "random", "intensity"), class = "data.frame") ### compare with results on page 141 (Exhibit 7.6) expect_equivalent(coef(summary(res)), expected, tolerance=.tol[["misc"]]) expect_equivalent(res$QM, 7.0807, tolerance=.tol[["test"]]) expect_equivalent(res$QMp, 0.0290, tolerance=.tol[["pval"]]) expect_equivalent(res$QE, 7.6832, tolerance=.tol[["test"]]) ### 7.6833 in chapter expect_equivalent(res$QEp, 0.3614, tolerance=.tol[["pval"]]) ### 0.3613 in chapter }) test_that("results are correct for the meta-regression analysis (mixed-effects model).", { res <- rma(yi, vi, mods = ~ random + intensity, data=dat, method="DL") expected <- structure(list(estimate = c(0.3311, -0.3269, -0.0068), se = c(0.3198, 0.1439, 0.0528), zval = c(1.0351, -2.2712, -0.1292), pval = c(0.3006, 0.0231, 0.8972), ci.lb = c(-0.2958, -0.609, -0.1103), ci.ub = c(0.9579, -0.0448, 0.0967)), .Names = c("estimate", "se", "zval", "pval", "ci.lb", "ci.ub"), row.names = c("intrcpt", "random", "intensity"), class = "data.frame") ### compare with results on page 141 (Exhibit 7.7) expect_equivalent(coef(summary(res)), expected, tolerance=.tol[["misc"]]) expect_equivalent(res$QM, 5.5711, tolerance=.tol[["test"]]) ### 5.5709 in chapter expect_equivalent(res$QMp, 0.0617, tolerance=.tol[["pval"]]) expect_equivalent(res$tau2, 0.00488, tolerance=.tol[["var"]]) }) test_that("results are correct for the comutation of R^2 via the anova() function.", { res.ME <- rma(yi, vi, mods = ~ random + intensity, data=dat, method="DL") res.RE <- rma(yi, vi, data=dat, method="DL") tmp <- anova(res.RE, res.ME) expect_equivalent(tmp$R2, 81.2023, tolerance=.tol[["r2"]]) }) metafor/tests/testthat/test_plots_radial_plot.r0000644000176200001440000000120314052500500021631 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:radial_plot context("Checking plots example: radial (Galbraith) plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### adjust margins so the space is better used par(mar=c(5,4,0,2)) 
### load ETS data data(dat.hackshaw1998, package="metafor") ### fit fixed-effects model res <- rma(yi, vi, data=dat.hackshaw1998, method="FE") ### draw radial plot radial(res) par(opar) }) metafor/tests/testthat/test_plots_funnel_plot_with_trim_and_fill.r0000644000176200001440000000146214052500456025623 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:funnel_plot_with_trim_and_fill context("Checking plots example: funnel plot with trim and fill") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(5,4,1,2)) ### load ETS data data(dat.hackshaw1998, package="metafor") ### fit random-effects model res <- rma(yi, vi, data=dat.hackshaw1998, measure="OR") ### carry out trim-and-fill analysis taf <- trimfill(res) ### draw funnel plot with missing studies filled in funnel(taf, legend=TRUE) par(opar) out <- capture.output(print(taf)) }) metafor/tests/testthat/test_misc_formula.r0000644000176200001440000000145613675407162020634 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: formula() function") source("tolerances.r") # read in tolerances data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) test_that("formula() works correctly for 'rma.uni' objects.", { res <- rma(yi, vi, data=dat, method="DL") expect_null(formula(res, type="mods")) expect_null(formula(res, type="yi")) res <- rma(yi, vi, mods = ~ ablat, data=dat, method="DL") expect_equal(~ablat, formula(res, type="mods")) expect_null(formula(res, type="yi")) res <- rma(yi ~ ablat, vi, data=dat, method="DL") expect_null(formula(res, type="mods")) expect_equal(yi~ablat, formula(res, type="yi")) expect_error(formula(res, type="scale")) }) metafor/tests/testthat/test_misc_weights.r0000644000176200001440000001271413465307140020630 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: weights() function") source("tolerances.r") # read in tolerances test_that("weights are correct for rma() with method='FE'.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### weighted analysis res <- rma(yi, vi, data=dat, method="FE") ### weights should be the same as 1/vi (scaled to percentages) expect_equivalent(weights(res), (1/dat$vi)/sum(1/dat$vi) * 100) ### weights should be the same as 1/vi expect_equivalent(diag(weights(res, type="matrix")), 1/dat$vi) ### weighted analysis with user defined weights res <- rma(yi, vi, data=dat, method="FE", weights=1:13) ### weights should match (scaled to percentages) expect_equivalent(weights(res), (1:13)/sum(1:13) * 100) ### unweighted analysis res <- rma(yi, vi, data=dat, method="FE", weighted=FALSE) ### weights should be the same as 1/k (scaled to percentages) expect_equivalent(weights(res), rep(1/res$k, res$k) * 100) ### unweighted analysis (but user has specified weights nevertheless) res <- rma(yi, vi, data=dat, method="FE", weighted=FALSE, weights=1:13) ### weights should be the same as 1/k (scaled to percentages) expect_equivalent(weights(res), rep(1/res$k, res$k) * 100) }) test_that("weights are correct for rma() with method='DL'.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) 
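### note: with method="DL" this is a random-effects model, so the default weights should be 1/(vi + tau^2); weights() should return them rescaled to percentages and weights(..., type="matrix") the raw weight matrix (both checked below)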
### weighted analysis res <- rma(yi, vi, data=dat, method="DL") ### weights should be the same as 1/(vi+tau2) (scaled to percentages) expect_equivalent(weights(res), (1/(dat$vi+res$tau2)/sum(1/(dat$vi+res$tau2)) * 100)) ### weights should be the same as 1/(vi+tau2) expect_equivalent(diag(weights(res, type="matrix")), 1/(dat$vi+res$tau2)) ### weighted analysis with user defined weights res <- rma(yi, vi, data=dat, method="DL", weights=1:13) ### weights should match (scaled to percentages) expect_equivalent(weights(res), (1:13)/sum(1:13) * 100) ### unweighted analysis res <- rma(yi, vi, data=dat, method="DL", weighted=FALSE) ### weights should be the same as 1/k (scaled to percentages) expect_equivalent(weights(res), rep(1/res$k, res$k) * 100) ### unweighted analysis (but user has specified weights nevertheless) res <- rma(yi, vi, data=dat, method="FE", weighted=FALSE, weights=1:13) ### weights should be the same as 1/k (scaled to percentages) expect_equivalent(weights(res), rep(1/res$k, res$k) * 100) }) test_that("weights are correct for rma.mv() with method='REML'.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### weighted analysis res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat) ### weights should be the same as 1/(vi+sigma2) (scaled to percentages) expect_equivalent(weights(res), (1/(dat$vi+res$sigma2)/sum(1/(dat$vi+res$sigma2)) * 100)) ### weights should be the same as 1/(vi+sigma2) expect_equivalent(diag(weights(res, type="matrix")), 1/(dat$vi+res$sigma2)) ### weighted analysis with user defined weights res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, W=1:13) ### weights should match (scaled to percentages) expect_equivalent(weights(res), (1:13)/sum(1:13) * 100) ### unweighted analysis res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, W=1) ### weights should be the same as 1/k (scaled to percentages) expect_equivalent(weights(res), rep(1/res$k, res$k) * 100) }) test_that("weights are correct for rma.mh() with measure='RD/RR/OR'.", { dat <- dat.bcg res <- rma.mh(measure="RD", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) sav <- weights(res) expect_equivalent(coef(res), sum(res$yi * sav/100), tolerance=.tol[["coef"]]) tmp <- diag(weights(res, type="matrix")) expect_equivalent(sav, tmp/sum(tmp)*100) res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) sav <- weights(res) expect_equivalent(exp(coef(res)), sum(exp(res$yi) * sav/100), tolerance=.tol[["coef"]]) tmp <- diag(weights(res, type="matrix")) expect_equivalent(sav, tmp/sum(tmp)*100) res <- rma.mh(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) sav <- weights(res) expect_equivalent(exp(coef(res)), sum(exp(res$yi) * sav/100), tolerance=.tol[["coef"]]) tmp <- diag(weights(res, type="matrix")) expect_equivalent(sav, tmp/sum(tmp)*100) }) test_that("weights are correct for rma.mh() with measure='IRD/IRR'.", { dat <- dat.nielweise2008 res <- rma.mh(measure="IRD", x1i=x1i, t1i=t1i, x2i=x2i, t2i=t2i, data=dat) sav <- weights(res) expect_equivalent(coef(res), sum(res$yi * sav/100), tolerance=.tol[["coef"]]) tmp <- diag(weights(res, type="matrix")) expect_equivalent(sav, tmp/sum(tmp)*100) res <- rma.mh(measure="IRR", x1i=x1i, t1i=t1i, x2i=x2i, t2i=t2i, data=dat) sav <- weights(res) expect_equivalent(exp(coef(res)), sum(exp(res$yi) * sav/100), tolerance=.tol[["coef"]]) tmp <- diag(weights(res, type="matrix")) expect_equivalent(sav, tmp/sum(tmp)*100) }) test_that("weights are correct for rma.peto().", { dat <- dat.bcg res <- 
rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat) sav <- weights(res) expect_equivalent(coef(res), sum(res$yi * sav/100), tolerance=.tol[["coef"]]) tmp <- diag(weights(res, type="matrix")) expect_equivalent(sav, tmp/sum(tmp)*100) }) metafor/tests/testthat/test_plots_labbe_plot.r0000644000176200001440000000121214052500462021451 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:labbe_plot context("Checking plots example: L'Abbe plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the full space is used par(mar=c(5,4,1,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### fit random-effects model res <- rma(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, measure="RR") ### draw L'Abbé plot labbe(res) par(opar) }) metafor/tests/testthat/test_misc_metan_vs_rma.mh_with_dat.bcg.r0000644000176200001440000000553113465300116024644 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma.mh() against metan with 'dat.bcg'") source("tolerances.r") # read in tolerances test_that("results match (FE model, measure='RR').", { data(dat.bcg, package="metafor") ### compare results with: metan tpos tneg cpos cneg, fixed nograph rr log res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) expect_equivalent(res$beta, -0.4537, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.5308, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3766, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -11.5338, tolerance=.tol[["test"]]) ### 11.53 in Stata expect_equivalent(res$QE, 152.5676, tolerance=.tol[["test"]]) ### compare results with: metan tpos tneg cpos cneg, fixed nograph rr sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.6353, tolerance=.tol[["est"]]) expect_equivalent(sav$ci.lb, 0.5881, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.6862, tolerance=.tol[["ci"]]) }) test_that("results match (FE model, measure='OR').", { data(dat.bcg, package="metafor") ### compare results with: metan tpos tneg cpos cneg, fixed nograph or log res <- rma.mh(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) expect_equivalent(res$beta, -0.4734, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.5538, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3930, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -11.5444, tolerance=.tol[["test"]]) ### 11.54 in Stata expect_equivalent(res$QE, 163.9426, tolerance=.tol[["test"]]) ### compare results with: metan tpos tneg cpos cneg, fixed nograph or sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.6229, tolerance=.tol[["pred"]]) expect_equivalent(sav$ci.lb, 0.5748, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.6750, tolerance=.tol[["ci"]]) }) test_that("results match (FE model, measure='RD').", { data(dat.bcg, package="metafor") ### compare results with: metan tpos tneg cpos cneg, fixed nograph rd res <- rma.mh(measure="RD", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) expect_equivalent(res$beta, -0.0033, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.0039, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.0027, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -11.4708, tolerance=.tol[["test"]]) ### 11.56 in Stata expect_equivalent(res$QE, 386.7759, tolerance=.tol[["test"]]) # 
zval is slightly different, as metan apparently computes the SE as # described in Greenland & Robins (1985) while metafor uses the equation # given in Sato, Greenland, & Robins (1989) (only the latter is # asymptotically correct in both the sparse-data and large-strata case) }) metafor/tests/testthat/test_misc_confint.r0000644000176200001440000000256313675405421020623 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: confint() function") source("tolerances.r") # read in tolerances test_that("confint() works correctly for 'rma.uni' objects.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat, method="DL") sav <- confint(res, fixed=TRUE, transf=exp) expect_equivalent(sav$fixed, c(0.4896, 0.3449, 0.6950), tolerance=.tol[["ci"]]) expect_equivalent(sav$random[1,], c(0.3088, 0.1197, 1.1115), tolerance=.tol[["var"]]) expect_equivalent(sav$random[3,], c(92.1173, 81.9177, 97.6781), tolerance=.tol[["het"]]) expect_equivalent(sav$random[4,], c(12.6861, 5.5303, 43.0680), tolerance=.tol[["het"]]) }) test_that("confint() works correctly for 'rma.mh' objects.", { data(dat.bcg, package="metafor") res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) sav <- confint(res, transf=exp) expect_equivalent(sav$fixed, c(0.6353, 0.5881, 0.6862), tolerance=.tol[["ci"]]) }) test_that("confint() works correctly for 'rma.peto' objects.", { data(dat.bcg, package="metafor") res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) sav <- confint(res, transf=exp) expect_equivalent(sav$fixed, c(0.6222, 0.5746, 0.6738), tolerance=.tol[["ci"]]) }) metafor/tests/testthat/test_misc_rma_glmm.r0000644000176200001440000000567113741345762020766 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma.glmm() function") source("tolerances.r") # read in tolerances dat <- dat.nielweise2007 test_that("rma.glmm() works correctly for 'UM.RS' model.", { expect_warning(res <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.RS", method="FE")) out <- capture.output(print(res)) expect_equivalent(coef(res), -1.2207, tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, 0, tolerance=.tol[["var"]]) expect_equivalent(res$sigma2, 0.6155, tolerance=.tol[["var"]]) expect_warning(res <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.RS", test="t")) out <- capture.output(print(res)) expect_equivalent(coef(res), -1.2812, tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, 0.7258, tolerance=.tol[["var"]]) expect_equivalent(res$sigma2, 0.5212, tolerance=.tol[["var"]]) ### check some (current) stop()'s expect_error(confint(res)) expect_error(plot(res)) expect_error(qqnorm(res)) expect_error(weights(res)) }) test_that("rma.glmm() works correctly when using 'clogit' or 'clogistic'.", { skip_on_cran() expect_warning(res1 <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.FS", method="FE")) expect_warning(res2 <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.FS", method="FE", control=list(optimizer="clogit"))) expect_warning(res3 <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.FS", method="FE", control=list(optimizer="clogistic"))) expect_equivalent(coef(res1), -1.2286, tolerance=.tol[["coef"]]) expect_equivalent(coef(res2), -1.2286, tolerance=.tol[["coef"]]) 
expect_equivalent(coef(res3), -1.2286, tolerance=.tol[["coef"]]) expect_equivalent(c(vcov(res1)), 0.0504, tolerance=.tol[["var"]]) expect_equivalent(c(vcov(res2)), 0.0504, tolerance=.tol[["var"]]) expect_equivalent(c(vcov(res3)), 0.0504, tolerance=.tol[["var"]]) }) test_that("rma.glmm() works correctly when using 'nlminb' or 'minqa'.", { skip_on_cran() expect_warning(res1 <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.FS", method="ML")) expect_warning(res2 <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.FS", method="ML", control=list(optimizer="nlminb"))) expect_warning(res3 <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="UM.FS", method="ML", control=list(optimizer="bobyqa"))) expect_equivalent(coef(res1), -1.2369, tolerance=.tol[["coef"]]) expect_equivalent(coef(res2), -1.2369, tolerance=.tol[["coef"]]) expect_equivalent(coef(res3), -1.2369, tolerance=.tol[["coef"]]) expect_equivalent(c(vcov(res1)), 0.0786, tolerance=.tol[["var"]]) expect_equivalent(c(vcov(res2)), 0.0786, tolerance=.tol[["var"]]) expect_equivalent(c(vcov(res3)), 0.0786, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_viechtbauer2005.r0000644000176200001440000000647714052500424024373 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:viechtbauer2005 context("Checking analysis example: viechtbauer2005") source("tolerances.r") # read in tolerances ### create dataset for example 1 dat <- data.frame( id=1:10, yi = c(-0.581, 0.530, 0.771, 1.031, 0.553, 0.295, 0.078, 0.573, -0.176, -0.232), vi = c(0.023, 0.052, 0.060, 0.115, 0.095, 0.203, 0.200, 0.211, 0.051, 0.040)) test_that("results are correct for example 1.", { res.HS <- rma(yi, vi, data=dat, method="HS") res.HE <- rma(yi, vi, data=dat, method="HE") res.DL <- rma(yi, vi, data=dat, method="DL") res.ML <- rma(yi, vi, data=dat, method="ML") res.REML <- rma(yi, vi, data=dat, method="REML") res.EB <- rma(yi, vi, data=dat, method="EB") res.SJ <- rma(yi, vi, data=dat, method="SJ") res <- list(res.HS, res.HE, res.DL, res.ML, res.REML, res.EB, res.SJ) res <- data.frame(method=sapply(res, function(x) x$method), tau2=sapply(res, function(x) x$tau2), I2=sapply(res, function(x) x$I2), H2=sapply(res, function(x) x$H2), se.tau2=sapply(res, function(x) x$se.tau2)) ### compare with results on page 271 expect_equivalent(res$tau2, c(0.2282, 0.1484, 0.2768, 0.1967, 0.2232, 0.192, 0.1992), tolerance=.tol[["var"]]) expect_equivalent(res$I2, c(77.2284, 68.7988, 80.4447, 74.5098, 76.8399, 74.0511, 74.7545), tolerance=.tol[["het"]]) expect_equivalent(res$H2, c(4.3914, 3.205, 5.1137, 3.9231, 4.3178, 3.8537, 3.9611), tolerance=.tol[["het"]]) expect_equivalent(res$se.tau2, c(0.1328, 0.1234, 0.1841, 0.1255, 0.1464, 0.133, 0.0979), tolerance=.tol[["sevar"]]) }) ### create dataset for example 2 dat <- data.frame( id=1:18, yi = c(0.100, -0.162, -0.090, -0.049, -0.046, -0.010, -0.431, -0.261, 0.134, 0.019, 0.175, 0.056, 0.045, 0.103, 0.121, -0.482, 0.290, 0.342), vi = c(0.016, 0.015, 0.050, 0.050, 0.032, 0.052, 0.036, 0.024, 0.034, 0.033, 0.031, 0.034, 0.039, 0.167, 0.134, 0.096, 0.016, 0.035)) test_that("results are correct for example 2.", { res.HS <- rma(yi, vi, data=dat, method="HS") res.HE <- rma(yi, vi, data=dat, method="HE") res.DL <- rma(yi, vi, data=dat, method="DL") res.ML <- rma(yi, vi, data=dat, method="ML") res.REML <- rma(yi, vi, data=dat, method="REML") res.EB <- rma(yi, vi, 
data=dat, method="EB") res.SJ <- rma(yi, vi, data=dat, method="SJ") res <- list(res.HS, res.HE, res.DL, res.ML, res.REML, res.EB, res.SJ) res <- data.frame(method=sapply(res, function(x) x$method), tau2=sapply(res, function(x) x$tau2), I2=sapply(res, function(x) x$I2), H2=sapply(res, function(x) x$H2), se.tau2=sapply(res, function(x) x$se.tau2)) ### compare with results on page 272 expect_equivalent(res$tau2, c(0.0099, 0, 0.0126, 0.0132, 0.0157, 0.0104, 0.0248), tolerance=.tol[["var"]]) expect_equivalent(res$I2, c(22.9266, 0, 27.5275, 28.4505, 32.0203, 23.7198, 42.6734), tolerance=.tol[["het"]]) expect_equivalent(res$H2, c(1.2975, 1, 1.3798, 1.3976, 1.471, 1.311, 1.7444), tolerance=.tol[["het"]]) expect_equivalent(res$se.tau2, c(0.0138, 0.0217, 0.0159, 0.0151, 0.0167, 0.0156, 0.0118), tolerance=.tol[["sevar"]]) }) metafor/tests/testthat/test_misc_update.r0000644000176200001440000000377413631753325020453 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: update() function") source("tolerances.r") # read in tolerances test_that("update() works for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res1 <- rma(yi, vi, data=dat, method="FE") res2 <- update(res1, method="DL") res3 <- rma(yi, vi, data=dat, method="DL") res4 <- update(res3, ~ ablat) res5 <- rma(yi, vi, mods = ~ ablat, data=dat, method="DL") res2$time <- NULL res3$time <- NULL res4$time <- NULL res5$time <- NULL expect_equivalent(res2, res3) expect_equivalent(res4, res5) }) test_that("update() works for rma.mv().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res1 <- rma.mv(yi, vi, data=dat, method="FE") res2 <- update(res1, random = ~ 1 | trial, method="REML") res3 <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat, method="REML") res4 <- update(res3, ~ ablat) res5 <- rma.mv(yi, vi, random = ~ 1 | trial, mods = ~ ablat, data=dat, method="REML") res2$time <- NULL res3$time <- NULL res4$time <- NULL res5$time <- NULL expect_equivalent(res2, res3) expect_equivalent(res4, res5) }) test_that("update() works for rma.glmm().", { skip_on_cran() data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res1 <- rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, method="FE") res2 <- update(res1, method="ML") res3 <- rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, method="ML") res4 <- update(res3, mods = ~ ablat) res5 <- rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, mods = ~ ablat, data=dat.bcg, method="ML") res2$time <- NULL res3$time <- NULL res4$time <- NULL res5$time <- NULL expect_equivalent(res2, res3) expect_equivalent(res4, res5) }) metafor/tests/testthat/test_tips_regression_with_rma.r0000644000176200001440000000413614052500505023244 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking tip: rma() results match up with those from lm()") source("tolerances.r") # read in tolerances ### this is essentially checking the equivalence of the results as explained here: ### https://www.metafor-project.org/doku.php/tips:regression_with_rma test_that("results for rma() and lm() match for method='FE'.", { stackloss$vi <- 0 res.lm <- lm(stack.loss ~ Air.Flow + Water.Temp + Acid.Conc., data=stackloss) res.rma <- rma(stack.loss, vi, mods = ~ Air.Flow + Water.Temp + Acid.Conc., data=stackloss, 
test="knha", control=list(REMLf=FALSE)) ### log likelihood (REML) should be the same expect_equivalent(logLik(res.lm, REML=TRUE), logLik(res.rma), tolerance=.tol[["fit"]]) ### coefficients should be the same expect_equivalent(coef(res.lm), coef(res.rma), tolerance=.tol[["coef"]]) ### var-cov matrix should be the same expect_equivalent(matrix(vcov(res.lm), nrow=4, ncol=4), matrix(vcov(res.rma), nrow=4, ncol=4), tolerance=.tol[["var"]]) ### fitted values should be the same expect_equivalent(fitted(res.lm), fitted(res.rma), tolerance=.tol[["pred"]]) ### standardized residuals should be the same expect_equivalent(rstandard(res.lm), rstandard(res.rma)$z, tolerance=.tol[["test"]]) ### studentized residuals should be the same expect_equivalent(rstudent(res.lm), rstudent(res.rma)$z, tolerance=.tol[["test"]]) ### hat values should be the same expect_equivalent(hatvalues(res.lm), hatvalues(res.rma), tolerance=.tol[["inf"]]) ### dffits should be the same expect_equivalent(dffits(res.lm), influence(res.rma)$inf$dffits, tolerance=.tol[["inf"]]) ### covratios should be the same expect_equivalent(covratio(res.lm), influence(res.rma)$inf$cov.r, tolerance=.tol[["inf"]]) ### dfbetas should be the same expect_equivalent(as.matrix(dfbetas(res.lm)), as.matrix(dfbetas(res.rma)), tolerance=.tol[["inf"]]) ### Cook's distancs should differ by a factor of p expect_equivalent(cooks.distance(res.lm), cooks.distance(res.rma)/res.rma$p, tolerance=.tol[["inf"]]) }) metafor/tests/testthat/test_plots_plot_of_cumulative_results.r0000644000176200001440000000166114052500473025041 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:plot_of_cumulative_results context("Checking plots example: plot of cumulative results") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### decrease margins so the more space is used par(mar=c(5,5,2,2)) ### load BCG vaccine data data(dat.bcg, package="metafor") ### calculate log risk ratios and corresponding sampling variances dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### fit random-effects models res <- rma(yi, vi, data=dat) ### cumulative meta-analysis (in the order of publication year) tmp <- cumul(res, order=dat$year) ### plot of cumulative results plot(tmp, transf=exp, xlim=c(.25,.5), lwd=3, cex=1.3) par(opar) }) metafor/tests/testthat/test_misc_selmodel.r0000644000176200001440000001715513777132316020775 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: selmodel() function") source("tolerances.r") # read in tolerances test_that("results are correct for a step function model.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() dat <- dat.hackshaw1998 res <- rma(yi, vi, data=dat) sav <- selmodel(res, type="stepfun", steps=c(0.05, 0.10, 0.50, 1.00)) out <- capture.output(print(sav)) expect_equivalent(coef(sav)$delta, c(1, 2.422079, 0.977543, 0.396713), tolerance=.tol[["coef"]]) expect_equivalent(sav$se.delta, c(NA, 1.66085, 0.820387, 0.469235), tolerance=.tol[["se"]]) expect_equivalent(sav$LRT, 7.066137, tolerance=.tol[["test"]]) expect_identical(sav$LRTdf, 3L) expect_equivalent(sav$tau2, 0.03071325, tolerance=.tol[["var"]]) opar <- par(no.readonly=TRUE) plot(sav) par(opar) tmp <- confint(sav) expect_equivalent(tmp[[1]]$random[1,], c(0.030713, 0.000224, 0.135284), tolerance=.tol[["var"]]) 
expect_equivalent(tmp[[2]]$random[1,], c(2.422079, 0.665133, 9.915798), tolerance=.tol[["coef"]]) expect_equivalent(tmp[[3]]$random[1,], c(0.977543, 0.209558, 5.386044), tolerance=.tol[["coef"]]) expect_equivalent(tmp[[4]]$random[1,], c(0.396713, 0.040198, 4.119681), tolerance=.tol[["coef"]]) }) test_that("results are correct for the beta function model.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() # data from Baskerville, N. B., Liddy, C., & Hogg, W. (2012). Systematic review and meta-analysis of practice facilitation within primary care settings. Annals of Family Medicine, 10(1), 63-74. yi <- c(1.01, 0.82, 0.59, 0.44, 0.84, 0.73, 1.12, 0.04, 0.24, 0.32, 1.04, 1.31, 0.59, 0.66, 0.62, 0.47, 1.08, 0.98, 0.26, 0.39, 0.60, 0.94, 0.11) sei <- c(0.52, 0.46, 0.23, 0.18, 0.29, 0.29, 0.36, 0.37, 0.15, 0.40, 0.32, 0.57, 0.29, 0.19, 0.31, 0.27, 0.32, 0.32, 0.18, 0.18, 0.31, 0.53, 0.27) xi <- c(1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1) res <- rma(yi, sei^2, method="ML") sav <- selmodel(res, type="beta", delta=c(1,1)) expect_equivalent(logLik(res), logLik(sav), tolerance=.tol[["fit"]]) sav <- selmodel(res, type="beta") out <- capture.output(print(sav)) expect_equivalent(coef(sav)$delta, c(0.4731131, 4.4613093), tolerance=.tol[["coef"]]) expect_equivalent(sav$se.delta, c(0.2352523, 2.1845971), tolerance=.tol[["se"]]) expect_equivalent(sav$LRT, 7.846906, tolerance=.tol[["test"]]) expect_identical(sav$LRTdf, 2L) expect_equivalent(sav$tau2, 0.00000243, tolerance=.tol[["var"]]) opar <- par(no.readonly=TRUE) plot(sav) par(opar) res <- rma(yi, sei^2, mods = ~ xi, method="ML") sav <- selmodel(res, type="beta") out <- capture.output(print(sav)) expect_equivalent(coef(sav)$delta, c(0.4200973, 5.0959707), tolerance=.tol[["coef"]]) expect_equivalent(sav$se.delta, c(0.239128, 2.410997), tolerance=.tol[["se"]]) expect_equivalent(sav$LRT, 9.044252, tolerance=.tol[["test"]]) expect_identical(sav$LRTdf, 2L) expect_equivalent(sav$tau2, 0.00000193, tolerance=.tol[["var"]]) expect_equivalent(coef(sav)$beta, c(0.1343001, -0.1363559), tolerance=.tol[["coef"]]) expect_equivalent(sav$se, c(0.1707418, 0.1244394), tolerance=.tol[["se"]]) }) test_that("results are correct for the various exponential function models.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() # data from Preston, C., Ashby, D., & Smyth, R. (2004). Adjusting for publication bias: Modelling the selection process. Journal of Evaluation in Clinical Practice, 10(2), 313-322. 
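### 2x2 table counts for the 12 trials; with drop00=TRUE, trials with no events in both groups should get NA estimates, hence the expect_warning() around the model fit below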
ai <- c(4,0,34,7,6,1,0,11,2,0,0,33) n1i <- c(19,18,341,71,45,94,22,88,82,33,15,221) ci <- c(5,0,50,16,5,8,0,12,7,0,1,43) n2i <- c(19,18,334,69,44,96,22,82,84,30,20,218) dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, drop00=TRUE) expect_warning(res <- rma(yi, vi, data=dat, method="FE")) alternative <- "less" sav1 <- selmodel(res, type="halfnorm", alternative=alternative) sav2 <- selmodel(res, type="negexp", alternative=alternative) sav3 <- selmodel(res, type="logistic", alternative=alternative) sav4 <- selmodel(res, type="power", alternative=alternative) expect_equivalent(c(sav1$delta, sav2$delta, sav3$delta, sav4$delta), c(3.162948, 2.656714, 3.339338, 1.458923), tolerance=.tol[["coef"]]) expect_equivalent(c(sav1$se.delta, sav2$se.delta, sav3$se.delta, sav4$se.delta), c(2.988922, 2.347468, 2.388776, 1.393725), tolerance=.tol[["se"]]) opar <- par(no.readonly=TRUE) tmp <- profile(sav1, progbar=FALSE) par(opar) expect_equivalent(tmp$ll, c(NA, -6.569986, -6.35659, -6.210436, -6.121035, -6.07939, -6.077928, -6.110356, -6.171488, -6.257068, -6.363607, -6.488238, -6.628599, -6.782733, -6.949015, -7.126075, -7.312763, -7.508097, -7.711241, -7.921472), tolerance=.tol[["fit"]]) sav1 <- selmodel(res, type="halfnorm", prec="sei", alternative=alternative, scaleprec=FALSE) sav2 <- selmodel(res, type="negexp", prec="sei", alternative=alternative, scaleprec=FALSE) sav3 <- selmodel(res, type="logistic", prec="sei", alternative=alternative, scaleprec=FALSE) sav4 <- selmodel(res, type="power", prec="sei", alternative=alternative, scaleprec=FALSE) expect_equivalent(c(sav1$delta, sav2$delta, sav3$delta, sav4$delta), c(3.506329, 2.279336, 3.017851, 1.444174), tolerance=.tol[["coef"]]) expect_equivalent(c(sav1$se.delta, sav2$se.delta, sav3$se.delta, sav4$se.delta), c(3.387300, 2.133013, 2.315789, 1.381633), tolerance=.tol[["se"]]) sav1 <- selmodel(res, type="halfnorm", prec="sei", alternative=alternative, steps=.05) sav2 <- selmodel(res, type="negexp", prec="sei", alternative=alternative, steps=.05) sav3 <- selmodel(res, type="logistic", prec="sei", alternative=alternative, steps=.05) sav4 <- selmodel(res, type="power", prec="sei", alternative=alternative, steps=.05, control=list(hessianCtrl=list(r=8))) expect_equivalent(c(sav1$delta, sav2$delta, sav3$delta, sav4$delta), c(5.832106, 3.819847, 5.041039, 2.399645), tolerance=.tol[["coef"]]) expect_equivalent(c(sav1$se.delta, sav2$se.delta, sav3$se.delta, sav4$se.delta), c(5.644466, 3.627467, 2.306998, 2.134629), tolerance=.tol[["se"]]) sav <- selmodel(res, type="negexppow", alternative=alternative) expect_equivalent(sav$delta, c(2.673818, 1.153199), tolerance=.tol[["coef"]]) expect_equivalent(sav$se.delta, c(2.363403, 2.143849), tolerance=.tol[["se"]]) }) test_that("results are correct for a priori chosen step function models.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() tab <- data.frame( steps = c(0.005, 0.01, 0.05, 0.10, 0.25, 0.35, 0.50, 0.65, 0.75, 0.90, 0.95, 0.99, 0.995, 1), delta.mod.1 = c(1, 0.99, 0.95, 0.80, 0.75, 0.65, 0.60, 0.55, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50), delta.sev.1 = c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.10, 0.10, 0.10, 0.10), delta.mod.2 = c(1, 0.99, 0.95, 0.90, 0.80, 0.75, 0.60, 0.60, 0.75, 0.80, 0.90, 0.95, 0.99, 1.00), delta.sev.2 = c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.25, 0.25, 0.50, 0.60, 0.75, 0.90, 0.99, 1.00)) dat <- dat.cohen1981 dat <- escalc(measure="ZCOR", ri=ri, ni=ni, data=dat[c(1,4,5)]) res <- rma(yi, vi, data=dat, 
method="ML") sav <- lapply(tab[-1], function(x) selmodel(res, type="stepfun", steps=tab$steps, delta=x, defmap=TRUE)) expect_equivalent(sapply(sav, function(x) x$beta), c(0.351894, 0.321518, 0.362019, 0.33218), tolerance=.tol[["coef"]]) expect_equivalent(sapply(sav, function(x) x$tau2), c(0.0045, 0.009544, 0.002774, 0.005652), tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_misc_vec2mat.r0000644000176200001440000000170413777151637020532 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: vec2mat() function") source("tolerances.r") # read in tolerances test_that("vec2mat() works correctly.", { sav <- vec2mat(1:6, corr=FALSE) expect_identical(sav, structure(c(NA, 1L, 2L, 3L, 1L, NA, 4L, 5L, 2L, 4L, NA, 6L, 3L, 5L, 6L, NA), .Dim = c(4L, 4L))) sav <- vec2mat(round(seq(0.2, 0.7, by=0.1), 1), corr=TRUE) expect_identical(sav, structure(c(1, 0.2, 0.3, 0.4, 0.2, 1, 0.5, 0.6, 0.3, 0.5, 1, 0.7, 0.4, 0.6, 0.7, 1), .Dim = c(4L, 4L))) sav <- vec2mat(1:10, diag=TRUE) expect_identical(sav, structure(c(1L, 2L, 3L, 4L, 2L, 5L, 6L, 7L, 3L, 6L, 8L, 9L, 4L, 7L, 9L, 10L), .Dim = c(4L, 4L))) sav <- vec2mat(1:6, corr=FALSE, dimnames=c("A","B","C","D")) expect_identical(sav, structure(c(NA, 1L, 2L, 3L, 1L, NA, 4L, 5L, 2L, 4L, NA, 6L, 3L, 5L, 6L, NA), .Dim = c(4L, 4L), .Dimnames = list(c("A", "B", "C", "D"), c("A", "B", "C", "D")))) }) metafor/tests/testthat/test_misc_anova.r0000644000176200001440000000476514005532617020271 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: anova() function") source("tolerances.r") # read in tolerances test_that("anova() works correctly for comparing nested models.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res1 <- rma(yi, vi, data=dat, method="ML") res2 <- rma(yi ~ ablat, vi, data=dat, method="ML") sav <- anova(res1, res2) out <- capture.output(print(sav)) expect_equivalent(sav$LRT, 9.9588, tolerance=.tol[["test"]]) res1 <- rma(yi, vi, data=dat, method="REML") res2 <- rma(yi ~ ablat, vi, data=dat, method="REML") expect_warning(sav <- anova(res1, res2)) expect_equivalent(sav$LRT, 8.2301, tolerance=.tol[["test"]]) }) test_that("anova() works correctly when using the 'btt' argument.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat + alloc, data=dat) sav <- anova(res, btt=3:4) out <- capture.output(print(sav)) expect_equivalent(sav$QM, 1.2850, tolerance=.tol[["test"]]) expect_equivalent(sav$QMp, 0.5260, tolerance=.tol[["pval"]]) sav <- anova(res, btt="alloc") out <- capture.output(print(sav)) expect_equivalent(sav$QM, 1.2850, tolerance=.tol[["test"]]) expect_equivalent(sav$QMp, 0.5260, tolerance=.tol[["pval"]]) res <- rma(yi, vi, mods = ~ ablat + alloc, data=dat, test="knha") sav <- anova(res, btt=3:4) out <- capture.output(print(sav)) expect_equivalent(sav$QM, 0.6007, tolerance=.tol[["test"]]) expect_equivalent(sav$QMp, 0.5690, tolerance=.tol[["pval"]]) }) test_that("anova() works correctly when using the 'X' argument.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat + alloc, data=dat) sav <- anova(res, X=rbind(c(1, 10, 0, 0), c(1, 30, 0, 0), c(1, 50, 0, 0))) out <- capture.output(print(sav)) expect_equivalent(sav$zval, c(0.0588, -1.7964, -3.1210), tolerance=.tol[["test"]]) res <- rma(yi, vi, 
mods = ~ ablat + alloc, data=dat, test="knha") sav <- anova(res, X=rbind(c(1, 10, 0, 0), c(1, 10, 1, 0), c(1, 10, 0, 1))) out <- capture.output(print(sav)) expect_equivalent(sav$zval, c(0.0568, -0.8252, 0.2517), tolerance=.tol[["test"]]) expect_equivalent(sav$QM, 0.4230, tolerance=.tol[["test"]]) expect_equivalent(sav$QMp, 0.7412, tolerance=.tol[["pval"]]) }) metafor/tests/testthat/test_analysis_example_jackson2014.r0000644000176200001440000000736613464054050023525 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking analysis example: jackson2014") source("tolerances.r") # read in tolerances test_that("confint() gives correct results for example 1 in Jackson et al. (2014).", { skip_on_cran() ### example 1 ### yi <- c(0.0267, 0.8242, 0.3930, 2.4405, 2.1401, 1.2528, 2.4849, 0.3087, 1.4246, 0.1823, 1.1378, 1.2321, 2.0695, 4.0237, 1.4383, 1.6021) vi <- c(0.1285, 0.0315, 0.0931, 2.0967, 1.0539, 0.1602, 1.0235, 0.0218, 0.5277, 0.0556, 0.3304, 0.1721, 0.4901, 2.0200, 0.3399, 0.1830) xi <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1) ### random/mixed-effects meta-regression model (REML estimation by default) res <- rma(yi, vi, mods = ~ xi, digits=3) ### approximate 95% CI for tau^2 based on REML estimate and its SE ci <- exp(log(res$tau2) + c(-1.96,1.96)*(1/res$tau2 * res$se.tau2)) expect_equivalent(ci[1], 0.0110, tolerance=.tol[["var"]]) expect_equivalent(ci[2], 0.6330, tolerance=.tol[["var"]]) ### generalised Cochran heterogeneity estimate and CI (inverse variance weights) res <- rma(yi, vi, mods = ~ xi, method="GENQ", weights=1/vi, digits=3) ci <- confint(res) expect_equivalent(ci$random[1,2], 0.0029, tolerance=.tol[["var"]]) expect_equivalent(ci$random[1,3], 0.6907, tolerance=.tol[["var"]]) ### generalised Cochran heterogeneity estimate and CI (inverse SE weights) res <- rma(yi, vi, mods = ~ xi, method="GENQ", weights=1/sqrt(vi), digits=3) ci <- confint(res) expect_equivalent(ci$random[1,2], 0.0000, tolerance=.tol[["var"]]) expect_equivalent(ci$random[1,3], 1.1245, tolerance=.tol[["var"]]) ### Paule-Mandel estimate and CI res <- rma(yi, vi, mods = ~ xi, method="PM", digits=3) ci <- confint(res) expect_equivalent(ci$random[1,2], 0.0023, tolerance=.tol[["var"]]) expect_equivalent(ci$random[1,3], 1.4871, tolerance=.tol[["var"]]) }) test_that("confint() gives correct results for example 2 in Jackson et al. 
(2014).", { skip_on_cran() ### example 2 ### yi <- c(0.54, 0.4, 0.64, 0.365, 0.835, 0.02, 0.12, 0.085, 1.18, 0.08, 0.18, 0.325, 0.06, 0.715, 0.065, 0.245, 0.24, 0.06, 0.19) vi <- c(0.0176, 0.019, 0.0906, 0.0861, 0.0063, 0.0126, 0.0126, 0.0041, 0.0759, 0.0126, 0.0104, 0.0242, 0.0026, 0.2629, 0.0169, 0.0156, 0.0481, 0.0084, 0.0044) xi <- c(1986, 1987, 1988, 1988, 1998, 1999, 2000, 2000, 2000, 2001, 2001, 2001, 2002, 2002, 2002, 2002, 2003, 2003, 2003) ### random/mixed-effects meta-regression model (REML estimation by default) res <- rma(yi, vi, mods = ~ xi, digits=3) ### approximate 95% CI for tau^2 based on REML estimate and its SE ci <- exp(log(res$tau2) + c(-1.96,1.96)*(1/res$tau2 * res$se.tau2)) expect_equivalent(ci[1], 0.0163, tolerance=.tol[["var"]]) expect_equivalent(ci[2], 0.1108, tolerance=.tol[["var"]]) ### generalised Cochran heterogeneity estimate and CI (inverse variance weights) res <- rma(yi, vi, mods = ~ xi, method="GENQ", weights=1/vi, digits=3) ci <- confint(res) expect_equivalent(ci$random[1,2], 0.0170, tolerance=.tol[["var"]]) expect_equivalent(ci$random[1,3], 0.1393, tolerance=.tol[["var"]]) ### generalised Cochran heterogeneity estimate and CI (inverse SE weights) res <- rma(yi, vi, mods = ~ xi, method="GENQ", weights=1/sqrt(vi), digits=3) ci <- confint(res) expect_equivalent(ci$random[1,2], 0.0180, tolerance=.tol[["var"]]) expect_equivalent(ci$random[1,3], 0.1375, tolerance=.tol[["var"]]) ### Paule-Mandel estimate and CI res <- rma(yi, vi, mods = ~ xi, method="PM", digits=3) ci <- confint(res) expect_equivalent(ci$random[1,2], 0.0178, tolerance=.tol[["var"]]) expect_equivalent(ci$random[1,3], 0.1564, tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_plots_baujat_plot.r0000644000176200001440000000143614052500442021660 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:baujat_plot context("Checking plots example: Baujat plot") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### adjust margins so the space is better used par(mar=c(5,4,2,2)) ### load data from Pignon et al. 
(2000) dat <- dat.pignon2000 ### compute estimated log hazard ratios and sampling variances dat$yi <- with(dat, OmE/V) dat$vi <- with(dat, 1/V) ### meta-analysis based on all 65 trials res <- rma(yi, vi, data=dat, method="FE", slab=id) ### create Baujat plot baujat(res, xlim=c(0,20), ylim=c(0,.20)) par(opar) }) metafor/tests/testthat/test_misc_rma_ls.r0000644000176200001440000002402214037044322020422 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: location-scale models") source("tolerances.r") # read in tolerances dat <- dat.bangertdrowns2004 test_that("location-scale model works correctly for an intercept-only model", { res1 <- rma(yi, vi, data=dat) res2 <- rma.mv(yi, vi, random = ~ 1 | id, data=dat) res3 <- rma(yi, vi, data=dat, scale = ~ 1) res4 <- rma(yi, vi, data=dat, scale = res3$Z) expect_equivalent(res1$tau2, res2$sigma2, tolerance=.tol[["var"]]) expect_equivalent(res1$tau2, exp(res3$alpha[1]), tolerance=.tol[["var"]]) expect_equivalent(res1$tau2, exp(res4$alpha[1]), tolerance=.tol[["var"]]) }) test_that("location-scale model works correctly for two subgroups with different tau^2 values", { res1 <- rma.mv(yi, vi, data=dat, random = ~ factor(meta) | id, struct="DIAG", subset=!is.na(meta), control=list(hessian=TRUE, vctransf=TRUE)) expect_warning(res2 <- rma(yi, vi, data=dat, scale = ~ meta)) expect_warning(res3 <- rma(yi, vi, data=dat, scale = res2$Z.f)) expect_equivalent(res1$tau2, c(exp(res2$alpha[1]), exp(res2$alpha[1] + res2$alpha[2])), tolerance=.tol[["var"]]) expect_equivalent(res1$tau2, c(exp(res3$alpha[1]), exp(res3$alpha[1] + res3$alpha[2])), tolerance=.tol[["var"]]) expect_warning(res4 <- rma(yi, vi, data=dat, scale = ~ 0 + factor(meta))) expect_equivalent(unname(sqrt(diag(solve(res1$hessian[1:2, 1:2])))), res4$se.alpha, tolerance=.tol[["se"]]) expect_warning(res5 <- rma(yi, vi, data=dat, scale = ~ 0 + factor(meta), link="identity")) expect_equivalent(res1$tau2, res5$alpha, tolerance=.tol[["var"]]) conf1 <- confint(res1) conf5 <- confint(res5, control=list(vc.min=0, vc.max=.5)) expect_equivalent(conf1[[1]]$random[1,], conf5[[1]]$random, tolerance=.tol[["var"]]) expect_equivalent(conf1[[2]]$random[1,], conf5[[2]]$random, tolerance=.tol[["var"]]) }) test_that("profile() and confint() work correctly for location-scale models", { opar <- par(no.readonly=TRUE) par(mfrow=c(2,2)) res1 <- rma(yi, vi, data=dat) prof1 <- profile(res1, progbar=FALSE, cline=TRUE, xlim=c(.01,.15)) conf1 <- confint(res1, type="PL") abline(v=conf1$random[1,2:3], lty="dotted") res2 <- rma.mv(yi, vi, random = ~ 1 | id, data=dat) prof2 <- profile(res2, progbar=FALSE, cline=TRUE, xlim=c(.01,.15)) conf2 <- confint(res2) abline(v=conf2$random[1,2:3], lty="dotted") expect_equivalent(prof1$ll, prof2$ll, tolerance=.tol[["fit"]]) expect_equivalent(conf1$random[1,], conf2$random[1,], tolerance=.tol[["var"]]) res3 <- rma(yi, vi, data=dat, scale = ~ 1) prof3 <- profile(res3, progbar=FALSE, cline=TRUE, xlim=log(c(.01,.15))) conf3 <- confint(res3) abline(v=conf3$random[1,2:3], lty="dotted") expect_equivalent(prof1$ll[c(1,20)], prof3$ll[c(1,20)], tolerance=.tol[["fit"]]) expect_equivalent(conf1$random[1,], exp(conf3$random), tolerance=.tol[["var"]]) res4 <- rma(yi, vi, data=dat, scale = ~ 1, link="identity") prof4 <- profile(res4, progbar=FALSE, cline=TRUE, xlim=c(.01,.15)) conf4 <- confint(res4, control=list(vc.max=.2)) abline(v=conf4$random[1,2:3], lty="dotted") expect_equivalent(prof1$ll, prof4$ll, tolerance=.tol[["fit"]]) 
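### note: with link="identity" and an intercept-only scale part, the location-scale model estimates tau^2 directly, so its profile and CI should again match those of the standard random-effects model (res1)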
expect_equivalent(conf1$random[1,], conf4$random, tolerance=.tol[["var"]]) par(opar) }) test_that("location-scale model works correctly for a continuous predictor", { opar <- par(no.readonly=TRUE) par(mfrow=c(3,2)) res1 <- rma(yi, vi, data=dat, scale = ~ grade) expect_equivalent(res1$beta, 0.2220791, tolerance=.tol[["coef"]]) expect_equivalent(res1$alpha, c(-3.10513013522415, 0.041361925354706), tolerance=.tol[["coef"]]) res2 <- rma(yi, vi, data=dat, scale = ~ grade, link="identity") expect_equivalent(res1$tau2, res2$tau2, tolerance=.tol[["var"]]) res3 <- rma.mv(yi, vi, data=dat, random = ~ sqrt(grade) | id, rho=0, struct="GEN", control=list(hessian=TRUE, vctransf=FALSE)) expect_equivalent(c(res2$alpha), diag(res3$G), tolerance=.tol[["coef"]]) expect_equivalent(diag(res2$M), diag(res3$M), tolerance=.tol[["var"]]) expect_equivalent(unname(sqrt(diag(solve(res3$hessian[1:2, 1:2])))), res2$se.alpha, tolerance=.tol[["se"]]) conf1 <- confint(res1) expect_equivalent(conf1[[1]]$random, c(-3.10513, -5.25032, -1.21713), tolerance=.tol[["var"]]) expect_equivalent(conf1[[2]]$random, c( 0.04136, -0.65819, 0.69562), tolerance=.tol[["var"]]) profile(res1, alpha=1, progbar=FALSE, cline=TRUE) abline(v=conf1[[1]]$random[2:3], lty="dotted") profile(res1, alpha=2, progbar=FALSE, cline=TRUE) abline(v=conf1[[2]]$random[2:3], lty="dotted") conf21 <- confint(res2, alpha=1, control=list(vc.min=-0.4, vc.max=0.3)) conf22 <- confint(res2, alpha=2, control=list(vc.min=-0.1, vc.max=0.05)) conf2 <- list(conf21, conf22) class(conf2) <- "list.confint.rma" expect_equivalent(conf2[[1]]$random, c(0.04293, -0.00137, 0.23145), tolerance=.tol[["var"]]) expect_equivalent(conf2[[2]]$random, c(0.00273, -0.04972, 0.04411), tolerance=.tol[["var"]]) profile(res2, alpha=1, progbar=FALSE, cline=TRUE, xlim=c(0,0.3)) abline(v=conf2[[1]]$random[2:3], lty="dotted") profile(res2, alpha=2, progbar=FALSE, cline=TRUE, xlim=c(-0.1,0.05)) abline(v=conf2[[2]]$random[2:3], lty="dotted") conf3 <- confint(res3) expect_equivalent(conf3[[1]]$random[1,], c(0.04291, 0.00000, 0.11333), tolerance=.tol[["var"]]) expect_equivalent(conf3[[2]]$random[1,], c(0.00273, 0.00000, 0.04062), tolerance=.tol[["var"]]) profile(res3, tau2=1, progbar=FALSE, cline=TRUE, xlim=c(0,.3)) abline(v=conf3[[1]]$random[1,2:3], lty="dotted") profile(res3, tau2=2, progbar=FALSE, cline=TRUE, xlim=c(0,.05)) abline(v=conf3[[2]]$random[1,2:3], lty="dotted") # conf2 and conf3 are not the same because in res3 the two components must # be >= 0 while this restriction does not apply to res2 (and when profiling # or getting the CIs, fixing a particular component can lead to the other # component becoming negative) par(opar) }) test_that("location-scale model works correctly for multiple predictors", { expect_warning(res1 <- rma(yi, vi, data=dat, scale = ~ grade + meta + sqrt(ni))) expect_equivalent(res1$beta, 0.1110317, tolerance=.tol[["coef"]]) expect_equivalent(res1$alpha, c(-1.08826059, -0.03429344, 2.09197456, -0.28439165), tolerance=.tol[["coef"]]) expect_warning(res2 <- rma(yi, vi, data=dat, scale = ~ grade + meta + sqrt(ni), control=list(scaleZ=FALSE))) expect_equivalent(res2$beta, 0.1110317, tolerance=.tol[["coef"]]) expect_equivalent(res2$alpha, c(-1.08826210, -0.03429332, 2.09197501, -0.28439156), tolerance=.tol[["coef"]]) out <- capture.output(print(res1)) }) test_that("predict() works correctly for location-scale models", { expect_warning(res <- rma(yi, vi, data=dat, mods = ~ meta, scale = ~ meta)) res0 <- rma(yi, vi, data=dat, subset=meta==0) res1 <- rma(yi, vi, data=dat, 
subset=meta==1) pred <- predict(res, addx=TRUE, addz=TRUE) pred0 <- predict(res0) pred1 <- predict(res1) expect_equivalent(pred$pred[1:2], c(pred1$pred, pred0$pred), tolerance=.tol[["pred"]]) expect_equivalent(pred$se[1:2] , c(pred1$se, pred0$se), tolerance=.tol[["pred"]]) expect_equivalent(pred$ci.lb[1:2], c(pred1$ci.lb, pred0$ci.lb), tolerance=.tol[["pred"]]) expect_equivalent(pred$ci.ub[1:2], c(pred1$ci.ub, pred0$ci.ub), tolerance=.tol[["pred"]]) expect_equivalent(pred$pi.lb[1:2], c(pred1$pi.lb, pred0$pi.lb), tolerance=.tol[["pred"]]) expect_equivalent(pred$pi.ub[1:2], c(pred1$pi.ub, pred0$pi.ub), tolerance=.tol[["pred"]]) pred <- predict(res, newmods=0:1) expect_equivalent(pred$pred, c(pred0$pred, pred1$pred), tolerance=.tol[["pred"]]) pred <- predict(res, newmods=0:1, newscale=0:1) expect_equivalent(pred$pred, c(pred0$pred, pred1$pred), tolerance=.tol[["pred"]]) expect_equivalent(pred$se , c(pred0$se, pred1$se), tolerance=.tol[["pred"]]) expect_equivalent(pred$ci.lb, c(pred0$ci.lb, pred1$ci.lb), tolerance=.tol[["pred"]]) expect_equivalent(pred$ci.ub, c(pred0$ci.ub, pred1$ci.ub), tolerance=.tol[["pred"]]) expect_equivalent(pred$pi.lb, c(pred0$pi.lb, pred1$pi.lb), tolerance=.tol[["pred"]]) expect_equivalent(pred$pi.ub, c(pred0$pi.ub, pred1$pi.ub), tolerance=.tol[["pred"]]) pred <- predict(res, newscale=0:1, transf=exp) expect_equivalent(pred$pred, c(res0$tau2, res1$tau2), tolerance=.tol[["var"]]) expect_warning(res <- rma(yi, vi, data=dat, mods = ~ meta, scale = ~ meta, link="identity")) pred <- predict(res, newscale=0:1) expect_equivalent(pred$pred, c(res0$tau2, res1$tau2), tolerance=.tol[["var"]]) }) test_that("anova() works correctly for location-scale models", { expect_warning(res1 <- rma(yi, vi, data=dat, mods = ~ factor(grade) + meta + sqrt(ni), scale = ~ factor(grade) + meta + sqrt(ni))) expect_warning(res0 <- rma(yi, vi, data=dat, mods = ~ factor(grade) + meta + sqrt(ni), scale = ~ 1)) sav <- anova(res1, res0) expect_equivalent(sav$LRT, 3.146726, tolerance=.tol[["test"]]) expect_equivalent(sav$pval, 0.6773767, tolerance=.tol[["pval"]]) sav <- anova(res1, btt=2:4) expect_equivalent(sav$QM, 5.286715, tolerance=.tol[["test"]]) expect_equivalent(sav$QMp, 0.1519668, tolerance=.tol[["pval"]]) sav <- anova(res1, att=2:4) expect_equivalent(sav$QS, 2.030225, tolerance=.tol[["test"]]) expect_equivalent(sav$QSp, 0.5661571, tolerance=.tol[["pval"]]) expect_error(anova(res1, btt=2:4, att=2:4)) sav <- anova(res1, X=c(0,1,-1,0,0,0)) expect_equivalent(sav$QM, 4.463309, tolerance=.tol[["test"]]) expect_equivalent(sav$QMp, 0.03463035, tolerance=.tol[["pval"]]) tmp <- predict(res1, newmods=c(1,-1,0,0,0), intercept=FALSE) expect_equivalent(sav$Xb[1,1], tmp$pred, tolerance=.tol[["test"]]) sav <- anova(res1, Z=c(0,1,-1,0,0,0)) expect_equivalent(sav$QS, 0.3679934, tolerance=.tol[["test"]]) expect_equivalent(sav$QSp, 0.5441001, tolerance=.tol[["pval"]]) tmp <- predict(res1, newscale=c(1,-1,0,0,0), intercept=FALSE) expect_equivalent(sav$Za[1,1], tmp$pred, tolerance=.tol[["test"]]) expect_error(anova(res1, X=c(0,1,-1,0,0,0), Z=c(0,1,-1,0,0,0))) }) metafor/tests/testthat/test_misc_rma_vs_direct_computation.r0000644000176200001440000000161513465305702024421 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma.uni() against direct computations") source("tolerances.r") # read in tolerances test_that("results match (FE model).", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- 
rma(yi, vi, mods = ~ ablat + year, data=dat, method="FE") X <- cbind(1, dat$ablat, dat$year) W <- diag(1/dat$vi) y <- cbind(dat$yi) beta <- solve(t(X) %*% W %*% X) %*% t(X) %*% W %*% y vb <- solve(t(X) %*% W %*% X) expect_equivalent(res$beta, beta) expect_equivalent(res$vb, vb) yhat <- c(X %*% beta) expect_equivalent(fitted(res), yhat) H <- X %*% solve(t(X) %*% W %*% X) %*% t(X) %*% W expect_equivalent(hatvalues(res, type="matrix"), H) ei <- (diag(res$k) - H) %*% y expect_equivalent(resid(res), c(ei)) }) metafor/tests/testthat/test_misc_metan_vs_rma.uni_with_dat.bcg.r0000644000176200001440000001323713466272236025051 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: rma.uni() against metan with 'dat.bcg'") source("tolerances.r") # read in tolerances test_that("results match (FE model, measure='RR').", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### compare results with: metan tpos tneg cpos cneg, fixedi nograph rr log res <- rma(yi, vi, data=dat, method="FE") expect_equivalent(c(res$beta), -0.4303, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.5097, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3509, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -10.6247, tolerance=.tol[["test"]]) ### -10.62 in Stata expect_equivalent(res$QE, 152.2330, tolerance=.tol[["test"]]) ### compare results with: metan tpos tneg cpos cneg, fixedi nograph rr sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.6503, tolerance=.tol[["pred"]]) expect_equivalent(sav$ci.lb, 0.6007, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.7040, tolerance=.tol[["ci"]]) }) test_that("results match (RE model w/ DL estimator, measure='RR').", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### compare results with: metan tpos tneg cpos cneg, randomi nograph rr log res <- rma(yi, vi, data=dat, method="DL") expect_equivalent(c(res$beta), -0.7141, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -1.0644, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3638, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -3.9952, tolerance=.tol[["test"]]) ### 4.00 in Stata expect_equivalent(res$tau2, 0.3088, tolerance=.tol[["var"]]) expect_equivalent(res$I2, 92.1173, tolerance=.tol[["het"]]) ### compare results with: metan tpos tneg cpos cneg, randomi nograph rr sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.4896, tolerance=.tol[["pred"]]) expect_equivalent(sav$ci.lb, 0.3449, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.6950, tolerance=.tol[["ci"]]) }) test_that("results match (FE model, measure='OR').", { data(dat.bcg, package="metafor") dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### compare results with: metan tpos tneg cpos cneg, fixedi nograph or log res <- rma(yi, vi, data=dat, method="FE") expect_equivalent(c(res$beta), -0.4361, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.5190, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3533, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -10.3190, tolerance=.tol[["test"]]) ### -10.32 in Stata expect_equivalent(res$QE, 163.1649, tolerance=.tol[["test"]]) ### compare results with: metan tpos tneg cpos cneg, fixedi nograph or sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.6465, tolerance=.tol[["pred"]]) expect_equivalent(sav$ci.lb, 0.5951, 
tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.7024, tolerance=.tol[["ci"]]) }) test_that("results match (RE model w/ DL estimator, measure='OR').", { data(dat.bcg, package="metafor") dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### compare results with: metan tpos tneg cpos cneg, randomi nograph or log res <- rma(yi, vi, data=dat, method="DL") expect_equivalent(c(res$beta), -0.7474, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -1.1242, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.3706, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -3.8873, tolerance=.tol[["test"]]) ### -3.89 in Stata expect_equivalent(res$tau2, 0.3663, tolerance=.tol[["var"]]) expect_equivalent(res$I2, 92.6455, tolerance=.tol[["het"]]) ### compare results with: metan tpos tneg cpos cneg, randomi nograph or sav <- predict(res, transf=exp) expect_equivalent(sav$pred, 0.4736, tolerance=.tol[["pred"]]) expect_equivalent(sav$ci.lb, 0.3249, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, 0.6903, tolerance=.tol[["ci"]]) }) test_that("results match (FE model, measure='RD').", { data(dat.bcg, package="metafor") dat <- escalc(measure="RD", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### compare results with: metan tpos tneg cpos cneg, fixedi nograph rd res <- rma(yi, vi, data=dat, method="FE") expect_equivalent(c(res$beta), -0.0009, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.0014, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.0005, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -4.0448, tolerance=.tol[["test"]]) ### -4.04 in Stata expect_equivalent(res$QE, 276.4737, tolerance=.tol[["test"]]) }) test_that("results match (RE model w/ DL estimator, measure='RD').", { data(dat.bcg, package="metafor") dat <- escalc(measure="RD", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) ### compare results with: metan tpos tneg cpos cneg, randomi nograph rd res <- rma(yi, vi, data=dat, method="DL") expect_equivalent(c(res$beta), -0.0071, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -0.0101, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.0040, tolerance=.tol[["ci"]]) expect_equivalent(res$zval, -4.5128, tolerance=.tol[["test"]]) ### -4.51 in Stata expect_equivalent(res$tau2, 0.0000, tolerance=.tol[["var"]]) expect_equivalent(res$I2, 95.6596, tolerance=.tol[["het"]]) }) #expect_that(rma(yi ~ ablat, vi, data=dat, subset=1:2), throws_error("Number of parameters to be estimated is larger than the number of observations.")) metafor/tests/testthat/test_analysis_example_ishak2007.r0000644000176200001440000001222613464053233023167 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking analysis example: ishak2007") source("tolerances.r") # read in tolerances ### load dataset dat <- dat.ishak2007 ### create long format dataset dat.long <- reshape(dat, direction="long", idvar="study", v.names=c("yi","vi"), varying=list(c(2,4,6,8), c(3,5,7,9))) dat.long <- dat.long[order(dat.long$study, dat.long$time),] rownames(dat.long) <- 1:nrow(dat.long) ### remove missing measurement occasions from dat.long is.miss <- is.na(dat.long$yi) dat.long <- dat.long[!is.miss,] ### construct the full (block diagonal) V matrix with an AR(1) structure rho.within <- .97 ### value as estimated by Ishak et al. 
(2007) V <- lapply(split(with(dat, cbind(v1i, v2i, v3i, v4i)), dat$study), diag) V <- lapply(V, function(v) sqrt(v) %*% toeplitz(ARMAacf(ar=rho.within, lag.max=3)) %*% sqrt(v)) V <- bldiag(V) V <- V[!is.miss,!is.miss] ### remove missing measurement occasions from V test_that("results are correct for diag(V) and struct='DIAG'.", { res <- rma.mv(yi, diag(V), mods = ~ factor(time) - 1, random = ~ factor(time) | study, struct = "DIAG", data = dat.long) ### Table 1, column "Time-specific (Independence)" expect_equivalent(coef(res), c(-24.8686, -27.4728, -28.5239, -24.1415), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, c(23.0537, 27.8113, 27.6767, 29.9405), tolerance=.tol[["var"]]) }) test_that("results are correct for diag(V) and random study effects.", { res <- rma.mv(yi, diag(V), mods = ~ factor(time) - 1, random = ~ 1 | study, data = dat.long) ### Table 1, column "Random study effects" expect_equivalent(coef(res), c(-26.2127, -27.1916, -28.5464, -25.6339), tolerance=.tol[["coef"]]) expect_equivalent(res$sigma2, 26.6829, tolerance=.tol[["var"]]) }) test_that("results are correct for diag(V) and struct='ID'.", { res <- rma.mv(yi, diag(V), mods = ~ factor(time) - 1, random = ~ factor(time) | study, struct = "ID", data = dat.long) ### not in paper expect_equivalent(coef(res), c(-24.8792, -27.4670, -28.5185, -24.1502), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, 26.6847, tolerance=.tol[["var"]]) }) test_that("results are correct for diag(V) and struct='HAR'.", { res <- rma.mv(yi, diag(V), mods = ~ factor(time) - 1, random = ~ time | study, struct = "HAR", data = dat.long) ### Table 1, column "Correlated random time effects" expect_equivalent(coef(res), c(-25.9578, -27.3100, -28.5543, -25.7923), tolerance=.tol[["coef"]]) # -27.5 in Table vs -27.3 expect_equivalent(res$tau2, c(20.3185, 35.9720, 26.4233, 30.1298), tolerance=.tol[["var"]]) # 20.4 in Table vs 20.3 expect_equivalent(res$rho, 1.0000, tolerance=.tol[["cor"]]) }) test_that("results are correct for struct='HAR'.", { res <- rma.mv(yi, V, mods = ~ factor(time) - 1, random = ~ time | study, struct = "HAR", data = dat.long) ### Table 1, column "Multivariate model" expect_equivalent(coef(res), c(-25.9047, -27.4608, -28.6559, -26.4934), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, c(22.7258, 33.7295, 26.1426, 31.1803), tolerance=.tol[["var"]]) # 22.6 in Table vs 22.7; 31.1 in Table vs 31.2 expect_equivalent(res$rho, 0.8832, tolerance=.tol[["cor"]]) }) test_that("results are correct for struct='AR'.", { res <- rma.mv(yi, V, mods = ~ factor(time) - 1, random = ~ time | study, struct = "AR", data = dat.long) ### not in paper expect_equivalent(coef(res), c(-25.9418, -27.3937, -28.7054, -26.3970), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, 26.6874, tolerance=.tol[["var"]]) expect_equivalent(res$rho, 0.8656, tolerance=.tol[["cor"]]) }) test_that("results are correct for struct='HCS'.", { res <- rma.mv(yi, V, mods = ~ factor(time) - 1, random = ~ factor(time) | study, struct = "HCS", data = dat.long) ### not in paper expect_equivalent(coef(res), c(-25.8814, -27.3293, -28.6510, -26.6631), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, c(20.8629, 32.7429, 27.6593, 32.1908), tolerance=.tol[["var"]]) }) test_that("results are correct for struct='CAR'.", { res <- rma.mv(yi, V, mods = ~ factor(time) - 1, random = ~ time | study, struct = "CAR", data = dat.long) ### not in paper expect_equivalent(coef(res), c(-25.9418, -27.3937, -28.7054, -26.3970), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, 26.6875, 
tolerance=.tol[["var"]]) expect_equivalent(res$rho, 0.8656, tolerance=.tol[["cor"]]) }) test_that("results are correct for struct='CAR' with unequally spaced time points.", { dat.long$time[dat.long$time == 4] <- 24/3 dat.long$time[dat.long$time == 3] <- 12/3 dat.long$time[dat.long$time == 2] <- 6/3 dat.long$time[dat.long$time == 1] <- 3/3 res <- rma.mv(yi, V, mods = ~ factor(time) - 1, random = ~ time | study, struct = "CAR", data = dat.long) ### not in paper expect_equivalent(coef(res), c(-26.0293, -27.3838, -28.7339, -26.0515), tolerance=.tol[["coef"]]) expect_equivalent(res$tau2, 26.9825, tolerance=.tol[["var"]]) expect_equivalent(res$rho, 0.9171, tolerance=.tol[["cor"]]) }) metafor/tests/testthat/test_misc_handling_of_edge_cases_due_to_zeros.r0000644000176200001440000000305013465264202026343 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: handling of edge cases due to zeros") source("tolerances.r") # read in tolerances test_that("rma.peto(), rma.mh(), and rma.glmm() handle outcome1 never occurring properly.", { ai <- c(0,0,0,0) bi <- c(10,15,20,25) ci <- c(0,0,0,0) di <- c(10,10,30,20) expect_that(suppressWarnings(rma.peto(ai=ai, bi=bi, ci=ci, di=di)), throws_error()) expect_warning(res <- rma.mh(measure="OR", ai=ai, bi=bi, ci=ci, di=di)) expect_true(is.na(res$beta)) expect_warning(res <- rma.mh(measure="RR", ai=ai, bi=bi, ci=ci, di=di)) expect_true(is.na(res$beta)) expect_warning(res <- rma.mh(measure="RD", ai=ai, bi=bi, ci=ci, di=di)) expect_equivalent(res$beta, 0) skip_on_cran() expect_error(suppressWarnings(rma.glmm(measure="OR", ai=ai, bi=bi, ci=ci, di=di))) }) test_that("rma.peto(), rma.mh(), and rma.glmm() handle outcome2 never occurring properly.", { ai <- c(10,15,20,25) bi <- c(0,0,0,0) ci <- c(10,10,30,20) di <- c(0,0,0,0) expect_error(suppressWarnings(rma.peto(ai=ai, bi=bi, ci=ci, di=di))) expect_warning(res <- rma.mh(measure="OR", ai=ai, bi=bi, ci=ci, di=di)) expect_true(is.na(res$beta)) expect_warning(res <- rma.mh(measure="RR", ai=ai, bi=bi, ci=ci, di=di)) expect_equivalent(res$beta, 0) expect_warning(res <- rma.mh(measure="RD", ai=ai, bi=bi, ci=ci, di=di)) expect_equivalent(res$beta, 0) skip_on_cran() expect_error(suppressWarnings(rma.glmm(measure="OR", ai=ai, bi=bi, ci=ci, di=di))) }) metafor/tests/testthat/test_analysis_example_henmi2010.r0000644000176200001440000000263114052500365023156 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:henmi2010 source("tolerances.r") # read in tolerances context("Checking analysis example: henmi2010") ### load dataset dat <- dat.lee2004 ### calculate log odds ratios and corresponding sampling variances dat <- escalc(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat) test_that("results are correct for the random-effects model.", { ### fit random-effects model with DL estimator res <- rma(yi, vi, data=dat, method="DL") ### compare with results on page 2978 expect_equivalent(res$tau2, 0.3325, tolerance=.tol[["var"]]) expect_equivalent(coef(res), -0.6787, tolerance=.tol[["coef"]]) expect_equivalent(res$ci.lb, -1.0664, tolerance=.tol[["ci"]]) expect_equivalent(res$ci.ub, -0.2911, tolerance=.tol[["ci"]]) }) test_that("results are correct for the Henmi & Copas method.", { ### fit random-effects model with DL estimator res <- rma(yi, vi, data=dat, method="DL") ### apply Henmi & Copas method sav <- hc(res) out <- capture.output(print(sav)) ### so that print.hc.rma.uni() is run 
(at least once) ### compare with results on page 2978 expect_equivalent(sav$beta, -0.5145, tolerance=.tol[["coef"]]) expect_equivalent(sav$ci.lb, -0.9994, tolerance=.tol[["ci"]]) expect_equivalent(sav$ci.ub, -0.0295, tolerance=.tol[["ci"]]) }) metafor/tests/testthat/test_plots_funnel_plot_variations.r0000644000176200001440000000152514052500454024142 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/plots:funnel_plot_variations context("Checking plots example: funnel plot variations") test_that("plot can be drawn.", { expect_equivalent(TRUE, TRUE) # avoid 'Empty test' message skip_on_cran() opar <- par(no.readonly=TRUE) ### load ETS data data(dat.hackshaw1998, package="metafor") ### fit fixed-effects model res <- rma(yi, vi, data=dat.hackshaw1998, measure="OR", method="FE") ### set up 2x2 array for plotting par(mfrow=c(2,2)) ### draw funnel plots funnel(res, main="Standard Error") funnel(res, yaxis="vi", main="Sampling Variance") funnel(res, yaxis="seinv", main="Inverse Standard Error") funnel(res, yaxis="vinv", main="Inverse Sampling Variance") par(opar) }) metafor/tests/testthat/test_misc_residuals.r0000644000176200001440000001007713465314044021152 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: residuals() function") source("tolerances.r") # read in tolerances test_that("residuals are correct for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) res <- rma(yi, vi, data=dat) expect_equivalent(c(residuals(res)), c(dat$yi - coef(res))) expect_equivalent(rstandard(res)$z, c(0.1401, -0.9930, -0.4719, -1.0475, 1.6462, 0.4825), tolerance=.tol[["pred"]]) expect_equivalent(rstudent(res)$z, c(0.1426, -0.9957, -0.4591, -1.1949, 2.0949, 0.4330), tolerance=.tol[["test"]]) res <- rma(yi, vi, data=dat, method="FE") expect_equivalent(sum(residuals(res, type="pearson")^2), res$QE, tolerance=.tol[["test"]]) expect_equivalent(sum(residuals(res, type="cholesky")^2), res$QE, tolerance=.tol[["test"]]) }) test_that("rstudent() yields the same results as a mean shift outlier model for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) dat$trial1 <- ifelse(dat$trial == 1, 1, 0) res <- rma(yi, vi, data=dat) sav <- rstudent(res) res <- rma(yi, vi, mods = ~ trial1, data=dat) expect_equivalent(coef(res)[2], sav$resid[1], tolerance=.tol[["coef"]]) expect_equivalent(res$se[2], sav$se[1], tolerance=.tol[["se"]]) res <- rma(yi, vi, data=dat, test="knha") sav <- rstudent(res) res <- rma(yi, vi, mods = ~ trial1, data=dat, test="knha") expect_equivalent(coef(res)[2], sav$resid[1], tolerance=.tol[["pred"]]) expect_equivalent(res$se[2], sav$se[1], tolerance=.tol[["se"]]) }) test_that("residuals are correct for rma.mv().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) res <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat) expect_equivalent(c(residuals(res)), c(dat$yi - coef(res))) expect_equivalent(rstandard(res)$z, c(0.1401, -0.9930, -0.4719, -1.0476, 1.6462, 0.4825), tolerance=.tol[["test"]]) expect_equivalent(rstandard(res, cluster=dat$alloc)$cluster$X2, c(3.7017, 3.6145), tolerance=.tol[["test"]]) expect_equivalent(rstudent(res)$z, c(0.1426, -0.9957, -0.4591, -1.1949, 2.0949, 0.4330), tolerance=.tol[["test"]]) 
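### Illustrative aside (not part of the original test file): the 'cluster' argument
### used in the checks below can also be applied to a model fitted to the full BCG
### dataset to obtain cluster-level (multivariate) standardized residuals; treating
### 'alloc' as the clustering variable and the object names 'dat.all'/'res.all' are
### only assumed examples here.
dat.all <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res.all <- rma.mv(yi, vi, random = ~ 1 | trial, data=dat.all)
rstandard(res.all, cluster=dat.all$alloc)$cluster   # one X^2 statistic per allocation cluster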
expect_equivalent(rstudent(res, cluster=dat$alloc)$cluster$X2, c(27.4717, 5.2128), tolerance=.tol[["test"]]) expect_equivalent(rstudent(res, cluster=dat$alloc, reestimate=FALSE)$cluster$X2, c(3.7017, 3.6145), tolerance=.tol[["test"]]) }) test_that("residuals are correct for rma.mh().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) expect_equivalent(c(residuals(res)), c(dat$yi - coef(res))) expect_equivalent(residuals(res, type="rstandard"), c(0.1068, -1.4399, -0.6173, -3.4733, 3.2377, 1.9749), tolerance=.tol[["pred"]]) expect_equivalent(residuals(res, type="rstudent"), c(0.1076, -1.4668, -0.6219, -4.2413, 3.3947, 2.7908), tolerance=.tol[["pred"]]) }) test_that("residuals are correct for rma.peto().", { data(dat.bcg, package="metafor") dat <- escalc(measure="PETO", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) expect_equivalent(c(residuals(res)), c(dat$yi - coef(res))) expect_equivalent(rstandard(res)$z, c(0.2684, -1.1482, -0.4142, -2.3440, 3.4961, 0.8037), tolerance=.tol[["test"]]) expect_equivalent(rstudent(res)$z, c(0.2705, -1.1700, -0.4173, -2.8891, 3.6614, 1.1391), tolerance=.tol[["test"]]) }) test_that("residuals are correct for rma.glmm().", { skip_on_cran() data(dat.bcg, package="metafor") dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) res <- rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, subset=1:6) expect_equivalent(c(residuals(res)), c(dat$yi - coef(res))) }) metafor/tests/testthat/test_misc_influence.r0000644000176200001440000001641114030353674021126 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: influence() and related functions") source("tolerances.r") # read in tolerances test_that("influence() works for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) sav <- influence(res) sav$inf <- sav$inf[1] sav$dfbs <- sav$dfbs[1] sav$is.infl <- sav$is.infl[1] sav$not.na <- sav$not.na[1] tmp <- structure(list(inf = list(rstudent = -0.218142, dffits = -0.040708, cook.d = 0.001717, cov.r = 1.116449, tau2.del = 0.336157, QE.del = 151.582573, hat = 0.050595, weight = 5.059483, inf = "", slab = 1L, digits = c( est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4)), dfbs = list(intrcpt = -0.040266, slab = 1L, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4)), ids = 1:13, not.na = TRUE, is.infl = FALSE, tau2 = 0.3132, QE = 152.233, k = 13L, p = 1L, m = 1L, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4)), class = "infl.rma.uni") expect_equivalent(sav, tmp, tolerance=.tol[["inf"]]) }) test_that("leave1out() works for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, data=dat) inf <- leave1out(res) inf <- inf[1] sav <- structure(list(estimate = -0.7071, se = 0.1900, zval = -3.7223, pval = 0.0002, ci.lb = -1.0794, ci.ub = -0.3348, Q = 151.5826, Qp = 0, tau2 = 0.3362, I2 = 93.2259, H2 = 14.7622, slab = 1L, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4), 
transf = FALSE), class = "list.rma") expect_equivalent(sav, inf, tolerance=.tol[["misc"]]) }) test_that("leave1out() works for rma.mh().", { data(dat.bcg, package="metafor") res <- rma.mh(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) inf <- leave1out(res) inf <- inf[1] sav <- structure(list(estimate = -0.4514, se = 0.0394, zval = -11.4462, pval = 0, ci.lb = -0.5287, ci.ub = -0.3741, Q = 151.9153, Qp = 0, I2 = 92.7591, H2 = 13.8105, slab = 1L, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4), transf = FALSE), class = "list.rma") expect_equivalent(sav, inf, tolerance=.tol[["misc"]]) }) test_that("leave1out() works for rma.peto().", { data(dat.bcg, package="metafor") res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) inf <- leave1out(res) inf <- inf[1] sav <- structure(list(estimate = -0.4722, se = 0.0408, zval = -11.5791, pval = 0, ci.lb = -0.5521, ci.ub = -0.3923, Q = 167.2005, Qp = 0, I2 = 93.4211, H2 = 15.2000, slab = 1L, digits = c(est = 4, se = 4, test = 4, pval = 4, ci = 4, var = 4, sevar = 4, fit = 4, het = 4), transf = FALSE), class = "list.rma") expect_equivalent(sav, inf, tolerance=.tol[["misc"]]) }) test_that("model.matrix() works for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat, data=dat) sav <- structure(c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 44, 55, 42, 52, 13, 44, 19, 13, 27, 42, 18, 33, 33), .Dim = c(13L, 2L), .Dimnames = list(c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"), c("intrcpt", "ablat"))) expect_equivalent(sav, model.matrix(res)) }) test_that("hatvalues() works for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat, data=dat) expect_equivalent(hatvalues(res), c(0.049, 0.1493, 0.0351, 0.3481, 0.2248, 0.2367, 0.064, 0.357, 0.0926, 0.1157, 0.2309, 0.0189, 0.0778), tolerance=.tol[["inf"]]) sav <- structure(c(0.049, 0.067, 0.0458, 0.0994, 0.1493, 0.0904, 0.0374, 0.0498, 0.0351), .Dim = c(3L, 3L), .Dimnames = list(c("1", "2", "3"), c("1", "2", "3"))) expect_equivalent(hatvalues(res, type="matrix")[1:3,1:3], sav, tolerance=.tol[["inf"]]) }) test_that("hatvalues() works for rma.mv().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma.mv(yi, vi, mods = ~ ablat, random = ~ 1 | trial, data=dat) expect_equivalent(hatvalues(res), c(0.049, 0.1493, 0.0351, 0.3481, 0.2248, 0.2367, 0.064, 0.357, 0.0926, 0.1157, 0.2309, 0.0189, 0.0778), tolerance=.tol[["inf"]]) sav <- structure(c(0.049, 0.067, 0.0458, 0.0994, 0.1493, 0.0904, 0.0374, 0.0498, 0.0351), .Dim = c(3L, 3L), .Dimnames = list(c("1", "2", "3"), c("1", "2", "3"))) expect_equivalent(hatvalues(res, type="matrix")[1:3,1:3], sav, tolerance=.tol[["inf"]]) }) test_that("cooks.distance() works for rma().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi, vi, mods = ~ ablat, data=dat) expect_equivalent(cooks.distance(res), c(0.0048, 0.0489, 0.0104, 0.2495, 0.0072, 0.2883, 0.3643, 0.2719, 0.02, 0.1645, 0.0009, 0.0403, 0.1433), tolerance=.tol[["inf"]]) }) test_that("cooks.distance() works for rma.mv().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma.mv(yi, vi, mods = ~ ablat, random = ~ 1 | trial, 
data=dat) expect_equivalent(cooks.distance(res), c(0.0048, 0.0489, 0.0104, 0.2495, 0.0072, 0.2883, 0.3643, 0.2719, 0.02, 0.1645, 0.0009, 0.0404, 0.1434), tolerance=.tol[["inf"]]) expect_equivalent(cooks.distance(res, cluster=dat$alloc), c(0.2591, 2.4372, 0.1533), tolerance=.tol[["inf"]]) expect_equivalent(cooks.distance(res, cluster=dat$alloc, reestimate=FALSE), c(0.3199, 2.2194, 0.2421), tolerance=.tol[["inf"]]) }) test_that("influence() correctly works with 'na.omit' and 'na.pass'.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste0("Trial ", dat.bcg$trial)) dat$yi[2] <- NA dat$vi[3] <- NA dat$ablat[5] <- NA dat$trial12 <- ifelse(dat$trial == 12, 1, 0) options(na.action="na.omit") expect_warning(res <- rma(yi, vi, mods = ~ ablat + trial12, data=dat)) sav <- influence(res) expect_equivalent(length(sav$inf$rstudent), 10) expect_equivalent(sum(is.na(sav$inf$rstudent)), 1) expect_equivalent(sum(is.na(sav$inf$hat)), 0) expect_equivalent(sum(is.na(sav$dfbs$intrcpt)), 1) options(na.action="na.pass") expect_warning(res <- rma(yi, vi, mods = ~ ablat + trial12, data=dat)) sav <- influence(res) expect_equivalent(length(sav$inf$rstudent), 13) expect_equivalent(sum(is.na(sav$inf$rstudent)), 4) expect_equivalent(sum(is.na(sav$inf$hat)), 3) expect_equivalent(sum(is.na(sav$dfbs$intrcpt)), 4) }) test_that("'infonly' argument works correctly with influence().", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg, slab=paste0("Trial ", dat.bcg$trial)) res <- rma(yi, vi, data=dat, method="FE") inf <- influence(res) tmp <- capture.output(sav <- print(inf)) expect_equivalent(length(sav$rstudent), 13) tmp <- capture.output(sav <- print(inf, infonly=TRUE)) expect_equivalent(length(sav$rstudent), 3) }) metafor/tests/testthat/test_misc_vcov.r0000644000176200001440000000354213675405455020145 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") context("Checking misc: vcov() function") source("tolerances.r") # read in tolerances test_that("vcov() works correctly for 'rma.uni' objects.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma(yi ~ ablat, vi, data=dat) expect_equivalent(vcov(res), structure(c(0.0621, -0.0016, -0.0016, 1e-04), .Dim = c(2L, 2L), .Dimnames = list(c("intrcpt", "ablat"), c("intrcpt", "ablat"))), tolerance=.tol[["var"]]) expect_equivalent(diag(vcov(res, type="obs")), dat$vi + res$tau2) expect_equivalent(vcov(res, type="fitted")[1,], c(0.0197, 0.0269, 0.0184, 0.025, -0.0007, 0.0197, 0.0033, -0.0007, 0.0085, 0.0184, 0.0026, 0.0125, 0.0125), tolerance=.tol[["var"]]) expect_equivalent(vcov(res, type="resid")[1,], c(0.3822, -0.0269, -0.0184, -0.025, 7e-04, -0.0197, -0.0033, 0.0007, -0.0085, -0.0184, -0.0026, -0.0125, -0.0125), tolerance=.tol[["var"]]) }) test_that("vcov() works correctly for 'rma.mv' objects.", { data(dat.bcg, package="metafor") dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg) res <- rma.mv(yi ~ ablat, vi, random = ~ 1 | trial, data=dat) expect_equivalent(vcov(res), structure(c(0.062, -0.0016, -0.0016, 1e-04), .Dim = c(2L, 2L), .Dimnames = list(c("intrcpt", "ablat"), c("intrcpt", "ablat"))), tolerance=.tol[["var"]]) expect_equivalent(diag(vcov(res, type="obs")), dat$vi + res$sigma2) expect_equivalent(vcov(res, type="fitted")[1,], c(0.0197, 0.0269, 0.0184, 0.025, -0.0007, 0.0197, 0.0033, -0.0007, 0.0085, 0.0184, 
0.0026, 0.0125, 0.0125), tolerance=.tol[["var"]]) expect_equivalent(vcov(res, type="resid")[1,], c(0.3822, -0.0269, -0.0184, -0.025, 7e-04, -0.0197, -0.0033, 0.0007, -0.0085, -0.0184, -0.0026, -0.0125, -0.0125), tolerance=.tol[["var"]]) }) metafor/tests/testthat/test_analysis_example_viechtbauer2007b.r0000644000176200001440000001314114052500432024520 0ustar liggesusers### library(metafor); library(testthat); Sys.setenv(NOT_CRAN="true") ### see also: https://www.metafor-project.org/doku.php/analyses:viechtbauer2007b context("Checking analysis example: viechtbauer2007b") source("tolerances.r") # read in tolerances ### create dataset for example data(dat.linde2005, package="metafor") dat <- escalc(measure="RR", ai=ai, ci=ci, n1i=n1i, n2i=n2i, data=dat.linde2005) dat <- dat[c(7:10,13:25), c(13:16,18:19,11,6,7,9)] dat$dosage <- (dat$dosage * 7) / 1000 test_that("results are correct for the CIs.", { sav <- summary(dat, transf=exp)[c(13,17),] ### compare with results on page 106 tmp <- sav$ci.lb expect_equivalent(tmp, c(.7397, 1.0039), tolerance=.tol[["ci"]]) ### 1.01 in article tmp <- sav$ci.ub expect_equivalent(tmp, c(1.2793, 1.5434), tolerance=.tol[["ci"]]) }) test_that("results are correct for the fixed-effects model.", { res <- rma(yi, vi, data=dat, method="FE") sav <- predict(res, transf=exp) tmp <- c(sav$pred, sav$ci.lb, sav$ci.ub) ### compare with results on page 107 expect_equivalent(tmp, c(1.3840, 1.2599, 1.5204), tolerance=.tol[["pred"]]) ### 1.39 in article expect_equivalent(res$QE, 51.5454, tolerance=.tol[["test"]]) ### 55.54 in article }) test_that("results are correct for the random-effects model.", { res <- rma(yi, vi, data=dat, method="DL") sav <- predict(res, transf=exp) ### compare with results on page 109 tmp <- c(sav$pred, sav$ci.lb, sav$ci.ub) expect_equivalent(tmp, c(1.5722, 1.3103, 1.8864), tolerance=.tol[["pred"]]) ### 1.90 in article tmp <- c(sav$pi.lb, sav$pi.ub) expect_equivalent(tmp, c(.8488, 2.9120), tolerance=.tol[["ci"]]) ### .87, 2.83 in article (but this was calculated without taking Var[hat(mu)] into consideration) expect_equivalent(res$tau2, .0903, tolerance=.tol[["var"]]) ### .091 in article }) test_that("results are correct for the mixed-effects model.", { dat$dosage <- dat$dosage * dat$duration res <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="DL") ### compare with results on page 112 expect_equivalent(res$tau2, .0475, tolerance=.tol[["var"]]) expect_equivalent(res$R2, 47.3778, tolerance=.tol[["r2"]]) ### 48% in article sav <- structure(list(estimate = c(0.4763, -0.0058, -0.0672, -0.0016), se = c(0.0876, 0.01, 0.0352, 0.0034), zval = c(5.4342, -0.5846, -1.9086, -0.4555), pval = c(0, 0.5588, 0.0563, 0.6487)), .Names = c("estimate", "se", "zval", "pval"), row.names = c("Intercept", "Dosage", "Baseline", "Dosage x Baseline"), class = "data.frame") ### compare with results in Table II on page 113 expect_equivalent(coef(summary(res))[,1:4], sav, tolerance=.tol[["misc"]]) ### compare with results on page 113 sav <- predict(res, newmods=c(34-34, 12.5-20, (34-34)*(12.5-20)), transf=exp) tmp <- c(sav$pred, sav$ci.lb, sav$ci.ub) expect_equivalent(tmp, c(2.6657, 1.4560, 4.8806), tolerance=.tol[["pred"]]) ### 2.66, 1.46, 4.90 in article sav <- predict(res, newmods=c(34-34, 23.6-20, (34-34)*(23.6-20)), transf=exp) tmp <- c(sav$pred, sav$ci.lb, sav$ci.ub) expect_equivalent(tmp, c(1.2639, 0.9923, 1.6099), tolerance=.tol[["pred"]]) ### 1.61 in article skip_on_cran() size <- 1 / sqrt(dat$vi) size <- 0.15 * size / max(size) modvals <- cbind(0, 
cbind(seq(12, 24, by=.1)) - 20, 0) preds <- predict(res, modvals, transf=exp) opar <- par(no.readonly=TRUE) plot(NA, NA, xlab="Baseline HRSD Score", ylab="Relative Rate", xlim=c(12,24), ylim=c(0.5,4.0), bty="l") abline(h=seq(1, 4, by=0.5), col="lightgray") abline(v=seq(14, 24, by=2), col="lightgray") lines(modvals[,2] + 20, preds$pred, col="darkgray", lwd=2) lines(modvals[,2] + 20, preds$ci.lb, col="darkgray", lty="dashed", lwd=2) lines(modvals[,2] + 20, preds$ci.ub, col="darkgray", lty="dashed", lwd=2) symbols(dat$baseline, exp(dat$yi), circles=size, inches=FALSE, add=TRUE, bg="black") par(opar) ### check results for all tau^2 estimators res.HS <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="HS") res.HE <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="HE") res.DL <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="DL") res.GENQ <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="GENQ", weights = n1i + n2i) res.SJ <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="SJ") res.DLIT <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="DLIT", control=list(maxiter=500)) res.SJIT <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="SJIT") res.PM <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="PM") res.ML <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="ML") res.REML <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="REML") res.EB <- rma(yi, vi, mods = ~ I(dosage-34) * I(baseline-20), data=dat, method="EB") res <- list(res.HS, res.HE, res.DL, res.GENQ, res.SJ, res.DLIT, res.SJIT, res.PM, res.ML, res.REML, res.EB) res <- data.frame(method=sapply(res, function(x) x$method), tau2=sapply(res, function(x) x$tau2), se.tau2=sapply(res, function(x) x$se.tau2)) expect_equivalent(res$tau2, c(0.0253, 0.0388, 0.0475, 0.06, 0.0912, 0.0299, 0.0633, 0.0633, 0.024, 0.0558, 0.0633), tolerance=.tol[["var"]]) expect_equivalent(res$se.tau2, c(0.0197, 0.0764, 0.0376, 0.0528, 0.0436, 0.0437, 0.046, 0.046, 0.0222, 0.0409, 0.046), tolerance=.tol[["sevar"]]) }) metafor/tests/testthat.R0000644000176200001440000000023413150625652015040 0ustar liggesusers### to also run skip_on_cran() tests, uncomment: #Sys.setenv(NOT_CRAN="true") library(testthat) library(metafor) test_check("metafor", reporter="summary") metafor/vignettes/0000755000176200001440000000000014060132612013712 5ustar liggesusersmetafor/vignettes/metafor.pdf.asis0000644000176200001440000000015313150625652017011 0ustar liggesusers%\VignetteEngine{R.rsp::asis} %\VignetteIndexEntry{Conducting Meta-Analyses in R with the metafor Package} metafor/vignettes/diagram.pdf.asis0000644000176200001440000000014013150625652016754 0ustar liggesusers%\VignetteEngine{R.rsp::asis} %\VignetteIndexEntry{Diagram of Functions in the metafor Package} metafor/NEWS0000644000176200001440000023322514060132460012411 0ustar liggesusersChanges in Version 3.0-2 (2021-06-09) ===================================== o the metafor package now makes use of the 'mathjaxr' package to nicely render equations shown in the HTML help pages o rma() can now also fit location-scale models o added selmodel() for fitting a wide variety of selection models (and added the corresponding plot.rma.uni.selmodel() function for drawing the estimated selection function) o rma.mv() gains 'dfs' argument and now provides an often better way for calculating the (denominator) degrees of freedom for 
approximate t- and F-tests when dfs="contain" o added tes() function for the test of excess significance o added regplot() function for drawing scatter plots / bubble plots based on meta-regression models o added rcalc() for calculating the variance-covariance matrix of correlation coefficients and matreg() for fitting regression models based on correlation/covariance matrices o added convenience functions dfround() and vec2mat() o added aggregate.escalc() function to aggregate multiple effect sizes or outcomes within studies/clusters o regtest() now shows the 'limit estimate' of the (average) true effect when using 'sei', 'vi', 'ninv', or 'sqrtninv' as predictors (and the model does not contain any other moderators) o vif() gains 'btt' argument and can now also compute generalized variance inflation factors; a proper print.vif.rma() function was also added o anova.rma() argument 'L' renamed to 'X' (the former still works, but is no longer documented) o argument 'order' in cumul() should now just be a variable, not the order of the variable, to be used for ordering the studies and must be of the same length as the original dataset that was used in the model fitting o similarly, vector arguments in various plotting functions such as forest.rma() must now be of the same length as the original dataset that was used in the model fitting (any subsetting and removal of NAs is automatically applied) o the various leave1out() and cumul() functions now provide I^2 and H^2 also for fixed-effects models; accordingly, plot.cumul.rma() now also works with such models o fixed 'level' not getting passed down to the various cumul() functions o plot.cumul.rma() argument 'addgrid' renamed to 'grid' (the former still works, but is no longer documented) o forest.default(), forest.rma(), and labbe() gain 'plim' argument and now provide more flexibility in terms of the scaling of the points o forest.rma() gains 'colout' argument (to adjust the color of the observed effect sizes or outcomes) o in the various forest() functions, the right header is now suppressed when 'annotate=FALSE' and 'header=TRUE' o funnel.default() and funnel.rma() gain 'label' and 'offset' arguments o funnel.default() and funnel.rma() gain 'lty' argument; the reference line is now drawn by default as a dotted line (like the line for the pseudo confidence region) o the 'forest' and 'funnel' arguments of reporter.rma.uni() can now also be logicals to suppress the drawing of these plots o added 'weighted' argument to fsn() (for Orwin's method) o added some more transformation functions o bldiag() now properly handles ?x0 or 0x? 
matrices o p-values are still given to 2 digits even when 'digits=1' o summary.escalc() also provides the p-values (of the Wald-type tests); but when using the 'transf' argument, the sampling variances, standard errors, test statistics, and p-values are no longer shown o rma.uni() no longer constrains a fixed tau^2 value to 0 when k=1 o slight speedup in functions that repeatedly fit rma.uni() models by skipping the computation of the pseudo R^2 statistic o started using the 'pbapply' package for showing progress bars, also when using parallel processing o to avoid potential confusion, all references to 'credibility intervals' have been removed from the documentation; these intervals are now exclusively referred to as 'prediction intervals'; in the output, the bounds are therefore indicated now as 'pi.lb' and 'pi.ub' (instead of 'cr.lb' and 'cr.ub'); the corresponding argument names were changed in addpoly.default(); argument 'addcred' was changed to 'addpred' in addpoly.rma() and forest.rma(); however, code using the old arguments names should continue to work o one can now use weights(..., type="rowsum") for intercept-only 'rma.mv' models (to obtain 'row-sum weights') o simulate.rma() gains 'olim' argument; renamed the 'clim' argument in summary.escalc() and the various forest() functions to 'olim' for consistency (the old 'clim' argument should continue to work) o show nicer network graphs for dat.hasselblad1998 and dat.senn2013 in the help files o added 23 datasets (dat.anand1999, dat.assink2016, dat.baskerville2012, dat.bornmann2007, dat.cannon2006, dat.cohen1981, dat.craft2003, dat.crede2010, dat.dagostino1998, dat.damico2009, dat.dorn2007, dat.hahn2001, dat.kalaian1996, dat.kearon1998, dat.knapp2017, dat.landenberger2005, dat.lau1992, dat.lim2014, dat.lopez2019, dat.maire2019, dat.moura2021, dat.obrien2003, dat.vanhowe1999, dat.viechtbauer2021) o the package now runs a version check on startup in interactive sessions; setting the environment variable METAFOR_VERSION_CHECK to FALSE disables this o refactored various functions (for cleaner/simpler code) o improved the documentation a bit Changes in Version 2.4-0 (2020-03-19) ===================================== o version jump to 2.4-0 for CRAN release (from now on, even minor numbers for CRAN releases, odd numbers for development versions) o the various forest() functions gain 'header' argument o escalc() gains 'include' argument o setting 'verbose=3' in model fitting functions sets 'options(warn=1)' o forest.rma() and forest.default() now throw informative errors when misusing 'order' and 'subset' arguments o plot.profile.rma() now always includes the actual value of the variance or correlation component as an additional point in the plot o fixed failing tests due to the 'stringsAsFactors=FALSE' change in the upcoming version of R o print.infl.rma.uni() gains 'infonly' argument, to only show the influential studies o removed 'MASS' from 'Suggests' (no longer needed) o argument 'btt' can now also take a string to grep for o added 'optimParallel' as possible optimizer in rma.mv() o added (for now undocumented) option to fit models in rma.glmm() via the 'GLMMadaptive' package (instead of 'lme4'); to try this, use: 'control=list(package="GLMMadaptive")' o started to use numbering scheme for devel version (the number after the dash indicates the devel version) o added contrmat() function (for creating a matrix that indicates which groups have been compared against each other in each row of a dataset) o added to.wide() function (for 
restructuring long format datasets into the wide format needed for contrast-based analyses) o I^2 and H^2 are also shown in output for fixed-effects models o argument 'grid' in baujat() can now also be a color name o added (for now undocumented) 'time' argument to more functions that are computationally expensive o added (for now undocumented) 'textpos' argument to the various forest functions o added a new dataset (dat.graves2010) o added more tests Changes in Version 2.1-0 (2019-05-13) ===================================== o added formula() method for objects of class 'rma' o llplot() now also allows for measure="GEN"; also, the documentation and y-axis label have been corrected to indicate that the function plots likelihoods (not log likelihoods) o confint.rma.mv() now returns an object of class 'list.confint.rma' when obtaining CIs for all variance and correlation components of the model; added corresponding print.list.confint.rma() function o moved 'tol' argument in permutest() to 'control' and renamed the argument to 'comptol' o added 'PMM' and 'GENQM' estimators in rma.uni() o added vif() function to get variance inflation factors o added .glmulti object for making the interaction with glmulti easier o added reporter() and reporter.rma.uni() for dynamically generating analysis reports for objects of class 'rma.uni' o output is now styled/colored when 'crayon' package is loaded (this only works on a 'proper' terminal with color support; also works in RStudio) o overhauled plot.gosh.rma(); when 'out' is specified, it now shows two distributions, one for the values when the outlier is included and one for the values when for outlier is excluded; dropped the 'hcol' argument and added 'border' argument o refactored influence.rma.uni() to be more consistent internally with other functions; print.infl.rma.uni() and plot.infl.rma.uni() adjusted accordingly; functions cooks.distance.rma.uni(), dfbetas.rma.uni(), and rstudent.rma.uni() now call influence.rma.uni() for the computations o rstudent.rma.uni() now computes the SE of the deleted residuals in such a way that it will yield identical results to a mean shift outlier model even when that model is fitted with test="knha" o rstandard.rma.uni() gains 'type' argument, and can now also compute conditional residuals (it still computes marginal residuals by default) o cooks.distance.rma.mv() gains 'cluster' argument, so that the Cook's distances can be computed for groups of estimates o cooks.distance.rma.mv() gains 'parallel', 'ncpus', and 'cl' arguments and can now make use of parallel processing o cooks.distance.rma.mv() should be faster by using the estimates from the full model as starting values when fitting the models with the ith study/cluster deleted from the dataset o cooks.distance.rma.mv() gains 'reestimate' argument; when set to FALSE, variance/correlation components are not reestimated o rstandard.rma.mv() gains 'cluster' argument for computing cluster-level multivariate standardized residuals o added rstudent.rma.mv() and dfbetas.rma.mv() o smarter matching of elements in 'newmods' (when using a named vector) in predict() that also works for models with interactions (thanks to Nicole Erler for pointing out the problem) o rma.uni() and rma.mv() no longer issue (obvious) warnings when user constrains vi or V to 0 (i.e., vi=0 or V=0, respectively) o rma.mv() does more intelligent filtering based on NAs in V matrix o rma.mv() now ensures strict symmetry of any (var-cov or correlation) matrices specified via the 'R' argument o fixed 
rma.mv() so checks on 'R' argument run as intended; also fixed an issue when multiple formulas with slashes are specified via 'random' (thanks to Andrew Loignon for pointing out the problem) o suppressed showing calls on some warnings/errors in rma.mv() o rma.mv() now allows for a continuous-time autoregressive random effects structure (struct="CAR") and various spatial correlation structures (struct="SPEXP", "SPGAU", "SPLIN", "SPRAT", and "SPSPH") o rma.mv() now allows for struct="GEN" which models correlated random effects for any number of predictors, including continuous ones (i.e., this allows for 'random slopes') o in the various forest() functions, when options(na.action="na.pass") or options(na.action="na.exclude") and an annotation contains NA, this is now shown as a blank (instead of NA [NA, NA]) o the various forest() and addpoly() functions gain a 'fonts' argument o the various forest() functions gain a 'top' argument o the various forest() functions now show correct point sizes when the weights of the studies are exactly the same o forest.cumul.rma() gains a 'col' argument o funnel.default() and funnel.rma() can now take vectors as input for the 'col' and 'bg' arguments (and also for 'pch'); both functions also gain a 'legend' argument o addpoly() functions can now also show prediction interval bounds o removed 'formula interface' from escalc(); until this actually adds some kind of extra functionality, this just makes escalc() more confusing to use o escalc() can now compute the coefficient of variation ratio and the variability ratio for pre-post or matched designs ("CVRC", "VRC") o escalc() does a bit more housekeeping o added (currently undocumented) arguments 'onlyo1', 'addyi', and 'addvi' to escalc() that allow for more flexibility when computing certain bias corrections and when computing sampling variances for measures that make use of the 'add' and 'to' arguments o escalc() now sets 'add=0' for measures where the use of such a bias correction makes little sense; this applies to the following measures: "AS", "PHI", "RTET", "IRSD", "PAS", "PFT", "IRS", and "IRFT"; one can still force the use of the bias correction by explicitly setting the 'add' argument to some non-zero value o added 'clim' argument to summary.escalc() o added 'ilim' argument to trimfill() o labbe() gains 'lty' argument o labbe() now (invisibly) returns a data frame with the coordinates of the points that were drawn (which may be useful for manual labeling of points in the plot) o added a print method for 'profile.rma' objects o profile.rma.mv() now check whether any of the profiled log-likelihood values is larger than the log-likelihood of the fitted model (using numerical tolerance given by 'lltol') and issues a warning if so o profile.rma.uni(), profile.rma.mv(), and plot.profile.rma() gain 'cline' argument; plot.profile.rma() gains 'xlim', 'ylab', and 'main' arguments o fixed an issue with robust.rma.mv() when the model was fitted with sparse=TRUE (thanks to Roger Martineau for noting the problem) o various method functions (fitted(), resid(), predict(), etc.) 
behave in a more consistent manner when model omitted studies with missings o predict.rma() gains 'vcov' argument; when set to TRUE, the variance- covariance matrix of the predicted values is also returned o vcov.rma() can now also return the variance-covariance matrix of the fitted values (type="fitted") and the residuals (type="resid") o added `$<-` and as.matrix() methods for 'list.rma' objects o fixed error in simulate.rma() that would generate too many samples for 'rma.mv' models o added undocumented argument 'time' to all model fitting functions; if set to TRUE, the model fitting time is printed o added more tests (also for parallel operations); also, all tests updated to use proper tolerances instead of rounding o reorganized the documentation a bit Changes in Version 2.0-0 (2017-06-22) ===================================== o added simulate() method for 'rma' objects; added MASS to 'Suggests' (since simulating for 'rma.mv' objects requires mvrnorm() from MASS) o cooks.distance.rma.mv() now works properly even when there are missing values in the data o residuals() gains 'type' argument and can compute Pearson residuals o the 'newmods' argument in predict() can now be a named vector or a matrix/data frame with column names that get properly matched up with the variables in the model o added ranef.rma.mv() for extracting the BLUPs of the random effects for 'rma.mv' models o all functions that repeatedly refit models now have the option to show a progress bar o added ranktest.default(), so user can now pass the outcomes and corresponding sampling variances directly to the function o added regtest.default(), so user can now pass the outcomes and corresponding sampling variances directly to the function o funnel.default() gains 'subset' argument o funnel.default() and funnel.rma() gain 'col' and 'bg' arguments o plot.profile.rma() gains 'ylab' argument o more consistent handling of 'robust.rma' objects o added a print method for 'rma.gosh' objects o the (log) relative risk is now called the (log) risk ratio in all help files, plots, code, and comments o escalc() can now compute outcome measures based on paired binary data ("MPRR", "MPOR", "MPRD", "MPORC", and "MPPETO") o escalc() can now compute (semi-)partial correlation coefficients ("PCOR", "ZPCOR", "SPCOR") o escalc() can now compute measures of variability for single groups ("CVLN", "SDLN") and for the difference in variability between two groups ("CVR", "VR"); also the log transformed mean ("MNLN") has been added for consistency o escalc() can now compute the sampling variance for measure="PHI" for studies using stratified sampling (vtpye="ST") o the `[` method for 'escalc' objects now properly handles the 'ni' and 'slab' attributes and does a better job of cleaning out superfluous variable name information o added rbind() method for 'escalc' objects o added as.data.frame() method for 'list.rma' objects o added a new dataset (dat.pagliaro1992) for another illustration of a network meta-analysis o added a new dataset (dat.laopaiboon2015) on the effectiveness of azithromycin for treating lower respiratory tract infections o rma.uni() and rma.mv() now check if the ratio of the largest to smallest sampling variance is very large; results may not be stable then (and very large ratios typically indicate wrongly coded data) o model fitting functions now check if extra/superfluous arguments are specified via ... 
Changes in Version 1.9-9 (2016-09-25) ===================================== o started to use git as the version control system, GitHub to host the repository (https://github.com/wviechtb/metafor) for the development version of the package, Travis CI as continuous integration service (https://travis-ci.org/wviechtb/metafor), and Codecov for automated code coverage reporting (https://codecov.io/github/wviechtb/metafor) o argument 'knha' in rma.uni() and argument 'tdist' in rma.glmm() and rma.mv() are now superseded by argument 'test' in all three functions; for backwards compatibility, the 'knha' and 'tdist' arguments still work, but are no longer documented o rma(yi, vi, weights=1, test="knha") now yields the same results as rma(yi, vi, weighted=FALSE, test="knha") (but use of the Knapp and Hartung method in the context of an unweighted analysis remains an experimental feature) o one can now pass an 'escalc' object directly to rma.uni(), which then tries to automatically determine the 'yi' and 'vi' variables in the data frame (thanks to Christian Roever for the suggestion) o escalc() can now also be used to convert a regular data frame to an 'escalc' object o for measure="UCOR", the exact bias-correction is now used (instead of the approximation); when vtype="UB", the exact equation is now used to compute the unbiased estimate of the variance of the bias-corrected correlation coefficient; hence 'gsl' is now a suggested package (needed to compute the hypergeometric function) and is loaded when required o cooks.distance() now also works with 'rma.mv' objects; and since model fitting can take some time, an option to show a progress bar has been added o fixed an issue with robust.rma.mv() throwing errors when the model was fitted with sparse=TRUE o fixed an error with robust.rma.mv() when the model was fitted with user-defined weights (or a user-defined weight matrix) o added ranef() for extracting the BLUPs of the random effects (only for 'rma.uni' objects at the moment) o reverted to the pre-1.1-0 way of computing p-values for individual coefficients in permutest.rma.uni(), that is, the p-value is computed with mean(abs(z_perm) >= abs(z_obs) - tol) (where 'tol' is a numerical tolerance) o permutest.rma.uni() gains 'permci' argument, which can be used to obtain permutation-based CIs of the model coefficients (note that this is computationally very demanding and may take a long time to complete) o rma.glmm() continues to work even when the saturated model cannot be fitted (although the tests for heterogeneity are not available then) o rma.glmm() now allows control over the arguments used for 'method.args' (via control=list(hessianCtrl=list(...))) passed to hessian() (from the 'numDeriv' package) when using model="CM.EL" and measure="OR" o in rma.glmm(), default 'method.args' value for 'r' passed to hessian() has been increased to 16 (while this slows things down a bit, this appears to improve the accuracy of the numerical approximation to the Hessian, especially when tau^2 is close to 0) o the various forest() and addpoly() functions now have a new argument called 'width', which provides manual control over the width of the
annotation columns; this is useful when creating complex forest plots with a monospaced font and we want to ensure that all annotations are properly lined up at the decimal point o the annotations created by the various forest() and addpoly() functions are now a bit more compact by default o more flexible 'efac' argument in the various forest() functions o trailing zeros in the axis labels are now dropped in forest and funnel plots by default; but trailing zeros can be retained by specifying a numeric (and not an integer) value for the 'digits' argument o added funnel.default(), which directly takes as input a vector with the observed effect sizes or outcomes and the corresponding sampling variances, standard errors, and/or sample sizes o added plot.profile.rma(), a plot method for objects returned by the profile.rma.uni() and profile.rma.mv() functions o simplified baujat.rma.uni(), baujat.rma.mh(), and baujat.rma.peto() to baujat.rma(), which now handles objects of class 'rma.uni', 'rma.mh', and 'rma.peto' o baujat.rma() gains argument 'symbol' for more control over the plotting symbol o labbe() gains a 'grid' argument o more logical placement of labels in qqnorm.rma.uni(), qqnorm.rma.mh(), and qqnorm.rma.peto() functions (and more control thereof) o qqnorm.rma.uni() gains 'lty' argument o added gosh.rma() and plot.gosh.rma() for creating GOSH (i.e., graphical display of study heterogeneity) plots based on Olkin et al. (2012) o in the (rare) case where all observed outcomes are exactly equal to each other, test="knha" (i.e., knha=TRUE) in rma() now leads to more appropriate results o updated datasets so those containing precomputed effect size estimates or observed outcomes are already declared to be 'escalc' objects o added new datasets (dat.egger2001 and dat.li2007) on the effectiveness of intravenous magnesium in acute myocardial infarction o 'methods' package is now under 'Depends' (in addition to 'Matrix'), so that rma.mv(..., sparse=TRUE) always works, even under Rscript o some general code cleanup o added more tests (and used a more consistent naming scheme for tests) Changes in Version 1.9-8 (2015-09-28) ===================================== o due to more stringent package testing, it is increasingly difficult to ensure that the package passes all checks on older versions of R; from now on, the package will therefore require, and be checked under, only the current (and the development) version of R o added graphics, grDevices, and methods to Imports (due to recent change in how CRAN checks packages) o the 'struct' argument for rma.mv() now also allows for "ID" and "DIAG", which are identical to the "CS" and "HCS" structures, but with the correlation parameter fixed to 0 o added robust() for (cluster) robust tests and confidence intervals for 'rma.uni' and 'rma.mv' models (this uses a robust sandwich-type estimator of the variance-covariance matrix of the fixed effects along the lines of the Eicker-Huber-White method) o confint() now works for models fitted with the rma.mv() function; for variance and correlation parameters, the function provides profile likelihood confidence intervals; the output generated by the confint() function has been adjusted in general to make the formatting more consistent across the different model types o for objects of class 'rma.mv', profile() now provides profile plots for all (non-fixed) variance and correlation components of the model when no component is specified by the user (via the sigma2, tau2, rho, gamma2, or phi arguments) o for 
measure="MD" and measure="ROM", one can now choose between vtype="LS" (the default) and vtype="HO"; the former computes the sampling variances without assuming homoscedasticity, while the latter assumes homoscedasticity o multiple model objects can now be passed to the fitstats(), AIC(), and BIC() functions o check for duplicates in the 'slab' argument is now done *after* any subsetting is done (as suggested by Michael Dewey) o rma.glmm() now again works when using add=0, in which case some of the observed outcomes (e.g., log odds or log odds ratios) may be NA o when using rma.glmm() with model="CM.EL", the saturated model (used to compute the Wald-type and likelihood ratio tests for the presence of (residual) heterogeneity) often fails to converge; the function now continues to run (instead of stopping with an error) and simply omits the test results from the output o when using rma.glmm() with model="CM.EL" and inversion of the Hessian fails via the Choleski factorization, the function now makes another attempt via the QR decomposition (even when this works, a warning is issued) o for rma.glmm(), BIC and AICc values were switched around; corrected o more use of suppressWarnings() is made when functions repeatedly need to fit the same model, such as cumul(), influence(), and profile(); that way, one does not get inundated with the same warning(s) o some (overdue) updates to the documentation Changes in Version 1.9-7 (2015-05-22) ===================================== o default optimizer for rma.mv() changed to nlminb() (instead of optim() with "Nelder-Mead"); extensive testing indicated that nlminb() (and also optim() with "BFGS") is typically quicker and more robust; note that this is in principle a non-backwards compatible change, but really a necessary one; and you can always revert to the old behavior with control=list(optimizer="optim", optmethod="Nelder-Mead") o all tests have been updated in accordance with the recommended syntax of the 'testthat' package; for example, expect_equivalent(x,y) is used instead of test_that(x, is_equivalent_to(y)) o changed a few is_identical_to() comparisons to expect_equivalent() ones (that failed on Sparc Solaris) Changes in Version 1.9-6 (2015-05-07) ===================================== o funnel() now works again for 'rma.glmm' objects (note to self: quit breaking things that work!) 
o rma.glmm() will now only issue a warning (and not an error) when the Hessian for the saturated model cannot be inverted (which is needed to compute the Wald-type test for heterogeneity, so the test statistic is then simply set to NA) o rma.mv() now allows for two terms of the form ~ inner | outer; the variance components corresponding to such a structure are called gamma2 and correlations are called phi; other functions that work with objects of class 'rma.mv' have been updated accordingly o rma.mv() now provides (even) more optimizer choices: nlm() from the 'stats' package, hjk() and nmk() from the 'dfoptim' package, and ucminf() from the 'ucminf' package; choose the desired optimizer via the control argument (e.g., control=list(optimizer="nlm")) o profile.rma.uni() and profile.rma.mv() now can do parallel processing (which is especially relevant for 'rma.mv' objects, where profiling is crucial and model fitting can be slow) o the various confint() functions now have a 'transf' argument (to apply some kind of transformation to the model coefficients and confidence interval bounds); coefficients and bounds for objects of class 'rma.mh' and 'rma.peto' are no longer automatically transformed o the various forest() functions no longer enforce that the actual x-axis limits ('alim') encompass the observed outcomes to be plotted; also, outcomes below or above the actual x-axis limits are no longer shown o the various forest() functions now provide control over the horizontal lines (at the top/bottom) that are automatically added to the plot via the 'lty' argument (this also allows for removing them); also, the vertical reference line is now placed *behind* the points/CIs o forest.default() now has argument 'col' which can be used to specify the color(s) to be used for drawing the study labels, points, CIs, and annotations o the 'efac' argument for forest.rma() now also allows two values, the first for the arrows and CI limits, the second for summary estimates o corrected some axis labels in various plots when measure="PLO" o axes in labbe() plots now have "(Group 1)" and "(Group 2)" added by default o anova.rma() gains argument 'L' for specifying linear combinations of the coefficients in the model that should be tested to be zero o in case removal of a row of data would lead to one or more inestimable model coefficients, baujat(), cooks.distance(), dfbetas(), influence(), and rstudent() could fail for 'rma.uni' objects; such cases are now handled properly o for models with moderators, the predict() function now shows the study labels when they have been specified by the user (and 'newmods' is not used) o if there is only one fixed effect (model coefficient) in the model, the print.infl.rma.uni() function now shows the DFBETAS values with the other case diagnostics in a single table (for easier inspection); if there is more than one fixed effect, a separate table is still used for the DFBETAS values (with one column for each coefficient) o added measure="SMCRH" to the escalc() function for the standardized mean change using raw score standardization with heteroscedastic population variances at the two measurement occasions o added measure="ROMC" to the escalc() function for the (log transformed) ratio of means (response ratio) when the means reflect two measurement occasions (e.g., for a single group of people) and hence are correlated o added own function for computing/estimating the tetrachoric correlation coefficient (for measure="RTET"); package therefore no longer suggests 'polycor' but 
now suggests 'mvtnorm' (which is loaded as needed) o element 'fill' returned by trimfill.rma.uni() is now a logical vector (instead of a 0/1 dummy variable) o print.list.rma() now also returns the printed results invisibly as a data frame o added a new dataset (dat.senn2013) as another illustration of a network meta-analysis o metafor now depends on at least version 3.1.0 of R Changes in Version 1.9-5 (2014-11-24) ===================================== o moved the 'stats' and 'Matrix' packages from 'Depends' to 'Imports'; as a result, had to add 'utils' to 'Imports'; moved the 'Formula' package from 'Depends' to 'Suggests' o added update.rma() function (for updating/refitting a model); model objects also now store and keep the call o the vcov() function now also extracts the marginal variance-covariance matrix of the observed effect sizes or outcomes from a fitted model (of class 'rma.uni' or 'rma.mv') o rma.mv() now makes use of the Cholesky decomposition when there is a 'random = ~ inner | outer' formula and struct="UN"; this is numerically more stable than the old approach that avoided non-positive definite solutions by forcing the log-likelihood to be -Inf in those cases; the old behavior can be restored with 'control = list(cholesky=FALSE)' o rma.mv() now requires the 'inner' variable in an '~ inner | outer' formula to be a factor or character variable (except when 'struct' is "AR" or "HAR"); use '~ factor(inner) | outer' in case it isn't o anova.rma.uni() has been changed to anova.rma(), which now works for both 'rma.uni' and 'rma.mv' objects (see the short sketch at the end of this version's entries) o the profile.rma.mv() function now omits the number of the variance or correlation component from the plot title and x-axis label when the model only includes one of the respective parameters o profile() functions now pass on the ... argument also to the title() function used to create the figure titles (esp. relevant when using the 'cex.main' argument) o the 'drop00' argument of the rma.mh() and rma.peto() functions now also accepts a vector with two logicals, the first applies when calculating the observed outcomes, the second when applying the Mantel-Haenszel or Peto's method o weights.rma.uni() now shows the correct weights when weighted=FALSE o argument 'showweight' renamed to 'showweights' in the forest.default() and forest.rma() functions (more consistent with the naming of the various weights() functions) o added model.matrix.rma() function (to extract the model matrix from objects of class 'rma') o funnel() and radial() now (invisibly) return data frames with the coordinates of the points that were drawn (may be useful for manual labeling of points in the plots) o permutest.rma.uni() function now uses a numerical tolerance when making comparisons (>= or <=) between an observed test statistic and the test statistic under the permuted data; when using random permutations, the function now ensures that the very first permutation corresponds to the original data o corrected some missing/redundant row/column labels in some output o most require() calls replaced with requireNamespace() to avoid altering the search path (hopefully this won't break stuff ...) o some non-visible changes including more use of some (non-exported) helper functions for common tasks o dataset dat.collins1985a updated (including all reported outcomes and some more information about the various trials) o oh, and guess what? I updated the documentation ...
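A small sketch of the kind of model comparison that anova.rma() supports (the dat.bcg data and the 'ablat' moderator are only used for illustration; ML estimation is used so that the likelihood ratio test is appropriate):

    library(metafor)
    dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
    res0 <- rma(yi, vi, data=dat, method="ML")                  # random-effects model
    res1 <- rma(yi, vi, mods = ~ ablat, data=dat, method="ML")  # mixed-effects model with a moderator
    anova(res0, res1)  # likelihood ratio test comparing the two nested models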
Changes in Version 1.9-4 (2014-07-30) ===================================== o added method="GENQ" to rma.uni() for the generalized Q-statistic estimator of tau^2, which allows for user-defined weights (note: the DL and HE estimators are just special cases of this method; see the short sketch at the end of this version's entries) o when the model was fitted with method="GENQ", then confint() will now use the generalized Q-statistic method to construct the corresponding confidence interval for tau^2 (thanks to Dan Jackson for the code); the iterative method used to obtain the CI makes use of Farebrother's algorithm as implemented in the 'CompQuadForm' package o slight improvements in how the rma.uni() function handles non-positive sampling variances o rma.uni(), rma.mv(), and rma.glmm() now try to detect and remove any redundant predictors before the model fitting; therefore, if there are exact linear relationships among the predictor variables (i.e., perfect multicollinearity), terms are removed to obtain a set of predictors that is no longer perfectly multicollinear (a warning is issued when this happens); note that the order in which the variables are specified in the model formula can influence which terms are removed o the last update introduced an error in how hat values were computed when the model was fitted with the rma() function using the Knapp & Hartung method (i.e., when knha=TRUE); this has been fixed o regtest() no longer works (for now) with 'rma.mv' objects (it wasn't meant to in the first place); if you want to run something along the same lines, just consider adding some measure of the precision of the observed outcomes (e.g., their standard errors) as a predictor to the model o added "sqrtni" and "sqrtninv" as possible options for the 'predictor' argument of regtest() o more optimizers are now available for the rma.mv() function via the 'nloptr' package by setting 'control = list(optimizer="nloptr")'; when using this optimizer, the default is to use the BOBYQA implementation from that package with a relative convergence criterion of 1e-8 on the function value (see documentation on how to change these defaults) o predict.rma() function now works for 'rma.mv' objects with multiple tau^2 values even if the user specifies the 'newmods' argument but not the 'tau2.levels' argument (but a warning is issued and the prediction intervals are not computed) o argument 'var.names' now works properly in escalc() when the user has not made use of the 'data' argument (thanks to Jarrett Byrnes for bringing this to my attention) o added plot() function for cumulative random-effects model results as obtained with the cumul.rma.uni() function; the plot shows the model estimate on the x-axis and the corresponding tau^2 estimate on the y-axis in the cumulative order of the results o fixed the omitted offset term in the underlying model fitted by the rma.glmm() function when method="ML", measure="IRR", and model="UM.FS", that is, when fitting a mixed-effects Poisson regression model with fixed study effects to two-group event count data (thanks to Peter Konings for pointing out this error) o added two new datasets (dat.bourassa1996, dat.riley2003) o added function replmiss() (just a useful helper function) o package now uses LazyData: TRUE o some improvements to the documentation (do I still need to mention this every time?)
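To illustrate the "GENQ" estimator and the corresponding confint() method mentioned above (a minimal sketch; the dat.bcg data are only used for illustration, and inverse-variance weights are chosen here, in which case "GENQ" reduces to the DerSimonian-Laird estimator):

    library(metafor)
    dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
    res <- rma(yi, vi, weights=1/vi, data=dat, method="GENQ")  # generalized Q-statistic estimator
    confint(res)  # CI for tau^2 based on the generalized Q-statistic method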
Changes in Version 1.9-3 (2014-05-05) ===================================== o some minor tweaks to rma.uni() that should be user transparent o rma.uni() now has a 'weights' argument, allowing the user to specify arbitrary user-defined weights; all functions affected by this have been updated accordingly o better handling of mismatched length of yi and ni vectors in rma.uni() and rma.mv() functions o subsetting is now handled as early as possible within functions with subsetting capabilities; this avoids some (rare) cases where studies ultimately excluded by the subsetting could still affect the results o some general tweaks to rma.mv() that should make it a bit faster o argument 'V' of rma.mv() now also accepts a list of var-cov matrices for the observed effects or outcomes; from the list elements, the full (block diagonal) var-cov matrix V is then automatically constructed o rma.mv() now has a new argument 'W' allowing the user to specify arbitrary user-defined weights or an arbitrary weight matrix o rma.mv() now has a new argument 'sparse'; by setting this to TRUE, the function uses sparse matrix objects to the extent possible; this can speed up model fitting substantially for certain models (hence, the 'metafor' package now depends on the 'Matrix' package) o rma.mv() now allows for struct="AR" and struct="HAR", to fit models with (heteroscedastic) autoregressive (AR1) structures among the true effects (useful for meta-analyses of studies reporting outcomes at multiple time points) o rma.mv() now has a new argument 'Rscale' which can be used to control how matrices specified via the 'R' argument are scaled (see docs for more details) o rma.mv() now only checks for missing values in the rows of the lower triangular part of the V matrix (including the diagonal); this way, if Vi = matrix(c(.5,NA,NA,NA), nrow=2, ncol=2) is the var-cov matrix of the sampling errors for a particular study with two outcomes, then only the second row/column needs to be removed before the model fitting (and not the entire study) o added five new datasets (dat.begg1989, dat.ishak2007, dat.fine1993, dat.konstantopoulos2011, and dat.hasselblad1998) to provide further illustrations of the use of the rma.mv() function (for meta-analyses combining controlled and uncontrolled studies, for meta-analyses of longitudinal studies, for multilevel meta-analyses, and for network meta-analyses / mixed treatment comparison meta-analyses) o added rstandard.rma.mv() function to compute standardized residuals for models fitted with the rma.mv() function (rstudent.rma.mv() to be added at a later point); also added hatvalues.rma.mv() for computing the hat values and weights.rma.uni() for computing the weights (i.e., the diagonal elements of the weight matrix) o the various weights() functions now have a new argument 'type' to indicate whether only the diagonal elements of the weight matrix (default) or the entire weight matrix should be returned o the various hatvalues() functions now have a new argument 'type' to indicate whether only the diagonal elements of the hat matrix (default) or the entire hat matrix should be returned o predict.rma() function now works properly for 'rma.mv' objects (also has a new argument 'tau2.levels' to specify, where applicable, the levels of the inner factor when computing prediction intervals) o forest.rma() function now provides a bit more control over the color of the summary polygon and is now compatible with 'rma.mv' objects; also, has a new argument 'lty', which provides more control over the 
line type for the individual CIs and the prediction interval o addpoly.default() and addpoly.rma() now have a 'border' argument (for consistency with the forest.rma() function); addpoly.rma() now yields the correct CI bounds when the model was fitted with knha=TRUE o forest.cumul.rma() now provides the correct CI bounds when the models were fitted with the Knapp & Hartung method (i.e., when knha=TRUE in the original rma() function call) o the various forest() functions now return information about the chosen values for arguments xlim, alim, at, ylim, rows, cex, cex.lab, and cex.axis invisibly (useful for tweaking the default values); thanks to Michael Dewey for the suggestion o the various forest() functions now have a new argument, clim, to set limits for the confidence/prediction interval bounds o cumul.mh() and cumul.peto() now get the order of the studies right when there are missing values in the data o the 'transf' argument of leave1out.rma.mh(), leave1out.rma.peto(), cumul.rma.mh(), and cumul.rma.peto() should now be used to specify the actual function for the transformation (the former behavior of setting this argument to TRUE to exponentiate log RRs, log ORs, or log IRRs still works for back-compatibility); this is more consistent with how the cumul.rma.uni() and leave1out.rma.uni() functions work and is also more flexible o added bldiag() function to construct a block diagonal matrix from (a list of) matrices (may be needed to construct the V matrix when using the rma.mv() function); bdiag() function from the 'Matrix' package does the same thing, but creates sparse matrix objects o profile.rma.mv() now has a 'startmethod' argument; by setting this to "prev", successive model fits are started at the parameter estimates from the previous model fit; this may speed things up a bit; also, the method for automatically choosing the xlim values has been changed o slight improvement to profile.rma.mv() function, which would throw an error if the last model fit did not converge o added a new dataset (dat.linde2005) for replication of the analyses in Viechtbauer (2007) o added a new dataset (dat.molloy2014) for illustrating the meta-analysis of (r-to-z transformed) correlation coefficients o added a new dataset (dat.gibson2002) to illustrate the combined analysis of standardized mean differences and probit transformed risk differences o computations in weights.mh() slightly changed to prevent integer overflows for large counts o unnecessary warnings in transf.ipft.hm() are now suppressed (cases that raised those warnings were already handled correctly) o in predict(), blup(), cumul(), and leave1out(), when using the 'transf' argument, the standard errors (which are NA) are no longer shown in the output o argument 'slab' in various functions will now also accept non-unique study labels; make.unique() is used as needed to make them unique o vignettes("metafor") and vignettes("metafor_diagram") work again (yes, I know they are not true vignettes in the strict sense, but I think they should show up on the CRAN website for the package and using a minimal valid Sweave document that is recognized by the R build system makes that happen) o escalc() and its summary() method now keep better track when the data frame contains multiple columns with outcome or effect size values (and corresponding sampling variances) for print formatting; also simplified the class structure a bit (and hence, print.summary.escalc() removed) o summary.escalc() has a new argument 'H0' to specify the value of the outcome 
under the null hypothesis for computing the test statistics o added measures "OR2DN" and "D2ORN" to escalc() for transforming log odds ratios to standardized mean differences and vice-versa, based on the method of Cox & Snell (1989), which assumes normally distributed response variables within the two groups before the dichotomization o permutest.rma.uni() function now catches an error when the number of permutations requested is too large (for R to even create the objects to store the results in) and produces a proper error message o funnel.rma() function now allows the 'yaxis' argument to be set to "wi" so that the actual weights (in %) are placed on the y-axis (useful when arbitrary user-defined weights have been specified) o for rma.glmm(), the control argument 'optCtrl' is now used for passing control arguments to all of the optimizers (hence, control arguments nlminbCtrl and minqaCtrl are now defunct) o rma.glmm() should not throw an error anymore when including only a single moderator/predictor in the model o predict.rma() now returns an object of class 'list.rma' (therefore, function print.predict.rma() has been removed) o for 'list.rma' objects, added `[`, head(), and tail() methods o automated testing using the 'testthat' package (still many more tests to add, but finally made a start on this) o encoding changed to UTF-8 (to use 'foreign characters' in the docs and to make the HTML help files look a bit nicer) o guess what? some improvements to the documentation! (also combined some of the help files to reduce the size of the manual a bit; and yes, it's still way too big) Changes in Version 1.9-2 (2013-10-07) ===================================== o added function rma.mv() to fit multivariate/multilevel meta-analytic models via appropriate linear (mixed-effects) models; this function allows for modeling of non-independent sampling errors and/or true effects and can be used for network meta-analyses, meta-analyses accounting for phylogenetic relatedness, and other complicated meta-analytic data structures o added the AICc to the information criteria computed by the various model fitting functions o if the value of tau^2 is fixed by the user via the corresponding argument in rma.uni(), then tau^2 is no longer counted as an additional parameter for the computation of the information criteria (i.e., AIC, BIC, and AICc) o rma.uni(), rma.glmm(), and rma.mv() now use a more stringent check of whether the model matrix is of full rank o added profile() method functions for objects of class 'rma.uni' and 'rma.mv' (can be used to obtain a plot of the profiled log-likelihood as a function of a specific variance component or correlation parameter of the model) o predict.rma() function now has an 'intercept' argument that allows the user to decide whether the intercept term should be included when calculating the predicted values (rare that this should be changed from the default) o for rma.uni(), rma.glmm(), and rma.mv(), the 'control' argument can now also accept an integer value; values > 1 generate more verbose output about the progress inside of the function o rma.glmm() has been updated to work with lme4 1.0.x for fitting various models; as a result, model="UM.RS" can only use nAGQ=1 at the moment (hopefully this will change in the future) o the 'control' argument of rma.glmm() can now be used to pass all desired control arguments to the various functions and optimizers used for the model fitting (admittedly the use of lists within this argument is a bit unwieldy, but much more flexible) o rma.mh()
and rma.peto() also now have a 'verbose' argument (not really needed, but added for sake of consistency across functions) o fixed (silly) error that would prevent rma.glmm() from running for measures "IRR", "PLO", and "IRLN" when there are missing values in the data (lesson: add some missing values to datasets for the unit tests!) o a bit of code reorganization (should be user transparent) o vignettes ("metafor" and "metafor_diagram") are now just 'other files' in the doc directory (as these were not true vignettes to begin with) o some improvements to the documentation (as always) Changes in Version 1.9-1 (2013-07-20) ===================================== o rma.mh() now also implements the Mantel-Haenszel method for incidence rate differences (measure="IRD") o when analyzing incidence rate ratios (measure="IRR") with the rma.mh() function, the Mantel-Haenszel test for person-time data is now also provided o rma.mh() has a new argument 'correct' (default is TRUE) to indicate whether the continuity correction should be applied when computing the (Cochran-)Mantel-Haenszel test statistic o renamed elements 'CMH' and 'CMHp' (for the Cochran-Mantel-Haenszel test statistic and corresponding p-value) to 'MH' and 'MHp' o added function baujat() to create Baujat plots o added a new dataset (dat.pignon2000) to illustrate the use of the baujat() function o added function to.table() to convert data from vector format into the corresponding table format o added function to.long() to convert data from vector format into the corresponding long format o rma.glmm() now even runs when k=1 (yielding trivial results) o for models with an intercept and moderators, rma.glmm() now internally rescales (non-dummy) variables to z-scores during the model fitting (this improves the stability of the model fitting, especially when model="CM.EL"); results are given after back-scaling, so this should be transparent to the user o in rma.glmm(), default number of quadrature points (nAGQ) is now 7 (setting this to 100 was a bit overkill) o a few more error checks here and there for misspecified arguments o some improvements to the documentation Changes in Version 1.9-0 (2013-06-21) ===================================== o vignette renamed to 'metafor' so vignette("metafor") works now o added a diagram to the documentation, showing the various functions in the metafor package (and how they relate to each other); can be loaded with vignette("metafor_diagram") o anova.rma.uni() function can now also be used to test (sub)sets of model coefficients with a Wald-type test when a single model is passed to the function o the pseudo R^2 statistic is now automatically calculated by the rma.uni() function and supplied in the output (only for mixed-effects models and when the model includes an intercept, so that the random- effects model is clearly nested within the mixed-effects model) o component 'VAF' is now called 'R2' in anova.rma.uni() function o added function hc() that carries out a random-effects model analysis using the method by Henmi and Copas (2010); thanks to Michael Dewey for the suggestion and providing the code o added new dataset (dat.lee2004), which was used in the article by Henmi and Copas (2010) to illustrate their method o fixed missing x-axis labels in the forest() functions o rma.glmm() now computes Hessian matrices via the 'numDeriv' package when model="CM.EL" and measure="OR" (i.e., for the conditional logistic model with exact likelihood); so 'numDeriv' is now a suggested package and is loaded within 
rma.glmm() when required o trimfill.rma.uni() now also implements the "Q0" estimator (although the "L0" and "R0" estimators are generally to be preferred) o trimfill.rma.uni() now also calculates the SE of the estimated number of missing studies and, for estimator "R0", provides a formal test of the null hypothesis that the number of missing studies on a given side is zero o added new dataset (dat.bangertdrowns2004) o the 'level' argument in various functions now either accepts a value representing a percentage or a proportion (values greater than 1 are assumed to be a percentage) o summary.escalc() now computes confidence intervals correctly when using the 'transf' argument o computation of Cochran-Mantel-Haenszel statistic in rma.mh() changed slightly to avoid integer overflow with very big counts o some internal improvements with respect to object attributes that were getting discarded when subsetting o some general code cleanup o some improvements to the documentation Changes in Version 1.8-0 (2013-04-11) ===================================== o added additional clarifications about the change score outcome measures ("MC", "SMCC", and "SMCR") to the help file for the escalc() function and changed the code so that "SMCR" no longer expects argument 'sd2i' to be specified (which is not needed anyways) (thanks to Markus Kösters for bringing this to my attention) o sampling variance for the biserial correlation coefficient ("RBIS") is now calculated in a slightly more accurate way o llplot() now properly scales the log-likelihoods o argument 'which' in the plot.infl.rma.uni() function has been replaced with argument 'plotinf' which can now also be set to FALSE to suppress plotting of the various case diagnostics altogether o labeling of the axes in labbe() plots is now correct for odds ratios (and transformations thereof) o added two new datasets (dat.nielweise2007 and dat.nielweise2008) to illustrate some methods/models from the rma.glmm() function o added a new dataset (dat.yusuf1985) to illustrate the use of rma.peto() o test for heterogeneity is now conducted by the rma.peto() function exactly as described by Yusuf et al. 
(1985) o in rma.glmm(), default number of quadrature points (nAGQ) is now 100 (which is quite a bit slower, but should provide more than sufficient accuracy in most cases) o the standard errors of the HS and DL estimators of tau^2 are now correctly computed when tau^2 is prespecified by the user in the rma() function; in addition, the standard error of the SJ estimator is also now provided when tau^2 is prespecified o rma.uni() and rma.glmm() now use a better method to check whether the model matrix is of full rank o I^2 and H^2 statistics are now also calculated for mixed-effects models by the rma.uni() and rma.glmm() function; confint.rma.uni() provides the corresponding confidence intervals for 'rma.uni' models o various print() methods now have a new argument called 'signif.stars', which defaults to getOption("show.signif.stars") (which by default is TRUE) to determine whether the infamous 'significance stars' should be printed o slight changes in wording in the output produced by the print.rma.uni() and print.rma.glmm() functions o some improvements to the documentation Changes in Version 1.7-0 (2013-02-06) ===================================== o added rma.glmm() function for fitting of appropriate generalized linear (mixed-effects) models when analyzing odds ratios, incidence rate ratios, proportions, or rates; the function makes use of the 'lme4' and 'BiasedUrn' packages; these are now suggested packages and loaded within rma.glmm() only when required (this makes for faster loading of the 'metafor' package) o added several method functions for objects of class 'rma.glmm' (not all methods yet implemented; to be completed in the future) o rma.uni() now allows the user to specify a formula for the 'yi' argument, so instead of rma(yi, vi, mods=~mod1+mod2), one can specify the same model with rma(yi~mod1+mod2, vi) o rma.uni() now has a 'weights' argument to specify the inverse of the sampling variances (instead of using the 'vi' or 'sei' arguments); for now, this is all this argument should be used for (in the future, this argument may potentially be used to allow the user to define alternative weights) o rma.uni() now checks whether the model matrix is not of full rank and issues an error accordingly (instead of the rather cryptic error that was issued before) o rma.uni() now has a 'verbose' argument o coef.rma() now returns only the model coefficients (this change was necessary to make the package compatible with the 'multcomp' package; see help(rma) for an example); use coef(summary()) to obtain the full table of results o the escalc() function now does some more extensive error checking for misspecified data and some unusual cases o 'append' argument is now TRUE by default in the escalc() function o objects generated by the escalc() function now have their own class o added print() and summary() methods for objects of class 'escalc' o added `[` and cbind() methods for objects of class 'escalc' o added a few additional arguments to the escalc() function (i.e., slab, subset, var.names, replace, digits) o added 'drop00' argument to the escalc(), rma.uni(), rma.mh(), and rma.peto() functions o added "MN", "MC", "SMCC", and "SMCR" measures to the escalc() and rma.uni() functions for the raw mean, the raw mean change, and the standardized mean change (with change score or raw score standardization) as possible outcome measures o the "IRFT" measure in the escalc() and rma.uni() functions is now computed with 1/2*(sqrt(xi/ti) + sqrt(xi/ti+1/ti)) which is more consistent with the definition 
of the Freeman-Tukey transformation for proportions o added "RTET" measure to the escalc() and rma.uni() functions to compute the tetrachoric correlation coefficient based on 2x2 table data (the 'polycor' package is therefore now a suggested package, which is loaded within escalc() only when required) o added "RPB" and "RBIS" measures to the escalc() and rma.uni() functions to compute the point-biserial and biserial correlation coefficient based on means and standard deviations o added "PBIT" and "OR2D" measures to the escalc() and rma.uni() functions to compute the standardized mean difference based on 2x2 table data o added the "D2OR" measure to the escalc() and rma.uni() functions to compute the log odds ratio based on the standardized mean difference o added "SMDH" measure to the escalc() and rma.uni() functions to compute the standardized mean difference without assuming equal population variances o added "ARAW", "AHW", and "ABT" measures to the escalc() and rma.uni() functions for the raw value of Cronbach's alpha, the transformation suggested by Hakstian & Whalen (1976), and the transformation suggested by Bonett (2002) for the meta-analysis of reliability coefficients (see help(escalc) for details) o corrected a small mistake in the equation used to compute the sampling variance of the phi coefficient (measure="PHI") in the escalc() function o the permutest.rma.uni() function now uses an algorithm to find only the unique permutations of the model matrix (which may be much smaller than the total number of permutations), making the exact permutation test feasible in a larger set of circumstances (thanks to John Hodgson for making me aware of this issue and to Hans-Jörg Viechtbauer for coming up with a recursive algorithm for finding the unique permutations) o prediction interval in forest.rma() is now indicated with a dotted (instead of a dashed) line; ends of the interval are now marked with vertical bars o completely rewrote the funnel.rma() function which now supports many more options for the values to put on the y-axis; trimfill.rma.uni() function was adapted accordingly o removed the 'ni' argument from the regtest.rma() function; instead, sample sizes can now be explicitly specified via the 'ni' argument when using the rma.uni() function (i.e., when measure="GEN"); the escalc() function also now adds information on the 'ni' values to the resulting data frame (as an attribute of the 'yi' variable), so, if possible, this information is passed on to regtest.rma() o added switch so that regtest() can also provide the full results from the fitted model (thanks to Michael Dewey for the suggestion) o weights.rma.mh() now shows the weights in % as intended (thanks to Gavin Stewart for pointing out this error) o more flexible handling of the 'digits' argument in the various forest functions o forest functions now use pretty() by default to set the x-axis tick locations ('alim' and 'at' arguments can still be used for complete control) o studies that are considered to be 'influential' are now marked with an asterisk when printing the results returned by the influence.rma.uni() function (see the documentation of this function for details on how such studies are identified) o added additional extractor functions for some of the influence measures (i.e., cooks.distance(), dfbetas()); unfortunately, the covratio() and dffits() functions in the 'stats' package are not generic; so, to avoid masking, there are currently no extractor functions for these measures o better handling of missing 
values in some unusual situations o corrected small bug in fsn() that would not allow the user to specify the standard errors instead of the sampling variances (thanks to Bernd Weiss for pointing this out) o plot.infl.rma.uni() function now allows the user to specify which plots to draw (and the layout) and adds the option to show study labels on the x-axis o added proper print() method for objects generated by the confint.rma.uni(), confint.rma.mh(), and confint.rma.peto() functions o when the 'transf' or 'atransf' argument was a monotonically *decreasing* function, then confidence and prediction interval bounds were in reversed order; various functions now check for this and order the bounds correctly o trimfill.rma.uni() now only prints information about the number of imputed studies when actually printing the model object o qqnorm.rma.uni(), qqnorm.rma.mh(), and qqnorm.rma.peto() functions now have a new argument called 'label', which allows for labeling of points; the functions also now return (invisibly) the x and y coordinates of the points drawn o rma.mh() with measure="RD" now computes the standard error of the estimated risk difference based on Sato, Greenland, & Robins (1989), which provides a consistent estimate under both large-stratum and sparse-data limiting models o the restricted maximum likelihood (REML) is now calculated using the full likelihood equation (without leaving out additive constants) o the model deviance is now calculated as -2 times the difference between the model log-likelihood and the log-likelihood under the saturated model (this is a more appropriate definition of the deviance than just taking -2 times the model log-likelihood) o the naming scheme of the illustrative datasets bundled with the package has been changed; datasets are now called dat.authoryear (based on the first author and publication year of the corresponding article); therefore, the existing datasets have been renamed as follows (old name -> new name): * dat.bcg -> dat.colditz1994 * dat.warfarin -> dat.hart1999 * dat.los -> dat.normand1999 * dat.co2 -> dat.curtis1998 * dat.empint -> dat.mcdaniel1994 o but dat.bcg has been kept as an alias for dat.colditz1994, as it has been referenced under that name in some publications o added new dataset (dat.pritz1997) to illustrate the meta-analysis of proportions (raw values and transformations thereof) o added new dataset (dat.bonett2010) to illustrate the meta-analysis of Cronbach's alpha values (raw values and transformations thereof) o added new datasets (dat.hackshaw1998, dat.raudenbush1985) o (approximate) standard error of the tau^2 estimate is now computed and shown for most of the (residual) heterogeneity estimators o added nobs() and df.residual() methods for objects of class 'rma' o metafor.news() is now simply a wrapper for news(package="metafor") o the package code is now byte-compiled, which yields some modest increases in execution speed o some general code cleanup o the 'metafor' package no longer depends on the 'nlme' package o some improvements to the documentation Changes in Version 1.6-0 (2011-04-13) ===================================== o trimfill.rma.uni() now returns a proper object even when the number of missing studies is estimated to be zero o added the (log transformed) ratio of means as a possible outcome measure to the escalc() and rma.uni() functions (measure="ROM") o added new dataset (dat.co2) to illustrate the use of the ratio of means outcome measure o some additional error checking in the various forest functions (especially when using the 'ilab' argument) o in labbe.rma(), the solid and dashed lines are now drawn behind (and not on top
of) the points o slight change to transf.ipft.hm() so that missing values in 'targs$ni' are ignored o some improvements to the documentation Changes in Version 1.5-0 (2010-12-16) ===================================== o the 'metafor' package now has its own project website at: http://www.metafor-project.org/ o added labbe() function to create L'Abbe plots o the forest.default() and addpoly.default() functions now allow the user to directly specify the lower and upper confidence interval bounds (this can be useful when the CI bounds have been calculated with other methods/functions) o added the incidence rate for a single group and for two groups (and transformations thereof) as possible outcome measures to the escalc() and rma.uni() functions (measure="IRR", "IRD", "IRSD", "IR", "IRLN", "IRS", and "IRFT") o added the incidence rate ratio as a possible outcome measure to the rma.mh() function o added transformation functions related to incidence rates o added the Freeman-Tukey double arcsine transformation and its inverse to the transformation functions o added some additional error checking for out-of-range p-values in the permutest.rma.uni() function o added some additional checking for out-of-range values in several transformation functions o added confint() methods for 'rma.mh' and 'rma.peto' objects (only for completeness sake; print already provides CIs) o added new datasets (dat.warfarin, dat.los, dat.empint) o some improvements to the documentation Changes in Version 1.4-0 (2010-07-30) ===================================== o a paper about the package has now been published in the Journal of Statistical Software (https://www.jstatsoft.org/v36/i03/) o added citation info; see: citation("metafor") o the 'metafor' package now depends on the 'nlme' package o added extractor functions for the AIC, BIC, and deviance o some updates to the documentation Changes in Version 1.3-0 (2010-06-25) ===================================== o the 'metafor' package now depends on the 'Formula' package o made escalc() generic and implemented a default and a formula interface o added the (inverse) arcsine transformation to the set of transformation functions Changes in Version 1.2-0 (2010-05-18) ===================================== o cases where k is very small (e.g., k equal to 1 or 2) are now handled more gracefully o added sanity check for cases where all observed outcomes are equal to each other (this led to division by zero when using the Knapp & Hartung method) o the "smarter way to set the number of iterations for permutation tests" (see notes for previous version below) now actually works like it is supposed to o the permutest.rma.uni() function now provides more sensible results when k is very small; the documentation for the function has also been updated with some notes about the use of permutation tests under those circumstances o made some general improvements to the various forest plot functions making them more flexible in particular when creating more complex displays; most importantly, added a 'rows' argument and removed the 'addrows' argument o some additional examples have been added to the help files for the forest and addpoly functions to demonstrate how to create more complex displays with these functions o added 'showweight' argument to the forest.default() and forest.rma() functions o cumul() functions not showing all of the output columns when using fixed-effects models has been corrected o weights.rma.uni() function now handles NAs appropriately o weights.rma.mh() and 
weights.rma.peto() functions added o logLik.rma() function now behaves more like other logLik() functions (such as logLik.lm() and logLik.lme()) Changes in Version 1.1-0 (2010-04-28) ===================================== o cint() generic removed and replaced with confint() method for objects of class 'rma.uni' o slightly improved the code to set the x-axis title in the forest() and funnel() functions o added coef() method for 'permutest.rma.uni' objects o added 'append' argument to escalc() function o implemented a smarter way to set the number of iterations for permutation tests (i.e., the permutest.rma.uni() function will now switch to an exact test if the number of iterations required for an exact test is actually smaller than the requested number of iterations for an approximate test) o changed how p-values for individual coefficients are calculated in permutest.rma.uni() to 'two times the one-tailed area under the permutation distribution' (more consistent with the way we typically define two-tailed p-values) o added 'retpermdist' argument to permutest.rma.uni() to return the permutation distributions of the test statistics o slight improvements to the various transformation functions to cope better with some extreme cases o p-values are now calculated in such a way that very small p-values stored in fitted model objects are no longer truncated to 0 (the printed results are still truncated depending on the number of digits specified) o changed the default number of iterations for the ML, REML, and EB estimators from 50 to 100 Changes in Version 1.0-1 (2010-02-02) ===================================== o version jump in conjunction with the upcoming publication of a paper in the Journal of Statistical Software describing the 'metafor' package o instead of specifying a model matrix, the user can now specify a model formula for the 'mods' argument in the rma() function (e.g., like in the lm() function) o permutest() function now allows exact permutation tests (but this is only feasible when k is not too large) o forest() function now uses the 'level' argument properly to adjust the CI level of the summary estimate for models without moderators (i.e., for fixed- and random-effects models) o forest() function can now also show the prediction interval as a dashed line for a random-effects model o information about the measure used is now passed on to the forest() and funnel() functions, which try to set an appropriate x-axis title accordingly o funnel() function now has more arguments (e.g., atransf, at) providing more control over the display of the x-axis o predict() function now has its own print() method and has a new argument called 'addx', which adds the values of the moderator variables to the returned object (when addx=TRUE) o functions now properly handle the na.action "na.pass" (treated essentially like "na.exclude") o added method for weights() to extract the weights used when fitting models with rma.uni() o some small improvements to the documentation Changes in Version 0.5-7 (2009-12-06) ===================================== o added permutest() function for permutation tests o added metafor.news() function to display the NEWS file of the 'metafor' package within R (based on the same idea in the 'animate' package by Yihui Xie) o added some checks for values below machine precision o a bit of code reorganization (nothing that affects how the functions work) Changes in Version 0.5-6 (2009-10-19) ===================================== o small changes to the computation of the
DFFITS and DFBETAS values in the influence() function, so that these statistics are more in line with their definitions in regular linear regression models o added option to the plot function for objects returned by influence() to allow plotting the covariance ratios on a log scale (now the default) o slight adjustments to various print() functions (to catch some errors when certain values were NA) o added a control option to rma() to adjust the step length of the Fisher scoring algorithm by a constant factor (this may be useful when the algorithm does not converge) Changes in Version 0.5-5 (2009-10-08) ===================================== o added the phi coefficient (measure="PHI"), Yule's Q ("YUQ"), and Yule's Y ("YUY") as additional measures to the escalc() function for 2x2 table data o forest plots now order the studies so that the first study is at the top of the plot and the last study at the bottom (the order can still be set with the 'order' or 'subset' argument) o added cumul() function for cumulative meta-analyses (with a corresponding forest() method to plot the cumulative results) o added leave1out() function for leave-one-out diagnostics o added option to qqnorm.rma.uni() so that the user can choose whether to apply the Bonferroni correction to the bounds of the pseudo confidence envelope o some internal changes to the class and methods names o some small corrections to the documentation Changes in Version 0.5-4 (2009-09-18) ===================================== o corrected the trimfill() function o improvements to various print functions o added a regtest() function for various regression tests of funnel plot asymmetry (e.g., Egger's regression test) o made ranktest() generic and added a method for objects of class 'rma' so that the test can be carried out after fitting o added anova() function for full vs reduced model comparisons via fit statistics and likelihood ratio tests o added the Orwin and Rosenberg approaches to fsn() o added H^2 measure to the output for random-effects models o in escalc(), measure="COR" is now used for the (usual) raw correlation coefficient and measure="UCOR" for the bias corrected correlation coefficients o some small corrections to the documentation Changes in Version 0.5-3 (2009-07-31) ===================================== o small changes to some of the examples o added the log transformed proportion (measure="PLN") as another measure to the escalc() function; changed "PL" to "PLO" for the logit (i.e., log odds) transformation for proportions Changes in Version 0.5-2 (2009-07-06) ===================================== o added an option in plot.infl.rma.uni() to open a new device for plotting the DFBETAS values o thanks to Jim Lemon, added a much better method for adjusting the size of the labels, annotations, and symbols in the forest() function when the number of studies is large Changes in Version 0.5-1 (2009-06-14) ===================================== o made some small changes to the documentation (some typos corrected, some confusing points clarified) Changes in Version 0.5-0 (2009-06-05) ===================================== o first version released on CRAN metafor/R/0000755000176200001440000000000014056237142012114 5ustar liggesusersmetafor/R/print.rma.mh.r0000644000176200001440000001045114036766141014621 0ustar liggesusersprint.rma.mh <- function(x, digits, showfit=FALSE, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.mh") if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (!exists(".rmspace")) cat("\n") cat(mstyle$section("Fixed-Effects Model")) cat(mstyle$section(paste0(" (k = ", x$k, ")"))) cat("\n") if (showfit) { fs <- .fcf(x$fit.stats$ML, digits[["fit"]]) names(fs) <- c("logLik", "deviance", "AIC", "BIC", "AICc") cat("\n") tmp <- capture.output(print(fs, quote=FALSE, print.gap=2)) .print.table(tmp, mstyle) } cat("\n") if (!is.na(x$I2)) { cat(mstyle$text("I^2 (total heterogeneity / total variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$I2), NA, .fcf(x$I2, 2)), "%"))) cat("\n") } if (!is.na(x$H2)) { cat(mstyle$text("H^2 (total variability / sampling variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$H2), NA, .fcf(x$H2, 2))))) cat("\n") } if (!is.na(x$QE)) { cat("\n") cat(mstyle$section("Test for Heterogeneity:"), "\n") cat(mstyle$result(paste0("Q(df = ", ifelse(x$k.yi-1 >= 0, x$k.yi-1, 0), ") = ", .fcf(x$QE, digits[["test"]]), ", p-val ", .pval(x$QEp, digits[["pval"]], showeq=TRUE, sep=" ")))) } if (any(!is.na(c(x$I2, x$H2, x$QE)))) cat("\n\n") if (is.element(x$measure, c("OR","RR","IRR"))) { res.table <- c(estimate=.fcf(unname(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]])) res.table.exp <- c(estimate=.fcf(exp(unname(x$beta)), digits[["est"]]), ci.lb=.fcf(exp(x$ci.lb), digits[["ci"]]), ci.ub=.fcf(exp(x$ci.ub), digits[["ci"]])) cat(mstyle$section("Model Results (log scale):")) cat("\n\n") tmp <- capture.output(.print.vector(res.table)) .print.table(tmp, mstyle) cat("\n") cat(mstyle$section(paste0("Model Results (", x$measure, " scale):"))) cat("\n\n") tmp <- capture.output(.print.vector(res.table.exp)) .print.table(tmp, mstyle) if (x$measure == "OR") { cat("\n") MH <- ifelse(is.na(x$MH), NA, .fcf(x$MH, digits[["test"]])) TA <- ifelse(is.na(x$TA), NA, .fcf(x$TA, digits[["test"]])) if (is.na(MH) && is.na(TA)) { width <- 1 } else { width <- max(nchar(MH), nchar(TA), na.rm=TRUE) } cat(mstyle$text("Cochran-Mantel-Haenszel Test: ")) if (is.na(MH)) { cat(mstyle$result("test value not computable for these data")) cat("\n") } else { cat(mstyle$result(paste0("CMH = ", formatC(MH, width=width), ", df = 1,", paste(rep(" ", nchar(x$k.pos)-1, collapse="")), " p-val ", .pval(x$MHp, digits[["pval"]], showeq=TRUE, sep=" ", add0=TRUE)))) cat("\n") } cat(mstyle$text("Tarone's Test for Heterogeneity: ")) if (is.na(TA)) { cat(mstyle$result("test value not computable for these data")) } else { cat(mstyle$result(paste0("X^2 = ", formatC(TA, width=width), ", df = ", x$k.pos-1, ", p-val ", .pval(x$TAp, digits[["pval"]], showeq=TRUE, sep=" ", add0=TRUE)))) } cat("\n") } if (x$measure == "IRR") { cat("\n") cat(mstyle$text("Mantel-Haenszel Test: ")) if (is.na(x$MH)) { cat(mstyle$result("test value not computable for these data")) } else { cat(mstyle$result(paste0("MH = ", .fcf(x$MH, digits[["test"]]), ", df = 1, p-val ", .pval(x$MHp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") } } else { res.table <- c(estimate=.fcf(unname(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]])) cat(mstyle$section("Model 
Results:")) cat("\n\n") tmp <- capture.output(.print.vector(res.table)) .print.table(tmp, mstyle) } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/print.regtest.r0000644000176200001440000000460414046527007015114 0ustar liggesusersprint.regtest <- function(x, digits=x$digits, ret.fit=x$ret.fit, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="regtest") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") cat(mstyle$section("Regression Test for Funnel Plot Asymmetry")) cat("\n\n") if (x$model == "lm") { cat(mstyle$text("Model: weighted regression with multiplicative dispersion")) } else { cat(mstyle$text(paste("Model: ", ifelse(is.element(x$method, c("FE","EE","CE")), "fixed-effects", "mixed-effects"), "meta-regression model"))) } cat("\n") if (x$predictor == "sei") cat(mstyle$text("Predictor: standard error")) if (x$predictor == "vi") cat(mstyle$text("Predictor: sampling variance")) if (x$predictor == "ni") cat(mstyle$text("Predictor: sample size")) if (x$predictor == "ninv") cat(mstyle$text("Predictor: inverse of the sample size")) if (x$predictor == "sqrtni") cat(mstyle$text("Predictor: square root sample size")) if (x$predictor == "sqrtninv") cat(mstyle$text("Predictor: inverse of the square root sample size")) cat("\n") if (ret.fit) { if (exists(".rmspace")) cat("\n") if (x$model == "lm") { print(summary(x$fit)) } else { print(x$fit) } if (exists(".rmspace")) cat("\n") } else { cat("\n") } cat(mstyle$text("Test for Funnel Plot Asymmetry: ")) if (is.na(x$ddf)) { cat(mstyle$result(paste0("z = ", .fcf(x$zval, digits[["test"]]), ", p ", .pval(x$pval, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("t = ", .fcf(x$zval, digits[["test"]]), ", df = ", x$ddf, ", p ", .pval(x$pval, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") if (!is.null(x$est)) { if (x$predictor == "sei") cat(mstyle$text("Limit Estimate (as sei -> 0): ")) if (x$predictor == "vi") cat(mstyle$text("Limit Estimate (as vi -> 0): ")) if (x$predictor %in% c("ninv", "sqrtninv")) cat(mstyle$text("Limit Estimate (as ni -> inf): ")) cat(mstyle$result(paste0("b = ", .fcf(x$est, digits[["est"]]), " (CI: ", .fcf(x$ci.lb, digits[["est"]]), ", ", .fcf(x$ci.ub, digits[["est"]]), ")"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/addpoly.default.r0000644000176200001440000002114014050175625015355 0ustar liggesusers# Note: If x and vi (or sei) are specified, the CI bounds for the polygon are # calculated based on a normal distribution. But the Knapp and Hartung method # may have been used to obtain vi (or sei), in which case we would want to use # a t-distribution. Adding a corresponding argument would be a bit awkward, # since the user would then have to specify the degrees of freedom. Instead, # the user can just pass the CI (and PI) bounds (that were calculated with # test="knha") directly to the function via the ci.lb and ci.ub (and pi.lb and # pi.ub) arguments. addpoly.default <- function(x, vi, sei, ci.lb, ci.ub, pi.lb, pi.ub, rows=-1, level=95, annotate=TRUE, digits=2, width, mlab, transf, atransf, targs, efac=1, col, border, fonts, cex, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE if (is.function(transf) && is.function(atransf)) stop(mstyle$stop("Use either 'transf' or 'atransf' to specify a transformation (not both).")) if (missing(targs)) targs <- NULL if (missing(mlab)) mlab <- NULL if (missing(col)) col <- "black" if (missing(border)) border <- "black" if (missing(cex)) cex <- NULL ddd <- list(...) if (!is.null(ddd$cr.lb)) pi.lb <- ddd$cr.lb if (!is.null(ddd$cr.ub)) pi.ub <- ddd$cr.ub lsegments <- function(..., cr.lb, cr.ub, addcred, pi.type) segments(...) ltext <- function(..., cr.lb, cr.ub, addcred, pi.type) text(...) lpolygon <- function(..., cr.lb, cr.ub, addcred, pi.type) polygon(...) ### set/get fonts (1st for labels, 2nd for annotations) ### when passing a named vector, the names are for 'family' and the values are for 'font' if (missing(fonts) || is.null(fonts)) { fonts <- rep(par("family"), 2) } else { if (length(fonts) == 1L) fonts <- rep(fonts, 2) } if (is.null(names(fonts))) fonts <- structure(c(1L,1L), names=fonts) par(family=names(fonts)[1], font=fonts[1]) ######################################################################### level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) yi <- x if (hasArg(ci.lb) && hasArg(ci.ub)) { ### CI bounds are specified by user if (length(ci.lb) != length(ci.ub)) stop(mstyle$stop("Length of 'ci.lb' and 'ci.ub' is not the same.")) if (missing(vi) && missing(sei)) { ### vi/sei not specified, so calculate vi based on CI bounds ### note: assumes that the CI is a symmetric Wald-type CI ### computed based on a standard normal distribution vi <- ((ci.ub - ci.lb) / (2*qnorm(level/2, lower.tail=FALSE)))^2 } else { ### vi not specified, but sei is, so set vi = sei^2 if (missing(vi)) vi <- sei^2 } if (length(ci.lb) != length(vi)) stop(mstyle$stop("Length of 'vi' (or 'sei') does not match length of ('ci.lb', 'ci.ub') pairs.")) } else { ### CI bounds are not specified by user if (missing(vi)) { if (missing(sei)) { stop(mstyle$stop("Must specify either 'vi', 'sei', or ('ci.lb', 'ci.ub') pairs.")) } else { vi <- sei^2 } } if (length(yi) != length(vi)) stop(mstyle$stop("Length of 'vi' (or 'sei') does not match length of 'x'.")) ci.lb <- yi - qnorm(level/2, lower.tail=FALSE) * sqrt(vi) ci.ub <- yi + qnorm(level/2, lower.tail=FALSE) * sqrt(vi) } if (hasArg(pi.lb) && hasArg(pi.ub)) { if (length(pi.lb) != length(pi.ub)) stop(mstyle$stop("Length of 'pi.lb' and 'pi.ub' is not the same.")) if (length(pi.lb) != length(yi)) stop(mstyle$stop("Length of ('pi.lb', 'pi.ub') does not match length of 'x'.")) } else { pi.lb <- rep(NA, length(yi)) pi.ub <- rep(NA, length(yi)) } k <- length(yi) ### set rows value if (is.null(rows)) { rows <- -1:(-k) } else { if (length(rows) == 1L) rows <- rows:(rows-k+1) } if (length(rows) != length(yi)) stop(mstyle$stop(paste0("Length of the 'rows' argument (", length(rows), ") does not correspond to the number of polygons to be plotted (", length(yi), ")."))) ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) if (any(yivi.na)) { not.na <- !yivi.na if (na.act == "na.omit") { yi <- yi[not.na] vi <- vi[not.na] ci.lb <- ci.lb[not.na] ci.ub <- ci.ub[not.na] pi.lb <- 
pi.lb[not.na] pi.ub <- pi.ub[not.na] mlab <- mlab[not.na] ### rearrange rows due to NAs being omitted rows.new <- rows rows.na <- rows[!not.na] for (j in seq_len(length(rows.na))) { rows.new[rows <= rows.na[j]] <- rows.new[rows <= rows.na[j]] + 1 } rows <- rows.new[not.na] } if (na.act == "na.fail") stop(mstyle$stop("Missing values in results.")) } k <- length(yi) ### if requested, apply transformation to yi's and CI bounds if (is.function(transf)) { if (is.null(targs)) { yi <- sapply(yi, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) pi.lb <- sapply(pi.lb, transf) pi.ub <- sapply(pi.ub, transf) } else { yi <- sapply(yi, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) pi.lb <- sapply(pi.lb, transf, targs) pi.ub <- sapply(pi.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] tmp <- .psort(pi.lb, pi.ub) pi.lb <- tmp[,1] pi.ub <- tmp[,2] ### determine height of plot and set cex accordingly (if not specified) par.usr <- par("usr") height <- par.usr[4]-par.usr[3] ### cannot use this since the value of k used in creating the plot is unknown #lheight <- strheight("O") #cex.adj <- ifelse(k * lheight > height * 0.8, height/(1.25 * k * lheight), 1) cex.adj <- min(1,20/height) xlim <- par.usr[1:2] if (is.null(cex)) cex <- par("cex") * cex.adj ### add annotations if (annotate) { if (is.function(atransf)) { if (is.null(targs)) { annotext <- cbind(sapply(yi, atransf), sapply(ci.lb, atransf), sapply(ci.ub, atransf)) } else { annotext <- cbind(sapply(yi, atransf, targs), sapply(ci.lb, atransf, targs), sapply(ci.ub, atransf, targs)) } ### make sure order of intervals is always increasing tmp <- .psort(annotext[,2:3]) annotext[,2:3] <- tmp } else { annotext <- cbind(yi, ci.lb, ci.ub) } annotext <- .fcf(annotext, digits) if (missing(width) || is.null(width)) { width <- apply(annotext, 2, function(x) max(nchar(x))) } else { if (length(width) == 1L) width <- rep(width, ncol(annotext)) } for (j in seq_len(ncol(annotext))) { annotext[,j] <- formatC(annotext[,j], width=width[j]) } annotext <- cbind(annotext[,1], " [", annotext[,2], ", ", annotext[,3], "]") annotext <- apply(annotext, 1, paste, collapse="") par(family=names(fonts)[2], font=fonts[2]) ltext(x=xlim[2], rows, labels=annotext, pos=2, cex=cex, ...) par(family=names(fonts)[1], font=fonts[1]) } if (length(col) == 1L) col <- rep(col, k) if (length(border) == 1L) border <- rep(border, k) ### add polygon(s) for (i in seq_len(k)) { lsegments(pi.lb[i], rows[i], pi.ub[i], rows[i], lty="dotted", col="gray50", ...) lsegments(pi.lb[i], rows[i]-(height/150)*cex*efac, pi.lb[i], rows[i]+(height/150)*cex*efac, col="gray50", ...) lsegments(pi.ub[i], rows[i]-(height/150)*cex*efac, pi.ub[i], rows[i]+(height/150)*cex*efac, col="gray50", ...) lpolygon(x=c(ci.lb[i], yi[i], ci.ub[i], yi[i]), y=c(rows[i], rows[i]+(height/100)*cex*efac, rows[i], rows[i]-(height/100)*cex*efac), col=col[i], border=border[i], ...) if (!is.null(mlab)) { if (is.list(mlab)) { ltext(xlim[1], rows[i], mlab[[i]], pos=4, cex=cex, ...) } else { ltext(xlim[1], rows[i], mlab[i], pos=4, cex=cex, ...) } } } } metafor/R/addpoly.rma.r0000644000176200001440000000352014046526444014516 0ustar liggesusersaddpoly.rma <- function(x, row=-2, level=x$level, annotate=TRUE, addpred=FALSE, digits=2, width, mlab, transf, atransf, targs, efac=1, col, border, fonts, cex, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma") if (!x$int.only) stop(mstyle$stop("Fitted model should not contain moderators.")) if (missing(width)) width <- NULL if (missing(mlab)) mlab <- NULL if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE if (missing(targs)) targs <- NULL if (missing(col)) col <- "black" if (missing(border)) border <- "black" if (missing(fonts)) fonts <- NULL if (missing(cex)) cex <- NULL ddd <- list(...) if (!is.null(ddd$addcred)) addpred <- ddd$addcred if (is.null(ddd$pi.type)) { pi.type <- "default" } else { pi.type <- ddd$pi.type } if (addpred) { temp <- predict(x, level=level, pi.type=pi.type) pi.lb <- temp$pi.lb pi.ub <- temp$pi.ub } else { pi.lb <- NA pi.ub <- NA } ######################################################################### ### label for model estimate (if not specified) if (is.null(mlab)) mlab <- sapply(x$method, switch, "FE"="FE Model", "EE"="EE Model", "CE"="CE Model", "RE Model", USE.NAMES=FALSE) ### passing ci.lb and ci.ub, so that the bounds are correct when the model was fitted with test="knha" addpoly(x$beta, ci.lb=x$ci.lb, ci.ub=x$ci.ub, pi.lb=pi.lb, pi.ub=pi.ub, rows=row, level=level, annotate=annotate, digits=digits, width=width, mlab=mlab, transf=transf, atransf=atransf, targs=targs, efac=efac, col=col, border=border, fonts=fonts, cex=cex, ...) } metafor/R/anova.rma.r0000644000176200001440000004234314046526641014173 0ustar liggesusersanova.rma <- function(object, object2, btt, X, att, Z, digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma", notap=c("rma.mh", "rma.peto"), notav="rma.glmm") if (missing(digits)) { digits <- .get.digits(xdigits=object$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=object$digits, dmiss=FALSE) } ddd <- list(...) 
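# Hedged usage sketch (assumes a fitted 'rma' object 'res' with three model coefficients;
# names are illustrative, not part of this file):
#   anova(res, btt=2:3)        # Wald-type test of the 2nd and 3rd model coefficients
#   anova(res, X=c(0, 1, -1))  # test of the linear contrast b2 - b3 = 0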
.chkdots(ddd, c("test", "L")) if (!is.null(ddd$L)) X <- ddd$L if (!missing(att) && !inherits(object, "rma.ls")) stop(mstyle$stop("Can only specify 'att' for location-scale models.")) if (!missing(Z) && !inherits(object, "rma.ls")) stop(mstyle$stop("Can only specify 'Z' for location-scale models.")) if (missing(object2)) { ### if only 'object' has been specified, can use function to test (sets) of coefficients via ### the 'btt' (or 'att') argument or one or more linear contrasts of the coefficients via the ### 'X' (or 'Z') argument x <- object if (missing(X) && missing(Z)) { ### if 'X' (and 'Z') has not been specified, then do a Wald-test via the 'btt' argument (can also use 'att' for location-scale models) if (inherits(object, "rma.ls") && !missing(att)) { if (!missing(btt)) stop(mstyle$stop("Can only specify either 'btt' or 'att', but not both.")) ### set/check 'att' argument att <- .set.btt(att, x$q, x$Z.int.incl, colnames(x$Z)) m <- length(att) QS <- try(as.vector(t(x$alpha)[att] %*% chol2inv(chol(x$va[att,att])) %*% x$alpha[att]), silent=TRUE) if (inherits(QS, "try-error")) QS <- NA if (x$test == "t") { QS <- QS / m QSdf <- c(m, x$QSdf[2]) QSp <- pf(QS, df1=QSdf[1], df2=QSdf[2], lower.tail=FALSE) } else { QSdf <- c(m, NA) QSp <- pchisq(QS, df=QSdf[1], lower.tail=FALSE) } res <- list(QS=QS, QSdf=QSdf, QSp=QSp, att=att, k=x$k, q=x$q, m=m, test=x$test, digits=digits, type="Wald.att") } else { ### set/check 'btt' argument btt <- .set.btt(btt, x$p, x$int.incl, colnames(x$X)) m <- length(btt) QM <- try(as.vector(t(x$beta)[btt] %*% chol2inv(chol(x$vb[btt,btt])) %*% x$beta[btt]), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA if (is.element(x$test, c("knha","adhoc","t"))) { QM <- QM / m QMdf <- c(m, x$QMdf[2]) QMp <- pf(QM, df1=QMdf[1], df2=QMdf[2], lower.tail=FALSE) } else { QMdf <- c(m, NA) QMp <- pchisq(QM, df=QMdf[1], lower.tail=FALSE) } res <- list(QM=QM, QMdf=QMdf, QMp=QMp, btt=btt, k=x$k, p=x$p, m=m, test=x$test, digits=digits, type="Wald.btt", class=class(x)) } } else { if (inherits(object, "rma.ls") && !missing(Z)) { ### if 'Z' has been specified, then do Wald-type test(s) via 'Z' argument if (!missing(X)) stop(mstyle$stop("Can only specify either 'X' or 'Z', but not both.")) if (.is.vector(Z)) Z <- rbind(Z) if (is.data.frame(Z)) Z <- as.matrix(Z) if (is.character(Z)) stop(mstyle$stop("Argument 'Z' must be a numeric vector/matrix.")) ### if model has an intercept term and Z has q-1 columns, assume user left out the intercept and add it automatically if (x$Z.int.incl && ncol(Z) == (x$q-1)) Z <- cbind(1, Z) ### if Z has q+1 columns, assume that last column is the right-hand side ### leave this out for now; maybe add later or a 'rhs' argument (as linearHypothesis() from car package) #if (ncol(Z) == (x$q+1)) { # rhs <- Z[,x$q+1] # Z <- Z[,seq_len(x$q)] #} if (ncol(Z) != x$q) stop(mstyle$stop(paste0("Length or number of columns of 'Z' (", ncol(Z), ") does not match the number of scale coefficients (", x$q, ")."))) m <- nrow(Z) ### test of individual hypotheses Za <- Z %*% x$alpha vZa <- Z %*% x$va %*% t(Z) se <- sqrt(diag(vZa)) zval <- c(Za/se) if (x$test == "t") { pval <- if (x$ddf.alpha > 0) 2*pt(abs(zval), df=x$ddf.alpha, lower.tail=FALSE) else rep(NA,m) } else { pval <- 2*pnorm(abs(zval), lower.tail=FALSE) } ### omnibus test of all hypotheses (only possible if 'Z' is of full rank) QS <- NA ### need this in case QS cannot be calculated below QSp <- NA ### need this in case QSp cannot be calculated below if (rankMatrix(Z) == m) { QS <- try(as.vector(t(Za) %*% chol2inv(chol(vZa)) 
%*% Za), silent=TRUE) if (inherits(QS, "try-error")) QS <- NA if (x$test == "t") { QS <- QS / m QSdf <- c(m, x$QSdf[2]) QSp <- if (QSdf[2] > 0) pf(QS, df1=QSdf[1], df2=QSdf[2], lower.tail=FALSE) else NA } else { QSdf <- c(m, NA) QSp <- pchisq(QS, df=QSdf[1], lower.tail=FALSE) } } ### create a data frame with each row specifying the linear combination tested hyp <- rep("", m) for (j in seq_len(m)) { Zj <- round(Z[j,], digits[["est"]]) ### coefficients for the jth contrast sel <- Zj != 0 ### TRUE if coefficient is != 0 hyp[j] <- paste(paste(Zj[sel], rownames(x$alpha)[sel], sep="*"), collapse=" + ") ### coefficient*variable + coefficient*variable ... hyp[j] <- gsub("1*", "", hyp[j], fixed=TRUE) ### turn '+1' into '+' and '-1' into '-' hyp[j] <- gsub("+ -", "- ", hyp[j], fixed=TRUE) ### turn '+ -' into '-' } hyp <- paste0(hyp, " = 0") ### add '= 0' at the right hyp <- data.frame(hyp, stringsAsFactors=FALSE) colnames(hyp) <- "" rownames(hyp) <- paste0(seq_len(m), ":") ### add '1:', '2:', ... as row names res <- list(QS=QS, QSdf=QSdf, QSp=QSp, hyp=hyp, Za=Za, se=se, zval=zval, pval=pval, k=x$k, q=x$q, m=m, test=x$test, ddf=x$ddf.alpha, digits=digits, type="Wald.Za") } else { ### if 'X' has been specified, then do Wald-type test(s) via 'X' argument if (.is.vector(X)) X <- rbind(X) if (is.data.frame(X)) X <- as.matrix(X) if (is.character(X)) stop(mstyle$stop("Argument 'X' must be a numeric vector/matrix.")) ### if model has an intercept term and X has p-1 columns, assume user left out the intercept and add it automatically if (x$int.incl && ncol(X) == (x$p-1)) X <- cbind(1, X) ### if X has p+1 columns, assume that last column is the right-hand side ### leave this out for now; maybe add later or a 'rhs' argument (as linearHypothesis() from car package) #if (ncol(X) == (x$p+1)) { # rhs <- X[,x$p+1] # X <- X[,seq_len(x$p)] #} if (ncol(X) != x$p) stop(mstyle$stop(paste0("Length or number of columns of 'X' (", ncol(X), ") does not match the number of ", ifelse(inherits(object, "rma.ls"), "location", "model"), " coefficients (", x$p, ")."))) m <- nrow(X) ### ddf calculation if (is.element(x$test, c("knha","adhoc","t"))) { if (length(x$ddf) == 1L) { ddf <- rep(x$ddf, m) } else { ddf <- rep(NA, m) for (j in seq_len(m)) { bn0 <- X[j,] != 0 ddf[j] <- min(x$ddf[bn0]) } } } else { ddf <- rep(NA, m) } ### test of individual hypotheses Xb <- X %*% x$beta vXb <- X %*% x$vb %*% t(X) se <- sqrt(diag(vXb)) zval <- c(Xb/se) if (is.element(x$test, c("knha","adhoc","t"))) { pval <- sapply(seq_along(ddf), function(j) if (ddf[j] > 0) 2*pt(abs(zval[j]), df=ddf[j], lower.tail=FALSE) else NA) } else { pval <- 2*pnorm(abs(zval), lower.tail=FALSE) } ### omnibus test of all hypotheses (only possible if 'X' is of full rank) QM <- NA ### need this in case QMp cannot be calculated below QMdf <- NA ### need this in case X is not of full rank QMp <- NA ### need this in case QMp cannot be calculated below if (rankMatrix(X) == m) { ### use try(), since this could fail: this could happen when the var-cov matrix of the ### fixed effects has been estimated using robust() -- 'vb' is then only guaranteed to ### be positive semidefinite, so for certain linear combinations, vXb could be singular ### (see Cameron & Miller, 2015, p. 
326) QM <- try(as.vector(t(Xb) %*% chol2inv(chol(vXb)) %*% Xb), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA if (is.element(x$test, c("knha","adhoc","t"))) { QM <- QM / m QMdf <- c(m, min(ddf)) QMp <- if (QMdf[2] > 0) pf(QM, df1=QMdf[1], df2=QMdf[2], lower.tail=FALSE) else NA } else { QMdf <- c(m, NA) QMp <- pchisq(QM, df=QMdf[1], lower.tail=FALSE) } } ### create a data frame with each row specifying the linear combination tested hyp <- rep("", m) for (j in seq_len(m)) { Xj <- round(X[j,], digits[["est"]]) ### coefficients for the jth contrast sel <- Xj != 0 ### TRUE if coefficient is != 0 hyp[j] <- paste(paste(Xj[sel], rownames(x$beta)[sel], sep="*"), collapse=" + ") ### coefficient*variable + coefficient*variable ... hyp[j] <- gsub("1*", "", hyp[j], fixed=TRUE) ### turn '+1' into '+' and '-1' into '-' hyp[j] <- gsub("+ -", "- ", hyp[j], fixed=TRUE) ### turn '+ -' into '-' } hyp <- paste0(hyp, " = 0") ### add '= 0' at the right hyp <- data.frame(hyp, stringsAsFactors=FALSE) colnames(hyp) <- "" rownames(hyp) <- paste0(seq_len(m), ":") ### add '1:', '2:', ... as row names res <- list(QM=QM, QMdf=QMdf, QMp=QMp, hyp=hyp, Xb=Xb, se=se, zval=zval, pval=pval, k=x$k, p=x$p, m=m, test=x$test, ddf=ddf, digits=digits, type="Wald.Xb") } } } else { ### if 'object' and 'object2' have been specified, can use function to ### do model comparisons via a likelihood ratio test (and fit indices) if (!inherits(object2, "rma")) stop(mstyle$stop("Argument 'object2' must be an object of class \"rma\".")) if (inherits(object2, c("rma.mh","rma.peto"))) stop(mstyle$stop("Function not applicable for objects of class \"rma.mh\" or \"rma.peto\".")) if (inherits(object2, "rma.glmm")) stop(mstyle$stop("Method not available for objects of class \"rma.glmm\".")) if (!identical(class(object), class(object2))) stop(mstyle$stop("Class of 'object1' must be the same as class of 'object2'.")) if (!is.null(ddd$test)) { test <- match.arg(ddd$test, c("LRT", "Wald")) } else { test <- "LRT" } ### assume 'object' is the full model and 'object2' the reduced model model.f <- object model.r <- object2 ### number of parameters in the models parms.f <- model.f$parms parms.r <- model.r$parms ### check if they have the same number of parameters if (parms.f == parms.r) stop(mstyle$stop("Models have the same number of parameters. LRT not meaningful.")) ### if parms.f < parms.r, then let 'object' be the reduced model and 'object2' the full model if (parms.f < parms.r) { model.f <- object2 model.r <- object parms.f <- model.f$parms parms.r <- model.r$parms } ### check if models are based on the same data (TODO: also check for same weights?) 
if (inherits(object, "rma.uni")) { if (!(identical(as.vector(model.f$yi), as.vector(model.r$yi)) && identical(as.vector(model.f$vi), as.vector(model.r$vi)))) ### as.vector() to strip attributes/names stop(mstyle$stop("Observed outcomes and/or sampling variances not equal in the full and reduced model.")) } if (inherits(object, "rma.mv")) { if (!(identical(as.vector(model.f$yi), as.vector(model.r$yi)) && identical(as.matrix(model.f$V), as.matrix(model.r$V)))) ### as.vector() to strip attributes/names, as.matrix() to make both V matrices non-sparse stop(mstyle$stop("Observed outcomes and/or sampling variances/covariances not equal in the full and reduced model.")) } ### for Wald-type test, both models should be fitted using the same method if (test == "Wald" && (model.f$method != model.r$method)) stop(mstyle$stop("Full and reduced model must use the same 'method' for the model fitting.")) ### for LRT, reduced model may use method="FE/EE/CE" and full model method="(RE)ML" ### which is fine, but the other way around doesn't really make sense if (is.element(model.f$method, c("FE","EE","CE")) && !is.element(model.r$method, c("FE","EE","CE"))) stop(mstyle$stop("Full model uses a fixed- and reduced model uses a random/mixed-effects model.")) ### could do even more checks for cases where the models are clearly not nested ###################################################################### ### for 'rma.uni' objects, calculate pseudo R^2 value (based on the ### proportional reduction in tau^2) comparing full vs. reduced model if (inherits(object, "rma.uni") && !inherits(object, "rma.ls") && !inherits(object2, "rma.ls")) { if (is.element(model.f$method, c("FE","EE","CE"))) { R2 <- NA } else if (identical(model.r$tau2,0)) { R2 <- 0 } else { R2 <- 100 * max(0, (model.r$tau2 - model.f$tau2)/model.r$tau2) } } else { R2 <- NA } ### for 'rma.uni' objects, extract tau^2 estimates if (inherits(object, "rma.uni") && !inherits(object, "rma.ls") && !inherits(object2, "rma.ls")) { tau2.f <- model.f$tau2 tau2.r <- model.r$tau2 } else { tau2.f <- NA tau2.r <- NA } if (test == "LRT") { parms.diff <- parms.f - parms.r if (model.f$method == "REML") { LRT <- model.r$fit.stats["dev","REML"] - model.f$fit.stats["dev","REML"] fit.stats.f <- t(model.f$fit.stats)["REML",] # to keep (row)names of fit.stats fit.stats.r <- t(model.r$fit.stats)["REML",] # to keep (row)names of fit.stats if (!identical(model.f$X, model.r$X)) warning(mstyle$warning("Models with different fixed effects. REML comparisons are not meaningful."), call.=FALSE) ### in this case, one could consider just taking the ML deviances, but this ### is really ad-hoc; there is some theory in Welham & Thompson (1997) about ### LRTs for fixed effects when using REML estimation, but this involves ### additional work } else { LRT <- model.r$fit.stats["dev","ML"] - model.f$fit.stats["dev","ML"] fit.stats.f <- t(model.f$fit.stats)["ML",] fit.stats.r <- t(model.r$fit.stats)["ML",] ### in principle, a 'rma.uni' model may have been fitted with something besides ML ### estimation (for tau^2), so technically the deviance is then not really that as ### obtained with proper ML estimation; issue a warning about this? 
} ### set LRT to 0 if LRT < 0 (this should not happen, but could due to numerical issues) LRT[LRT < 0] <- 0 pval <- pchisq(LRT, df=parms.diff, lower.tail=FALSE) res <- list(fit.stats.f=fit.stats.f, fit.stats.r=fit.stats.r, parms.f=parms.f, parms.r=parms.r, LRT=LRT, pval=pval, QE.f=model.f$QE, QE.r=model.r$QE, tau2.f=tau2.f, tau2.r=tau2.r, R2=R2, method=model.f$method, class.f=class(model.f), digits=digits, type="LRT") } if (test == "Wald") { btt <- setdiff(colnames(model.f$X), colnames(model.r$X)) if (length(btt) == 0L) stop(mstyle$stop("Full and reduced models appear to contain the same moderators.")) if (length(setdiff(colnames(model.r$X), colnames(model.f$X))) != 0L) stop(mstyle$stop("There are coefficients in the reduced model that are not in the full model.")) btt <- charmatch(btt, colnames(model.f$X)) if (anyNA(btt)) stop(mstyle$stop("Cannot identify coefficients to test.")) res <- anova(model.f, btt=btt) return(res) } } class(res) <- "anova.rma" return(res) } metafor/R/model.matrix.rma.r0000644000176200001440000000211613770372633015467 0ustar liggesusersmodel.matrix.rma <- function(object, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) ### note: lm() always returns X (never the full model matrix, even with na.exclude or na.pass) ### but it seems a bit more logical to actually return X.f in that case if (na.act == "na.omit") out <- object$X if (na.act == "na.exclude" || na.act == "na.pass") out <- object$X.f if (na.act == "na.fail" && any(!object$not.na)) stop(mstyle$stop("Missing values in results.")) if (inherits(object, "rma.ls")) { out <- list(location = out) if (na.act == "na.omit") out$scale <- object$Z if (na.act == "na.exclude" || na.act == "na.pass") out$scale <- object$Z.f if (na.act == "na.fail" && any(!object$not.na)) stop(mstyle$stop("Missing values in results.")) } return(out) } metafor/R/labbe.rma.r0000644000176200001440000002520614054725022014124 0ustar liggesuserslabbe.rma <- function(x, xlim, ylim, xlab, ylab, add=x$add, to=x$to, transf, targs, pch=21, psize, plim=c(0.5,3.5), col, bg, grid=FALSE, lty, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("rma.ls", "rma.uni.selmodel")) if (!x$int.only) stop(mstyle$stop("L'Abbe plots can only be drawn for models without moderators.")) if (!is.element(x$measure, c("RR","OR","RD","AS","IRR","IRD","IRSD"))) stop(mstyle$stop("Argument 'measure' must have been one of the following: 'RR','OR','RD','AS','IRR','IRD','IRSD'.")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (length(add) == 2L) ### for rma.mh and rma.peto objects (1st 'add' value applies to the individual outcomes) add <- add[1] if (length(to) == 2L) ### for rma.mh and rma.peto objects (1st 'to' value applies to the individual outcomes) to <- to[1] if (!is.element(to, c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) if (missing(transf)) transf <- FALSE transf.char <- deparse(substitute(transf)) if (missing(targs)) targs <- NULL if (missing(psize)) psize <- NULL if (missing(lty)) { lty <- c("solid", "dashed") ### 1st value = diagonal line, 2nd value = estimated effect line } else { if (length(lty) == 1L) lty <- c(lty, lty) } ### get ... argument ddd <- list(...) ### set defaults or get addyi and addvi arguments addyi <- ifelse(is.null(ddd$addyi), TRUE, ddd$addyi) addvi <- ifelse(is.null(ddd$addvi), TRUE, ddd$addvi) ### grid argument can either be a logical or a color if (is.logical(grid)) gridcol <- "lightgray" if (is.character(grid)) { gridcol <- grid grid <- TRUE } ######################################################################### ### note: 'pch', 'psize', 'col', and 'bg' must be of the same length as the original data passed to rma() ### so we have to apply the same subsetting (if necessary) and removing of NAs as done during the ### model fitting (note: NAs are removed further below) if (length(pch) == 1L) pch <- rep(pch, x$k.all) if (length(pch) != x$k.all) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) pch <- pch[x$subset] ### if user has set the point sizes if (!is.null(psize)) { if (length(psize) == 1L) psize <- rep(psize, x$k.all) if (length(psize) != x$k.all) stop(mstyle$stop(paste0("Length of the 'psize' argument (", length(psize), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) psize <- psize[x$subset] } if (missing(col)) col <- "black" if (length(col) == 1L) col <- rep(col, x$k.all) if (length(col) != x$k.all) stop(mstyle$stop(paste0("Length of the 'col' argument (", length(col), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) col <- col[x$subset] if (missing(bg)) bg <- "gray" if (length(bg) == 1L) bg <- rep(bg, x$k.all) if (length(bg) != x$k.all) stop(mstyle$stop(paste0("Length of the 'bg' argument (", length(bg), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) bg <- bg[x$subset] ######################################################################### ### these vectors may contain NAs ai <- x$ai.f bi <- x$bi.f ci <- x$ci.f di <- x$di.f x1i <- x$x1i.f x2i <- x$x2i.f t1i <- x$t1i.f t2i <- x$t2i.f ### drop00=TRUE may induce that the contrast-based yi value is NA; so ### make sure that the corresponding arm-based yi values are also NA 
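# Hedged illustration (hypothetical counts, not taken from this file): with drop00=TRUE, a
# study with ai=0, bi=10, ci=0, di=10 has yi=NA in the fitted object, so its arm-level
# values are set to NA below and the study is omitted from the plot.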
yi.is.na <- is.na(x$yi.f) ai[yi.is.na] <- NA bi[yi.is.na] <- NA ci[yi.is.na] <- NA di[yi.is.na] <- NA x1i[yi.is.na] <- NA x2i[yi.is.na] <- NA t1i[yi.is.na] <- NA t2i[yi.is.na] <- NA options(na.action = "na.pass") ### to make sure dat.t and dat.c are of the same length if (x$measure == "RR") { measure <- "PLN" dat.t <- escalc(measure=measure, xi=ai, mi=bi, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=ci, mi=di, add=add, to=to, addyi=addyi, addvi=addvi) } if (x$measure == "OR") { measure <- "PLO" dat.t <- escalc(measure=measure, xi=ai, mi=bi, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=ci, mi=di, add=add, to=to, addyi=addyi, addvi=addvi) } if (x$measure == "RD") { measure <- "PR" dat.t <- escalc(measure=measure, xi=ai, mi=bi, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=ci, mi=di, add=add, to=to, addyi=addyi, addvi=addvi) } if (x$measure == "AS") { measure <- "PAS" dat.t <- escalc(measure=measure, xi=ai, mi=bi, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=ci, mi=di, add=add, to=to, addyi=addyi, addvi=addvi) } if (x$measure == "IRR") { measure <- "IRLN" dat.t <- escalc(measure=measure, xi=x1i, ti=t1i, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=x2i, ti=t2i, add=add, to=to, addyi=addyi, addvi=addvi) } if (x$measure == "IRD") { measure <- "IR" dat.t <- escalc(measure=measure, xi=x1i, ti=t1i, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=x2i, ti=t2i, add=add, to=to, addyi=addyi, addvi=addvi) } if (x$measure == "IRSD") { measure <- "IRS" dat.t <- escalc(measure=measure, xi=x1i, ti=t1i, add=add, to=to, addyi=addyi, addvi=addvi) dat.c <- escalc(measure=measure, xi=x2i, ti=t2i, add=add, to=to, addyi=addyi, addvi=addvi) } options(na.action = na.act) ### check for NAs in yi/vi pairs and filter out has.na <- apply(is.na(dat.t), 1, any) | apply(is.na(dat.c), 1, any) not.na <- !has.na if (any(has.na)) { dat.t <- dat.t[not.na,] dat.c <- dat.c[not.na,] pch <- pch[not.na] col <- col[not.na] bg <- bg[not.na] if (!is.null(psize)) psize <- psize[not.na] } if (length(dat.t$yi)==0L || length(dat.c$yi)==0L) stop(mstyle$stop("No information in object to compute arm-level outcomes.")) ######################################################################### ### determine point sizes vi <- dat.t$vi + dat.c$vi k <- length(vi) if (is.null(psize)) { if (length(plim) < 2L) stop(mstyle$stop("Argument 'plim' must be of length 2 or 3.")) wi <- sqrt(1/vi) if (!is.na(plim[1]) && !is.na(plim[2])) { rng <- max(wi, na.rm=TRUE) - min(wi, na.rm=TRUE) if (rng <= .Machine$double.eps^0.5) { psize <- rep(1, k) } else { psize <- (wi - min(wi, na.rm=TRUE)) / rng psize <- (psize * (plim[2] - plim[1])) + plim[1] } } if (is.na(plim[1]) && !is.na(plim[2])) { psize <- wi / max(wi, na.rm=TRUE) * plim[2] if (length(plim) == 3L) psize[psize <= plim[3]] <- plim[3] } if (!is.na(plim[1]) && is.na(plim[2])) { psize <- wi / min(wi, na.rm=TRUE) * plim[1] if (length(plim) == 3L) psize[psize >= plim[3]] <- plim[3] } if (all(is.na(psize))) psize <- rep(1, k) } ### determine x/y values for line that indicates the estimated effect min.yi <- min(c(dat.t$yi, dat.c$yi)) max.yi <- max(c(dat.t$yi, dat.c$yi)) rng.yi <- max.yi - min.yi len <- 1000 intrcpt <- x$beta[1] if (x$measure == "RD") c.vals <- seq(ifelse(intrcpt>0, 0, -intrcpt), ifelse(intrcpt>0, 1-intrcpt, 1), length.out=len) if (x$measure == "RR") c.vals <- seq(min.yi-rng.yi, ifelse(intrcpt>0, 0-intrcpt, 0),
length.out=len) if (x$measure == "OR") c.vals <- seq(min.yi-rng.yi, max.yi+rng.yi, length.out=len) if (x$measure == "AS") c.vals <- seq(ifelse(intrcpt>0, 0, -intrcpt), ifelse(intrcpt>0, asin(sqrt(1))-intrcpt, asin(sqrt(1))), length.out=len) if (x$measure == "IRR") c.vals <- seq(min.yi-rng.yi, ifelse(intrcpt>0, 0-intrcpt, 0), length.out=len) if (x$measure == "IRD") c.vals <- seq(ifelse(intrcpt>0, 0, -intrcpt), ifelse(intrcpt>0, 1-intrcpt, 1), length.out=len) if (x$measure == "IRSD") c.vals <- seq(ifelse(intrcpt>0, 0, -intrcpt), ifelse(intrcpt>0, 1-intrcpt, 1), length.out=len) t.vals <- intrcpt + 1*c.vals if (is.function(transf)) { if (is.null(targs)) { dat.t$yi <- sapply(dat.t$yi, transf) dat.c$yi <- sapply(dat.c$yi, transf) c.vals <- sapply(c.vals, transf) t.vals <- sapply(t.vals, transf) } else { dat.t$yi <- sapply(dat.t$yi, transf, targs) dat.c$yi <- sapply(dat.c$yi, transf, targs) c.vals <- sapply(c.vals, transf, targs) t.vals <- sapply(t.vals, transf, targs) } } min.yi <- min(c(dat.t$yi, dat.c$yi)) max.yi <- max(c(dat.t$yi, dat.c$yi)) if (missing(xlim)) xlim <- c(min.yi, max.yi) if (missing(ylim)) ylim <- c(min.yi, max.yi) ### order points by psize order.vec <- order(psize, decreasing=TRUE) dat.t$yi.o <- dat.t$yi[order.vec] dat.c$yi.o <- dat.c$yi[order.vec] pch.o <- pch[order.vec] col.o <- col[order.vec] bg.o <- bg[order.vec] psize.o <- psize[order.vec] ### add x-axis label if (missing(xlab)) { xlab <- .setlab(measure, transf.char, atransf.char="FALSE", gentype=1) xlab <- paste(xlab, "(Group 1)") } ### add y-axis label if (missing(ylab)) { ylab <- .setlab(measure, transf.char, atransf.char="FALSE", gentype=1) ylab <- paste(ylab, "(Group 2)") } plot(NA, NA, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, ...) ### add grid (and redraw box) if (.isTRUE(grid)) { grid(col=gridcol) box(...) } ### add diagonal and estimated effects lines abline(a=0, b=1, lty=lty[1], ...) lines(c.vals, t.vals, lty=lty[2], ...) ### add points points(x=dat.c$yi.o, y=dat.t$yi.o, cex=psize.o, pch=pch.o, col=col.o, bg=bg.o, ...) ######################################################################### ### prepare data frame to return sav <- data.frame(x=dat.c$yi, y=dat.t$yi, cex=psize, pch=pch, col=col, bg=bg, ids=x$ids[not.na], slab=x$slab[not.na], stringsAsFactors=FALSE) invisible(sav) } metafor/R/baujat.rma.r0000644000176200001440000001151314054724714014330 0ustar liggesusersbaujat.rma <- function(x, xlim, ylim, xlab, ylab, cex, symbol="ids", grid=TRUE, progbar=FALSE, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("rma.glmm", "rma.mv", "robust.rma", "rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) ### grid argument can either be a logical or a color if (is.logical(grid)) gridcol <- "lightgray" if (is.character(grid)) { gridcol <- grid grid <- TRUE } ######################################################################### ### set up vectors to store results in delpred <- rep(NA_real_, x$k.f) vdelpred <- rep(NA_real_, x$k.f) ### predicted values under the full model pred.full <- x$X.f %*% x$beta ### note: skipping NA cases ### also: it is possible that model fitting fails, so that generates more NAs (these NAs will always be shown in output) if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!x$not.na[i]) next if (inherits(x, "rma.uni")) res <- try(suppressWarnings(rma.uni(x$yi.f, x$vi.f, weights=x$weights.f, mods=x$X.f, intercept=FALSE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, subset=-i, skipr2=TRUE)), silent=TRUE) if (inherits(x, "rma.mh")) { if (is.element(x$measure, c("RR","OR","RD"))) { res <- try(suppressWarnings(rma.mh(ai=x$ai.f, bi=x$bi.f, ci=x$ci.f, di=x$di.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=-i)), silent=TRUE) } else { res <- try(suppressWarnings(rma.mh(x1i=x$x1i.f, x2i=x$x2i.f, t1i=x$t1i.f, t2i=x$t2i.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=-i)), silent=TRUE) } } if (inherits(x, "rma.peto")) res <- try(suppressWarnings(rma.peto(ai=x$ai.f, bi=x$bi.f, ci=x$ci.f, di=x$di.f, add=x$add, to=x$to, drop00=x$drop00, level=x$level, subset=-i)), silent=TRUE) if (inherits(res, "try-error")) next ### removing an observation could lead to a model coefficient becoming inestimable (for 'rma.uni' objects) if (any(res$coef.na)) next Xi <- matrix(x$X.f[i,], nrow=1) delpred[i] <- Xi %*% res$beta vdelpred[i] <- Xi %*% tcrossprod(res$vb,Xi) } if (progbar) pbapply::closepb(pbar) yhati <- (delpred - pred.full)^2 / vdelpred ######################################################################### ### x-axis values (use 'na.pass' to make sure we get a vector of length k.f) options(na.action = "na.pass") xhati <- resid(x)^2 / (x$tau2.f + x$vi.f) options(na.action = na.act) ######################################################################### ### set some defaults (if not specified) if (missing(cex)) cex <- 0.8 if (missing(xlab)) { if (is.element(x$method, c("FE","EE","CE"))) { xlab <- ifelse(x$int.only, "Contribution to Overall Heterogeneity", "Contribution to Residual Heterogeneity") } else { xlab <- "Squared Pearson Residual" } } if (missing(ylab)) ylab <- ifelse(x$int.only, "Influence on Overall Result", "Influence on Fitted Value") if (missing(xlim)) xlim <- range(xhati, na.rm=TRUE) if (missing(ylim)) ylim <- range(yhati, na.rm=TRUE) ######################################################################### ### draw empty plot plot(NA, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim, ...) ### add grid (and redraw box) if (.isTRUE(grid)) { grid(col=gridcol) box(...) 
} if (is.numeric(symbol)) { if (length(symbol) == 1L) symbol <- rep(symbol, x$k.all) if (length(symbol) != x$k.all) stop(mstyle$stop(paste0("Length of the 'symbol' argument (", length(symbol), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) symbol <- symbol[x$subset] points(x=xhati, y=yhati, cex=cex, pch=symbol, ...) } if (is.character(symbol) && symbol=="ids") text(xhati, yhati, x$ids, cex=cex, ...) if (is.character(symbol) && symbol=="slab") text(xhati, yhati, x$slab, cex=cex, ...) ######################################################################### sav <- data.frame(x=xhati[x$not.na], y=yhati[x$not.na], ids=x$ids[x$not.na], slab=x$slab[x$not.na], stringsAsFactors=FALSE) invisible(sav) } metafor/R/robust.rma.uni.r0000644000176200001440000001244114036345322015165 0ustar liggesusersrobust.rma.uni <- function(x, cluster, adjust=TRUE, digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav="rma.uni.selmodel") if (missing(cluster)) stop(mstyle$stop("Must specify 'cluster' variable.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } level <- ifelse(x$level == 0, 1, ifelse(x$level >= 1, (100-x$level)/100, ifelse(x$level > .5, 1-x$level, x$level))) ######################################################################### ### process cluster variable ### note: cluster variable is assumed to be of the same length as the original data passed to the model fitting function ### so we have to apply the same subsetting (if necessary) and removing of missings as done during model fitting if (length(cluster) != x$k.all) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) cluster <- cluster[x$subset] cluster <- cluster[x$not.na] if (anyNA(cluster)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster) == 0L) stop(mstyle$stop(paste0("Cannot find 'cluster' variable (or it has zero length)."))) ### number of clusters n <- length(unique(cluster)) ### compute degrees of freedom ### note: Stata with vce(robust) also uses n-p as the dfs, but with vce(cluster ) always uses n-1 (which seems inconsistent) dfs <- n - x$p ### check if dfs are positive (note: this also handles the case where there is a single cluster) if (dfs <= 0) stop(mstyle$stop(paste0("Number of clusters (", n, ") must be larger than the number of fixed effects (", x$p, ")."))) ### note: since we use split() below and then put things back together into a block-diagonal matrix, ### we have to make sure everything is properly ordered by the cluster variable; otherwise, the 'meat' ### block-diagonal matrix is not in the same order as the rest; so we sort all relevant variables by ### the cluster variable (including the cluster variable itself) ocl <- order(cluster) cluster <- cluster[ocl] ### construct bread = (X'WX)^-1 X'W, where W is the weight matrix if (x$weighted) { ### for weighted analysis if (is.null(x$weights)) { ### if no weights were specified, then vb = (X'WX)^-1, so we can use that part wi <- 1/(x$vi + x$tau2) wi <- wi[ocl] W <- diag(wi, nrow=x$k, ncol=x$k) bread <- x$vb %*% crossprod(x$X[ocl,], W) } else { ### if weights were specified, then vb cannot be used A <- diag(x$weights[ocl], nrow=x$k, ncol=x$k) stXAX <- .invcalc(X=x$X[ocl,], W=A, k=x$k) 
bread <- stXAX %*% crossprod(x$X[ocl,], A) } } else { ### for unweighted analysis stXX <- .invcalc(X=x$X[ocl,], W=diag(x$k), k=x$k) bread <- stXX %*% t(x$X[ocl,]) } ### construct meat part ei <- c(x$yi - x$X %*% x$beta) ### use this instead of resid(), since this guarantees that the length is correct ei <- ei[ocl] cluster <- factor(cluster, levels=unique(cluster)) meat <- bldiag(lapply(split(ei, cluster), function(e) tcrossprod(e))) ### construct robust var-cov matrix vb <- bread %*% meat %*% t(bread) ### apply adjustments to vb as needed ### suggested in Hedges, Tipton, & Johnson (2010) -- analogous to HC1 adjustment if (.isTRUE(adjust)) vb <- (n / dfs) * vb ### what Stata does if (is.character(adjust) && (adjust=="Stata" || adjust=="Stata1")) vb <- (n / (n-1) * (x$k-1) / (x$k-x$p)) * vb ### when the model was fitted with regress if (is.character(adjust) && adjust=="Stata2") vb <- (n / (n-1)) * vb ### when the model was fitted with mixed ### prepare results beta <- x$beta se <- sqrt(diag(vb)) names(se) <- NULL zval <- c(beta/se) pval <- 2*pt(abs(zval), df=dfs, lower.tail=FALSE) crit <- qt(level/2, df=dfs, lower.tail=FALSE) ci.lb <- c(beta - crit * se) ci.ub <- c(beta + crit * se) QM <- try(as.vector(t(beta)[x$btt] %*% chol2inv(chol(vb[x$btt,x$btt])) %*% beta[x$btt]), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA QM <- QM / x$m ### note: m is the number of coefficients in btt, not the number of unique clusters QMdf <- c(x$m, dfs) QMp <- pf(QM, df1=x$m, df2=dfs, lower.tail=FALSE) ######################################################################### ### table of cluster variable tcl <- table(cluster) res <- x res$digits <- digits ### replace elements with robust results res$ddf <- dfs res$dfs <- dfs res$vb <- vb res$se <- se res$zval <- zval res$pval <- pval res$ci.lb <- ci.lb res$ci.ub <- ci.ub res$QM <- QM res$QMdf <- QMdf res$QMp <- QMp res$n <- n res$tcl <- tcl res$test <- "t" res$s2w <- 1 ### just in case test="knha" originally res$meat <- matrix(NA_real_, nrow=nrow(meat), ncol=ncol(meat)) res$meat[ocl,ocl] <- meat class(res) <- c("robust.rma", "rma", "rma.uni") return(res) } metafor/R/dfround.r0000644000176200001440000000126613770363460013752 0ustar liggesusersdfround <- function(x, digits) { mstyle <- .get.mstyle("crayon" %in% .packages()) #if (inherits(x, "matrix")) # x <- data.frame(x) .chkclass(class(x), must="data.frame") p <- ncol(x) if (missing(digits)) digits <- 0 if (length(digits) == 1L) digits <- rep(digits, p) if (p != length(digits)) stop(mstyle$stop(paste0("Number of columns in 'x' (", p, ") does not match length of 'digits' (", length(digits), ")."))) if (!is.numeric(digits)) stop(mstyle$stop("Argument 'digits' must be a numeric vector.")) for (i in 1:p) { if (!is.numeric(x[[i]])) next x[[i]] <- round(x[[i]], digits[i]) } return(x) } metafor/R/print.robust.rma.r0000644000176200001440000000441614043267652015540 0ustar liggesusersprint.robust.rma <- function(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="robust.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") cat(mstyle$text("Number of outcomes: ")) cat(mstyle$result(x$k)) cat("\n") cat(mstyle$text("Number of clusters: ")) cat(mstyle$result(x$n)) cat("\n") cat(mstyle$text("Outcomes per cluster: ")) if (all(x$tcl[1] == x$tcl)) { cat(mstyle$result(x$tcl[1])) } else { cat(mstyle$result(paste0(min(x$tcl), "-", max(x$tcl), " (mean: ", .fcf(mean(x$tcl), digits=2), ", median: ", round(median(x$tcl), digits=2), ")"))) } cat("\n\n") if (x$p > 1L && !is.na(x$QM)) { cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) cat("\n") cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) cat("\n\n") } res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) rownames(res.table) <- rownames(x$beta) signif <- symnum(x$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } if (x$int.only) res.table <- res.table[1,] cat(mstyle$section("Model Results:")) cat("\n\n") if (x$int.only) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) if (signif.legend) { cat("\n") cat(mstyle$legend("---\nSignif. 
codes: "), mstyle$legend(attr(signif, "legend"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/to.table.r0000644000176200001440000011530514043614106014007 0ustar liggesusersto.table <- function(measure, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, ni, data, slab, subset, add=1/2, to="none", drop00=FALSE, rows, cols) { mstyle <- .get.mstyle("crayon" %in% .packages()) ### check argument specifications if (missing(measure)) stop(mstyle$stop("Must specify an effect size or outcome measure via the 'measure' argument.")) if (!is.character(measure)) stop(mstyle$stop("The 'measure' argument must be a character string.")) if (!is.element(measure, c("RR","OR","PETO","RD","AS","PHI","YUQ","YUY","RTET", ### 2x2 table measures "PBIT","OR2D","OR2DN","OR2DL", ### - transformations to SMD "MPRD","MPRR","MPOR","MPORC","MPPETO", ### - measures for matched pairs data "IRR","IRD","IRSD", ### two-group person-time data measures "MD","SMD","SMDH","ROM", ### two-group mean/SD measures "CVR","VR", ### coefficient of variation ratio, variability ratio "RPB","RBIS","D2OR","D2ORN","D2ORL", ### - transformations to r_PB, r_BIS, and log(OR) "COR","UCOR","ZCOR", ### correlations (raw and r-to-z transformed) "PCOR","ZPCOR","SPCOR", ### partial and semi-partial correlations "PR","PLN","PLO","PAS","PFT", ### single proportions (and transformations thereof) "IR","IRLN","IRS","IRFT", ### single-group person-time data (and transformations thereof) "MN","MNLN","CVLN","SDLN","SMD1", ### mean, log(mean), log(CV), log(SD), single-group SMD "MC","SMCC","SMCR","SMCRH","ROMC","CVRC","VRC", ### raw/standardized mean change, log(ROM), CVR, and VR for dependent samples "ARAW","AHW","ABT"))) ### alpha (and transformations thereof) stop(mstyle$stop("Unknown 'measure' specified.")) if (is.element(measure, c("CVR","VR","PCOR","ZPCOR","SPCOR","CVLN","SDLN","VRC"))) stop(mstyle$stop("Function not available for this outcome measure.")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!is.element(to, c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### get slab and subset arguments (will be NULL when unspecified) mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) ######################################################################### ######################################################################### ######################################################################### if (is.element(measure, c("RR","OR","RD","AS","PETO","PHI","YUQ","YUY","RTET","PBIT","OR2D","OR2DN","OR2DL","MPRD","MPRR","MPOR","MPORC","MPPETO"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, 
data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci k <- length(ai) ### number of outcomes before subsetting if (length(ai)==0L || length(bi)==0L || length(ci)==0L || length(di)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(ai),length(bi),length(ci),length(di)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] } n1i <- ai + bi n2i <- ci + di if (any(c(ai > n1i, ci > n2i), na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(ai, bi, ci, di) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(c(n1i < 0, n2i < 0), na.rm=TRUE)) stop(mstyle$stop("One or more group sizes are < 0.")) ni.u <- ai + bi + ci + di ### unadjusted total sample sizes ### if drop00=TRUE, set counts to NA for studies that have no events (or all events) in both arms if (drop00) { id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } if (to == "all") { ### always add to all cells in all studies ai <- ai + add ci <- ci + add bi <- bi + add di <- di + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(ai == 0L | ci == 0L | bi == 0L | di == 0L) id0[is.na(id0)] <- FALSE ai[id0] <- ai[id0] + add ci[id0] <- ci[id0] + add bi[id0] <- bi[id0] + add di[id0] <- di[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(ai == 0L | ci == 0L | bi == 0L | di == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { ai <- ai + add ci <- ci + add bi <- bi + add di <- di + add } } } ######################################################################### if (is.element(measure, c("IRR","IRD","IRSD"))) { mf.x1i <- mf[[match("x1i", names(mf))]] mf.x2i <- mf[[match("x2i", names(mf))]] mf.t1i <- mf[[match("t1i", names(mf))]] mf.t2i <- mf[[match("t2i", names(mf))]] x1i <- eval(mf.x1i, data, enclos=sys.frame(sys.parent())) x2i <- eval(mf.x2i, data, enclos=sys.frame(sys.parent())) t1i <- eval(mf.t1i, data, enclos=sys.frame(sys.parent())) t2i <- eval(mf.t2i, data, enclos=sys.frame(sys.parent())) k <- length(x1i) ### number of outcomes before subsetting if (length(x1i)==0L || length(x2i)==0L || length(t1i)==0L || length(t2i)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(x1i),length(x2i),length(t1i),length(t2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) x1i <- x1i[subset] x2i <- x2i[subset] t1i <- t1i[subset] t2i <- t2i[subset] } if (any(c(x1i, x2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(c(t1i, t2i) <= 0, na.rm=TRUE)) stop(mstyle$stop("One or more person-times are <= 0.")) ni.u <- t1i + t2i ### unadjusted total sample sizes ### if drop00=TRUE, set counts to NA for studies that have no events in both arms if (drop00) { id00 <- c(x1i == 0L & x2i == 0L) id00[is.na(id00)] <- FALSE x1i[id00] <- NA x2i[id00] <- NA } if (to == "all") { ### always add to all cells in all studies x1i <- x1i + add x2i <- x2i + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(x1i == 0L | x2i == 0L) id0[is.na(id0)] <- FALSE x1i[id0] <- x1i[id0] + add x2i[id0] <- x2i[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(x1i == 0L | x2i == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { x1i <- x1i + add x2i <- x2i + add } } } ######################################################################### if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL"))) { mf.m1i <- mf[[match("m1i", names(mf))]] mf.m2i <- mf[[match("m2i", names(mf))]] mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) k <- length(n1i) ### number of outcomes before subsetting if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(sd2i)==0L || length(n1i)==0L || length(n2i)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(m1i),length(m2i),length(sd1i),length(sd2i),length(n1i),length(n2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] n1i <- n1i[subset] n2i <- n2i[subset] } if (any(c(sd1i, sd2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) if (any(c(n1i, n2i) < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- n1i + n2i ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("COR","UCOR","ZCOR"))) { mf.ri <- mf[[match("ri", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ri) ### number of outcomes before subsetting if (length(ri)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(ri) != length(ni)) stop(mstyle$stop("Supplied data vectors are not of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ri <- ri[subset] ni <- ni[subset] } if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("PR","PLN","PLO","PAS","PFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) if (is.null(mi)) mi <- ni - xi k <- length(xi) ### number of outcomes before subsetting if (length(xi)==0L || length(mi)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (length(xi) != length(mi)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] mi <- mi[subset] } ni <- xi + mi if (any(xi > ni, na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(xi, mi) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more group sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes if (to == "all") { ### always add to all cells in all studies xi <- xi + add mi <- mi + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(xi == 0L | mi == 0L) id0[is.na(id0)] <- FALSE xi[id0] <- xi[id0] + add mi[id0] <- mi[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(xi == 0L | mi == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { xi <- xi + add mi <- mi + add } } } ######################################################################### if (is.element(measure, c("IR","IRLN","IRS","IRFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.ti <- mf[[match("ti", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) k <- length(xi) ### number of outcomes before subsetting if (length(xi)==0L || length(ti)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(xi) != length(ti)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] ti <- ti[subset] } if (any(xi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(ti <= 0, na.rm=TRUE)) stop(mstyle$stop("One or more person-times are <= 0.")) ni.u <- ti ### unadjusted total sample sizes if (to == "all") { ### always add to all cells in all studies xi <- xi + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(xi == 0L) id0[is.na(id0)] <- FALSE xi[id0] <- xi[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(xi == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { xi <- xi + add } } } ######################################################################### if (is.element(measure, c("MN","MNLN","SMD1"))) { mf.mi <- mf[[match("mi", names(mf))]] mf.sdi <- mf[[match("sdi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) sdi <- eval(mf.sdi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ni) ### number of outcomes before subsetting if (length(mi)==0L || length(sdi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(mi),length(sdi),length(ni)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) mi <- mi[subset] sdi <- sdi[subset] ni <- ni[subset] } if (any(sdi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) if (is.element(measure, c("MNLN","CVLN")) && any(mi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more means are negative.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("MC","SMCC","SMCR","SMCRH","ROMC","CVRC"))) { mf.m1i <- mf[[match("m1i", names(mf))]] mf.m2i <- mf[[match("m2i", names(mf))]] mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] ### for SMCR, do not need to supply this mf.ni <- mf[[match("ni", names(mf))]] mf.ri <- mf[[match("ri", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) k <- length(m1i) ### number of outcomes before subsetting if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(sd2i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(m1i),length(m2i),length(sd1i),length(sd2i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } else { if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(m1i),length(m2i),length(sd1i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] ni <- ni[subset] ri <- ri[subset] } if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { if (any(c(sd1i, sd2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) } else { if (any(sd1i < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) } if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("ARAW","AHW","ABT"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ai) ### number of outcomes before subsetting if (length(ai)==0L || length(mi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(ai),length(mi),length(ni)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] mi <- mi[subset] ni <- ni[subset] } if (any(ai > 1, na.rm=TRUE)) stop(mstyle$stop("One or more alpha values are > 1.")) if (any(mi < 2, na.rm=TRUE)) stop(mstyle$stop("One or more mi values are < 2.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### ######################################################################### ######################################################################### ### generate study labels if none are specified if (is.null(slab)) { slab <- seq_len(k) } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) } ### if a subset of studies is specified if (!is.null(subset)) slab <- slab[subset] ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ######################################################################### ######################################################################### ######################################################################### if (is.element(measure, c("RR","OR","RD","AS","PETO","PHI","YUQ","YUY","RTET","PBIT","OR2D","OR2DN","OR2DL"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp1", "Grp2") } else { if (length(rows) != 2L) stop(mstyle$stop("Group names not of length 2.")) } if (missing(cols)) { cols <- c("Out1", "Out2") } else { if (length(cols) != 2L) stop(mstyle$stop("Outcome names not of length 2.")) } dat <- array(NA, dim=c(2,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- rbind(c(ai[i],bi[i]), c(ci[i],di[i])) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("MPRD","MPRR","MPOR"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? 
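   ### Illustrative sketch (hypothetical counts) of the marginal 2x2 slice built below for the
   ### matched pairs measures: to.table(measure="MPOR", ai=10, bi=5, ci=3, di=20) would return
   ###            Out1  Out2
   ###   Time1      15    23    <- c(ai+bi, ci+di)
   ###   Time2      13    25    <- c(ai+ci, bi+di)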
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Time1", "Time2") } else { if (length(rows) != 2L) stop(mstyle$stop("Time names not of length 2.")) } if (missing(cols)) { cols <- c("Out1", "Out2") } else { if (length(cols) != 2L) stop(mstyle$stop("Outcome names not of length 2.")) } dat <- array(NA, dim=c(2,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- rbind(c(ai[i]+bi[i],ci[i]+di[i]), c(ai[i]+ci[i],bi[i]+di[i])) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("MPORC","MPPETO"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Time1.Out1", "Time1.Out2") } else { if (length(rows) != 2L) stop(mstyle$stop("Time1 names not of length 2.")) } if (missing(cols)) { cols <- c("Time2.Out1", "Time2.Out2") } else { if (length(cols) != 2L) stop(mstyle$stop("Time2 names not of length 2.")) } dat <- array(NA, dim=c(2,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- rbind(c(ai[i],bi[i]), c(ci[i],di[i])) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("IRR","IRD","IRSD"))) { ### check for NAs in table data and act accordingly has.na <- is.na(x1i) | is.na(x2i) | is.na(t1i) | is.na(t2i) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { x1i <- x1i[not.na] x2i <- x2i[not.na] t1i <- t1i[not.na] t2i <- t2i[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(x1i) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp1", "Grp2") } else { if (length(rows) != 2L) stop(mstyle$stop("Group names not of length 2.")) } if (missing(cols)) { cols <- c("Events", "Person-Time") } else { if (length(cols) != 2L) stop(mstyle$stop("Outcome names not of length 2.")) } dat <- array(NA, dim=c(2,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- rbind(c(x1i[i],t1i[i]), c(x2i[i],t2i[i])) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL"))) { ### check for NAs in table data and act accordingly has.na <- is.na(m1i) | is.na(m2i) | is.na(sd1i) | is.na(sd2i) | is.na(n1i) | is.na(n2i) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { m1i <- m1i[not.na] m2i <- m2i[not.na] sd1i <- sd1i[not.na] sd2i <- sd2i[not.na] n1i <- n1i[not.na] n2i <- n2i[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(m1i) ### at least one study left? 
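   ### Illustrative sketch (hypothetical summary data) of the 2x3 slice built below for the
   ### two-group mean/SD measures: to.table(measure="SMD", m1i=5.2, sd1i=1.1, n1i=30,
   ###                                      m2i=4.8, sd2i=1.3, n2i=28) would return
   ###           Mean   SD    n
   ###   Grp1     5.2  1.1   30
   ###   Grp2     4.8  1.3   28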
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp1", "Grp2") } else { if (length(rows) != 2L) stop(mstyle$stop("Group names not of length 2.")) } if (missing(cols)) { cols <- c("Mean", "SD", "n") } else { if (length(cols) != 3L) stop(mstyle$stop("Outcome names not of length 3.")) } dat <- array(NA, dim=c(2,3,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- rbind(c(m1i[i],sd1i[i],n1i[i]), c(m2i[i],sd2i[i],n2i[i])) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("COR","UCOR","ZCOR"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ri) | is.na(ni) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ri <- ri[not.na] ni <- ni[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ri) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp") } else { if (length(rows) != 1L) stop(mstyle$stop("Group names not of length 1.")) } if (missing(cols)) { cols <- c("r", "n") } else { if (length(cols) != 2L) stop(mstyle$stop("Outcome names not of length 2.")) } dat <- array(NA, dim=c(1,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(ri[i],ni[i]) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("PR","PLN","PLO","PAS","PFT"))) { ### check for NAs in table data and act accordingly has.na <- is.na(xi) | is.na(mi) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { xi <- xi[not.na] mi <- mi[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(xi) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp") } else { if (length(rows) != 1L) stop(mstyle$stop("Group names not of length 1.")) } if (missing(cols)) { cols <- c("Out1", "Out2") } else { if (length(cols) != 2L) stop(mstyle$stop("Outcome names not of length 2.")) } dat <- array(NA, dim=c(1,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(xi[i],mi[i]) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("IR","IRLN","IRS","IRFT"))) { ### check for NAs in table data and act accordingly has.na <- is.na(xi) | is.na(ti) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { xi <- xi[not.na] ti <- ti[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(xi) ### at least one study left? 
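   ### Illustrative sketch (hypothetical data) of the 1x2 slice built below for the single-group
   ### incidence rate measures: to.table(measure="IRLN", xi=12, ti=250) would return
   ###          Events  Person-Time
   ###   Grp        12          250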
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp") } else { if (length(rows) != 1L) stop(mstyle$stop("Group names not of length 1.")) } if (missing(cols)) { cols <- c("Events", "Person-Time") } else { if (length(cols) != 2L) stop(mstyle$stop("Outcome names not of length 2.")) } dat <- array(NA, dim=c(1,2,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(xi[i],ti[i]) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("MN","MNLN","SMD1"))) { ### check for NAs in table data and act accordingly has.na <- is.na(mi) | is.na(sdi) | is.na(ni) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { mi <- mi[not.na] sdi <- sdi[not.na] ni <- ni[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ni) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp") } else { if (length(rows) != 1L) stop(mstyle$stop("Group names not of length 1.")) } if (missing(cols)) { cols <- c("Mean", "SD", "n") } else { if (length(cols) != 3L) stop(mstyle$stop("Outcome names not of length 3.")) } dat <- array(NA, dim=c(1,3,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(mi[i],sdi[i],ni[i]) dat[,,i] <- tab.i } } ######################################################################### if (is.element(measure, c("MC","SMCC","SMCR","SMCRH","ROMC","CVRC"))) { ### check for NAs in table data and act accordingly if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { has.na <- is.na(m1i) | is.na(m2i) | is.na(sd1i) | is.na(sd2i) | is.na(ni) | is.na(ri) } else { has.na <- is.na(m1i) | is.na(m2i) | is.na(sd1i) | is.na(ni) | is.na(ri) } if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { m1i <- m1i[not.na] m2i <- m2i[not.na] sd1i <- sd1i[not.na] if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) sd2i <- sd2i[not.na] ni <- ni[not.na] ri <- ri[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(m1i) ### at least one study left? 
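   ### Illustrative sketch (hypothetical data) of the slice built below for the change measures:
   ### to.table(measure="SMCC", m1i=10.2, m2i=8.1, sd1i=2.0, sd2i=2.2, ni=25, ri=0.6) would return
   ###         Mean1  Mean2  SD1  SD2   n    r
   ###   Grp    10.2    8.1  2.0  2.2   25  0.6
   ### (for measure="SMCR", sd2i is not used and the slice has 5 columns: Mean1, Mean2, SD1, n, r)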
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp") } else { if (length(rows) != 1L) stop(mstyle$stop("Group names not of length 1.")) } if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { if (missing(cols)) { cols <- c("Mean1", "Mean2", "SD1", "SD2", "n", "r") } else { if (length(cols) != 6L) stop(mstyle$stop("Outcome names not of length 6.")) } } else { if (missing(cols)) { cols <- c("Mean1", "Mean2", "SD1", "n", "r") } else { if (length(cols) != 5L) stop(mstyle$stop("Outcome names not of length 5.")) } } if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { dat <- array(NA, dim=c(1,6,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(m1i[i],m2i[i],sd1i[i],sd2i[i],ni[i],ri[i]) dat[,,i] <- tab.i } } else { dat <- array(NA, dim=c(1,5,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(m1i[i],m2i[i],sd1i[i],ni[i],ri[i]) dat[,,i] <- tab.i } } } ######################################################################### if (is.element(measure, c("ARAW","AHW","ABT"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(mi) | is.na(ni) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] mi <- mi[not.na] ni <- ni[not.na] slab <- slab[not.na] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### row/group and column/outcome names if (missing(rows)) { rows <- c("Grp") } else { if (length(rows) != 1L) stop(mstyle$stop("Group names not of length 1.")) } if (missing(cols)) { cols <- c("alpha", "m", "n") } else { if (length(cols) != 3L) stop(mstyle$stop("Outcome names not of length 3.")) } dat <- array(NA, dim=c(1,3,k), dimnames=list(rows, cols, slab)) for (i in seq_len(k)) { tab.i <- c(ai[i],mi[i],ni[i]) dat[,,i] <- tab.i } } ######################################################################### return(dat) } metafor/R/ranktest.r0000644000176200001440000000006613457322061014133 0ustar liggesusersranktest <- function(x, ...) UseMethod("ranktest") metafor/R/fitstats.rma.r0000644000176200001440000000264213770363510014723 0ustar liggesusersfitstats.rma <- function(object, ..., REML) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") if (missing(REML)) { if (object$method == "REML") { REML <- TRUE } else { REML <- FALSE } } if (missing(...)) { ### if there is just 'object' if (REML) { out <- cbind(object$fit.stats$REML) colnames(out) <- "REML" } else { out <- cbind(object$fit.stats$ML) colnames(out) <- "ML" } } else { ### if there is 'object' and additional objects via ... 
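   ### Sketch of the multi-model case handled below (hypothetical models res1 and res2 fitted
   ### with rma() to the same data): fitstats(res1, res2) returns a data frame with one column
   ### per model and rows logLik:, deviance:, AIC:, BIC:, and AICc: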
if (REML) { out <- sapply(list(object, ...), function(x) x$fit.stats$REML) } else { out <- sapply(list(object, ...), function(x) x$fit.stats$ML) } out <- data.frame(out) ### get names of objects; same idea as in stats:::AIC.default cl <- match.call() cl$REML <- NULL names(out) <- as.character(cl[-1L]) ### check that all models were fitted to the same data yis <- lapply(list(object, ...), function(x) as.vector(x$yi)) if (!all(sapply(yis[-1], function(x) identical(x, yis[[1]])))) warning(mstyle$warning("Models not all fitted to the same data."), call.=FALSE) } rownames(out) <- c("logLik:", "deviance:", "AIC:", "BIC:", "AICc:") return(out) #print(.fcf(out, object$digits[["fit"]]), quote=FALSE) #invisible(out) } metafor/R/zzz.r0000644000176200001440000000375614056237142013147 0ustar liggesusers.onAttach <- function(libname, pkgname) { ver <- "3.0-2" loadmsg <- paste0("\nLoading the 'metafor' package (version ", ver, "). For an\nintroduction to the package please type: help(metafor)\n") inst.ver <- as.numeric(strsplit(gsub("-", ".", ver, fixed=TRUE), ".", fixed=TRUE)[[1]]) # only run version check in an interactive session and if METAFOR_VERSION_CHECK is not FALSE verchk <- tolower(Sys.getenv("METAFOR_VERSION_CHECK")) if (interactive() && verchk != "false") { # pull version number from CRAN page tmp <- suppressWarnings(try(readLines("https://cran.r-project.org/web/packages/metafor/index.html"), silent=TRUE)) # or pull version number from github # tmp <- suppressWarnings(try(readLines("https://raw.githubusercontent.com/wviechtb/metafor/master/CRAN_version", n=1), silent=TRUE)) if (!inherits(tmp, "try-error")) { cran.ver <- tmp[grep("Version:", tmp, fixed=TRUE) + 1] if (!is.na(cran.ver) && length(cran.ver) != 0L) { cran.ver <- substr(cran.ver, 5, nchar(cran.ver)-5) # strip and save.ver <- cran.ver # need this below is message cran.ver <- as.numeric(strsplit(gsub("-", ".", cran.ver), ".", fixed=TRUE)[[1]]) inst.ver <- 100000 * inst.ver[1] + 1000 * inst.ver[2] + inst.ver[3] cran.ver <- 100000 * cran.ver[1] + 1000 * cran.ver[2] + cran.ver[3] if (isTRUE(inst.ver < cran.ver)) loadmsg <- paste0(loadmsg, "\nAn updated version of the package (version ", save.ver, ") is available!\nTo update to this version type: install.packages(\"metafor\")\n") } } } options("pboptions" = list( type = if (interactive()) "timer" else "none", char = "=", txt.width = 50, gui.width = 300, style = 3, initial = 0, title = "Progress Bar", label = "", nout = 100L, min_time = 2, use_lb = FALSE)) packageStartupMessage(loadmsg, domain=NULL, appendLF=TRUE) } metafor/R/vcov.rma.r0000644000176200001440000000526514036314605014040 0ustar liggesusersvcov.rma <- function(object, type="fixed", ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("fixed", "obs", "fitted", "resid")) ######################################################################### if (type=="fixed") { out <- object$vb if (inherits(object, "rma.ls")) out <- list(beta = object$vb, alpha = object$va) return(out) } ######################################################################### if (type=="obs") { if (inherits(object, c("rma.uni","rma.mv"))) { out <- matrix(NA_real_, nrow=object$k.f, ncol=object$k.f) out[object$not.na, object$not.na] <- object$M rownames(out) <- colnames(out) <- object$slab if (na.act == "na.omit") out <- out[object$not.na, object$not.na] if (na.act == "na.fail" && any(!object$not.na)) stop(mstyle$stop("Missing values in data.")) return(out) } else { stop(mstyle$stop("Extraction of marginal var-cov matrix not available for objects of this class.")) } } ######################################################################### if (type=="fitted") { out <- object$X.f %*% object$vb %*% t(object$X.f) rownames(out) <- colnames(out) <- object$slab if (na.act == "na.omit") out <- out[object$not.na, object$not.na] if (na.act == "na.exclude" || na.act == "na.pass") { out[!object$not.na,] <- NA out[,!object$not.na] <- NA } return(out) } ######################################################################### if (type=="resid") { options(na.action="na.omit") H <- hatvalues(object, type="matrix") options(na.action = na.act) ImH <- diag(object$k) - H if (inherits(object, "robust.rma")) { ve <- ImH %*% tcrossprod(object$meat,ImH) } else { ve <- ImH %*% tcrossprod(object$M,ImH) } if (na.act == "na.omit") { out <- ve rownames(out) <- colnames(out) <- object$slab[object$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- matrix(NA_real_, nrow=object$k.f, ncol=object$k.f) out[object$not.na, object$not.na] <- ve rownames(out) <- colnames(out) <- object$slab } return(out) } ######################################################################### } metafor/R/print.rma.mv.r0000644000176200001440000004124414050235602014630 0ustar liggesusersprint.rma.mv <- function(x, digits, showfit=FALSE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.mv") if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (!exists(".rmspace")) cat("\n") cat(mstyle$section("Multivariate Meta-Analysis Model")) cat(mstyle$section(paste0(" (k = ", x$k, "; "))) cat(mstyle$section(paste0("method: ", x$method, ")"))) if (showfit) { cat("\n") if (x$method == "REML") { fs <- .fcf(x$fit.stats$REML, digits[["fit"]]) } else { fs <- .fcf(x$fit.stats$ML, digits[["fit"]]) } names(fs) <- c("logLik", "Deviance", "AIC", "BIC", "AICc") cat("\n") tmp <- capture.output(print(fs, quote=FALSE, print.gap=2)) .print.table(tmp, mstyle) cat("\n") } else { cat("\n\n") } sigma2 <- .fcf(x$sigma2, digits[["var"]]) tau2 <- .fcf(x$tau2, digits[["var"]]) rho <- .fcf(x$rho, digits[["var"]]) gamma2 <- .fcf(x$gamma2, digits[["var"]]) phi <- .fcf(x$phi, digits[["var"]]) sigma <- .fcf(sqrt(x$sigma2), digits[["var"]]) tau <- .fcf(sqrt(x$tau2), digits[["var"]]) gamma <- .fcf(sqrt(x$gamma2), digits[["var"]]) cat(mstyle$section("Variance Components:")) right <- TRUE if (!x$withS && !x$withG && !x$withH) { cat(mstyle$text(" none")) cat("\n\n") } else { cat("\n\n") if (x$withS) { vc <- cbind(estim=sigma2, sqrt=sigma, nlvls=x$s.nlevels, fixed=ifelse(x$vc.fix$sigma2, "yes", "no"), factor=x$s.names, R=ifelse(x$Rfix, "yes", "no")) colnames(vc) <- c("estim", "sqrt", "nlvls", "fixed", "factor", "R") if (!x$withR) vc <- vc[,-6,drop=FALSE] if (length(x$sigma2) == 1L) { rownames(vc) <- "sigma^2 " } else { rownames(vc) <- paste("sigma^2.", seq_along(x$sigma2), sep="") } tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) cat("\n") } if (x$withG) { ### note: use g.nlevels.f[1] since the number of arms is based on all data (i.e., including NAs), but use ### g.nlevels[2] since the number of studies is based on what is actually available (i.e., excluding NAs) if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { inner <- trimws(paste0(strsplit(paste0(x$formulas[[1]], collapse=""), "|", fixed=TRUE)[[1]][1], collapse="")) if (nchar(inner) > 15) inner <- paste0(substr(inner, 1, 15), "[...]", collapse="") } else { inner <- x$g.names[1] } outer <- tail(x$g.names, 1) mng <- max(nchar(c(inner, outer))) cat(mstyle$text(paste0("outer factor: ", paste0(outer, paste(rep(" ", max(0,mng-nchar(outer))), collapse=""), collapse=""), " (nlvls = ", x$g.nlevels[2], ")"))) cat("\n") if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { cat(mstyle$text(paste0("inner term: ", paste0(inner, paste(rep(" ", max(0,mng-nchar(inner))), collapse=""), collapse=""), " (nlvls = ", x$g.nlevels.f[1], ")"))) } else { cat(mstyle$text(paste0("inner factor: ", paste0(inner, paste(rep(" ", max(0,mng-nchar(inner))), collapse=""), collapse=""), " (nlvls = ", x$g.nlevels.f[1], ")"))) } cat("\n\n") if (is.element(x$struct[1], c("CS","AR","CAR","ID","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { vc <- cbind(tau2, tau, ifelse(x$vc.fix$tau2, "yes", "no")) vc <- rbind(vc, c(rho, "", ifelse(x$vc.fix$rho, "yes", "no"))) colnames(vc) <- c("estim", "sqrt", "fixed") rownames(vc) <- c("tau^2 ", "rho") if (x$struct[1] == "ID") vc <- vc[1,,drop=FALSE] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[1], 
c("HCS","HAR","DIAG"))) { vc <- cbind(tau2, tau, x$g.levels.k, ifelse(x$vc.fix$tau2, "yes", "no"), x$g.levels.f[[1]]) vc <- rbind(vc, c(rho, "", "", ifelse(x$vc.fix$rho, "yes", "no"), "")) colnames(vc) <- c("estim", "sqrt", "k.lvl", "fixed", "level") if (length(x$tau2) == 1L) { rownames(vc) <- c("tau^2 ", "rho") } else { rownames(vc) <- c(paste("tau^2.", seq_along(x$tau2), " ", sep=""), "rho") } if (x$struct[1] == "DIAG") vc <- vc[seq_along(tau2),,drop=FALSE] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[1], c("UN","UNR"))) { if (x$struct[1] == "UN") { vc <- cbind(tau2, tau, x$g.levels.k, ifelse(x$vc.fix$tau2, "yes", "no"), x$g.levels.f[[1]]) } else { vc <- cbind(rep(tau2, length(x$g.levels.k)), rep(tau, length(x$g.levels.k)), x$g.levels.k, ifelse(rep(x$vc.fix$tau2,length(x$g.levels.k)), "yes", "no"), x$g.levels.f[[1]]) } colnames(vc) <- c("estim", "sqrt", "k.lvl", "fixed", "level") if (length(x$g.levels.k) == 1L) { rownames(vc) <- c("tau^2") } else { rownames(vc) <- paste("tau^2.", seq_along(x$g.levels.k), " ", sep="") } tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) cat("\n") if (length(x$rho) == 1L) { G <- matrix(NA_real_, nrow=2, ncol=2) } else { G <- matrix(NA_real_, nrow=x$g.nlevels.f[1], ncol=x$g.nlevels.f[1]) } G[upper.tri(G)] <- rho G[lower.tri(G)] <- t(G)[lower.tri(G)] diag(G) <- 1 #G[upper.tri(G)] <- "" if (length(x$rho) == 1L) { G.info <- matrix(NA_real_, nrow=2, ncol=2) } else { G.info <- matrix(NA_real_, nrow=x$g.nlevels.f[1], ncol=x$g.nlevels.f[1]) } G.info[upper.tri(G.info)] <- x$g.levels.comb.k G.info[lower.tri(G.info)] <- t(G.info)[lower.tri(G.info)] G.info[upper.tri(G.info)] <- ifelse(x$vc.fix$rho, "yes", "no") diag(G.info) <- "-" vc <- cbind(G, "", G.info) colnames(vc) <- c(paste("rho.", abbreviate(x$g.levels.f[[1]]), sep=""), "", abbreviate(x$g.levels.f[[1]])) ### FIXME: x$g.levels.f[[1]] may be numeric, in which case a wrapping 'header' is not recognized rownames(vc) <- x$g.levels.f[[1]] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[1], c("GEN"))) { vc <- cbind(tau2, tau, ifelse(x$vc.fix$tau2, "yes", "no"), "") colnames(vc) <- c("estim", "sqrt", "fixed", "rho:") rownames(vc) <- x$g.names[-length(x$g.names)] G.info <- .fcf(cov2cor(x$G), digits[["var"]]) diag(G.info) <- "-" G.info[upper.tri(G.info)] <- ifelse(x$vc.fix$rho, "yes", "no") colnames(G.info) <- abbreviate(x$g.names[-length(x$g.names)]) vc <- cbind(vc, G.info) tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[1], c("GDIAG"))) { vc <- cbind(tau2, tau, ifelse(x$vc.fix$tau2, "yes", "no")) colnames(vc) <- c("estim", "sqrt", "fixed") rownames(vc) <- x$g.names[-length(x$g.names)] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } cat("\n") } if (x$withH) { ### note: use h.nlevels.f[1] since the number of arms is based on all data (i.e., including NAs), but use ### h.nlevels[2] since the number of studies is based on what is actually available (i.e., excluding NAs) if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { inner <- trimws(paste0(strsplit(paste0(x$formulas[[2]], collapse=""), "|", fixed=TRUE)[[1]][1], collapse="")) if (nchar(inner) > 15) inner <- paste0(substr(inner, 1, 15), "[...]", collapse="") } else { inner <- 
x$h.names[1] } outer <- tail(x$h.names, 1) mng <- max(nchar(c(inner, outer))) cat(mstyle$text(paste0("outer factor: ", paste0(outer, paste(rep(" ", max(0,mng-nchar(outer))), collapse=""), collapse=""), " (nlvls = ", x$h.nlevels[2], ")"))) cat("\n") if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { cat(mstyle$text(paste0("inner term: ", paste0(inner, paste(rep(" ", max(0,mng-nchar(inner))), collapse=""), collapse=""), " (nlvls = ", x$h.nlevels.f[1], ")"))) } else { cat(mstyle$text(paste0("inner factor: ", paste0(inner, paste(rep(" ", max(0,mng-nchar(inner))), collapse=""), collapse=""), " (nlvls = ", x$h.nlevels.f[1], ")"))) } cat("\n\n") if (is.element(x$struct[2], c("CS","AR","CAR","ID","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { vc <- cbind(gamma2, gamma, ifelse(x$vc.fix$gamma2, "yes", "no")) vc <- rbind(vc, c(phi, "", ifelse(x$vc.fix$phi, "yes", "no"))) colnames(vc) <- c("estim", "sqrt", "fixed") rownames(vc) <- c("gamma^2 ", "phi") if (x$struct[2] == "ID") vc <- vc[1,,drop=FALSE] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[2], c("HCS","HAR","DIAG"))) { vc <- cbind(gamma2, gamma, x$h.levels.k, ifelse(x$vc.fix$gamma2, "yes", "no"), x$h.levels.f[[1]]) vc <- rbind(vc, c(phi, "", "", ifelse(x$vc.fix$phi, "yes", "no"), "")) colnames(vc) <- c("estim", "sqrt", "k.lvl", "fixed", "level") if (length(x$gamma2) == 1L) { rownames(vc) <- c("gamma^2 ", "phi") } else { rownames(vc) <- c(paste("gamma^2.", seq_along(x$gamma2), " ", sep=""), "phi") } if (x$struct[2] == "DIAG") vc <- vc[seq_along(gamma2),,drop=FALSE] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[2], c("UN","UNR"))) { if (x$struct[2] == "UN") { vc <- cbind(gamma2, gamma, x$h.levels.k, ifelse(x$vc.fix$gamma2, "yes", "no"), x$h.levels.f[[1]]) } else { vc <- cbind(rep(gamma2, length(x$h.levels.k)), rep(gamma, length(x$h.levels.k)), x$h.levels.k, ifelse(rep(x$vc.fix$gamma2,length(x$h.levels.k)), "yes", "no"), x$h.levels.f[[1]]) } colnames(vc) <- c("estim", "sqrt", "k.lvl", "fixed", "level") if (length(x$h.levels.k) == 1L) { rownames(vc) <- c("gamma^2") } else { rownames(vc) <- paste("gamma^2.", seq_along(x$h.levels.k), " ", sep="") } tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) cat("\n") if (length(x$phi) == 1L) { H <- matrix(NA_real_, nrow=2, ncol=2) } else { H <- matrix(NA_real_, nrow=x$h.nlevels.f[1], ncol=x$h.nlevels.f[1]) } H[upper.tri(H)] <- phi H[lower.tri(H)] <- t(H)[lower.tri(H)] diag(H) <- 1 #H[upper.tri(H)] <- "" if (length(x$phi) == 1L) { H.info <- matrix(NA_real_, nrow=2, ncol=2) } else { H.info <- matrix(NA_real_, nrow=x$h.nlevels.f[1], ncol=x$h.nlevels.f[1]) } H.info[upper.tri(H.info)] <- x$h.levels.comb.k H.info[lower.tri(H.info)] <- t(H.info)[lower.tri(H.info)] H.info[upper.tri(H.info)] <- ifelse(x$vc.fix$phi, "yes", "no") diag(H.info) <- "-" vc <- cbind(H, "", H.info) colnames(vc) <- c(paste("phi.", abbreviate(x$h.levels.f[[1]]), sep=""), "", abbreviate(x$h.levels.f[[1]])) ### FIXME: x$h.levels.f[[1]] may be numeric, in which case a wrapping 'header' is not recognized rownames(vc) <- x$h.levels.f[[1]] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[2], c("GEN"))) { vc <- cbind(gamma2, gamma, ifelse(x$vc.fix$gamma2, "yes", "no"), "") colnames(vc) <- 
c("estim", "sqrt", "fixed", "phi:") rownames(vc) <- x$h.names[-length(x$h.names)] H.info <- .fcf(cov2cor(x$H), digits[["var"]]) diag(H.info) <- "-" H.info[upper.tri(H.info)] <- ifelse(x$vc.fix$phi, "yes", "no") colnames(H.info) <- abbreviate(x$h.names[-length(x$h.names)]) vc <- cbind(vc, H.info) tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } if (is.element(x$struct[2], c("GDIAG"))) { vc <- cbind(gamma2, gamma, ifelse(x$vc.fix$gamma2, "yes", "no")) colnames(vc) <- c("estim", "sqrt", "fixed") rownames(vc) <- x$h.names[-length(x$h.names)] tmp <- capture.output(print(vc, quote=FALSE, right=right, print.gap=2)) .print.table(tmp, mstyle) } cat("\n") } } if (!is.na(x$QE)) { if (x$int.only) { cat(mstyle$section("Test for Heterogeneity:")) cat("\n") cat(mstyle$result(paste0("Q(df = ", x$k-x$p, ") = ", .fcf(x$QE, digits[["test"]]), ", p-val ", .pval(x$QEp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$section("Test for Residual Heterogeneity:")) cat("\n") cat(mstyle$result(paste0("QE(df = ", x$k-x$p, ") = ", .fcf(x$QE, digits[["test"]]), ", p-val ", .pval(x$QEp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (x$p > 1L && !is.na(x$QM)) { cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) cat("\n") if (x$test == "t") { cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (x$test == "t") { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } rownames(res.table) <- rownames(x$beta) signif <- symnum(x$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } ddd <- list(...) .chkdots(ddd, c("num")) if (.isTRUE(ddd$num)) rownames(res.table) <- paste0(1:nrow(res.table), ") ", rownames(res.table)) if (x$int.only) res.table <- res.table[1,] cat(mstyle$section("Model Results:")) cat("\n\n") if (x$int.only) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) if (signif.legend) { cat("\n") cat(mstyle$legend("---\nSignif. codes: "), mstyle$legend(attr(signif, "legend"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/print.rma.glmm.r0000644000176200001440000001666514046532454015165 0ustar liggesusersprint.rma.glmm <- function(x, digits, showfit=FALSE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.glmm") if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (!exists(".rmspace")) cat("\n") if (is.element(x$method, c("FE","EE","CE"))) { if (x$int.only) { cat(mstyle$section(sapply(x$method, switch, "FE"="Fixed-Effects Model", "EE"="Equal-Effects Model", "CE"="Common-Effects Model", USE.NAMES=FALSE))) } else { cat(mstyle$section("Fixed-Effects with Moderators Model")) } cat(mstyle$section(paste0(" (k = ", x$k, ")"))) } else { if (x$int.only) { cat(mstyle$section("Random-Effects Model")) } else { cat(mstyle$section("Mixed-Effects Model")) } cat(mstyle$section(paste0(" (k = ", x$k, "; "))) cat(mstyle$section(paste0("tau^2 estimator: ", x$method, ")"))) } if (is.element(x$measure, c("OR","IRR"))) { cat("\n") if (x$model == "UM.FS") cat(mstyle$section("Model Type: Unconditional Model with Fixed Study Effects")) if (x$model == "UM.RS") cat(mstyle$section("Model Type: Unconditional Model with Random Study Effects")) if (x$model == "CM.AL") cat(mstyle$section("Model Type: Conditional Model with Approximate Likelihood")) if (x$model == "CM.EL") cat(mstyle$section("Model Type: Conditional Model with Exact Likelihood")) } if (showfit) { cat("\n") fs <- .fcf(x$fit.stats$ML, digits[["fit"]]) names(fs) <- c("logLik", "deviance", "AIC", "BIC", "AICc") cat("\n") tmp <- capture.output(print(fs, quote=FALSE, print.gap=2)) .print.table(tmp, mstyle) cat("\n") } else { cat("\n\n") } if (!is.element(x$method, c("FE","EE","CE"))) { if (x$int.only) { cat(mstyle$text("tau^2 (estimated amount of total heterogeneity): ")) cat(mstyle$result(paste0(.fcf(x$tau2, ifelse(abs(x$tau2) <= .Machine$double.eps*10,0,digits[["var"]])), ifelse(is.na(x$se.tau2), "", paste0(" (SE = " , .fcf(x$se.tau2, digits[["sevar"]]), ")"))))) cat("\n") cat(mstyle$text("tau (square root of estimated tau^2 value): ")) cat(mstyle$result(paste0(ifelse(x$tau2>=0, .fcf(sqrt(x$tau2), ifelse(x$tau2 <= .Machine$double.eps*10,0,digits[["var"]])), NA)))) cat("\n") cat(mstyle$text("I^2 (total heterogeneity / total variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$I2), NA, .fcf(x$I2, digits[["het"]])), "%"))) cat("\n") cat(mstyle$text("H^2 (total variability / sampling variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$H2), NA, .fcf(x$H2, digits[["het"]]))))) } else { cat(mstyle$text("tau^2 (estimated amount of residual heterogeneity): ")) cat(mstyle$result(paste0(.fcf(x$tau2, ifelse(abs(x$tau2) <= .Machine$double.eps*10,0,digits[["var"]])), ifelse(is.na(x$se.tau2), "", paste0(" (SE = " , .fcf(x$se.tau2, digits[["sevar"]]), ")"))))) cat("\n") cat(mstyle$text("tau (square root of estimated tau^2 value): ")) cat(mstyle$result(paste0(ifelse(x$tau2>=0, .fcf(sqrt(x$tau2), ifelse(x$tau2 <= .Machine$double.eps*10,0,digits[["var"]])), NA)))) cat("\n") cat(mstyle$text("I^2 (residual heterogeneity / unaccounted variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$I2), NA, .fcf(x$I2, digits[["het"]])), "%"))) cat("\n") cat(mstyle$text("H^2 (unaccounted variability / sampling variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$H2), NA, .fcf(x$H2, digits[["het"]]))))) } cat("\n\n") } if (!is.na(x$sigma2)) { cat(mstyle$text("sigma^2 (estimated amount of study level variability): ")) cat(mstyle$result(paste0(.fcf(x$sigma2, ifelse(abs(x$sigma2) <= .Machine$double.eps*10,0,digits[["var"]]))))) cat("\n") cat(mstyle$text("sigma (square root of estimated sigma^2 
value): ")) cat(mstyle$result(paste0(ifelse(x$sigma2>=0, .fcf(sqrt(x$sigma2), ifelse(x$sigma2 <= .Machine$double.eps*10,0,digits[["var"]])), NA)))) cat("\n\n") } if (!is.na(x$QE.Wld) || !is.na(x$QE.LRT)) { QE.Wld <- .fcf(x$QE.Wld, digits[["test"]]) QE.LRT <- .fcf(x$QE.LRT, digits[["test"]]) nchar.Wld <- nchar(QE.Wld, keepNA=FALSE) nchar.LRT <- nchar(QE.LRT, keepNA=FALSE) if (nchar.Wld > nchar.LRT) QE.LRT <- paste0(paste(rep(" ", nchar.Wld - nchar.LRT), collapse=""), QE.LRT) if (nchar.LRT > nchar.Wld) QE.Wld <- paste0(paste(rep(" ", nchar.LRT - nchar.Wld), collapse=""), QE.Wld) if (x$int.only) { cat(mstyle$section("Tests for Heterogeneity:")) } else { cat(mstyle$section("Tests for Residual Heterogeneity:")) } cat("\n") cat(mstyle$result(paste0("Wld(df = ", x$QE.df, ") = ", QE.Wld, ", p-val ", .pval(x$QEp.Wld, digits[["pval"]], showeq=TRUE, sep=" ")))) cat("\n") cat(mstyle$result(paste0("LRT(df = ", x$QE.df, ") = ", QE.LRT, ", p-val ", .pval(x$QEp.LRT, digits[["pval"]], showeq=TRUE, sep=" ")))) cat("\n\n") } if (x$p > 1L && !is.na(x$QM)) { cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) cat("\n") if (x$test == "t") { cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (x$test == "t") { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } rownames(res.table) <- rownames(x$beta) signif <- symnum(x$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } ddd <- list(...) .chkdots(ddd, c("num")) if (.isTRUE(ddd$num)) rownames(res.table) <- paste0(1:nrow(res.table), ") ", rownames(res.table)) if (x$int.only) res.table <- res.table[1,] cat(mstyle$section("Model Results:")) cat("\n\n") if (x$int.only) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) if (signif.legend) { cat("\n") cat(mstyle$legend("---\nSignif. codes: "), mstyle$legend(attr(signif, "legend"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/print.matreg.r0000644000176200001440000000330214045601362014704 0ustar liggesusersprint.matreg <- function(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="matreg") if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (!exists(".rmspace")) cat("\n") if (x$test == "t") { res.table <- data.frame(estimate=.fcf(c(x$tab$beta), digits[["est"]]), se=.fcf(x$tab$se, digits[["se"]]), tval=.fcf(x$tab$tval, digits[["test"]]), df=round(x$tab$df,2), pval=.pval(x$tab$pval, digits[["pval"]]), ci.lb=.fcf(x$tab$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$tab$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$tab$beta), digits[["est"]]), se=.fcf(x$tab$se, digits[["se"]]), zval=.fcf(x$tab$zval, digits[["test"]]), pval=.pval(x$tab$pval, digits[["pval"]]), ci.lb=.fcf(x$tab$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$tab$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } rownames(res.table) <- rownames(x$tab) signif <- symnum(x$tab$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) .print.table(tmp, mstyle) if (signif.legend) { cat("\n") cat(mstyle$legend("---\nSignif. codes: "), mstyle$legend(attr(signif, "legend"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/regtest.default.r0000644000176200001440000001122314036331520015367 0ustar liggesusersregtest.default <- function(x, vi, sei, ni, subset, model="rma", predictor="sei", ret.fit=FALSE, digits, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(subset)) subset <- NULL ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } if (missing(ni)) ni <- NULL model <- match.arg(model, c("lm", "rma")) predictor <- match.arg(predictor, c("sei", "vi", "ni", "ninv", "sqrtni", "sqrtninv")) ddd <- list(...) 
   if (!is.null(ddd$level)) {
      level <- ddd$level
      level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level)))
   } else {
      level <- .05
   }

   #########################################################################

   ### check if sampling variances and/or standard errors are available

   if (missing(vi)) vi <- NULL
   if (missing(sei)) sei <- NULL

   if (is.null(vi)) {
      if (!is.null(sei)) vi <- sei^2
   }

   if (is.null(vi)) stop(mstyle$stop("Must specify 'vi' or 'sei' argument."))

   yi <- x

   ### check length of yi and vi

   if (length(yi) != length(vi)) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same."))

   ### if ni has not been specified but is an attribute of yi, get it

   if (is.null(ni) && !is.null(attr(yi, "ni"))) ni <- attr(yi, "ni")

   ### check length of yi and ni (only if ni is not NULL)
   ### if there is a mismatch, then ni cannot be trusted, so set it to NULL

   if (!is.null(ni) && length(ni) != length(yi)) ni <- NULL

   ### if ni is now available, add it (back) as an attribute to yi

   if (!is.null(ni)) attr(yi, "ni") <- ni

   ### if a subset of studies is specified

   if (!is.null(subset)) {
      subset <- .setnafalse(subset, k=length(yi))
      yi <- yi[subset]
      vi <- vi[subset]
      ni <- ni[subset]
   }

   ### check for NAs and act accordingly

   has.na <- is.na(yi) | is.na(vi) | (if (is.null(ni)) FALSE else is.na(ni))

   if (any(has.na)) {

      not.na <- !has.na

      if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") {
         yi <- yi[not.na]
         vi <- vi[not.na]
         ni <- ni[not.na]
         warning(mstyle$warning("Studies with NAs omitted from test."), call.=FALSE)
      }

      if (na.act == "na.fail") stop(mstyle$stop("Missing values in data."))

   }

   #########################################################################

   if (predictor == "sei") X <- cbind(1, sei=sqrt(vi))
   if (predictor == "vi") X <- cbind(1, vi=vi)

   if (is.element(predictor, c("ni", "ninv", "sqrtni", "sqrtninv"))) {
      if (is.null(ni)) {
         stop(mstyle$stop("Sample size information must be specified via 'ni' argument."))
      } else {
         if (predictor == "ni") X <- cbind(1, ni=ni)
         if (predictor == "ninv") X <- cbind(1, ninv=1/ni)
         if (predictor == "sqrtni") X <- cbind(1, ni=sqrt(ni))
         if (predictor == "sqrtninv") X <- cbind(1, ni=1/sqrt(ni))
      }
   }

   ### check if X is of full rank (if not, cannot carry out the test)

   tmp <- lm(yi ~ X - 1)
   coef.na <- is.na(coef(tmp))
   if (any(coef.na)) stop(mstyle$stop("Model matrix not of full rank. Cannot fit model."))

   if (model == "rma") {

      fit <- rma.uni(yi, vi, mods=X, intercept=FALSE, ...)
zval <- fit$zval[2] pval <- fit$pval[2] ddf <- fit$ddf } else { yi <- c(yi) ### to remove attributes fit <- lm(yi ~ X - 1, weights=1/vi) tmp <- summary(fit) zval <- coef(tmp)[2,3] pval <- coef(tmp)[2,4] ddf <- length(yi) - 2 } ### get the 'limit estimate' if (predictor %in% c("sei", "vi", "ninv", "sqrtninv")) { if (model=="lm") { est <- coef(tmp)[1,1] ci.lb <- est - qt(level/2, df=ddf, lower.tail=FALSE) * coef(tmp)[1,2] ci.ub <- est + qt(level/2, df=ddf, lower.tail=FALSE) * coef(tmp)[1,2] } else { est <- coef(fit)[1] ci.lb <- fit$ci.lb[1] ci.ub <- fit$ci.ub[1] } } else { est <- ci.lb <- ci.ub <- NULL } res <- list(model=model, predictor=predictor, zval=zval, pval=pval, dfs=ddf, ddf=ddf, method=fit$method, digits=digits, ret.fit=ret.fit, fit=fit, est=est, ci.lb=ci.lb, ci.ub=ci.ub) class(res) <- "regtest" return(res) } metafor/R/misc.func.hidden.mv.r0000644000176200001440000014144114052500613016034 0ustar liggesusers############################################################################ ### function to test for missings in a var-cov matrix .anyNAv <- function(x) { k <- nrow(x) not.na <- not.na.diag <- !is.na(diag(x)) for (i in seq_len(k)[not.na.diag]) { not.na[i] <- !anyNA(x[i, seq_len(k)[not.na.diag]]) } return(!not.na) } ### function to test each row for any missings in the lower triangular part of a matrix #.anyNAv <- function(x) # return(sapply(seq_len(nrow(x)), FUN=function(i) anyNA(x[i,seq_len(i)]))) ### function above is faster (and does not require making a copy of the object) #.anyNAv <- function(X) { # X[upper.tri(X)] <- 0 # return(apply(is.na(X), 1, any)) #} ############################################################################ .process.G.aftersub <- function(mf.g, struct, formula, tau2, rho, isG, k, sparse, verbose) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (verbose > 1) message(mstyle$message(paste0("Processing '", paste0(formula, collapse=""), "' term (#1) ..."))) ### number of variables in model frame nvars <- ncol(mf.g) ### check that the number of variables is correct for the chosen structure if (is.element(struct, c("CS","HCS","UN","UNR","AR","HAR","CAR","ID","DIAG","PHYBM","PHYPL","PHYPD")) && nvars != 2) stop(mstyle$stop(paste0("Only a single inner variable allowed for an '~ inner | outer' term when 'struct=\"", struct, "\"'.")), call.=FALSE) ### get variables names in mf.g g.names <- names(mf.g) ### names for inner and outer factors/variables ### check that inner variable is a factor (or character variable) for structures that require this if (is.element(struct, c("CS","HCS","UN","UNR","ID","DIAG")) && !is.factor(mf.g[[1]]) && !is.character(mf.g[[1]])) stop(mstyle$stop(paste0("Inner variable in '~ inner | outer' term must be a factor or character variable when 'struct=\"", struct, "\"'.")), call.=FALSE) ### for struct="CAR", check that inner term is numeric and get the unique numeric values if (is.element(struct, c("CAR"))) { if (!is.numeric(mf.g[[1]])) stop(mstyle$stop("Inner variable in '~ inner | outer' term must be numeric for 'struct=\"CAR\"'."), call.=FALSE) g.values <- sort(unique(mf.g[[1]])) } else { g.values <- NULL } ### turn each variable in mf.g into a factor (not for SP/PHY structures or GEN) ### if a variable was a factor to begin with, this drops any unused levels, but order of existing levels is preserved if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { mf.g <- data.frame(mf.g[-nvars], outer=factor(mf.g[[nvars]])) } else { mf.g <- data.frame(inner=factor(mf.g[[1]]), 
outer=factor(mf.g[[2]])) } ### check if there are any NAs anywhere in mf.g if (anyNA(mf.g)) stop(mstyle$stop("No NAs allowed in variables specified via the 'random' argument."), call.=FALSE) ### get number of levels of each variable in mf.g (vector with two values, for the inner and outer factor) #g.nlevels <- c(nlevels(mf.g[[1]]), nlevels(mf.g[[2]])) ### works only for factors if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { g.nlevels <- c(length(unique(apply(mf.g[-nvars], 1, paste, collapse=" + "))), length(unique(mf.g[[nvars]]))) } else { g.nlevels <- c(length(unique(mf.g[[1]])), length(unique(mf.g[[2]]))) } ### get levels of each variable in mf.g #g.levels <- list(levels(mf.g[[1]]), levels(mf.g[[2]])) ### works only for factors if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { g.levels <- list(sort(unique(apply(mf.g[-nvars], 1, paste, collapse=" + "))), sort(unique((mf.g[[nvars]])))) } else { #g.levels <- list(sort(unique(as.character(mf.g[[1]]))), sort(unique(as.character(mf.g[[2]])))) g.levels <- list(as.character(sort(unique(mf.g[[1]]))), as.character(sort(unique(mf.g[[2]])))) } ### determine appropriate number of tau2 and rho values (note: this is done *after* subsetting) ### note: if g.nlevels[1] is 1, then technically there is no correlation, but we still need one ### rho for the optimization function (this rho is fixed to 0 further in the rma.mv() function) if (is.element(struct, c("CS","ID","AR","CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { tau2s <- 1 rhos <- 1 } if (is.element(struct, c("HCS","DIAG","HAR"))) { tau2s <- g.nlevels[1] rhos <- 1 } if (struct == "UN") { tau2s <- g.nlevels[1] rhos <- ifelse(g.nlevels[1] > 1, g.nlevels[1]*(g.nlevels[1]-1)/2, 1) } if (struct == "UNR") { tau2s <- 1 rhos <- ifelse(g.nlevels[1] > 1, g.nlevels[1]*(g.nlevels[1]-1)/2, 1) } if (struct == "GEN") { p <- nvars - 1 tau2s <- p rhos <- ifelse(p > 1, p*(p-1)/2, 1) } if (struct == "GDIAG") { p <- nvars - 1 tau2s <- p rhos <- 1 } ### set default value(s) for tau2 if it is unspecified if (is.null(tau2)) tau2 <- rep(NA_real_, tau2s) ### set default value(s) for rho argument if it is unspecified if (is.null(rho)) rho <- rep(NA_real_, rhos) ### allow quickly setting all tau2 values to a fixed value if (length(tau2) == 1L) tau2 <- rep(tau2, tau2s) ### allow quickly setting all rho values to a fixed value if (length(rho) == 1L) rho <- rep(rho, rhos) ### check if tau2 and rho are of correct length if (length(tau2) != tau2s) stop(mstyle$stop(paste0("Length of ", ifelse(isG, 'tau2', 'gamma2'), " argument (", length(tau2), ") does not match actual number of variance components (", tau2s, ").")), call.=FALSE) if (length(rho) != rhos) stop(mstyle$stop(paste0("Length of ", ifelse(isG, 'rho', 'phi'), " argument (", length(rho), ") does not match actual number of correlations (", rhos, ").")), call.=FALSE) ### checks on any fixed values of tau2 and rho arguments if (any(tau2 < 0, na.rm=TRUE)) stop(mstyle$stop(paste0("Specified value(s) of ", ifelse(isG, 'tau2', 'gamma2'), " must be >= 0.")), call.=FALSE) if (is.element(struct, c("CAR")) && any(rho > 1 | rho < 0, na.rm=TRUE)) stop(mstyle$stop(paste0("Specified value(s) of ", ifelse(isG, 'rho', 'phi'), " must be in [0,1].")), call.=FALSE) if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")) && any(rho < 0, na.rm=TRUE)) stop(mstyle$stop(paste0("Specified value(s) of ", ifelse(isG, 'rho', 'phi'), 
" must be >= 0.")), call.=FALSE) if (!is.element(struct, c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD")) && any(rho > 1 | rho < -1, na.rm=TRUE)) stop(mstyle$stop(paste0("Specified value(s) of ", ifelse(isG, 'rho', 'phi'), " must be in [-1,1].")), call.=FALSE) ### create model matrix for inner and outer factors of mf.g if (is.element(struct, c("CS","HCS","UN","UNR","AR","HAR","CAR","ID","DIAG"))) { if (g.nlevels[1] == 1) { Z.G1 <- cbind(rep(1,k)) } else { if (sparse) { Z.G1 <- sparse.model.matrix(~ mf.g[[1]] - 1) } else { Z.G1 <- model.matrix(~ mf.g[[1]] - 1) } } } if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { if (sparse) { Z.G1 <- Diagonal(k) } else { Z.G1 <- diag(1, nrow=k, ncol=k) } } if (is.element(struct, c("GEN","GDIAG"))) { if (sparse) { Z.G1 <- Matrix(as.matrix(mf.g[-nvars]), sparse=TRUE) } else { Z.G1 <- as.matrix(mf.g[-nvars]) } } if (g.nlevels[2] == 1) { Z.G2 <- cbind(rep(1,k)) } else { if (sparse) { Z.G2 <- sparse.model.matrix(~ mf.g[[nvars]] - 1) } else { Z.G2 <- model.matrix(~ mf.g[[nvars]] - 1) } } attr(Z.G1, "assign") <- NULL attr(Z.G1, "contrasts") <- NULL attr(Z.G2, "assign") <- NULL attr(Z.G2, "contrasts") <- NULL return(list(mf.g=mf.g, g.names=g.names, g.nlevels=g.nlevels, g.levels=g.levels, g.values=g.values, tau2s=tau2s, rhos=rhos, tau2=tau2, rho=rho, Z.G1=Z.G1, Z.G2=Z.G2)) } ############################################################################ .process.G.afterrmna <- function(mf.g, g.nlevels, g.levels, g.values, struct, formula, tau2, rho, Z.G1, Z.G2, isG, sparse, distspec, verbose) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (verbose > 1) message(mstyle$message(paste0("Processing '", paste0(formula, collapse=""), "' term (#2) ..."))) ### number of variables in model frame nvars <- ncol(mf.g) ### copy g.nlevels and g.levels g.nlevels.f <- g.nlevels g.levels.f <- g.levels ### redo: turn each variable in mf.g into a factor (not for SP structures or GEN) ### (reevaluates the levels present, but order of existing levels is preserved) if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { mf.g <- data.frame(mf.g[-nvars], outer=factor(mf.g[[nvars]])) } else { mf.g <- data.frame(inner=factor(mf.g[[1]]), outer=factor(mf.g[[2]])) } ### redo: get number of levels of each variable in mf.g (vector with two values, for the inner and outer factor) #g.nlevels <- c(nlevels(mf.g[[1]]), nlevels(mf.g[[2]])) ### works only for factors if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { g.nlevels <- c(length(unique(apply(mf.g[-nvars], 1, paste, collapse=" + "))), length(unique(mf.g[[nvars]]))) } else { g.nlevels <- c(length(unique(mf.g[[1]])), length(unique(mf.g[[2]]))) } ### redo: get levels of each variable in mf.g #g.levels <- list(levels(mf.g[[1]]), levels(mf.g[[2]])) ### works only for factors if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { g.levels <- list(sort(unique(apply(mf.g[-nvars], 1, paste, collapse=" + "))), sort(unique((mf.g[[nvars]])))) } else { #g.levels <- list(sort(unique(as.character(mf.g[[1]]))), sort(unique(as.character(mf.g[[2]])))) g.levels <- list(as.character(sort(unique(mf.g[[1]]))), as.character(sort(unique(mf.g[[2]])))) } ### determine which levels of the inner factor were removed g.levels.r <- !is.element(g.levels.f[[1]], g.levels[[1]]) ### warn if any levels were removed (not for 
"AR","CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","GEN","GDIAG") if (any(g.levels.r) && !is.element(struct, c("AR","CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","GEN","GDIAG"))) warning(mstyle$warning(paste0("One or more levels of inner factor (i.e., ", paste(g.levels.f[[1]][g.levels.r], collapse=", "), ") removed due to NAs.")), call.=FALSE) ### for "ID", "DIAG", and "GDIAG", fix rho to 0 if (is.element(struct, c("ID","DIAG","GDIAG"))) rho <- 0 ### if there is only a single arm for "CS","HCS","AR","HAR","CAR" (either to begin with or after removing NAs), then fix rho to 0 if (g.nlevels[1] == 1 && is.element(struct, c("CS","HCS","AR","HAR","CAR")) && is.na(rho)) { rho <- 0 warning(mstyle$warning(paste0("Inner factor has only a single level, so fixed value of ", ifelse(isG, 'rho', 'phi'), " to 0.")), call.=FALSE) } ### if there is only a single arm for SP/PHY structures or GEN/GDIAG (either to begin with or after removing NAs), cannot fit model if (g.nlevels[1] == 1 && is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) stop(mstyle$stop("Cannot fit model since inner term only has a single level."), call.=FALSE) ### k per level of the inner factor if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { g.levels.k <- table(factor(apply(mf.g[-nvars], 1, paste, collapse=" + "), levels=g.levels.f[[1]])) } else { g.levels.k <- table(factor(mf.g[[1]], levels=g.levels.f[[1]])) } ### for "HCS","UN","DIAG","HAR": if a particular level of the inner factor only occurs once, then set corresponding tau2 value to 0 (if not already fixed) ### note: no longer done; variance component should still be (weakly) identifiable #if (is.element(struct, c("HCS","UN","DIAG","HAR"))) { # if (any(is.na(tau2) & g.levels.k == 1)) { # tau2[is.na(tau2) & g.levels.k == 1] <- 0 # warning(mstyle$warning("Inner factor has k=1 for one or more levels. Corresponding 'tau2' value(s) fixed to 0."), call.=FALSE) # } #} ### check if each study has only a single arm (could be different arms!) 
### for "CS","HCS","AR","HAR","CAR" must then fix rho to 0 (if not already fixed) ### for SP/PHY structures cannot fit model; for GEN rho may still be (weakly) identifiable if (g.nlevels[2] == nrow(mf.g)) { if (is.element(struct, c("CS","HCS","AR","HAR","CAR")) && is.na(rho)) { rho <- 0 warning(mstyle$warning(paste0("Each level of the outer factor contains only a single level of the inner factor, so fixed value of ", ifelse(isG, 'rho', 'phi'), " to 0.")), call.=FALSE) } if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) stop(mstyle$stop("Cannot fit model since each level of the outer factor contains only a single level of the inner term."), call.=FALSE) } g.levels.comb.k <- NULL if (!is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { ### create matrix where each row (= study) indicates how often each arm occurred ### then turn this into a list (with each element equal to a row (= study)) g.levels.comb.k <- crossprod(Z.G2, Z.G1) g.levels.comb.k <- split(g.levels.comb.k, seq_len(nrow(g.levels.comb.k))) ### create matrix for each element (= study) that indicates which combinations occurred ### sum up all matrices (numbers indicate in how many studies each combination occurred) ### take upper triangle part that corresponds to the arm combinations (in order of rho) g.levels.comb.k <- lapply(g.levels.comb.k, function(x) outer(x,x, FUN="&")) g.levels.comb.k <- Reduce("+", g.levels.comb.k) g.levels.comb.k <- g.levels.comb.k[upper.tri(g.levels.comb.k)] ### UN/UNR: if a particular combination of arms never occurs in any of the studies, then must fix the corresponding rho to 0 (if not already fixed) ### this also takes care of the case where each study has only a single arm if (is.element(struct, c("UN","UNR")) && any(g.levels.comb.k == 0 & is.na(rho))) { rho[g.levels.comb.k == 0] <- 0 warning(mstyle$warning(paste0("Some combinations of the levels of the inner factor never occurred. Corresponding ", ifelse(isG, 'rho', 'phi'), " value(s) fixed to 0.")), call.=FALSE) } ### if there was only a single arm for "UN" or "UNR" to begin with, then fix rho to 0 ### (technically there is then no rho at all to begin with, but rhos was still set to 1 earlier for the optimization routine) ### (if there is a single arm after removing NAs, then this is dealt with below by setting tau2 and rho values to 0) if (is.element(struct, c("UN","UNR")) && g.nlevels.f[1] == 1 && is.na(rho)) { rho <- 0 warning(mstyle$warning(paste0("Inner factor has only a single level, so fixed value of ", ifelse(isG, 'rho', 'phi'), " to 0.")), call.=FALSE) } } ### construct G matrix for the various structures if (struct == "CS") { G <- matrix(rho*tau2, nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) diag(G) <- tau2 } if (struct == "HCS") { G <- matrix(rho, nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) diag(G) <- 1 G <- diag(sqrt(tau2), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) %*% G %*% diag(sqrt(tau2), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) diag(G) <- tau2 } if (is.element(struct, c("UN","GEN"))) { G <- .con.vcov.UN(tau2, rho) } if (struct == "UNR") { G <- .con.vcov.UNR(tau2, rho) } if (is.element(struct, c("GDIAG"))) { G <- diag(tau2, nrow=length(tau2), ncol=length(tau2)) } if (is.element(struct, c("ID","DIAG"))) { G <- diag(tau2, nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) } if (struct == "AR") { if (is.na(rho)) { G <- matrix(NA_real_, nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) } else { ### is g.nlevels.f[1] == 1 even possible here? 
if (g.nlevels.f[1] > 1) { G <- toeplitz(ARMAacf(ar=rho, lag.max=g.nlevels.f[1]-1)) } else { G <- diag(1) } } G <- diag(sqrt(rep(tau2, g.nlevels.f[1])), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) %*% G %*% diag(sqrt(rep(tau2, g.nlevels.f[1])), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) diag(G) <- tau2 } if (struct == "HAR") { if (is.na(rho)) { G <- matrix(NA_real_, nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) } else { ### is g.nlevels.f[1] == 1 even possible here? if (g.nlevels.f[1] > 1) { G <- toeplitz(ARMAacf(ar=rho, lag.max=g.nlevels.f[1]-1)) } else { G <- diag(1) } } G <- diag(sqrt(tau2), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) %*% G %*% diag(sqrt(tau2), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) diag(G) <- tau2 } if (struct == "CAR") { if (is.na(rho)) { G <- matrix(NA_real_, nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) } else { ### is g.nlevels.f[1] == 1 even possible here? if (g.nlevels.f[1] > 1) { G <- outer(g.values, g.values, function(x,y) rho^(abs(x-y))) } else { G <- diag(1) } } G <- diag(sqrt(rep(tau2, g.nlevels.f[1])), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) %*% G %*% diag(sqrt(rep(tau2, g.nlevels.f[1])), nrow=g.nlevels.f[1], ncol=g.nlevels.f[1]) diag(G) <- tau2 } if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { ### remove the '| outer' part from the formula and add '- 1' formula <- as.formula(paste0(strsplit(paste0(formula, collapse=""), "|", fixed=TRUE)[[1]][1], "- 1", collapse="")) ### create distance matrix if (is.matrix(distspec)) { if (anyNA(distspec)) stop(mstyle$stop("No missing values allowed in matrices specified via 'dist'.")) if (!.is.square(distspec)) stop(mstyle$stop("Distance matrices specified via 'dist' must be square matrices.")) if (!isSymmetric(unname(distspec))) stop(mstyle$stop("Distance matrices specified via 'dist' must be symmetric matrices.")) if (is.null(rownames(distspec))) rownames(distspec) <- colnames(distspec) if (is.null(colnames(distspec))) colnames(distspec) <- rownames(distspec) if (length(colnames(distspec)) != length(unique(colnames(distspec)))) stop(mstyle$stop("Distance matrices specified via 'dist' must have unique dimension names.")) if (any(!is.element(as.character(mf.g[[1]]), colnames(distspec)))) stop(mstyle$stop(paste0("There are levels in '", colnames(mf.g)[1], "' for which there are no matching rows/columns in the corresponding 'dist' matrix."))) if (is.element(struct, c("PHYBM","PHYPL","PHYPD")) && !all.equal(min(distspec), 0)) warning(mstyle$warning("Minimum value in the distance matrix is not 0."), call.=FALSE) if (is.element(struct, c("PHYBM","PHYPL","PHYPD")) && !all.equal(max(distspec), 2)) warning(mstyle$warning("Maximum value in the distance matrix is not 2."), call.=FALSE) Dmat <- distspec[as.character(mf.g[[1]]), as.character(mf.g[[1]])] } else { if (is.element(struct, c("PHYBM","PHYPL","PHYPD"))) stop(mstyle$stop("Must supply distance matrix via 'dist' for phylogenetic correlation structures.")) Cmat <- model.matrix(formula, data=mf.g[-nvars]) if (is.function(distspec)) { Dmat <- distspec(Cmat) } else { if (is.element(distspec, c("euclidean", "maximum", "manhattan"))) Dmat <- as.matrix(dist(Cmat, method=distspec)) if (distspec == "gcd") Dmat <- sp::spDists(Cmat, longlat=TRUE) } } if (sparse) Dmat <- Matrix(Dmat, sparse=TRUE) } else { Dmat <- NULL } if (struct == "SPEXP") { Rmat <- exp(-Dmat/rho) G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "SPGAU") { Rmat <- exp(-Dmat^2/rho^2) G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "SPLIN") { Rmat <- (1 - Dmat/rho) * I(Dmat < 
rho) G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "SPRAT") { Rmat <- 1 - (Dmat/rho)^2 / (1 + (Dmat/rho)^2) G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "SPSPH") { Rmat <- (1 - 3/2*Dmat/rho + 1/2*(Dmat/rho)^3) * I(Dmat < rho) G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "PHYBM") { rho <- max(Dmat) Rmat <- 1 - Dmat/rho G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "PHYPL") { Rmat <- rho * (1 - Dmat/max(Dmat)) diag(Rmat) <- 1 Rmat[Dmat == 0] <- 1 G <- tau2 * Rmat * tcrossprod(Z.G2) } if (struct == "PHYPD") { Rmat <- 1 - Dmat/max(Dmat) G <- tau2 * Rmat^rho * tcrossprod(Z.G2) } ### for spatial and phylogeny structures, compute a much more sensible initial value for rho if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { if (struct == "PHYBM") rho.init <- max(Dmat) if (struct == "PHYPL") rho.init <- 0.5 if (struct == "PHYPD") rho.init <- 1 if (!is.element(struct, c("PHYBM","PHYPL","PHYPD"))) rho.init <- unname(suppressMessages(quantile(Dmat[upper.tri(Dmat)], 0.25))) # suppressMessages() to avoid '[ ] : .M.sub.i.logical() maybe inefficient' messages when sparse=TRUE } else { rho.init <- NULL } ### for "CS","AR","CAR","ID" set tau2 value to 0 for any levels that were removed if (any(g.levels.r) && is.element(struct, c("CS","AR","CAR","ID"))) { G[g.levels.r,] <- 0 G[,g.levels.r] <- 0 } ### for "HCS","HAR","DIAG" set tau2 value(s) to 0 for any levels that were removed if (any(g.levels.r) && is.element(struct, c("HCS","HAR","DIAG"))) { G[g.levels.r,] <- 0 G[,g.levels.r] <- 0 tau2[g.levels.r] <- 0 warning(mstyle$warning(paste0("Fixed ", ifelse(isG, 'tau2', 'gamma2'), " to 0 for removed level(s).")), call.=FALSE) } ### for "UN", set tau2 value(s) and corresponding rho(s) to 0 for any levels that were removed if (any(g.levels.r) && struct == "UN") { G[g.levels.r,] <- 0 G[,g.levels.r] <- 0 tau2[g.levels.r] <- 0 rho <- G[upper.tri(G)] warning(mstyle$warning(paste0("Fixed ", ifelse(isG, 'tau2', 'gamma2'), " and corresponding ", ifelse(isG, 'rho', 'phi'), " value(s) to 0 for removed level(s).")), call.=FALSE) } ### for "UNR", set rho(s) to 0 corresponding to any levels that were removed if (any(g.levels.r) && struct == "UNR") { G[g.levels.r,] <- 0 G[,g.levels.r] <- 0 diag(G) <- tau2 ### don't really need this rho <- G[upper.tri(G)] warning(mstyle$warning(paste0("Fixed ", ifelse(isG, 'rho', 'phi'), " value(s) to 0 for removed level(s).")), call.=FALSE) } ### special handling for the bivariate model: ### if tau2 (for "CS","AR","CAR","UNR") or either tau2.1 or tau2.2 (for "HCS","UN","HAR") is fixed to 0, then rho must be fixed to 0 if (g.nlevels.f[1] == 2) { if (is.element(struct, c("CS","AR","CAR","UNR")) && !is.na(tau2) && tau2 == 0) rho <- 0 if (is.element(struct, c("HCS","UN","HAR")) && ((!is.na(tau2[1]) && tau2[1] == 0) || (!is.na(tau2[2]) && tau2[2] == 0))) rho <- 0 } return(list(mf.g=mf.g, g.nlevels=g.nlevels, g.nlevels.f=g.nlevels.f, g.levels=g.levels, g.levels.f=g.levels.f, g.levels.r=g.levels.r, g.levels.k=g.levels.k, g.levels.comb.k=g.levels.comb.k, tau2=tau2, rho=rho, G=G, Dmat=Dmat, rho.init=rho.init)) } ############################################################################ ### function to construct var-cov matrix for "UN" and "GEN" structures given vector of variances and correlations .con.vcov.UN <- function(vars, cors) { dims <- length(vars) G <- matrix(1, nrow=dims, ncol=dims) G[upper.tri(G)] <- cors G[lower.tri(G)] <- t(G)[lower.tri(G)] H <- diag(sqrt(vars), nrow=dims, ncol=dims) return(H %*% G %*% H) } ### function to 
construct var-cov matrix for "UN" and "GEN" structures given vector of 'choled' variances and covariances .con.vcov.UN.chol <- function(vars, covs) { dims <- length(vars) G <- matrix(0, nrow=dims, ncol=dims) G[upper.tri(G)] <- covs diag(G) <- vars return(crossprod(G)) } ### function to construct var-cov matrix for "UNR" structure given the variance and correlations .con.vcov.UNR <- function(var, cors) { dims <- round((1 + sqrt(1 + 8*length(cors)))/2) G <- matrix(1, nrow=dims, ncol=dims) G[upper.tri(G)] <- cors G[lower.tri(G)] <- t(G)[lower.tri(G)] return(var * G) } ### function to construct var-cov matrix for "UNR" structure given the variance and vector of 'choled' correlations .con.vcov.UNR.chol <- function(var, cors) { dims <- round((1 + sqrt(1 + 8*length(cors)))/2) G <- matrix(0, nrow=dims, ncol=dims) G[upper.tri(G)] <- cors diag(G) <- 1 return(var * crossprod(G)) } ############################################################################ ### function to construct var-cov matrix (G or H) for '~ inner | outer' terms .con.E <- function(v, r, v.val, r.val, Z1, Z2, levels.r, values, Dmat, struct, cholesky, vctransf, posdefify, sparse) { ### if cholesky=TRUE, back-transformation/substitution is done below; otherwise, back-transform and replace fixed values if (!cholesky) { if (vctransf) { v <- ifelse(is.na(v.val), exp(v), v.val) ### variances are optimized in log space, so exponentiate if (struct == "CAR") r <- ifelse(is.na(r.val), plogis(r), r.val) ### CAR correlation is optimized in qlogis space, so use plogis if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) r <- ifelse(is.na(r.val), exp(r), r.val) ### spatial and phylogenetic 'correlation' parameter is optimized in log space, so exponentiate if (!is.element(struct, c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) r <- ifelse(is.na(r.val), tanh(r), r.val) ### other correlations are optimized in atanh space, so use tanh } else { ### for Hessian computation, can choose to leave as is v <- ifelse(is.na(v.val), v, v.val) r <- ifelse(is.na(r.val), r, r.val) v[v < 0] <- 0 if (struct == "CAR") { r[r < 0] <- 0 r[r > 1] <- 1 } if (is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { r[r < 0] <- 0 } if (!is.element(struct, c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { r[r < -1] <- -1 r[r > 1] <- 1 } } v <- ifelse(v <= .Machine$double.eps*10, 0, v) ### don't do this with Cholesky factorization, since values can be negative } ncol.Z1 <- ncol(Z1) if (struct == "CS") { E <- matrix(r*v, nrow=ncol.Z1, ncol=ncol.Z1) diag(E) <- v } if (struct == "HCS") { E <- matrix(r, nrow=ncol.Z1, ncol=ncol.Z1) diag(E) <- 1 E <- diag(sqrt(v), nrow=ncol.Z1, ncol=ncol.Z1) %*% E %*% diag(sqrt(v), nrow=ncol.Z1, ncol=ncol.Z1) diag(E) <- v } if (is.element(struct, c("UN","GEN"))) { if (cholesky) { E <- .con.vcov.UN.chol(v, r) v <- diag(E) ### need this, so correct values are shown when verbose=TRUE r <- cov2cor(E)[upper.tri(E)] ### need this, so correct values are shown when verbose=TRUE v[!is.na(v.val)] <- v.val[!is.na(v.val)] ### replace any fixed values r[!is.na(r.val)] <- r.val[!is.na(r.val)] ### replace any fixed values } E <- .con.vcov.UN(v, r) if (posdefify) { E <- as.matrix(nearPD(E)$mat) ### nearPD() in Matrix package v <- diag(E) ### need this, so correct values are shown when verbose=TRUE r <- cov2cor(E)[upper.tri(E)] ### need this, so correct values are shown when verbose=TRUE } } if (struct == "UNR") { if (cholesky) { E <- 
.con.vcov.UNR.chol(v, r) v <- diag(E)[1,1] ### need this, so correct values are shown when verbose=TRUE r <- cov2cor(E)[upper.tri(E)] ### need this, so correct values are shown when verbose=TRUE v[!is.na(v.val)] <- v.val[!is.na(v.val)] ### replace any fixed values r[!is.na(r.val)] <- r.val[!is.na(r.val)] ### replace any fixed values } E <- .con.vcov.UNR(v, r) if (posdefify) { E <- as.matrix(nearPD(E, keepDiag=TRUE)$mat) ### nearPD() in Matrix package v <- E[1,1] ### need this, so correct values are shown when verbose=TRUE r <- cov2cor(E)[upper.tri(E)] ### need this, so correct values are shown when verbose=TRUE } } if (struct == "GDIAG") { E <- diag(v, nrow=length(v), ncol=length(v)) } if (is.element(struct, c("ID","DIAG"))) E <- diag(v, nrow=ncol.Z1, ncol=ncol.Z1) if (struct == "AR") { if (ncol.Z1 > 1) { E <- toeplitz(ARMAacf(ar=r, lag.max=ncol.Z1-1)) } else { E <- diag(1) } E <- diag(sqrt(rep(v, ncol.Z1)), nrow=ncol.Z1, ncol=ncol.Z1) %*% E %*% diag(sqrt(rep(v, ncol.Z1)), nrow=ncol.Z1, ncol=ncol.Z1) diag(E) <- v } if (struct == "HAR") { if (ncol.Z1 > 1) { E <- toeplitz(ARMAacf(ar=r, lag.max=ncol.Z1-1)) } else { E <- diag(1) } E <- diag(sqrt(v), nrow=ncol.Z1, ncol=ncol.Z1) %*% E %*% diag(sqrt(v), nrow=ncol.Z1, ncol=ncol.Z1) diag(E) <- v } if (struct == "CAR") { if (ncol.Z1 > 1) { E <- outer(values, values, function(x,y) r^(abs(x-y))) } else { E <- diag(1) } E <- diag(sqrt(rep(v, ncol.Z1)), nrow=ncol.Z1, ncol=ncol.Z1) %*% E %*% diag(sqrt(rep(v, ncol.Z1)), nrow=ncol.Z1, ncol=ncol.Z1) diag(E) <- v } if (struct == "SPEXP") E <- v * exp(-Dmat/r) * tcrossprod(Z2) if (struct == "SPGAU") E <- v * exp(-Dmat^2/r^2) * tcrossprod(Z2) if (struct == "SPLIN") E <- v * ((1 - Dmat/r) * I(Dmat < r)) * tcrossprod(Z2) if (struct == "SPRAT") E <- v * (1 - (Dmat/r)^2 / (1 + (Dmat/r)^2)) * tcrossprod(Z2) if (struct == "SPSPH") E <- v * ((1 - 3/2*Dmat/r + 1/2*(Dmat/r)^3) * I(Dmat < r)) * tcrossprod(Z2) if (struct == "PHYBM") { r <- max(Dmat) E <- 1 - Dmat/r E <- v * E * tcrossprod(Z2) } if (struct == "PHYPL") { E <- r * (1 - Dmat/max(Dmat)) diag(E) <- 1 E[Dmat == 0] <- 1 E <- v * E * tcrossprod(Z2) } if (struct == "PHYPD") { E <- 1 - Dmat/max(Dmat) E <- v * E^r * tcrossprod(Z2) } ### set variance and corresponding correlation value(s) to 0 for any levels that were removed if (!is.element(struct, c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG")) && any(levels.r)) { E[levels.r,] <- 0 E[,levels.r] <- 0 } if (sparse) E <- Matrix(E, sparse=TRUE) return(list(v=v, r=r, E=E)) } ############################################################################ ### -1 times the log likelihood (regular or restricted) for rma.mv models .ll.rma.mv <- function(par, reml, Y, M, A, X.fit, k, pX, # note: X.fit due to hessian(); pX due to nlm(); M=V to begin with D.S, Z.G1, Z.G2, Z.H1, Z.H2, g.Dmat, h.Dmat, sigma2.val, tau2.val, rho.val, gamma2.val, phi.val, sigma2s, tau2s, rhos, gamma2s, phis, withS, withG, withH, struct, g.levels.r, h.levels.r, g.values, h.values, sparse, cholesky, posdefify, vctransf, verbose, digits, REMLf, dofit=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) ### only NA values in sigma2.val, tau2.val, rho.val, gamma2.val, phi.val should be estimated; otherwise, replace with fixed values if (withS) { if (vctransf) { sigma2 <- ifelse(is.na(sigma2.val), exp(par[seq_len(sigma2s)]), sigma2.val) ### sigma2 is optimized in log space, so exponentiate } else { sigma2 <- ifelse(is.na(sigma2.val), par[seq_len(sigma2s)], sigma2.val) ### for Hessian computation, can choose to leave as 
is sigma2[sigma2 < 0] <- 0 } #if (any(is.nan(sigma2))) # return(Inf) ### set really small sigma2 values equal to 0 (anything below .Machine$double.eps*10 is essentially 0) sigma2 <- ifelse(sigma2 <= .Machine$double.eps*10, 0, sigma2) for (j in seq_len(sigma2s)) { M <- M + sigma2[j] * D.S[[j]] } } if (withG) { resG <- .con.E(v=par[(sigma2s+1):(sigma2s+tau2s)], r=par[(sigma2s+tau2s+1):(sigma2s+tau2s+rhos)], v.val=tau2.val, r.val=rho.val, Z1=Z.G1, Z2=Z.G2, levels.r=g.levels.r, values=g.values, Dmat=g.Dmat, struct=struct[1], cholesky=cholesky[1], vctransf=vctransf, posdefify=posdefify, sparse=sparse) tau2 <- resG$v rho <- resG$r G <- resG$E M <- M + (Z.G1 %*% G %*% t(Z.G1)) * tcrossprod(Z.G2) } if (withH) { resH <- .con.E(v=par[(sigma2s+tau2s+rhos+1):(sigma2s+tau2s+rhos+gamma2s)], r=par[(sigma2s+tau2s+rhos+gamma2s+1):(sigma2s+tau2s+rhos+gamma2s+phis)], v.val=gamma2.val, r.val=phi.val, Z1=Z.H1, Z2=Z.H2, levels.r=h.levels.r, values=h.values, Dmat=h.Dmat, struct=struct[2], cholesky=cholesky[2], vctransf=vctransf, posdefify=posdefify, sparse=sparse) gamma2 <- resH$v phi <- resH$r H <- resH$E M <- M + (Z.H1 %*% H %*% t(Z.H1)) * tcrossprod(Z.H2) } ### note: if M is sparse, then using nearPD() could blow up if (posdefify) M <- as.matrix(nearPD(M)$mat) if (verbose > 1) { W <- try(chol2inv(chol(M)), silent=FALSE) } else { W <- try(suppressWarnings(chol2inv(chol(M))), silent=TRUE) } ### note: need W for REML llval computation if (inherits(W, "try-error")) { ### if M is not positive-definite, set the (restricted) log likelihood to -Inf ### this idea is based on: https://stats.stackexchange.com/q/11368/1934 (this is crude, but should ### move the parameter estimates away from values that create the non-positive-definite M matrix) if (dofit) { stop(mstyle$stop("Final variance-covariance matrix not positive definite.")) } else { llval <- -Inf } } else { if (verbose > 1) { U <- try(chol(W), silent=FALSE) } else { U <- try(suppressWarnings(chol(W)), silent=TRUE) } ### Y ~ N(Xbeta, M), so UY ~ N(UXbeta, UMU) where UMU = I ### return(U %*% M %*% U) if (inherits(U, "try-error")) { if (dofit) { stop(mstyle$stop("Cannot fit model based on estimated marginal variance-covariance matrix.")) } else { llval <- -Inf } } else { if (!dofit || is.null(A)) { sX <- U %*% X.fit sY <- U %*% Y beta <- solve(crossprod(sX), crossprod(sX, sY)) RSS <- sum(as.vector(sY - sX %*% beta)^2) if (dofit) vb <- matrix(solve(crossprod(sX)), nrow=pX, ncol=pX) } else { stXAX <- chol2inv(chol(as.matrix(t(X.fit) %*% A %*% X.fit))) #stXAX <- tcrossprod(qr.solve(sX, diag(k))) beta <- matrix(stXAX %*% crossprod(X.fit,A) %*% Y, ncol=1) RSS <- as.vector(t(Y - X.fit %*% beta) %*% W %*% (Y - X.fit %*% beta)) vb <- matrix(stXAX %*% t(X.fit) %*% A %*% M %*% A %*% X.fit %*% stXAX, nrow=pX, ncol=pX) } llvals <- c(NA_real_, NA_real_) if (dofit || !reml) llvals[1] <- -1/2 * (k) * log(2*base::pi) - 1/2 * determinant(M, logarithm=TRUE)$modulus - 1/2 * RSS if (dofit || reml) llvals[2] <- -1/2 * (k-pX) * log(2*base::pi) + ifelse(REMLf, 1/2 * determinant(crossprod(X.fit), logarithm=TRUE)$modulus, 0) + -1/2 * determinant(M, logarithm=TRUE)$modulus - 1/2 * determinant(crossprod(X.fit,W) %*% X.fit, logarithm=TRUE)$modulus - 1/2 * RSS if (dofit) { res <- list(beta=beta, vb=vb, M=M, llvals=llvals) if (withS) res$sigma2 <- sigma2 if (withG) { res$G <- G res$tau2 <- tau2 res$rho <- rho } if (withH) { res$H <- H res$gamma2 <- gamma2 res$phi <- phi } return(res) } else { llval <- ifelse(reml, llvals[2], llvals[1]) } } } if ((vctransf && verbose) || (!vctransf && (verbose 
> 1))) { cat(mstyle$verbose(paste0("ll = ", ifelse(is.na(llval), NA, formatC(llval, digits=digits[["fit"]], format="f", flag=" ")))), " ") if (withS) cat(mstyle$verbose(paste0("sigma2 =", paste(ifelse(is.na(sigma2), NA, formatC(sigma2, digits=digits[["var"]], format="f", flag=" ")), collapse=" "), " "))) if (withG) { cat(mstyle$verbose(paste0("tau2 =", paste(ifelse(is.na(tau2), NA, formatC(tau2, digits=digits[["var"]], format="f", flag=" ")), collapse=" "), " "))) cat(mstyle$verbose(paste0("rho =", paste(ifelse(is.na(rho), NA, formatC(rho, digits=digits[["var"]], format="f", flag=" ")), collapse=" "), " "))) } if (withH) { cat(mstyle$verbose(paste0("gamma2 =", paste(ifelse(is.na(gamma2), NA, formatC(gamma2, digits=digits[["var"]], format="f", flag=" ")), collapse=" "), " "))) cat(mstyle$verbose(paste0("phi =", paste(ifelse(is.na(phi), NA, formatC(phi, digits=digits[["var"]], format="f", flag=" ")), collapse=" "), " "))) } cat("\n") } return(-1 * c(llval)) } ############################################################################ .cooks.distance.rma.mv <- function(i, obj, parallel, svb, cluster, ids, reestimate, btt) { if (parallel == "snow") library(metafor) incl <- cluster %in% ids[i] ### note: not.na=FALSE only when there are missings in data, not when model below cannot be fitted or results in dropped coefficients if (reestimate) { ### set initial values to estimates from full model control <- obj$control control$sigma2.init <- obj$sigma2 control$tau2.init <- obj$tau2 control$rho.init <- obj$rho control$gamma2.init <- obj$gamma2 control$phi.init <- obj$phi ### fit model without data from ith cluster res <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=ifelse(obj$vc.fix$sigma2, obj$sigma2, NA), tau2=ifelse(obj$vc.fix$tau2, obj$tau2, NA), rho=ifelse(obj$vc.fix$rho, obj$rho, NA), gamma2=ifelse(obj$vc.fix$gamma2, obj$gamma2, NA), phi=ifelse(obj$vc.fix$phi, obj$phi, NA), sparse=obj$sparse, dist=obj$dist, control=control, subset=!incl)), silent=TRUE) } else { ### set values of variance/correlation components to those from the 'full' model res <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=obj$sigma2, tau2=obj$tau2, rho=obj$rho, gamma2=obj$gamma2, phi=obj$phi, sparse=obj$sparse, dist=obj$dist, control=obj$control, subset=!incl)), silent=TRUE) } if (inherits(res, "try-error")) return(list(cook.d = NA)) ### removing a cluster could lead to a model coefficient becoming inestimable if (any(res$coef.na)) return(list(cook.d = NA)) ### compute dfbeta value(s) (including coefficients as specified via btt) dfb <- obj$beta[btt] - res$beta[btt] ### compute Cook's distance return(list(cook.d = crossprod(dfb,svb) %*% dfb)) } .rstudent.rma.mv <- function(i, obj, parallel, cluster, ids, reestimate) { if (parallel == "snow") library(metafor) incl <- cluster %in% ids[i] k.id <- sum(incl) if (reestimate) { ### set initial values to estimates from full model control <- obj$control control$sigma2.init <- obj$sigma2 control$tau2.init <- obj$tau2 control$rho.init <- obj$rho control$gamma2.init <- obj$gamma2 control$phi.init <- obj$phi ### fit model without data from ith cluster res <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, 
mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=ifelse(obj$vc.fix$sigma2, obj$sigma2, NA), tau2=ifelse(obj$vc.fix$tau2, obj$tau2, NA), rho=ifelse(obj$vc.fix$rho, obj$rho, NA), gamma2=ifelse(obj$vc.fix$gamma2, obj$gamma2, NA), phi=ifelse(obj$vc.fix$phi, obj$phi, NA), sparse=obj$sparse, dist=obj$dist, control=control, subset=!incl)), silent=TRUE) } else { ### set values of variance/correlation components to those from the 'full' model res <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=obj$sigma2, tau2=obj$tau2, rho=obj$rho, gamma2=obj$gamma2, phi=obj$phi, sparse=obj$sparse, dist=obj$dist, control=obj$control, subset=!incl)), silent=TRUE) } if (inherits(res, "try-error")) return(list(delresid = rep(NA, k.id), sedelresid = rep(NA, k.id), X2 = NA, k.id = NA, pos = which(incl))) ### removing a cluster could lead to a model coefficient becoming inestimable if (any(res$coef.na)) return(list(delresid = rep(NA, k.id), sedelresid = rep(NA, k.id), X2 = NA, k.id = NA, pos = which(incl))) ### fit model based on all data but with var/cor components fixed to those from res tmp <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=res$sigma2, tau2=res$tau2, rho=res$rho, gamma2=res$gamma2, phi=res$phi, sparse=obj$sparse, dist=obj$dist, control=obj$control)), silent=TRUE) #tmp <- try(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=res$sigma2, tau2=res$tau2, rho=res$rho, gamma2=res$gamma2, phi=res$phi, sparse=obj$sparse, dist=obj$dist, control=obj$control), silent=FALSE) Xi <- obj$X[incl,,drop=FALSE] delpred <- Xi %*% res$beta vdelpred <- Xi %*% res$vb %*% t(Xi) delresid <- c(obj$yi[incl] - delpred) sedelresid <- c(sqrt(diag(tmp$M[incl,incl,drop=FALSE] + vdelpred))) sve <- try(chol2inv(chol(tmp$M[incl,incl,drop=FALSE] + vdelpred)), silent=TRUE) #sve <- try(solve(tmp$M[incl,incl,drop=FALSE] + vdelpred), silent=TRUE) if (inherits(sve, "try-error")) return(list(delresid = delresid, sedelresid = sedelresid, X2 = NA, k.id = k.id, pos = which(incl))) X2 <- c(rbind(delresid) %*% sve %*% cbind(delresid)) return(list(delresid = delresid, sedelresid = sedelresid, X2 = X2, k.id = k.id, pos = which(incl))) } .dfbetas.rma.mv <- function(i, obj, parallel, cluster, ids, reestimate) { if (parallel == "snow") library(metafor) incl <- cluster %in% ids[i] if (reestimate) { ### set initial values to estimates from full model control <- obj$control control$sigma2.init <- obj$sigma2 control$tau2.init <- obj$tau2 control$rho.init <- obj$rho control$gamma2.init <- obj$gamma2 control$phi.init <- obj$phi ### fit model without data from ith cluster res <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=ifelse(obj$vc.fix$sigma2, obj$sigma2, NA), tau2=ifelse(obj$vc.fix$tau2, obj$tau2, NA), 
rho=ifelse(obj$vc.fix$rho, obj$rho, NA), gamma2=ifelse(obj$vc.fix$gamma2, obj$gamma2, NA), phi=ifelse(obj$vc.fix$phi, obj$phi, NA), sparse=obj$sparse, dist=obj$dist, control=control, subset=!incl)), silent=TRUE) } else { ### set values of variance/correlation components to those from the 'full' model res <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=obj$sigma2, tau2=obj$tau2, rho=obj$rho, gamma2=obj$gamma2, phi=obj$phi, sparse=obj$sparse, dist=obj$dist, control=obj$control, subset=!incl)), silent=TRUE) } if (inherits(res, "try-error")) return(list(dfbs = NA)) ### removing a cluster could lead to a model coefficient becoming inestimable if (any(res$coef.na)) return(list(dfbs = NA)) ### fit model based on all data but with var/cor components fixed to those from res tmp <- try(suppressWarnings(rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=res$sigma2, tau2=res$tau2, rho=res$rho, gamma2=res$gamma2, phi=res$phi, sparse=obj$sparse, dist=obj$dist, control=obj$control)), silent=TRUE) ### compute dfbeta value(s) dfb <- obj$beta - res$beta ### compute dfbetas dfbs <- c(dfb / sqrt(diag(tmp$vb))) return(list(dfbs = dfbs)) } ############################################################################ .ddf.calc <- function(dfs, X, k, p, mf.s=NULL, mf.g=NULL, mf.h=NULL, beta=TRUE) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (beta) { if (is.numeric(dfs)) { ddf <- dfs if (length(ddf) == 1L) ddf <- rep(ddf, p) if (length(ddf) != p) stop(mstyle$stop(paste0("Length of 'dfs' argument (", length(dfs), ") does not match the number of model coefficient (", p, ")."))) } if (is.character(dfs) && dfs == "residual") ddf <- rep(k-p, p) if (is.character(dfs) && dfs == "contain") { if (!is.null(mf.g)) mf.g <- cbind(inner=apply(mf.g, 1, paste, collapse=" + "), outer=mf.g[ncol(mf.g)]) if (!is.null(mf.h)) mf.h <- cbind(inner=apply(mf.h, 1, paste, collapse=" + "), outer=mf.h[ncol(mf.h)]) s.nlevels <- sapply(mf.s, function(x) length(unique(x))) # list() if no S g.nlevels <- c(length(unique(mf.g[[1]])), length(unique(mf.g[[2]]))) # c(0,0) if no G h.nlevels <- c(length(unique(mf.h[[1]])), length(unique(mf.h[[2]]))) # c(0,0) if no H #print(list(s.nlevels, g.nlevels, h.nlevels)) s.ddf <- rep(k, p) g.ddf <- rep(k, p) h.ddf <- rep(k, p) for (j in seq_len(p)) { if (!is.null(mf.s)) { s.lvl <- sapply(seq_along(mf.s), function(i) all(apply(table(X[,j], mf.s[[i]]) > 0, 2, sum) == 1)) if (any(s.lvl)) s.ddf[j] <- min(s.nlevels[s.lvl]) } if (!is.null(mf.g)) { g.lvl <- sapply(seq_along(mf.g), function(i) all(apply(table(X[,j], mf.g[[i]]) > 0, 2, sum) == 1)) if (any(g.lvl)) g.ddf[j] <- min(g.nlevels[g.lvl]) } if (!is.null(mf.h)) { h.lvl <- sapply(seq_along(mf.h), function(i) all(apply(table(X[,j], mf.h[[i]]) > 0, 2, sum) == 1)) if (any(h.lvl)) h.ddf[j] <- min(h.nlevels[h.lvl]) } } #return(list(s.ddf, g.ddf, h.ddf)) ddf <- pmin(s.ddf, g.ddf, h.ddf) ddf <- ddf - p } names(ddf) <- colnames(X) } else { if (is.numeric(dfs)) dfs <- "contain" if (dfs == "residual") ddf <- k-p if (dfs == "contain") { if (!is.null(mf.s)) ddf <- length(unique(mf.s)) if (!is.null(mf.g)) ddf <- length(unique(mf.g)) if (!is.null(mf.h)) ddf <- length(unique(mf.h)) ddf <- ddf - p } } ddf[ddf < 1] <- 1 
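   ### illustrative (hypothetical) example of the containment logic above: with k = 20 estimates
   ### nested within 5 studies (the outer grouping variable), a coefficient that is constant
   ### within studies gets its denominator df from the number of studies (5) minus p, rather
   ### than from k - p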
return(ddf) } ############################################################################ metafor/R/matreg.r0000644000176200001440000001553614045601257013570 0ustar liggesusersmatreg <- function(y, x, R, n, V, cov=FALSE, means, ztor=FALSE, nearPD=FALSE, level=95, digits) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (missing(digits)) digits <- 4 level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) if (missing(R)) stop(mstyle$stop("Must specify 'R' argument.")) if (nrow(R) != ncol(R)) stop(mstyle$stop("Argument 'R' must be a square matrix.")) p <- nrow(R) y <- round(y) if (length(y) != 1L) stop(mstyle$stop("Argument 'y' should be a single index.")) if (y < 1 || y > p) stop(mstyle$stop("Index 'y' must be >= 1 or <= ", p, ".")) if (missing(x)) x <- seq_len(p)[-y] x <- round(x) if (anyDuplicated(x)) stop(mstyle$stop("Argument 'x' should not contain duplicated elements.")) if (any(x < 1 | x > p)) stop(mstyle$stop("Indices in 'x' must be >= 1 or <= ", p, ".")) if (y %in% x) stop(mstyle$stop("Index 'y' should not be an element of 'x'.")) if (missing(V)) V <- NULL if (cov && !is.null(V)) stop(mstyle$stop("Cannot use a covariance matrix as input when specifying 'V'.")) if (cov && ztor) stop(mstyle$stop("Cannot use a covariance matrix as input when 'ztor=TRUE'.")) if (is.null(V) && missing(n)) stop(mstyle$stop("Either 'V' or 'n' must be specified.")) if (!is.null(V) && !missing(n)) stop(mstyle$stop("Either 'V' or 'n' must be specified, not both.")) m <- length(x) ############################################################################ R[upper.tri(R)] <- t(R)[upper.tri(R)] if (ztor) { if (!is.null(V)) { zij <- R[lower.tri(R)] Dmat <- diag(2 / (cosh(2*zij) + 1), nrow=length(zij), ncol=length(zij), names=FALSE) V <- Dmat %*% V %*% Dmat } R <- tanh(R) diag(R) <- 1 } if (cov) { S <- R R <- cov2cor(R) sdy <- sqrt(diag(S)[y]) sdx <- sqrt(diag(S)[x]) } else { if (any(abs(R) > 1, na.rm=TRUE)) stop(mstyle$stop("Argument 'R' must be a correlation matrix, but contains values outside [-1,1].")) diag(R) <- 1 } ############################################################################ Rxy <- R[x, y, drop=FALSE] Rxx <- R[x, x, drop=FALSE] #invRxx <- solve(Rxx) invRxx <- try(chol2inv(chol(Rxx)), silent=TRUE) if (inherits(invRxx, "try-error")) { if (nearPD) { message(mstyle$message("Cannot invert R[x,x] matrix. Using nearPD(). 
Treat results with caution.")) Rxx <- as.matrix(nearPD(Rxx, corr=TRUE)$mat) } else { stop(mstyle$stop("Cannot invert R[x,x] matrix.")) } invRxx <- try(chol2inv(chol(Rxx)), silent=TRUE) if (inherits(invRxx, "try-error")) stop(mstyle$stop("Still cannot invert R[x,x] matrix.")) } b <- invRxx %*% Rxy if (!is.null(rownames(Rxx))) { rownames(b) <- rownames(Rxx) } else { rownames(b) <- x } colnames(b) <- NULL ############################################################################ if (is.null(V)) { if (length(n) != 1L) stop(mstyle$stop("Argument 'n' should be a single number.")) df <- n - m - ifelse(cov, 1, 0) if (df <= 0) stop(mstyle$stop("Cannot fit model when 'n' is equal to or less than the number of regression coefficients.")) sse <- 1 - c(t(b) %*% Rxy) mse <- sse / df vb <- mse * invRxx R2 <- 1 - sse R2adj <- 1 - (1 - R2) * ((n-1) / df) F <- c(value = (R2 / m) / mse, df1=m, df2=df) Fp <- pf(F[[1]], df1=m, df2=df, lower.tail=FALSE) if (cov) { mult <- sdy / sdx b <- b * mult mse <- sdy^2 * (n-1) * (1 - R2) / df if (missing(means)) { means <- rep(0,p) has.means <- FALSE } else { if (length(means) != p) stop(mstyle$stop(paste0("Length of 'means' (", length(means), ") does not match the dimensions of 'R' (", p, "x", p, ")."))) has.means <- TRUE } b <- rbind(means[y] - means[x] %*% b, b) rownames(b)[1] <- "intrcpt" XtX <- (n-1) * bldiag(0,S[x,x]) + n * tcrossprod(c(1,means[x])) invXtX <- try(suppressWarnings(chol2inv(chol(XtX))), silent=TRUE) if (inherits(invXtX, "try-error")) { vb <- matrix(NA, nrow=(m+1), ncol=(m+1)) warning(mstyle$warning("Cannot obtain var-cov matrix of the regression coefficients."), call.=FALSE) } else { mse <- sdy^2 * (n-1) * (1 - R2) / df vb <- mse * invXtX } if (!has.means) { b[1,] <- NA vb[1,] <- NA vb[,1] <- NA } } rownames(vb) <- colnames(vb) <- rownames(b) se <- sqrt(diag(vb)) tval <- c(b / se) pval <- 2*pt(abs(tval), df=df, lower.tail=FALSE) crit <- qt(level/2, df=df, lower.tail=FALSE) ci.lb <- c(b - crit * se) ci.ub <- c(b + crit * se) res <- list(tab = data.frame(beta=b, se=se, tval=tval, df=df, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub), vb=vb, R2=R2, R2adj=R2adj, F=F, Fp=Fp, digits=digits, test="t") } else { if (nrow(V) != ncol(V)) stop(mstyle$stop("Argument 'V' must be a square matrix.")) s <- p*(p-1)/2 if (nrow(V) != s) stop(mstyle$stop(paste0("Dimensions of 'V' (", nrow(V), "x", ncol(V), ") do not match the number of elements in 'R' (", s, ")."))) V[upper.tri(V)] <- t(V)[upper.tri(V)] U <- matrix(NA, nrow=p, ncol=p) U[lower.tri(U)] <- seq_len(s) U[upper.tri(U)] <- t(U)[upper.tri(U)] Uxx <- U[x, x, drop=FALSE] Uxy <- U[x, y, drop=FALSE] uxx <- unique(c(na.omit(c(Uxx)))) uxy <- c(Uxy) A <- matrix(0, nrow=m, ncol=s) for (a in 1:ncol(A)) { if (a %in% uxx) { pos <- c(which(a == Uxx, arr.ind=TRUE)) J <- matrix(0, nrow=m, ncol=m) J[pos[1],pos[2]] <- J[pos[2],pos[1]] <- 1 A[,a] <- - invRxx %*% J %*% invRxx %*% Rxy } if (a %in% uxy) { pos <- c(which(a == Uxy, arr.ind=TRUE)) A[,a] <- invRxx[,pos[1]] } } vb <- A %*% V %*% t(A) se <- sqrt(diag(vb)) zval <- c(b / se) pval <- 2*pnorm(abs(zval), lower.tail=FALSE) crit <- qnorm(level/2, lower.tail=FALSE) ci.lb <- c(b - crit * se) ci.ub <- c(b + crit * se) QM <- try(t(b) %*% chol2inv(chol(vb)) %*% b, silent=TRUE) if (inherits(QM, "try-error")) QM <- NA QMp <- pchisq(QM, df=m, lower.tail=FALSE) R2 <- c(t(b) %*% Rxy) rownames(vb) <- colnames(vb) <- rownames(b) res <- list(tab = data.frame(beta=b, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub), vb=vb, R2=R2, digits=digits, test="z") } class(res) <- c("matreg") return(res) 
} metafor/R/methods.escalc.r0000644000176200001440000001165413702016525015177 0ustar liggesusers############################################################################ "[.escalc" <- function(x, i, ...) { dat <- NextMethod("[") ### add measure attribute back to each yi variable (if that variable is still part of dat) yi.names <- attr(x, "yi.names") yi.names <- yi.names[is.element(yi.names, names(dat))] for (l in seq_along(yi.names)) { #eval(parse(text=paste0("attr(dat$", yi.names[l], ", 'measure') <- attr(x$", yi.names[l], ", 'measure')"))) attr(dat[[yi.names[l]]], "measure") <- attr(x[[yi.names[l]]], "measure") ### if selecting rows, also subset ni and slab attributes and add them back to each yi variable if (!missing(i)) { attr(dat[[yi.names[l]]], "ni") <- attr(x[[yi.names[l]]], "ni")[i] attr(dat[[yi.names[l]]], "slab") <- attr(x[[yi.names[l]]], "slab")[i] } } ### add var.names and out.names attributes back to object (but only if they exist and only keep variables still in the dataset) all.names <- c("yi.names", "vi.names", "sei.names", "zi.names", "pval.names", "ci.lb.names", "ci.ub.names") for (l in seq_along(all.names)) { if (any(is.element(attr(x, all.names[l]), names(dat)))) ### check if any of the variables still exist in the dataset attr(dat, all.names[l]) <- attr(x, all.names[l])[is.element(attr(x, all.names[l]), names(dat))] } ### add digits attribute back to object (but not to vectors) if (!is.null(attr(x, "digits")) && !is.null(dim(dat))) attr(dat, "digits") <- attr(x, "digits") return(dat) } ############################################################################ cbind.escalc <- function (..., deparse.level=1) { dat <- data.frame(..., check.names = FALSE) allargs <- list(...) ### for each element, extract the 'var.names' and 'out.names' attributes and add entire set back to the object yi.names <- NULL vi.names <- NULL sei.names <- NULL zi.names <- NULL pval.names <- NULL ci.lb.names <- NULL ci.ub.names <- NULL for (arg in allargs) { yi.names <- c(attr(arg, "yi.names"), yi.names) vi.names <- c(attr(arg, "vi.names"), vi.names) sei.names <- c(attr(arg, "sei.names"), sei.names) zi.names <- c(attr(arg, "zi.names"), zi.names) pval.names <- c(attr(arg, "pval.names"), pval.names) ci.lb.names <- c(attr(arg, "ci.lb.names"), ci.lb.names) ci.ub.names <- c(attr(arg, "ci.ub.names"), ci.ub.names) } ### but only keep unique variable names attr(dat, "yi.names") <- unique(yi.names) attr(dat, "vi.names") <- unique(vi.names) attr(dat, "sei.names") <- unique(sei.names) attr(dat, "zi.names") <- unique(zi.names) attr(dat, "pval.names") <- unique(pval.names) attr(dat, "ci.lb.names") <- unique(ci.lb.names) attr(dat, "ci.ub.names") <- unique(ci.ub.names) ### add 'digits' attribute back (use the values from first element) attr(dat, "digits") <- attr(arg[1], "digits") class(dat) <- c("escalc", "data.frame") return(dat) } ############################################################################ rbind.escalc <- function (..., deparse.level=1) { dat <- rbind.data.frame(..., deparse.level = deparse.level) allargs <- list(...) 
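   ### for each 'yi' variable still present in the combined object, the 'ni' and 'slab'
   ### attributes from the individual objects are combined below and added back, but only if
   ### they are available for all objects (otherwise the attribute is dropped, since it would
   ### not have the right length)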
yi.names <- attr(dat, "yi.names") yi.names <- yi.names[is.element(yi.names, names(dat))] for (i in seq_along(yi.names)) { ### get position (column number) of the 'yi' variable (in the first argument) #yi.pos <- which(names(allargs[[1]]) == yi.names[i]) ### get position (column number) of the 'yi' variable yi.pos <- sapply(allargs, function(x) which(names(x) == yi.names[i])[1]) yi.pos <- na.omit(yi.pos)[1] ### just in case if (length(yi.pos) == 0L) next ### get 'ni' attribute from all arguments (but only if argument has 'yi' variable) ni <- lapply(allargs, function(x) {if (isTRUE(names(x)[yi.pos] == yi.names[i])) attr(x[[yi.pos]], "ni")}) ### if none of them are missing, then combine and add back to variable ### otherwise remove 'ni' attribute, since it won't be of the right length if (all(sapply(ni, function(x) !is.null(x)))) { attr(dat[[yi.pos]], "ni") <- unlist(ni) } else { attr(dat[[yi.pos]], "ni") <- NULL } ### get 'slab' attribute from all arguments (but only if argument has 'yi' variable) slab <- lapply(allargs, function(x) {if (isTRUE(names(x)[yi.pos] == yi.names[i])) attr(x[[yi.pos]], "slab")}) ### if none of them are missing, then combine and add back to variable (and make sure they are unique) ### otherwise remove 'slab' attribute, since it won't be of the right length if (all(sapply(slab, function(x) !is.null(x)))) { attr(dat[[yi.pos]], "slab") <- .make.unique(unlist(slab)) } else { attr(dat[[yi.pos]], "slab") <- NULL } } return(dat) } ############################################################################ metafor/R/ranktest.rma.r0000644000176200001440000000201613770376412014715 0ustar liggesusersranktest.rma <- function(x, digits, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("robust.rma", "rma.uni.selmodel")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ddd <- list(...) 
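   ### an 'exact' argument can be passed via ... and is handed on to cor.test() below to control
   ### whether an exact p-value for Kendall's tau is computed (defaults to TRUE if unspecified)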
.chkdots(ddd, c("exact")) if (is.null(ddd$exact)) { exact <- TRUE } else { exact <- ddd$exact } ######################################################################### yi <- x$yi vi <- x$vi res <- rma.uni(yi, vi, method="FE") beta <- c(res$beta) vb <- c(res$vb) vi.star <- vi - vb yi.star <- (yi - beta) / sqrt(vi.star) res <- cor.test(yi.star, vi, method="kendall", exact=exact) pval <- res$p.value tau <- res$estimate res <- list(tau=tau, pval=pval, digits=digits) class(res) <- "ranktest" return(res) } metafor/R/misc.func.hidden.escalc.r0000644000176200001440000002310314044005015016633 0ustar liggesusers############################################################################ ### c(m) calculation function for bias correction of SMDs (mi = n1i + n2i - 2) or SMCC/SMCRs (mi = ni - 1) .cmicalc <- function(mi) { ### this can overflow if mi is 'large' (on my machine, if mi >= 344) #cmi <- gamma(mi/2)/(sqrt(mi/2)*gamma((mi-1)/2)) ### catch those cases and apply the approximate formula (which is accurate then) #is.na <- is.na(cmi) #cmi[is.na] <- 1 - 3/(4*mi[is.na] - 1) ### this avoids the problem with overflow altogether cmi <- ifelse(mi <= 1, NA, exp(lgamma(mi/2) - log(sqrt(mi/2)) - lgamma((mi-1)/2))) return(cmi) } ############################################################################ ### function to compute the tetrachoric correlation coefficient and its sampling variance .rtet <- function(ai, bi, ci, di, maxcor=.9999) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (!requireNamespace("mvtnorm", quietly=TRUE)) stop(mstyle$stop("Please install the 'mvtnorm' package to compute this measure.")) fn <- function(par, ai, bi, ci, di, maxcor, fixcut=FALSE) { rho <- par[1] cut.row <- par[2] cut.col <- par[3] ### truncate rho values outside of specified bounds if (abs(rho) > maxcor) rho <- sign(rho) * maxcor ### to substitute fixed cut values if (fixcut) { cut.row <- qnorm((ai+bi)/ni) cut.col <- qnorm((ai+ci)/ni) } # │ ci | di # ci = lo X and hi Y di = hi X and hi Y # var Y │----+---- # # │ ai | bi # ai = lo X and lo Y bi = hi X and lo Y # ┼───────── # var X # # lo hi # +----+----+ # lo | ai | bi | # +----+----+ var Y # hi | ci | di | # +----+----+ # var X R <- matrix(c(1,rho,rho,1), nrow=2, ncol=2) p.ai <- mvtnorm::pmvnorm(lower=c(-Inf,-Inf), upper=c(cut.col,cut.row), corr=R) p.bi <- mvtnorm::pmvnorm(lower=c(cut.col,-Inf), upper=c(+Inf,cut.row), corr=R) p.ci <- mvtnorm::pmvnorm(lower=c(-Inf,cut.row), upper=c(cut.col,+Inf), corr=R) p.di <- mvtnorm::pmvnorm(lower=c(cut.col,cut.row), upper=c(+Inf,+Inf), corr=R) ### in principle, should be able to compute these values with the following code, but this ### leads to more numerical instabilities when optimizing (possibly due to negative values) #p.y.lo <- pnorm(cut.row) #p.x.lo <- pnorm(cut.col) #p.ai <- mvtnorm::pmvnorm(lower=c(-Inf,-Inf), upper=c(cut.col,cut.row), corr=R) #p.bi <- p.y.lo - p.ai #p.ci <- p.x.lo - p.ai #p.di <- 1 - p.ai - p.bi - p.ci if (any(p.ai <= 0 || p.bi <= 0 || p.ci <= 0 || p.di <= 0)) { ll <- -Inf } else { ll <- ai*log(p.ai) + bi*log(p.bi) + ci*log(p.ci) + di*log(p.di) } return(-ll) } ni <- ai + bi + ci + di ### if one of the margins is equal to zero, then r_tet could in principle be equal to any value, ### but we define it here to be zero (presuming independence until evidence of dependence is found) ### but with infinite variance if ((ai + bi) == 0L || (ci + di) == 0L || (ai + ci) == 0L || (bi + di) == 0L) return(list(yi=0, vi=Inf)) ### if bi and ci is zero, then r_tet must be +1 with zero variance if (bi == 0L && ci == 
0L) return(list(yi=1, vi=0)) ### if ai and di is zero, then r_tet must be -1 with zero variance if (ai == 0L && di == 0L) return(list(yi=-1, vi=0)) ### cases where only one cell is equal to zero are handled further below ### in all other cases, first optimize over rho with cut values set to sample values ### use suppressWarnings() to suppress "NA/Inf replaced by maximum positive value" warnings res <- try(suppressWarnings(optimize(fn, interval=c(-1,1), ai=ai, bi=bi, ci=ci, di=di, maxcor=maxcor, fixcut=TRUE)), silent=TRUE) ### check for non-convergence if (inherits(res, "try-error")) { warning(mstyle$warning("Could not estimate tetrachoric correlation coefficient."), call.=FALSE) return(list(yi=NA, vi=NA)) } ### then use the value as the starting point and maximize over rho and the cut values ### (Nelder-Mead seems to do fine here; using L-BFGS-B doesn't seems to improve on this) res <- try(optim(par=c(res$minimum,qnorm((ai+bi)/ni),qnorm((ai+ci)/ni)), fn, ai=ai, bi=bi, ci=ci, di=di, maxcor=maxcor, fixcut=FALSE, hessian=TRUE), silent=TRUE) #res <- try(optim(par=c(res$minimum,qnorm((ai+bi)/ni),qnorm((ai+ci)/ni)), fn, method="L-BFGS-B", lower=c(-1,-Inf,-Inf), upper=c(1,Inf,Inf), ai=ai, bi=bi, ci=ci, di=di, maxcor=maxcor, fixcut=FALSE, hessian=TRUE), silent=TRUE) ### check for non-convergence if (inherits(res, "try-error")) { warning(mstyle$warning("Could not estimate tetrachoric correlation coefficient."), call.=FALSE) return(list(yi=NA, vi=NA)) } ### take inverse of hessian and extract variance for estimate ### (using hessian() seems to lead to more problems, so stick with hessian from optim()) vi <- try(chol2inv(chol(res$hessian))[1,1], silent=TRUE) #res$hessian <- try(chol2inv(chol(numDeriv::hessian(fn, x=res$par, ai=ai, bi=bi, ci=ci, di=di, maxcor=maxcor, fixcut=FALSE))), silent=TRUE) ### check for problems with computing the inverse if (inherits(vi, "try-error")) { warning(mstyle$warning("Could not estimate sampling variance of tetrachoric correlation coefficient."), call.=FALSE) vi <- NA } ### extract estimate yi <- res$par[1] ### but if bi or ci is zero, then r_tet must be +1 if (bi == 0 || ci == 0) yi <- 1 ### but if ai or di is zero, then r_tet must be -1 if (ai == 0 || di == 0) yi <- -1 ### note: what is the right variance when there is one zero cell? ### vi as estimated gets smaller as the table becomes more and more like ### a table with 0 diagonal/off-diagonal, which intuitively makes sense ### return estimate and sampling variance (and SE) return(list(yi=yi, vi=vi, sei=sqrt(vi))) ### Could consider implementing the Fisher scoring algorithm; first derivatives and ### elements of the information matrix are given in Tallis (1962). Could also consider ### estimating the variance from the inverse of the information matrix. But constructing ### the information matrix takes a bit of extra work and it is not clear to me how to ### handle estimated cell probabilities that go to zero here. 
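### illustrative check of this helper (hypothetical 2x2 cell counts, chosen only for demonstration;
### requires the 'mvtnorm' package to be installed):
#.rtet(ai=30, bi=10, ci=15, di=25) ### returns a list with the estimate (yi), its sampling variance (vi), and SE (sei)
#.rtet(ai=30, bi=0, ci=15, di=25) ### with bi (or ci) equal to zero, the estimate is forced to +1 as handled above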
} ############################################################################ ### function to calculate the Gaussian hypergeometric (Hypergeometric2F1) function .Fcalc <- function(a, b, g, x) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (!requireNamespace("gsl", quietly=TRUE)) stop(mstyle$stop("Please install the 'gsl' package to use measure='UCOR'.")) k.g <- length(g) k.x <- length(x) k <- max(k.g, k.x) res <- rep(NA_real_, k) if (k.g == 1) g <- rep(g, k) if (k.x == 1) x <- rep(x, k) if (length(g) != length(x)) stop(mstyle$stop("Length of 'g' and 'x' arguments is not the same.")) for (i in seq_len(k)) { if (!is.na(g[i]) && !is.na(x[i]) && g[i] > (a+b)) { res[i] <- gsl::hyperg_2F1(a, b, g[i], x[i]) } else { res[i] <- NA } } return(res) } ############################################################################ ### pdf of SMD (with or without bias correction) .dsmd <- function(x, n1, n2, theta, correct=TRUE, warn=FALSE) { nt <- n1 * n2 / (n1 + n2) m <- n1 + n2 - 2 if (correct) { cm <- .cmicalc(m) } else { cm <- 1 } if (warn) { res <- dt(x * sqrt(nt) / cm, df = m, ncp = sqrt(nt) * theta) * sqrt(nt) / cm } else { res <- suppressWarnings(dt(x * sqrt(nt) / cm, df = m, ncp = sqrt(nt) * theta) * sqrt(nt) / cm) } return(res) } #integrate(function(x) .dsmd(x, n1=4, n2=4, theta=.5), lower=-Inf, upper=Inf) #integrate(function(x) x*.dsmd(x, n1=4, n2=4, theta=.5), lower=-Inf, upper=Inf) ### pdf of COR .dcor <- function(x, n, rho) { x[x < -1] <- NA x[x > 1] <- NA ### only accurate for n >= 5 n[n <= 4] <- NA ### calculate density res <- exp(log(n-2) + lgamma(n-1) + (n-1)/2 * log(1 - rho^2) + (n-4)/2 * log(1 - x^2) - 1/2 * log(2*base::pi) - lgamma(n-1/2) - (n-3/2) * log(1 - rho*x)) * .Fcalc(1/2, 1/2, n-1/2, (rho*x + 1)/2) ### make sure that density is 0 for r = +-1 res[abs(x) == 1] <- 0 return(res) } #integrate(function(x) .dcor(x, n=5, rho=.8), lower=-1, upper=1) #integrate(function(x) x*.dcor(x, n=5, rho=.8), lower=-1, upper=1) ### should not be rho due to bias! #integrate(function(x) x*.Fcalc(1/2, 1/2, (5-2)/2, 1-x^2)*.dcor(x, n=5, rho=.8), lower=-1, upper=1) ### should be ~rho ### pdf of ZCOR .dzcor <- function(x, n, rho, zrho) { ### only accurate for n >= 5 n[n <= 4] <- NA ### if rho is missing, then back-transform zrho value(s) if (missing(rho)) rho <- tanh(zrho) ### copy x to z and back-transform z values (so x = correlation) z <- x x <- tanh(z) ### calculate density res <- exp(log(n-2) + lgamma(n-1) + (n-1)/2 * log(1 - rho^2) + (n-4)/2 * log(1 - x^2) - 1/2 * log(2*base::pi) - lgamma(n-1/2) - (n-3/2) * log(1 - rho*x) + log(4) + 2*z - 2*log(exp(2*z) + 1)) * .Fcalc(1/2, 1/2, n-1/2, (rho*x + 1)/2) ### make sure that density is 0 for r = +-1 res[abs(x) == 1] <- 0 return(res) } #integrate(function(x) .dzcor(x, n=5, rho=.8), lower=-100, upper=100) #integrate(function(x) x*.dzcor(x, n=5, rho=.8), lower=-100, upper=100) ############################################################################ metafor/R/gosh.rma.r0000644000176200001440000002002614046526721014020 0ustar liggesusersgosh.rma <- function(x, subsets, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, ...) 
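### illustrative usage sketch (hypothetical objects; 'dat' is assumed to be a data frame with yi/vi columns):
#res <- rma(yi, vi, data=dat)
#sav <- gosh(res, subsets=1000, seed=1234)
#plot(sav)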
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("rma.glmm", "rma.mv", "robust.rma", "rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } ddd <- list(...) .chkdots(ddd, c("seed", "time", "LB")) if (.isTRUE(ddd$time)) time.start <- proc.time() ### total number of possible subsets N.tot <- sum(choose(x$k, x$p:x$k)) ### if 'subsets' is missing, include all possible subsets if N.tot is <= 10^6 ### and otherwise include 10^6 random subsets; if the user specifies 'subsets' ### and N.tot <= subsets, then again include all possible subsets if (missing(subsets)) { if (N.tot <= 10^6) { exact <- TRUE } else { exact <- FALSE N.tot <- 10^6 } } else { if (N.tot <= subsets) { exact <- TRUE } else { exact <- FALSE N.tot <- subsets } } if (N.tot == Inf) stop(mstyle$stop("Too many iterations required for all combinations.")) if (progbar) message(paste0("Fitting ", N.tot, " models (based on ", ifelse(exact, "all possible", "random"), " subsets).")) ######################################################################### ### generate inclusion matrix (either exact or at random) if (exact) { incl <- as.matrix(expand.grid(replicate(x$k, list(c(FALSE,TRUE))), KEEP.OUT.ATTRS=FALSE)) incl <- incl[rowSums(incl) >= x$p,,drop=FALSE] ### slower, but does not generate rows that need to be filtered out (as above) #incl <- lapply(x$p:x$k, function(m) apply(combn(x$k,m), 2, function(l) 1:x$k %in% l)) #incl <- t(do.call(cbind, incl)) } else { if (!is.null(ddd$seed)) set.seed(ddd$seed) j <- sample(x$p:x$k, N.tot, replace=TRUE, prob=dbinom(x$p:x$k, x$k, 0.5)) incl <- t(sapply(j, function(m) seq_len(x$k) %in% sample(x$k, m))) } colnames(incl) <- seq_len(x$k) ### check if model is a standard FE/EE/CE model or a standard RE model with the DL estimators model <- 0L if (is.element(x$method, c("FE","EE","CE")) && x$weighted && is.null(x$weights) && x$int.only) model <- 1L if (x$method=="DL" && x$weighted && is.null(x$weights) && x$int.only) model <- 2L ######################################################################### outlist <- "beta=beta, k=k, QE=QE, I2=I2, H2=H2, tau2=tau2, coef.na=coef.na" if (parallel == "no") { if (inherits(x, "rma.uni")) res <- pbapply::pbapply(incl, 1, .profile.rma.uni, obj=x, parallel=parallel, subset=TRUE, model=model, outlist=outlist) if (inherits(x, "rma.mh")) res <- pbapply::pbapply(incl, 1, .profile.rma.mh, obj=x, parallel=parallel, subset=TRUE, outlist=outlist) if (inherits(x, "rma.peto")) res <- pbapply::pbapply(incl, 1, .profile.rma.peto, obj=x, parallel=parallel, subset=TRUE, outlist=outlist) } if (parallel == "multicore") { if (inherits(x, "rma.uni")) res <- pbapply::pbapply(incl, 1, .profile.rma.uni, obj=x, 
parallel=parallel, subset=TRUE, model=model, outlist=outlist, cl=ncpus) #res <- parallel::mclapply(asplit(incl, 1), .profile.rma.uni, obj=x, mc.cores=ncpus, parallel=parallel, subset=TRUE, model=model, outlist=outlist) if (inherits(x, "rma.mh")) res <- pbapply::pbapply(incl, 1, .profile.rma.mh, obj=x, parallel=parallel, subset=TRUE, outlist=outlist, cl=ncpus) #res <- parallel::mclapply(asplit(incl, 1), .profile.rma.mh, obj=x, mc.cores=ncpus, parallel=parallel, subset=TRUE, outlist=outlist) if (inherits(x, "rma.peto")) res <- pbapply::pbapply(incl, 1, .profile.rma.peto, obj=x, parallel=parallel, subset=TRUE, outlist=outlist, cl=ncpus) #res <- parallel::mclapply(asplit(incl, 1), .profile.rma.peto, obj=x, mc.cores=ncpus, parallel=parallel, subset=TRUE, outlist=outlist) } if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (inherits(x, "rma.uni")) { if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, asplit(incl, 1), .profile.rma.uni, obj=x, parallel=parallel, subset=TRUE, model=model, outlist=outlist) } else { res <- pbapply::pbapply(incl, 1, .profile.rma.uni, obj=x, parallel=parallel, subset=TRUE, model=model, outlist=outlist, cl=cl) #res <- parallel::parLapply(cl, asplit(incl, 1), .profile.rma.uni, obj=x, parallel=parallel, subset=TRUE, model=model, outlist=outlist) } } if (inherits(x, "rma.mh")) { if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, asplit(incl, 1), .profile.rma.mh, obj=x, parallel=parallel, subset=TRUE, outlist=outlist) } else { res <- pbapply::pbapply(incl, 1, .profile.rma.mh, obj=x, parallel=parallel, subset=TRUE, outlist=outlist, cl=cl) #res <- parallel::parLapply(cl, asplit(incl, 1), .profile.rma.mh, obj=x, parallel=parallel, subset=TRUE, outlist=outlist) } } if (inherits(x, "rma.peto")) { if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, asplit(incl, 1), .profile.rma.peto, obj=x, parallel=parallel, subset=TRUE, outlist=outlist) } else { res <- pbapply::pbapply(incl, 1, .profile.rma.peto, obj=x, parallel=parallel, subset=TRUE, outlist=outlist, cl=cl) #res <- parallel::parLapply(cl, asplit(incl, 1), .profile.rma.peto, obj=x, parallel=parallel, subset=TRUE, outlist=outlist) } } } beta <- do.call("rbind", lapply(res, function(x) if (inherits(x, "try-error") || any(x$coef.na)) NA else t(x$beta))) het <- do.call("rbind", lapply(res, function(x) if (inherits(x, "try-error") || any(x$coef.na)) NA else c(x$k, x$QE, x$I2, x$H2, x$tau2))) if (all(is.na(het))) stop(mstyle$stop("All model fits failed.")) ######################################################################### ### in case a model fit was skipped, this guarantees that we still get ### a value for k in the first column of the het matrix for each model het[,1] <- rowSums(incl) ### set column names colnames(het) <- c("k", "QE", "I2", "H2", "tau2") if (x$int.only) { colnames(beta) <- "estimate" } else { colnames(beta) <- colnames(x$X) } ### combine het and beta objects and order incl and res by k res <- data.frame(het, beta) incl <- incl[order(res$k),,drop=FALSE] res <- res[order(res$k),,drop=FALSE] ### fix rownames rownames(res) <- seq_len(nrow(res)) rownames(incl) <- seq_len(nrow(incl)) ### was model fitted successfully / all values are not NA? 
fit <- apply(res, 1, function(x) all(!is.na(x))) ### print processing time if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } ### list to return out <- list(res=res, incl=incl, fit=fit, k=x$k, int.only=x$int.only, method=x$method, measure=x$measure, digits=x$digits) class(out) <- "gosh.rma" return(out) } metafor/R/tes.default.r0000644000176200001440000002637714056420302014524 0ustar liggesuserstes.default <- function(x, vi, sei, subset, H0=0, alternative="two.sided", alpha=.05, theta, tau2, test, tes.alternative="greater", progbar=TRUE, tes.alpha=.10, digits, ...) { # allow multiple alpha values? plot for pval as a function of alpha? ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) alternative <- match.arg(alternative, c("two.sided", "greater", "less")) tes.alternative <- match.arg(tes.alternative, c("two.sided", "greater", "less")) if (missing(subset)) subset <- NULL if (alpha <= 0 || alpha >= 1) stop(mstyle$stop("Value of 'alpha' needs to be > 0 and < 1.")) if (tes.alpha <= 0 || tes.alpha >= 1) stop(mstyle$stop("Value of 'tes.alpha' needs to be > 0 and < 1.")) if (alternative == "two.sided") crit <- qnorm(alpha/2, lower.tail=FALSE) if (alternative == "greater") crit <- qnorm(alpha, lower.tail=FALSE) if (alternative == "less") crit <- qnorm(alpha, lower.tail=TRUE) ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ddd <- list(...) .chkdots(ddd, c("correct", "rel.tol", "subdivisions", "tau2.lb", "find.lim")) if (!is.null(ddd$correct)) { correct <- ddd$correct } else { correct <- FALSE } if (!is.null(ddd$rel.tol)) { rel.tol <- ddd$rel.tol } else { rel.tol <- .Machine$double.eps^0.25 } if (!is.null(ddd$subdivisions)) { subdivisions <- ddd$subdivisions } else { subdivisions <- 100L } if (!is.null(ddd$tau2.lb)) { tau2.lb <- ddd$tau2.lb } else { #tau2.lb <- 0.0001 tau2.lb <- 0 } if (!is.null(ddd$find.lim)) { find.lim <- ddd$find.lim } else { find.lim <- TRUE } ######################################################################### ### check if sampling variances and/or standard errors are available if (missing(vi)) vi <- NULL if (missing(sei)) sei <- NULL if (is.null(vi)) { if (!is.null(sei)) vi <- sei^2 } if (is.null(vi)) stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) yi <- x ### check length of yi and vi if (length(yi) != length(vi)) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same.")) k.f <- length(yi) ### checks on H0 if (length(H0) != 1L) stop(mstyle$stop("Argument 'H0' must specify a single value.")) ### checks on theta if (missing(theta) || is.null(theta)) { single.theta <- TRUE est.theta <- TRUE theta <- rep(0, k.f) } else { if (length(theta) == 1L) { single.theta <- TRUE est.theta <- FALSE theta.1 <- theta theta <- rep(theta, k.f) } else { single.theta <- FALSE est.theta <- FALSE } if (length(theta) != k.f) stop(mstyle$stop("Length of 'theta' and 'yi' is not the same.")) } ######################################################################### ### if a subset of studies is specified if (!is.null(subset)) { subset <- .setnafalse(subset, k=length(yi)) yi <- yi[subset] vi <- vi[subset] theta <- theta[subset] } ### check for NAs and act accordingly has.na <- is.na(yi) | 
is.na(vi) | is.na(theta) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] vi <- vi[not.na] theta <- theta[not.na] warning(mstyle$warning("Studies with NAs omitted from test."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in data.")) } ######################################################################### k <- length(yi) if (k == 0L) stop(mstyle$stop("Stopped because k = 0.")) sei <- sqrt(vi) zi <- (yi - H0) / sei if (missing(tau2) || is.null(tau2) || tau2 <= tau2.lb) { wi <- 1 / vi } else { wi <- 1 / (vi + tau2) } if (est.theta) { theta.1 <- .wmean(yi, wi) theta <- rep(theta.1, k) } if (missing(tau2) || is.null(tau2) || tau2 <= tau2.lb) { if (alternative == "two.sided") pow <- pnorm(crit, mean=(theta-H0)/sei, 1, lower.tail=FALSE) + pnorm(-crit, mean=(theta-H0)/sei, 1, lower.tail=TRUE) if (alternative == "greater") pow <- pnorm(crit, mean=(theta-H0)/sei, 1, lower.tail=FALSE) if (alternative == "less") pow <- pnorm(crit, mean=(theta-H0)/sei, 1, lower.tail=TRUE) } else { tau <- sqrt(tau2) pow <- rep(NA_real_, k) for (i in seq_len(k)) { res <- try(integrate(.tes.intfun, lower=theta[i]-5*tau, upper=theta[i]+5*tau, theta=theta[i], tau=tau, sei=sei[i], H0=H0, alternative=alternative, crit=crit, rel.tol=rel.tol, subdivisions=subdivisions, stop.on.error=FALSE), silent=TRUE) if (inherits(res, "try-error")) { stop(mstyle$stop(paste0("Could not integrate over density in study ", i, "."))) } else { pow[i] <- res$value } } } if (alternative == "two.sided") sig <- abs(zi) >= crit if (alternative == "greater") sig <- zi >= crit if (alternative == "less") sig <- zi <= crit E <- sum(pow) O <- sum(sig) if (tes.alternative == "two.sided") js <- 0:k if (tes.alternative == "greater") js <- O:k if (tes.alternative == "less") js <- 0:O if (missing(test) || is.null(test)) { tot <- sum(sapply(js, function(j) choose(k,j))) if (tot <= 10^6) { test <- "exact" } else { test <- "chi2" } } else { test <- match.arg(test, c("chi2", "binom", "exact")) } ### set defaults for progbar if (missing(progbar)) progbar <- ifelse(test == "exact", TRUE, FALSE) if (test == "chi2") { res <- suppressWarnings(prop.test(O, k, p=E/k, alternative=tes.alternative, correct=correct)) X2 <- unname(res$statistic) pval <- res$p.value } if (test == "binom") { res <- binom.test(O, k, p=E/k, alternative=tes.alternative) X2 <- NA pval <- binom.test(O, k, p=E/k, alternative=tes.alternative)$p.value } if (test == "exact") { X2 <- NA if (progbar) pbar <- pbapply::startpb(min=0, max=length(js)) prj <- rep(NA_real_, length(js)) id <- seq_len(k) for (j in 1:length(js)) { if (progbar) pbapply::setpb(pbar, j) if (js[j] == 0L) { prj[j] <- prod(1-pow) } else if (js[j] == k) { prj[j] <- prod(pow) } else { tmp <- try(suppressWarnings(sum(combn(k, js[j], FUN = function(i) { sel <- i not <- id[-i] prod(pow[sel])*prod(1-pow[not]) }))), silent=TRUE) if (inherits(tmp, "try-error")) { if (progbar) pbapply::closepb(pbar) stop(mstyle$stop(paste0("Number of combinations too large to do an exact test (use test=\"chi2\" or test=\"binomial\" instead)."))) } else { prj[j] <- tmp } } } if (progbar) pbapply::closepb(pbar) if (tes.alternative == "two.sided") pval <- sum(prj[prj <= prj[O+1] + .Machine$double.eps^0.5]) if (tes.alternative == "greater") pval <- sum(prj) if (tes.alternative == "less") pval <- sum(prj) pval[pval > 1] <- 1 } theta.lim <- NULL if (find.lim && single.theta) { if (tes.alternative == "greater") { diff.H0 <- .tes.lim(H0, yi=yi, vi=vi, H0=H0, 
alternative=alternative, alpha=alpha, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=FALSE, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb) if (diff.H0 >= 0) { theta.lim <- NA } else { if (theta.1 >= H0) { theta.lim <- try(uniroot(.tes.lim, interval=c(H0,theta.1), extendInt="upX", yi=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=FALSE, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb)$root, silent=TRUE) } else { theta.lim <- try(uniroot(.tes.lim, interval=c(theta.1,H0), extendInt="downX", yi=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=FALSE, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb)$root, silent=TRUE) } if (inherits(theta.lim, "try-error")) theta.lim <- NA } } if (tes.alternative == "less") { diff.H0 <- .tes.lim(H0, yi=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=FALSE, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb) if (diff.H0 <= 0) { theta.lim <- NA } else { if (theta.1 >= H0) { theta.lim <- try(uniroot(.tes.lim, interval=c(H0,theta.1), extendInt="downX", yi=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=FALSE, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb)$root, silent=TRUE) } else { theta.lim <- try(uniroot(.tes.lim, interval=c(theta.1,H0), extendInt="upX", yi=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=FALSE, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb)$root, silent=TRUE) } if (inherits(theta.lim, "try-error")) theta.lim <- NA } } if (tes.alternative == "two.sided") { theta.lim.lb <- tes.default(x=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, theta=theta.1, tau2=tau2, test=test, tes.alternative="greater", progbar=FALSE, tes.alpha=tes.alpha/2, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb, find.lim=TRUE)$theta.lim theta.lim.ub <- tes.default(x=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, theta=theta.1, tau2=tau2, test=test, tes.alternative="less", progbar=FALSE, tes.alpha=tes.alpha/2, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb, find.lim=TRUE)$theta.lim theta.lim <- c(theta.lim.lb, theta.lim.ub) } } if (single.theta) theta <- theta.1 res <- list(k=k, O=O, E=E, OEratio=O/E, test=test, X2=X2, pval=pval, power=pow, sig=sig, theta=theta, theta.lim=theta.lim, tes.alternative=tes.alternative, tes.alpha=tes.alpha, digits=digits) class(res) <- "tes" return(res) } metafor/R/confint.rma.uni.r0000644000176200001440000004604614046726316015326 0ustar liggesusers# What would be most consistent is this: # if method='ML/REML': profile likelihood (PL) CI (based on the ML/REML likelihood) # if method='EB/PM/PMM': Q-profile (QP) CI # if method='GENQ/GENQM': generalized Q-statistic (GENQ) CI (which also covers method='DL/HE' as special cases) # if method='SJ': method by Sidik & Jonkman (2005) (but this performs poorly, except if tau^2 is very large) # if method='HS': not sure since this is an ad-hoc estimator with no obvious underlying 
statistical principle # Also could in principle compute Wald-type CIs (but those perform poorly except when k is very large). # Too late to change how the function works (right now, type="GENQ" if method="GENQ/GENQM" and type="QP" otherwise). confint.rma.uni <- function(object, parm, level, fixed=FALSE, random=TRUE, type, digits, transf, targs, verbose=FALSE, control, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.uni", notav=c("robust.rma", "rma.ls")) if (!missing(parm)) warning(mstyle$warning("Argument 'parm' (currently) ignored."), call.=FALSE) x <- object k <- x$k p <- x$p yi <- x$yi vi <- x$vi X <- x$X Y <- cbind(yi) weights <- x$weights if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL if (missing(control)) control <- list() if (!fixed && !random) stop(mstyle$stop("At least one of the arguments 'fixed' and 'random' must be TRUE.")) level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ddd <- list(...) .chkdots(ddd, c("time", "xlim", "extint")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (!is.null(ddd$xlim)) { if (length(ddd$xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) control$tau2.min <- ddd$xlim[1] control$tau2.max <- ddd$xlim[2] } if (missing(type)) { if (x$method == "GENQ" || x$method == "GENQM") { type <- "GENQ" } else { type <- "QP" } } else { #type <- match.arg(type, c("QP", "GENQ", "PL")) if (!is.element(type, c("QP", "GENQ", "PL"))) stop(mstyle$stop("Unknown 'type' specified.")) } ######################################################################### ######################################################################### ######################################################################### if (random) { if (k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (is.element(x$method, c("FE","EE","CE"))) stop(mstyle$stop("Model does not contain a random-effects component.")) if (x$tau2.fix) stop(mstyle$stop("Model does not contain an estimated random-effects component.")) if (type == "GENQ" && !(is.element(x$method, c("GENQ","GENQM")))) stop(mstyle$stop("Model must be fitted with method=\"GENQ\" or method=\"GENQM\" to use this option.")) ###################################################################### ### set control parameters for uniroot() and possibly replace with user-defined values ### set tau2.min and tau2.max and possibly replace with user-defined values ### note: default tau2.min is smaller of 0 or tau2, since tau2 could in principle be negative ### note: default tau2.max must be larger than tau2 and tau2.min and really should be much larger (at least 100) tau2.min <- ifelse(is.null(x$control$tau2.min), min(0, x$tau2), x$control$tau2.min) tau2.max <- ifelse(is.null(x$control$tau2.max), max(100, x$tau2*10, tau2.min*10), x$control$tau2.max) ### user can in principle set non-sensical limits (i.e., tau2.min > tau2.max), but this is handled properly by the methods below con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, tau2.min=tau2.min, tau2.max=tau2.max, verbose=FALSE) con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose #return(con) ###################################################################### 
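### illustrative usage sketch from the user side (hypothetical model object 'res' fitted with rma();
### the tau2.max value is chosen only for demonstration):
#confint(res) # CI for tau^2 (and derived measures) using the default search limits
#confint(res, control=list(tau2.max=400)) # widen the upper search limit if the upper bound runs into tau2.max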
tau2.lb <- NA tau2.ub <- NA ci.null <- FALSE ### logical if CI is a null set lb.conv <- FALSE ### logical if search converged for lower bound (LB) ub.conv <- FALSE ### logical if search converged for upper bound (UB) lb.sign <- "" ### for sign in case LB must be below tau2.min ("<") or above tau2.max (">") ub.sign <- "" ### for sign in case UB must be below tau2.min ("<") or above tau2.max (">") ###################################################################### ######################## ### Q-profile method ### ######################## if (type == "QP") { if (!x$allvipos) stop(mstyle$stop("Cannot compute CI for tau^2 when there are non-positive sampling variances in the data.")) crit.u <- qchisq(level/2, k-p, lower.tail=FALSE) ### upper critical chi^2 value for df = k-p crit.l <- qchisq(level/2, k-p, lower.tail=TRUE) ### lower critical chi^2 value for df = k-p QE.tau2.max <- .QE.func(con$tau2.max, Y=Y, vi=vi, X=X, k=k, objective=0) QE.tau2.min <- .QE.func(con$tau2.min, Y=Y, vi=vi, X=X, k=k, objective=0) #dfs <- 12; curve(dchisq(x, df=dfs), from=0, to=40, ylim=c(0,.1), xlab="", ylab=""); abline(v=qchisq(c(.025, .975), df=dfs)); text(qchisq(c(.025, .975), df=dfs)+1.6, .1, c("crit.l", "crit.u")) ################################################################### ### start search for upper bound if (QE.tau2.min < crit.l) { ### if QE.tau2.min is to the left of the crit.l, then both bounds are below tau2.min tau2.lb <- con$tau2.min tau2.ub <- con$tau2.min lb.sign <- "<" ub.sign <- "<" lb.conv <- TRUE ub.conv <- TRUE ### and if tau2.min <= 0, then the CI is equal to the null set if (con$tau2.min <= 0) ci.null <- TRUE } else { if (QE.tau2.max > crit.l) { ### if QE.tau2.max is to the right of crit.l, then upper bound > tau2.max, so set tau2.ub to >tau2.max tau2.ub <- con$tau2.max ub.sign <- ">" ub.conv <- TRUE } else { ### now QE.tau2.min is to the right of crit.l and QE.tau2.max is to the left of crit.l, so upper bound can be found res <- try(uniroot(.QE.func, interval=c(con$tau2.min, con$tau2.max), tol=con$tol, maxiter=con$maxiter, Y=Y, vi=vi, X=X, k=k, objective=crit.l, verbose=verbose, digits=digits)$root, silent=TRUE) ### check if uniroot method converged if (!inherits(res, "try-error")) { tau2.ub <- res ub.conv <- TRUE } } } ### end search for upper bound ################################################################### ### start search for lower bound if (QE.tau2.max > crit.u) { ### if QE.tau2.max is to the right of the crit.u, then both bounds are above tau2.max tau2.lb <- con$tau2.max tau2.ub <- con$tau2.max lb.sign <- ">" ub.sign <- ">" lb.conv <- TRUE ub.conv <- TRUE } else { if (QE.tau2.min < crit.u) { ### if QE.tau2.min is to the left of crit.u, then lower bound < tau2.min, so set tau2.lb to 0) lb.sign <- "<" } else { ### now QE.tau2.min is to the right of crit.u and QE.tau2.max is to the left of crit.u, so lower bound can be found res <- try(uniroot(.QE.func, interval=c(con$tau2.min, con$tau2.max), tol=con$tol, maxiter=con$maxiter, Y=Y, vi=vi, X=X, k=k, objective=crit.u, verbose=verbose, digits=digits)$root, silent=TRUE) ### check if uniroot method converged if (!inherits(res, "try-error")) { tau2.lb <- res lb.conv <- TRUE } } } ### end search for lower bound ################################################################### } ###################################################################### ################### ### GENQ method ### ################### if (type == "GENQ") { if (!requireNamespace("CompQuadForm", quietly=TRUE)) stop(mstyle$stop("Please install the 
'CompQuadForm' package when method='QGEN'.")) A <- diag(weights, nrow=k, ncol=k) stXAX <- .invcalc(X=X, W=A, k=k) P <- A - A %*% X %*% stXAX %*% t(X) %*% A Q <- crossprod(Y,P) %*% Y ### note: .GENQ.func(tau2val, ..., Q=Q, level=0, getlower=TRUE) gives the area to the right of Q for a ### distribution with specified tau2val; and as we increase tau2val, so does the area to the right of Q GENQ.tau2.max <- .GENQ.func(con$tau2.max, P=P, vi=vi, Q=Q, level=0, k=k, p=p, getlower=TRUE) GENQ.tau2.min <- .GENQ.func(con$tau2.min, P=P, vi=vi, Q=Q, level=0, k=k, p=p, getlower=TRUE) ################################################################### ### start search for upper bound if (GENQ.tau2.min > 1 - level/2) { ### if GENQ.tau2.min is to the right of 1 - level/2, then both bounds are below tau2.min tau2.lb <- con$tau2.min tau2.ub <- con$tau2.min lb.sign <- "<" ub.sign <- "<" lb.conv <- TRUE ub.conv <- TRUE ### and if tau2.min = 0, then the CI is equal to the null set if (con$tau2.min <= 0) ci.null <- TRUE } else { if (GENQ.tau2.max < 1 - level/2) { ### if GENQ.tau2.max is to the left of 1 - level/2, then upper bound > tau2.max, so set tau2.ub to >tau2.max tau2.ub <- con$tau2.max ub.sign <- ">" ub.conv <- TRUE } else { ### now GENQ.tau2.min is to the left of 1 - level/2 and GENQ.tau2.max is to the right of 1 - level/2, so upper bound can be found res <- try(uniroot(.GENQ.func, c(con$tau2.min, con$tau2.max), P=P, vi=vi, Q=Q, level=level/2, k=k, p=p, getlower=FALSE, verbose=verbose, digits=digits)$root, silent=TRUE) ### check if uniroot method converged if (!inherits(res, "try-error")) { tau2.ub <- res ub.conv <- TRUE } } } ### end search for upper bound ################################################################### ### start search for lower bound if (GENQ.tau2.max < level/2) { ### if GENQ.tau2.max is to the left of level/2, then both bounds are abova tau2.max tau2.lb <- con$tau2.max tau2.ub <- con$tau2.max lb.sign <- ">" ub.sign <- ">" lb.conv <- TRUE ub.conv <- TRUE } else { if (GENQ.tau2.min > level/2) { ### if GENQ.tau2.min is to the right of level/2, then lower bound < tau2.min, so set tau2.lb to 0) lb.sign <- "<" } else { ### now GENQ.tau2.max is to the right of level/2 and GENQ.tau2.min is to the left of level/2, so lower bound can be found res <- try(uniroot(.GENQ.func, c(con$tau2.min, con$tau2.max), P=P, vi=vi, Q=Q, level=level/2, k=k, p=p, getlower=TRUE, verbose=verbose, digits=digits)$root, silent=TRUE) ### check if uniroot method converged if (!inherits(res, "try-error")) { tau2.lb <- res lb.conv <- TRUE } } } ### end search for lower bound ################################################################### } ###################################################################### ################# ### PL method ### ################# ### note: cannot actually use this at the moment if (type == "PL") { if (con$tau2.min > x$tau2) stop(mstyle$stop("Lower bound of interval to be searched must be <= actual value of component.")) if (con$tau2.max < x$tau2) stop(mstyle$stop("Upper bound of interval to be searched must be >= actual value of component.")) objective <- qchisq(1-level, df=1) ################################################################### ### start search for lower bound ### get diff value when setting component to tau2.min; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the lower bound must be below tau2.min res <- try(.profile.rma.uni(con$tau2.min, obj=x, confint=TRUE, objective=objective, verbose=verbose), 
silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (res < 0) { tau2.lb <- con$tau2.min lb.conv <- TRUE if (con$tau2.min > 0) lb.sign <- "<" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.uni, interval=c(con$tau2.min, x$tau2), tol=con$tol, maxiter=con$maxiter, extendInt="downX", obj=x, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.uni, interval=c(con$tau2.min, x$tau2), tol=con$tol, maxiter=con$maxiter, obj=x, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { tau2.lb <- res lb.conv <- TRUE } } } ### end search for lower bound ################################################################### ### start search for upper bound ### get diff value when setting component to tau2.max; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the upper bound must be above tau2.max res <- try(.profile.rma.uni(con$tau2.max, obj=x, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { tau2.ub <- con$tau2.max ub.conv <- TRUE ub.sign <- ">" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.uni, interval=c(x$tau2, con$tau2.max), tol=con$tol, maxiter=con$maxiter, extendInt="upX", obj=x, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.uni, interval=c(x$tau2, con$tau2.max), tol=con$tol, maxiter=con$maxiter, obj=x, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { tau2.ub <- res ub.conv <- TRUE } } } ### end search for upper bound ################################################################### } ###################################################################### if (!lb.conv) warning(mstyle$warning("Error in iterative search for the lower bound."), call.=FALSE) if (!ub.conv) warning(mstyle$warning("Error in iterative search for the upper bound."), call.=FALSE) #if (lb.sign == "<" && con$tau2.min > 0) # warning(mstyle$warning("Lower bound < tau2.min. Try decreasing tau2.min (via the 'control' argument)."), call.=FALSE) #if (ub.sign == ">") # warning(mstyle$warning("Upper bound > tau2.max. 
Try increasing tau2.max (via the 'control' argument)."), call.=FALSE) ###################################################################### I2.lb <- 100 * tau2.lb / (x$vt + tau2.lb) I2.ub <- 100 * tau2.ub / (x$vt + tau2.ub) H2.lb <- tau2.lb / x$vt + 1 H2.ub <- tau2.ub / x$vt + 1 tau2 <- c(x$tau2, tau2.lb, tau2.ub) tau <- sqrt(c(ifelse(x$tau2 >= 0, x$tau2, NA), ifelse(tau2.lb >= 0, tau2.lb, NA), ifelse(tau2.ub >= 0, tau2.ub, NA))) I2 <- c(x$I2, I2.lb, I2.ub) H2 <- c(x$H2, H2.lb, H2.ub) res.random <- rbind("tau^2"=tau2, "tau"=tau, "I^2(%)"=I2, "H^2"=H2) colnames(res.random) <- c("estimate", "ci.lb", "ci.ub") } ######################################################################### ######################################################################### ######################################################################### if (fixed) { if (is.element(x$test, c("knha","adhoc","t"))) { crit <- qt(level/2, df=x$ddf, lower.tail=FALSE) } else { crit <- qnorm(level/2, lower.tail=FALSE) } beta <- c(x$beta) ci.lb <- c(beta - crit * x$se) ci.ub <- c(beta + crit * x$se) if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] res.fixed <- cbind(estimate=beta, ci.lb=ci.lb, ci.ub=ci.ub) rownames(res.fixed) <- rownames(x$beta) } ######################################################################### ######################################################################### ######################################################################### res <- list() if (fixed) res$fixed <- res.fixed if (random) res$random <- res.random res$digits <- digits if (random) { res$ci.null <- ci.null res$lb.sign <- lb.sign res$ub.sign <- ub.sign res$tau2.min <- con$tau2.min } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(res) <- "confint.rma" return(res) } metafor/R/reporter.rma.uni.r0000644000176200001440000006703214046532406015521 0ustar liggesusersreporter.rma.uni <- function(x, dir, filename, format="html_document", open=TRUE, digits, forest, funnel, footnotes=FALSE, verbose=TRUE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) if (!suppressMessages(suppressWarnings(requireNamespace("rmarkdown", quietly=TRUE)))) stop(mstyle$stop("Please install the 'rmarkdown' package to use the reporter function.")) if (!is.element(x$test, c("z", "knha"))) stop(mstyle$stop("Cannot only use reporter function when test='z' or test='knha'.")) if (!x$weighted) stop(mstyle$stop("Cannot use reporter function when 'weighted=FALSE'.")) if (!is.null(x$weights)) stop(mstyle$stop("Cannot use reporter function for models with custom weights.")) if (is.null(x$tau2.fix)) stop(mstyle$stop("Cannot use reporter function for models with a fixed tau^2 value.")) if (!x$int.only) stop(mstyle$stop("Cannot currently use reporter function for models with moderators. 
This will be implemented eventually.")) if (x$k == 1) stop(mstyle$stop("Cannot use reporter function when k = 1.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } format <- match.arg(format, c("html_document", "pdf_document", "word_document")) # , "bookdown::pdf_document2")) if (format == "pdf_document" && (Sys.which("pdflatex") == "")) warning(mstyle$warning("Cannot detect pdflatex executable. Rendering the pdf is likely to fail."), call.=FALSE, immediate.=TRUE) ### set/get directory for generating the report if (missing(dir)) { dir <- normalizePath(tempdir(), winslash="/") success <- file.exists(dir) if (!success) stop(mstyle$stop("No temporary directory available for creating the report.")) } else { if (!is.character(dir)) stop(mstyle$stop("Argument 'dir' must be a character string.")) success <- file.exists(dir) if (!success) stop(mstyle$stop("Specified directory does not exist.")) } if (verbose) message(mstyle$message(paste0("\nDirectory for generating the report is: ", dir, "\n"))) ### copy references.bib and apa.csl files to directory for generating the report if (verbose) message(mstyle$message("Copying references.bib and apa.csl to report directory ...")) success <- file.copy(system.file("reporter", "references.bib", package = "metafor"), dir, overwrite=TRUE) if (!success) stop(mstyle$stop("Could not copy 'references.bib' file to report directory.")) success <- file.copy(system.file("reporter", "apa.csl", package = "metafor"), dir, overwrite=TRUE) if (!success) stop(mstyle$stop("Could not copy 'apa.csl' file to report directory.")) ### set default filenames object.name <- deparse(substitute(x)) has.object.name <- TRUE if (grepl("rma(", object.name, fixed=TRUE) || grepl("rma.uni(", object.name, fixed=TRUE)) { # check for 'reporter(rma(yi, vi))' usage has.object.name <- FALSE object.name <- "res" } if (missing(filename)) { file.rmd <- paste0("report_", object.name, ".rmd") file.obj <- paste0("report_", object.name, ".rdata") file.tex <- paste0("report_", object.name, ".tex") } else { if (!is.character(filename)) stop(mstyle$stop("Argument 'filename' must be a character string.")) file.rmd <- paste0(filename, ".rmd") file.obj <- paste0(filename, ".rdata") file.tex <- paste0(filename, ".tex") } ### process forest argument plot.forest <- TRUE args.forest <- "" if (!missing(forest)) { if (is.logical(forest)) { if (isFALSE(forest)) plot.forest <- FALSE } else { if (!is.character(forest)) stop(mstyle$stop("Argument 'forest' must be a character string.")) args.forest <- paste0(", ", forest) } } ### process funnel argument plot.funnel <- TRUE args.funnel <- "" if (!missing(funnel)) { if (is.logical(funnel)) { if (isFALSE(funnel)) plot.funnel <- FALSE } else { if (!is.character(funnel)) stop(mstyle$stop("Argument 'funnel' must be a character string.")) args.funnel <- paste0(", ", funnel) } } ### forest and funnel plot numbers if (plot.forest) { num.forest <- 1 num.funnel <- 2 } else { num.forest <- NA num.funnel <- 1 } ### save model object if (verbose) message(mstyle$message(paste0("Saving model object to ", file.obj, " ..."))) success <- try(save(x, file=file.path(dir, file.obj))) if (inherits(success, "try-error")) stop(mstyle$stop("Could not save model object to report directory.")) ### open rmd file connection if (verbose) message(mstyle$message(paste0("Creating ", file.rmd, " file ..."))) con <- try(file(file.path(dir, file.rmd), "w")) if (inherits(con, "try-error")) 
stop(mstyle$stop("Could not create .rmd file in report directory.")) ### get measure name measure <- tolower(.setlab(x$measure, transf.char=FALSE, atransf.char=FALSE, gentype=1)) measure <- sub("observed outcome", "outcome", measure) measure <- sub("fisher's z", "Fisher r-to-z", measure) measure <- sub("yule", "Yule", measure) measure <- sub("freeman", "Freeman", measure) measure <- sub("tukey", "Tukey", measure) measure <- sub("log ratio of means", "response ratio", measure) ### model type if (x$int.only) { if (is.element(x$method, c("FE","EE","CE"))) { model <- x$method } else { model <- "RE" } } else { if (is.element(x$method, c("FE","EE","CE"))) { model <- "MR" } else { model <- "ME" } } model.name <- c(FE = "fixed-effects", EE = "equal-effects", CE = "common-effects", MR = "(fixed-effects) meta-regression", RE = "random-effects", ME = "(mixed-effects) meta-regression")[model] ### get tau^2 estimator name and set reference tau2.method <- c(FE = "", HS = "Hunter-Schmidt", HSk = "k-corrected Hunter-Schmidt", HE = "Hedges'", DL = "DerSimonian-Laird", GENQ = "generalized Q-statistic", GENQM = "(median-unbiased) generalized Q-statistic", SJ = "Sidik-Jonkman", ML = "maximum-likelihood", REML = "restricted maximum-likelihood", EB = "empirical Bayes", PM = "Paule-Mandel", PMM = "(median-unbiased) Paule-Mandel")[x$method] if (x$method == "HS" && model == "RE") tau2.ref <- "[@hunter1990; @viechtbauer2005]" if (x$method == "HS" && model == "ME") tau2.ref <- "[@hunter1990; @viechtbauer2015]" if (x$method == "HSk" && model == "RE") tau2.ref <- "[@brannick2019; @hunter1990; @viechtbauer2005]" if (x$method == "HSk" && model == "ME") tau2.ref <- "[@brannick2019; @hunter1990; @viechtbauer2015]" if (x$method == "HE" && model == "RE") tau2.ref <- "[@hedges1985]" if (x$method == "HE" && model == "ME") tau2.ref <- "[@hedges1992]" if (x$method == "DL" && model == "RE") tau2.ref <- "[@dersimonian1986]" if (x$method == "DL" && model == "ME") tau2.ref <- "[@raudenbush2009]" if (x$method == "GENQ" && model == "RE") tau2.ref <- "[@dersimonian2007]" if (x$method == "GENQ" && model == "ME") tau2.ref <- "[@jackson2014]" if (x$method == "GENQM" && model == "RE") tau2.ref <- "[@dersimonian2007]" if (x$method == "GENQM" && model == "ME") tau2.ref <- "[@jackson2014]" if (x$method == "SJ") tau2.ref <- "[@sidik2005]" if (x$method == "ML" && model == "RE") tau2.ref <- "[@hardy1996]" if (x$method == "ML" && model == "ME") tau2.ref <- "[@raudenbush2009]" if (x$method == "REML" && model == "RE") tau2.ref <- "[@viechtbauer2005]" if (x$method == "REML" && model == "ME") tau2.ref <- "[@raudenbush2009]" if (x$method == "EB" && model == "RE") tau2.ref <- "[@morris1983]" if (x$method == "EB" && model == "ME") tau2.ref <- "[@berkey1995]" if (x$method == "PM" && model == "RE") tau2.ref <- "[@paule1982]" if (x$method == "PM" && model == "ME") tau2.ref <- "[@viechtbauer2015]" if (x$method == "PMM" && model == "RE") tau2.ref <- "[@paule1982]" if (x$method == "PMM" && model == "ME") tau2.ref <- "[@viechtbauer2015]" ### Q-test reference if (is.element(model, c("FE","EE","CE","RE"))) { qtest.ref <- "[@cochran1954]" } else { qtest.ref <- "[@hedges1983]" } ### CI level level <- 100 * (1-x$level) ### Bonferroni-corrected critical value for studentized residuals crit <- qnorm(x$level/(2*x$k), lower.tail=FALSE) ### get influence results infres <- influence(x) ### formating function for p-values fpval <- function(p, pdigits=digits[["pval"]]) paste0("$p ", ifelse(p < 10^(-pdigits), paste0("< ", .fcf(10^(-pdigits), pdigits)), paste0("= ", .fcf(p, 
pdigits))), "$") # consider giving only 2 digits for p-value if p > .05 or p > .10 ######################################################################### ### yaml header header <- paste0("---\n") header <- paste0(header, "output:\n") if (format == "html_document") header <- paste0(header, " html_document:\n toc: true\n toc_float:\n collapsed: false\n") if (format == "pdf_document") header <- paste0(header, " pdf_document:\n toc: true\n") if (format == "word_document") header <- paste0(header, " word_document\n") header <- paste0(header, "title: Analysis Report\n") header <- paste0(header, "toc-title: Table of Contents\n") header <- paste0(header, "author: Generated with the reporter() Function of the metafor Package\n") header <- paste0(header, "bibliography: references.bib\n") header <- paste0(header, "csl: apa.csl\n") header <- paste0(header, "date: \"`r format(Sys.time(), '%d %B, %Y')`\"\n") header <- paste0(header, "---\n") ######################################################################### ### rsetup rsetup <- paste0("```{r, setup, include=FALSE}\n") rsetup <- paste0(rsetup, "library(metafor)\n") rsetup <- paste0(rsetup, "load('", file.path(dir, file.obj), "')\n") rsetup <- paste0(rsetup, "```") ######################################################################### ### methods section methods <- "\n## Methods\n\n" if (x$measure != "GEN") methods <- paste0(methods, "The analysis was carried out using the ", measure, " as the outcome measure. ") methods <- paste0(methods, "A", ifelse(model.name == "equal-effects", "n ", " "), model.name, " model was fitted to the data. ") if (is.element(model, c("RE", "ME"))) methods <- paste0(methods, "The amount of ", ifelse(x$int.only, "", "residual "), "heterogeneity (i.e., $\\tau^2$), was estimated using the ", tau2.method, " estimator ", tau2.ref, ". ") if (is.element(model, c("FE","EE","CE"))) methods <- paste0(methods, "The $Q$-test for heterogeneity ", qtest.ref, " and the $I^2$ statistic [@higgins2002] are reported. ") if (model == "MR") methods <- paste0(methods, "The $Q$-test for residual heterogeneity ", qtest.ref, " is reported. ") if (model == "RE") methods <- paste0(methods, "In addition to the estimate of $\\tau^2$, the $Q$-test for heterogeneity ", qtest.ref, " and the $I^2$ statistic [@higgins2002] are reported. ") if (model == "ME") methods <- paste0(methods, "In addition to the estimate of $\\tau^2$, the $Q$-test for residual heterogeneity ", qtest.ref, " is reported. ") if (model == "RE") methods <- paste0(methods, "In case any amount of heterogeneity is detected (i.e., $\\hat{\\tau}^2 > 0$, regardless of the results of the $Q$-test), a prediction interval for the true outcomes is also provided [@riley2011]. ") if (x$test == "knha") methods <- paste0(methods, "Tests and confidence intervals were computed using the Knapp and Hartung method [@knapp2003]. ") methods <- paste0(methods, "Studentized residuals and Cook's distances are used to examine whether studies may be outliers and/or influential in the context of the model [@viechtbauer2010b]. ") #methods <- paste0(methods, "Studies with a studentized residual larger than $\\pm 1.96$ are considered potential outliers. ") methods <- paste0(methods, "Studies with a studentized residual larger than the $100 \\times (1 - ", x$level, "/(2 \\times k))$th percentile of a standard normal distribution are considered potential outliers (i.e., using a Bonferroni correction with two-sided $\\alpha = ", x$level, "$ for $k$ studies included in the meta-analysis). 
") # $\\pm ", .fcf(crit, digits[["test"]]), "$ ( #methods <- paste0(methods, "Studies with a Cook's distance larger than ", .fcf(qchisq(0.5, df=infres$m), digits[["test"]]), " (the 50th percentile of a $\\chi^2$-distribution with ", infres$m, " degree", ifelse(infres$m > 1, "s", ""), " of freedom) are considered to be influential. ") methods <- paste0(methods, "Studies with a Cook's distance larger than the median plus six times the interquartile range of the Cook's distances are considered to be influential.") methods <- if (footnotes) paste0(methods, "[^cook] ") else paste0(methods, " ") if (is.element(model, c("FE","EE","CE","RE"))) methods <- paste0(methods, "The rank correlation test [@begg1994] and the regression test [@sterne2005], using the standard error of the observed outcomes as predictor, are used to check for funnel plot asymmetry. ") if (is.element(model, c("MR","ME"))) methods <- paste0(methods, "The regression test [@sterne2005], using the standard error of the observed outcomes as predictor (in addition to the moderators already included in the model), is used to check for funnel plot asymmetry. ") methods <- paste0(methods, "The analysis was carried out using R (version ", getRversion(), ") [@rcore2020] and the **metafor** package (version ", x$version, ") [@viechtbauer2010a]. ") ######################################################################### ### results section results <- "\n## Results\n\n" ### number of studies results <- paste0(results, "A total of $k=", x$k, "$ studies were included in the analysis. ") ### range of observed outcomes results <- paste0(results, "The observed ", measure, "s ranged from $", .fcf(min(x$yi), digits[["est"]]), "$ to $", .fcf(max(x$yi), digits[["est"]]), "$, ") ### percent positive/negative results <- paste0(results, "with the majority of estimates being ", ifelse(mean(x$yi > 0) > .50, "positive", "negative"), " (", ifelse(mean(x$yi > 0) > .50, round(100*mean(x$yi > 0)), round(100*mean(x$yi < 0))), "%). ") if (is.element(model, c("FE","EE","CE","RE"))) { ### estimated average outcome with CI results <- paste0(results, "The estimated average ", measure, " based on the ", model.name, " model was ", ifelse(is.element(model, c("FE","EE","CE")), "$\\hat{\\theta} = ", "$\\hat{\\mu} = "), .fcf(c(x$beta), digits[["est"]]), "$ ") results <- paste0(results, "(", level, "% CI: $", .fcf(x$ci.lb, digits[["ci"]]), "$ to $", .fcf(x$ci.ub, digits[["ci"]]), "$). ") ### note: for some outcome measures (e.g., proportions), the test H0: mu/theta = 0 is not really relevant; maybe check for this results <- paste0(results, "Therefore, the average outcome ", ifelse(x$pval > 0.05, "did not differ", "differed"), " significantly from zero ($", ifelse(x$test == "z", "z", paste0("t(", x$k-1, ")")), " = ", .fcf(x$zval, digits[["test"]]), "$, ", fpval(x$pval), "). 
") ### forest plot if (plot.forest) { results <- paste0(results, "A forest plot showing the observed outcomes and the estimate based on the ", model.name, " model is shown in Figure ", num.forest, ".\n\n") if (is.element(format, c("pdf_document", "bookdown::pdf_document2"))) results <- paste0(results, "```{r, forestplot, echo=FALSE, fig.align=\"center\", fig.cap=\"Forest plot showing the observed outcomes and the estimate of the ", model.name, " model\"") if (format == "html_document") results <- paste0(results, "```{r, forestplot, echo=FALSE, fig.align=\"center\", fig.cap=\"Figure ", num.forest, ": Forest plot showing the observed outcomes and the estimate of the ", model.name, " model\"") if (format == "word_document") results <- paste0(results, "```{r, forestplot, echo=FALSE, fig.cap=\"Figure ", num.forest, ": Forest plot showing the observed outcomes and the estimate of the ", model.name, " model\"") results <- paste0(results, ", dev.args=list(pointsize=9)}\npar(family=\"mono\")\npar(mar=c(5,4,1,2))\ntmp <- metafor::forest(x, addpred=TRUE, header=TRUE", args.forest, ")\n```") #text(tmp$xlim[1], x$k+2, \"Study\", pos=4, font=2, cex=tmp$cex)\ntext(tmp$xlim[2], x$k+2, \"Outcome [", level, "% CI]\", pos=2, font=2, cex=tmp$cex)\n } results <- paste0(results, "\n\n") ### test for heterogeneity if (x$QEp > 0.10) results <- paste0(results, "According to the $Q$-test, there was no significant amount of heterogeneity in the true outcomes ") if (x$QEp > 0.05 && x$QEp <= 0.10) results <- paste0(results, "The $Q$-test for heterogeneity was not significant, but some heterogeneity may still be present in the true outcomes ") if (x$QEp <= 0.05) results <- paste0(results, "According to the $Q$-test, the true outcomes appear to be heterogeneous ") results <- paste0(results, "($Q(", x$k-1, ") = ", .fcf(x$QE, digits[["test"]]), "$, ", fpval(x$QEp)) ### tau^2 estimate (only for RE models) if (model == "RE") results <- paste0(results, ", $\\hat{\\tau}^2 = ", .fcf(x$tau2, digits[["var"]]), "$") ### I^2 statistic results <- paste0(results, ", $I^2 = ", .fcf(x$I2, digits[["het"]]), "$%). ") ### for the RE model, when any amount of heterogeneity is detected, provide prediction interval and note whether the directionality of effects is consistent or not if (model == "RE" && x$tau2 > 0) { pred <- predict(x) results <- paste0(results, "A ", level, "% prediction interval for the true outcomes is given by $", .fcf(pred$pi.lb, digits[["ci"]]), "$ to $", .fcf(pred$pi.ub, digits[["ci"]]), "$. 
") if (c(x$beta) > 0 && pred$pi.lb < 0) results <- paste0(results, "Hence, although the average outcome is estimated to be positive, in some studies the true outcome may in fact be negative.") if (c(x$beta) < 0 && pred$pi.ub > 0) results <- paste0(results, "Hence, although the average outcome is estimated to be negative, in some studies the true outcome may in fact be positive.") if ((c(x$beta) > 0 && pred$pi.lb > 0) || (c(x$beta) < 0 && pred$pi.ub < 0)) results <- paste0(results, "Hence, even though there may be some heterogeneity, the true outcomes of the studies are generally in the same direction as the estimated average outcome.") } results <- paste0(results, "\n\n") ### check if some studies have very large weights relatively speaking largeweight <- weights(x)/100 >= 3 / x$k if (any(largeweight)) { if (sum(largeweight) == 1) results <- paste0(results, "One study (", names(largeweight)[largeweight], ") had a relatively large weight ") if (sum(largeweight) == 2) results <- paste0(results, "Two studies (", paste(names(largeweight)[largeweight], collapse="; "), ") had relatively large weights ") if (sum(largeweight) >= 3) results <- paste0(results, "Several studies (", paste(names(largeweight)[largeweight], collapse="; "), ") had relatively large weights ") results <- paste0(results, "compared to the rest of the studies (i.e., $\\mbox{weight} \\ge 3/k$, so a weight at least 3 times as large as having equal weights across studies). ") } ### check for outliers zi <- infres$inf$rstudent abszi <- abs(zi) results <- paste0(results, "An examination of the studentized residuals revealed that ") if (all(abszi < crit, na.rm=TRUE)) results <- paste0(results, "none of the studies had a value larger than $\\pm ", .fcf(crit, digits[["test"]]), "$ and hence there was no indication of outliers ") if (sum(abszi >= crit, na.rm=TRUE) == 1) results <- paste0(results, "one study (", infres$inf$slab[abszi >= crit & !is.na(abszi)], ") had a value larger than $\\pm ", .fcf(crit, digits[["test"]]), "$ and may be a potential outlier ") if (sum(abszi >= crit, na.rm=TRUE) == 2) results <- paste0(results, "two studies (", paste(infres$inf$slab[abszi >= crit & !is.na(abszi)], collapse="; "), ") had values larger than $\\pm ", .fcf(crit, digits[["test"]]), "$ and may be potential outliers ") if (sum(abszi >= crit, na.rm=TRUE) >= 3) results <- paste0(results, "several studies (", paste(infres$inf$slab[abszi >= crit & !is.na(abszi)], collapse="; "), ") had values larger than $\\pm ", .fcf(crit, digits[["test"]]), "$ and may be potential outliers ") results <- paste0(results, "in the context of this model. 
") ### check for influential cases #is.infl <- pchisq(infres$inf$cook.d, df=1) > .50 is.infl <- infres$inf$cook.d > median(infres$inf$cook.d, na.rm=TRUE) + 6 * IQR(infres$inf$cook.d, na.rm=TRUE) results <- paste0(results, "According to the Cook's distances, ") if (all(!is.infl, na.rm=TRUE)) results <- paste0(results, "none of the studies ") if (sum(is.infl, na.rm=TRUE) == 1) results <- paste0(results, "one study (", infres$inf$slab[is.infl & !is.na(abszi)], ") ") if (sum(is.infl, na.rm=TRUE) == 2) results <- paste0(results, "two studies (", paste(infres$inf$slab[is.infl & !is.na(abszi)], collapse="; "), ") ") if (sum(is.infl, na.rm=TRUE) >= 3) results <- paste0(results, "several studies (", paste(infres$inf$slab[is.infl & !is.na(abszi)], collapse="; "), ") ") results <- paste0(results, "could be considered to be overly influential.") results <- paste0(results, "\n\n") ### publication bias ranktest <- suppressWarnings(ranktest(x)) regtest <- regtest(x) if (plot.funnel) results <- paste0(results, "A funnel plot of the estimates is shown in Figure ", num.funnel, ". ") if (ranktest$pval > .05 && regtest$pval > .05) { results <- paste0(results, "Neither the rank correlation nor the regression test indicated any funnel plot asymmetry ") results <- paste0(results, "(", fpval(ranktest$pval), " and ", fpval(regtest$pval), ", respectively). ") } if (ranktest$pval <= .05 && regtest$pval <= .05) { results <- paste0(results, "Both the rank correlation and the regression test indicated potential funnel plot asymmetry ") results <- paste0(results, "(", fpval(ranktest$pval), " and ", fpval(regtest$pval), ", respectively). ") } if (ranktest$pval > .05 && regtest$pval <= .05) results <- paste0(results, "The regression test indicated funnel plot asymmetry (", fpval(regtest$pval), ") but not the rank correlation test (", fpval(ranktest$pval), "). ") if (ranktest$pval <= .05 && regtest$pval > .05) results <- paste0(results, "The rank correlation test indicated funnel plot asymmetry ($", fpval(ranktest$pval), ") but not the regression test (", fpval(regtest$pval), "). 
") ### funnel plot if (plot.funnel) { if (is.element(format, c("pdf_document", "bookdown::pdf_document2"))) results <- paste0(results, "\n\n```{r, funnelplot, echo=FALSE, fig.align=\"center\", fig.cap=\"Funnel plot\", dev.args=list(pointsize=9)}\npar(mar=c(5,4,2,2))\nmetafor::funnel(x", args.funnel, ")\n```") if (format == "html_document") results <- paste0(results, "\n\n```{r, funnelplot, echo=FALSE, fig.align=\"center\", fig.cap=\"Figure ", num.funnel, ": Funnel plot\", dev.args=list(pointsize=9)}\npar(mar=c(5,4,2,2))\nmetafor::funnel(x", args.funnel, ")\n```") if (format == "word_document") results <- paste0(results, "\n\n```{r, funnelplot, echo=FALSE, fig.cap=\"Figure ", num.funnel, ": Funnel plot\", dev.args=list(pointsize=9)}\npar(mar=c(5,4,2,2))\nmetafor::funnel(x", args.funnel, ")\n```") } } if (is.element(model, c("MR", "ME"))) { if (x$int.incl) { mods <- colnames(x$X)[-1] p <- x$p - 1 } else { mods <- colnames(x$X) p <- x$p } results <- paste0(results, "The meta-regression model included ", p, " predictor", ifelse(p > 1, "s ", " ")) if (p == 1) results <- paste0(results, "(i.e., '", mods, "').") if (p == 2) results <- paste0(results, "(i.e., '", mods[1], "' and '", mods[2], "').") if (p >= 3) results <- paste0(results, "(i.e., ", paste0("'", mods[-p], "'", collapse=", "), " and ", mods[p], ").") } # 95% CI for tau^2 and I^2 # table for meta-regression model # links to help pages for functions used ######################################################################### ### notes section notes <- "\n## Notes\n\n" notes <- paste0(notes, "This analysis report was dynamically generated ", ifelse(has.object.name, paste0("for model object '`", object.name, "`'"), ""), " with the `reporter()` function of the **metafor** package. ") call <- capture.output(x$call) call <- trimws(call, which="left") call <- paste(call, collapse="") notes <- paste0(notes, "The model call that was used to fit the model was '`", call, "`'. ") notes <- paste0(notes, "This report provides an illustration of how the results of the model can be reported, but is not a substitute for a careful examination of the results.") ######################################################################### ### references section references <- "\n## References\n" ######################################################################### if (footnotes) { fnotes <- "" fnotes <- paste0(fnotes, "[^cook]: This is a somewhat arbitrary rule, but tends to detect 'spikes' in a plot of the Cook's distances fairly accurately. 
A better rule may be implemented in the future.") } ######################################################################### ### write sections to rmd file writeLines(header, con) writeLines(rsetup, con) writeLines(methods, con) writeLines(results, con) writeLines(notes, con) writeLines(references, con) if (footnotes) writeLines(fnotes, con) ### close rmd file connection close(con) ### render rmd file if (verbose) message(mstyle$message(paste0("Rendering ", file.rmd, " file ..."))) if (verbose >= 2) { file.out <- rmarkdown::render(file.path(dir, file.rmd), output_format=format, quiet=ifelse(verbose <= 1, TRUE, FALSE)) } else { file.out <- suppressWarnings(rmarkdown::render(file.path(dir, file.rmd), output_format=format, quiet=ifelse(verbose <= 1, TRUE, FALSE))) } if (verbose) message(mstyle$message(paste0("Generated ", file.out, " ..."))) ### render() sometimes fails to delete the intermediate tex file, so in case this happens clean up ### see also: https://github.com/rstudio/rmarkdown/issues/1308 if (file.exists(file.path(dir, file.tex))) unlink(file.path(dir, file.tex)) ### try to open output file if (open) { if (verbose) message(mstyle$message(paste0("Opening report ...\n"))) if (.Platform$OS.type == "windows") { shell.exec(file.out) } else { optb <- getOption("browser") if (is.function(optb)) { invisible(optb(file.out)) } else { system(paste0(optb, " '", file.out, "'")) } } } invisible(file.out) } metafor/R/forest.rma.r0000644000176200001440000010241214054724757014372 0ustar liggesusersforest.rma <- function(x, annotate=TRUE, addfit=TRUE, addpred=FALSE, showweights=FALSE, header=FALSE, xlim, alim, olim, ylim, top=3, at, steps=5, level=x$level, refline=0, digits=2L, width, xlab, slab, mlab, ilab, ilab.xpos, ilab.pos, order, transf, atransf, targs, rows, efac=1, pch=15, psize, plim=c(0.5,1.5), colout, col, border, lty, fonts, cex, cex.lab, cex.axis, annosym, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav="rma.ls") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE transf.char <- deparse(substitute(transf)) atransf.char <- deparse(substitute(atransf)) if (is.function(transf) && is.function(atransf)) stop(mstyle$stop("Use either 'transf' or 'atransf' to specify a transformation (not both).")) if (missing(targs)) targs <- NULL if (missing(at)) at <- NULL if (missing(ilab)) ilab <- NULL if (missing(ilab.xpos)) ilab.xpos <- NULL if (missing(ilab.pos)) ilab.pos <- NULL if (missing(order)) order <- NULL if (missing(colout)) colout <- "black" if (missing(psize)) psize <- NULL if (missing(cex)) cex <- NULL if (missing(cex.lab)) cex.lab <- NULL if (missing(cex.axis)) cex.axis <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ### digits[1] for annotations, digits[2] for x-axis labels ### note: digits can also be a list (e.g., digits=list(2L,3)); trailing 0's are dropped for intergers if (length(digits) == 1L) digits <- c(digits,digits) ############################################################################ ### set default colors if user has not specified 'col' and 'border' arguments if (x$int.only) { if (missing(col)) { col <- c("black", "gray50") # 1st color for summary polygon, 2nd color for prediction interval line } else { if (length(col) == 1L) # if user only specified one value, assume it is for the summary polygon col <- c(col, "gray50") } if (missing(border)) border <- "black" # border color of summary polygon } else { if (missing(col)) col <- "gray" # color of fitted values if (missing(border)) border <- "gray" # border color of fitted values } ### set default line types if user has not specified 'lty' argument if (missing(lty)) { lty <- c("solid", "dotted", "solid") # 1st value = CIs, 2nd value = prediction interval, 3rd = horizontal line(s) } else { if (length(lty) == 1L) lty <- c(lty, "dotted", "solid") if (length(lty) == 2L) lty <- c(lty, "solid") } ### vertical expansion factor: 1st = CI end lines, 2nd = arrows, 3rd = summary polygon or fitted polygons if (length(efac) == 1L) efac <- rep(efac, 3) if (length(efac) == 2L) efac <- c(efac[1], efac[1], efac[2]) # if 2 values specified: 1st = CI end lines and arrows, 2nd = summary polygon or fitted polygons ### annotation symbols vector if (missing(annosym)) annosym <- c(" [", ", ", "]", "-") # 4th element for minus sign symbol if (length(annosym) == 3L) annosym <- c(annosym, "-") if (length(annosym) != 4L) stop(mstyle$stop("Argument 'annosym' must be a vector of length 3.")) ### get measure from object measure <- x$measure ### column header estlab <- .setlab(measure, transf.char, atransf.char, gentype=3, short=TRUE) if (is.expression(estlab)) { header.right <- parse(text=paste0("bold(", estlab, " * '", annosym[1], "' * '", 100*(1-level), "% CI'", " * '", annosym[3], "')")) } else { header.right <- paste0(estlab, annosym[1], 100*(1-level), "% CI", annosym[3]) } if (is.logical(header)) { if (header) { header.left <- "Study" } else { header.left <- NULL header.right <- NULL } } else { if (!is.character(header)) stop(mstyle$stop("Argument 'header' must either be a logical or character 
vector.")) if (length(header) == 1L) { header.left <- header } else { header.left <- header[1] header.right <- header[2] } } if (!annotate) header.right <- NULL ddd <- list(...) if (!is.null(ddd$addcred)) addpred <- ddd$addcred if (is.null(ddd$pi.type)) { pi.type <- "default" } else { pi.type <- ddd$pi.type } if (is.null(ddd$decreasing)) { decreasing <- FALSE } else { decreasing <- ddd$decreasing } if (!is.null(ddd$clim)) olim <- ddd$clim lplot <- function(..., textpos, addcred, pi.type, decreasing, clim) plot(...) labline <- function(..., textpos, addcred, pi.type, decreasing, clim) abline(...) lsegments <- function(..., textpos, addcred, pi.type, decreasing, clim) segments(...) laxis <- function(..., textpos, addcred, pi.type, decreasing, clim) axis(...) lmtext <- function(..., textpos, addcred, pi.type, decreasing, clim) mtext(...) lpolygon <- function(..., textpos, addcred, pi.type, decreasing, clim) polygon(...) ltext <- function(..., textpos, addcred, pi.type, decreasing, clim) text(...) lpoints <- function(..., textpos, addcred, pi.type, decreasing, clim) points(...) ### TODO: remove this when there is a weights() function for 'rma.glmm' objects if (inherits(x, "rma.glmm") && showweights) stop(mstyle$stop("Option 'showweights=TRUE' not possible for 'rma.glmm' objects.")) ### TODO: remove this when there is a weights() function for 'rma.uni.selmodel' objects if (inherits(x, "rma.uni.selmodel") && showweights) stop(mstyle$stop("Option 'showweights=TRUE' not possible for 'rma.uni.selmodel' objects.")) if (!is.null(ddd$subset)) stop(mstyle$stop("Function does not have a 'subset' argument.")) ######################################################################### ### extract data and study labels ### note: yi.f/vi.f and pred may contain NAs yi <- x$yi.f vi <- x$vi.f X <- x$X.f k <- length(yi) # length of yi.f ### note: slab (if specified), ilab (if specified), pch (if vector), psize (if ### vector), colout (if vector), order (if vector) must have the same ### length as the original dataset if (missing(slab)) { if (x$slab.null) { slab <- paste("Study", x$ids) # x$ids is always of length yi.f (i.e., NAs also have an id) } else { slab <- x$slab # x$slab is always of length yi.f (i.e., NAs also have a study label) } } else { if (is.null(slab) || (length(slab) == 1L && is.na(slab))) # slab=NULL or slab=NA can be used to suppress study labels slab <- rep("", x$k.all) if (length(slab) != x$k.all) stop(mstyle$stop(paste0("Length of the 'slab' argument (", length(slab), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) slab <- slab[x$subset] } if (!is.null(ilab)) { if (is.null(dim(ilab))) ilab <- cbind(ilab) if (nrow(ilab) != x$k.all) stop(mstyle$stop(paste0("Length of the 'ilab' argument (", nrow(ilab), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) ilab <- ilab[x$subset,,drop=FALSE] } if (length(pch) == 1L) pch <- rep(pch, x$k.all) if (length(pch) != x$k.all) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) pch <- pch[x$subset] if (!is.null(psize)) { if (length(psize) == 1L) psize <- rep(psize, x$k.all) if (length(psize) != x$k.all) stop(mstyle$stop(paste0("Length of the 'psize' argument (", length(psize), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) psize <- psize[x$subset] } if (length(colout) == 1L) 
colout <- rep(colout, x$k.all) if (length(colout) != x$k.all) stop(mstyle$stop(paste0("Length of the 'colout' argument (", length(colout), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) colout <- colout[x$subset] ### extract fitted values options(na.action = "na.pass") # using na.pass to get the entire vector (length of yi.f) if (x$int.only) { pred <- fitted(x) pred.ci.lb <- rep(NA_real_, k) pred.ci.ub <- rep(NA_real_, k) } else { temp <- predict(x, level=level, pi.type=pi.type) pred <- temp$pred if (addpred) { pred.ci.lb <- temp$pi.lb pred.ci.ub <- temp$pi.ub } else { pred.ci.lb <- temp$ci.lb pred.ci.ub <- temp$ci.ub } } weights <- try(weights(x), silent=TRUE) # does not work for rma.glmm and rma.uni.selmodel objects if (inherits(weights, "try-error")) weights <- rep(1, k) ### sort the data if requested if (!is.null(order)) { if (length(order) == 1L) { order <- match.arg(order, c("obs", "yi", "fit", "prec", "vi", "resid", "rstandard", "abs.resid", "abs.rstandard")) if (order == "obs" || order == "yi") sort.vec <- order(yi) if (order == "fit") sort.vec <- order(pred) if (order == "prec" || order == "vi") sort.vec <- order(vi, yi) if (order == "resid") sort.vec <- order(yi-pred, yi) if (order == "rstandard") sort.vec <- order(rstandard(x)$z, yi) # need options(na.action = "na.pass") here as well if (order == "abs.resid") sort.vec <- order(abs(yi-pred), yi) if (order == "abs.rstandard") sort.vec <- order(abs(rstandard(x)$z), yi) # need options(na.action = "na.pass") here as well } else { if (length(order) != x$k.all) stop(mstyle$stop(paste0("Length of the 'order' argument (", length(order), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (grepl("^order\\(", deparse(substitute(order)))) { sort.vec <- order } else { sort.vec <- order(order, decreasing=decreasing) } if (!is.null(x$subset)) sort.vec <- sort.vec[x$subset] - sum(!x$subset) } yi <- yi[sort.vec] vi <- vi[sort.vec] X <- X[sort.vec,,drop=FALSE] slab <- slab[sort.vec] ilab <- ilab[sort.vec,,drop=FALSE] # if NULL, remains NULL pred <- pred[sort.vec] pred.ci.lb <- pred.ci.lb[sort.vec] pred.ci.ub <- pred.ci.ub[sort.vec] weights <- weights[sort.vec] pch <- pch[sort.vec] psize <- psize[sort.vec] # if NULL, remains NULL colout <- colout[sort.vec] } options(na.action = na.act) k <- length(yi) # in case length of k has changed ### set rows value if (missing(rows)) { rows <- k:1 } else { if (length(rows) == 1L) { # note: rows must be a single value or the same rows <- rows:(rows-k+1) # length of yi.f (including NAs) *after ordering* } } if (length(rows) != k) stop(mstyle$stop(paste0("Length of the 'rows' argument (", length(rows), ") does not correspond to the number of outcomes (", k, ")."))) ### reverse order yi <- yi[k:1] vi <- vi[k:1] X <- X[k:1,,drop=FALSE] slab <- slab[k:1] ilab <- ilab[k:1,,drop=FALSE] # if NULL, remains NULL pred <- pred[k:1] pred.ci.lb <- pred.ci.lb[k:1] pred.ci.ub <- pred.ci.ub[k:1] weights <- weights[k:1] pch <- pch[k:1] psize <- psize[k:1] # if NULL, remains NULL colout <- colout[k:1] rows <- rows[k:1] ### check for NAs in yi/vi/X and act accordingly yiviX.na <- is.na(yi) | is.na(vi) | apply(is.na(X), 1, any) if (any(yiviX.na)) { not.na <- !yiviX.na if (na.act == "na.omit") { yi <- yi[not.na] vi <- vi[not.na] X <- X[not.na,,drop=FALSE] slab <- slab[not.na] ilab <- ilab[not.na,,drop=FALSE] # if NULL, remains NULL pred <- pred[not.na] pred.ci.lb <- pred.ci.lb[not.na] pred.ci.ub <- pred.ci.ub[not.na] weights <- weights[not.na] 
pch <- pch[not.na] psize <- psize[not.na] # if NULL, remains NULL colout <- colout[not.na] rows.new <- rows # rearrange rows due to NAs being omitted from plot rows.na <- rows[!not.na] # shift higher rows down according to number of NAs omitted for (j in seq_len(length(rows.na))) { rows.new[rows >= rows.na[j]] <- rows.new[rows >= rows.na[j]] - 1 } rows <- rows.new[not.na] } if (na.act == "na.fail") stop(mstyle$stop("Missing values in results.")) } # note: yi/vi may be NA if na.act == "na.exclude" or "na.pass" k <- length(yi) # in case length of k has changed ### calculate individual CI bounds ci.lb <- yi - qnorm(level/2, lower.tail=FALSE) * sqrt(vi) ci.ub <- yi + qnorm(level/2, lower.tail=FALSE) * sqrt(vi) ### if requested, apply transformation to yi's and CI bounds if (is.function(transf)) { if (is.null(targs)) { yi <- sapply(yi, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) pred <- sapply(pred, transf) pred.ci.lb <- sapply(pred.ci.lb, transf) pred.ci.ub <- sapply(pred.ci.ub, transf) } else { yi <- sapply(yi, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) pred <- sapply(pred, transf, targs) pred.ci.lb <- sapply(pred.ci.lb, transf, targs) pred.ci.ub <- sapply(pred.ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] tmp <- .psort(pred.ci.lb, pred.ci.ub) pred.ci.lb <- tmp[,1] pred.ci.ub <- tmp[,2] ### apply observation/outcome limits if specified if (!missing(olim)) { if (length(olim) != 2L) stop(mstyle$stop("Argument 'olim' must be of length 2.")) olim <- sort(olim) yi[yi < olim[1]] <- olim[1] yi[yi > olim[2]] <- olim[2] ci.lb[ci.lb < olim[1]] <- olim[1] ci.ub[ci.ub > olim[2]] <- olim[2] pred.ci.lb[pred.ci.lb < olim[1]] <- olim[1] pred.ci.ub[pred.ci.ub > olim[2]] <- olim[2] } ### set default point sizes (if not specified by user) if (is.null(psize)) { if (length(plim) < 2L) stop(mstyle$stop("Argument 'plim' must be of length 2 or 3.")) wi <- sqrt(weights) if (!is.na(plim[1]) && !is.na(plim[2])) { rng <- max(wi, na.rm=TRUE) - min(wi, na.rm=TRUE) if (rng <= .Machine$double.eps^0.5) { psize <- rep(1, k) } else { psize <- (wi - min(wi, na.rm=TRUE)) / rng psize <- (psize * (plim[2] - plim[1])) + plim[1] } } if (is.na(plim[1]) && !is.na(plim[2])) { psize <- wi / max(wi, na.rm=TRUE) * plim[2] if (length(plim) == 3L) psize[psize <= plim[3]] <- plim[3] } if (!is.na(plim[1]) && is.na(plim[2])) { psize <- wi / min(wi, na.rm=TRUE) * plim[1] if (length(plim) == 3L) psize[psize >= plim[3]] <- plim[3] } if (all(is.na(psize))) psize <- rep(1, k) } ######################################################################### ### total range of CI bounds rng <- max(ci.ub, na.rm=TRUE) - min(ci.lb, na.rm=TRUE) if (annotate) { if (showweights) { plot.multp.l <- 2.00 plot.multp.r <- 2.00 } else { plot.multp.l <- 1.20 plot.multp.r <- 1.20 } } else { plot.multp.l <- 1.20 plot.multp.r <- 0.40 } ### set plot limits if (missing(xlim)) { xlim <- c(min(ci.lb, na.rm=TRUE) - rng * plot.multp.l, max(ci.ub, na.rm=TRUE) + rng * plot.multp.r) xlim <- round(xlim, digits[[2]]) #xlim[1] <- xlim[1]*max(1, digits[[2]]/2) #xlim[2] <- xlim[2]*max(1, digits[[2]]/2) } ### set x-axis limits (at argument overrides alim argument) alim.spec <- TRUE if (missing(alim)) { if (is.null(at)) { alim <- range(pretty(x=c(min(ci.lb, na.rm=TRUE), max(ci.ub, na.rm=TRUE)), n=steps-1)) alim.spec <- FALSE } else { alim <- range(at) } } ### make sure the plot and x-axis limits are sorted alim <- 
sort(alim) xlim <- sort(xlim) ### plot limits must always encompass the yi values if (xlim[1] > min(yi, na.rm=TRUE)) { xlim[1] <- min(yi, na.rm=TRUE) } if (xlim[2] < max(yi, na.rm=TRUE)) { xlim[2] <- max(yi, na.rm=TRUE) } ### x-axis limits must always encompass the yi values (no longer required) #if (alim[1] > min(yi, na.rm=TRUE)) { alim[1] <- min(yi, na.rm=TRUE) } #if (alim[2] < max(yi, na.rm=TRUE)) { alim[2] <- max(yi, na.rm=TRUE) } ### plot limits must always encompass the x-axis limits if (alim[1] < xlim[1]) { xlim[1] <- alim[1] } if (alim[2] > xlim[2]) { xlim[2] <- alim[2] } ### allow adjustment of position of study labels and annotations via textpos argument if (is.null(ddd$textpos)) ddd$textpos <- c(xlim[1], xlim[2]) if (length(ddd$textpos) != 2L) stop(mstyle$stop("Argument 'textpos' must be of length 2.")) if (is.na(ddd$textpos[1])) ddd$textpos[1] <- xlim[1] if (is.na(ddd$textpos[2])) ddd$textpos[2] <- xlim[2] ### set y-axis limits if (missing(ylim)) { if (x$int.only && addfit) { ylim <- c(-1.5, max(rows, na.rm=TRUE)+top) } else { ylim <- c(0.5, max(rows, na.rm=TRUE)+top) } } else { ylim <- sort(ylim) } ### generate x-axis positions if none are specified if (is.null(at)) { if (alim.spec) { at <- seq(from=alim[1], to=alim[2], length.out=steps) } else { at <- pretty(x=c(min(ci.lb, na.rm=TRUE), max(ci.ub, na.rm=TRUE)), n=steps-1) } } else { at[at < alim[1]] <- alim[1] # remove at values that are below or above the axis limits at[at > alim[2]] <- alim[2] at <- unique(at) } ### x-axis labels (apply transformation to axis labels if requested) at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } } else { at.lab <- formatC(at.lab, digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } ######################################################################### ### set/get fonts (1st for study labels, 2nd for annotations, 3rd for ilab) ### when passing a named vector, the names are for 'family' and the values are for 'font' if (missing(fonts)) { fonts <- rep(par("family"), 3) } else { if (length(fonts) == 1L) fonts <- rep(fonts, 3) if (length(fonts) == 2L) fonts <- c(fonts, fonts[1]) } if (is.null(names(fonts))) fonts <- structure(c(1L,1L,1L), names=fonts) par(family=names(fonts)[1], font=fonts[1]) ### adjust margins par.mar <- par("mar") par.mar.adj <- par.mar - c(0,3,1,1) par.mar.adj[par.mar.adj < 0] <- 0 par(mar = par.mar.adj) on.exit(par(mar = par.mar), add=TRUE) ### start plot lplot(NA, NA, xlim=xlim, ylim=ylim, xlab="", ylab="", yaxt="n", xaxt="n", xaxs="i", bty="n", ...) ### horizontal title line labline(h=ylim[2]-(top-1), lty=lty[3], ...) ### get coordinates of the plotting region par.usr <- par("usr") ### add reference line if (is.numeric(refline)) lsegments(refline, par.usr[3], refline, ylim[2]-(top-1), lty="dotted", ...) 
### set cex, cex.lab, and cex.axis sizes as a function of the height of the figure height <- par.usr[4] - par.usr[3] if (is.null(cex)) { lheight <- strheight("O") cex.adj <- ifelse(k * lheight > height * 0.8, height/(1.25 * k * lheight), 1) } if (is.null(cex)) { cex <- par("cex") * cex.adj } else { if (is.null(cex.lab)) cex.lab <- cex if (is.null(cex.axis)) cex.axis <- cex } if (is.null(cex.lab)) cex.lab <- par("cex") * cex.adj if (is.null(cex.axis)) cex.axis <- par("cex") * cex.adj ######################################################################### ### if addfit and not an intercept-only model, add fitted polygons if (addfit && !x$int.only) { for (i in seq_len(k)) { if (is.na(pred[i])) next lpolygon(x=c(max(pred.ci.lb[i], alim[1]), pred[i], min(pred.ci.ub[i], alim[2]), pred[i]), y=c(rows[i], rows[i]+(height/100)*cex*efac[3], rows[i], rows[i]-(height/100)*cex*efac[3]), col=col, border=border, ...) ### this would only draw intervals if bounds fall within alim range #if ((pred.ci.lb[i] > alim[1]) && (pred.ci.ub[i] < alim[2])) # lpolygon(x=c(pred.ci.lb[i], pred[i], pred.ci.ub[i], pred[i]), y=c(rows[i], rows[i]+(height/100)*cex*efac[3], rows[i], rows[i]-(height/100)*cex*efac[3]), col=col, border=border, ...) } } ######################################################################### ### if addfit and intercept-only model, add fixed/random-effects model polygon if (addfit && x$int.only) { if (inherits(x, "rma.mv") && x$withG && x$tau2s > 1) { if (!is.logical(addpred)) { ### for multiple tau^2 (and gamma^2) values, need to specify level(s) of the inner factor(s) to compute the prediction interval ### this can be done via the addpred argument (i.e., instead of using a logical, one specifies the level(s)) if (length(addpred) == 1L) addpred <- c(addpred, addpred) temp <- predict(x, level=level, tau2.levels=addpred[1], gamma2.levels=addpred[2], pi.type=pi.type) addpred <- TRUE ### set addpred to TRUE, so if (!is.element(x$method, c("FE","EE","CE")) && addpred) further below works } else { if (addpred) { ### here addpred=TRUE, but user has not specified the level, so throw an error stop(mstyle$stop("Must specify the level of the inner factor(s) via the 'addpred' argument.")) } else { ### here addpred=FALSE, so just use the first tau^2 and gamma^2 arbitrarily (so predict() works) temp <- predict(x, level=level, tau2.levels=1, gamma2.levels=1, pi.type=pi.type) } } } else { temp <- predict(x, level=level, pi.type=pi.type) } beta <- temp$pred beta.ci.lb <- temp$ci.lb beta.ci.ub <- temp$ci.ub beta.pi.lb <- temp$pi.lb beta.pi.ub <- temp$pi.ub if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) beta.ci.lb <- sapply(beta.ci.lb, transf) beta.ci.ub <- sapply(beta.ci.ub, transf) beta.pi.lb <- sapply(beta.pi.lb, transf) beta.pi.ub <- sapply(beta.pi.ub, transf) } else { beta <- sapply(beta, transf, targs) beta.ci.lb <- sapply(beta.ci.lb, transf, targs) beta.ci.ub <- sapply(beta.ci.ub, transf, targs) beta.pi.lb <- sapply(beta.pi.lb, transf, targs) beta.pi.ub <- sapply(beta.pi.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(beta.ci.lb, beta.ci.ub) beta.ci.lb <- tmp[,1] beta.ci.ub <- tmp[,2] tmp <- .psort(beta.pi.lb, beta.pi.ub) beta.pi.lb <- tmp[,1] beta.pi.ub <- tmp[,2] ### apply observation/outcome limits if specified if (!missing(olim)) { pred[pred < olim[1]] <- olim[1] pred[pred > olim[2]] <- olim[2] beta.ci.lb[beta.ci.lb < olim[1]] <- olim[1] beta.ci.ub[beta.ci.ub > olim[2]] <- olim[2] beta.pi.lb[beta.pi.lb < olim[1]] <- olim[1] 
beta.pi.ub[beta.pi.ub > olim[2]] <- olim[2] } ### add prediction interval if (!is.element(x$method, c("FE","EE","CE")) && addpred) { lsegments(max(beta.pi.lb, alim[1]), -1, min(beta.pi.ub, alim[2]), -1, lty=lty[2], col=col[2], ...) if (beta.pi.lb >= alim[1]) { lsegments(beta.pi.lb, -1-(height/150)*cex*efac[1], beta.pi.lb, -1+(height/150)*cex*efac[1], col=col[2], ...) } else { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(-1, -1+(height/150)*cex*efac[2], -1-(height/150)*cex*efac[2], -1), col=col[2], border=col[2], ...) } if (beta.pi.ub <= alim[2]) { lsegments(beta.pi.ub, -1-(height/150)*cex*efac[1], beta.pi.ub, -1+(height/150)*cex*efac[1], col=col[2], ...) } else { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(-1, -1+(height/150)*cex*efac[2], -1-(height/150)*cex*efac[2], -1), col=col[2], border=col[2], ...) } } ### polygon for the summary estimate lpolygon(x=c(beta.ci.lb, beta, beta.ci.ub, beta), y=c(-1, -1+(height/100)*cex*efac[3], -1, -1-(height/100)*cex*efac[3]), col=col[1], border=border, ...) ### add label for model estimate if (missing(mlab)) mlab <- sapply(x$method, switch, "FE"="FE Model", "EE"="EE Model", "CE"="CE Model", "RE Model", USE.NAMES=FALSE) if (is.list(mlab)) { ltext(ddd$textpos[1], -1, mlab[[1]], pos=4, cex=cex, ...) } else { ltext(ddd$textpos[1], -1, mlab, pos=4, cex=cex, ...) } } ######################################################################### ### add x-axis laxis(side=1, at=at, labels=at.lab, cex.axis=cex.axis, ...) ### add x-axis label if (missing(xlab)) xlab <- .setlab(measure, transf.char, atransf.char, gentype=1) lmtext(xlab, side=1, at=min(at) + (max(at)-min(at))/2, line=par("mgp")[1]-0.5, cex=cex.lab, ...) ### add CI ends (either | or <> if outside of axis limits) for (i in seq_len(k)) { ### need to skip missings (if check below will otherwise throw an error) if (is.na(yi[i]) || is.na(vi[i])) next ### if the lower bound is actually larger than upper x-axis limit, then everything is to the right and just draw a polygon pointing in that direction if (ci.lb[i] >= alim[2]) { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=colout[i], ...) next } ### if the upper bound is actually lower than lower x-axis limit, then everything is to the left and just draw a polygon pointing in that direction if (ci.ub[i] <= alim[1]) { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=colout[i], ...) next } lsegments(max(ci.lb[i], alim[1]), rows[i], min(ci.ub[i], alim[2]), rows[i], lty=lty[1], col=colout[i], ...) if (ci.lb[i] >= alim[1]) { lsegments(ci.lb[i], rows[i]-(height/150)*cex*efac[1], ci.lb[i], rows[i]+(height/150)*cex*efac[1], col=colout[i], ...) } else { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=colout[i], ...) } if (ci.ub[i] <= alim[2]) { lsegments(ci.ub[i], rows[i]-(height/150)*cex*efac[1], ci.ub[i], rows[i]+(height/150)*cex*efac[1], col=colout[i], ...) 
} else { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=colout[i], ...) } } ### add study labels on the left ltext(ddd$textpos[1], rows, slab, pos=4, cex=cex, ...) ### add info labels if (!is.null(ilab)) { if (is.null(ilab.xpos)) stop(mstyle$stop("Must specify 'ilab.xpos' argument when adding information with 'ilab'.")) if (length(ilab.xpos) != ncol(ilab)) stop(mstyle$stop(paste0("Number of 'ilab' columns (", ncol(ilab), ") does not match length of 'ilab.xpos' argument (", length(ilab.xpos), ")."))) if (!is.null(ilab.pos) && length(ilab.pos) == 1L) ilab.pos <- rep(ilab.pos, ncol(ilab)) par(family=names(fonts)[3], font=fonts[3]) for (l in seq_len(ncol(ilab))) { ltext(ilab.xpos[l], rows, ilab[,l], pos=ilab.pos[l], cex=cex, ...) } par(family=names(fonts)[1], font=fonts[1]) } ### add study annotations on the right: yi [LB, UB] ### and add model fit annotations if requested: b [LB, UB] ### (have to add this here, so that alignment is correct) if (annotate) { if (is.function(atransf)) { if (is.null(targs)) { if (addfit && x$int.only) { annotext <- cbind(sapply(c(yi, beta), atransf), sapply(c(ci.lb, beta.ci.lb), atransf), sapply(c(ci.ub, beta.ci.ub), atransf)) } else { annotext <- cbind(sapply(yi, atransf), sapply(ci.lb, atransf), sapply(ci.ub, atransf)) } } else { if (addfit && x$int.only) { annotext <- cbind(sapply(c(yi, beta), atransf, targs), sapply(c(ci.lb, beta.ci.lb), atransf, targs), sapply(c(ci.ub, beta.ci.ub), atransf, targs)) } else { annotext <- cbind(sapply(yi, atransf, targs), sapply(ci.lb, atransf, targs), sapply(ci.ub, atransf, targs)) } } ### make sure order of intervals is always increasing tmp <- .psort(annotext[,2:3]) annotext[,2:3] <- tmp } else { if (addfit && x$int.only) { annotext <- cbind(c(yi, beta), c(ci.lb, beta.ci.lb), c(ci.ub, beta.ci.ub)) } else { annotext <- cbind(yi, ci.lb, ci.ub) } } if (showweights) { if (addfit && x$int.only) { annotext <- cbind(c(unname(weights),100), annotext) } else { annotext <- cbind(unname(weights), annotext) } } annotext <- .fcf(annotext, digits[[1]]) annotext <- sub("-", annosym[4], annotext, fixed=TRUE) if (missing(width)) { width <- apply(annotext, 2, function(x) max(nchar(x))) } else { if (length(width) == 1L) width <- rep(width, ncol(annotext)) } for (j in seq_len(ncol(annotext))) { annotext[,j] <- formatC(annotext[,j], width=width[j]) } if (showweights) { annotext <- cbind(annotext[,1], "% ", annotext[,2], annosym[1], annotext[,3], annosym[2], annotext[,4], annosym[3]) } else { annotext <- cbind(annotext[,1], annosym[1], annotext[,2], annosym[2], annotext[,3], annosym[3]) } annotext <- apply(annotext, 1, paste, collapse="") annotext[grepl("NA", annotext, fixed=TRUE)] <- "" par(family=names(fonts)[2], font=fonts[2]) if (addfit && x$int.only) { ltext(ddd$textpos[2], c(rows,-1), labels=annotext, pos=2, cex=cex, ...) } else { ltext(ddd$textpos[2], rows, labels=annotext, pos=2, cex=cex, ...) } par(family=names(fonts)[1], font=fonts[1]) } ### add yi points for (i in seq_len(k)) { ### need to skip missings, as if () check below will otherwise throw an error if (is.na(yi[i])) next if (yi[i] >= alim[1] && yi[i] <= alim[2]) lpoints(x=yi[i], y=rows[i], pch=pch[i], col=colout[i], cex=cex*psize[i], ...) } #lpoints(x=yi, y=rows, pch=pch, cex=cex*psize, ...) ### add horizontal line at 0 for the standard FE/RE model display if (x$int.only && addfit) labline(h=0, lty=lty[3], ...) 
### add header ltext(ddd$textpos[1], ylim[2]-(top-1)+1, header.left, pos=4, font=2, cex=cex, ...) ltext(ddd$textpos[2], ylim[2]-(top-1)+1, header.right, pos=2, font=2, cex=cex, ...) ######################################################################### ### return some information about plot invisibly res <- list(xlim=par("usr")[1:2], alim=alim, at=at, ylim=ylim, rows=rows, cex=cex, cex.lab=cex.lab, cex.axis=cex.axis) invisible(res) } metafor/R/profile.rma.mv.r0000644000176200001440000004501414046734035015144 0ustar liggesusersprofile.rma.mv <- function(fitted, sigma2, tau2, rho, gamma2, phi, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(fitted), must="rma.mv") if (steps < 2) stop(mstyle$stop("Argument 'steps' must be >= 2.")) x <- fitted parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } ddd <- list(...) if (.isTRUE(ddd$time)) time.start <- proc.time() if (!is.null(ddd$startmethod)) warning(mstyle$warning("Argument 'startmethod' has been deprecated.")) ######################################################################### ### check if user has not specified one of the sigma2, tau2, rho, gamma2, or phi arguments if (missing(sigma2) && missing(tau2) && missing(rho) && missing(gamma2) && missing(phi)) { mc <- match.call() ### total number of non-fixed components comps <- ifelse(x$withS, sum(!x$vc.fix$sigma2), 0) + ifelse(x$withG, sum(!x$vc.fix$tau2) + sum(!x$vc.fix$rho), 0) + ifelse(x$withH, sum(!x$vc.fix$gamma2) + sum(!x$vc.fix$phi), 0) if (comps == 0) stop(mstyle$stop("No components in the model for which a profile likelihood can be constructed.")) if (plot) { if (dev.cur() == 1) { par(mfrow=c(comps, 1)) #on.exit(par(mfrow=c(1,1)), add=TRUE) } } sav <- list() j <- 0 if (x$withS && any(!x$vc.fix$sigma2)) { for (pos in seq_len(x$sigma2s)[!x$vc.fix$sigma2]) { j <- j + 1 mc.vc <- mc mc.vc$sigma2 <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling sigma2 =", pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } if (x$withG) { if (any(!x$vc.fix$tau2)) { for (pos in seq_len(x$tau2s)[!x$vc.fix$tau2]) { j <- j + 1 mc.vc <- mc mc.vc$tau2 <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling tau2 =", pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } if (any(!x$vc.fix$rho)) { for (pos in seq_len(x$rhos)[!x$vc.fix$rho]) { j <- j + 1 mc.vc <- mc mc.vc$rho <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling rho =", pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } } if (x$withH) { if (any(!x$vc.fix$gamma2)) { for (pos in seq_len(x$gamma2s)[!x$vc.fix$gamma2]) { j <- j + 1 mc.vc <- mc mc.vc$gamma2 <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling gamma2 =", 
pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } if (any(!x$vc.fix$phi)) { for (pos in seq_len(x$phis)[!x$vc.fix$phi]) { j <- j + 1 mc.vc <- mc mc.vc$phi <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling phi =", pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } } ### if there is just one component, turn the list of lists into a simple list if (comps == 1) sav <- sav[[1]] sav$comps <- comps if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(sav) <- "profile.rma" return(invisible(sav)) } #if (missing(sigma2) && missing(tau2) && missing(rho) && missing(gamma2) && missing(phi)) # stop(mstyle$stop("Must specify one of the arguments 'sigma2', 'tau2', 'rho', 'gamma2', or 'phi'.")) ### check if user has specified more than one of these arguments if (sum(!missing(sigma2), !missing(tau2), !missing(rho), !missing(gamma2), !missing(phi)) > 1L) stop(mstyle$stop("Must specify only one of the arguments 'sigma2', 'tau2', 'rho', 'gamma2', or 'phi'.")) ### check if model actually contains (at least one) such a component and that it was actually estimated ### note: a component that is not in the model is NA; components that are fixed are TRUE if (!missing(sigma2) && (all(is.na(x$vc.fix$sigma2)) || all(x$vc.fix$sigma2))) stop(mstyle$stop("Model does not contain any (estimated) 'sigma2' components.")) if (!missing(tau2) && (all(is.na(x$vc.fix$tau2)) || all(x$vc.fix$tau2))) stop(mstyle$stop("Model does not contain any (estimated) 'tau2' components.")) if (!missing(rho) && c(all(is.na(x$vc.fix$rho)) || all(x$vc.fix$rho))) stop(mstyle$stop("Model does not contain any (estimated) 'rho' components.")) if (!missing(gamma2) && (all(is.na(x$vc.fix$gamma2)) || all(x$vc.fix$gamma2))) stop(mstyle$stop("Model does not contain any (estimated) 'gamma2' components.")) if (!missing(phi) && c(all(is.na(x$vc.fix$phi)) || all(x$vc.fix$phi))) stop(mstyle$stop("Model does not contain any (estimated) 'phi' components.")) ### check if user specified more than one sigma2, tau2, rho, gamma2, or rho component if (!missing(sigma2) && (length(sigma2) > 1L)) stop(mstyle$stop("Can only specify one 'sigma2' component.")) if (!missing(tau2) && (length(tau2) > 1L)) stop(mstyle$stop("Can only specify one 'tau2' component.")) if (!missing(rho) && (length(rho) > 1L)) stop(mstyle$stop("Can only specify one 'rho' component.")) if (!missing(gamma2) && (length(gamma2) > 1L)) stop(mstyle$stop("Can only specify one 'gamma2' component.")) if (!missing(phi) && (length(phi) > 1L)) stop(mstyle$stop("Can only specify one 'phi' component.")) ### check if user specified a logical if (!missing(sigma2) && is.logical(sigma2)) stop(mstyle$stop("Must specify the number for the 'sigma2' component.")) if (!missing(tau2) && is.logical(tau2)) stop(mstyle$stop("Must specify the number for the 'tau2' component.")) if (!missing(rho) && is.logical(rho)) stop(mstyle$stop("Must specify the number for the 'rho' component.")) if (!missing(gamma2) && is.logical(gamma2)) stop(mstyle$stop("Must specify the number for the 'gamma2' component.")) if (!missing(phi) && is.logical(phi)) stop(mstyle$stop("Must specify the number for the 'phi' component.")) ### check if user specified a component that does not exist if (!missing(sigma2) && (sigma2 > length(x$vc.fix$sigma2) || sigma2 <= 0)) stop(mstyle$stop("No such 'sigma2' component in the model.")) if (!missing(tau2) && (tau2 > length(x$vc.fix$tau2) || tau2 <= 0)) stop(mstyle$stop("No such 'tau2' 
component in the model.")) if (!missing(rho) && (rho > length(x$vc.fix$rho) || rho <= 0)) stop(mstyle$stop("No such 'rho' component in the model.")) if (!missing(gamma2) && (gamma2 > length(x$vc.fix$gamma2) || gamma2 <= 0)) stop(mstyle$stop("No such 'gamma2' component in the model.")) if (!missing(phi) && (phi > length(x$vc.fix$phi) || phi <= 0)) stop(mstyle$stop("No such 'phi' component in the model.")) ### check if user specified a component that was fixed if (!missing(sigma2) && x$vc.fix$sigma2[sigma2]) stop(mstyle$stop("Specified 'sigma2' component was fixed.")) if (!missing(tau2) && x$vc.fix$tau2[tau2]) stop(mstyle$stop("Specified 'tau2' component was fixed.")) if (!missing(rho) && x$vc.fix$rho[rho]) stop(mstyle$stop("Specified 'rho' component was fixed.")) if (!missing(gamma2) && x$vc.fix$gamma2[gamma2]) stop(mstyle$stop("Specified 'gamma2' component was fixed.")) if (!missing(phi) && x$vc.fix$phi[phi]) stop(mstyle$stop("Specified 'phi' component was fixed.")) ### if everything is good so far, get value of the variance component and set 'comp' sigma2.pos <- NA tau2.pos <- NA rho.pos <- NA gamma2.pos <- NA phi.pos <- NA if (!missing(sigma2)) { vc <- x$sigma2[sigma2] comp <- "sigma2" sigma2.pos <- sigma2 } if (!missing(tau2)) { vc <- x$tau2[tau2] comp <- "tau2" tau2.pos <- tau2 } if (!missing(rho)) { vc <- x$rho[rho] comp <- "rho" rho.pos <- rho } if (!missing(gamma2)) { vc <- x$gamma2[gamma2] comp <- "gamma2" gamma2.pos <- gamma2 } if (!missing(phi)) { vc <- x$phi[phi] comp <- "phi" phi.pos <- phi } #return(list(comp=comp, vc=vc)) if (missing(xlim)) { ### if the user has not specified xlim, set it automatically ### TODO: maybe try something based on CI later if (comp == "sigma2") { vc.lb <- max( 0, vc/4) vc.ub <- max(.1, vc*4) } if (comp == "tau2") { if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { vc.lb <- max( 0, vc/2) vc.ub <- max(.1, vc*2) } else { vc.lb <- max( 0, vc/4) vc.ub <- max(.1, vc*4) } } if (comp == "gamma2") { if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { vc.lb <- max( 0, vc/2) vc.ub <- max(.1, vc*2) } else { vc.lb <- max( 0, vc/4) vc.ub <- max(.1, vc*4) } } if (comp == "rho") { if (x$struct[1] == "CAR") { vc.lb <- max(0, vc-.5) vc.ub <- min(+.99999, vc+.5) } if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD"))) { vc.lb <- vc/2 vc.ub <- vc*2 } if (!is.element(x$struct[1], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD"))) { vc.lb <- max(-.99999, vc-.5) vc.ub <- min(+.99999, vc+.5) } } if (comp == "phi") { if (x$struct[2] == "CAR") { vc.lb <- max(0, vc-.5) vc.ub <- min(+.99999, vc+.5) } if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD"))) { vc.lb <- vc/2 vc.ub <- vc*2 } if (!is.element(x$struct[2], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD"))) { vc.lb <- max(-.99999, vc-.5) vc.ub <- min(+.99999, vc+.5) } } ### if that fails, throw an error if (is.na(vc.lb) || is.na(vc.ub)) stop(mstyle$stop("Cannot set 'xlim' automatically. 
Please set this argument manually.")) xlim <- c(vc.lb, vc.ub) } else { if (length(xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) xlim <- sort(xlim) if (is.element(comp, c("sigma2", "tau2", "gamma2"))) { if (xlim[1] < 0) stop(mstyle$stop("Lower bound for profiling must be >= 0.")) } if (comp == "rho") { if (is.element(x$struct[1], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")) && xlim[1] < 0) stop(mstyle$stop("Lower bound for profiling must be >= 0.")) if (xlim[1] < -1) stop(mstyle$stop("Lower bound for profiling must be >= -1.")) if (!is.element(x$struct[1], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")) && xlim[2] > 1) stop(mstyle$stop("Upper bound for profiling must be <= 1.")) } if (comp == "phi") { if (is.element(x$struct[2], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")) && xlim[1] < 0) stop(mstyle$stop("Lower bound for profiling must be >= 0.")) if (xlim[1] < -1) stop(mstyle$stop("Lower bound for profiling must be >= -1.")) if (!is.element(x$struct[2], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")) && xlim[2] > 1) stop(mstyle$stop("Upper bound for profiling must be <= 1.")) } } vcs <- seq(xlim[1], xlim[2], length.out=steps) #return(vcs) if (length(vcs) <= 1L) stop(mstyle$stop("Cannot set 'xlim' automatically. Please set this argument manually.")) if (parallel == "no") res <- pbapply::pblapply(vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE) if (parallel == "multicore") res <- pbapply::pblapply(vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE, cl=ncpus) #res <- parallel::mclapply(vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE, mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE) #res <- parallel::clusterApplyLB(cl, vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE) } else { res <- pbapply::pblapply(vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE, cl=cl) #res <- parallel::parLapply(cl, vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE) #res <- parallel::clusterApply(cl, vcs, .profile.rma.mv, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, parallel=parallel, profile=TRUE) } } lls <- sapply(res, function(x) x$ll) beta <- do.call("rbind", lapply(res, function(x) t(x$beta))) ci.lb <- do.call("rbind", lapply(res, function(x) t(x$ci.lb))) ci.ub <- do.call("rbind", lapply(res, function(x) t(x$ci.ub))) ######################################################################### if 
(any(lls >= logLik(x) + lltol, na.rm=TRUE)) warning(mstyle$warning("At least one profiled log-likelihood value is larger than the log-likelihood of the fitted model."), call.=FALSE) if (all(is.na(lls))) warning(mstyle$warning("All model fits failed. Cannot draw profile likelihood plot."), call.=FALSE) beta <- data.frame(beta) ci.lb <- data.frame(ci.lb) ci.ub <- data.frame(ci.ub) names(beta) <- rownames(x$beta) names(ci.lb) <- rownames(x$beta) names(ci.ub) <- rownames(x$beta) if (missing(ylim)) { if (any(!is.na(lls))) { if (xlim[1] <= vc && xlim[2] >= vc) { ylim <- range(c(logLik(x),lls), na.rm=TRUE) } else { ylim <- range(lls, na.rm=TRUE) } } else { ylim <- rep(logLik(x), 2) } ylim[1] <- ylim[1] - .1 ylim[2] <- ylim[2] + .1 } else { if (length(ylim) != 2L) stop(mstyle$stop("Argument 'ylim' should be a vector of length 2.")) ylim <- sort(ylim) } if (comp == "sigma2") { if (x$sigma2s == 1L) { xlab <- expression(paste(sigma^2, " Value")) title <- expression(paste("Profile Plot for ", sigma^2)) } else { xlab <- bquote(sigma[.(sigma2)]^2 ~ "Value") title <- bquote("Profile Plot for" ~ sigma[.(sigma2)]^2) } } if (comp == "tau2") { if (x$tau2s == 1L) { xlab <- expression(paste(tau^2, " Value")) title <- expression(paste("Profile Plot for ", tau^2)) } else { xlab <- bquote(tau[.(tau2)]^2 ~ "Value") title <- bquote("Profile Plot for" ~ tau[.(tau2)]^2) } } if (comp == "rho") { if (x$rhos == 1L) { xlab <- expression(paste(rho, " Value")) title <- expression(paste("Profile Plot for ", rho)) } else { xlab <- bquote(rho[.(rho)] ~ "Value") title <- bquote("Profile Plot for" ~ rho[.(rho)]) } } if (comp == "gamma2") { if (x$gamma2s == 1L) { xlab <- expression(paste(gamma^2, " Value")) title <- expression(paste("Profile Plot for ", gamma^2)) } else { xlab <- bquote(gamma[.(gamma2)]^2 ~ "Value") title <- bquote("Profile Plot for" ~ gamma[.(gamma2)]^2) } } if (comp == "phi") { if (x$phis == 1L) { xlab <- expression(paste(phi, " Value")) title <- expression(paste("Profile Plot for ", phi)) } else { xlab <- bquote(phi[.(phi)] ~ "Value") title <- bquote("Profile Plot for" ~ phi[.(phi)]) } } sav <- list(vc=vcs, ll=lls, beta=beta, ci.lb=ci.lb, ci.ub=ci.ub, comps=1, ylim=ylim, method=x$method, vc=vc, maxll=logLik(x), xlab=xlab, title=title) names(sav)[1] <- switch(comp, sigma2="sigma2", tau2="tau2", rho="rho", gamma2="gamma2", phi="phi") class(sav) <- "profile.rma" ######################################################################### if (plot) plot(sav, pch=pch, cline=cline, ...) ######################################################################### if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } invisible(sav) } metafor/R/baujat.r0000644000176200001440000000006213457322061013542 0ustar liggesusersbaujat <- function(x, ...) UseMethod("baujat") metafor/R/leave1out.rma.mh.r0000644000176200001440000001013014027633342015360 0ustar liggesusersleave1out.rma.mh <- function(x, digits, transf, targs, progbar=FALSE, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.mh") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!x$int.only) stop(mstyle$stop("Method only applicable for models without moderators.")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) .chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### beta <- rep(NA_real_, x$k.f) se <- rep(NA_real_, x$k.f) zval <- rep(NA_real_, x$k.f) pval <- rep(NA_real_, x$k.f) ci.lb <- rep(NA_real_, x$k.f) ci.ub <- rep(NA_real_, x$k.f) QE <- rep(NA_real_, x$k.f) QEp <- rep(NA_real_, x$k.f) #tau2 <- rep(NA_real_, x$k.f) I2 <- rep(NA_real_, x$k.f) H2 <- rep(NA_real_, x$k.f) ### note: skipping NA cases if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!x$not.na[i]) next if (is.element(x$measure, c("RR","OR","RD"))) { res <- try(suppressWarnings(rma.mh(ai=x$ai.f, bi=x$bi.f, ci=x$ci.f, di=x$di.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=-i)), silent=TRUE) } else { res <- try(suppressWarnings(rma.mh(x1i=x$x1i.f, x2i=x$x2i.f, t1i=x$t1i.f, t2i=x$t2i.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=-i)), silent=TRUE) } if (inherits(res, "try-error")) next beta[i] <- res$beta se[i] <- res$se zval[i] <- res$zval pval[i] <- res$pval ci.lb[i] <- res$ci.lb ci.ub[i] <- res$ci.ub QE[i] <- res$QE QEp[i] <- res$QEp #tau2[i] <- res$tau2 I2[i] <- res$I2 H2[i] <- res$H2 } if (progbar) pbapply::closepb(pbar) ######################################################################### ### if requested, apply transformation function if (.isTRUE(transf) && is.element(x$measure, c("OR","RR","IRR"))) ### if transf=TRUE, apply exp transformation to ORs, RRs, and IRRs transf <- exp if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(estimate=beta[x$not.na], se=se[x$not.na], zval=zval[x$not.na], pval=pval[x$not.na], ci.lb=ci.lb[x$not.na], ci.ub=ci.ub[x$not.na], Q=QE[x$not.na], Qp=QEp[x$not.na], I2=I2[x$not.na], H2=H2[x$not.na]) out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(estimate=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, Q=QE, Qp=QEp, I2=I2, H2=H2) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits out$transf <- transf if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- "list.rma" return(out) } 
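### Illustrative usage sketch for the leave1out() method defined above. The model
### call and dataset below are assumptions based on the classic BCG vaccine example
### commonly used with this package (dat.bcg), not code taken from this file:
### res <- rma.mh(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
### leave1out(res)              # leave-one-out estimates on the log odds ratio scale
### leave1out(res, transf=exp)  # the same estimates back-transformed to odds ratios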
metafor/R/weights.rma.peto.r0000644000176200001440000000275513775620005015507 0ustar liggesusersweights.rma.peto <- function(object, type="diagonal", ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.peto") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("diagonal", "matrix")) x <- object ######################################################################### n1i <- x$ai + x$bi n2i <- x$ci + x$di Ni <- x$ai + x$bi + x$ci + x$di xt <- x$ai + x$ci yt <- x$bi + x$di wi <- xt * yt * (n1i/Ni) * (n2i/Ni) / (Ni - 1) ######################################################################### if (type == "diagonal") { weight <- rep(NA_real_, x$k.f) weight[x$not.na] <- wi / sum(wi) * 100 names(weight) <- x$slab if (na.act == "na.omit") weight <- weight[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in weights.")) return(weight) } if (type == "matrix") { Wfull <- matrix(NA_real_, nrow=x$k.f, ncol=x$k.f) Wfull[x$not.na, x$not.na] <- diag(wi) rownames(Wfull) <- x$slab colnames(Wfull) <- x$slab if (na.act == "na.omit") Wfull <- Wfull[x$not.na, x$not.na, drop=FALSE] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(Wfull) } } metafor/R/profile.rma.ls.r0000644000176200001440000002016714046734041015137 0ustar liggesusersprofile.rma.ls <- function(fitted, alpha, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(fitted), must="rma.ls") if (steps < 2) stop(mstyle$stop("Argument 'steps' must be >= 2.")) x <- fitted parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } ddd <- list(...) 
if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### ### check if user has not specified alpha argument if (missing(alpha)) { mc <- match.call() ### total number of non-fixed components comps <- sum(!x$alpha.fix) if (comps == 0) stop(mstyle$stop("No components in the model for which a profile likelihood can be constructed.")) if (plot) { if (dev.cur() == 1) { par(mfrow=c(comps, 1)) #on.exit(par(mfrow=c(1,1)), add=TRUE) } } sav <- list() j <- 0 if (any(!x$alpha.fix)) { for (pos in seq_len(x$alphas)[!x$alpha.fix]) { j <- j + 1 mc.vc <- mc mc.vc$alpha <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling alpha =", pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } ### if there is just one component, turn the list of lists into a simple list if (comps == 1) sav <- sav[[1]] sav$comps <- comps if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(sav) <- "profile.rma" return(invisible(sav)) } ### check if model actually contains (at least one) such a component and that it was actually estimated if (!missing(alpha) && all(x$alpha.fix)) stop(mstyle$stop("Model does not contain any estimated 'alpha' components.")) ### check if user specified more than one alpha component if (!missing(alpha) && (length(alpha) > 1L)) stop(mstyle$stop("Can only specify one 'alpha' component.")) ### check if user specified a logical if (!missing(alpha) && is.logical(alpha)) stop(mstyle$stop("Must specify the number for the 'alpha' component.")) ### check if user specified a component that does not exist if (!missing(alpha) && (alpha > x$alphas || alpha <= 0)) stop(mstyle$stop("No such 'alpha' component in the model.")) ### check if user specified a component that was fixed if (!missing(alpha) && x$alpha.fix[alpha]) stop(mstyle$stop("Specified 'alpha' component was fixed.")) ### if everything is good so far, get value of the component and set 'comp' alpha.pos <- NA if (!missing(alpha)) { vc <- x$alpha[alpha] comp <- "alpha" alpha.pos <- alpha } #return(list(comp=comp, vc=vc)) if (missing(xlim)) { ### if the user has not specified xlim, set it automatically if (comp == "alpha") { if (is.na(x$se.alpha[alpha])) { vc.lb <- vc - 4 * abs(vc) vc.ub <- vc + 4 * abs(vc) } else { vc.lb <- vc - qnorm(.995) * x$se.alpha[alpha] vc.ub <- vc + qnorm(.995) * x$se.alpha[alpha] } } ### if that fails, throw an error if (is.na(vc.lb) || is.na(vc.ub)) stop(mstyle$stop("Cannot set 'xlim' automatically. Please set this argument manually.")) xlim <- sort(c(vc.lb, vc.ub)) } else { if (length(xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) xlim <- sort(xlim) } vcs <- seq(xlim[1], xlim[2], length.out=steps) if (length(vcs) <= 1L) stop(mstyle$stop("Cannot set 'xlim' automatically. 
Please set this argument manually.")) if (parallel == "no") res <- pbapply::pblapply(vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE) if (parallel == "multicore") res <- pbapply::pblapply(vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE, cl=ncpus) #res <- parallel::mclapply(vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE, mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE) #res <- parallel::clusterApplyLB(cl, vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE) } else { res <- pbapply::pblapply(vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE, cl=cl) #res <- parallel::parLapply(cl, vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE) #res <- parallel::clusterApply(cl, vcs, .profile.rma.ls, obj=x, comp=comp, alpha.pos=alpha.pos, parallel=parallel, profile=TRUE) } } lls <- sapply(res, function(x) x$ll) beta <- do.call("rbind", lapply(res, function(x) t(x$beta))) ci.lb <- do.call("rbind", lapply(res, function(x) t(x$ci.lb))) ci.ub <- do.call("rbind", lapply(res, function(x) t(x$ci.ub))) ######################################################################### if (any(lls >= logLik(x) + lltol, na.rm=TRUE)) warning(mstyle$warning("At least one profiled log-likelihood value is larger than the log-likelihood of the fitted model."), call.=FALSE) if (all(is.na(lls))) warning(mstyle$warning("All model fits failed. Cannot draw profile likelihood plot."), call.=FALSE) beta <- data.frame(beta) ci.lb <- data.frame(ci.lb) ci.ub <- data.frame(ci.ub) names(beta) <- rownames(x$beta) names(ci.lb) <- rownames(x$beta) names(ci.ub) <- rownames(x$beta) if (missing(ylim)) { if (any(!is.na(lls))) { if (xlim[1] <= vc && xlim[2] >= vc) { ylim <- range(c(logLik(x),lls), na.rm=TRUE) } else { ylim <- range(lls, na.rm=TRUE) } } else { ylim <- rep(logLik(x), 2) } ylim[1] <- ylim[1] - .1 ylim[2] <- ylim[2] + .1 } else { if (length(ylim) != 2L) stop(mstyle$stop("Argument 'ylim' should be a vector of length 2.")) ylim <- sort(ylim) } if (comp == "alpha") { if (x$alphas == 1L) { xlab <- expression(paste(alpha, " Value")) title <- expression(paste("Profile Plot for ", alpha)) } else { if (.isTRUE(ddd$sub1)) alpha <- alpha - 1 xlab <- bquote(alpha[.(alpha)] ~ "Value") title <- bquote("Profile Plot for" ~ alpha[.(alpha)]) } } sav <- list(vc=vcs, ll=lls, beta=beta, ci.lb=ci.lb, ci.ub=ci.ub, comps=1, ylim=ylim, method=x$method, vc=vc, maxll=logLik(x), xlab=xlab, title=title) names(sav)[1] <- "alpha" class(sav) <- "profile.rma" ######################################################################### if (plot) plot(sav, pch=pch, cline=cline, ...) ######################################################################### if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } invisible(sav) } metafor/R/labbe.r0000644000176200001440000000006013457322061013337 0ustar liggesuserslabbe <- function(x, ...) UseMethod("labbe") metafor/R/robust.rma.mv.r0000644000176200001440000001266214036345354015026 0ustar liggesusersrobust.rma.mv <- function(x, cluster, adjust=TRUE, digits, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.mv") if (missing(cluster)) stop(mstyle$stop("Must specify 'cluster' variable.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } level <- ifelse(x$level == 0, 1, ifelse(x$level >= 1, (100-x$level)/100, ifelse(x$level > .5, 1-x$level, x$level))) ######################################################################### ### process cluster variable ### note: cluster variable is assumed to be of the same length as the original data passed to the model fitting function ### so we have to apply the same subsetting (if necessary) and removing of missings as done during model fitting if (length(cluster) != x$k.all) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) cluster <- cluster[x$subset] cluster <- cluster[x$not.na] if (anyNA(cluster)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster) == 0L) stop(mstyle$stop(paste0("Cannot find 'cluster' variable (or it has zero length)."))) ### number of clusters n <- length(unique(cluster)) ### compute degrees of freedom ### note: Stata with vce(robust) also uses n-p as the dfs, but with vce(cluster ) always uses n-1 (which seems inconsistent) dfs <- n - x$p ### check if dfs are positive (note: this also handles the case where there is a single cluster) if (dfs <= 0) stop(mstyle$stop(paste0("Number of clusters (", n, ") must be larger than the number of fixed effects (", x$p, ")."))) ### note: since we use split() below and then put things back together into a block-diagonal matrix, ### we have to make sure everything is properly ordered by the cluster variable; otherwise, the 'meat' ### block-diagonal matrix is not in the same order as the rest; so we sort all relevant variables by ### the cluster variable (including the cluster variable itself) ocl <- order(cluster) cluster <- cluster[ocl] ### construct bread = (X'WX)^-1 X'W, where W is the weight matrix if (is.null(x$W)) { ### if no weights were specified, then vb = (X'WX)^-1, so we can use that part W <- try(chol2inv(chol(x$M[ocl,ocl])), silent=TRUE) if (inherits(W, "try-error")) stop(mstyle$stop("Cannot invert marginal var-cov matrix.")) bread <- x$vb %*% crossprod(x$X[ocl,], W) } else { ### if weights were specified, then vb cannot be used A <- x$W[ocl,ocl] stXAX <- chol2inv(chol(as.matrix(t(x$X[ocl,]) %*% A %*% x$X[ocl,]))) ### as.matrix() to avoid some issues with the matrix being not symmetric (when it must be) bread <- stXAX %*% crossprod(x$X[ocl,], A) } ### construct meat part ei <- c(x$yi - x$X %*% x$beta) ### use this instead of resid(), since this guarantees that the length is correct ei <- ei[ocl] cluster <- factor(cluster, levels=unique(cluster)) if (x$sparse) { meat <- bdiag(lapply(split(ei, cluster), function(e) tcrossprod(e))) } else { meat <- bldiag(lapply(split(ei, cluster), function(e) tcrossprod(e))) } ### construct robust var-cov matrix vb <- bread %*% meat %*% t(bread) ### apply adjustments to vb as needed ### suggested in Hedges, Tipton, & Johnson (2010) -- analogous to HC1 adjustment if (.isTRUE(adjust)) vb <- (n / dfs) * vb ### what Stata does if (is.character(adjust) && (adjust=="Stata" || adjust=="Stata1")) vb <- (n / (n-1) * (x$k-1) / (x$k-x$p)) * vb ### when the model was fitted with regress if 
(is.character(adjust) && adjust=="Stata2") vb <- (n / (n-1)) * vb ### when the model was fitted with mixed ### dim(vb) is pxp and not sparse, so this won't blow up ### as.matrix() helps to avoid some issues with 'vb' appearing as non-symmetric (when it must be) if (x$sparse) vb <- as.matrix(vb) ### prepare results beta <- x$beta se <- sqrt(diag(vb)) names(se) <- NULL zval <- c(beta/se) pval <- 2*pt(abs(zval), df=dfs, lower.tail=FALSE) crit <- qt(level/2, df=dfs, lower.tail=FALSE) ci.lb <- c(beta - crit * se) ci.ub <- c(beta + crit * se) QM <- try(as.vector(t(beta)[x$btt] %*% chol2inv(chol(vb[x$btt,x$btt])) %*% beta[x$btt]), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA QM <- QM / x$m ### note: m is the number of coefficients in btt, not the number of clusters QMdf <- c(x$m, dfs) QMp <- pf(QM, df1=x$m, df2=dfs, lower.tail=FALSE) ######################################################################### ### table of cluster variable tcl <- table(cluster) res <- x res$digits <- digits ### replace elements with robust results res$ddf <- dfs res$dfs <- dfs res$vb <- vb res$se <- se res$zval <- zval res$pval <- pval res$ci.lb <- ci.lb res$ci.ub <- ci.ub res$QM <- QM res$QMdf <- QMdf res$QMp <- QMp res$n <- n res$tcl <- tcl res$test <- "t" res$meat <- matrix(NA_real_, nrow=nrow(meat), ncol=ncol(meat)) res$meat[ocl,ocl] <- as.matrix(meat) class(res) <- c("robust.rma", "rma", "rma.mv") return(res) } metafor/R/hc.rma.uni.r0000644000176200001440000001212413770364235014247 0ustar liggesusershc.rma.uni <- function(object, digits, transf, targs, control, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.uni", notav=c("rma.ls", "rma.uni.selmodel")) x <- object if (!x$int.only) stop(mstyle$stop("Method only applicable for models without moderators.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL yi <- x$yi vi <- x$vi k <- length(yi) if (k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (!x$allvipos) stop(mstyle$stop("Cannot use method when one or more sampling variances are non-positive.")) level <- ifelse(x$level == 0, 1, ifelse(x$level >= 1, (100-x$level)/100, ifelse(x$level > .5, 1-x$level, x$level))) if (missing(control)) control <- list() ######################################################################### ### set control parameters for uniroot() and possibly replace with user-defined values con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, verbose=FALSE) con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] ######################################################################### ### original code by Henmi & Copas (2012), modified by Michael Dewey, small adjustments ### for consistency with other functions in the metafor package by Wolfgang Viechtbauer wi <- 1/vi ### fixed effects weights W1 <- sum(wi) W2 <- sum(wi^2) / W1 W3 <- sum(wi^3) / W1 W4 <- sum(wi^4) / W1 ### fixed-effects estimate of theta beta <- sum(wi*yi) / W1 ### Q statistic Q <- sum(wi * ((yi - beta)^2)) ### DL estimate of tau^2 tau2 <- max(0, (Q - (k-1)) / (W1 - W2)) vb <- (tau2 * W2 + 1) / W1 ### estimated Var of b se <- sqrt(vb) ### estimated SE of b VR <- 1 + tau2 * W2 ### estimated Var of R SDR <- sqrt(VR) ### estimated SD of R ### conditional mean of Q given R=r EQ <- function(r) (k - 1) + tau2 * (W1 - W2) + (tau2^2)*((1/VR^2) * (r^2) - 1/VR) * 
(W3 - W2^2) ### conditional variance of Q given R=r VQ <- function(r) { rsq <- r^2 recipvr2 <- 1 / VR^2 2 * (k - 1) + 4 * tau2 * (W1 - W2) + 2 * tau2^2 * (W1*W2 - 2*W3 + W2^2) + 4 * tau2^2 * (recipvr2 * rsq - 1/VR) * (W3 - W2^2) + 4 * tau2^3 * (recipvr2 * rsq - 1/VR) * (W4 - 2*W2*W3 + W2^3) + 2 * tau2^4 * (recipvr2 - 2 * (1/VR^3) * rsq) * (W3 - W2^2)^2 } scale <- function(r){VQ(r)/EQ(r)} ### scale parameter of the gamma distribution shape <- function(r){EQ(r)^2/VQ(r)} ### shape parameter of the gamma distribution ### inverse of f finv <- function(f) (W1/W2 - 1) * ((f^2) - 1) + (k - 1) ### equation to be solved eqn <- function(x) { integrand <- function(r) { pgamma(finv(r/x), scale=scale(SDR*r), shape=shape(SDR*r))*dnorm(r) } integral <- integrate(integrand, lower=x, upper=Inf)$value val <- integral - level / 2 #cat(val, "\n") val } t0 <- try(uniroot(eqn, lower=0, upper=2, tol=con$tol, maxiter=con$maxiter)) if (inherits(t0, "try-error")) stop(mstyle$stop("Error in uniroot().")) t0 <- t0$root u0 <- SDR * t0 ### (approximate) percentage point for the distribution of U ######################################################################### ci.lb <- beta - u0 * se ### lower CI bound ci.ub <- beta + u0 * se ### upper CI bound beta.rma <- x$beta se.rma <- x$se ci.lb.rma <- x$ci.lb ci.ub.rma <- x$ci.ub ### if requested, apply transformation to yi's and CI bounds if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) beta.rma <- sapply(beta.rma, transf) se <- NA se.rma <- NA ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) ci.lb.rma <- sapply(ci.lb.rma, transf) ci.ub.rma <- sapply(ci.ub.rma, transf) } else { beta <- sapply(beta, transf, targs) beta.rma <- sapply(beta.rma, transf, targs) se <- NA se.rma <- NA ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) ci.lb.rma <- sapply(ci.lb.rma, transf, targs) ci.ub.rma <- sapply(ci.ub.rma, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] tmp <- .psort(ci.lb.rma, ci.ub.rma) ci.lb.rma <- tmp[,1] ci.ub.rma <- tmp[,2] ######################################################################### res <- list(beta=beta, se=se, ci.lb=ci.lb, ci.ub=ci.ub, beta.rma=beta.rma, se.rma=se.rma, ci.lb.rma=ci.lb.rma, ci.ub.rma=ci.ub.rma, method="DL", method.rma=x$method, tau2=tau2, tau2.rma=x$tau2, digits=digits) class(res) <- "hc.rma.uni" return(res) } metafor/R/print.confint.rma.r0000644000176200001440000000232113770373461015655 0ustar liggesusersprint.confint.rma <- function(x, digits=x$digits, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="confint.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") if (names(x)[1] == "fixed") { res.fixed <- cbind(.fcf(x$fixed[,1,drop=FALSE], digits[["est"]]), .fcf(x$fixed[,2:3,drop=FALSE], digits[["ci"]])) tmp <- capture.output(print(res.fixed, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) } if (is.element("random", names(x))) { if (names(x)[1] == "fixed") cat("\n") res.random <- .fcf(x$random, digits[["var"]]) res.random[,2] <- paste0(x$lb.sign, res.random[,2]) res.random[,3] <- paste0(x$ub.sign, res.random[,3]) tmp <- capture.output(print(res.random, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) ### this can only (currently) happen for 'rma.uni' models if (x$ci.null) message(mstyle$message(paste0("\nThe upper and lower CI bounds for tau^2 both fall below ", x$tau2.min, ".\nThe CIs are therefore equal to the null/empty set."))) } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/aggregate.escalc.r0000644000176200001440000002643114031024533015453 0ustar liggesusersaggregate.escalc <- function(x, cluster, time, V, struct="CS", rho, phi, weighted=TRUE, fun, na.rm=TRUE, subset, select, digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="escalc") if (any(!is.element(struct, c("ID","CS","CAR","CS+CAR")))) stop(mstyle$stop("Unknown 'struct' specified.")) if (length(na.rm) == 1L) na.rm <- c(na.rm, na.rm) k <- nrow(x) ######################################################################### ### extract V, cluster, and subset variables mf <- match.call() mf.V <- mf[[match("V", names(mf))]] mf.cluster <- mf[[match("cluster", names(mf))]] mf.time <- mf[[match("time", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] V <- eval(mf.V, x, enclos=sys.frame(sys.parent())) # NULL if user does not specify this cluster <- eval(mf.cluster, x, enclos=sys.frame(sys.parent())) # NULL if user does not specify this time <- eval(mf.time, x, enclos=sys.frame(sys.parent())) # NULL if user does not specify this subset <- eval(mf.subset, x, enclos=sys.frame(sys.parent())) # NULL if user does not specify this ######################################################################### ### checks on cluster variable if (is.null(cluster)) stop(mstyle$stop("Must specify 'cluster' variable.")) if (anyNA(cluster)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster) != k) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not match length of data (", k, ")."))) ucluster <- unique(cluster) n <- length(ucluster) ######################################################################### ### get vi variable if (!is.null(attr(x, "vi.names"))) { # if vi.names attributes is available vi.name <- attr(x, "vi.names")[1] # take the first entry to be the vi variable } else { # if not, see if 'vi' is in the object and assume that is the vi variable if (!is.element("vi", names(x))) stop(mstyle$stop("Cannot determine name of the 'vi' variable.")) vi.name <- "vi" } if (is.null(x[[vi.name]])) stop(mstyle$stop("Cannot find 'vi' variable in data frame.")) ######################################################################### if (is.null(V)) { ### if V is not specified vi <- x[[vi.name]] ### construct V matrix based on the specified structure if (struct=="ID") R <- diag(1, nrow=k, ncol=k) if (is.element(struct, c("CS","CS+CAR"))) { if (missing(rho)) 
stop(mstyle$stop("Must specify 'rho' for this var-cov structure.")) if (length(rho) == 1L) rho <- rep(rho, n) if (length(rho) != n) stop(mstyle$stop(paste0("Length of 'rho' (", length(rho), ") does not match the number of clusters (", n, ")."))) if (any(rho > 1) || any(rho < -1)) stop(mstyle$stop("Value(s) of 'rho' must be in [-1,1].")) } if (is.element(struct, c("CAR","CS+CAR"))) { if (missing(phi)) stop(mstyle$stop("Must specify 'phi' for this var-cov structure.")) if (length(phi) == 1L) phi <- rep(phi, n) if (length(phi) != n) stop(mstyle$stop(paste0("Length of 'phi' (", length(phi), ") does not match the number of clusters (", n, ")."))) if (any(phi > 1) || any(phi < 0)) stop(mstyle$stop("Value(s) of 'phi' must be in [0,1].")) ### checks on time variable if (is.null(time)) stop(mstyle$stop("Must specify 'time' variable for this var-cov structure.")) if (length(time) != k) stop(mstyle$stop(paste0("Length of variable specified via 'time' (", length(time), ") does not match length of data (", k, ")."))) } if (struct=="CS") { R <- matrix(0, nrow=k, ncol=k) for (i in 1:n) { R[cluster == ucluster[i], cluster == ucluster[i]] <- rho[i] } } if (struct == "CAR") { R <- matrix(0, nrow=k, ncol=k) for (i in 1:n) { R[cluster == ucluster[i], cluster == ucluster[i]] <- outer(time[cluster == ucluster[i]], time[cluster == ucluster[i]], function(x,y) phi[i]^(abs(x-y))) } } if (struct == "CS+CAR") { R <- matrix(0, nrow=k, ncol=k) for (i in 1:n) { R[cluster == ucluster[i], cluster == ucluster[i]] <- rho[i] + (1 - rho[i]) * outer(time[cluster == ucluster[i]], time[cluster == ucluster[i]], function(x,y) phi[i]^(abs(x-y))) } } diag(R) <- 1 S <- diag(sqrt(as.vector(vi)), nrow=k, ncol=k) V <- S %*% R %*% S } else { ### if V is specified if (.is.vector(V)) { if (length(V) == 1L) V <- rep(V, k) if (length(V) != k) stop(mstyle$stop(paste0("Length of 'V' (", length(V), ") does not match length of data frame (", k, ")."))) V <- diag(as.vector(V), nrow=k, ncol=k) } if (is.data.frame(V)) V <- as.matrix(V) if (!is.null(dimnames(V))) V <- unname(V) if (!.is.square(V)) stop(mstyle$stop("'V' must be a square matrix.")) if (!isSymmetric(V)) stop(mstyle$stop("'V' must be a symmetric matrix.")) if (nrow(V) != k) stop(mstyle$stop(paste0("Dimensions of 'V' (", nrow(V), "x", ncol(V), ") do not match length of data frame (", k, ")."))) ### check that covariances are really 0 for estimates belonging to different clusters ### note: if na.rm[1] is FALSE, there may be missings in V, so skip check in those clusters for (i in 1:n) { if (any(abs(V[cluster == ucluster[i], cluster != ucluster[i]]) >= .Machine$double.eps, na.rm=TRUE)) warning(mstyle$warning(paste0("Estimates in cluster '", ucluster[i], "' appear to have non-zero covariances with estimates belonging to a different cluster.")), call.=FALSE) } } if (!is.null(attr(x, "yi.names"))) { # if yi.names attributes is available yi.name <- attr(x, "yi.names")[1] # take the first entry to be the yi variable } else { # if not, see if 'yi' is in the object and assume that is the yi variable if (!is.element("yi", names(x))) stop(mstyle$stop("Cannot determine name of the 'yi' variable.")) yi.name <- "yi" } if (is.null(x[[yi.name]])) stop(mstyle$stop("Cannot find 'yi' variable in data frame.")) ### note: there may be multiple yi/vi pairs; only first will be used yi <- as.vector(x[[yi.name]]) ### if 'subset' is not null, apply subset if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) x <- x[subset,,drop=FALSE] yi <- yi[subset] V <- V[subset,subset,drop=FALSE] cluster <- 
cluster[subset] k <- nrow(x) ucluster <- unique(cluster) n <- length(ucluster) if (k == 0L) stop(mstyle$stop("Processing terminated since k == 0.")) } ### remove missings in yi/vi/V if na.rm[1] is TRUE if (na.rm[1]) { has.na <- is.na(yi) | .anyNAv(V) not.na <- !has.na if (any(has.na)) { x <- x[not.na,] yi <- yi[not.na] V <- V[not.na,not.na,drop=FALSE] cluster <- cluster[not.na] } k <- nrow(x) ucluster <- unique(cluster) n <- length(ucluster) if (k == 0L) stop(mstyle$stop("Processing terminated since k == 0.")) } ### check that 'V' is positive definite (in each cluster) all.pd <- TRUE for (i in 1:n) { Vi <- V[cluster == ucluster[i], cluster == ucluster[i]] if (!anyNA(Vi) && any(eigen(Vi, symmetric=TRUE, only.values=TRUE)$values <= .Machine$double.eps)) { all.pd <- FALSE warning(mstyle$warning(paste0("'V' appears to be not positive definite in cluster ", ucluster[i], ".")), call.=FALSE) } } if (!all.pd) stop(mstyle$stop("Cannot aggregate estimates with a non-positive-definite 'V' matrix.")) ### compute aggregated estimates and corresponding sampling variances yi.agg <- rep(NA_real_, n) vi.agg <- rep(NA_real_, n) for (i in 1:n) { Vi <- V[cluster == ucluster[i], cluster == ucluster[i]] if (weighted) { Wi <- try(chol2inv(chol(Vi)), silent=FALSE) if (inherits(Wi, "try-error")) stop(mstyle$stop(paste0("Cannot take inverse of 'V' in cluster ", ucluster[i], "."))) sumWi <- sum(Wi) yi.agg[i] <- sum(Wi %*% cbind(yi[cluster == ucluster[i]])) / sumWi vi.agg[i] <- 1 / sumWi } else { ki <- sum(cluster == ucluster[i]) yi.agg[i] <- sum(yi[cluster == ucluster[i]]) / ki vi.agg[i] <- sum(Vi) / ki^2 } } if (!missing(fun)) { if (!is.list(fun) || length(fun) != 3 || any(sapply(fun, function(f) !is.function(f)))) stop(mstyle$stop("Argument 'fun' must be a list of functions of length 3.")) fun1 <- fun[[1]] fun2 <- fun[[2]] fun3 <- fun[[3]] } else { fun1 <- function(x) { m <- mean(x, na.rm=na.rm[2]) if (is.nan(m)) NA else m } fun2 <- fun1 fun3 <- function(x) { if (na.rm[2]) { tab <- table(na.omit(x)) #tab <- table(x, useNA=ifelse(na.rm[2], "no", "ifany")) } else { tab <- table(x, useNA="ifany") } val <- tail(names(sort(tab)), 1) if (is.null(val)) NA else val } } ### turn 'cluster' into a factor with the desired levels, such that split() will give the same order fcluster <- factor(cluster, levels=ucluster) xsplit <- split(x, fcluster) xagg <- lapply(xsplit, function(xi) { tmp <- lapply(xi, function(xij) { if (inherits(xij, c("numeric","integer"))) { fun1(xij) } else if (inherits(xij, c("logical"))) { fun2(xij) } else { fun3(xij) } }) as.data.frame(tmp) }) xagg <- do.call(rbind, xagg) ### turn variables that were factors back into factors facs <- sapply(x, is.factor) if (any(facs)) { for (j in which(facs)) { xagg[[j]] <- factor(xagg[[j]]) } } ### put yi.agg and vi.agg into the aggregate data at their respective positions xagg[which(names(xagg) == yi.name)] <- yi.agg xagg[which(names(xagg) == vi.name)] <- vi.agg ### add back some attributes measure <- attr(x[[yi.name]], "measure") if (is.null(measure)) measure <- "GEN" attr(xagg[[yi.name]], "measure") <- measure attr(xagg, "yi.names") <- yi.name attr(xagg, "vi.names") <- vi.name if (!missing(digits)) { attr(xagg, "digits") <- .get.digits(digits=digits, xdigits=attr(x, "digits"), dmiss=FALSE) } else { attr(xagg, "digits") <- attr(x, "digits") } if (is.null(attr(xagg, "digits"))) ### in case x no longer has a 'digits' attribute attr(xagg, "digits") <- 4 class(xagg) <- c("escalc", "data.frame") ### if 'select' is not missing, select variables to include in the output if 
(!missing(select)) { nl <- as.list(seq_along(x)) names(nl) <- names(x) sel <- eval(substitute(select), nl, parent.frame()) xagg <- xagg[,sel,drop=FALSE] } rownames(xagg) <- NULL return(xagg) } metafor/R/rma.mv.r0000644000176200001440000026076014055716524013517 0ustar liggesusers### fixed/random/mixed-effects multivariate/multilevel model with: ### - possibly one or multiple random intercepts (sigma2) with potentially known correlation matrices ### - possibly correlated random effects for arms/groups/levels within studies (tau2 and rho for 1st term, gamma2 and phi for 2nd term) ### model also allows for correlated sampling errors via non-diagonal V matrix # V = variance-covariance matrix of the sampling errors # sigma2 = (preset) value(s) for the variance of the random intercept(s) # tau2 = (preset) value(s) for the variance of the random effects # rho = (preset) value(s) for the correlation(s) between random effects # gamma2 = (preset) value(s) for the variance of the random effects # phi = (preset) value(s) for the correlation(s) between random effects ### structures when there is an '~ inner | outer' term in the random argument: ### - CS (compound symmetry) ### - HCS (heteroscedastic compound symmetry) ### - UN (general positive-definite matrix with no structure) ### - UNR (general positive-definite correlation matrix with a single tau2/gamma2 value) ### - AR (AR1 structure with a single tau2/gamma2 value and autocorrelation rho/phi) ### - HAR (heteroscedastic AR1 structure with multiple tau2/gamma2 values and autocorrelation rho/phi) ### - CAR (continuous time AR1 structure) ### - ID (same as CS but with rho/phi=0) ### - DIAG (same as HCS but with rho/phi=0) ### - SPEXP/SPGAU/SPLIN/SPRAT/SPSPH (spatial structures: exponential, gaussian, linear, rational quadratic, spherical) ### - PHYBM/PHYPL/PHYPD (phylogenetic structures: Brownian motion, Pagel's lambda, Pagel's delta) rma.mv <- function(yi, V, W, mods, random, struct="CS", intercept=TRUE, data, slab, subset, ### add ni as argument in the future method="REML", test="z", dfs="residual", level=95, digits, btt, R, Rscale="cor", sigma2, tau2, rho, gamma2, phi, sparse=FALSE, verbose=FALSE, control, ...) { ######################################################################### ###### setup ### check argument specifications mstyle <- .get.mstyle("crayon" %in% .packages()) if (!is.element(method, c("FE","EE","CE","ML","REML"))) stop(mstyle$stop("Unknown 'method' specified.")) if (any(!is.element(struct, c("CS","HCS","UN","AR","HAR","CAR","ID","DIAG","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","GEN","GDIAG")))) # "UNR", "PHYBM","PHYPL","PHYPD")))) stop(mstyle$stop("Unknown 'struct' specified.")) if (length(struct) == 1L) struct <- c(struct, struct) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(random)) random <- NULL if (missing(R)) R <- NULL if (missing(sigma2)) sigma2 <- NULL if (missing(tau2)) tau2 <- NULL if (missing(rho)) rho <- NULL if (missing(gamma2)) gamma2 <- NULL if (missing(phi)) phi <- NULL if (missing(control)) control <- list() ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } time.start <- proc.time() ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("tdist", "outlist", "time", "dist")) ### handle 'tdist' argument from ... 
(note: overrides test argument) if (.isFALSE(ddd$tdist)) test <- "z" if (.isTRUE(ddd$tdist)) test <- "t" if (!is.element(test, c("z", "t"))) stop(mstyle$stop("Invalid option selected for 'test' argument.")) if (is.character(dfs)) dfs <- match.arg(dfs, c("residual", "contain")) ### handle Rscale argument (either character, logical, or integer) if (is.character(Rscale)) Rscale <- match.arg(Rscale, c("none", "cor", "cor0", "cov0")) if (is.logical(Rscale)) Rscale <- ifelse(Rscale, "cor", "none") if (is.numeric(Rscale)) { Rscale <- round(Rscale) if (Rscale > 3 | Rscale < 0) stop(mstyle$stop("Unknown 'Rscale' value specified.")) Rscale <- switch(as.character(Rscale), "0"="none", "1"="cor", "2"="cor0", "3"="cov0") } ### handle 'dist' argument from ... if (!is.null(ddd$dist)) { if (length(ddd$dist) == 1L) ddd$dist <- c(ddd$dist, ddd$dist) if (!is.list(ddd$dist)) ddd$dist <- as.list(ddd$dist) dist.methods <- c("euclidean", "maximum", "manhattan", "gcd") for (j in 1:2) { if (!is.function(ddd$dist[[j]]) && !is.matrix(ddd$dist[[j]])) { ddd$dist[[j]] <- charmatch(ddd$dist[[j]], dist.methods, nomatch = 0) if (ddd$dist[[j]] == 0) { stop(mstyle$stop("Argument 'dist' must be one of 'euclidean', 'maximum', 'manhattan', or 'gcd'.")) } else { ddd$dist[[j]] <- dist.methods[ddd$dist[[j]]] } } } if (any(ddd$dist == "gcd")) { if (!requireNamespace("sp", quietly=TRUE)) stop(mstyle$stop("Please install the 'sp' package to compute great-circle distances.")) } } else { ddd$dist <- list("euclidean", "euclidean") } ### set defaults for formulas formula.yi <- NULL formula.mods <- NULL ### set options(warn=1) if verbose > 2 if (verbose > 2) { opwarn <- options(warn=1) on.exit(options(warn=opwarn$warn), add=TRUE) } ######################################################################### if (verbose > 1) message(mstyle$message("\nExtracting yi/V values ...")) ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### extract yi, V, W, ni, slab, subset, and mods values, possibly from the data frame specified via data (arguments not specified are NULL) mf.yi <- mf[[match("yi", names(mf))]] mf.V <- mf[[match("V", names(mf))]] mf.W <- mf[[match("W", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ### not yet possible to specify this mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] mf.mods <- mf[[match("mods", names(mf))]] if (is.null(mf.yi)) yi <- NULL if (is.null(mf.V)) V <- NULL if (is.null(mf.W)) W <- NULL if (is.null(mf.ni)) ni <- NULL if (is.null(mf.slab)) slab <- NULL if (is.null(mf.subset)) subset <- NULL if (is.null(mf.mods)) mods <- NULL if (!is.null(mf.yi) && !any(grepl("$", mf.yi, fixed=TRUE))) yi <- eval(mf.yi, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this if (!is.null(mf.V) && !any(grepl("$", mf.V, fixed=TRUE))) V <- eval(mf.V, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this if (!is.null(mf.W) && !any(grepl("$", mf.W, fixed=TRUE))) W <- eval(mf.W, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this if (!is.null(mf.ni) && !any(grepl("$", mf.ni, fixed=TRUE))) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this if (!is.null(mf.slab) && !any(grepl("$", mf.slab, fixed=TRUE))) slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this if (!is.null(mf.subset) 
&& !any(grepl("$", mf.subset, fixed=TRUE))) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this if (!is.null(mf.mods) && !any(grepl("$", mf.mods, fixed=TRUE))) mods <- eval(mf.mods, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this ### if yi is a formula, extract yi and X (this overrides anything specified via the mods argument further below) if (inherits(yi, "formula")) { formula.yi <- yi options(na.action = "na.pass") ### set na.action to na.pass, so that NAs are not filtered out (we'll do that later) mods <- model.matrix(yi, data=data) ### extract model matrix (now mods is no longer a formula, so [a] further below is skipped) attr(mods, "assign") <- NULL ### strip assign attribute (not needed at the moment) attr(mods, "contrasts") <- NULL ### strip contrasts attribute (not needed at the moment) yi <- model.response(model.frame(yi, data=data)) ### extract yi values from model frame options(na.action = na.act) ### set na.action back to na.act names(yi) <- NULL ### strip names (1:k) from yi (so res$yi is the same whether yi is a formula or not) intercept <- FALSE ### set to FALSE since formula now controls whether the intercept is included or not } ### note: code further below ([b]) actually checks whether intercept is included or not ### in case user passed a matrix to yi, convert it to a vector if (is.matrix(yi)) yi <- as.vector(yi) ### number of outcomes before subsetting k <- length(yi) k.all <- k ### set default measure argument measure <- "GEN" if (!is.null(attr(yi, "measure"))) ### take 'measure' from yi (if it is there) measure <- attr(yi, "measure") ### add measure attribute (back) to the yi vector attr(yi, "measure") <- measure ### some checks on V (and turn V into a diagonal matrix if it is a column/row vector) if (is.null(V)) stop(mstyle$stop("Must specify 'V' argument.")) if (is.list(V) && !is.data.frame(V)) { ### list elements may be data frames (or scalars), so coerce to matrices V <- lapply(V, as.matrix) ### check that all elements are square if (any(!sapply(V, .is.square))) stop(mstyle$stop("All list elements in 'V' must be square matrices.")) ### turn list into block-diagonal (sparse) matrix if (sparse) { V <- bdiag(V) } else { V <- bldiag(V) } } ### check if user constrained V to 0 (can skip a lot of the steps below then) if ((.is.vector(V) && length(V) == 1L && V == 0) || (.is.vector(V) && length(V) == k && !anyNA(V) && all(V == 0))) { V0 <- TRUE } else { V0 <- FALSE } ### turn V into a diagonal matrix if it is a column/row vector ### note: if V is a scalar (e.g., V=0), then this will turn V into a kxk ### matrix with the value of V along the diagonal if (V0 || .is.vector(V) || nrow(V) == 1L || ncol(V) == 1L) { if (sparse) { V <- Diagonal(k, as.vector(V)) } else { V <- diag(as.vector(V), nrow=k, ncol=k) } } ### turn V into a matrix if it is a data frame if (is.data.frame(V)) V <- as.matrix(V) ### remove row and column names (important for isSymmetric() function) ### (but only do this if V has row/column names to avoid making an unnecessary copy) if (!is.null(dimnames(V))) V <- unname(V) ### check whether V is square and symmetric (can skip when V0) if (!V0 && !.is.square(V)) stop(mstyle$stop("'V' must be a square matrix.")) if (!V0 && !isSymmetric(V)) ### note: copy of V is made when doing this stop(mstyle$stop("'V' must be a symmetric matrix.")) ### check length of yi and V if (nrow(V) != k) stop(mstyle$stop(paste0("Length of 'yi' (", k, ") and length/dimensions of 'V' (", nrow(V), ") is not 
the same."))) ### force V to be sparse when sparse=TRUE (and V is not yet sparse) if (sparse && inherits(V, "matrix")) V <- Matrix(V, sparse=TRUE) ### process W if it was specified if (!is.null(W)) { ### turn W into a diagonal matrix if it is a column/row vector ### in general, turn W into A (arbitrary weight matrix) if (.is.vector(W) || nrow(W) == 1L || ncol(W) == 1L) { W <- as.vector(W) ### allow easy setting of W to a single value if (length(W) == 1L) W <- rep(W, k) A <- diag(W, nrow=length(W), ncol=length(W)) } else { A <- W } if (is.data.frame(A)) A <- as.matrix(A) ### remove row and column names (important for isSymmetric() function) ### (but only do this if A has row/column names to avoid making an unnecessary copy) if (!is.null(dimnames(A))) A <- unname(A) ### check whether A is square and symmetric if (!.is.square(A)) stop(mstyle$stop("'W' must be a square matrix.")) if (!isSymmetric(A)) stop(mstyle$stop("'W' must be a symmetric matrix.")) ### check length of yi and A if (nrow(A) != k) stop(mstyle$stop(paste0("Length of 'yi' (", k, ") and length/dimensions of 'W' (", nrow(A), ") is not the same."))) ### force A to be sparse when sparse=TRUE (and A is not yet sparse) if (sparse && inherits(A, "matrix")) A <- Matrix(A, sparse=TRUE) } else { A <- NULL } ### if ni has not been specified (and hence is NULL) but is an attribute of yi, get it ### note: currently ni argument removed, so this is the only way to pass ni to the function if (is.null(ni) && !is.null(attr(yi, "ni"))) ni <- attr(yi, "ni") ### check length of yi and ni ### if there is a mismatch, then ni cannot be trusted, so set it to NULL if (!is.null(ni) && length(ni) != k) ni <- NULL ### if ni is now available, add it (back) as an attribute to yi ### this is currently pointless, but may be useful if function has an ni argument #if (!is.null(ni)) # attr(yi, "ni") <- ni ######################################################################### if (verbose > 1) message(mstyle$message("Creating model matrix ...")) ### convert mods formula to X matrix and set intercept equal to FALSE ### skipped if formula has already been specified via yi argument, since mods is then no longer a formula (see [a]) if (inherits(mods, "formula")) { formula.mods <- mods options(na.action = "na.pass") ### set na.action to na.pass, so that NAs are not filtered out (we'll do that later) mods <- model.matrix(mods, data=data) ### extract model matrix attr(mods, "assign") <- NULL ### strip assign attribute (not needed at the moment) attr(mods, "contrasts") <- NULL ### strip contrasts attribute (not needed at the moment) options(na.action = na.act) ### set na.action back to na.act intercept <- FALSE ### set to FALSE since formula now controls whether the intercept is included or not } ### note: code further below ([b]) actually checks whether intercept is included or not ### turn a vector for mods into a column vector if (.is.vector(mods)) mods <- cbind(mods) ### turn a mods data frame into a matrix if (is.data.frame(mods)) mods <- as.matrix(mods) ### check if model matrix contains character variables if (is.character(mods)) stop(mstyle$stop("Model matrix contains character variables.")) ### check if mods matrix has the right number of rows if (!is.null(mods) && nrow(mods) != k) stop(mstyle$stop(paste0("Number of rows in the model matrix (", nrow(mods), ") does not match length of the outcome vector (", k, ")."))) ######################################################################### 
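   ### Illustrative call sketches for the 'V' argument (hypothetical objects 'dat', 'Vmat',
   ### and 'Vlist'), reflecting the handling above: a scalar or vector is placed on the
   ### diagonal, a full matrix is used as is, and a list of per-study blocks is combined
   ### into a block-diagonal matrix.
   ###
   ### rma.mv(yi, V=vi,    data=dat)                # 'vi' = column of sampling variances in 'dat'
   ### rma.mv(yi, V=Vmat,  data=dat)                # full var-cov matrix of the sampling errors
   ### rma.mv(yi, V=Vlist, data=dat, sparse=TRUE)   # list of blocks -> block-diagonal (sparse) V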
######################################################################### ######################################################################### ### process random argument if (!is.element(method, c("FE","EE","CE")) && !is.null(random)) { if (verbose > 1) message(mstyle$message("Processing 'random' argument ...")) ### make sure random argument is always a list (so lapply() below works) if (!is.list(random)) random <- list(random) ### check that all elements are formulas if (any(sapply(random, function(x) !inherits(x, "formula")))) stop(mstyle$stop("All elements of 'random' must be formulas.")) ### check that all formulas have a vertical bar has.vbar <- sapply(random, function(f) grepl("|", paste0(f, collapse=""), fixed=TRUE)) if (any(!has.vbar)) stop(mstyle$stop("All formulas in 'random' must contain a grouping variable after the | symbol.")) ### check if any formula have a $ has.dollar <- sapply(random, function(f) grepl("$", paste0(f, collapse=""), fixed=TRUE)) if (any(has.dollar)) stop(mstyle$stop("Cannot use '$' notation in formulas in the 'random' argument (use the 'data' argument instead).")) ### check which formulas have a || has.dblvbar <- sapply(random, function(f) grepl("||", paste0(f, collapse=""), fixed=TRUE)) ### replace || with | random <- lapply(random, function(f) { if (grepl("||", paste0(f, collapse=""), fixed=TRUE)) { f <- paste0(f, collapse="") f <- gsub("||", "|", f, fixed=TRUE) f <- as.formula(f) } return(f) }) ### check which formulas in random are '~ inner | outer' formulas formulas <- list(NULL, NULL) split.formulas <- sapply(random, function(f) strsplit(paste0(f, collapse=""), " | ", fixed=TRUE)) is.inner.outer <- sapply(split.formulas, function(f) f[1] != "~1") ### make sure that there are only up to two '~ inner | outer' formulas if (sum(is.inner.outer) > 2) stop(mstyle$stop("Only up to two '~ inner | outer' formulas allowed in the 'random' argument.")) ### get '~ inner | outer' formulas if (any(is.inner.outer)) formulas[[1]] <- random[is.inner.outer][1][[1]] if (sum(is.inner.outer) == 2) formulas[[2]] <- random[is.inner.outer][2][[1]] ### figure out if a formulas has a slash (as in '~ 1 | study/id') has.slash <- sapply(random, function(f) grepl("/", paste0(f, collapse=""), fixed=TRUE)) ### check if slash is used in combination with an '~ inner | outer' term if (any(is.inner.outer & has.slash)) stop(mstyle$stop("Cannot use '~ inner | outer1/outer2' type terms in the 'random' argument.")) ### substitute + for | in all formulas (so that model.frame() below works) random.plus <- lapply(random, function(f) formula(sub("\\|", "+", paste0(f, collapse="")))) ### get all model frames corresponding to the formulas in the random argument ### mf.r <- lapply(random, get_all_vars, data=data) ### note: get_all_vars() does not carry out any functions calls within the formula ### so use model.frame(), which allows for things like 'random = ~ factor(arm) | study' ### need to use na.pass so that NAs are passed through (checks for NAs are done later) #mf.r <- lapply(random.plus, model.frame, data=data, na.action=na.pass) mf.r <- list() io <- 0 for (j in seq_along(is.inner.outer)) { if (is.inner.outer[j]) { io <- io + 1 ### for an '~ inner | outer' term with struct="GEN", expand the inner formula to the ### model matrix and re-combine this with the outer variable if (is.element(struct[io], c("GEN","GDIAG"))) { f.inner <- as.formula(strsplit(paste(random[[j]], collapse=""), " | ", fixed=TRUE)[[1]][1]) f.outer <- as.formula(paste("~", strsplit(paste(random[[j]], collapse=""), " | 
", fixed=TRUE)[[1]][2])) options(na.action = "na.pass") X.inner <- model.matrix(f.inner, data=data) options(na.action = na.act) is.int <- apply(X.inner, 2, .is.intercept) colnames(X.inner)[is.int] <- "intrcpt" mf.r[[j]] <- cbind(X.inner, model.frame(f.outer, data=data, na.action=na.pass)) if (has.dblvbar[j]) # change "GEN" to "GDIAG" if the formula had a || struct[io] <- "GDIAG" } else { mf.r[[j]] <- model.frame(random.plus[[j]], data=data, na.action=na.pass) } } else { mf.r[[j]] <- model.frame(random.plus[[j]], data=data, na.action=na.pass) } } ### count number of columns in each model frame mf.r.ncols <- sapply(mf.r, ncol) ### for formulas with slashes, create interaction terms for (j in seq_along(has.slash)) { if (!has.slash[j]) next ### need to go backwards; otherwise, with 3 or more terms (e.g., ~ 1 | var1/var2/var3), the third term would be an ### interaction between var1, var1:var2, and var3; by going backwards, we get var1, var1:var2, and var1:var2:var3 for (p in mf.r.ncols[j]:1) { mf.r[[j]][,p] <- interaction(mf.r[[j]][1:p], drop=TRUE, lex.order=TRUE, sep = "/") colnames(mf.r[[j]])[p] <- paste(colnames(mf.r[[j]])[1:p], collapse="/") } } ### create list where model frames with multiple columns based on slashes are flattened out if (any(has.slash)) { if (length(mf.r) == 1L) { ### if formula only has one element of the form ~ 1 | var1/var2/..., create a list of the data frames (each with one column) mf.r <- lapply(seq(ncol(mf.r[[1]])), function(x) mf.r[[1]][x]) } else { ### if there are non-slash elements, then this flattens things out (obviously ...) mf.r <- unlist(mapply(function(mf, sl) if (sl) lapply(seq(mf), function(x) mf[x]) else list(mf), mf.r, has.slash, SIMPLIFY=FALSE), recursive=FALSE, use.names=FALSE) } ### recount number of columns in each model frame mf.r.ncols <- sapply(mf.r, ncol) } #return(mf.r) ### separate mf.r into mf.s (~ 1 | id), mf.g (~ inner | outer), and mf.h (~ inner | outer) parts mf.s <- mf.r[which(mf.r.ncols == 1)] ### if there is no '~ 1 | factor' term, this is list() ([] so that we get a list of data frames) mf.g <- mf.r[[which(mf.r.ncols >= 2)[1]]] ### if there is no 1st '~ inner | outer' terms, this is NULL ([[]] so that we get a data frame, not a list) mf.h <- mf.r[[which(mf.r.ncols >= 2)[2]]] ### if there is no 2nd '~ inner | outer' terms, this is NULL ([[]] so that we get a data frame, not a list) ### if there is no (~ 1 | factor) term, then mf.s is list(), so turn that into NULL if (length(mf.s) == 0L) mf.s <- NULL ### does the random argument include at least one (~ 1 | id) term? withS <- !is.null(mf.s) ### does the random argument include '~ inner | outer' terms? 
withG <- !is.null(mf.g) withH <- !is.null(mf.h) ### count number of rows in each model frame mf.r.nrows <- sapply(mf.r, nrow) ### make sure that rows in each model frame match the length of the data if (any(mf.r.nrows != k)) stop(mstyle$stop("Length of variables specified via the 'random' argument does not match length of the data.")) ### need this for profile(); with things like 'random = ~ factor(arm) | study', 'mf.r' contains variables 'factor(arm)' and 'study' ### but the former won't work when using the same formula for the refitting (same when using interaction() in the random formula) ### note: with ~ 1 | interaction(var1, var2), mf.r will have 2 columns, but is actually a 'one variable' term ### and with ~ interaction(var1, var2) | var3, mf.r will have 3 columns, but is actually a 'two variable' term ### mf.r.ncols above is correct even in these cases (since it is based on the model.frame() results), but need ### to be careful that this doesn't screw up anything in other functions (for now, mf.r.ncols is not used in any other function) mf.r <- lapply(random.plus, get_all_vars, data=data) } else { ### set defaults for some elements when method="FE/EE" formulas <- list(NULL, NULL) mf.r <- NULL mf.s <- NULL mf.g <- NULL mf.h <- NULL withS <- FALSE withG <- FALSE withH <- FALSE } ### warn user that 'struct' argument is disregarded if it has been changed ### from the default, but the model doesn't contain '~ inner | outer' terms if (!withG && struct[1] != "CS") warning(mstyle$warning("Model does not contain an '~ inner | outer' term, so 'struct' argument is disregarded."), call.=FALSE) #return(list(mf.r=mf.r, mf.s=mf.s, mf.g=mf.g, mf.h=mf.h)) ######################################################################### ######################################################################### ######################################################################### ### generate study labels if none are specified (or none can be found in yi argument) if (verbose > 1) message(mstyle$message("Generating/extracting study labels ...")) ### study ids (1:k sequence before subsetting) ids <- seq_len(k) ### if slab has not been specified but is an attribute of yi, get it if (is.null(slab)) { if (!is.null(attr(yi, "slab"))) slab <- attr(yi, "slab") ### check length of yi and slab (only if slab is now not NULL) ### if there is a mismatch, then slab cannot be trusted, so set it to NULL if (!is.null(slab) && length(slab) != k) slab <- NULL } if (is.null(slab)) { slab.null <- TRUE slab <- ids } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) slab.null <- FALSE } ### if a subset of studies is specified if (!is.null(subset)) { if (verbose > 1) message(mstyle$message("Subsetting ...")) subset <- .setnafalse(subset, k=k) yi <- yi[subset] V <- V[subset,subset,drop=FALSE] A <- A[subset,subset,drop=FALSE] ni <- ni[subset] mods <- mods[subset,,drop=FALSE] slab <- slab[subset] mf.r <- lapply(mf.r, function(x) x[subset,,drop=FALSE]) mf.s <- lapply(mf.s, function(x) x[subset,,drop=FALSE]) mf.g <- mf.g[subset,,drop=FALSE] mf.h <- mf.h[subset,,drop=FALSE] ids <- ids[subset] k <- length(yi) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back } ### check if study labels are unique; if not, make them unique if
(anyDuplicated(slab)) slab <- .make.unique(slab) ### add slab attribute back attr(yi, "slab") <- slab ### get the sampling variances from the diagonal of V vi <- diag(V) ### check for non-positive sampling variances (and set negative values to 0) if (any(vi <= 0, na.rm=TRUE)) { allvipos <- FALSE if (!V0) warning(mstyle$warning("There are outcomes with non-positive sampling variances."), call.=FALSE) vi.neg <- vi < 0 if (any(vi.neg, na.rm=TRUE)) { V[vi.neg,] <- 0 ### note: entire row set to 0 (so covariances are also 0) V[,vi.neg] <- 0 ### note: entire col set to 0 (so covariances are also 0) vi[vi.neg] <- 0 warning(mstyle$warning("Negative sampling variances constrained to zero."), call.=FALSE) } } else { allvipos <- TRUE } ### save full data (including potential NAs in yi/vi/V/W/ni/mods) yi.f <- yi vi.f <- vi V.f <- V W.f <- A ni.f <- ni mods.f <- mods #mf.g.f <- mf.g ### copied further below #mf.h.f <- mf.h ### copied further below #mf.s.f <- mf.s ### copied further below k.f <- k ### total number of observed outcomes including all NAs ######################################################################### ######################################################################### ######################################################################### ### stuff that need to be done after subsetting if (withS) { if (verbose > 1) message(mstyle$message(paste0("Processing '", paste0("~ 1 | ", sapply(mf.s, names), collapse=", "), "' term(s) ..."))) ### get variables names in mf.s s.names <- sapply(mf.s, names) ### one name per term ### turn each variable in mf.s into a factor (and turn each column vector into just a vector) ### if a variable was a factor to begin with, this drops any unused levels, but order of existing levels is preserved mf.s <- lapply(mf.s, function(x) factor(x[[1]])) ### check if there are any NAs anywhere in mf.s if (any(sapply(mf.s, anyNA))) stop(mstyle$stop("No NAs allowed in variables specified in the 'random' argument.")) ### how many (~ 1 | id) terms does the random argument include? 
(0 if none, but if withS is TRUE, must be at least 1) sigma2s <- length(mf.s) ### set default value(s) for sigma2 argument if it is unspecified if (is.null(sigma2)) sigma2 <- rep(NA_real_, sigma2s) ### allow quickly setting all sigma2 values to a fixed value if (length(sigma2) == 1L) sigma2 <- rep(sigma2, sigma2s) ### check if sigma2 is of the correct length if (length(sigma2) != sigma2s) stop(mstyle$stop(paste0("Length of 'sigma2' argument (", length(sigma2), ") does not match actual number of variance components (", sigma2s, ")."))) ### checks on any fixed values of sigma2 argument if (any(sigma2 < 0, na.rm=TRUE)) stop(mstyle$stop("Specified value(s) of 'sigma2' must be non-negative.")) ### get number of levels of each variable in mf.s (vector with one value per term) s.nlevels <- sapply(mf.s, nlevels) ### get levels of each variable in mf.s (list with levels for each variable) s.levels <- lapply(mf.s, levels) ### checks on R (note: do this after subsetting, so user can filter out ids with no info in R) if (is.null(R)) { withR <- FALSE Rfix <- rep(FALSE, sigma2s) } else { if (verbose > 1) message(mstyle$message("Processing 'R' argument ...")) withR <- TRUE ### make sure R is always a list (so lapply() below works) if (is.data.frame(R) || !is.list(R)) R <- list(R) ### check if R list has no names at all or some names are missing ### (if only some elements of R have names, then names(R) is "" for the unnamed elements, so use nchar()==0 to check for that) if (is.null(names(R)) || any(nchar(names(R)) == 0L)) stop(mstyle$stop("Argument 'R' must be a *named* list.")) ### remove elements in R that are NULL (not sure why this is needed; why would anybody ever do this?) ### maybe this had something to do with functions that repeatedly call rma.mv(); so leave this be for now R <- R[!sapply(R, is.null)] ### turn all elements in R into matrices (this would fail with a NULL element) R <- lapply(R, as.matrix) ### match up R matrices based on the s.names (and correct names of R) ### so if a particular ~ 1 | id term has a matching id=R element, the corresponding R element is that R matrix ### if a particular ~ 1 | id term does not have a matching id=R element, the corresponding R element is NULL R <- R[s.names] ### NULL elements in R would have no name, so this makes sure that all R elements have the correct s.names names(R) <- s.names ### check for which components an R matrix has been specified Rfix <- !sapply(R, is.null) ### Rfix could be all FALSE (if user has used id names in R that are not actually in 'random') ### so only do the rest below if that is *not* the case if (any(Rfix)) { ### check if given R matrices are square and symmetric if (any(!sapply(R[Rfix], .is.square))) stop(mstyle$stop("Elements of 'R' must be square matrices.")) if (any(!sapply(R[Rfix], function(x) isSymmetric(unname(x))))) stop(mstyle$stop("Elements of 'R' must be symmetric matrices.")) for (j in seq_along(R)) { if (!Rfix[j]) next ### even if isSymmetric() is TRUE, there may still be minor numerical differences between the lower and upper triangular ### parts that could lead to isSymmetric() being FALSE once we do any potentially rescaling of the R matrices further ### below; this ensures strict symmetry to avoid this issue #R[[j]][lower.tri(R[[j]])] <- t(R[[j]])[lower.tri(R[[j]])] R[[j]] <- symmpart(R[[j]]) ### if rownames are missing, copy colnames to rownames and vice-versa if (is.null(rownames(R[[j]]))) rownames(R[[j]]) <- colnames(R[[j]]) if (is.null(colnames(R[[j]]))) colnames(R[[j]]) <- rownames(R[[j]]) ### if 
colnames are still missing at this point, R element did not have dimension names to begin with if (is.null(colnames(R[[j]]))) stop(mstyle$stop("Elements of 'R' must have dimension names.")) } ### if user specifies the entire (k x k) correlation matrix, this removes the duplicate rows/columns #R[Rfix] <- lapply(R[Rfix], unique, MARGIN=1) #R[Rfix] <- lapply(R[Rfix], unique, MARGIN=2) ### no, the user can specify an entire (k x k) matrix; the problem is repeated dimension names ### so let's filter out rows/columns with the same dimension names R[Rfix] <- lapply(R[Rfix], function(x) x[!duplicated(rownames(x)), !duplicated(colnames(x)), drop=FALSE]) ### after the two commands above, this should always be FALSE, but leave for now just in case if (any(sapply(R[Rfix], function(x) length(colnames(x)) != length(unique(colnames(x)))))) stop(mstyle$stop("Each element of 'R' must have unique dimension names.")) ### check for R being positive definite ### skipped: even if R is not positive definite, the marginal var-cov matrix can still be; so just check for pd during optimization #if (any(sapply(R[Rfix], function(x) any(eigen(x, symmetric=TRUE, only.values=TRUE)$values <= .Machine$double.eps)))) ### any eigenvalue below double.eps is essentially 0 # stop(mstyle$stop("Matrix in R is not positive definite.")) for (j in seq_along(R)) { if (!Rfix[j]) next ### check if there are NAs in a matrix specified via R if (anyNA(R[[j]])) stop(mstyle$stop("No missing values allowed in matrices specified via 'R'.")) ### check if there are levels in s.levels which are not in R (if yes, issue an error and stop) if (any(!is.element(s.levels[[j]], colnames(R[[j]])))) stop(mstyle$stop(paste0("There are levels in '", s.names[j], "' for which there are no matching rows/columns in the corresponding 'R' matrix."))) ### check if there are levels in R which are not in s.levels (if yes, issue a warning) if (any(!is.element(colnames(R[[j]]), s.levels[[j]]))) warning(mstyle$warning(paste0("There are rows/columns in the 'R' matrix for '", s.names[j], "' for which there are no data."))) } } else { warning(mstyle$warning("Argument 'R' specified, but list name(s) not in 'random'."), call.=FALSE) withR <- FALSE Rfix <- rep(FALSE, sigma2s) R <- NULL } } } else { ### need one fixed sigma2 value for optimization function sigma2s <- 1 sigma2 <- 0 s.nlevels <- NULL s.levels <- NULL s.names <- NULL withR <- FALSE Rfix <- FALSE R <- NULL } #mf.s.f <- mf.s ### not needed at the moment ### copy s.nlevels and s.levels (needed for ranef()) s.nlevels.f <- s.nlevels s.levels.f <- s.levels ######################################################################### ### stuff that need to be done after subsetting if (withG) { tmp <- .process.G.aftersub(mf.g, struct[1], formulas[[1]], tau2, rho, isG=TRUE, k, sparse, verbose) mf.g <- tmp$mf.g g.names <- tmp$g.names g.nlevels <- tmp$g.nlevels g.levels <- tmp$g.levels g.values <- tmp$g.values tau2s <- tmp$tau2s rhos <- tmp$rhos tau2 <- tmp$tau2 rho <- tmp$rho Z.G1 <- tmp$Z.G1 Z.G2 <- tmp$Z.G2 } else { ### need one fixed tau2 and rho value for optimization function tau2s <- 1 rhos <- 1 tau2 <- 0 rho <- 0 ### need Z.G1 and Z.G2 to exist further below and for optimization function Z.G1 <- NULL Z.G2 <- NULL g.nlevels <- NULL g.levels <- NULL g.values <- NULL g.names <- NULL } mf.g.f <- mf.g ### needed for predict() ######################################################################### ### stuff that need to be done after subsetting if (withH) { tmp <- .process.G.aftersub(mf.h, struct[2], formulas[[2]], 
gamma2, phi, isG=FALSE, k, sparse, verbose) mf.h <- tmp$mf.g h.names <- tmp$g.names h.nlevels <- tmp$g.nlevels h.levels <- tmp$g.levels h.values <- tmp$g.values gamma2s <- tmp$tau2s phis <- tmp$rhos gamma2 <- tmp$tau2 phi <- tmp$rho Z.H1 <- tmp$Z.G1 Z.H2 <- tmp$Z.G2 } else { ### need one fixed gamma2 and phi value for optimization function gamma2s <- 1 phis <- 1 gamma2 <- 0 phi <- 0 ### need Z.H1 and Z.H2 to exist further below and for optimization function Z.H1 <- NULL Z.H2 <- NULL h.nlevels <- NULL h.levels <- NULL h.values <- NULL h.names <- NULL } mf.h.f <- mf.h ### needed for predict() # return(list(Z.G1=Z.G1, Z.G2=Z.G2, g.nlevels=g.nlevels, g.levels=g.levels, g.values=g.values, tau2=tau2, rho=rho, # Z.H1=Z.H1, Z.H2=Z.H2, h.nlevels=h.nlevels, h.levels=h.levels, h.values=h.values, gamma2=gamma2, phi=phi)) ######################################################################### ######################################################################### ######################################################################### ### check for NAs and act accordingly has.na <- is.na(yi) | (if (is.null(mods)) FALSE else apply(is.na(mods), 1, any)) | (if (V0) FALSE else .anyNAv(V)) | (if (is.null(A)) FALSE else apply(is.na(A), 1, any)) not.na <- !has.na if (any(has.na)) { if (verbose > 1) message(mstyle$message("Handling NAs ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] V <- V[not.na,not.na,drop=FALSE] A <- A[not.na,not.na,drop=FALSE] vi <- vi[not.na] ni <- ni[not.na] mods <- mods[not.na,,drop=FALSE] mf.r <- lapply(mf.r, function(x) x[not.na,,drop=FALSE]) mf.s <- lapply(mf.s, function(x) x[not.na]) ### note: mf.s is a list of vectors at this point mf.g <- mf.g[not.na,,drop=FALSE] mf.h <- mf.h[not.na,,drop=FALSE] if (is.element(struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { Z.G1 <- Z.G1[not.na,not.na,drop=FALSE] } else { Z.G1 <- Z.G1[not.na,,drop=FALSE] } Z.G2 <- Z.G2[not.na,,drop=FALSE] if (is.element(struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) { Z.H1 <- Z.H1[not.na,not.na,drop=FALSE] } else { Z.H1 <- Z.H1[not.na,,drop=FALSE] } Z.H2 <- Z.H2[not.na,,drop=FALSE] k <- length(yi) warning(mstyle$warning("Rows with NAs omitted from model fitting."), call.=FALSE) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back ### note: slab is always of the same length as the full yi vector (after subsetting), so missings are not removed and slab is not added back to yi } if (na.act == "na.fail") stop(mstyle$stop("Missing values in data.")) } ### more than one study left? if (k <= 1) stop(mstyle$stop("Processing terminated since k <= 1.")) ### check for V being positive definite (this should also cover non-positive variances) ### skipped: even if V is not positive definite, the marginal var-cov matrix can still be; so just check for pd during the optimization ### but at least issue a warning, since a fixed-effects model can then not be fitted and there is otherwise no indication why this is the case if (!V0 && any(eigen(V, symmetric=TRUE, only.values=TRUE)$values <= .Machine$double.eps)) ### any eigenvalue below double.eps is essentially 0 warning(mstyle$warning("'V' appears to be not positive definite."), call.=FALSE) ### check ratio of largest to smallest sampling variance ### note: need to exclude some special cases (0/0 = NaN, max(vi)/0 = Inf) ### TODO: use the condition number of V here instead? 
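### (added note) the check below flags extreme spread in the sampling variances; as an illustrative
### example (hypothetical values, not taken from the package), vi values of 0.01 and 2e5 would give
### vimaxmin = 2e7 >= 1e7, so the warning about potentially unstable results would be issued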
vimaxmin <- max(vi) / min(vi) if (!is.nan(vimaxmin) && !is.infinite(vimaxmin) && vimaxmin >= 1e7) warning(mstyle$warning("Ratio of largest to smallest sampling variance extremely large. May not be able to obtain stable results."), call.=FALSE) ### make sure that there is at least one column in X ([b]) if (is.null(mods) && !intercept) { warning(mstyle$warning("Must either include an intercept and/or moderators in model.\n Coerced intercept into the model."), call.=FALSE) intercept <- TRUE } ### add vector of 1s to the X matrix for the intercept (if intercept=TRUE) if (intercept) { X <- cbind(intrcpt=rep(1,k), mods) X.f <- cbind(intrcpt=rep(1,k.f), mods.f) } else { X <- mods X.f <- mods.f } ### drop redundant predictors ### note: need to save coef.na for functions that modify the data/model and then refit the model (regtest() and the ### various function that leave out an observation); so we can check if there are redundant/dropped predictors then tmp <- lm(yi ~ X - 1) coef.na <- is.na(coef(tmp)) if (any(coef.na)) { warning(mstyle$warning("Redundant predictors dropped from the model."), call.=FALSE) X <- X[,!coef.na,drop=FALSE] X.f <- X.f[,!coef.na,drop=FALSE] } ### check whether intercept is included and if yes, move it to the first column (NAs already removed, so na.rm=TRUE for any() not necessary) is.int <- apply(X, 2, .is.intercept) if (any(is.int)) { int.incl <- TRUE int.indx <- which(is.int, arr.ind=TRUE) X <- cbind(intrcpt=1, X[,-int.indx, drop=FALSE]) ### this removes any duplicate intercepts X.f <- cbind(intrcpt=1, X.f[,-int.indx, drop=FALSE]) ### this removes any duplicate intercepts intercept <- TRUE ### set intercept appropriately so that the predict() function works } else { int.incl <- FALSE } p <- NCOL(X) ### number of columns in X (including the intercept if it is included) ### check whether this is an intercept-only model if ((p == 1L) && .is.intercept(X)) { int.only <- TRUE } else { int.only <- FALSE } ### check if there are too many parameters for given k (currently skipped) ### set/check 'btt' argument btt <- .set.btt(btt, p, int.incl, colnames(X)) m <- length(btt) ### number of betas to test (m = p if all betas are tested) ######################################################################### ######################################################################### ######################################################################### ### stuff that need to be done after subsetting and filtering out NAs if (withS) { ### redo: turn each variable in mf.s into a factor (reevaluates the levels present, but order of existing levels is preserved) mf.s <- lapply(mf.s, factor) ### redo: get number of levels of each variable in mf.s (vector with one value per term) s.nlevels <- sapply(mf.s, nlevels) ### redo: get levels of each variable in mf.s s.levels <- lapply(mf.s, levels) ### for any single-level factor with unfixed sigma2, fix the sigma2 value to 0 if (any(is.na(sigma2) & s.nlevels == 1)) { sigma2[is.na(sigma2) & s.nlevels == 1] <- 0 warning(mstyle$warning("Single-level factor(s) found in 'random' argument. 
Corresponding 'sigma2' value(s) fixed to 0."), call.=FALSE) } ### create model matrix for each element in mf.s Z.S <- vector(mode="list", length=sigma2s) for (j in seq_len(sigma2s)) { if (s.nlevels[j] == 1) { Z.S[[j]] <- cbind(rep(1,k)) } else { if (sparse) { Z.S[[j]] <- sparse.model.matrix(~ mf.s[[j]] - 1) ### cannot use this for factors with a single level } else { Z.S[[j]] <- model.matrix(~ mf.s[[j]] - 1) ### cannot use this for factors with a single level } } attr(Z.S[[j]], "assign") <- NULL attr(Z.S[[j]], "contrasts") <- NULL } } else { Z.S <- NULL } ######################################################################### ### stuff that need to be done after subsetting and filtering out NAs if (withR) { ### R may contain levels that are not in ids (that's fine; just filter them out) ### also, R may not be in the order that Z.S is in, so this fixes that up for (j in seq_along(R)) { if (!Rfix[j]) next R[[j]] <- R[[j]][s.levels[[j]], s.levels[[j]]] } ### TODO: allow Rscale to be a vector so that different Rs can be scaled differently ### force each element of R to be a correlation matrix (and do some checks on that) if (Rscale=="cor" || Rscale=="cor0") { R[Rfix] <- lapply(R[Rfix], function(x) { if (any(diag(x) <= 0)) stop(mstyle$stop("Cannot use Rscale=\"cor\" or Rscale=\"cor0\" with non-positive values on the diagonal of an 'R' matrix."), call.=FALSE) tmp <- cov2cor(x) if (any(abs(tmp) > 1)) warning(mstyle$warning("Some values are larger than +-1 in an 'R' matrix after cov2cor() (see 'Rscale' argument)."), call.=FALSE) return(tmp) }) } ### rescale R so that entries are 0 to (max(R) - min(R)) / (1 - min(R)) ### this preserves the ultrametric properties of R and makes levels split at the root uncorrelated if (Rscale=="cor0") R[Rfix] <- lapply(R[Rfix], function(x) (x - min(x)) / (1 - min(x))) ### rescale R so that min(R) is zero (this is for the case that R is covariance matrix) if (Rscale=="cov0") R[Rfix] <- lapply(R[Rfix], function(x) (x - min(x))) } ######################################################################### ### create (kxk) indicator/correlation matrices for random intercepts if (withS) { D.S <- vector(mode="list", length=sigma2s) for (j in seq_len(sigma2s)) { if (Rfix[j]) { if (sparse) { D.S[[j]] <- Z.S[[j]] %*% Matrix(R[[j]], sparse=TRUE) %*% t(Z.S[[j]]) } else { D.S[[j]] <- Z.S[[j]] %*% R[[j]] %*% t(Z.S[[j]]) } # D.S[[j]] <- as.matrix(nearPD(D.S[[j]])$mat) ### this avoids that the full matrix becomes non-positive definite but adding ### a tiny amount to the diagonal of D.S[[j]] is easier and works just as well ### TODO: consider doing something like this by default } else { D.S[[j]] <- tcrossprod(Z.S[[j]]) } } } else { D.S <- NULL } ######################################################################### ### stuff that need to be done after subsetting and filtering out NAs if (withG) { tmp <- .process.G.afterrmna(mf.g, g.nlevels, g.levels, g.values, struct[1], formulas[[1]], tau2, rho, Z.G1, Z.G2, isG=TRUE, sparse, ddd$dist[[1]], verbose) mf.g <- tmp$mf.g g.nlevels <- tmp$g.nlevels g.nlevels.f <- tmp$g.nlevels.f g.levels <- tmp$g.levels g.levels.f <- tmp$g.levels.f g.levels.r <- tmp$g.levels.r g.levels.k <- tmp$g.levels.k g.levels.comb.k <- tmp$g.levels.comb.k tau2 <- tmp$tau2 rho <- tmp$rho G <- tmp$G g.Dmat <- tmp$Dmat g.rho.init <- tmp$rho.init } else { g.nlevels.f <- NULL g.levels.f <- NULL g.levels.r <- NULL g.levels.k <- NULL g.levels.comb.k <- NULL G <- NULL g.Dmat <- NULL g.rho.init <- NULL } 
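### (added note) the 'withH' block that follows mirrors the 'withG' block above: .process.G.afterrmna()
### is simply called again with isG=FALSE, and its g.* return values are mapped onto the corresponding
### h.* objects (e.g., tmp$g.nlevels -> h.nlevels, tmp$tau2 -> gamma2, tmp$rho -> phi)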
######################################################################### ### stuff that need to be done after subsetting and filtering out NAs if (withH) { tmp <- .process.G.afterrmna(mf.h, h.nlevels, h.levels, h.values, struct[2], formulas[[2]], gamma2, phi, Z.H1, Z.H2, isG=FALSE, sparse, ddd$dist[[2]], verbose) mf.h <- tmp$mf.g h.nlevels <- tmp$g.nlevels h.nlevels.f <- tmp$g.nlevels.f h.levels <- tmp$g.levels h.levels.f <- tmp$g.levels.f h.levels.r <- tmp$g.levels.r h.levels.k <- tmp$g.levels.k h.levels.comb.k <- tmp$g.levels.comb.k gamma2 <- tmp$tau2 phi <- tmp$rho H <- tmp$G h.Dmat <- tmp$Dmat h.phi.init <- tmp$rho.init } else { h.nlevels.f <- NULL h.levels.f <- NULL h.levels.r <- NULL h.levels.k <- NULL h.levels.comb.k <- NULL H <- NULL h.Dmat <- NULL h.phi.init <- NULL } ######################################################################### #return(list(Z.S=Z.S, sigma2=sigma2, Z.G1=Z.G1, Z.G2=Z.G2, tau2=tau2, rho=rho, G=G, Z.H1=Z.H1, Z.H2=Z.H2, gamma2=gamma2, phi=phi, H=H, Rfix=Rfix, R=R)) ######################################################################### ######################################################################### ######################################################################### Y <- as.matrix(yi) ### initial values for variance components (need to do something better here in the future; see rma.mv2() and rma.bv() for some general ideas) if (verbose > 1) message(mstyle$message("Extracting/computing initial values ...")) if (!V0) { # for V0 case, this always fails, so can skip it if (verbose > 1) { U <- try(chol(chol2inv(chol(V))), silent=FALSE) } else { U <- try(suppressWarnings(chol(chol2inv(chol(V)))), silent=TRUE) } } if (V0 || inherits(U, "try-error") || any(is.infinite(U))) { ### note: if V is sparse diagonal with 0 along the diagonal, U will not be a 'try-error' ### but have Inf along the diagonal, so need to check for this as well total <- sigma(lm(Y ~ X - 1))^2 if (is.na(total)) # if X is a saturated model, then sigma() yields NaN stop(mstyle$stop("Cannot compute initial values.")) QE <- NA QEp <- NA } else { sX <- U %*% X sY <- U %*% Y beta.FE <- try(solve(crossprod(sX), crossprod(sX, sY)), silent=TRUE) if (inherits(beta.FE, "try-error")) stop(mstyle$stop("Cannot compute initial values.")) ### TODO: consider a better way to set initial values #total <- max(.001*(sigma2s + tau2s + gamma2s), var(c(Y - X %*% res.FE$beta)) - 1/mean(1/diag(V))) #total <- max(.001*(sigma2s + tau2s + gamma2s), var(as.vector(sY - sX %*% beta)) - 1/mean(1/diag(V))) total <- max(.001*(sigma2s + tau2s + gamma2s), var(as.vector(Y) - as.vector(X %*% beta.FE)) - 1/mean(1/diag(V))) QE <- sum(as.vector(sY - sX %*% beta.FE)^2) ### QEp calculated further below } sigma2.init <- rep(total / (sigma2s + tau2s + gamma2s), sigma2s) tau2.init <- rep(total / (sigma2s + tau2s + gamma2s), tau2s) gamma2.init <- rep(total / (sigma2s + tau2s + gamma2s), gamma2s) if (is.null(g.rho.init)) { rho.init <- rep(.50, rhos) } else { rho.init <- g.rho.init } if (is.null(h.phi.init)) { phi.init <- rep(.50, phis) } else { phi.init <- h.phi.init } ######################################################################### ### set default control parameters con <- list(verbose = FALSE, optimizer = "nlminb", # optimizer to use ("optim", "nlminb", "uobyqa", "newuoa", "bobyqa", "nloptr", "nlm", "hjk", "nmk", "mads", "ucminf", "optimParallel") optmethod = "BFGS", # argument 'method' for optim() ("Nelder-Mead" and "BFGS" are sensible options) parallel = list(), # parallel argument for optimParallel() (note: 
'cl' argument in parallel is not passed; this is directly specified via 'cl') cl = NULL, # arguments for optimParallel() ncpus = 1L, # arguments for optimParallel() sigma2.init = sigma2.init, # initial value(s) for sigma2 tau2.init = tau2.init, # initial value(s) for tau2 rho.init = rho.init, # initial value(s) for rho gamma2.init = gamma2.init, # initial value(s) for gamma2 phi.init = phi.init, # initial value(s) for phi REMLf = TRUE, # full REML likelihood (including all constants) evtol = 1e-07, # lower bound for eigenvalues to determine if model matrix is positive definite cholesky = ifelse(is.element(struct, c("UN","UNR","GEN")), TRUE, FALSE), # by default, use Cholesky factorization for G and H matrix for "UN", "UNR", and "GEN" structures posdefify = FALSE, # to force G and H matrix to become positive definite hessian = FALSE, # to compute Hessian hessianCtrl=list(r=8), # arguments passed on to 'method.args' of hessian() vctransf = FALSE) # if FALSE, Hessian is computed for the untransformed (raw) variance components # if TRUE, Hessian is computed for the transformed components (log/atanh/qlogis space) ### replace defaults with any user-defined values con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose ### check for missings in initial values if (anyNA(con$sigma2.init)) stop(mstyle$stop(paste0("No missing values allowed in 'sigma2.init'."))) if (anyNA(con$tau2.init)) stop(mstyle$stop(paste0("No missing values allowed in 'tau2.init'."))) if (anyNA(con$rho.init)) stop(mstyle$stop(paste0("No missing values allowed in 'rho.init'."))) if (anyNA(con$gamma2.init)) stop(mstyle$stop(paste0("No missing values allowed in 'gamma2.init'."))) if (anyNA(con$phi.init)) stop(mstyle$stop(paste0("No missing values allowed in 'phi.init'."))) ### expand initial values to correct length if (length(con$sigma2.init) == 1L) con$sigma2.init <- rep(con$sigma2.init, sigma2s) if (length(con$tau2.init) == 1L) con$tau2.init <- rep(con$tau2.init, tau2s) if (length(con$rho.init) == 1L) con$rho.init <- rep(con$rho.init, rhos) if (length(con$gamma2.init) == 1L) con$gamma2.init <- rep(con$gamma2.init, gamma2s) if (length(con$phi.init) == 1L) con$phi.init <- rep(con$phi.init, phis) ### checks on initial values set by the user (the initial values computed by the function are replaced by the user-defined ones at this point) if (withS && any(con$sigma2.init <= 0)) stop(mstyle$stop("Value(s) of 'sigma2.init' must be > 0.")) if (withG && any(con$tau2.init <= 0)) stop(mstyle$stop("Value(s) of 'tau2.init' must be > 0.")) if (withG && struct[1]=="CAR" && (con$rho.init <= 0 | con$rho.init >= 1)) stop(mstyle$stop("Value(s) of 'rho.init' must be in (0,1).")) if (withG && is.element(struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH")) && any(con$rho.init <= 0)) stop(mstyle$stop("Value(s) of 'rho.init' must be > 0.")) if (withG && is.element(struct[1], c("PHYPL","PHYPD")) && con$rho.init < 0) stop(mstyle$stop("Value(s) of 'rho.init' must be >= 0.")) if (withG && !is.element(struct[1], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD")) && any(con$rho.init <= -1 | con$rho.init >= 1)) stop(mstyle$stop("Value(s) of 'rho.init' must be in (-1,1).")) if (withH && any(con$gamma2.init <= 0)) stop(mstyle$stop("Value(s) of 'gamma2.init' must be > 0.")) if (withH && struct[2]=="CAR" && (con$phi.init <= 0 | con$phi.init >= 1)) stop(mstyle$stop("Value(s) of 'phi.init' must be in (0,1).")) if (withH && 
is.element(struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH")) && any(con$phi.init <= 0)) stop(mstyle$stop("Value(s) of 'phi.init' must be > 0.")) if (withH && is.element(struct[2], c("PHYPL","PHYPD")) && con$phi.init < 0) stop(mstyle$stop("Value(s) of 'phi.init' must be >= 0.")) if (withH && !is.element(struct[2], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD")) && any(con$phi.init <= -1 | con$phi.init >= 1)) stop(mstyle$stop("Value(s) of 'phi.init' must be in (-1,1).")) ### in case user manually sets con$cholesky and specifies only a single value if (length(con$cholesky) == 1L) con$cholesky <- rep(con$cholesky, 2) ### use of Cholesky factorization only applicable for models with "UN", "UNR", and "GEN" structure if (!withG) ### in case user sets cholesky=TRUE and struct="UN", struct="UNR", or struct="GEN" even though there is no 1st 'inner | outer' term con$cholesky[1] <- FALSE if (con$cholesky[1] && !is.element(struct[1], c("UN","UNR","GEN"))) con$cholesky[1] <- FALSE if (!withH) ### in case user sets cholesky=TRUE and struct="UN", struct="UNR", or struct="GEN" even though there is no 2nd 'inner | outer' term con$cholesky[2] <- FALSE if (con$cholesky[2] && !is.element(struct[2], c("UN","UNR","GEN"))) con$cholesky[2] <- FALSE ### copy initial values back (in case they were replaced by user-defined values); those values are ### then shown in the 'Variance Components in Model' table that is given when verbose=TRUE; cannot ### replace any fixed values, since that can lead to -Inf/+Inf below when transforming the initial ### values and then optim() throws an error and chol(G) and/or chol(H) is then likely to fail #sigma2.init <- ifelse(is.na(sigma2), con$sigma2.init, sigma2) #tau2.init <- ifelse(is.na(tau2), con$tau2.init, tau2) #rho.init <- ifelse(is.na(rho), con$rho.init, rho) sigma2.init <- con$sigma2.init tau2.init <- con$tau2.init rho.init <- con$rho.init gamma2.init <- con$gamma2.init phi.init <- con$phi.init ### plug in fixed values for sigma2, tau2, rho, gamma2, and phi and transform initial values con$sigma2.init <- log(sigma2.init) if (con$cholesky[1]) { if (struct[1] == "UNR") { G <- .con.vcov.UNR(tau2.init, rho.init) } else { G <- .con.vcov.UN(tau2.init, rho.init) } G <- try(chol(G), silent=TRUE) if (inherits(G, "try-error")) stop(mstyle$stop("Cannot take Choleski decomposition of initial 'G' matrix.")) if (struct[1] == "UNR") { con$tau2.init <- log(tau2.init) } else { con$tau2.init <- diag(G) ### note: con$tau2.init and con$rho.init are the 'choled' values of the initial G matrix, so con$rho.init really con$rho.init <- G[upper.tri(G)] ### contains the 'choled' covariances; and these values are also passed on the .ll.rma.mv as the initial values } if (length(con$rho.init) == 0L) con$rho.init <- 0 } else { con$tau2.init <- log(tau2.init) if (struct[1] == "CAR") con$rho.init <- qlogis(rho.init) if (is.element(struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) con$rho.init <- log(rho.init) if (!is.element(struct[1], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) con$rho.init <- atanh(rho.init) } if (con$cholesky[2]) { H <- .con.vcov.UN(gamma2.init, phi.init) H <- try(chol(H), silent=TRUE) if (inherits(H, "try-error")) stop(mstyle$stop("Cannot take Choleski decomposition of initial 'H' matrix.")) con$gamma2.init <- diag(H) ### note: con$gamma2.init and con$phi.init are the 'choled' values of the initial H matrix, so con$phi.init really con$phi.init <- H[upper.tri(H)] ### contains the 'choled' 
covariances; and these values are also passed on the .ll.rma.mv as the initial values if (length(con$phi.init) == 0L) con$phi.init <- 0 } else { con$gamma2.init <- log(gamma2.init) if (struct[2] == "CAR") con$phi.init <- qlogis(phi.init) if (is.element(struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) con$phi.init <- log(phi.init) if (!is.element(struct[2], c("CAR","SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) con$phi.init <- atanh(phi.init) } optimizer <- match.arg(con$optimizer, c("optim","nlminb","uobyqa","newuoa","bobyqa","nloptr","nlm","hjk","nmk","mads","ucminf","optimParallel")) optmethod <- match.arg(con$optmethod, c("Nelder-Mead","BFGS","CG","L-BFGS-B","SANN","Brent")) evtol <- con$evtol posdefify <- con$posdefify cholesky <- con$cholesky parallel <- con$parallel cl <- con$cl ncpus <- con$ncpus optcontrol <- control[is.na(con.pos)] ### get arguments that are control arguments for optimizer if (length(optcontrol) == 0L) optcontrol <- list() ### if control argument 'ncpus' is larger than 1, automatically switch to optimParallel optimizer if (ncpus > 1L) optimizer <- "optimParallel" reml <- ifelse(method=="REML", TRUE, FALSE) ### set NLOPT_LN_BOBYQA as the default algorithm for nloptr optimizer ### and by default use a relative convergence criterion of 1e-8 on the function value if (optimizer=="nloptr" && !is.element("algorithm", names(optcontrol))) optcontrol$algorithm <- "NLOPT_LN_BOBYQA" if (optimizer=="nloptr" && !is.element("ftol_rel", names(optcontrol))) optcontrol$ftol_rel <- 1e-8 ### for mads, set trace=FALSE and tol=1e-6 by default if (optimizer=="mads" && !is.element("trace", names(optcontrol))) optcontrol$trace <- FALSE if (optimizer=="mads" && !is.element("tol", names(optcontrol))) optcontrol$tol <- 1e-6 #return(list(con=con, optimizer=optimizer, optmethod=optmethod, parallel=parallel, cl=cl, ncpus=ncpus, evtol=evtol, posdefify=posdefify, optcontrol=optcontrol)) ### check that the required packages are installed if (is.element(optimizer, c("uobyqa","newuoa","bobyqa"))) { if (!requireNamespace("minqa", quietly=TRUE)) stop(mstyle$stop("Please install the 'minqa' package to use this optimizer.")) } if (optimizer == "nloptr") { if (!requireNamespace("nloptr", quietly=TRUE)) stop(mstyle$stop("Please install the 'nloptr' package to use this optimizer.")) } if (is.element(optimizer, c("hjk","nmk","mads"))) { if (!requireNamespace("dfoptim", quietly=TRUE)) stop(mstyle$stop("Please install the 'dfoptim' package to use this optimizer.")) } if (optimizer == "ucminf") { if (!requireNamespace("ucminf", quietly=TRUE)) stop(mstyle$stop("Please install the 'ucminf' package to use this optimizer.")) } if (optimizer == "optimParallel") { if (!requireNamespace("optimParallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'optimParallel' package to use this optimizer.")) } if (con$hessian && !requireNamespace("numDeriv", quietly=TRUE)) stop(mstyle$stop("Please install the 'numDeriv' package to compute the Hessian.")) ### check if length of sigma2.init, tau2.init, rho.init, gamma2.init, and phi.init matches number of variance components ### note: if a particular component is not included, reset (transformed) initial values (in case the user still specifies multiple initial values) if (withS) { if (length(con$sigma2.init) != sigma2s) stop(mstyle$stop(paste0("Length of 'sigma2.init' argument (", length(con$sigma2.init), ") does not match actual number of variance components (", sigma2s, ")."))) } else { con$sigma2.init <- 0 } if (withG) 
{ if (length(con$tau2.init) != tau2s) stop(mstyle$stop(paste0("Length of 'tau2.init' argument (", length(con$tau2.init), ") does not match actual number of variance components (", tau2s, ")."))) } else { con$tau2.init <- 0 } if (withG) { if (length(con$rho.init) != rhos) stop(mstyle$stop(paste0("Length of 'rho.init' argument (", length(con$rho.init), ") does not match actual number of correlations (", rhos, ")."))) } else { con$rho.init <- 0 } if (withH) { if (length(con$gamma2.init) != gamma2s) stop(mstyle$stop(paste0("Length of 'gamma2.init' argument (", length(con$gamma2.init), ") does not match actual number of variance components (", gamma2s, ")."))) } else { con$gamma2.init <- 0 } if (withH) { if (length(con$phi.init) != phis) stop(mstyle$stop(paste0("Length of 'phi.init' argument (", length(con$phi.init), ") does not match actual number of correlations (", phis, ")."))) } else { con$phi.init <- 0 } ######################################################################### ### check whether model matrix is of full rank if (any(eigen(crossprod(X), symmetric=TRUE, only.values=TRUE)$values <= evtol)) stop(mstyle$stop("Model matrix not of full rank. Cannot fit model.")) ### which variance components are fixed? (TRUE/FALSE or NA if not applicable = not included) if (withS) { sigma2.fix <- !is.na(sigma2) } else { sigma2.fix <- NA } if (withG) { tau2.fix <- !is.na(tau2) rho.fix <- !is.na(rho) } else { tau2.fix <- NA rho.fix <- NA } if (withH) { gamma2.fix <- !is.na(gamma2) phi.fix <- !is.na(phi) } else { gamma2.fix <- NA phi.fix <- NA } vc.fix <- list(sigma2=sigma2.fix, tau2=tau2.fix, rho=rho.fix, gamma2=gamma2.fix, phi=phi.fix) ### show which variance components are included in the model, their initial value, and their specified value (NA if not specified) if (verbose) { cat("\n") cat(mstyle$verbose("Variance Components in Model:")) if (!withS && !withG && !withH) { cat(mstyle$verbose(" none")) cat("\n\n") } else { cat("\n\n") vcs <- rbind(c("sigma2" = if (withS) round(sigma2.init, digits[["var"]]) else NA, "tau2" = if (withG) round(tau2.init, digits[["var"]]) else NA, "rho" = if (withG) round(rho.init, digits[["var"]]) else NA, "gamma2" = if (withH) round(gamma2.init, digits[["var"]]) else NA, "phi" = if (withH) round(phi.init, digits[["var"]]) else NA), round(c( if (withS) sigma2 else NA, if (withG) tau2 else NA, if (withG) rho else NA, if (withH) gamma2 else NA, if (withH) phi else NA), digits[["var"]])) vcs <- data.frame(vcs, stringsAsFactors=FALSE) rownames(vcs) <- c("initial", "specified") vcs <- rbind(included=ifelse(c(rep(withS, sigma2s), rep(withG, tau2s), rep(withG, rhos), rep(withH, gamma2s), rep(withH, phis)), "Yes", "No"), fixed=unlist(vc.fix), vcs) tmp <- capture.output(print(vcs, na.print="---")) .print.output(tmp, mstyle$verbose) cat("\n") } } level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) #return(list(sigma2s, tau2s, rhos, gamma2s, phis)) ######################################################################### ######################################################################### ######################################################################### ###### model fitting, test statistics, and confidence intervals if (verbose > 1) message(mstyle$message("Model fitting ...\n")) ### estimate sigma2, tau2, rho, gamma2, and phi as needed if (optimizer=="optim") { par.arg <- "par" ctrl.arg <- ", control=optcontrol" } if (optimizer=="nlminb") { par.arg <- "start" ctrl.arg <- ", control=optcontrol" } if 
(is.element(optimizer, c("uobyqa","newuoa","bobyqa"))) { par.arg <- "par" optimizer <- paste0("minqa::", optimizer) ### need to use this since loading nloptr masks bobyqa() and newuoa() functions ctrl.arg <- ", control=optcontrol" } if (optimizer=="nloptr") { par.arg <- "x0" optimizer <- paste0("nloptr::nloptr") ### need to use this due to requireNamespace() ctrl.arg <- ", opts=optcontrol" } if (optimizer=="nlm") { par.arg <- "p" ### because of this, must use argument name pX for p (number of columns in X matrix) ctrl.arg <- paste(names(optcontrol), unlist(optcontrol), sep="=", collapse=", ") if (nchar(ctrl.arg) != 0L) ctrl.arg <- paste0(", ", ctrl.arg) } if (is.element(optimizer, c("hjk","nmk","mads"))) { par.arg <- "par" optimizer <- paste0("dfoptim::", optimizer) ### need to use this so that the optimizers can be found ctrl.arg <- ", control=optcontrol" } if (optimizer=="ucminf") { par.arg <- "par" optimizer <- paste0("ucminf::ucminf") ### need to use this due to requireNamespace() ctrl.arg <- ", control=optcontrol" } if (optimizer=="optimParallel") { par.arg <- "par" optimizer <- paste0("optimParallel::optimParallel") ### need to use this due to requireNamespace() ctrl.arg <- ", control=optcontrol, parallel=parallel" parallel$cl <- NULL if (is.null(cl)) { ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Control argument 'ncpus' must be >= 1.")) cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } else { if (!inherits(cl, "SOCKcluster")) stop(mstyle$stop("Specified cluster is not of class 'SOCKcluster'.")) } parallel$cl <- cl if (is.null(parallel$forward)) parallel$forward <- FALSE if (is.null(parallel$loginfo)) { if (verbose) { parallel$loginfo <- TRUE } else { parallel$loginfo <- FALSE } } } if (!is.element(method, c("FE","EE","CE")) && !is.null(random)) { ### if at least one parameter needs to be estimated if (anyNA(c(sigma2, tau2, rho, gamma2, phi))) { optcall <- paste(optimizer, "(", par.arg, "=c(con$sigma2.init, con$tau2.init, con$rho.init, con$gamma2.init, con$phi.init), .ll.rma.mv, reml=reml, ", ifelse(optimizer=="optim", "method=optmethod, ", ""), "Y=Y, M=V, A=NULL, X.fit=X, k=k, pX=p, D.S=D.S, Z.G1=Z.G1, Z.G2=Z.G2, Z.H1=Z.H1, Z.H2=Z.H2, g.Dmat=g.Dmat, h.Dmat=h.Dmat, sigma2.val=sigma2, tau2.val=tau2, rho.val=rho, gamma2.val=gamma2, phi.val=phi, sigma2s=sigma2s, tau2s=tau2s, rhos=rhos, gamma2s=gamma2s, phis=phis, withS=withS, withG=withG, withH=withH, struct=struct, g.levels.r=g.levels.r, h.levels.r=h.levels.r, g.values=g.values, h.values=h.values, sparse=sparse, cholesky=cholesky, posdefify=posdefify, vctransf=TRUE, verbose=verbose, digits=digits, REMLf=con$REMLf, dofit=FALSE", ctrl.arg, ")\n", sep="") #return(optcall) if (verbose) { opt.res <- try(eval(parse(text=optcall)), silent=!verbose) } else { opt.res <- try(suppressWarnings(eval(parse(text=optcall))), silent=!verbose) } #return(opt.res) if (optimizer == "optimParallel::optimParallel" && verbose) { tmp <- capture.output(print(opt.res$loginfo)) .print.output(tmp, mstyle$verbose) } if (inherits(opt.res, "try-error")) stop(mstyle$stop("Error during the optimization. 
Use verbose=TRUE and see help(rma.mv) for more details on the optimization routines.")) ### convergence checks if (is.element(optimizer, c("optim","nlminb","dfoptim::hjk","dfoptim::nmk","optimParallel::optimParallel")) && opt.res$convergence != 0) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (is.element(optimizer, c("dfoptim::mads")) && opt.res$convergence > optcontrol$tol) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (is.element(optimizer, c("minqa::uobyqa","minqa::newuoa","minqa::bobyqa")) && opt.res$ierr != 0) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (ierr = ", opt.res$ierr, ")."))) if (optimizer=="nloptr::nloptr" && !(opt.res$status >= 1 && opt.res$status <= 4)) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (status = ", opt.res$status, ")."))) if (optimizer=="ucminf::ucminf" && !(opt.res$convergence == 1 || opt.res$convergence == 2)) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (verbose > 2) { cat("\n") tmp <- capture.output(print(opt.res)) .print.output(tmp, mstyle$verbose) } ### copy estimated values to 'par' so code below works if (optimizer=="nloptr::nloptr") opt.res$par <- opt.res$solution if (optimizer=="nlm") opt.res$par <- opt.res$estimate if (p == k) { ### when fitting a saturated model (with REML estimation), estimated values of variance components can remain stuck ### at their initial values; this ensures that the values are fixed to zero (unless values were fixed by the user) sigma2[is.na(sigma2)] <- 0 tau2[is.na(tau2)] <- 0 rho[is.na(rho)] <- 0 gamma2[is.na(gamma2)] <- 0 phi[is.na(phi)] <- 0 } } else { ### if all parameter are fixed to known values, can skip optimization opt.res <- list(par=c(sigma2, tau2, rho, gamma2, phi)) } ### save these for Hessian computation sigma2.val <- sigma2 tau2.val <- tau2 rho.val <- rho gamma2.val <- gamma2 phi.val <- phi } else { opt.res <- list(par=c(0,0,0,0,0)) } ######################################################################### ### do the final model fit with estimated variance components fitcall <- .ll.rma.mv(opt.res$par, reml=reml, Y=Y, M=V, A=A, X.fit=X, k=k, pX=p, D.S=D.S, Z.G1=Z.G1, Z.G2=Z.G2, Z.H1=Z.H1, Z.H2=Z.H2, g.Dmat=g.Dmat, h.Dmat=h.Dmat, sigma2.val=sigma2, tau2.val=tau2, rho.val=rho, gamma2.val=gamma2, phi.val=phi, sigma2s=sigma2s, tau2s=tau2s, rhos=rhos, gamma2s=gamma2s, phis=phis, withS=withS, withG=withG, withH=withH, struct=struct, g.levels.r=g.levels.r, h.levels.r=h.levels.r, g.values=g.values, h.values=h.values, sparse=sparse, cholesky=cholesky, posdefify=posdefify, vctransf=TRUE, verbose=FALSE, digits=digits, REMLf=con$REMLf, dofit=TRUE) ### extract elements beta <- as.matrix(fitcall$beta) vb <- as.matrix(fitcall$vb) if (withS) sigma2 <- fitcall$sigma2 if (withG) { G <- as.matrix(fitcall$G) if (is.element(struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) colnames(G) <- rownames(G) <- seq_len(nrow(G)) if (is.element(struct[1], c("CS","HCS","UN","UNR","AR","HAR","CAR","ID","DIAG"))) colnames(G) <- rownames(G) <- g.levels.f[[1]] if (is.element(struct[1], c("GEN","GDIAG"))) colnames(G) <- rownames(G) <- g.names[-length(g.names)] tau2 <- fitcall$tau2 rho <- fitcall$rho } if (withH) { H <- as.matrix(fitcall$H) if (is.element(struct[2], 
c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD"))) colnames(H) <- rownames(H) <- seq_len(nrow(H)) if (is.element(struct[2], c("CS","HCS","UN","UNR","AR","HAR","CAR","ID","DIAG"))) colnames(H) <- rownames(H) <- h.levels.f[[1]] if (is.element(struct[2], c("GEN","GDIAG"))) colnames(H) <- rownames(H) <- h.names[-length(h.names)] gamma2 <- fitcall$gamma2 phi <- fitcall$phi } M <- fitcall$M ### remove row and column names of M ### (but only do this if M has row/column names) if (!is.null(dimnames(M))) M <- unname(M) #print(M[1:8,1:8]) if (verbose > 1) message(mstyle$message("Conducting tests of the fixed effects ...")) ### ddf calculation if (test == "t") { ddf <- .ddf.calc(dfs, X=X, k=k, p=p, mf.s=mf.s, mf.g=mf.g, mf.h=mf.h) } else { ddf <- rep(NA, p) } ### QM calculation QM <- try(as.vector(t(beta)[btt] %*% chol2inv(chol(vb[btt,btt])) %*% beta[btt]), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA rownames(beta) <- rownames(vb) <- colnames(vb) <- colnames(X) se <- sqrt(diag(vb)) names(se) <- NULL zval <- c(beta/se) if (test == "t") { QM <- QM / m QMdf <- c(m, min(ddf[btt])) QMp <- if (QMdf[2] > 0) pf(QM, df1=QMdf[1], df2=QMdf[2], lower.tail=FALSE) else NA pval <- sapply(seq_along(ddf), function(j) if (ddf[j] > 0) 2*pt(abs(zval[j]), df=ddf[j], lower.tail=FALSE) else NA) crit <- sapply(seq_along(ddf), function(j) if (ddf[j] > 0) qt(level/2, df=ddf[j], lower.tail=FALSE) else NA) } else { QMdf <- c(m, NA) QMp <- pchisq(QM, df=QMdf[1], lower.tail=FALSE) pval <- 2*pnorm(abs(zval), lower.tail=FALSE) crit <- qnorm(level/2, lower.tail=FALSE) } ci.lb <- c(beta - crit * se) ci.ub <- c(beta + crit * se) ######################################################################### ### heterogeneity test (Wald-type test of the extra coefficients in the saturated model) if (verbose > 1) message(mstyle$message("\nConducting heterogeneity test ...")) QE.df <- k-p if (QE.df > 0L) { if (!is.na(QE)) { ### if V is not positive definite, FE model fit will fail; then QE is NA ### otherwise compute the RSS (which is equal to the Q/QE-test statistic) QEp <- pchisq(QE, df=QE.df, lower.tail=FALSE) } } else { ### if the user fits a saturated model, then fit must be perfect and QE = 0 and QEp = 1 QE <- 0 QEp <- 1 } ### log-likelihood under a saturated model with ML estimation ll.QE <- -1/2 * (k) * log(2*base::pi) - 1/2 * determinant(V, logarithm=TRUE)$modulus ######################################################################### ###### compute Hessian hessian <- NA if (con$hessian) { if (verbose > 1) message(mstyle$message("Computing Hessian ...")) hessian <- try(numDeriv::hessian(func=.ll.rma.mv, x = if (con$vctransf) opt.res$par else c(sigma2, tau2, rho, gamma2, phi), method.args=con$hessianCtrl, reml=reml, Y=Y, M=V, A=NULL, X.fit=X, k=k, pX=p, D.S=D.S, Z.G1=Z.G1, Z.G2=Z.G2, Z.H1=Z.H1, Z.H2=Z.H2, g.Dmat=g.Dmat, h.Dmat=h.Dmat, sigma2.val=sigma2.val, tau2.val=tau2.val, rho.val=rho.val, gamma2.val=gamma2.val, phi.val=phi.val, sigma2s=sigma2s, tau2s=tau2s, rhos=rhos, gamma2s=gamma2s, phis=phis, withS=withS, withG=withG, withH=withH, struct=struct, g.levels.r=g.levels.r, h.levels.r=h.levels.r, g.values=g.values, h.values=h.values, sparse=sparse, cholesky=ifelse(c(con$vctransf,con$vctransf) & cholesky, TRUE, FALSE), posdefify=posdefify, vctransf=con$vctransf, verbose=verbose, digits=digits, REMLf=con$REMLf), silent=TRUE) if (inherits(hessian, "try-error")) { warning(mstyle$warning("Error when trying to compute Hessian."), call.=FALSE) } else { ### row/column names colnames(hessian) <- 
seq_len(ncol(hessian)) ### need to do this, so the subsetting of colnames below works if (sigma2s == 1) { colnames(hessian)[1] <- "sigma^2" } else { colnames(hessian)[1:sigma2s] <- paste("sigma^2.", seq_len(sigma2s), sep="") } if (tau2s == 1) { colnames(hessian)[sigma2s+1] <- "tau^2" } else { colnames(hessian)[(sigma2s+1):(sigma2s+tau2s)] <- paste("tau^2.", seq_len(tau2s), sep="") } if (rhos == 1) { colnames(hessian)[sigma2s+tau2s+1] <- "rho" } else { #colnames(hessian)[(sigma2s+tau2s+1):(sigma2s+tau2s+rhos)] <- paste("rho.", outer(seq_len(g.nlevels.f[1]), seq_len(g.nlevels.f), paste, sep=".")[upper.tri(matrix(NA,nrow=g.nlevels.f,ncol=g.nlevels.f))], sep="") colnames(hessian)[(sigma2s+tau2s+1):(sigma2s+tau2s+rhos)] <- paste("rho.", seq_len(rhos), sep="") } if (gamma2s == 1) { colnames(hessian)[sigma2s+tau2s+rhos+1] <- "gamma^2" } else { colnames(hessian)[(sigma2s+tau2s+rhos+1):(sigma2s+tau2s+rhos+gamma2s)] <- paste("gamma^2.", seq_len(gamma2s), sep="") } if (phis == 1) { colnames(hessian)[sigma2s+tau2s+rhos+gamma2s+1] <- "phi" } else { #colnames(hessian)[(sigma2s+tau2s+rhos+gamma2s+1):(sigma2s+tau2s+rhos+gamma2s+phis)] <- paste("phi.", outer(seq_len(h.nlevels.f[1]), seq_len(h.nlevels.f), paste, sep=".")[upper.tri(matrix(NA,nrow=h.nlevels.f,ncol=h.nlevels.f))], sep="") colnames(hessian)[(sigma2s+tau2s+rhos+gamma2s+1):(sigma2s+tau2s+rhos+gamma2s+phis)] <- paste("phi.", seq_len(phis), sep="") } rownames(hessian) <- colnames(hessian) ### select correct rows/columns from Hessian depending on components in the model ### FIXME: this isn't quite right, since "DIAG" and "ID" have a rho/phi element, but this is fixed at 0, so should also exclude this ### in fact, all fixed elements should be filtered out #if (withS && withG && withH) #hessian <- hessian[1:nrow(hessian),1:ncol(hessian), drop=FALSE] if (withS && withG && !withH) hessian <- hessian[1:(nrow(hessian)-2),1:(ncol(hessian)-2), drop=FALSE] if (withS && !withG && !withH) hessian <- hessian[1:(nrow(hessian)-4),1:(ncol(hessian)-4), drop=FALSE] if (!withS && withG && withH) hessian <- hessian[2:nrow(hessian),2:ncol(hessian), drop=FALSE] if (!withS && withG && !withH) hessian <- hessian[2:(nrow(hessian)-2),2:(ncol(hessian)-2), drop=FALSE] if (!withS && !withG && !withH) hessian <- NA } } ######################################################################### ###### fit statistics if (verbose > 1) message(mstyle$message("Computing fit statistics and log likelihood ...")) ### note: this only counts *estimated* variance components and correlations for the total number of parameters parms <- p + ifelse(withS, sum(ifelse(sigma2.fix,0,1)), 0) + ifelse(withG, sum(ifelse(tau2.fix,0,1)), 0) + ifelse(withG, sum(ifelse(rho.fix,0,1)), 0) + ifelse(withH, sum(ifelse(gamma2.fix,0,1)), 0) + ifelse(withH, sum(ifelse(phi.fix,0,1)), 0) ### note: this counts all variance components and correlations for the total number of parameters, even if they were fixed by the user or function #parms <- p + ifelse(withS, sigma2s, 0) + ifelse(withG, tau2s, 0) + ifelse(withG, rhos, 0) + ifelse(withH, gamma2s, 0) + ifelse(withH, phis, 0) ll.ML <- fitcall$llvals[1] ll.REML <- fitcall$llvals[2] dev.ML <- -2 * (ll.ML - ll.QE) AIC.ML <- -2 * ll.ML + 2*parms BIC.ML <- -2 * ll.ML + parms * log(k) AICc.ML <- -2 * ll.ML + 2*parms * max(k, parms+2) / (max(k, parms+2) - parms - 1) dev.REML <- -2 * (ll.REML - 0) ### saturated model has ll = 0 when using the full REML likelihood AIC.REML <- -2 * ll.REML + 2*parms BIC.REML <- -2 * ll.REML + parms * log(k-p) AICc.REML <- -2 * ll.REML + 2*parms 
* max(k-p, parms+2) / (max(k-p, parms+2) - parms - 1) fit.stats <- matrix(c(ll.ML, dev.ML, AIC.ML, BIC.ML, AICc.ML, ll.REML, dev.REML, AIC.REML, BIC.REML, AICc.REML), ncol=2, byrow=FALSE) dimnames(fit.stats) <- list(c("ll","dev","AIC","BIC","AICc"), c("ML","REML")) fit.stats <- data.frame(fit.stats) ######################################################################### ###### prepare output if (verbose > 1) message(mstyle$message("Preparing output ...")) p.eff <- p k.eff <- k weighted <- TRUE if (is.null(ddd$outlist)) { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, sigma2=sigma2, tau2=tau2, rho=rho, gamma2=gamma2, phi=phi, QE=QE, QEp=QEp, QM=QM, QMdf=QMdf, QMp=QMp, k=k, k.f=k.f, k.eff=k.eff, k.all=k.all, p=p, p.eff=p.eff, parms=parms, int.only=int.only, int.incl=int.incl, intercept=intercept, allvipos=allvipos, coef.na=coef.na, yi=yi, vi=vi, V=V, W=A, X=X, yi.f=yi.f, vi.f=vi.f, V.f=V.f, X.f=X.f, W.f=W.f, ni=ni, ni.f=ni.f, M=M, G=G, H=H, hessian=hessian, ids=ids, not.na=not.na, subset=subset, slab=slab, slab.null=slab.null, measure=measure, method=method, weighted=weighted, test=test, dfs=dfs, ddf=ddf, btt=btt, m=m, digits=digits, level=level, sparse=sparse, dist=ddd$dist, control=control, verbose=verbose, fit.stats=fit.stats, vc.fix=vc.fix, withS=withS, withG=withG, withH=withH, withR=withR, formulas=formulas, sigma2s=sigma2s, tau2s=tau2s, rhos=rhos, gamma2s=gamma2s, phis=phis, s.names=s.names, g.names=g.names, h.names=h.names, s.levels=s.levels, s.levels.f=s.levels.f, s.nlevels=s.nlevels, s.nlevels.f=s.nlevels.f, g.nlevels.f=g.nlevels.f, g.nlevels=g.nlevels, h.nlevels.f=h.nlevels.f, h.nlevels=h.nlevels, g.levels.f=g.levels.f, g.levels.k=g.levels.k, g.levels.comb.k=g.levels.comb.k, h.levels.f=h.levels.f, h.levels.k=h.levels.k, h.levels.comb.k=h.levels.comb.k, struct=struct, Rfix=Rfix, R=R, Rscale=Rscale, mf.r=mf.r, mf.s=mf.s, mf.g=mf.g, mf.g.f=mf.g.f, mf.h=mf.h, mf.h.f=mf.h.f, Z.S=Z.S, Z.G1=Z.G1, Z.G2=Z.G2, Z.H1=Z.H1, Z.H2=Z.H2, formula.yi=formula.yi, formula.mods=formula.mods, random=random, version=packageVersion("metafor"), call=mf) } if (!is.null(ddd$outlist)) { if (ddd$outlist == "minimal") { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, sigma2=sigma2, tau2=tau2, rho=rho, gamma2=gamma2, phi=phi, QE=QE, QEp=QEp, QM=QM, QMdf=QMdf, QMp=QMp, k=k, k.eff=k.eff, k.all=k.all, p=p, p.eff=p.eff, parms=parms, int.only=int.only, measure=measure, method=method, test=test, dfs=dfs, ddf=ddf, btt=btt, m=m, digits=digits, fit.stats=fit.stats, vc.fix=vc.fix, withS=withS, withG=withG, withH=withH, withR=withR, s.names=s.names, g.names=g.names, h.names=h.names, s.nlevels=s.nlevels, g.nlevels.f=g.nlevels.f, g.nlevels=g.nlevels, h.nlevels.f=h.nlevels.f, h.nlevels=h.nlevels, g.levels.f=g.levels.f, g.levels.k=g.levels.k, g.levels.comb.k=g.levels.comb.k, h.levels.f=h.levels.f, h.levels.k=h.levels.k, h.levels.comb.k=h.levels.comb.k, struct=struct, Rfix=Rfix) } else { res <- eval(parse(text=paste0("list(", ddd$outlist, ")"))) } } time.end <- proc.time() res$time <- unname(time.end - time.start)[3] if (.isTRUE(ddd$time)) .print.time(res$time) if (verbose || .isTRUE(ddd$time)) cat("\n") class(res) <- c("rma.mv", "rma") return(res) } metafor/R/dfbetas.rma.uni.r0000644000176200001440000000016413457322061015257 0ustar liggesusersdfbetas.rma.uni <- function(model, progbar=FALSE, ...) influence(model, progbar=progbar, measure="dfbetas", ...) 
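### (added note) dfbetas.rma.uni() is a thin wrapper that delegates to the influence() method with
### measure="dfbetas"; a hypothetical usage sketch (assuming 'dat' has yi and vi columns):
### dfbetas(rma(yi, vi, data=dat)) returns, for each study, the standardized change in the model
### coefficients when that study is left out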
metafor/R/to.long.r0000644000176200001440000012541214043614074013663 0ustar liggesusersto.long <- function(measure, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, ni, data, slab, subset, add=1/2, to="none", drop00=FALSE, vlong=FALSE, append=TRUE, var.names) { mstyle <- .get.mstyle("crayon" %in% .packages()) ### check argument specifications if (missing(measure)) stop(mstyle$stop("Must specify an effect size or outcome measure via the 'measure' argument.")) if (!is.character(measure)) stop(mstyle$stop("The 'measure' argument must be a character string.")) if (!is.element(measure, c("RR","OR","PETO","RD","AS","PHI","YUQ","YUY","RTET", ### 2x2 table measures "PBIT","OR2D","OR2DN","OR2DL", ### - transformations to SMD "MPRD","MPRR","MPOR","MPORC","MPPETO", ### - measures for matched pairs data "IRR","IRD","IRSD", ### two-group person-time data measures "MD","SMD","SMDH","ROM", ### two-group mean/SD measures "CVR","VR", ### coefficient of variation ratio, variability ratio "RPB","RBIS","D2OR","D2ORN","D2ORL", ### - transformations to r_PB, r_BIS, and log(OR) "COR","UCOR","ZCOR", ### correlations (raw and r-to-z transformed) "PCOR","ZPCOR","SPCOR", ### partial and semi-partial correlations "PR","PLN","PLO","PAS","PFT", ### single proportions (and transformations thereof) "IR","IRLN","IRS","IRFT", ### single-group person-time data (and transformations thereof) "MN","MNLN","CVLN","SDLN","SMD1", ### mean, log(mean), log(CV), log(SD), single-group SMD "MC","SMCC","SMCR","SMCRH","ROMC","CVRC","VRC", ### raw/standardized mean change, log(ROM), CVR, and VR for dependent samples "ARAW","AHW","ABT"))) ### alpha (and transformations thereof) stop(mstyle$stop("Unknown 'measure' specified.")) if (is.element(measure, c("CVR","VR","PCOR","ZPCOR","SPCOR","CVLN","SDLN","VRC"))) stop(mstyle$stop("Function not available for this outcome measure.")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!is.element(to, c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) ### check if data argument has been specified if (missing(data)) data <- NULL ### need this at the end to check if append=TRUE can actually be done has.data <- !is.null(data) if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### get slab and subset arguments (will be NULL when unspecified) mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) ######################################################################### ######################################################################### ######################################################################### if (is.element(measure, c("RR","OR","RD","AS","PETO","PHI","YUQ","YUY","RTET","PBIT","OR2D","OR2DN","OR2DL","MPRD","MPRR","MPOR","MPORC","MPPETO"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- 
eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci k <- length(ai) ### number of outcomes before subsetting if (length(ai)==0L || length(bi)==0L || length(ci)==0L || length(di)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(ai),length(bi),length(ci),length(di)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] } n1i <- ai + bi n2i <- ci + di if (any(c(ai > n1i, ci > n2i), na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(ai, bi, ci, di) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(c(n1i < 0, n2i < 0), na.rm=TRUE)) stop(mstyle$stop("One or more group sizes are < 0.")) ni.u <- ai + bi + ci + di ### unadjusted total sample sizes ### if drop00=TRUE, set counts to NA for studies that have no events (or all events) in both arms if (drop00) { id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } if (to == "all") { ### always add to all cells in all studies ai <- ai + add ci <- ci + add bi <- bi + add di <- di + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(ai == 0L | ci == 0L | bi == 0L | di == 0L) id0[is.na(id0)] <- FALSE ai[id0] <- ai[id0] + add ci[id0] <- ci[id0] + add bi[id0] <- bi[id0] + add di[id0] <- di[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(ai == 0L | ci == 0L | bi == 0L | di == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { ai <- ai + add ci <- ci + add bi <- bi + add di <- di + add } } } ######################################################################### if (is.element(measure, c("IRR","IRD","IRSD"))) { mf.x1i <- mf[[match("x1i", names(mf))]] mf.x2i <- mf[[match("x2i", names(mf))]] mf.t1i <- mf[[match("t1i", names(mf))]] mf.t2i <- mf[[match("t2i", names(mf))]] x1i <- eval(mf.x1i, data, enclos=sys.frame(sys.parent())) x2i <- eval(mf.x2i, data, enclos=sys.frame(sys.parent())) t1i <- eval(mf.t1i, data, enclos=sys.frame(sys.parent())) t2i <- eval(mf.t2i, data, enclos=sys.frame(sys.parent())) k <- length(x1i) ### number of outcomes before subsetting if (length(x1i)==0L || length(x2i)==0L || length(t1i)==0L || length(t2i)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(x1i),length(x2i),length(t1i),length(t2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) x1i <- x1i[subset] x2i <- x2i[subset] t1i <- t1i[subset] t2i <- t2i[subset] } if (any(c(x1i, x2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(c(t1i, t2i) <= 0, na.rm=TRUE)) stop(mstyle$stop("One or more person-times are <= 0.")) ni.u <- t1i + t2i ### unadjusted total sample sizes ### if drop00=TRUE, set counts to NA for studies that have no events in both arms if (drop00) { id00 <- c(x1i == 0L & x2i == 0L) id00[is.na(id00)] <- FALSE x1i[id00] <- NA x2i[id00] <- NA } if (to == "all") { ### always add to all cells in all studies x1i <- x1i + add x2i <- x2i + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(x1i == 0L | x2i == 0L) id0[is.na(id0)] <- FALSE x1i[id0] <- x1i[id0] + add x2i[id0] <- x2i[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(x1i == 0L | x2i == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { x1i <- x1i + add x2i <- x2i + add } } } ######################################################################### if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL"))) { mf.m1i <- mf[[match("m1i", names(mf))]] mf.m2i <- mf[[match("m2i", names(mf))]] mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) k <- length(n1i) ### number of outcomes before subsetting if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(sd2i)==0L || length(n1i)==0L || length(n2i)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(m1i),length(m2i),length(sd1i),length(sd2i),length(n1i),length(n2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] n1i <- n1i[subset] n2i <- n2i[subset] } if (any(c(sd1i, sd2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) if (any(c(n1i, n2i) < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- n1i + n2i ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("COR","UCOR","ZCOR"))) { mf.ri <- mf[[match("ri", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ri) ### number of outcomes before subsetting if (length(ri)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(ri) != length(ni)) stop(mstyle$stop("Supplied data vectors are not of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ri <- ri[subset] ni <- ni[subset] } if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("PR","PLN","PLO","PAS","PFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) if (is.null(mi)) mi <- ni - xi k <- length(xi) ### number of outcomes before subsetting if (length(xi)==0L || length(mi)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (length(xi) != length(mi)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] mi <- mi[subset] } ni <- xi + mi if (any(xi > ni, na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(xi, mi) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more group sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes if (to == "all") { ### always add to all cells in all studies xi <- xi + add mi <- mi + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(xi == 0L | mi == 0L) id0[is.na(id0)] <- FALSE xi[id0] <- xi[id0] + add mi[id0] <- mi[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(xi == 0L | mi == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { xi <- xi + add mi <- mi + add } } } ######################################################################### if (is.element(measure, c("IR","IRLN","IRS","IRFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.ti <- mf[[match("ti", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) k <- length(xi) ### number of outcomes before subsetting if (length(xi)==0L || length(ti)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(xi) != length(ti)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] ti <- ti[subset] } if (any(xi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(ti <= 0, na.rm=TRUE)) stop(mstyle$stop("One or more person-times are <= 0.")) ni.u <- ti ### unadjusted total sample sizes if (to == "all") { ### always add to all cells in all studies xi <- xi + add } if (to == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(xi == 0L) id0[is.na(id0)] <- FALSE xi[id0] <- xi[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(xi == 0L) id0[is.na(id0)] <- FALSE if (any(id0)) { xi <- xi + add } } } ######################################################################### if (is.element(measure, c("MN","MNLN","SMD1"))) { mf.mi <- mf[[match("mi", names(mf))]] mf.sdi <- mf[[match("sdi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) sdi <- eval(mf.sdi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ni) ### number of outcomes before subsetting if (length(mi)==0L || length(sdi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(mi),length(sdi),length(ni)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) mi <- mi[subset] sdi <- sdi[subset] ni <- ni[subset] } if (any(sdi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) if (is.element(measure, c("MNLN","CVLN")) && any(mi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more means are negative.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("MC","SMCC","SMCR","SMCRH","ROMC","CVRC"))) { mf.m1i <- mf[[match("m1i", names(mf))]] mf.m2i <- mf[[match("m2i", names(mf))]] mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] ### for SMCR, do not need to supply this mf.ni <- mf[[match("ni", names(mf))]] mf.ri <- mf[[match("ri", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) k <- length(m1i) ### number of outcomes before subsetting if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(sd2i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(m1i),length(m2i),length(sd1i),length(sd2i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } else { if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(m1i),length(m2i),length(sd1i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] ni <- ni[subset] ri <- ri[subset] } if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { if (any(c(sd1i, sd2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) } else { if (any(sd1i < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) } if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### if (is.element(measure, c("ARAW","AHW","ABT"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ai) ### number of outcomes before subsetting if (length(ai)==0L || length(mi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k == c(length(ai),length(mi),length(ni)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] mi <- mi[subset] ni <- ni[subset] } if (any(ai > 1, na.rm=TRUE)) stop(mstyle$stop("One or more alpha values are > 1.")) if (any(mi < 2, na.rm=TRUE)) stop(mstyle$stop("One or more mi values are < 2.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes } ######################################################################### ######################################################################### ######################################################################### ### generate study labels if none are specified if (is.null(slab)) { slab <- seq_len(k) } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) } ### if a subset of studies is specified if (!is.null(subset)) { slab <- slab[subset] if (has.data) data <- data[subset,] } ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ######################################################################### ######################################################################### ######################################################################### if (is.element(measure, c("RR","OR","RD","AS","PETO","PHI","YUQ","YUY","RTET","PBIT","OR2D","OR2DN","OR2DL"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? 
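### A minimal standalone sketch (not part of the package source) of the zero-cell
### adjustment applied in the measure-specific blocks above; 'xi', 'mi', and 'add'
### below are hypothetical values chosen only for demonstration.
xi  <- c(0, 3, 5)    # event counts
mi  <- c(10, 0, 12)  # non-event counts
add <- 1/2
id0 <- c(xi == 0L | mi == 0L)   # studies with at least one zero cell
id0[is.na(id0)] <- FALSE
xi[id0] <- xi[id0] + add        # corresponds to to="only0": adjust only those studies
mi[id0] <- mi[id0] + add
xi; mi                          # first two studies adjusted, third left unchanged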
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset if (vlong) { ### create very long format dataset dat <- data.frame(rep(slab, each=4), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,1,2,2), k) dat[[3]] <- rep(c(1,2,1,2), k) dat[[4]] <- c(rbind(ai,bi,ci,di)) if (missing(var.names)) { names(dat) <- c("study", "group", "outcome", "freq") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) dat[[3]] <- factor(dat[[3]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=4),], dat) } else { ### create regular long format dataset dat <- data.frame(rep(slab, each=2), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,2), k) dat[[3]] <- c(rbind(ai,ci)) dat[[4]] <- c(rbind(bi,di)) if (missing(var.names)) { names(dat) <- c("study", "group", "out1", "out2") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=2),], dat) } } ######################################################################### if (is.element(measure, c("MPRD","MPRR","MPOR"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? 
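### A minimal usage sketch (not part of the package source), assuming this is the
### to.long() function of the package and that the dat.bcg dataset shipped with
### metafor is available: the 2x2 table counts are restructured into the long
### (and, with vlong=TRUE, the very long) format constructed above.
library(metafor)
dat <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg,
               data=dat.bcg, append=FALSE)
head(dat)    # columns: study, group, out1, out2 (one row per group within each study)
datv <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg,
                data=dat.bcg, append=FALSE, vlong=TRUE)
head(datv)   # columns: study, group, outcome, freq (one row per table cell)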
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset if (vlong) { ### create very long format dataset dat <- data.frame(rep(slab, each=4), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,1,2,2), k) dat[[3]] <- rep(c(1,2,1,2), k) dat[[4]] <- c(rbind(ai+bi,ci+di,ai+ci,bi+di)) if (missing(var.names)) { names(dat) <- c("study", "time", "outcome", "freq") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) dat[[3]] <- factor(dat[[3]], levels=c(2,1)) if (has.data && append) dat <- data.frame(data[rep(seq_len(k), each=4),], dat) } else { ### create regular long format dataset dat <- data.frame(rep(slab, each=2), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,2), k) dat[[3]] <- c(rbind(ai+bi,ai+ci)) dat[[4]] <- c(rbind(ci+di,bi+di)) if (missing(var.names)) { names(dat) <- c("study", "time", "out1", "out2") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=2),], dat) } } ######################################################################### if (is.element(measure, c("MPORC","MPPETO"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? 
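### A small numeric sketch (not part of the package source) of the margin calculation
### used above for the matched-pairs measures: the paired 2x2 table is reduced to the
### outcome frequencies at the two time points; the cell counts are hypothetical.
ai <- 10; bi <- 5; ci <- 3; di <- 82
c(out1.time1 = ai + bi, out2.time1 = ci + di)   # outcome frequencies at time 1
c(out1.time2 = ai + ci, out2.time2 = bi + di)   # outcome frequencies at time 2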
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset if (vlong) { ### create very long format dataset dat <- data.frame(rep(slab, each=4), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,1,2,2), k) dat[[3]] <- rep(c(1,2,1,2), k) dat[[4]] <- c(rbind(ai,bi,ci,di)) if (missing(var.names)) { names(dat) <- c("study", "out.time1", "out.time2", "freq") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) dat[[3]] <- factor(dat[[3]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=4),], dat) } else { ### create regular long format dataset dat <- data.frame(rep(slab, each=2), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,2), k) dat[[3]] <- c(rbind(ai,ci)) dat[[4]] <- c(rbind(bi,di)) if (missing(var.names)) { names(dat) <- c("study", "out.time1", "out1.time2", "out2.time2") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=2),], dat) } } ######################################################################### if (is.element(measure, c("IRR","IRD","IRSD"))) { ### check for NAs in table data and act accordingly has.na <- is.na(x1i) | is.na(x2i) | is.na(t1i) | is.na(t2i) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { x1i <- x1i[not.na] x2i <- x2i[not.na] t1i <- t1i[not.na] t2i <- t2i[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(x1i) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset dat <- data.frame(rep(slab, each=2), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,2), k) dat[[3]] <- c(rbind(x1i,x2i)) dat[[4]] <- c(rbind(t1i,t2i)) if (missing(var.names)) { names(dat) <- c("study", "group", "events", "ptime") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=2),], dat) } ######################################################################### if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL"))) { ### check for NAs in table data and act accordingly has.na <- is.na(m1i) | is.na(m2i) | is.na(sd1i) | is.na(sd2i) | is.na(n1i) | is.na(n2i) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { m1i <- m1i[not.na] m2i <- m2i[not.na] sd1i <- sd1i[not.na] sd2i <- sd2i[not.na] n1i <- n1i[not.na] n2i <- n2i[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(m1i) ### at least one study left? 
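### A minimal usage sketch (not part of the package source), assuming this is the
### to.long() function of the package: two-group person-time data are restructured
### into the long format built above (two rows per study); the data are hypothetical.
library(metafor)
dat.wide <- data.frame(x1i = c(12, 8), x2i = c(20, 15),
                       t1i = c(100, 90), t2i = c(110, 95))
to.long(measure="IRR", x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i,
        data=dat.wide, append=FALSE)
### resulting columns: study, group, events, ptime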
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset dat <- data.frame(rep(slab, each=2), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,2), k) dat[[3]] <- c(rbind(m1i,m2i)) dat[[4]] <- c(rbind(sd1i,sd2i)) dat[[5]] <- c(rbind(n1i,n2i)) if (missing(var.names)) { names(dat) <- c("study", "group", "mean", "sd", "n") } else { if (length(var.names) != 5L) stop(mstyle$stop("Variable names not of length 5.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=2),], dat) } ######################################################################### if (is.element(measure, c("COR","UCOR","ZCOR"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ri) | is.na(ni) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ri <- ri[not.na] ni <- ni[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ri) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- ri dat[[3]] <- ni if (missing(var.names)) { names(dat) <- c("study", "r", "n") } else { if (length(var.names) != 3L) stop(mstyle$stop("Variable names not of length 3.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- cbind(data, dat) } ######################################################################### if (is.element(measure, c("PR","PLN","PLO","PAS","PFT"))) { ### check for NAs in table data and act accordingly has.na <- is.na(xi) | is.na(mi) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { xi <- xi[not.na] mi <- mi[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(xi) ### at least one study left? 
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset if (vlong) { ### create very long format dataset dat <- data.frame(rep(slab, each=2), stringsAsFactors=FALSE) dat[[2]] <- rep(c(1,2), k) dat[[3]] <- c(rbind(xi,mi)) if (missing(var.names)) { names(dat) <- c("study", "outcome", "freq") } else { if (length(var.names) != 3L) stop(mstyle$stop("Variable names not of length 3.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) dat[[2]] <- factor(dat[[2]], levels=c(2,1)) if (has.data && append) dat <- cbind(data[rep(seq_len(k), each=2),], dat) } else { ### create regular long format dataset dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- xi dat[[3]] <- mi if (missing(var.names)) { names(dat) <- c("study", "out1", "out2") } else { if (length(var.names) != 3L) stop(mstyle$stop("Variable names not of length 3.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- cbind(data, dat) } } ######################################################################### if (is.element(measure, c("IR","IRLN","IRS","IRFT"))) { ### check for NAs in table data and act accordingly has.na <- is.na(xi) | is.na(ti) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { xi <- xi[not.na] ti <- ti[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(xi) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- xi dat[[3]] <- ti if (missing(var.names)) { names(dat) <- c("study", "events", "ptime") } else { if (length(var.names) != 3L) stop(mstyle$stop("Variable names not of length 3.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- cbind(data, dat) } ######################################################################### if (is.element(measure, c("MN","MNLN","SMD1"))) { ### check for NAs in table data and act accordingly has.na <- is.na(mi) | is.na(sdi) | is.na(ni) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { mi <- mi[not.na] sdi <- sdi[not.na] ni <- ni[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ni) ### at least one study left? 
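### A minimal usage sketch (not part of the package source), assuming this is the
### to.long() function of the package: for proportion data, 'mi' is computed as ni - xi
### when omitted (as above), and 'var.names' must match the number of generated columns;
### the data below are hypothetical.
library(metafor)
dat.wide <- data.frame(xi = c(4, 0, 9), ni = c(50, 40, 60))
to.long(measure="PR", xi=xi, ni=ni, data=dat.wide, append=FALSE,
        var.names=c("id", "cases", "noncases"))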
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- mi dat[[3]] <- sdi dat[[4]] <- ni if (missing(var.names)) { names(dat) <- c("study", "mean", "sd", "n") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- cbind(data, dat) } ######################################################################### if (is.element(measure, c("MC","SMCC","SMCR","SMCRH","ROMC","CVRC"))) { ### check for NAs in table data and act accordingly if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { has.na <- is.na(m1i) | is.na(m2i) | is.na(sd1i) | is.na(sd2i) | is.na(ni) | is.na(ri) } else { has.na <- is.na(m1i) | is.na(m2i) | is.na(sd1i) | is.na(ni) | is.na(ri) } if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { m1i <- m1i[not.na] m2i <- m2i[not.na] sd1i <- sd1i[not.na] if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) sd2i <- sd2i[not.na] ni <- ni[not.na] ri <- ri[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(m1i) ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- m1i dat[[3]] <- m2i dat[[4]] <- sd1i dat[[5]] <- sd2i dat[[6]] <- ni dat[[7]] <- ri if (missing(var.names)) { names(dat) <- c("study", "mean1", "mean2", "sd1", "sd2", "n", "r") } else { if (length(var.names) != 7L) stop(mstyle$stop("Variable names not of length 7.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- cbind(data, dat) } else { dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- m1i dat[[3]] <- m2i dat[[4]] <- sd1i dat[[5]] <- ni dat[[6]] <- ri if (missing(var.names)) { names(dat) <- c("study", "mean1", "mean2", "sd1", "n", "r") } else { if (length(var.names) != 6L) stop(mstyle$stop("Variable names not of length 6.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- cbind(data, dat) } } ######################################################################### if (is.element(measure, c("ARAW","AHW","ABT"))) { ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(mi) | is.na(ni) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit") { ai <- ai[not.na] mi <- mi[not.na] ni <- ni[not.na] slab <- slab[not.na] if (has.data) data <- data[not.na,] warning(mstyle$warning("Tables with NAs omitted."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } k <- length(ai) ### at least one study left? 
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### create long format dataset dat <- data.frame(slab, stringsAsFactors=FALSE) dat[[2]] <- ai dat[[3]] <- mi dat[[4]] <- ni if (missing(var.names)) { names(dat) <- c("study", "alpha", "m", "n") } else { if (length(var.names) != 4L) stop(mstyle$stop("Variable names not of length 4.")) names(dat) <- var.names } dat[[1]] <- factor(dat[[1]]) if (has.data && append) dat <- data.frame(data, dat) } ######################################################################### rownames(dat) <- seq_len(nrow(dat)) return(dat) } metafor/R/misc.func.hidden.uni.r0000644000176200001440000001545213755031171016216 0ustar liggesusers############################################################################ ### function to calculate: ### solve(t(X) %*% W %*% X) = .invcalc(X=X, W=W, k=k) ### solve(t(X) %*% X) = .invcalc(X=X, W=diag(k), k=k) ### without taking the actual inverse .invcalc <- function(X, W, k) { sWX <- sqrt(W) %*% X res.qrs <- qr.solve(sWX, diag(k)) #res.qrs <- try(qr.solve(sWX, diag(k)), silent=TRUE) #if (inherits(res.qrs, "try-error")) # stop("Cannot compute QR decomposition.") return(tcrossprod(res.qrs)) } ############################################################################ ### function for confint.rma.uni() with Q-profile method and for the PM estimator .QE.func <- function(tau2val, Y, vi, X, k, objective, verbose=FALSE, digits=4) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (any(tau2val + vi < 0)) stop(mstyle$stop("Some marginal variances are negative.")) W <- diag(1/(vi + tau2val), nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) RSS <- crossprod(Y,P) %*% Y if (verbose) cat(mstyle$verbose(paste("tau2 =", formatC(tau2val, digits=digits[["var"]], width=digits[["var"]]+4, format="f"), " RSS - objective =", formatC(RSS - objective, format="f", digits=digits[["var"]], flag=" "), "\n"))) return(RSS - objective) } ############################################################################ ### function for confint.rma.uni() with method="GENQ" .GENQ.func <- function(tau2val, P, vi, Q, level, k, p, getlower, verbose=FALSE, digits=4) { mstyle <- .get.mstyle("crayon" %in% .packages()) S <- diag(sqrt(vi + tau2val), nrow=k, ncol=k) lambda <- Re(eigen(S %*% P %*% S, symmetric=TRUE, only.values=TRUE)$values) tmp <- CompQuadForm::farebrother(Q, lambda[seq_len(k-p)]) ### starting with version 1.4.2 of CompQuadForm, the element is called 'Qq' (before it was called 'res') ### this way, things should work regardless of the version of CompQuadForm that is installed if (exists("res", tmp)) tmp$Qq <- tmp$res if (getlower) { res <- tmp$Qq - level } else { res <- (1 - tmp$Qq) - level } if (verbose) cat(mstyle$verbose(paste("tau2 =", formatC(tau2val, digits=digits[["var"]], width=digits[["var"]]+4, format="f"), " objective =", formatC(res, format="f", digits=digits[["var"]], flag=" "), "\n"))) return(res) } ############################################################################ ### generate all possible permutations # .genperms <- function(k) { # # v <- seq_len(k) # # sub <- function(k, v) { # if (k==1L) { # matrix(v,1,k) # } else { # X <- NULL # for(i in seq_len(k)) { # X <- rbind(X, cbind(v[i], Recall(k-1, v[-i]))) # } # X # } # } # # return(sub(k, v[seq_len(k)])) # # } ### generate all possible unique permutations .genuperms <- function(x) { z <- NULL sub <- function(x, y) { len.x <- length(x) if (len.x == 0L) { return(y) } else { prev.num <- 0 for (i in seq_len(len.x)) { num <- 
x[i] if (num > prev.num) { prev.num <- num z <- rbind(z, Recall(x[-i], c(y,num))) } } return(z) } } return(sub(x, y=NULL)) } .permci <- function(val, obj, j, exact, iter, progbar, level, digits, control) { mstyle <- .get.mstyle("crayon" %in% .packages()) ### fit model with shifted outcome res <- try(suppressWarnings(rma.uni(obj$yi - c(val*obj$X[,j]), obj$vi, weights=obj$weights, mods=obj$X, intercept=FALSE, method=obj$method, weighted=obj$weighted, test=obj$test, tau2=ifelse(obj$tau2.fix, obj$tau2, NA), control=obj$control, skipr2=TRUE)), silent=TRUE) if (inherits(res, "try-error")) stop() ### p-value based on permutation test pval <- permutest(res, exact=exact, iter=iter, progbar=FALSE, control=control)$pval[j] ### get difference between p-value and level diff <- pval - level / ifelse(control$alternative == "two.sided", 1, 2) ### show progress if (progbar) cat(mstyle$verbose(paste("pval =", formatC(pval, format="f", digits=digits[["pval"]]), " diff =", formatC(diff, format="f", digits=digits[["pval"]], flag=" "), " val =", formatC(val, format="f", digits=digits[["est"]], flag=" "), "\n"))) ### penalize negative differences, which should force the CI bound to correspond to a p-value of *at least* level diff <- ifelse(diff < 0, diff*10, diff) return(diff) } ############################################################################ ### -1 times the log likelihood (regular or restricted) for location-scale model .ll.rma.ls <- function(par, yi, vi, X, Z, reml, k, pX, alpha.val, verbose, digits, REMLf, link, mZ) { mstyle <- .get.mstyle("crayon" %in% .packages()) #beta <- par[seq_len(pX)] #alpha <- par[-seq_len(pX)] alpha <- par alpha <- ifelse(is.na(alpha.val), alpha, alpha.val) ### compute predicted tau2 values if (link == "log") { tau2 <- exp(c(Z %*% alpha)) } else { tau2 <- c(Z %*% alpha) } if (any(tau2 < 0)) { llval <- -Inf } else { ### compute weights wi <- 1/(vi + tau2) ### when using this, the optimization only pertains to the parameter(s) in 'alpha', as 'beta' is then fully ### determined by the current value(s) of 'alpha'; this is actually also how the standard RE/ME model is fitted; ### but is this really the best way of doing this? one could also optimize over beta and alpha jointly! 
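### A standalone sketch (not part of the package source) of the profiling idea noted in
### the comment above: once the scale parameters (and hence the tau^2 values) are fixed,
### the fixed effects are just the weighted least-squares solution, so the optimizer only
### needs to search over 'alpha'. All inputs below are hypothetical.
yi   <- c(0.2, 0.5, 0.8, 0.3)
vi   <- c(0.04, 0.09, 0.05, 0.06)
X    <- cbind(1, c(0, 0, 1, 1))
tau2 <- c(0.02, 0.02, 0.10, 0.10)              # predicted tau^2 values for a given alpha
W    <- diag(1 / (vi + tau2))
beta <- solve(t(X) %*% W %*% X) %*% t(X) %*% W %*% yi
beta                                           # fully determined by the chosen tau^2 values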
W <- diag(wi, nrow=k, ncol=k) stXWX <- try(.invcalc(X=X, W=W, k=k), silent=TRUE) if (inherits(stXWX, "try-error")) { llval <- -Inf } else { beta <- stXWX %*% crossprod(X,W) %*% as.matrix(yi) ### compute residual sum of squares RSS <- sum(wi*(yi - X %*% beta)^2) ### log-likelihood (could leave out additive constants) if (!reml) { llval <- -1/2 * (k) * log(2*base::pi) - 1/2 * sum(log(vi + tau2)) - 1/2 * RSS } else { llval <- -1/2 * (k-pX) * log(2*base::pi) + ifelse(REMLf, 1/2 * determinant(crossprod(X), logarithm=TRUE)$modulus, 0) + -1/2 * sum(log(vi + tau2)) - 1/2 * determinant(crossprod(X,W) %*% X, logarithm=TRUE)$modulus - 1/2 * RSS } } } if (!is.null(mZ)) alpha <- mZ %*% alpha if (verbose) { cat(mstyle$verbose(paste0("ll = ", ifelse(is.na(llval), NA, formatC(llval, digits=digits[["fit"]], format="f", flag=" ")), " "))) cat(mstyle$verbose(paste0("alpha = ", paste(ifelse(is.na(alpha), NA, formatC(alpha, digits=digits[["est"]], format="f", flag=" ")), collapse=" ")))) cat("\n") } return(-1 * llval) } ############################################################################ metafor/R/contrmat.r0000644000176200001440000001103314045262731014124 0ustar liggesuserscontrmat <- function(data, grp1, grp2, last, shorten=FALSE, minlen=2, check=TRUE, append=TRUE) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (!is.data.frame(data)) data <- data.frame(data) ### get variable names varnames <- names(data) ### number of variables nvars <- length(varnames) ############################################################################ ### checks on 'grp1' argument if (length(grp1) != 1L) stop(mstyle$stop("Argument 'grp1' must of length 1.")) if (!(is.character(grp1) | is.numeric(grp1))) stop(mstyle$stop("Argument 'grp1' must either be a character string or a number.")) if (is.character(grp1)) { grp1.pos <- charmatch(grp1, varnames) if (is.na(grp1.pos)) stop(mstyle$stop("Argument 'grp1' must be the name of a variable in the data frame.")) if (grp1.pos == 0L) stop(mstyle$stop("No ambiguous match found for variable name specified via 'grp1' argument.")) } else { grp1.pos <- round(grp1) if (grp1.pos < 1 | grp1.pos > nvars) stop(mstyle$stop("Specified position of 'grp1' variable does not exist in the data frame.")) } ### get grp1 variable grp1 <- data[[grp1.pos]] ### make sure there are no missing values in grp1 variable if (anyNA(grp1)) stop(mstyle$stop("Variable specified via 'grp1' argument should not contain missing values.")) ############################################################################ ### checks on 'grp2' argument if (length(grp2) != 1L) stop(mstyle$stop("Argument 'grp2' must of length 1.")) if (!(is.character(grp2) | is.numeric(grp2))) stop(mstyle$stop("Argument 'grp2' must either be a character string or a number.")) if (is.character(grp2)) { grp2.pos <- charmatch(grp2, varnames) if (is.na(grp2.pos)) stop(mstyle$stop("Argument 'grp2' must be the name of a variable in the data frame.")) if (grp2.pos == 0L) stop(mstyle$stop("No ambiguous match found for variable name specified via 'grp2' argument.")) } else { grp2.pos <- round(grp2) if (grp2.pos < 1 | grp2.pos > nvars) stop(mstyle$stop("Specified position of 'grp2' variable does not exist in the data frame.")) } ### get grp2 variable grp2 <- data[[grp2.pos]] ### make sure there are no missing values in grp2 variable if (anyNA(grp2)) stop(mstyle$stop("Variable specified via 'grp2' argument should not contain missing values.")) ############################################################################ ### get all levels (of grp1 and 
grp2) if (is.factor(grp1) && is.factor(grp2) && identical(levels(grp1), levels(grp2))) { lvls <- levels(grp1) } else { lvls <- sort(unique(c(levels(factor(grp1)), levels(factor(grp2))))) } ############################################################################ ### checks on 'last' argument ### if last is not specified, place most common grp2 group last if (missing(last)) last <- names(sort(table(grp2), decreasing=TRUE)[1]) if (length(last) != 1L) stop(mstyle$stop("Argument 'last' must be of length one.")) ### if last is set to NA, leave last unchanged if (is.na(last)) last <- tail(lvls, 1) last.pos <- charmatch(last, lvls) if (is.na(last.pos)) stop(mstyle$stop("Could not find specified group in 'grp1' or 'grp2' variables.")) if (last.pos == 0L) stop(mstyle$stop("No ambiguous match found for group specified via 'last' argument.")) last <- lvls[last.pos] ### reorder levels so that the reference level is always last lvls <- c(lvls[-last.pos], lvls[last.pos]) ############################################################################ ### turn grp1 and grp2 into factors with all levels grp1 <- factor(grp1, levels=lvls) grp2 <- factor(grp2, levels=lvls) ### create contrast matrix X <- model.matrix(~ grp1 - 1, contrasts.arg = list(grp1 = "contr.treatment")) - model.matrix(~ grp2 - 1, contrasts.arg = list(grp2 = "contr.treatment")) attr(X, "assign") <- NULL attr(X, "contrasts") <- NULL ### shorten variables names (if shorten=TRUE) if (shorten) lvls <- .shorten(lvls, minlen=minlen) ### add variable names if (check) { colnames(X) <- make.names(lvls, unique=TRUE) } else { colnames(X) <- lvls } ### append to original data if requested if (append) X <- cbind(data, X) ############################################################################ return(X) } metafor/R/predict.rma.r0000644000176200001440000006254614050233455014521 0ustar liggesuserspredict.rma <- function(object, newmods, intercept, tau2.levels, gamma2.levels, addx=FALSE, level, digits, transf, targs, vcov=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma", notav="rma.ls") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- object if (missing(newmods)) newmods <- NULL if (missing(intercept)) intercept <- x$intercept if (missing(tau2.levels)) tau2.levels <- NULL if (missing(gamma2.levels)) gamma2.levels <- NULL if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ddd <- list(...) .chkdots(ddd, c("pi.type", "newvi")) if (is.null(ddd$pi.type)) { pi.type <- "default" } else { pi.type <- ddd$pi.type } if (x$int.only && !is.null(newmods)) stop(mstyle$stop("Cannot specify new moderator values for models without moderators.")) ######################################################################### ### TODO: can this be simplified? 
(every time I sit down and stare at the mess below, it gives me a headache) if (is.null(newmods)) { ### if no new moderator values are specified if (!inherits(object, "rma.mv") || (inherits(object, "rma.mv") && any(is.element(object$struct, c("GEN","GDIAG"))))) { ### for rma.uni, rma.mh, rma.peto, and rma.glmm objects if (x$int.only) { # if intercept-only model predict only the intercept k.new <- 1 # X.new <- cbind(1) # } else { # otherwise predict for all k.f studies (including studies with NAs) k.new <- x$k.f # X.new <- x$X.f # } # } else { ### for rma.mv objects if (x$int.only) { # if intercept-only model: if (!x$withG) { # # if there is no G structure (and hence also no H structure) k.new <- 1 # # then we just need to predict the intercept once X.new <- cbind(1) # } # if (x$withG && x$withH) { # # if there is both a G and H structure if (is.null(tau2.levels) && is.null(gamma2.levels)) { # # and user has not specified tau2s.levels and gamma2.levels k.new <- x$tau2s * x$gamma2s # # then we need to predict intercepts for all combinations of tau2 and gamma2 values X.new <- cbind(rep(1,k.new)) # if (x$tau2s == 1) { # # if there is only a single tau^2 tau2.levels <- rep(1,k.new) # # then tau2.levels should be 1 repeated k.new times } else { # tau2.levels <- rep(levels(x$mf.g.f$inner), each=x$gamma2s) # # otherwise repeat actual levels gamma2s times } # if (x$gamma2s == 1) { # # if there is only a single gamma^2 value gamma2.levels <- rep(1,k.new) # # then gamma2.levels should be 1 repeated k.new times } else { # gamma2.levels <- rep(levels(x$mf.h.f$inner), times=x$tau2s) # # otherwise repeat actual levels tau2s times } # } # if ((!is.null(tau2.levels) && is.null(gamma2.levels)) || # # if user specifies only one of tau2.levels and gamma2.levels, throw an error (is.null(tau2.levels) && !is.null(gamma2.levels))) # stop(mstyle$stop("Either specify both of 'tau2.levels' and 'gamma2.levels' or neither.")) if (!is.null(tau2.levels) && !is.null(gamma2.levels)) { # # if user has specified both tau2s.levels and gamma2.levels if (length(tau2.levels) != length(gamma2.levels)) # stop(mstyle$stop("Length of 'tau2.levels' and 'gamma2.levels' is not the same.")) k.new <- length(tau2.levels) # # then we need to predict intercepts for those level combinations X.new <- cbind(rep(1,k.new)) # } # } # if (x$withG && !x$withH) { # # if there is only a G structure (and no H structure) if (is.null(tau2.levels)) { # # and user has not specified tau2.levels k.new <- x$tau2s # # then we need to predict intercepts for all tau2 values X.new <- cbind(rep(1,k.new)) # if (x$tau2s == 1) { # tau2.levels <- rep(1, k.new) # } else { # tau2.levels <- levels(x$mf.g.f$inner) # } # } else { # # and the user has specified tau2.levels k.new <- length(tau2.levels) # # then we need to predict intercepts for those levels X.new <- cbind(rep(1,k.new)) # } # gamma2.levels <- rep(1, k.new) # } # } else { # if not an intercept-only model k.new <- x$k.f # # then predict for all k.f studies (including studies with NAs) X.new <- x$X.f # if (!is.null(tau2.levels) || !is.null(gamma2.levels)) # warning(mstyle$warning("Arguments 'tau2.levels' and 'gamma2.levels' ignored when obtaining fitted values."), call.=FALSE) tau2.levels <- as.character(x$mf.g.f$inner) # gamma2.levels <- as.character(x$mf.h.f$inner) # } # } } else { ### if new moderator values have been specified if (!(.is.vector(newmods) || inherits(newmods, "matrix"))) stop(mstyle$stop(paste0("Argument 'newmods' should be a vector or matrix, but is of class '", class(newmods), "'."))) if 
((!x$int.incl && x$p == 1L) || (x$int.incl && x$p == 2L)) { # if single moderator (multiple k.new possible) (either without or with intercept in the model) k.new <- length(newmods) # X.new <- cbind(c(newmods)) # } else { # in case the model has more than one predictor: if (.is.vector(newmods) || nrow(newmods) == 1L) { # # if user gives one vector or one row matrix (only one k.new): k.new <- 1 # X.new <- rbind(newmods) # } else { # # if user gives multiple rows and columns (multiple k.new): k.new <- nrow(newmods) # X.new <- cbind(newmods) # } # ### allow matching of terms by names (note: only possible if all columns in X.new and x$X have colnames) if (!is.null(colnames(X.new)) && all(colnames(X.new) != "") && !is.null(colnames(x$X)) && all(colnames(x$X) != "")) { colnames.mod <- colnames(x$X) if (x$int.incl) colnames.mod <- colnames.mod[-1] pos <- sapply(colnames(X.new), function(colname) { d <- c(adist(colname, colnames.mod, costs=c(ins=1, sub=Inf, del=Inf))) # compute edit distances with Inf costs for substitutions/deletions if (all(is.infinite(d))) # if there is no match, then all elements are Inf stop(mstyle$stop(paste0("Could not find variable '", colname, "' in the model.")), call. = FALSE) d <- which(d == min(d)) # don't use which.min() since that only finds the first minimum if (length(d) > 1L) # if there is no unique match, then there is more than one minimum stop(mstyle$stop(paste0("Could not match up variable '", colname, "' uniquely to a variable in the model.")), call. = FALSE) return(d) }) if (anyDuplicated(pos)) { # if the same name is used more than once, then there will be duplicated pos values dups <- paste(unique(colnames(X.new)[duplicated(pos)]), collapse=", ") stop(mstyle$stop(paste0("Found multiple matches for the same variable name (", dups, ")."))) } if (length(pos) != length(colnames.mod)) { no.match <- colnames.mod[seq_along(colnames.mod)[-pos]] if (length(no.match) > 3L) stop(mstyle$stop(paste0("Argument 'newmods' does not specify values for these variables: ", paste0(no.match[1:3], collapse=", "), ", ..."))) if (length(no.match) > 1L) stop(mstyle$stop(paste0("Argument 'newmods' does not specify values for these variables: ", paste0(no.match, collapse=", ")))) if (length(no.match) == 1L) stop(mstyle$stop(paste0("Argument 'newmods' does not specify values for this variable: ", no.match))) } X.new <- X.new[,order(pos),drop=FALSE] colnames(X.new) <- colnames.mod } } if (inherits(X.new[1,1], "character")) stop(mstyle$stop(paste0("Argument 'newmods' should only contain numeric variables."))) ### if the user has specified newmods and an intercept was included in the original model, add the intercept to X.new ### but user can also decide to remove the intercept from the predictions with intercept=FALSE if (x$int.incl) { if (intercept) { X.new <- cbind(intrcpt=1, X.new) } else { X.new <- cbind(intrcpt=0, X.new) } } if (ncol(X.new) != x$p) stop(mstyle$stop(paste0("Dimensions of 'newmods' (", ncol(X.new), ") do not the match dimensions of the model (", x$p, ")."))) } #return(list(k.new=k.new, tau2=x$tau2, gamma2=x$gamma2, tau2.levels=tau2.levels, gamma2.levels=gamma2.levels)) ######################################################################### ### for rma.mv models with multiple tau^2 values, must use tau2.levels argument when using newmods to obtain prediction interval if (inherits(object, "rma.mv") && x$withG) { if (x$tau2s > 1) { if (is.null(tau2.levels)) { #warning(mstyle$warning("Must specify 'tau2.levels' argument to obtain prediction interval."), call.=FALSE) } 
else { ### if tau2.levels argument is a character vector, check that specified tau^2 values actually exist if (!is.numeric(tau2.levels) && anyNA(pmatch(tau2.levels, x$g.levels.f[[1]], duplicates.ok=TRUE))) stop(mstyle$stop("Non-existing levels specified via 'tau2.levels' argument.")) ### if tau2.levels argument is numeric, check that specified tau^2 values actually exist if (is.numeric(tau2.levels)) { tau2.levels <- round(tau2.levels) if (any(tau2.levels < 1) || any(tau2.levels > x$g.nlevels.f[1])) stop(mstyle$stop("Non-existing tau^2 values specified via 'tau2.levels' argument.")) } ### allow quick setting of all levels if (length(tau2.levels) == 1L) tau2.levels <- rep(tau2.levels, k.new) ### check length of tau2.levels argument if (length(tau2.levels) != k.new) stop(mstyle$stop(paste0("Length of 'tau2.levels' argument (", length(tau2.levels), ") does not match the number of predicted values (", k.new, ")."))) } } else { tau2.levels <- rep(1, k.new) } } ### for rma.mv models with multiple gamma^2 values, must use gamma.levels argument when using newmods to obtain prediction intervals if (inherits(object, "rma.mv") && x$withH) { if (x$gamma2s > 1) { if (is.null(gamma2.levels)) { #warning(mstyle$warning("Must specify 'gamma2.levels' argument to obtain prediction interval."), call.=FALSE) } else { ### if gamma2.levels argument is a character vector, check that specified gamma^2 values actually exist if (!is.numeric(gamma2.levels) && anyNA(pmatch(gamma2.levels, x$h.levels.f[[1]], duplicates.ok=TRUE))) stop(mstyle$stop("Non-existing levels specified via 'gamma2.levels' argument.")) ### if gamma2.levels argument is numeric, check that specified gamma^2 values actually exist if (is.numeric(gamma2.levels)) { gamma2.levels <- round(gamma2.levels) if (any(gamma2.levels < 1) || any(gamma2.levels > x$h.nlevels.f[1])) stop(mstyle$stop("Non-existing gamma^2 values specified via 'gamma2.levels' argument.")) } ### allow quick setting of all levels if (length(gamma2.levels) == 1L) gamma2.levels <- rep(gamma2.levels, k.new) ### check length of gamma2.levels argument if (length(gamma2.levels) != k.new) stop(mstyle$stop(paste0("Length of 'gamma2.levels' argument (", length(gamma2.levels), ") does not match the number of predicted values (", k.new, ")."))) } } else { gamma2.levels <- rep(1, k.new) } } ######################################################################### ### ddf calculation for x$test %in% c("knha","adhoc","t") but also need this ### for pi.ddf calculation when test="z" and pi.type %in% c("riley","t") if (length(x$ddf) == 1L) { ddf <- rep(x$ddf, k.new) } else { ddf <- rep(NA, k.new) for (j in seq_len(k.new)) { bn0 <- X.new[j,] != 0 ddf[j] <- min(x$ddf[bn0]) } } ddf[is.na(ddf)] <- x$k - x$p ### predicted values, SEs, and confidence intervals pred <- rep(NA_real_, k.new) vpred <- rep(NA_real_, k.new) for (i in seq_len(k.new)) { Xi.new <- X.new[i,,drop=FALSE] pred[i] <- Xi.new %*% x$beta vpred[i] <- Xi.new %*% tcrossprod(x$vb, Xi.new) } if (is.element(x$test, c("knha","adhoc","t"))) { crit <- sapply(seq_along(ddf), function(j) if (ddf[j] > 0) qt(level/2, df=ddf[j], lower.tail=FALSE) else NA) } else { crit <- qnorm(level/2, lower.tail=FALSE) } se <- sqrt(vpred) ci.lb <- pred - crit * se ci.ub <- pred + crit * se ######################################################################### if (vcov) vcovpred <- X.new %*% x$vb %*% t(X.new) if (pi.type == "simple") { crit <- qnorm(level/2, lower.tail=FALSE) vpred <- 0 } pi.ddf <- ddf if (is.element(pi.type, c("riley","t"))) { if (pi.type == "riley") 
pi.ddf <- ddf - x$parms + x$p if (pi.type == "t") pi.ddf <- ddf pi.ddf[pi.ddf < 1] <- 1 crit <- sapply(seq_along(pi.ddf), function(j) if (pi.ddf[j] > 0) qt(level/2, df=pi.ddf[j], lower.tail=FALSE) else NA) } if (is.null(ddd$newvi)) { newvi <- 0 } else { newvi <- ddd$newvi if (length(newvi) == 1L) newvi <- rep(newvi, k.new) if (length(newvi) != k.new) stop(mstyle$stop(paste0("Length of 'newvi' argument (", length(newvi), ") does not match the number of predicted values (", k.new, ")."))) } ######################################################################### ### prediction intervals if (!inherits(object, "rma.mv")) { ### for rma.uni, rma.mh, rma.peto, and rma.glmm objects (in rma.mh and rma.peto, tau2 = 0 by default and stored as such) pi.lb <- pred - crit * sqrt(vpred + x$tau2 + newvi) pi.ub <- pred + crit * sqrt(vpred + x$tau2 + newvi) } else { ### for rma.mv objects if (!x$withG) { ### if there is no G structure (and hence no H structure), there are no tau2 and gamma2 values, so just add the sum of all of the sigma2 values pi.lb <- pred - crit * sqrt(vpred + sum(x$sigma2) + newvi) pi.ub <- pred + crit * sqrt(vpred + sum(x$sigma2) + newvi) } if (x$withG && !x$withH) { ### if there is a G structure but no H structure if (x$tau2s == 1) { ### if there is only a single tau^2 value, always add that (in addition to the sum of all of the sigma^2 values) pi.lb <- pred - crit * sqrt(vpred + sum(x$sigma2) + x$tau2 + newvi) pi.ub <- pred + crit * sqrt(vpred + sum(x$sigma2) + x$tau2 + newvi) } else { if (is.null(tau2.levels)) { ### if user has not specified tau2.levels, cannot compute bounds pi.lb <- rep(NA, k.new) pi.ub <- rep(NA, k.new) tau2.levels <- rep(NA, k.new) } else { ### if there are multiple tau^2 values, either let user define numerically which value(s) to use or ### match the position of the specified tau2.levels to the levels of the inner factor in the model if (!is.numeric(tau2.levels)) tau2.levels <- pmatch(tau2.levels, x$g.levels.f[[1]], duplicates.ok=TRUE) pi.lb <- pred - crit * sqrt(vpred + sum(x$sigma2) + x$tau2[tau2.levels] + newvi) pi.ub <- pred + crit * sqrt(vpred + sum(x$sigma2) + x$tau2[tau2.levels] + newvi) tau2.levels <- x$g.levels.f[[1]][tau2.levels] } } } if (x$withG && x$withH) { ### if there is a G structure and an H structure if (x$tau2s == 1 && x$gamma2s == 1) { ### if there is only a single tau^2 and gamma^2 value, always add that (in addition to the sum of all of the sigma^2 values) pi.lb <- pred - crit * sqrt(vpred + sum(x$sigma2) + x$tau2 + x$gamma2 + newvi) pi.ub <- pred + crit * sqrt(vpred + sum(x$sigma2) + x$tau2 + x$gamma2 + newvi) } else { if (is.null(tau2.levels) || is.null(gamma2.levels)) { ### if user has not specified tau2.levels and gamma2.levels, cannot compute bounds pi.lb <- rep(NA, k.new) pi.ub <- rep(NA, k.new) tau2.levels <- rep(NA, k.new) gamma2.levels <- rep(NA, k.new) } else { ### if there are multiple tau^2 and/or gamma^2 values, either let user define numerically which value(s) to use or ### match the position of the specified tau2.levels and gamma2.levels to the levels of the inner factors in the model if (!is.numeric(tau2.levels)) tau2.levels <- pmatch(tau2.levels, x$g.levels.f[[1]], duplicates.ok=TRUE) if (!is.numeric(gamma2.levels)) gamma2.levels <- pmatch(gamma2.levels, x$h.levels.f[[1]], duplicates.ok=TRUE) pi.lb <- pred - crit * sqrt(vpred + sum(x$sigma2) + x$tau2[tau2.levels] + x$gamma2[gamma2.levels] + newvi) pi.ub <- pred + crit * sqrt(vpred + sum(x$sigma2) + x$tau2[tau2.levels] + x$gamma2[gamma2.levels] + newvi) tau2.levels <- 
x$g.levels.f[[1]][tau2.levels] gamma2.levels <- x$h.levels.f[[1]][gamma2.levels] } } } } ######################################################################### ### apply transformation function if one has been specified if (is.function(transf)) { if (is.null(targs)) { pred <- sapply(pred, transf) se <- rep(NA,k.new) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) pi.lb <- sapply(pi.lb, transf) pi.ub <- sapply(pi.ub, transf) } else { pred <- sapply(pred, transf, targs) se <- rep(NA,k.new) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) pi.lb <- sapply(pi.lb, transf, targs) pi.ub <- sapply(pi.ub, transf, targs) } do.transf <- TRUE } else { do.transf <- FALSE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] tmp <- .psort(pi.lb, pi.ub) pi.lb <- tmp[,1] pi.ub <- tmp[,2] ### use study labels from the object when the model has moderators and no new moderators have been specified ### otherwise, just use consecutive numbers to label the predicted values if (is.null(newmods) && !x$int.only) { slab <- x$slab } else { slab <- seq_len(k.new) } ### add row/colnames to vcovpred if (vcov) rownames(vcovpred) <- colnames(vcovpred) <- slab ### but when predicting just a single value, use "" as study label if (k.new == 1L) slab <- "" ### handle NAs not.na <- rep(TRUE, k.new) if (na.act == "na.omit") { if (is.null(newmods) && !x$int.only) { not.na <- x$not.na } else { not.na <- !is.na(pred) } } #if (na.act == "na.omit") { # not.na <- !is.na(pred) #} else { # not.na <- rep(TRUE, k.new) #} if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out <- list(pred=pred[not.na], se=se[not.na], ci.lb=ci.lb[not.na], ci.ub=ci.ub[not.na], pi.lb=pi.lb[not.na], pi.ub=pi.ub[not.na], cr.lb=pi.lb[not.na], cr.ub=pi.ub[not.na]) if (vcov) vcovpred <- vcovpred[not.na,not.na,drop=FALSE] if (na.act == "na.exclude" && is.null(newmods) && !x$int.only) { out <- lapply(out, function(val) ifelse(x$not.na, val, NA)) if (vcov) { vcovpred[!x$not.na,] <- NA vcovpred[,!x$not.na] <- NA } } ### add tau2.levels values to list if (inherits(object, "rma.mv") && x$withG && x$tau2s > 1) out$tau2.level <- tau2.levels ### add gamma2.levels values to list if (inherits(object, "rma.mv") && x$withH && x$gamma2s > 1) out$gamma2.level <- gamma2.levels ### remove cr part for models with a GEN structure if (inherits(object, "rma.mv") && any(is.element(object$struct, c("GEN","GDIAG")))) { out$cr.lb <- NULL out$cr.ub <- NULL out$pi.lb <- NULL out$pi.ub <- NULL out$tau2.level <- NULL out$gamma2.level <- NULL } ### add X matrix to list if (addx) { out$X <- matrix(X.new[not.na,], ncol=x$p) colnames(out$X) <- colnames(x$X) } ### add slab values to list out$slab <- slab[not.na] ### for FE/EE/CE models, remove the columns corresponding to the prediction interval bounds if (is.element(x$method, c("FE","EE","CE"))) { out$cr.lb <- NULL out$cr.ub <- NULL out$pi.lb <- NULL out$pi.ub <- NULL } out$digits <- digits out$method <- x$method out$transf <- do.transf if (x$test != "z") out$ddf <- ddf if ((x$test != "z" || is.element(pi.type, c("riley","t"))) && pi.type != "simple") out$pi.ddf <- pi.ddf class(out) <- "list.rma" if (vcov & !do.transf) { out <- list(pred=out) out$vcov <- vcovpred } return(out) } metafor/R/print.rma.peto.r0000644000176200001440000000431214036766174015171 0ustar liggesusersprint.rma.peto <- function(x, digits, showfit=FALSE, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.peto") if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (!exists(".rmspace")) cat("\n") cat(mstyle$section("Fixed-Effects Model")) cat(mstyle$section(paste0(" (k = ", x$k, ")"))) cat("\n") if (showfit) { fs <- .fcf(x$fit.stats$ML, digits[["fit"]]) names(fs) <- c("logLik", "deviance", "AIC", "BIC", "AICc") cat("\n") tmp <- capture.output(print(fs, quote=FALSE, print.gap=2)) .print.table(tmp, mstyle) } cat("\n") if (!is.na(x$I2)) { cat(mstyle$text("I^2 (total heterogeneity / total variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$I2), NA, .fcf(x$I2, 2)), "%"))) cat("\n") } if (!is.na(x$H2)) { cat(mstyle$text("H^2 (total variability / sampling variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$H2), NA, .fcf(x$H2, 2))))) cat("\n") } if (!is.na(x$QE)) { cat("\n") cat(mstyle$section("Test for Heterogeneity:"), "\n") cat(mstyle$result(paste0("Q(df = ", x$k.pos-1, ") = ", .fcf(x$QE, digits[["test"]]), ", p-val ", .pval(x$QEp, digits[["pval"]], showeq=TRUE, sep=" ")))) } if (any(!is.na(c(x$I2, x$H2, x$QE)))) cat("\n\n") res.table <- c(estimate=.fcf(unname(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]])) res.table.exp <- c(estimate=.fcf(exp(unname(x$beta)), digits[["est"]]), ci.lb=.fcf(exp(x$ci.lb), digits[["ci"]]), ci.ub=.fcf(exp(x$ci.ub), digits[["ci"]])) cat(mstyle$section("Model Results (log scale):")) cat("\n\n") tmp <- capture.output(.print.vector(res.table)) .print.table(tmp, mstyle) cat("\n") cat(mstyle$section("Model Results (OR scale):")) cat("\n\n") tmp <- capture.output(.print.vector(res.table.exp)) .print.table(tmp, mstyle) if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/regplot.r0000644000176200001440000000006414032075631013750 0ustar liggesusersregplot <- function(x, ...) UseMethod("regplot") metafor/R/simulate.rma.r0000644000176200001440000000365214032032623014675 0ustar liggesuserssimulate.rma <- function(object, nsim = 1, seed = NULL, olim, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma", notav=c("rma.glmm", "rma.mh", "rma.peto", "rma.uni.selmodel")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) ### as in stats:::simulate.lm if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) { RNGstate <- get(".Random.seed", envir = .GlobalEnv) } else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } ######################################################################### ### fitted values ftd <- c(object$X %*% object$beta) ### simulate for rma.uni (and rma.ls) objects if (inherits(object, "rma.uni")) val <- replicate(nsim, rnorm(object$k, mean=ftd, sd=sqrt(object$vi + object$tau2))) ### simulate for rma.mv objects if (inherits(object, "rma.mv")) val <- t(.mvrnorm(nsim, mu=ftd, Sigma=object$M)) ### apply observation/outcome limits if specified if (!missing(olim)) { if (length(olim) != 2L) stop(mstyle$stop("Argument 'olim' must be of length 2.")) olim <- sort(olim) val[val < olim[1]] <- olim[1] val[val > olim[2]] <- olim[2] } ######################################################################### res <- matrix(NA_real_, nrow=object$k.f, ncol=nsim) res[object$not.na,] <- val res <- as.data.frame(res) rownames(res) <- object$slab colnames(res) <- paste0("sim_", seq_len(nsim)) if (na.act == "na.omit") res <- res[object$not.na,,drop=FALSE] attr(res, "seed") <- RNGstate return(res) } metafor/R/tes.r0000644000176200001440000000005413711074416013071 0ustar liggesuserstes <- function(x, ...) UseMethod("tes") metafor/R/funnel.rma.r0000644000176200001440000005506714054725013014356 0ustar liggesusersfunnel.rma <- function(x, yaxis="sei", xlim, ylim, xlab, ylab, steps=5, at, atransf, targs, digits, level=x$level, addtau2=FALSE, type="rstandard", back="lightgray", shade="white", hlines="white", refline, lty=3, pch=19, pch.fill=21, col, bg, label=FALSE, offset=0.4, legend=FALSE, ci.res=1000, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) yaxis <- match.arg(yaxis, c("sei", "vi", "seinv", "vinv", "ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi")) type <- match.arg(type, c("rstandard", "rstudent")) if (missing(atransf)) atransf <- FALSE atransf.char <- deparse(substitute(atransf)) ### check if sample size information is available if plotting (some function of) the sample sizes on the y-axis if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni"))) { if (is.null(x$ni)) stop(mstyle$stop("No sample size information stored in model object.")) if (anyNA(x$ni)) warning(mstyle$warning("Sample size information stored in model object \n contains NAs. 
Not all studies will be plotted."), call.=FALSE) } ### set y-axis label if not specified if (missing(ylab)) { if (yaxis == "sei") ylab <- "Standard Error" if (yaxis == "vi") ylab <- "Variance" if (yaxis == "seinv") ylab <- "Inverse Standard Error" if (yaxis == "vinv") ylab <- "Inverse Variance" if (yaxis == "ni") ylab <- "Sample Size" if (yaxis == "ninv") ylab <- "Inverse Sample Size" if (yaxis == "sqrtni") ylab <- "Square Root Sample Size" if (yaxis == "sqrtninv") ylab <- "Inverse Square Root Sample Size" if (yaxis == "lni") ylab <- "Log Sample Size" if (yaxis == "wi") ylab <- "Weight (in %)" } if (missing(at)) at <- NULL if (missing(targs)) targs <- NULL ### default number of digits (if not specified) if (missing(digits)) { if (yaxis == "sei") digits <- c(2L,3L) if (yaxis == "vi") digits <- c(2L,3L) if (yaxis == "seinv") digits <- c(2L,3L) if (yaxis == "vinv") digits <- c(2L,3L) if (yaxis == "ni") digits <- c(2L,0L) if (yaxis == "ninv") digits <- c(2L,3L) if (yaxis == "sqrtni") digits <- c(2L,3L) if (yaxis == "sqrtninv") digits <- c(2L,3L) if (yaxis == "lni") digits <- c(2L,3L) if (yaxis == "wi") digits <- c(2L,2L) } else { if (length(digits) == 1L) ### digits[1] for x-axis labels digits <- c(digits,digits) ### digits[2] for y-axis labels } ### note: digits can also be a list (e.g., digits=list(2L,3)); trailing 0's are dropped for intergers if (length(lty) == 1L) lty <- rep(lty, 2) ### 1st value = funnel lines, 2nd value = reference line ### note: pch, col, and bg must be of the same length as the original data passed to rma() ### so we have to apply the same subsetting (if necessary) and removing of NAs as was ### done during the model fitting (note: NAs are removed further below) if (length(pch) == 1L) { pch.vec <- FALSE pch <- rep(pch, x$k.all) } else { pch.vec <- TRUE } if (length(pch) != x$k.all) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) pch <- pch[x$subset] if (!inherits(x, "rma.uni.trimfill")) { if (missing(col)) col <- "black" if (length(col) == 1L) { col.vec <- FALSE col <- rep(col, x$k.all) } else { col.vec <- TRUE } if (length(col) != x$k.all) stop(mstyle$stop(paste0("Length of the 'col' argument (", length(col), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) col <- col[x$subset] if (missing(bg)) bg <- "white" if (length(bg) == 1L) { bg.vec <- FALSE bg <- rep(bg, x$k.all) } else { bg.vec <- TRUE } if (length(bg) != x$k.all) stop(mstyle$stop(paste0("Length of the 'bg' argument (", length(bg), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) bg <- bg[x$subset] } else { ### for trimfill objects, 'col' and 'bg' are used to specify the colors of the observed and imputed data if (missing(col)) col <- c("black", "black") if (length(col) == 1L) col <- c(col, "black") col.vec <- FALSE if (missing(bg)) bg <- c("white", "white") if (length(bg) == 1L) bg <- c(bg, "white") bg.vec <- FALSE } if (length(label) != 1L) stop(mstyle$stop("Argument 'label' should be of length 1.")) ddd <- list(...) lplot <- function(..., refline2, level2, lty2) plot(...) labline <- function(..., refline2, level2, lty2) abline(...) lsegments <- function(..., refline2, level2, lty2) segments(...) laxis <- function(..., refline2, level2, lty2) axis(...) lpolygon <- function(..., refline2, level2, lty2) polygon(...) llines <- function(..., refline2, level2, lty2) lines(...) 
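### The l* wrappers defined here intercept the extra arguments 'refline2',
### 'level2', and 'lty2' (used further below to add a second reference line /
### funnel) so that they are not passed on to the base graphics functions via
### '...'. A minimal sketch of the same idiom (hypothetical 'f', not part of
### the package): f <- function(..., extra) plot(...) forwards everything
### except 'extra' to plot().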
lpoints <- function(..., refline2, level2, lty2) points(...) lrect <- function(..., refline2, level2, lty2) rect(...) ltext <- function(..., refline2, level2, lty2) text(...) ### refline2, level2, and lty2 for adding a second reference line / funnel if (!is.null(ddd$refline2)) { refline2 <- ddd$refline2 } else { refline2 <- NULL } if (!is.null(ddd$level2)) { level2 <- ddd$level2 } else { level2 <- x$level } if (!is.null(ddd$lty2)) { lty2 <- ddd$lty2 } else { lty2 <- 3 } ######################################################################### ### get values for the x-axis (and corresponding vi, sei, and ni values) ### if int.only, get the observed values; otherwise, get the (deleted) residuals if (x$int.only) { if (missing(refline)) refline <- c(x$beta) if (inherits(x, "rma.mv") && addtau2) { warning(mstyle$warning("Argument 'addtau2' ignored for 'rma.mv' models."), call.=FALSE) addtau2 <- FALSE } yi <- x$yi ### yi/vi/ni is already subsetted and NAs are removed vi <- x$vi ni <- x$ni ### ni can be NULL (and there may be 'additional' NAs) sei <- sqrt(vi) if (!is.null(x$not.na.yivi)) x$not.na <- x$not.na.yivi slab <- x$slab[x$not.na] ### slab is subsetted but NAs are not removed, so still need to do this here pch <- pch[x$not.na] ### same for pch if (!inherits(x, "rma.uni.trimfill")) { col <- col[x$not.na] bg <- bg[x$not.na] } else { fill <- x$fill[x$not.na] } if (missing(xlab)) xlab <- .setlab(x$measure, transf.char="FALSE", atransf.char, gentype=1) } else { if (missing(refline)) refline <- 0 if (addtau2) { warning(mstyle$warning("Argument 'addtau2' ignored for models that contain moderators."), call.=FALSE) addtau2 <- FALSE } options(na.action = "na.pass") ### note: subsetted but include the NAs (there may be more ### NAs than the ones in x$not.na (rstudent() can fail), if (type == "rstandard") { ### so we don't use x$not.na below res <- rstandard(x) } else { res <- rstudent(x) } options(na.action = na.act) ### need to check for missings here not.na <- !is.na(res$resid) ### vector of residuals is of size k.f and can includes NAs yi <- res$resid[not.na] sei <- res$se[not.na] ni <- x$ni.f[not.na] ### ni can be NULL and can still include NAs vi <- sei^2 slab <- x$slab[not.na] pch <- pch[not.na] col <- col[not.na] bg <- bg[not.na] if (missing(xlab)) xlab <- "Residual Value" } if (inherits(x, "rma.ls") && addtau2) { warning(mstyle$warning("Argument 'addtau2' ignored for 'rma.ls' models."), call.=FALSE) addtau2 <- FALSE } tau2 <- ifelse(addtau2, x$tau2, 0) ### get weights (omit any NAs) if (yaxis == "wi") { options(na.action = "na.omit") weights <- weights(x) options(na.action = na.act) } ######################################################################### ### set y-axis limits if (missing(ylim)) { ### 1st ylim value is always the lowest precision (should be at the bottom of the plot) ### 2nd ylim value is always the highest precision (should be at the top of the plot) if (yaxis == "sei") ylim <- c(max(sei), 0) if (yaxis == "vi") ylim <- c(max(vi), 0) if (yaxis == "seinv") ylim <- c(min(1/sei), max(1/sei)) if (yaxis == "vinv") ylim <- c(min(1/vi), max(1/vi)) if (yaxis == "ni") ylim <- c(min(ni, na.rm=TRUE), max(ni, na.rm=TRUE)) if (yaxis == "ninv") ylim <- c(max(1/ni, na.rm=TRUE), min(1/ni, na.rm=TRUE)) if (yaxis == "sqrtni") ylim <- c(min(sqrt(ni), na.rm=TRUE), max(sqrt(ni), na.rm=TRUE)) if (yaxis == "sqrtninv") ylim <- c(max(1/sqrt(ni), na.rm=TRUE), min(1/sqrt(ni), na.rm=TRUE)) if (yaxis == "lni") ylim <- c(min(log(ni), na.rm=TRUE), max(log(ni), na.rm=TRUE)) if (yaxis == "wi") ylim <- 
c(min(weights), max(weights)) ### infinite y-axis limits can happen with "seinv" and "vinv" when one or more sampling variances are 0 if (any(is.infinite(ylim))) stop(mstyle$stop("Setting 'ylim' automatically not possible (must set y-axis limits manually).")) } else { ### make sure that user supplied limits are in the right order if (is.element(yaxis, c("sei", "vi", "ninv", "sqrtninv"))) ylim <- c(max(ylim), min(ylim)) if (is.element(yaxis, c("seinv", "vinv", "ni", "sqrtni", "lni", "wi"))) ylim <- c(min(ylim), max(ylim)) ### make sure that user supplied limits are in the appropriate range if (is.element(yaxis, c("sei", "vi", "ni", "ninv", "sqrtni", "sqrtninv", "lni"))) { if (ylim[1] < 0 || ylim[2] < 0) stop(mstyle$stop("Both y-axis limits must be >= 0.")) } if (is.element(yaxis, c("seinv", "vinv"))) { if (ylim[1] <= 0 || ylim[2] <= 0) stop(mstyle$stop("Both y-axis limits must be > 0.")) } if (is.element(yaxis, c("wi"))) { if (ylim[1] < 0 || ylim[2] < 0) stop(mstyle$stop("Both y-axis limits must be >= 0.")) } } ######################################################################### ### set x-axis limits if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) level2 <- ifelse(level2 == 0, 1, ifelse(level2 >= 1, (100-level2)/100, ifelse(level2 > .5, 1-level2, level2))) #level <- ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level)) ### note: there may be multiple level values level.min <- min(level) ### note: smallest level is the widest CI lvals <- length(level) ### calculate the CI bounds at the bottom of the figure (for the widest CI if there are multiple) if (yaxis == "sei") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1]^2 + tau2) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1]^2 + tau2) } if (yaxis == "vi") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1] + tau2) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1] + tau2) } if (yaxis == "seinv") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1]^2 + tau2) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1]^2 + tau2) } if (yaxis == "vinv") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1] + tau2) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1] + tau2) } if (missing(xlim)) { xlim <- c(min(x.lb.bot,min(yi)), max(x.ub.bot,max(yi))) ### make sure x-axis not only includes widest CI, but also all yi values rxlim <- xlim[2] - xlim[1] ### calculate range of the x-axis limits xlim[1] <- xlim[1] - (rxlim * 0.10) ### subtract 10% of range from lower x-axis bound xlim[2] <- xlim[2] + (rxlim * 0.10) ### add 10% of range to upper x-axis bound } else { xlim <- sort(xlim) ### just in case the user supplies the limits in the wrong order } } if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi"))) { if (missing(xlim)) { xlim <- c(min(yi), max(yi)) rxlim <- xlim[2] - xlim[1] ### calculate range of the x-axis limits xlim[1] <- xlim[1] - (rxlim * 0.10) ### subtract 10% of range from lower x-axis bound xlim[2] <- xlim[2] + (rxlim * 0.10) ### add 10% of range to upper x-axis bound } else { xlim <- sort(xlim) ### just in case the user supplies the limits in the wrong order } } ### if user has specified 'at' argument, make sure xlim actually contains the min and max 'at' values if (!is.null(at)) { xlim[1] <- 
min(c(xlim[1], at), na.rm=TRUE) xlim[2] <- max(c(xlim[2], at), na.rm=TRUE) } ######################################################################### ### set up plot lplot(NA, NA, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, xaxt="n", yaxt="n", bty="n", ...) ### add background shading par.usr <- par("usr") lrect(par.usr[1], par.usr[3], par.usr[2], par.usr[4], col=back, border=NA, ...) ### add y-axis laxis(side=2, at=seq(from=ylim[1], to=ylim[2], length.out=steps), labels=formatC(seq(from=ylim[1], to=ylim[2], length.out=steps), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])), ...) ### add horizontal lines labline(h=seq(from=ylim[1], to=ylim[2], length.out=steps), col=hlines, ...) ######################################################################### ### add CI region(s) if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { ### add a bit to the top/bottom ylim so that the CI region(s) fill out the entire figure if (yaxis == "sei") { rylim <- ylim[1] - ylim[2] ylim[1] <- ylim[1] + (rylim * 0.10) ylim[2] <- max(0, ylim[2] - (rylim * 0.10)) } if (yaxis == "vi") { rylim <- ylim[1] - ylim[2] ylim[1] <- ylim[1] + (rylim * 0.10) ylim[2] <- max(0, ylim[2] - (rylim * 0.10)) } if (yaxis == "seinv") { rylim <- ylim[2] - ylim[1] #ylim[1] <- max(.0001, ylim[1] - (rylim * 0.10)) ### not clear how much to add to bottom ylim[2] <- ylim[2] + (rylim * 0.10) } if (yaxis == "vinv") { rylim <- ylim[2] - ylim[1] #ylim[1] <- max(.0001, ylim[1] - (rylim * 0.10)) ### not clear how much to add to bottom ylim[2] <- ylim[2] + (rylim * 0.10) } yi.vals <- seq(from=ylim[1], to=ylim[2], length.out=ci.res) if (yaxis == "sei") vi.vals <- yi.vals^2 if (yaxis == "vi") vi.vals <- yi.vals if (yaxis == "seinv") vi.vals <- 1/yi.vals^2 if (yaxis == "vinv") vi.vals <- 1/yi.vals for (m in lvals:1) { ci.left <- refline - qnorm(level[m]/2, lower.tail=FALSE) * sqrt(vi.vals + tau2) ci.right <- refline + qnorm(level[m]/2, lower.tail=FALSE) * sqrt(vi.vals + tau2) lpolygon(c(ci.left,ci.right[ci.res:1]), c(yi.vals,yi.vals[ci.res:1]), border=NA, col=shade[m], ...) llines(ci.left, yi.vals, lty=lty[1], ...) llines(ci.right, yi.vals, lty=lty[1], ...) } if (!is.null(refline2)) { ci.left <- refline2 - qnorm(level2/2, lower.tail=FALSE) * sqrt(vi.vals + tau2) ci.right <- refline2 + qnorm(level2/2, lower.tail=FALSE) * sqrt(vi.vals + tau2) llines(ci.left, yi.vals, lty=lty2, ...) llines(ci.right, yi.vals, lty=lty2, ...) } } ### add vertical reference line ### use segments so that line does not extent beyond tip of CI region if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) lsegments(refline, ylim[1], refline, ylim[2], lty=lty[2], ...) if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi"))) labline(v=refline, lty=lty[2], ...) if (!is.null(refline2)) { if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) lsegments(refline2, ylim[1], refline2, ylim[2], lty=lty2, ...) if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi"))) labline(v=refline2, lty=lty2, ...) 
} ######################################################################### ### add points xaxis.vals <- yi if (yaxis == "sei") yaxis.vals <- sei if (yaxis == "vi") yaxis.vals <- vi if (yaxis == "seinv") yaxis.vals <- 1/sei if (yaxis == "vinv") yaxis.vals <- 1/vi if (yaxis == "ni") yaxis.vals <- ni if (yaxis == "ninv") yaxis.vals <- 1/ni if (yaxis == "sqrtni") yaxis.vals <- sqrt(ni) if (yaxis == "sqrtninv") yaxis.vals <- 1/sqrt(ni) if (yaxis == "lni") yaxis.vals <- log(ni) if (yaxis == "wi") yaxis.vals <- weights if (!inherits(x, "rma.uni.trimfill")) { lpoints(x=xaxis.vals, y=yaxis.vals, pch=pch, col=col, bg=bg, ...) } else { lpoints(x=xaxis.vals[!fill], y=yaxis.vals[!fill], pch=pch, col=col[1], bg=bg[1], ...) lpoints(x=xaxis.vals[fill], y=yaxis.vals[fill], pch=pch.fill, col=col[2], bg=bg[2], ...) } ######################################################################### ### add L-shaped box around plot box(bty="l") ### generate x-axis positions if none are specified if (is.null(at)) { at <- axTicks(side=1) #at <- pretty(x=c(alim[1], alim[2]), n=steps-1) #at <- pretty(x=c(min(ci.lb), max(ci.ub)), n=steps-1) } else { at <- at[at > par("usr")[1]] at <- at[at < par("usr")[2]] } at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } } else { at.lab <- formatC(at.lab, digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } ### add x-axis laxis(side=1, at=at, labels=at.lab, ...) ############################################################################ ### labeling of points k <- length(yi) if (is.numeric(label) || is.character(label) || .isTRUE(label)) { if (is.numeric(label)) { label <- round(label) if (label < 0) label <- 0 if (label > k) label <- k label <- order(abs(yi - refline), decreasing=TRUE)[seq_len(label)] } else if ((is.character(label) && label == "all") || .isTRUE(label)) { label <- seq_len(k) } else if ((is.character(label) && label == "out")) { if (!is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { label <- seq_len(k) } else { label <- which(abs(yi - refline) / sqrt(vi + tau2) >= qnorm(level.min/2, lower.tail=FALSE)) } } else { label <- NULL } for (i in label) ltext(yi[i], yaxis.vals[i], slab[i], pos=ifelse(yi[i]-refline >= 0, 4, 2), offset=offset, ...) 
} ######################################################################### ### add legend (if requested) if (is.logical(legend) && isTRUE(legend)) lpos <- "topright" if (is.character(legend)) { lpos <- legend legend <- TRUE } if (legend && !is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { legend <- FALSE warning(mstyle$warning("Argument 'legend' only applicable if 'yaxis' is 'sei', 'vi', 'seinv', or 'vinv'."), call.=FALSE) } if (legend) { level <- c(level, 0) lvals <- length(level) add.studies <- !pch.vec && !col.vec && !bg.vec # only add 'Studies' to legend if pch, col, and bg were not vectors to begin with scipen <- options(scipen=100) lchars <- max(nchar(level))-2 options(scipen=scipen$scipen) pval1 <- NULL pval2 <- NULL phantom <- NULL ltxt <- sapply(1:lvals, function(i) { if (i == 1) return(as.expression(bquote(paste(.(pval1) < p, phantom() <= .(pval2)), list(pval1=.fcf(level[i], lchars), pval2=.fcf(1, lchars))))) #return(as.expression(bquote(p > .(pval), list(pval=.fcf(level[i], lchars))))) if (i > 1 && i < lvals) return(as.expression(bquote(paste(.(pval1) < p, phantom() <= .(pval2)), list(pval1=.fcf(level[i], lchars), pval2=.fcf(level[i-1], lchars))))) if (i == lvals) return(as.expression(bquote(paste(.(pval1) < p, phantom() <= .(pval2)), list(pval1=.fcf(0, lchars), pval2=.fcf(level[i-1], lchars))))) }) pch.l <- rep(22, lvals) col.l <- rep("black", lvals) pt.cex <- rep(2, lvals) pt.bg <- c(shade, back) if (add.studies) { ltxt <- c(ltxt, expression(plain(Studies))) pch.l <- c(pch.l, pch[1]) col.l <- c(col.l, col[1]) pt.cex <- c(pt.cex, 1) pt.bg <- c(pt.bg, bg[1]) } if (inherits(x, "rma.uni.trimfill")) { ltxt <- c(ltxt, expression(plain(Filled~Studies))) pch.l <- c(pch.l, pch.fill[1]) col.l <- c(col.l, col[2]) pt.cex <- c(pt.cex, 1) pt.bg <- c(pt.bg, bg[2]) } legend(lpos, inset=.01, bg="white", pch=pch.l, col=col.l, pt.cex=pt.cex, pt.bg=pt.bg, legend=ltxt) } ############################################################################ ### prepare data frame to return sav <- data.frame(x=xaxis.vals, y=yaxis.vals, slab=slab, stringsAsFactors=FALSE) if (inherits(x, "rma.uni.trimfill")) sav$fill <- fill invisible(sav) } metafor/R/fitstats.r0000644000176200001440000000007413457322061014140 0ustar liggesusersfitstats <- function (object, ...) UseMethod("fitstats") metafor/R/addpoly.r0000644000176200001440000000006413457322061013732 0ustar liggesusersaddpoly <- function(x, ...) UseMethod("addpoly") metafor/R/print.anova.rma.r0000644000176200001440000001517714046526620015330 0ustar liggesusersprint.anova.rma <- function(x, digits=x$digits, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="anova.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") if (x$type == "Wald.btt") { if (is.element("rma.ls", x$class)) { cat(mstyle$section(paste0("Test of Location Coefficients (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) } else { cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) } cat("\n") if (is.element(x$test, c("knha","adhoc","t"))) { cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") } if (x$type == "Wald.att") { cat(mstyle$section(paste0("Test of Scale Coefficients (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$att),"):"))) cat("\n") if (x$test == "t") { cat(mstyle$result(paste0("F(df1 = ", x$QSdf[1], ", df2 = ", x$QSdf[2], ") = ", .fcf(x$QS, digits[["test"]]), ", p-val ", .pval(x$QSp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QS(df = ", x$QSdf[1], ") = ", .fcf(x$QS, digits[["test"]]), ", p-val ", .pval(x$QSp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") } if (x$type == "Wald.Xb") { if (x$m == 1) { cat(mstyle$section("Hypothesis:")) } else { cat(mstyle$section("Hypotheses:")) } tmp <- capture.output(print(x$hyp)) .print.output(tmp, mstyle$text) cat("\n") cat(mstyle$section("Results:")) cat("\n") if (is.element(x$test, c("knha","adhoc","t"))) { res.table <- data.frame(estimate=.fcf(c(x$Xb), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), pval=.pval(x$pval, digits[["pval"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$Xb), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), stringsAsFactors=FALSE) } rownames(res.table) <- paste0(seq_len(x$m), ":") tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) if (!is.na(x$QM)) { cat("\n") if (x$m == 1) { cat(mstyle$section("Test of Hypothesis:")) } else { cat(mstyle$section("Omnibus Test of Hypotheses:")) } cat("\n") if (is.element(x$test, c("knha","adhoc","t"))) { cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") } } if (x$type == "Wald.Za") { if (x$m == 1) { cat(mstyle$section("Hypothesis:")) } else { cat(mstyle$section("Hypotheses:")) } tmp <- capture.output(print(x$hyp)) .print.output(tmp, mstyle$text) cat("\n") cat(mstyle$section("Results:")) cat("\n") if (x$test == "t") { res.table <- data.frame(estimate=.fcf(c(x$Za), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), pval=.pval(x$pval, digits[["pval"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$Za), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), 
stringsAsFactors=FALSE) } rownames(res.table) <- paste0(seq_len(x$m), ":") tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) if (!is.na(x$QS)) { cat("\n") if (x$m == 1) { cat(mstyle$section("Test of Hypothesis:")) } else { cat(mstyle$section("Omnibus Test of Hypotheses:")) } cat("\n") if (x$test == "t") { cat(mstyle$result(paste0("F(df1 = ", x$QSdf[1], ", df2 = ", x$QSdf[2], ") = ", .fcf(x$QS, digits[["test"]]), ", p-val ", .pval(x$QSp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QS(df = ", x$QSdf[1], ") = ", .fcf(x$QS, digits[["test"]]), ", p-val ", .pval(x$QSp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") } } if (x$type == "LRT") { res.table <- data.frame(c(x$parms.f, x$parms.r), c(.fcf(x$fit.stats.f["AIC"], digits[["fit"]]), .fcf(x$fit.stats.r["AIC"], digits[["fit"]])), c(.fcf(x$fit.stats.f["BIC"], digits[["fit"]]), .fcf(x$fit.stats.r["BIC"], digits[["fit"]])), c(.fcf(x$fit.stats.f["AICc"], digits[["fit"]]), .fcf(x$fit.stats.r["AICc"], digits[["fit"]])), c(.fcf(x$fit.stats.f["ll"], digits[["fit"]]), .fcf(x$fit.stats.r["ll"], digits[["fit"]])), c(NA, .fcf(x$LRT, digits[["test"]])), c(NA, .pval(x$pval, digits[["pval"]])), c(.fcf(x$QE.f, digits[["test"]]), .fcf(x$QE.r, digits[["test"]])), c(.fcf(x$tau2.f, digits[["var"]]), .fcf(x$tau2.r, digits[["var"]])), c(NA, NA), stringsAsFactors=FALSE) colnames(res.table) <- c("df", "AIC", "BIC", "AICc", "logLik", "LRT", "pval", "QE", "tau^2", "R^2") rownames(res.table) <- c("Full", "Reduced") res.table["Full",c("LRT","pval")] <- "" res.table["Full","R^2"] <- "" res.table["Reduced","R^2"] <- paste0(.fcf(x$R2, digits[["het"]]), "%") ### remove tau^2 and R^2 columns if full model is a FE/EE/CE model or if dealing with rma.mv or rma.ls models if (is.element(x$method, c("FE","EE","CE")) || is.element("rma.mv", x$class.f) || is.element("rma.ls", x$class.f)) res.table <- res.table[,seq_len(8)] tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/cumul.rma.peto.r0000644000176200001440000001147714030331214015145 0ustar liggesuserscumul.rma.peto <- function(x, order, digits, transf, targs, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.peto") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) 
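### Hedged usage sketch for this function (not from the package documentation;
### 'dat' with columns 'ai', 'bi', 'ci', 'di', and 'year' is hypothetical):
### cumulative meta-analysis in publication-year order based on a Peto model.
# res <- rma.peto(ai=ai, bi=bi, ci=ci, di=di, data=dat)
# cumul(res, order=dat$year)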
.chkdots(ddd, c("time", "decreasing")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (is.null(ddd$decreasing)) { decreasing <- FALSE } else { decreasing <- ddd$decreasing } ######################################################################### if (grepl("^order\\(", deparse(substitute(order)))) warning(mstyle$warning("Use of order() in 'order' argument is probably erroneous."), call.=FALSE) if (missing(order)) order <- seq_len(x$k.all) if (length(order) != x$k.all) stop(mstyle$stop(paste0("Length of the 'order' argument (", length(order), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) ### note: order variable is assumed to be of the same length as the size of the ### original dataset passed to the model fitting function and so we apply ### the same subsetting (if necessary) as was done during model fitting if (!is.null(x$subset)) order <- order[x$subset] order <- order(order, decreasing=decreasing) ai.f <- x$ai.f[order] bi.f <- x$bi.f[order] ci.f <- x$ci.f[order] di.f <- x$di.f[order] yi.f <- x$yi.f[order] vi.f <- x$vi.f[order] not.na <- x$not.na[order] slab <- x$slab[order] ids <- x$ids[order] beta <- rep(NA_real_, x$k.f) se <- rep(NA_real_, x$k.f) zval <- rep(NA_real_, x$k.f) pval <- rep(NA_real_, x$k.f) ci.lb <- rep(NA_real_, x$k.f) ci.ub <- rep(NA_real_, x$k.f) QE <- rep(NA_real_, x$k.f) QEp <- rep(NA_real_, x$k.f) I2 <- rep(NA_real_, x$k.f) H2 <- rep(NA_real_, x$k.f) ### note: skipping NA cases if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!not.na[i]) next res <- try(suppressWarnings(rma.peto(ai=ai.f, bi=bi.f, ci=ci.f, di=di.f, add=x$add, to=x$to, drop00=x$drop00, level=x$level, subset=seq_len(i))), silent=TRUE) if (inherits(res, "try-error")) next beta[i] <- res$beta se[i] <- res$se zval[i] <- res$zval pval[i] <- res$pval ci.lb[i] <- res$ci.lb ci.ub[i] <- res$ci.ub QE[i] <- res$QE QEp[i] <- res$QEp I2[i] <- res$I2 H2[i] <- res$H2 } if (progbar) pbapply::closepb(pbar) ######################################################################### ### if requested, apply transformation function if (.isTRUE(transf)) ### if transf=TRUE, apply exp transformation to ORs transf <- exp if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(estimate=beta[not.na], se=se[not.na], zval=zval[not.na], pval=pval[not.na], ci.lb=ci.lb[not.na], ci.ub=ci.ub[not.na], Q=QE[not.na], Qp=QEp[not.na], I2=I2[not.na], H2=H2[not.na]) out$slab <- slab[not.na] out$ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(estimate=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, Q=QE, Qp=QEp, I2=I2, H2=H2) out$slab <- slab out$ids <- ids } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits out$transf <- transf out$slab.null <- x$slab.null out$level <- x$level out$measure <- x$measure out$test <- x$test attr(out$estimate, "measure") <- x$measure if (.isTRUE(ddd$time)) { time.end <- proc.time() 
.print.time(unname(time.end - time.start)[3]) } class(out) <- c("list.rma", "cumul.rma") return(out) } metafor/R/print.list.confint.rma.r0000644000176200001440000000140213770373571016630 0ustar liggesusersprint.list.confint.rma <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="list.confint.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) x$digits <- NULL # so length(x) is correct if (!exists(".rmspace")) cat("\n") len <- length(x) for (j in 1:len) { res.random <- .fcf(x[[j]]$random, digits[["var"]]) res.random[,2] <- paste0(x[[j]]$lb.sign, res.random[,2]) res.random[,3] <- paste0(x[[j]]$ub.sign, res.random[,3]) tmp <- capture.output(print(res.random, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) if (j != len) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/radial.rma.r0000644000176200001440000002174114054725414014320 0ustar liggesusersradial.rma <- function(x, center=FALSE, xlim=NULL, zlim, xlab, zlab, atz, aty, steps=7, level=x$level, digits=2, back="lightgray", transf, targs, pch=19, arc.res=100, cex, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL if (missing(atz)) atz <- NULL if (missing(aty)) aty <- NULL ######################################################################### ### radial plots only for intercept-only models if (x$int.only) { yi <- x$yi yi.c <- yi vi <- x$vi beta <- c(x$beta) ci.lb <- x$ci.lb ci.ub <- x$ci.ub tau2 <- 1/mean(1/x$tau2) ### geometric mean of tau^2 values (hackish solution for models with multiple tau^2 values) ### note: this works for 1/mean(1/0) = 0; TODO: consider something more sophisticated here if (is.null(aty)) { atyis <- range(yi) } else { atyis <- range(aty) aty.c <- aty } } else { stop(mstyle$stop("Radial plots only applicable for models without moderators.")) } if (center) { yi <- yi - c(x$beta) beta <- 0 ci.lb <- ci.lb - c(x$beta) ci.ub <- ci.ub - c(x$beta) atyis <- atyis - c(x$beta) if (!is.null(aty)) aty <- aty - c(x$beta) } ######################################################################### level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) zcrit <- qnorm(level/2, lower.tail=FALSE) zi <- yi / sqrt(vi+tau2) xi <- 1 / sqrt(vi+tau2) ### if vi=0 and tau2=0, then zi and xi will be Inf if (any(is.infinite(c(xi,zi)))) stop(mstyle$stop("Setting 'xlim' and 'zlim' automatically not possible (must set axis limits manually).")) ### set x-axis limits if none are specified if (missing(xlim)) { xlims <- c(0, (1.30*max(xi))) ### add 30% to upper bound } else { xlims <- sort(xlim) } ### x-axis position of the confidence interval ci.xpos <- xlims[2] + 0.12*(xlims[2]-xlims[1]) ### add 12% of range to upper bound ### x-axis position of the y-axis on the right ya.xpos <- xlims[2] + 0.14*(xlims[2]-xlims[1]) ### add 14% of range to upper bound xaxismax <- xlims[2] ### set z-axis limits if none are specified (these are the actual y-axis limits of the plot) if (missing(zlim)) { zlims <- c(min(-5, 1.10*min(zi), 1.10*ci.lb*ci.xpos, 1.10*min(atyis)*ya.xpos, 1.10*min(yi)*ya.xpos, -1.10*zcrit+xaxismax*beta), max(5, 1.10*max(zi), 1.10*ci.ub*ci.xpos, 1.10*max(atyis)*ya.xpos, 1.10*max(yi)*ya.xpos, 1.10*zcrit+xaxismax*beta)) } else { zlims <- sort(zlim) } ### adjust 
margins par.mar <- par("mar") par.mar.adj <- par.mar - c(0,-3,0,-5) par.mar.adj[par.mar.adj < 1] <- 1 par(mar = par.mar.adj) on.exit(par(mar = par.mar)) ### label for the x-axis if (missing(xlab)) { if (is.element(x$method, c("FE","EE","CE"))) { xlab <- expression(x[i]==1/sqrt(v[i]), ...) } else { xlab <- expression(x[i]==1/sqrt(v[i]+tau^2), ...) } } par.pty <- par("pty") par(pty="s") on.exit(par(pty = par.pty), add=TRUE) plot(NA, NA, ylim=zlims, xlim=xlims, bty="n", xaxt="n", yaxt="n", xlab=xlab, ylab="", xaxs="i", yaxs="i", ...) if (missing(cex)) cex <- par("cex") ### add polygon and +-zcrit lines polygon(c(0,xaxismax,xaxismax,0), c(zcrit, zcrit+xaxismax*beta, -zcrit+xaxismax*beta, -zcrit), border=NA, col=back, ...) segments(0, 0, xaxismax, xaxismax*beta, lty="solid", ...) segments(0, -zcrit, xaxismax, -zcrit+xaxismax*beta, lty="dotted", ...) segments(0, zcrit, xaxismax, zcrit+xaxismax*beta, lty="dotted", ...) ### add x-axis axis(side=1, ...) ### add z-axis if (is.null(atz)) { axis(side=2, at=seq(-4, 4, length.out=9), labels=NA, las=1, tcl=par("tcl")/2, ...) axis(side=2, at=seq(-2, 2, length.out=3), las=1, ...) } else { axis(side=2, at=atz, labels=atz, las=1, ...) } ### add label for the z-axis if (missing(zlab)) { if (center) { if (is.element(x$method, c("FE","EE","CE"))) { mtext(expression(z[i]==frac(y[i]-hat(theta),sqrt(v[i]))), side=2, line=par.mar.adj[2]-1, at=0, adj=0, las=1, cex=cex, ...) } else { mtext(expression(z[i]==frac(y[i]-hat(mu),sqrt(v[i]+tau^2))), side=2, line=par.mar.adj[2]-1, adj=0, at=0, las=1, cex=cex, ...) } } else { if (is.element(x$method, c("FE","EE","CE"))) { mtext(expression(z[i]==frac(y[i],sqrt(v[i]))), side=2, line=par.mar.adj[2]-2, at=0, adj=0, las=1, cex=cex, ...) } else { mtext(expression(z[i]==frac(y[i],sqrt(v[i]+tau^2))), side=2, line=par.mar.adj[2]-1, at=0, adj=0, las=1, cex=cex, ...) } } } else { mtext(zlab, side=2, line=par.mar.adj[2]-4, at=0, cex=cex, ...) } ######################################################################### ### add y-axis arc and CI arc on the right par.xpd <- par("xpd") par(xpd=TRUE) par.usr <- par("usr") asp.rat <- (par.usr[4]-par.usr[3])/(par.usr[2]-par.usr[1]) if (length(arc.res) == 1L) arc.res <- c(arc.res, arc.res/4) ### add y-axis arc if (is.null(aty)) { atyis <- seq(min(yi), max(yi), length.out=arc.res[1]) } else { atyis <- seq(min(aty), max(aty), length.out=arc.res[1]) } len <- ya.xpos xis <- rep(NA_real_,length(atyis)) zis <- rep(NA_real_,length(atyis)) for (i in seq_len(length(atyis))) { xis[i] <- sqrt(len^2/(1+(atyis[i]/asp.rat)^2)) zis[i] <- xis[i]*atyis[i] } valid <- zis > zlims[1] & zis < zlims[2] lines(xis[valid], zis[valid], ...) ### add y-axis tick marks if (is.null(aty)) { atyis <- seq(min(yi), max(yi), length.out=steps) } else { atyis <- aty } len.l <- ya.xpos len.u <- ya.xpos + .015*(xlims[2]-xlims[1]) xis.l <- rep(NA_real_,length(atyis)) zis.l <- rep(NA_real_,length(atyis)) xis.u <- rep(NA_real_,length(atyis)) zis.u <- rep(NA_real_,length(atyis)) for (i in seq_len(length(atyis))) { xis.l[i] <- sqrt(len.l^2/(1+(atyis[i]/asp.rat)^2)) zis.l[i] <- xis.l[i]*atyis[i] xis.u[i] <- sqrt(len.u^2/(1+(atyis[i]/asp.rat)^2)) zis.u[i] <- xis.u[i]*atyis[i] } valid <- zis.l > zlims[1] & zis.u > zlims[1] & zis.l < zlims[2] & zis.u < zlims[2] if (any(valid)) segments(xis.l[valid], zis.l[valid], xis.u[valid], (xis.u*atyis)[valid], ...) 
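### Note on the geometry used above (and below): since
### xis[i] = sqrt(len^2 / (1 + (atyis[i]/asp.rat)^2)) and zis[i] = xis[i]*atyis[i],
### each point satisfies xis[i]^2 + (zis[i]/asp.rat)^2 = len^2, i.e., the tick
### marks, labels, and CI arc all lie at distance 'len' (after correcting for
### the aspect ratio) along the ray with slope equal to the corresponding y value.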
### add y-axis labels if (is.null(aty)) { atyis <- seq(min(yi), max(yi), length.out=steps) atyis.lab <- seq(min(yi.c), max(yi.c), length.out=steps) } else { atyis <- aty atyis.lab <- aty.c } len <- ya.xpos+.02*(xlims[2]-xlims[1]) xis <- rep(NA_real_,length(atyis)) zis <- rep(NA_real_,length(atyis)) for (i in seq_len(length(atyis))) { xis[i] <- sqrt(len^2/(1+(atyis[i]/asp.rat)^2)) zis[i] <- xis[i]*atyis[i] } if (is.function(transf)) { if (is.null(targs)) { atyis.lab <- sapply(atyis.lab, transf) } else { atyis.lab <- sapply(atyis.lab, transf, targs) } } valid <- zis > zlims[1] & zis < zlims[2] if (any(valid)) text(xis[valid], zis[valid], .fcf(atyis.lab[valid], digits), pos=4, cex=cex, ...) ### add CI arc atyis <- seq(ci.lb, ci.ub, length.out=arc.res[2]) len <- ci.xpos xis <- rep(NA_real_,length(atyis)) zis <- rep(NA_real_,length(atyis)) for (i in seq_len(length(atyis))) { xis[i] <- sqrt(len^2/(1+(atyis[i]/asp.rat)^2)) zis[i] <- xis[i]*atyis[i] } valid <- zis > zlims[1] & zis < zlims[2] if (any(valid)) lines(xis[valid], zis[valid], ...) ### add CI tick marks atyis <- c(ci.lb, beta, ci.ub) len.l <- ci.xpos-.007*(xlims[2]-xlims[1]) len.u <- ci.xpos+.007*(xlims[2]-xlims[1]) xis.l <- rep(NA_real_,3) zis.l <- rep(NA_real_,3) xis.u <- rep(NA_real_,3) zis.u <- rep(NA_real_,3) for (i in seq_len(length(atyis))) { xis.l[i] <- sqrt(len.l^2/(1+(atyis[i]/asp.rat)^2)) zis.l[i] <- xis.l[i]*atyis[i] xis.u[i] <- sqrt(len.u^2/(1+(atyis[i]/asp.rat)^2)) zis.u[i] <- xis.u[i]*atyis[i] } valid <- zis.l > zlims[1] & zis.u > zlims[1] & zis.l < zlims[2] & zis.u < zlims[2] if (any(valid)) segments(xis.l[valid], zis.l[valid], xis.u[valid], (xis.u*atyis)[valid], ...) par(xpd=par.xpd) ######################################################################### ### add points to the plot points(x=xi, y=zi, pch=pch, cex=cex, ...) if (is.null(x$not.na.yivi)) { invisible(data.frame(x=xi, y=zi, ids=x$ids[x$not.na], slab=x$slab[x$not.na], stringsAsFactors=FALSE)) } else { invisible(data.frame(x=xi, y=zi, ids=x$ids[x$not.na.yivi], slab=x$slab[x$not.na.yivi], stringsAsFactors=FALSE)) } } metafor/R/plot.rma.uni.r0000644000176200001440000000777114054725405014643 0ustar liggesusersplot.rma.uni <- function(x, qqplot=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) par.mfrow <- par("mfrow") par(mfrow=c(2,2)) on.exit(par(mfrow = par.mfrow), add=TRUE) ######################################################################### if (x$int.only) { ###################################################################### forest(x, ...) title("Forest Plot", ...) ###################################################################### funnel(x, ...) title("Funnel Plot", ...) ###################################################################### radial(x, ...) title("Radial Plot", ...) ###################################################################### if (qqplot) { qqnorm(x, ...) 
} else { options(na.action = "na.pass") z <- rstandard(x)$z options(na.action = na.act) not.na <- !is.na(z) if (na.act == "na.omit") { z <- z[not.na] ids <- x$ids[not.na] not.na <- not.na[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") ids <- x$ids k <- length(z) plot(NA, NA, xlim=c(1,k), ylim=c(min(z, -2, na.rm=TRUE), max(z, 2, na.rm=TRUE)), xaxt="n", xlab="Study", ylab="", bty="l", ...) lines(seq_len(k)[not.na], z[not.na], col="lightgray", ...) lines(seq_len(k), z, ...) points(x=seq_len(k), y=z, pch=21, bg="black", ...) axis(side=1, at=seq_len(k), labels=ids, ...) abline(h=0, lty="dashed", ...) abline(h=c(qnorm(.025),qnorm(.975)), lty="dotted", ...) title("Standardized Residuals", ...) } } else { ###################################################################### forest(x, ...) title("Forest Plot", ...) ###################################################################### funnel(x, ...) title("Residual Funnel Plot", ...) ###################################################################### options(na.action = "na.pass") z <- rstandard(x)$z pred <- fitted(x) options(na.action = na.act) plot(pred, z, ylim=c(min(z, -2, na.rm=TRUE), max(z, 2, na.rm=TRUE)), pch=19, bty="l", xlab="Fitted Value", ylab="Standardized Residual", ...) abline(h=0, lty="dashed", ...) abline(h=c(qnorm(.025),qnorm(.975)), lty="dotted", ...) title("Fitted vs. Standardized Residuals", ...) ###################################################################### if (qqplot) { qqnorm(x, ...) } else { options(na.action = "na.pass") z <- rstandard(x)$z options(na.action = na.act) not.na <- !is.na(z) if (na.act == "na.omit") { z <- z[not.na] ids <- x$ids[not.na] not.na <- not.na[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") ids <- x$ids k <- length(z) plot(NA, NA, xlim=c(1,k), ylim=c(min(z, -2, na.rm=TRUE), max(z, 2, na.rm=TRUE)), xaxt="n", xlab="Study", ylab="", bty="l", ...) lines(seq_len(k)[not.na], z[not.na], col="lightgray", ...) lines(seq_len(k), z, ...) points(x=seq_len(k), y=z, pch=21, bg="black", ...) axis(side=1, at=seq_len(k), labels=ids, ...) abline(h=0, lty="dashed", ...) abline(h=c(qnorm(.025),qnorm(.975)), lty="dotted", ...) title("Standardized Residuals", ...) } ###################################################################### } invisible() } metafor/R/ranef.rma.mv.r0000644000176200001440000002273714050233377014604 0ustar liggesusersranef.rma.mv <- function(object, level, digits, transf, targs, verbose=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.mv") x <- object na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL expand <- FALSE # TODO: make this an option? 
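### Hedged usage sketch (not from the package documentation; 'dat' with columns
### 'yi', 'vi', and 'study' is hypothetical): extract the BLUPs of the random
### effects from a multilevel model.
# res <- rma.mv(yi, vi, random = ~ 1 | study, data=dat)
# ranef(res)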
level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) if (x$test != "t") crit <- qnorm(level/2, lower.tail=FALSE) ### TODO: check computations for user-defined weights if (!is.null(x$W)) stop(mstyle$stop("Extraction of random effects not available for models with non-standard weights.")) ######################################################################### out <- NULL if (verbose) message(mstyle$message("\nComputing inverse marginal var-cov and hat matrix ... "), appendLF = FALSE) ### compute inverse marginal var-cov and hat matrix W <- chol2inv(chol(x$M)) stXWX <- chol2inv(chol(as.matrix(t(x$X) %*% W %*% x$X))) Hmat <- x$X %*% stXWX %*% crossprod(x$X,W) if (verbose) message(mstyle$message("Done!")) ### compute residuals ei <- c(x$yi - x$X %*% x$beta) ### use this instead of resid(), since this guarantees that the length is correct ### create identity matrix if (x$sparse) { I <- Diagonal(x$k) } else { I <- diag(x$k) } if (x$withS) { # u^ = DZ'W(y - Xb) = DZ'We, where W = M^-1 # note: vpred = var(u^ - u) out <- vector(mode="list", length=x$sigma2s) names(out) <- x$s.names for (j in seq_len(x$sigma2s)) { if (verbose) message(mstyle$message(paste0("Computing BLUPs for '", paste0("~ 1 | ", x$s.names[j]), "' term ... ")), appendLF = FALSE) if (x$Rfix[j]) { if (x$sparse) { D <- x$sigma2[j] * Matrix(x$R[[j]], sparse=TRUE) } else { D <- x$sigma2[j] * x$R[[j]] } } else { if (x$sparse) { D <- x$sigma2[j] * Diagonal(x$s.nlevels[j]) } else { D <- x$sigma2[j] * diag(x$s.nlevels[j]) } } DZtW <- D %*% t(x$Z.S[[j]]) %*% W pred <- as.vector(DZtW %*% cbind(ei)) pred[abs(pred) < 100 * .Machine$double.eps] <- 0 #vpred <- D - (DZtW %*% x$Z.S[[j]] %*% D - DZtW %*% x$X %*% stXWX %*% t(x$X) %*% W %*% x$Z.S[[j]] %*% D) vpred <- D - (DZtW %*% (I - Hmat) %*% x$Z.S[[j]] %*% D) # this one is the same as ranef.rma.uni() for standard RE/ME models #vpred <- DZtW %*% (I - Hmat) %*% x$Z.S[[j]] %*% D # = var(u^) #vpred <- D - (DZtW %*% x$Z.S[[j]] %*% D) # same as lme4::ranef() #vpred <- DZtW %*% x$Z.S[[j]] %*% D if (x$test == "t") { ddf <- .ddf.calc(x$dfs, k=x$k, p=x$p, mf.s=x$mf.s[[j]], beta=FALSE) crit <- qt(level/2, df=ddf, lower.tail=FALSE) } se <- sqrt(diag(vpred)) pi.lb <- c(pred - crit * se) pi.ub <- c(pred + crit * se) pred <- data.frame(intrcpt=pred, se=se, pi.lb=pi.lb, pi.ub=pi.ub) if (na.act == "na.omit") { rownames(pred) <- x$s.levels[[j]] out[[j]] <- pred } if (na.act == "na.exclude" || na.act == "na.pass") { ### determine which levels were removed s.levels.r <- !is.element(x$s.levels.f[[j]], x$s.levels[[j]]) NAs <- rep(NA, x$s.nlevels.f[j]) tmp <- data.frame(intrcpt=NAs, se=NAs, pi.lb=NAs, pi.ub=NAs) tmp[!s.levels.r,] <- pred pred <- tmp rownames(pred) <- x$s.levels.f[[j]] out[[j]] <- pred } if (expand) { rows <- c(x$Z.S[[j]] %*% seq_along(x$s.levels[[j]])) pred <- pred[rows,] rnames <- x$s.levels[[j]][rows] rownames(pred) <- .make.unique(x$s.levels[[j]][rows]) out[[j]] <- pred } if (verbose) message(mstyle$message("Done!")) } } if (x$withG) { if (is.element(x$struct[1], c("GEN","GDIAG"))) { if (verbose) message(mstyle$message("Computation of BLUPs not currently available for struct=\"GEN\".")) } else { if (verbose) message(mstyle$message(paste0("Computing BLUPs for '", paste(x$g.names, collapse=" | "), "' term ... 
")), appendLF = FALSE) G <- (x$Z.G1 %*% x$G %*% t(x$Z.G1)) * tcrossprod(x$Z.G2) GW <- G %*% W pred <- as.vector(GW %*% cbind(ei)) pred[abs(pred) < 100 * .Machine$double.eps] <- 0 #vpred <- G - (GW %*% G - GW %*% x$X %*% stXWX %*% t(x$X) %*% W %*% G) vpred <- G - (GW %*% (I - Hmat) %*% G) if (x$test == "t") { ddf <- .ddf.calc(x$dfs, k=x$k, p=x$p, mf.g=x$mf.g[[2]], beta=FALSE) crit <- qt(level/2, df=ddf, lower.tail=FALSE) } se <- sqrt(diag(vpred)) pi.lb <- c(pred - crit * se) pi.ub <- c(pred + crit * se) pred <- data.frame(intrcpt=pred, se=se, pi.lb=pi.lb, pi.ub=pi.ub) nvars <- ncol(x$mf.g) if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { r.names <- paste(formatC(x$ids[x$not.na], format="f", digits=0, width=max(nchar(x$ids[x$not.na]))), x$mf.g[[nvars]], sep=" | ") } else { #r.names <- paste(x$mf.g[[1]], x$mf.g[[2]], sep=" | ") r.names <- paste(sprintf(paste0("%", max(nchar(paste(x$mf.g[[1]]))), "s", collapse=""), x$mf.g[[1]]), x$mf.g[[nvars]], sep=" | ") } is.dup <- duplicated(r.names) pred <- pred[!is.dup,] rownames(pred) <- r.names[!is.dup] if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { #r.order <- order(x$mf.g[[nvars]][!is.dup], seq_len(x$k)[!is.dup]) r.order <- seq_len(x$k) } else { r.order <- order(x$mf.g[[2]][!is.dup], x$mf.g[[1]][!is.dup]) } pred <- pred[r.order,] out <- c(out, list(pred)) #names(out)[length(out)] <- paste(x$g.names, collapse=" | ") names(out)[length(out)] <- paste0(x$formulas[[1]], collapse="") if (verbose) message(mstyle$message("Done!")) } } if (x$withH) { if (is.element(x$struct[2], c("GEN","GDIAG"))) { if (verbose) message(mstyle$message("Computation of BLUPs not currently available for struct=\"GEN\".")) } else { if (verbose) message(mstyle$message(paste0("Computing BLUPs for '", paste(x$h.names, collapse=" | "), "' term ... 
")), appendLF = FALSE) H <- (x$Z.H1 %*% x$H %*% t(x$Z.H1)) * tcrossprod(x$Z.H2) HW <- H %*% W pred <- as.vector(HW %*% cbind(ei)) pred[abs(pred) < 100 * .Machine$double.eps] <- 0 #vpred <- H - (HW %*% H - HW %*% x$X %*% stXWX %*% t(x$X) %*% W %*% H) vpred <- H - (HW %*% (I - Hmat) %*% H) if (x$test == "t") { ddf <- .ddf.calc(x$dfs, k=x$k, p=x$p, mf.h=x$mf.h[[2]], beta=FALSE) crit <- qt(level/2, df=ddf, lower.tail=FALSE) } se <- sqrt(diag(vpred)) pi.lb <- c(pred - crit * se) pi.ub <- c(pred + crit * se) pred <- data.frame(intrcpt=pred, se=se, pi.lb=pi.lb, pi.ub=pi.ub) nvars <- ncol(x$mf.h) if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { r.names <- paste(formatC(x$ids[x$not.na], format="f", digits=0, width=max(nchar(x$ids[x$not.na]))), x$mf.h[[nvars]], sep=" | ") } else { #r.names <- paste(x$mf.h[[1]], x$mf.h[[2]], sep=" | ") r.names <- paste(sprintf(paste0("%", max(nchar(paste(x$mf.h[[1]]))), "s", collapse=""), x$mf.h[[1]]), x$mf.h[[nvars]], sep=" | ") } is.dup <- duplicated(r.names) pred <- pred[!is.dup,] rownames(pred) <- r.names[!is.dup] if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYBM","PHYPL","PHYPD","GEN","GDIAG"))) { #r.order <- order(x$mf.h[[nvars]][!is.dup], seq_len(x$k)[!is.dup]) r.order <- seq_len(x$k) } else { r.order <- order(x$mf.h[[2]][!is.dup], x$mf.h[[1]][!is.dup]) } pred <- pred[r.order,] out <- c(out, list(pred)) #names(out)[length(out)] <- paste(x$h.names, collapse=" | ") names(out)[length(out)] <- paste0(x$formulas[[2]], collapse="") if (verbose) message(mstyle$message("Done!")) } } if (verbose) cat("\n") ######################################################################### ### if requested, apply transformation function if (is.function(transf)) { if (is.null(targs)) { out <- lapply(out, transf) } else { out <- lapply(out, transf, targs) } out <- lapply(out, function(x) x[,-2,drop=FALSE]) transf <- TRUE } ### make sure order of intervals is always increasing #tmp <- .psort(pi.lb, pi.ub) #pi.lb <- tmp[,1] #pi.ub <- tmp[,2] ######################################################################### if (is.null(out)) { return() } else { return(out) } } metafor/R/rstudent.rma.peto.r0000644000176200001440000000440413770377226015707 0ustar liggesusersrstudent.rma.peto <- function(model, digits, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.peto") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ddd <- list(...) 
.chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### delpred <- rep(NA_real_, x$k.f) vdelpred <- rep(NA_real_, x$k.f) ### note: skipping NA tables if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!x$not.na[i]) next res <- try(suppressWarnings(rma.peto(ai=x$ai.f, bi=x$bi.f, ci=x$ci.f, di=x$di.f, add=x$add, to=x$to, drop00=x$drop00, level=x$level, subset=-i)), silent=TRUE) if (inherits(res, "try-error")) next delpred[i] <- res$beta vdelpred[i] <- res$vb } if (progbar) pbapply::closepb(pbar) resid <- x$yi.f - delpred resid[abs(resid) < 100 * .Machine$double.eps] <- 0 #resid[abs(resid) < 100 * .Machine$double.eps * median(abs(resid), na.rm=TRUE)] <- 0 ### see lm.influence seresid <- sqrt(x$vi.f + vdelpred) stresid <- resid / seresid ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na.yivi], se=seresid[x$not.na.yivi], z=stresid[x$not.na.yivi]) out$slab <- x$slab[x$not.na.yivi] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na.yivi)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- "list.rma" return(out) } metafor/R/replmiss.r0000644000176200001440000000113013634631621014131 0ustar liggesusersreplmiss <- function(x, y) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (length(y) == 0L) y <- NA ### catch cases where x is of length 0 if (length(x) == 0L) return(x) ### in case user specifies a constant to use for replacement if (length(y) == 1L) y <- rep(y, length(x)) ### check that x and y are of the same length if (length(x) != length(y)) stop(mstyle$stop("Length of 'x' and 'y' is not the same.")) #x <- ifelse(is.na(x), y, x) # this is quite a bit slower than the following x[is.na(x)] <- y[is.na(x)] return(x) } metafor/R/plot.profile.rma.r0000644000176200001440000000517414036272247015504 0ustar liggesusersplot.profile.rma <- function(x, xlim, ylim, pch=19, xlab, ylab, main, cline=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="profile.rma") if (dev.cur() == 1) { par(mfrow=c(x$comps, 1)) #on.exit(par(mfrow=c(1,1))) } missing.xlim <- missing(xlim) missing.ylim <- missing(ylim) missing.xlab <- missing(xlab) missing.ylab <- missing(ylab) missing.main <- missing(main) ### filter out some arguments for the plot() function lplot <- function(..., time, LB, startmethod, sub1) plot(...) ######################################################################### if (x$comps == 1) { if (missing.xlim) xlim <- x$xlim if (missing.ylim) ylim <- x$ylim if (missing.xlab) xlab <- x$xlab if (missing.ylab) ylab <- paste(ifelse(x$method=="REML", "Restricted ", ""), "Log-Likelihood", sep="") if (missing.main) main <- x$title if (min(x[[1]]) <= x$vc && max(x[[1]]) >= x$vc) { pos <- which(x[[1]] >= x$vc)[1] x[[1]] <- c(x[[1]][seq_len(pos-1)], x$vc, x[[1]][pos:length(x[[1]])]) x[[2]] <- c(x[[2]][seq_len(pos-1)], x$maxll, x[[2]][pos:length(x[[2]])]) } lplot(x[[1]], x[[2]], type="o", xlab=xlab, ylab=ylab, main=main, bty="l", pch=pch, xlim=xlim, ylim=ylim, ...) 
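### reference lines below: the vertical dotted line marks the estimate of the
### variance component, the horizontal dotted line the maximum (restricted)
### log-likelihood; with cline=TRUE, a further horizontal line marks the
### chi-square(1) based cutoff corresponding to a 95% profile likelihood CI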
abline(v=x$vc, lty="dotted") abline(h=x$maxll, lty="dotted") if (cline) abline(h=x$maxll - qchisq(0.95, df=1)/2, lty="dotted") } else { for (j in seq_len(x$comps)) { if (missing.xlim) xlim <- x[[j]]$xlim if (missing.ylim) ylim <- x[[j]]$ylim if (missing.xlab) { xlab <- x[[j]]$xlab } else { if (length(xlab) == 1L) { xlab <- rep(xlab, x$comps) } } if (missing.ylab) { ylab <- paste(ifelse(x[[j]]$method=="REML", "Restricted ", ""), "Log-Likelihood", sep="") } else { if (length(ylab) == 1L) { ylab <- rep(ylab, x$comps) } } if (missing.main) { main <- x[[j]]$title } else { if (length(main) == 1L) { main <- rep(main, x$comps) } } lplot(x[[j]], xlim=xlim, ylim=ylim, pch=pch, xlab=if (missing.xlab) xlab else xlab[j], ylab=if (missing.ylab) ylab else ylab[j], main=if (missing.main) main else main[j], cline=cline, ...) } } } metafor/R/blup.rma.uni.r0000644000176200001440000000626514036322704014617 0ustar liggesusersblup.rma.uni <- function(x, level, digits, transf, targs, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav="rma.uni.selmodel") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) if (is.element(x$test, c("knha","adhoc","t"))) { crit <- qt(level/2, df=x$ddf, lower.tail=FALSE) } else { crit <- qnorm(level/2, lower.tail=FALSE) } ### TODO: check computations for user-defined weights if (!is.null(x$weights) || !x$weighted) stop(mstyle$stop("Extraction of random effects not available for models with non-standard weights.")) ######################################################################### pred <- rep(NA_real_, x$k.f) vpred <- rep(NA_real_, x$k.f) ### see Appendix in: Raudenbush, S. W., & Bryk, A. S. (1985). Empirical ### Bayes meta-analysis. 
Journal of Educational Statistics, 10(2), 75-98 li <- x$tau2.f / (x$tau2.f + x$vi.f) for (i in seq_len(x$k.f)[x$not.na]) { ### note: skipping NA cases Xi <- matrix(x$X.f[i,], nrow=1) pred[i] <- li[i] * x$yi.f[i] + (1 - li[i]) * Xi %*% x$beta vpred[i] <- li[i] * x$vi.f[i] + (1 - li[i])^2 * Xi %*% tcrossprod(x$vb,Xi) } se <- sqrt(vpred) pi.lb <- pred - crit * se pi.ub <- pred + crit * se ######################################################################### ### if requested, apply transformation function to 'pred' and interval bounds if (is.function(transf)) { if (is.null(targs)) { pred <- sapply(pred, transf) se <- rep(NA,x$k.f) pi.lb <- sapply(pi.lb, transf) pi.ub <- sapply(pi.ub, transf) } else { pred <- sapply(pred, transf, targs) se <- rep(NA,x$k.f) pi.lb <- sapply(pi.lb, transf, targs) pi.ub <- sapply(pi.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(pi.lb, pi.ub) pi.lb <- tmp[,1] pi.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(pred=pred[x$not.na], se=se[x$not.na], pi.lb=pi.lb[x$not.na], pi.ub=pi.ub[x$not.na]) out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(pred=pred, se=se, pi.lb=pi.lb, pi.ub=pi.ub) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) ######################################################################### out$digits <- digits out$transf <- transf class(out) <- "list.rma" return(out) } metafor/R/leave1out.rma.uni.r0000644000176200001440000001014414046526731015560 0ustar liggesusersleave1out.rma.uni <- function(x, digits, transf, targs, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!x$int.only) stop(mstyle$stop("Method only applicable for models without moderators.")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) 
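### Hedged usage sketch for this function (not from the package documentation;
### 'dat' with columns 'yi' and 'vi' is hypothetical): leave-one-out
### diagnostics for a random-effects model fitted without moderators.
# res <- rma(yi, vi, data=dat)
# leave1out(res)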
.chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### beta <- rep(NA_real_, x$k.f) se <- rep(NA_real_, x$k.f) zval <- rep(NA_real_, x$k.f) pval <- rep(NA_real_, x$k.f) ci.lb <- rep(NA_real_, x$k.f) ci.ub <- rep(NA_real_, x$k.f) QE <- rep(NA_real_, x$k.f) QEp <- rep(NA_real_, x$k.f) tau2 <- rep(NA_real_, x$k.f) I2 <- rep(NA_real_, x$k.f) H2 <- rep(NA_real_, x$k.f) ### note: skipping NA cases ### also: it is possible that model fitting fails, so that generates more NAs (these NAs will always be shown in output) if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!x$not.na[i]) next res <- try(suppressWarnings(rma.uni(x$yi.f, x$vi.f, weights=x$weights.f, intercept=TRUE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, subset=-i, skipr2=TRUE)), silent=TRUE) if (inherits(res, "try-error")) next beta[i] <- res$beta se[i] <- res$se zval[i] <- res$zval pval[i] <- res$pval ci.lb[i] <- res$ci.lb ci.ub[i] <- res$ci.ub QE[i] <- res$QE QEp[i] <- res$QEp tau2[i] <- res$tau2 I2[i] <- res$I2 H2[i] <- res$H2 } if (progbar) pbapply::closepb(pbar) ######################################################################### ### if requested, apply transformation function if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(estimate=beta[x$not.na], se=se[x$not.na], zval=zval[x$not.na], pval=pval[x$not.na], ci.lb=ci.lb[x$not.na], ci.ub=ci.ub[x$not.na], Q=QE[x$not.na], Qp=QEp[x$not.na], tau2=tau2[x$not.na], I2=I2[x$not.na], H2=H2[x$not.na]) out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(estimate=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, Q=QE, Qp=QEp, tau2=tau2, I2=I2, H2=H2) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) if (is.element(x$test, c("knha","adhoc","t"))) names(out)[3] <- "tval" ### remove tau2 for FE/EE/CE models if (is.element(x$method, c("FE","EE","CE"))) out <- out[-9] out$digits <- digits out$transf <- transf if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- "list.rma" return(out) } metafor/R/permutest.r0000644000176200001440000000007013457322061014323 0ustar liggesuserspermutest <- function(x, ...) UseMethod("permutest") metafor/R/radial.r0000644000176200001440000000007713457322061013536 0ustar liggesusersradial <- galbraith <- function(x, ...) 
UseMethod("radial") metafor/R/misc.func.hidden.r0000644000176200001440000012160714052417722015425 0ustar liggesusers############################################################################ ### function to set default 'btt' value(s) or check specified 'btt' values .set.btt <- function(btt, p, int.incl, Xnames) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (missing(btt) || is.null(btt)) { if (p > 1L) { ### if the model matrix has more than one column if (int.incl) { btt <- seq.int(from=2, to=p) ### and the model has an intercept term, test all coefficients except the intercept } else { btt <- seq_len(p) ### and the model does not have an intercept term, test all coefficients } } else { btt <- 1 ### if the model matrix has a single column, test that single coefficient } } else { if (is.character(btt)) { btt <- grep(btt, Xnames) if (length(btt) == 0L) stop(mstyle$stop("Cannot identify coefficient(s) corresponding to the specified 'btt' string.")) } else { ### round, take unique values, and sort btt <- sort(unique(round(btt))) ### check for mix of positive and negative values if (any(btt < 0) && any(btt > 0)) stop(mstyle$stop("Cannot mix positive and negative 'btt' values.")) ### keep/remove from 1:p vector as specified btt <- seq_len(p)[btt] ### (1:5)[5:6] yields c(5, NA) so remove NAs if this happens btt <- btt[!is.na(btt)] ### make sure that at least one valid value is left if (length(btt) == 0L) stop(mstyle$stop("Non-existent coefficients specified via 'btt'.")) } } return(btt) } ### function to format 'btt' values for printing .format.btt <- function(btt) { sav <- c() if (length(btt) > 1L) { while (length(btt) > 0L) { x <- rle(diff(btt)) if (x$values[1] == 1 && length(x$values) != 0L) { sav <- c(sav, c(btt[1], ":", btt[x$lengths[1] + 1])) btt <- btt[-c(1:(x$lengths[1] + 1))] #sav <- c(sav, ", ") # this adds a space between multiple a:b sets sav <- c(sav, ",") } else { sav <- c(sav, btt[1], ",") btt <- btt[-1] } } sav <- paste0(sav[-length(sav)], collapse="") } else { sav <- paste0(btt) } return(sav) } ############################################################################ ### pairwise sorting of the elements of two vectors .psort <- function(x,y) { ### t(apply(xy, 1, sort)) would be okay, but problematic if there are NAs; ### either they are removed completely (na.last=NA) or they are always put ### first/last (na.last=FALSE/TRUE); but we just want to leave the NAs in ### their position! 
if (is.null(x) || length(x) == 0L) ### need to catch this return(NULL) if (missing(y)) { if (is.matrix(x)) { xy <- x } else { xy <- rbind(x) ### in case x is just a vector } } else { xy <- cbind(x,y) } n <- nrow(xy) for (i in seq_len(n)) { if (anyNA(xy[i,])) next xy[i,] <- sort(xy[i,]) } colnames(xy) <- NULL return(xy) } ############################################################################ ### function to obtain the trace of a matrix .tr <- function(X) return(sum(diag(X))) ### function to check if a matrix is square .is.square <- function(X) NROW(X) == NCOL(X) ### use NROW/NCOL to better deal with scalars; compare: ### (V <- list(matrix(1, nrow=2, ncol=2), 3, c(1,4), cbind(c(2,1)))); sapply(V, function(x) nrow(x) == ncol(x)); sapply(V, function(x) NROW(x) == NCOL(x)) ### function to test whether a vector is all equal to 1s (e.g., to find intercept(s) in a model matrix) .is.intercept <- function(x, eps=1e-08) return(all(abs(x - 1) < eps)) ### function to test whether a vector is a dummy variable (i.e., consists of only 0s and 1s) .is.dummy <- function(x, eps=1e-08) return(all(abs(x) < eps | abs(x - 1) < eps)) #return(all(sapply(x, identical, 0) | sapply(x, identical, 1))) ### function to test whether something is a vector (in the sense of being atomic, not a matrix, and not NULL) .is.vector <- function(x) is.atomic(x) && !is.matrix(x) && !is.null(x) ############################################################################ ### function to format p-values ### if showeq=FALSE, c(.001, .00001) becomes c("0.0010", "<.0001") ### if showeq=TRUE, c(.001, .00001) becomes c("=0.0010", "<.0001") ### if add0=FALSE, "<.0001"; if add0=TRUE, "<0.0001" .pval <- function(p, digits=4, showeq=FALSE, sep="", add0=FALSE) { digits <- max(digits, 1) cutoff <- paste(c(".", rep(0,digits-1),1), collapse="") ncutoff <- as.numeric(cutoff) ifelse(is.na(p), paste0(ifelse(showeq, "=", ""), sep, NA), ifelse(p >= ncutoff, paste0(ifelse(showeq, "=", ""), sep, formatC(p, digits=digits, format="f")), paste0("<", sep, ifelse(add0, "0", ""), cutoff))) } ### function to format/round values in general .fcf <- function(x, digits) { if (all(is.na(x))) { # since formatC(NA, format="f", digits=2) fails x } else { trimws(formatC(x, format="f", digits=digits)) } } ############################################################################ ### function to print a named (character) vector right aligned with ### a gap of two spaces between adjacent values and no padding .print.vector <- function(x) { if (is.null(names(x))) names(x) <- seq_along(x) len.n <- nchar(names(x)) len.x <- nchar(x, keepNA=FALSE) len.max <- pmax(len.n, len.x) format <- sapply(len.max, function(x) paste("%", x, "s", sep="")) row.n <- paste(sprintf(format, names(x)), collapse=" ") row.x <- paste(sprintf(format, x), collapse=" ") cat(row.n, "\n", row.x, "\n", sep="") } ############################################################################ ### function that prints the model fitting time .print.time <- function(x) { mstyle <- .get.mstyle("crayon" %in% .packages()) hours <- floor(x/60/60) minutes <- floor(x/60) - hours*60 seconds <- round(x - minutes*60 - hours*60*60, ifelse(x > 60, 0, 2)) cat("\n") cat(mstyle$message(paste("Processing time:", hours, ifelse(hours == 0 || hours > 1, "hours,", "hour,"), minutes, ifelse(minutes == 0 || minutes > 1, "minutes,", "minute,"), seconds, ifelse(x < 60 || seconds == 0 || seconds > 1, "seconds", "second")))) cat("\n") } ############################################################################ ### function like 
make.unique(), but starts at .1 for the first instance ### of a repeated element .make.unique <- function(x) { x <- as.character(x) ux <- unique(x) for (i in seq_along(ux)) { xiTF <- x == ux[i] xi <- x[xiTF] if (length(xi) == 1L) next x[xiTF] <- paste(xi, seq_along(xi), sep=".") } return(x) } ############################################################################ ### function to check if extra/superfluous arguments are specified via ... .chkdots <- function(ddd, okargs) { mstyle <- .get.mstyle("crayon" %in% .packages()) for (i in seq_along(okargs)) ddd[okargs[i]] <- NULL if (length(ddd) > 0L) warning(mstyle$warning(paste0("Extra argument", ifelse(length(ddd) > 1L, "s ", " "), "(", paste0("'", names(ddd), "'", collapse=", "), ") disregarded.")), call.=FALSE) } ############################################################################ .chkclass <- function(class, must, notap, notav, type="Method") { mstyle <- .get.mstyle("crayon" %in% .packages()) obj <- as.character(match.call()[2]) obj <- substr(obj, 7, nchar(obj)-1) if (!missing(must) && !is.element(must, class)) stop(mstyle$stop(paste0("Argument '", obj, "' must be an object of class \"", must, "\".")), call.=FALSE) if (!missing(notap) && any(is.element(notap, class))) stop(mstyle$stop(paste0(type, " not applicable to objects of class \"", class[1], "\".")), call.=FALSE) #stop(mstyle$stop(paste0("Method not applicable to objects of class \"", paste0(class, collapse=", "), "\".")), call.=FALSE) if (!missing(notav) && any(is.element(notav, class))) stop(mstyle$stop(paste0(type, " not available for objects of class \"", class[1], "\".")), call.=FALSE) #stop(mstyle$stop(paste0("Method not available for objects of class \"", paste0(class, collapse=", "), "\".")), call.=FALSE) } ############################################################################ ### set axis label (for forest, funnel, and labbe functions) .setlab <- function(measure, transf.char, atransf.char, gentype, short=FALSE) { if (gentype == 1) lab <- "Observed Outcome" if (gentype == 2) lab <- "Overall Estimate" # for forest.cumul.rma() function if (gentype == 3) lab <- "Estimate" # for header ######################################################################### if (!is.null(measure)) { ###################################################################### if (is.element(measure, c("RR","MPRR"))) { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[RR]", "Log Risk Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Risk Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Risk Ratio", "Risk Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Risk Ratio", "Risk Ratio") } } if (is.element(measure, c("OR","PETO","D2OR","D2ORN","D2ORL","MPOR","MPORC","MPPETO"))) { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[OR]", "Log Odds Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Odds Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Odds Ratio", "Odds Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Odds Ratio", "Odds Ratio") } } if (is.element(measure, c("RD","MPRD"))) { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Risk Difference", "Risk Difference") } else { lab <- ifelse(short, lab, "Transformed Risk Difference") } } if (measure == "AS") { if (transf.char == "FALSE" 
&& atransf.char == "FALSE") { lab <- ifelse(short, "Arcsine RD", "Arcsine Transformed Risk Difference") } else { lab <- ifelse(short, lab, "Transformed Arcsine Transformed Risk Difference") } } if (measure == "PHI") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Phi", "Phi Coefficient") } else { lab <- ifelse(short, lab, "Transformed Phi Coefficient") } } if (measure == "YUQ") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Yule's Q", "Yule's Q") } else { lab <- ifelse(short, lab, "Transformed Yule's Q") } } if (measure == "YUY") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Yule's Y", "Yule's Y") } else { lab <- ifelse(short, lab, "Transformed Yule's Y") } } ###################################################################### if (measure == "IRR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[IRR]", "Log Incidence Rate Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Incidence Rate Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Rate Ratio", "Incidence Rate Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Rate Ratio", "Incidence Rate Ratio") } } if (measure == "IRD") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "IRD", "Incidence Rate Difference") } else { lab <- ifelse(short, lab, "Transformed Incidence Rate Difference") } } if (measure == "IRSD") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "IRSD", "Square Root Transformed Incidence Rate Difference") } else { lab <- ifelse(short, lab, "Transformed Square Root Transformed Incidence Rate Difference") } } ###################################################################### if (measure == "MD") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "MD", "Mean Difference") } else { lab <- ifelse(short, lab, "Transformed Mean Difference") } } if (is.element(measure, c("SMD","SMDH","PBIT","OR2D","OR2DN","OR2DL","SMD1"))) { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "SMD", "Standardized Mean Difference") } else { lab <- ifelse(short, lab, "Transformed Standardized Mean Difference") } } if (measure == "ROM") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[RoM]", "Log Ratio of Means") } else { lab <- ifelse(short, lab, "Transformed Log Ratio of Means") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Ratio of Means", "Ratio of Means (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Ratio of Means", "Ratio of Means") } } if (measure == "RPB") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Correlation", "Point-Biserial Correlation") } else { lab <- ifelse(short, lab, "Transformed Point-Biserial Correlation") } } if (measure == "CVR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[CVR]", "Log Coefficient of Variation Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Coefficient of Variation Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "CVR", "Coefficient of Variation Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "CVR", "Coefficient of Variation Ratio") } } if (measure == 
"VR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[VR]", "Log Variability Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Variability Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "VR", "Variability Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "VR", "Variability Ratio") } } ###################################################################### if (is.element(measure, c("COR","UCOR","RTET","RBIS"))) { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Correlation", "Correlation Coefficient") } else { lab <- ifelse(short, lab, "Transformed Correlation Coefficient") } } if (measure == "ZCOR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, expression('Fisher\'s ' * z[r]), "Fisher's z Transformed Correlation Coefficient") } else { lab <- ifelse(short, lab, "Transformed Fisher's z Transformed Correlation Coefficient") if (atransf.char == "transf.ztor" || atransf.char == "transf.ztor.int") lab <- ifelse(short, "Correlation", "Correlation Coefficient") if (transf.char == "transf.ztor" || transf.char == "transf.ztor.int") lab <- ifelse(short, "Correlation", "Correlation Coefficient") } } ###################################################################### if (measure == "PCOR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Correlation", "Partial Correlation Coefficient") } else { lab <- ifelse(short, lab, "Transformed Partial Correlation Coefficient") } } if (measure == "ZPCOR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, expression('Fisher\'s ' * z[r]), "Fisher's z Transformed Partial Correlation Coefficient") } else { lab <- ifelse(short, lab, "Transformed Fisher's z Transformed Partial Correlation Coefficient") if (atransf.char == "transf.ztor" || atransf.char == "transf.ztor.int") lab <- ifelse(short, "Correlation", "Partial Correlation Coefficient") if (transf.char == "transf.ztor" || transf.char == "transf.ztor.int") lab <- ifelse(short, "Correlation", "Partial Correlation Coefficient") } } if (measure == "SPCOR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Correlation", "Semi-Partial Correlation Coefficient") } else { lab <- ifelse(short, lab, "Transformed Semi-Partial Correlation Coefficient") } } ###################################################################### if (measure == "PR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Proportion", "Proportion") } else { lab <- ifelse(short, lab, "Transformed Proportion") } } if (measure == "PLN") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[Pr]", "Log Proportion") } else { lab <- ifelse(short, lab, "Transformed Log Proportion") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Proportion", "Proportion (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Proportion", "Proportion") } } if (measure == "PLO") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[Odds]", "Log Odds") } else { lab <- ifelse(short, lab, "Transformed Log Odds") if (atransf.char == "transf.ilogit" || atransf.char == "transf.ilogit.int" || atransf.char == "plogis") lab <- ifelse(short, "Proportion", "Proportion (logit scale)") if (transf.char == "transf.ilogit" || 
transf.char == "transf.ilogit.int" || transf.char == "plogis") lab <- ifelse(short, "Proportion", "Proportion") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Odds", "Odds (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Odds", "Odds") } } if (measure == "PAS") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, expression(arcsin(sqrt(p))), "Arcsine Transformed Proportion") } else { lab <- ifelse(short, lab, "Transformed Arcsine Transformed Proportion") if (atransf.char == "transf.iarcsin" || atransf.char == "transf.iarcsin.int") lab <- ifelse(short, "Proportion", "Proportion (arcsine scale)") if (transf.char == "transf.iarcsin" || transf.char == "transf.iarcsin.int") lab <- ifelse(short, "Proportion", "Proportion") } } if (measure == "PFT") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "PFT", "Double Arcsine Transformed Proportion") } else { lab <- ifelse(short, lab, "Transformed Double Arcsine Transformed Proportion") if (atransf.char == "transf.ipft.hm") lab <- ifelse(short, "Proportion", "Proportion") if (transf.char == "transf.ipft.hm") lab <- ifelse(short, "Proportion", "Proportion") } } ###################################################################### if (measure == "IR") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Rate", "Incidence Rate") } else { lab <- ifelse(short, lab, "Transformed Incidence Rate") } } if (measure == "IRLN") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[IR]", "Log Incidence Rate") } else { lab <- ifelse(short, lab, "Transformed Log Incidence Rate") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Rate", "Incidence Rate (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Rate", "Incidence Rate") } } if (measure == "IRS") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Sqrt[IR]", "Square Root Transformed Incidence Rate") } else { lab <- ifelse(short, lab, "Transformed Square Root Transformed Incidence Rate") if (atransf.char == "transf.isqrt" || atransf.char == "transf.isqrt.int") lab <- ifelse(short, "Rate", "Incidence Rate (square root scale)") if (transf.char == "transf.isqrt" || transf.char == "transf.isqrt.int") lab <- ifelse(short, "Rate", "Incidence Rate") } } if (measure == "IRFT") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "IRFT", "Freeman-Tukey Transformed Incidence Rate") } else { lab <- ifelse(short, lab, "Transformed Freeman-Tukey Transformed Incidence Rate") } } ###################################################################### if (measure == "MN") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Mean", "Mean") } else { lab <- ifelse(short, lab, "Transformed Mean") } } if (measure == "MNLN") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[Mean]", "Log Mean") } else { lab <- ifelse(short, lab, "Transformed Log Mean") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Mean", "Mean (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Mean", "Mean") } } if (measure == "CVLN") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[CV]", "Log Coefficient of Variation") } else { lab <- ifelse(short, lab, 
"Transformed Log Coefficient of Variation") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "CV", "Coefficient of Variation (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "CV", "Coefficient of Variation") } } if (measure == "SDLN") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[SD]", "Log Standard Deviation") } else { lab <- ifelse(short, lab, "Transformed Log Standard Deviation") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "SD", "Standard Deviation (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "SD", "Standard Deviation") } } ###################################################################### if (measure == "MC") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Mean Change", "Mean Change") } else { lab <- ifelse(short, lab, "Transformed Mean Change") } } if (is.element(measure, c("SMCC","SMCR","SMCRH"))) { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "SMC", "Standardized Mean Change") } else { lab <- ifelse(short, lab, "Transformed Standardized Mean Change") } } if (measure == "ROMC") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[RoM]", "Log Ratio of Means") } else { lab <- ifelse(short, lab, "Transformed Log Ratio of Means") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "Ratio of Means", "Ratio of Means (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "Ratio of Means", "Ratio of Means") } } if (measure == "CVRC") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[CVR]", "Log Coefficient of Variation Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Coefficient of Variation Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "CVR", "Coefficient of Variation Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "CVR", "Coefficient of Variation Ratio") } } if (measure == "VRC") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Log[VR]", "Log Variability Ratio") } else { lab <- ifelse(short, lab, "Transformed Log Variability Ratio") if (atransf.char == "exp" || atransf.char == "transf.exp.int") lab <- ifelse(short, "VR", "Variability Ratio (log scale)") if (transf.char == "exp" || transf.char == "transf.exp.int") lab <- ifelse(short, "VR", "Variability Ratio") } } ###################################################################### if (measure == "ARAW") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, "Alpha", "Cronbach's alpha") } else { lab <- ifelse(short, lab, "Transformed Cronbach's alpha") } } if (measure == "AHW") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, expression('Alpha'[HW]), "Transformed Cronbach's alpha") } else { lab <- ifelse(short, lab, "Transformed Cronbach's alpha") if (atransf.char == "transf.iahw") lab <- ifelse(short, "Alpha", "Cronbach's alpha") if (transf.char == "transf.iahw") lab <- ifelse(short, "Alpha", "Cronbach's alpha") } } if (measure == "ABT") { if (transf.char == "FALSE" && atransf.char == "FALSE") { lab <- ifelse(short, expression('Alpha'[B]), "Transformed Cronbach's alpha") } else { lab <- ifelse(short, lab, "Transformed 
Cronbach's alpha") if (atransf.char == "transf.iabt") lab <- ifelse(short, "Alpha", "Cronbach's alpha") if (transf.char == "transf.iabt") lab <- ifelse(short, "Alpha", "Cronbach's alpha") } } ###################################################################### } return(lab) } ############################################################################ ### stuff related to colored/styled output .get.mstyle <- function(withcrayon) { if (withcrayon) { if (exists(".mstyle")) { .mstyle <- get(".mstyle") if (!is.list(.mstyle)) .mstyle <- list(.mstyle) } else { .mstyle <- list() } if (is.null(.mstyle$section)) { section <- crayon::bold } else { section <- .mstyle$section } if (is.null(.mstyle$header)) { header <- crayon::underline } else { header <- .mstyle$header } if (is.null(.mstyle$body)) { body <- crayon::reset } else { body <- .mstyle$body } if (is.null(.mstyle$text)) { text <- crayon::reset } else { text <- .mstyle$text } if (is.null(.mstyle$result)) { result <- crayon::reset } else { result <- .mstyle$result } if (is.null(.mstyle$stop)) { stop <- crayon::combine_styles(crayon::red, crayon::bold) } else { stop <- .mstyle$stop } if (is.null(.mstyle$warning)) { warning <- crayon::yellow } else { warning <- .mstyle$warning } if (is.null(.mstyle$message)) { message <- crayon::green } else { message <- .mstyle$message } if (is.null(.mstyle$verbose)) { verbose <- crayon::cyan } else { verbose <- .mstyle$verbose } if (is.null(.mstyle$legend)) { legend <- crayon::silver } else { legend <- .mstyle$legend } } else { tmp <- function(...) paste0(...) section <- tmp header <- tmp body <- tmp text <- tmp result <- tmp stop <- tmp warning <- tmp message <- tmp verbose <- tmp legend <- tmp } return(list(section=section, header=header, body=body, text=text, result=result, stop=stop, warning=warning, message=message, verbose=verbose, legend=legend)) } .print.output <- function(x, mstyle) { if (missing(mstyle)) { for (i in seq_along(x)) { cat(x[i], "\n") } } else { for (i in seq_along(x)) { cat(mstyle(x[i]), "\n") } } } .print.table <- function(x, mstyle) { is.header <- !grepl(" [-0-9]", x) for (i in seq_along(x)) { if (is.header[i]) { x[i] <- trimws(x[i], which="right") x[i] <- mstyle$header(x[i]) } else { x[i] <- mstyle$body(x[i]) } cat(x[i], "\n") } } #.set.mstyle.1 <- parse(text=".mstyle <- list(section=make_style(\"gray90\")$bold, header=make_style(\"skyblue1\")$bold$underline, body=make_style(\"skyblue2\"), text=make_style(\"slateblue3\"), result=make_style(\"slateblue1\"))") #eval(metafor:::.set.mstyle.1) ############################################################################ .set.digits <- function(digits, dmiss) { res <- c(est=4, se=4, test=4, pval=4, ci=4, var=4, sevar=4, fit=4, het=4) if (exists(".digits")) { .digits <- get(".digits") if (is.null(names(.digits)) && length(.digits) == 1L) { # if .digits is a single unnamed scalar, set all digit values to that value res <- c(est=.digits, se=.digits, test=.digits, pval=.digits, ci=.digits, var=.digits, sevar=.digits, fit=.digits, het=.digits) } else if (any(names(.digits) != "") && any(names(.digits) == "")) { # if .digits has (at least) one unnamed element, use it to set all unnamed elements to that digits value pos <- pmatch(names(.digits), names(res)) res[c(na.omit(pos))] <- .digits[!is.na(pos)] otherval <- .digits[names(.digits) == ""][1] res[(1:9)[-c(na.omit(pos))]] <- otherval } else { pos <- pmatch(names(.digits), names(res)) res[c(na.omit(pos))] <- .digits[!is.na(pos)] } } if (!dmiss) { if (is.null(names(digits))) { res <- 
c(est=digits[[1]], se=digits[[1]], test=digits[[1]], pval=digits[[1]], ci=digits[[1]], var=digits[[1]], sevar=digits[[1]], fit=digits[[1]], het=digits[[1]]) } else { pos <- pmatch(names(digits), names(res)) res[c(na.omit(pos))] <- digits[!is.na(pos)] } } ### p-values are always given to at least 2 digits if (res["pval"] <= 1) res["pval"] <- 2 res } .get.digits <- function(digits, xdigits, dmiss) { res <- xdigits if (exists(".digits")) { .digits <- get(".digits") pos <- pmatch(names(.digits), names(res)) res[c(na.omit(pos))] <- .digits[!is.na(pos)] } if (!dmiss) { if (is.null(names(digits))) { res <- c(est=digits[[1]], se=digits[[1]], test=digits[[1]], pval=digits[[1]], ci=digits[[1]], var=digits[[1]], sevar=digits[[1]], fit=digits[[1]], het=digits[[1]]) } else { pos <- pmatch(names(digits), names(res)) res[c(na.omit(pos))] <- digits[!is.na(pos)] } } ### so we can still print objects created with older metafor versions (where xdigit is just an unnamed scalar) if (length(res) == 1L && is.null(names(res))) res <- c(est=res[[1]], se=res[[1]], test=res[[1]], pval=res[[1]], ci=res[[1]], var=res[[1]], sevar=res[[1]], fit=res[[1]], het=res[[1]]) ### p-values are always given to at least 2 digits if (!is.null(res["pval"]) && res["pval"] <= 1) res["pval"] <- 2 res } ############################################################################ ### check if x is logical and TRUE/FALSE (NAs and NULL always evaluate as FALSE) .isTRUE <- function(x) !is.null(x) && is.logical(x) && !is.na(x) && x .isFALSE <- function(x) !is.null(x) && is.logical(x) && !is.na(x) && !x ############################################################################ ### to register getfit method for 'rma.uni' and 'rma.mv' objects: eval(metafor:::.glmulti) .glmulti <- parse(text=" if (!(\"glmulti\" %in% .packages())) stop(\"Must load the 'glmulti' package first to use this code.\") setOldClass(\"rma.uni\") setMethod(\"getfit\", \"rma.uni\", function(object, ...) { if (object$test==\"z\") { cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=Inf) } else { cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=object$k-object$p) } }) setOldClass(\"rma.mv\") setMethod(\"getfit\", \"rma.mv\", function(object, ...) { if (object$test==\"z\") { cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=Inf) } else { cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=object$k-object$p) } }) setOldClass(\"rma.glmm\") setMethod(\"getfit\", \"rma.glmm\", function(object, ...) { if (object$test==\"z\") { cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=Inf) } else { cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=object$k-object$p) } }) ") ### helper functions to make MuMIn work together with metafor .MuMIn <- parse(text=" makeArgs.rma <- function (obj, termNames, comb, opt, ...) { ret <- MuMIn:::makeArgs.default(obj, termNames, comb, opt) names(ret)[1L] <- \"mods\" ret } coefTable.rma <- function (model, ...) { MuMIn:::.makeCoefTable(model$b, model$se, coefNames = rownames(model$b)) } ") ### helper functions to make mice work together with metafor (note: no longer ### needed, as there are glance and tidy methods for rma objects in broom now) .mice <- parse(text=" glance.rma <- function (x, ...) data.frame(df.residual=df.residual(x)) tidy.rma <- function (x, ...) 
{ ret <- coef(summary(x)) colnames(ret)[2] <- \"std.error\" ret$term <- rownames(ret) return(ret) } ") ############################################################################ ### shorten a string vector so that elements remain distinguishable .shorten <- function(x, minlen) { y <- x x <- c(na.omit(x)) n <- length(unique(x)) maxlen <- max(nchar(unique(x))) for (l in 1:maxlen) { tab <- table(x, substr(x, 1, l)) if (nrow(tab) == n && ncol(tab) == n && sum(tab[upper.tri(tab)]) == 0 && sum(tab[lower.tri(tab)]) == 0) break } if (!missing(minlen) && l < minlen) { if (minlen > maxlen) minlen <- maxlen l <- minlen } return(substr(y, 1, l)) } ############################################################################ ### simplified version of what mvtnorm::rmvnorm() does .mvrnorm <- function(n, mu, Sigma) { p <- nrow(Sigma) eS <- eigen(Sigma, symmetric = TRUE) eval <- eS$values evec <- eS$vectors Y <- matrix(rnorm(p * n), nrow = n, byrow = TRUE) %*% t(evec %*% (t(evec) * sqrt(pmax(eval, 0)))) Y <- sweep(Y, 2, mu, "+") return(Y) } ############################################################################ .setnafalse <- function(x, arg="subset", k, stoponk0=TRUE) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (is.logical(x)) { if (anyNA(x)) x[is.na(x)] <- FALSE } if (is.numeric(x)) { if (anyNA(x)) x <- x[!is.na(x)] x <- as.integer(round(x)) x <- x[x != 0L] if (any(x > 0L) && any(x < 0L)) stop(mstyle$stop(paste0("Cannot mix positive and negative values for subsetting.")), call.=FALSE) if (all(x > 0L)) x <- is.element(seq_len(k), x) if (all(x < 0L)) x <- !is.element(seq_len(k), abs(x)) } if (stoponk0 && !any(x)) stop(mstyle$stop(paste0("Stopped because k = 0 after subsetting.")), call.=FALSE) #if (anyNA(x)) { # if (is.logical(x)) # x[is.na(x)] <- FALSE # if (is.numeric(x)) # x <- x[!is.na(x)] # #warning(mstyle$warning(paste0("Missing values in '", arg, "' argument treated as non-selected.")), call.=FALSE) #} return(x) } ############################################################################ # function to compute a weighted mean (this one works a bit different than # stats:::weighted.mean.default) .wmean <- function (x, w, na.rm=FALSE) { if (na.rm) { i <- !(is.na(x) | is.na(w)) x <- x[i] w <- w[i] } sum(x*w) / sum(w) } ############################################################################ .tes.intfun <- function(x, theta, tau, sei, H0, alternative, crit) { if (alternative == "two.sided") pow <- (pnorm(crit, mean=(x-H0)/sei, 1, lower.tail=FALSE) + pnorm(-crit, mean=(x-H0)/sei, 1, lower.tail=TRUE)) if (alternative == "greater") pow <- pnorm(crit, mean=(x-H0)/sei, 1, lower.tail=FALSE) if (alternative == "less") pow <- pnorm(crit, mean=(x-H0)/sei, 1, lower.tail=TRUE) res <- pow * dnorm(x, theta, tau) return(res) } .tes.lim <- function(theta, yi, vi, H0, alternative, alpha, tau2, test, tes.alternative, progbar, tes.alpha, correct, rel.tol, subdivisions, tau2.lb) { pval <- tes.default(x=yi, vi=vi, H0=H0, alternative=alternative, alpha=alpha, theta=theta, tau2=tau2, test=test, tes.alternative=tes.alternative, progbar=progbar, tes.alpha=tes.alpha, correct=correct, rel.tol=rel.tol, subdivisions=subdivisions, tau2.lb=tau2.lb, find.lim=FALSE)$pval #cat("theta = ", theta, " pval = ", pval, "\n") return(pval - tes.alpha) } ############################################################################ .fsn.fisher <- function(fsnum, pi, alpha) { k <- length(pi) X2 <- -2*sum(log(c(pi, rep(0.5, fsnum)))) return(pchisq(X2, df=2*(k+fsnum), lower.tail=FALSE) - alpha) } .fsn.fitre <- function(yi, vi) 
{ k <- length(yi) wi <- 1/vi sumwi <- sum(wi) est <- sum(wi*yi)/sumwi Q <- sum(wi * (yi - est)^2) tau2 <- max(0, (Q - (k-1)) / (sumwi - sum(wi^2)/sumwi)) wi <- 1 / (vi + tau2) sumwi <- sum(wi) est <- sum(wi*yi)/sumwi se <- sqrt(1 / sumwi) zval <- est / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) return(list(est=est, se=se, zval=zval, pval=pval, tau2=tau2)) } .fsn.fitnew <- function(new, yi, vi, vnew, tau2, alpha, iters) { new <- ceiling(new) mus <- rep(NA_real_, iters) pvals <- rep(NA_real_, iters) for (j in 1:iters) { yinew <- c(yi, rnorm(new, 0, sqrt(vnew+tau2))) vinew <- c(vi, rep(vnew, new)) tmp <- .fsn.fitre(yinew, vinew) mus[j] <- tmp$est pvals[j] <- tmp$pval } return(list(mean = mean(mus), rejrate = mean(pvals <= alpha))) } .fsn.re <- function(fsnum, yi, vi, vnew, tau2, target, alpha, iters, verbose=FALSE) { fsnum <- ceiling(fsnum) tmp <- .fsn.fitnew(fsnum, yi, vi, vnew, tau2, alpha, iters) est <- tmp$mean diff <- est - target if (verbose) cat("fsnum =", formatC(fsnum, width=4, format="d"), " est =", .fcf(est, 4), " target =", .fcf(target, 4), " diff =", formatC(diff, format="f", digits=4, flag=" "), "\n") return(diff) } ############################################################################ metafor/R/escalc.r0000644000176200001440000022235514043614060013534 0ustar liggesusersescalc <- function(measure, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, r2i, ni, yi, vi, sei, data, slab, subset, include, add=1/2, to="only0", drop00=FALSE, vtype="LS", var.names=c("yi","vi"), add.measure=FALSE, append=TRUE, replace=TRUE, digits, ...) { ### check argument specifications mstyle <- .get.mstyle("crayon" %in% .packages()) if (missing(measure) && missing(yi)) stop(mstyle$stop("Must specify an effect size or outcome measure via the 'measure' argument.")) if (!missing(yi) && missing(measure)) measure <- "GEN" if (!is.character(measure)) stop(mstyle$stop("The 'measure' argument must be a character string.")) if (!is.element(measure, c("RR","OR","PETO","RD","AS","PHI","YUQ","YUY","RTET", ### 2x2 table measures "PBIT","OR2D","OR2DN","OR2DL", ### - transformations to SMD "MPRD","MPRR","MPOR","MPORC","MPPETO", ### - measures for matched pairs / pre-post data "IRR","IRD","IRSD", ### two-group person-time data measures "MD","SMD","SMDH","ROM", ### two-group mean/SD measures "CVR","VR", ### coefficient of variation ratio, variability ratio "RPB","RBIS","D2OR","D2ORN","D2ORL", ### - transformations to r_PB, r_BIS, and log(OR) "COR","UCOR","ZCOR", ### correlations (raw and r-to-z transformed) "PCOR","ZPCOR","SPCOR", ### partial and semi-partial correlations "PR","PLN","PLO","PAS","PFT", ### single proportions (and transformations thereof) "IR","IRLN","IRS","IRFT", ### single-group person-time data (and transformations thereof) "MN","MNLN","CVLN","SDLN","SMD1", ### mean, log(mean), log(CV), log(SD), single-group SMD "MC","SMCC","SMCR","SMCRH","ROMC","CVRC","VRC", ### raw/standardized mean change, log(ROM), CVR, and VR for dependent samples "ARAW","AHW","ABT", ### alpha (and transformations thereof) "GEN"))) stop(mstyle$stop("Unknown 'measure' specified.")) if (!is.element(to, c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) if (any(!is.element(vtype, c("UB","LS","LS2","HO","ST","CS","AV","AVHO")), na.rm=TRUE)) ### vtype can be an entire vector, so use any() and na.rm=TRUE stop(mstyle$stop("Unknown 'vtype' argument specified.")) if (add.measure) { if (length(var.names) == 2L) var.names <- c(var.names, "measure") if 
(length(var.names) != 3L) stop(mstyle$stop("Argument 'var.names' must be of length 2 or 3.")) if (any(var.names != make.names(var.names, unique=TRUE))) { var.names <- make.names(var.names, unique=TRUE) warning(mstyle$warning(paste0("Argument 'var.names' does not contain syntactically valid variable names.\n Variable names adjusted to: var.names = c('", var.names[1], "', '", var.names[2], "', '", var.names[3], "')."))) } } else { if (length(var.names) == 3L) var.names <- var.names[1:2] if (length(var.names) != 2L) stop(mstyle$stop("Argument 'var.names' must be of length 2.")) if (any(var.names != make.names(var.names, unique=TRUE))) { var.names <- make.names(var.names, unique=TRUE) warning(mstyle$warning(paste0("Argument 'var.names' does not contain syntactically valid variable names.\n Variable names adjusted to: var.names = c('", var.names[1], "', '", var.names[2], "')."))) } } ### check if user is trying to use the 'formula interface' to escalc() ### note: if so, argument 'ai' may mistakenly be a formula, so check for that as well (further below) if (hasArg(formula) || hasArg(weights)) stop(mstyle$stop("The 'formula interface' to escalc() has been deprecated.")) ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("onlyo1", "addyi", "addvi")) ### set defaults or get onlyo1, addyi, and addvi arguments onlyo1 <- ifelse(is.null(ddd$onlyo1), FALSE, ddd$onlyo1) addyi <- ifelse(is.null(ddd$addyi), TRUE, ddd$addyi) addvi <- ifelse(is.null(ddd$addvi), TRUE, ddd$addvi) ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ### check if data argument has been specified if (missing(data)) data <- NULL ### need this at the end to check if append=TRUE can actually be done has.data <- !is.null(data) ### check if data argument has been specified if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### get slab and subset arguments (will be NULL when unspecified) mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] mf.include <- mf[[match("include", names(mf))]] slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) include <- eval(mf.include, data, enclos=sys.frame(sys.parent())) ### get yi (in case it has been specified) mf.yi <- mf[[match("yi", names(mf))]] yi <- eval(mf.yi, data, enclos=sys.frame(sys.parent())) ### for certain measures, set add=0 by default unless user explicitly sets the add argument addval <- mf[[match("add", names(mf))]] if (is.element(measure, c("AS","PHI","RTET","IRSD","PAS","PFT","IRS","IRFT")) && is.null(addval)) add <- 0 ######################################################################### ######################################################################### ######################################################################### if (is.null(yi)) { if (is.element(measure, c("RR","OR","RD","AS","PETO","PHI","YUQ","YUY","RTET","PBIT","OR2D","OR2DN","OR2DL","MPRD","MPRR","MPOR","MPORC","MPPETO"))) { mf.ai <- mf[[match("ai", names(mf))]] if (any("~" %in% as.character(mf.ai))) stop(mstyle$stop("The 'formula interface' to escalc() has been deprecated.")) mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, 
data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci k.all <- length(ai) if (length(ai)==0L || length(bi)==0L || length(ci)==0L || length(di)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(ai),length(bi),length(ci),length(di)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] } n1i <- ai + bi n2i <- ci + di if (any(c(ai > n1i, ci > n2i), na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(ai, bi, ci, di) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(c(n1i < 0, n2i < 0), na.rm=TRUE)) stop(mstyle$stop("One or more group sizes are < 0.")) ni.u <- ai + bi + ci + di ### unadjusted total sample sizes k <- length(ai) ### if drop00=TRUE, set counts to NA for studies that have no events (or all events) in both arms if (drop00) { id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } ### save unadjusted counts ai.u <- ai bi.u <- bi ci.u <- ci di.u <- di n1i.u <- ai + bi n2i.u <- ci + di if (to == "all") { ### always add to all cells in all studies ai <- ai + add ci <- ci + add if (!onlyo1) { bi <- bi + add di <- di + add } } if (to == "only0" || to == "if0all") { #if (onlyo1) { # id0 <- c(ai == 0L | ci == 0L) #} else { id0 <- c(ai == 0L | ci == 0L | bi == 0L | di == 0L) #} id0[is.na(id0)] <- FALSE } if (to == "only0") { ### add to cells in studies with at least one 0 entry ai[id0] <- ai[id0] + add ci[id0] <- ci[id0] + add if (!onlyo1) { bi[id0] <- bi[id0] + add di[id0] <- di[id0] + add } } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry if (any(id0)) { ai <- ai + add ci <- ci + add if (!onlyo1) { bi <- bi + add di <- di + add } } } ### recompute group and total sample sizes (after add/to adjustment) n1i <- ai + bi n2i <- ci + di ni <- n1i + n2i ### ni.u computed earlier is always the 'unadjusted' total sample size ### compute proportions for the two groups (unadjusted and adjusted) p1i.u <- ai.u/n1i.u p2i.u <- ci.u/n2i.u p1i <- ai/n1i p2i <- ci/n2i ### log risk ratios if (measure == "RR") { if (addyi) { yi <- log(p1i) - log(p2i) } else { yi <- log(p1i.u) - log(p2i.u) } if (addvi) { vi <- 1/ai - 1/n1i + 1/ci - 1/n2i } else { vi <- 1/ai.u - 1/n1i.u + 1/ci.u - 1/n2i.u } } ### log odds ratio if (is.element(measure, c("OR","OR2D","OR2DN","OR2DL"))) { if (addyi) { yi <- log(p1i/(1-p1i)) - log(p2i/(1-p2i)) } else { yi <- log(p1i.u/(1-p1i.u)) - log(p2i.u/(1-p2i.u)) } if (addvi) { vi <- 1/ai + 1/bi + 1/ci + 1/di } else { vi <- 1/ai.u + 1/bi.u + 1/ci.u + 1/di.u } } ### risk difference if (measure == "RD") { if (addyi) { yi <- p1i - p2i } else { yi <- p1i.u - p2i.u } if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (addvi) { mnwp1i <- .wmean(p1i, n1i, na.rm=TRUE) ### sample size weighted average of proportions mnwp2i <- .wmean(p2i, n2i, na.rm=TRUE) ### sample 
size weighted average of proportions } else { mnwp1i.u <- .wmean(p1i.u, n1i.u, na.rm=TRUE) ### sample size weighted average of proportions mnwp2i.u <- .wmean(p2i.u, n2i.u, na.rm=TRUE) ### sample size weighted average of proportions } if (!all(is.element(vtype, c("UB","LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'UB', 'LS', or 'AV'.")) for (i in seq_len(k)) { ### unbiased estimate of the sampling variance if (vtype[i] == "UB") { if (addvi) { vi[i] <- p1i[i]*(1-p1i[i])/(n1i[i]-1) + p2i[i]*(1-p2i[i])/(n2i[i]-1) } else { vi[i] <- p1i.u[i]*(1-p1i.u[i])/(n1i.u[i]-1) + p2i.u[i]*(1-p2i.u[i])/(n2i.u[i]-1) } } ### large sample approximation to the sampling variance if (vtype[i] == "LS") { if (addvi) { vi[i] <- p1i[i]*(1-p1i[i])/n1i[i] + p2i[i]*(1-p2i[i])/n2i[i] } else { vi[i] <- p1i.u[i]*(1-p1i.u[i])/n1i.u[i] + p2i.u[i]*(1-p2i.u[i])/n2i.u[i] } } ### estimator assuming homogeneity (using the average proportions) if (vtype[i] == "AV") { if (addvi) { vi[i] <- mnwp1i*(1-mnwp1i)/n1i[i] + mnwp2i*(1-mnwp2i)/n2i[i] } else { vi[i] <- mnwp1i.u*(1-mnwp1i.u)/n1i.u[i] + mnwp2i.u*(1-mnwp2i.u)/n2i.u[i] } } } } ### note: addyi and addvi only implemented for measures above ### log odds ratio (Peto's method) if (measure == "PETO") { xt <- ai + ci ### frequency of outcome1 in both groups combined yt <- bi + di ### frequency of outcome2 in both groups combined Ei <- xt * n1i / ni Vi <- xt * yt * (n1i/ni) * (n2i/ni) / (ni - 1) ### 0 when xt = 0 or yt = 0 in a table yi <- (ai - Ei) / Vi ### then yi and vi is Inf (set to NA at end) vi <- 1/Vi } ### arcsine square root risk difference if (measure == "AS") { yi <- asin(sqrt(p1i)) - asin(sqrt(p2i)) vi <- 1/(4*n1i) + 1/(4*n2i) } ### phi coefficient if (measure == "PHI") { yi <- (ai*di - bi*ci)/sqrt((ai+bi)*(ci+di)*(ai+ci)*(bi+di)) if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) q1i <- 1 - p1i q2i <- 1 - p2i pi1. <- (ai+bi)/ni pi2. 
<- (ci+di)/ni pi.1 <- (ai+ci)/ni pi.2 <- (bi+di)/ni if (!all(is.element(vtype, c("ST","LS","CS")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'ST', 'LS', or 'CS'.")) for (i in seq_len(k)) { ### estimate of the sampling variance for stratified sampling if (vtype[i] == "ST") { vi[i] <- ((n1i[i]+n2i[i])^2*(4*n1i[i]^3*p1i[i]^2*p2i[i]*q1i[i]^2*q2i[i] + 4*n2i[i]^3*p1i[i]*p2i[i]^2*q1i[i]*q2i[i]^2 + n1i[i]*n2i[i]^2*p2i[i]*q2i[i]*(p2i[i]*q1i[i] + p1i[i]*q2i[i])*(p2i[i]*q1i[i] + p1i[i]*(4*q1i[i] + q2i[i])) + n1i[i]^2*n2i[i]*p1i[i]*q1i[i]*(p2i[i]*q1i[i] + p1i[i]*q2i[i])*(p1i[i]*q2i[i] + p2i[i]*(q1i[i] + 4*q2i[i]))))/(4*(ai[i]+ci[i])^3*(bi[i]+di[i])^3) } ### estimate of the sampling variance for cross-sectional/multinomial sampling (equation in Yule, 1912, p.603) if (vtype[i] == "LS" || vtype[i] == "CS") { vi[i] <- 1/ni[i] * (1 - yi[i]^2 + yi[i]*(1+1/2*yi[i]^2) * (pi1.[i]-pi2.[i])*(pi.1[i]-pi.2[i]) / sqrt(pi1.[i]*pi2.[i]*pi.1[i]*pi.2[i]) - 3/4 * yi[i]^2 * ((pi1.[i]-pi2.[i])^2/(pi1.[i]*pi2.[i]) + (pi.1[i]-pi.2[i])^2/(pi.1[i]*pi.2[i]))) } } } ### Yule's Q (vi equation in Yule, 1900, p.285, and Yule, 1912, p.593) if (measure == "YUQ") { yi <- (ai/bi)/(ci/di) yi <- (yi-1)/(yi+1) vi <- 1/4 * (1-yi^2)^2 * (1/ai + 1/bi + 1/ci + 1/di) } ### Yule's Y (vi equation in Yule, 1912, p.593) if (measure == "YUY") { yi <- (ai/bi)/(ci/di) yi <- (sqrt(yi)-1)/(sqrt(yi)+1) vi <- 1/16 * (1-yi^2)^2 * (1/ai + 1/bi + 1/ci + 1/di) } ### tetrachoric correlation if (measure == "RTET") { ### TODO: allow user to set control arguments for pmvnorm and optimizers ### upgrade warnings to errors (so that tables with no events or only events are skipped) #warn.before <- getOption("warn") #options(warn = 2) yi <- rep(NA_real_, k) vi <- rep(NA_real_, k) for (i in seq_len(k)) { if (is.na(ai[i]) || is.na(bi[i]) || is.na(ci[i]) || is.na(di[i])) next res <- .rtet(ai[i], bi[i], ci[i], di[i], maxcor=.9999) yi[i] <- res$yi vi[i] <- res$vi } #options(warn = warn.before) } ### probit transformation to SMD if (measure == "PBIT") { z1i <- qnorm(p1i) z2i <- qnorm(p2i) yi <- z1i - z2i vi <- 2*pi*p1i*(1-p1i)*exp(z1i^2)/n1i + 2*pi*p2i*(1-p2i)*exp(z2i^2)/n2i ### from Sanchez-Meca et al. (2003) and Rosenthal (1994; Handbook chapter) } ### seems to be right for stratified and cross-sectional/multinomial sampling ### see code/probit_transformation directory ### log(OR) transformation to SMD based on logistic distribution if (is.element(measure, c("OR2D","OR2DL"))) { yi <- sqrt(3) / pi * yi vi <- 3 / pi^2 * vi } ### log(OR) transformation to SMD based on normal distribution (Cox & Snell method) if (measure == "OR2DN") { yi <- yi / 1.65 vi <- vi / 1.65^2 } if (is.element(measure, c("MPRD","MPRR","MPOR"))) { pi12 <- bi/ni pi21 <- ci/ni pi1. <- (ai+bi)/ni pi.1 <- (ai+ci)/ni } if (measure == "MPRD") { yi <- pi1. - pi.1 vi <- pi12*(1-pi12)/ni + 2*pi12*pi21/ni + pi21*(1-pi21)/ni } if (measure == "MPRR") { yi <- log(pi1.) - log(pi.1) vi <- (pi12 + pi21) / (ni * pi1. * pi.1) } if (measure == "MPOR") { yi <- log(pi1./(1-pi1.)) - log(pi.1/(1-pi.1)) vi <- (pi12*(1-pi12) + pi21*(1-pi21) + 2*pi12*pi21) / (ni * pi1.*(1-pi1.) * pi.1*(1-pi.1)) } if (measure == "MPORC") { yi <- log(bi) - log(ci) vi <- 1/bi + 1/ci } if (measure == "MPPETO") { Ei <- (bi + ci) / 2 Vi <- (bi + ci) / 4 yi <- (bi - Ei) / Vi vi <- 1/Vi } ### Note: Could in principle also compute measures commonly used in diagnostic studies. ### But need to take the sampling method into consideration when computing vi (so need ### to give this some more thought). 
### sensitivity #if (measure == "SENS") { # res <- escalc("PR", xi=ai, mi=ci, add=0, to="none", vtype=vtype) # yi <- res$yi # vi <- res$vi #} ### specificity #if (measure == "SPEC") { # res <- escalc("PR", xi=di, mi=bi, add=0, to="none", vtype=vtype) # yi <- res$yi # vi <- res$vi #} ### [...] } ###################################################################### if (is.element(measure, c("IRR","IRD","IRSD"))) { mf.x1i <- mf[[match("x1i", names(mf))]] mf.x2i <- mf[[match("x2i", names(mf))]] mf.t1i <- mf[[match("t1i", names(mf))]] mf.t2i <- mf[[match("t2i", names(mf))]] x1i <- eval(mf.x1i, data, enclos=sys.frame(sys.parent())) x2i <- eval(mf.x2i, data, enclos=sys.frame(sys.parent())) t1i <- eval(mf.t1i, data, enclos=sys.frame(sys.parent())) t2i <- eval(mf.t2i, data, enclos=sys.frame(sys.parent())) k.all <- length(x1i) if (length(x1i)==0L || length(x2i)==0L || length(t1i)==0L || length(t2i)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(x1i),length(x2i),length(t1i),length(t2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) x1i <- x1i[subset] x2i <- x2i[subset] t1i <- t1i[subset] t2i <- t2i[subset] } if (any(c(x1i, x2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(c(t1i, t2i) <= 0, na.rm=TRUE)) stop(mstyle$stop("One or more person-times are <= 0.")) ni.u <- t1i + t2i ### unadjusted total sample sizes k <- length(x1i) ### if drop00=TRUE, set counts to NA for studies that have no events in both arms if (drop00) { id00 <- c(x1i == 0L & x2i == 0L) id00[is.na(id00)] <- FALSE x1i[id00] <- NA x2i[id00] <- NA } ### save unadjusted counts x1i.u <- x1i x2i.u <- x2i if (to == "all") { ### always add to all cells in all studies x1i <- x1i + add x2i <- x2i + add } if (to == "only0" || to == "if0all") { id0 <- c(x1i == 0L | x2i == 0L) id0[is.na(id0)] <- FALSE } if (to == "only0") { ### add to cells in studies with at least one 0 entry x1i[id0] <- x1i[id0] + add x2i[id0] <- x2i[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry if (any(id0)) { x1i <- x1i + add x2i <- x2i + add } } ### compute rates for the two groups (unadjusted and adjusted) ### t1i and t2i are the total person-times in the 1st and 2nd group ir1i.u <- x1i.u/t1i ir2i.u <- x2i.u/t2i ir1i <- x1i/t1i ir2i <- x2i/t2i ### log incidence rate ratio if (measure == "IRR") { if (addyi) { yi <- log(ir1i) - log(ir2i) } else { yi <- log(ir1i.u) - log(ir2i.u) } if (addvi) { vi <- 1/x1i + 1/x2i #vi <- 1/(x1i+1/2) + 1/(x2i+1/2) } else { vi <- 1/x1i.u + 1/x2i.u } } ### incidence rate difference if (measure == "IRD") { if (addyi) { yi <- ir1i - ir2i } else { yi <- ir1i.u - ir2i.u } if (addvi) { vi <- ir1i/t1i + ir2i/t2i ### note: same as x1i/t1i^2 + x2i/t2i^2 } else { vi <- ir1i.u/t1i + ir2i.u/t2i ### note: same as x1i.u/t1i^2 + x2i.u/t2i^2 } } ### square root transformed incidence rate difference if (measure == "IRSD") { if (addyi) { yi <- sqrt(ir1i) - sqrt(ir2i) } else { yi <- sqrt(ir1i.u) - sqrt(ir2i.u) } vi <- 1/(4*t1i) + 1/(4*t2i) } } ###################################################################### if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL","CVR","VR"))) { mf.m1i <- mf[[match("m1i", names(mf))]] ### for VR, do not need to supply this mf.m2i <- mf[[match("m2i", names(mf))]] ### for VR, do not need to supply this 
mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) k.all <- length(n1i) ### for these measures, need m1i, m2i, sd1i, sd2i, n1i, and n2i if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL","CVR"))) { if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(sd2i)==0L || length(n1i)==0L || length(n2i)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(m1i),length(m2i),length(sd1i),length(sd2i),length(n1i),length(n2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } ### for this measure, need sd1i, sd2i, n1i, and n2i if (is.element(measure, c("VR"))) { if (length(sd1i)==0L || length(sd2i)==0L || length(n1i)==0L || length(n2i)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(sd1i),length(sd2i),length(n1i),length(n2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] n1i <- n1i[subset] n2i <- n2i[subset] } if (any(c(sd1i, sd2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) if (any(c(n1i, n2i) < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- n1i + n2i ### unadjusted total sample sizes k <- length(n1i) ni <- ni.u mi <- ni - 2 sdpi <- sqrt(((n1i-1)*sd1i^2 + (n2i-1)*sd2i^2)/mi) di <- (m1i - m2i) / sdpi ### (raw) mean difference (with heteroscedastic variances) ### to use with pooled SDs, simply set sd1i = sd2i = sdpi or use vtype="HO" if (measure == "MD") { yi <- m1i - m2i if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (!all(is.element(vtype, c("UB","LS","HO")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'UB', 'LS', or 'HO'.")) for (i in seq_len(k)) { ### unbiased estimate of the sampling variance (does not assume homoscedasticity) if (vtype[i] == "UB" || vtype[i] == "LS") vi[i] <- sd1i[i]^2/n1i[i] + sd2i[i]^2/n2i[i] ### estimator assuming homoscedasticity if (vtype[i] == "HO") vi[i] <- sdpi[i]^2 * (1/n1i[i] + 1/n2i[i]) } } ### standardized mean difference (with pooled SDs) if (measure == "SMD") { ### apply bias-correction to di values cmi <- .cmicalc(mi) yi <- cmi * di if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) mnwyi <- .wmean(yi, ni, na.rm=TRUE) ### sample size weighted average of yi's if (!all(is.element(vtype, c("UB","LS","LS2","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'UB', 'LS', 'LS2', or 'AV'.")) for (i in seq_len(k)) { ### unbiased estimate of the sampling variance if (vtype[i] == "UB") vi[i] <- 1/n1i[i] + 1/n2i[i] + (1 - (mi[i]-2)/(mi[i]*cmi[i]^2)) * yi[i]^2 ### large sample approximation to the sampling variance if (vtype[i] == "LS") vi[i] <- 1/n1i[i] + 1/n2i[i] + 
yi[i]^2/(2*ni[i]) ### estimator assuming homogeneity (using sample size weighted average of the yi's) if (vtype[i] == "AV") vi[i] <- 1/n1i[i] + 1/n2i[i] + mnwyi^2/(2*ni[i]) ### large sample approximation to the sampling variance (using equation from Borenstein, 2009) if (vtype[i] == "LS2") vi[i] <- cmi[i]^2 * (1/n1i[i] + 1/n2i[i] + di[i]^2/(2*ni[i])) } } ### standardized mean difference (with heteroscedastic SDs) if (measure == "SMDH") { cmi <- .cmicalc(mi) si <- sqrt((sd1i^2 + sd2i^2)/2) yi <- cmi * (m1i - m2i) / si vi <- yi^2 * (sd1i^4 / (n1i-1) + sd2i^4 / (n2i-1)) / (2*(sd1i^2 + sd2i^2)^2) + (sd1i^2 / (n1i-1) + sd2i^2 / (n2i-1)) / ((sd1i^2 + sd2i^2)/2) vi <- cmi^2 * vi ### note: Bonett (2009) plugs in the uncorrected yi into the ### equation for vi; here, the corrected value is plugged in } ### ratio of means (response ratio) ### to use with pooled SDs, simply set sd1i = sd2i = sdpi or use vtype="HO" if (measure == "ROM") { yi <- log(m1i/m2i) if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) mn1wcvi <- .wmean(sd1i/m1i, n1i, na.rm=TRUE) ### sample size weighted average of the coefficient of variation in group 1 mn2wcvi <- .wmean(sd2i/m2i, n2i, na.rm=TRUE) ### sample size weighted average of the coefficient of variation in group 2 not.na <- !(is.na(n1i) | is.na(n2i) | is.na(sd1i/m1i) | is.na(sd2i/m2i)) mnwcvi <- (sum(n1i[not.na]*(sd1i/m1i)[not.na]) + sum(n2i[not.na]*(sd2i/m2i)[not.na])) / sum((n1i+n2i)[not.na]) ### sample size weighted average of the two CV values if (!all(is.element(vtype, c("LS","HO","AV","AVHO")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS', 'HO', 'AV', or 'AVHO'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance (does not assume homoscedasticity) if (vtype[i] == "LS") vi[i] <- sd1i[i]^2/(n1i[i]*m1i[i]^2) + sd2i[i]^2/(n2i[i]*m2i[i]^2) ### estimator assuming homoscedasticity if (vtype[i] == "HO") vi[i] <- sdpi[i]^2/(n1i[i]*m1i[i]^2) + sdpi[i]^2/(n2i[i]*m2i[i]^2) ### estimator using the weighted averages of the CV values if (vtype[i] == "AV") vi[i] <- mn1wcvi^2/n1i[i] + mn2wcvi^2/n2i[i] ### estimator using the weighted average of two weighted averages of the CV values if (vtype[i] == "AVHO") vi[i] <- mnwcvi^2 * (1/n1i[i] + 1/n2i[i]) } } ### point-biserial correlation obtained from the standardized mean difference ### this is based on Tate's model where Y|X=0 and Y|X=1 are normally distributed (with the same variance) ### Das Gupta (1960) describes the case where Y itself is normal, but the variance expressions therein can ### really only be used in some special cases (not useful in practice) if (is.element(measure, c("RPB","RBIS"))) { hi <- mi/n1i + mi/n2i yi <- di / sqrt(di^2 + hi) ### need this also when measure="RBIS" if (measure == "RPB") { ### this only applies when measure="RPB" if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (!all(is.element(vtype, c("ST","LS","CS")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'ST', 'LS', or 'CS'.")) for (i in seq_len(k)) { ### estimate of the sampling variance for fixed n1i and n2i (i.e., stratified sampling) if (vtype[i] == "ST" || vtype[i] == "LS") vi[i] <- hi[i]^2 / (hi[i] + di[i]^2)^3 * (1/n1i[i] + 1/n2i[i] + di[i]^2/(2*ni[i])) ### estimate of the sampling variance for fixed ni but random n1i and n2i (i.e., cross-sectional/multinomial sampling) if (vtype[i] == "CS") vi[i] <- (1-yi[i]^2)^2 * (ni[i]*yi[i]^2 / (4*n1i[i]*n2i[i]) + (2-3*yi[i]^2)/(2*ni[i])) ### from Tate (1954, 1955b) } } } ### 
biserial correlation obtained from the standardized mean difference (continued from above) if (measure == "RBIS") { p1i <- n1i / ni p2i <- n2i / ni zi <- qnorm(p1i, lower.tail=FALSE) fzi <- dnorm(zi) yi <- sqrt(p1i*p2i) / fzi * yi ### yi on the right-hand side is the point-biserial correlation from above #vi <- (p1i*p2i) / fzi^2 * vi ### not correct (p1i, p2i, and fzi are random variables and vi from RBP is not correct for the bivariate normal case on which RBIS is based) yi.t <- ifelse(abs(yi) > 1, sign(yi), yi) vi <- 1/(ni-1) * (p1i*p2i/fzi^2 - (3/2 + (1 - p1i*zi/fzi)*(1 + p2i*zi/fzi)) * yi.t^2 + yi.t^4) ### from Soper (1914) #vi <- 1/(ni-1) * (yi.t^4 + yi.t^2 * (p1i*p2i*zi^2/fzi^2 + (2*p1i-1)*zi/fzi - 5/2) + p1i*p2i/fzi^2) ### from Tate (1955) -- equivalent to eq. from Soper (1914) ### equation appears to work even if dichotomization is done based on a sample quantile value (so that p1i, p2i, and fzi are fixed by design) } ### SMD to log(OR) transformation based on logistic distribution if (is.element(measure, c("D2OR","D2ORL"))) { yi <- pi / sqrt(3) * di vi <- pi^2 / 3 * (1/n1i + 1/n2i + di^2/(2*ni)) } ### SMD to log(OR) transformation based on normal distribution (Cox & Snell method) if (measure == "D2ORN") { yi <- 1.65 * di vi <- 1.65^2 * (1/n1i + 1/n2i + di^2/(2*ni)) } ### coefficient of variation ratio ### note: vi computed as per eq. 12 from Nakagawa et al. (2015), but without the '-2 rho ...' terms, ### since for normally distributed data the mean and variance (and transformations thereof) are independent if (measure == "CVR") { yi <- log(sd1i/m1i) + 1/(2*(n1i-1)) - log(sd2i/m2i) - 1/(2*(n2i-1)) vi <- 1/(2*(n1i-1)) + sd1i^2/(n1i*m1i^2) + 1/(2*(n2i-1)) + sd2i^2/(n2i*m2i^2) } ### variability ratio if (measure == "VR") { yi <- log(sd1i/sd2i) + 1/(2*(n1i-1)) - 1/(2*(n2i-1)) vi <- 1/(2*(n1i-1)) + 1/(2*(n2i-1)) } } ###################################################################### if (is.element(measure, c("COR","UCOR","ZCOR"))) { mf.ri <- mf[[match("ri", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k.all <- length(ri) if (length(ri)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(ri) != length(ni)) stop(mstyle$stop("Supplied data vectors are not of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) ri <- ri[subset] ni <- ni[subset] } if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) if (measure != "UCOR" && vtype == "UB") stop(mstyle$stop("Use of vtype='UB' only permitted when measure='UCOR'.")) if (measure == "UCOR" && any(ni <= 4, na.rm=TRUE)) warning(mstyle$warning("Cannot compute the bias-corrected correlation coefficient when ni <= 4."), call.=FALSE) if (measure == "ZCOR" && any(ni <= 3, na.rm=TRUE)) warning(mstyle$warning("Cannot estimate the sampling variance when ni <= 3."), call.=FALSE) ni.u <- ni ### unadjusted total sample sizes k <- length(ri) ### raw correlation coefficient if (measure == "COR") yi <- ri ### raw correlation coefficient with bias correction if (measure == "UCOR") { #yi <- ri + ri*(1-ri^2)/(2*(ni-4)) ### approximation #yi[ni <= 4] <- NA ### set corrected correlations for ni <= 4 to NA yi <- ri * .Fcalc(1/2, 1/2, (ni-2)/2, 1-ri^2) } ### sampling variances for COR or UCOR if (is.element(measure, c("COR","UCOR"))) { if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) mnwyi <- .wmean(yi, ni, na.rm=TRUE) ### sample size weighted average of yi's if (!all(is.element(vtype, c("UB","LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'UB', 'LS', or 'AV'.")) for (i in seq_len(k)) { ### unbiased estimate of the sampling variance of the bias-corrected correlation coefficient if (vtype[i] == "UB") { #vi[i] <- yi[i]^2 - 1 + (ni[i]-3)/(ni[i]-2) * ((1-ri[i]^2) + 2*(1-ri[i]^2)^2/ni[i] + 8*(1-ri[i]^2)^3/(ni[i]*(ni[i]+2)) + 48*(1-ri[i]^2)^4/(ni[i]*(ni[i]+2)*(ni[i]+4))) vi[i] <- yi[i]^2 - (1 - (ni[i]-3)/(ni[i]-2) * (1-ri[i]^2) * .Fcalc(1, 1, ni[i]/2, 1-ri[i]^2)) } ### large sample approximation to the sampling variance if (vtype[i] == "LS") vi[i] <- (1-yi[i]^2)^2/(ni[i]-1) ### estimator assuming homogeneity (using sample size weighted average of the yi's) if (vtype[i] == "AV") vi[i] <- (1-mnwyi^2)^2/(ni[i]-1) } } ### r-to-z transformed correlation if (measure == "ZCOR") { yi <- 1/2 * log((1+ri)/(1-ri)) vi <- 1/(ni-3) } ### set sampling variances for ni <= 3 to NA vi[ni <= 3] <- NA } ###################################################################### if (is.element(measure, c("PCOR","ZPCOR","SPCOR"))) { mf.ti <- mf[[match("ti", names(mf))]] mf.r2i <- mf[[match("r2i", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) r2i <- eval(mf.r2i, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k.all <- length(ti) if (measure=="PCOR" && (length(ti)==0L || length(ni)==0L || length(mi)==0L)) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (measure=="SPCOR" && (length(ti)==0L || length(ni)==0L || length(mi)==0L || length(r2i)==0L)) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (measure=="PCOR" && !all(k.all == c(length(ni),length(mi)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (measure=="SPCOR" && !all(k.all == c(length(ni),length(mi),length(r2i)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) ti <- ti[subset] r2i <- r2i[subset] mi <- mi[subset] ni <- ni[subset] } if (measure=="SPCOR" && any(r2i > 1 | r2i < 0, na.rm=TRUE)) stop(mstyle$stop("One or more R^2 values are > 1 or < 0.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) if (any(mi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more mi values are negative.")) if (any(ni - mi - 1 < 1, na.rm=TRUE)) stop(mstyle$stop("One or more dfs are < 1.")) ni.u <- ni ### unadjusted total sample sizes k <- length(ti) ### partial correlation coefficient if (measure == "PCOR") { yi <- ti / sqrt(ti^2 + (ni - mi - 1)) if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) mnwyi <- .wmean(yi, ni, na.rm=TRUE) ### sample size weighted average of yi's if (!all(is.element(vtype, c("LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS' or 'AV'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance if (vtype[i] == "LS") vi[i] <- (1 - yi[i]^2)^2 / (ni[i] - mi[i] - 1) ### estimator assuming homogeneity (using sample size weighted average of the yi's) if (vtype[i] == "AV") vi[i] <- (1 - mnwyi^2)^2 / (ni[i] - mi[i] - 1) } } ### r-to-z transformed partial correlation if (measure == "ZPCOR") { yi <- ti / sqrt(ti^2 + (ni - mi - 1)) yi <- 1/2 * log((1+yi)/(1-yi)) vi <- 1/(ni-mi-1) } ### semi-partial correlation coefficient if (measure == "SPCOR") { yi <- ti * sqrt(1 - r2i) / sqrt(ni - mi - 1) if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) mnwyi <- .wmean(yi, ni, na.rm=TRUE) ### sample size weighted average of yi's if (!all(is.element(vtype, c("LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS' or 'AV'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance if (vtype[i] == "LS") vi[i] <- (r2i[i]^2 - 2*r2i[i] + (r2i[i] - yi[i]^2) + 1 - (r2i[i] - yi[i]^2)^2) / ni[i] ### estimator assuming homogeneity (using sample size weighted average of the yi's) if (vtype[i] == "AV") vi[i] <- (r2i[i]^2 - 2*r2i[i] + (r2i[i] - mnwyi^2) + 1 - (r2i[i] - mnwyi^2)^2) / ni[i] } } } ###################################################################### if (is.element(measure, c("PR","PLN","PLO","PAS","PFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) if (is.null(mi)) mi <- ni - xi k.all <- length(xi) if (length(xi)==0L || length(mi)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(xi) != length(mi)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) xi <- xi[subset] mi <- mi[subset] } ni <- xi + mi if (any(xi > ni, na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(xi, mi) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more group sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes k <- length(xi) ### save unadjusted counts xi.u <- xi mi.u <- mi k <- length(xi) if (to == "all") { ### always add to all cells in all studies xi <- xi + add mi <- mi + add } if (to == "only0" || to == "if0all") { id0 <- c(xi == 0L | mi == 0L) id0[is.na(id0)] <- FALSE } if (to == "only0") { ### add to cells in studies with at least one 0 entry xi[id0] <- xi[id0] + add mi[id0] <- mi[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry if (any(id0)) { xi <- xi + add mi <- mi + add } } ### recompute sample sizes (after add/to adjustment) ni <- xi + mi ### compute proportions (unadjusted and adjusted) pri.u <- xi.u/ni.u pri <- xi/ni ### raw proportion if (measure == "PR") { if (addyi) { yi <- pri } else { yi <- pri.u } if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (addvi) { mnwpri <- .wmean(pri, ni, na.rm=TRUE) ### sample size weighted average of proportions } else { mnwpri.u <- .wmean(pri.u, ni.u, na.rm=TRUE) ### sample size weighted average of proportions } if (!all(is.element(vtype, c("UB","LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'UB', 'LS', or 'AV'.")) for (i in seq_len(k)) { ### unbiased estimate of the sampling variance if (vtype[i] == "UB") { if (addvi) { vi[i] <- pri[i]*(1-pri[i])/(ni[i]-1) } else { vi[i] <- pri.u[i]*(1-pri.u[i])/(ni.u[i]-1) } } ### large sample approximation to the sampling variance if (vtype[i] == "LS") { if (addvi) { vi[i] <- pri[i]*(1-pri[i])/ni[i] } else { vi[i] <- pri.u[i]*(1-pri.u[i])/ni.u[i] } } ### estimator assuming homogeneity (using the average proportion) if (vtype[i] == "AV") { if (addvi) { vi[i] <- mnwpri*(1-mnwpri)/ni[i] } else { vi[i] <- mnwpri.u*(1-mnwpri.u)/ni.u[i] } } } } ### proportion with log transformation if (measure == "PLN") { if (addyi) { yi <- log(pri) } else { yi <- log(pri.u) } if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (addvi) { mnwpri <- .wmean(pri, ni, na.rm=TRUE) ### sample size weighted average of proportions #mnwpri <- exp(.wmean(yi, ni, na.rm=TRUE)) ### alternative strategy (exp of the sample size weighted average of the log proportions) } else { mnwpri.u <- .wmean(pri.u, ni.u, na.rm=TRUE) ### sample size weighted average of proportions } if (!all(is.element(vtype, c("LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS' or 'AV'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance if (vtype[i] == "LS") { if (addvi) { vi[i] <- 1/xi[i] - 1/ni[i] } else { vi[i] <- 1/xi.u[i] - 1/ni.u[i] } } ### estimator assuming homogeneity (using the average proportion) if (vtype[i] == "AV") { if (addvi) { vi[i] <- 1/(mnwpri*ni[i]) - 1/ni[i] } else { vi[i] <- 1/(mnwpri.u*ni.u[i]) - 1/ni.u[i] } } } } ### proportion with logit (log odds) transformation if (measure == "PLO") { if (addyi) { yi <- 
log(pri/(1-pri)) } else { yi <- log(pri.u/(1-pri.u)) } if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (addvi) { mnwpri <- .wmean(pri, ni, na.rm=TRUE) ### sample size weighted average of proportions #mnwpri <- transf.ilogit(.wmean(yi, ni, na.rm=TRUE)) ### alternative strategy (inverse logit of the sample size weighted average of the logit transformed proportions) } else { mnwpri.u <- .wmean(pri.u, ni.u, na.rm=TRUE) ### sample size weighted average of proportions } if (!all(is.element(vtype, c("LS","AV")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS' or 'AV'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance if (vtype[i] == "LS") { if (addvi) { vi[i] <- 1/xi[i] + 1/mi[i] } else { vi[i] <- 1/xi.u[i] + 1/mi.u[i] } } ### estimator assuming homogeneity (using the average proportion) if (vtype[i] == "AV") { if (addvi) { vi[i] <- 1/(mnwpri*ni[i]) + 1/((1-mnwpri)*ni[i]) } else { vi[i] <- 1/(mnwpri.u*ni.u[i]) + 1/((1-mnwpri.u)*ni.u[i]) } } } } ### note: addyi and addvi only implemented for measures above ### proportion with arcsine square root (angular) transformation if (measure == "PAS") { yi <- asin(sqrt(pri)) vi <- 1/(4*ni) } ### proportion with Freeman-Tukey double arcsine transformation if (measure == "PFT") { yi <- 1/2*(asin(sqrt(xi/(ni+1))) + asin(sqrt((xi+1)/(ni+1)))) vi <- 1/(4*ni + 2) } } ###################################################################### if (is.element(measure, c("IR","IRLN","IRS","IRFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.ti <- mf[[match("ti", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) k.all <- length(xi) if (length(xi)==0L || length(ti)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(xi) != length(ti)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) xi <- xi[subset] ti <- ti[subset] } if (any(xi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) if (any(ti <= 0, na.rm=TRUE)) stop(mstyle$stop("One or more person-times are <= 0.")) ni.u <- ti ### unadjusted total sample sizes k <- length(xi) ### save unadjusted counts xi.u <- xi if (to == "all") { ### always add to all cells in all studies xi <- xi + add } if (to == "only0" || to == "if0all") { id0 <- c(xi == 0L) id0[is.na(id0)] <- FALSE } if (to == "only0") { ### add to cells in studies with at least one 0 entry xi[id0] <- xi[id0] + add } if (to == "if0all") { ### add to cells in all studies if there is at least one 0 entry if (any(id0)) { xi <- xi + add } } ### compute rates (unadjusted and adjusted) iri.u <- xi.u/ti iri <- xi/ti ### raw incidence rate if (measure == "IR") { if (addyi) { yi <- iri } else { yi <- iri.u } if (addvi) { vi <- iri/ti ### note: same as xi/ti^2 } else { vi <- iri.u/ti ### note: same as xi.u/ti^2 } } ### log transformed incidence rate if (measure == "IRLN") { if (addyi) { yi <- log(iri) } else { yi <- log(iri.u) } if (addvi) { vi <- 1/xi } else { vi <- 1/xi.u } } ### square root transformed incidence rate if (measure == "IRS") { if (addyi) { yi <- sqrt(iri) } else { yi <- sqrt(iri.u) } vi <- 1/(4*ti) } ### note: addyi and addvi only implemented for measures above ### incidence rate with Freeman-Tukey transformation if (measure == "IRFT") { yi <- 1/2*(sqrt(iri) + sqrt(iri+1/ti)) vi <- 1/(4*ti) } } ###################################################################### if (is.element(measure, c("MN","MNLN","CVLN","SDLN","SMD1"))) { mf.mi <- mf[[match("mi", names(mf))]] ### for SDLN, do not need to supply this mf.sdi <- mf[[match("sdi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) sdi <- eval(mf.sdi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k.all <- length(ni) ### for these measures, need mi, sdi, and ni if (is.element(measure, c("MN","MNLN","CVLN","SMD1"))) { if (length(mi)==0L || length(sdi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(mi),length(sdi),length(ni)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } ### for this measure, need sdi and ni if (is.element(measure, c("SDLN"))) { if (length(sdi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (length(sdi) != length(ni)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) mi <- mi[subset] sdi <- sdi[subset] ni <- ni[subset] } if (any(sdi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) if (is.element(measure, c("MNLN","CVLN")) && any(mi < 0, na.rm=TRUE)) stop(mstyle$stop("One or more means are negative.")) ni.u <- ni ### unadjusted total sample sizes k <- length(ni) ### (raw) mean if (measure == "MN") { yi <- mi vi <- sdi^2/ni } ### log(mean) if (measure == "MNLN") { yi <- log(mi) vi <- sdi^2/(ni*mi^2) } ### log(CV) with bias correction ### note: vi computed as per eq. 27 from Nakagawa et al. (2015), but without the '-2 rho ...' term, ### since for normally distributed data the mean and variance (and transformations thereof) are independent if (measure == "CVLN") { yi <- log(sdi/mi) + 1/(2*(ni-1)) vi <- 1/(2*(ni-1)) + sdi^2/(ni*mi^2) } ### log(SD) with bias correction if (measure == "SDLN") { yi <- log(sdi) + 1/(2*(ni-1)) vi <- 1/(2*(ni-1)) } ### single-group SMD if (measure == "SMD1") { cmi <- .cmicalc(ni-1) yi <- cmi * mi / sdi vi <- 1/ni + yi^2/(2*ni) } } ###################################################################### if (is.element(measure, c("MC","SMCC","SMCR","SMCRH","ROMC","CVRC","VRC"))) { mf.m1i <- mf[[match("m1i", names(mf))]] ### for VRC, do not need to supply this mf.m2i <- mf[[match("m2i", names(mf))]] ### for VRC, do not need to supply this mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] ### for SMCR, do not need to supply this mf.ni <- mf[[match("ni", names(mf))]] mf.ri <- mf[[match("ri", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) k.all <- length(ni) if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC"))) { ### for these measures, need m1i, m2i, sd1i, sd2i, ni, and ri if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(sd2i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(m1i),length(m2i),length(sd1i),length(sd2i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (is.element(measure, c("SMCR"))) { ### for this measure, need m1i, m2i, sd1i, ni, and ri (do not need sd2i) if (length(m1i)==0L || length(m2i)==0L || length(sd1i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(m1i),length(m2i),length(sd1i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (is.element(measure, c("VRC"))) { ### for this measure, need sd1i, sd2i, ni, and ri if (length(sd1i)==0L || length(sd2i)==0L || length(ni)==0L || length(ri)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(sd1i),length(sd2i),length(ni),length(ri)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) } if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] ni <- ni[subset] ri <- ri[subset] } if (is.element(measure, c("MC","SMCC","SMCRH","ROMC","CVRC","VRC"))) { if (any(c(sd1i, sd2i) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) } if (is.element(measure, c("SMCR"))) { if (any(sd1i < 0, na.rm=TRUE)) stop(mstyle$stop("One or more standard deviations are negative.")) } if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes k <- length(ni) ni <- ni.u mi <- ni - 1 ### (raw) mean change if (measure == "MC") { yi <- m1i - m2i vi <- (sd1i^2 + sd2i^2 - 2*ri*sd1i*sd2i) / ni } ### standardized mean change with change score standardization (using sddi) ### note: does not assume homoscedasticity, since we use sddi here if (measure == "SMCC") { cmi <- .cmicalc(mi) sddi <- sqrt(sd1i^2 + sd2i^2 - 2*ri*sd1i*sd2i) di <- (m1i - m2i) / sddi yi <- cmi * di if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (!all(is.element(vtype, c("LS","LS2")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS' or 'LS2'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance if (vtype[i] == "LS") vi[i] <- 1/ni[i] + yi[i]^2 / (2*ni[i]) ### large sample approximation to the sampling variance if (vtype[i] == "LS2") vi[i] <- cmi[i]^2 * (1/ni[i] + di[i]^2 / (2*ni[i])) } } ### standardized mean change with raw score standardization (using sd1i) ### note: yi does not assume homoscedasticity, but vi does if (measure == "SMCR") { cmi <- .cmicalc(mi) di <- (m1i - m2i) / sd1i yi <- cmi * di if (length(vtype) == 1L) vtype <- rep(vtype, k) vi <- rep(NA_real_, k) if (!all(is.element(vtype, c("LS","LS2")))) stop(mstyle$stop("For this outcome measure, 'vtype' must be either 'LS' or 'LS2'.")) for (i in seq_len(k)) { ### large sample approximation to the sampling variance if (vtype[i] == "LS") vi[i] <- 2*(1-ri[i])/ni[i] + yi[i]^2 / (2*ni[i]) ### large sample approximation to the sampling variance (using corrected (!) equation from Borenstein, 2009) if (vtype[i] == "LS2") vi[i] <- cmi[i]^2 * (2*(1-ri[i])/ni[i] + di[i]^2 / (2*ni[i])) #vi[i] <- cmi[i]^2 * 2*(1-ri[i]) * (1/ni[i] + di[i]^2 / (2*ni[i])) # as in Borenstein (2009) but this is incorrect } } ### standardized mean change with raw score standardization (using sd1i) ### with vi computation allowing for heteroscedasticity (Bonett, 2008; and JEBS article) if (measure == "SMCRH") { cmi <- .cmicalc(mi) vardi <- sd1i^2 + sd2i^2 - 2*ri*sd1i*sd2i yi <- cmi * (m1i - m2i) / sd1i vi <- vardi/(sd1i^2*(ni-1)) + yi^2 / (2*(ni-1)) vi <- cmi^2 * vi ### note: Bonett suggests plugging in the uncorrected yi into the ### equation for vi; here, the corrected value is plugged in } ### ratio of means for pre-post or matched designs (eq.
6 in Lajeunesse, 2011) ### to use with pooled SDs, simply set sd1i = sd2i = sdpi if (measure == "ROMC") { yi <- log(m1i/m2i) vi <- sd1i^2/(ni*m1i^2) + sd2i^2/(ni*m2i^2) - 2*ri*sd1i*sd2i/(m1i*m2i*ni) } ### coefficient of variation ratio for pre-post or matched designs if (measure == "CVRC") { yi <- log(sd1i/m1i) - log(sd2i/m2i) vi <- (1-ri^2)/(ni-1) + (m1i^2*sd2i^2 + m2i^2*sd1i^2 - 2*m1i*m2i*ri*sd1i*sd2i) / (m1i^2*m2i^2*ni) } ### variability ratio for pre-post or matched designs if (measure == "VRC") { yi <- log(sd1i/sd2i) vi <- (1-ri^2)/(ni-1) } } ###################################################################### if (is.element(measure, c("ARAW","AHW","ABT"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k.all <- length(ai) if (length(ai)==0L || length(mi)==0L || length(ni)==0L) stop(mstyle$stop("Cannot compute outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(k.all == c(length(ai),length(mi),length(ni)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k.all) ai <- ai[subset] mi <- mi[subset] ni <- ni[subset] } if (any(ai > 1, na.rm=TRUE)) stop(mstyle$stop("One or more alpha values are > 1.")) if (any(mi < 2, na.rm=TRUE)) stop(mstyle$stop("One or more mi values are < 2.")) if (any(ni < 1, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are < 1.")) ni.u <- ni ### unadjusted total sample sizes k <- length(ai) ### raw alpha values if (measure == "ARAW") { yi <- ai vi <- 2*mi*(1-ai)^2 / ((mi-1)*(ni-2)) } ### alphas transformed with Hakstian & Whalen (1976) transformation if (measure == "AHW") { #yi <- (1-ai)^(1/3) ### technically this is the Hakstian & Whalen (1976) transformation yi <- 1 - (1-ai)^(1/3) ### but with this, yi remains a monotonically increasing function of ai vi <- 18*mi*(ni-1)*(1-ai)^(2/3) / ((mi-1)*(9*ni-11)^2) } ### alphas transformed with Bonett (2002) transformation (without bias correction) if (measure == "ABT") { #yi <- log(1-ai) - log(ni/(ni-1)) #yi <- log(1-ai) ### technically this is the Bonett (2002) transformation yi <- -log(1-ai) ### but with this, yi remains a monotonically increasing function of ai vi <- 2*mi / ((mi-1)*(ni-2)) } } ###################################################################### } else { ### in case yi is not NULL (so user wants to convert a regular data frame to an 'escalc' object) ### get vi, sei, and ni mf.vi <- mf[[match("vi", names(mf))]] mf.sei <- mf[[match("sei", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] vi <- eval(mf.vi, data, enclos=sys.frame(sys.parent())) sei <- eval(mf.sei, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k.all <- length(yi) ### if neither vi nor sei is specified, then throw an error ### if only sei is specified, then square those values to get vi ### if vi is specified, use those values if (is.null(vi)) { if (is.null(sei)) { stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) } else { vi <- sei^2 } } if (length(yi) != length(vi)) stop(mstyle$stop("Supplied data vectors are not of the same length.")) if (!is.null(ni) && (length(yi) != length(ni))) stop(mstyle$stop("Supplied data vectors are not of the same length.")) if (!is.null(subset)) { subset <- 
.setnafalse(subset, k=k.all) yi <- yi[subset] vi <- vi[subset] ni <- ni[subset] } ni.u <- ni ### unadjusted total sample sizes k <- length(yi) } ######################################################################### ######################################################################### ######################################################################### ### make sure yi and vi are really vectors (and not arrays) yi <- as.vector(yi) vi <- as.vector(vi) ### check for infinite values and set them to NA is.inf <- is.infinite(yi) | is.infinite(vi) if (any(is.inf)) { warning(mstyle$warning("Some 'yi' and/or 'vi' values equal to +-Inf. Recoded to NAs."), call.=FALSE) yi[is.inf] <- NA vi[is.inf] <- NA } ### check for NaN values and set them to NA is.NaN <- is.nan(yi) | is.nan(vi) if (any(is.NaN)) { yi[is.NaN] <- NA vi[is.NaN] <- NA } ### check for negative vi's (should not happen, but just in case) vi[vi < 0] <- NA ### add study labels if specified if (!is.null(slab)) { if (length(slab) != k.all) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) if (!is.null(subset)) slab <- slab[subset] if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) } ### if include/subset is NULL, set to TRUE vector if (is.null(include)) include <- rep(TRUE, k.all) if (is.null(subset)) subset <- rep(TRUE, k.all) ### turn numeric include vector into logical vector (already done for subset) if (!is.null(include)) include <- .setnafalse(include, arg="include", k=k.all) ### apply subset to include include <- include[subset] ### subset data frame (note: subsetting of other parts already done above, so yi/vi/ni.u/slab are already subsetted) if (has.data && any(!subset)) data <- data[subset,,drop=FALSE] ### put together dataset if (has.data && append) { ### if data argument has been specified and user wants to append dat <- data.frame(data) if (replace || !is.element(var.names[1], names(dat))) { yi.replace <- rep(TRUE, k) } else { yi.replace <- is.na(dat[[var.names[1]]]) } if (replace || !is.element(var.names[2], names(dat))) { vi.replace <- rep(TRUE, k) } else { vi.replace <- is.na(dat[[var.names[2]]]) } if (replace || !is.element(var.names[3], names(dat))) { measure.replace <- rep(TRUE, k) } else { measure.replace <- is.na(dat[[var.names[3]]]) | dat[[var.names[3]]] == "" } dat[[var.names[1]]][include & yi.replace] <- yi[include & yi.replace] dat[[var.names[2]]][include & vi.replace] <- vi[include & vi.replace] if (add.measure) dat[[var.names[3]]][!is.na(yi) & include & measure.replace] <- measure if (!is.null(ni.u)) attributes(dat[[var.names[1]]])$ni[include & yi.replace] <- ni.u[include & yi.replace] } else { ### if data argument has not been specified or user does not want to append dat <- data.frame(yi=rep(NA_real_, k), vi=rep(NA_real_, k)) dat$yi[include] <- yi[include] dat$vi[include] <- vi[include] if (add.measure) dat$measure[!is.na(yi) & include] <- measure attributes(dat$yi)$ni[include] <- ni.u[include] if (add.measure) { names(dat) <- var.names } else { names(dat) <- var.names[1:2] } } ### replace missings in measure with "" if (add.measure) dat[[var.names[3]]][is.na(dat[[var.names[3]]])] <- "" ### add slab attribute to the yi vector attr(dat[[var.names[1]]], "slab") <- slab ### add measure attribute to the yi vector attr(dat[[var.names[1]]], "measure") <- measure ### add digits attribute attr(dat, "digits") <- digits ### add 
'yi.names' and 'vi.names' to the first position of the corresponding attributes attr(dat, "yi.names") <- unique(c(var.names[1], attr(data, "yi.names"))) ### if 'yi.names' is not an attribute, attr() returns NULL, so this works fine attr(dat, "vi.names") <- unique(c(var.names[2], attr(data, "vi.names"))) ### if 'vi.names' is not an attribute, attr() returns NULL, so this works fine ### add 'out.names' back to object in case these attributes exist (if summary() has been used on the object) attr(dat, "sei.names") <- attr(data, "sei.names") attr(dat, "zi.names") <- attr(data, "zi.names") attr(dat, "pval.names") <- attr(data, "pval.names") attr(dat, "ci.lb.names") <- attr(data, "ci.lb.names") attr(dat, "ci.ub.names") <- attr(data, "ci.ub.names") ### keep only attribute elements from yi.names and vi.names that are actually part of the object attr(dat, "yi.names") <- attr(dat, "yi.names")[attr(dat, "yi.names") %in% colnames(dat)] attr(dat, "vi.names") <- attr(dat, "vi.names")[attr(dat, "vi.names") %in% colnames(dat)] class(dat) <- c("escalc", "data.frame") return(dat) } metafor/R/summary.rma.r0000644000176200001440000000067313770377525014574 0ustar liggesuserssummary.rma <- function(object, digits, showfit=TRUE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") if (missing(digits)) { digits <- .get.digits(xdigits=object$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=object$digits, dmiss=FALSE) } object$digits <- digits class(object) <- c("summary.rma", class(object)) return(object) } metafor/R/plot.rma.peto.r0000644000176200001440000000372014054725372015010 0ustar liggesusersplot.rma.peto <- function(x, qqplot=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.peto") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) par.mfrow <- par("mfrow") par(mfrow=c(2,2)) on.exit(par(mfrow = par.mfrow), add=TRUE) ######################################################################### forest(x, ...) title("Forest Plot", ...) ######################################################################### funnel(x, ...) title("Funnel Plot", ...) ######################################################################### radial(x, ...) title("Radial Plot", ...) ######################################################################### if (qqplot) { qqnorm(x, ...) } else { options(na.action = "na.pass") z <- rstandard(x)$z options(na.action = na.act) not.na <- !is.na(z) if (na.act == "na.omit") { z <- z[not.na] ids <- x$ids[not.na] not.na <- not.na[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") ids <- x$ids k <- length(z) plot(NA, NA, xlim=c(1,k), ylim=c(min(z, -2, na.rm=TRUE), max(z, 2, na.rm=TRUE)), xaxt="n", xlab="Study", ylab="", bty="l", ...) lines(seq_len(k)[not.na], z[not.na], col="lightgray", ...) lines(seq_len(k), z, ...) points(x=seq_len(k), y=z, pch=21, bg="black", ...) axis(side=1, at=seq_len(k), labels=ids, ...) abline(h=0, lty="dashed", ...) abline(h=c(qnorm(.025),qnorm(.975)), lty="dotted", ...) title("Standardized Residuals", ...) 
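### Illustrative usage sketch (comment only, not executed here). For a model fitted with
### rma.peto(), plot() draws the 2x2 panel assembled above: forest, funnel, and radial
### plots plus either the standardized residuals or, with qqplot=TRUE, a normal Q-Q plot.
### The call below assumes the BCG vaccine dataset ('dat.bcg') that is available once the
### package is loaded:
#
# library(metafor)
# res <- rma.peto(ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
# plot(res, qqplot=TRUE)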
} ######################################################################### invisible() } metafor/R/print.ranktest.r0000644000176200001440000000115114036766042015267 0ustar liggesusersprint.ranktest <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="ranktest") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") cat(mstyle$section("Rank Correlation Test for Funnel Plot Asymmetry")) cat("\n\n") cat(mstyle$result(paste0("Kendall's tau = ", .fcf(x$tau, digits[["est"]]), ", p ", .pval(x$pval, digits[["pval"]], showeq=TRUE, sep=" ")))) cat("\n") #cat("H0: true tau is equal to 0\n\n") if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/plot.gosh.rma.r0000644000176200001440000002314014046526576015005 0ustar liggesusersplot.gosh.rma <- function(x, het="I2", pch=16, cex=0.5, out, col, alpha, border, xlim, ylim, xhist=TRUE, yhist=TRUE, hh=0.3, breaks, adjust, lwd, labels, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="gosh.rma") het <- match.arg(het, c("QE", "I2", "H2", "tau2")) if (het == "tau2" && is.element(x$method, c("FE","EE","CE"))) stop(mstyle$stop("Cannot plot 'tau2' for fixed-effects models.")) if (missing(alpha)) alpha <- nrow(x$res)^(-0.2) if (length(alpha) == 1L) alpha <- c(alpha, 0.5, 0.9) ### 1st for points, 2nd for histograms, 3rd for density lines if (length(alpha) == 2L) alpha <- c(alpha[1], alpha[2], 0.9) missout <- ifelse(missing(out), TRUE, FALSE) ### need this for panel.hist() if (missout) { if (missing(col)) col <- "black" col <- col2rgb(col) / 255 col.pnts <- rgb(col[1], col[2], col[3], alpha[1]) col.hist <- rgb(col[1], col[2], col[3], alpha[2]) col.line <- rgb(col[1], col[2], col[3], alpha[3]) } else { if (length(out) != 1L) stop(mstyle$stop("Argument 'out' should only specify a single study.")) if (out > x$k || out < 1) stop(mstyle$stop("Non-existing study chosen as potential outlier.")) if (missing(col)) col <- c("red", "blue") if (length(col) != 2L) stop(mstyle$stop("Argument 'col' should specify two colors.")) col.o <- col2rgb(col[1]) / 255 col.i <- col2rgb(col[2]) / 255 col.pnts.o <- rgb(col.o[1], col.o[2], col.o[3], alpha[1]) col.pnts.i <- rgb(col.i[1], col.i[2], col.i[3], alpha[1]) col.pnts <- ifelse(x$incl[,out], col.pnts.o, col.pnts.i) col.hist.o <- rgb(col.o[1], col.o[2], col.o[3], alpha[2]) col.hist.i <- rgb(col.i[1], col.i[2], col.i[3], alpha[2]) col.line.o <- rgb(col.o[1], col.o[2], col.o[3], alpha[3]) col.line.i <- rgb(col.i[1], col.i[2], col.i[3], alpha[3]) } if (missing(border)) border <- "white" if (length(border) == 1L) border <- c(border, border) if (length(hh) == 1L) hh <- c(hh, hh) if (x$int.only && (any(hh < 0) | any(hh > 1))) stop(mstyle$stop("Invalid value(s) specified for 'hh' argument.")) if (missing(breaks)) breaks <- "Sturges" if (length(breaks) == 1L) breaks <- list(breaks, breaks) ### use list so can also specify two vectors if (missing(adjust)) adjust <- 1 if (length(adjust) == 1L) adjust <- c(adjust, adjust) if (missing(lwd)) lwd <- 2 if (length(lwd) == 1L) lwd <- c(lwd, lwd) if (missing(labels)) { if (het == "QE" && x$int.only) labels <- expression(Q) if (het == "QE" && !x$int.only) labels <- expression(Q[E]) if (het == "I2") labels <- expression(I^2) if (het == "H2") labels <- expression(H^2) if (het == "tau2") labels <- expression(tau^2) if (x$int.only) { labels <- c(.setlab(x$measure, transf.char="FALSE", atransf.char="FALSE", gentype=2), labels) } else { labels <- c(labels, 
colnames(x$res)[-seq_len(5)]) } } ######################################################################### if (x$int.only) { par.mar <- par("mar") par.mar.adj <- par.mar - c(0,-1,3.1,1.1) par.mar.adj[par.mar.adj < 0] <- 0 on.exit(par(mar = par.mar)) if (xhist & yhist) layout(mat=matrix(c(1,2,3,4), nrow=2, byrow=TRUE), widths=c(1-hh[2],hh[2]), heights=c(hh[1],1-hh[1])) if (xhist & !yhist) layout(mat=matrix(c(1,2), nrow=2, byrow=TRUE), heights=c(hh[1],1-hh[1])) if (!xhist & yhist) layout(mat=matrix(c(1,2), nrow=1, byrow=TRUE), widths=c(1-hh[2],hh[2])) hx <- hist(x$res[,6], breaks=breaks[[1]], plot=FALSE) hy <- hist(x$res[,het], breaks=breaks[[2]], plot=FALSE) if (missout) { if (missing(xlim)) xlim <- range(hx$breaks) if (missing(ylim)) ylim <- range(hy$breaks) if (xhist) { d <- density(x$res[,6], adjust=adjust[1], na.rm=TRUE) brks <- hx$breaks nB <- length(brks) y <- hx$density par(mar=c(0,par.mar.adj[2:4])) plot(NULL, xlim=xlim, ylim=c(0,max(hx$density,d$y)), xlab="", ylab="", xaxt="n", yaxt="n", bty="n") rect(brks[-nB], 0, brks[-1], y, col=col.hist, border=border[1]) if (lwd[1] > 0) lines(d$x, d$y, lwd=lwd[1], col=col.line) } } else { isout <- x$incl[,out] hx.o <- hist(x$res[isout,6], breaks=hx$breaks, plot=FALSE) hx.i <- hist(x$res[!isout,6], breaks=hx$breaks, plot=FALSE) hy.o <- hist(x$res[isout,het], breaks=hy$breaks, plot=FALSE) hy.i <- hist(x$res[!isout,het], breaks=hy$breaks, plot=FALSE) if (missing(xlim)) xlim <- c(min(hx.o$breaks, hx.i$breaks), max(hx.o$breaks, hx.i$breaks)) if (missing(ylim)) ylim <- c(min(hy.o$breaks, hy.i$breaks), max(hy.o$breaks, hy.i$breaks)) if (xhist) { d.o <- density(x$res[isout,6], adjust=adjust[1], na.rm=TRUE) d.i <- density(x$res[!isout,6], adjust=adjust[1], na.rm=TRUE) brks.o <- hx.o$breaks brks.i <- hx.i$breaks nB.o <- length(brks.o) nB.i <- length(brks.i) y.o <- hx.o$density y.i <- hx.i$density par(mar=c(0,par.mar.adj[2:4])) plot(NULL, xlim=xlim, ylim=c(0,max(hx.o$density,hx.i$density,d.o$y,d.i$y)), xlab="", ylab="", xaxt="n", yaxt="n", bty="n") rect(brks.i[-nB.i], 0, brks.i[-1], y.i, col=col.hist.i, border=border[1]) rect(brks.o[-nB.o], 0, brks.o[-1], y.o, col=col.hist.o, border=border[1]) if (lwd[1] > 0) { lines(d.i$x, d.i$y, lwd=lwd[1], col=col.line.i) lines(d.o$x, d.o$y, lwd=lwd[1], col=col.line.o) } } } if (xhist & yhist) plot.new() par(mar = par.mar.adj) plot(x$res[,6], x$res[,het], xlim=xlim, ylim=ylim, pch=pch, cex=cex, col=col.pnts, bty="l", xlab=labels[1], ylab=labels[2], ...) 
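### Illustrative usage sketch (comment only, not executed here). A GOSH plot is obtained by
### first running gosh() on a fitted model (which refits the model in many subsets, so the
### 'subsets' argument is used here to keep the example quick) and then calling plot() on
### the result; 'out' marks subsets containing a potential outlier (study 6 is arbitrary):
#
# library(metafor)
# dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
# res <- rma(yi, vi, data=dat)
# sav <- gosh(res, subsets=1000)
# plot(sav, het="I2", out=6, breaks=50)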
if (missout) { if (yhist) { d <- density(x$res[,het], adjust=adjust[2], na.rm=TRUE) brks <- hy$breaks nB <- length(brks) y <- hy$density par(mar=c(par.mar.adj[1],0,par.mar.adj[3:4])) plot(NULL, xlim=c(0,max(hy$density,d$y)), ylim=ylim, xlab="", ylab="", xaxt="n", yaxt="n", bty="n") rect(0, brks[-nB], y, brks[-1], col=col.hist, border=border[2]) if (lwd[2] > 0) lines(d$y, d$x, lwd=lwd[2], col=col.line) } } else { if (yhist) { d.o <- density(x$res[isout,het], adjust=adjust[2], na.rm=TRUE) d.i <- density(x$res[!isout,het], adjust=adjust[2], na.rm=TRUE) brks.o <- hy.o$breaks brks.i <- hy.i$breaks nB.o <- length(brks.o) nB.i <- length(brks.i) y.o <- hy.o$density y.i <- hy.i$density par(mar=c(par.mar.adj[1],0,par.mar.adj[3:4])) plot(NULL, xlim=c(0,max(hy.o$density,hy.i$density,d.o$y,d.i$y)), ylim=ylim, xlab="", ylab="", xaxt="n", yaxt="n", bty="n") rect(0, brks.i[-nB.i], y.i, brks.i[-1], col=col.hist.i, border=border[2]) rect(0, brks.o[-nB.o], y.o, brks.o[-1], col=col.hist.o, border=border[2]) if (lwd[2] > 0) { lines(d.i$y, d.i$x, lwd=lwd[2], col=col.line.i) lines(d.o$y, d.o$x, lwd=lwd[2], col=col.line.o) } } } ### reset to a single figure if (xhist | yhist) layout(matrix(1)) } else { isout <- x$incl[,out] ### function for histograms with kernel density estimates on the diagonal panel.hist <- function(x, ...) { usr <- par("usr") on.exit(par(usr)) par(usr = c(usr[1:2], 0, 1.2 + hh[1])) h <- hist(x, plot=FALSE, breaks=breaks[[1]]) if (missout) { brks <- h$breaks nB <- length(brks) y <- h$density z <- y / max(y) rect(brks[-nB], 0, brks[-1], z, col=col.hist, border=border[1]) res <- density(x, adjust=adjust[1], na.rm=TRUE) res$y <- res$y / max(y) if (lwd[1] > 0) lines(res, lwd=lwd[1], col=col.line) } else { h.o <- hist(x[isout], plot=FALSE, breaks=h$breaks) h.i <- hist(x[!isout], plot=FALSE, breaks=h$breaks) brks.o <- h.o$breaks brks.i <- h.i$breaks nB.o <- length(brks.o) nB.i <- length(brks.i) y.o <- h.o$density y.i <- h.i$density z.o <- y.o / max(y.o, y.i) z.i <- y.i / max(y.o, y.i) rect(brks.i[-nB.i], 0, brks.i[-1], z.i, col=col.hist.i, border=border[1]) rect(brks.o[-nB.o], 0, brks.o[-1], z.o, col=col.hist.o, border=border[1]) res.o <- density(x[isout], adjust=adjust[1], na.rm=TRUE) res.i <- density(x[!isout], adjust=adjust[1], na.rm=TRUE) res.o$y <- res.o$y / max(y.o, y.i) res.i$y <- res.i$y / max(y.o, y.i) if (lwd[1] > 0) { lines(res.i, lwd=lwd[1], col=col.line.i) lines(res.o, lwd=lwd[1], col=col.line.o) } } box() } ### draw scatterplot matrix X <- cbind(x$res[,het], x$res[,6:ncol(x$res)]) pairs(X, pch=pch, cex=cex, diag.panel=panel.hist, col=col.pnts, labels=labels, ...) } ######################################################################### } metafor/R/fsn.r0000644000176200001440000001444414052444104013066 0ustar liggesusersfsn <- function(yi, vi, sei, data, type="Rosenthal", alpha=.05, target, weighted=FALSE, subset, digits, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("Rosenthal", "Orwin", "Rosenberg", "REM")) if (missing(target)) target <- NULL ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ddd <- list(...) 
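### Illustrative usage sketch (comment only, not executed here). fsn() takes the observed
### outcomes and their sampling variances (or standard errors); the BCG dataset is used
### here purely for illustration:
#
# library(metafor)
# dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
# fsn(yi, vi, data=dat) # Rosenthal approach (default)
# fsn(yi, vi, data=dat, type="Orwin", target=log(0.95)) # Orwin approach with a chosen target effect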
.chkdots(ddd, c("test", "verbose", "interval", "iters")) if (is.null(ddd$test)) { test <- "Stouffer" } else { test <- match.arg(ddd$test, c("Stouffer", "Fisher")) } if (is.null(ddd$verbose)) { verbose <- FALSE } else { verbose <- ddd$verbose } if (is.null(ddd$interval)) { interval <- c(0,1000) } else { interval <- ddd$interval } if (is.null(ddd$iters)) { iters <- 100000 } else { iters <- ddd$iters } meanes <- NA pval <- NA rejrate <- NA ######################################################################### ###### data setup ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() mf.yi <- mf[[match("yi", names(mf))]] mf.vi <- mf[[match("vi", names(mf))]] mf.sei <- mf[[match("sei", names(mf))]] #mf.weights <- mf[[match("weights", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] yi <- eval(mf.yi, data, enclos=sys.frame(sys.parent())) vi <- eval(mf.vi, data, enclos=sys.frame(sys.parent())) sei <- eval(mf.sei, data, enclos=sys.frame(sys.parent())) #weights <- eval(mf.weights, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) if (type %in% c("Rosenthal", "Rosenberg", "REM") || (type == "Orwin" && weighted)) { if (is.null(vi)) { if (is.null(sei)) { stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) } else { vi <- sei^2 } } } else { vi <- rep(0, length(yi)) } ### check length of yi and vi if (length(yi) != length(vi)) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same.")) ### if a subset of studies is specified if (!is.null(subset)) { subset <- .setnafalse(subset, k=length(yi)) yi <- yi[subset] vi <- vi[subset] } ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) if (any(yivi.na)) { not.na <- !yivi.na if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] vi <- vi[not.na] } if (na.act == "na.fail") stop(mstyle$stop("Missing values in results.")) } ######################################################################### if (type == "Rosenthal" && test == "Stouffer") { k <- length(yi) zi <- yi / sqrt(vi) z.avg <- abs(sum(zi) / sqrt(k)) pval <- pnorm(z.avg, lower.tail=FALSE) fsnum <- max(0, k * (z.avg / qnorm(alpha, lower.tail=FALSE))^2 - k) target <- NA } if (type == "Rosenthal" && test == "Fisher") { zi <- c(yi / sqrt(vi)) pi <- pnorm(abs(zi), lower.tail=FALSE) pval <- .fsn.fisher(0, pi=pi, alpha=0) if (pval >= alpha) { fsnum <- 0 } else { fsnum <- try(uniroot(.fsn.fisher, interval=interval, extendInt="upX", pi=pi, alpha=alpha)$root, silent=FALSE) if (inherits(fsnum, "try-error")) stop(mstyle$stop("Could not find fail-safe N using Fisher's method.")) } target <- NA } if (type == "Orwin") { k <- length(yi) if (weighted) { wi <- 1/vi meanes <- .wmean(yi, wi) } else { meanes <- mean(yi) } if (is.null(target)) target <- meanes / 2 if (identical(target, 0)) { fsnum <- Inf } else { if (sign(target) != sign(meanes)) target <- -1 * target fsnum <- max(0, k * (meanes - target) / target) } } if (type == "Rosenberg") { k <- length(yi) wi <- 1/vi meanes <- .wmean(yi, wi) zval <- meanes / sqrt(1/sum(wi)) w.p <- (sum(wi*yi) / qnorm(alpha/2, lower.tail=FALSE))^2 - sum(wi) pval <- 2*pnorm(abs(zval), lower.tail=FALSE) fsnum <- max(0, k*w.p/sum(wi)) target <- NA } if (type == "REM") { res <- .fsn.fitre(yi, vi) vnew <- 1/mean(1/vi) tau2 <- res$tau2 meanes <- res$est pval <- res$pval if (is.null(target)) 
target <- meanes / 2 if (identical(target, 0)) { fsnum <- Inf } else { if (sign(target) != sign(meanes)) target <- -1 * target diff.lo <- .fsn.re(0, yi=yi, vi=vi, vnew=vnew, tau2=tau2, target=target, alpha=alpha, iters=iters) if ((meanes > 0 && diff.lo < 0) || (meanes < 0 && diff.lo > 0)) { fsnum <- 0 } else { fsnum <- try(uniroot(.fsn.re, interval=interval, tol=.001, extendInt=ifelse(meanes > 0,"downX","upX"), yi=yi, vi=vi, vnew=vnew, tau2=tau2, target=target, alpha=alpha, iters=iters, verbose=verbose)$root, silent=TRUE) if (inherits(fsnum, "try-error")) stop(mstyle$stop("Could not find fail-safe N based on a random-effects model.")) } rejrate <- .fsn.fitnew(fsnum, yi, vi, vnew, tau2, alpha, iters)$rejrate } } if (!is.infinite(fsnum) && abs(fsnum - round(fsnum)) >= .Machine$double.eps^0.5) { fsnum <- ceiling(fsnum) } else { fsnum <- round(fsnum) } ######################################################################### res <- list(type=type, fsnum=fsnum, alpha=alpha, pval=pval, meanes=meanes, target=target, rejrate=rejrate, digits=digits) class(res) <- "fsn" return(res) } metafor/R/points.regplot.r0000644000176200001440000000033514054724273015273 0ustar liggesuserspoints.regplot <- function(x, ...) { .chkclass(class(x), must="regplot") points(x=x$xi[x$order], y=x$yi[x$order], pch=x$pch[x$order], cex=x$psize[x$order], col=x$col[x$order], bg=x$bg[x$order]) invisible() } metafor/R/transf.r0000644000176200001440000002044314027614415013577 0ustar liggesuserstransf.rtoz <- function(xi, ...) ### resulting value between -Inf (for -1) and +Inf (for +1) atanh(xi) transf.ztor <- function(xi, ...) tanh(xi) transf.logit <- function(xi, ...) ### resulting value between -Inf (for 0) and +Inf (for +1) qlogis(xi) transf.ilogit <- function(xi, ...) plogis(xi) transf.arcsin <- function(xi, ...) ### resulting value between 0 (for 0) and asin(1) = pi/2 (for +1) asin(sqrt(xi)) transf.iarcsin <- function(xi, ...) { zi <- sin(xi)^2 zi[xi < 0] <- 0 ### if xi value is below 0 (e.g., CI bound), return 0 zi[xi > asin(1)] <- 1 ### if xi value is above maximum possible value, return 1 return(c(zi)) } transf.pft <- function(xi, ni, ...) { ### Freeman-Tukey transformation for proportions xi <- xi*ni zi <- 1/2*(asin(sqrt(xi/(ni+1))) + asin(sqrt((xi+1)/(ni+1)))) return(c(zi)) } transf.ipft <- function(xi, ni, ...) { ### inverse of Freeman-Tukey transformation for individual proportions zi <- suppressWarnings(1/2 * (1 - sign(cos(2*xi)) * sqrt(1 - (sin(2*xi)+(sin(2*xi)-1/sin(2*xi))/ni)^2))) zi <- ifelse(is.nan(zi), NA, zi) zi[xi > transf.pft(1,ni)] <- 1 ### if xi is above upper limit, return 1 zi[xi < transf.pft(0,ni)] <- 0 ### if xi is below lower limit, return 0 return(c(zi)) } transf.ipft.hm <- function(xi, targs, ...) { ### inverse of Freeman-Tukey transformation for a collection of proportions if (is.null(targs) || (is.list(targs) && is.null(targs$ni))) stop("Need to specify the sample sizes via the 'targs' argument.", call.=FALSE) if (is.list(targs)) { ni <- targs$ni } else { ni <- ni } nhm <- 1/(mean(1/ni, na.rm=TRUE)) ### calculate harmonic mean of the ni's zi <- suppressWarnings(1/2 * (1 - sign(cos(2*xi)) * sqrt(1 - (sin(2*xi)+(sin(2*xi)-1/sin(2*xi))/nhm)^2))) zi <- ifelse(is.nan(zi), NA, zi) ### it may not be possible to calculate zi zi[xi > transf.pft(1,nhm)] <- 1 ### if xi is above upper limit, return 1 zi[xi < transf.pft(0,nhm)] <- 0 ### if xi is below lower limit, return 0 return(c(zi)) } transf.isqrt <- function(xi, ...) 
{ zi <- xi*xi zi[xi < 0] <- 0 ### if xi value is below 0 (e.g., CI bound), return 0 return(c(zi)) } transf.irft <- function(xi, ti, ...) { ### Freeman-Tukey transformation for incidence rates zi <- 1/2*(sqrt(xi) + sqrt(xi + 1/ti)) ### xi is the incidence rate (not the number of events!) return(c(zi)) } transf.iirft <- function(xi, ti, ...) { ### inverse of Freeman-Tukey transformation for incidence rates (see Freeman-Tukey_incidence.r in code directory) #zi <- (1/ti - 2*xi^2 + ti*xi^4)/(4*xi^2*ti) ### old version where transf.irft was not multiplied by 1/2 zi <- (1/ti - 8*xi^2 + 16*ti*xi^4)/(16*xi^2*ti) ### xi is the incidence rate (not the number of events!) zi <- ifelse(is.nan(zi), NA, zi) zi[xi < transf.irft(0,ti)] <- 0 ### if xi is below lower limit, return 0 zi[zi <= .Machine$double.eps] <- 0 ### avoid finite precision errors in back-transformed values (transf.iirft(transf.irft(0, 1:200), 1:200)) return(c(zi)) } transf.ahw <- function(xi, ...) { ### resulting value between 0 (for alpha=0) and 1 (for alpha=1) #zi <- (1-xi)^(1/3) zi <- 1 - (1-xi)^(1/3) return(c(zi)) } transf.iahw <- function(xi, ...) { #zi <- 1-xi^3 zi <- 1 - (1-xi)^3 zi <- ifelse(is.nan(zi), NA, zi) zi[xi > 1] <- 1 ### if xi is above upper limit, return 1 zi[xi < 0] <- 0 ### if xi is below lower limit, return 0 return(c(zi)) } transf.abt <- function(xi, ...) { ### Bonett (2002) transformation of alphas (without bias correction) #transf.abt <- function(xi, ni, ...) { ### resulting value between 0 (for alpha=0) to Inf (for alpha=1) #zi <- log(1-xi) - log(ni/(ni-1)) #zi <- log(1-xi) zi <- -log(1-xi) return(c(zi)) } transf.iabt <- function(xi, ...) { ### inverse of Bonett (2002) transformation #transf.iabt <- function(xi, ni, ...) { #zi <- 1 - exp(xi) * ni / (ni-1) #zi <- 1 - exp(xi) zi <- 1 - exp(-xi) zi <- ifelse(is.nan(zi), NA, zi) zi[xi < 0] <- 0 ### if xi is below lower limit, return 0 return(c(zi)) } transf.ztor.int <- function(xi, targs=NULL, ...) { if (is.null(targs$tau2)) targs$tau2 <- 0 if (is.null(targs$lower)) targs$lower <- xi-5*sqrt(targs$tau2) if (is.null(targs$upper)) targs$upper <- xi+5*sqrt(targs$tau2) toint <- function(zval, xi, tau2) tanh(zval) * dnorm(zval, mean=xi, sd=sqrt(tau2)) cfunc <- function(xi, tau2, lower, upper) integrate(toint, lower=lower, upper=upper, xi=xi, tau2=tau2)$value if (targs$tau2 == 0) { zi <- transf.ztor(xi) } else { zi <- mapply(xi, FUN=cfunc, tau2=targs$tau2, lower=targs$lower, upper=targs$upper) } return(c(zi)) } transf.exp.int <- function(xi, targs=NULL, ...) { if (is.null(targs$tau2)) targs$tau2 <- 0 if (is.null(targs$lower)) targs$lower <- xi-5*sqrt(targs$tau2) if (is.null(targs$upper)) targs$upper <- xi+5*sqrt(targs$tau2) toint <- function(zval, xi, tau2) exp(zval) * dnorm(zval, mean=xi, sd=sqrt(tau2)) cfunc <- function(xi, tau2, lower, upper) integrate(toint, lower=lower, upper=upper, xi=xi, tau2=tau2)$value if (targs$tau2 == 0) { zi <- exp(xi) } else { zi <- mapply(xi, FUN=cfunc, tau2=targs$tau2, lower=targs$lower, upper=targs$upper) } return(c(zi)) } transf.ilogit.int <- function(xi, targs=NULL, ...) 
{ if (is.null(targs$tau2)) targs$tau2 <- 0 if (is.null(targs$lower)) targs$lower <- xi-5*sqrt(targs$tau2) if (is.null(targs$upper)) targs$upper <- xi+5*sqrt(targs$tau2) toint <- function(zval, xi, tau2) plogis(zval) * dnorm(zval, mean=xi, sd=sqrt(tau2)) cfunc <- function(xi, tau2, lower, upper) integrate(toint, lower=lower, upper=upper, xi=xi, tau2=tau2)$value if (targs$tau2 == 0) { zi <- transf.ilogit(xi) } else { zi <- mapply(xi, FUN=cfunc, tau2=targs$tau2, lower=targs$lower, upper=targs$upper) } return(c(zi)) } transf.dtou1 <- function(xi, ...) { u2i <- pnorm(abs(xi)/2) return((2*u2i - 1) / u2i) } transf.dtou2 <- function(xi, ...) pnorm(xi/2) transf.dtou3 <- function(xi, ...) pnorm(xi) transf.dtocles <- function(xi, ...) pnorm(xi/sqrt(2)) transf.dtorpb <- function(xi, n1i, n2i, ...) { if (missing(n1i) || missing(n2i)) { hi <- 4 } else { if (length(n1i) != length(n2i)) stop("Length of 'n1i' does not match length of 'n2i'.", call.=FALSE) if (length(n1i) != length(xi)) stop("Length of 'n1i' and 'n2i' does not match length of 'xi'.", call.=FALSE) mi <- n1i + n2i - 2 hi <- mi / n1i + mi / n2i } return(xi / sqrt(xi^2 + hi)) } transf.dtobesd <- function(xi, ...) { rpbi <- xi / sqrt(xi^2 + 4) return(0.50 + rpbi/2) } transf.dtomd <- function(xi, targs=NULL, ...) { if (is.null(targs) || (is.list(targs) && is.null(targs$sd))) stop("Need to specify a standard deviation value via the 'targs' argument.", call.=FALSE) if (is.list(targs)) { sd <- targs$sd } else { sd <- targs } if (length(sd) != 1L) stop("Specify a single standard deviation value via the 'targs' argument.", call.=FALSE) return(xi * sd) } transf.logortord <- function(xi, pc, ...) { if (length(pc) == 1L) pc <- rep(pc, length(xi)) if (length(xi) != length(pc)) stop("Length of 'xi' does not match length of 'pc'.", call.=FALSE) if (any(pc < 0) || any(pc > 1)) stop("The control group risk 'pc' must be between 0 and 1.", call.=FALSE) return(exp(xi)*pc / (1 - pc + pc * exp(xi)) - pc) } transf.logortorr <- function(xi, pc, ...) { if (length(pc) == 1L) pc <- rep(pc, length(xi)) if (length(xi) != length(pc)) stop("Length of 'xi' does not match length of 'pc'.", call.=FALSE) if (any(pc < 0) || any(pc > 1)) stop("The control group risk 'pc' must be between 0 and 1.", call.=FALSE) return(exp(xi) / (pc * (exp(xi) - 1) + 1)) } metafor/R/fitted.rma.r0000644000176200001440000000252713770363550014347 0ustar liggesusersfitted.rma <- function(object, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) ### note: fitted values can be calculated for all studies including those that ### have NA on yi/vi (and with "na.pass" these will be provided); but if there ### is an NA in the X's, then the fitted value will also be NA out <- c(object$X.f %*% object$beta) names(out) <- object$slab #not.na <- !is.na(out) if (na.act == "na.omit") out <- out[object$not.na] if (na.act == "na.exclude") out[!object$not.na] <- NA if (na.act == "na.fail" && any(!object$not.na)) stop(mstyle$stop("Missing values in results.")) if (inherits(object, "rma.ls")) { out <- list(location = out) out$scale <- c(object$Z.f %*% object$alpha) names(out$scale) <- object$slab #not.na <- !is.na(out$scale) if (na.act == "na.omit") out$scale <- out$scale[object$not.na] if (na.act == "na.exclude") out$scale[!object$not.na] <- NA if (na.act == "na.fail" && any(!object$not.na)) stop(mstyle$stop("Missing values in results.")) } return(out) } metafor/R/selmodel.r0000644000176200001440000000006613716753101014105 0ustar liggesusersselmodel <- function(x, ...) UseMethod("selmodel") metafor/R/vif.r0000644000176200001440000000005413457322061013061 0ustar liggesusersvif <- function(x, ...) UseMethod("vif") metafor/R/print.list.rma.r0000644000176200001440000000747013770373602015177 0ustar liggesusersprint.list.rma <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="list.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) attr(x, "class") <- NULL ### remove cr.lb and cr.ub elements (if they are there) x$cr.lb <- NULL x$cr.ub <- NULL ### turn all vectors before the slab vector into a data frame slab.pos <- which(names(x) == "slab") out <- x[seq_len(slab.pos-1)] out <- data.frame(out, row.names=x$slab, stringsAsFactors=FALSE) ### in case all values were NA and have been omitted if (nrow(out) == 0L) stop(mstyle$stop("All values are NA."), call.=FALSE) ### in case there is a select element, apply it if (exists("select", where=x, inherits=FALSE)) out <- out[x$select,] if (nrow(out) == 0L) { message(mstyle$message("No values to print.")) return(invisible()) } ### if transf exists and is TRUE, set SEs to NULL so that column is omitted from the output transf.true <- 0 if (exists("transf", where=x, inherits=FALSE) && x$transf) { transf.true <- 1 out$se <- NULL } ### objects created by predict.rma() have a 'method' element ### properly format columns 1-4 (for FE models) or columns 1-6 (for RE/ME models) ### leave element tau2.level, gamma2.level, and/or element X untouched if (exists("method", where=x, inherits=FALSE)) { min.pos <- slab.pos - is.element("tau2.level", names(x)) - is.element("gamma2.level", names(x)) - is.element("X", names(x)) - is.element("Z", names(x)) - transf.true } else { min.pos <- slab.pos - transf.true } sav <- out[,seq_len(min.pos-1)] for (i in seq_len(min.pos-1)) { if (inherits(out[,i], c("integer","logical","factor","character"))) { ### do not apply formating to these classes out[,i] <- out[,i] } else { if (names(out)[i] %in% c("pred", "resid")) out[,i] <- .fcf(out[,i], digits[["est"]]) if (names(out)[i] %in% c("se")) out[,i] <- .fcf(out[,i], digits[["se"]]) if (names(out)[i] %in% c("ci.lb", "ci.ub", "cr.lb", "cr.ub", "pi.lb", "pi.ub")) out[,i] <- .fcf(out[,i], digits[["ci"]]) if 
(names(out)[i] %in% c("zval", "Q", "z", "X2")) out[,i] <- .fcf(out[,i], digits[["test"]]) if (names(out)[i] %in% c("pval", "Qp")) out[,i] <- .fcf(out[,i], digits[["pval"]]) if (names(out)[i] %in% c("I2", "H2")) out[,i] <- .fcf(out[,i], digits[["het"]]) if (names(out)[i] %in% c("tau2")) out[,i] <- .fcf(out[,i], digits[["var"]]) # if (names(out)[i] == "rstudent") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "dffits") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "cook.d") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "cov.r") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "tau2.del") # out[,i] <- .fcf(out[,i], digits[["var"]]) # if (names(out)[i] == "QE.del") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "hat") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "weight") # out[,i] <- .fcf(out[,i], digits[["test"]]) # if (names(out)[i] == "dfbs") # out[,i] <- .fcf(out[,i], digits[["est"]]) if (!is.character(out[,i])) out[,i] <- .fcf(out[,i], digits[["est"]]) } } if (!exists(".rmspace")) cat("\n") tmp <- capture.output(print(out, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) if (!exists(".rmspace") & is.null(attr(x, ".rmspace"))) cat("\n") invisible(sav) } metafor/R/metafor.news.r0000644000176200001440000000006613457322061014710 0ustar liggesusersmetafor.news <- function() news(package="metafor") metafor/R/BIC.rma.r0000644000176200001440000000225514014250111013437 0ustar liggesusersBIC.rma <- function(object, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") if (missing(...)) { ### if there is just 'object' if (object$method == "REML") { out <- object$fit.stats["BIC","REML"] } else { out <- object$fit.stats["BIC","ML"] } } else { ### if there is 'object' and additional objects via ... if (object$method == "REML") { out <- sapply(list(object, ...), function(x) x$fit.stats["BIC","REML"]) } else { out <- sapply(list(object, ...), function(x) x$fit.stats["BIC","ML"]) } dfs <- sapply(list(object, ...), function(x) x$parms) out <- data.frame(df=dfs, BIC=out) ### get names of objects; same idea as in stats:::AIC.default cl <- match.call() rownames(out) <- as.character(cl[-1L]) ### check that all models were fitted to the same data yis <- lapply(list(object, ...), function(x) as.vector(x$yi)) if (!all(sapply(yis[-1], function(x) identical(x, yis[[1]])))) warning(mstyle$warning("Models not all fitted to the same data."), call.=FALSE) } return(out) } metafor/R/confint.rma.glmm.r0000644000176200001440000000031013770362453015450 0ustar liggesusersconfint.rma.glmm <- function(object, parm, level, digits, transf, targs, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.glmm", notav="rma.glmm") } metafor/R/cumul.rma.mh.r0000644000176200001440000001245714030331206014602 0ustar liggesuserscumul.rma.mh <- function(x, order, digits, transf, targs, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.mh") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) 
.chkdots(ddd, c("time", "decreasing")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (is.null(ddd$decreasing)) { decreasing <- FALSE } else { decreasing <- ddd$decreasing } ######################################################################### if (grepl("^order\\(", deparse(substitute(order)))) warning(mstyle$warning("Use of order() in 'order' argument is probably erroneous."), call.=FALSE) if (missing(order)) order <- seq_len(x$k.all) if (length(order) != x$k.all) stop(mstyle$stop(paste0("Length of the 'order' argument (", length(order), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) ### note: order variable is assumed to be of the same length as the size of the ### original dataset passed to the model fitting function and so we apply ### the same subsetting (if necessary) as was done during model fitting if (!is.null(x$subset)) order <- order[x$subset] order <- order(order, decreasing=decreasing) ai.f <- x$ai.f[order] bi.f <- x$bi.f[order] ci.f <- x$ci.f[order] di.f <- x$di.f[order] x1i.f <- x$x1i.f[order] x2i.f <- x$x2i.f[order] t1i.f <- x$t1i.f[order] t2i.f <- x$t2i.f[order] yi.f <- x$yi.f[order] vi.f <- x$vi.f[order] not.na <- x$not.na[order] slab <- x$slab[order] ids <- x$ids[order] beta <- rep(NA_real_, x$k.f) se <- rep(NA_real_, x$k.f) zval <- rep(NA_real_, x$k.f) pval <- rep(NA_real_, x$k.f) ci.lb <- rep(NA_real_, x$k.f) ci.ub <- rep(NA_real_, x$k.f) QE <- rep(NA_real_, x$k.f) QEp <- rep(NA_real_, x$k.f) I2 <- rep(NA_real_, x$k.f) H2 <- rep(NA_real_, x$k.f) ### note: skipping NA cases if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!not.na[i]) next if (is.element(x$measure, c("RR","OR","RD"))) { res <- try(suppressWarnings(rma.mh(ai=ai.f, bi=bi.f, ci=ci.f, di=di.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=seq_len(i))), silent=TRUE) } else { res <- try(suppressWarnings(rma.mh(x1i=x1i.f, x2i=x2i.f, t1i=t1i.f, t2i=t2i.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=seq_len(i))), silent=TRUE) } if (inherits(res, "try-error")) next beta[i] <- res$beta se[i] <- res$se zval[i] <- res$zval pval[i] <- res$pval ci.lb[i] <- res$ci.lb ci.ub[i] <- res$ci.ub QE[i] <- res$QE QEp[i] <- res$QEp I2[i] <- res$I2 H2[i] <- res$H2 } if (progbar) pbapply::closepb(pbar) ######################################################################### ### if requested, apply transformation function if (.isTRUE(transf) && is.element(x$measure, c("OR","RR","IRR"))) ### if transf=TRUE, apply exp transformation to ORs, RRs, and IRRs transf <- exp if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(estimate=beta[not.na], se=se[not.na], zval=zval[not.na], pval=pval[not.na], ci.lb=ci.lb[not.na], ci.ub=ci.ub[not.na], Q=QE[not.na], Qp=QEp[not.na], I2=I2[not.na], H2=H2[not.na]) out$slab <- slab[not.na] out$ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(estimate=beta, se=se, 
zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, Q=QE, Qp=QEp, I2=I2, H2=H2) out$slab <- slab out$ids <- ids } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits out$transf <- transf out$slab.null <- x$slab.null out$level <- x$level out$measure <- x$measure out$test <- x$test attr(out$estimate, "measure") <- x$measure if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- c("list.rma", "cumul.rma") return(out) } metafor/R/trimfill.r0000644000176200001440000000006613457322061014122 0ustar liggesuserstrimfill <- function(x, ...) UseMethod("trimfill") metafor/R/qqnorm.rma.glmm.r0000644000176200001440000000023113770374321015324 0ustar liggesusersqqnorm.rma.glmm <- function(y, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(y), must="rma.glmm", notav="rma.glmm") } metafor/R/print.permutest.rma.uni.r0000644000176200001440000000527314043267635017047 0ustar liggesusersprint.permutest.rma.uni <- function(x, digits=x$digits, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="permutest.rma.uni") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") if (!x$int.only) { cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) cat("\n") if (is.element(x$test, c("knha","adhoc","t"))) { cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val* ", .pval(x$QMp, digits=digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val* ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (is.element(x$test, c("knha","adhoc","t"))) { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), "pval*"=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) colnames(res.table)[5] <- "pval*" # rename 'pval.' to 'pval*' if (x$permci) colnames(res.table)[6:7] <- c("ci.lb*", "ci.ub*") } else { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), "pval*"=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) colnames(res.table)[4] <- "pval*" # rename 'pval.' to 'pval*' if (x$permci) colnames(res.table)[5:6] <- c("ci.lb*", "ci.ub*") } rownames(res.table) <- rownames(x$beta) signif <- symnum(x$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } if (x$int.only) res.table <- res.table[1,] cat(mstyle$section("Model Results:")) cat("\n\n") if (x$int.only) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) if (signif.legend) { cat("\n") cat(mstyle$legend("---\nSignif. 
codes: "), mstyle$legend(attr(signif, "legend"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/rcalc.r0000644000176200001440000002472314031024733013364 0ustar liggesusersrcalc <- function(x, ni, data, rtoz=FALSE, nfun="min", sparse=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (!(inherits(x, "formula") || inherits(x, "matrix") || inherits(x, "list"))) stop(mstyle$stop("Argument 'x' must be either a formula, a matrix, or a list of matrices.")) if (missing(ni)) stop(mstyle$stop("Argument 'ni' must be specified.")) if (is.character(nfun)) nfun <- match.arg(nfun, c("min", "harmonic", "mean")) ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("upper", "simplify", "rowid", "vnames", "noid")) if (is.null(ddd$upper)) { upper <- TRUE } else { upper <- ddd$upper } if (is.null(ddd$simplify)) { simplify <- TRUE } else { simplify <- ddd$simplify } na.act <- getOption("na.action") on.exit(options(na.action=na.act)) ############################################################################ ### in case x is a formula, process it if (inherits(x, "formula")) { if (missing(data)) stop(mstyle$stop("Must specify 'data' argument.")) if (!is.data.frame(data)) data <- data.frame(data) ### extract ni mf <- match.call() mf.ni <- mf[[match("ni", names(mf))]] ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) ### get all variables from data options(na.action = "na.pass") dat <- get_all_vars(x, data=data) options(na.action = na.act) ### if no study id has been specified, assume it is a single study if (ncol(dat) == 3L) { dat[[4]] <- 1 noid <- TRUE } else { noid <- FALSE } vnames <- names(dat) ### check that there are really 4 variables if (ncol(dat) != 4L) stop(mstyle$stop("Formula should contain 4 variables, but contains ", ncol(dat), " variables.")) ### check that there are no missings in the variable identifiers if (anyNA(c(dat[[2]],dat[[3]]))) stop(mstyle$stop("No missing values allowed in variable identifiers.")) id <- dat[[4]] ### check that ni has the same length as there are rows in 'data' if (length(ni) != nrow(data)) stop(mstyle$stop("Argument 'ni' must be of the same length as the data frame specified via 'data'.")) ### check that there are ni missings in the study identifier if (anyNA(id)) stop(mstyle$stop("No missing values allowed in study identifier.")) ### need these to correctly sort 'dat' and 'V' back into the original order at the end ### (and need to order within rows, so that matching works correctly) id.var1 <- paste0(id, ".", as.character(dat[[2]])) id.var2 <- paste0(id, ".", as.character(dat[[3]])) id.var1.id.var2 <- .psort(id.var1, id.var2) id.var1 <- id.var1.id.var2[,1] id.var2 <- id.var1.id.var2[,2] rowid <- paste0(id.var1, ".", id.var2) dat <- split(dat, id) ni <- split(ni, id) Rlist <- list() nmi <- rep(NA, length(ni)) for (i in 1:length(dat)) { if (any(ni[[i]] < 0, na.rm=TRUE)) stop(mstyle$stop("One or more sample sizes are negative in study ", dat[[i]][[4]][[1]], ".")) if (is.function(nfun)) { nfunnmi <- nfun(ni[[i]]) if (length(nfunnmi) != 1L) stop(mstyle$stop("Function specified via 'nfun' does not return a single value.")) nmi[i] <- nfunnmi } else { if (nfun == "min") nmi[i] <- min(ni[[i]], na.rm=TRUE) if (nfun == "harmonic") nmi[i] <- 1 / mean(1/ni[[i]], na.rm=TRUE) if (nfun == "mean") nmi[i] <- mean(ni[[i]], na.rm=TRUE) } var1 <- as.character(dat[[i]][[2]]) var2 <- as.character(dat[[i]][[3]]) var1.var2 <- paste0(var1, ".", var2) var1.var2.eq <- var1 == var2 if (any(var1.var2.eq)) 
stop(mstyle$stop("Identical var1-var2 pair", ifelse(sum(var1.var2.eq) >= 2L, "s", ""), " (", paste0(var1.var2[var1.var2.eq], collapse=", "), ") in study ", dat[[i]][[4]][[1]], ".")) var1.var2.dup <- duplicated(var1.var2) if (any(var1.var2.dup)) stop(mstyle$stop("Duplicated var1-var2 pair", ifelse(sum(var1.var2.dup) >= 2L, "s", ""), " (", paste0(var1.var2[var1.var2.dup], collapse=", "), ") in study ", dat[[i]][[4]][[1]], ".")) ri <- dat[[i]][[1]] if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1 in study ", dat[[i]][[4]][[1]], ".")) vars <- sort(unique(c(var1, var2))) Ri <- matrix(NA, nrow=length(vars), ncol=length(vars)) diag(Ri) <- 1 rownames(Ri) <- colnames(Ri) <- vars for (j in 1:length(var1)) { Ri[var1[j],var2[j]] <- Ri[var2[j],var1[j]] <- ri[j] } Rlist[[i]] <- Ri } names(Rlist) <- names(dat) return(rcalc(Rlist, ni=nmi, simplify=simplify, rtoz=rtoz, sparse=sparse, rowid=rowid, vnames=vnames, noid=noid)) } ############################################################################ ### in case x is a list, need to loop through elements if (is.list(x)) { k <- length(x) if (length(x) != length(ni)) stop(mstyle$stop("Argument 'ni' must be of the same length as there are elements in 'x'.")) res <- list() for (i in 1:k) { res[[i]] <- rcalc(x[[i]], ni[i], upper=upper, rtoz=rtoz, ...) } if (is.null(names(x))) names(x) <- 1:k if (simplify) { ki <- sapply(res, function(x) NROW(x$dat)) dat <- cbind(id=rep(names(x), times=ki), do.call(rbind, lapply(res, "[[", "dat"))) if (sparse) { V <- bdiag(lapply(res, "[[", "V")) } else { V <- bldiag(lapply(res, "[[", "V")) } rownames(V) <- colnames(V) <- unlist(lapply(res, function(x) rownames(x$V))) if (!is.null(ddd$rowid)) { rowid <- match(ddd$rowid, paste0(dat[[1]], ".", as.character(dat[[2]]), ".", dat[[1]], ".", as.character(dat[[3]]))) dat <- dat[rowid,] V <- V[rowid,rowid] } if (!is.null(ddd$vnames)) { names(dat)[1:3] <- ddd$vnames[c(4,2,3)] names(dat)[4] <- paste0(ddd$vnames[2], ".", ddd$vnames[3]) } if (!is.null(ddd$noid) && ddd$noid) { dat[[1]] <- NULL } rownames(dat) <- 1:nrow(dat) return(list(dat=dat, V=V)) } else { names(res) <- names(x) return(res) } } ############################################################################ ### check if x is square matrix if (!is.matrix(x)) stop(mstyle$stop("Argument 'x' must be a matrix.")) if (dim(x)[1] != dim(x)[2]) stop(mstyle$stop("Argument 'x' must be a square matrix.")) ### set default dimension names dimsx <- nrow(x) dnames <- paste0("x", 1:dimsx) ### in case x has dimension names, use those if (!is.null(rownames(x))) dnames <- rownames(x) if (!is.null(colnames(x))) dnames <- colnames(x) ### in case x is a 1x1 (or 0x0) matrix, return nothing if (dimsx <= 1L) return(list(dat=NULL, V=NULL)) ### make x symmetric, depending on whether we use upper or lower part if (upper) { x[lower.tri(x)] <- t(x)[lower.tri(x)] } else { x[upper.tri(x)] <- t(x)[upper.tri(x)] } ### check if x is symmetric (can be skipped since x must now be symmetric) #if (!isSymmetric(x)) # stop(mstyle$stop("x must be a symmetric matrix.")) ### stack upper/lower triangular part of x into a column vector (this is always done column-wise!) 
if (upper) { ri <- cbind(x[upper.tri(x)]) } else { ri <- cbind(x[lower.tri(x)]) } ### check that correlations are in [-1,1] if (any(abs(ri) > 1, na.rm=TRUE)) stop(mstyle$stop("One or more correlations are > 1 or < -1.")) ### check that sample sizes are non-negative if (isTRUE(ni < 0)) stop(mstyle$stop("One or more sample sizes are negative.")) ### apply r-to-z transformation if requested if (rtoz) ri <- 1/2 * log((1 + ri)/(1 - ri)) ### I and J are matrices with 1:dimsx for rows and columns, respectively I <- matrix(1:dimsx, nrow=dimsx, ncol=dimsx) J <- matrix(1:dimsx, nrow=dimsx, ncol=dimsx, byrow=TRUE) ### get upper/lower triangular elements of I and J if (upper) { I <- I[upper.tri(I)] J <- J[upper.tri(J)] } else { I <- I[lower.tri(I)] J <- J[lower.tri(J)] } ### dimensions in V (must be dimsx*(dimsx-1)/2) dimsV <- length(ri) ### set up V matrix V <- matrix(NA, nrow=dimsV, ncol=dimsV) for (ro in 1:dimsV) { for (co in 1:dimsV) { i <- I[ro] j <- J[ro] k <- I[co] l <- J[co] ### Olkin & Finn (1995), equation 5, page 157 V[ro,co] <- 1/2 * x[i,j]*x[k,l] * (x[i,k]^2 + x[i,l]^2 + x[j,k]^2 + x[j,l]^2) + x[i,k]*x[j,l] + x[i,l]*x[j,k] - (x[i,j]*x[i,k]*x[i,l] + x[j,i]*x[j,k]*x[j,l] + x[k,i]*x[k,j]*x[k,l] + x[l,i]*x[l,j]*x[l,k]) ### Steiger (1980), equation 2, page 245 (provides the same result) #V[ro,co] <- 1/2 * ((x[i,k] - x[i,j]*x[j,k]) * (x[j,l] - x[j,k]*x[k,l]) + # (x[i,l] - x[i,k]*x[k,l]) * (x[j,k] - x[j,i]*x[i,k]) + # (x[i,k] - x[i,l]*x[l,k]) * (x[j,l] - x[j,i]*x[i,l]) + # (x[i,l] - x[i,j]*x[j,l]) * (x[j,k] - x[j,l]*x[l,k])) ### Steiger (1980), equation 11, page 247 for r-to-z transformed values if (rtoz) V[ro,co] <- V[ro,co] / ((1 - x[i,j]^2) * (1 - x[k,l]^2)) } } ### divide V by (n-1) for raw correlations and by (n-3) for r-to-z transformed correlations if (isTRUE(ni >= 5)) { if (rtoz) { V <- V/(ni-3) } else { V <- V/(ni-1) } } else { V <- NA*V } ### create matrix with var1 and var2 names and sort rowwise dmat <- cbind(dnames[I], dnames[J]) dmat <- t(apply(dmat, 1, sort)) ### set row/column names for V var1.var2 <- paste0(dmat[,1], ".", dmat[,2]) rownames(V) <- colnames(V) <- var1.var2 #return(list(dat=data.frame(var1=dmat[,1], var2=dmat[,2], var1.var2=var1.var2, yi=ri, vi=unname(diag(V)), ni=ni, stringsAsFactors=FALSE), V=V)) return(list(dat=data.frame(var1=dmat[,1], var2=dmat[,2], var1.var2=var1.var2, yi=ri, ni=ni, stringsAsFactors=FALSE), V=V)) } metafor/R/print.escalc.r0000644000176200001440000000356613770373476014713 0ustar liggesusersprint.escalc <- function(x, digits=attr(x,"digits"), ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="escalc") attr(x, "class") <- NULL digits <- .get.digits(digits=digits, xdigits=attr(x, "digits"), dmiss=FALSE) ### get positions of the variable names in the object ### note: if the object no longer contains a particular variable, match() returns NA; ### use na.omit(), so that length() is then zero (as needed for if statements below) yi.pos <- na.omit(match(attr(x, "yi.names"), names(x))) vi.pos <- na.omit(match(attr(x, "vi.names"), names(x))) sei.pos <- na.omit(match(attr(x, "sei.names"), names(x))) zi.pos <- na.omit(match(attr(x, "zi.names"), names(x))) pval.pos <- na.omit(match(attr(x, "pval.names"), names(x))) ci.lb.pos <- na.omit(match(attr(x, "ci.lb.names"), names(x))) ci.ub.pos <- na.omit(match(attr(x, "ci.ub.names"), names(x))) ### get rownames attribute so we can back-assign it rnames <- attr(x, "row.names") x <- data.frame(x) rownames(x) <- rnames ### round variables according to the digits argument if (length(yi.pos) > 0L) x[yi.pos] <- apply(x[yi.pos], 2, .fcf, digits[["est"]]) if (length(vi.pos) > 0L) x[vi.pos] <- apply(x[vi.pos], 2, .fcf, digits[["var"]]) if (length(sei.pos) > 0L) x[sei.pos] <- apply(x[sei.pos], 2, .fcf, digits[["se"]]) if (length(zi.pos) > 0L) x[zi.pos] <- apply(x[zi.pos], 2, .fcf, digits[["test"]]) if (length(pval.pos) > 0L) x[pval.pos] <- apply(x[pval.pos], 2, .pval, digits[["pval"]]) # note: using .pval here if (length(ci.lb.pos) > 0L) x[ci.lb.pos] <- apply(x[ci.lb.pos], 2, .fcf, digits[["ci"]]) if (length(ci.ub.pos) > 0L) x[ci.ub.pos] <- apply(x[ci.ub.pos], 2, .fcf, digits[["ci"]]) tmp <- capture.output(print(x, ...)) .print.table(tmp, mstyle) } metafor/R/weights.rma.uni.r0000644000176200001440000000314113775620010015315 0ustar liggesusersweights.rma.uni <- function(object, type="diagonal", ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.uni", notav="rma.uni.selmodel") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("diagonal", "matrix")) x <- object ######################################################################### if (x$weighted) { if (is.null(x$weights)) { W <- diag(1/(x$vi + x$tau2), nrow=x$k, ncol=x$k) } else { W <- diag(x$weights, nrow=x$k, ncol=x$k) } } else { W <- diag(1/x$k, nrow=x$k, ncol=x$k) } ######################################################################### if (type == "diagonal") { wi <- as.vector(diag(W)) weight <- rep(NA_real_, x$k.f) weight[x$not.na] <- wi / sum(wi) * 100 names(weight) <- x$slab if (na.act == "na.omit") weight <- weight[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in weights.")) return(weight) } if (type == "matrix") { Wfull <- matrix(NA_real_, nrow=x$k.f, ncol=x$k.f) Wfull[x$not.na, x$not.na] <- W rownames(Wfull) <- x$slab colnames(Wfull) <- x$slab if (na.act == "na.omit") Wfull <- Wfull[x$not.na, x$not.na, drop=FALSE] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(Wfull) } } metafor/R/forest.default.r0000644000176200001440000006314514054724744015244 0ustar liggesusersforest.default <- function(x, vi, sei, ci.lb, ci.ub, annotate=TRUE, showweights=FALSE, header=FALSE, xlim, alim, olim, ylim, top=3, at, steps=5, level=95, refline=0, digits=2L, width, xlab, slab, ilab, ilab.xpos, ilab.pos, order, subset, transf, atransf, targs, rows, efac=1, pch=15, psize, plim=c(0.5,1.5), col, lty, fonts, cex, cex.lab, cex.axis, annosym, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE transf.char <- deparse(substitute(transf)) atransf.char <- deparse(substitute(atransf)) if (is.function(transf) && is.function(atransf)) stop(mstyle$stop("Use either 'transf' or 'atransf' to specify a transformation (not both).")) yi <- x ### note: transf and atransf must be function names and cannot, for example, be arguments ### passed down from other functions (i.e., deparse(substitute(...)) will grab exactly what ### is specified for the argument), so the following function would not work: ### ### misc <- function(x, vi, tfunction=FALSE) ### forest.default(x, vi, atransf=tfunction) if (missing(targs)) targs <- NULL if (missing(at)) at <- NULL if (missing(ilab)) ilab <- NULL if (missing(ilab.xpos)) ilab.xpos <- NULL if (missing(ilab.pos)) ilab.pos <- NULL if (missing(subset)) subset <- NULL if (missing(order)) order <- NULL if (missing(psize)) psize <- NULL if (missing(col)) col <- NULL if (missing(cex)) cex <- NULL if (missing(cex.lab)) cex.lab <- NULL if (missing(cex.axis)) cex.axis <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ### digits[1] for annotations, digits[2] for x-axis labels ### note: digits can also be a list (e.g., digits=list(2L,3)); trailing 0's are dropped for intergers if (length(digits) == 1L) digits <- c(digits,digits) ############################################################################ ### set default line types if user has not specified 'lty' argument if (missing(lty)) { lty <- c("solid", "solid") # 1st value = CIs, 2nd value = horizontal line(s) } else { if (length(lty) == 1L) lty <- c(lty, "solid") } ### vertical expansion factor: 1st = CI end lines, 2nd = arrows if (length(efac) == 1L) efac <- rep(efac, 2) ### annotation symbols vector if (missing(annosym)) annosym <- c(" [", ", ", "]", "-") # 4th element for minus sign symbol if (length(annosym) == 3L) annosym <- c(annosym, "-") if (length(annosym) != 4L) stop(mstyle$stop("Argument 'annosym' must be a vector of length 3.")) ### set measure based on the measure attribute of yi if (is.null(attr(yi, "measure"))) { measure <- "GEN" } else { measure <- attr(yi, "measure") } ### column header estlab <- .setlab(measure, transf.char, atransf.char, gentype=3, short=TRUE) if (is.expression(estlab)) { header.right <- parse(text=paste0("bold(", estlab, " * '", annosym[1], "' * '", 100*(1-level), "% CI'", " * '", annosym[3], "')")) } else { header.right <- paste0(estlab, annosym[1], 100*(1-level), "% CI", annosym[3]) } if (is.logical(header)) { if (header) { header.left <- "Study" } else { header.left <- NULL header.right <- NULL } } else { if (!is.character(header)) stop(mstyle$stop("Argument 'header' must either be a logical or character vector.")) if (length(header) == 1L) { header.left <- header } else { header.left <- header[1] header.right <- header[2] } } if (!annotate) header.right <- NULL ddd <- list(...) if (is.null(ddd$decreasing)) { decreasing <- FALSE } else { decreasing <- ddd$decreasing } if (!is.null(ddd$clim)) olim <- ddd$clim lplot <- function(..., textpos, decreasing, clim) plot(...) labline <- function(..., textpos, decreasing, clim) abline(...) 
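   ### the local wrappers defined here (and the ones that follow) name 'textpos',
   ### 'decreasing', and 'clim' as explicit arguments so that such values passed via
   ### '...' are absorbed and not forwarded to the underlying base graphics functions,
   ### which would otherwise warn about unknown graphical parameters; e.g.,
   ### lplot(1:3, 1:3, textpos=c(-2,2)) draws the same plot as plot(1:3, 1:3)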
lsegments <- function(..., textpos, decreasing, clim) segments(...) laxis <- function(..., textpos, decreasing, clim) axis(...) lmtext <- function(..., textpos, decreasing, clim) mtext(...) lpolygon <- function(..., textpos, decreasing, clim) polygon(...) ltext <- function(..., textpos, decreasing, clim) text(...) lpoints <- function(..., textpos, decreasing, clim) points(...) ######################################################################### ### extract data, study labels, and other arguments if (hasArg(ci.lb) && hasArg(ci.ub)) { # CI bounds are specified by user if (length(ci.lb) != length(ci.ub)) stop(mstyle$stop("Length of 'ci.lb' and 'ci.ub' is not the same.")) if (missing(vi) && missing(sei)) { # vi/sei not specified, so calculate vi based on CI vi <- ((ci.ub - ci.lb) / (2*qnorm(level/2, lower.tail=FALSE)))^2 } else { if (missing(vi)) # vi not specified, but sei is, so set vi = sei^2 vi <- sei^2 } if (length(ci.lb) != length(vi)) stop(mstyle$stop("Length of 'vi' (or 'sei') does not match length of ('ci.lb', 'ci.ub') pairs.")) } else { # CI bounds are not specified by user if (missing(vi)) { if (missing(sei)) { stop(mstyle$stop("Must specify either 'vi', 'sei', or ('ci.lb', 'ci.ub') pairs.")) } else { vi <- sei^2 } } if (length(yi) != length(vi)) # need to do this here to avoid warning when calculating 'ci.lb' and 'ci.ub' stop(mstyle$stop("Length of 'vi' (or 'sei') does not match length of 'yi'.")) ci.lb <- yi - qnorm(level/2, lower.tail=FALSE) * sqrt(vi) ci.ub <- yi + qnorm(level/2, lower.tail=FALSE) * sqrt(vi) } ### check length of yi and vi k <- length(yi) if (length(vi) != k) stop(mstyle$stop("Length of 'yi' does not match the length of 'vi', 'sei', or the ('ci.lb', 'ci.ub') pairs.")) ### note: slab (if specified), ilab (if specified), pch (if vector), psize (if ### vector), col (if vector), subset (if specified), order (if vector) ### must have the same length as yi (including NAs) even when subsetting eventually if (missing(slab)) { if (!is.null(attr(yi, "slab")) && length(attr(yi, "slab")) == k) { slab <- attr(yi, "slab") # use slab info if it can be found in slab attribute of yi (and it has the right length) } else { slab <- paste("Study", seq_len(k)) } } else { if (is.null(slab) || (length(slab) == 1L && is.na(slab))) # slab=NULL or slab=NA can be used to suppress study labels slab <- rep("", k) } if (length(slab) != k) stop(mstyle$stop(paste0("Length of the 'slab' argument (", length(slab), ") does not correspond to the number of outcomes (", k, ")."))) if (!is.null(ilab)) { if (is.null(dim(ilab))) ilab <- cbind(ilab) if (nrow(ilab) != k) stop(mstyle$stop(paste0("Length of the 'ilab' argument (", nrow(ilab), ") does not correspond to the number of outcomes (", k, ")."))) } if (length(pch) == 1L) pch <- rep(pch, k) # pch can be a single value (which is then repeated) if (length(pch) != k) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the number of outcomes (", k, ")."))) if (!is.null(psize)) { if (length(psize) == 1L) # psize can be a single value (which is then repeated) psize <- rep(psize, k) if (length(psize) != k) stop(mstyle$stop(paste0("Length of the 'psize' argument (", length(psize), ") does not correspond to the number of outcomes (", k, ")."))) } if (!is.null(col)) { if (length(col) == 1L) # col can be a single value (which is then repeated) col <- rep(col, k) if (length(col) != k) stop(mstyle$stop(paste0("Length of the 'col' argument (", length(col), ") does not correspond to the number of outcomes (", k, 
")."))) } else { col <- rep("black", k) } ### adjust subset if specified if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) if (length(subset) != k) stop(mstyle$stop(paste0("Length of the 'subset' argument (", length(subset), ") does not correspond to the number of outcomes (", k, ")."))) } ### sort the data if requested if (!is.null(order)) { if (length(order) == 1L) { order <- match.arg(order, c("obs", "yi", "prec", "vi")) if (order == "obs" || order == "yi") sort.vec <- order(yi) if (order == "prec" || order == "vi") sort.vec <- order(vi, yi) } else { if (length(order) != k) stop(mstyle$stop(paste0("Length of the 'order' argument (", length(order), ") does not correspond to the number of outcomes (", k, ")."))) if (grepl("^order\\(", deparse(substitute(order)))) { sort.vec <- order } else { sort.vec <- order(order, decreasing=decreasing) } } yi <- yi[sort.vec] vi <- vi[sort.vec] ci.lb <- ci.lb[sort.vec] ci.ub <- ci.ub[sort.vec] slab <- slab[sort.vec] ilab <- ilab[sort.vec,,drop=FALSE] # if NULL, remains NULL pch <- pch[sort.vec] psize <- psize[sort.vec] # if NULL, remains NULL col <- col[sort.vec] subset <- subset[sort.vec] # if NULL, remains NULL } ### if a subset of studies is specified if (!is.null(subset)) { yi <- yi[subset] vi <- vi[subset] ci.lb <- ci.lb[subset] ci.ub <- ci.ub[subset] slab <- slab[subset] ilab <- ilab[subset,,drop=FALSE] # if NULL, remains NULL pch <- pch[subset] psize <- psize[subset] # if NULL, remains NULL col <- col[subset] } k <- length(yi) # in case length of k has changed ### set rows value if (missing(rows)) { rows <- k:1 } else { if (length(rows) == 1L) # note: rows must be a single value or the same rows <- rows:(rows-k+1) # length of yi (including NAs) *after ordering/subsetting* } if (length(rows) != k) stop(mstyle$stop(paste0("Length of the 'rows' argument (", length(rows), ") does not correspond to the number of outcomes (", k, ")", ifelse(is.null(subset), ".", " after subsetting.")))) ### reverse order yi <- yi[k:1] vi <- vi[k:1] ci.lb <- ci.lb[k:1] ci.ub <- ci.ub[k:1] slab <- slab[k:1] ilab <- ilab[k:1,,drop=FALSE] # if NULL, remains NULL pch <- pch[k:1] psize <- psize[k:1] # if NULL, remains NULL col <- col[k:1] rows <- rows[k:1] ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) if (any(yivi.na)) { not.na <- !yivi.na if (na.act == "na.omit") { yi <- yi[not.na] vi <- vi[not.na] ci.lb <- ci.lb[not.na] ci.ub <- ci.ub[not.na] slab <- slab[not.na] ilab <- ilab[not.na,,drop=FALSE] # if NULL, remains NULL pch <- pch[not.na] psize <- psize[not.na] # if NULL, remains NULL col <- col[not.na] rows.new <- rows # rearrange rows due to NAs being omitted from plot rows.na <- rows[!not.na] # shift higher rows down according to number of NAs omitted for (j in seq_len(length(rows.na))) { rows.new[rows >= rows.na[j]] <- rows.new[rows >= rows.na[j]] - 1 } rows <- rows.new[not.na] } if (na.act == "na.fail") stop(mstyle$stop("Missing values in results.")) } # note: yi/vi may be NA if na.act == "na.exclude" or "na.pass" k <- length(yi) # in case length of k has changed ### if requested, apply transformation to yi's and CI bounds if (is.function(transf)) { if (is.null(targs)) { yi <- sapply(yi, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { yi <- sapply(yi, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ### apply observation/outcome limits 
if specified if (!missing(olim)) { if (length(olim) != 2L) stop(mstyle$stop("Argument 'olim' must be of length 2.")) olim <- sort(olim) yi[yi < olim[1]] <- olim[1] yi[yi > olim[2]] <- olim[2] ci.lb[ci.lb < olim[1]] <- olim[1] ci.ub[ci.ub > olim[2]] <- olim[2] } if (showweights) { # inverse variance weights after ordering/subsetting and weights <- 1/vi # omitting NAs (so these weights always add up to 100%) weights <- 100 * weights / sum(weights, na.rm=TRUE) } ### set default point sizes (if not specified by user) if (is.null(psize)) { if (any(vi <= 0, na.rm=TRUE)) { # in case any vi value is zero psize <- rep(1, k) } else { # default psize is proportional to inverse standard error (only vi's that are still in the subset are considered) if (length(plim) < 2L) # note: vi's that are NA are ignored (but vi's whose yi is NA are NOT ignored; an unlikely case in practice) stop(mstyle$stop("Argument 'plim' must be of length 2 or 3.")) wi <- 1/sqrt(vi) if (!is.na(plim[1]) && !is.na(plim[2])) { rng <- max(wi, na.rm=TRUE) - min(wi, na.rm=TRUE) if (rng <= .Machine$double.eps^0.5) { psize <- rep(1, k) } else { psize <- (wi - min(wi, na.rm=TRUE)) / rng psize <- (psize * (plim[2] - plim[1])) + plim[1] } } if (is.na(plim[1]) && !is.na(plim[2])) { psize <- wi / max(wi, na.rm=TRUE) * plim[2] if (length(plim) == 3L) psize[psize <= plim[3]] <- plim[3] } if (!is.na(plim[1]) && is.na(plim[2])) { psize <- wi / min(wi, na.rm=TRUE) * plim[1] if (length(plim) == 3L) psize[psize >= plim[3]] <- plim[3] } if (all(is.na(psize))) # if k=1, then psize is NA, so catch this (and maybe some other problems) psize <- rep(1, k) } } ######################################################################### ### total range of CI bounds rng <- max(ci.ub, na.rm=TRUE) - min(ci.lb, na.rm=TRUE) if (annotate) { if (showweights) { plot.multp.l <- 2.00 plot.multp.r <- 2.00 } else { plot.multp.l <- 1.20 plot.multp.r <- 1.20 } } else { plot.multp.l <- 1.20 plot.multp.r <- 0.40 } ### set plot limits if (missing(xlim)) { xlim <- c(min(ci.lb, na.rm=TRUE) - rng * plot.multp.l, max(ci.ub, na.rm=TRUE) + rng * plot.multp.r) xlim <- round(xlim, digits[[2]]) #xlim[1] <- xlim[1]*max(1, digits[[2]]/2) #xlim[2] <- xlim[2]*max(1, digits[[2]]/2) } ### set x-axis limits (at argument overrides alim argument) alim.spec <- TRUE if (missing(alim)) { if (is.null(at)) { alim <- range(pretty(x=c(min(ci.lb, na.rm=TRUE), max(ci.ub, na.rm=TRUE)), n=steps-1)) alim.spec <- FALSE } else { alim <- range(at) } } ### make sure the plot and x-axis limits are sorted alim <- sort(alim) xlim <- sort(xlim) ### plot limits must always encompass the yi values if (xlim[1] > min(yi, na.rm=TRUE)) { xlim[1] <- min(yi, na.rm=TRUE) } if (xlim[2] < max(yi, na.rm=TRUE)) { xlim[2] <- max(yi, na.rm=TRUE) } ### x-axis limits must always encompass the yi values (no longer required) #if (alim[1] > min(yi, na.rm=TRUE)) { alim[1] <- min(yi, na.rm=TRUE) } #if (alim[2] < max(yi, na.rm=TRUE)) { alim[2] <- max(yi, na.rm=TRUE) } ### plot limits must always encompass the x-axis limits if (alim[1] < xlim[1]) { xlim[1] <- alim[1] } if (alim[2] > xlim[2]) { xlim[2] <- alim[2] } ### allow adjustment of position of study labels and annotations via textpos argument if (is.null(ddd$textpos)) ddd$textpos <- c(xlim[1], xlim[2]) if (length(ddd$textpos) != 2L) stop(mstyle$stop("Argument 'textpos' must be of length 2.")) if (is.na(ddd$textpos[1])) ddd$textpos[1] <- xlim[1] if (is.na(ddd$textpos[2])) ddd$textpos[2] <- xlim[2] ### set y-axis limits if (missing(ylim)) { ylim <- c(0.5, max(rows, na.rm=TRUE)+top) 
} else { ylim <- sort(ylim) } ### generate x-axis positions if none are specified if (is.null(at)) { if (alim.spec) { at <- seq(from=alim[1], to=alim[2], length.out=steps) } else { at <- pretty(x=c(min(ci.lb, na.rm=TRUE), max(ci.ub, na.rm=TRUE)), n=steps-1) } } else { at[at < alim[1]] <- alim[1] # remove at values that are below or above the axis limits at[at > alim[2]] <- alim[2] at <- unique(at) } ### x-axis labels (apply transformation to axis labels if requested) at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } } else { at.lab <- formatC(at.lab, digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } ######################################################################### ### set/get fonts (1st for study labels, 2nd for annotations, 3rd for ilab) ### when passing a named vector, the names are for 'family' and the values are for 'font' if (missing(fonts)) { fonts <- rep(par("family"), 3) } else { if (length(fonts) == 1L) fonts <- rep(fonts, 3) if (length(fonts) == 2L) fonts <- c(fonts, fonts[1]) } if (is.null(names(fonts))) fonts <- structure(c(1L,1L,1L), names=fonts) par(family=names(fonts)[1], font=fonts[1]) ### adjust margins par.mar <- par("mar") par.mar.adj <- par.mar - c(0,3,1,1) par.mar.adj[par.mar.adj < 0] <- 0 par(mar = par.mar.adj) on.exit(par(mar = par.mar)) ### start plot lplot(NA, NA, xlim=xlim, ylim=ylim, xlab="", ylab="", yaxt="n", xaxt="n", xaxs="i", bty="n", col="black", ...) ### horizontal title line labline(h=ylim[2]-(top-1), lty=lty[2], col="black", ...) ### get coordinates of the plotting region par.usr <- par("usr") ### add reference line if (is.numeric(refline)) lsegments(refline, par.usr[3], refline, ylim[2]-(top-1), lty="dotted", col="black", ...) ### set cex, cex.lab, and cex.axis sizes as a function of the height of the figure height <- par.usr[4] - par.usr[3] if (is.null(cex)) { lheight <- strheight("O") cex.adj <- ifelse(k * lheight > height * 0.8, height/(1.25 * k * lheight), 1) } if (is.null(cex)) { cex <- par("cex") * cex.adj } else { if (is.null(cex.lab)) cex.lab <- cex if (is.null(cex.axis)) cex.axis <- cex } if (is.null(cex.lab)) cex.lab <- par("cex") * cex.adj if (is.null(cex.axis)) cex.axis <- par("cex") * cex.adj ### add x-axis laxis(side=1, at=at, labels=at.lab, cex.axis=cex.axis, col="black", ...) ### add x-axis label if (missing(xlab)) xlab <- .setlab(measure, transf.char, atransf.char, gentype=1) lmtext(xlab, side=1, at=min(at) + (max(at)-min(at))/2, line=par("mgp")[1]-0.5, cex=cex.lab, col="black", ...) ### add CI ends (either | or <> if outside of axis limits) for (i in seq_len(k)) { ### need to skip missings (if check below will otherwise throw an error) if (is.na(yi[i]) || is.na(ci.lb[i]) || is.na(ci.ub[i])) next ### if the lower bound is actually larger than upper x-axis limit, then everything is to the right and just draw a polygon pointing in that direction if (ci.lb[i] >= alim[2]) { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) 
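      ### the polygon drawn here is a small arrowhead: its width is 1.4% of the
      ### horizontal plot range (scaled by 'cex') and its half-height is height/150
      ### (scaled by 'cex' and by the second element of 'efac'), so efac=c(1,2) would
      ### double the height of the arrowheads while leaving the CI end lines (which
      ### use the first element of 'efac') unchanged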
next } ### if the upper bound is actually lower than lower x-axis limit, then everything is to the left and just draw a polygon pointing in that direction if (ci.ub[i] <= alim[1]) { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) next } lsegments(max(ci.lb[i], alim[1]), rows[i], min(ci.ub[i], alim[2]), rows[i], lty=lty[1], col=col[i], ...) if (ci.lb[i] >= alim[1]) { lsegments(ci.lb[i], rows[i]-(height/150)*cex*efac[1], ci.lb[i], rows[i]+(height/150)*cex*efac[1], col=col[i], ...) } else { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) } if (ci.ub[i] <= alim[2]) { lsegments(ci.ub[i], rows[i]-(height/150)*cex*efac[1], ci.ub[i], rows[i]+(height/150)*cex*efac[1], col=col[i], ...) } else { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) } } ### add study labels on the left ltext(ddd$textpos[1], rows, slab, pos=4, cex=cex, col=col, ...) ### add info labels if (!is.null(ilab)) { if (is.null(ilab.xpos)) stop(mstyle$stop("Must specify 'ilab.xpos' argument when adding information with 'ilab'.")) if (length(ilab.xpos) != ncol(ilab)) stop(mstyle$stop(paste0("Number of 'ilab' columns (", ncol(ilab), ") does not match length of 'ilab.xpos' argument (", length(ilab.xpos), ")."))) if (!is.null(ilab.pos) && length(ilab.pos) == 1L) ilab.pos <- rep(ilab.pos, ncol(ilab)) par(family=names(fonts)[3], font=fonts[3]) for (l in seq_len(ncol(ilab))) { ltext(ilab.xpos[l], rows, ilab[,l], pos=ilab.pos[l], cex=cex, ...) } par(family=names(fonts)[1], font=fonts[1]) } ### add study annotations on the right: yi [LB, UB] if (annotate) { if (is.function(atransf)) { if (is.null(targs)) { annotext <- cbind(sapply(yi, atransf), sapply(ci.lb, atransf), sapply(ci.ub, atransf)) } else { annotext <- cbind(sapply(yi, atransf, targs), sapply(ci.lb, atransf, targs), sapply(ci.ub, atransf, targs)) } ### make sure order of intervals is always increasing tmp <- .psort(annotext[,2:3]) annotext[,2:3] <- tmp } else { annotext <- cbind(yi, ci.lb, ci.ub) } if (showweights) annotext <- cbind(weights, annotext) annotext <- .fcf(annotext, digits[[1]]) annotext <- sub("-", annosym[4], annotext, fixed=TRUE) if (missing(width)) { width <- apply(annotext, 2, function(x) max(nchar(x))) } else { if (length(width) == 1L) width <- rep(width, ncol(annotext)) } for (j in seq_len(ncol(annotext))) { annotext[,j] <- formatC(annotext[,j], width=width[j]) } if (showweights) { annotext <- cbind(annotext[,1], "% ", annotext[,2], annosym[1], annotext[,3], annosym[2], annotext[,4], annosym[3]) } else { annotext <- cbind(annotext[,1], annosym[1], annotext[,2], annosym[2], annotext[,3], annosym[3]) } annotext <- apply(annotext, 1, paste, collapse="") annotext[grepl("NA", annotext, fixed=TRUE)] <- "" par(family=names(fonts)[2], font=fonts[2]) ltext(ddd$textpos[2], rows, labels=annotext, pos=2, cex=cex, col=col, ...) 
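      ### with pos=2 the annotation strings are placed to the left of textpos[2] (by
      ### default the right-hand plot limit), i.e., right-aligned against the right
      ### edge of the plot; the par() call below then switches back from the
      ### annotation font (2nd element of 'fonts') to the study-label font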
par(family=names(fonts)[1], font=fonts[1]) } ### add yi points for (i in seq_len(k)) { ### need to skip missings (if check below will otherwise throw an error) if (is.na(yi[i])) next if (yi[i] >= alim[1] && yi[i] <= alim[2]) lpoints(x=yi[i], y=rows[i], pch=pch[i], cex=cex*psize[i], col=col[i], ...) } #lpoints(x=yi, y=rows, pch=pch, cex=cex*psize, ...) ### add header ltext(ddd$textpos[1], ylim[2]-(top-1)+1, header.left, pos=4, font=2, cex=cex, ...) ltext(ddd$textpos[2], ylim[2]-(top-1)+1, header.right, pos=2, font=2, cex=cex, ...) ######################################################################### ### return some information about plot invisibly res <- list(xlim=par("usr")[1:2], alim=alim, at=at, ylim=ylim, rows=rows, cex=cex, cex.lab=cex.lab, cex.axis=cex.axis) invisible(res) } metafor/R/blup.r0000644000176200001440000000005613672736517013257 0ustar liggesusersblup <- function(x, ...) UseMethod("blup") metafor/R/robust.r0000644000176200001440000000007313457322061013614 0ustar liggesusersrobust <- function(x, cluster, ...) UseMethod("robust") metafor/R/tes.rma.r0000644000176200001440000000245313770377613013666 0ustar liggesuserstes.rma <- function(x, H0=0, alternative="two.sided", alpha=.05, test, tes.alternative="greater", progbar=TRUE, tes.alpha=.10, digits, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("rma.glmm", "rma.mv", "robust.rma", "rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) ### set defaults for digits if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(test)) test <- NULL ######################################################################### if (x$int.only) { theta <- c(x$beta) } else { options(na.action="na.omit") theta <- fitted(x) options(na.action = na.act) } tes.default(c(x$yi), vi=x$vi, H0=H0, alternative=alternative, alpha=alpha, theta=theta, tau2=x$tau2, test=test, tes.alternative=tes.alternative, progbar=progbar, tes.alpha=tes.alpha, digits=digits, ...) } metafor/R/influence.rma.uni.r0000644000176200001440000002203313770364270015624 0ustar liggesusersinfluence.rma.uni <- function(model, digits, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.uni", notav=c("rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) ddd <- list(...) 
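   ### additional (undocumented) arguments that can be passed via '...': 'btt' can be
   ### used to restrict which coefficients enter the Cook's distances and covariance
   ### ratios, 'measure' (e.g., "cooks.distance", "dfbetas", "rstudent") makes the
   ### function return only that diagnostic (presumably so that the corresponding
   ### extractor methods can reuse this code), and 'time' reports the computation time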
.chkdots(ddd, c("btt", "measure", "time")) btt <- .set.btt(ddd$btt, x$p, int.incl=FALSE, Xnames=colnames(x$X)) m <- length(btt) if (is.null(ddd$measure)) { measure <- "all" } else { measure <- ddd$measure } if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (!measure == "cooks.distance" && inherits(model, "robust.rma")) stop(mstyle$stop("Method not available for objects of class \"robust.rma\".")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### tau2.del <- rep(NA_real_, x$k) delpred <- rep(NA_real_, x$k) vdelpred <- rep(NA_real_, x$k) s2w.del <- rep(NA_real_, x$k) QE.del <- rep(NA_real_, x$k) dffits <- rep(NA_real_, x$k) dfbs <- matrix(NA_real_, nrow=x$k, ncol=x$p) cook.d <- rep(NA_real_, x$k) cov.r <- rep(NA_real_, x$k) weight <- rep(NA_real_, x$k) ### predicted values under the full model pred.full <- x$X %*% x$beta ### calculate inverse of variance-covariance matrix under the full model (needed for the Cook's distances) svb <- chol2inv(chol(x$vb[btt,btt,drop=FALSE])) ### also need stXAX/stXX and H matrix for DFFITS calculation when not using the standard weights if (x$weighted) { if (!is.null(x$weights)) { A <- diag(x$weights, nrow=x$k, ncol=x$k) stXAX <- .invcalc(X=x$X, W=A, k=x$k) H <- x$X %*% stXAX %*% t(x$X) %*% A } } else { stXX <- .invcalc(X=x$X, W=diag(x$k), k=x$k) H <- x$X %*% stXX %*% t(x$X) } ### hat values options(na.action = "na.omit") hat <- hatvalues(x) options(na.action = na.act) ### note: skipping NA cases ### also: it is possible that model fitting fails, so that generates more NAs (these NAs will always be shown in output) if (progbar) pbar <- pbapply::startpb(min=0, max=x$k) for (i in seq_len(x$k)) { if (progbar) pbapply::setpb(pbar, i) res <- try(suppressWarnings(rma.uni(x$yi, x$vi, weights=x$weights, mods=x$X, intercept=FALSE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, subset=-i, skipr2=TRUE)), silent=TRUE) if (inherits(res, "try-error")) next ### removing an observation could lead to a model coefficient becoming inestimable if (any(res$coef.na)) next ### save tau2.del and QE.del values tau2.del[i] <- res$tau2 QE.del[i] <- res$QE ### 'deleted' predicted value for the ith observation based on the model without the ith observation included Xi <- matrix(x$X[i,], nrow=1) delpred[i] <- Xi %*% res$beta vdelpred[i] <- Xi %*% tcrossprod(res$vb,Xi) s2w.del[i] <- res$s2w ### compute DFFITS if (x$weighted) { if (is.null(x$weights)) { dffits[i] <- (pred.full[i] - delpred[i]) / sqrt(res$s2w * hat[i] * (tau2.del[i] + x$vi[i])) } else { dffits[i] <- (pred.full[i] - delpred[i]) / sqrt(res$s2w * diag(H %*% diag(tau2.del[i] + x$vi, nrow=x$k, ncol=x$k) %*% t(H)))[i] } } else { dffits[i] <- (pred.full[i] - delpred[i]) / sqrt(res$s2w * diag(H %*% diag(tau2.del[i] + x$vi, nrow=x$k, ncol=x$k) %*% t(H)))[i] } #dffits[i] <- (pred.full[i] - delpred[i]) / sqrt(vdelpred[i]) ### compute var-cov matrix of the fixed effects for the full model, but with tau2.del[i] plugged in if (x$weighted) { if (is.null(x$weights)) { vb.del <- .invcalc(X=x$X, W=diag(1/(x$vi+tau2.del[i]), nrow=x$k, ncol=x$k), k=x$k) } else { vb.del <- tcrossprod(stXAX,x$X) %*% A %*% diag(x$vi+tau2.del[i], nrow=x$k, ncol=x$k) %*% A %*% x$X %*% stXAX } } else { vb.del <- tcrossprod(stXX,x$X) %*% diag(x$vi+tau2.del[i], nrow=x$k, ncol=x$k) %*% x$X %*% stXX } ### compute DFBETA 
and DFBETAS dfb <- x$beta - res$beta dfbs[i,] <- dfb / sqrt(res$s2w * diag(vb.del)) #dfbs[i,] <- dfb / sqrt(diag(res$vb)) ### compute DFBETA (including coefficients as specified via btt) dfb <- x$beta[btt] - res$beta[btt] ### compute Cook's distance cook.d[i] <- crossprod(dfb,svb) %*% dfb # / x$p #cook.d[i] <- sum(1/(x$vi+tau2.del[i]) * (pred.full - x$X %*% res$beta)^2) / x$p ### compute covariance ratio cov.r[i] <- det(res$vb[btt,btt,drop=FALSE]) / det(x$vb[btt,btt,drop=FALSE]) } if (progbar) pbapply::closepb(pbar) ### calculate studentized residual resid <- x$yi - delpred resid[abs(resid) < 100 * .Machine$double.eps] <- 0 #resid[abs(resid) < 100 * .Machine$double.eps * median(abs(resid), na.rm=TRUE)] <- 0 ### see lm.influence #seresid <- sqrt(x$vi + vdelpred + tau2.del) seresid <- sqrt(x$vi * s2w.del + vdelpred + tau2.del * s2w.del) ### this yields the same results as a mean shift outlier model when using test="knha" stresid <- resid / seresid ### extract weights options(na.action="na.omit") weight <- weights(x) options(na.action = na.act) ######################################################################### inf <- matrix(NA_real_, nrow=x$k.f, ncol=8) inf[x$not.na,] <- cbind(stresid, dffits, cook.d, cov.r, tau2.del, QE.del, hat, weight) colnames(inf) <- c("rstudent", "dffits", "cook.d", "cov.r", "tau2.del", "QE.del", "hat", "weight") inf <- data.frame(inf) tmp <- dfbs dfbs <- matrix(NA_real_, nrow=x$k.f, ncol=x$p) dfbs[x$not.na,] <- tmp colnames(dfbs) <- rownames(x$beta) dfbs <- data.frame(dfbs) ######################################################################### ### determine "influential" cases is.infl <- #abs(inf$rstudent) > qnorm(.975) | abs(inf$dffits) > 3*sqrt(x$p/(x$k-x$p)) | pchisq(inf$cook.d, df=m) > .50 | #inf$cov.r > 1 + 3*m/(x$k-m) | #inf$cov.r < 1 - 3*m/(x$k-m) | inf$hat > 3*x$p/x$k | apply(abs(dfbs) > 1, 1, any) ### consider using rowAnys() from matrixStats package #print(is.infl) ######################################################################### if (na.act == "na.omit") { out <- list(rstudent=inf$rstudent[x$not.na], dffits=inf$dffits[x$not.na], cook.d=inf$cook.d[x$not.na], cov.r=inf$cov.r[x$not.na], tau2.del=inf$tau2.del[x$not.na], QE.del=inf$QE.del[x$not.na], hat=inf$hat[x$not.na], weight=inf$weight[x$not.na], inf=ifelse(is.infl & !is.na(is.infl), "*", "")[x$not.na], slab=x$slab[x$not.na], digits=digits) out <- list(inf=out) out$dfbs <- lapply(dfbs, function(z) z[x$not.na]) out$dfbs <- c(out$dfbs, list(slab=x$slab[x$not.na], digits=digits)) out <- c(out, list(ids=x$ids[x$not.na], not.na=x$not.na[x$not.na], is.infl=is.infl[x$not.na])) } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(rstudent=inf$rstudent, dffits=inf$dffits, cook.d=inf$cook.d, cov.r=inf$cov.r, tau2.del=inf$tau2.del, QE.del=inf$QE.del, hat=inf$hat, weight=inf$weight, inf=ifelse(is.infl & !is.na(is.infl), "*", ""), slab=x$slab, digits=digits) out <- list(inf=out) out$dfbs <- lapply(dfbs, function(z) z) out$dfbs <- c(out$dfbs, list(slab=x$slab, digits=digits)) out <- c(out, list(ids=x$ids, not.na=x$not.na, is.infl=is.infl)) } out <- c(out, list(tau2=x$tau2, QE=x$QE, k=x$k, p=x$p, m=m, digits=digits)) if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) class(out$inf) <- c("list.rma") class(out$dfbs) <- c("list.rma") class(out) <- c("infl.rma.uni") if (measure == "cooks.distance") { names(out$inf$cook.d) <- out$inf$slab out <- out$inf$cook.d } if (measure == "dfbetas") out <- out$dfbs if (measure == "rstudent") { if (na.act == "na.omit") { 
resid.f <- resid seresid.f <- seresid stresid.f <- stresid } if (na.act == "na.exclude" || na.act == "na.pass") { resid.f <- rep(NA_real_, x$k.f) seresid.f <- rep(NA_real_, x$k.f) stresid.f <- rep(NA_real_, x$k.f) resid.f[x$not.na] <- resid seresid.f[x$not.na] <- seresid stresid.f[x$not.na] <- stresid } out <- list(resid=resid.f, se=seresid.f, z=stresid.f, slab=out$inf$slab, digits=digits) class(out) <- c("list.rma") } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } return(out) } metafor/R/regtest.r0000644000176200001440000000006413457322061013753 0ustar liggesusersregtest <- function(x, ...) UseMethod("regtest") metafor/R/rstandard.rma.mv.r0000644000176200001440000001174214027645371015472 0ustar liggesusersrstandard.rma.mv <- function(model, digits, cluster, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mv", notav="robust.rma") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } misscluster <- ifelse(missing(cluster), TRUE, FALSE) if (misscluster) cluster <- seq_len(x$k.all) ######################################################################### ### process cluster variable # note: cluster variable is assumed to be of the same length as the size of # the original dataset passed to the model fitting function and so we apply # the same subsetting and removing of missings (if necessary) as was done # during model fitting if (!is.null(x$subset)) cluster <- cluster[x$subset] cluster.f <- cluster cluster <- cluster[x$not.na] ### checks on cluster variable if (anyNA(cluster.f)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster.f) == 0L) stop(mstyle$stop(paste0("Cannot find 'cluster' variable (or it has zero length)."))) if (length(cluster) != x$k) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not match length of data (", x$k, ")."))) ######################################################################### options(na.action="na.omit") H <- hatvalues(x, type="matrix") options(na.action = na.act) ######################################################################### ImH <- diag(x$k) - H #ei <- ImH %*% cbind(x$yi) ei <- c(x$yi - x$X %*% x$beta) ei[abs(ei) < 100 * .Machine$double.eps] <- 0 #ei[abs(ei) < 100 * .Machine$double.eps * median(abs(ei), na.rm=TRUE)] <- 0 ### see lm.influence ### don't allow this; the SEs of the residuals cannot be estimated consistently for "robust.rma" objects #if (inherits(x, "robust.rma")) { # ve <- ImH %*% tcrossprod(x$meat,ImH) #} else { # ve <- ImH %*% tcrossprod(x$M,ImH) #} ve <- ImH %*% tcrossprod(x$M,ImH) #ve <- x$M + x$X %*% x$vb %*% t(x$X) - 2*H%*%x$M sei <- sqrt(diag(ve)) ######################################################################### if (!misscluster) { ### cluster ids and number of clusters ids <- unique(cluster) n <- length(ids) X2 <- rep(NA_real_, n) k.id <- rep(NA_integer_, n) for (i in seq_len(n)) { incl <- cluster %in% ids[i] k.id[i] <- sum(incl) vei <- as.matrix(ve[incl,incl,drop=FALSE]) if (any(eigen(vei, symmetric=TRUE, only.values=TRUE)$values <= .Machine$double.eps)) next sve <- try(chol2inv(chol(vei)), silent=TRUE) #sve <- 
try(solve(ve[incl,incl,drop=FALSE]), silent=TRUE) if (inherits(sve, "try-error")) next X2[i] <- rbind(ei[incl]) %*% sve %*% cbind(ei[incl]) } } ######################################################################### resid <- rep(NA_real_, x$k.f) seresid <- rep(NA_real_, x$k.f) stresid <- rep(NA_real_, x$k.f) resid[x$not.na] <- ei seresid[x$not.na] <- sei stresid[x$not.na] <- ei / sei ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na], se=seresid[x$not.na], z=stresid[x$not.na]) if (!misscluster) out$cluster <- cluster.f[x$not.na] out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) if (!misscluster) out$cluster <- cluster.f out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) if (misscluster) { out$digits <- digits class(out) <- "list.rma" return(out) } else { out <- list(out) if (na.act == "na.omit") { out[[2]] <- list(X2=X2[order(ids)], k=k.id[order(ids)], slab=ids[order(ids)]) } if (na.act == "na.exclude" || na.act == "na.pass") { ids.f <- unique(cluster.f) X2.f <- rep(NA_real_, length(ids.f)) X2.f[match(ids, ids.f)] <- X2 k.id.f <- sapply(ids.f, function(id) sum((id == cluster.f) & x$not.na)) out[[2]] <- list(X2=X2.f[order(ids.f)], k=k.id.f[order(ids.f)], slab=ids.f[order(ids.f)]) } out[[1]]$digits <- digits out[[2]]$digits <- digits names(out) <- c("obs", "cluster") class(out[[1]]) <- "list.rma" class(out[[2]]) <- "list.rma" attr(out[[1]], ".rmspace") <- TRUE attr(out[[2]], ".rmspace") <- TRUE return(out) } } metafor/R/qqnorm.rma.mh.r0000644000176200001440000000435513770400456015006 0ustar liggesusersqqnorm.rma.mh <- function(y, type="rstandard", pch=19, label=FALSE, offset=0.3, pos=13, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(y), must="rma.mh") x <- y type <- match.arg(type, c("rstandard", "rstudent")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (length(label) != 1L) stop(mstyle$stop("Argument 'label' should be of length 1.")) ######################################################################### if (type == "rstandard") { res <- rstandard(x) not.na <- !is.na(res$z) zi <- res$z[not.na] slab <- res$slab[not.na] ord <- order(zi) slab <- slab[ord] } else { res <- rstudent(x) not.na <- !is.na(res$z) zi <- res$z[not.na] slab <- res$slab[not.na] ord <- order(zi) slab <- slab[ord] } sav <- qqnorm(zi, pch=pch, bty="l", ...) abline(a=0, b=1, lty="solid", ...) #qqline(zi, ...) #abline(h=0, lty="dotted", ...) #abline(v=0, lty="dotted", ...) ######################################################################### ### labeling of points if ((is.character(label) && label=="none") || .isFALSE(label)) return(invisible(sav)) if ((is.character(label) && label=="all") || .isTRUE(label)) label <- x$k if (is.numeric(label)) { label <- round(label) if (label < 1 | label > x$k) stop(mstyle$stop("Out of range value for 'label' argument.")) pos.x <- sav$x[ord] pos.y <- sav$y[ord] dev <- abs(pos.x - pos.y) for (i in seq_len(x$k)) { if (sum(dev > dev[i]) < label) { if (pos <= 4) text(pos.x[i], pos.y[i], slab[i], pos=pos, offset=offset, ...) if (pos == 13) text(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] >= 0, 1, 3), offset=offset, ...) if (pos == 24) text(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] <= 0, 2, 4), offset=offset, ...) 
#text(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i] >= 0, 2, 4), offset=offset, ...) } } } ######################################################################### invisible(sav) } metafor/R/misc.func.hidden.selmodel.r0000644000176200001440000001677413744066110017236 0ustar liggesusers############################################################################ .selmodel.pval <- function(yi, vi, alternative) { zi <- yi / sqrt(vi) if (alternative == "two.sided") { pval <- 2 * pnorm(abs(zi), lower.tail=FALSE) } else { pval <- pnorm(zi, lower.tail = alternative == "less") } return(pval) } .selmodel.verbose <- function(ll, beta, tau2, delta, mstyle, digits) { cat(mstyle$verbose(paste0("ll = ", ifelse(is.na(ll), NA, formatC(ll, digits=digits[["fit"]], format="f", flag=" ")), " "))) cat(mstyle$verbose(paste0("beta =", paste(ifelse(is.na(beta), NA, formatC(beta, digits=digits[["est"]], format="f", flag=" ")), collapse=" "), " "))) cat(mstyle$verbose(paste0("tau2 =", ifelse(is.na(tau2), NA, formatC(tau2, digits=digits[["var"]], format="f", flag=" ")), " "))) cat(mstyle$verbose(paste0("delta =", paste(ifelse(is.na(delta), NA, formatC(delta, digits=digits[["est"]], format="f", flag=" ")), collapse=" ")))) cat("\n") } .mapfun <- function(x, lb, ub, fun=NA) { if (is.na(fun)) { lb + (ub-lb) / (1 + exp(-x)) } else { x <- sapply(x, fun) pmin(pmax(x, lb), ub) } } .mapinvfun <- function(x, lb, ub, fun=NA) { if (is.na(fun)) { log((x-lb)/(ub-x)) } else { sapply(x, fun) } } ############################################################################ .selmodel.int <- function(yvals, yi, vi, preci, yhat, wi.fun, delta, tau2, alternative, pval.min, steps) { pval <- .selmodel.pval(yvals, vi, alternative) pval[pval < pval.min] <- pval.min pval[pval > (1-pval.min)] <- 1-pval.min wi.fun(pval, delta, yi, vi, preci, alternative, steps) * dnorm(yvals, yhat, sqrt(vi+tau2)) } .selmodel.ll.cont <- function(par, yi, vi, X.fit, preci, k, pX, pvals, deltas, delta.val, delta.transf, mapfun, delta.min, delta.max, tau2.val, tau2.transf, tau2.max, beta.val, wi.fun, steps, pgrp, alternative, pval.min, intCtrl, verbose, digits, dofit=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) beta <- par[1:pX] tau2 <- par[pX+1] delta <- par[(pX+2):(pX+1+deltas)] beta <- ifelse(is.na(beta.val), beta, beta.val) if (tau2.transf) tau2 <- exp(tau2) tau2[!is.na(tau2.val)] <- tau2.val tau2[tau2 < .Machine$double.eps*10] <- 0 tau2[tau2 > tau2.max] <- tau2.max if (delta.transf) delta <- mapply(.mapfun, delta, delta.min, delta.max, mapfun) delta <- ifelse(is.na(delta.val), delta, delta.val) yhat <- c(X.fit %*% beta) Ai <- rep(NA_real_, k) for (i in 1:k) { tmp <- try(integrate(.selmodel.int, lower=intCtrl$lower, upper=intCtrl$upper, yi=yi[i], vi=vi[i], preci=preci[i], yhat=yhat[i], wi.fun=wi.fun, delta=delta, tau2=tau2, alternative=alternative, pval.min=pval.min, steps=steps, subdivisions=intCtrl$subdivisions, rel.tol=intCtrl$rel.tol)$value, silent=TRUE) if (inherits(tmp, "try-error")) stop(mstyle$stop(paste0("Could not integrate over density in study ", i, ".")), call.=FALSE) Ai[i] <- tmp } ll <- sum(log(wi.fun(pvals, delta, yi, vi, preci, alternative, steps)) + dnorm(yi, yhat, sqrt(vi+tau2), log=TRUE) - log(Ai)) if (dofit) { res <- list(ll=ll, beta=beta, tau2=tau2, delta=delta) return(res) } if (verbose) .selmodel.verbose(ll=ll, beta=beta, tau2=tau2, delta=delta, mstyle=mstyle, digits=digits) if (verbose > 2) { xs <- seq(pval.min, 1-pval.min, length=101) ys <- wi.fun(xs, delta, yi, vi, preci=1, alternative, steps) plot(xs, ys, type="l", 
lwd=2, xlab="p-value", ylab="Relative Likelihood of Selection") } return(-1*ll) } ############################################################################ .selmodel.ll.stepfun <- function(par, yi, vi, X.fit, preci, k, pX, pvals, deltas, delta.val, delta.transf, mapfun, delta.min, delta.max, tau2.val, tau2.transf, tau2.max, beta.val, wi.fun, steps, pgrp, alternative, pval.min, intCtrl, verbose, digits, dofit=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) beta <- par[1:pX] tau2 <- par[pX+1] delta <- par[(pX+2):(pX+1+deltas)] beta <- ifelse(is.na(beta.val), beta, beta.val) if (tau2.transf) tau2 <- exp(tau2) tau2[!is.na(tau2.val)] <- tau2.val tau2[tau2 < .Machine$double.eps*10] <- 0 tau2[tau2 > tau2.max] <- tau2.max if (delta.transf) delta <- mapply(.mapfun, delta, delta.min, delta.max, mapfun) delta <- ifelse(is.na(delta.val), delta, delta.val) yhat <- c(X.fit %*% beta) N <- length(steps) Ai <- rep(NA_real_, k) if (alternative == "greater") { for (i in 1:k) { sei <- sqrt(vi[i]+tau2) Ai[i] <- pnorm(qnorm(steps[1], 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=FALSE) for (j in 2:N) { if (j < N) { Ai[i] <- Ai[i] + delta[j] / preci[i] * (pnorm(qnorm(steps[j], 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=FALSE) - pnorm(qnorm(steps[j-1], 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=FALSE)) } else { Ai[i] <- Ai[i] + delta[j] / preci[i] * pnorm(qnorm(steps[j-1], 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=TRUE) } } } } if (alternative == "less") { for (i in 1:k) { sei <- sqrt(vi[i]+tau2) Ai[i] <- pnorm(qnorm(steps[1], 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE) for (j in 2:N) { if (j < N) { Ai[i] <- Ai[i] + delta[j] / preci[i] * (pnorm(qnorm(steps[j], 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE) - pnorm(qnorm(steps[j-1], 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE)) } else { Ai[i] <- Ai[i] + delta[j] / preci[i] * pnorm(qnorm(steps[j-1], 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=FALSE) } } } } if (alternative == "two.sided") { for (i in 1:k) { sei <- sqrt(vi[i]+tau2) Ai[i] <- pnorm(qnorm(steps[1]/2, 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=FALSE) + pnorm(qnorm(steps[1]/2, 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE) for (j in 2:N) { if (j < N) { Ai[i] <- Ai[i] + delta[j] / preci[i] * ((pnorm(qnorm(steps[j]/2, 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=FALSE) - pnorm(qnorm(steps[j-1]/2, 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=FALSE)) + (pnorm(qnorm(steps[j]/2, 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE) - pnorm(qnorm(steps[j-1]/2, 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE))) } else { Ai[i] <- Ai[i] + delta[j] / preci[i] * (pnorm(qnorm(steps[j-1]/2, 0, sqrt(vi[i]), lower.tail=FALSE), yhat[i], sei, lower.tail=TRUE) - pnorm(qnorm(steps[j-1]/2, 0, sqrt(vi[i]), lower.tail=TRUE), yhat[i], sei, lower.tail=TRUE)) } } } } ll <- sum(log(delta[pgrp] / preci) + dnorm(yi, yhat, sqrt(vi+tau2), log=TRUE) - log(Ai)) if (dofit) { res <- list(ll=ll, beta=beta, tau2=tau2, delta=delta) return(res) } if (verbose) .selmodel.verbose(ll=ll, beta=beta, tau2=tau2, delta=delta, mstyle=mstyle, digits=digits) if (verbose > 2) { xs <- seq(0, 1, length=101) ys <- wi.fun(xs, delta, yi, vi, preci=1, alternative, steps) plot(xs, ys, type="l", lwd=2, xlab="p-value", ylab="Relative Likelihood of Selection") } return(-1*ll) } 
############################################################################ metafor/R/hatvalues.rma.mv.r0000644000176200001440000000345313770364101015475 0ustar liggesusershatvalues.rma.mv <- function(model, type="diagonal", ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mv") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("diagonal", "matrix")) ######################################################################### x <- model if (is.null(x$W)) { W <- chol2inv(chol(x$M)) stXWX <- chol2inv(chol(as.matrix(t(x$X) %*% W %*% x$X))) H <- as.matrix(x$X %*% stXWX %*% crossprod(x$X,W)) #H <- as.matrix(x$X %*% x$vb %*% crossprod(x$X,W)) ### x$vb may have been changed through robust() } else { A <- x$W stXAX <- chol2inv(chol(as.matrix(t(x$X) %*% A %*% x$X))) H <- as.matrix(x$X %*% stXAX %*% crossprod(x$X,A)) } ######################################################################### if (type == "diagonal") { hii <- rep(NA_real_, x$k.f) hii[x$not.na] <- as.vector(diag(H)) hii[hii > 1 - 10 * .Machine$double.eps] <- 1 ### as in lm.influence() names(hii) <- x$slab if (na.act == "na.omit") hii <- hii[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(hii) } if (type == "matrix") { Hfull <- matrix(NA_real_, nrow=x$k.f, ncol=x$k.f) Hfull[x$not.na, x$not.na] <- H rownames(Hfull) <- x$slab colnames(Hfull) <- x$slab if (na.act == "na.omit") Hfull <- Hfull[x$not.na, x$not.na, drop=FALSE] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(Hfull) } } metafor/R/plot.infl.rma.uni.r0000644000176200001440000004472114054725350015565 0ustar liggesusersplot.infl.rma.uni <- function(x, plotinf=TRUE, plotdfbs=FALSE, dfbsnew=FALSE, logcov=TRUE, layout, slab.style=1, las=0, pch=21, bg="black", bg.infl="red", col.na="lightgray", ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="infl.rma.uni") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) ######################################################################### ### check for NAs and stop if there are any when na.act == "na.fail" any.na <- is.na(as.data.frame(x$inf)) if (any(any.na) && na.act == "na.fail") stop(mstyle$stop("Missing values in results.")) ######################################################################### ### process plotinf argument if (is.logical(plotinf)) { if (plotinf) { which.inf <- seq_len(8) } } else { which.inf <- plotinf which.inf <- which.inf[(which.inf >= 1) & (which.inf <= 8)] which.inf <- unique(round(which.inf)) if (length(which.inf) == 0L) stop(mstyle$stop("Incorrect specification of 'plotinf' argument.")) plotinf <- TRUE } ### process plotdfbs argument if (is.logical(plotdfbs)) { if (plotdfbs) { which.dfbs <- seq_len(x$p) } } else { which.dfbs <- plotdfbs which.dfbs <- which.dfbs[(which.dfbs >= 1) & (which.dfbs <= x$p)] which.dfbs <- unique(round(which.dfbs)) if (length(which.dfbs) == 0L) stop(mstyle$stop("Incorrect specification of 'plotdfbs' argument.")) plotdfbs <- TRUE } ######################################################################### if (!plotinf & !plotdfbs) stop(mstyle$stop("At least one of the arguments 'plotinf' or 'plotdfbs' must be TRUE.")) if (!plotinf & dfbsnew) dfbsnew <- FALSE par.mar <- par("mar") par.mar.adj <- par.mar - c(2,2,2,1) par.mar.adj[par.mar.adj < 1] <- 1 par(mar = par.mar.adj) on.exit(par(mar = par.mar)) ######################################################################### ### filter out potential arguments to abbreviate() (which cause problems with the various plot functions) lplot <- function(..., minlength, strict) plot(...) lpoints <- function(..., minlength, strict) points(...) llines <- function(..., minlength, strict) lines(...) laxis <- function(..., minlength, strict) axis(...) labline <- function(..., minlength, strict) abline(...) 
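### note: an illustrative (hypothetical, not run) way to arrive at this plot method:
#
# inf <- influence(res)
# plot(inf, plotinf=c(1,3,7), slab.style=2, las=2)
#
### where 'plotinf' selects which of the 8 diagnostics to show (1=rstudent, 2=dffits, 3=cook.d,
### 4=cov.r, 5=tau2.del, 6=QE.del, 7=hat, 8=weight) and 'slab.style' switches between study ids,
### full study labels, and abbreviated labels for the x-axis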
######################################################################### ids <- switch(slab.style, "1" = x$ids, "2" = x$inf$slab, "3" = abbreviate(x$inf$slab, ...)) #print(ids) ######################################################################### ### plot inf values if requested if (plotinf) { ### set layout (either defaults or user-specified) ### note: could also use n2mfrow() here, but this behaves slightly differently par.mfrow <- par("mfrow") on.exit(par(mfrow = par.mfrow), add=TRUE) if (missing(layout)) { if (length(which.inf) == 2L) par(mfrow=c(2,1)) if (length(which.inf) == 3L) par(mfrow=c(3,1)) if (length(which.inf) == 4L) par(mfrow=c(2,2)) if (length(which.inf) == 5L) par(mfrow=c(5,1)) if (length(which.inf) == 6L) par(mfrow=c(3,2)) if (length(which.inf) == 7L) par(mfrow=c(7,1)) if (length(which.inf) == 8L) par(mfrow=c(4,2)) } else { layout <- layout[layout >= 1] layout <- round(layout) if (length(layout) != 2L) stop(mstyle$stop("Incorrect specification of 'layout' argument.")) par(mfrow=layout) } ###################################################################### for (i in seq_len(length(which.inf))) { if (which.inf[i] == 1) { zi <- x$inf$rstudent not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- min(zi,-2,na.rm=TRUE) zi.max <- max(zi, 2,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="rstudent", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=0, lty="dashed", ...) labline(h=c(qnorm(.025),qnorm(.975)), lty="dotted", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) #if (num.infl) # text(seq_len(len.ids)[ids.infl], zi[ids.infl], seq_len(len.ids)[ids.infl], pos=ifelse(zi[ids.infl] > 0, 3, 1), ...) } else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="rstudent", xlab="", ylab="", ...) } } if (which.inf[i] == 2) { zi <- x$inf$dffits not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- min(zi,na.rm=TRUE) zi.max <- max(zi,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="dffits", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h= 0, lty="dashed", ...) labline(h= 3*sqrt(x$p/(x$k-x$p)), lty="dotted", ...) labline(h=-3*sqrt(x$p/(x$k-x$p)), lty="dotted", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) #if (num.infl) # text(seq_len(len.ids)[ids.infl], zi[ids.infl], seq_len(len.ids)[ids.infl], pos=ifelse(zi[ids.infl] > 0, 3, 1), ...) 
} else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="dffits", xlab="", ylab="", ...) } } if (which.inf[i] == 3) { zi <- x$inf$cook.d not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- min(zi,na.rm=TRUE) zi.max <- max(zi,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="cook.d", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=qchisq(0.5, df=x$m), lty="dotted", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) #if (num.infl) # text(seq_len(len.ids)[ids.infl], zi[ids.infl], seq_len(len.ids)[ids.infl], pos=3, ...) } else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="cook.d", xlab="", ylab="", ...) } } if (which.inf[i] == 4) { zi <- x$inf$cov.r not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- min(zi,na.rm=TRUE) zi.max <- max(zi,na.rm=TRUE) if (logcov) { lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="cov.r", xlab="", ylab="", las=las, log="y", ...) } else { lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="cov.r", xlab="", ylab="", las=las, ...) } laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=1, lty="dashed", ...) #labline(h=1+3*x$m/(x$k-x$m), lty="dotted", ...) #labline(h=1-3*x$m/(x$k-x$m), lty="dotted", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) #if (num.infl) # text(seq_len(len.ids)[ids.infl], zi[ids.infl], seq_len(len.ids)[ids.infl], pos=ifelse(zi[ids.infl] > 1, 3, 1), ...) } else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="cov.r", xlab="", ylab="", ...) } } if (which.inf[i] == 5) { zi <- x$inf$tau2.del not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- min(zi,na.rm=TRUE) zi.max <- max(zi,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="tau2.del", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=x$tau2, lty="dashed", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) 
} else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="tau2.del", xlab="", ylab="", ...) } } if (which.inf[i] == 6) { zi <- x$inf$QE.del not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- min(zi,na.rm=TRUE) zi.max <- max(zi,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="QE.del", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=x$QE, lty="dashed", ...) #labline(h=qchisq(.95, df=x$k-x$p), lty="dotted") if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) } else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="QE.del", xlab="", ylab="", ...) } } if (which.inf[i] == 7) { zi <- x$inf$hat not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- 0 zi.max <- max(zi,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="hat", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=x$p/x$k, lty="dashed", ...) labline(h=3*x$p/x$k, lty="dotted", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) } else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="hat", xlab="", ylab="", ...) } } if (which.inf[i] == 8) { zi <- x$inf$weight not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } if (any(!is.na(zi))) { zi.min <- 0 zi.max <- max(zi,na.rm=TRUE) lplot(NA, NA, xlim=c(1,len.ids), ylim=c(zi.min,zi.max), xaxt="n", main="weight", xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h=100/x$k, lty="dashed", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) } else { lplot(NA, NA, xlim=c(0,1), ylim=c(0,1), xaxt="n", yaxt="n", main="weight", xlab="", ylab="", ...) 
} } } } ######################################################################### ### plot dfbs values if requested if (plotdfbs) { if (dfbsnew) { dev.new() par.mar <- par("mar") par.mar.adj <- par.mar - c(2,2,2,1) par.mar.adj[par.mar.adj < 1] <- 1 par(mar = par.mar.adj) on.exit(par(mar = par.mar), add=TRUE) } else { if (plotinf) { par.ask <- par("ask") par(ask=TRUE) on.exit(par(ask = par.ask), add=TRUE) } } par(mfrow=c(length(which.dfbs),1)) for (i in seq_len(length(which.dfbs))) { zi <- x$dfbs[[which.dfbs[i]]] not.na <- !is.na(zi) if (na.act == "na.omit") { zi <- zi[not.na] len.ids <- length(x$ids)-sum(!not.na) ids.infl <- x$is.infl[not.na] lab.ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { len.ids <- length(x$ids) ids.infl <- x$is.infl lab.ids <- ids } lplot(NA, NA, xlim=c(1,len.ids), ylim=range(zi,na.rm=TRUE), xaxt="n", main=paste("dfbs: ", names(x$dfbs)[which.dfbs[i]]), xlab="", ylab="", las=las, ...) laxis(side=1, at=seq_len(len.ids), labels=lab.ids, xlab="", las=las, ...) labline(h= 0, lty="dashed", ...) labline(h= 1, lty="dotted", ...) labline(h=-1, lty="dotted", ...) if (na.act == "na.exclude" || na.act == "na.pass") llines(seq_len(len.ids)[not.na], zi[not.na], col=col.na, ...) llines(seq_len(len.ids), zi, ...) lpoints(x=seq_len(len.ids), y=zi, bg=bg, pch=pch, ...) lpoints(x=seq_len(len.ids)[ids.infl], y=zi[ids.infl], bg=bg.infl, pch=pch, ...) } } ######################################################################### invisible() } metafor/R/qqnorm.rma.uni.r0000644000176200001440000001230513770376022015170 0ustar liggesusersqqnorm.rma.uni <- function(y, type="rstandard", pch=19, envelope=TRUE, level=y$level, bonferroni=FALSE, reps=1000, smooth=TRUE, bass=0, label=FALSE, offset=0.3, pos=13, lty, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(y), must="rma.uni", notav="rma.uni.selmodel") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) x <- y type <- match.arg(type, c("rstandard", "rstudent")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) draw.envelope <- envelope if (label == "out" & !envelope) { envelope <- TRUE draw.envelope <- FALSE } if (length(label) != 1L) stop(mstyle$stop("Argument 'label' should be of length 1.")) if (missing(lty)) { lty <- c("solid", "dotted") ### 1st value = diagonal line, 2nd value = pseudo confidence envelope } else { if (length(lty) == 1L) lty <- c(lty, lty) } ddd <- list(...) lqqnorm <- function(..., seed) qqnorm(...) labline <- function(..., seed) abline(...) llines <- function(..., seed) lines(...) ltext <- function(..., seed) text(...) ######################################################################### if (type == "rstandard") { res <- rstandard(x) not.na <- !is.na(res$z) zi <- res$z[not.na] slab <- res$slab[not.na] ord <- order(zi) slab <- slab[ord] } else { res <- rstudent(x) not.na <- !is.na(res$z) zi <- res$z[not.na] slab <- res$slab[not.na] ord <- order(zi) slab <- slab[ord] } sav <- lqqnorm(zi, pch=pch, bty="l", ...) labline(a=0, b=1, lty=lty[1], ...) #qqline(zi, ...) #abline(h=0, lty="dotted", ...) #abline(v=0, lty="dotted", ...) 
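### note: the pseudo confidence envelope constructed in the next block is simulation based:
### 'reps' sets of k standard normal deviates are premultiplied by (I - H) so that the simulated
### residuals share the correlation structure of the observed ones, each replicate is sorted,
### and pointwise (optionally Bonferroni-corrected) quantiles then form the envelope; a
### reproducible call might look like this (illustrative, not run):
#
# qqnorm(res, type="rstudent", envelope=TRUE, reps=5000, seed=1234)
#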
######################################################################### ### construct simulation based pseudo confidence envelope if (envelope) { level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) if (!is.null(ddd$seed)) set.seed(ddd$seed) dat <- matrix(rnorm(x$k*reps), nrow=x$k, ncol=reps) options(na.action="na.omit") H <- hatvalues(x, type="matrix") options(na.action = na.act) ImH <- diag(x$k) - H ei <- ImH %*% dat ei <- apply(ei, 2, sort) if (bonferroni) { lb <- apply(ei, 1, quantile, (level/2)/x$k) ### consider using rowQuantiles() from matrixStats package ub <- apply(ei, 1, quantile, 1-(level/2)/x$k) ### consider using rowQuantiles() from matrixStats package } else { lb <- apply(ei, 1, quantile, (level/2)) ### consider using rowQuantiles() from matrixStats package ub <- apply(ei, 1, quantile, 1-(level/2)) ### consider using rowQuantiles() from matrixStats package } temp.lb <- qqnorm(lb, plot.it=FALSE) if (smooth) temp.lb <- supsmu(temp.lb$x, temp.lb$y, bass=bass) if (draw.envelope) llines(temp.lb$x, temp.lb$y, lty=lty[2], ...) #llines(temp.lb$x, temp.lb$y, lty="12", lwd=1.5, ...) temp.ub <- qqnorm(ub, plot.it=FALSE) if (smooth) temp.ub <- supsmu(temp.ub$x, temp.ub$y, bass=bass) if (draw.envelope) llines(temp.ub$x, temp.ub$y, lty=lty[2], ...) #llines(temp.ub$x, temp.ub$y, lty="12", lwd=1.5, , ...) } ######################################################################### ### labeling of points if ((is.character(label) && label=="none") || .isFALSE(label)) return(invisible(sav)) if ((is.character(label) && label=="all") || .isTRUE(label)) label <- x$k if (is.numeric(label)) { label <- round(label) if (label < 1 | label > x$k) stop(mstyle$stop("Out of range value for 'label' argument.")) pos.x <- sav$x[ord] pos.y <- sav$y[ord] dev <- abs(pos.x - pos.y) for (i in seq_len(x$k)) { if (sum(dev > dev[i]) < label) { if (pos <= 4) ltext(pos.x[i], pos.y[i], slab[i], pos=pos, offset=offset, ...) if (pos == 13) ltext(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] >= 0, 1, 3), offset=offset, ...) if (pos == 24) ltext(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] <= 0, 2, 4), offset=offset, ...) #ltext(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i] >= 0, 2, 4), offset=offset, ...) } } } else { pos.x <- sav$x[ord] pos.y <- sav$y[ord] for (i in seq_len(x$k)) { if (pos.y[i] < temp.lb$y[i] || pos.y[i] > temp.ub$y[i]) { if (pos <= 4) ltext(pos.x[i], pos.y[i], slab[i], pos=pos, offset=offset, ...) if (pos == 13) ltext(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] >= 0, 1, 3), offset=offset, ...) if (pos == 24) ltext(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] <= 0, 2, 4), offset=offset, ...) } } } ######################################################################### #if (envelope) { # invisible(list(pts=sav, ci.lb=temp.lb, ci.ub=temp.ub)) #} else { # invisible(sav) #} invisible(sav) } metafor/R/hc.r0000644000176200001440000000005713457322061012672 0ustar liggesusershc <- function(object, ...) UseMethod("hc") metafor/R/plot.cumul.rma.r0000644000176200001440000001544214054725073015170 0ustar liggesusersplot.cumul.rma <- function(x, yaxis, xlim, ylim, xlab, ylab, at, transf, atransf, targs, digits, cols=c("gray80","gray10"), grid=TRUE, pch=19, cex=1, lwd=2, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="cumul.rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(yaxis)) { if (is.null(x$tau2)) { yaxis <- "I2" } else { yaxis <- "tau2" } } else { yaxis <- match.arg(yaxis, c("tau2","I2","H2")) if (is.null(x$tau2)) stop(mstyle$stop("Cannot use yaxis=\"tau2\" for fixed-effects models.")) } if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE transf.char <- deparse(substitute(transf)) atransf.char <- deparse(substitute(atransf)) if (is.function(transf) && is.function(atransf)) stop(mstyle$stop("Use either 'transf' or 'atransf' to specify a transformation (not both).")) if (missing(xlab)) xlab <- .setlab(x$measure, transf.char, atransf.char, gentype=2) if (missing(ylab)) { if (yaxis == "tau2") #ylab <- "Amount of Heterogeneity (tau^2)" ylab <- expression(paste("Amount of Heterogeneity ", (tau^2))) if (yaxis == "I2") #ylab <- "Percentage of Variability due to Heterogeneity (I^2)" ylab <- expression(paste("Percentage of Variability due to Heterogeneity ", (I^2))) if (yaxis == "H2") #ylab <- "Ratio of Total Variability to Sampling Variability (H^2)" ylab <- expression(paste("Ratio of Total Variability to Sampling Variability ", (H^2))) } par.mar <- par("mar") par.mar.adj <- par.mar + c(0,0.5,0,0) # need a bit more space on the left for the y-axis label par(mar = par.mar.adj) on.exit(par(mar = par.mar)) if (missing(at)) at <- NULL if (missing(targs)) targs <- NULL if (missing(digits)) { if (yaxis == "tau2") digits <- c(2L,3L) if (yaxis == "I2") digits <- c(2L,1L) if (yaxis == "H2") digits <- c(2L,1L) } else { if (length(digits) == 1L) ### digits[1] for x-axis labels digits <- c(digits,digits) ### digits[2] for y-axis labels } ### note: digits can also be a list (e.g., digits=list(2L,3)); trailing 0's are dropped for integers ddd <- list(...) if (!is.null(ddd$addgrid)) grid <- ddd$addgrid ### grid argument can either be a logical or a color if (is.logical(grid)) gridcol <- "lightgray" if (is.character(grid)) { gridcol <- grid grid <- TRUE } lplot <- function(..., addgrid) plot(...) laxis <- function(..., addgrid) axis(...) 
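### note: an illustrative (hypothetical, not run) way to arrive at this plot method:
#
# sav <- cumul(res)
# plot(sav, yaxis="I2", transf=exp)
#
### i.e., the x-axis shows the (possibly transformed) cumulative estimates and the y-axis one of
### tau^2, I^2, or H^2 as studies are added one at a time (the 'order' argument of cumul() can
### be used to add the studies in a different order)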
######################################################################### ### set up data frame with the values to be plotted dat <- data.frame(estim=x$estimate) if (yaxis == "tau2") dat$yval <- x$tau2 if (yaxis == "I2") dat$yval <- x$I2 if (yaxis == "H2") dat$yval <- x$H2 ### apply chosen na.action if (na.act == "na.fail" && anyNA(dat)) stop(mstyle$stop("Missing values in results.")) if (na.act == "na.omit") dat <- na.omit(dat) ### number of remaining rows/points k <- nrow(dat) ### if requested, apply transformation to estimates if (is.function(transf)) { if (is.null(targs)) { dat$estim <- sapply(dat$estim, transf) } else { dat$estim <- sapply(dat$estim, transf, targs) } } ### set xlim and ylim values if (missing(xlim)) { xlim <- range(dat$estim, na.rm=TRUE) } else { xlim <- sort(xlim) ### just in case the user supplies the limits in the wrong order } if (missing(ylim)) { ylim <- range(dat$yval, na.rm=TRUE) } else { ylim <- sort(ylim) ### just in case the user supplies the limits in the wrong order } ### if user has specified 'at' argument, make sure xlim actually contains the min and max 'at' values if (!is.null(at)) { xlim[1] <- min(c(xlim[1], at), na.rm=TRUE) xlim[2] <- max(c(xlim[2], at), na.rm=TRUE) } ### set up plot lplot(NA, NA, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, xaxt="n", yaxt="n", ...) ### generate x-axis positions if none are specified if (is.null(at)) { at <- axTicks(side=1) } else { at <- at[at > par("usr")[1]] at <- at[at < par("usr")[2]] } at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } } else { at.lab <- formatC(at.lab, digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } ### add x-axis laxis(side=1, at=at, labels=at.lab, ...) ### add y-axis aty <- axTicks(side=2) laxis(side=2, at=aty, labels=formatC(aty, digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])), ...) 
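### note: the remaining steps add (optional) grid lines at the tick positions and then draw the
### cumulative path as short line segments with a color gradient interpolated between 'cols',
### so that early and late steps of the cumulative analysis can be distinguished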
### add grid if (.isTRUE(grid)) { abline(v=at, lty="dotted", col=gridcol) abline(h=aty, lty="dotted", col=gridcol) } ### vector with color gradient for points cols.points <- colorRampPalette(cols)(k) #gray.vals.points <- seq(from=.9, to=.1, length.out=k) #cols.points <- gray(gray.vals.points) #cols <- colorRampPalette(c("yellow","red"))(k) #cols <- colorRampPalette(c("blue","red"))(k) #cols <- rev(heat.colors(k+4))[-c(1:2,(k+1):(k+2)] #cols <- rev(topo.colors(k)) #cols <- rainbow(k, start=.2, end=.4) ### add lines that have a gradient (by interpolating values) ### looks better this way, especially when k is low for (i in seq_len(k-1)) { if (is.na(dat$estim[i]) || is.na(dat$estim[i+1]) || is.na(dat$yval[i]) || is.na(dat$yval[i+1])) next estims <- approx(c(dat$estim[i], dat$estim[i+1]), n=50)$y yvals <- approx(c(dat$yval[i], dat$yval[i+1]), n=50)$y cols.lines <- colorRampPalette(c(cols.points[i], cols.points[i+1]))(50) #gray.vals.lines <- approx(c(gray.vals.points[i], gray.vals.points[i+1]), n=50)$y #cols.lines <- gray(gray.vals.lines) segments(estims[-50], yvals[-50], estims[-1], yvals[-1], col=cols.lines, lwd=lwd) } ### add lines (this does no interpolation) #segments(dat$estim[-k], dat$yval[-k], dat$estim[-1], dat$yval[-1], col=cols.points, lwd=lwd) ### add points points(x=dat$estim, y=dat$yval, pch=pch, col=cols.points, cex=cex) ### redraw box around plot box() ### return data frame invisibly dat$col <- cols.points invisible(dat) } metafor/R/residuals.rma.r0000644000176200001440000000567213770376671015077 0ustar liggesusersresiduals.rma <- function(object, type="response", ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("response", "rstandard", "rstudent", "pearson", "cholesky")) ### for objects of class "rma.mh" and "rma.peto", use rstandard() to get the Pearson residuals if (inherits(object, c("rma.mh", "rma.peto")) && type == "pearson") type <- "rstandard" ######################################################################### if (type == "rstandard") { tmp <- rstandard(object) out <- c(tmp$z) names(out) <- tmp$slab } if (type == "rstudent") { tmp <- rstudent(object) out <- c(tmp$z) names(out) <- tmp$slab } ######################################################################### if (type == "response") { ### note: can calculate this even if vi is missing out <- c(object$yi.f - object$X.f %*% object$beta) out[abs(out) < 100 * .Machine$double.eps] <- 0 } if (type == "pearson") { if (inherits(object, c("rma.mh", "rma.peto", "rma.glmm"))) stop(mstyle$stop("Extraction of Pearson residuals not available for objects of class \"rma.mh\", \"rma.peto\", or \"rma.glmm\".")) out <- c(object$yi.f - object$X.f %*% object$beta) out[abs(out) < 100 * .Machine$double.eps] <- 0 se <- rep(NA_real_, object$k.f) se[object$not.na] <- sqrt(diag(object$M)) out <- out / se } if (type == "cholesky") { ### note: Cholesky residuals depend on the data order ### but only for the Cholesky residuals is QE = sum(residuals(res, type="cholesky)^2) for models where M (or rather: V) is not diagonal if (inherits(object, c("rma.mh", "rma.peto", "rma.glmm"))) stop(mstyle$stop("Extraction of Cholesky residuals not available for objects of class \"rma.mh\", \"rma.peto\", or \"rma.glmm\".")) out <- c(object$yi - object$X %*% object$beta) out[abs(out) < 100 * .Machine$double.eps] <- 
0 L <- try(chol(chol2inv(chol(object$M)))) if (inherits(L, "try-error")) stop(mstyle$stop("Could not take Cholesky decomposition of the marginal var-cov matrix.")) tmp <- L %*% out out <- rep(NA_real_, object$k.f) out[object$not.na] <- tmp } if (is.element(type, c("response", "pearson", "cholesky"))) { names(out) <- object$slab #not.na <- !is.na(out) if (na.act == "na.omit") out <- out[object$not.na] if (na.act == "na.exclude") out[!object$not.na] <- NA if (na.act == "na.fail" && any(!object$not.na)) stop(mstyle$stop("Missing values in results.")) } ######################################################################### return(out) } metafor/R/print.rma.uni.r0000644000176200001440000003237114046532037015011 0ustar liggesusersprint.rma.uni <- function(x, digits, showfit=FALSE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni") if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (inherits(x, "rma.uni.trimfill")) { if (!exists(".rmspace")) cat("\n") cat(mstyle$text(paste0("Estimated number of missing studies on the ", x$side, " side: "))) cat(mstyle$result(paste0(x$k0, " (SE = ", ifelse(is.na(x$se.k0), NA, .fcf(x$se.k0, digits[["se"]])), ")"))) cat("\n") if (x$k0.est == "R0") { cat(mstyle$text(paste0("Test of H0: no missing studies on the ", x$side, " side: "))) cat(paste0(rep(" ", nchar(x$k0)), collapse="")) cat(mstyle$result(paste0("p-val ", .pval(x$p.k0, digits[["pval"]], showeq=TRUE, sep=" ")))) cat("\n") } if (exists(".rmspace")) cat("\n") } if (!exists(".rmspace")) cat("\n") if (x$model == "rma.ls") { cat(mstyle$section("Location-Scale Model")) cat(mstyle$section(paste0(" (k = ", x$k, "; "))) if (x$tau2.fix) { cat(mstyle$section("user-specified tau^2 value)")) } else { cat(mstyle$section(paste0("tau^2 estimator: ", x$method, ")"))) } } else { if (is.element(x$method, c("FE","EE","CE"))) { if (x$int.only) { cat(mstyle$section(sapply(x$method, switch, "FE"="Fixed-Effects Model", "EE"="Equal-Effects Model", "CE"="Common-Effects Model", USE.NAMES=FALSE))) } else { cat(mstyle$section("Fixed-Effects with Moderators Model")) } cat(mstyle$section(paste0(" (k = ", x$k, ")"))) } else { if (x$int.only) { cat(mstyle$section("Random-Effects Model")) } else { cat(mstyle$section("Mixed-Effects Model")) } cat(mstyle$section(paste0(" (k = ", x$k, "; "))) if (x$tau2.fix) { cat(mstyle$section("user-specified tau^2 value)")) } else { cat(mstyle$section(paste0("tau^2 estimator: ", x$method, ")"))) } } } cat("\n") if (showfit) { if (x$method == "REML") { fs <- .fcf(x$fit.stats$REML, digits[["fit"]]) } else { fs <- .fcf(x$fit.stats$ML, digits[["fit"]]) } names(fs) <- c("logLik", "deviance", "AIC", "BIC", "AICc") cat("\n") tmp <- capture.output(print(fs, quote=FALSE, print.gap=2)) .print.table(tmp, mstyle) } cat("\n") if (x$model == "rma.uni" || x$model == "rma.uni.selmodel") { if (!is.element(x$method, c("FE","EE","CE"))) { if (x$int.only) { cat(mstyle$text("tau^2 (estimated amount of total heterogeneity): ")) cat(mstyle$result(paste0(.fcf(x$tau2, ifelse(abs(x$tau2) <= .Machine$double.eps*10,0,digits[["var"]])), ifelse(is.na(x$se.tau2), "", paste0(" (SE = " , .fcf(x$se.tau2, digits[["sevar"]]), ")"))))) cat("\n") cat(mstyle$text("tau (square root of estimated tau^2 value): ")) cat(mstyle$result(paste0(ifelse(x$tau2>=0, .fcf(sqrt(x$tau2), ifelse(x$tau2 <= 
.Machine$double.eps*10,0,digits[["var"]])), NA)))) cat("\n") } else { cat(mstyle$text("tau^2 (estimated amount of residual heterogeneity): ")) cat(mstyle$result(paste0(.fcf(x$tau2, ifelse(abs(x$tau2) <= .Machine$double.eps*10,0,digits[["var"]])), ifelse(is.na(x$se.tau2), "", paste0(" (SE = " , .fcf(x$se.tau2, digits[["sevar"]]), ")"))))) cat("\n") cat(mstyle$text("tau (square root of estimated tau^2 value): ")) cat(mstyle$result(paste0(ifelse(x$tau2>=0, .fcf(sqrt(x$tau2), ifelse(x$tau2 <= .Machine$double.eps*10,0,digits[["var"]])), NA)))) cat("\n") } } if (x$int.only) { if (!is.na(x$I2)) { cat(mstyle$text("I^2 (total heterogeneity / total variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$I2), NA, .fcf(x$I2, 2)), "%"))) cat("\n") } if (!is.na(x$H2)) { cat(mstyle$text("H^2 (total variability / sampling variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$H2), NA, .fcf(x$H2, 2))))) cat("\n") } } else { if (!is.na(x$I2)) { cat(mstyle$text("I^2 (residual heterogeneity / unaccounted variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$I2), NA, .fcf(x$I2, 2)), "%"))) cat("\n") } if (!is.na(x$H2)) { cat(mstyle$text("H^2 (unaccounted variability / sampling variability): ")) cat(mstyle$result(paste0(ifelse(is.na(x$H2), NA, .fcf(x$H2, 2))))) cat("\n") } } if (!is.element(x$method, c("FE","EE","CE")) && !x$int.only && !is.null(x$R2)) { cat(mstyle$text("R^2 (amount of heterogeneity accounted for): ")) cat(mstyle$result(paste0(ifelse(is.na(x$R2), NA, .fcf(x$R2, 2)), "%"))) cat("\n") } if (!is.element(x$method, c("FE","EE","CE")) || !is.na(x$I2) || !is.na(x$H2) || (!is.element(x$method, c("FE","EE","CE")) && !x$int.only && !is.null(x$R2))) cat("\n") } if (!is.na(x$QE)) { if (x$int.only) { cat(mstyle$section("Test for Heterogeneity:")) cat("\n") cat(mstyle$result(paste0("Q(df = ", x$k-x$p, ") = ", .fcf(x$QE, digits[["test"]]), ", p-val ", .pval(x$QEp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$section("Test for Residual Heterogeneity:")) cat("\n") cat(mstyle$result(paste0("QE(df = ", x$k-x$p, ") = ", .fcf(x$QE, digits[["test"]]), ", p-val ", .pval(x$QEp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (x$model == "rma.uni.selmodel" && !is.na(x$LRT.tau2)) { if (x$int.only) { cat(mstyle$section("Test for Heterogeneity:")) cat("\n") cat(mstyle$result(paste0("LRT(df = 1) = ", .fcf(x$LRT.tau2, digits[["test"]]), ", p-val ", .pval(x$LRTp.tau2, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$section("Test for Residual Heterogeneity:")) cat("\n") cat(mstyle$result(paste0("LRT(df = 1) = ", .fcf(x$LRT.tau2, digits[["test"]]), ", p-val ", .pval(x$LRTp.tau2, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (x$p > 1L && !is.na(x$QM)) { if (x$model == "rma.ls") { cat(mstyle$section(paste0("Test of Location Coefficients (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) } else { cat(mstyle$section(paste0("Test of Moderators (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):"))) } cat("\n") if (is.element(x$test, c("knha","adhoc","t"))) { cat(mstyle$result(paste0("F(df1 = ", x$QMdf[1], ", df2 = ", x$QMdf[2], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QM(df = ", x$QMdf[1], ") = ", .fcf(x$QM, digits[["test"]]), ", p-val ", .pval(x$QMp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n\n") } if (is.element(x$test, c("knha","adhoc","t"))) { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), 
se=.fcf(x$se, digits[["se"]]), tval=.fcf(x$zval, digits[["test"]]), df=round(x$ddf,2), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$beta), digits[["est"]]), se=.fcf(x$se, digits[["se"]]), zval=.fcf(x$zval, digits[["test"]]), pval=.pval(x$pval, digits[["pval"]]), ci.lb=.fcf(x$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$ci.ub, digits[["ci"]]), stringsAsFactors=FALSE) } rownames(res.table) <- rownames(x$beta) signif <- symnum(x$pval, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } ddd <- list(...) .chkdots(ddd, c("num")) if (.isTRUE(ddd$num)) rownames(res.table) <- paste0(1:nrow(res.table), ") ", rownames(res.table)) if (x$int.only) res.table <- res.table[1,] if (x$model == "rma.uni" || x$model == "rma.uni.selmodel") { cat(mstyle$section("Model Results:")) } else { cat(mstyle$section("Model Results (Location):")) } cat("\n\n") if (x$int.only) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) if (x$model == "rma.ls") { if (x$q > 1L && !is.na(x$QS)) { cat("\n") cat(mstyle$section(paste0("Test of Scale Coefficients (coefficient", ifelse(x$m.alpha == 1, " ", "s "), .format.btt(x$att),"):"))) cat("\n") if (is.element(x$test, c("knha","adhoc","t"))) { cat(mstyle$result(paste0("F(df1 = ", x$QSdf[1], ", df2 = ", x$QSdf[2], ") = ", .fcf(x$QS, digits[["test"]]), ", p-val ", .pval(x$QSp, digits[["pval"]], showeq=TRUE, sep=" ")))) } else { cat(mstyle$result(paste0("QS(df = ", x$QSdf[1], ") = ", .fcf(x$QS, digits[["test"]]), ", p-val ", .pval(x$QSp, digits[["pval"]], showeq=TRUE, sep=" ")))) } cat("\n") } if (x$test == "t") { res.table <- data.frame(estimate=.fcf(c(x$alpha), digits[["est"]]), se=.fcf(x$se.alpha, digits[["se"]]), tval=.fcf(x$zval.alpha, digits[["test"]]), df=round(x$ddf.alpha, 2), pval=.pval(x$pval.alpha, digits[["pval"]]), ci.lb=.fcf(x$ci.lb.alpha, digits[["ci"]]), ci.ub=.fcf(x$ci.ub.alpha, digits[["ci"]]), stringsAsFactors=FALSE) } else { res.table <- data.frame(estimate=.fcf(c(x$alpha), digits[["est"]]), se=.fcf(x$se.alpha, digits[["se"]]), zval=.fcf(x$zval.alpha, digits[["test"]]), pval=.pval(x$pval.alpha, digits[["pval"]]), ci.lb=.fcf(x$ci.lb.alpha, digits[["ci"]]), ci.ub=.fcf(x$ci.ub.alpha, digits[["ci"]]), stringsAsFactors=FALSE) } rownames(res.table) <- rownames(x$alpha) signif <- symnum(x$pval.alpha, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } for (j in 1:nrow(res.table)) { res.table[j, is.na(res.table[j,])] <- ifelse(x$alpha.fix[j], "---", "NA") res.table[j, res.table[j,] == "NA"] <- ifelse(x$alpha.fix[j], "---", "NA") } if (.isTRUE(ddd$num)) rownames(res.table) <- paste0(1:nrow(res.table), ") ", rownames(res.table)) if (length(x$alpha) == 1L) res.table <- res.table[1,] cat("\n") cat(mstyle$section("Model Results (Scale):")) cat("\n\n") if (length(x$alpha) == 1L) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) } if (x$model == "rma.uni.selmodel") { if (!is.na(x$LRT)) { cat("\n") 
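### note: this likelihood ratio test compares the fitted selection model against the
### corresponding model without the selection part; the reported df (x$LRTdf) reflects the
### number of selection model parameters being tested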
cat(mstyle$section("Test for Selection Model Parameters:")) cat("\n") cat(mstyle$result(paste0("LRT(df = ", x$LRTdf, ") = ", .fcf(x$LRT, digits[["test"]]), ", p-val ", .pval(x$LRTp, digits[["pval"]], showeq=TRUE, sep=" ")))) cat("\n") } res.table <- data.frame(estimate=.fcf(c(x$delta), digits[["est"]]), se=.fcf(x$se.delta, digits[["se"]]), zval=.fcf(x$zval.delta, digits[["test"]]), pval=.pval(x$pval.delta, digits[["pval"]]), ci.lb=.fcf(x$ci.lb.delta, digits[["ci"]]), ci.ub=.fcf(x$ci.ub.delta, digits[["ci"]]), stringsAsFactors=FALSE) if (x$type == "stepfun") { rownames(res.table) <- rownames(x$ptable) res.table <- cbind(k=x$ptable$k, res.table) } else { rownames(res.table) <- paste0("delta.", seq_along(x$delta)) } #if (x$test == "t") # colnames(res.table)[3] <- "tval" signif <- symnum(x$pval.delta, corr=FALSE, na=FALSE, cutpoints=c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " ")) if (signif.stars) { res.table <- cbind(res.table, signif) colnames(res.table)[ncol(res.table)] <- "" } for (j in 1:nrow(res.table)) { res.table[j, is.na(res.table[j,])] <- ifelse(x$delta.fix[j], "---", "NA") res.table[j, res.table[j,] == "NA"] <- ifelse(x$delta.fix[j], "---", "NA") } if (length(x$delta) == 1L) res.table <- res.table[1,] cat("\n") cat(mstyle$section("Selection Model Results:")) cat("\n\n") if (length(x$delta) == 1L) { tmp <- capture.output(.print.vector(res.table)) } else { tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) } .print.table(tmp, mstyle) } if (signif.legend) { cat("\n") cat(mstyle$legend("---\nSignif. codes: "), mstyle$legend(attr(signif, "legend"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/predict.rma.ls.r0000644000176200001440000004607414046526756015152 0ustar liggesuserspredict.rma.ls <- function(object, newmods, intercept, addx=FALSE, newscale, addz=FALSE, level, digits, transf, targs, vcov=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.ls") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- object if (missing(newmods)) newmods <- NULL if (missing(intercept)) intercept <- x$intercept if (missing(newscale)) newscale <- NULL if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ddd <- list(...) 
.chkdots(ddd, c("pi.type", "newvi")) if (is.null(ddd$pi.type)) { pi.type <- "default" } else { pi.type <- ddd$pi.type } if (!is.null(newmods) && x$int.only && !(x$int.only && identical(newmods, 1))) stop(mstyle$stop("Cannot specify new moderator values for models without moderators.")) if (!is.null(newscale) && x$Z.int.only && !(x$Z.int.only && identical(newscale, 1))) stop(mstyle$stop("Cannot specify new scale values for models without scale variables.")) ######################################################################### if (!is.null(newmods)) { ### if newmods has been specified if (!(.is.vector(newmods) || inherits(newmods, "matrix"))) stop(mstyle$stop(paste0("Argument 'newmods' should be a vector or matrix, but is of class '", class(newmods), "'."))) if ((!x$int.incl && x$p == 1L) || (x$int.incl && x$p == 2L)) { k.new <- length(newmods) # if single moderator (multiple k.new possible) (either without or with intercept in the model) X.new <- cbind(c(newmods)) # } else { # in case the model has more than one predictor: if (.is.vector(newmods) || nrow(newmods) == 1L) { # # if user gives one vector or one row matrix (only one k.new): k.new <- 1 # X.new <- rbind(newmods) # } else { # # if user gives multiple rows and columns (multiple k.new): k.new <- nrow(newmods) # X.new <- cbind(newmods) # } # ### allow matching of terms by names (note: only possible if all columns in X.new and x$X have colnames) if (!is.null(colnames(X.new)) && all(colnames(X.new) != "") && !is.null(colnames(x$X)) && all(colnames(x$X) != "")) { colnames.mod <- colnames(x$X) if (x$int.incl) colnames.mod <- colnames.mod[-1] pos <- sapply(colnames(X.new), function(colname) { d <- c(adist(colname, colnames.mod, costs=c(ins=1, sub=Inf, del=Inf))) # compute edit distances with Inf costs for substitutions/deletions if (all(is.infinite(d))) # if there is no match, then all elements are Inf stop(mstyle$stop(paste0("Could not find variable '", colname, "' in the model.")), call. = FALSE) d <- which(d == min(d)) # don't use which.min() since that only finds the first minimum if (length(d) > 1L) # if there is no unique match, then there is more than one minimum stop(mstyle$stop(paste0("Could not match up variable '", colname, "' uniquely to a variable in the model.")), call. 
= FALSE) return(d) }) if (anyDuplicated(pos)) { # if the same name is used more than once, then there will be duplicated pos values dups <- paste(unique(colnames(X.new)[duplicated(pos)]), collapse=", ") stop(mstyle$stop(paste0("Found multiple matches for the same variable name (", dups, ")."))) } if (length(pos) != length(colnames.mod)) { no.match <- colnames.mod[seq_along(colnames.mod)[-pos]] if (length(no.match) > 3L) stop(mstyle$stop(paste0("Argument 'newmods' does not specify values for these variables: ", paste0(no.match[1:3], collapse=", "), ", ..."))) if (length(no.match) > 1L) stop(mstyle$stop(paste0("Argument 'newmods' does not specify values for these variables: ", paste0(no.match, collapse=", ")))) if (length(no.match) == 1L) stop(mstyle$stop(paste0("Argument 'newmods' does not specify values for this variable: ", no.match))) } X.new <- X.new[,order(pos),drop=FALSE] colnames(X.new) <- colnames.mod } } if (inherits(X.new[1,1], "character")) stop(mstyle$stop(paste0("Argument 'newmods' should only contain numeric variables."))) ### if the user has specified newmods and an intercept was included in the original model, add the intercept to X.new ### but user can also decide to remove the intercept from the predictions with intercept=FALSE ### one special case: when the location model is an intercept-only model, one can set newmods=1 to obtain the predicted intercept if (x$int.incl && !(x$int.only && ncol(X.new) == 1L && nrow(X.new) == 1L && X.new[1,1] == 1)) { if (intercept) { X.new <- cbind(intrcpt=1, X.new) } else { X.new <- cbind(intrcpt=0, X.new) } } if (ncol(X.new) != x$p) stop(mstyle$stop(paste0("Dimensions of 'newmods' (", ncol(X.new), ") do not match the dimensions of the model (", x$p, ")."))) } if (!is.null(newscale)) { if (!(.is.vector(newscale) || inherits(newscale, "matrix"))) stop(mstyle$stop(paste0("Argument 'newscale' should be a vector or matrix, but is of class '", class(newscale), "'."))) if ((!x$Z.int.incl && x$q == 1L) || (x$Z.int.incl && x$q == 2L)) { Z.k.new <- length(newscale) # if single moderator (multiple k.new possible) (either without or with intercept in the model) Z.new <- cbind(c(newscale)) # } else { # in case the model has more than one predictor: if (.is.vector(newscale) || nrow(newscale) == 1L) { # # if user gives one vector or one row matrix (only one k.new): Z.k.new <- 1 # Z.new <- rbind(newscale) # } else { # # if user gives multiple rows and columns (multiple k.new): Z.k.new <- nrow(newscale) # Z.new <- cbind(newscale) # } # ### allow matching of terms by names (note: only possible if all columns in Z.new and x$Z have colnames) if (!is.null(colnames(Z.new)) && all(colnames(Z.new) != "") && !is.null(colnames(x$Z)) && all(colnames(x$Z) != "")) { colnames.mod <- colnames(x$Z) if (x$Z.int.incl) colnames.mod <- colnames.mod[-1] pos <- sapply(colnames(Z.new), function(colname) { d <- c(adist(colname, colnames.mod, costs=c(ins=1, sub=Inf, del=Inf))) # compute edit distances with Inf costs for substitutions/deletions if (all(is.infinite(d))) # if there is no match, then all elements are Inf stop(mstyle$stop(paste0("Could not find variable '", colname, "' from 'newscale' in the model.")), call. = FALSE) d <- which(d == min(d)) # don't use which.min() since that only finds the first minimum if (length(d) > 1L) # if there is no unique match, then there is more than one minimum stop(mstyle$stop(paste0("Could not match up variable '", colname, "' from 'newscale' uniquely to a variable in the model.")), call. 
= FALSE) return(d) }) if (anyDuplicated(pos)) { # if the same name is used more than once, then there will be duplicated pos values dups <- paste(unique(colnames(Z.new)[duplicated(pos)]), collapse=", ") stop(mstyle$stop(paste0("Found multiple matches for the same variable name (", dups, ") in 'newscale'."))) } if (length(pos) != length(colnames.mod)) { no.match <- colnames.mod[seq_along(colnames.mod)[-pos]] if (length(no.match) > 3L) stop(mstyle$stop(paste0("Argument 'newscale' does not specify values for these variables: ", paste0(no.match[1:3], collapse=", "), ", ..."))) if (length(no.match) > 1L) stop(mstyle$stop(paste0("Argument 'newscale' does not specify values for these variables: ", paste0(no.match, collapse=", ")))) if (length(no.match) == 1L) stop(mstyle$stop(paste0("Argument 'newscale' does not specify values for this variable: ", no.match))) } Z.new <- Z.new[,order(pos),drop=FALSE] colnames(Z.new) <- colnames.mod } } if (inherits(Z.new[1,1], "character")) stop(mstyle$stop(paste0("Argument 'newscale' should only contain numeric variables."))) ### if the user has specified newscale and an intercept was included in the original model, add the intercept to Z.new ### but user can also decide to remove the intercept from the predictions with intercept=FALSE (only when predicting log(tau^2)) ### one special case: when the scale model is an intercept-only model, one can set newscale=1 to obtain the predicted intercept ### (which can be converted to tau^2 with transf=exp when using a log link) if (x$Z.int.incl && !(x$Z.int.only && ncol(Z.new) == 1L && nrow(Z.new) == 1L && Z.new[1,1] == 1)) { if (is.null(newmods)) { if (intercept) { Z.new <- cbind(intrcpt=1, Z.new) } else { Z.new <- cbind(intrcpt=0, Z.new) } } else { Z.new <- cbind(intrcpt=1, Z.new) } } if (ncol(Z.new) != x$q) stop(mstyle$stop(paste0("Dimensions of 'newscale' (", ncol(Z.new), ") do not match the dimensions of the scale model (", x$q, ")."))) } # four possibilities: # 1) newmods not specified, newscale not specified: get the fitted values of the studies and ci/pi bounds thereof # 2) newmods specified, newscale not specified: get the predicted mu values for these newmods values and ci bounds thereof # (note: cannot compute pi bounds, since the tau^2 values cannot be predicted) # 3) newmods not specified, newscale specified: get the predicted log(tau^2) values and ci bounds thereof # (transf=exp to obtain predicted tau^2 values when using the default log link) # 4) newmods specified, newscale specified: get the predicted mu values for these newmods values and ci/pi bounds thereof pred.mui <- TRUE if (is.null(newmods)) { if (is.null(newscale)) { k.new <- x$k.f X.new <- x$X.f Z.new <- x$Z.f tau2.f <- x$tau2.f } else { k.new <- Z.k.new addx <- FALSE pred.mui <- FALSE } } else { if (is.null(newscale)) { Z.new <- matrix(NA, nrow=k.new, ncol=x$q) tau2.f <- rep(NA, k.new) addz <- FALSE } else { tau2.f <- rep(NA_real_, Z.k.new) for (i in seq_len(Z.k.new)) { Zi.new <- Z.new[i,,drop=FALSE] tau2.f[i] <- Zi.new %*% x$alpha } if (x$link == "log") { tau2.f <- exp(tau2.f) } else { if (any(tau2.f < 0)) { warning(mstyle$warning(paste0("Negative predicted 'tau2' values constrained to 0.")), call.=FALSE) tau2.f[tau2.f < 0] <- 0 } } if (length(tau2.f) == 1L) { Z.new <- Z.new[rep(1,k.new),,drop=FALSE] tau2.f <- rep(tau2.f, k.new) } if (length(tau2.f) != k.new) stop(mstyle$stop(paste0("Dimensions of 'newmods' (", k.new, ") do not match dimensions of newscale (", length(tau2.f), ")."))) } } #return(list(k.new=k.new, tau2=x$tau2, gamma2=x$gamma2, 
tau2.levels=tau2.levels, gamma2.levels=gamma2.levels)) ######################################################################### ### predicted values, SEs, and confidence intervals pred <- rep(NA_real_, k.new) vpred <- rep(NA_real_, k.new) if (pred.mui) { ddf <- ifelse(is.na(x$ddf), x$k - x$p, x$ddf) for (i in seq_len(k.new)) { Xi.new <- X.new[i,,drop=FALSE] pred[i] <- Xi.new %*% x$beta vpred[i] <- Xi.new %*% tcrossprod(x$vb, Xi.new) } if (x$test == "t") { crit <- if (ddf > 0) qt(level/2, df=ddf, lower.tail=FALSE) else NA } else { crit <- qnorm(level/2, lower.tail=FALSE) } } else { ddf <- ifelse(is.na(x$ddf.alpha), x$k - x$q, x$ddf.alpha) for (i in seq_len(k.new)) { Zi.new <- Z.new[i,,drop=FALSE] pred[i] <- Zi.new %*% x$alpha vpred[i] <- Zi.new %*% tcrossprod(x$va, Zi.new) } if (x$test == "t") { crit <- if (ddf > 0) qt(level/2, df=ddf, lower.tail=FALSE) else NA } else { crit <- qnorm(level/2, lower.tail=FALSE) } } se <- sqrt(vpred) ci.lb <- pred - crit * se ci.ub <- pred + crit * se ######################################################################### if (pred.mui) { if (vcov) vcovpred <- X.new %*% x$vb %*% t(X.new) if (pi.type == "simple") { crit <- qnorm(level/2, lower.tail=FALSE) vpred <- 0 } pi.ddf <- ddf if (is.element(pi.type, c("riley","t"))) { if (pi.type == "riley") pi.ddf <- x$k - x$p - x$q if (pi.type == "t") pi.ddf <- x$k - x$p pi.ddf[pi.ddf < 1] <- 1 crit <- qt(level/2, df=pi.ddf, lower.tail=FALSE) } if (is.null(ddd$newvi)) { newvi <- 0 } else { newvi <- ddd$newvi if (length(newvi) == 1L) newvi <- rep(newvi, k.new) if (length(newvi) != k.new) stop(mstyle$stop(paste0("Length of 'newvi' argument (", length(newvi), ") does not match the number of predicted values (", k.new, ")."))) } ### prediction intervals pi.lb <- pred - crit * sqrt(vpred + tau2.f + newvi) pi.ub <- pred + crit * sqrt(vpred + tau2.f + newvi) } else { if (vcov) vcovpred <- Z.new %*% x$va %*% t(Z.new) pi.lb <- NA pi.ub <- NA } ######################################################################### ### apply transformation function if one has been specified if (is.function(transf)) { if (is.null(targs)) { pred <- sapply(pred, transf) se <- rep(NA,k.new) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) pi.lb <- sapply(pi.lb, transf) pi.ub <- sapply(pi.ub, transf) } else { pred <- sapply(pred, transf, targs) se <- rep(NA,k.new) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) pi.lb <- sapply(pi.lb, transf, targs) pi.ub <- sapply(pi.ub, transf, targs) } do.transf <- TRUE } else { do.transf <- FALSE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] tmp <- .psort(pi.lb, pi.ub) pi.lb <- tmp[,1] pi.ub <- tmp[,2] ### when predicting tau^2 values, set negative tau^2 values and CI bounds to 0 if (!pred.mui && x$link=="identity" && !is.function(transf)) { if (any(pred < 0)) warning(mstyle$warning(paste0("Negative predicted 'tau2' values constrained to 0.")), call.=FALSE) pred[pred < 0] <- 0 ci.lb[ci.lb < 0] <- 0 ci.ub[ci.ub < 0] <- 0 } ### use study labels from the object when the model has moderators and no new moderators have been specified if (pred.mui) { if (is.null(newmods)) { slab <- x$slab } else { slab <- seq_len(k.new) } } else { if (is.null(newscale)) { slab <- x$slab } else { slab <- seq_len(k.new) } } ### add row/colnames to vcovpred if (vcov) rownames(vcovpred) <- colnames(vcovpred) <- slab ### but when predicting just a single value, use "" as study label if (k.new == 1L) slab <- "" ### handle NAs 
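### Hedged usage sketch (illustrative comment only): the interval computations above also
### honor the 'pi.type' and 'newvi' options passed via '...', e.g.
###   predict(res, newmods = 0.5, pi.type = "riley")   # alternative df for the PI
###   predict(res, newmods = 0.5, newvi = 0.04)        # PI for a new study with vi = 0.04
### where 'res' is a hypothetical rma.ls fit; as implemented above, pi.type="riley" uses
### k - p - q degrees of freedom for the t critical value.
### (handling of NAs in the requested predictions continues below)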
not.na <- rep(TRUE, k.new) if (na.act == "na.omit") { if (pred.mui) { if (is.null(newmods)) { not.na <- x$not.na } else { not.na <- !is.na(pred) } } else { if (is.null(newscale)) { not.na <- x$not.na } else { not.na <- !is.na(pred) } } } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out <- list(pred=pred[not.na], se=se[not.na], ci.lb=ci.lb[not.na], ci.ub=ci.ub[not.na], pi.lb=pi.lb[not.na], pi.ub=pi.ub[not.na], cr.lb=pi.lb[not.na], cr.ub=pi.ub[not.na]) if (vcov) vcovpred <- vcovpred[not.na,not.na,drop=FALSE] if (na.act == "na.exclude" && is.null(newmods)) { out <- lapply(out, function(val) ifelse(x$not.na, val, NA)) if (vcov) { vcovpred[!x$not.na,] <- NA vcovpred[,!x$not.na] <- NA } } ### add X matrix to list if (addx) { out$X <- matrix(X.new[not.na,], ncol=x$p) colnames(out$X) <- colnames(x$X) } ### add Z matrix to list if (addz) { out$Z <- matrix(Z.new[not.na,], ncol=x$q) colnames(out$Z) <- colnames(x$Z) } ### add slab values to list out$slab <- slab[not.na] ### for FE/EE/CE models, remove the columns corresponding to the prediction interval bounds if (is.element(x$method, c("FE","EE","CE")) || !pred.mui) { out$cr.lb <- NULL out$cr.ub <- NULL out$pi.lb <- NULL out$pi.ub <- NULL } out$digits <- digits out$method <- x$method out$transf <- do.transf if (x$test != "z") out$ddf <- ddf if (pred.mui && (x$test != "z" || is.element(pi.type, c("riley","t"))) && pi.type != "simple") out$pi.ddf <- pi.ddf class(out) <- "list.rma" if (vcov & !do.transf) { out <- list(pred=out) out$vcov <- vcovpred } return(out) } metafor/R/weights.rma.mv.r0000644000176200001440000000361013775620001015145 0ustar liggesusersweights.rma.mv <- function(object, type="diagonal", ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.mv") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("diagonal", "matrix", "rowsum")) x <- object ######################################################################### if (is.null(x$W)) { W <- chol2inv(chol(x$M)) } else { W <- x$W } ######################################################################### if (type == "diagonal") { wi <- as.vector(diag(W)) weight <- rep(NA_real_, x$k.f) weight[x$not.na] <- wi / sum(wi) * 100 names(weight) <- x$slab if (na.act == "na.omit") weight <- weight[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in weights.")) return(weight) } if (type == "matrix") { Wfull <- matrix(NA_real_, nrow=x$k.f, ncol=x$k.f) Wfull[x$not.na, x$not.na] <- W rownames(Wfull) <- x$slab colnames(Wfull) <- x$slab if (na.act == "na.omit") Wfull <- Wfull[x$not.na, x$not.na, drop=FALSE] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(Wfull) } if (type == "rowsum") { if (!x$int.only) stop("Row-sum weights are only meaningful for intercept-only models.") wi <- rowSums(W) weight <- rep(NA_real_, x$k.f) weight[x$not.na] <- wi / sum(wi) * 100 names(weight) <- x$slab if (na.act == "na.omit") weight <- weight[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in weights.")) return(weight) } } metafor/R/confint.rma.peto.r0000644000176200001440000000376113770362606015500 0ustar liggesusersconfint.rma.peto <- function(object, parm, level, digits, transf, targs, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.peto") if (!missing(parm)) warning(mstyle$warning("Argument 'parm' (currently) ignored."), call.=FALSE) x <- object if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) .chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) crit <- qnorm(level/2, lower.tail=FALSE) beta <- x$beta ci.lb <- beta - crit * x$se ci.ub <- beta + crit * x$se ### if requested, apply transformation function if (.isTRUE(transf)) ### if transf=TRUE, apply exp transformation to ORs transf <- exp if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### res <- cbind(estimate=beta, ci.lb, ci.ub) res <- list(fixed=res) rownames(res$fixed) <- "" res$digits <- digits if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(res) <- "confint.rma" return(res) } metafor/R/plot.rma.mh.r0000644000176200001440000000371414054725362014447 0ustar liggesusersplot.rma.mh <- function(x, qqplot=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.mh") na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) par.mfrow <- par("mfrow") par(mfrow=c(2,2)) on.exit(par(mfrow = par.mfrow), add=TRUE) ######################################################################### forest(x, ...) title("Forest Plot", ...) ######################################################################### funnel(x, ...) title("Funnel Plot", ...) ######################################################################### radial(x, ...) title("Radial Plot", ...) ######################################################################### if (qqplot) { qqnorm(x, ...) } else { options(na.action = "na.pass") z <- rstandard(x)$z options(na.action = na.act) not.na <- !is.na(z) if (na.act == "na.omit") { z <- z[not.na] ids <- x$ids[not.na] not.na <- not.na[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") ids <- x$ids k <- length(z) plot(NA, NA, xlim=c(1,k), ylim=c(min(z, -2, na.rm=TRUE), max(z, 2, na.rm=TRUE)), xaxt="n", xlab="Study", ylab="", bty="l", ...) lines(seq_len(k)[not.na], z[not.na], col="lightgray", ...) lines(seq_len(k), z, ...) points(x=seq_len(k), y=z, pch=21, bg="black", ...) axis(side=1, at=seq_len(k), labels=ids, ...) abline(h=0, lty="dashed", ...) abline(h=c(qnorm(.025),qnorm(.975)), lty="dotted", ...) title("Standardized Residuals", ...) 
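### Hedged usage sketch (illustrative comment only): the 2x2 panel drawn above (forest,
### funnel, radial, and standardized residuals or a QQ plot) could be produced with
###   res <- rma.mh(ai=ai, bi=bi, ci=ci, di=di, data=dat, measure="OR")
###   plot(res)                # standardized residuals in the last panel (default)
###   plot(res, qqplot=TRUE)   # normal QQ plot in the last panel instead
### where 'dat' is a hypothetical data frame holding the 2x2 table counts.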
} ######################################################################### invisible() } metafor/R/update.rma.r0000644000176200001440000000302713770400056014337 0ustar liggesusers### based on stats:::update.default but with some adjustments update.rma <- function (object, formula., ..., evaluate=TRUE) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma", notav="robust.rma") if (is.null(call <- getCall(object))) stop(mstyle$stop("Need an object with call component.")) extras <- match.call(expand.dots = FALSE)$... if (!missing(formula.)) { if (inherits(object, c("rma.uni","rma.mv"))) { if (inherits(object$call$yi, "call")) { call$yi <- update.formula(object$call$yi, formula.) } else { if (is.null(object$call$mods)) { object$call$mods <- ~ 1 call$mods <- update.formula(object$call$mods, formula.) } else { if (!any(grepl("~", object$call$mods))) { stop(mstyle$stop("The 'mods' argument in 'object' must be a formula for updating to work.")) } else { call$mods <- update.formula(object$call$mods, formula.) } } } } if (inherits(object, "rma.glmm")) call$mods <- update.formula(object$call$mods, formula.) } if (length(extras)) { existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if (evaluate) eval(call, parent.frame()) else call } metafor/R/print.fsn.r0000644000176200001440000000342714052440643014224 0ustar liggesusersprint.fsn <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="fsn") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") cat(mstyle$section(paste("Fail-safe N Calculation Using the", x$type, "Approach"))) cat("\n\n") if (x$type == "Rosenthal") { cat(mstyle$text("Observed Significance Level: ")) cat(mstyle$result(.pval(x$pval, digits[["pval"]]))) cat("\n") cat(mstyle$text("Target Significance Level: ")) cat(mstyle$result(x$alpha)) } if (x$type == "Orwin") { cat(mstyle$text("Average Effect Size: ")) cat(mstyle$result(.fcf(x$meanes, digits[["est"]]))) cat("\n") cat(mstyle$text("Target Effect Size: ")) cat(mstyle$result(.fcf(x$target, digits[["est"]]))) } if (x$type == "Rosenberg") { cat(mstyle$text("Average Effect Size: ")) cat(mstyle$result(.fcf(x$meanes, digits[["est"]]))) cat("\n") cat(mstyle$text("Observed Significance Level: ")) cat(mstyle$result(.pval(x$pval, digits[["pval"]]))) cat("\n") cat(mstyle$text("Target Significance Level: ")) cat(mstyle$result(x$alpha)) } if (x$type == "REM") { cat(mstyle$text("Average Effect Size: ")) cat(mstyle$result(.fcf(x$meanes, digits[["est"]]))) cat("\n") cat(mstyle$text("Target Effect Size: ")) cat(mstyle$result(.fcf(x$target, digits[["est"]]))) #cat("\n") #cat(mstyle$text("Rejection Rate: ")) #cat(mstyle$result(.pval(x$rejrate, digits[["pval"]]))) } cat("\n\n") cat(mstyle$text("Fail-safe N: ")) cat(mstyle$result(x$fsnum)) cat("\n") if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/rma.glmm.r0000644000176200001440000026231314052500634014013 0ustar liggesusersrma.glmm <- function(ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, xi, mi, ti, ni, mods, measure, intercept=TRUE, data, slab, subset, add=1/2, to="only0", drop00=TRUE, vtype="LS", model="UM.FS", method="ML", test="z", #tdist=FALSE, #weighted=TRUE, level=95, digits, btt, nAGQ=7, verbose=FALSE, control, ...) 
{ # tau2, ######################################################################### ###### setup mstyle <- .get.mstyle("crayon" %in% .packages()) ### check argument specifications ### (arguments "to" and "vtype" are checked inside escalc function) if (missing(measure)) stop(mstyle$stop("Must specify 'measure' argument.")) if (!is.element(measure, c("OR","IRR","PLO","IRLN"))) stop(mstyle$stop("Unknown 'measure' specified.")) if (!is.element(method, c("FE","EE","CE","ML"))) stop(mstyle$stop("Unknown 'method' specified.")) ### in case user specifies more than one add/to value (as one can do with rma.mh() and rma.peto()) ### (never apply any kind of continuity correction to the data used in the actual model fitting for models implemented in this function) if (length(add) > 1L) add <- add[1] if (length(to) > 1L) to <- to[1] ### model argument only relevant for 2x2 table data (measure="OR") and for 2-group rate data (measure="IRR") ### UM.FS/UM.RS = unconditional GLMM with fixed/random study effects (logistic or poisson mixed-effects model with fixed/random intercepts) ### CM.EL/CM.AL = conditional GLMM (exact/approximate) (hypergeometric or conditional logistic model) ### BV/MV = bi/multivariate model (logistic or poisson mixed-effects model with unstructured covariance matrix) -- not implemented if (!is.element(model, c("UM.FS","UM.RS","CM.EL","CM.AL"))) stop(mstyle$stop("Unknown 'model' specified.")) ### no need for CM.AL for IRR -- use CM.EL if (model == "CM.AL" && measure == "IRR") model <- "CM.EL" na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(control)) control <- list() time.start <- proc.time() ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("tdist", "outlist", "onlyo1", "addyi", "addvi", "time", "retdat", "family", "retfit")) ### handle 'tdist' argument from ... 
(note: overrides test argument) if (.isFALSE(ddd$tdist)) test <- "z" if (.isTRUE(ddd$tdist)) test <- "t" if (!is.element(test, c("z", "t"))) stop(mstyle$stop("Invalid option selected for 'test' argument.")) ### set defaults or get onlyo1, addyi, and addvi arguments onlyo1 <- ifelse(is.null(ddd$onlyo1), FALSE, ddd$onlyo1) addyi <- ifelse(is.null(ddd$addyi), TRUE, ddd$addyi) addvi <- ifelse(is.null(ddd$addvi), TRUE, ddd$addvi) ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ### set default for formula.mods formula.mods <- NULL ### set options(warn=1) if verbose > 2 if (verbose > 2) { opwarn <- options(warn=1) on.exit(options(warn=opwarn$warn), add=TRUE) } ######################################################################### if (verbose) cat("\n") if (verbose > 1) message(mstyle$message("Extracting data and computing yi/vi values ...")) ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### extract slab, subset, and mods values, possibly from the data frame specified via data (arguments not specified are NULL) mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] mf.mods <- mf[[match("mods", names(mf))]] slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) mods <- eval(mf.mods, data, enclos=sys.frame(sys.parent())) ai <- bi <- ci <- di <- x1i <- x2i <- t1i <- t2i <- xi <- mi <- ti <- ni <- NA ### calculate yi and vi values if (is.element(measure, "OR")) { mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci k <- length(ai) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] } dat <- escalc(measure=measure, ai=ai, bi=bi, ci=ci, di=di, add=add, to=to, drop00=drop00, vtype=vtype, onlyo1=onlyo1, addyi=addyi, addvi=addvi) } if (is.element(measure, "IRR")) { mf.x1i <- mf[[match("x1i", names(mf))]] mf.x2i <- mf[[match("x2i", names(mf))]] mf.t1i <- mf[[match("t1i", names(mf))]] mf.t2i <- mf[[match("t2i", names(mf))]] x1i <- eval(mf.x1i, data, enclos=sys.frame(sys.parent())) x2i <- eval(mf.x2i, data, enclos=sys.frame(sys.parent())) t1i <- eval(mf.t1i, data, enclos=sys.frame(sys.parent())) t2i <- eval(mf.t2i, data, enclos=sys.frame(sys.parent())) k <- length(x1i) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) x1i <- x1i[subset] x2i <- x2i[subset] t1i <- t1i[subset] t2i <- t2i[subset] } dat <- escalc(measure=measure, x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, add=add, to=to, drop00=drop00, vtype=vtype, onlyo1=onlyo1, addyi=addyi, addvi=addvi) } if (is.element(measure, "PLO")) { mf.xi <- 
mf[[match("xi", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) if (is.null(mi)) mi <- ni - xi k <- length(xi) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] mi <- mi[subset] } dat <- escalc(measure=measure, xi=xi, mi=mi, add=add, to=to, vtype=vtype, onlyo1=onlyo1, addyi=addyi, addvi=addvi) } if (is.element(measure, "IRLN")) { mf.xi <- mf[[match("xi", names(mf))]] mf.ti <- mf[[match("ti", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) k <- length(xi) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] ti <- ti[subset] } dat <- escalc(measure=measure, xi=xi, ti=ti, add=add, to=to, vtype=vtype, onlyo1=onlyo1, addyi=addyi, addvi=addvi) } yi <- dat$yi ### one or more yi/vi pairs may be NA/NA (note: yi/vi pairs that are NA/NA may still have 'valid' table data) vi <- dat$vi ### one or more yi/vi pairs may be NA/NA (note: yi/vi pairs that are NA/NA may still have 'valid' table data) ni <- attr(yi, "ni") ### unadjusted total sample sizes (ni.u in escalc) ### study ids (1:k sequence before subsetting) ids <- seq_len(k) ######################################################################### if (verbose > 1) message(mstyle$message("Creating model matrix ...")) ### convert mods formula to X matrix and set intercept equal to FALSE if (inherits(mods, "formula")) { formula.mods <- mods options(na.action = "na.pass") ### set na.action to na.pass, so that NAs are not filtered out (we'll do that later) mods <- model.matrix(mods, data=data) ### extract model matrix attr(mods, "assign") <- NULL ### strip assign attribute (not needed at the moment) options(na.action = na.act) ### set na.action back to na.act intercept <- FALSE ### set to FALSE since formula now controls whether the intercept is included or not } ### turn a vector for mods into a column vector if (.is.vector(mods)) mods <- cbind(mods) ### turn a mods data frame into a matrix if (is.data.frame(mods)) mods <- as.matrix(mods) ### check if model matrix contains character variables if (is.character(mods)) stop(mstyle$stop("Model matrix contains character variables.")) ### check if mods matrix has the right number of rows if (!is.null(mods) && nrow(mods) != k) stop(mstyle$stop(paste0("Number of rows in the model matrix (", nrow(mods), ") does not match length of the outcome vector (", k, ")."))) ### generate study labels if none are specified if (verbose > 1) message(mstyle$message("Generating/extracting study labels ...")) if (is.null(slab)) { slab.null <- TRUE slab <- ids } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) slab.null <- FALSE } ### if a subset of studies is specified (note: tables, yi/vi, and ni are already subsetted above) if (!is.null(subset)) { if (verbose > 1) message(mstyle$message("Subsetting ...")) mods <- mods[subset,,drop=FALSE] slab <- slab[subset] ids <- ids[subset] } ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ### add slab attribute back attr(yi, "slab") <- slab k <- 
length(yi) ### number of tables/outcomes after subsetting (can all still include NAs) ### if drop00=TRUE, set counts to NA for studies that have no events (or all events) in both arms (corresponding yi/vi will also be NA/NA then) if (measure=="OR") { if (drop00) { id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } } if (measure=="IRR") { if (drop00) { id00 <- c(x1i == 0L & x2i == 0L) id00[is.na(id00)] <- FALSE x1i[id00] <- NA x2i[id00] <- NA } } ### save full data (including potential NAs in table data, yi/vi/ni/mods) (after subsetting) ai.f <- ai bi.f <- bi ci.f <- ci di.f <- di x1i.f <- x1i x2i.f <- x2i t1i.f <- t1i t2i.f <- t2i xi.f <- xi mi.f <- mi ti.f <- ti yi.f <- yi vi.f <- vi ni.f <- ni mods.f <- mods k.f <- k ### total number of tables/outcomes and rows in the model matrix (including all NAs) ### check for NAs in tables (and corresponding mods) and act accordingly if (is.element(measure, "OR")) { has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) | (if (is.null(mods)) FALSE else apply(is.na(mods), 1, any)) not.na <- !has.na if (any(has.na)) { if (verbose > 1) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] mods <- mods[not.na,,drop=FALSE] k <- length(ai) warning(mstyle$warning("Studies with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in studies.")) } } if (is.element(measure, "IRR")) { has.na <- is.na(x1i) | is.na(x2i) | is.na(t1i) | is.na(t2i) | (if (is.null(mods)) FALSE else apply(is.na(mods), 1, any)) not.na <- !has.na if (any(has.na)) { if (verbose > 1) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { x1i <- x1i[not.na] x2i <- x2i[not.na] t1i <- t1i[not.na] t2i <- t2i[not.na] mods <- mods[not.na,,drop=FALSE] k <- length(x1i) warning(mstyle$warning("Studies with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in studies.")) } } if (is.element(measure, "PLO")) { has.na <- is.na(xi) | is.na(mi) | (if (is.null(mods)) FALSE else apply(is.na(mods), 1, any)) not.na <- !has.na if (any(has.na)) { if (verbose > 1) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { xi <- xi[not.na] mi <- mi[not.na] mods <- mods[not.na,,drop=FALSE] k <- length(xi) warning(mstyle$warning("Studies with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in studies.")) } } if (is.element(measure, "IRLN")) { has.na <- is.na(xi) | is.na(ti) | (if (is.null(mods)) FALSE else apply(is.na(mods), 1, any)) not.na <- !has.na if (any(has.na)) { if (verbose > 1) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { xi <- xi[not.na] ti <- ti[not.na] mods <- mods[not.na,,drop=FALSE] k <- length(xi) warning(mstyle$warning("Studies with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in studies.")) } } ### note: k = number of tables (and corresponding rows of 'mods') after removing NAs ### k.f = total number of tables/outcomes and rows in the model matrix (including all NAs) stored in .f 
elements ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### check for NAs in yi/vi and act accordingly (yi/vi pair can be NA/NA if add=0 is used) ### note: if a table was removed because of NAs in mods, must also remove the corresponding yi/vi pair; ### also, must use mods.f here, since NAs in mods were already removed above (and need a separate ### mods.yi element, so that dimensions of the model matrix and vi are guaranteed to match up) mods.yi <- mods.f yivi.na <- is.na(yi) | is.na(vi) | (if (is.null(mods.yi)) FALSE else apply(is.na(mods.yi), 1, any)) not.na.yivi <- !yivi.na if (any(yivi.na)) { if (verbose > 1) message(mstyle$message("Handling NAs in yi/vi ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na.yivi] ni <- ni[not.na.yivi] vi <- vi[not.na.yivi] mods.yi <- mods.f[not.na.yivi,,drop=FALSE] warning(mstyle$warning("Some yi/vi values are NA."), call.=FALSE) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back } if (na.act == "na.fail") stop(mstyle$stop("Missing yi/vi values.")) } k.yi <- length(yi) ### number of yi/vi pairs that are not NA ### make sure that there is at least one column in X if (is.null(mods) && !intercept) { warning(mstyle$warning("Must either include an intercept and/or moderators in model.\n Coerced intercept into the model."), call.=FALSE) intercept <- TRUE } ### add vector of 1s to the X matrix for the intercept (if intercept=TRUE) if (intercept) { X <- cbind(intrcpt=rep(1,k), mods) X.f <- cbind(intrcpt=rep(1,k.f), mods.f) X.yi <- cbind(intrcpt=rep(1,k.yi), mods.yi) } else { X <- mods X.f <- mods.f X.yi <- mods.yi } ### drop redundant predictors ### note: yi may have become shorter than X due to the omission of NAs, so just use a fake yi vector here tmp <- lm(rep(0,k) ~ X - 1) coef.na <- is.na(coef(tmp)) if (any(coef.na)) { warning(mstyle$warning("Redundant predictors dropped from the model."), call.=FALSE) X <- X[,!coef.na,drop=FALSE] X.f <- X.f[,!coef.na,drop=FALSE] } ### need to do this separately for X.yi, since model matrix may have fewer rows due to removal of NA/NA pairs for yi/vi tmp <- lm(yi ~ X.yi - 1) coef.na <- is.na(coef(tmp)) if (any(coef.na)) X.yi <- X.yi[,!coef.na,drop=FALSE] ### check whether intercept is included and if yes, move it to the first column (NAs already removed, so na.rm=TRUE for any() not necessary) is.int <- apply(X, 2, .is.intercept) if (any(is.int)) { int.incl <- TRUE int.indx <- which(is.int, arr.ind=TRUE) X <- cbind(intrcpt=1, X[,-int.indx, drop=FALSE]) ### note: this removes any duplicate intercepts X.f <- cbind(intrcpt=1, X.f[,-int.indx, drop=FALSE]) ### note: this removes any duplicate intercepts intercept <- TRUE ### set intercept appropriately so that the predict() function works } else { int.incl <- FALSE } ### need to do this separately for X.yi, since model matrix may have fewer rows due to removal of NA/NA pairs for yi/vi is.int <- apply(X.yi, 2, .is.intercept) if (any(is.int)) { int.indx <- which(is.int, arr.ind=TRUE) X.yi <- cbind(intrcpt=1, X.yi[,-int.indx, drop=FALSE]) ### note: this removes any duplicate intercepts } p <- NCOL(X) ### number of columns in X (including the intercept if it is included) ### note: number of columns in X.yi may be lower than p; but computation of I^2 below is based on p ### check whether this is an intercept-only model if ((p == 1L) && .is.intercept(X)) { int.only <- TRUE } else { int.only <- FALSE } ### check if there are too many 
parameters for given k if (is.element(method, c("FE","EE","CE")) && p > k) stop(mstyle$stop("Number of parameters to be estimated is larger than the number of observations.")) if (!is.element(method, c("FE","EE","CE")) && (p+1) > k) stop(mstyle$stop("Number of parameters to be estimated is larger than the number of observations.")) ### set/check 'btt' argument btt <- .set.btt(btt, p, int.incl, colnames(X)) m <- length(btt) ### number of betas to test (m = p if all betas are tested) ######################################################################### ### set default control parameters con <- list(verbose = FALSE, # also passed on to glm/glmer/optim/nlminb/minqa (uobyqa/newuoa/bobyqa) package="lme4", # package for fitting logistic mixed-effects models ("lme4" or "GLMMadaptive") optimizer = "optim", # optimizer to use for CM.EL+OR ("optim", "nlminb", "uobyqa", "newuoa", "bobyqa", "clogit", "clogistic") optmethod = "BFGS", # argument 'method' for optim() ("Nelder-Mead" and "BFGS" are sensible options) scaleX = TRUE, # whether non-dummy variables in the X matrix should be rescaled before model fitting evtol = 1e-07, # lower bound for eigenvalues to determine if model matrix is positive definite dnchgcalc = "dFNCHypergeo", # method for calculating dnchg ("dFNCHypergeo" from BiasedUrn package or "dnoncenhypergeom") dnchgprec = 1e-10) # precision for dFNCHypergeo() ### replace defaults with any user-defined values con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose pos.optCtrl <- pmatch(names(control), "optCtrl", nomatch=0) if (sum(pos.optCtrl) > 0) { optCtrl <- control[[which(pos.optCtrl == 1)]] } else { optCtrl <- list() } if (con$optimizer == "optim") { con.pos <- pmatch(names(optCtrl), "REPORT", nomatch=0) ### set REPORT to 1 if it is not already set by the user if (sum(con.pos) > 0) { names(optCtrl)[which(con.pos == 1)] <- "REPORT" } else { optCtrl$REPORT <- 1 } optCtrl$trace <- con$verbose ### trace for optim is a non-negative integer } if (con$optimizer == "nlminb") optCtrl$trace <- ifelse(con$verbose > 0, 1, 0) ### set trace to 1, so information is printed every iteration if (is.element(con$optimizer, c("uobyqa", "newuoa", "bobyqa"))) optCtrl$iprint <- ifelse(con$verbose > 0, 3, 0) ### set iprint to 3 for maximum information pos.clogitCtrl <- pmatch(names(control), "clogitCtrl", nomatch=0) if (sum(pos.clogitCtrl) > 0) { clogitCtrl <- control[[which(pos.clogitCtrl == 1)]] } else { clogitCtrl <- list() } pos.clogisticCtrl <- pmatch(names(control), "clogisticCtrl", nomatch=0) if (sum(pos.clogisticCtrl) > 0) { clogisticCtrl <- control[[which(pos.clogisticCtrl == 1)]] } else { clogisticCtrl <- list() } pos.glmCtrl <- pmatch(names(control), "glmCtrl", nomatch=0) if (sum(pos.glmCtrl) > 0) { glmCtrl <- control[[which(pos.glmCtrl == 1)]] } else { glmCtrl <- list() } glmCtrl$trace <- ifelse(con$verbose > 0, TRUE, FALSE) ### trace for glmCtrl is logical pos.glmerCtrl <- pmatch(names(control), "glmerCtrl", nomatch=0) if (sum(pos.glmerCtrl) > 0) { glmerCtrl <- control[[which(pos.glmerCtrl == 1)]] } else { glmerCtrl <- list() } pos.mmCtrl <- pmatch(names(control), "mmCtrl", nomatch=0) if (sum(pos.mmCtrl) > 0) { mmCtrl <- control[[which(pos.mmCtrl == 1)]] } else { mmCtrl <- list() } pos.intCtrl <- pmatch(names(control), "intCtrl", nomatch=0) if (sum(pos.intCtrl) > 0) { intCtrl <- control[[which(pos.intCtrl == 1)]] } else { intCtrl <- list() } con.pos <- pmatch(names(intCtrl), "lower", nomatch=0) 
if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "lower" } else { intCtrl$lower <- -Inf } con.pos <- pmatch(names(intCtrl), "upper", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "upper" } else { intCtrl$upper <- Inf } con.pos <- pmatch(names(intCtrl), "subdivisions", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "subdivisions" } else { intCtrl$subdivisions <- 100L } con.pos <- pmatch(names(intCtrl), "rel.tol", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "rel.tol" } else { intCtrl$rel.tol <- .Machine$double.eps^0.25 } pos.hessianCtrl <- pmatch(names(control), "hessianCtrl", nomatch=0) if (sum(pos.hessianCtrl) > 0) { hessianCtrl <- control[[which(pos.hessianCtrl == 1)]] } else { hessianCtrl <- list(r=16) } #return(list(verbose=verbose, optimizer=con$optimizer, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, optCtrl=optCtrl, glmCtrl=glmCtrl, glmerCtrl=glmerCtrl, mmCtrl=mmCtrl, intCtrl=intCtrl, hessianCtrl=hessianCtrl)) if (!is.element(con$package, c("lme4", "GLMMadaptive"))) stop(mstyle$stop("Unknown package specified.")) if (!is.element(con$optimizer, c("optim","nlminb","uobyqa","newuoa","bobyqa","clogit","clogistic"))) stop(mstyle$stop("Unknown optimizer specified.")) if (con$dnchgcalc != "dnoncenhypergeom" && con$dnchgcalc != "dFNCHypergeo") stop(mstyle$stop("Unknown dnchgcalc method specified.")) if (is.element(con$optimizer, c("clogit", "clogistic")) && method == "ML") stop(mstyle$stop("Cannot use 'clogit' or 'clogistic' with method='ML'.")) if (con$package == "lme4" && is.element(measure, c("OR","IRR")) && model == "UM.RS" && method == "ML" && nAGQ > 1) { warning(mstyle$warning("Currently not possible to fit RE/ME model='UM.RS' with nAGQ > 1. nAGQ automatically set to 1."), call.=FALSE) nAGQ <- 1 } ######################################################################### ### check that the required packages are installed if (is.element(measure, c("OR","IRR"))) { if ((model == "UM.FS" && method == "ML") || (model == "UM.RS") || (model == "CM.AL" && method == "ML") || (model == "CM.EL" && method == "ML")) { if (!requireNamespace(con$package, quietly=TRUE)) stop(mstyle$stop(paste0("Please install the '", con$package, "' package to fit this model."))) } } if (is.element(measure, c("PLO","IRLN")) && method == "ML") { if (!requireNamespace(con$package, quietly=TRUE)) stop(mstyle$stop(paste0("Please install the '", con$package, "' package to fit this model."))) } if (measure == "OR" && model == "CM.EL") { if (is.element(con$optimizer, c("uobyqa", "newuoa", "bobyqa"))) { if (!requireNamespace("minqa", quietly=TRUE)) stop(mstyle$stop("Please install the 'minqa' package to fit this model.")) minqa <- get(con$optimizer, envir=loadNamespace("minqa")) con$optimizer <- "minqa" } if (con$optimizer == "optim" || con$optimizer == "nlminb" || con$optimizer == "minqa") { if (!requireNamespace("numDeriv", quietly=TRUE)) stop(mstyle$stop("Please install the 'numDeriv' package to fit this model.")) if (con$dnchgcalc == "dFNCHypergeo") { if (!requireNamespace("BiasedUrn", quietly=TRUE)) stop(mstyle$stop("Please install the 'BiasedUrn' package to fit this model.")) } } if (con$optimizer == "clogit") { if (!requireNamespace("survival", quietly=TRUE)) stop(mstyle$stop("Please install the 'survival' package to fit this model.")) coxph <- survival::coxph Surv <- survival::Surv } if (con$optimizer == "clogistic") { if (!requireNamespace("Epi", quietly=TRUE)) stop(mstyle$stop("Please install the 'Epi' package to 
fit this model.")) } } ### check whether model matrix is of full rank if (any(eigen(crossprod(X), symmetric=TRUE, only.values=TRUE)$values <= con$evtol)) stop(mstyle$stop("Model matrix not of full rank. Cannot fit model.")) ######################################################################### ######################################################################### ######################################################################### se.tau2 <- I2 <- H2 <- QE <- QEp <- NA level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ###### model fitting, test statistics, and confidence intervals ### upgrade warnings to errors (for some testing) #o.warn <- getOption("warn") #on.exit(options(warn = o.warn), add=TRUE) #options(warn = 2) ### rescale X matrix (only for models with moderators and models including an intercept term) if (!int.only && int.incl && con$scaleX) { Xsave <- X meanX <- colMeans(X[, 2:p, drop=FALSE]) sdX <- apply(X[, 2:p, drop=FALSE], 2, sd) ### consider using colSds() from matrixStats package is.d <- apply(X, 2, .is.dummy) ### is each column a dummy variable (i.e., only 0s and 1s)? X[,!is.d] <- apply(X[, !is.d, drop=FALSE], 2, scale) ### rescale the non-dummy variables } ######################################################################### ######################################################################### ######################################################################### ### two group outcomes (odds ratios and incidence rate ratios) if (is.element(measure, c("OR","IRR"))) { ###################################################################### if (is.element(model, c("UM.FS","UM.RS"))) { ### prepare grp-level data for the unconditional models if (measure == "OR") { ### xi mi study group1 group2 group12 offset intrcpt mod1 dat.grp <- cbind(xi=c(rbind(ai,ci)), mi=c(rbind(bi,di))) ### grp-level outcome data ai bi i 1 0 +1/2 NULL 1 x1i ### ci di i 0 1 -1/2 NULL 0 0 if (is.null(ddd$family)) { dat.fam <- binomial } else { dat.fam <- ddd$family } dat.off <- NULL } if (measure == "IRR") { ### xi ti study group1 group2 group12 offset intrcpt mod1 dat.grp <- cbind(xi=c(rbind(x1i,x2i))) ### grp-level outcome data x1i t1i i 1 0 +1/2 t1i 1 x1i ### log(ti) for offset x2i t2i i 0 1 -1/2 t2i 0 0 if (is.null(ddd$family)) { dat.fam <- poisson } else { dat.fam <- ddd$family } dat.off <- log(c(rbind(t1i,t2i))) } group1 <- rep(c(1,0), times=k) ### group dummy for 1st group (ai,bi for group 1) group2 <- rep(c(0,1), times=k) ### group dummy for 2nd group (ci,di for group 2) (not really needed) group12 <- rep(c(1/2,-1/2), times=k) ### group dummy with +- 1/2 coding study <- factor(rep(seq_len(k), each=2)) ### study factor const <- cbind(rep(1,2*k)) ### intercept for random study effects model X.fit <- X[rep(seq(k), each=2),,drop=FALSE] ### duplicate each row in X (drop=FALSE, so column names are preserved) X.fit <- cbind(group1*X.fit[,,drop=FALSE]) ### then multiply by group1 dummy (intercept, if included, becomes the group1 dummy) row.names(X.fit) <- seq_len(2*k) if (.isTRUE(ddd$retdat)) return(list(dat.grp=dat.grp, X.fit=X.fit, study=study, dat.off = if (!is.null(dat.off)) dat.off else NULL, const=const, group1=group1, group2=group2, group12=group12, dat.fam=dat.fam)) ################################################################### #################################################### ### unconditional model with fixed study effects ### #################################################### if (model == "UM.FS") { ### fit FE 
model if (verbose) message(mstyle$message("Fitting FE model ...")) if (k > 1) { res.FE <- try(glm(dat.grp ~ -1 + X.fit + study, offset=dat.off, family=dat.fam, control=glmCtrl), silent=!verbose) } else { res.FE <- try(glm(dat.grp ~ -1 + X.fit + const, offset=dat.off, family=dat.fam, control=glmCtrl), silent=!verbose) } if (inherits(res.FE, "try-error")) stop(mstyle$stop("Cannot fit FE model.")) ### log-likelihood #ll.FE <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, predict(res.FE, type="response"), log=TRUE))) ### model has a NULL offset #ll.FE <- with(data.frame(dat.grp), sum(dpois(xi, predict(res.FE, type="response"), log=TRUE))) ### offset already incorporated into predict() ll.FE <- c(logLik(res.FE)) ### same as above ### fit saturated FE model (= QE model) if (verbose) message(mstyle$message("Fitting saturated model ...")) if (k > 1) { X.QE <- model.matrix(~ -1 + X.fit + study + study:group1) res.QE <- try(glm(dat.grp ~ -1 + X.QE, offset=dat.off, family=dat.fam, control=glmCtrl), silent=!verbose) } else { res.QE <- res.FE } if (inherits(res.QE, "try-error")) { warning(mstyle$warning("Cannot fit saturated model."), call.=FALSE) QEconv <- FALSE ll.QE <- NA } else { QEconv <- TRUE ### log-likelihood #ll.QE <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, xi/(xi+mi), log=TRUE))) ### model has a NULL offset #ll.QE <- with(data.frame(dat.grp), sum(dpois(xi, xi, log=TRUE))) ### offset not relevant for saturated model ll.QE <- c(logLik(res.QE)) ### same as above ### extract coefficients and variance-covariance matrix for Wald-type test for heterogeneity b2.QE <- cbind(na.omit(coef(res.QE)[-seq_len(k+p)])) ### coef() still includes aliased coefficients as NAs, so have to filter them out vb2.QE <- vcov(res.QE, complete=FALSE)[-seq_len(k+p),-seq_len(k+p),drop=FALSE] ### aliased coefficients are removed by vcov() when complete=FALSE } if (method == "ML") { ### fit ML model ### notes: 1) not recommended alternative: using group1 instead of group12 for the random effect (since that forces the variance in group 2 to be lower) if (verbose) message(mstyle$message("Fitting ML model ...")) if (con$package == "lme4") { if (verbose) { res.ML <- try(lme4::glmer(dat.grp ~ -1 + X.fit + study + (group12 - 1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) } else { res.ML <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.fit + study + (group12 - 1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) } } if (con$package == "GLMMadaptive") { if (measure == "OR") { dat.mm <- data.frame(xi=dat.grp[,"xi"], mi=dat.grp[,"mi"], study=study, group12=group12) res.ML <- try(GLMMadaptive::mixed_model(cbind(xi,mi) ~ -1 + X.fit + study, random = ~ group12 - 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } else { dat.mm <- data.frame(xi=dat.grp, study=study, group12=group12) res.ML <- try(GLMMadaptive::mixed_model(xi ~ -1 + X.fit + study + offset(dat.off), random = ~ group12 - 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } } #return(res.ML) if (inherits(res.ML, "try-error")) stop(mstyle$stop("Cannot fit ML model.")) ### log-likelihood #ll.ML <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, fitted(res.ML), log=TRUE))) ### not correct (since it does not incorporate the random effects; same as ll.FE if tau^2=0) #ll.ML <- 
with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, plogis(qlogis(fitted(res.ML)) + group12*unlist(ranef(res.ML))), log=TRUE))) ### not correct (since one really has to integrate; same as ll.FE if tau^2=0) #ll.ML <- c(logLik(res.ML)) ### this is not the same as ll.FE when tau^2 = 0 (not sure why) if (con$package == "lme4") { ll.ML <- ll.QE - 1/2 * deviance(res.ML) ### this makes ll.ML comparable to ll.FE (same as ll.FE when tau^2=0) } else { ### FIXME: When using GLMMadaptive, ll is not comparable for FE model when tau^2 = 0 ll.ML <- c(logLik(res.ML)) } } #return(list(res.FE, res.QE, ll.FE=ll.FE, ll.QE=ll.QE)) #return(list(res.FE, res.QE, res.ML, ll.FE=ll.FE, ll.QE=ll.QE, ll.ML=ll.ML)) #res.FE <- res[[1]]; res.QE <- res[[2]]; res.ML <- res[[3]] if (is.element(method, c("FE","EE","CE"))) { beta <- cbind(coef(res.FE)[seq_len(p)]) vb <- vcov(res.FE)[seq_len(p),seq_len(p),drop=FALSE] tau2 <- 0 sigma2 <- NA parms <- p + k p.eff <- p + k k.eff <- 2*k } if (method == "ML") { if (con$package == "lme4") { beta <- cbind(lme4::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- lme4::VarCorr(res.ML)[[1]][1] } if (con$package == "GLMMadaptive") { beta <- cbind(GLMMadaptive::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- res.ML$D[1,1] } sigma2 <- NA parms <- p + k + 1 p.eff <- p + k k.eff <- 2*k } #return(list(beta=beta, vb=vb, tau2=tau2, sigma2=sigma2, parms=parms, p.eff=p.eff, k.eff=k.eff, b2.QE=b2.QE, vb2.QE=vb2.QE)) } ################################################################### ##################################################### ### unconditional model with random study effects ### ##################################################### if (model == "UM.RS") { ### fit FE model if (verbose) message(mstyle$message("Fitting FE model ...")) if (con$package == "lme4") { if (verbose) { res.FE <- try(lme4::glmer(dat.grp ~ -1 + X.fit + const + (1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) } else { res.FE <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.fit + const + (1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) } } if (con$package == "GLMMadaptive") { if (measure == "OR") { dat.mm <- data.frame(xi=dat.grp[,"xi"], mi=dat.grp[,"mi"], study=study, const=const) res.FE <- try(GLMMadaptive::mixed_model(cbind(xi,mi) ~ -1 + X.fit + const, random = ~ 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } else { dat.mm <- data.frame(xi=dat.grp, study=study, const=const) res.FE <- try(GLMMadaptive::mixed_model(xi ~ -1 + X.fit + const + offset(dat.off), random = ~ 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } } if (inherits(res.FE, "try-error")) stop(mstyle$stop("Cannot fit FE model.")) ### log-likelihood ll.FE <- c(logLik(res.FE)) ### fit saturated FE model (= QE model) ### notes: 1) must figure out which terms are aliased in saturated model and remove those terms before fitting ### 2) sigma^2 for the study random effect must be close to the one from the FE model - so set start value to sigma from that model if (verbose) message(mstyle$message("Fitting saturated model ...")) if (k > 1) { X.QE <- model.matrix(~ -1 + X.fit + const + study:group1) res.QE <- try(glm(dat.grp ~ -1 + X.QE, offset=dat.off, family=dat.fam, 
control=glmCtrl), silent=!verbose) X.QE <- X.QE[,!is.na(coef(res.QE)),drop=FALSE] if (con$package == "lme4") { if (verbose) { res.QE <- try(lme4::glmer(dat.grp ~ -1 + X.QE + (1 | study), offset=dat.off, family=dat.fam, start=c(sqrt(lme4::VarCorr(res.FE)[[1]][1])), nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) } else { res.QE <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.QE + (1 | study), offset=dat.off, family=dat.fam, start=c(sqrt(lme4::VarCorr(res.FE)[[1]][1])), nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) } } if (con$package == "GLMMadaptive") { mmCtrl$max_coef_value <- 50 if (measure == "OR") { dat.mm <- data.frame(xi=dat.grp[,"xi"], mi=dat.grp[,"mi"], study=study) res.QE <- try(GLMMadaptive::mixed_model(cbind(xi,mi) ~ -1 + X.QE, random = ~ 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl, initial_values=list(D=matrix(res.FE$D[1,1]))), silent=!verbose) } else { dat.mm <- data.frame(xi=dat.grp, study=study) res.QE <- try(GLMMadaptive::mixed_model(xi ~ -1 + X.QE + offset(dat.off), random = ~ 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } } } else { res.QE <- res.FE } if (inherits(res.QE, "try-error")) { warning(mstyle$warning("Cannot fit saturated model."), call.=FALSE) QEconv <- FALSE ll.QE <- NA } else { QEconv <- TRUE ### log-likelihood ll.QE <- c(logLik(res.QE)) ### extract coefficients and variance-covariance matrix for Wald-type test for heterogeneity if (con$package == "lme4") { b2.QE <- cbind(lme4::fixef(res.QE)[-seq_len(p+1)]) ### aliased coefficients are already removed vb2.QE <- as.matrix(vcov(res.QE))[-seq_len(p+1),-seq_len(p+1),drop=FALSE] ### aliased coefficients are already removed } if (con$package == "GLMMadaptive") { b2.QE <- cbind(GLMMadaptive::fixef(res.QE)[-seq_len(p+1)]) ### aliased coefficients are already removed vb2.QE <- as.matrix(vcov(res.QE))[-seq_len(p+1),-seq_len(p+1),drop=FALSE] ### aliased coefficients are already removed vb2.QE <- vb2.QE[-nrow(vb2.QE), -ncol(vb2.QE)] } } if (method == "ML") { ### fit ML model ### notes: 1) not recommended alternative: using group1 instead of group12 for the random effect (since that forces the variance in group 2 to be lower) ### 2) this approach is okay if we also allow group1 random effect and intercepts to correlate (in fact, this is identical to the bivariate model) ### 3) start=c(sqrt(lme4::VarCorr(res.FE)[[1]][1])) has no effect, since the start value for tau^2 is not specified (and using 0 is probably not ideal for that) if (verbose) message(mstyle$message("Fitting ML model ...")) if (con$package == "lme4") { if (verbose) { res.ML <- try(lme4::glmer(dat.grp ~ -1 + X.fit + const + (1 | study) + (group12 - 1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) #res.ML <- try(lme4::glmer(dat.grp ~ -1 + X.fit + const + (group1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) #return(res.ML) } else { res.ML <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.fit + const + (1 | study) + (group12 - 1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) # this is identical to: #res.ML <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.fit + const + (1 + group12 || study), 
offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) } } if (con$package == "GLMMadaptive") { if (measure == "OR") { dat.mm <- data.frame(xi=dat.grp[,"xi"], mi=dat.grp[,"mi"], study=study, const=const, group12=group12) res.ML <- try(GLMMadaptive::mixed_model(cbind(xi,mi) ~ -1 + X.fit + const, random = ~ 1 + group12 || study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } else { dat.mm <- data.frame(xi=dat.grp, study=study, const=const, group12=group12) res.ML <- try(GLMMadaptive::mixed_model(xi ~ -1 + X.fit + const + offset(dat.off), random = ~ 1 + group12 || study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } } if (inherits(res.ML, "try-error")) stop(mstyle$stop("Cannot fit ML model.")) ### log-likelihood ll.ML <- c(logLik(res.ML)) } #return(list(res.FE, res.QE, res.ML, ll.FE=ll.FE, ll.QE=ll.QE, ll.ML=ll.ML)) #res.FE <- res[[1]]; res.QE <- res[[2]]; res.ML <- res[[3]] if (is.element(method, c("FE","EE","CE"))) { if (con$package == "lme4") { beta <- cbind(lme4::fixef(res.FE)[seq_len(p)]) vb <- as.matrix(vcov(res.FE))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- 0 sigma2 <- lme4::VarCorr(res.FE)[[1]][1] } if (con$package == "GLMMadaptive") { beta <- cbind(GLMMadaptive::fixef(res.FE)[seq_len(p)]) vb <- as.matrix(vcov(res.FE))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- 0 sigma2 <- res.FE$D[1,1] } parms <- p + 1 + 1 p.eff <- p + 1 k.eff <- 2*k } if (method == "ML") { if (con$package == "lme4") { beta <- cbind(lme4::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- lme4::VarCorr(res.ML)[[2]][1] sigma2 <- lme4::VarCorr(res.ML)[[1]][1] } if (con$package == "GLMMadaptive") { beta <- cbind(GLMMadaptive::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- res.ML$D[2,2] sigma2 <- res.ML$D[1,1] } parms <- p + 1 + 2 p.eff <- p + 1 k.eff <- 2*k } #return(list(beta=beta, vb=vb, tau2=tau2, sigma2=sigma2, parms=parms, p.eff=p.eff, k.eff=k.eff, b2.QE=b2.QE, vb2.QE=vb2.QE)) } ################################################################### } ###################################################################### if ((measure=="IRR" && model == "CM.EL") || (measure=="OR" && model=="CM.AL") || (measure=="OR" && model=="CM.EL")) { ### prepare data for the conditional models if (measure == "OR") { dat.grp <- cbind(xi=ai, mi=ci) ### conditional outcome data (number of cases in group 1 conditional on total number of cases) dat.off <- log((ai+bi)/(ci+di)) ### log(n1i/n2i) for offset } if (measure == "IRR") { dat.grp <- cbind(xi=x1i, mi=x2i) ### conditional outcome data (number of events in group 1 conditional on total number of events) dat.off <- log(t1i/t2i) ### log(t1i/t1i) for offset } study <- factor(seq_len(k)) ### study factor X.fit <- X if (.isTRUE(ddd$retdat)) return(list(dat.grp=dat.grp, X.fit=X.fit, study=study, dat.off = if (!is.null(dat.off)) dat.off else NULL)) ################################################################### ############################################################### ### conditional model (approx. 
ll for ORs / exact for IRRs) ### ############################################################### ### fit FE model if (verbose) message(mstyle$message("Fitting FE model ...")) res.FE <- try(glm(dat.grp ~ -1 + X.fit, offset=dat.off, family=binomial, control=glmCtrl), silent=!verbose) if (inherits(res.FE, "try-error")) stop(mstyle$stop("Cannot fit FE model.")) ### log-likelihood #ll.FE <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, predict(res.FE, type="response"), log=TRUE))) ### offset already incorporated into predict() #ll.FE <- with(data.frame(dat.grp), sum(dpois(xi, predict(res.FE, type="response"), log=TRUE))) ### offset already incorporated into predict() ll.FE <- c(logLik(res.FE)) ### same as above ### fit saturated FE model (= QE model) if (verbose) message(mstyle$message("Fitting saturated model ...")) if (k > 1) { X.QE <- model.matrix(~ -1 + X.fit + study) res.QE <- try(glm(dat.grp ~ -1 + X.QE, offset=dat.off, family=binomial, control=glmCtrl), silent=!verbose) } else { res.QE <- res.FE } if (inherits(res.QE, "try-error")) { warning(mstyle$warning("Cannot fit saturated model."), call.=FALSE) QEconv <- FALSE ll.QE <- NA } else { QEconv <- TRUE ### log-likelihood #ll.QE <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, xi/(xi+mi), log=TRUE))) ### offset not relevant for saturated model #ll.QE <- with(data.frame(dat.grp), sum(dpois(xi, xi, log=TRUE))) ### offset not relevant for saturated model ll.QE <- c(logLik(res.QE)) ### same as above ### extract coefficients and variance-covariance matrix for Wald-type test for heterogeneity b2.QE <- cbind(na.omit(coef(res.QE)[-seq_len(p)])) ### coef() still includes aliased coefficients as NAs, so have to filter them out vb2.QE <- vcov(res.QE, complete=FALSE)[-seq_len(p),-seq_len(p),drop=FALSE] ### aliased coefficients are removed by vcov() when complete=FALSE } #return(list(res.FE, res.QE, ll.FE, ll.QE)) #res.FE <- res[[1]]; res.QE <- res[[2]] if (method == "ML") { ### fit ML model ### notes: 1) suppressMessages to suppress the 'one random effect per observation' warning if (verbose) message(mstyle$message("Fitting ML model ...")) if (con$package == "lme4") { if (verbose) { res.ML <- try(lme4::glmer(dat.grp ~ -1 + X.fit + (1 | study), offset=dat.off, family=binomial, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) } else { res.ML <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.fit + (1 | study), offset=dat.off, family=binomial, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) } } if (con$package == "GLMMadaptive") { dat.mm <- data.frame(xi=dat.grp[,"xi"], mi=dat.grp[,"mi"], study=study) res.ML <- try(GLMMadaptive::mixed_model(cbind(xi,mi) ~ -1 + X.fit + offset(dat.off), random = ~ 1 | study, data=dat.mm, family=binomial, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } if (inherits(res.ML, "try-error")) stop(mstyle$stop("Cannot fit ML model.")) ### log-likelihood if (con$package == "lme4") { ll.ML <- ll.QE - 1/2 * deviance(res.ML) ### this makes ll.ML comparable to ll.FE (same as ll.FE when tau^2=0) } else { ### FIXME: When using GLMMadaptive, ll is not comparable for FE model when tau^2 = 0 ll.ML <- c(logLik(res.ML)) } } #return(list(res.FE, res.QE, res.ML, ll.FE=ll.FE, ll.QE=ll.QE, ll.ML=ll.ML)) #return(list(res.FE, res.QE, res.ML, ll.FE=ll.FE, ll.QE=ll.QE, ll.ML=ll.ML)) #res.FE <- res[[1]]; res.QE <- res[[2]]; res.ML <- res[[3]] if (is.element(method, c("FE","EE","CE"))) { beta <- cbind(coef(res.FE)[seq_len(p)]) vb <- 
vcov(res.FE)[seq_len(p),seq_len(p),drop=FALSE] tau2 <- 0 sigma2 <- NA parms <- p p.eff <- p k.eff <- k } if (method == "ML") { if (con$package == "lme4") { beta <- cbind(lme4::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- lme4::VarCorr(res.ML)[[1]][1] } if (con$package == "GLMMadaptive") { beta <- cbind(GLMMadaptive::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- res.ML$D[1,1] } sigma2 <- NA parms <- p + 1 p.eff <- p k.eff <- k } #return(list(beta=beta, vb=vb, tau2=tau2, sigma2=sigma2, parms=parms, p.eff=p.eff, k.eff=k.eff, b2.QE=b2.QE, vb2.QE=vb2.QE)) ################################################################### } if (measure=="OR" && model=="CM.EL") { #################################################### ### conditional model (exact likelihood for ORs) ### #################################################### if (verbose) message(mstyle$message("Fitting FE model ...")) if (con$optimizer == "optim" || con$optimizer == "nlminb" || con$optimizer == "minqa") { ### fit FE model ### notes: 1) this routine uses direct optimization over the non-central hypergeometric distribution ### 2) start values from CM.AL model and 0 for tau^2 (held at 0 during the optimization since random=FALSE) ### 3) no integration for FE model, so intCtrl is not relevant ### 4) results can be sensitive to the scaling of moderators if (con$optimizer == "optim") { res.FE <- try(optim(par=c(coef(res.FE)[seq_len(p)], 0), .dnchg, method=con$optmethod, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, control=optCtrl), silent=!verbose) } if (con$optimizer == "nlminb") { res.FE <- try(nlminb(start=c(coef(res.FE)[seq_len(p)], 0), .dnchg, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, control=optCtrl), silent=!verbose) } if (con$optimizer == "minqa") { res.FE <- try(minqa(par=c(coef(res.FE)[seq_len(p)], 0), .dnchg, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, control=optCtrl), silent=!verbose) } if (con$optimizer == "optim" || con$optimizer == "nlminb") { if (inherits(res.FE, "try-error") || res.FE$convergence != 0) stop(mstyle$stop("Cannot fit FE model.")) } if (con$optimizer == "minqa") { if (inherits(res.FE, "try-error") || res.FE$ierr != 0) stop(mstyle$stop("Cannot fit FE model.")) } if (verbose > 1) message(mstyle$message("Computing Hessian ...")) h.FE <- numDeriv::hessian(.dnchg, x=res.FE$par, method.args=hessianCtrl, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec) #return(list(res.FE, h.FE)) ### log-likelihood if (con$optimizer == "optim") ll.FE <- -1 * res.FE$value if (con$optimizer == "nlminb") ll.FE <- -1 * res.FE$objective if (con$optimizer == "minqa") ll.FE <- -1 * res.FE$fval ### fit saturated FE model (= QE model) ### notes: 1) must figure out which terms are aliased in saturated model and remove those terms before fitting ### 2) start values from CM.AL model and 0 for tau^2 (held at 0 during the optimization since random=FALSE) ### 3) therefore only try to fit saturated model if this was possible with CM.AL ### 4) no integration for FE model, so intCtrl is not relevant if (QEconv) { if (verbose) message(mstyle$message("Fitting saturated model ...")) if (k 
> 1) { is.aliased <- is.na(coef(res.QE)) X.QE <- X.QE[,!is.aliased,drop=FALSE] ### res.QE is from CM.AL model if (con$optimizer == "optim") { res.QE <- try(optim(par=c(coef(res.QE)[!is.aliased], 0), .dnchg, method=con$optmethod, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.QE, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, control=optCtrl), silent=!verbose) } if (con$optimizer == "nlminb") { res.QE <- try(nlminb(start=c(coef(res.QE)[!is.aliased], 0), .dnchg, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.QE, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, control=optCtrl), silent=!verbose) } if (con$optimizer == "minqa") { res.QE <- try(minqa(par=c(coef(res.QE)[!is.aliased], 0), .dnchg, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.QE, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, control=optCtrl), silent=!verbose) } if (con$optimizer == "optim" || con$optimizer == "nlminb") { if (inherits(res.QE, "try-error") || res.QE$convergence != 0) { warning(mstyle$warning("Cannot fit saturated model."), call.=FALSE) QEconv <- FALSE ll.QE <- NA } } if (con$optimizer == "minqa") { if (inherits(res.QE, "try-error") || res.QE$ierr != 0) { warning(mstyle$warning("Cannot fit saturated model."), call.=FALSE) QEconv <- FALSE ll.QE <- NA } } if (QEconv) { if (verbose > 1) message(mstyle$message("Computing Hessian ...")) h.QE <- numDeriv::hessian(.dnchg, x=res.QE$par, method.args=hessianCtrl, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.QE, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec) } } else { res.QE <- res.FE h.QE <- h.FE } #return(list(res.QE, h.QE)) } if (QEconv) { ### log-likelihood if (con$optimizer == "optim") ll.QE <- -1 * res.QE$value if (con$optimizer == "nlminb") ll.QE <- -1 * res.QE$objective if (con$optimizer == "minqa") ll.QE <- -1 * res.QE$fval ### extract coefficients and variance-covariance matrix for Wald-type test for heterogeneity #return(res.QE) b2.QE <- res.QE$par ### recall: aliased coefficients are already removed hessian <- h.QE ### take hessian from hessian() (again, aliased coefs are already removed) #hessian <- res.QE$hessian ### take hessian from optim() (again, aliased coefs are already removed) p.QE <- length(b2.QE) ### how many parameters are left in saturated model? b2.QE <- b2.QE[-p.QE] ### remove last element (for tau^2, constrained to 0) hessian <- hessian[-p.QE,-p.QE,drop=FALSE] ### remove last row/column (for tau^2, constrained to 0) p.QE <- length(b2.QE) ### how many parameters are now left? is.0 <- colSums(hessian == 0L) == p.QE ### any columns in hessian entirely composed of 0s? 
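### note on the Wald-type heterogeneity test computed below: parameters whose Hessian rows/columns are entirely zero are dropped first,
### the remaining Hessian (observed information for the saturated model) is then partitioned into blocks h.A, h.B, h.C, h.D (with h.A
### corresponding to the first p coefficients); by blockwise inversion, the precision matrix of the extra coefficients b2.QE is the
### Schur complement h.D - h.C %*% h.A^{-1} %*% h.B, so the Wald statistic t(b2.QE) %*% (h.D - h.C %*% h.A^{-1} %*% h.B) %*% b2.QE
### only requires inverting h.A (done via chol() / chol2inv(); see the blockwise inversion reference given further below)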
b2.QE <- b2.QE[!is.0] ### keep coefficients where this is not the case hessian <- hessian[!is.0,!is.0,drop=FALSE] ### keep parts of hessian where this is not the case b2.QE <- cbind(b2.QE[-seq_len(p)]) ### remove first p coefficients h.A <- hessian[seq_len(p),seq_len(p),drop=FALSE] ### upper left part of hessian h.B <- hessian[seq_len(p),-seq_len(p),drop=FALSE] ### upper right part of hessian h.C <- hessian[-seq_len(p),seq_len(p),drop=FALSE] ### lower left part of hessian h.D <- hessian[-seq_len(p),-seq_len(p),drop=FALSE] ### lower right part of hessian (of which we need the inverse) chol.h.A <- try(chol(h.A), silent=!verbose) ### see if h.A can be inverted with chol() if (inherits(chol.h.A, "try-error")) { warning(mstyle$warning("Cannot invert Hessian for saturated model."), call.=FALSE) QE.Wld <- NA } else { Ivb2.QE <- h.D-h.C%*%chol2inv(chol.h.A)%*%h.B ### inverse of the inverse of the lower right part QE.Wld <- c(t(b2.QE) %*% Ivb2.QE %*% b2.QE) ### Wald statistic (note: this approach only requires taking the inverse of h.A) } ### see: https://en.wikipedia.org/wiki/Invertible_matrix#Blockwise_inversion #vb2.QE <- chol2inv(chol(hessian))[-seq_len(p),-seq_len(p),drop=FALSE] ### take inverse, then take part relevant for QE test #QE.Wld <- c(t(b2.QE) %*% chol2inv(chol(vb2.QE)) %*% b2.QE) } } if (con$optimizer == "clogit" || con$optimizer == "clogistic") { ### fit FE model ### notes: 1) this routine uses either clogit() from the survival package or clogistic() from the Epi package ### 2) the dataset must be in group-level and IPD format (i.e., not in the conditional format) ### 3) if the studies are large, the IPD dataset may also be very large, and R may run out of memory ### 4) for larger datasets, run time is often excessive (and may essentially freeze R) ### 5) suppressMessages for clogit() to suppress the 'beta may be infinite' warning ### prepare IPD dataset ### study event group1 intrcpt moderator ### i 1 1 1 x1i (repeated ai times) event <- unlist(lapply(seq_len(k), function(i) c(rep.int(1,ai[i]), rep.int(0,bi[i]), rep.int(1,ci[i]), rep.int(0,di[i])))) ### event dummy i 0 1 1 x1i (repeated bi times) group1 <- unlist(lapply(seq_len(k), function(i) c(rep.int(1,ai[i]), rep.int(1,bi[i]), rep.int(0,ci[i]), rep.int(0,di[i])))) ### group1 dummy i 1 0 0 0 (repeated ci times) study.l <- factor(rep(seq_len(k), times=ni)) ### study factor i 0 0 0 0 (repeated di times) X.fit.l <- X[rep(seq_len(k), times=ni),,drop=FALSE] ### repeat each row in X ni times each X.fit.l <- cbind(group1*X.fit.l) ### multiply by group1 dummy (including intercept, which becomes the group1 dummy) const <- rep(1,length(event)) #return(data.frame(event, group1, study.l, X.fit.l, const)) ### fit FE model if (k > 1) { if (con$optimizer == "clogit") { args.clogit <- clogitCtrl args.clogit$formula <- event ~ X.fit.l + strata(study.l) res.FE <- try(do.call(survival::clogit, args.clogit), silent=!verbose) } if (con$optimizer == "clogistic") { args.clogistic <- clogisticCtrl args.clogistic$formula <- event ~ X.fit.l args.clogistic$strata <- study.l res.FE <- try(do.call(Epi::clogistic, args.clogistic), silent=!verbose) } } else { stop(mstyle$stop(paste0("Cannot use '", con$optimizer, "' optimizer when k=1."))) } if (inherits(res.FE, "try-error")) stop(mstyle$stop("Cannot fit FE model.")) #return(res.FE) ### fit saturated FE model (= QE model) ### notes: 1) must figure out which terms are aliased in saturated model and remove those terms before fitting ### 2) fixed effects part does not include 'study' factor, since this is 
incorporated into the strata ### 3) however, for calculating the log likelihood, we need to go back to the conditional data, so we need to reconstruct X.QE (the study.l:group1 coefficients are the study coefficients) if (verbose) message(mstyle$message("Fitting saturated model ...")) X.QE.l <- model.matrix(~ -1 + X.fit.l + study.l:group1) X.QE.l <- X.QE.l[,!is.na(coef(res.QE)),drop=FALSE] X.QE <- X.QE[,!is.na(coef(res.QE)),drop=FALSE] if (con$optimizer == "clogit") { args.clogit <- clogitCtrl args.clogit$formula <- event ~ X.QE.l + strata(study.l) if (verbose) { res.QE <- try(do.call(survival::clogit, args.clogit), silent=!verbose) } else { res.QE <- try(suppressWarnings(do.call(survival::clogit, args.clogit)), silent=!verbose) } } if (con$optimizer == "clogistic") { args.clogistic <- clogisticCtrl args.clogistic$formula <- event ~ X.QE.l args.clogistic$strata <- study.l res.QE <- try(do.call(Epi::clogistic, args.clogistic), silent=!verbose) } if (inherits(res.QE, "try-error")) stop(mstyle$stop("Cannot fit saturated model.")) #return(res.QE) ### log-likelihood ll.FE <- -1 * .dnchg(c(cbind(coef(res.FE)),0), ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec) ll.QE <- -1 * .dnchg(c(cbind(coef(res.QE)),0), ai=ai, bi=bi, ci=ci, di=di, X.fit=X.QE, random=FALSE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec) ### extract coefficients and variance-covariance matrix for Wald-type test for heterogeneity b2.QE <- cbind(coef(res.QE)[-seq_len(p)]) ### aliased coefficients are already removed vb2.QE <- vcov(res.QE)[-seq_len(p),-seq_len(p),drop=FALSE] ### aliased coefficients are already removed } #return(list(res.FE, res.QE, ll.FE=ll.FE, ll.QE=ll.QE)) #res.FE <- res[[1]]; res.QE <- res[[2]] if (method == "ML") { ### fit ML model ### notes: 1) cannot use clogit() or clogistic() for this (do not allow for the addition of random effects) ### 2) mclogit() from mclogit package may be an alternative (but it only provides PQL method) ### 3) start values from CM.AL model (add .001 to tau^2 estimate, in case estimate of tau^2 is 0) ### 4) optimization involves integration, so intCtrl is relevant ### 5) results can be sensitive to the scaling of moderators if (verbose) message(mstyle$message("Fitting ML model ...")) if (con$optimizer == "optim") { res.ML <- try(optim(par=c(beta, log(tau2+.001)), .dnchg, method=con$optmethod, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=TRUE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, intCtrl=intCtrl, control=optCtrl), silent=!verbose) } if (con$optimizer == "nlminb") { res.ML <- try(nlminb(start=c(beta, log(tau2+.001)), .dnchg, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=TRUE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, intCtrl=intCtrl, control=optCtrl), silent=!verbose) } if (con$optimizer == "minqa") { res.ML <- try(minqa(par=c(beta, log(tau2+.001)), .dnchg, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=TRUE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, intCtrl=intCtrl, control=optCtrl), silent=!verbose) } if (con$optimizer == "optim" || con$optimizer == "nlminb") { if (inherits(res.ML, "try-error") || res.ML$convergence != 0) stop(mstyle$stop("Cannot fit ML model.")) } if (con$optimizer == "minqa") { if (inherits(res.ML, "try-error") || res.ML$ierr != 0) stop(mstyle$stop("Cannot fit ML model.")) } if (verbose > 1) 
message(mstyle$message("Computing Hessian ...")) h.ML <- numDeriv::hessian(.dnchg, x=res.ML$par, method.args=hessianCtrl, ai=ai, bi=bi, ci=ci, di=di, X.fit=X.fit, random=TRUE, verbose=verbose, digits=digits, dnchgcalc=con$dnchgcalc, dnchgprec=con$dnchgprec, intCtrl=intCtrl) #return(list(res.ML, h.ML)) ### log-likelihood if (con$optimizer == "optim") ll.ML <- -1 * res.ML$value if (con$optimizer == "nlminb") ll.ML <- -1 * res.ML$objective if (con$optimizer == "minqa") ll.ML <- -1 * res.ML$fval } #return(list(res.FE, res.QE, res.ML, ll.FE=ll.FE, ll.QE=ll.QE, ll.ML=ll.ML)) #res.FE <- res[[1]]; res.QE <- res[[2]]; res.ML <- res[[3]] if (is.element(method, c("FE","EE","CE"))) { if (con$optimizer == "optim" || con$optimizer == "nlminb" || con$optimizer == "minqa") { beta <- cbind(res.FE$par[seq_len(p)]) chol.h <- try(chol(h.FE[seq_len(p),seq_len(p)]), silent=!verbose) ### see if Hessian can be inverted with chol() if (inherits(chol.h, "try-error")) { warning(mstyle$warning("Choleski factorization of Hessian failed. Trying inversion via QR decomposition."), call.=FALSE) vb <- try(qr.solve(h.FE[seq_len(p),seq_len(p)]), silent=!verbose) ### see if Hessian can be inverted with qr.solve() if (inherits(vb, "try-error")) stop(mstyle$stop("Cannot invert Hessian for ML model.")) } else { vb <- chol2inv(chol.h) } } if (con$optimizer == "clogit" || con$optimizer == "clogistic") { beta <- cbind(coef(res.FE)[seq_len(p)]) vb <- vcov(res.FE)[seq_len(p),seq_len(p),drop=FALSE] } tau2 <- 0 sigma2 <- NA parms <- p p.eff <- p k.eff <- k } if (method == "ML") { beta <- cbind(res.ML$par[seq_len(p)]) chol.h <- try(chol(h.ML), silent=!verbose) ### see if Hessian can be inverted with chol() if (inherits(chol.h, "try-error")) { warning(mstyle$warning("Choleski factorization of Hessian failed. 
Trying inversion via QR decomposition."), call.=FALSE) vb.f <- try(qr.solve(h.ML), silent=!verbose) ### see if Hessian can be inverted with qr.solve() if (inherits(vb.f, "try-error")) stop(mstyle$stop("Cannot invert Hessian for ML model.")) } else { vb.f <- chol2inv(chol.h) } vb <- vb.f[seq_len(p),seq_len(p),drop=FALSE] tau2 <- exp(res.ML$par[p+1]) sigma2 <- NA parms <- p + 1 p.eff <- p k.eff <- k if (vb.f[p+1,p+1] >= 0) { se.tau2 <- sqrt(vb.f[p+1,p+1]) * tau2 ### delta rule: vb[p+1,p+1] is the variance of log(tau2), so vb[p+1,p+1] * tau2^2 is the variance of exp(log(tau2)) } else { se.tau2 <- NA } } #return(list(beta=beta, vb=vb, tau2=tau2, sigma2=sigma2, parms=parms, p.eff=p.eff, k.eff=k.eff, b2.QE=b2.QE, vb2.QE=vb2.QE)) } } ######################################################################### ######################################################################### ######################################################################### ### one group outcomes (log odds and log transformed rates) if (is.element(measure, c("PLO","IRLN"))) { ### prepare data if (measure == "PLO") { dat.grp <- cbind(xi=xi,mi=mi) if (is.null(ddd$family)) { dat.fam <- binomial } else { dat.fam <- ddd$family } dat.off <- NULL } if (measure == "IRLN") { dat.grp <- xi if (is.null(ddd$family)) { dat.fam <- poisson } else { dat.fam <- ddd$family } dat.off <- log(ti) } study <- factor(seq_len(k)) ### study factor X.fit <- X if (.isTRUE(ddd$retdat)) return(list(dat.grp=dat.grp, X.fit=X.fit, study=study, dat.off = if (!is.null(dat.off)) dat.off else NULL, dat.fam=dat.fam)) ### fit FE model if (verbose) message(mstyle$message("Fitting FE model ...")) res.FE <- try(glm(dat.grp ~ -1 + X.fit, offset=dat.off, family=dat.fam, control=glmCtrl), silent=!verbose) if (inherits(res.FE, "try-error")) stop(mstyle$stop("Cannot fit FE model.")) ### log-likelihood #ll.FE <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, predict(res.FE, type="response"), log=TRUE))) ### model has a NULL offset #ll.FE <- with(data.frame(dat.grp), sum(dpois(xi, predict(res.FE, type="response"), log=TRUE))) ### offset already incorporated into predict() ll.FE <- c(logLik(res.FE)) ### same as above ### fit saturated FE model (= QE model) ### notes: 1) suppressWarnings() to suppress warning "glm.fit: fitted probabilities numerically 0 or 1 occurred" if (verbose) message(mstyle$message("Fitting saturated model ...")) if (k > 1) { X.QE <- model.matrix(~ -1 + X.fit + study) if (verbose) { res.QE <- try(glm(dat.grp ~ -1 + X.QE, offset=dat.off, family=dat.fam, control=glmCtrl), silent=!verbose) } else { res.QE <- try(suppressWarnings(glm(dat.grp ~ -1 + X.QE, offset=dat.off, family=dat.fam, control=glmCtrl)), silent=!verbose) } } else { res.QE <- res.FE } if (inherits(res.QE, "try-error")) { warning(mstyle$warning("Cannot fit saturated model."), call.=FALSE) QEconv <- FALSE ll.QE <- NA } else { QEconv <- TRUE ### log-likelihood #ll.QE <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, xi/(xi+mi), log=TRUE))) ### model has a NULL offset #ll.QE <- with(data.frame(dat.grp), sum(dpois(xi, xi, log=TRUE))) ### offset not relevant for saturated model ll.QE <- c(logLik(res.QE)) ### same as above ### extract coefficients and variance-covariance matrix for Wald-type test for heterogeneity b2.QE <- cbind(na.omit(coef(res.QE)[-seq_len(p)])) ### coef() still includes aliased coefficients as NAs, so have to filter them out vb2.QE <- vcov(res.QE, complete=FALSE)[-seq_len(p),-seq_len(p),drop=FALSE] ### aliased coefficients are removed by vcov() when complete=FALSE } if 
(method == "ML") { ### fit ML model ### notes: 1) suppressMessages to suppress the 'one random effect per observation' warning if (verbose) message(mstyle$message("Fitting ML model ...")) if (con$package == "lme4") { if (verbose) { res.ML <- try(lme4::glmer(dat.grp ~ -1 + X.fit + (1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose) } else { res.ML <- suppressMessages(try(lme4::glmer(dat.grp ~ -1 + X.fit + (1 | study), offset=dat.off, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=do.call(lme4::glmerControl, glmerCtrl)), silent=!verbose)) } } if (con$package == "GLMMadaptive") { if (measure == "PLO") { dat.mm <- data.frame(xi=dat.grp[,"xi"], mi=dat.grp[,"mi"], study=study) res.ML <- try(GLMMadaptive::mixed_model(cbind(xi,mi) ~ -1 + X.fit, random = ~ 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } else { dat.mm <- data.frame(xi=dat.grp, study=study) res.ML <- try(GLMMadaptive::mixed_model(xi ~ -1 + X.fit + offset(dat.off), random = ~ 1 | study, data=dat.mm, family=dat.fam, nAGQ=nAGQ, verbose=verbose, control=mmCtrl), silent=!verbose) } } if (inherits(res.ML, "try-error")) stop(mstyle$stop("Cannot fit ML model.")) #return(res.ML) ### log-likelihood #ll.ML <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, fitted(res.ML), log=TRUE))) ### not correct (since it does not incorporate the random effects; same as ll.FE if tau^2=0) #ll.ML <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, plogis(qlogis(fitted(res.ML)) + group12*unlist(ranef(res.ML))), log=TRUE))) ### not correct (since one really has to integrate; same as ll.FE if tau^2=0) #ll.ML <- with(data.frame(dat.grp), sum(dbinom(xi, xi+mi, plogis(predict(res.ML))))) ### not correct (since one really has to integrate; same as ll.FE if tau^2=0) #ll.ML <- c(logLik(res.ML)) ### this is not the same as ll.FE when tau^2 = 0 (not sure why) if (con$package == "lme4") { ll.ML <- ll.QE - 1/2 * deviance(res.ML) ### this makes ll.ML comparable to ll.FE (same as ll.FE when tau^2=0) } else { ### FIXME: When using GLMMadaptive, ll is not comparable for FE model when tau^2 = 0 ll.ML <- c(logLik(res.ML)) } } #return(list(res.FE, res.QE, res.ML, ll.FE=ll.FE, ll.QE=ll.QE, ll.ML=ll.ML)) #res.FE <- res[[1]]; res.QE <- res[[2]]; res.ML <- res[[3]] if (is.element(method, c("FE","EE","CE"))) { beta <- cbind(coef(res.FE)[seq_len(p)]) vb <- vcov(res.FE)[seq_len(p),seq_len(p),drop=FALSE] tau2 <- 0 sigma2 <- NA parms <- p p.eff <- p k.eff <- k } if (method == "ML") { if (con$package == "lme4") { beta <- cbind(lme4::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- lme4::VarCorr(res.ML)[[1]][1] } if (con$package == "GLMMadaptive") { beta <- cbind(GLMMadaptive::fixef(res.ML)[seq_len(p)]) vb <- as.matrix(vcov(res.ML))[seq_len(p),seq_len(p),drop=FALSE] tau2 <- res.ML$D[1,1] } sigma2 <- NA parms <- p + 1 p.eff <- p k.eff <- k } #return(list(beta=beta, vb=vb, tau2=tau2, sigma2=sigma2, parms=parms, p.eff=p.eff, k.eff=k.eff, b2.QE=b2.QE, vb2.QE=vb2.QE)) } ######################################################################### ######################################################################### ######################################################################### ### heterogeneity tests (Wald-type and likelihood ratio tests of the extra coefficients in the saturated model) if (verbose > 1) message(mstyle$message("Conducting heterogeneity tests ...")) if (QEconv) { ### for OR, 
CM.EL, & optim/nlminb/minqa, QE.Wld is already calculated, so skip this part then if (measure!="OR" || model!="CM.EL" || !is.element(con$optimizer, c("optim", "nlminb", "minqa"))) { if (nrow(vb2.QE) > 0) { chol.h <- try(chol(vb2.QE), silent=!verbose) ### see if Hessian can be inverted with chol() if (inherits(chol.h, "try-error")) { warning(mstyle$warning("Cannot invert Hessian for saturated model."), call.=FALSE) QE.Wld <- NA } else { QE.Wld <- try(c(t(b2.QE) %*% chol2inv(chol.h) %*% b2.QE), silent=!verbose) if (inherits(QE.Wld, "try-error")) { warning(mstyle$warning("Cannot invert Hessian for saturated model."), call.=FALSE) QE.Wld <- NA } } } else { QE.Wld <- 0 ### if vb2.QE has 0x0 dims, then fitted model is the saturated model and QE.Wld must be 0 } } QE.LRT <- -2 * (ll.FE - ll.QE) QE.Wld[QE.Wld <= 0] <- 0 QE.LRT[QE.LRT <= 0] <- 0 #QE.df <- length(b2.QE) ### removed coefficients are not counted if dfs are determined like this QE.df <- k-p ### this yields always the same dfs regardless of how many coefficients are removed if (QE.df > 0L) { QEp.Wld <- pchisq(QE.Wld, df=QE.df, lower.tail=FALSE) QEp.LRT <- pchisq(QE.LRT, df=QE.df, lower.tail=FALSE) } else { QEp.Wld <- 1 QEp.LRT <- 1 } } else { QE.Wld <- NA QE.LRT <- NA QEp.Wld <- NA QEp.LRT <- NA QE.df <- NA } ### calculation of I^2 and H^2 wi <- 1/vi W <- diag(wi, nrow=k.yi, ncol=k.yi) stXWX <- .invcalc(X=X.yi, W=W, k=k.yi) P <- W - W %*% X.yi %*% stXWX %*% crossprod(X.yi,W) #vt <- (k-1) / (sum(wi) - sum(wi^2)/sum(wi)) ### this only applies to the RE model #vt <- 1/mean(wi) ### harmonic mean of vi's (see Takkouche et al., 1999) vt <- (k.yi-p) / .tr(P) I2 <- 100 * tau2 / (vt + tau2) H2 <- tau2 / vt + 1 ### testing of the fixed effects in the model if (verbose > 1) message(mstyle$message("Conducting tests of the fixed effects ...")) chol.h <- try(chol(vb[btt,btt]), silent=!verbose) ### see if Hessian can be inverted with chol() if (inherits(chol.h, "try-error")) { warning(mstyle$warning("Cannot invert Hessian for QM test."), call.=FALSE) QM <- NA } else { QM <- as.vector(t(beta)[btt] %*% chol2inv(chol.h) %*% beta[btt]) } ### scale back beta and vb if (!int.only && int.incl && con$scaleX) { mX <- rbind(c(intrcpt=1, -1*ifelse(is.d[-1], 0, meanX/sdX)), cbind(0, diag(ifelse(is.d[-1], 1, 1/sdX), nrow=length(is.d)-1, ncol=length(is.d)-1))) beta <- mX %*% beta vb <- mX %*% vb %*% t(mX) X <- Xsave } ### ddf calculation if (test == "t") { ddf <- k-p } else { ddf <- NA } rownames(beta) <- rownames(vb) <- colnames(vb) <- colnames(X) ve <- diag(vb) se <- ifelse(ve >= 0, sqrt(ve), NA) names(se) <- NULL zval <- c(beta/se) if (test == "t") { QM <- QM / m QMdf <- c(m, k-p) QMp <- if (QMdf[2] > 0) pf(QM, df1=QMdf[1], df2=QMdf[2], lower.tail=FALSE) else NA pval <- if (ddf > 0) 2*pt(abs(zval), df=ddf, lower.tail=FALSE) else rep(NA,p) crit <- if (ddf > 0) qt(level/2, df=ddf, lower.tail=FALSE) else rep(NA,p) } else { QMdf <- c(m, NA) QMp <- pchisq(QM, df=QMdf[1], lower.tail=FALSE) pval <- 2*pnorm(abs(zval), lower.tail=FALSE) crit <- qnorm(level/2, lower.tail=FALSE) } ci.lb <- c(beta - crit * se) ci.ub <- c(beta + crit * se) #return(list(beta=beta, se=se, zval=zval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, QM=QM, QMp=QMp)) ######################################################################### ###### fit statistics if (verbose > 1) message(mstyle$message("Computing fit statistics and log likelihood ...")) ll.ML <- ifelse(is.element(method, c("FE","EE","CE")), ll.FE, ll.ML) ll.REML <- NA dev.ML <- -2 * (ll.ML - ll.QE) AIC.ML <- -2 * ll.ML + 2*parms BIC.ML 
<- -2 * ll.ML + parms * log(k.eff) AICc.ML <- -2 * ll.ML + 2*parms * max(k.eff, parms+2) / (max(k.eff, parms+2) - parms - 1) dev.REML <- NA AIC.REML <- NA BIC.REML <- NA AICc.REML <- NA fit.stats <- matrix(c(ll.ML, dev.ML, AIC.ML, BIC.ML, AICc.ML, ll.REML, dev.REML, AIC.REML, BIC.REML, AICc.REML), ncol=2, byrow=FALSE) dimnames(fit.stats) <- list(c("ll","dev","AIC","BIC","AICc"), c("ML","REML")) fit.stats <- data.frame(fit.stats) ######################################################################### ###### prepare output if (verbose > 1) message(mstyle$message("Preparing output ...")) weighted <- TRUE if (is.null(ddd$outlist)) { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, se.tau2=se.tau2, sigma2=sigma2, I2=I2, H2=H2, vt=vt, QE.Wld=QE.Wld, QEp.Wld=QEp.Wld, QE.LRT=QE.LRT, QEp.LRT=QEp.LRT, QE.df=QE.df, QM=QM, QMdf=QMdf, QMp=QMp, k=k, k.f=k.f, k.yi=k.yi, k.eff=k.eff, k.all=k.all, p=p, p.eff=p.eff, parms=parms, int.only=int.only, int.incl=int.incl, intercept=intercept, yi=yi, vi=vi, X=X, yi.f=yi.f, vi.f=vi.f, X.f=X.f, ai=ai, bi=bi, ci=ci, di=di, ai.f=ai.f, bi.f=bi.f, ci.f=ci.f, di.f=di.f, x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, x1i.f=x1i.f, x2i.f=x2i.f, t1i.f=t1i.f, t2i.f=t2i.f, xi=xi, mi=mi, ti=ti, xi.f=xi.f, mi.f=mi.f, ti.f=ti.f, ni=ni, ni.f=ni.f, ids=ids, not.na=not.na, subset=subset, not.na.yivi=not.na.yivi, slab=slab, slab.null=slab.null, measure=measure, method=method, model=model, weighted=weighted, test=test, dfs=ddf, ddf=ddf, btt=btt, m=m, digits=digits, level=level, control=control, verbose=verbose, add=add, to=to, drop00=drop00, fit.stats=fit.stats, formula.yi=NULL, formula.mods=formula.mods, version=packageVersion("metafor"), call=mf) } if (!is.null(ddd$outlist)) { if (ddd$outlist == "minimal") { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, se.tau2=se.tau2, sigma2=sigma2, I2=I2, H2=H2, QE.Wld=QE.Wld, QEp.Wld=QEp.Wld, QE.LRT=QE.LRT, QEp.LRT=QEp.LRT, QE.df=QE.df, QEp=QEp, QM=QM, QMdf=QMdf, QMp=QMp, k=k, k.eff=k.eff, p=p, p.eff=p.eff, parms=parms, int.only=int.only, measure=measure, method=method, model=model, test=test, dfs=ddf, ddf=ddf, btt=btt, m=m, digits=digits, fit.stats=fit.stats) } else { res <- eval(parse(text=paste0("list(", ddd$outlist, ")"))) } } if (.isTRUE(ddd$retfit)) { res$res.FE <- res.FE res$res.QE <- res.QE if (method == "ML") res$res.ML <- res.ML } time.end <- proc.time() res$time <- unname(time.end - time.start)[3] if (.isTRUE(ddd$time)) .print.time(res$time) if (verbose || .isTRUE(ddd$time)) cat("\n") class(res) <- c("rma.glmm", "rma") return(res) } metafor/R/vec2mat.r0000644000176200001440000000144713723245713013651 0ustar liggesusersvec2mat <- function(x, diag=FALSE, corr=!diag, dimnames) { mstyle <- .get.mstyle("crayon" %in% .packages()) p <- length(x) dims <- sqrt(2*p + 1/4) + ifelse(diag, -1/2, 1/2) if (abs(dims - round(dims)) >= .Machine$double.eps^0.5) stop(mstyle$stop("Length of 'x' does not correspond to a square matrix.")) dims <- round(dims) R <- matrix(NA, nrow=dims, ncol=dims) if (!missing(dimnames)) { if (length(dimnames) != dims) stop(mstyle$stop(paste0("Length of 'dimnames' (", length(dimnames), ") does not correspond to the dimensions of the matrix (", dims, ")."))) rownames(R) <- colnames(R) <- dimnames } R[lower.tri(R, diag=diag)] <- x R[upper.tri(R, diag=diag)] <- t(R)[upper.tri(R, diag=diag)] if (corr) diag(R) <- 1 return(R) } metafor/R/confint.rma.mh.r0000644000176200001440000000405013770362554015127 0ustar 
liggesusersconfint.rma.mh <- function(object, parm, level, digits, transf, targs, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.mh") if (!missing(parm)) warning(mstyle$warning("Argument 'parm' (currently) ignored."), call.=FALSE) x <- object if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) .chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) crit <- qnorm(level/2, lower.tail=FALSE) beta <- x$beta ci.lb <- beta - crit * x$se ci.ub <- beta + crit * x$se ### if requested, apply transformation function if (.isTRUE(transf) && is.element(x$measure, c("OR","RR","IRR"))) ### if transf=TRUE, apply exp transformation to ORs, RRs, and IRRs transf <- exp if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### res <- cbind(estimate=beta, ci.lb, ci.ub) res <- list(fixed=res) rownames(res$fixed) <- "" res$digits <- digits if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(res) <- "confint.rma" return(res) } metafor/R/print.tes.r0000644000176200001440000000477614036766270014252 0ustar liggesusersprint.tes <- function(x, digits=x$digits, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="tes") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") cat(mstyle$section(paste("Test of Excess Significance"))) cat("\n\n") cat(mstyle$text("Observed Number of Significant Findings: ")) cat(mstyle$result(x$O)) cat(mstyle$result(paste0(" (out of ", x$k, ")"))) cat("\n") cat(mstyle$text("Expected Number of Significant Findings: ")) cat(mstyle$result(.fcf(x$E, digits[["est"]]))) cat("\n") cat(mstyle$text("Observed Number / Expected Number: ")) cat(mstyle$result(.fcf(x$OEratio, digits[["est"]]))) cat("\n\n") if (length(x$theta) == 1L) { cat(mstyle$text("Estimated Power of Tests (based on theta = ")) cat(mstyle$result(.fcf(x$theta, digits[["est"]]))) cat(mstyle$text(")")) } else { cat(mstyle$text("Estimated Power of Tests: ")) } cat("\n\n") if (x$k > 5L) { power <- quantile(x$power) names(power) <- c("min", "q1", "median", "q3", "max") } else { power <- x$power names(power) <- seq_len(x$k) } tmp <- capture.output(.print.vector(.fcf(power, digits[["pval"]]))) .print.table(tmp, mstyle) cat("\n") cat(mstyle$text("Test of Excess Significance: ")) cat(mstyle$result(paste0("p ", .pval(x$pval, digits[["pval"]], showeq=TRUE, sep=" ")))) if (x$test == "chi2") { cat(mstyle$result(paste0(" (X^2 = ", .fcf(x$X2, digits[["test"]]), ", df = 1)"))) } if (x$test == "binom") { cat(mstyle$result(" (binomial test)")) } if (x$test == "exact") { cat(mstyle$result(" (exact test)")) } cat("\n") if (!is.null(x$theta.lim)) { cat(mstyle$text(paste0("Limit Estimate (theta_lim): "))) if (is.na(x$theta.lim[1])) { cat(mstyle$result("not estimable")) } else { cat(mstyle$result(.fcf(x$theta.lim[1], digits=digits[["est"]]))) } if (length(x$theta.lim) == 2L) { cat(mstyle$result(", ")) if (is.na(x$theta.lim[2])) { cat(mstyle$result("not estimable")) } else { cat(mstyle$result(.fcf(x$theta.lim[2], digits=digits[["est"]]))) } } if (any(!is.na(x$theta.lim))) cat(mstyle$result(paste0(" (where p = ", ifelse(x$tes.alternative == "two.sided", x$tes.alpha/2, x$tes.alpha), ")"))) cat("\n") } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/reporter.r0000644000176200001440000000006613457322061014142 0ustar liggesusersreporter <- function(x, ...) UseMethod("reporter") metafor/R/leave1out.r0000644000176200001440000000007013457322061014200 0ustar liggesusersleave1out <- function(x, ...) UseMethod("leave1out") metafor/R/print.vif.rma.r0000644000176200001440000000304514043252532014772 0ustar liggesusersprint.vif.rma <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="vif.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") ddd <- list(...) 
.chkdots(ddd, c("num")) if (is.null(x$gvif)) { if (x$table) { res.table <- data.frame(estimate=.fcf(x$vif$estimate, digits[["est"]]), se=.fcf(x$vif$se, digits[["se"]]), zval=.fcf(x$vif$zval, digits[["test"]]), "pval"=.pval(x$vif$pval, digits[["pval"]]), ci.lb=.fcf(x$vif$ci.lb, digits[["ci"]]), ci.ub=.fcf(x$vif$ci.ub, digits[["ci"]]), vif=.fcf(x$vif$vif, digits[["est"]]), sif=.fcf(x$vif$sif, digits[["est"]]), stringsAsFactors=FALSE) rownames(res.table) <- rownames(x$vif) if (is.element(x$test, c("knha","adhoc","t"))) colnames(res.table)[3] <- "tval" if (.isTRUE(ddd$num)) rownames(res.table) <- paste0(1:nrow(res.table), ") ", rownames(res.table)) tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE, print.gap=2)) .print.table(tmp, mstyle) } else { print(.fcf(x$vif, digits[["est"]]), quote=FALSE, right=TRUE) } } else { cat(mstyle$section(paste0("Collinearity Diagnostics (coefficient", ifelse(x$m == 1, " ", "s "), .format.btt(x$btt),"):\n"))) cat(mstyle$result(paste0("GVIF = ", .fcf(x$gvif, digits[["est"]]), ", GSIF = ", .fcf(x$gsif, digits[["est"]]), "\n"))) } if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/confint.rma.ls.r0000644000176200001440000003027214046727462015146 0ustar liggesusersconfint.rma.ls <- function(object, parm, level, fixed=FALSE, alpha, digits, transf, targs, verbose=FALSE, control, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.ls") if (!missing(parm)) warning(mstyle$warning("Argument 'parm' (currently) ignored."), call.=FALSE) x <- object k <- x$k p <- x$p if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL if (missing(control)) control <- list() level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ddd <- list(...) 
.chkdots(ddd, c("time", "xlim", "extint")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (!is.null(ddd$xlim)) { if (length(ddd$xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) control$vc.min <- ddd$xlim[1] control$vc.max <- ddd$xlim[2] } ### check if user has specified alpha argument random <- !missing(alpha) if (!fixed && !random) { ### if both 'fixed' and 'random' are FALSE, obtain CIs for alpha parameters cl <- match.call() ### total number of non-fixed components comps <- sum(!x$alpha.fix) if (comps == 0) stop(mstyle$stop("No components for which a CI can be obtained.")) res.all <- list() j <- 0 if (any(!x$alpha.fix)) { for (pos in seq_len(x$alphas)[!x$alpha.fix]) { j <- j + 1 cl.vc <- cl cl.vc$alpha <- pos cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for alpha =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } if (length(res.all) == 1L) { return(res.all[[1]]) } else { res.all$digits <- digits class(res.all) <- "list.confint.rma" return(res.all) } } ######################################################################### ######################################################################### ######################################################################### if (random) { type <- "PL" ###################################################################### ### check if model actually contains (at least one) such a component and that it was actually estimated if (!missing(alpha) && all(x$alpha.fix)) stop(mstyle$stop("Model does not contain any estimated 'alpha' components.")) ### check if user specified more than one alpha component if (!missing(alpha) && (length(alpha) > 1L)) stop(mstyle$stop("Can only specify one 'alpha' component.")) ### check if user specified a logical if (!missing(alpha) && is.logical(alpha)) stop(mstyle$stop("Must specify the number for the 'alpha' component.")) ### check if user specified a component that does not exist if (!missing(alpha) && (alpha > x$alphas || alpha <= 0)) stop(mstyle$stop("No such 'alpha' component in the model.")) ### check if user specified a component that was fixed if (!missing(alpha) && x$alpha.fix[alpha]) stop(mstyle$stop("Specified 'alpha' component was fixed.")) ### if everything is good so far, get value of the variance component and set 'comp' alpha.pos <- NA if (!missing(alpha)) { vc <- x$alpha[alpha] comp <- "alpha" alpha.pos <- alpha } #return(list(comp=comp, vc=vc, alpha.pos=alpha.pos)) ###################################################################### ### set control parameters for uniroot() and possibly replace with user-defined values ### set vc.min and vc.max and possibly replace with user-defined values con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, verbose=FALSE, eptries=10) if (comp == "alpha") { if (is.na(x$se.alpha[alpha])) { con$vc.min <- vc - 10 * abs(vc) con$vc.max <- vc + 10 * abs(vc) } else { con$vc.min <- vc - 10 * qnorm(level/2, lower.tail=FALSE) * x$se.alpha[alpha] con$vc.max <- vc + 10 * qnorm(level/2, lower.tail=FALSE) * x$se.alpha[alpha] } } con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose ###################################################################### vc.lb <- NA vc.ub <- NA ci.null <- FALSE ### logical if CI is a null set lb.conv <- FALSE ### logical if search 
converged for lower bound (LB) ub.conv <- FALSE ### logical if search converged for upper bound (UB) lb.sign <- "" ### for sign in case LB must be below vc.min ("<") or above vc.max (">") ub.sign <- "" ### for sign in case UB must be below vc.min ("<") or above vc.max (">") ###################################################################### ###################################################################### ###################################################################### ### Profile Likelihood method if (type == "PL") { if (con$vc.min > vc) stop(mstyle$stop("Lower bound of interval to be searched must be <= estimated value of component.")) if (con$vc.max < vc) stop(mstyle$stop("Upper bound of interval to be searched must be >= estimated value of component.")) objective <- qchisq(1-level, df=1) ################################################################### ### search for lower bound ### get diff value when setting component to vc.min; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the lower bound must be below vc.min epdiff <- abs(con$vc.min - vc) / con$eptries for (i in seq_len(con$eptries)) { res <- try(.profile.rma.ls(con$vc.min, obj=x, comp=comp, alpha.pos=alpha.pos, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { vc.lb <- con$vc.min lb.conv <- TRUE lb.sign <- "<" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.ls, interval=c(con$vc.min, vc), tol=con$tol, maxiter=con$maxiter, extendInt="downX", obj=x, comp=comp, alpha.pos=alpha.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.ls, interval=c(con$vc.min, vc), tol=con$tol, maxiter=con$maxiter, obj=x, comp=comp, alpha.pos=alpha.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { vc.lb <- res lb.conv <- TRUE } } break } con$vc.min <- con$vc.min + epdiff } if (verbose) cat("\n") ################################################################### ### search for upper bound ### get diff value when setting component to vc.max; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the upper bound must be above vc.max epdiff <- abs(con$vc.max - vc) / con$eptries for (i in seq_len(con$eptries)) { res <- try(.profile.rma.ls(con$vc.max, obj=x, comp=comp, alpha.pos=alpha.pos, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { vc.ub <- con$vc.max ub.conv <- TRUE ub.sign <- ">" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.ls, interval=c(vc, con$vc.max), tol=con$tol, maxiter=con$maxiter, extendInt="upX", obj=x, comp=comp, alpha.pos=alpha.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.ls, interval=c(vc, con$vc.max), tol=con$tol, maxiter=con$maxiter, obj=x, comp=comp, alpha.pos=alpha.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { vc.ub <- res ub.conv <- TRUE } } break } con$vc.max <- con$vc.max - epdiff } ################################################################### } 
###################################################################### ###################################################################### ###################################################################### if (!lb.conv) warning(mstyle$warning("Cannot obtain lower bound of profile likelihood CI due to convergence problems."), call.=FALSE) if (!ub.conv) warning(mstyle$warning("Cannot obtain upper bound of profile likelihood CI due to convergence problems."), call.=FALSE) ###################################################################### vc <- c(vc, vc.lb, vc.ub) if (comp == "alpha") { res.random <- rbind(vc) if (x$alphas == 1L) { rownames(res.random) <- "alpha" } else { rownames(res.random) <- paste0("alpha.", alpha.pos) } } colnames(res.random) <- c("estimate", "ci.lb", "ci.ub") } ######################################################################### ######################################################################### ######################################################################### if (fixed) { if (x$test == "t") { crit <- qt(level/2, df=x$ddf, lower.tail=FALSE) } else { crit <- qnorm(level/2, lower.tail=FALSE) } beta <- c(x$beta) ci.lb <- c(beta - crit * x$se) ci.ub <- c(beta + crit * x$se) if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] res.fixed <- cbind(estimate=beta, ci.lb=ci.lb, ci.ub=ci.ub) rownames(res.fixed) <- rownames(x$beta) } ######################################################################### ######################################################################### ######################################################################### res <- list() if (fixed) res$fixed <- res.fixed if (random) res$random <- res.random res$digits <- digits if (random) { res$ci.null <- ci.null res$lb.sign <- lb.sign res$ub.sign <- ub.sign #res$vc.min <- con$vc.min } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(res) <- "confint.rma" return(res) } metafor/R/print.profile.rma.r0000644000176200001440000000073313770405224015653 0ustar liggesusersprint.profile.rma <- function(x, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="profile.rma") ######################################################################### if (x$comps == 1) { res <- data.frame(x[1], x[2]) print(res) } else { x$comps <- NULL print(lapply(x, function(x) data.frame(x[1], x[2]))) } } metafor/R/trimfill.rma.uni.r0000644000176200001440000001334413770377756015520 0ustar liggesuserstrimfill.rma.uni <- function(x, side, estimator="L0", maxiter=100, verbose=FALSE, ilim, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) if (!x$int.only) stop(mstyle$stop("Trim-and-fill method only applicable for models without moderators.")) if (missing(side)) side <- NULL estimator <- match.arg(estimator, c("L0", "R0", "Q0")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) ######################################################################### yi <- x$yi vi <- x$vi wi <- x$weights ni <- x$ni ### determine side (if none is specified) if (is.null(side)) { res <- suppressWarnings(rma.uni(yi, vi, weights=wi, mods=sqrt(vi), method=x$method, weighted=x$weighted, ...)) ### TODO: add check in case there are problems with fitting the model if (res$beta[2] < 0) { side <- "right" } else { side <- "left" } } else { side <- match.arg(side, c("left", "right")) } ### flip data if examining right side if (side == "right") yi <- -1*yi ### sort data by increasing yi ix <- sort(yi, index.return=TRUE)$ix yi <- yi[ix] vi <- vi[ix] wi <- wi[ix] ni <- ni[ix] ######################################################################### k <- length(yi) k0.sav <- -1 k0 <- 0 ### estimated number of missing studies iter <- 0 ### iteration counter if (verbose) cat("\n") while (abs(k0 - k0.sav) > 0) { k0.sav <- k0 ### save current value of k0 iter <- iter + 1 if (iter > maxiter) stop(mstyle$stop("Trim and fill algorithm did not converge.")) ### truncated data yi.t <- yi[seq_len(k-k0)] vi.t <- vi[seq_len(k-k0)] wi.t <- wi[seq_len(k-k0)] res <- suppressWarnings(rma.uni(yi.t, vi.t, weights=wi.t, method=x$method, weighted=x$weighted, ...)) ### intercept estimate based on truncated data beta <- c(res$beta) yi.c <- yi - beta ### centered values yi.c.r <- rank(abs(yi.c), ties.method="first") ### ranked absolute centered values yi.c.r.s <- sign(yi.c) * yi.c.r ### signed ranked centered values ### estimate the number of missing studies with the R0 estimator if (estimator == "R0") { k0 <- (k - max(-1*yi.c.r.s[yi.c.r.s < 0])) - 1 se.k0 <- sqrt(2*max(0,k0) + 2) } ### estimate the number of missing studies with the L0 estimator if (estimator == "L0") { Sr <- sum(yi.c.r.s[yi.c.r.s > 0]) k0 <- (4*Sr - k*(k+1)) / (2*k - 1) varSr <- 1/24 * (k*(k+1)*(2*k+1) + 10*k0^3 + 27*k0^2 + 17*k0 - 18*k*k0^2 - 18*k*k0 + 6*k^2*k0) se.k0 <- 4*sqrt(varSr) / (2*k - 1) } ### estimate the number of missing studies with the Q0 estimator if (estimator == "Q0") { Sr <- sum(yi.c.r.s[yi.c.r.s > 0]) k0 <- k - 1/2 - sqrt(2*k^2 - 4*Sr + 1/4) varSr <- 1/24 * (k*(k+1)*(2*k+1) + 10*k0^3 + 27*k0^2 + 17*k0 - 18*k*k0^2 - 18*k*k0 + 6*k^2*k0) se.k0 <- 2*sqrt(varSr) / sqrt((k-1/2)^2 - k0*(2*k - k0 -1)) } ### round k0 and make sure that k0 is non-negative k0 <- max(0, round(k0)) se.k0 <- max(0, se.k0) if (verbose) cat(mstyle$verbose(paste0("Iteration: ", formatC(iter, format="f", digits=0, width=nchar(maxiter), flag="-"), " missing = ", formatC(k0, format="f", digits=0, width=nchar(k), flag="-"), " beta = ", formatC(ifelse(side == "right", -1*beta, beta), format="f", digits=x$digits[["est"]]), "\n"))) } ######################################################################### ### if estimated number of missing studies is > 0 if (k0 > 0) { ### flip data back if side is right if (side == "right") { yi.c <- -1 * (yi.c - beta) } else { yi.c <- yi.c - beta } ### create filled-in data set yi.fill <- c(x$yi.f, -1*yi.c[(k-k0+1):k]) ### apply limits if specified if (!missing(ilim)) { ilim <- 
sort(ilim) if (length(ilim) != 2L) stop(mstyle$stop("Argument 'ilim' must be of length 2.")) yi.fill[yi.fill < ilim[1]] <- ilim[1] yi.fill[yi.fill > ilim[2]] <- ilim[2] } vi.fill <- c(x$vi.f, vi[(k-k0+1):k]) wi.fill <- c(x$weights.f, wi[(k-k0+1):k]) ni.fill <- c(x$ni.f, ni[(k-k0+1):k]) ### add measure attribute to the yi.fill vector attr(yi.fill, "measure") <- x$measure ### fit model with imputed data res <- suppressWarnings(rma.uni(yi.fill, vi.fill, weights=wi.fill, ni=ni.fill, method=x$method, weighted=x$weighted, digits=x$digits, ...)) ### fill, ids, and slab are of length 'k.f + k0' (i.e., subsetted but with NAs) res$fill <- c(rep(FALSE,x$k.f), rep(TRUE,k0)) res$ids <- c(x$ids, (max(x$ids)+1):(max(x$ids)+k0)) if (x$slab.null) { res$slab <- c(paste("Study", x$ids), paste("Filled", seq_len(k0))) } else { res$slab <- c(x$slab, paste("Filled", seq_len(k0))) } res$slab.null <- FALSE } else { ### in case 0 studies are imputed res <- x res$fill <- rep(FALSE,k) } res$k0 <- k0 res$se.k0 <- se.k0 res$side <- side res$k0.est <- estimator res$k.all <- x$k.all + k0 if (estimator == "R0") { m <- -1:(k0-1) res$p.k0 <- 1 - sum(choose(0+m+1, m+1) * 0.5^(0+m+2)) } else { res$p.k0 <- NA } class(res) <- c("rma.uni.trimfill", class(res)) return(res) } metafor/R/plot.rma.glmm.r0000644000176200001440000000036313770373156015000 0ustar liggesusersplot.rma.glmm <- function(x, qqplot=FALSE, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.glmm", notav="rma.glmm") } metafor/R/methods.list.rma.r0000644000176200001440000000675314031643027015501 0ustar liggesusers############################################################################ "[.list.rma" <- function(x, i, ...) { ### removed j argument (see below), so can only select rows, not columns out <- x attr(out, "class") <- NULL slab.pos <- which(names(out) == "slab") if (!missing(i)) ### for X and Z element out[seq_len(slab.pos-1)] <- lapply(out[seq_len(slab.pos-1)], function(r) if (inherits(r, "matrix")) r[i,,drop=FALSE] else r[i]) ### catch cases where user selects values outside 1:k if (length(out[[1]]) == 0L) return(NULL) #out <- out[j] ### this causes all kinds of problems, so left out for now (TODO: check if this is really a problem) out$slab <- x$slab[i] ### slab can only contain NAs if user selects values outside 1:k if (anyNA(out$slab)) return(NULL) out$digits <- x$digits out$transf <- x$transf out$method <- x$method class(out) <- "list.rma" return(out) } ############################################################################ as.data.frame.list.rma <- function(x, ...) { attr(x, "class") <- NULL ### turn all vectors before the slab vector into a data frame slab.pos <- which(names(x) == "slab") out <- x[seq_len(slab.pos-1)] out <- data.frame(out, row.names=x$slab, stringsAsFactors=FALSE) ### in case all values were NA and have been omitted if (nrow(out) == 0L) return(data.frame()) ### if transf exists and is TRUE, set SEs to NULL so that column is omitted from the output if (exists("transf", where=x, inherits=FALSE) && x$transf) out$se <- NULL return(out) } ############################################################################ as.matrix.list.rma <- function(x, ...) 
{ attr(x, "class") <- NULL ### turn all vectors before the slab vector into a matrix slab.pos <- which(names(x) == "slab") out <- x[seq_len(slab.pos-1)] out <- do.call(cbind, out) rownames(out) <- x$slab ### if transf exists and is TRUE, set SEs to NULL so that column is omitted from the output if (exists("transf", where=x, inherits=FALSE) && x$transf) out <- out[,-which(colnames(out) == "se")] return(out) } ############################################################################ ### like utils:::head.data.frame and utils:::tail.data.frame, ### but with nrow(x) replaced by length(x[[1]]) head.list.rma <- function (x, n = 6L, ...) { stopifnot(length(n) == 1L) n <- if (n < 0L) { max(length(x[[1]]) + n, 0L) } else { min(n, length(x[[1]])) } x[seq_len(n), , drop = FALSE] } tail.list.rma <- function (x, n = 6L, ...) { stopifnot(length(n) == 1L) nrx <- length(x[[1]]) n <- if (n < 0L) { max(nrx + n, 0L) } else { min(n, nrx) } x[seq.int(to = nrx, length.out = n), , drop = FALSE] } ############################################################################ `$<-.list.rma` <- function(x, name, value) { if (name %in% names(x)) { x[[name]] <- value return(x) } else { slab.pos <- which(names(x) == "slab") out <- list() for (i in seq_len(slab.pos-1)) { out[[i]] <- x[[i]] } names(out) <- names(x)[seq_len(slab.pos-1)] out[[name]] <- value for (i in (slab.pos:length(x))) { out[[i+1]] <- x[[i]] } names(out)[(slab.pos+1):(length(x)+1)] <- names(x)[slab.pos:length(x)] class(out) <- class(x) return(out) } } ############################################################################ metafor/R/cooks.distance.rma.uni.r0000644000176200001440000000020213457322061016547 0ustar liggesuserscooks.distance.rma.uni <- function(model, progbar=FALSE, ...) influence(model, progbar=progbar, measure="cooks.distance", ...) metafor/R/qqnorm.rma.mv.r0000644000176200001440000000022313770374357015024 0ustar liggesusersqqnorm.rma.mv <- function(y, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(y), must="rma.mv", notav="rma.mv") } metafor/R/coef.summary.rma.r0000644000176200001440000000251214043270170015457 0ustar liggesuserscoef.summary.rma <- function(object, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="summary.rma") x <- object if (is.element(x$test, c("knha","adhoc","t"))) { res.table <- data.frame(estimate=x$beta, se=x$se, tval=x$zval, df=x$ddf, pval=x$pval, ci.lb=x$ci.lb, ci.ub=x$ci.ub) } else { res.table <- data.frame(estimate=x$beta, se=x$se, zval=x$zval, pval=x$pval, ci.lb=x$ci.lb, ci.ub=x$ci.ub) } if (inherits(x, "rma.ls")) { res.table <- list(beta=res.table) if (x$test == "t") { res.table$alpha <- data.frame(estimate=x$alpha, se=x$se.alpha, tval=x$zval.alpha, df=x$ddf.alpha, pval=x$pval.alpha, ci.lb=x$ci.lb.alpha, ci.ub=x$ci.ub.alpha) } else { res.table$alpha <- data.frame(estimate=x$alpha, se=x$se.alpha, zval=x$zval.alpha, pval=x$pval.alpha, ci.lb=x$ci.lb.alpha, ci.ub=x$ci.ub.alpha) } } if (inherits(x, "rma.uni.selmodel")) { res.table <- list(beta=res.table) res.table$delta <- data.frame(estimate=x$delta, se=x$se.delta, zval=x$zval.delta, pval=x$pval.delta, ci.lb=x$ci.lb.delta, ci.ub=x$ci.ub.delta) if (length(x$delta) == 1L) { rownames(res.table$delta) <- "delta" } else { rownames(res.table$delta) <- paste0("delta.", 1:length(x$delta)) } } return(res.table) } metafor/R/AIC.rma.r0000644000176200001440000000273414014250066013451 0ustar liggesusersAIC.rma <- function(object, ..., k=2, correct=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") if (missing(...)) { ### if there is just 'object' if (object$method == "REML") { out <- ifelse(correct, object$fit.stats["AICc","REML"], object$fit.stats["AIC","REML"]) } else { out <- ifelse(correct, object$fit.stats["AICc","ML"], object$fit.stats["AIC","ML"]) } } else { ### if there is 'object' and additional objects via ... if (object$method == "REML") { out <- sapply(list(object, ...), function(x) ifelse(correct, x$fit.stats["AICc","REML"], x$fit.stats["AIC","REML"])) } else { out <- sapply(list(object, ...), function(x) ifelse(correct, x$fit.stats["AICc","ML"], x$fit.stats["AIC","ML"])) } dfs <- sapply(list(object, ...), function(x) x$parms) out <- data.frame(df=dfs, AIC=out) if (correct) names(out)[2] <- "AICc" ### get names of objects; same idea as in stats:::AIC.default cl <- match.call() cl$k <- NULL cl$correct <- NULL rownames(out) <- as.character(cl[-1L]) ### check that all models were fitted to the same data yis <- lapply(list(object, ...), function(x) as.vector(x$yi)) if (!all(sapply(yis[-1], function(x) identical(x, yis[[1]])))) warning(mstyle$warning("Models not all fitted to the same data."), call.=FALSE) } return(out) } metafor/R/regtest.rma.r0000644000176200001440000000605414036331516014535 0ustar liggesusersregtest.rma <- function(x, model="rma", predictor="sei", ret.fit=FALSE, digits, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("robust.rma", "rma.glmm", "rma.mv", "rma.ls", "rma.uni.selmodel")) model <- match.arg(model, c("lm", "rma")) predictor <- match.arg(predictor, c("sei", "vi", "ni", "ninv", "sqrtni", "sqrtninv")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ######################################################################### yi <- x$yi vi <- x$vi ni <- x$ni ### may be NULL p <- x$p if (inherits(x, "rma.mh") || inherits(x, "rma.peto")) { weights <- NULL X <- cbind(rep(1,length(yi))) } else { weights <- x$weights X <- x$X } if (predictor == "sei") X <- cbind(X, sei=sqrt(vi)) if (predictor == "vi") X <- cbind(X, vi=vi) if (is.element(predictor, c("ni", "ninv", "sqrtni", "sqrtninv"))) { if (is.null(ni)) { stop(mstyle$stop("No sample size information stored in model object.")) } else { if (predictor == "ni") X <- cbind(X, ni=ni) if (predictor == "ninv") X <- cbind(X, ninv=1/ni) if (predictor == "sqrtni") X <- cbind(X, ni=sqrt(ni)) if (predictor == "sqrtninv") X <- cbind(X, ni=1/sqrt(ni)) } } ### check if X of full rank (if not, cannot carry out the test) tmp <- lm(yi ~ X - 1) coef.na <- is.na(coef(tmp)) if (any(coef.na)) stop(mstyle$stop("Model matrix no longer of full rank after addition of predictor. Cannot fit model.")) if (model == "rma") { fit <- rma.uni(yi, vi, weights=weights, mods=X, intercept=FALSE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, ...) zval <- fit$zval[p+1] pval <- fit$pval[p+1] ddf <- fit$ddf } else { yi <- c(yi) ### to remove attributes fit <- lm(yi ~ X - 1, weights=1/vi) tmp <- summary(fit) zval <- coef(tmp)[p+1,3] pval <- coef(tmp)[p+1,4] ddf <- x$k - x$p - 1 } ### get the 'limit estimate' if (predictor %in% c("sei", "vi", "ninv", "sqrtninv") && p == 1L && .is.intercept(X[,1])) { if (model=="lm") { est <- coef(tmp)[1,1] ci.lb <- est - qt(x$level/2, df=ddf, lower.tail=FALSE) * coef(tmp)[1,2] ci.ub <- est + qt(x$level/2, df=ddf, lower.tail=FALSE) * coef(tmp)[1,2] } else { est <- coef(fit)[1] ci.lb <- fit$ci.lb[1] ci.ub <- fit$ci.ub[1] } } else { est <- ci.lb <- ci.ub <- NULL } res <- list(model=model, predictor=predictor, zval=zval, pval=pval, dfs=ddf, ddf=ddf, method=x$method, digits=digits, ret.fit=ret.fit, fit=fit, est=est, ci.lb=ci.lb, ci.ub=ci.ub) class(res) <- "regtest" return(res) } metafor/R/coef.rma.r0000644000176200001440000000124713770362363014003 0ustar liggesuserscoef.rma <- function(object, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") coefs <- c(object$beta) names(coefs) <- rownames(object$beta) if (inherits(object, "rma.ls")) { coefs <- list(beta=coefs) coefs$alpha <- c(object$alpha) names(coefs$alpha) <- rownames(object$alpha) } if (inherits(object, "rma.uni.selmodel")) { coefs <- list(beta=coefs) coefs$delta <- c(object$delta) if (length(object$delta) == 1L) { names(coefs$delta) <- "delta" } else { names(coefs$delta) <- paste0("delta.", 1:length(object$delta)) } } return(coefs) } metafor/R/cumul.r0000644000176200001440000000006013457322061013417 0ustar liggesuserscumul <- function(x, ...) 
UseMethod("cumul") metafor/R/plot.rma.uni.selmodel.r0000644000176200001440000001305314002613433016422 0ustar liggesusersplot.rma.uni.selmodel <- function(x, xlim, ylim, n=1000, prec="max", scale=FALSE, ci=FALSE, reps=1000, rug=TRUE, add=FALSE, lty=c("solid","dotted"), lwd=c(2,1), ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni.selmodel") ddd <- list(...) lplot <- function(..., seed) plot(...) llines <- function(..., seed) lines(...) lrug <- function(..., seed) rug(...) if (is.logical(ci)) citype <- "boot" if (is.character(ci)) { citype <- ci ci <- TRUE } if (!is.element(citype, c("boot", "wald"))) stop(mstyle$stop("Unknown confidence interval type specified.")) if (missing(xlim)) xlim <- c(x$pval.min, 1-x$pval.min) if (length(xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) xlim <- sort(xlim) if (xlim[1] < 0 || xlim[2] > 1) stop(mstyle$stop("Values for 'xlim' should be between 0 and 1.")) if (length(prec) != 1L) stop(mstyle$stop("Argument 'prec' should be of length 1.")) if (is.character(prec)) { if (!is.element(prec, c("min", "max", "mean", "median"))) stop(mstyle$stop("Unknown options specified for the 'prec' argument.")) if (prec == "min") prec <- x$precis[["min"]] if (prec == "max") prec <- x$precis[["max"]] if (prec == "mean") prec <- x$precis[["mean"]] if (prec == "median") prec <- x$precis[["median"]] } else { if (is.numeric(prec) && !x$precspec) prec <- 1 } delta <- x$delta steps <- x$steps ps <- seq(xlim[1], xlim[2], length.out=n) if (x$type == "stepfun") { ps <- unique(sort(c(ps, steps))) # make sure that the 'steps' values are part of 'ps' ps <- ps[ps >= xlim[1]] # but only keep ps >= xlim[1] ps <- ps[ps <= xlim[2]] # ps <= xlim[2] plot.type <- "S" } else { plot.type <- "l" } wi.fun <- x$wi.fun ys <- wi.fun(ps, delta=delta, yi=x$yi, vi=x$vi, preci=prec, alternative=x$alternative, steps=x$steps) if (ci && citype == "boot" && all(is.na(x$vb.delta))) ci <- FALSE if (ci && citype == "wald" && all(is.na(x$ci.lb.delta)) && all(is.na(x$ci.ub.delta))) ci <- FALSE if (ci && citype == "wald" && x$type != "stepfun" && sum(!x$delta.fix) >= 2L) stop(mstyle$stop("Cannot compute Wald-type confidence intervals for this selection model.")) if (ci) { if (citype == "boot") { if (!is.null(ddd$seed)) set.seed(ddd$seed) vb.delta <- x$vb.delta vb.delta.na <- is.na(diag(vb.delta)) vb.delta[vb.delta.na,] <- 0 vb.delta[,vb.delta.na] <- 0 dsim <- .mvrnorm(reps, mu=delta, Sigma=vb.delta) for (j in 1:ncol(dsim)) { dsim[,j] <- ifelse(dsim[,j] < x$delta.min[j], x$delta.min[j], dsim[,j]) dsim[,j] <- ifelse(dsim[,j] > x$delta.max[j], x$delta.max[j], dsim[,j]) } ys.ci <- lapply(ps, function(p) { ysim <- apply(dsim, 1, function(d) wi.fun(p, delta=d, yi=x$yi, vi=x$vi, preci=prec, alternative=x$alternative, steps=x$steps)) quantile(ysim, probs=c(x$level/2, 1 - x$level/2)) }) ys.ci <- do.call(rbind, ys.ci) ys.lb <- ys.ci[,1] ys.ub <- ys.ci[,2] } if (citype == "wald") { ci.lb.delta <- x$ci.lb.delta ci.ub.delta <- x$ci.ub.delta if (x$type == "stepfun") { ci.lb.delta[x$delta.fix] <- delta[x$delta.fix] ci.ub.delta[x$delta.fix] <- delta[x$delta.fix] } ys.lb <- wi.fun(ps, delta=ci.lb.delta, yi=x$yi, vi=x$vi, preci=prec, alternative=x$alternative, steps=x$steps) ys.ub <- wi.fun(ps, delta=ci.ub.delta, yi=x$yi, vi=x$vi, preci=prec, alternative=x$alternative, steps=x$steps) } } else { ys.lb <- NA ys.ub <- NA } if (scale) { #is.inf.pos <- ys == Inf #is.inf.neg <- ys == -Inf 
ys[is.infinite(ys)] <- NA rng.ys <- max(ys, na.rm=TRUE) - min(ys, na.rm=TRUE) min.ys <- min(ys, na.rm=TRUE) if (rng.ys > .Machine$double.eps^0.5) { ys <- (ys - min.ys) / rng.ys ys.lb <- (ys.lb - min.ys) / rng.ys ys.ub <- (ys.ub - min.ys) / rng.ys } #ys[is.inf.pos] <- 1 #ys[is.inf.neg] <- 0 } ys[ys < 0] <- 0 ys.lb[ys.lb < 0] <- 0 ys.ub[ys.ub < 0] <- 0 if (missing(ylim)) { if (is.element(x$type, c("halfnorm", "negexp", "logistic", "power", "negexppow", "halfnorm2", "negexp2", "logistic2", "power2"))) { ylim <- c(0,1) } else { if (ci) { ylim <- c(min(c(ys.lb[is.finite(ys.lb)], ys[is.finite(ys)]), na.rm=TRUE), max(c(ys.ub[is.finite(ys.ub)], ys[is.finite(ys)]), na.rm=TRUE)) } else { ylim <- range(ys[is.finite(ys)], na.rm=TRUE) } } } else { if (length(ylim) != 2L) stop(mstyle$stop("Argument 'ylim' should be a vector of length 2.")) ylim <- sort(ylim) } if (add) { llines(ps, ys, type=plot.type, lty=lty[1], lwd=lwd[1], ...) } else { lplot(ps, ys, ylim=ylim, type=plot.type, lwd=lwd, xlab="p-value", ylab="Relative Likelihood of Selection", ...) } if (ci) { llines(ps, ys.lb, type=plot.type, lty=lty[2], lwd=lwd[2], ...) llines(ps, ys.ub, type=plot.type, lty=lty[2], lwd=lwd[2], ...) } if (rug && !add) lrug(x$pvals, quiet=TRUE) sav <- data.frame(xs=ps, ys=ys, ys.lb=ys.lb, ys.ub=ys.ub) invisible(sav) } metafor/R/print.hc.rma.uni.r0000644000176200001440000000203013770373536015401 0ustar liggesusersprint.hc.rma.uni <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="hc.rma.uni") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) res.table <- data.frame(method = c(x$method.rma, x$method), tau2 = .fcf(c(x$tau2.rma, x$tau2), digits[["var"]]), estimate = .fcf(c(x$beta.rma, x$beta), digits[["est"]]), se = .fcf(c(x$se.rma, x$se), digits[["se"]]), ci.lb = .fcf(c(x$ci.lb.rma, x$ci.lb), digits[["ci"]]), ci.ub = .fcf(c(x$ci.ub.rma, x$ci.ub), digits[["ci"]]), stringsAsFactors=FALSE) if (is.na(res.table$se[1])) res.table$se <- NULL rownames(res.table) <- c("rma", "hc") if (!exists(".rmspace")) cat("\n") tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) if (!exists(".rmspace")) cat("\n") invisible(res.table) } metafor/R/gosh.r0000644000176200001440000000005613457322061013237 0ustar liggesusersgosh <- function(x, ...) UseMethod("gosh") metafor/R/logLik.rma.r0000644000176200001440000000110313770372440014274 0ustar liggesuserslogLik.rma <- function(object, REML, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") if (missing(REML)) { if (object$method == "REML") { REML <- TRUE } else { REML <- FALSE } } if (REML) { val <- object$fit.stats["ll","REML"] } else { val <- object$fit.stats["ll","ML"] } attr(val, "nall") <- object$k.eff attr(val, "nobs") <- object$k.eff - ifelse(REML, object$p.eff, 0) attr(val, "df") <- object$parms class(val) <- "logLik" return(val) } metafor/R/to.wide.r0000644000176200001440000001656014031057777013663 0ustar liggesusersto.wide <- function(data, study, grp, ref, grpvars, postfix=c(".1",".2"), addid=TRUE, addcomp=TRUE, adddesign=TRUE, minlen=2, var.names=c("id","comp","design")) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (!is.data.frame(data)) data <- data.frame(data) ### get variable names varnames <- names(data) ### number of variables nvars <- length(varnames) ### checks on 'var.names' argument if (length(var.names) != 3L) stop(mstyle$stop("Argument 'var.names' must be of length 3.")) if (!is.character(var.names)) stop(mstyle$stop("Argument 'var.names' must be a character vector.")) if (any(var.names != make.names(var.names, unique=TRUE))) { var.names <- make.names(var.names, unique=TRUE) warning(mstyle$warning(paste0("Argument 'var.names' does not contain syntactically valid variable names.\n Variable names adjusted to: var.names = c('", var.names[1], "', '", var.names[2], "', '", var.names[3], "')."))) } ############################################################################ ### checks on 'study' argument if (length(study) != 1L) stop(mstyle$stop("Argument 'study' must be of length 1.")) if (!(is.character(study) | is.numeric(study))) stop(mstyle$stop("Argument 'study' must either be a character string or a scalar.")) if (is.character(study)) { study.pos <- charmatch(study, varnames) if (is.na(study.pos)) stop(mstyle$stop("Argument 'study' must be the name of a variable in the data frame.")) if (study.pos == 0L) stop(mstyle$stop("No ambiguous match found for variable name specified via 'study' argument.")) } else { study.pos <- round(study) if (study.pos < 1 | study.pos > nvars) stop(mstyle$stop("Specified position of 'study' variable does not exist in the data frame.")) } ### get study variable study <- data[[study.pos]] ### make sure there are no missing values in study variable if (anyNA(study)) stop(mstyle$stop("Variable specified via 'study' argument should not contain missing values.")) ############################################################################ ### checks on 'grp' argument if (length(grp) != 1L) stop(mstyle$stop("Argument 'grp' must be of length 1.")) if (!(is.character(grp) || is.numeric(grp))) stop(mstyle$stop("Argument 'grp' must either be a character string or a scalar.")) if (is.character(grp)) { grp.pos <- charmatch(grp, varnames) if (is.na(grp.pos)) stop(mstyle$stop("Argument 'grp' must be the name of a variable in the data frame.")) if (grp.pos == 0L) stop(mstyle$stop("No ambiguous match found for variable name specified via 'grp' argument.")) } else { grp.pos <- round(grp) if (grp.pos < 1 | grp.pos > nvars) stop(mstyle$stop("Specified position of 'grp' variable does not exist in the data frame.")) } ### get grp variable grp <- data[[grp.pos]] ### make sure there are no missing values in group variable if (anyNA(grp)) stop(mstyle$stop("Variable specified via 'grp' argument should not contain missing values.")) ### get levels of the group variable if (is.factor(grp)) { lvls <- levels(grp) } else { lvls <- sort(unique(grp)) }
############################################################################ ### checks on 'ref' argument ### if ref is not specified, use the most common group as the reference group if (missing(ref)) ref <- names(sort(table(grp), decreasing=TRUE)[1]) if (length(ref) != 1L) stop(mstyle$stop("Argument 'ref' must be of length one.")) ref.pos <- charmatch(ref, lvls) if (is.na(ref.pos)) stop(mstyle$stop("Could not find specified reference group in 'grp' variable.")) if (ref.pos == 0L) stop(mstyle$stop("No ambiguous match found for reference group specified via 'ref' argument.")) ############################################################################ ### reorder levels and data so that the reference level is always last lvls <- c(lvls[-ref.pos], lvls[ref.pos]) data <- data[order(study, factor(grp, levels=lvls)),] ### get study and group variables again study <- data[[study.pos]] grp <- data[[grp.pos]] ############################################################################ ### checks on 'grpvars' argument if (!(is.character(grpvars) || is.numeric(grpvars))) stop(mstyle$stop("Argument 'grpvars' must either be a string or numeric vector.")) if (is.character(grpvars)) { grpvars.pos <- unique(charmatch(grpvars, varnames)) if (anyNA(grpvars.pos)) stop(mstyle$stop("Argument 'grpvars' must be the names of variables in the data frame.")) if (any(grpvars.pos == 0L)) stop(mstyle$stop("One or multiple ambiguous matches for variable names specified via 'grpvars' argument.")) } else { grpvars.pos <- unique(round(grpvars)) if (any(grpvars.pos < 1) | any(grpvars.pos > nvars)) stop(mstyle$stop("Specified positions of 'grpvars' variables do not exist in the data frame.")) } ### in case the group variable is not specified as part of the group variables, add it if (!(grp.pos %in% grpvars.pos)) grpvars.pos <- c(grp.pos, grpvars.pos) ### and make sure that grp.pos is always in the first position of grpvars.pos grpvars.pos <- unique(c(grp.pos, grpvars.pos)) ############################################################################ ### restructure data set into wide format restruct <- function(x) { if (nrow(x) > 1L) { cbind(x[-nrow(x),], x[rep(nrow(x),nrow(x)-1),grpvars.pos]) } else { # to handle one-arm studies unname(c(x, rep(NA, length(grpvars.pos)))) } } dat <- lapply(split(data, study), restruct) dat <- do.call(rbind, dat) ### add postfix to outcome variable names names(dat)[grpvars.pos] <- paste0(names(dat)[grpvars.pos], postfix[1]) names(dat)[(nvars+1):ncol(dat)] <- paste0(names(dat)[(nvars+1):ncol(dat)], postfix[2]) ### fix row names rownames(dat) <- seq_len(nrow(dat)) ############################################################################ ### generate comp variable grps <- .shorten(as.character(data[[grp.pos]]), minlen=minlen) restruct <- function(x) { if (length(x) > 1L) { paste0(x[-length(x)], "-", x[length(x)]) } else { NA } } comp <- unlist(sapply(split(grps, study), restruct)) ### generate design variable restruct <- function(x) { if (length(x) > 1L) { rep(paste0(x, collapse="-"), length(x)-1) } else { NA } } design <- unlist(sapply(split(grps, study), restruct)) ############################################################################ ### add row id to dataset if (addid) { dat[[var.names[1]]] <- 1:nrow(dat) ### make sure that row id variable is always the first variable in the dataset #id.pos <- which(names(dat) == "id") #dat <- dat[c(id.pos, seq_along(names(dat))[-id.pos])] } ### add comp variable to dataset if (addcomp) dat[[var.names[2]]] <- comp ### add design variable to 
dataset if (adddesign) dat[[var.names[3]]] <- design ############################################################################ return(dat) } metafor/R/rstudent.rma.uni.r0000644000176200001440000000021513457322061015514 0ustar liggesusersrstudent.rma.uni <- function(model, digits, progbar=FALSE, ...) influence(model, digits=digits, progbar=progbar, measure="rstudent", ...) metafor/R/confint.rma.mv.r0000644000176200001440000005572114046726233015153 0ustar liggesusersconfint.rma.mv <- function(object, parm, level, fixed=FALSE, sigma2, tau2, rho, gamma2, phi, digits, transf, targs, verbose=FALSE, control, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.mv") if (!missing(parm)) warning(mstyle$warning("Argument 'parm' (currently) ignored."), call.=FALSE) x <- object k <- x$k p <- x$p if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL if (missing(control)) control <- list() level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ddd <- list(...) .chkdots(ddd, c("time", "xlim", "extint")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (!is.null(ddd$xlim)) { if (length(ddd$xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) control$vc.min <- ddd$xlim[1] control$vc.max <- ddd$xlim[2] } ### check if user has specified one of the sigma2, tau2, rho, gamma2, or phi arguments random <- !all(missing(sigma2), missing(tau2), missing(rho), missing(gamma2), missing(phi)) if (!fixed && !random) { ### if both 'fixed' and 'random' are FALSE, obtain CIs for all variance/correlation components cl <- match.call() ### total number of non-fixed components comps <- ifelse(x$withS, sum(!x$vc.fix$sigma2), 0) + ifelse(x$withG, sum(!x$vc.fix$tau2) + sum(!x$vc.fix$rho), 0) + ifelse(x$withH, sum(!x$vc.fix$gamma2) + sum(!x$vc.fix$phi), 0) if (comps == 0) stop(mstyle$stop("No components for which a CI can be obtained.")) res.all <- list() j <- 0 if (x$withS && any(!x$vc.fix$sigma2)) { for (pos in seq_len(x$sigma2s)[!x$vc.fix$sigma2]) { j <- j + 1 cl.vc <- cl cl.vc$sigma2 <- pos cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for sigma2 =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } if (x$withG) { if (any(!x$vc.fix$tau2)) { for (pos in seq_len(x$tau2s)[!x$vc.fix$tau2]) { j <- j + 1 cl.vc <- cl cl.vc$tau2 <- pos cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for tau2 =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } if (any(!x$vc.fix$rho)) { for (pos in seq_len(x$rhos)[!x$vc.fix$rho]) { j <- j + 1 cl.vc <- cl cl.vc$rho <- pos cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for rho =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } } if (x$withH) { if (any(!x$vc.fix$gamma2)) { for (pos in seq_len(x$gamma2s)[!x$vc.fix$gamma2]) { j <- j + 1 cl.vc <- cl cl.vc$gamma2 <- pos cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for gamma2 =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } if (any(!x$vc.fix$phi)) { for (pos in seq_len(x$phis)[!x$vc.fix$phi]) { j <- j + 1 cl.vc <- cl cl.vc$phi <- pos cl.vc$time <- FALSE 
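### evaluate the adjusted call (with 'phi' set to the current position) to obtain the CI for this component via a recursive call to confint()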
#cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for phi =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } if (length(res.all) == 1L) { return(res.all[[1]]) } else { res.all$digits <- digits class(res.all) <- "list.confint.rma" return(res.all) } } ######################################################################### ######################################################################### ######################################################################### if (random) { type <- "PL" ###################################################################### ### check if user has specified more than one of these arguments if (sum(!missing(sigma2), !missing(tau2), !missing(rho), !missing(gamma2), !missing(phi)) > 1L) stop(mstyle$stop("Must specify only one of the arguments 'sigma2', 'tau2', 'rho', 'gamma2', or 'phi'.")) ### check if model actually contains (at least one) such a component and that it was actually estimated ### note: a component that is not in the model is NA; components that are fixed are TRUE if (!missing(sigma2) && (all(is.na(x$vc.fix$sigma2)) || all(x$vc.fix$sigma2))) stop(mstyle$stop("Model does not contain any (estimated) 'sigma2' components.")) if (!missing(tau2) && (all(is.na(x$vc.fix$tau2)) || all(x$vc.fix$tau2))) stop(mstyle$stop("Model does not contain any (estimated) 'tau2' components.")) if (!missing(rho) && c(all(is.na(x$vc.fix$rho)) || all(x$vc.fix$rho))) stop(mstyle$stop("Model does not contain any (estimated) 'rho' components.")) if (!missing(gamma2) && (all(is.na(x$vc.fix$gamma2)) || all(x$vc.fix$gamma2))) stop(mstyle$stop("Model does not contain any (estimated) 'gamma2' components.")) if (!missing(phi) && c(all(is.na(x$vc.fix$phi)) || all(x$vc.fix$phi))) stop(mstyle$stop("Model does not contain any (estimated) 'phi' components.")) ### check if user specified more than one sigma2, tau2, rho, gamma2, or rho component if (!missing(sigma2) && (length(sigma2) > 1L)) stop(mstyle$stop("Can only specify one 'sigma2' component.")) if (!missing(tau2) && (length(tau2) > 1L)) stop(mstyle$stop("Can only specify one 'tau2' component.")) if (!missing(rho) && (length(rho) > 1L)) stop(mstyle$stop("Can only specify one 'rho' component.")) if (!missing(gamma2) && (length(gamma2) > 1L)) stop(mstyle$stop("Can only specify one 'gamma2' component.")) if (!missing(phi) && (length(phi) > 1L)) stop(mstyle$stop("Can only specify one 'phi' component.")) ### check if user specified a logical if (!missing(sigma2) && is.logical(sigma2)) stop(mstyle$stop("Must specify the number for the 'sigma2' component.")) if (!missing(tau2) && is.logical(tau2)) stop(mstyle$stop("Must specify the number for the 'tau2' component.")) if (!missing(rho) && is.logical(rho)) stop(mstyle$stop("Must specify the number for the 'rho' component.")) if (!missing(gamma2) && is.logical(gamma2)) stop(mstyle$stop("Must specify the number for the 'gamma2' component.")) if (!missing(phi) && is.logical(phi)) stop(mstyle$stop("Must specify the number for the 'phi' component.")) ### check if user specified a component that does not exist if (!missing(sigma2) && (sigma2 > length(x$vc.fix$sigma2) || sigma2 <= 0)) stop(mstyle$stop("No such 'sigma2' component in the model.")) if (!missing(tau2) && (tau2 > length(x$vc.fix$tau2) || tau2 <= 0)) stop(mstyle$stop("No such 'tau2' component in the model.")) if (!missing(rho) && (rho > length(x$vc.fix$rho) || rho <= 0)) 
stop(mstyle$stop("No such 'rho' component in the model.")) if (!missing(gamma2) && (gamma2 > length(x$vc.fix$gamma2) || gamma2 <= 0)) stop(mstyle$stop("No such 'gamma2' component in the model.")) if (!missing(phi) && (phi > length(x$vc.fix$phi) || phi <= 0)) stop(mstyle$stop("No such 'phi' component in the model.")) ### check if user specified a component that was fixed if (!missing(sigma2) && x$vc.fix$sigma2[sigma2]) stop(mstyle$stop("Specified 'sigma2' component was fixed.")) if (!missing(tau2) && x$vc.fix$tau2[tau2]) stop(mstyle$stop("Specified 'tau2' component was fixed.")) if (!missing(rho) && x$vc.fix$rho[rho]) stop(mstyle$stop("Specified 'rho' component was fixed.")) if (!missing(gamma2) && x$vc.fix$gamma2[gamma2]) stop(mstyle$stop("Specified 'gamma2' component was fixed.")) if (!missing(phi) && x$vc.fix$phi[phi]) stop(mstyle$stop("Specified 'phi' component was fixed.")) ### if everything is good so far, get value of the variance component and set 'comp' sigma2.pos <- NA tau2.pos <- NA rho.pos <- NA gamma2.pos <- NA phi.pos <- NA if (!missing(sigma2)) { vc <- x$sigma2[sigma2] comp <- "sigma2" sigma2.pos <- sigma2 } if (!missing(tau2)) { vc <- x$tau2[tau2] comp <- "tau2" tau2.pos <- tau2 } if (!missing(rho)) { vc <- x$rho[rho] comp <- "rho" rho.pos <- rho } if (!missing(gamma2)) { vc <- x$gamma2[gamma2] comp <- "gamma2" gamma2.pos <- gamma2 } if (!missing(phi)) { vc <- x$phi[phi] comp <- "phi" phi.pos <- phi } #return(list(comp=comp, vc=vc, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos)) ###################################################################### ### set control parameters for uniroot() and possibly replace with user-defined values ### set vc.min and vc.max and possibly replace with user-defined values con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, verbose=FALSE, eptries=10) if (is.element(comp, c("sigma2", "tau2", "gamma2"))) { con$vc.min <- 0 con$vc.max <- max(ifelse(vc <= .Machine$double.eps^0.5, 10, max(10, vc*100)), con$vc.min) } if (comp == "rho") { if (is.element(x$struct[1], c("CS","HCS"))) con$vc.min <- -1 ### this will fail most of the time but with retries, this may get closer to actual lower bound #con$vc.min <- min(-1/(x$g.nlevels.f[1] - 1), vc) ### this guarantees that cor matrix is semi-positive definite, but since V gets added, this is actually too strict if (is.element(x$struct[1], c("AR","HAR","CAR"))) con$vc.min <- min(0, vc) ### negative autocorrelation parameters not considered (not even sensible for CAR) if (is.element(x$struct[1], c("UN","UNR","GEN"))) con$vc.min <- -1 ### TODO: this will often fail! 
(but with retries, this should still work) con$vc.max <- 1 if (is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH"))) { con$vc.min <- 0 ### TODO: 0 basically always fails con$vc.max <- max(10, vc*10) } if (is.element(x$struct[1], c("PHYPL","PHYPD"))) { con$vc.min <- 0 con$vc.max <- max(2, vc*2) } } if (comp == "phi") { if (is.element(x$struct[2], c("CS","HCS"))) con$vc.min <- -1 ### this will fail most of the time but with retries, this may get closer to actual lower bound #con$vc.min <- min(-1/(x$h.nlevels.f[1] - 1), vc) ### this guarantees that cor matrix is semi-positive definite, but since V gets added, this is actually too strict if (is.element(x$struct[2], c("AR","HAR","CAR"))) con$vc.min <- min(0, vc) ### negative autocorrelation parameters not considered (not even sensible for CAR) if (is.element(x$struct[2], c("UN","UNR","GEN"))) con$vc.min <- -1 ### TODO: this will often fail! (but with retries, this should still work) con$vc.max <- 1 if (is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH"))) { con$vc.min <- 0 ### TODO: 0 basically always fails con$vc.max <- max(10, vc*10) } if (is.element(x$struct[2], c("PHYPL","PHYPD"))) { con$vc.min <- 0 con$vc.max <- max(2, vc*2) } } con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose ###################################################################### vc.lb <- NA vc.ub <- NA ci.null <- FALSE ### logical if CI is a null set lb.conv <- FALSE ### logical if search converged for lower bound (LB) ub.conv <- FALSE ### logical if search converged for upper bound (UB) lb.sign <- "" ### for sign in case LB must be below vc.min ("<") or above vc.max (">") ub.sign <- "" ### for sign in case UB must be below vc.min ("<") or above vc.max (">") ###################################################################### ###################################################################### ###################################################################### ### Profile Likelihood method if (type == "PL") { if (con$vc.min > vc) stop(mstyle$stop("Lower bound of interval to be searched must be <= estimated value of component.")) if (con$vc.max < vc) stop(mstyle$stop("Upper bound of interval to be searched must be >= estimated value of component.")) objective <- qchisq(1-level, df=1) ################################################################### ### search for lower bound ### get diff value when setting component to vc.min; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the lower bound must be below vc.min epdiff <- abs(con$vc.min - vc) / con$eptries for (i in seq_len(con$eptries)) { res <- try(.profile.rma.mv(con$vc.min, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { vc.lb <- con$vc.min lb.conv <- TRUE if (is.element(comp, c("sigma2", "tau2", "gamma2")) && con$vc.min > 0) lb.sign <- "<" if (is.element(comp, c("rho", "phi")) && con$vc.min > -1) lb.sign <- "<" if (((comp == "rho" && is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD"))) || (comp == "phi" && is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")))) && con$vc.min > 0) lb.sign <- "<" } else { if (.isTRUE(ddd$extint)) { res 
<- try(uniroot(.profile.rma.mv, interval=c(con$vc.min, vc), tol=con$tol, maxiter=con$maxiter, extendInt="downX", obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.mv, interval=c(con$vc.min, vc), tol=con$tol, maxiter=con$maxiter, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { vc.lb <- res lb.conv <- TRUE } } break } con$vc.min <- con$vc.min + epdiff } if (verbose) cat("\n") ################################################################### ### search for upper bound ### get diff value when setting component to vc.max; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the upper bound must be above vc.max epdiff <- abs(con$vc.max - vc) / con$eptries for (i in seq_len(con$eptries)) { res <- try(.profile.rma.mv(con$vc.max, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { vc.ub <- con$vc.max ub.conv <- TRUE if (is.element(comp, c("sigma2", "tau2", "gamma2"))) ub.sign <- ">" if (is.element(comp, c("rho", "phi")) && con$vc.max < 1) ub.sign <- ">" if ((comp == "rho" && is.element(x$struct[1], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD"))) || (comp == "phi" && is.element(x$struct[2], c("SPEXP","SPGAU","SPLIN","SPRAT","SPSPH","PHYPL","PHYPD")))) ub.sign <- ">" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.mv, interval=c(vc, con$vc.max), tol=con$tol, maxiter=con$maxiter, extendInt="upX", obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.mv, interval=c(vc, con$vc.max), tol=con$tol, maxiter=con$maxiter, obj=x, comp=comp, sigma2.pos=sigma2.pos, tau2.pos=tau2.pos, rho.pos=rho.pos, gamma2.pos=gamma2.pos, phi.pos=phi.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { vc.ub <- res ub.conv <- TRUE } } break } con$vc.max <- con$vc.max - epdiff } ################################################################### } ###################################################################### ###################################################################### ###################################################################### if (!lb.conv) warning(mstyle$warning("Cannot obtain lower bound of profile likelihood CI due to convergence problems."), call.=FALSE) if (!ub.conv) warning(mstyle$warning("Cannot obtain upper bound of profile likelihood CI due to convergence problems."), call.=FALSE) ###################################################################### vc <- c(vc, vc.lb, vc.ub) if (is.element(comp, c("sigma2", "tau2", "gamma2"))) { vcsqrt <- sqrt(ifelse(vc >= 0, vc, NA)) res.random <- rbind(vc, vcsqrt) if (comp == "sigma2") { if (length(x$sigma2) == 1L) { 
rownames(res.random) <- c("sigma^2", "sigma") } else { rownames(res.random) <- paste0(c("sigma^2", "sigma"), ".", sigma2.pos) } } if (comp == "tau2") { if (length(x$tau2) == 1L) { rownames(res.random) <- c("tau^2", "tau") } else { rownames(res.random) <- paste0(c("tau^2", "tau"), ".", tau2.pos) } } if (comp == "gamma2") { if (length(x$gamma2) == 1L) { rownames(res.random) <- c("gamma^2", "gamma") } else { rownames(res.random) <- paste0(c("gamma^2", "gamma"), ".", gamma2.pos) } } } else { res.random <- rbind(vc) if (comp == "rho") { if (length(x$rho) == 1L) { rownames(res.random) <- "rho" } else { rownames(res.random) <- paste0("rho.", rho.pos) } } if (comp == "phi") { if (length(x$phi) == 1L) { rownames(res.random) <- "phi" } else { rownames(res.random) <- paste0("phi.", rho.pos) } } } colnames(res.random) <- c("estimate", "ci.lb", "ci.ub") } ######################################################################### ######################################################################### ######################################################################### if (fixed) { if (x$test == "t") { crit <- sapply(seq_along(x$ddf), function(j) if (x$ddf[j] > 0) qt(level/2, df=x$ddf[j], lower.tail=FALSE) else NA) } else { crit <- qnorm(level/2, lower.tail=FALSE) } beta <- c(x$beta) ci.lb <- c(beta - crit * x$se) ci.ub <- c(beta + crit * x$se) if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] res.fixed <- cbind(estimate=beta, ci.lb=ci.lb, ci.ub=ci.ub) rownames(res.fixed) <- rownames(x$beta) } ######################################################################### ######################################################################### ######################################################################### res <- list() if (fixed) res$fixed <- res.fixed if (random) res$random <- res.random res$digits <- digits if (random) { res$ci.null <- ci.null res$lb.sign <- lb.sign res$ub.sign <- ub.sign #res$vc.min <- con$vc.min } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(res) <- "confint.rma" return(res) } metafor/R/leave1out.rma.peto.r0000644000176200001440000000733014027633331015731 0ustar liggesusersleave1out.rma.peto <- function(x, digits, transf, targs, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.peto") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!x$int.only) stop(mstyle$stop("Method only applicable for models without moderators.")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) 
.chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### beta <- rep(NA_real_, x$k.f) se <- rep(NA_real_, x$k.f) zval <- rep(NA_real_, x$k.f) pval <- rep(NA_real_, x$k.f) ci.lb <- rep(NA_real_, x$k.f) ci.ub <- rep(NA_real_, x$k.f) QE <- rep(NA_real_, x$k.f) QEp <- rep(NA_real_, x$k.f) #tau2 <- rep(NA_real_, x$k.f) I2 <- rep(NA_real_, x$k.f) H2 <- rep(NA_real_, x$k.f) ### note: skipping NA cases if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!x$not.na[i]) next res <- try(suppressWarnings(rma.peto(ai=x$ai.f, bi=x$bi.f, ci=x$ci.f, di=x$di.f, add=x$add, to=x$to, drop00=x$drop00, level=x$level, subset=-i)), silent=TRUE) if (inherits(res, "try-error")) next beta[i] <- res$beta se[i] <- res$se zval[i] <- res$zval pval[i] <- res$pval ci.lb[i] <- res$ci.lb ci.ub[i] <- res$ci.ub QE[i] <- res$QE QEp[i] <- res$QEp #tau2[i] <- res$tau2 I2[i] <- res$I2 H2[i] <- res$H2 } if (progbar) pbapply::closepb(pbar) ######################################################################### ### if requested, apply transformation function if (.isTRUE(transf)) ### if transf=TRUE, apply exp transformation to ORs transf <- exp if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(estimate=beta[x$not.na], se=se[x$not.na], zval=zval[x$not.na], pval=pval[x$not.na], ci.lb=ci.lb[x$not.na], ci.ub=ci.ub[x$not.na], Q=QE[x$not.na], Qp=QEp[x$not.na], I2=I2[x$not.na], H2=H2[x$not.na]) out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(estimate=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, Q=QE, Qp=QEp, I2=I2, H2=H2) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits out$transf <- transf if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- "list.rma" return(out) } metafor/R/forest.cumul.rma.r0000644000176200001440000004551214054724732015516 0ustar liggesusersforest.cumul.rma <- function(x, annotate=TRUE, header=FALSE, xlim, alim, olim, ylim, top=3, at, steps=5, level=x$level, refline=0, digits=2L, width, xlab, ilab, ilab.xpos, ilab.pos, transf, atransf, targs, rows, efac=1, pch=15, psize=1, col, lty, fonts, cex, cex.lab, cex.axis, annosym, ...) 
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="cumul.rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE transf.char <- deparse(substitute(transf)) atransf.char <- deparse(substitute(atransf)) if (is.function(transf) && is.function(atransf)) stop(mstyle$stop("Use either 'transf' or 'atransf' to specify a transformation (not both).")) yi <- x$estimate if (missing(targs)) targs <- NULL if (missing(at)) at <- NULL if (missing(ilab)) ilab <- NULL if (missing(ilab.xpos)) ilab.xpos <- NULL if (missing(ilab.pos)) ilab.pos <- NULL if (missing(col)) col <- NULL if (missing(cex)) cex <- NULL if (missing(cex.lab)) cex.lab <- NULL if (missing(cex.axis)) cex.axis <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ### digits[1] for annotations, digits[2] for x-axis labels ### note: digits can also be a list (e.g., digits=list(2L,3)); trailing 0's are dropped for integers if (length(digits) == 1L) digits <- c(digits,digits) ############################################################################ ### set default line types if user has not specified 'lty' argument if (missing(lty)) { lty <- c("solid", "solid") # 1st value = CIs, 2nd value = horizontal line(s) } else { if (length(lty) == 1L) lty <- c(lty, "solid") } ### vertical expansion factor: 1st = CI end lines, 2nd = arrows if (length(efac) == 1L) efac <- rep(efac, 2) ### annotation symbols vector if (missing(annosym)) annosym <- c(" [", ", ", "]", "-") # 4th element for minus sign symbol if (length(annosym) == 3L) annosym <- c(annosym, "-") if (length(annosym) != 4L) stop(mstyle$stop("Argument 'annosym' must be a vector of length 3 (or 4).")) ### get measure from object measure <- x$measure ### column header estlab <- .setlab(measure, transf.char, atransf.char, gentype=3, short=TRUE) if (is.expression(estlab)) { header.right <- parse(text=paste0("bold(", estlab, " * '", annosym[1], "' * '", 100*(1-level), "% CI'", " * '", annosym[3], "')")) } else { header.right <- paste0(estlab, annosym[1], 100*(1-level), "% CI", annosym[3]) } if (is.logical(header)) { if (header) { header.left <- "Study" } else { header.left <- NULL header.right <- NULL } } else { if (!is.character(header)) stop(mstyle$stop("Argument 'header' must either be a logical or character vector.")) if (length(header) == 1L) { header.left <- header } else { header.left <- header[1] header.right <- header[2] } } if (!annotate) header.right <- NULL ddd <- list(...) if (!is.null(ddd$clim)) olim <- ddd$clim lplot <- function(..., textpos, clim) plot(...) labline <- function(..., textpos, clim) abline(...) lsegments <- function(..., textpos, clim) segments(...) laxis <- function(..., textpos, clim) axis(...) lmtext <- function(..., textpos, clim) mtext(...) lpolygon <- function(..., textpos, clim) polygon(...) ltext <- function(..., textpos, clim) text(...) lpoints <- function(..., textpos, clim) points(...)
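### note: the local wrappers defined above absorb the 'textpos' and 'clim' arguments so that they are not passed along to the base graphics functions via '...'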
######################################################################### ### extract data / results and other arguments vi <- x$se^2 ci.lb <- x$ci.lb ci.ub <- x$ci.ub ### check length of yi and vi k <- length(yi) # either of length k when na.action="na.omit" or k.f otherwise if (length(vi) != k) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same.")) ### note: ilab, pch, psize, col must be of the same length as yi (which may ### or may not contain NAs; this is different than the other forest() ### functions but it would be tricky to make this fully consistent now if (x$slab.null) { slab <- paste("+ Study", x$ids) # cumul() removes the studies with NAs when na.action="na.omit" slab[1] <- paste("Study", x$ids[1]) } else { slab <- paste("+", x$slab) # cumul() removes the studies with NAs when na.action="na.omit" slab[1] <- paste(x$slab[1]) } if (!is.null(ilab)) { if (is.null(dim(ilab))) ilab <- cbind(ilab) if (nrow(ilab) != k) stop(mstyle$stop(paste0("Length of the 'ilab' argument (", nrow(ilab), ") does not correspond to the number of outcomes (", k, ")."))) } if (length(pch) == 1L) pch <- rep(pch, k) if (length(pch) != k) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the number of outcomes (", k, ")."))) if (length(psize) == 1L) psize <- rep(psize, k) if (length(psize) != k) stop(mstyle$stop(paste0("Length of the 'psize' argument (", length(psize), ") does not correspond to the number of outcomes (", k, ")."))) ### if user has set the col argument if (!is.null(col)) { if (length(col) == 1L) col <- rep(col, k) if (length(col) != k) stop(mstyle$stop(paste0("Length of the 'col' argument (", length(col), ") does not correspond to the number of outcomes (", k, ")."))) } else { col <- rep("black", k) } ### set rows value if (missing(rows)) { rows <- k:1 } else { if (length(rows) == 1L) rows <- rows:(rows-k+1) } if (length(rows) != k) stop(mstyle$stop(paste0("Length of the 'rows' argument (", length(rows), ") does not correspond to the number of outcomes (", k, ")."))) ### reverse order yi <- yi[k:1] vi <- vi[k:1] ci.lb <- ci.lb[k:1] ci.ub <- ci.ub[k:1] slab <- slab[k:1] ilab <- ilab[k:1,,drop=FALSE] # if NULL, remains NULL pch <- pch[k:1] psize <- psize[k:1] # if NULL, remains NULL col <- col[k:1] rows <- rows[k:1] ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) if (any(yivi.na)) { not.na <- !yivi.na if (na.act == "na.omit") { yi <- yi[not.na] vi <- vi[not.na] ci.lb <- ci.lb[not.na] ci.ub <- ci.ub[not.na] slab <- slab[not.na] ilab <- ilab[not.na,,drop=FALSE] # if NULL, remains NULL pch <- pch[not.na] psize <- psize[not.na] # if NULL, remains NULL col <- col[not.na] rows.new <- rows # rearrange rows due to NAs being omitted from plot rows.na <- rows[!not.na] # shift higher rows down according to number of NAs omitted for (j in seq_len(length(rows.na))) { rows.new[rows >= rows.na[j]] <- rows.new[rows >= rows.na[j]] - 1 } rows <- rows.new[not.na] } if (na.act == "na.fail") stop(mstyle$stop("Missing values in results.")) } # note: yi/vi may be NA if na.act == "na.exclude" or "na.pass" k <- length(yi) # in case length of k has changed ### if requested, apply transformation to yi's and CI bounds if (is.function(transf)) { if (is.null(targs)) { yi <- sapply(yi, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { yi <- sapply(yi, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing 
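### (each row returned by .psort() holds the pair of bounds sorted in increasing order, which matters when 'transf' is a decreasing function)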
tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ### apply observation/outcome limits if specified if (!missing(olim)) { if (length(olim) != 2L) stop(mstyle$stop("Argument 'olim' must be of length 2.")) olim <- sort(olim) yi[yi < olim[1]] <- olim[1] yi[yi > olim[2]] <- olim[2] ci.lb[ci.lb < olim[1]] <- olim[1] ci.ub[ci.ub > olim[2]] <- olim[2] } ######################################################################### ### total range of CI bounds rng <- max(ci.ub, na.rm=TRUE) - min(ci.lb, na.rm=TRUE) if (annotate) { plot.multp.l <- 1.20 plot.multp.r <- 1.20 } else { plot.multp.l <- 1.20 plot.multp.r <- 0.40 } ### set plot limits if (missing(xlim)) { xlim <- c(min(ci.lb, na.rm=TRUE) - rng * plot.multp.l, max(ci.ub, na.rm=TRUE) + rng * plot.multp.r) xlim <- round(xlim, digits[[2]]) #xlim[1] <- xlim[1]*max(1, digits[[2]]/2) #xlim[2] <- xlim[2]*max(1, digits[[2]]/2) } ### set x-axis limits (at argument overrides alim argument) alim.spec <- TRUE if (missing(alim)) { if (is.null(at)) { alim <- range(pretty(x=c(min(ci.lb, na.rm=TRUE), max(ci.ub, na.rm=TRUE)), n=steps-1)) alim.spec <- FALSE } else { alim <- range(at) } } ### make sure the plot and x-axis limits are sorted alim <- sort(alim) xlim <- sort(xlim) ### plot limits must always encompass the yi values if (xlim[1] > min(yi, na.rm=TRUE)) { xlim[1] <- min(yi, na.rm=TRUE) } if (xlim[2] < max(yi, na.rm=TRUE)) { xlim[2] <- max(yi, na.rm=TRUE) } ### x-axis limits must always encompass the yi values (no longer required) #if (alim[1] > min(yi, na.rm=TRUE)) { alim[1] <- min(yi, na.rm=TRUE) } #if (alim[2] < max(yi, na.rm=TRUE)) { alim[2] <- max(yi, na.rm=TRUE) } ### plot limits must always encompass the x-axis limits if (alim[1] < xlim[1]) { xlim[1] <- alim[1] } if (alim[2] > xlim[2]) { xlim[2] <- alim[2] } ### allow adjustment of position of study labels and annotations via textpos argument if (is.null(ddd$textpos)) ddd$textpos <- c(xlim[1], xlim[2]) if (length(ddd$textpos) != 2L) stop(mstyle$stop("Argument 'textpos' must be of length 2.")) if (is.na(ddd$textpos[1])) ddd$textpos[1] <- xlim[1] if (is.na(ddd$textpos[2])) ddd$textpos[2] <- xlim[2] ### set y-axis limits if (missing(ylim)) { ylim <- c(0.5, max(rows, na.rm=TRUE)+top) } else { ylim <- sort(ylim) } ### generate x-axis positions if none are specified if (is.null(at)) { if (alim.spec) { at <- seq(from=alim[1], to=alim[2], length.out=steps) } else { at <- pretty(x=c(min(ci.lb, na.rm=TRUE), max(ci.ub, na.rm=TRUE)), n=steps-1) } } else { at[at < alim[1]] <- alim[1] # remove at values that are below or above the axis limits at[at > alim[2]] <- alim[2] at <- unique(at) } ### x-axis labels (apply transformation to axis labels if requested) at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } } else { at.lab <- formatC(at.lab, digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])) } ######################################################################### ### set/get fonts (1st for study labels, 2nd for annotations, 3rd for ilab) ### when passing a named vector, the names are for 'family' and the values are for 'font' if (missing(fonts)) { fonts <- rep(par("family"), 3) } else { if (length(fonts) == 1L) fonts <- rep(fonts, 3) if (length(fonts) == 2L) fonts <- c(fonts, fonts[1]) } if (is.null(names(fonts))) fonts <- 
structure(c(1L,1L,1L), names=fonts) par(family=names(fonts)[1], font=fonts[1]) ### adjust margins par.mar <- par("mar") par.mar.adj <- par.mar - c(0,3,1,1) par.mar.adj[par.mar.adj < 0] <- 0 par(mar = par.mar.adj) on.exit(par(mar = par.mar)) ### start plot lplot(NA, NA, xlim=xlim, ylim=ylim, xlab="", ylab="", yaxt="n", xaxt="n", xaxs="i", bty="n", ...) ### horizontal title line labline(h=ylim[2]-(top-1), lty=lty[2], ...) ### get coordinates of the plotting region par.usr <- par("usr") ### add reference line if (is.numeric(refline)) lsegments(refline, par.usr[3], refline, ylim[2]-(top-1), lty="dotted", ...) ### set cex, cex.lab, and cex.axis sizes as a function of the height of the figure height <- par.usr[4] - par.usr[3] if (is.null(cex)) { lheight <- strheight("O") cex.adj <- ifelse(k * lheight > height * 0.8, height/(1.25 * k * lheight), 1) } if (is.null(cex)) { cex <- par("cex") * cex.adj } else { if (is.null(cex.lab)) cex.lab <- cex if (is.null(cex.axis)) cex.axis <- cex } if (is.null(cex.lab)) cex.lab <- par("cex") * cex.adj if (is.null(cex.axis)) cex.axis <- par("cex") * cex.adj ### add x-axis laxis(side=1, at=at, labels=at.lab, cex.axis=cex.axis, ...) ### add x-axis label if (missing(xlab)) xlab <- .setlab(measure, transf.char, atransf.char, gentype=2) lmtext(xlab, side=1, at=min(at) + (max(at)-min(at))/2, line=par("mgp")[1]-0.5, cex=cex.lab, ...) ### add CI ends (either | or <> if outside of axis limits) for (i in seq_len(k)) { ### need to skip missings (if check below will otherwise throw an error) if (is.na(yi[i]) || is.na(vi[i])) next ### if the lower bound is actually larger than upper x-axis limit, then everything is to the right and just draw a polygon pointing in that direction if (ci.lb[i] >= alim[2]) { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) next } ### if the upper bound is actually lower than lower x-axis limit, then everything is to the left and just draw a polygon pointing in that direction if (ci.ub[i] <= alim[1]) { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) next } lsegments(max(ci.lb[i], alim[1]), rows[i], min(ci.ub[i], alim[2]), rows[i], lty=lty[1], col=col[i], ...) if (ci.lb[i] >= alim[1]) { lsegments(ci.lb[i], rows[i]-(height/150)*cex*efac[1], ci.lb[i], rows[i]+(height/150)*cex*efac[1], col=col[i], ...) } else { lpolygon(x=c(alim[1], alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]+(1.4/100)*cex*(xlim[2]-xlim[1]), alim[1]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) } if (ci.ub[i] <= alim[2]) { lsegments(ci.ub[i], rows[i]-(height/150)*cex*efac[1], ci.ub[i], rows[i]+(height/150)*cex*efac[1], col=col[i], ...) } else { lpolygon(x=c(alim[2], alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]-(1.4/100)*cex*(xlim[2]-xlim[1]), alim[2]), y=c(rows[i], rows[i]+(height/150)*cex*efac[2], rows[i]-(height/150)*cex*efac[2], rows[i]), col=col[i], border=col[i], ...) } } ### add study labels on the left ltext(ddd$textpos[1], rows, slab, pos=4, cex=cex, col=col, ...) 
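### Illustrative usage sketch for this plotting method; the dataset and variable
### names below are assumptions based on the BCG vaccine example data that ships
### with the package and are not defined in this file:
### dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
### res <- rma(yi, vi, data=dat)
### forest(cumul(res, order=dat$year), atransf=exp)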
### add info labels if (!is.null(ilab)) { if (is.null(ilab.xpos)) stop(mstyle$stop("Must specify 'ilab.xpos' argument when adding information with 'ilab'.")) if (length(ilab.xpos) != ncol(ilab)) stop(mstyle$stop(paste0("Number of 'ilab' columns (", ncol(ilab), ") does not match length of 'ilab.xpos' argument (", length(ilab.xpos), ")."))) if (!is.null(ilab.pos) && length(ilab.pos) == 1L) ilab.pos <- rep(ilab.pos, ncol(ilab)) par(family=names(fonts)[3], font=fonts[3]) for (l in seq_len(ncol(ilab))) { ltext(ilab.xpos[l], rows, ilab[,l], pos=ilab.pos[l], cex=cex, ...) } par(family=names(fonts)[1], font=fonts[1]) } ### add study annotations on the right: yi [LB, UB] if (annotate) { if (is.function(atransf)) { if (is.null(targs)) { annotext <- cbind(sapply(yi, atransf), sapply(ci.lb, atransf), sapply(ci.ub, atransf)) } else { annotext <- cbind(sapply(yi, atransf, targs), sapply(ci.lb, atransf, targs), sapply(ci.ub, atransf, targs)) } ### make sure order of intervals is always increasing tmp <- .psort(annotext[,2:3]) annotext[,2:3] <- tmp } else { annotext <- cbind(yi, ci.lb, ci.ub) } annotext <- .fcf(annotext, digits[[1]]) annotext <- sub("-", annosym[4], annotext, fixed=TRUE) if (missing(width)) { width <- apply(annotext, 2, function(x) max(nchar(x))) } else { if (length(width) == 1L) width <- rep(width, ncol(annotext)) } for (j in seq_len(ncol(annotext))) { annotext[,j] <- formatC(annotext[,j], width=width[j]) } annotext <- cbind(annotext[,1], annosym[1], annotext[,2], annosym[2], annotext[,3], annosym[3]) annotext <- apply(annotext, 1, paste, collapse="") annotext[grepl("NA", annotext, fixed=TRUE)] <- "" annotext <- sub("-", annosym[4], annotext, fixed=TRUE) par(family=names(fonts)[2], font=fonts[2]) ltext(ddd$textpos[2], rows, labels=annotext, pos=2, cex=cex, col=col, ...) par(family=names(fonts)[1], font=fonts[1]) } ### add yi points for (i in seq_len(k)) { ### need to skip missings (if check below will otherwise throw an error) if (is.na(yi[i])) next if (yi[i] >= alim[1] && yi[i] <= alim[2]) lpoints(x=yi[i], y=rows[i], pch=pch[i], cex=cex*psize[i], col=col[i], ...) } #lpoints(x=yi, y=rows, pch=pch, cex=cex*psize, ...) ### add header ltext(ddd$textpos[1], ylim[2]-(top-1)+1, header.left, pos=4, font=2, cex=cex, ...) ltext(ddd$textpos[2], ylim[2]-(top-1)+1, header.right, pos=2, font=2, cex=cex, ...) ######################################################################### ### return some information about plot invisibly res <- list(xlim=par("usr")[1:2], alim=alim, at=at, ylim=ylim, rows=rows, cex=cex, cex.lab=cex.lab, cex.axis=cex.axis) invisible(res) } metafor/R/profile.rma.uni.selmodel.r0000644000176200001440000002524514046734053017124 0ustar liggesusersprofile.rma.uni.selmodel <- function(fitted, tau2, delta, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(fitted), must="rma.uni.selmodel") if (steps < 2) stop(mstyle$stop("Argument 'steps' must be >= 2.")) x <- fitted if (x$betaspec) ### TODO: consider allowing profiling over beta values as well stop(mstyle$stop("Cannot profile when one or more beta values were fixed.")) parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } ddd <- list(...) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### ### check if user has not specified tau2 or delta argument if (missing(tau2) && missing(delta)) { mc <- match.call() ### total number of non-fixed components comps <- ifelse(!is.element(x$method, c("FE","EE","CE")) && !x$tau2.fix, 1, 0) + sum(!x$delta.fix) if (comps == 0) stop(mstyle$stop("No components in the model for which a profile likelihood can be constructed.")) if (plot) { if (dev.cur() == 1) { par(mfrow=c(comps, 1)) #on.exit(par(mfrow=c(1,1)), add=TRUE) } } sav <- list() j <- 0 if (!is.element(x$method, c("FE","EE","CE")) && !x$tau2.fix) { j <- j + 1 mc.vc <- mc mc.vc$tau2 <- 1 mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling tau2\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } if (any(!x$delta.fix)) { for (pos in seq_len(x$deltas)[!x$delta.fix]) { j <- j + 1 mc.vc <- mc mc.vc$delta <- pos mc.vc$time <- FALSE #mc.vc$fitted <- quote(x) if (progbar) cat(mstyle$verbose(paste("Profiling delta =", pos, "\n"))) sav[[j]] <- eval(mc.vc, envir=parent.frame()) } } ### if there is just one component, turn the list of lists into a simple list if (comps == 1) sav <- sav[[1]] sav$comps <- comps if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(sav) <- "profile.rma" return(invisible(sav)) } ### check if user has specified more than one of these arguments if (sum(!missing(tau2), !missing(delta)) > 1L) stop(mstyle$stop("Must specify only one of the 'tau2' or 'delta' arguments.")) ### check if model actually contains (at least one) such a component and that it was actually estimated if (!missing(tau2) && (is.element(x$method, c("FE","EE","CE")) || x$tau2.fix)) stop(mstyle$stop("Model does not contain an (estimated) 'tau2' component.")) if (!missing(delta) && all(x$delta.fix)) stop(mstyle$stop("Model does not contain any estimated 'delta' components.")) ### check if user specified more than one tau2 or delta component if (!missing(tau2) && (length(tau2) > 1L)) stop(mstyle$stop("Can only specify one 'tau2' component.")) if (!missing(delta) && (length(delta) > 1L)) stop(mstyle$stop("Can only specify one 'delta' component.")) ### check if user specified a logical if (!missing(tau2) && is.logical(tau2) && isTRUE(tau2)) tau2 <- 1 if (!missing(delta) && is.logical(delta)) stop(mstyle$stop("Must specify the number for the 'delta' component.")) ### check if user specified a component that does not exist if 
(!missing(tau2) && (tau2 > 1 || tau2 <= 0)) stop(mstyle$stop("No such 'tau2' component in the model.")) if (!missing(delta) && (delta > x$deltas || delta <= 0)) stop(mstyle$stop("No such 'delta' component in the model.")) ### check if user specified a component that was fixed if (!missing(tau2) && x$tau2.fix) stop(mstyle$stop("Specified 'tau2' component was fixed.")) if (!missing(delta) && x$delta.fix[delta]) stop(mstyle$stop("Specified 'delta' component was fixed.")) ### if everything is good so far, get value of the variance component and set 'comp' delta.pos <- NA if (!missing(tau2)) { vc <- x$tau2 comp <- "tau2" tau2.pos <- 1 } if (!missing(delta)) { vc <- x$delta[delta] comp <- "delta" delta.pos <- delta } #return(list(comp=comp, vc=vc)) if (missing(xlim)) { ### if the user has not specified xlim, set it automatically if (comp == "tau2") { if (is.na(x$se.tau2)) { vc.lb <- max(0, vc/4) vc.ub <- min(max(.1, vc*4), x$tau2.max) } else { vc.lb <- max(0, vc - qnorm(.995) * x$se.tau2) vc.ub <- min(max(.1, vc + qnorm(.995) * x$se.tau2), x$tau2.max) } } if (comp == "delta") { if (is.na(x$se.delta[delta])) { vc.lb <- max(0, vc/4, x$delta.min[delta]) vc.ub <- min(max(.1, vc*4), x$delta.max[delta]) } else { vc.lb <- max(0, vc - qnorm(.995) * x$se.delta[delta], x$delta.min[delta]) vc.ub <- min(max(.1, vc + qnorm(.995) * x$se.delta[delta]), x$delta.max[delta]) } } ### if that fails, throw an error if (is.na(vc.lb) || is.na(vc.ub)) stop(mstyle$stop("Cannot set 'xlim' automatically. Please set this argument manually.")) xlim <- c(vc.lb, vc.ub) } else { if (length(xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) xlim <- sort(xlim) if (comp == "tau2") { if (xlim[1] < 0) stop(mstyle$stop("Lower bound for profiling must be >= 0.")) } if (comp == "delta") { if (xlim[1] < x$delta.min[delta]) stop(mstyle$stop(paste0("Lower bound for profiling must be >= ", x$delta.min[delta], "."))) if (xlim[2] > x$delta.max[delta]) stop(mstyle$stop(paste0("Upper bound for profiling must be <= ", x$delta.max[delta], "."))) } } vcs <- seq(xlim[1], xlim[2], length.out=steps) #return(vcs) if (length(vcs) <= 1L) stop(mstyle$stop("Cannot set 'xlim' automatically. 
Please set this argument manually.")) if (parallel == "no") res <- pbapply::pblapply(vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE) if (parallel == "multicore") res <- pbapply::pblapply(vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE, cl=ncpus) #res <- parallel::mclapply(vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE, mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE) #res <- parallel::clusterApplyLB(cl, vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE) } else { res <- pbapply::pblapply(vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE, cl=cl) #res <- parallel::parLapply(cl, vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE) #res <- parallel::clusterApply(cl, vcs, .profile.rma.uni.selmodel, obj=x, comp=comp, delta.pos=delta.pos, parallel=parallel, profile=TRUE) } } lls <- sapply(res, function(x) x$ll) beta <- do.call("rbind", lapply(res, function(x) t(x$beta))) ci.lb <- do.call("rbind", lapply(res, function(x) t(x$ci.lb))) ci.ub <- do.call("rbind", lapply(res, function(x) t(x$ci.ub))) ######################################################################### if (any(lls >= logLik(x) + lltol, na.rm=TRUE)) warning(mstyle$warning("At least one profiled log-likelihood value is larger than the log-likelihood of the fitted model."), call.=FALSE) if (all(is.na(lls))) warning(mstyle$warning("All model fits failed. Cannot draw profile likelihood plot."), call.=FALSE) beta <- data.frame(beta) ci.lb <- data.frame(ci.lb) ci.ub <- data.frame(ci.ub) names(beta) <- rownames(x$beta) names(ci.lb) <- rownames(x$beta) names(ci.ub) <- rownames(x$beta) if (missing(ylim)) { if (any(!is.na(lls))) { if (xlim[1] <= vc && xlim[2] >= vc) { ylim <- range(c(logLik(x),lls), na.rm=TRUE) } else { ylim <- range(lls, na.rm=TRUE) } } else { ylim <- rep(logLik(x), 2) } ylim[1] <- ylim[1] - .1 ylim[2] <- ylim[2] + .1 } else { if (length(ylim) != 2L) stop(mstyle$stop("Argument 'ylim' should be a vector of length 2.")) ylim <- sort(ylim) } if (comp == "tau2") { xlab <- expression(paste(tau^2, " Value")) title <- expression(paste("Profile Plot for ", tau^2)) } if (comp == "delta") { if (x$deltas == 1L) { xlab <- expression(paste(delta, " Value")) title <- expression(paste("Profile Plot for ", delta)) } else { xlab <- bquote(delta[.(delta)] ~ "Value") title <- bquote("Profile Plot for" ~ delta[.(delta)]) } } sav <- list(vc=vcs, ll=lls, beta=beta, ci.lb=ci.lb, ci.ub=ci.ub, comps=1, ylim=ylim, method=x$method, vc=vc, maxll=logLik(x), xlab=xlab, title=title) names(sav)[1] <- switch(comp, tau2="tau2", delta="delta") class(sav) <- "profile.rma" ######################################################################### if (plot) plot(sav, pch=pch, cline=cline, ...) 
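### Illustrative usage sketch (the data object 'dat' and the particular selection
### model below are assumptions chosen for illustration):
### res <- rma(yi, vi, data=dat, method="ML")
### sel <- selmodel(res, type="stepfun", steps=c(0.025, 1))
### profile(sel)            # profiles tau^2 and every estimated delta component
### profile(sel, delta=1)   # profiles only the first delta component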
######################################################################### if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } invisible(sav) } metafor/R/cumul.rma.uni.r0000644000176200001440000001251014046526671015002 0ustar liggesuserscumul.rma.uni <- function(x, order, digits, transf, targs, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!x$int.only) stop(mstyle$stop("Method only applicable for models without moderators.")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL ddd <- list(...) .chkdots(ddd, c("time", "decreasing")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (is.null(ddd$decreasing)) { decreasing <- FALSE } else { decreasing <- ddd$decreasing } ######################################################################### if (grepl("^order\\(", deparse(substitute(order)))) warning(mstyle$warning("Use of order() in 'order' argument is probably erroneous."), call.=FALSE) if (missing(order)) order <- seq_len(x$k.all) if (length(order) != x$k.all) stop(mstyle$stop(paste0("Length of the 'order' argument (", length(order), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) ### note: order variable is assumed to be of the same length as the size of the ### original dataset passed to the model fitting function and so we apply ### the same subsetting (if necessary) as was done during model fitting if (!is.null(x$subset)) order <- order[x$subset] order <- order(order, decreasing=decreasing) yi.f <- x$yi.f[order] vi.f <- x$vi.f[order] X.f <- cbind(x$X.f[order,]) weights.f <- x$weights.f[order] not.na <- x$not.na[order] slab <- x$slab[order] ids <- x$ids[order] beta <- rep(NA_real_, x$k.f) se <- rep(NA_real_, x$k.f) zval <- rep(NA_real_, x$k.f) pval <- rep(NA_real_, x$k.f) ci.lb <- rep(NA_real_, x$k.f) ci.ub <- rep(NA_real_, x$k.f) QE <- rep(NA_real_, x$k.f) QEp <- rep(NA_real_, x$k.f) tau2 <- rep(NA_real_, x$k.f) I2 <- rep(NA_real_, x$k.f) H2 <- rep(NA_real_, x$k.f) ### note: skipping NA cases ### also: it is possible that model fitting fails, so that generates more NAs (these NAs will always be shown in output) if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!not.na[i]) next res <- try(suppressWarnings(rma.uni(yi.f, vi.f, weights=weights.f, intercept=TRUE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, subset=seq_len(i))), silent=TRUE) if (inherits(res, "try-error")) next beta[i] <- res$beta se[i] <- res$se zval[i] <- res$zval pval[i] <- res$pval ci.lb[i] <- res$ci.lb ci.ub[i] <- res$ci.ub QE[i] <- res$QE QEp[i] <- res$QEp tau2[i] <- res$tau2 I2[i] <- res$I2 H2[i] <- res$H2 } if (progbar) pbapply::closepb(pbar) ######################################################################### ### if requested, apply transformation function if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta 
<- sapply(beta, transf, targs) se <- rep(NA,x$k.f) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(estimate=beta[not.na], se=se[not.na], zval=zval[not.na], pvals=pval[not.na], ci.lb=ci.lb[not.na], ci.ub=ci.ub[not.na], Q=QE[not.na], Qp=QEp[not.na], tau2=tau2[not.na], I2=I2[not.na], H2=H2[not.na]) out$slab <- slab[not.na] out$ids <- ids[not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(estimate=beta, se=se, zval=zval, pvals=pval, ci.lb=ci.lb, ci.ub=ci.ub, Q=QE, Qp=QEp, tau2=tau2, I2=I2, H2=H2) out$slab <- slab out$ids <- ids } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) if (is.element(x$test, c("knha","adhoc","t"))) names(out)[3] <- "tval" ### remove tau2 for FE/EE/CE models if (is.element(x$method, c("FE","EE","CE"))) out <- out[-9] out$digits <- digits out$transf <- transf out$slab.null <- x$slab.null out$level <- x$level out$measure <- x$measure out$test <- x$test attr(out$estimate, "measure") <- x$measure if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- c("list.rma", "cumul.rma") return(out) } metafor/R/qqnorm.rma.peto.r0000644000176200001440000000436113770400464015345 0ustar liggesusersqqnorm.rma.peto <- function(y, type="rstandard", pch=19, label=FALSE, offset=0.3, pos=13, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(y), must="rma.peto") x <- y type <- match.arg(type, c("rstandard", "rstudent")) if (x$k == 1) stop(mstyle$stop("Stopped because k = 1.")) if (length(label) != 1L) stop(mstyle$stop("Argument 'label' should be of length 1.")) ######################################################################### if (type == "rstandard") { res <- rstandard(x) not.na <- !is.na(res$z) zi <- res$z[not.na] slab <- res$slab[not.na] ord <- order(zi) slab <- slab[ord] } else { res <- rstudent(x) not.na <- !is.na(res$z) zi <- res$z[not.na] slab <- res$slab[not.na] ord <- order(zi) slab <- slab[ord] } sav <- qqnorm(zi, pch=pch, bty="l", ...) abline(a=0, b=1, lty="solid", ...) #qqline(zi, ...) #abline(h=0, lty="dotted", ...) #abline(v=0, lty="dotted", ...) ######################################################################### ### labeling of points if ((is.character(label) && label=="none") || .isFALSE(label)) return(invisible(sav)) if ((is.character(label) && label=="all") || .isTRUE(label)) label <- x$k if (is.numeric(label)) { label <- round(label) if (label < 1 | label > x$k) stop(mstyle$stop("Out of range value for 'label' argument.")) pos.x <- sav$x[ord] pos.y <- sav$y[ord] dev <- abs(pos.x - pos.y) for (i in seq_len(x$k)) { if (sum(dev > dev[i]) < label) { if (pos <= 4) text(pos.x[i], pos.y[i], slab[i], pos=pos, offset=offset, ...) if (pos == 13) text(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] >= 0, 1, 3), offset=offset, ...) if (pos == 24) text(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i]-pos.y[i] <= 0, 2, 4), offset=offset, ...) #text(pos.x[i], pos.y[i], slab[i], pos=ifelse(pos.x[i] >= 0, 2, 4), offset=offset, ...) 
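### The special values pos=13 and pos=24 handled above place each label below/above
### (13) or left/right (24) of its point, depending on which side of the reference
### diagonal the point lies; a numeric 'label' marks that many of the most deviating
### points. Illustrative call, assuming 'res' is an existing "rma.peto" fit:
### qqnorm(res, type="rstandard", label=3, pos=13)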
} } } ######################################################################### invisible(sav) } metafor/R/rstandard.rma.uni.r0000644000176200001440000000577313770377135015656 0ustar liggesusersrstandard.rma.uni <- function(model, digits, type="marginal", ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.uni", notav=c("robust.rma", "rma.uni.selmodel")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("marginal", "conditional")) x <- model if (type == "conditional" && (!is.null(x$weights) || !x$weighted)) stop(mstyle$stop("Extraction of conditional residuals not available for models with non-standard weights.")) #if (type == "conditional" & inherits(x, "robust.rma")) # stop(mstyle$stop("Extraction of conditional residuals not available for objects of class \"robust.rma\".")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ######################################################################### options(na.action="na.omit") H <- hatvalues(x, type="matrix") options(na.action = na.act) ######################################################################### ImH <- diag(x$k) - H #ei <- ImH %*% cbind(x$yi) if (type == "marginal") { ei <- c(x$yi - x$X %*% x$beta) ei[abs(ei) < 100 * .Machine$double.eps] <- 0 #ei[abs(ei) < 100 * .Machine$double.eps * median(abs(ei), na.rm=TRUE)] <- 0 ### see lm.influence ### don't allow this; the SEs of the residuals cannot be estimated consistently for "robust.rma" objects #if (inherits(x, "robust.rma")) { # ve <- ImH %*% tcrossprod(x$meat,ImH) #} else { #ve <- ImH %*% tcrossprod(x$M,ImH) #} ve <- ImH %*% tcrossprod(x$M,ImH) #ve <- x$M + x$X %*% x$vb %*% t(x$X) - 2*H%*%x$M sei <- sqrt(diag(ve)) } if (type == "conditional") { li <- x$tau2 / (x$tau2 + x$vi) pred <- rep(NA_real_, x$k) for (i in seq_len(x$k)) { Xi <- matrix(x$X[i,], nrow=1) pred[i] <- li[i] * x$yi[i] + (1 - li[i]) * Xi %*% x$beta } ei <- x$yi - pred sei <- sqrt(x$vi^2 * 1/(x$vi + x$tau2) * (1 - diag(H))) } resid <- rep(NA_real_, x$k.f) seresid <- rep(NA_real_, x$k.f) stresid <- rep(NA_real_, x$k.f) resid[x$not.na] <- ei seresid[x$not.na] <- sei stresid[x$not.na] <- ei / sei ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na], se=seresid[x$not.na], z=stresid[x$not.na]) out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits class(out) <- "list.rma" return(out) } metafor/R/summary.escalc.r0000644000176200001440000002161114032032434015215 0ustar liggesuserssummary.escalc <- function(object, out.names=c("sei","zi","pval","ci.lb","ci.ub"), var.names, H0=0, append=TRUE, replace=TRUE, level=95, olim, digits, transf, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="escalc") x <- object level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) crit <- qnorm(level/2, lower.tail=FALSE) if (length(out.names) != 5L) stop(mstyle$stop("Argument 'out.names' must be of length 5.")) if (any(out.names != make.names(out.names, unique=TRUE))) { out.names <- make.names(out.names, unique=TRUE) warning(mstyle$warning(paste0("Argument 'out.names' does not contain syntactically valid variable names.\n Variable names adjusted to: out.names = c('", out.names[1], "', '", out.names[2], "', '", out.names[3], "', '", out.names[4], "', '", out.names[5], "')."))) } if (missing(transf)) transf <- FALSE ######################################################################### ### figure out names of yi and vi variables (if possible) and extract the values (if possible) if (missing(var.names)) { ### if var.names not specified, take from object if possible if (!is.null(attr(x, "yi.names"))) { ### if yi.names attributes is available yi.name <- attr(x, "yi.names")[1] ### take the first entry to be the yi variable } else { ### if not, see if 'yi' is in the object and assume that is the yi variable if (!is.element("yi", names(x))) stop(mstyle$stop("Cannot determine name of the 'yi' variable.")) yi.name <- "yi" } if (!is.null(attr(x, "vi.names"))) { ### if vi.names attributes is available vi.name <- attr(x, "vi.names")[1] ### take the first entry to be the vi variable } else { ### if not, see if 'vi' is in the object and assume that is the vi variable if (!is.element("vi", names(x))) stop(mstyle$stop("Cannot determine name of the 'vi' variable.")) vi.name <- "vi" } } else { if (length(var.names) != 2L) stop(mstyle$stop("Argument 'var.names' must be of length 2.")) if (any(var.names != make.names(var.names, unique=TRUE))) { var.names <- make.names(var.names, unique=TRUE) warning(mstyle$warning(paste0("Argument 'var.names' does not contain syntactically valid variable names.\n Variable names adjusted to: var.names = c('", var.names[1], "', '", var.names[2], "')."))) } yi.name <- var.names[1] vi.name <- var.names[2] } yi <- x[[yi.name]] vi <- x[[vi.name]] if (is.null(yi) || is.null(vi)) stop(mstyle$stop(paste0("Cannot find variables '", yi.name, "' and/or '", vi.name, "' in the data frame."))) ######################################################################### k <- length(yi) if (length(H0) == 1L) H0 <- rep(H0, k) ### compute sei, zi, and lower/upper CI bounds; when applying a transformation, compute the transformed outcome and CI bounds sei <- sqrt(vi) zi <- c(yi - H0) / sei pval <- 2*pnorm(abs(zi), lower.tail=FALSE) if (is.function(transf)) { ci.lb <- mapply(transf, yi - crit * sei, ...) ci.ub <- mapply(transf, yi + crit * sei, ...) yi <- mapply(transf, yi, ...) 
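### Illustrative usage sketch (the measure and data object below are assumptions
### chosen for illustration): after dat <- escalc(measure="OR", ai=ai, bi=bi,
### ci=ci, di=di, data=dat), summary(dat) appends sei/zi/pval/ci.lb/ci.ub on the
### log odds ratio scale, while summary(dat, transf=exp) back-transforms the
### outcomes and CI bounds to odds ratios (vi/sei/zi/pval are then dropped, as
### coded directly below).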
attr(x, "transf") <- TRUE vi <- NULL sei <- NULL zi <- NULL pval <- NULL } else { ci.lb <- yi - crit * sei ci.ub <- yi + crit * sei attr(x, "transf") <- FALSE } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] ### apply observation/outcome limits if specified if (!missing(olim)) { if (length(olim) != 2L) stop(mstyle$stop("Argument 'olim' must be of length 2.")) olim <- sort(olim) yi[yi < olim[1]] <- olim[1] # note: zi and pval are based on unconstrained yi yi[yi > olim[2]] <- olim[2] ci.lb[ci.lb < olim[1]] <- olim[1] ci.ub[ci.ub > olim[2]] <- olim[2] } x[[yi.name]] <- yi x[[vi.name]] <- vi #return(cbind(yi, vi, sei, zi, ci.lb, ci.ub)) ### put together dataset if (append) { ### if user wants to append dat <- data.frame(x) if (replace) { ### and wants to replace all values dat[[out.names[1]]] <- sei ### if variable does not exists in dat, it will be added dat[[out.names[2]]] <- zi ### if variable does not exists in dat, it will be added dat[[out.names[3]]] <- pval ### if variable does not exists in dat, it will be added dat[[out.names[4]]] <- ci.lb ### if variable does not exists in dat, it will be added dat[[out.names[5]]] <- ci.ub ### if variable does not exists in dat, it will be added } else { ### and only wants to replace any NA values if (is.element(out.names[1], names(dat))) { ### if sei variable is in data frame, replace NA values with newly calculated values is.na.sei <- is.na(dat[[out.names[1]]]) dat[[out.names[1]]][is.na.sei] <- sei[is.na.sei] } else { dat[[out.names[1]]] <- sei ### if sei variable does not exist in dat, just add as new variable } if (is.element(out.names[2], names(dat))) { ### if zi variable is in data frame, replace NA values with newly calculated values is.na.zi <- is.na(dat[[out.names[2]]]) dat[[out.names[2]]][is.na.zi] <- zi[is.na.zi] } else { dat[[out.names[2]]] <- zi ### if zi variable does not exist in dat, just add as new variable } if (is.element(out.names[3], names(dat))) { ### if pval variable is in data frame, replace NA values with newly calculated values is.na.pval <- is.na(dat[[out.names[3]]]) dat[[out.names[3]]][is.na.pval] <- pval[is.na.pval] } else { dat[[out.names[3]]] <- pval ### if pval variable does not exist in dat, just add as new variable } if (is.element(out.names[4], names(dat))) { ### if ci.lb variable is in data frame, replace NA values with newly calculated values is.na.ci.lb <- is.na(dat[[out.names[4]]]) dat[[out.names[4]]][is.na.ci.lb] <- ci.lb[is.na.ci.lb] } else { dat[[out.names[4]]] <- ci.lb ### if ci.lb variable does not exist in dat, just add as new variable } if (is.element(out.names[5], names(dat))) { ### if ci.ub variable is in data frame, replace NA values with newly calculated values is.na.ci.ub <- is.na(dat[[out.names[5]]]) dat[[out.names[5]]][is.na.ci.ub] <- ci.ub[is.na.ci.ub] } else { dat[[out.names[5]]] <- ci.ub ### if ci.ub variable does not exist in dat, just add as new variable } } } else { ### if user does not want to append if (is.function(transf)) { dat <- data.frame(yi, ci.lb, ci.ub) names(dat) <- c(yi.name, out.names[4:5]) } else { dat <- data.frame(yi, vi, sei, zi, pval, ci.lb, ci.ub) names(dat) <- c(yi.name, vi.name, out.names) } } ### update existing digits attribute if digits is specified if (!missing(digits)) { attr(dat, "digits") <- .get.digits(digits=digits, xdigits=attr(x, "digits"), dmiss=FALSE) } else { attr(dat, "digits") <- attr(x, "digits") } if (is.null(attr(dat, "digits"))) ### in case x no longer has a 'digits' attribute 
attr(dat, "digits") <- 4 ### update existing var.names attribute if var.names is specified ### and make sure all other yi.names and vi.names are added back in if (!missing(var.names)) { attr(dat, "yi.names") <- unique(c(var.names[1], attr(object, "yi.names"))) } else { attr(dat, "yi.names") <- unique(c(yi.name, attr(object, "yi.names"))) } if (!missing(var.names)) { attr(dat, "vi.names") <- unique(c(var.names[2], attr(object, "vi.names"))) } else { attr(dat, "vi.names") <- unique(c(vi.name, attr(object, "vi.names"))) } ### add 'sei.names', 'zi.names', 'pval.names', 'ci.lb.names', and 'ci.ub.names' to the first position of the corresponding attributes ### note: if "xyz" is not an attribute of the object, attr(object, "xyz") returns NULL, so this works fine attr(dat, "sei.names") <- unique(c(out.names[1], attr(object, "sei.names"))) attr(dat, "zi.names") <- unique(c(out.names[2], attr(object, "zi.names"))) attr(dat, "pval.names") <- unique(c(out.names[3], attr(object, "pval.names"))) attr(dat, "ci.lb.names") <- unique(c(out.names[4], attr(object, "ci.lb.names"))) attr(dat, "ci.ub.names") <- unique(c(out.names[5], attr(object, "ci.ub.names"))) ### TODO: clean up attribute elements that are no longer actually part of the object class(dat) <- c("escalc", "data.frame") return(dat) } metafor/R/formula.rma.r0000644000176200001440000000073613770400422014523 0ustar liggesusersformula.rma <- function(x, type="mods", ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma") type <- match.arg(type, c("mods", "yi", "scale")) if (type == "scale" && x$model != "rma.ls") stop(mstyle$stop("Can only use type='scale' for location-scale models.")) if (type == "mods") return(x$formula.mods) if (type == "yi") return(x$formula.yi) if (type == "scale") return(x$formula.scale) } metafor/R/nobs.rma.r0000644000176200001440000000035013770372731014022 0ustar liggesusersnobs.rma <- function(object, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") n.obs <- object$k.eff - ifelse(object$method == "REML", 1, 0) * object$p.eff return(n.obs) } metafor/R/llplot.r0000644000176200001440000002566414030121607013610 0ustar liggesusersllplot <- function(measure, yi, vi, sei, ai, bi, ci, di, n1i, n2i, data, subset, drop00=TRUE, xvals=1000, xlim, ylim, xlab, ylab, scale=TRUE, lty, lwd, col, level=99.99, refline=0, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) ### data setup if (missing(measure)) stop(mstyle$stop("Must specify an effect size or outcome measure via the 'measure' argument.")) .chkclass(class(measure), notap="rma", type="Function") if (!is.element(measure, c("GEN", "OR"))) stop(mstyle$stop("Currently only measure=\"GEN\" or measure=\"OR\" can be specified.")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (measure == "OR" && !requireNamespace("BiasedUrn", quietly=TRUE)) stop(mstyle$stop("Please install the 'BiasedUrn' package to use this function.")) if (missing(xlab)) { if (measure == "GEN") xlab <- "Observed Outcome" if (measure == "OR") xlab <- "Log Odds Ratio" } if (missing(ylab)) { if (scale) { ylab <- "Scaled Likelihood" } else { ylab <- "Likelihood" } } level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ### get ... argument ddd <- list(...) 
### set defaults or get onlyo1, addyi, and addvi arguments onlyo1 <- ifelse(is.null(ddd$onlyo1), FALSE, ddd$onlyo1) addyi <- ifelse(is.null(ddd$addyi), TRUE, ddd$addyi) addvi <- ifelse(is.null(ddd$addvi), TRUE, ddd$addvi) ######################################################################### ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } ### extract values, possibly from the data frame specified via data (arguments not specified are NULL) mf <- match.call() mf.subset <- mf[[match("subset", names(mf))]] mf.lty <- mf[[match("lty", names(mf))]] mf.lwd <- mf[[match("lwd", names(mf))]] mf.col <- mf[[match("col", names(mf))]] subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) lty <- eval(mf.lty, data, enclos=sys.frame(sys.parent())) lwd <- eval(mf.lwd, data, enclos=sys.frame(sys.parent())) col <- eval(mf.col, data, enclos=sys.frame(sys.parent())) if (measure == "GEN") { mf.yi <- mf[[match("yi", names(mf))]] mf.vi <- mf[[match("vi", names(mf))]] mf.sei <- mf[[match("sei", names(mf))]] yi <- eval(mf.yi, data, enclos=sys.frame(sys.parent())) vi <- eval(mf.vi, data, enclos=sys.frame(sys.parent())) sei <- eval(mf.sei, data, enclos=sys.frame(sys.parent())) if (is.null(vi)) { if (is.null(sei)) { stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) } else { vi <- sei^2 } } if (length(yi)==0L || length(vi)==0L) stop(mstyle$stop("Cannot extract outcomes. Check that all of the required \n information is specified via the appropriate arguments.")) if (length(yi) != length(vi)) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) k <- length(yi) ### number of outcomes before subsetting ### subsetting if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) yi <- yi[subset] vi <- vi[subset] } } if (measure == "OR") { mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci if (length(ai)==0L || length(bi)==0L || length(ci)==0L || length(di)==0L) stop(mstyle$stop("Cannot compute outcomes. 
Check that all of the required \n information is specified via the appropriate arguments.")) if (!all(length(ai) == c(length(ai),length(bi),length(ci),length(di)))) stop(mstyle$stop("Supplied data vectors are not all of the same length.")) if (any(c(ai > n1i, ci > n2i), na.rm=TRUE)) stop(mstyle$stop("One or more event counts are larger than the corresponding group sizes.")) if (any(c(ai, bi, ci, di) < 0, na.rm=TRUE)) stop(mstyle$stop("One or more counts are negative.")) k <- length(ai) ### number of outcomes before subsetting ### note studies that have at least one zero cell id0 <- c(ai == 0L | bi == 0L | ci == 0L | di == 0L) id0[is.na(id0)] <- FALSE ### note studies that have no events or all events id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ### if drop00=TRUE, set counts to NA for studies that have no events (or all events) in both arms if (drop00) { ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } ### subsetting if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] } dat <- escalc(measure="OR", ai=ai, bi=bi, ci=ci, di=di, drop00=drop00, onlyo1=onlyo1, addyi=addyi, addvi=addvi) yi <- dat$yi ### one or more yi/vi pairs may be NA/NA vi <- dat$vi ### one or more yi/vi pairs may be NA/NA } ######################################################################### ### study ids (1:k sequence before subsetting) ids <- seq_len(k) ### setting of lty, lwd, and col arguments (if a single value, repeat k times) ### if any of these arguments is not a single value, it must have the same length as the data before subsetting if (!is.null(lty)) { if (length(lty) == 1L) { lty <- rep(lty, k) } else { if (length(lty) != k) stop(mstyle$stop(paste0("Length of 'lty' argument (", length(lty), ") does not match length of data (", k, ")."))) } } if (!is.null(lwd)) { if (length(lwd) == 1L) { lwd <- rep(lwd, k) } else { if (length(lwd) != k) stop(mstyle$stop(paste0("Length of 'lwd' argument (", length(lwd), ") does not match length of data (", k, ")."))) } } if (!is.null(col)) { if (length(col) == 1L) { col <- rep(col, k) } else { if (length(col) != k) stop(mstyle$stop(paste0("Length of 'col' argument (", length(col), ") does not match length of data (", k, ")."))) } } ### if a subset of studies is specified if (!is.null(subset)) { ids <- ids[subset] lty <- lty[subset] lwd <- lwd[subset] col <- col[subset] id0 <- id0[subset] id00 <- id00[subset] } ### number of outcomes after subsetting k <- length(yi) ### check for NAs and act accordingly if (measure == "GEN") { has.na <- is.na(yi) | is.na(vi) } if (measure == "OR") { has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) } not.na <- !has.na if (any(has.na)) { if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] vi <- vi[not.na] ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] ids <- ids[not.na] lty <- lty[not.na] lwd <- lwd[not.na] col <- col[not.na] id0 <- id0[not.na] id00 <- id00[not.na] k <- length(ai) warning(mstyle$warning("Studies with NAs omitted from plotting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in studies.")) } ### at least one study left? 
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ######################################################################### ### set default line types (id0 studies = dashed line, id00 studies = dotted line, all others = solid line) if (measure == "GEN") { if (is.null(lty)) lty <- rep("solid", k) } if (measure == "OR") { if (is.null(lty)) lty <- ifelse(id0 | id00, ifelse(id00, "dotted", "dashed"), "solid") } ### set default line widths (4.0 to 0.4 according to the rank of vi) if (is.null(lwd)) lwd <- seq(from=4.0, to=0.4, length.out=k)[rank(vi)] ### set default line color (gray0 to gray60 according to the rank of vi) if (is.null(col)) col <- paste0("gray", round(seq(from=0, to=60, length.out=k))[rank(vi)]) ### set x-axis limits ci.lb <- yi - qnorm(level/2, lower.tail=FALSE) * sqrt(vi) ci.ub <- yi + qnorm(level/2, lower.tail=FALSE) * sqrt(vi) if (missing(xlim)) { xlim <- c(min(ci.lb, na.rm=TRUE),max(ci.ub, na.rm=TRUE)) } else { xlim <- sort(xlim) } xs <- seq(from=xlim[1], to=xlim[2], length.out=xvals) lls <- matrix(NA_real_, nrow=k, ncol=xvals) out <- matrix(TRUE, nrow=k, ncol=xvals) if (measure == "GEN") { for (i in seq_len(k)) { for (j in seq_len(xvals)) { lls[i,j] <- dnorm(yi[i], xs[j], sqrt(vi[i])) if (xs[j] >= ci.lb[i] & xs[j] <= ci.ub[i]) out[i,j] <- FALSE } } } if (measure == "OR") { for (i in seq_len(k)) { for (j in seq_len(xvals)) { lls[i,j] <- .dnchgi(xs[j], ai=ai[i], bi=bi[i], ci=ci[i], di=di[i], random=FALSE, dnchgcalc="dFNCHypergeo", dnchgprec=1e-10) if (xs[j] >= ci.lb[i] & xs[j] <= ci.ub[i]) out[i,j] <- FALSE } } } if (scale) { trapezoid <- function(x,y) sum(diff(x)*(y[-1]+y[-length(y)]))/2 lls.sum <- rep(NA_real_, k) for (i in seq_len(k)) { lls.sum[i] <- trapezoid(xs[!is.na(lls[i,])], lls[i,!is.na(lls[i,])]) } #lls.sum <- rowSums(lls, na.rm=TRUE) lls <- apply(lls, 2, "/", lls.sum) } lls[out] <- NA ### set y-axis limits if (missing(ylim)) { ylim <- c(0, max(lls, na.rm=TRUE)) } else { ylim <- sort(ylim) } plot(NA, NA, xlim=c(xlim[1], xlim[2]), ylim=ylim, xlab=xlab, ylab=ylab, ...) for (i in seq_len(k)[order(1/vi)]) { lines(xs, lls[i,], lty=lty[i], lwd=lwd[i], col=col[i], ...) } if (is.numeric(refline)) abline(v=refline, lty="solid", lwd=2, ...) invisible(lls) } metafor/R/rma.mh.r0000644000176200001440000006137014036334443013470 0ustar liggesusersrma.mh <- function(ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, measure="OR", data, slab, subset, add=1/2, to="only0", drop00=TRUE, ### for add/to/drop00, 1st element for escalc(), 2nd for MH method correct=TRUE, level=95, digits, verbose=FALSE, ...) 
{ ######################################################################### ###### setup mstyle <- .get.mstyle("crayon" %in% .packages()) ### check argument specifications if (!is.element(measure, c("OR","RR","RD","IRR","IRD"))) stop(mstyle$stop("Mantel-Haenszel method can only be used with measures OR, RR, RD, IRR, and IRD.")) if (length(add) == 1L) add <- c(add, 0) if (length(add) != 2L) stop(mstyle$stop("Argument 'add' should specify one or two values (see 'help(rma.mh)').")) if (length(to) == 1L) to <- c(to, "none") if (length(to) != 2L) stop(mstyle$stop("Argument 'to' should specify one or two values (see 'help(rma.mh)').")) if (length(drop00) == 1L) drop00 <- c(drop00, FALSE) if (length(drop00) != 2L) stop(mstyle$stop("Argument 'drop00' should specify one or two values (see 'help(rma.mh)').")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!is.element(to[1], c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) if (!is.element(to[2], c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) time.start <- proc.time() ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("outlist", "onlyo1", "addyi", "addvi", "time")) ### set defaults or get onlyo1, addyi, and addvi arguments onlyo1 <- ifelse(is.null(ddd$onlyo1), FALSE, ddd$onlyo1) addyi <- ifelse(is.null(ddd$addyi), TRUE, ddd$addyi) addvi <- ifelse(is.null(ddd$addvi), TRUE, ddd$addvi) ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ### set options(warn=1) if verbose > 2 if (verbose > 2) { opwarn <- options(warn=1) on.exit(options(warn=opwarn$warn)) } ######################################################################### if (verbose) message(mstyle$message("\nExtracting data and computing yi/vi values ...")) ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### extract slab and subset values, possibly from the data frame specified via data (arguments not specified are NULL) mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) ######################################################################### ### for RR, OR, and RD: extract/calculate ai,bi,ci,di,n1i,n2i values if (is.element(measure, c("RR","OR","RD"))) { x1i <- x2i <- t1i <- t2i <- x1i.f <- x2i.f <- t1i.f <- t2i.f <- NA mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci ni <- ai + bi + ci + di k <- length(ai) ### number of outcomes before subsetting k.all <- k 
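### Illustrative usage sketch (the variable names are assumptions based on the BCG
### vaccine example data that ships with the package):
### rma.mh(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
### Note that the first elements of 'add', 'to', and 'drop00' are used when
### computing the per-study yi/vi values via escalc(), while the second elements
### are applied to the 2x2 tables used by the Mantel-Haenszel method itself
### (defaults: add=c(1/2,0), to=c("only0","none"), drop00=c(TRUE,FALSE)).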
ids <- seq_len(k) ### generate study labels if none are specified if (verbose) message(mstyle$message("Generating/extracting study labels ...")) if (is.null(slab)) { slab.null <- TRUE slab <- ids } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) slab.null <- FALSE } ### if a subset of studies is specified if (!is.null(subset)) { if (verbose) message(mstyle$message("Subsetting ...")) subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] ni <- ni[subset] slab <- slab[subset] ids <- ids[subset] k <- length(ai) } ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ### calculate observed effect estimates and sampling variances dat <- escalc(measure=measure, ai=ai, bi=bi, ci=ci, di=di, add=add[1], to=to[1], drop00=drop00[1], onlyo1=onlyo1, addyi=addyi, addvi=addvi) yi <- dat$yi ### one or more yi/vi pairs may be NA/NA vi <- dat$vi ### one or more yi/vi pairs may be NA/NA ### if drop00[2]=TRUE, set counts to NA for studies that have no events (or all events) in both arms if (drop00[2]) { id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } ### save the actual cell frequencies and yi/vi values (including potential NAs) ai.f <- ai bi.f <- bi ci.f <- ci di.f <- di yi.f <- yi vi.f <- vi ni.f <- ni k.f <- k ### total number of tables including all NAs ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) not.na <- !has.na if (any(has.na)) { if (verbose) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] k <- length(ai) warning(mstyle$warning("Tables with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } ### at least one study left? 
if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) not.na.yivi <- !yivi.na if (any(yivi.na)) { if (verbose) message(mstyle$message("Handling NAs in yi/vi ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na.yivi] vi <- vi[not.na.yivi] ni <- ni[not.na.yivi] warning(mstyle$warning("Some yi/vi values are NA."), call.=FALSE) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back } if (na.act == "na.fail") stop(mstyle$stop("Missing yi/vi values.")) } k.yi <- length(yi) ### number of yi/vi pairs that are not NA (needed for QE df and fit.stats calculation) ### add/to procedures for the 2x2 tables for the actual meta-analysis ### note: technically, nothing needs to be added, but Stata/RevMan add 1/2 by default for only0 studies (but drop studies with no/all events) if (to[2] == "all") { ### always add to all cells in all studies ai <- ai + add[2] bi <- bi + add[2] ci <- ci + add[2] di <- di + add[2] } if (to[2] == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(ai == 0L | bi == 0L | ci == 0L | di == 0L) ai[id0] <- ai[id0] + add[2] bi[id0] <- bi[id0] + add[2] ci[id0] <- ci[id0] + add[2] di[id0] <- di[id0] + add[2] } if (to[2] == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(ai == 0L | bi == 0L | ci == 0L | di == 0L) if (any(id0)) { ai <- ai + add[2] bi <- bi + add[2] ci <- ci + add[2] di <- di + add[2] } } n1i <- ai + bi n2i <- ci + di Ni <- ai + bi + ci + di } ######################################################################### ### for IRR and IRD: extract/calculate x1i,x2i,t1i,t2i values if (is.element(measure, c("IRR","IRD"))) { ai <- bi <- ci <- di <- ai.f <- bi.f <- ci.f <- di.f <- NA mf.x1i <- mf[[match("x1i", names(mf))]] mf.x2i <- mf[[match("x2i", names(mf))]] mf.t1i <- mf[[match("t1i", names(mf))]] mf.t2i <- mf[[match("t2i", names(mf))]] x1i <- eval(mf.x1i, data, enclos=sys.frame(sys.parent())) x2i <- eval(mf.x2i, data, enclos=sys.frame(sys.parent())) t1i <- eval(mf.t1i, data, enclos=sys.frame(sys.parent())) t2i <- eval(mf.t2i, data, enclos=sys.frame(sys.parent())) ni <- t1i + t2i k <- length(x1i) ### number of outcomes before subsetting k.all <- k ids <- seq_len(k) ### generate study labels if none are specified if (verbose) message(mstyle$message("Generating/extracting study labels ...")) if (is.null(slab)) { slab.null <- TRUE slab <- ids } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) slab.null <- FALSE } ### if a subset of studies is specified if (!is.null(subset)) { if (verbose) message(mstyle$message("Subsetting ...")) subset <- .setnafalse(subset, k=k) x1i <- x1i[subset] x2i <- x2i[subset] t1i <- t1i[subset] t2i <- t2i[subset] ni <- ni[subset] slab <- slab[subset] ids <- ids[subset] k <- length(x1i) } ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ### calculate observed effect estimates and sampling variances dat <- escalc(measure=measure, x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, add=add[1], to=to[1], drop00=drop00[1], onlyo1=onlyo1, addyi=addyi, addvi=addvi) yi <- dat$yi ### one or more yi/vi pairs may be NA/NA vi <- dat$vi ### one or more yi/vi pairs may be NA/NA ### if drop00[2]=TRUE, set counts to NA for studies that have no events in both arms if 
(drop00[2]) { id00 <- c(x1i == 0L & x2i == 0L) id00[is.na(id00)] <- FALSE x1i[id00] <- NA x2i[id00] <- NA } ### save the actual cell frequencies and yi/vi values (including potential NAs) x1i.f <- x1i x2i.f <- x2i t1i.f <- t1i t2i.f <- t2i yi.f <- yi vi.f <- vi ni.f <- ni k.f <- k ### total number of tables including all NAs ### check for NAs in table data and act accordingly has.na <- is.na(x1i) | is.na(x2i) | is.na(t1i) | is.na(t2i) not.na <- !has.na if (any(has.na)) { if (verbose) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { x1i <- x1i[not.na] x2i <- x2i[not.na] t1i <- t1i[not.na] t2i <- t2i[not.na] k <- length(x1i) warning(mstyle$warning("Tables with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) not.na.yivi <- !yivi.na if (any(yivi.na)) { if (verbose) message(mstyle$message("Handling NAs in yi/vi ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na.yivi] vi <- vi[not.na.yivi] ni <- ni[not.na.yivi] warning(mstyle$warning("Some yi/vi values are NA."), call.=FALSE) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back } if (na.act == "na.fail") stop(mstyle$stop("Missing yi/vi values.")) } k.yi <- length(yi) ### number of yi/vi pairs that are not NA (needed for QE df and fitstats calculation) ### add/to procedures for the 2x2 tables for the actual meta-analysis ### note: technically, nothing needs to be added if (to[2] == "all") { ### always add to all cells in all studies x1i <- x1i + add[2] x2i <- x2i + add[2] } if (to[2] == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(x1i == 0L | x2i == 0L) x1i[id0] <- x1i[id0] + add[2] x2i[id0] <- x2i[id0] + add[2] } if (to[2] == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(x1i == 0L | x2i == 0L) if (any(id0)) { x1i <- x1i + add[2] x2i <- x2i + add[2] } } Ti <- t1i + t2i } ######################################################################### level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) CO <- COp <- MH <- MHp <- BD <- BDp <- TA <- TAp <- k.pos <- NA ###### model fitting, test statistics, and confidence intervals if (verbose) message(mstyle$message("Model fitting ...")) if (measure == "OR") { Pi <- ai/Ni + di/Ni Qi <- bi/Ni + ci/Ni Ri <- (ai/Ni) * di Si <- (bi/Ni) * ci R <- sum(Ri) S <- sum(Si) if (identical(R,0) || identical(S,0) || identical(R,0L) || identical(S,0L)) { beta.exp <- NA beta <- NA se <- NA zval <- NA pval <- NA ci.lb <- NA ci.ub <- NA } else { beta.exp <- R/S beta <- log(beta.exp) se <- sqrt(1/2 * (sum(Pi*Ri)/R^2 + sum(Pi*Si + Qi*Ri)/(R*S) + sum(Qi*Si)/S^2)) ### based on Robins et al. 
(1986) zval <- beta / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) ci.lb <- beta - qnorm(level/2, lower.tail=FALSE) * se ci.ub <- beta + qnorm(level/2, lower.tail=FALSE) * se } names(beta) <- "intrcpt" vb <- matrix(se^2, dimnames=list("intrcpt", "intrcpt")) ### Cochran and Cochran-Mantel-Haenszel Statistics xt <- ai + ci yt <- bi + di if (identical(sum(xt),0) || identical(sum(yt),0) || identical(sum(xt),0L) || identical(sum(yt),0L)) { CO <- NA COp <- NA MH <- NA MHp <- NA } else { CO <- (abs(sum(ai - (n1i/Ni)*xt)) - ifelse(correct, 0.5, 0))^2 / sum((n1i/Ni)*(n2i/Ni)*(xt*(yt/Ni))) COp <- pchisq(CO, df=1, lower.tail=FALSE) MH <- (abs(sum(ai - (n1i/Ni)*xt)) - ifelse(correct, 0.5, 0))^2 / sum((n1i/Ni)*(n2i/Ni)*(xt*(yt/(Ni-1)))) MHp <- pchisq(MH, df=1, lower.tail=FALSE) } ### Breslow-Day and Tarone's Test for Heterogeneity if (is.na(beta)) { BD <- NA TA <- NA BDp <- NA TAp <- NA k.pos <- 0 } else { if (identical(beta.exp,1) || identical(beta.exp,1L)) { N11 <- (n1i/Ni)*xt } else { A <- beta.exp * (n1i + xt) + (n2i - xt) B <- sqrt(A^2 - 4*n1i*xt*beta.exp*(beta.exp-1)) N11 <- (A-B) / (2*(beta.exp-1)) } pos <- (N11 > 0) & (xt > 0) & (yt > 0) k.pos <- sum(pos) N11 <- N11[pos] N12 <- n1i[pos] - N11 N21 <- xt[pos] - N11 N22 <- N11 - n1i[pos] - xt[pos] + Ni[pos] BD <- max(0, sum((ai[pos]-N11)^2 / (1/N11 + 1/N12 + 1/N21 + 1/N22)^(-1))) TA <- max(0, BD - sum(ai[pos]-N11)^2 / sum((1/N11 + 1/N12 + 1/N21 + 1/N22)^(-1))) if (k.pos > 1) { BDp <- pchisq(BD, df=k.pos-1, lower.tail=FALSE) TAp <- pchisq(TA, df=k.pos-1, lower.tail=FALSE) } else { BDp <- NA TAp <- NA } } } if (measure == "RR") { R <- sum(ai * (n2i/Ni)) S <- sum(ci * (n1i/Ni)) if (identical(sum(ai),0) || identical(sum(ci),0) || identical(sum(ai),0L) || identical(sum(ci),0L)) { beta.exp <- NA beta <- NA se <- NA zval <- NA pval <- NA ci.lb <- NA ci.ub <- NA } else { beta.exp <- R/S beta <- log(beta.exp) se <- sqrt(sum(((n1i/Ni)*(n2i/Ni)*(ai+ci) - (ai/Ni)*ci)) / (R*S)) zval <- beta / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) ci.lb <- beta - qnorm(level/2, lower.tail=FALSE) * se ci.ub <- beta + qnorm(level/2, lower.tail=FALSE) * se } names(beta) <- "intrcpt" vb <- matrix(se^2, dimnames=list("intrcpt", "intrcpt")) } if (measure == "RD") { beta <- sum(ai*(n2i/Ni) - ci*(n1i/Ni)) / sum(n1i*(n2i/Ni)) se <- sqrt((beta * (sum(ci*(n1i/Ni)^2 - ai*(n2i/Ni)^2 + (n1i/Ni)*(n2i/Ni)*(n2i-n1i)/2)) + sum(ai*(n2i-ci)/Ni + ci*(n1i-ai)/Ni)/2) / sum(n1i*(n2i/Ni))^2) ### equation in: Sato, Greenland, & Robins (1989) #se <- sqrt(sum(((ai/Ni^2)*bi*(n2i^2/n1i) + (ci/Ni^2)*di*(n1i^2/n2i))) / sum(n1i*(n2i/Ni))^2) ### equation in: Greenland & Robins (1985) zval <- beta / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) ci.lb <- beta - qnorm(level/2, lower.tail=FALSE) * se ci.ub <- beta + qnorm(level/2, lower.tail=FALSE) * se names(beta) <- "intrcpt" vb <- matrix(se^2, dimnames=list("intrcpt", "intrcpt")) } if (measure == "IRR") { R <- sum(x1i * (t2i/Ti)) S <- sum(x2i * (t1i/Ti)) if (identical(sum(x1i),0) || identical(sum(x2i),0) || identical(sum(x1i),0L) || identical(sum(x2i),0L)) { beta.exp <- NA beta <- NA se <- NA zval <- NA pval <- NA ci.lb <- NA ci.ub <- NA } else { beta.exp <- R/S beta <- log(beta.exp) se <- sqrt(sum((t1i/Ti)*(t2i/Ti)*(x1i+x2i)) / (R*S)) zval <- beta / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) ci.lb <- beta - qnorm(level/2, lower.tail=FALSE) * se ci.ub <- beta + qnorm(level/2, lower.tail=FALSE) * se } names(beta) <- "intrcpt" vb <- matrix(se^2, dimnames=list("intrcpt", "intrcpt")) ### Mantel-Haenszel Statistic xt <- x1i + x2i if 
(identical(sum(xt),0) || identical(sum(xt),0L)) { MH <- NA MHp <- NA } else { MH <- (abs(sum(x1i - xt*(t1i/Ti))) - ifelse(correct, 0.5, 0))^2 / sum(xt*(t1i/Ti)*(t2i/Ti)) MHp <- pchisq(MH, df=1, lower.tail=FALSE) } } if (measure == "IRD") { beta <- sum((x1i*t2i - x2i*t1i)/Ti) / sum((t1i/Ti)*t2i) se <- sqrt(sum(((t1i/Ti)*t2i)^2*(x1i/t1i^2+x2i/t2i^2))) / sum((t1i/Ti)*t2i) ### from Rothland et al. (2008), chapter 15 zval <- beta / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) ci.lb <- beta - qnorm(level/2, lower.tail=FALSE) * se ci.ub <- beta + qnorm(level/2, lower.tail=FALSE) * se names(beta) <- "intrcpt" vb <- matrix(se^2, dimnames=list("intrcpt", "intrcpt")) } ######################################################################### ### heterogeneity test (inverse variance method) if (verbose) message(mstyle$message("Heterogeneity testing ...")) wi <- 1/vi if (k.yi > 1) { QE <- max(0, sum(wi*(yi-beta)^2)) QEp <- pchisq(QE, df=k.yi-1, lower.tail=FALSE) I2 <- max(0, 100 * (QE - (k.yi-1)) / QE) H2 <- QE / (k.yi-1) } else { QE <- 0 QEp <- 1 I2 <- 0 H2 <- 1 } ######################################################################### ###### fit statistics if (verbose) message(mstyle$message("Computing fit statistics and log likelihood ...")) if (k.yi >= 1) { ll.ML <- -1/2 * (k.yi) * log(2*base::pi) - 1/2 * sum(log(vi)) - 1/2 * QE ll.REML <- -1/2 * (k.yi-1) * log(2*base::pi) + 1/2 * log(k.yi) - 1/2 * sum(log(vi)) - 1/2 * log(sum(wi)) - 1/2 * QE dev.ML <- -2 * (ll.ML - sum(dnorm(yi, mean=yi, sd=sqrt(vi), log=TRUE))) AIC.ML <- -2 * ll.ML + 2 BIC.ML <- -2 * ll.ML + log(k.yi) AICc.ML <- -2 * ll.ML + 2 * max(k.yi, 3) / (max(k.yi, 3) - 2) dev.REML <- -2 * (ll.REML - 0) AIC.REML <- -2 * ll.REML + 2 BIC.REML <- -2 * ll.REML + log(k.yi-1) AICc.REML <- -2 * ll.REML + 2 * max(k.yi-1, 3) / (max(k.yi-1, 3) - 2) fit.stats <- matrix(c(ll.ML, dev.ML, AIC.ML, BIC.ML, AICc.ML, ll.REML, dev.REML, AIC.REML, BIC.REML, AICc.REML), ncol=2, byrow=FALSE) } else { fit.stats <- matrix(NA, nrow=5, ncol=2, byrow=FALSE) } dimnames(fit.stats) <- list(c("ll","dev","AIC","BIC","AICc"), c("ML","REML")) fit.stats <- data.frame(fit.stats) ######################################################################### ###### prepare output if (verbose) message(mstyle$message("Preparing output ...")) parms <- 1 p <- 1 p.eff <- 1 k.eff <- k tau2 <- 0 X.f <- cbind(rep(1,k.f)) intercept <- TRUE int.only <- TRUE btt <- 1 m <- 1 coef.na <- c(X=FALSE) method <- "FE" weighted <- TRUE test <- "z" ddf <- NA if (is.null(ddd$outlist)) { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, tau2.f=tau2, I2=I2, H2=H2, QE=QE, QEp=QEp, CO=CO, COp=COp, MH=MH, MHp=MHp, BD=BD, BDp=BDp, TA=TA, TAp=TAp, k=k, k.f=k.f, k.yi=k.yi, k.pos=k.pos, k.eff=k.eff, k.all=k.all, p=p, p.eff=p.eff, parms=parms, int.only=int.only, intercept=intercept, coef.na=coef.na, yi=yi, vi=vi, yi.f=yi.f, vi.f=vi.f, X.f=X.f, ai=ai, bi=bi, ci=ci, di=di, ai.f=ai.f, bi.f=bi.f, ci.f=ci.f, di.f=di.f, x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, x1i.f=x1i.f, x2i.f=x2i.f, t1i.f=t1i.f, t2i.f=t2i.f, ni=ni, ni.f=ni.f, ids=ids, not.na=not.na, subset=subset, not.na.yivi=not.na.yivi, slab=slab, slab.null=slab.null, measure=measure, method=method, weighted=weighted, test=test, ddf=ddf, dfs=ddf, btt=btt, m=m, digits=digits, level=level, add=add, to=to, drop00=drop00, correct=correct, fit.stats=fit.stats, formula.yi=NULL, formula.mods=NULL, version=packageVersion("metafor"), call=mf) } if (!is.null(ddd$outlist)) { if (ddd$outlist == "minimal") { res <- 
list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, I2=I2, H2=H2, QE=QE, QEp=QEp, CO=CO, COp=COp, MH=MH, MHp=MHp, BD=BD, BDp=BDp, TA=TA, TAp=TAp, k=k, k.yi=k.yi, k.pos=k.pos, k.eff=k.eff, p=p, p.eff=p.eff, parms=parms, int.only=int.only, measure=measure, method=method, test=test, ddf=ddf, dfs=ddf, btt=btt, m=m, digits=digits, fit.stats=fit.stats) } else { res <- eval(parse(text=paste0("list(", ddd$outlist, ")"))) } } time.end <- proc.time() res$time <- unname(time.end - time.start)[3] if (.isTRUE(ddd$time)) .print.time(res$time) if (verbose || .isTRUE(ddd$time)) cat("\n") class(res) <- c("rma.mh", "rma") return(res) } metafor/R/rma.peto.r0000644000176200001440000003225314036334420014024 0ustar liggesusersrma.peto <- function(ai, bi, ci, di, n1i, n2i, data, slab, subset, add=1/2, to="only0", drop00=TRUE, ### for add/to/drop00, 1st element for escalc(), 2nd for Peto's method level=95, digits, verbose=FALSE, ...) { ######################################################################### ###### setup mstyle <- .get.mstyle("crayon" %in% .packages()) ### check argument specifications if (length(add) == 1L) add <- c(add, 0) if (length(add) != 2L) stop(mstyle$stop("Argument 'add' should specify one or two values (see 'help(rma.peto)').")) if (length(to) == 1L) to <- c(to, "none") if (length(to) != 2L) stop(mstyle$stop("Argument 'to' should specify one or two values (see 'help(rma.peto)').")) if (length(drop00) == 1L) drop00 <- c(drop00, FALSE) if (length(drop00) != 2L) stop(mstyle$stop("Argument 'drop00' should specify one or two values (see 'help(rma.peto)').")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (!is.element(to[1], c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) if (!is.element(to[2], c("all","only0","if0all","none"))) stop(mstyle$stop("Unknown 'to' argument specified.")) time.start <- proc.time() ### get ... argument and check for extra/superfluous arguments ddd <- list(...) 
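### note: additional arguments 'outlist' and 'time' can be passed via ... ; 'outlist' controls which elements
### are included in the returned object (e.g., outlist="minimal"), 'time' prints the model fitting time
### (see the use of 'ddd' further below)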
.chkdots(ddd, c("outlist", "time")) measure <- "PETO" ### set measure here so that it can be added below ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ### set options(warn=1) if verbose > 2 if (verbose > 2) { opwarn <- options(warn=1) on.exit(options(warn=opwarn$warn)) } ######################################################################### if (verbose) message(mstyle$message("\nExtracting data and computing yi/vi values ...")) ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### extract slab and subset values, possibly from the data frame specified via data (arguments not specified are NULL) mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) ### extract/calculate ai,bi,ci,di,n1i,n2i values mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci ni <- ai + bi + ci + di k <- length(ai) ### number of outcomes before subsetting k.all <- k ids <- seq_len(k) ### generate study labels if none are specified if (verbose) message(mstyle$message("Generating/extracting study labels ...")) if (is.null(slab)) { slab.null <- TRUE slab <- ids } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) if (is.factor(slab)) slab <- as.character(slab) slab.null <- FALSE } ### if a subset of studies is specified if (!is.null(subset)) { if (verbose) message(mstyle$message("Subsetting ...")) subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] ni <- ni[subset] slab <- slab[subset] ids <- ids[subset] k <- length(ai) } ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ### calculate observed effect estimates and sampling variances dat <- escalc(measure="PETO", ai=ai, bi=bi, ci=ci, di=di, add=add[1], to=to[1], drop00=drop00[1]) yi <- dat$yi ### one or more yi/vi pairs may be NA/NA vi <- dat$vi ### one or more yi/vi pairs may be NA/NA ### if drop00[2]=TRUE, set counts to NA for studies that have no events (or all events) in both arms if (drop00[2]) { id00 <- c(ai == 0L & ci == 0L) | c(bi == 0L & di == 0L) id00[is.na(id00)] <- FALSE ai[id00] <- NA bi[id00] <- NA ci[id00] <- NA di[id00] <- NA } ### save the actual cell frequencies and yi/vi values (including potential NAs) ai.f <- ai bi.f <- bi ci.f <- ci di.f <- di yi.f <- yi vi.f <- vi ni.f <- ni k.f <- k ### total number of tables including all NAs ### check for NAs in table data and act accordingly has.na <- is.na(ai) | is.na(bi) | is.na(ci) | is.na(di) not.na <- !has.na if 
(any(has.na)) { if (verbose) message(mstyle$message("Handling NAs in table data ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { ai <- ai[not.na] bi <- bi[not.na] ci <- ci[not.na] di <- di[not.na] k <- length(ai) warning(mstyle$warning("Tables with NAs omitted from model fitting."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in tables.")) } ### at least one study left? if (k < 1) stop(mstyle$stop("Processing terminated since k = 0.")) ### check for NAs in yi/vi and act accordingly yivi.na <- is.na(yi) | is.na(vi) not.na.yivi <- !yivi.na if (any(yivi.na)) { if (verbose) message(mstyle$message("Handling NAs in yi/vi ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na.yivi] vi <- vi[not.na.yivi] ni <- ni[not.na.yivi] warning(mstyle$warning("Some yi/vi values are NA."), call.=FALSE) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back } if (na.act == "na.fail") stop(mstyle$stop("Missing yi/vi values.")) } k.yi <- length(yi) ### number of yi/vi pairs that are not NA (needed for QE df and fit.stats calculation) ### add/to procedures for the 2x2 tables for the actual meta-analysis ### note: technically, nothing needs to be added, but Stata/RevMan add 1/2 by default for only0 studies (but drop studies with no/all events) if (to[2] == "all") { ### always add to all cells in all studies ai <- ai + add[2] bi <- bi + add[2] ci <- ci + add[2] di <- di + add[2] } if (to[2] == "only0") { ### add to cells in studies with at least one 0 entry id0 <- c(ai == 0L | bi == 0L | ci == 0L | di == 0L) ai[id0] <- ai[id0] + add[2] bi[id0] <- bi[id0] + add[2] ci[id0] <- ci[id0] + add[2] di[id0] <- di[id0] + add[2] } if (to[2] == "if0all") { ### add to cells in all studies if there is at least one 0 entry id0 <- c(ai == 0L | bi == 0L | ci == 0L | di == 0L) if (any(id0)) { ai <- ai + add[2] bi <- bi + add[2] ci <- ci + add[2] di <- di + add[2] } } n1i <- ai + bi n2i <- ci + di Ni <- ai + bi + ci + di ######################################################################### level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ###### model fitting, test statistics, and confidence intervals if (verbose) message(mstyle$message("Model fitting ...")) xt <- ai + ci ### frequency of outcome1 in both groups combined yt <- bi + di ### frequency of outcome2 in both groups combined Ei <- xt * n1i / Ni Vi <- xt * yt * (n1i/Ni) * (n2i/Ni) / (Ni - 1) ### 0 when xt = 0 or yt = 0 in a table sumVi <- sum(Vi) if (sumVi == 0L) ### sumVi = 0 when xt or yt = 0 in *all* tables stop(mstyle$stop("One of the two outcomes never occurred in any of the tables. 
Peto's method cannot be used.")) beta <- sum(ai - Ei) / sumVi se <- sqrt(1/sumVi) zval <- beta / se pval <- 2*pnorm(abs(zval), lower.tail=FALSE) ci.lb <- beta - qnorm(level/2, lower.tail=FALSE) * se ci.ub <- beta + qnorm(level/2, lower.tail=FALSE) * se names(beta) <- "intrcpt" vb <- matrix(se^2, dimnames=list("intrcpt", "intrcpt")) ######################################################################### ### heterogeneity test (Peto's method) if (verbose) message(mstyle$message("Heterogeneity testing ...")) k.pos <- sum(Vi > 0) ### number of tables with positive sampling variance Vi[Vi == 0] <- NA ### set 0 sampling variances to NA QE <- max(0, sum((ai - Ei)^2 / Vi, na.rm=TRUE) - sum(ai - Ei)^2 / sum(Vi, na.rm=TRUE)) if (k.pos > 1) { QEp <- pchisq(QE, df=k.yi-1, lower.tail=FALSE) I2 <- max(0, 100 * (QE - (k.yi-1)) / QE) H2 <- QE / (k.yi-1) } else { QEp <- 1 I2 <- 0 H2 <- 1 } wi <- 1/vi RSS <- sum(wi*(yi-beta)^2) ######################################################################### ###### fit statistics if (verbose) message(mstyle$message("Computing fit statistics and log likelihood ...")) ll.ML <- -1/2 * (k.yi) * log(2*base::pi) - 1/2 * sum(log(vi)) - 1/2 * RSS ll.REML <- -1/2 * (k.yi-1) * log(2*base::pi) + 1/2 * log(k.yi) - 1/2 * sum(log(vi)) - 1/2 * log(sum(wi)) - 1/2 * RSS dev.ML <- -2 * (ll.ML - sum(dnorm(yi, mean=yi, sd=sqrt(vi), log=TRUE))) AIC.ML <- -2 * ll.ML + 2 BIC.ML <- -2 * ll.ML + log(k.yi) AICc.ML <- -2 * ll.ML + 2 * max(k.yi, 3) / (max(k.yi, 3) - 2) dev.REML <- -2 * (ll.REML - 0) AIC.REML <- -2 * ll.REML + 2 BIC.REML <- -2 * ll.REML + log(k.yi-1) AICc.REML <- -2 * ll.REML + 2 * max(k.yi-1, 3) / (max(k.yi-1, 3) - 2) fit.stats <- matrix(c(ll.ML, dev.ML, AIC.ML, BIC.ML, AICc.ML, ll.REML, dev.REML, AIC.REML, BIC.REML, AICc.REML), ncol=2, byrow=FALSE) dimnames(fit.stats) <- list(c("ll","dev","AIC","BIC","AICc"), c("ML","REML")) fit.stats <- data.frame(fit.stats) ######################################################################### ###### prepare output if (verbose) message(mstyle$message("Preparing output ...")) parms <- 1 p <- 1 p.eff <- 1 k.eff <- k tau2 <- 0 X.f <- cbind(rep(1,k.f)) intercept <- TRUE int.only <- TRUE btt <- 1 m <- 1 coef.na <- c(X=FALSE) method <- "FE" weighted <- TRUE test <- "z" ddf <- NA if (is.null(ddd$outlist)) { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, tau2.f=tau2, I2=I2, H2=H2, QE=QE, QEp=QEp, k=k, k.f=k.f, k.yi=k.yi, k.pos=k.pos, k.eff=k.eff, k.all=k.all, p=p, p.eff=p.eff, parms=parms, int.only=int.only, intercept=intercept, coef.na=coef.na, yi=yi, vi=vi, yi.f=yi.f, vi.f=vi.f, X.f=X.f, ai=ai, bi=bi, ci=ci, di=di, ai.f=ai.f, bi.f=bi.f, ci.f=ci.f, di.f=di.f, ni=ni, ni.f=ni.f, ids=ids, not.na=not.na, subset=subset, not.na.yivi=not.na.yivi, slab=slab, slab.null=slab.null, measure=measure, method=method, weighted=weighted, test=test, ddf=ddf, dfs=ddf, btt=btt, m=m, digits=digits, level=level, add=add, to=to, drop00=drop00, fit.stats=fit.stats, formula.yi=NULL, formula.mods=NULL, version=packageVersion("metafor"), call=mf) } if (!is.null(ddd$outlist)) { if (ddd$outlist == "minimal") { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, I2=I2, H2=H2, QE=QE, QEp=QEp, k=k, k.pos=k.pos, k.eff=k.eff, p=p, p.eff=p.eff, parms=parms, int.only=int.only, measure=measure, method=method, test=test, ddf=ddf, dfs=ddf, btt=btt, m=m, digits=digits, fit.stats=fit.stats) } else { res <- eval(parse(text=paste0("list(", ddd$outlist, ")"))) } } time.end <- 
proc.time() res$time <- unname(time.end - time.start)[3] if (.isTRUE(ddd$time)) .print.time(res$time) if (verbose || .isTRUE(ddd$time)) cat("\n") class(res) <- c("rma.peto", "rma") return(res) } metafor/R/vif.rma.r0000644000176200001440000001172613770471454013660 0ustar liggesusersvif.rma <- function(x, btt, intercept=FALSE, table=FALSE, digits, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("robust.rma", "rma.uni.selmodel")) vif.loc <- TRUE vif.scale <- TRUE if (inherits(x, "rma.ls")) { if (x$int.only) vif.loc <- FALSE if (x$Z.int.only) vif.scale <- FALSE if (!vif.loc && !vif.scale) stop(mstyle$stop("VIF not applicable for intercept-only models.")) } else { if (x$int.only) stop(mstyle$stop("VIF not applicable for intercept-only models.")) } if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ddd <- list(...) .chkdots(ddd, c("att")) ######################################################################### if (vif.loc) { vcov <- vcov(x) if (inherits(x, "rma.ls")) vcov <- vcov$beta if (missing(btt) || is.null(btt)) { ### remove intercept row/colum from vcov if model includes one and intercept=FALSE if (x$intercept && !intercept) vcov <- vcov[-1,-1,drop=FALSE] ### rescale vcov to correlation matrix rb <- cov2cor(vcov) ### try computing the VIFs vif <- try(diag(chol2inv(chol(rb))), silent=TRUE) if (inherits(vif, "try-error")) stop(mstyle$stop("Cannot invert var-cov matrix to compute VIFs.")) ### add NA for intercept if model includes one and intercept=FALSE if (x$intercept && !intercept && table) vif <- c(NA, vif) if (table) { tab <- coef(summary(x)) if (inherits(x, "rma.ls")) tab <- tab$beta vif <- cbind(tab, vif=vif, sif=sqrt(vif)) } else { names(vif) <- rownames(vcov) } res <- list(vif=vif, digits=digits, table=table, test=x$test) } else { btt <- .set.btt(btt, x$p, x$int.incl, colnames(x$X)) if (x$intercept && !intercept) { vcov <- vcov[-1,-1,drop=FALSE] btt <- btt - 1 if (any(btt < 1)) warning(mstyle$warning("Intercept term not included in GVIF computation."), call.=FALSE) btt <- btt[btt > 0] } m <- length(btt) rb <- cov2cor(vcov) detrv <- det(rb) gvif <- det(as.matrix(rb[btt,btt])) * det(as.matrix(rb[-btt,-btt])) / detrv gsif <- gvif^(1/(2*m)) if (x$intercept && !intercept) btt <- btt + 1 res <- list(gvif=gvif, gsif=gsif, btt=btt, m=m, digits=digits) } class(res) <- "vif.rma" } else { res <- NULL } ######################################################################### if (inherits(x, "rma.ls") && vif.scale) { res.loc <- res vcov <- vcov(x)$alpha if (is.null(ddd$att)) { ### remove intercept row/colum from vcov if model includes one and intercept=FALSE if (x$Z.intercept && !intercept) vcov <- vcov[-1,-1,drop=FALSE] ### rescale vcov to correlation matrix rb <- cov2cor(vcov) ### try computing the VIFs vif <- try(diag(chol2inv(chol(rb))), silent=TRUE) if (inherits(vif, "try-error")) stop(mstyle$stop("Cannot invert var-cov matrix to compute VIFs for the scale model.")) ### add NA for intercept if model includes one and intercept=FALSE if (x$Z.intercept && !intercept && table) vif <- c(NA, vif) if (table) { tab <- coef(summary(x))$alpha vif <- cbind(tab, vif=vif, sif=sqrt(vif)) } else { names(vif) <- rownames(vcov) } res.scale <- list(vif=vif, digits=digits, table=table, test=x$test) } else { att <- .set.btt(ddd$att, x$q, x$Z.int.incl, colnames(x$Z)) if (x$Z.intercept && 
!intercept) { vcov <- vcov[-1,-1,drop=FALSE] att <- att - 1 if (any(att < 1)) warning(mstyle$warning("Intercept term not included in GVIF computation."), call.=FALSE) att <- att[att > 0] } m <- length(att) rb <- cov2cor(vcov) detrv <- det(rb) gvif <- det(as.matrix(rb[att,att])) * det(as.matrix(rb[-att,-att])) / detrv gsif <- gvif^(1/(2*m)) if (x$Z.intercept && !intercept) att <- att + 1 res.scale <- list(gvif=gvif, gsif=gsif, btt=att, m=m, digits=digits) } class(res.scale) <- "vif.rma" if (vif.loc) { if (vif.scale) { res <- list(beta=res.loc, alpha=res.scale) } else { res <- res.loc } } else { res <- res.scale } } ######################################################################### return(res) } metafor/R/rstudent.rma.mh.r0000644000176200001440000000511013770377166015342 0ustar liggesusersrstudent.rma.mh <- function(model, digits, progbar=FALSE, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mh") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ddd <- list(...) .chkdots(ddd, c("time")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### delpred <- rep(NA_real_, x$k.f) vdelpred <- rep(NA_real_, x$k.f) ### note: skipping NA tables if (progbar) pbar <- pbapply::startpb(min=0, max=x$k.f) for (i in seq_len(x$k.f)) { if (progbar) pbapply::setpb(pbar, i) if (!x$not.na[i]) next if (is.element(x$measure, c("RR","OR","RD"))) { res <- try(suppressWarnings(rma.mh(ai=x$ai.f, bi=x$bi.f, ci=x$ci.f, di=x$di.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=-i)), silent=TRUE) } else { res <- try(suppressWarnings(rma.mh(x1i=x$x1i.f, x2i=x$x2i.f, t1i=x$t1i.f, t2i=x$t2i.f, measure=x$measure, add=x$add, to=x$to, drop00=x$drop00, correct=x$correct, level=x$level, subset=-i)), silent=TRUE) } if (inherits(res, "try-error")) next delpred[i] <- res$beta vdelpred[i] <- res$vb } if (progbar) pbapply::closepb(pbar) resid <- x$yi.f - delpred resid[abs(resid) < 100 * .Machine$double.eps] <- 0 #resid[abs(resid) < 100 * .Machine$double.eps * median(abs(resid), na.rm=TRUE)] <- 0 ### see lm.influence seresid <- sqrt(x$vi.f + vdelpred) stresid <- resid / seresid ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na.yivi], se=seresid[x$not.na.yivi], z=stresid[x$not.na.yivi]) out$slab <- x$slab[x$not.na.yivi] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na.yivi)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- "list.rma" return(out) } metafor/R/print.infl.rma.uni.r0000644000176200001440000000200113770373554015735 0ustar liggesusersprint.infl.rma.uni <- function(x, digits=x$digits, infonly=FALSE, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="infl.rma.uni") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (x$p == 1) { out <- list(rstudent=x$inf$rstudent, dffits=x$inf$dffits, cook.d=x$inf$cook.d, cov.r=x$inf$cov.r, tau2.del=x$inf$tau2.del, QE.del=x$inf$QE.del, hat=x$inf$hat, weight=x$inf$weight, dfbs=x$dfbs[[1]], inf=x$inf$inf, slab=x$inf$slab, digits=digits) class(out) <- "list.rma" if (infonly) out[["select"]] <- !is.na(x$is.infl) & x$is.infl } else { out <- x[1:2] out$inf[["digits"]] <- digits out$dfbs[["digits"]] <- digits attr(out$inf, ".rmspace") <- TRUE attr(out$dfbs, ".rmspace") <- TRUE if (infonly) { out$inf[["select"]] <- !is.na(x$is.infl) & x$is.infl out$dfbs[["select"]] <- !is.na(x$is.infl) & x$is.infl } } print(out) } metafor/R/confint.rma.uni.selmodel.r0000644000176200001440000003446314046726246017133 0ustar liggesusersconfint.rma.uni.selmodel <- function(object, parm, level, fixed=FALSE, tau2, delta, digits, transf, targs, verbose=FALSE, control, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.uni.selmodel") if (!missing(parm)) warning(mstyle$warning("Argument 'parm' (currently) ignored."), call.=FALSE) x <- object if (x$betaspec) ### TODO: consider providing CIs also for this case stop(mstyle$stop("Cannot obtain confidence intervals when one or more beta values were fixed.")) k <- x$k p <- x$p if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL if (missing(control)) control <- list() level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ddd <- list(...) 
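### note: additional arguments handled via ... : 'time' (print the runtime), 'xlim' (limits of the interval
### searched for the CI bounds; copied into control$vc.min / control$vc.max below), and 'extint' (allow
### uniroot() to extend the search interval via extendInt if a bound falls outside of it)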
.chkdots(ddd, c("time", "xlim", "extint")) if (.isTRUE(ddd$time)) time.start <- proc.time() if (!is.null(ddd$xlim)) { if (length(ddd$xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) control$vc.min <- ddd$xlim[1] control$vc.max <- ddd$xlim[2] } ### check if user has specified one of the tau2 or delta arguments random <- !all(missing(tau2), missing(delta)) if (!fixed && !random) { ### if both 'fixed' and 'random' are FALSE, obtain CIs for tau2 and all selection model parameters cl <- match.call() ### total number of non-fixed components comps <- ifelse(!is.element(x$method, c("FE","EE","CE")) && !x$tau2.fix, 1, 0) + sum(!x$delta.fix) if (comps == 0) stop(mstyle$stop("No components for which a CI can be obtained.")) res.all <- list() j <- 0 if (!is.element(x$method, c("FE","EE","CE")) && !x$tau2.fix) { j <- j + 1 cl.vc <- cl cl.vc$tau2 <- 1 cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for tau2\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } if (any(!x$delta.fix)) { for (pos in seq_len(x$deltas)[!x$delta.fix]) { j <- j + 1 cl.vc <- cl cl.vc$delta <- pos cl.vc$time <- FALSE #cl.vc$object <- quote(x) if (verbose) cat(mstyle$verbose(paste("\nObtaining CI for delta =", pos, "\n"))) res.all[[j]] <- eval(cl.vc, envir=parent.frame()) } } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } if (length(res.all) == 1L) { return(res.all[[1]]) } else { res.all$digits <- digits class(res.all) <- "list.confint.rma" return(res.all) } } ######################################################################### ######################################################################### ######################################################################### if (random) { type <- "PL" ###################################################################### ### check if user has specified more than one of these arguments if (sum(!missing(tau2), !missing(delta)) > 1L) stop(mstyle$stop("Must specify only one of the 'tau2' or 'delta' arguments.")) ### check if model actually contains (at least one) such a component and that it was actually estimated if (!missing(tau2) && (is.element(x$method, c("FE","EE","CE")) || x$tau2.fix)) stop(mstyle$stop("Model does not contain an (estimated) 'tau2' component.")) if (!missing(delta) && all(x$delta.fix)) stop(mstyle$stop("Model does not contain any estimated 'delta' components.")) ### check if user specified more than one tau2 or delta component if (!missing(tau2) && (length(tau2) > 1L)) stop(mstyle$stop("Can only specify one 'tau2' component.")) if (!missing(delta) && (length(delta) > 1L)) stop(mstyle$stop("Can only specify one 'delta' component.")) ### check if user specified a logical if (!missing(tau2) && is.logical(tau2) && isTRUE(tau2)) tau2 <- 1 if (!missing(delta) && is.logical(delta)) stop(mstyle$stop("Must specify the number for the 'delta' component.")) ### check if user specified a component that does not exist if (!missing(tau2) && (tau2 > 1 || tau2 <= 0)) stop(mstyle$stop("No such 'tau2' component in the model.")) if (!missing(delta) && (delta > x$deltas || delta <= 0)) stop(mstyle$stop("No such 'delta' component in the model.")) ### check if user specified a component that was fixed if (!missing(tau2) && x$tau2.fix) stop(mstyle$stop("Specified 'tau2' component was fixed.")) if (!missing(delta) && x$delta.fix[delta]) stop(mstyle$stop("Specified 'delta' component was fixed.")) ### if everything is good so far, get value of the 
variance component and set 'comp' delta.pos <- NA if (!missing(tau2)) { vc <- x$tau2 comp <- "tau2" tau2.pos <- 1 } if (!missing(delta)) { vc <- x$delta[delta] comp <- "delta" delta.pos <- delta } #return(list(comp=comp, vc=vc, tau2.pos=tau2.pos, delta.pos=delta.pos)) ###################################################################### ### set control parameters for uniroot() and possibly replace with user-defined values ### set vc.min and vc.max and possibly replace with user-defined values con <- list(tol=.Machine$double.eps^0.25, maxiter=1000, verbose=FALSE, eptries=10) if (comp == "tau2") { con$vc.min <- 0 con$vc.max <- min(max(ifelse(vc <= .Machine$double.eps^0.5, 10, max(10, vc*100)), con$vc.min), x$tau2.max) } if (comp == "delta") { con$vc.min <- max(0, x$delta.min[delta]) con$vc.max <- min(max(ifelse(vc <= .Machine$double.eps^0.5, 10, max(10, vc*10)), con$vc.min), x$delta.max[delta]) } con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose ###################################################################### vc.lb <- NA vc.ub <- NA ci.null <- FALSE ### logical if CI is a null set lb.conv <- FALSE ### logical if search converged for lower bound (LB) ub.conv <- FALSE ### logical if search converged for upper bound (UB) lb.sign <- "" ### for sign in case LB must be below vc.min ("<") or above vc.max (">") ub.sign <- "" ### for sign in case UB must be below vc.min ("<") or above vc.max (">") ###################################################################### ###################################################################### ###################################################################### ### Profile Likelihood method if (type == "PL") { if (con$vc.min > vc) stop(mstyle$stop("Lower bound of interval to be searched must be <= estimated value of component.")) if (con$vc.max < vc) stop(mstyle$stop("Upper bound of interval to be searched must be >= estimated value of component.")) objective <- qchisq(1-level, df=1) ################################################################### ### search for lower bound ### get diff value when setting component to vc.min; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the lower bound must be below vc.min epdiff <- abs(con$vc.min - vc) / con$eptries for (i in seq_len(con$eptries)) { res <- try(.profile.rma.uni.selmodel(con$vc.min, obj=x, comp=comp, delta.pos=delta.pos, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { vc.lb <- con$vc.min lb.conv <- TRUE if (comp == "tau2" && con$vc.min > 0) lb.sign <- "<" if (comp == "delta" && con$vc.min > 0) lb.sign <- "<" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.uni.selmodel, interval=c(con$vc.min, vc), tol=con$tol, maxiter=con$maxiter, extendInt="downX", obj=x, comp=comp, delta.pos=delta.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.uni.selmodel, interval=c(con$vc.min, vc), tol=con$tol, maxiter=con$maxiter, obj=x, comp=comp, delta.pos=delta.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { vc.lb <- res lb.conv <- TRUE } } break } con$vc.min <- con$vc.min + epdiff } if (verbose) cat("\n") 
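### (each CI bound is the value of the component at which -2*(logLik(restricted) - logLik(full)) equals
### qchisq(1-level, df=1), i.e., the point where the profiled log-likelihood has dropped by half of that
### critical value from its maximum)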
################################################################### ### search for upper bound ### get diff value when setting component to vc.max; this value should be positive (i.e., discrepancy must be larger than critical value) ### if it is not, then the upper bound must be above vc.max epdiff <- abs(con$vc.max - vc) / con$eptries for (i in seq_len(con$eptries)) { res <- try(.profile.rma.uni.selmodel(con$vc.max, obj=x, comp=comp, delta.pos=delta.pos, confint=TRUE, objective=objective, verbose=verbose), silent=TRUE) if (!inherits(res, "try-error") && !is.na(res)) { if (!.isTRUE(ddd$extint) && res < 0) { vc.ub <- con$vc.max ub.conv <- TRUE if (comp == "tau2") ub.sign <- ">" if (comp == "delta") ub.sign <- ">" } else { if (.isTRUE(ddd$extint)) { res <- try(uniroot(.profile.rma.uni.selmodel, interval=c(vc, con$vc.max), tol=con$tol, maxiter=con$maxiter, extendInt="upX", obj=x, comp=comp, delta.pos=delta.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } else { res <- try(uniroot(.profile.rma.uni.selmodel, interval=c(vc, con$vc.max), tol=con$tol, maxiter=con$maxiter, obj=x, comp=comp, delta.pos=delta.pos, confint=TRUE, objective=objective, verbose=verbose, check.conv=TRUE)$root, silent=TRUE) } ### check if uniroot method converged if (!inherits(res, "try-error")) { vc.ub <- res ub.conv <- TRUE } } break } con$vc.max <- con$vc.max - epdiff } ################################################################### } ###################################################################### ###################################################################### ###################################################################### if (!lb.conv) warning(mstyle$warning("Cannot obtain lower bound of profile likelihood CI due to convergence problems."), call.=FALSE) if (!ub.conv) warning(mstyle$warning("Cannot obtain upper bound of profile likelihood CI due to convergence problems."), call.=FALSE) ###################################################################### vc <- c(vc, vc.lb, vc.ub) if (comp == "tau2") { vcsqrt <- sqrt(ifelse(vc >= 0, vc, NA)) res.random <- rbind(vc, vcsqrt) rownames(res.random) <- c("tau^2", "tau") } if (comp == "delta") { res.random <- rbind(vc) if (x$deltas == 1L) { rownames(res.random) <- "delta" } else { rownames(res.random) <- paste0("delta.", delta.pos) } } colnames(res.random) <- c("estimate", "ci.lb", "ci.ub") } ######################################################################### ######################################################################### ######################################################################### if (fixed) { if (x$test == "t") { crit <- qt(level/2, df=x$ddf, lower.tail=FALSE) } else { crit <- qnorm(level/2, lower.tail=FALSE) } beta <- c(x$beta) ci.lb <- c(beta - crit * x$se) ci.ub <- c(beta + crit * x$se) if (is.function(transf)) { if (is.null(targs)) { beta <- sapply(beta, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) } else { beta <- sapply(beta, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] res.fixed <- cbind(estimate=beta, ci.lb=ci.lb, ci.ub=ci.ub) rownames(res.fixed) <- rownames(x$beta) } ######################################################################### ######################################################################### 
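### the returned object contains a 'fixed' element (Wald-type CIs for the location coefficients) and/or a
### 'random' element (profile likelihood CI for the requested variance / selection model component)
### illustrative usage (not run); assumes 'sel' is an 'rma.uni.selmodel' object fitted via selmodel():
### confint(sel)              # profile likelihood CIs for tau^2 and all estimated delta components
### confint(sel, delta=1)     # profile likelihood CI for the first delta component only
### confint(sel, fixed=TRUE)  # Wald-type CIs for the fixed effects (location coefficients) only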
######################################################################### res <- list() if (fixed) res$fixed <- res.fixed if (random) res$random <- res.random res$digits <- digits if (random) { res$ci.null <- ci.null res$lb.sign <- lb.sign res$ub.sign <- ub.sign #res$vc.min <- con$vc.min } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(res) <- "confint.rma" return(res) } metafor/R/misc.func.hidden.prof.r0000644000176200001440000002522614046722631016373 0ustar liggesusers### for profile(), confint(), and gosh() .profile.rma.uni <- function(val, obj, parallel=FALSE, profile=FALSE, confint=FALSE, subset=FALSE, objective, model=0L, verbose=FALSE, outlist=NULL) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (parallel == "snow") library(metafor) if (profile || confint) { ### for profile and confint, fit model with tau2 fixed to 'val' res <- try(suppressWarnings( rma.uni(obj$yi, obj$vi, weights=obj$weights, mods=obj$X, intercept=FALSE, method=obj$method, weighted=obj$weighted, test=obj$test, level=obj$level, control=obj$control, tau2=val, skipr2=TRUE, outlist="minimal")), silent=TRUE) } if (profile) { if (inherits(res, "try-error")) { sav <- list(ll = NA, beta = matrix(NA, nrow=nrow(obj$beta), ncol=1), ci.lb = rep(NA, length(obj$ci.lb)), ci.ub = rep(NA, length(obj$ci.ub))) } else { sav <- list(ll = logLik(res), beta = res$beta, ci.lb = res$ci.lb, ci.ub = res$ci.ub) } } if (confint) { if (inherits(res, "try-error")) { if (verbose) cat(mstyle$verbose(paste("tau2 =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective = NA", "\n"))) stop() } else { sav <- c(-2*(logLik(res) - logLik(obj)) - objective) if (verbose) cat(mstyle$verbose(paste("tau2 =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective =", formatC(sav, digits=obj$digits[["test"]], width=obj$digits[["test"]]+4, format="f"), "\n"))) } } if (subset) { ### for subset, fit model to subset as specified by 'val' if (model >= 1L) { # special cases for gosh() for FE and RE+DL models yi <- obj$yi[val] vi <- obj$vi[val] k <- length(yi) wi <- 1/vi sumwi <- sum(wi) est <- sum(wi*yi)/sumwi Q <- 0 I2 <- 0 H2 <- 1 tau2 <- 0 if (k > 1) { Q <- sum(wi * (yi - est)^2) I2 <- max(0, 100 * (Q - (k-1)) / Q) H2 <- Q / (k-1) if (model == 2L) { tau2 <- max(0, (Q - (k-1)) / (sumwi - sum(wi^2)/sumwi)) wi <- 1 / (vi + tau2) est <- sum(wi*yi)/sum(wi) } } sav <- list(beta = est, k = k, QE = Q, I2 = I2, H2 = H2, tau2 = tau2) } else { sav <- try(suppressWarnings( rma.uni(obj$yi, obj$vi, weights=obj$weights, mods=obj$X, intercept=FALSE, method=obj$method, weighted=obj$weighted, test=obj$test, level=obj$level, control=obj$control, tau2=ifelse(obj$tau2.fix, obj$tau2, NA), subset=val, skipr2=TRUE, outlist=outlist)), silent=TRUE) } } return(sav) } .profile.rma.mv <- function(val, obj, comp, sigma2.pos, tau2.pos, rho.pos, gamma2.pos, phi.pos, parallel=FALSE, profile=FALSE, confint=FALSE, subset=FALSE, objective, verbose=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (parallel == "snow") library(metafor) if (profile || confint) { ### for profile and confint, fit model with component fixed to 'val' ### set any fixed components to their values sigma2.arg <- ifelse(obj$vc.fix$sigma2, obj$sigma2, NA) tau2.arg <- ifelse(obj$vc.fix$tau2, obj$tau2, NA) rho.arg <- ifelse(obj$vc.fix$rho, obj$rho, NA) gamma2.arg <- ifelse(obj$vc.fix$gamma2, obj$gamma2, NA) phi.arg <- ifelse(obj$vc.fix$phi, obj$phi, NA) if (comp == 
"sigma2") sigma2.arg[sigma2.pos] <- val if (comp == "tau2") tau2.arg[tau2.pos] <- val if (comp == "rho") rho.arg[rho.pos] <- val if (comp == "gamma2") gamma2.arg[gamma2.pos] <- val if (comp == "phi") phi.arg[phi.pos] <- val obj$control$hessian <- FALSE res <- try(suppressWarnings( rma.mv(obj$yi, V=obj$V, W=obj$W, mods=obj$X, random=obj$random, struct=obj$struct, intercept=FALSE, data=obj$mf.r, method=obj$method, test=obj$test, dfs=obj$dfs, level=obj$level, R=obj$R, Rscale=obj$Rscale, sigma2=sigma2.arg, tau2=tau2.arg, rho=rho.arg, gamma2=gamma2.arg, phi=phi.arg, sparse=obj$sparse, dist=obj$dist, control=obj$control, outlist="minimal")), silent=TRUE) } if (profile) { if (inherits(res, "try-error")) { sav <- list(ll = NA, beta = matrix(NA, nrow=nrow(obj$beta), ncol=1), ci.lb = rep(NA, length(obj$ci.lb)), ci.ub = rep(NA, length(obj$ci.ub))) } else { sav <- list(ll = logLik(res), beta = res$beta, ci.lb = res$ci.lb, ci.ub = res$ci.ub) } } if (confint) { if (inherits(res, "try-error")) { if (verbose) cat(mstyle$verbose(paste("val =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective = NA", "\n"))) stop() } else { sav <- c(-2*(logLik(res) - logLik(obj)) - objective) if (verbose) cat(mstyle$verbose(paste("val =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective =", formatC(sav, digits=obj$digits[["fit"]], width=obj$digits[["fit"]]+4, format="f"), "\n"))) } } return(sav) } .profile.rma.mh <- function(val, obj, parallel=FALSE, subset=FALSE, outlist=NULL) { if (parallel == "snow") library(metafor) if (subset) { ### for subset, fit model to subset as specified by 'val' if (is.element(obj$measure, c("RR","OR","RD"))) { sav <- try(suppressWarnings( rma.mh(ai=obj$ai, bi=obj$bi, ci=obj$ci, di=obj$di, measure=obj$measure, add=obj$add, to=obj$to, drop00=obj$drop00, correct=obj$correct, level=obj$level, subset=val, outlist=outlist)), silent=TRUE) } else { sav <- try(suppressWarnings( rma.mh(x1i=obj$x1i, x2i=obj$x2i, t1i=obj$t1i, t2i=obj$t2i, measure=obj$measure, add=obj$add, to=obj$to, drop00=obj$drop00, correct=obj$correct, level=obj$level, subset=val, outlist=outlist)), silent=TRUE) } } return(sav) } .profile.rma.peto <- function(val, obj, parallel=FALSE, subset=FALSE, outlist=NULL) { if (parallel == "snow") library(metafor) if (subset) { ### for subset, fit model to subset as specified by 'val' sav <- try(suppressWarnings( rma.peto(ai=obj$ai, bi=obj$bi, ci=obj$ci, di=obj$di, add=obj$add, to=obj$to, drop00=obj$drop00, level=obj$level, subset=val, outlist=outlist)), silent=TRUE) } return(sav) } .profile.rma.uni.selmodel <- function(val, obj, comp, delta.pos, parallel=FALSE, profile=FALSE, confint=FALSE, subset=FALSE, objective, verbose=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (parallel == "snow") library(metafor) if (profile || confint) { ### for profile and confint, fit model with component fixed to 'val' ### set any fixed components to their values tau2.arg <- ifelse(is.element(obj$method, c("FE","EE","CE")) || obj$tau2.fix, obj$tau2, NA) delta.arg <- ifelse(obj$delta.fix, obj$delta, NA) if (comp == "tau2") tau2.arg <- val if (comp == "delta") delta.arg[delta.pos] <- val ### reset steps to NA if !stepsspec (some types set steps=0 if steps was not specified) if (!obj$stepsspec) obj$steps <- NA res <- try(suppressWarnings( selmodel(obj, obj$type, alternative=obj$alternative, prec=obj$prec, scaleprec=obj$scaleprec, tau2=tau2.arg, delta=delta.arg, steps=obj$steps, verbose=FALSE, 
control=obj$control, skiphes=confint, skiphet=TRUE, defmap=obj$defmap, mapfun=obj$mapfun, mapinvfun=obj$mapinvfun)), silent=TRUE) } if (profile) { if (inherits(res, "try-error")) { sav <- list(ll = NA, beta = matrix(NA, nrow=nrow(obj$beta), ncol=1), ci.lb = rep(NA, length(obj$ci.lb)), ci.ub = rep(NA, length(obj$ci.ub))) } else { sav <- list(ll = logLik(res), beta = res$beta, ci.lb = res$ci.lb, ci.ub = res$ci.ub) } } if (confint) { if (inherits(res, "try-error")) { if (verbose) cat(mstyle$verbose(paste("val =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective = NA", "\n"))) stop() } else { sav <- c(-2*(logLik(res) - logLik(obj)) - objective) if (verbose) cat(mstyle$verbose(paste("val =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective =", formatC(sav, digits=obj$digits[["fit"]], width=obj$digits[["fit"]]+4, format="f"), "\n"))) } } return(sav) } .profile.rma.ls <- function(val, obj, comp, alpha.pos, parallel=FALSE, profile=FALSE, confint=FALSE, subset=FALSE, objective, verbose=FALSE) { mstyle <- .get.mstyle("crayon" %in% .packages()) if (parallel == "snow") library(metafor) if (profile || confint) { ### for profile and confint, fit model with component fixed to 'val' ### set any fixed components to their values alpha.arg <- ifelse(obj$alpha.fix, obj$alpha, NA) if (comp == "alpha") alpha.arg[alpha.pos] <- val res <- try(suppressWarnings( rma.uni(obj$yi, obj$vi, weights=obj$weights, mods=obj$X, intercept=FALSE, scale=obj$Z, link=obj$link, method=obj$method, weighted=obj$weighted, test=obj$test, level=obj$level, control=obj$control, skiphes=TRUE, alpha=alpha.arg, outlist="minimal")), silent=TRUE) } if (profile) { if (inherits(res, "try-error")) { sav <- list(ll = NA, beta = matrix(NA, nrow=nrow(obj$beta), ncol=1), ci.lb = rep(NA, length(obj$ci.lb)), ci.ub = rep(NA, length(obj$ci.ub))) } else { sav <- list(ll = logLik(res), beta = res$beta, ci.lb = res$ci.lb, ci.ub = res$ci.ub) } } if (confint) { if (inherits(res, "try-error")) { if (verbose) cat(mstyle$verbose(paste("val =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective = NA", "\n"))) stop() } else { sav <- c(-2*(logLik(res) - logLik(obj)) - objective) if (verbose) cat(mstyle$verbose(paste("val =", formatC(val, digits=obj$digits[["var"]], width=obj$digits[["var"]]+4, format="f"), " LRT - objective =", formatC(sav, digits=obj$digits[["fit"]], width=obj$digits[["fit"]]+4, format="f"), "\n"))) } } return(sav) } metafor/R/rstudent.rma.mv.r0000644000176200001440000001446014027645414015356 0ustar liggesusersrstudent.rma.mv <- function(model, digits, progbar=FALSE, cluster, reestimate=TRUE, parallel="no", ncpus=1, cl=NULL, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mv", notav="robust.rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } misscluster <- ifelse(missing(cluster), TRUE, FALSE) if (misscluster) cluster <- seq_len(x$k.all) ddd <- list(...) .chkdots(ddd, c("time", "LB")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### ### process cluster variable # note: cluster variable is assumed to be of the same length as the size of # the original dataset passed to the model fitting function and so we apply # the same subsetting and removing of missings (if necessary) as was done # during model fitting if (!is.null(x$subset)) cluster <- cluster[x$subset] cluster.f <- cluster cluster <- cluster[x$not.na] ### checks on cluster variable if (anyNA(cluster.f)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster.f) == 0L) stop(mstyle$stop(paste0("Cannot find 'cluster' variable (or it has zero length)."))) if (length(cluster) != x$k) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not match length of data (", x$k, ")."))) ### cluster ids and number of clusters ids <- unique(cluster) n <- length(ids) ######################################################################### if (parallel == "no") res <- pbapply::pblapply(seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) if (parallel == "multicore") res <- pbapply::pblapply(seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate, cl=ncpus) #res <- parallel::mclapply(seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate, mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) #res <- parallel::clusterApplyLB(cl, seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) } else { res <- pbapply::pblapply(seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate, cl=cl) #res <- parallel::parLapply(cl, seq_len(n), .rstudent.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) #res <- parallel::clusterApply(cl, seq_len(n), .rstudent.rma.mv, obj=x, 
parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) } } delresid <- rep(NA_real_, x$k) sedelresid <- rep(NA_real_, x$k) pos <- unlist(sapply(res, function(x) x$pos)) delresid[pos] <- unlist(sapply(res, function(x) x$delresid)) sedelresid[pos] <- unlist(sapply(res, function(x) x$sedelresid)) X2 <- sapply(res, function(x) x$X2) k.id <- sapply(res, function(x) x$k.id) ######################################################################### delresid[abs(delresid) < 100 * .Machine$double.eps] <- 0 resid <- rep(NA_real_, x$k.f) seresid <- rep(NA_real_, x$k.f) resid[x$not.na] <- delresid seresid[x$not.na] <- sedelresid stresid <- resid / seresid ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na], se=seresid[x$not.na], z=stresid[x$not.na]) if (!misscluster) out$cluster <- cluster.f[x$not.na] out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) if (!misscluster) out$cluster <- cluster.f out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } if (misscluster) { out$digits <- digits class(out) <- "list.rma" return(out) } else { out <- list(out) if (na.act == "na.omit") { out[[2]] <- list(X2=X2[order(ids)], k=k.id[order(ids)], slab=ids[order(ids)]) } if (na.act == "na.exclude" || na.act == "na.pass") { ids.f <- unique(cluster.f) X2.f <- rep(NA_real_, length(ids.f)) X2.f[match(ids, ids.f)] <- X2 k.id.f <- sapply(ids.f, function(id) sum((id == cluster.f) & x$not.na)) out[[2]] <- list(X2=X2.f[order(ids.f)], k=k.id.f[order(ids.f)], slab=ids.f[order(ids.f)]) } out[[1]]$digits <- digits out[[2]]$digits <- digits names(out) <- c("obs", "cluster") class(out[[1]]) <- "list.rma" class(out[[2]]) <- "list.rma" attr(out[[1]], ".rmspace") <- TRUE attr(out[[2]], ".rmspace") <- TRUE return(out) } } metafor/R/funnel.default.r0000644000176200001440000005077614054724766015243 0ustar liggesusersfunnel.default <- function(x, vi, sei, ni, subset, yaxis="sei", xlim, ylim, xlab, ylab, steps=5, at, atransf, targs, digits, level=95, back="lightgray", shade="white", hlines="white", refline=0, lty=3, pch=19, col, bg, label=FALSE, offset=0.4, legend=FALSE, ci.res=1000, ...) 
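### illustrative usage (not run); assumes 'yi' and 'vi' are vectors of observed outcomes and corresponding sampling variances:
### funnel(yi, vi=vi)                  # funnel plot of the outcomes against their standard errors
### funnel(yi, vi=vi, yaxis="seinv")   # precision (inverse standard error) on the y-axis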
{ ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(subset)) subset <- NULL yaxis <- match.arg(yaxis, c("sei", "vi", "seinv", "vinv", "ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi")) if (missing(atransf)) atransf <- FALSE atransf.char <- deparse(substitute(atransf)) yi <- x k <- length(yi) ### check if sample size information is available if plotting (some function of) of the the sample sizes on the y-axis if (missing(ni)) ni <- NULL if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni"))) { if (is.null(ni) && !is.null(attr(yi, "ni"))) ni <- attr(yi, "ni") if (!is.null(ni) && length(ni) != k) stop(mstyle$stop("Sample size information not of same length as data.")) if (is.null(ni)) stop(mstyle$stop("No sample size information available.")) } ### check if sampling variances and/or standard errors are available if (missing(vi)) vi <- NULL if (missing(sei)) sei <- NULL if (is.null(vi)) { if (!is.null(sei)) vi <- sei^2 } if (is.null(sei)) { if (!is.null(vi)) sei <- sqrt(vi) } if (is.element(yaxis, c("sei", "vi", "seinv", "vinv", "wi"))) { if (is.null(vi)) stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) if (length(vi) != k) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same.")) } ### set negative variances and/or standard errors to 0 if (!is.null(vi)) vi[vi < 0] <- 0 if (!is.null(sei)) sei[sei < 0] <- 0 ### get slab from attributes of yi; if not available or it doesn't have the right length, set slab <- 1:k slab <- attr(yi, "slab") if (is.null(slab) || length(slab) != k) slab <- seq_along(yi) ### set y-axis label if not specified if (missing(ylab)) { if (yaxis == "sei") ylab <- "Standard Error" if (yaxis == "vi") ylab <- "Variance" if (yaxis == "seinv") ylab <- "Inverse Standard Error" if (yaxis == "vinv") ylab <- "Inverse Variance" if (yaxis == "ni") ylab <- "Sample Size" if (yaxis == "ninv") ylab <- "Inverse Sample Size" if (yaxis == "sqrtni") ylab <- "Square Root Sample Size" if (yaxis == "sqrtninv") ylab <- "Inverse Square Root Sample Size" if (yaxis == "lni") ylab <- "Log Sample Size" if (yaxis == "wi") ylab <- "Weight (in %)" } if (missing(at)) at <- NULL if (missing(targs)) targs <- NULL ### default number of digits (if not specified) if (missing(digits)) { if (yaxis == "sei") digits <- c(2L,3L) if (yaxis == "vi") digits <- c(2L,3L) if (yaxis == "seinv") digits <- c(2L,3L) if (yaxis == "vinv") digits <- c(2L,3L) if (yaxis == "ni") digits <- c(2L,0L) if (yaxis == "ninv") digits <- c(2L,3L) if (yaxis == "sqrtni") digits <- c(2L,3L) if (yaxis == "sqrtninv") digits <- c(2L,3L) if (yaxis == "lni") digits <- c(2L,3L) if (yaxis == "wi") digits <- c(2L,2L) } else { if (length(digits) == 1L) ### digits[1] for x-axis labels digits <- c(digits,digits) ### digits[2] for y-axis labels } ### note: digits can also be a list (e.g., digits=list(2L,3)); trailing 0's are dropped for intergers if (length(lty) == 1L) lty <- rep(lty, 2) ### 1st value = funnel lines, 2nd value = reference line if (length(pch) == 1L) { pch.vec <- FALSE pch <- rep(pch, k) } else { pch.vec <- TRUE } if (length(pch) != k) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the number of outcomes (", k, ")."))) if (missing(col)) col <- "black" if (length(col) == 1L) { col.vec <- FALSE col <- 
rep(col, k) } else { col.vec <- TRUE } if (length(col) != k) stop(mstyle$stop(paste0("Length of the 'col' argument (", length(col), ") does not correspond to the number of outcomes (", k, ")."))) if (missing(bg)) bg <- "white" if (length(bg) == 1L) { bg.vec <- FALSE bg <- rep(bg, k) } else { bg.vec <- TRUE } if (length(bg) != k) stop(mstyle$stop(paste0("Length of the 'bg' argument (", length(bg), ") does not correspond to the number of outcomes (", k, ")."))) if (length(label) != 1L) stop(mstyle$stop("Argument 'label' should be of length 1.")) ddd <- list(...) lplot <- function(..., refline2, level2, lty2) plot(...) labline <- function(..., refline2, level2, lty2) abline(...) lsegments <- function(..., refline2, level2, lty2) segments(...) laxis <- function(..., refline2, level2, lty2) axis(...) lpolygon <- function(..., refline2, level2, lty2) polygon(...) llines <- function(..., refline2, level2, lty2) lines(...) lpoints <- function(..., refline2, level2, lty2) points(...) lrect <- function(..., refline2, level2, lty2) rect(...) ltext <- function(..., refline2, level2, lty2) text(...) ### refline2, level2, and lty2 for adding a second reference line / funnel if (!is.null(ddd$refline2)) { refline2 <- ddd$refline2 } else { refline2 <- NULL } if (!is.null(ddd$level2)) { level2 <- ddd$level2 } else { level2 <- 95 } if (!is.null(ddd$lty2)) { lty2 <- ddd$lty2 } else { lty2 <- 3 } ######################################################################### ### if a subset of studies is specified if (!is.null(subset)) { subset <- .setnafalse(subset, k=length(yi)) yi <- yi[subset] vi <- vi[subset] sei <- sei[subset] ni <- ni[subset] slab <- slab[subset] pch <- pch[subset] col <- col[subset] bg <- bg[subset] } ### check for NAs and act accordingly has.na <- is.na(yi) | (if (is.element(yaxis, c("vi", "vinv"))) is.na(vi) else FALSE) | (if (is.element(yaxis, c("sei", "seinv"))) is.na(vi) else FALSE) | (if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni"))) is.na(ni) else FALSE) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] vi <- vi[not.na] sei <- sei[not.na] ni <- ni[not.na] slab <- slab[not.na] pch <- pch[not.na] col <- col[not.na] bg <- bg[not.na] } if (na.act == "na.fail") stop(mstyle$stop("Missing values in data.")) } if (missing(xlab)) xlab <- .setlab(attr(yi, "measure"), transf.char="FALSE", atransf.char, gentype=1) ### at least two studies left? 
if (length(yi) < 2L) stop(mstyle$stop("Plotting terminated since k < 2.")) ### get weights if (yaxis == "wi") { if (any(vi <= 0)) stop(mstyle$stop("Cannot plot weights when there are non-positive sampling variances in the data.")) weights <- 1/vi weights <- weights / sum(weights) * 100 } ######################################################################### ### set y-axis limits if (missing(ylim)) { ### 1st ylim value is always the lowest precision (should be at the bottom of the plot) ### 2nd ylim value is always the highest precision (should be at the top of the plot) if (yaxis == "sei") ylim <- c(max(sei), 0) if (yaxis == "vi") ylim <- c(max(vi), 0) if (yaxis == "seinv") ylim <- c(min(1/sei), max(1/sei)) if (yaxis == "vinv") ylim <- c(min(1/vi), max(1/vi)) if (yaxis == "ni") ylim <- c(min(ni), max(ni)) if (yaxis == "ninv") ylim <- c(max(1/ni), min(1/ni)) if (yaxis == "sqrtni") ylim <- c(min(sqrt(ni)), max(sqrt(ni))) if (yaxis == "sqrtninv") ylim <- c(max(1/sqrt(ni)), min(1/sqrt(ni))) if (yaxis == "lni") ylim <- c(min(log(ni)), max(log(ni))) if (yaxis == "wi") ylim <- c(min(weights), max(weights)) ### infinite y-axis limits can happen with "seinv" and "vinv" when one or more sampling variances are 0 if (any(is.infinite(ylim))) stop(mstyle$stop("Setting 'ylim' automatically not possible (must set y-axis limits manually).")) } else { ### make sure that user supplied limits are in the right order if (is.element(yaxis, c("sei", "vi", "ninv", "sqrtninv"))) ylim <- c(max(ylim), min(ylim)) if (is.element(yaxis, c("seinv", "vinv", "ni", "sqrtni", "lni", "wi"))) ylim <- c(min(ylim), max(ylim)) ### make sure that user supplied limits are in the appropriate range if (is.element(yaxis, c("sei", "vi", "ni", "ninv", "sqrtni", "sqrtninv", "lni"))) { if (ylim[1] < 0 || ylim[2] < 0) stop(mstyle$stop("Both y-axis limits must be >= 0.")) } if (is.element(yaxis, c("seinv", "vinv"))) { if (ylim[1] <= 0 || ylim[2] <= 0) stop(mstyle$stop("Both y-axis limits must be > 0.")) } if (is.element(yaxis, c("wi"))) { if (ylim[1] < 0 || ylim[2] < 0) stop(mstyle$stop("Both y-axis limits must be >= 0.")) } } ######################################################################### ### set x-axis limits if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) level2 <- ifelse(level2 == 0, 1, ifelse(level2 >= 1, (100-level2)/100, ifelse(level2 > .5, 1-level2, level2))) #level <- ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level)) ### note: there may be multiple level values level.min <- min(level) ### note: smallest level is the widest CI lvals <- length(level) ### calculate the CI bounds at the bottom of the figure (for the widest CI if there are multiple) if (yaxis == "sei") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1]^2) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1]^2) } if (yaxis == "vi") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1]) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(ylim[1]) } if (yaxis == "seinv") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1]^2) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1]^2) } if (yaxis == "vinv") { x.lb.bot <- refline - qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1]) x.ub.bot <- refline + qnorm(level.min/2, lower.tail=FALSE) * sqrt(1/ylim[1]) } if (missing(xlim)) { xlim <- 
c(min(x.lb.bot,min(yi)), max(x.ub.bot,max(yi))) ### make sure x-axis not only includes widest CI, but also all yi values rxlim <- xlim[2] - xlim[1] ### calculate range of the x-axis limits xlim[1] <- xlim[1] - (rxlim * 0.10) ### subtract 10% of range from lower x-axis bound xlim[2] <- xlim[2] + (rxlim * 0.10) ### add 10% of range to upper x-axis bound } else { xlim <- sort(xlim) ### just in case the user supplies the limits in the wrong order } } if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi"))) { if (missing(xlim)) { xlim <- c(min(yi), max(yi)) rxlim <- xlim[2] - xlim[1] ### calculate range of the x-axis limits xlim[1] <- xlim[1] - (rxlim * 0.10) ### subtract 10% of range from lower x-axis bound xlim[2] <- xlim[2] + (rxlim * 0.10) ### add 10% of range to upper x-axis bound } else { xlim <- sort(xlim) ### just in case the user supplies the limits in the wrong order } } ### if user has specified 'at' argument, make sure xlim actually contains the min and max 'at' values if (!is.null(at)) { xlim[1] <- min(c(xlim[1], at), na.rm=TRUE) xlim[2] <- max(c(xlim[2], at), na.rm=TRUE) } ######################################################################### ### set up plot lplot(NA, NA, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, xaxt="n", yaxt="n", bty="n", ...) ### add background shading par.usr <- par("usr") lrect(par.usr[1], par.usr[3], par.usr[2], par.usr[4], col=back, border=NA, ...) ### add y-axis laxis(side=2, at=seq(from=ylim[1], to=ylim[2], length.out=steps), labels=formatC(seq(from=ylim[1], to=ylim[2], length.out=steps), digits=digits[[2]], format="f", drop0trailing=is.integer(digits[[2]])), ...) ### add horizontal lines labline(h=seq(from=ylim[1], to=ylim[2], length.out=steps), col=hlines, ...) ######################################################################### ### add CI region(s) if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { ### add a bit to the top/bottom ylim so that the CI region(s) fill out the entire figure if (yaxis == "sei") { rylim <- ylim[1] - ylim[2] ylim[1] <- ylim[1] + (rylim * 0.10) ylim[2] <- max(0, ylim[2] - (rylim * 0.10)) } if (yaxis == "vi") { rylim <- ylim[1] - ylim[2] ylim[1] <- ylim[1] + (rylim * 0.10) ylim[2] <- max(0, ylim[2] - (rylim * 0.10)) } if (yaxis == "seinv") { rylim <- ylim[2] - ylim[1] #ylim[1] <- max(.0001, ylim[1] - (rylim * 0.10)) ### not clear how much to add to bottom ylim[2] <- ylim[2] + (rylim * 0.10) } if (yaxis == "vinv") { rylim <- ylim[2] - ylim[1] #ylim[1] <- max(.0001, ylim[1] - (rylim * 0.10)) ### not clear how much to add to bottom ylim[2] <- ylim[2] + (rylim * 0.10) } yi.vals <- seq(from=ylim[1], to=ylim[2], length.out=ci.res) if (yaxis == "sei") vi.vals <- yi.vals^2 if (yaxis == "vi") vi.vals <- yi.vals if (yaxis == "seinv") vi.vals <- 1/yi.vals^2 if (yaxis == "vinv") vi.vals <- 1/yi.vals for (m in lvals:1) { ci.left <- refline - qnorm(level[m]/2, lower.tail=FALSE) * sqrt(vi.vals) ci.right <- refline + qnorm(level[m]/2, lower.tail=FALSE) * sqrt(vi.vals) lpolygon(c(ci.left,ci.right[ci.res:1]), c(yi.vals,yi.vals[ci.res:1]), border=NA, col=shade[m], ...) llines(ci.left, yi.vals, lty=lty[1], ...) llines(ci.right, yi.vals, lty=lty[1], ...) } if (!is.null(refline2)) { ci.left <- refline2 - qnorm(level2/2, lower.tail=FALSE) * sqrt(vi.vals) ci.right <- refline2 + qnorm(level2/2, lower.tail=FALSE) * sqrt(vi.vals) llines(ci.left, yi.vals, lty=lty2, ...) llines(ci.right, yi.vals, lty=lty2, ...) 
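### (note: 'refline2', 'level2', and 'lty2' are picked up from '...' and add the outline of a second pseudo-confidence region without shading; hypothetical illustration: funnel(res, refline=0, refline2=coef(res), level2=95, lty2=3))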
} } ### add vertical reference line ### use segments so that line does not extend beyond tip of CI region if (is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) lsegments(refline, ylim[1], refline, ylim[2], lty=lty[2], ...) if (is.element(yaxis, c("ni", "ninv", "sqrtni", "sqrtninv", "lni", "wi"))) labline(v=refline, lty=lty[2], ...) ######################################################################### ### add points xaxis.vals <- yi if (yaxis == "sei") yaxis.vals <- sei if (yaxis == "vi") yaxis.vals <- vi if (yaxis == "seinv") yaxis.vals <- 1/sei if (yaxis == "vinv") yaxis.vals <- 1/vi if (yaxis == "ni") yaxis.vals <- ni if (yaxis == "ninv") yaxis.vals <- 1/ni if (yaxis == "sqrtni") yaxis.vals <- sqrt(ni) if (yaxis == "sqrtninv") yaxis.vals <- 1/sqrt(ni) if (yaxis == "lni") yaxis.vals <- log(ni) if (yaxis == "wi") yaxis.vals <- weights lpoints(x=xaxis.vals, y=yaxis.vals, pch=pch, col=col, bg=bg, ...) ######################################################################### ### add L-shaped box around plot box(bty="l") ### generate x-axis positions if none are specified if (is.null(at)) { at <- axTicks(side=1) #at <- pretty(x=c(alim[1], alim[2]), n=steps-1) #at <- pretty(x=c(min(ci.lb), max(ci.ub)), n=steps-1) } else { at <- at[at > par("usr")[1]] at <- at[at < par("usr")[2]] } at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } } else { at.lab <- formatC(at.lab, digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } ### add x-axis laxis(side=1, at=at, labels=at.lab, ...) ############################################################################ ### labeling of points k <- length(yi) if (is.numeric(label) || is.character(label) || .isTRUE(label)) { if (is.numeric(label)) { label <- round(label) if (label < 0) label <- 0 if (label > k) label <- k label <- order(abs(yi - refline), decreasing=TRUE)[seq_len(label)] } else if ((is.character(label) && label == "all") || .isTRUE(label)) { label <- seq_len(k) } else if ((is.character(label) && label == "out")) { if (!is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { label <- seq_len(k) } else { label <- which(abs(yi - refline) / sqrt(vi) >= qnorm(level.min/2, lower.tail=FALSE)) } } else { label <- NULL } for (i in label) ltext(yi[i], yaxis.vals[i], slab[i], pos=ifelse(yi[i]-refline >= 0, 4, 2), offset=offset, ...)
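### (labels are drawn to the right (pos=4) of points whose estimate falls at or above 'refline' and to the left (pos=2) of points below it, shifted by 'offset' character widths)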
} ######################################################################### ### add legend (if requested) if (is.logical(legend) && isTRUE(legend)) lpos <- "topright" if (is.character(legend)) { lpos <- legend legend <- TRUE } if (legend && !is.element(yaxis, c("sei", "vi", "seinv", "vinv"))) { legend <- FALSE warning(mstyle$warning("Argument 'legend' only applicable if 'yaxis' is 'sei', 'vi', 'seinv', or 'vinv'."), call.=FALSE) } if (legend) { level <- c(level, 0) lvals <- length(level) add.studies <- !pch.vec && !col.vec && !bg.vec # only add 'Studies' to legend if pch, col, and bg were not vectors to begin with scipen <- options(scipen=100) lchars <- max(nchar(level))-2 options(scipen=scipen$scipen) pval1 <- NULL pval2 <- NULL phantom <- NULL ltxt <- sapply(1:lvals, function(i) { if (i == 1) return(as.expression(bquote(paste(.(pval1) < p, phantom() <= .(pval2)), list(pval1=.fcf(level[i], lchars), pval2=.fcf(1, lchars))))) #return(as.expression(bquote(p > .(pval), list(pval=.fcf(level[i], lchars))))) if (i > 1 && i < lvals) return(as.expression(bquote(paste(.(pval1) < p, phantom() <= .(pval2)), list(pval1=.fcf(level[i], lchars), pval2=.fcf(level[i-1], lchars))))) if (i == lvals) return(as.expression(bquote(paste(.(pval1) < p, phantom() <= .(pval2)), list(pval1=.fcf(0, lchars), pval2=.fcf(level[i-1], lchars))))) }) pch.l <- rep(22, lvals) col.l <- rep("black", lvals) pt.cex <- rep(2, lvals) pt.bg <- c(shade, back) if (add.studies) { ltxt <- c(ltxt, expression(plain(Studies))) pch.l <- c(pch.l, pch[1]) col.l <- c(col.l, col[1]) pt.cex <- c(pt.cex, 1) pt.bg <- c(pt.bg, bg[1]) } legend(lpos, inset=.01, bg="white", pch=pch.l, col=col.l, pt.cex=pt.cex, pt.bg=pt.bg, legend=ltxt) } ############################################################################ ### prepare data frame to return sav <- data.frame(x=xaxis.vals, y=yaxis.vals, slab=slab, stringsAsFactors=FALSE) invisible(sav) } metafor/R/df.residual.rma.r0000644000176200001440000000031513770363257015265 0ustar liggesusersdf.residual.rma <- function(object, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") df.resid <- object$k.eff - object$p.eff return(df.resid) } metafor/R/rstandard.rma.mh.r0000644000176200001440000000273613770377025015461 0ustar liggesusersrstandard.rma.mh <- function(model, digits, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mh") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ######################################################################### resid <- c(x$yi.f - x$beta) resid[abs(resid) < 100 * .Machine$double.eps] <- 0 #resid[abs(resid) < 100 * .Machine$double.eps * median(abs(resid), na.rm=TRUE)] <- 0 ### see lm.influence ### note: these are like Pearson (or semi-standardized) residuals seresid <- sqrt(x$vi.f) stresid <- resid / seresid ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na.yivi], se=seresid[x$not.na.yivi], z=stresid[x$not.na.yivi]) out$slab <- x$slab[x$not.na.yivi] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na.yivi)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits class(out) <- "list.rma" return(out) } metafor/R/funnel.r0000644000176200001440000000006213457322061013563 0ustar liggesusersfunnel <- function(x, ...) UseMethod("funnel") metafor/R/print.summary.rma.r0000644000176200001440000000107113770374061015710 0ustar liggesusersprint.summary.rma <- function(x, digits=x$digits, showfit=TRUE, signif.stars=getOption("show.signif.stars"), signif.legend=signif.stars, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="summary.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) ### strip summary.rma class from object (otherwise get recursion) class(x) <- class(x)[-1] ### print with showfit=TRUE print(x, digits=digits, showfit=showfit, signif.stars=signif.stars, signif.legend=signif.legend, ...) invisible() } metafor/R/hatvalues.rma.uni.r0000644000176200001440000000375313770364141015655 0ustar liggesusershatvalues.rma.uni <- function(model, type="diagonal", ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.uni", notav="rma.uni.selmodel") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("diagonal", "matrix")) ######################################################################### x <- model if (x$weighted) { if (is.null(x$weights)) { W <- diag(1/(x$vi + x$tau2), nrow=x$k, ncol=x$k) stXWX <- .invcalc(X=x$X, W=W, k=x$k) H <- x$X %*% stXWX %*% crossprod(x$X,W) #H <- x$X %*% (x$vb / x$s2w) %*% crossprod(x$X,W) ### x$vb may be changed through robust() (and when test="knha") } else { A <- diag(x$weights, nrow=x$k, ncol=x$k) stXAX <- .invcalc(X=x$X, W=A, k=x$k) H <- x$X %*% stXAX %*% crossprod(x$X,A) } } else { stXX <- .invcalc(X=x$X, W=diag(x$k), k=x$k) H <- x$X %*% tcrossprod(stXX,x$X) } ######################################################################### if (type == "diagonal") { hii <- rep(NA_real_, x$k.f) hii[x$not.na] <- diag(H) hii[hii > 1 - 10 * .Machine$double.eps] <- 1 ### as in lm.influence() names(hii) <- x$slab if (na.act == "na.omit") hii <- hii[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(hii) } if (type == "matrix") { Hfull <- matrix(NA_real_, nrow=x$k.f, ncol=x$k.f) Hfull[x$not.na, x$not.na] <- H rownames(Hfull) <- x$slab colnames(Hfull) <- x$slab if (na.act == "na.omit") Hfull <- Hfull[x$not.na, x$not.na, drop=FALSE] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(Hfull) } } metafor/R/selmodel.rma.uni.r0000644000176200001440000012720514046527222015462 0ustar liggesusersselmodel.rma.uni <- function(x, type, alternative="greater", prec, delta, steps, verbose=FALSE, digits, control, ...) { # TODO: add a H0 argument? 
since p-value may not be based on H0: theta_i = 0 # TODO: argument for which deltas to include in LRT (a delta may also not be constrained under H0, so it should not be included in the LRT then) mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("rma.ls", "robust.rma")) alternative <- match.arg(alternative, c("two.sided", "greater", "less")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } time.start <- proc.time() if (!x$allvipos) stop(mstyle$stop("Cannot fit selection model when one or more sampling variances are non-positive.")) if (!x$weighted || !is.null(x$weights)) stop(mstyle$stop("Cannot fit selection model for unweighted models or models with custom weights.")) if (missing(type)) stop(mstyle$stop("Must choose a specific selection model via the 'type' argument.")) type.options <- c("beta", "halfnorm", "negexp", "logistic", "power", "negexppow", "halfnorm2", "negexp2", "logistic2", "power2", "stepfun") #type <- match.arg(type, type.options) type <- type.options[grep(type, type.options)[1]] if (is.na(type)) stop(mstyle$stop("Unknown 'type' specified.")) if (missing(control)) control <- list() ### refit RE/ME models with ML estimation if (!is.element(x$method, c("FE","EE","CE","ML"))) { #stop(mstyle$stop("Argument 'x' must either be a fixed-effects model or a model fitted with ML estimation.")) #x <- try(update(x, method="ML"), silent=TRUE) #x <- suppressWarnings(update(x, method="ML")) x <- try(suppressWarnings(rma.uni(x$yi, x$vi, weights=x$weights, mods=x$X, intercept=FALSE, method="ML", weighted=x$weighted, test=x$test, level=x$level, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, skipr2=TRUE)), silent=TRUE) } ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("time", "tau2", "beta", "skiphes", "skiphet", "scaleprec", "defmap", "mapfun", "mapinvfun")) ### handle 'tau2' argument from ... if (is.null(ddd$tau2)) { if (is.element(x$method, c("FE","EE","CE"))) { tau2 <- 0 } else { if (x$tau2.fix) { tau2 <- x$tau2 } else { tau2 <- NA } } } else { tau2 <- ddd$tau2 if (!is.na(tau2)) x$tau2.fix <- TRUE } ### handle 'beta' argument from ... 
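### (undocumented: a 'beta' vector passed via '...' fixes the corresponding model coefficients during fitting, following the same convention as the 'delta' and 'tau2' arguments: NA elements are estimated, non-NA elements are held constant)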
if (is.null(ddd$beta)) { beta <- rep(NA, x$p) betaspec <- FALSE } else { beta <- ddd$beta betaspec <- TRUE } yi <- x$yi vi <- x$vi X <- x$X p <- x$p k <- x$k ### set precision measure if (!missing(prec) && !is.null(prec)) { precspec <- TRUE prec <- match.arg(prec, c("sei", "vi", "ninv", "sqrtninv")) ### check if sample size information is available if prec is "ninv" or "sqrtninv" if (is.element(prec, c("ninv", "sqrtninv"))) { if (is.null(x$ni) || anyNA(x$ni)) stop(mstyle$stop("No sample size information stored in model object (or sample size information stored in model object contains NAs).")) } if (prec == "sei") preci <- sqrt(vi) if (prec == "vi") preci <- vi if (prec == "ninv") preci <- 1/x$ni if (prec == "sqrtninv") preci <- 1/sqrt(x$ni) if (is.null(ddd$scaleprec) || isTRUE(ddd$scaleprec)) preci <- preci / max(preci) } else { prec <- NULL precspec <- FALSE preci <- rep(1, k) } precis <- c(min = min(preci), max = max(preci), mean = mean(preci), median = median(preci)) ### compute p-values pvals <- .selmodel.pval(yi=c(yi), vi=vi, alternative=alternative) ### checks on steps argument if (missing(steps) || (length(steps) == 1L && is.na(steps))) { stepsspec <- FALSE steps <- NA } else { stepsspec <- TRUE if (anyNA(steps)) stop(mstyle$stop("No missing values allowed in 'steps' argument.")) if (any(steps < 0 | steps > 1)) stop(mstyle$stop("Value(s) specified for 'steps' argument must be between 0 and 1.")) steps <- unique(sort(steps)) if (steps[length(steps)] != 1) steps <- c(steps, 1) } ############################################################################ ### set default control parameters con <- list(verbose = FALSE, delta.init = NULL, # initial value(s) for selection model parameter(s) beta.init = NULL, # initial value(s) for fixed effect(s) tau2.init = NULL, # initial value for tau^2 delta.min = NULL, # min possible value(s) for selection model parameter(s) delta.max = NULL, # max possible value(s) for selection model parameter(s) tau2.max = Inf, # max possible value for tau^2 pval.min = NULL, # minimum p-value to integrate over (for selection models where this matters) optimizer = "optim", # optimizer to use ("optim", "nlminb", "uobyqa", "newuoa", "bobyqa", "nloptr", "nlm", "hjk", "nmk", "mads", "ucminf", "optimParallel") optmethod = "BFGS", # argument 'method' for optim() ("Nelder-Mead" and "BFGS" are sensible options) parallel = list(), # parallel argument for optimParallel() (note: 'cl' argument in parallel is not passed; this is directly specified via 'cl') cl = NULL, # arguments for optimParallel() ncpus = 1L, # arguments for optimParallel() beta.fix = FALSE, # fix beta in Hessian computation tau2.fix = FALSE, # fix tau2 in Hessian computation delta.fix = FALSE, # fix delta in Hessian computation htransf = FALSE, # FALSE/TRUE: Hessian is computed for the untransformed/transformed delta and tau^2 estimates hessianCtrl=list(r=6), # arguments passed on to 'method.args' of hessian() scaleX = !betaspec) # whether non-dummy variables in the X matrix should be rescaled before model fitting ### replace defaults with any user-defined values con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose optimizer <- match.arg(con$optimizer, c("optim","nlminb","uobyqa","newuoa","bobyqa","nloptr","nlm","hjk","nmk","mads","ucminf","optimParallel")) optmethod <- match.arg(con$optmethod, c("Nelder-Mead","BFGS","CG","L-BFGS-B","SANN","Brent")) parallel <- con$parallel cl <- con$cl ncpus <-
con$ncpus optcontrol <- control[is.na(con.pos)] ### get arguments that are control arguments for optimizer optcontrol$intCtrl <- NULL # but remove intCtrl from this list if (length(optcontrol) == 0L) optcontrol <- list() pos.intCtrl <- pmatch(names(control), "intCtrl", nomatch=0) if (sum(pos.intCtrl) > 0) { intCtrl <- control[[which(pos.intCtrl == 1)]] } else { intCtrl <- list() } con.pos <- pmatch(names(intCtrl), "lower", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "lower" } else { intCtrl$lower <- -Inf } con.pos <- pmatch(names(intCtrl), "upper", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "upper" } else { intCtrl$upper <- Inf } con.pos <- pmatch(names(intCtrl), "subdivisions", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "subdivisions" } else { intCtrl$subdivisions <- 100L } con.pos <- pmatch(names(intCtrl), "rel.tol", nomatch=0) if (sum(con.pos) > 0) { names(intCtrl)[which(con.pos == 1)] <- "rel.tol" } else { intCtrl$rel.tol <- .Machine$double.eps^0.25 } ### if control argument 'ncpus' is larger than 1, automatically switch to optimParallel optimizer if (ncpus > 1L) optimizer <- "optimParallel" ### rescale X matrix (only for models with moderators and models including an intercept term) if (!x$int.only && x$int.incl && con$scaleX) { Xsave <- X meanX <- colMeans(X[, 2:p, drop=FALSE]) sdX <- apply(X[, 2:p, drop=FALSE], 2, sd) ### consider using colSds() from matrixStats package is.d <- apply(X, 2, .is.dummy) ### is each column a dummy variable (i.e., only 0s and 1s)? mX <- rbind(c(intrcpt=1, -1*ifelse(is.d[-1], 0, meanX/sdX)), cbind(0, diag(ifelse(is.d[-1], 1, 1/sdX), nrow=length(is.d)-1, ncol=length(is.d)-1))) X[,!is.d] <- apply(X[, !is.d, drop=FALSE], 2, scale) ### rescale the non-dummy variables } ### initial value(s) for beta if (is.null(con$beta.init)) { beta.init <- c(x$beta) } else { if (length(con$beta.init) != p) stop(mstyle$stop(paste0("Length of 'beta.init' argument (", length(con$beta.init), ") does not match actual number of parameters (", p, ")."))) beta.init <- con$beta.init } if (!x$int.only && x$int.incl && con$scaleX) { imX <- try(suppressWarnings(solve(mX)), silent=TRUE) if (inherits(imX, "try-error")) stop(mstyle$stop(paste0("Unable to rescale starting values for fixed effects."))) beta.init <- c(imX %*% cbind(beta.init)) } ### check that tau2.max is larger than the tau^2 value if (x$tau2 >= con$tau2.max) stop(mstyle$stop("Value of 'tau2.max' must be > tau^2 value.")) tau2.max <- con$tau2.max ### initial value for tau^2 if (is.null(con$tau2.init)) { tau2.init <- log(x$tau2 + 0.001) } else { if (length(con$tau2.init) != 1L) stop(mstyle$stop(paste0("Argument 'tau2.init' should specify a single value."))) if (con$tau2.init <= 0) stop(mstyle$stop("Value of 'tau2.init' must be > 0.")) if (con$tau2.init >= tau2.max) stop(mstyle$stop("Value of 'tau2.init' must be < 'tau2.max'.")) tau2.init <- log(con$tau2.init) } ### set NLOPT_LN_BOBYQA as the default algorithm for nloptr optimizer ### and by default use a relative convergence criterion of 1e-8 on the function value if (optimizer=="nloptr" && !is.element("algorithm", names(optcontrol))) optcontrol$algorithm <- "NLOPT_LN_BOBYQA" if (optimizer=="nloptr" && !is.element("ftol_rel", names(optcontrol))) optcontrol$ftol_rel <- 1e-8 ### for mads, set trace=FALSE and tol=1e-6 by default if (optimizer=="mads" && !is.element("trace", names(optcontrol))) optcontrol$trace <- FALSE if (optimizer=="mads" && !is.element("tol", names(optcontrol))) 
optcontrol$tol <- 1e-6 ### check that the required packages are installed if (is.element(optimizer, c("uobyqa","newuoa","bobyqa"))) { if (!requireNamespace("minqa", quietly=TRUE)) stop(mstyle$stop("Please install the 'minqa' package to use this optimizer.")) } if (optimizer == "nloptr") { if (!requireNamespace("nloptr", quietly=TRUE)) stop(mstyle$stop("Please install the 'nloptr' package to use this optimizer.")) } if (is.element(optimizer, c("hjk","nmk","mads"))) { if (!requireNamespace("dfoptim", quietly=TRUE)) stop(mstyle$stop("Please install the 'dfoptim' package to use this optimizer.")) } if (optimizer == "ucminf") { if (!requireNamespace("ucminf", quietly=TRUE)) stop(mstyle$stop("Please install the 'ucminf' package to use this optimizer.")) } if (optimizer == "optimParallel") { if (!requireNamespace("optimParallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'optimParallel' package to use this optimizer.")) } if (!isTRUE(ddd$skiphes) && !requireNamespace("numDeriv", quietly=TRUE)) stop(mstyle$stop("Please install the 'numDeriv' package to compute the Hessian.")) ############################################################################ if (type == "beta") { if (stepsspec) warning(mstyle$warning("Argument 'steps' ignored (not applicable to this type of selection model)."), call.=FALSE) if (precspec) warning(mstyle$warning("Argument 'prec' ignored (not applicable to this type of selection model)."), call.=FALSE) deltas <- 2L delta.transf.fun <- c("exp", "exp") delta.transf.fun.inv <- c("log", "log") delta.lb <- c(0, 0) delta.ub <- c(Inf, Inf) delta.lb.excl <- c(TRUE, TRUE) delta.ub.excl <- c(FALSE, FALSE) delta.init <- c(1, 1) delta.min <- c(1e-05, 1e-05) delta.max <- c(100, 100) H0.delta <- c(1, 1) delta.LRT <- c(TRUE, TRUE) pval.min <- 1e-5 wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) x^(delta[1]-1) * (1-x)^(delta[2]-1) .selmodel.ll <- ".selmodel.ll.cont" } if (is.element(type, c("halfnorm", "negexp", "logistic", "power"))) { if (stepsspec) { if (length(steps) != 2L) # steps should be c(alpha,1) stop(mstyle$stop("Can only specify a single value for the 'steps' argument for this type of selection model.")) } else { steps <- 0 } deltas <- 1L delta.transf.fun <- "exp" delta.transf.fun.inv <- "log" delta.lb <- 0 delta.ub <- Inf delta.lb.excl <- FALSE delta.ub.excl <- FALSE delta.init <- 1 delta.min <- 0 delta.max <- 100 H0.delta <- 0 delta.LRT <- TRUE if (type == "power") { pval.min <- 1e-5 } else { pval.min <- 0 } if (type == "halfnorm") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, exp(-delta * preci * x^2) / exp(-delta * preci * steps[1]^2)) } if (type == "negexp") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, exp(-delta * preci * x) / exp(-delta * preci * steps[1])) } if (type == "logistic") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, (2 * exp(-delta * preci * x) / (1 + exp(-delta * preci * x))) / (2 * exp(-delta * preci * steps[1]) / (1 + exp(-delta * preci * steps[1])))) } if (type == "power") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, (1-x)^(preci*delta) / (1-steps[1])^(preci*delta)) } .selmodel.ll <- ".selmodel.ll.cont" } if (type == "negexppow") { if (stepsspec) { if (length(steps) != 2L) # steps should be c(alpha,1) stop(mstyle$stop("Can only specify a single value for the 'steps' argument for this type of selection model.")) } else { steps <- 0 } deltas <- 2L 
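### (for this model, the relative selection probability of a p-value above the cutoff is exp(-delta[1] * preci * p^(1/delta[2])), rescaled so that it equals 1 at the cutoff; see 'wi.fun' defined below)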
delta.transf.fun <- c("exp", "exp") delta.transf.fun.inv <- c("log", "log") delta.lb <- c(0, 0) delta.ub <- c(Inf, Inf) delta.lb.excl <- c(FALSE, FALSE) delta.ub.excl <- c(FALSE, FALSE) delta.init <- c(1, 1) delta.min <- c(0, 0) delta.max <- c(100, 100) H0.delta <- c(0, 0) delta.LRT <- c(TRUE, TRUE) pval.min <- 0 wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, exp(-delta[1] * preci * x^(1/delta[2])) / exp(-delta[1] * preci * steps[1]^(1/delta[2]))) .selmodel.ll <- ".selmodel.ll.cont" } if (is.element(type, c("halfnorm2", "negexp2", "logistic2", "power2"))) { if (stepsspec) { if (length(steps) != 2L) # steps should be c(alpha,1) stop(mstyle$stop("Can only specify a single value for the 'steps' argument for this type of selection model.")) } else { steps <- 0 } deltas <- 2L delta.transf.fun <- c("exp", "exp") delta.transf.fun.inv <- c("log", "log") delta.lb <- c(0,0) delta.ub <- c(Inf, Inf) delta.lb.excl <- c(FALSE, FALSE) delta.ub.excl <- c(FALSE, FALSE) delta.init <- c(1, 0.25) delta.min <- c(0, 0) delta.max <- c(100, 100) H0.delta <- c(0, 0) delta.LRT <- c(TRUE, TRUE) pval.min <- 0 if (type == "halfnorm2") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, (delta[1] + exp(-delta[2] * preci * x^2) / exp(-delta[2] * preci * steps[1]^2)) / (1 + delta[1])) } if (type == "negexp2") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, (delta[1] + exp(-delta[2] * preci * x) / exp(-delta[2] * preci * steps[1])) / (1 + delta[1])) } if (type == "logistic2") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, (delta[1] + (2 * exp(-delta[2] * preci * x) / (1 + exp(-delta[2] * preci * x))) / (2 * exp(-delta[2] * preci * steps[1]) / (1 + exp(-delta[2] * preci * steps[1])))) / (1 + delta[1])) } if (type == "power2") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) ifelse(x <= steps[1], 1, (delta[1] + (1-x)^(preci*delta[2]) / (1-steps[1])^(preci*delta[2])) / (1 + delta[1])) } .selmodel.ll <- ".selmodel.ll.cont" } if (type == "stepfun") { if (!stepsspec) stop(mstyle$stop("Must specify the 'steps' argument for this type of selection model.")) if (precspec) warning(mstyle$warning("Adding a precision measure to this selection model is undocumented and experimental."), call.=FALSE) deltas <- length(steps) delta.transf.fun <- rep("exp", deltas) delta.transf.fun.inv <- rep("log", deltas) delta.lb <- rep(0, deltas) delta.ub <- rep(Inf, deltas) delta.lb.excl <- rep(FALSE, deltas) delta.ub.excl <- rep(FALSE, deltas) delta.init <- rep(1, deltas) delta.min <- rep(0, deltas) delta.max <- rep(100, deltas) H0.delta <- rep(1, deltas) delta.LRT <- rep(TRUE, deltas) # note: delta[1] should actually not be included, but this gets constrained to 1 below anyway pval.min <- 0 if (type == "stepfun") { wi.fun <- function(x, delta, yi, vi, preci, alternative, steps) delta[sapply(x, function(p) which(p <= steps)[1])] / preci } .selmodel.ll <- ".selmodel.ll.stepfun" } ############################################################################ ### checks on delta, delta.init, delta.min, delta.max if (missing(delta)) { delta <- rep(NA, deltas) } else { if (length(delta) == 1L) delta <- rep(delta, deltas) if (length(delta) != deltas) stop(mstyle$stop(paste0("Argument 'delta' should be of length ", deltas, " for this type of selection model."))) for (j in seq_len(deltas)) { if (delta.lb.excl[j] && isTRUE(delta[j] <= delta.lb[j])) stop(mstyle$stop(paste0("Value 
of 'delta[", j, "]' must be > ", delta.lb[j], " for this type of selection model."))) if (!delta.lb.excl[j] && isTRUE(delta[j] < delta.lb[j])) stop(mstyle$stop(paste0("Value of 'delta[", j, "]' must be >= ", delta.lb[j], " for this type of selection model."))) } for (j in seq_len(deltas)) { if (delta.ub.excl[j] && isTRUE(delta[j] >= delta.ub[j])) stop(mstyle$stop(paste0("Value of 'delta[", j, "]' must be < ", delta.ub[j], " for this type of selection model."))) if (!delta.ub.excl[j] && isTRUE(delta[j] > delta.ub[j])) stop(mstyle$stop(paste0("Value of 'delta[", j, "]' must be <= ", delta.ub[j], " for this type of selection model."))) } } if (type == "stepfun" && is.na(delta[1])) delta[1] <- 1 if (!is.null(con$delta.min)) delta.min <- con$delta.min if (length(delta.min) == 1L) delta.min <- rep(delta.min, deltas) if (length(delta.min) != deltas) stop(mstyle$stop(paste0("Argument 'delta.min' should be of length ", deltas, " for this type of selection model."))) if (anyNA(delta.min)) stop(mstyle$stop("No missing values allowed in 'delta.min'.")) for (j in seq_len(deltas)) { if (delta.lb.excl[j] && delta.min[j] <= delta.lb[j]) stop(mstyle$stop(paste0("Value of 'delta.min[", j, "]' must be > ", delta.lb[j], " for this type of selection model."))) if (!delta.lb.excl[j] && delta.min[j] < delta.lb[j]) stop(mstyle$stop(paste0("Value of 'delta.min[", j, "]' must be >= ", delta.lb[j], " for this type of selection model."))) } for (j in seq_len(deltas)) { if (delta.ub.excl[j] && delta.min[j] >= delta.ub[j]) stop(mstyle$stop(paste0("Value of 'delta.min[", j, "]' must be < ", delta.ub[j], " for this type of selection model."))) if (!delta.ub.excl[j] && delta.min[j] > delta.ub[j]) stop(mstyle$stop(paste0("Value of 'delta.min[", j, "]' must be <= ", delta.ub[j], " for this type of selection model."))) } delta.min <- ifelse(!is.na(delta) & delta.min > delta, delta - .Machine$double.eps^0.2, delta.min) if (!is.null(con$delta.max)) delta.max <- con$delta.max if (length(delta.max) == 1L) delta.max <- rep(delta.max, deltas) if (length(delta.max) != deltas) stop(mstyle$stop(paste0("Argument 'delta.max' should be of length ", deltas, " for this type of selection model."))) if (anyNA(delta.max)) stop(mstyle$stop("No missing values allowed in 'delta.max'.")) for (j in seq_len(deltas)) { if (delta.lb.excl[j] && delta.max[j] <= delta.lb[j]) stop(mstyle$stop(paste0("Value of 'delta.max[", j, "]' must be > ", delta.lb[j], " for this type of selection model."))) if (!delta.lb.excl[j] && delta.max[j] < delta.lb[j]) stop(mstyle$stop(paste0("Value of 'delta.max[", j, "]' must be >= ", delta.lb[j], " for this type of selection model."))) } for (j in seq_len(deltas)) { if (delta.ub.excl[j] && delta.max[j] >= delta.ub[j]) stop(mstyle$stop(paste0("Value of 'delta.max[", j, "]' must be < ", delta.ub[j], " for this type of selection model."))) if (!delta.ub.excl[j] && delta.max[j] > delta.ub[j]) stop(mstyle$stop(paste0("Value of 'delta.max[", j, "]' must be <= ", delta.ub[j], " for this type of selection model."))) } if (any(delta.max < delta.min)) stop(mstyle$stop("Value(s) of 'delta.max' must be >= value(s) of 'delta.min'.")) delta.max <- ifelse(!is.na(delta) & delta.max < delta, delta + .Machine$double.eps^0.2, delta.max) if (!is.null(con$delta.init)) delta.init <- con$delta.init if (length(delta.init) == 1L) delta.init <- rep(delta.init, deltas) if (length(delta.init) != deltas) stop(mstyle$stop(paste0("Argument 'delta.init' should be of length ", deltas, " for this type of selection model."))) if (anyNA(delta.init)) 
stop(mstyle$stop("No missing values allowed in 'delta.init'.")) for (j in seq_len(deltas)) { if (delta.lb.excl[j] && delta.init[j] <= delta.lb[j]) stop(mstyle$stop(paste0("Value of 'delta.init[", j, "]' must be > ", delta.lb[j], " for this type of selection model."))) if (!delta.lb.excl[j] && delta.init[j] < delta.lb[j]) stop(mstyle$stop(paste0("Value of 'delta.init[", j, "]' must be >= ", delta.lb[j], " for this type of selection model."))) } for (j in seq_len(deltas)) { if (delta.ub.excl[j] && delta.init[j] >= delta.ub[j]) stop(mstyle$stop(paste0("Value of 'delta.init[", j, "]' must be < ", delta.ub[j], " for this type of selection model."))) if (!delta.ub.excl[j] && delta.init[j] > delta.ub[j]) stop(mstyle$stop(paste0("Value of 'delta.init[", j, "]' must be <= ", delta.ub[j], " for this type of selection model."))) } delta.init <- ifelse(!is.na(delta), delta, delta.init) if (.isTRUE(ddd$defmap) || any(is.infinite(delta.max))) { ddd$mapfun <- delta.transf.fun ddd$mapinvfun <- delta.transf.fun.inv } if (is.null(ddd$mapfun)) { mapfun <- rep(NA, deltas) } else { if (length(ddd$mapfun) == 1L) { mapfun <- rep(ddd$mapfun, deltas) } else { mapfun <- ddd$mapfun } } if (is.null(ddd$mapinvfun)) { mapinvfun <- rep(NA, deltas) } else { if (length(ddd$mapinvfun) == 1L) { mapinvfun <- rep(ddd$mapinvfun, deltas) } else { mapinvfun <- ddd$mapinvfun } } delta.init <- mapply(.mapinvfun, delta.init, delta.min, delta.max, mapinvfun) if (!is.null(con$pval.min)) pval.min <- con$pval.min if (k < p + ifelse(is.element(x$method, c("FE","EE","CE")) || x$tau2.fix, 0, 1) + sum(is.na(delta))) stop(mstyle$stop("Number of studies (k=", k, ") is too small to fit the selection model.")) ############################################################################ pvals[pvals < pval.min] <- pval.min pvals[pvals > (1-pval.min)] <- 1-pval.min if (stepsspec) { pgrp <- sapply(pvals, function(p) which(p <= steps)[1]) psteps.l <- as.character(c(0,steps[-length(steps)])) psteps.r <- as.character(steps) len.l <- nchar(psteps.l) pad.l <- sapply(max(len.l) - len.l, function(x) paste0(rep(" ", x), collapse="")) psteps.l <- paste0(psteps.l, pad.l) psteps <- paste0(psteps.l, " < p <= ", psteps.r) ptable <- table(factor(pgrp, levels=1:length(steps), labels=psteps)) ptable <- data.frame(k=as.vector(ptable), row.names=names(ptable)) if (any(ptable[["k"]] == 0L)) { if (verbose >= 1) print(ptable) if (type == "stepfun" && (any(ptable[["k"]] & is.na(delta)))) stop(mstyle$stop(paste0("One or more intervals do not contain any observed p-values", if (!verbose) " (use 'verbose=TRUE' to see which)", "."))) if (type != "stepfun" && any(ptable[["k"]])) stop(mstyle$stop(paste0("One of the intervals does not contain any observed p-values", if (!verbose) " (use 'verbose=TRUE' to see which)", "."))) } } else { pgrp <- NA ptable <- NA } ############################################################################ if (optimizer=="optim") { par.arg <- "par" ctrl.arg <- ", control=optcontrol" } if (optimizer=="nlminb") { par.arg <- "start" ctrl.arg <- ", control=optcontrol" } if (is.element(optimizer, c("uobyqa","newuoa","bobyqa"))) { par.arg <- "par" optimizer <- paste0("minqa::", optimizer) ### need to use this since loading nloptr masks bobyqa() and newuoa() functions ctrl.arg <- ", control=optcontrol" } if (optimizer=="nloptr") { par.arg <- "x0" optimizer <- paste0("nloptr::nloptr") ### need to use this due to requireNamespace() ctrl.arg <- ", opts=optcontrol" } if (optimizer=="nlm") { par.arg <- "p" ### because of this, must use argument name pX for p 
(number of columns in X matrix) ctrl.arg <- paste(names(optcontrol), unlist(optcontrol), sep="=", collapse=", ") if (nchar(ctrl.arg) != 0L) ctrl.arg <- paste0(", ", ctrl.arg) } if (is.element(optimizer, c("hjk","nmk","mads"))) { par.arg <- "par" optimizer <- paste0("dfoptim::", optimizer) ### need to use this so that the optimizers can be found ctrl.arg <- ", control=optcontrol" } if (optimizer=="ucminf") { par.arg <- "par" optimizer <- paste0("ucminf::ucminf") ### need to use this due to requireNamespace() ctrl.arg <- ", control=optcontrol" } if (optimizer=="optimParallel") { par.arg <- "par" optimizer <- paste0("optimParallel::optimParallel") ### need to use this due to requireNamespace() ctrl.arg <- ", control=optcontrol, parallel=parallel" parallel$cl <- NULL if (is.null(cl)) { ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Control argument 'ncpus' must be >= 1.")) cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } else { if (!inherits(cl, "SOCKcluster")) stop(mstyle$stop("Specified cluster is not of class 'SOCKcluster'.")) } parallel$cl <- cl if (is.null(parallel$forward)) parallel$forward <- FALSE if (is.null(parallel$loginfo)) { if (verbose) { parallel$loginfo <- TRUE } else { parallel$loginfo <- FALSE } } } # note: X.fit due to hessian(); pX due to nlm() optcall <- paste(optimizer, "(", par.arg, "=c(beta.init, tau2.init, delta.init), ", .selmodel.ll, ", ", ifelse(optimizer=="optim", "method=optmethod, ", ""), "yi=yi, vi=vi, X.fit=X, preci=preci, k=k, pX=p, pvals=pvals, deltas=deltas, delta.val=delta, delta.transf=TRUE, mapfun=mapfun, delta.min=delta.min, delta.max=delta.max, tau2.val=tau2, tau2.transf=TRUE, tau2.max=tau2.max, beta.val=beta, wi.fun=wi.fun, steps=steps, pgrp=pgrp, alternative=alternative, pval.min=pval.min, intCtrl=intCtrl, verbose=verbose, digits=digits", ctrl.arg, ")\n", sep="") if (verbose > 1) message(mstyle$message("\nModel fitting ...\n")) #return(optcall) if (verbose) { opt.res <- try(eval(parse(text=optcall)), silent=!verbose) } else { opt.res <- try(suppressWarnings(eval(parse(text=optcall))), silent=!verbose) } #return(opt.res) if (optimizer == "optimParallel::optimParallel" && verbose) { tmp <- capture.output(print(opt.res$loginfo)) .print.output(tmp, mstyle$verbose) } if (inherits(opt.res, "try-error")) stop(mstyle$stop("Error during the optimization. 
Use verbose=TRUE and see help(selmodel) for more details on the optimization routines.")) ### convergence checks if (is.element(optimizer, c("optim","nlminb","dfoptim::hjk","dfoptim::nmk","optimParallel::optimParallel")) && opt.res$convergence != 0) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (is.element(optimizer, c("dfoptim::mads")) && opt.res$convergence > optcontrol$tol) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (is.element(optimizer, c("minqa::uobyqa","minqa::newuoa","minqa::bobyqa")) && opt.res$ierr != 0) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (ierr = ", opt.res$ierr, ")."))) if (optimizer=="nloptr::nloptr" && !(opt.res$status >= 1 && opt.res$status <= 4)) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (status = ", opt.res$status, ")."))) if (optimizer=="ucminf::ucminf" && !(opt.res$convergence == 1 || opt.res$convergence == 2)) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (verbose > 2) { cat("\n") tmp <- capture.output(print(opt.res)) .print.output(tmp, mstyle$verbose) } ### copy estimated values to 'par' so code below works if (optimizer=="nloptr::nloptr") opt.res$par <- opt.res$solution if (optimizer=="nlm") opt.res$par <- opt.res$estimate ### estimates/values of tau2 and delta on the transformed scale tau2.transf <- opt.res$par[p+1] delta.transf <- opt.res$par[(p+2):(p+1+deltas)] ### save for Hessian computation beta.val <- beta tau2.val <- tau2 delta.val <- delta ### do the final model fit with estimated values fitcall <- paste(.selmodel.ll, "(par=opt.res$par, yi=yi, vi=vi, X.fit=X, preci=preci, k=k, pX=p, pvals=pvals, deltas=deltas, delta.val=delta, delta.transf=TRUE, mapfun=mapfun, delta.min=delta.min, delta.max=delta.max, tau2.val=tau2, tau2.transf=TRUE, tau2.max=tau2.max, beta.val=beta, wi.fun=wi.fun, steps=steps, pgrp=pgrp, alternative=alternative, pval.min=pval.min, intCtrl=intCtrl, verbose=FALSE, digits=digits, dofit=TRUE)\n", sep="") #return(fitcall) fitcall <- try(eval(parse(text=fitcall)), silent=!verbose) #return(fitcall) if (inherits(fitcall, "try-error")) stop(mstyle$stop("Error during the optimization. 
Use verbose=TRUE and see help(selmodel) for more details on the optimization routines.")) ll <- fitcall$ll beta <- cbind(fitcall$beta) tau2 <- fitcall$tau2 delta <- fitcall$delta if (any(delta <= delta.min + .Machine$double.eps^0.25) || any(delta >= delta.max - 100*.Machine$double.eps^0.25)) warning(mstyle$warning("One or more 'delta' estimates are (almost) equal to their lower or upper bound.\nTreat results with caution (or consider adjusting 'delta.min' and/or 'delta.max')."), call.=FALSE) ############################################################################ ### computing (inverse) Hessian H <- NA vb <- matrix(NA_real_, nrow=p, ncol=p) se.tau2 <- NA vb.delta <- matrix(NA_real_, nrow=deltas, ncol=deltas) if (con$beta.fix) { beta.hes <- c(beta) } else { beta.hes <- beta.val } if (con$tau2.fix) { tau2.hes <- tau2 } else { tau2.hes <- tau2.val } if (con$delta.fix) { delta.hes <- delta } else { delta.hes <- delta.val } hest <- c(is.na(beta.hes), is.na(tau2.hes), is.na(delta.hes)) if (any(hest) && !isTRUE(ddd$skiphes)) { if (verbose > 1) message(mstyle$message("\nComputing Hessian ...")) if (verbose > 3) cat("\n") if (con$htransf) { hescall <- paste("numDeriv::hessian(", .selmodel.ll, ", x=opt.res$par, method.args=con$hessianCtrl, yi=yi, vi=vi, X.fit=X, preci=preci, k=k, pX=p, pvals=pvals, deltas=deltas, delta.val=delta.hes, delta.transf=TRUE, mapfun=mapfun, delta.min=delta.min, delta.max=delta.max, tau2.val=tau2.hes, tau2.transf=TRUE, tau2.max=tau2.max, beta.val=beta.hes, wi.fun=wi.fun, steps=steps, pgrp=pgrp, alternative=alternative, pval.min=pval.min, intCtrl=intCtrl, verbose=ifelse(verbose > 3, verbose, 0), digits=digits)\n", sep="") } else { hescall <- paste("numDeriv::hessian(", .selmodel.ll, ", x=c(beta, tau2, delta), method.args=con$hessianCtrl, yi=yi, vi=vi, X.fit=X, preci=preci, k=k, pX=p, pvals=pvals, deltas=deltas, delta.val=delta.hes, delta.transf=FALSE, mapfun=mapfun, delta.min=delta.min, delta.max=delta.max, tau2.val=tau2.hes, tau2.transf=FALSE, tau2.max=tau2.max, beta.val=beta.hes, wi.fun=wi.fun, steps=steps, pgrp=pgrp, alternative=alternative, pval.min=pval.min, intCtrl=intCtrl, verbose=ifelse(verbose > 3, verbose, 0), digits=digits)\n", sep="") } #return(hescall) H <- try(eval(parse(text=hescall)), silent=TRUE) #return(H) if (verbose > 3) cat("\n") if (inherits(H, "try-error")) { warning(mstyle$warning("Error when trying to compute Hessian."), call.=FALSE) } else { if (deltas == 1L) { rownames(H) <- colnames(H) <- c(colnames(X), "tau2", "delta") } else { rownames(H) <- colnames(H) <- c(colnames(X), "tau2", paste0("delta.", 1:deltas)) } H.hest <- H[hest, hest, drop=FALSE] iH.hest <- try(suppressWarnings(chol2inv(chol(H.hest))), silent=TRUE) if (inherits(iH.hest, "try-error") || anyNA(iH.hest) || any(is.infinite(iH.hest))) { warning(mstyle$warning("Error when trying to invert Hessian."), call.=FALSE) } else { iH <- matrix(0, nrow=length(hest), ncol=length(hest)) iH[hest, hest] <- iH.hest if (anyNA(beta.hes)) vb[is.na(beta.hes), is.na(beta.hes)] <- iH[c(is.na(beta.hes),FALSE,rep(FALSE,deltas)), c(is.na(beta.hes),FALSE,rep(FALSE,deltas)), drop=FALSE] if (is.na(tau2.hes)) se.tau2 <- sqrt(iH[c(rep(FALSE,p),TRUE,rep(FALSE,deltas)), c(rep(FALSE,p),TRUE,rep(FALSE,deltas))]) if (anyNA(delta.hes)) vb.delta[is.na(delta.hes), is.na(delta.hes)] <- iH[c(rep(FALSE,p),FALSE,is.na(delta.hes)), c(rep(FALSE,p),FALSE,is.na(delta.hes)), drop=FALSE] } } } ############################################################################ ### Wald-type tests of the fixed effects if (verbose > 1) 
message(mstyle$message("Conducting tests of the fixed effects ...")) ### scale back beta and vb if (!x$int.only && x$int.incl && con$scaleX) { beta <- mX %*% beta vb <- mX %*% vb %*% t(mX) X <- Xsave } ### QM calculation QM <- try(as.vector(t(beta)[x$btt] %*% chol2inv(chol(vb[x$btt,x$btt])) %*% beta[x$btt]), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA QMp <- pchisq(QM, df=x$m, lower.tail=FALSE) rownames(beta) <- rownames(vb) <- colnames(vb) <- colnames(X) se <- sqrt(diag(vb)) names(se) <- NULL ### inference for beta parameters zval <- c(beta/se) pval <- 2*pnorm(abs(zval), lower.tail=FALSE) crit <- qnorm(x$level/2, lower.tail=FALSE) ci.lb <- c(beta - crit * se) ci.ub <- c(beta + crit * se) ### inference for delta parameters se.delta <- sqrt(diag(vb.delta)) if (con$htransf) { zval.delta <- rep(NA_real_, deltas) pval.delta <- rep(NA_real_, deltas) ci.lb.delta <- c(delta.transf - crit * se.delta) ci.ub.delta <- c(delta.transf + crit * se.delta) ci.lb.delta <- mapply(.mapfun, ci.lb.delta, delta.min, delta.max, mapfun) ci.ub.delta <- mapply(.mapfun, ci.ub.delta, delta.min, delta.max, mapfun) vb.delta <- matrix(NA_real_, nrow=deltas, ncol=deltas) se.delta <- rep(NA_real_, deltas) } else { zval.delta <- (delta - H0.delta) / se.delta pval.delta <- 2*pnorm(abs(zval.delta), lower.tail=FALSE) ci.lb.delta <- c(delta - crit * se.delta) ci.ub.delta <- c(delta + crit * se.delta) } ci.lb.delta <- ifelse(ci.lb.delta < delta.lb, delta.lb, ci.lb.delta) ci.ub.delta <- ifelse(ci.ub.delta > delta.ub, delta.ub, ci.ub.delta) ### inference for tau^2 parameter if (con$htransf) { ci.lb.tau2 <- exp(tau2.transf - crit * se.tau2) ci.ub.tau2 <- exp(tau2.transf + crit * se.tau2) se.tau2 <- NA } else { ci.lb.tau2 <- tau2 - crit * se.tau2 ci.ub.tau2 <- tau2 + crit * se.tau2 } ci.lb.tau2[ci.lb.tau2 < 0] <- 0 ############################################################################ ### LRT for H0: tau^2 = 0 (only when NOT fitting a FE model) LRT.tau2 <- NA LRTp.tau2 <- NA if (!x$tau2.fix && !is.element(x$method, c("FE","EE","CE")) && !isTRUE(ddd$skiphet)) { if (verbose > 1) message(mstyle$message("Conducting heterogeneity test ...")) if (verbose > 4) cat("\n") optcall <- paste(optimizer, "(", par.arg, "=c(beta.init, tau2.init, delta.init), ", .selmodel.ll, ", ", ifelse(optimizer=="optim", "method=optmethod, ", ""), "yi=yi, vi=vi, X.fit=X, preci=preci, k=k, pX=p, pvals=pvals, deltas=deltas, delta.val=delta.val, delta.transf=TRUE, mapfun=mapfun, delta.min=delta.min, delta.max=delta.max, tau2.val=0, tau2.transf=FALSE, tau2.max=tau2.max, beta.val=beta.val, wi.fun=wi.fun, steps=steps, pgrp=pgrp, alternative=alternative, pval.min=pval.min, intCtrl=intCtrl, verbose=ifelse(verbose > 4, verbose, 0), digits=digits", ctrl.arg, ")\n", sep="") opt.res <- try(eval(parse(text=optcall)), silent=!verbose) if (verbose > 4) cat("\n") if (!inherits(opt.res, "try-error")) { fitcall <- paste(.selmodel.ll, "(par=opt.res$par, yi=yi, vi=vi, X.fit=X, preci=preci, k=k, pX=p, pvals=pvals, deltas=deltas, delta.val=delta.val, delta.transf=TRUE, mapfun=mapfun, delta.min=delta.min, delta.max=delta.max, tau2.val=0, tau2.transf=FALSE, tau2.max=tau2.max, beta.val=beta.val, wi.fun=wi.fun, steps=steps, pgrp=pgrp, alternative=alternative, pval.min=pval.min, intCtrl=intCtrl, verbose=FALSE, digits=digits, dofit=TRUE)\n", sep="") fitcall <- try(eval(parse(text=fitcall)), silent=!verbose) if (!inherits(fitcall, "try-error")) { ll0 <- fitcall$ll LRT.tau2 <- max(0, -2 * (ll0 - ll)) LRTp.tau2 <- pchisq(LRT.tau2, df=1, lower.tail=FALSE) } } } 
############################################################################ ### LRT for selection model parameter(s) if (verbose > 1) message(mstyle$message("Conducting LRT for selection model parameter(s) ...")) ll0 <- c(logLik(x, REML=FALSE)) LRT <- max(0, -2 * (ll0 - ll)) LRTdf <- sum(is.na(delta.val) & delta.LRT) LRTp <- ifelse(LRTdf > 0, pchisq(LRT, df=LRTdf, lower.tail=FALSE), NA) ############################################################################ ### fit statistics if (verbose > 1) message(mstyle$message("Computing fit statistics and log likelihood ...")) ### note: tau2 and delta are not counted as parameters when they were fixed by the user parms <- p + ifelse(is.element(x$method, c("FE","EE","CE")) || x$tau2.fix, 0, 1) + sum(is.na(delta.val)) ll.ML <- ll dev.ML <- -2 * ll.ML AIC.ML <- -2 * ll.ML + 2*parms BIC.ML <- -2 * ll.ML + parms * log(k) AICc.ML <- -2 * ll.ML + 2*parms * max(k, parms+2) / (max(k, parms+2) - parms - 1) fit.stats <- matrix(c(ll.ML, dev.ML, AIC.ML, BIC.ML, AICc.ML, ll.REML=NA, dev.REML=NA, AIC.REML=NA, BIC.REML=NA, AICc.REML=NA), ncol=2, byrow=FALSE) dimnames(fit.stats) <- list(c("ll","dev","AIC","BIC","AICc"), c("ML","REML")) fit.stats <- data.frame(fit.stats) ############################################################################ ### prepare output if (verbose > 1) message(mstyle$message("Preparing output ...")) res <- x res$beta <- res$b <- beta res$se <- se res$zval <- zval res$pval <- pval res$ci.lb <- ci.lb res$ci.ub <- ci.ub res$vb <- vb res$betaspec <- betaspec res$tau2 <- res$tau2.f <- tau2 res$se.tau2 <- se.tau2 res$ci.lb.tau2 <- ci.lb.tau2 res$ci.ub.tau2 <- ci.ub.tau2 res$dfs <- res$ddf <- NA res$test <- "z" res$s2w <- 1 res$QE <- res$QEp <- NA res$I2 <- res$H2 <- res$vt <- NA res$R2 <- NULL res$QM <- QM res$QMp <- QMp res$delta <- delta res$vb.delta <- vb.delta res$se.delta <- se.delta res$zval.delta <- zval.delta res$pval.delta <- pval.delta res$ci.lb.delta <- ci.lb.delta res$ci.ub.delta <- ci.ub.delta res$deltas <- deltas res$delta.fix <- !is.na(delta.val) res$hessian <- H res$hest <- hest res$ll <- ll res$ll0 <- ll0 res$LRT <- LRT res$LRTdf <- LRTdf res$LRTp <- LRTp res$LRT.tau2 <- LRT.tau2 res$LRTp.tau2 <- LRTp.tau2 res$M <- diag(vi + tau2, nrow=k, ncol=k) res$model <- "rma.uni.selmodel" res$parms <- parms res$fit.stats <- fit.stats res$pvals <- pvals res$digits <- digits res$verbose <- verbose res$type <- type res$steps <- steps res$stepsspec <- stepsspec res$pgrp <- pgrp res$ptable <- ptable res$alternative <- alternative res$pval.min <- pval.min res$prec <- prec res$precspec <- precspec res$precis <- precis res$scaleprec <- ddd$scaleprec res$wi.fun <- wi.fun res$delta.lb <- delta.lb res$delta.ub <- delta.ub res$delta.lb.excl <- delta.lb.excl res$delta.ub.excl <- delta.ub.excl res$delta.min <- delta.min res$delta.max <- delta.max res$tau2.max <- tau2.max res$call <- match.call() res$control <- control res$defmap <- ddd$defmap res$mapfun <- ddd$mapfun res$mapinvfun <- ddd$mapinvfun time.end <- proc.time() res$time <- unname(time.end - time.start)[3] if (.isTRUE(ddd$time)) .print.time(res$time) if (verbose || .isTRUE(ddd$time)) cat("\n") class(res) <- c("rma.uni.selmodel", class(res)) return(res) } metafor/R/weights.rma.glmm.r0000644000176200001440000000024413770400276015464 0ustar liggesusersweights.rma.glmm <- function(object, ...) 
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.glmm", notav="rma.glmm") } metafor/R/regplot.rma.r0000644000176200001440000005346714054724652014555 0ustar liggesusersregplot.rma <- function(x, mod, pred=TRUE, ci=TRUE, pi=FALSE, shade=TRUE, xlim, ylim, predlim, olim, xlab, ylab, at, digits=2L, transf, atransf, targs, level=x$level, pch=21, psize, plim=c(0.5,3), col="black", bg="darkgray", grid=FALSE, refline, label=FALSE, offset=c(1,1), labsize=1, lcol, lwd, lty, legend=FALSE, xvals, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma", notav=c("rma.mh","rma.peto")) if (x$int.only) stop(mstyle$stop("Plot not applicable to intercept-only models.")) na.act <- getOption("na.action") on.exit(options(na.action=na.act)) if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(transf)) transf <- FALSE if (missing(atransf)) atransf <- FALSE transf.char <- deparse(substitute(transf)) atransf.char <- deparse(substitute(atransf)) if (is.function(transf) && is.function(atransf)) stop(mstyle$stop("Use either 'transf' or 'atransf' to specify a transformation (not both).")) if (missing(targs)) targs <- NULL if (missing(ylab)) ylab <- .setlab(x$measure, transf.char, atransf.char, gentype=1, short=FALSE) if (missing(at)) at <- NULL if (missing(psize)) psize <- NULL if (missing(label)) label <- NULL ### grid argument can either be a logical or a color if (is.logical(grid)) gridcol <- "gray" if (is.character(grid)) { gridcol <- grid grid <- TRUE } ### shade argument can either be a logical or a color if (is.logical(shade)) shadecol <- c("gray85", "gray95") # first for ci, second for pi if (is.character(shade)) { if (length(shade) == 1L) shade <- c(shade, shade) shadecol <- shade shade <- TRUE } ### copy pred to addpred (since using pred below for predicted values) if (inherits(pred, "list.rma")) { addpred <- TRUE if (missing(xvals)) stop(mstyle$stop("Need to specify the 'xvals' argument.")) if (length(xvals) != length(pred$pred)) stop(mstyle$stop(paste0("Length of the 'xvals' argument (", length(xvals), ") does not correspond to the number of predicted values (", length(pred$pred), ")."))) } else { addpred <- pred } ### set refline to NA if it is not specified if (missing(refline)) refline <- NA ### set lcol, lty, and lwd (1 = reg line, 2 = ci bounds, 3 = pi bounds, 4 = refline) if (missing(lcol)) { lcol <- c("black", "black", "black", "gray40") } else { if (length(lcol) == 1L) lcol <- rep(lcol, 4) if (length(lcol) == 2L) lcol <- c(lcol[c(1,2,2)], "gray40") if (length(lcol) == 3L) lcol <- c(lcol, "gray40") } if (missing(lty)) { lty <- c("solid", "dashed", "dotted", "solid") } else { if (length(lty) == 1L) lty <- rep(lty, 4) if (length(lty) == 2L) lty <- c(lty[c(1,2,2)], "solid") if (length(lty) == 3L) lty <- c(lty, "solid") } if (missing(lwd)) { lwd <- c(3,1,1,2) } else { if (length(lwd) == 1L) lwd <- rep(lwd, 4) if (length(lwd) == 2L) lwd <- c(lwd[c(1,2,2)], 2) if (length(lwd) == 3L) lwd <- c(lwd, 2) } level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) ############################################################################ ### checks on mod argument if (missing(mod)) { if (x$p == 2L && x$int.incl) { mod <- 2 } else { stop(mstyle$stop("Need to specify the 'mod' argument.")) } } if (length(mod) != 1L) 
stop(mstyle$stop("Can only specify a single variable via argument 'mod'.")) if (!(is.character(mod) || is.numeric(mod))) stop(mstyle$stop("Argument 'mod' must either be a character string or a scalar.")) if (is.character(mod)) { mod.pos <- charmatch(mod, colnames(x$X)) if (is.na(mod.pos)) stop(mstyle$stop("Argument 'mod' must be the name of a moderator variable in the model.")) if (mod.pos == 0L) stop(mstyle$stop("No ambiguous match found for variable name specified via 'mod' argument.")) } else { mod.pos <- round(mod) if (mod.pos < 1 | mod.pos > x$p) stop(mstyle$stop("Specified position of 'mod' variable does not exist in the model.")) } ### extract the observed outcomes, corresponding sampling variances, model matrix, slab, and ids yi <- c(x$yi.f) vi <- x$vi.f X <- x$X.f slab <- x$slab ids <- x$ids ### get weights options(na.action = "na.pass") # using na.pass to get the entire vector (length of yi.f) weights <- try(weights(x), silent=TRUE) # does not work for rma.glmm and rma.uni.selmodel objects if (inherits(weights, "try-error")) weights <- rep(1, x$k.f) options(na.action = na.act) ### note: pch (if vector), psize (if vector), col (if vector), bg (if vector) ### must have the same length as the original dataset so we have to ### apply the same subsetting (if necessary) and removing of NAs as was ### done during the model fitting (note: NAs are removed further below) if (length(pch) == 1L) pch <- rep(pch, x$k.all) if (length(pch) != x$k.all) stop(mstyle$stop(paste0("Length of the 'pch' argument (", length(pch), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) pch <- pch[x$subset] psize.char <- FALSE if (!is.null(psize)) { if (is.character(psize)) { psize <- match.arg(psize, c("seinv", "vinv")) if (psize == "seinv") { psize <- 1 / sqrt(vi) } else { psize <- 1 / vi } psize.char <- TRUE } else { if (length(psize) == 1L) psize <- rep(psize, x$k.all) if (length(psize) != x$k.all) stop(mstyle$stop(paste0("Length of the 'psize' argument (", length(psize), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) psize <- psize[x$subset] } } if (length(col) == 1L) col <- rep(col, x$k.all) if (length(col) != x$k.all) stop(mstyle$stop(paste0("Length of the 'col' argument (", length(col), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) col <- col[x$subset] if (length(bg) == 1L) bg <- rep(bg, x$k.all) if (length(bg) != x$k.all) stop(mstyle$stop(paste0("Length of the 'bg' argument (", length(bg), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) bg <- bg[x$subset] if (!is.null(label)) { if (is.character(label)) { label <- match.arg(label, c("all", "ciout", "piout")) if (label == "all") { label <- rep(TRUE, x$k.all) if (!is.null(x$subset)) label <- label[x$subset] } } else if (is.logical(label)) { if (!is.logical(label)) stop(mstyle$stop("Argument 'label' must be a logical vector (or a single character string).")) if (length(label) == 1L) label <- rep(label, x$k.all) if (length(label) != x$k.all) stop(mstyle$stop(paste0("Length of the 'label' argument (", length(label), ") does not correspond to the size of the original dataset (", x$k.all, ")."))) if (!is.null(x$subset)) label <- label[x$subset] } else if (is.numeric(label)) { label <- round(label) label <- seq(x$k.all) %in% label } } ############################################################################ has.na <- is.na(yi) | is.na(vi) 
| apply(is.na(X), 1, any) not.na <- !has.na if (any(has.na)) { yi <- yi[not.na] vi <- vi[not.na] X <- X[not.na,,drop=FALSE] slab <- slab[not.na] ids <- ids[not.na] weights <- weights[not.na] pch <- pch[not.na] psize <- psize[not.na] # if NULL, remains NULL col <- col[not.na] bg <- bg[not.na] if (!is.character(label)) label <- label[not.na] } k <- length(yi) ############################################################################ ### extract values for moderator of interest xi <- X[,mod.pos] if (inherits(pred, "list.rma")) { xs <- xvals ci.lb <- pred$ci.lb ci.ub <- pred$ci.ub if (is.null(pred$pi.lb) || anyNA(pred$pi.lb)) { pi.lb <- pred$ci.lb pi.ub <- pred$ci.ub if (pi) warning(mstyle$warning("Object passed to 'pred' argument does not contain prediction interval information."), call.=FALSE) pi <- FALSE } else { pi.lb <- pred$pi.lb pi.ub <- pred$pi.ub } pred <- pred$pred if (!is.null(label) && is.character(label) && label %in% c("ciout", "piout")) { warning(mstyle$stop("Cannot label points based on the confidence/prediction interval when passing an object to 'pred'.")) label <- NULL } yi.pred <- NULL yi.ci.lb <- NULL yi.ci.ub <- NULL yi.pi.lb <- NULL yi.pi.ub <- NULL } else { ### get predicted values if (!missing(xvals)) { xs <- xvals len <- length(xs) predlim <- range(xs) } else { len <- 1000 if (missing(predlim)) { range.xi <- max(xi) - min(xi) predlim <- c(min(xi) - .04*range.xi, max(xi) + .04*range.xi) xs <- seq(predlim[1], predlim[2], length=len) } else { if (length(predlim) != 2L) stop(mstyle$stop("Argument 'predlim' must be of length 2.")) xs <- seq(predlim[1], predlim[2], length=len) } } Xnew <- rbind(colMeans(X))[rep(1,len),,drop=FALSE] Xnew[,mod.pos] <- xs if (x$int.incl) Xnew <- Xnew[,-1,drop=FALSE] tmp <- predict(x, newmods=Xnew, level=level) pred <- tmp$pred ci.lb <- tmp$ci.lb ci.ub <- tmp$ci.ub if (is.null(tmp$pi.lb) || anyNA(tmp$pi.lb)) { pi.lb <- ci.lb pi.ub <- ci.ub if (pi) warning(mstyle$warning("Cannot draw prediction interval for the given model."), call.=FALSE) pi <- FALSE } else { pi.lb <- tmp$pi.lb pi.ub <- tmp$pi.ub } Xnew <- rbind(colMeans(X))[rep(1,k),,drop=FALSE] Xnew[,mod.pos] <- xi if (x$int.incl) Xnew <- Xnew[,-1,drop=FALSE] tmp <- predict(x, newmods=Xnew, level=level) yi.pred <- tmp$pred yi.ci.lb <- tmp$ci.lb yi.ci.ub <- tmp$ci.ub if (is.null(tmp$pi.lb) || anyNA(tmp$pi.lb)) { yi.pi.lb <- yi.ci.lb yi.pi.ub <- yi.ci.ub if (!is.null(label) && is.character(label) && label == "piout") { warning(mstyle$warning("Cannot label points based on the prediction interval for the given model."), call.=FALSE) label <- NULL } } else { yi.pi.lb <- tmp$pi.lb yi.pi.ub <- tmp$pi.ub } } ############################################################################ ### if requested, apply transformation to yi's and CI/PI bounds if (is.function(transf)) { if (is.null(targs)) { yi <- sapply(yi, transf) pred <- sapply(pred, transf) ci.lb <- sapply(ci.lb, transf) ci.ub <- sapply(ci.ub, transf) pi.lb <- sapply(pi.lb, transf) pi.ub <- sapply(pi.ub, transf) yi.pred <- sapply(yi.pred, transf) yi.ci.lb <- sapply(yi.ci.lb, transf) yi.ci.ub <- sapply(yi.ci.ub, transf) yi.pi.lb <- sapply(yi.pi.lb, transf) yi.pi.ub <- sapply(yi.pi.ub, transf) } else { yi <- sapply(yi, transf, targs) pred <- sapply(pred, transf, targs) ci.lb <- sapply(ci.lb, transf, targs) ci.ub <- sapply(ci.ub, transf, targs) pi.lb <- sapply(pi.lb, transf, targs) pi.ub <- sapply(pi.ub, transf, targs) yi.pred <- sapply(yi.pred, transf, targs) yi.ci.lb <- sapply(yi.ci.lb, transf, targs) yi.ci.ub <- sapply(yi.ci.ub, transf, targs) 
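         ### (the prediction interval bounds for the observed outcomes are transformed in the
         ### same element-wise way directly below; as an illustrative usage -- not a default --
         ### calling regplot(res, transf=exp) on a hypothetical single-moderator model 'res' with
         ### log-transformed outcomes would back-transform the points, regression line, and
         ### CI/PI bounds to the ratio scale)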
yi.pi.lb <- sapply(yi.pi.lb, transf, targs) yi.pi.ub <- sapply(yi.pi.ub, transf, targs) } } ### make sure order of intervals is always increasing tmp <- .psort(ci.lb, ci.ub) ci.lb <- tmp[,1] ci.ub <- tmp[,2] tmp <- .psort(pi.lb, pi.ub) pi.lb <- tmp[,1] pi.ub <- tmp[,2] ### apply observation/outcome limits if specified if (!missing(olim)) { if (length(olim) != 2L) stop(mstyle$stop("Argument 'olim' must be of length 2.")) olim <- sort(olim) yi[yi < olim[1]] <- olim[1] yi[yi > olim[2]] <- olim[2] pred[pred < olim[1]] <- olim[1] pred[pred > olim[2]] <- olim[2] ci.lb[ci.lb < olim[1]] <- olim[1] ci.ub[ci.ub > olim[2]] <- olim[2] pi.lb[pi.lb < olim[1]] <- olim[1] pi.ub[pi.ub > olim[2]] <- olim[2] } ### set default point sizes (if not specified by user) if (is.null(psize) || psize.char) { if (length(plim) < 2L) stop(mstyle$stop("Argument 'plim' must be of length 2 or 3.")) if (psize.char) { wi <- psize } else { wi <- sqrt(weights) } if (!is.na(plim[1]) && !is.na(plim[2])) { rng <- max(wi, na.rm=TRUE) - min(wi, na.rm=TRUE) if (rng <= .Machine$double.eps^0.5) { psize <- rep(1, k) } else { psize <- (wi - min(wi, na.rm=TRUE)) / rng psize <- (psize * (plim[2] - plim[1])) + plim[1] } } if (is.na(plim[1]) && !is.na(plim[2])) { psize <- wi / max(wi, na.rm=TRUE) * plim[2] if (length(plim) == 3L) psize[psize <= plim[3]] <- plim[3] } if (!is.na(plim[1]) && is.na(plim[2])) { psize <- wi / min(wi, na.rm=TRUE) * plim[1] if (length(plim) == 3L) psize[psize >= plim[3]] <- plim[3] } if (all(is.na(psize))) psize <- rep(1, k) } ############################################################################ if (missing(xlab)) xlab <- colnames(X)[mod.pos] if (!is.expression(xlab) && xlab == "") xlab <- "Moderator" if (missing(xlim)) { xlim <- range(xi) } else { if (length(xlim) != 2L) stop(mstyle$stop("Argument 'xlim' must be of length 2.")) } if (missing(ylim)) { if (pi) { ylim <- range(c(yi, pi.lb, pi.ub)) } else if (ci) { ylim <- range(c(yi, ci.lb, ci.ub)) } else { ylim <- range(yi) } } else { if (length(ylim) != 2L) stop(mstyle$stop("Argument 'ylim' must be of length 2.")) } ### if user has specified 'at' argument, make sure ylim actually contains the min and max 'at' values if (!is.null(at)) { ylim[1] <- min(c(ylim[1], at), na.rm=TRUE) ylim[2] <- max(c(ylim[2], at), na.rm=TRUE) } ############################################################################ ### set up plot plot(NA, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim, yaxt="n", ...) ### generate y-axis positions if none are specified if (is.null(at)) { at <- axTicks(side=2) } else { at <- at[at > par("usr")[3]] at <- at[at < par("usr")[4]] } ### y-axis labels (apply transformation to axis labels if requested) at.lab <- at if (is.function(atransf)) { if (is.null(targs)) { at.lab <- formatC(sapply(at.lab, atransf), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } else { at.lab <- formatC(sapply(at.lab, atransf, targs), digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } } else { at.lab <- formatC(at.lab, digits=digits[[1]], format="f", drop0trailing=is.integer(digits[[1]])) } ### add y-axis axis(side=2, at=at, labels=at.lab, ...) ### add predicted values / CI bounds if (shade) { if (pi) polygon(c(xs, rev(xs)), c(pi.lb, rev(pi.ub)), border=NA, col=shadecol[2], ...) if (ci) polygon(c(xs, rev(xs)), c(ci.lb, rev(ci.ub)), border=NA, col=shadecol[1], ...) } if (ci) { lines(xs, ci.lb, col=lcol[2], lty=lty[2], lwd=lwd[2], ...) lines(xs, ci.ub, col=lcol[2], lty=lty[2], lwd=lwd[2], ...) 
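      ### (the corresponding prediction interval bounds are drawn below in the same way using
      ### lcol[3]/lty[3]/lwd[3]; the shaded CI/PI regions, if requested via 'shade', were already
      ### added as polygons further above)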
} if (pi) { lines(xs, pi.lb, col=lcol[3], lty=lty[3], lwd=lwd[3], ...) lines(xs, pi.ub, col=lcol[3], lty=lty[3], lwd=lwd[3], ...) } ### add grid if (.isTRUE(grid)) grid(col=gridcol) # grid needs to be at x and y tick positions also if using y-axis transformation ### add refline abline(h=refline, col=lcol[4], lty=lty[4], lwd=lwd[4], ...) if (addpred) lines(xs, pred, col=lcol[1], lty=lty[1], lwd=lwd[1], ...) ### redraw box box(...) ### order points by psize for plotting order.vec <- order(psize, decreasing=TRUE) xi.o <- xi[order.vec] yi.o <- yi[order.vec] pch.o <- pch[order.vec] psize.o <- psize[order.vec] col.o <- col[order.vec] bg.o <- bg[order.vec] ### add points points(x=xi.o, y=yi.o, pch=pch.o, col=col.o, bg=bg.o, cex=psize.o, ...) ### labeling of points if (!is.null(label)) { if (!is.null(label) && is.character(label) && label %in% c("ciout", "piout")) { if (label == "ciout") { label <- yi < yi.ci.lb | yi > yi.ci.ub label[xi < predlim[1] | xi > predlim[2]] <- FALSE } else { label <- yi < yi.pi.lb | yi > yi.pi.ub label[xi < predlim[1] | xi > predlim[2]] <- FALSE } } yrange <- ylim[2] - ylim[1] if (length(offset) == 2L) offset <- c(offset[1]/100 * yrange, offset[2]/100 * yrange, 1) if (length(offset) == 1L) offset <- c(0, offset/100 * yrange, 1) for (i in which(label)) { if (isTRUE(yi[i] > yi.pred[i])) { # yi.pred might be NULL, so use isTRUE() text(xi[i], yi[i] + offset[1] + offset[2]*psize[i]^offset[3], slab[i], cex=labsize, ...) } else { text(xi[i], yi[i] - offset[1] - offset[2]*psize[i]^offset[3], slab[i], cex=labsize, ...) } } } else { label <- rep(FALSE, k) } ### add legend (if requested) if (is.logical(legend) && isTRUE(legend)) lpos <- "topright" if (is.character(legend)) { lpos <- legend legend <- TRUE } if (legend) { pch.l <- NULL col.l <- NULL bg.l <- NULL lty.l <- NULL lwd.l <- NULL tcol.l <- NULL ltxt <- NULL if (length(unique(pch)) == 1L && length(unique(col)) == 1L && length(unique(bg)) == 1L) { pch.l <- NA col.l <- NA bg.l <- NA lty.l <- "blank" lwd.l <- NA tcol.l <- "white" ltxt <- "Studies" } if (addpred) { pch.l <- c(pch.l, NA) col.l <- c(col.l, NA) bg.l <- c(bg.l, NA) lty.l <- c(lty.l, NA) lwd.l <- c(lwd.l, NA) tcol.l <- c(tcol.l, "white") ltxt <- c(ltxt, "Regression Line") } if (ci) { pch.l <- c(pch.l, 22) col.l <- c(col.l, lcol[2]) bg.l <- c(bg.l, shadecol[1]) lty.l <- c(lty.l, NA) lwd.l <- c(lwd.l, 1) tcol.l <- c(tcol.l, "white") ltxt <- c(ltxt, paste0(round(100*(1-level), digits[[1]]), "% Confidence Interval")) } if (pi) { pch.l <- c(pch.l, 22) col.l <- c(col.l, lcol[3]) bg.l <- c(bg.l, shadecol[2]) lty.l <- c(lty.l, NA) lwd.l <- c(lwd.l, 1) tcol.l <- c(tcol.l, "white") ltxt <- c(ltxt, paste0(round(100*(1-level), digits[[1]]), "% Prediction Interval")) } if (length(ltxt) >= 1L) legend(lpos, inset=.01, bg="white", pch=pch.l, col=col.l, pt.bg=bg.l, lty=lty.l, lwd=lwd.l, text.col=tcol.l, pt.cex=1.5, seg.len=3, legend=ltxt) pch.l <- NULL col.l <- NULL bg.l <- NULL lty.l <- NULL lwd.l <- NULL tcol.l <- NULL ltxt <- NULL if (length(unique(pch)) == 1L && length(unique(col)) == 1L && length(unique(bg)) == 1L) { pch.l <- pch[1] col.l <- col[1] bg.l <- bg[1] lty.l <- "blank" lwd.l <- 1 tcol.l <- "black" ltxt <- "Studies" } if (addpred) { pch.l <- c(pch.l, NA) col.l <- c(col.l, lcol[1]) bg.l <- c(bg.l, NA) lty.l <- c(lty.l, lty[1]) lwd.l <- c(lwd.l, lwd[1]) tcol.l <- c(tcol.l, "black") ltxt <- c(ltxt, "Regression Line") } if (ci) { pch.l <- c(pch.l, NA) col.l <- c(col.l, lcol[2]) bg.l <- c(bg.l, NA) lty.l <- c(lty.l, lty[2]) lwd.l <- c(lwd.l, lwd[2]) tcol.l <- c(tcol.l, 
"black") ltxt <- c(ltxt, paste0(round(100*(1-level), digits[[1]]), "% Confidence Interval")) } if (pi) { pch.l <- c(pch.l, NA) col.l <- c(col.l, lcol[3]) bg.l <- c(bg.l, NA) lty.l <- c(lty.l, lty[3]) lwd.l <- c(lwd.l, lwd[3]) tcol.l <- c(tcol.l, "black") ltxt <- c(ltxt, paste0(round(100*(1-level), digits[[1]]), "% Prediction Interval")) } if (length(ltxt) >= 1L) legend(lpos, inset=.01, bg=NA, pch=pch.l, col=col.l, pt.bg=bg.l, lty=lty.l, lwd=lwd.l, text.col=tcol.l, pt.cex=1.5, seg.len=3, legend=ltxt) } ############################################################################ sav <- data.frame(slab, ids, xi, yi, pch, psize, col, bg, label, order=order.vec) class(sav) <- "regplot" invisible(sav) } metafor/R/ranef.rma.uni.r0000644000176200001440000000647414046527075014763 0ustar liggesusersranef.rma.uni <- function(object, level, digits, transf, targs, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.uni", notav="rma.uni.selmodel") x <- object na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(level)) level <- x$level if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } if (missing(transf)) transf <- FALSE if (missing(targs)) targs <- NULL level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) if (is.element(x$test, c("knha","adhoc","t"))) { crit <- qt(level/2, df=x$ddf, lower.tail=FALSE) } else { crit <- qnorm(level/2, lower.tail=FALSE) } ### TODO: check computations for user-defined weights if (!is.null(x$weights) || !x$weighted) stop(mstyle$stop("Extraction of random effects not available for models with non-standard weights.")) ######################################################################### pred <- rep(NA_real_, x$k.f) vpred <- rep(NA_real_, x$k.f) ### see Appendix in: Raudenbush, S. W., & Bryk, A. S. (1985). Empirical ### Bayes meta-analysis. 
Journal of Educational Statistics, 10(2), 75-98 li <- x$tau2.f / (x$tau2.f + x$vi.f) for (i in seq_len(x$k.f)[x$not.na]) { ### note: skipping NA cases Xi <- matrix(x$X.f[i,], nrow=1) if (is.element(x$method, c("FE","EE","CE"))) { pred[i] <- 0 vpred[i] <- 0 } else { pred[i] <- li[i] * (x$yi.f[i] - Xi %*% x$beta) vpred[i] <- li[i] * x$vi.f[i] + li[i]^2 * Xi %*% tcrossprod(x$vb,Xi) } } se <- sqrt(vpred) pi.lb <- pred - crit * se pi.ub <- pred + crit * se ######################################################################### ### if requested, apply transformation function to 'pred' and interval bounds if (is.function(transf)) { if (is.null(targs)) { pred <- sapply(pred, transf) se <- rep(NA,x$k.f) pi.lb <- sapply(pi.lb, transf) pi.ub <- sapply(pi.ub, transf) } else { pred <- sapply(pred, transf, targs) se <- rep(NA,x$k.f) pi.lb <- sapply(pi.lb, transf, targs) pi.ub <- sapply(pi.ub, transf, targs) } transf <- TRUE } ### make sure order of intervals is always increasing tmp <- .psort(pi.lb, pi.ub) pi.lb <- tmp[,1] pi.ub <- tmp[,2] ######################################################################### if (na.act == "na.omit") { out <- list(pred=pred[x$not.na], se=se[x$not.na], pi.lb=pi.lb[x$not.na], pi.ub=pi.ub[x$not.na]) out$slab <- x$slab[x$not.na] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(pred=pred, se=se, pi.lb=pi.lb, pi.ub=pi.ub) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) ######################################################################### out$digits <- digits out$transf <- transf class(out) <- "list.rma" return(out) } metafor/R/rstandard.rma.peto.r0000644000176200001440000000274213770377076016027 0ustar liggesusersrstandard.rma.peto <- function(model, digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.peto") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ######################################################################### resid <- c(x$yi.f - x$beta) resid[abs(resid) < 100 * .Machine$double.eps] <- 0 #resid[abs(resid) < 100 * .Machine$double.eps * median(abs(resid), na.rm=TRUE)] <- 0 ### see lm.influence ### note: these are like Pearson (or semi-standardized) residuals seresid <- sqrt(x$vi.f) stresid <- resid / seresid ######################################################################### if (na.act == "na.omit") { out <- list(resid=resid[x$not.na.yivi], se=seresid[x$not.na.yivi], z=stresid[x$not.na.yivi]) out$slab <- x$slab[x$not.na.yivi] } if (na.act == "na.exclude" || na.act == "na.pass") { out <- list(resid=resid, se=seresid, z=stresid) out$slab <- x$slab } if (na.act == "na.fail" && any(!x$not.na.yivi)) stop(mstyle$stop("Missing values in results.")) out$digits <- digits class(out) <- "list.rma" return(out) } metafor/R/profile.rma.uni.r0000644000176200001440000001443314050202744015306 0ustar liggesusersprofile.rma.uni <- function(fitted, xlim, ylim, steps=20, lltol=1e-03, progbar=TRUE, parallel="no", ncpus=1, cl=NULL, plot=TRUE, pch=19, cline=FALSE, ...) 
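### profiles the model likelihood over a grid of tau^2 values: 'xlim' sets the range of the grid
### (by default based on a confidence interval for tau^2), 'steps' its length; an illustrative
### call for a hypothetical random-effects fit 'res' would be profile(res, steps=20)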
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(fitted), must="rma.uni", notav="rma.uni.selmodel") if (is.element(fitted$method, c("FE","EE","CE"))) stop(mstyle$stop("Cannot profile tau2 parameter for fixed-effects models.")) if (steps < 2) stop(mstyle$stop("Argument 'steps' must be >= 2.")) x <- fitted parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } ddd <- list(...) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### if (missing(xlim)) { ### if the user has not specified xlim, try to get CI for tau^2 vc.ci <- try(suppressWarnings(confint(x)), silent=TRUE) if (inherits(vc.ci, "try-error")) { vc.lb <- NA vc.ub <- NA } else { ### min() and max() so the actual value is within the xlim bounds ### could still get NAs for the bounds if the CI is the empty set vc.lb <- min(x$tau2, vc.ci$random[1,2]) vc.ub <- max(.1, x$tau2, vc.ci$random[1,3]) ### if CI is equal to null set, then this still gives vc.ub = .1 } if (is.na(vc.lb) || is.na(vc.ub)) { ### if the CI method fails, try a Wald-type CI for tau^2 vc.lb <- max( 0, x$tau2 - qnorm(.995) * x$se.tau2) vc.ub <- max(.1, x$tau2 + qnorm(.995) * x$se.tau2) } if (is.na(vc.lb) || is.na(vc.ub)) { ### if this still results in NA bounds, use simple method vc.lb <- max( 0, x$tau2/4) vc.ub <- max(.1, x$tau2*4) } ### if all of that fails, throw an error if (is.na(vc.lb) || is.na(vc.ub)) stop(mstyle$stop("Cannot set 'xlim' automatically. Please set this argument manually.")) xlim <- c(vc.lb, vc.ub) } else { if (length(xlim) != 2L) stop(mstyle$stop("Argument 'xlim' should be a vector of length 2.")) xlim <- sort(xlim) } vcs <- seq(xlim[1], xlim[2], length.out=steps) #return(vcs) if (length(vcs) <= 1L) stop(mstyle$stop("Cannot set 'xlim' automatically. 
Please set this argument manually.")) if (parallel == "no") res <- pbapply::pblapply(vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE) if (parallel == "multicore") res <- pbapply::pblapply(vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE, cl=ncpus) #res <- parallel::mclapply(vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE, mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE) #res <- parallel::clusterApplyLB(cl, vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE) #res <- parallel::clusterMap(cl, .profile.rma.uni, vcs, MoreArgs=list(obj=x, parallel=parallel, profile=TRUE), .scheduling = "dynamic") } else { res <- pbapply::pblapply(vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE, cl=cl) #res <- parallel::parLapply(cl, vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE) #res <- parallel::clusterApply(cl, vcs, .profile.rma.uni, obj=x, parallel=parallel, profile=TRUE) #res <- parallel::clusterMap(cl, .profile.rma.uni, vcs, MoreArgs=list(obj=x, parallel=parallel, profile=TRUE)) } } lls <- sapply(res, function(x) x$ll) beta <- do.call("rbind", lapply(res, function(x) t(x$beta))) ci.lb <- do.call("rbind", lapply(res, function(x) t(x$ci.lb))) ci.ub <- do.call("rbind", lapply(res, function(x) t(x$ci.ub))) ######################################################################### if (x$method %in% c("ML", "REML") && any(lls >= logLik(x) + lltol, na.rm=TRUE)) warning(mstyle$warning("At least one profiled log-likelihood value is larger than the log-likelihood of the fitted model."), call.=FALSE) if (all(is.na(lls))) warning(mstyle$warning("All model fits failed. Cannot draw profile likelihood plot."), call.=FALSE) beta <- data.frame(beta) ci.lb <- data.frame(ci.lb) ci.ub <- data.frame(ci.ub) names(beta) <- rownames(x$beta) names(ci.lb) <- rownames(x$beta) names(ci.ub) <- rownames(x$beta) if (missing(ylim)) { if (any(!is.na(lls))) { if (xlim[1] <= x$tau2 && xlim[2] >= x$tau2) { ylim <- range(c(logLik(x),lls), na.rm=TRUE) } else { ylim <- range(lls, na.rm=TRUE) } } else { ylim <- rep(logLik(x), 2) } ylim[1] <- ylim[1] - .1 ylim[2] <- ylim[2] + .1 } else { if (length(ylim) != 2L) stop(mstyle$stop("Argument 'ylim' should be a vector of length 2.")) ylim <- sort(ylim) } xlab <- expression(paste(tau^2, " Value")) title <- expression(paste("Profile Plot for ", tau^2)) sav <- list(tau2=vcs, ll=lls, beta=beta, ci.lb=ci.lb, ci.ub=ci.ub, comps=1, xlim=xlim, ylim=ylim, method=x$method, vc=x$tau2, maxll=logLik(x), xlab=xlab, title=title) class(sav) <- "profile.rma" ######################################################################### if (plot) plot(sav, pch=pch, cline=cline, ...) ######################################################################### if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } invisible(sav) } metafor/R/weights.rma.mh.r0000644000176200001440000000337213775617772015161 0ustar liggesusersweights.rma.mh <- function(object, type="diagonal", ...) 
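### returns the weights used by the Mantel-Haenszel method: with type="diagonal" (the default)
### a vector of weights expressed as percentages; with type="matrix" the full (diagonal) weight matrix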
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma.mh") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) type <- match.arg(type, c("diagonal", "matrix")) x <- object ######################################################################### if (is.element(x$measure, c("RR","OR","RD"))) { Ni <- x$ai + x$bi + x$ci + x$di } else { Ti <- x$t1i + x$t2i } if (x$measure == "OR") wi <- (x$bi / Ni) * x$ci if (x$measure == "RR") wi <- (x$ci / Ni) * (x$ai+x$bi) if (x$measure == "RD") wi <- ((x$ai+x$bi) / Ni) * (x$ci+x$di) if (x$measure == "IRR") wi <- (x$x2i / Ti) * x$t1i if (x$measure == "IRD") wi <- (x$t1i / Ti) * x$t2i ######################################################################### if (type == "diagonal") { weight <- rep(NA_real_, x$k.f) weight[x$not.na] <- wi / sum(wi) * 100 names(weight) <- x$slab if (na.act == "na.omit") weight <- weight[x$not.na] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in weights.")) return(weight) } if (type == "matrix") { Wfull <- matrix(NA_real_, nrow=x$k.f, ncol=x$k.f) Wfull[x$not.na, x$not.na] <- diag(wi) rownames(Wfull) <- x$slab colnames(Wfull) <- x$slab if (na.act == "na.omit") Wfull <- Wfull[x$not.na, x$not.na, drop=FALSE] if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) return(Wfull) } } metafor/R/ranktest.default.r0000644000176200001440000000456614014561273015567 0ustar liggesusersranktest.default <- function(x, vi, sei, subset, digits, ...) { ######################################################################### mstyle <- .get.mstyle("crayon" %in% .packages()) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) if (missing(subset)) subset <- NULL ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ddd <- list(...) 
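   ### the only extra argument accepted via ... is 'exact' (defaults to TRUE); it is passed on to
   ### cor.test() below, where it controls whether an exact p-value for Kendall's tau is computed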
.chkdots(ddd, c("exact")) if (is.null(ddd$exact)) { exact <- TRUE } else { exact <- ddd$exact } ######################################################################### ### check if sampling variances and/or standard errors are available if (missing(vi)) vi <- NULL if (missing(sei)) sei <- NULL if (is.null(vi)) { if (!is.null(sei)) vi <- sei^2 } if (is.null(vi)) stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) yi <- x ### check length of yi and vi if (length(yi) != length(vi)) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same.")) ######################################################################### ### if a subset of studies is specified if (!is.null(subset)) { subset <- .setnafalse(subset, k=length(yi)) yi <- yi[subset] vi <- vi[subset] } ### check for NAs and act accordingly has.na <- is.na(yi) | is.na(vi) if (any(has.na)) { not.na <- !has.na if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] vi <- vi[not.na] warning(mstyle$warning("Studies with NAs omitted from test."), call.=FALSE) } if (na.act == "na.fail") stop(mstyle$stop("Missing values in data.")) } ######################################################################### res <- rma.uni(yi, vi, method="FE") beta <- c(res$beta) vb <- c(res$vb) vi.star <- vi - vb yi.star <- (yi - beta) / sqrt(vi.star) res <- cor.test(yi.star, vi, method="kendall", exact=exact) pval <- res$p.value tau <- res$estimate res <- list(tau=tau, pval=pval, digits=digits) class(res) <- "ranktest" return(res) } metafor/R/print.gosh.rma.r0000644000176200001440000000305314046526773015163 0ustar liggesusersprint.gosh.rma <- function(x, digits=x$digits, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="gosh.rma") digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) if (!exists(".rmspace")) cat("\n") cat(mstyle$text("Model fits attempted: ")) cat(mstyle$result(length(x$fit))) cat("\n") cat(mstyle$text("Model fits succeeded: ")) cat(mstyle$result(sum(x$fit))) cat("\n\n") res.table <- matrix(NA, nrow=ncol(x$res), ncol=6) res.table[,1] <- apply(x$res, 2, mean, na.rm=TRUE) res.table[,2] <- apply(x$res, 2, min, na.rm=TRUE) res.table[,3] <- apply(x$res, 2, quantile, .25, na.rm=TRUE) res.table[,4] <- apply(x$res, 2, quantile, .50, na.rm=TRUE) res.table[,5] <- apply(x$res, 2, quantile, .75, na.rm=TRUE) res.table[,6] <- apply(x$res, 2, max, na.rm=TRUE) res.table <- .fcf(res.table, digits[["est"]]) colnames(res.table) <- c("mean", "min", "q1", "median", "q3", "max") rownames(res.table) <- colnames(x$res) if (ncol(x$res) == 6) rownames(res.table)[2] <- "Q" ### add blank row before the model coefficients in meta-regression models if (ncol(x$res) > 6) res.table <- rbind(res.table[seq_len(5),], "", res.table[6:nrow(res.table),,drop=FALSE]) ### remove row for tau^2 in FE/EE/CE models if (is.element(x$method, c("FE","EE","CE"))) res.table <- res.table[-5,] tmp <- capture.output(print(res.table, quote=FALSE, right=TRUE)) .print.table(tmp, mstyle) if (!exists(".rmspace")) cat("\n") invisible() } metafor/R/deviance.rma.r0000644000176200001440000000063513770363240014640 0ustar liggesusersdeviance.rma <- function(object, REML, ...) 
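### extracts the deviance from the fit statistics of the model; argument 'REML' chooses between
### the deviance under the ML and the REML criterion and defaults to match the estimation method used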
{ mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="rma") if (missing(REML)) { if (object$method == "REML") { REML <- TRUE } else { REML <- FALSE } } if (REML) { val <- object$fit.stats["dev","REML"] } else { val <- object$fit.stats["dev","ML"] } return(val) } metafor/R/cooks.distance.rma.mv.r0000644000176200001440000001250514027645330016410 0ustar liggesuserscooks.distance.rma.mv <- function(model, progbar=FALSE, cluster, reestimate=TRUE, parallel="no", ncpus=1, cl=NULL, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mv") #if (inherits(model, "robust.rma")) ### can compute Cook's distance also for 'robust.rma' objects # stop(mstyle$stop("Method not available for objects of class \"robust.rma\".")) na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } misscluster <- ifelse(missing(cluster), TRUE, FALSE) if (misscluster) cluster <- seq_len(x$k.all) ddd <- list(...) .chkdots(ddd, c("btt", "time", "LB")) btt <- .set.btt(ddd$btt, x$p, int.incl=FALSE, Xnames=colnames(x$X)) m <- length(btt) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### ### process cluster variable # note: cluster variable is assumed to be of the same length as the size of # the original dataset passed to the model fitting function and so we apply # the same subsetting and removing of missings (if necessary) as was done # during model fitting if (!is.null(x$subset)) cluster <- cluster[x$subset] cluster.f <- cluster cluster <- cluster[x$not.na] ### checks on cluster variable if (anyNA(cluster.f)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster.f) == 0L) stop(mstyle$stop(paste0("Cannot find 'cluster' variable (or it has zero length)."))) if (length(cluster) != x$k) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not match length of data (", x$k, ")."))) ### cluster ids and number of clusters ids <- unique(cluster) n <- length(ids) ######################################################################### ### calculate inverse of variance-covariance matrix under the full model svb <- chol2inv(chol(x$vb[btt,btt,drop=FALSE])) if (parallel == "no") res <- pbapply::pblapply(seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt) if (parallel == "multicore") res <- pbapply::pblapply(seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt, cl=ncpus) #res <- parallel::mclapply(seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt, 
mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt) #res <- parallel::clusterApplyLB(cl, seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt) } else { res <- pbapply::pblapply(seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt, cl=cl) #res <- parallel::parLapply(cl, seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt) #res <- parallel::clusterApply(cl, seq_len(n), .cooks.distance.rma.mv, obj=x, parallel=parallel, svb=svb, cluster=cluster, ids=ids, reestimate=reestimate, btt=btt) } } cook.d <- sapply(res, function(x) x$cook.d) ######################################################################### if (na.act == "na.omit") { out <- cook.d if (misscluster) { names(out) <- x$slab[x$not.na] } else { names(out) <- ids out <- out[order(ids)] } } if (na.act == "na.exclude" || na.act == "na.pass") { ids.f <- unique(cluster.f) out <- rep(NA_real_, length(ids.f)) out[match(ids, ids.f)] <- cook.d if (misscluster) { names(out) <- x$slab } else { names(out) <- ids.f out <- out[order(ids.f)] } } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } return(out) } metafor/R/dfbetas.rma.mv.r0000644000176200001440000001157214027645343015120 0ustar liggesusersdfbetas.rma.mv <- function(model, progbar=FALSE, cluster, reestimate=TRUE, parallel="no", ncpus=1, cl=NULL, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(model), must="rma.mv", notav="robust.rma") na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' specified under options().")) x <- model parallel <- match.arg(parallel, c("no", "snow", "multicore")) if (parallel == "no" && ncpus > 1) parallel <- "snow" if (!is.null(cl) && inherits(cl, "SOCKcluster")) { parallel <- "snow" ncpus <- length(cl) } if (parallel == "snow" && ncpus < 2) parallel <- "no" if (parallel == "snow" || parallel == "multicore") { if (!requireNamespace("parallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'parallel' package for parallel processing.")) ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Argument 'ncpus' must be >= 1.")) } if (!progbar) { pbo <- pbapply::pboptions(type="none") on.exit(pbapply::pboptions(pbo)) } misscluster <- ifelse(missing(cluster), TRUE, FALSE) if (misscluster) cluster <- seq_len(x$k.all) ddd <- list(...) 
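   ### undocumented arguments that can be passed via ...: 'time' (print the total computation time
   ### at the end) and 'LB' (use load-balancing parLapplyLB() when parallel="snow")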
.chkdots(ddd, c("time", "LB")) if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### ### process cluster variable # note: cluster variable is assumed to be of the same length as the size of # the original dataset passed to the model fitting function and so we apply # the same subsetting and removing of missings (if necessary) as was done # during model fitting if (!is.null(x$subset)) cluster <- cluster[x$subset] cluster.f <- cluster cluster <- cluster[x$not.na] ### checks on cluster variable if (anyNA(cluster.f)) stop(mstyle$stop("No missing values allowed in 'cluster' variable.")) if (length(cluster.f) == 0L) stop(mstyle$stop(paste0("Cannot find 'cluster' variable (or it has zero length)."))) if (length(cluster) != x$k) stop(mstyle$stop(paste0("Length of variable specified via 'cluster' (", length(cluster), ") does not match length of data (", x$k, ")."))) ### cluster ids and number of clusters ids <- unique(cluster) n <- length(ids) ######################################################################### if (parallel == "no") res <- pbapply::pblapply(seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) if (parallel == "multicore") res <- pbapply::pblapply(seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate, cl=ncpus) #res <- parallel::mclapply(seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate, mc.cores=ncpus) if (parallel == "snow") { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } if (.isTRUE(ddd$LB)) { res <- parallel::parLapplyLB(cl, seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) #res <- parallel::clusterApplyLB(cl, seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) } else { res <- pbapply::pblapply(seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate, cl=cl) #res <- parallel::parLapply(cl, seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) #res <- parallel::clusterApply(cl, seq_len(n), .dfbetas.rma.mv, obj=x, parallel=parallel, cluster=cluster, ids=ids, reestimate=reestimate) } } dfbs <- lapply(res, function(x) x$dfbs) dfbs <- do.call(rbind, dfbs) ######################################################################### if (na.act == "na.omit") { out <- dfbs if (misscluster) { rownames(out) <- x$slab[x$not.na] } else { rownames(out) <- ids out <- out[order(ids),,drop=FALSE] } } if (na.act == "na.exclude" || na.act == "na.pass") { ids.f <- unique(cluster.f) out <- matrix(NA_real_, nrow=length(ids.f), ncol=x$p) out[match(ids, ids.f),] <- dfbs if (misscluster) { rownames(out) <- x$slab } else { rownames(out) <- ids.f out <- out[order(ids.f),,drop=FALSE] } } if (na.act == "na.fail" && any(!x$not.na)) stop(mstyle$stop("Missing values in results.")) colnames(out) <- rownames(x$beta) out <- data.frame(out) if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } return(out) } metafor/R/forest.r0000644000176200001440000000006213674405412013601 0ustar liggesusersforest <- function(x, ...) 
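### S3 generic for forest plots; UseMethod() dispatches on the class of 'x' to the registered
### forest.* methods (e.g., for 'rma' model objects)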
UseMethod("forest") metafor/R/misc.func.hidden.glmm.r0000644000176200001440000001226113717035416016356 0ustar liggesusers############################################################################ ### density of non-central hypergeometric distribution (based on Liao and Rosen, 2001) from MCMCpack ### Liao, J. G. & Rosen, O. (2001). Fast and stable algorithms for computing and sampling from the ### noncentral hypergeometric distribution. The American Statistician, 55, 366-369. .dnoncenhypergeom <- function (x=NA, n1, n2, m1, psi) { ### x=ai, n1=ai+bi, n2=ci+di, m1=ai+ci, psi=ORi mstyle <- .get.mstyle("crayon" %in% .packages()) mode.compute <- function(n1, n2, m1, psi, ll, uu) { a <- psi - 1 b <- -((m1 + n1 + 2) * psi + n2 - m1) c <- psi * (n1 + 1) * (m1 + 1) q <- b + sign(b) * sqrt(b * b - 4 * a * c) q <- -q/2 mode <- trunc(c/q) if (uu >= mode && mode >= ll) return(mode) else return(trunc(q/a)) } r.function <- function(n1, n2, m1, psi, i) { (n1 - i + 1) * (m1 - i + 1)/i/(n2 - m1 + i) * psi } ll <- max(0, m1 - n2) uu <- min(n1, m1) if (n1 < 0 | n2 < 0) stop(mstyle$stop("'n1' or 'n2' negative in dnoncenhypergeom().")) if (m1 < 0 | m1 > (n1 + n2)) stop(mstyle$stop("'m1' out of range in dnoncenhypergeom().")) if (psi <= 0) stop(mstyle$stop("'psi' [odds ratio] negative in dnoncenhypergeom().")) if (!is.na(x) & (x < ll | x > uu)) stop(mstyle$stop("'x' out of bounds in dnoncenhypergeom().")) if (!is.na(x) & length(x) > 1L) stop(mstyle$stop("'x' neither missing or scalar in dnoncenhypergeom().")) mode <- mode.compute(n1, n2, m1, psi, ll, uu) pi <- array(1, uu - ll + 1) shift <- 1 - ll if (mode < uu) { r1 <- r.function(n1, n2, m1, psi, (mode + 1):uu) pi[(mode + 1 + shift):(uu + shift)] <- cumprod(r1) } if (mode > ll) { r1 <- 1/r.function(n1, n2, m1, psi, mode:(ll + 1)) pi[(mode - 1 + shift):(ll + shift)] <- cumprod(r1) } pi <- pi/sum(pi) if (is.na(x)) { return(cbind(ll:uu, pi)) } else { return(pi[x + shift]) } } ############################################################################ ### density of non-central hypergeometric distribution for fixed- and random/mixed-effects models .dnchgi <- function(logOR, ai, bi, ci, di, mu.i, tau2, random, dnchgcalc, dnchgprec) { mstyle <- .get.mstyle("crayon" %in% .packages()) k <- length(logOR) dnchgi <- rep(NA_real_, k) ### beyond these values, the results from dFNCHypergeo (from BiasedUrn package) become unstable pow <- 12 logOR[logOR < log(10^-pow)] <- log(10^-pow) logOR[logOR > log(10^pow)] <- log(10^pow) for (i in seq_len(k)) { ORi <- exp(logOR[i]) if (dnchgcalc == "dnoncenhypergeom") { res <- try(.dnoncenhypergeom(x=ai, n1=ai+bi, n2=ci+di, m1=ai+ci, psi=ORi)) } else { res <- try(BiasedUrn::dFNCHypergeo(x=ai, m1=ai+bi, m2=ci+di, n=ai+ci, odds=ORi, precision=dnchgprec)) } if (inherits(res, "try-error")) { stop(mstyle$stop(paste0("Could not compute density of non-central hypergeometric distribution in study ", i, ".")), call.=FALSE) } else { dnchgi[i] <- res } } if (random) dnchgi <- dnchgi * dnorm(logOR, mu.i, sqrt(tau2)) return(dnchgi) } ############################################################################ ### joint density of k non-central hypergeometric distributions for fixed- and random/mixed-effects models .dnchg <- function(parms, ai, bi, ci, di, X.fit, random, verbose=FALSE, digits, dnchgcalc, dnchgprec, intCtrl) { mstyle <- .get.mstyle("crayon" %in% .packages()) p <- ncol(X.fit) k <- length(ai) beta <- parms[seq_len(p)] ### first p elemenets in parms are the model coefficients tau2 <- ifelse(random, exp(parms[p+1]), 0) ### next value is 
tau^2 -- optimize over exp(tau^2) value or hold at 0 if random=FALSE mu.i <- X.fit %*% cbind(beta) lli <- rep(NA_real_, k) if (!random) { for (i in seq_len(k)) { lli[i] <- log(.dnchgi(logOR=mu.i[i], ai=ai[i], bi=bi[i], ci=ci[i], di=di[i], random=random, dnchgcalc=dnchgcalc, dnchgprec=dnchgprec)) } if (verbose) cat(mstyle$verbose(paste("ll =", .fcf(sum(lli), digits[["fit"]]), " ", .fcf(beta, digits[["est"]]), "\n"))) } if (random) { for (i in seq_len(k)) { res <- try(integrate(.dnchgi, lower=intCtrl$lower, upper=intCtrl$upper, ai=ai[i], bi=bi[i], ci=ci[i], di=di[i], mu.i=mu.i[i], tau2=tau2, random=random, dnchgcalc=dnchgcalc, dnchgprec=dnchgprec, rel.tol=intCtrl$rel.tol, subdivisions=intCtrl$subdivisions, stop.on.error=FALSE), silent=!verbose) if (inherits(res, "try-error")) { stop(mstyle$stop(paste0("Could not integrate over density of non-central hypergeometric distribution in study ", i, ".")), call.=FALSE) } else { if (res$value > 0) { lli[i] <- log(res$value) } else { lli[i] <- -Inf } } } if (verbose) cat(mstyle$verbose(paste("ll = ", .fcf(sum(lli), digits[["fit"]]), " ", .fcf(tau2, digits[["var"]]), " ", .fcf(beta, digits[["est"]]), "\n"))) } return(-sum(lli)) } ############################################################################ metafor/R/coef.permutest.rma.uni.r0000644000176200001440000000076714043270410016613 0ustar liggesuserscoef.permutest.rma.uni <- function(object, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(object), must="permutest.rma.uni") x <- object if (is.element(x$test, c("knha","adhoc","t"))) { res.table <- data.frame(estimate=x$beta, se=x$se, tval=x$zval, df=x$ddf, pval=x$pval, ci.lb=x$ci.lb, ci.ub=x$ci.ub) } else { res.table <- data.frame(estimate=x$beta, se=x$se, zval=x$zval, pval=x$pval, ci.lb=x$ci.lb, ci.ub=x$ci.ub) } return(res.table) } metafor/R/bldiag.r0000644000176200001440000000360514043306643013524 0ustar liggesusersbldiag <- function(..., order) { mstyle <- .get.mstyle("crayon" %in% .packages()) mlist <- list(...) ### handle case in which a list of matrices is given if (length(mlist)==1L && is.list(mlist[[1]])) mlist <- unlist(mlist, recursive=FALSE) ### make sure each element is a matrix (so that bldiag(matrix(1, nrow=3, ncol=3), 2) also works) mlist <- lapply(mlist, function(x) if (inherits(x, "matrix")) x else diag(x, nrow=length(x), ncol=length(x))) ### find any ?x0 or 0x? matrices is00 <- sapply(mlist, function(x) any(dim(x) == c(0L,0L))) ### if all are ?x0 or 0x? 
matrices, return 0x0 matrix if (all(is00)) return(matrix(nrow=0, ncol=0)) ### otherwise filter out those matrices (if there are any) if (any(is00)) mlist <- mlist[!is00] csdim <- rbind(c(0,0), apply(sapply(mlist,dim), 1, cumsum)) ### consider using rowCumsums() from matrixStats package out <- array(0, dim=csdim[length(mlist) + 1,]) add1 <- matrix(rep(1:0,2), ncol=2) for (i in seq(along.with=mlist)) { indx <- apply(csdim[i:(i+1),] + add1, 2, function(x) x[1]:x[2]) if (is.null(dim(indx))) { ### non-square matrix out[indx[[1]],indx[[2]]] <- mlist[[i]] } else { ### square matrix out[indx[,1],indx[,2]] <- mlist[[i]] } } if (!missing(order)) { if (nrow(out) != ncol(out)) stop(mstyle$stop("Can only use 'order' argument for square matrices.")) if (length(order) != nrow(out)) stop(mstyle$stop(paste0("Length of the 'order' argument (", length(order), ") does not correspond to the dimensions of the matrix (", nrow(out), "x", ncol(out), ")."))) if (grepl("^order\\(", deparse(substitute(order)))) { sort.vec <- order } else { sort.vec <- order(order) } out[sort.vec, sort.vec] <- out } return(out) } metafor/R/rma.uni.r0000644000176200001440000025444414046527204013665 0ustar liggesusersrma <- rma.uni <- function(yi, vi, sei, weights, ai, bi, ci, di, n1i, n2i, x1i, x2i, t1i, t2i, m1i, m2i, sd1i, sd2i, xi, mi, ri, ti, sdi, r2i, ni, mods, scale, measure="GEN", intercept=TRUE, data, slab, subset, add=1/2, to="only0", drop00=FALSE, vtype="LS", method="REML", weighted=TRUE, test="z", #knha=FALSE, level=95, digits, btt, att, tau2, verbose=FALSE, control, ...) { ######################################################################### ###### setup mstyle <- .get.mstyle("crayon" %in% .packages()) ### check argument specifications ### (arguments "to" and "vtype" are checked inside escalc function) if (!is.element(measure, c("RR","OR","PETO","RD","AS","PHI","YUQ","YUY","RTET", ### 2x2 table measures "PBIT","OR2D","OR2DN","OR2DL", ### - transformations to SMD "MPRD","MPRR","MPOR","MPORC","MPPETO", ### - measures for matched pairs data "IRR","IRD","IRSD", ### two-group person-time data measures "MD","SMD","SMDH","ROM", ### two-group mean/SD measures "CVR","VR", ### coefficient of variation ratio, variability ratio "RPB","RBIS","D2OR","D2ORN","D2ORL", ### - transformations to r_PB, r_BIS, and log(OR) "COR","UCOR","ZCOR", ### correlations (raw and r-to-z transformed) "PCOR","ZPCOR","SPCOR", ### partial and semi-partial correlations "PR","PLN","PLO","PAS","PFT", ### single proportions (and transformations thereof) "IR","IRLN","IRS","IRFT", ### single-group person-time data (and transformations thereof) "MN","MNLN","CVLN","SDLN","SMD1", ### mean, log(mean), log(CV), log(SD), single-group SMD "MC","SMCC","SMCR","SMCRH","ROMC","CVRC","VRC", ### raw/standardized mean change, log(ROM), CVR, and VR for dependent samples "ARAW","AHW","ABT", ### alpha (and transformations thereof) "GEN"))) stop(mstyle$stop("Unknown 'measure' specified.")) if (!is.element(method, c("FE","EE","CE","HS","HSk","HE","DL","DLIT","GENQ","GENQM","SJ","SJIT","PM","PMM","ML","REML","EB"))) stop(mstyle$stop("Unknown 'method' specified.")) ### in case user specifies more than one add/to value (as one can do with rma.mh() and rma.peto()) ### (any kind of continuity correction is directly applied to the outcomes, which are then analyzed as such) if (length(add) > 1L) add <- add[1] if (length(to) > 1L) to <- to[1] na.act <- getOption("na.action") if (!is.element(na.act, c("na.omit", "na.exclude", "na.fail", "na.pass"))) stop(mstyle$stop("Unknown 'na.action' 
specified under options().")) if (missing(tau2)) tau2 <- NULL if (missing(control)) control <- list() time.start <- proc.time() ### get ... argument and check for extra/superfluous arguments ddd <- list(...) .chkdots(ddd, c("knha", "link", "alpha", "outlist", "onlyo1", "addyi", "addvi", "time", "skipr2", "skiphes")) ### handle 'knha' argument from ... (note: overrides test argument) if (.isFALSE(ddd$knha)) test <- "z" if (.isTRUE(ddd$knha)) test <- "knha" if (!is.element(test, c("z", "t", "knha", "adhoc"))) stop(mstyle$stop("Invalid option selected for 'test' argument.")) if (missing(scale)) { model <- "rma.uni" } else { #if (!inherits(scale, "formula")) # stop(mstyle$stop("Must specify a formula for the 'scale' argument.")) if (is.element(test, c("knha", "adhoc"))) stop(mstyle$stop("Cannot use Knapp & Hartung method with location-scale models.")) model <- "rma.ls" } if (!is.null(ddd$link)) { link <- match.arg(ddd$link, c("log", "identity")) } else { link <- "log" } if (!is.null(ddd$alpha)) { alpha <- ddd$alpha } else { alpha <- NA } ### set defaults or get onlyo1, addyi, and addvi arguments onlyo1 <- ifelse(is.null(ddd$onlyo1), FALSE, ddd$onlyo1) addyi <- ifelse(is.null(ddd$addyi), TRUE, ddd$addyi) addvi <- ifelse(is.null(ddd$addvi), TRUE, ddd$addvi) ### set defaults for digits if (missing(digits)) { digits <- .set.digits(dmiss=TRUE) } else { digits <- .set.digits(digits, dmiss=FALSE) } ### set defaults for formulas formula.yi <- NULL formula.mods <- NULL formula.scale <- NULL ### set options(warn=1) if verbose > 2 if (verbose > 2) { opwarn <- options(warn=1) on.exit(options(warn=opwarn$warn)) } ######################################################################### if (verbose && !exists(".rmspace")) cat("\n") if (verbose > 1) message(mstyle$message("Extracting/computing yi/vi values ...")) ### check if data argument has been specified if (missing(data)) data <- NULL if (is.null(data)) { data <- sys.frame(sys.parent()) } else { if (!is.data.frame(data)) data <- data.frame(data) } mf <- match.call() ### for certain measures, set add=0 by default unless user explicitly sets the add argument addval <- mf[[match("add", names(mf))]] if (is.element(measure, c("AS","PHI","RTET","IRSD","PAS","PFT","IRS","IRFT")) && is.null(addval)) add <- 0 ### extract yi (either NULL if not specified, a vector, a formula, or an escalc object) mf.yi <- mf[[match("yi", names(mf))]] yi <- eval(mf.yi, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this ### if yi is not NULL and it is an escalc object, then use that object in place of the data argument if (!is.null(yi) && inherits(yi, "escalc")) data <- yi ### extract weights, slab, subset, mods, and scale values, possibly from the data frame specified via data or yi (arguments not specified are NULL) mf.weights <- mf[[match("weights", names(mf))]] mf.slab <- mf[[match("slab", names(mf))]] mf.subset <- mf[[match("subset", names(mf))]] mf.mods <- mf[[match("mods", names(mf))]] mf.scale <- mf[[match("scale", names(mf))]] weights <- eval(mf.weights, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this slab <- eval(mf.slab, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this subset <- eval(mf.subset, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this mods <- eval(mf.mods, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this scale <- eval(mf.scale, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this ai <- bi <- ci <- di <- 
x1i <- x2i <- t1i <- t2i <- NA if (!is.null(yi)) { ### if yi is not NULL, then yi now either contains the yi values, a formula, or an escalc object ### if yi is a formula, extract yi and X (this overrides anything specified via the mods argument further below) if (inherits(yi, "formula")) { formula.yi <- yi options(na.action = "na.pass") ### set na.action to na.pass, so that NAs are not filtered out (we'll do that later) mods <- model.matrix(yi, data=data) ### extract model matrix (now mods is no longer a formula, so [a] further below is skipped) attr(mods, "assign") <- NULL ### strip assign attribute (not needed at the moment) attr(mods, "contrasts") <- NULL ### strip contrasts attribute (not needed at the moment) yi <- model.response(model.frame(yi, data=data)) ### extract yi values from model frame options(na.action = na.act) ### set na.action back to na.act names(yi) <- NULL ### strip names (1:k) from yi (so res$yi is the same whether yi is a formula or not) intercept <- FALSE ### set to FALSE since formula now controls whether the intercept is included or not } ### note: code further below ([b]) actually checks whether intercept is included or not ### if yi is an escalc object, try to extract yi and vi (note that moderators must then be specified via the mods argument) if (inherits(yi, "escalc")) { if (!is.null(attr(yi, "yi.names"))) { ### if yi.names attributes is available yi.name <- attr(yi, "yi.names")[1] ### take the first entry to be the yi variable } else { ### if not, see if 'yi' is in the object and assume that is the yi variable if (!is.element("yi", names(yi))) stop(mstyle$stop("Cannot determine name of the 'yi' variable.")) yi.name <- "yi" } if (!is.null(attr(yi, "vi.names"))) { ### if vi.names attributes is available vi.name <- attr(yi, "vi.names")[1] ### take the first entry to be the vi variable } else { ### if not, see if 'vi' is in the object and assume that is the vi variable if (!is.element("vi", names(yi))) stop(mstyle$stop("Cannot determine name of the 'vi' variable.")) vi.name <- "vi" } ### get vi and yi variables from the escalc object (vi first, then yi) vi <- yi[[vi.name]] yi <- yi[[yi.name]] yi.escalc <- TRUE } else { yi.escalc <- FALSE } ### in case user passed a matrix to yi, convert it to a vector if (is.matrix(yi)) yi <- as.vector(yi) ### number of outcomes before subsetting k <- length(yi) k.all <- k ### if the user has specified 'measure' to be something other than "GEN", then use that for the measure argument ### otherwise, if yi has a 'measure' attribute, use that to set the 'measure' argument if (measure == "GEN" && !is.null(attr(yi, "measure"))) measure <- attr(yi, "measure") ### add measure attribute (back) to the yi vector attr(yi, "measure") <- measure ### extract vi and sei values (but only if yi wasn't an escalc object) if (!yi.escalc) { mf.vi <- mf[[match("vi", names(mf))]] mf.sei <- mf[[match("sei", names(mf))]] vi <- eval(mf.vi, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this sei <- eval(mf.sei, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this } ### extract ni argument mf.ni <- mf[[match("ni", names(mf))]] ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) ### NULL if user does not specify this ### if neither vi nor sei is specified, then throw an error ### if only sei is specified, then square those values to get vi ### if vi is specified, use those values if (is.null(vi)) { if (is.null(sei)) { stop(mstyle$stop("Must specify 'vi' or 'sei' argument.")) } else { vi <- sei^2 } } ### 
in case user passes a matrix to vi, convert it to a vector ### note: only a row or column matrix with the right dimensions will end with the right length if (is.matrix(vi)) vi <- as.vector(vi) ### check if user constrained vi to 0 if ((length(vi) == 1L && vi == 0) || (length(vi) == k && !anyNA(vi) && all(vi == 0))) { vi0 <- TRUE } else { vi0 <- FALSE } ### allow easy setting of vi to a single value if (length(vi) == 1L) vi <- rep(vi, k) ### note: k is number of outcomes before subsetting ### check length of yi and vi if (length(vi) != k) stop(mstyle$stop("Length of 'yi' and 'vi' (or 'sei') is not the same.")) ### if ni has not been specified but is an attribute of yi, get it if (is.null(ni) && !is.null(attr(yi, "ni"))) ni <- attr(yi, "ni") ### check length of yi and ni (only if ni is not NULL) ### if there is a mismatch, then ni cannot be trusted, so set it to NULL if (!is.null(ni) && length(ni) != k) ni <- NULL ### if ni is now available, add it (back) as an attribute to yi if (!is.null(ni)) attr(yi, "ni") <- ni ### note: one or more yi/vi pairs may be NA/NA (also a corresponding ni value may be NA) ### if slab has not been specified but is an attribute of yi, get it if (is.null(slab)) { if (!is.null(attr(yi, "slab"))) slab <- attr(yi, "slab") ### check length of yi and slab (only if slab is now not NULL) ### if there is a mismatch, then slab cannot be trusted, so set it to NULL if (!is.null(slab) && length(slab) != k) slab <- NULL } ### subsetting of yi/vi/ni values (note: mods and slab are subsetted further below) if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) yi <- yi[subset] vi <- vi[subset] ni <- ni[subset] attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back } } else { ### if yi is NULL, try to compute yi/vi based on specified measure and supplied data if (is.element(measure, c("RR","OR","PETO","RD","AS","PHI","YUQ","YUY","RTET","PBIT","OR2D","OR2DN","OR2DL","MPRD","MPRR","MPOR","MPORC","MPPETO"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.bi <- mf[[match("bi", names(mf))]] mf.ci <- mf[[match("ci", names(mf))]] mf.di <- mf[[match("di", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) bi <- eval(mf.bi, data, enclos=sys.frame(sys.parent())) ci <- eval(mf.ci, data, enclos=sys.frame(sys.parent())) di <- eval(mf.di, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) if (is.null(bi)) bi <- n1i - ai if (is.null(di)) di <- n2i - ci k <- length(ai) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] bi <- bi[subset] ci <- ci[subset] di <- di[subset] } dat <- escalc(measure=measure, ai=ai, bi=bi, ci=ci, di=di, add=add, to=to, drop00=drop00, vtype=vtype, onlyo1=onlyo1, addyi=addyi, addvi=addvi) } if (is.element(measure, c("IRR","IRD","IRSD"))) { mf.x1i <- mf[[match("x1i", names(mf))]] mf.x2i <- mf[[match("x2i", names(mf))]] mf.t1i <- mf[[match("t1i", names(mf))]] mf.t2i <- mf[[match("t2i", names(mf))]] x1i <- eval(mf.x1i, data, enclos=sys.frame(sys.parent())) x2i <- eval(mf.x2i, data, enclos=sys.frame(sys.parent())) t1i <- eval(mf.t1i, data, enclos=sys.frame(sys.parent())) t2i <- eval(mf.t2i, data, enclos=sys.frame(sys.parent())) k <- length(x1i) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, 
k=k) x1i <- x1i[subset] x2i <- x2i[subset] t1i <- t1i[subset] t2i <- t2i[subset] } dat <- escalc(measure=measure, x1i=x1i, x2i=x2i, t1i=t1i, t2i=t2i, add=add, to=to, drop00=drop00, vtype=vtype, addyi=addyi, addvi=addvi) } if (is.element(measure, c("MD","SMD","SMDH","ROM","RPB","RBIS","D2OR","D2ORN","D2ORL","CVR","VR"))) { mf.m1i <- mf[[match("m1i", names(mf))]] mf.m2i <- mf[[match("m2i", names(mf))]] mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] mf.n1i <- mf[[match("n1i", names(mf))]] mf.n2i <- mf[[match("n2i", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) n1i <- eval(mf.n1i, data, enclos=sys.frame(sys.parent())) n2i <- eval(mf.n2i, data, enclos=sys.frame(sys.parent())) k <- length(n1i) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] n1i <- n1i[subset] n2i <- n2i[subset] } dat <- escalc(measure=measure, m1i=m1i, m2i=m2i, sd1i=sd1i, sd2i=sd2i, n1i=n1i, n2i=n2i, vtype=vtype) } if (is.element(measure, c("COR","UCOR","ZCOR"))) { mf.ri <- mf[[match("ri", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ri) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ri <- ri[subset] ni <- ni[subset] } dat <- escalc(measure=measure, ri=ri, ni=ni, vtype=vtype) } if (is.element(measure, c("PCOR","ZPCOR","SPCOR"))) { mf.ti <- mf[[match("ti", names(mf))]] mf.r2i <- mf[[match("r2i", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) r2i <- eval(mf.r2i, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ti) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ti <- ti[subset] r2i <- r2i[subset] mi <- mi[subset] ni <- ni[subset] } dat <- escalc(measure=measure, ti=ti, r2i=r2i, mi=mi, ni=ni, vtype=vtype) } if (is.element(measure, c("PR","PLN","PLO","PAS","PFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) if (is.null(mi)) mi <- ni - xi k <- length(xi) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] mi <- mi[subset] } dat <- escalc(measure=measure, xi=xi, mi=mi, add=add, to=to, vtype=vtype, addyi=addyi, addvi=addvi) } if (is.element(measure, c("IR","IRLN","IRS","IRFT"))) { mf.xi <- mf[[match("xi", names(mf))]] mf.ti <- mf[[match("ti", names(mf))]] xi <- eval(mf.xi, data, enclos=sys.frame(sys.parent())) ti <- eval(mf.ti, data, enclos=sys.frame(sys.parent())) k <- length(xi) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) xi <- xi[subset] ti <- ti[subset] } dat <- escalc(measure=measure, xi=xi, ti=ti, add=add, to=to, 
vtype=vtype, addyi=addyi, addvi=addvi) } if (is.element(measure, c("MN","MNLN","CVLN","SDLN","SMD1"))) { mf.mi <- mf[[match("mi", names(mf))]] mf.sdi <- mf[[match("sdi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) sdi <- eval(mf.sdi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ni) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) mi <- mi[subset] sdi <- sdi[subset] ni <- ni[subset] } dat <- escalc(measure=measure, mi=mi, sdi=sdi, ni=ni, vtype=vtype) } if (is.element(measure, c("MC","SMCC","SMCR","SMCRH","ROMC","CVRC","VRC"))) { mf.m1i <- mf[[match("m1i", names(mf))]] mf.m2i <- mf[[match("m2i", names(mf))]] mf.sd1i <- mf[[match("sd1i", names(mf))]] mf.sd2i <- mf[[match("sd2i", names(mf))]] mf.ri <- mf[[match("ri", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] m1i <- eval(mf.m1i, data, enclos=sys.frame(sys.parent())) m2i <- eval(mf.m2i, data, enclos=sys.frame(sys.parent())) sd1i <- eval(mf.sd1i, data, enclos=sys.frame(sys.parent())) sd2i <- eval(mf.sd2i, data, enclos=sys.frame(sys.parent())) ri <- eval(mf.ri, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(m1i) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) m1i <- m1i[subset] m2i <- m2i[subset] sd1i <- sd1i[subset] sd2i <- sd2i[subset] ni <- ni[subset] ri <- ri[subset] } dat <- escalc(measure=measure, m1i=m1i, m2i=m2i, sd1i=sd1i, sd2i=sd2i, ri=ri, ni=ni, vtype=vtype) } if (is.element(measure, c("ARAW","AHW","ABT"))) { mf.ai <- mf[[match("ai", names(mf))]] mf.mi <- mf[[match("mi", names(mf))]] mf.ni <- mf[[match("ni", names(mf))]] ai <- eval(mf.ai, data, enclos=sys.frame(sys.parent())) mi <- eval(mf.mi, data, enclos=sys.frame(sys.parent())) ni <- eval(mf.ni, data, enclos=sys.frame(sys.parent())) k <- length(ai) ### number of outcomes before subsetting k.all <- k if (!is.null(subset)) { subset <- .setnafalse(subset, k=k) ai <- ai[subset] mi <- mi[subset] ni <- ni[subset] } dat <- escalc(measure=measure, ai=ai, mi=mi, ni=ni, vtype=vtype) } if (is.element(measure, "GEN")) stop(mstyle$stop("Specify the desired outcome measure via the 'measure' argument.")) ### note: these values are already subsetted yi <- dat$yi ### one or more yi/vi pairs may be NA/NA vi <- dat$vi ### one or more yi/vi pairs may be NA/NA ni <- attr(yi, "ni") ### unadjusted total sample sizes (ni.u in escalc) } ######################################################################### ### allow easy setting of weights to a single value if (length(weights) == 1L) weights <- rep(weights, k) ### note: k is number of outcomes before subsetting ### check length of yi and weights (only if weights is not NULL) if (!is.null(weights) && (length(weights) != k)) stop(mstyle$stop("Length of 'yi' and 'weights' is not the same.")) ### subsetting of weights if (!is.null(subset)) weights <- weights[subset] ######################################################################### if (verbose > 1) message(mstyle$message("Creating model matrix ...")) ### convert mods formula to X matrix and set intercept equal to FALSE ### skipped if formula has already been specified via yi argument, since mods is then no longer a formula (see [a]) if (inherits(mods, "formula")) { formula.mods <- mods options(na.action = "na.pass") ### set na.action to na.pass, so that NAs are not filtered out (we'll do 
that later) mods <- model.matrix(mods, data=data) ### extract model matrix attr(mods, "assign") <- NULL ### strip assign attribute (not needed at the moment) attr(mods, "contrasts") <- NULL ### strip contrasts attribute (not needed at the moment) options(na.action = na.act) ### set na.action back to na.act intercept <- FALSE ### set to FALSE since formula now controls whether the intercept is included or not } ### note: code further below ([b]) actually checks whether intercept is included or not ### turn a vector for mods into a column vector if (.is.vector(mods)) mods <- cbind(mods) ### turn a mods data frame into a matrix if (is.data.frame(mods)) mods <- as.matrix(mods) ### check if model matrix contains character variables if (is.character(mods)) stop(mstyle$stop("Model matrix contains character variables.")) ### check if mods matrix has the right number of rows if (!is.null(mods) && nrow(mods) != k) stop(mstyle$stop(paste0("Number of rows in the model matrix (", nrow(mods), ") does not match length of the outcome vector (", k, ")."))) ### for rma.ls models, get model matrix for scale part if (model == "rma.ls") { if (inherits(scale, "formula")) { formula.scale <- scale options(na.action = "na.pass") Z <- model.matrix(scale, data=data) colnames(Z)[grep("(Intercept)", colnames(Z))] <- "intrcpt" attr(Z, "assign") <- NULL attr(Z, "contrasts") <- NULL options(na.action = na.act) } else { Z <- scale if (.is.vector(Z)) Z <- cbind(Z) if (is.data.frame(Z)) Z <- as.matrix(Z) if (is.character(Z)) stop(mstyle$stop("Model matrix contains character variables.")) } if (nrow(Z) != k) stop(mstyle$stop(paste0("Number of rows in the model matrix specified via the 'scale' argument (", nrow(Z), ") does not match length of the outcome vector (", k, ")."))) } else { Z <- NULL } ### generate study labels if none are specified (or none have been found in yi) if (verbose > 1) message(mstyle$message("Generating/extracting study labels ...")) ### study ids (1:k sequence before subsetting) ids <- seq_len(k) if (is.null(slab)) { slab.null <- TRUE slab <- ids } else { if (anyNA(slab)) stop(mstyle$stop("NAs in study labels.")) if (length(slab) != k) stop(mstyle$stop("Study labels not of same length as data.")) slab.null <- FALSE } ### if a subset of studies is specified if (!is.null(subset)) { if (verbose > 1) message(mstyle$message("Subsetting ...")) mods <- mods[subset,,drop=FALSE] slab <- slab[subset] ids <- ids[subset] Z <- Z[subset,,drop=FALSE] } ### check if study labels are unique; if not, make them unique if (anyDuplicated(slab)) slab <- .make.unique(slab) ### add slab attribute back attr(yi, "slab") <- slab ### number of outcomes after subsetting k <- length(yi) ### check for non-positive sampling variances (and set negative values to 0) if (any(vi <= 0, na.rm=TRUE)) { allvipos <- FALSE if (!vi0) warning(mstyle$warning("There are outcomes with non-positive sampling variances."), call.=FALSE) vi.neg <- vi < 0 if (any(vi.neg, na.rm=TRUE)) { vi[vi.neg] <- 0 warning(mstyle$warning("Negative sampling variances constrained to zero."), call.=FALSE) } } else { allvipos <- TRUE } ### check for (and correct?) 
negative/infinite weights if (any(weights < 0, na.rm=TRUE)) stop(mstyle$stop("Negative weights not allowed.")) if (any(is.infinite(weights))) stop(mstyle$stop("Infinite weights not allowed.")) ### save full data (including potential NAs in yi/vi/weights/ni/mods/Z.f) ai.f <- ai bi.f <- bi ci.f <- ci di.f <- di x1i.f <- x1i x2i.f <- x2i t1i.f <- t1i t2i.f <- t2i yi.f <- yi vi.f <- vi weights.f <- weights ni.f <- ni mods.f <- mods Z.f <- Z k.f <- k ### total number of observed outcomes including all NAs ### check for NAs and act accordingly has.na <- is.na(yi) | is.na(vi) | (if (is.null(mods)) FALSE else apply(is.na(mods), 1, any)) | (if (is.null(Z)) FALSE else apply(is.na(Z), 1, any)) | (if (is.null(weights)) FALSE else is.na(weights)) not.na <- !has.na if (any(has.na)) { if (verbose > 1) message(mstyle$message("Handling NAs ...")) if (na.act == "na.omit" || na.act == "na.exclude" || na.act == "na.pass") { yi <- yi[not.na] vi <- vi[not.na] weights <- weights[not.na] ni <- ni[not.na] mods <- mods[not.na,,drop=FALSE] Z <- Z[not.na,,drop=FALSE] k <- length(yi) warning(mstyle$warning("Studies with NAs omitted from model fitting."), call.=FALSE) attr(yi, "measure") <- measure ### add measure attribute back attr(yi, "ni") <- ni ### add ni attribute back ### note: slab is always of the same length as the full yi vector (after subsetting), so missings are not removed and slab is not added back to yi } if (na.act == "na.fail") stop(mstyle$stop("Missing values in data.")) } ### at least one study left? if (k < 1L) stop(mstyle$stop("Processing terminated since k = 0.")) ### if k=1 and test != "z", set test="z" (other methods cannot be used) if (k == 1L && test != "z") { warning(mstyle$warning("Setting argument test=\"z\" since k=1."), call.=FALSE) test <- "z" } ### make sure that there is at least one column in X ([b]) if (is.null(mods) && !intercept) { warning(mstyle$warning("Must either include an intercept and/or moderators in model.\n Coerced intercept into the model."), call.=FALSE) intercept <- TRUE } ### add vector of 1s to the X matrix for the intercept (if intercept=TRUE) if (intercept) { X <- cbind(intrcpt=rep(1,k), mods) X.f <- cbind(intrcpt=rep(1,k.f), mods.f) } else { X <- mods X.f <- mods.f } ### drop redundant predictors ### note: need to save coef.na for functions that modify the data/model and then refit the model (regtest() and the ### various function that leave out an observation); so we can check if there are redundant/dropped predictors then tmp <- try(lm(yi ~ X - 1), silent=TRUE) if (inherits(tmp, "lm")) { coef.na <- is.na(coef(tmp)) } else { coef.na <- rep(FALSE, NCOL(X)) } if (any(coef.na)) { warning(mstyle$warning("Redundant predictors dropped from the model."), call.=FALSE) X <- X[,!coef.na,drop=FALSE] X.f <- X.f[,!coef.na,drop=FALSE] } ### check whether intercept is included and if yes, move it to the first column (NAs already removed, so na.rm=TRUE for any() not necessary) is.int <- apply(X, 2, .is.intercept) if (any(is.int)) { int.incl <- TRUE int.indx <- which(is.int, arr.ind=TRUE) X <- cbind(intrcpt=1, X[,-int.indx, drop=FALSE]) ### this removes any duplicate intercepts X.f <- cbind(intrcpt=1, X.f[,-int.indx, drop=FALSE]) ### this removes any duplicate intercepts intercept <- TRUE ### set intercept appropriately so that the predict() function works } else { int.incl <- FALSE } p <- NCOL(X) ### number of columns in X (including the intercept if it is included) ### check whether this is an intercept-only model if ((p == 1L) && .is.intercept(X)) { int.only <- TRUE } else { 
int.only <- FALSE } ### check if there are too many parameters for given k if (!(int.only && k == 1L)) { if (is.element(method, c("FE","EE","CE"))) { ### have to estimate p parms if (p > k) stop(mstyle$stop("Number of parameters to be estimated is larger than the number of observations.")) } else { if (is.numeric(tau2)) { ### have to estimate p parms (tau2 is fixed at value specified) if (p > k) stop(mstyle$stop("Number of parameters to be estimated is larger than the number of observations.")) } else { if ((p+1) > k) ### have to estimate p+1 parms stop(mstyle$stop("Number of parameters to be estimated is larger than the number of observations.")) } } } ### set/check 'btt' argument btt <- .set.btt(btt, p, int.incl, colnames(X)) m <- length(btt) ### number of betas to test (m = p if all betas are tested) ######################################################################### ### set default control parameters con <- list(verbose = FALSE, tau2.init = NULL, # initial value for iterative estimators (ML, REML, EB, SJ, SJIT, DLIT) tau2.min = 0, # lower bound for tau^2 value tau2.max = 100, # upper bound for tau^2 value (for PM/PMM/GENQM estimators; and passed down for tau^2 CI obtained with confint()) threshold = 10^-5, # convergence threshold (for ML, REML, EB, SJIT, DLIT) tol = .Machine$double.eps^0.25, # convergence tolerance for uniroot() as used for PM, PMM, and GENQM (also used in 'll0 - ll > con$tol' check for ML/REML) ll0check = TRUE, # should the 'll0 - ll > con$tol' check be conducted for ML/REML? maxiter = 100, # maximum number of iterations (for ML, REML, EB, SJIT, DLIT) stepadj = 1, # step size adjustment for Fisher scoring algorithm (for ML, REML, EB) REMLf = TRUE, # should |X'X| term be included in the REML log likelihood? evtol = 1e-07, # lower bound for eigenvalues to determine if model matrix is positive definite (also for checking if vimaxmin >= 1/con$evtol) alpha.init = NULL, # initial values for scale parameters optimizer = "nlminb", # optimizer to use ("optim", "nlminb", "uobyqa", "newuoa", "bobyqa", "nloptr", "nlm", "hjk", "nmk", "mads", "ucminf", "optimParallel", "constrOptim") for location-scale models optmethod = "BFGS", # argument 'method' for optim() ("Nelder-Mead" and "BFGS" are sensible options) parallel = list(), # parallel argument for optimParallel() (note: 'cl' argument in parallel is not passed; this is directly specified via 'cl') cl = NULL, # arguments for optimParallel() ncpus = 1L, # arguments for optimParallel() hessianCtrl=list(r=8), # arguments passed on to 'method.args' of hessian() scaleZ = TRUE) ### replace defaults with any user-defined values con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (verbose) con$verbose <- verbose verbose <- con$verbose ### constrain negative tau2.min values to -min(vi) (to ensure that the marginal variance is always >= 0) if (con$tau2.min < 0 && (-con$tau2.min > min(vi))) { con$tau2.min <- -min(vi) warning(mstyle$warning(paste0("Value of 'tau2.min' constrained to -min(vi) = ", .fcf(-min(vi), digits[["est"]]), ".")), call.=FALSE) } ### convergence indicator and change variable (for iterative estimators) conv <- 1 change <- con$threshold + 1 ### check whether model matrix is of full rank if (any(eigen(crossprod(X), symmetric=TRUE, only.values=TRUE)$values <= con$evtol)) stop(mstyle$stop("Model matrix not of full rank. 
Cannot fit model.")) ### check ratio of largest to smallest sampling variance ### note: need to exclude some special cases (0/0 = NaN, max(vi)/0 = Inf) ### TODO: use the condition number of diag(vi) here instead? vimaxmin <- max(vi) / min(vi) if (!is.nan(vimaxmin) && !is.infinite(vimaxmin) && vimaxmin >= 1/con$evtol) warning(mstyle$warning("Ratio of largest to smallest sampling variance extremely large. May not be able to obtain stable results."), call.=FALSE) ### iterations counter for iterative estimators (i.e., DLIT, SJIT, ML, REML, EB) ### (note: PM, PMM, and GENQM are also iterative, but uniroot() handles that) iter <- 0 ### set some defaults se.tau2 <- I2 <- H2 <- QE <- QEp <- NA s2w <- 1 level <- ifelse(level == 0, 1, ifelse(level >= 1, (100-level)/100, ifelse(level > .5, 1-level, level))) Y <- as.matrix(yi) ######################################################################### ###### heterogeneity estimation for standard model (rma.uni) if (model == "rma.uni") { if (is.numeric(tau2) && !is.element(method, c("FE","EE","CE"))) { ### if user has fixed the tau2 value tau2.fix <- TRUE tau2.val <- tau2 } else { tau2.fix <- FALSE tau2.val <- NA } if (verbose > 1 && !tau2.fix && !is.element(method, c("FE","EE","CE"))) message(mstyle$message("Estimating tau^2 value ...\n")) if (k == 1L) { method.sav <- method method = "k1" # set method to k1 so all of the stuff below is skipped if (!tau2.fix) tau2 <- 0 } ### Hunter & Schmidt (HS) estimator (or k-corrected HS estimator (HSk)) if (is.element(method, c("HS", "HSk"))) { if (!allvipos) stop(mstyle$stop(method, " estimator cannot be used when there are non-positive sampling variances in the data.")) wi <- 1/vi W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) RSS <- crossprod(Y,P) %*% Y if (method == "HS") { tau2 <- ifelse(tau2.fix, tau2.val, (RSS - k) / sum(wi)) } else { tau2 <- ifelse(tau2.fix, tau2.val, (k/(k-p)*RSS - k) / sum(wi)) ### HSk = (RSS - (k-p)) / sum(wi) * k/(k-p) #trP <- sum(wi) * (k-p) / k #tau2 <- ifelse(tau2.fix, tau2.val, k/(k-p) * (RSS - (k-p)) / trP) #tau2 <- ifelse(tau2.fix, tau2.val, (RSS - (k-p)) / trP) #tau2 <- ifelse(tau2.fix, tau2.val, k/(k-p) * (RSS - (k-p)) / sum(wi)) } } ### Hedges (HE) estimator (or initial value for ML, REML, EB) if (is.element(method, c("HE","ML","REML","EB"))) { stXX <- .invcalc(X=X, W=diag(k), k=k) P <- diag(k) - X %*% tcrossprod(stXX,X) RSS <- crossprod(Y,P) %*% Y V <- diag(vi, nrow=k, ncol=k) PV <- P %*% V ### note: this is not symmetric trPV <- .tr(PV) ### since PV needs to be computed anyway, can use .tr() tau2 <- ifelse(tau2.fix, tau2.val, (RSS - trPV) / (k-p)) } ### DerSimonian-Laird (DL) estimator if (method == "DL") { if (!allvipos) stop(mstyle$stop("DL estimator cannot be used when there are non-positive sampling variances in the data.")) wi <- 1/vi W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) RSS <- crossprod(Y,P) %*% Y trP <- .tr(P) tau2 <- ifelse(tau2.fix, tau2.val, (RSS - (k-p)) / trP) } ### DerSimonian-Laird (DL) estimator with iteration if (method == "DLIT") { if (is.null(con$tau2.init)) { tau2 <- 0 } else { tau2 <- con$tau2.init } while (change > con$threshold) { if (verbose) cat(mstyle$verbose(paste("Iteration", iter, "\ttau^2 =", .fcf(tau2, digits[["var"]]), "\n"))) iter <- iter + 1 old2 <- tau2 wi <- 1/(vi + tau2) if (any(tau2 + vi < 0)) stop(mstyle$stop("Some marginal variances are negative.")) if (any(is.infinite(wi))) stop(mstyle$stop("Division by 
zero when computing the inverse variance weights.")) W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) RSS <- crossprod(Y,P) %*% Y trP <- .tr(P) tau2 <- ifelse(tau2.fix, tau2.val, (RSS - (k-p)) / trP) tau2[tau2 < con$tau2.min] <- con$tau2.min change <- abs(old2 - tau2) if (iter > con$maxiter) { conv <- 0 break } } if (conv == 0L) stop(mstyle$stop("Algorithm did not converge.")) } ### generalized Q-statistic estimator if (method == "GENQ") { #if (!allvipos) # stop(mstyle$stop("GENQ estimator cannot be used when there are non-positive sampling variances in the data.")) if (is.null(weights)) stop(mstyle$stop("Must specify 'weights' when method='GENQ'.")) A <- diag(weights, nrow=k, ncol=k) stXAX <- .invcalc(X=X, W=A, k=k) P <- A - A %*% X %*% stXAX %*% t(X) %*% A V <- diag(vi, nrow=k, ncol=k) PV <- P %*% V ### note: this is not symmetric trP <- .tr(P) trPV <- .tr(PV) RSS <- crossprod(Y,P) %*% Y tau2 <- ifelse(tau2.fix, tau2.val, (RSS - trPV) / trP) } ### generalized Q-statistic estimator (median unbiased version) if (method == "GENQM") { if (is.null(weights)) stop(mstyle$stop("Must specify 'weights' when method='GENQM'.")) A <- diag(weights, nrow=k, ncol=k) stXAX <- .invcalc(X=X, W=A, k=k) P <- A - A %*% X %*% stXAX %*% t(X) %*% A V <- diag(vi, nrow=k, ncol=k) PV <- P %*% V ### note: this is not symmetric trP <- .tr(P) if (!tau2.fix) { RSS <- crossprod(Y,P) %*% Y if (.GENQ.func(con$tau2.min, P=P, vi=vi, Q=RSS, level=0, k=k, p=p, getlower=TRUE) > 0.5) { ### if GENQ.tau2.min is > 0.5, then estimate < tau2.min tau2 <- con$tau2.min } else { if (.GENQ.func(con$tau2.max, P=P, vi=vi, Q=RSS, level=0, k=k, p=p, getlower=TRUE) < 0.5) { ### if GENQ.tau2.max is < 0.5, then estimate > tau2.max stop(mstyle$stop("Value of 'tau2.max' too low. 
Try increasing 'tau2.max' or switch to another 'method'.")) } else { tau2 <- try(uniroot(.GENQ.func, interval=c(con$tau2.min, con$tau2.max), tol=con$tol, maxiter=con$maxiter, P=P, vi=vi, Q=RSS, level=0.5, k=k, p=p, getlower=FALSE, verbose=verbose, digits=digits, extendInt="no")$root, silent=TRUE) if (inherits(tau2, "try-error")) stop(mstyle$stop("Error in iterative search for tau2 using uniroot().")) } } } else { tau2 <- tau2.val } wi <- 1/(vi + tau2) } ### Sidik-Jonkman (SJ) estimator if (method == "SJ") { if (is.null(con$tau2.init)) { tau2.0 <- c(var(yi) * (k-1)/k) } else { tau2.0 <- con$tau2.init } wi <- 1/(vi + tau2.0) W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) RSS <- crossprod(Y,P) %*% Y V <- diag(vi, nrow=k, ncol=k) PV <- P %*% V ### note: this is not symmetric tau2 <- ifelse(tau2.fix, tau2.val, tau2.0 * RSS / (k-p)) } ### Sidik-Jonkman (SJ) estimator with iteration if (method == "SJIT") { if (is.null(con$tau2.init)) { tau2 <- var(yi) * (k-1)/k } else { tau2 <- con$tau2.init } tau2.0 <- tau2 while (change > con$threshold) { if (verbose) cat(mstyle$verbose(paste("Iteration", iter, "\ttau^2 =", .fcf(tau2, digits[["var"]]), "\n"))) iter <- iter + 1 old2 <- tau2 wi <- 1/(vi + tau2) W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) RSS <- crossprod(Y,P) %*% Y V <- diag(vi, nrow=k, ncol=k) PV <- P %*% V ### note: this is not symmetric tau2 <- ifelse(tau2.fix, tau2.val, tau2 * RSS / (k-p)) change <- abs(old2 - tau2) if (iter > con$maxiter) { conv <- 0 break } } if (conv == 0L) stop(mstyle$stop("Algorithm did not converge.")) } ### Paule-Mandel (PM) estimator if (method == "PM") { if (!allvipos) stop(mstyle$stop("PM estimator cannot be used when there are non-positive sampling variances in the data.")) if (!tau2.fix) { if (.QE.func(con$tau2.min, Y=Y, vi=vi, X=X, k=k, objective=0) < k-p) { tau2 <- con$tau2.min } else { if (.QE.func(con$tau2.max, Y=Y, vi=vi, X=X, k=k, objective=0) > k-p) { stop(mstyle$stop("Value of 'tau2.max' too low. Try increasing 'tau2.max' or switch to another 'method'.")) } else { tau2 <- try(uniroot(.QE.func, interval=c(con$tau2.min, con$tau2.max), tol=con$tol, maxiter=con$maxiter, Y=Y, vi=vi, X=X, k=k, objective=k-p, verbose=verbose, digits=digits, extendInt="no")$root, silent=TRUE) if (inherits(tau2, "try-error")) stop(mstyle$stop("Error in iterative search for tau2 using uniroot().")) } } #W <- diag(wi, nrow=k, ncol=k) #stXWX <- .invcalc(X=X, W=W, k=k) #P <- W - W %*% X %*% stXWX %*% crossprod(X,W) ### needed for se.tau2 computation below (not when using the simpler equation) } else { tau2 <- tau2.val } wi <- 1/(vi + tau2) } ### Paule-Mandel (PM) estimator (median unbiased version) if (method == "PMM") { if (!allvipos) stop(mstyle$stop("PMM estimator cannot be used when there are non-positive sampling variances in the data.")) if (!tau2.fix) { if (.QE.func(con$tau2.min, Y=Y, vi=vi, X=X, k=k, objective=0) < qchisq(0.5, df=k-p)) { tau2 <- con$tau2.min } else { if (.QE.func(con$tau2.max, Y=Y, vi=vi, X=X, k=k, objective=0) > qchisq(0.5, df=k-p)) { stop(mstyle$stop("Value of 'tau2.max' too low. 
Try increasing 'tau2.max' or switch to another 'method'.")) } else { tau2 <- try(uniroot(.QE.func, interval=c(con$tau2.min, con$tau2.max), tol=con$tol, maxiter=con$maxiter, Y=Y, vi=vi, X=X, k=k, objective=qchisq(0.5, df=k-p), verbose=verbose, digits=digits, extendInt="no")$root, silent=TRUE) if (inherits(tau2, "try-error")) stop(mstyle$stop("Error in iterative search for tau2. Try increasing 'tau2.max' or switch to another 'method'.")) } } } else { tau2 <- tau2.val } wi <- 1/(vi + tau2) } ### maximum-likelihood (ML), restricted maximum-likelihood (REML), and empirical Bayes (EB) estimators if (is.element(method, c("ML","REML","EB"))) { if (is.null(con$tau2.init)) { ### check if user specified initial value for tau2 tau2 <- max(0, tau2, con$tau2.min) ### if not, use HE estimate (or con$tau2.min) as initial estimate for tau2 } else { tau2 <- con$tau2.init ### if yes, use value specified by user } while (change > con$threshold) { if (verbose) cat(mstyle$verbose(paste("Iteration", iter, "\ttau^2 =", .fcf(tau2, digits[["var"]]), "\n"))) iter <- iter + 1 old2 <- tau2 wi <- 1/(vi + tau2) if (any(tau2 + vi < 0)) stop(mstyle$stop("Some marginal variances are negative.")) if (any(is.infinite(wi))) stop(mstyle$stop("Division by zero when computing the inverse variance weights.")) W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) if (method == "ML") { PP <- P %*% P adj <- (crossprod(Y,PP) %*% Y - sum(wi)) / sum(wi^2) } if (method == "REML") { PP <- P %*% P adj <- (crossprod(Y,PP) %*% Y - .tr(P)) / .tr(PP) } if (method == "EB") { adj <- (crossprod(Y,P) %*% Y * k/(k-p) - k) / sum(wi) } adj <- adj * con$stepadj ### apply (user-defined) step adjustment while (tau2 + adj < con$tau2.min) ### use step-halving if necessary adj <- adj / 2 tau2 <- ifelse(tau2.fix, tau2.val, tau2 + adj) change <- abs(old2 - tau2) if (iter > con$maxiter) { conv <- 0 break } } if (conv == 0L) stop(mstyle$stop("Fisher scoring algorithm did not converge. See 'help(rma)' for possible remedies.")) ### check if ll is larger when tau^2 = 0 (only if ll0check=TRUE and only possible/sensible if allvipos and !tau2.fix) ### note: this doesn't catch the case where tau^2 = 0 is a local maximum if (is.element(method, c("ML","REML")) && con$ll0check && allvipos && !tau2.fix) { wi <- 1/vi W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) beta <- stXWX %*% crossprod(X,W) %*% Y RSS <- sum(wi*(yi - X %*% beta)^2) if (method == "ML") ll0 <- -1/2 * (k) * log(2*base::pi) - 1/2 * sum(log(vi)) - 1/2 * RSS if (method == "REML") ll0 <- -1/2 * (k-p) * log(2*base::pi) - 1/2 * sum(log(vi)) - 1/2 * determinant(crossprod(X,W) %*% X, logarithm=TRUE)$modulus - 1/2 * RSS wi <- 1/(vi + tau2) if (any(tau2 + vi < 0)) stop(mstyle$stop("Some marginal variances are negative.")) if (any(is.infinite(wi))) stop(mstyle$stop("Division by zero when computing the inverse variance weights.")) W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) beta <- stXWX %*% crossprod(X,W) %*% Y RSS <- sum(wi*(yi - X %*% beta)^2) if (method == "ML") ll <- -1/2 * (k) * log(2*base::pi) - 1/2 * sum(log(vi + tau2)) - 1/2 * RSS if (method == "REML") ll <- -1/2 * (k-p) * log(2*base::pi) - 1/2 * sum(log(vi + tau2)) - 1/2 * determinant(crossprod(X,W) %*% X, logarithm=TRUE)$modulus - 1/2 * RSS if (ll0 - ll > con$tol && tau2 > con$threshold) { warning(mstyle$warning("Fisher scoring algorithm may have gotten stuck at a local maximum.\n Setting tau^2 = 0. 
Check the profile likelihood plot with profile()."), call.=FALSE) tau2 <- 0 } } ### need to run this so that wi and P are based on the final tau^2 value wi <- 1/(vi + tau2) if (any(tau2 + vi < 0)) stop(mstyle$stop("Some marginal variances are negative.")) if (any(is.infinite(wi))) stop(mstyle$stop("Division by zero when computing the inverse variance weights.")) W <- diag(wi, nrow=k, ncol=k) stXWX <- .invcalc(X=X, W=W, k=k) P <- W - W %*% X %*% stXWX %*% crossprod(X,W) } ### make sure that tau2 is >= con$tau2.min tau2 <- max(con$tau2.min, c(tau2)) ### check if any marginal variances are negative (only possible if user has changed tau2.min) if (!is.na(tau2) && any(tau2 + vi < 0)) stop(mstyle$stop("Some marginal variances are negative.")) ### verbose output upon convergence for ML/REML/EB estimators if (verbose && is.element(method, c("ML","REML","EB"))) { cat(mstyle$verbose(paste("Iteration", iter, "\ttau^2 =", .fcf(tau2, digits[["var"]]), "\n"))) cat(mstyle$verbose(paste("Fisher scoring algorithm converged after", iter, "iterations.\n"))) } ### standard error of the tau^2 estimators (also when the user has fixed/specified a tau^2 value) ### see notes.pdf and note: .tr(P%*%P) = sum(P*t(P)) = sum(P*P) (since P is symmetric) if (method == "HS") se.tau2 <- sqrt(1/sum(wi)^2 * (2*(k-p) + 4*max(tau2,0)*.tr(P) + 2*max(tau2,0)^2*sum(P*P))) ### note: wi = 1/vi if (method == "HSk") se.tau2 <- k/(k-p) * sqrt(1/sum(wi)^2 * (2*(k-p) + 4*max(tau2,0)*.tr(P) + 2*max(tau2,0)^2*sum(P*P))) #se.tau2 <- sqrt(1/trP^2 * (2*(k-p) + 4*max(tau2,0)*.tr(P) + 2*max(tau2,0)^2*sum(P*P))) # trP <- sum(wi) * (k-p) / k if (method == "HE") se.tau2 <- sqrt(1/(k-p)^2 * (2*sum(PV*t(PV)) + 4*max(tau2,0)*trPV + 2*max(tau2,0)^2*(k-p))) if (method == "DL" || method == "DLIT") se.tau2 <- sqrt(1/trP^2 * (2*(k-p) + 4*max(tau2,0)*trP + 2*max(tau2,0)^2*sum(P*P))) if (method == "GENQ" || method == "GENQM") se.tau2 <- sqrt(1/trP^2 * (2*sum(PV*t(PV)) + 4*max(tau2,0)*sum(PV*P) + 2*max(tau2,0)^2*sum(P*P))) if (method == "SJ") se.tau2 <- sqrt(tau2.0^2/(k-p)^2 * (2*sum(PV*t(PV)) + 4*max(tau2,0)*sum(PV*P) + 2*max(tau2,0)^2*sum(P*P))) if (method == "ML") se.tau2 <- sqrt(2/sum(wi^2)) ### note: wi = 1/(vi + tau2) for ML, REML, EB, PM, and SJIT if (method == "REML") se.tau2 <- sqrt(2/sum(P*P)) if (method == "EB" || method == "PM" || method == "PMM" || method == "SJIT") { #V <- diag(vi, nrow=k, ncol=k) #PV <- P %*% V ### note: this is not symmetric #se.tau2 <- sqrt((k/(k-p))^2 / sum(wi)^2 * (2*sum(PV*t(PV)) + 4*max(tau2,0)*sum(PV*P) + 2*max(tau2,0)^2*sum(P*P))) se.tau2 <- sqrt(2*k^2/(k-p) / sum(wi)^2) ### these two equations are actually identical, but this one is much simpler } if (k == 1L) method <- method.sav } ######################################################################### ###### parameter estimation for location-scale model (rma.ls) if (model == "rma.ls") { if (!is.element(method, c("ML","REML"))) stop(mstyle$stop("Location-scale models can only be fitted with ML or REML estimation.")) tau2.fix <- FALSE if (is.numeric(tau2)) warning(mstyle$warning("Argument 'tau2' ignored for location-scale models."), call.=FALSE) ### get optimizer arguments from control argument optimizer <- match.arg(con$optimizer, c("optim","nlminb","uobyqa","newuoa","bobyqa","nloptr","nlm","hjk","nmk","mads","ucminf","optimParallel","constrOptim")) optmethod <- match.arg(con$optmethod, c("Nelder-Mead","BFGS","CG","L-BFGS-B","SANN","Brent")) parallel <- con$parallel cl <- con$cl ncpus <- con$ncpus optcontrol <- control[is.na(con.pos)] ### get arguments that are 
control arguments for optimizer if (length(optcontrol) == 0L) optcontrol <- list() ### if control argument 'ncpus' is larger than 1, automatically switch to optimParallel optimizer if (ncpus > 1L) optimizer <- "optimParallel" ### when using an identity link, force optimizer to constrOptim if (link == "identity") optimizer <- "constrOptim" if (link == "log" && optimizer == "constrOptim") stop(mstyle$stop("Cannot use 'constrOptim' optimizer when using a log link.")) reml <- ifelse(method=="REML", TRUE, FALSE) ### set NLOPT_LN_BOBYQA as the default algorithm for nloptr optimizer ### and by default use a relative convergence criterion of 1e-8 on the function value if (optimizer=="nloptr" && !is.element("algorithm", names(optcontrol))) optcontrol$algorithm <- "NLOPT_LN_BOBYQA" if (optimizer=="nloptr" && !is.element("ftol_rel", names(optcontrol))) optcontrol$ftol_rel <- 1e-8 ### for mads, set trace=FALSE and tol=1e-6 by default if (optimizer=="mads" && !is.element("trace", names(optcontrol))) optcontrol$trace <- FALSE if (optimizer=="mads" && !is.element("tol", names(optcontrol))) optcontrol$tol <- 1e-6 ### check that the required packages are installed if (is.element(optimizer, c("uobyqa","newuoa","bobyqa"))) { if (!requireNamespace("minqa", quietly=TRUE)) stop(mstyle$stop("Please install the 'minqa' package to use this optimizer.")) } if (optimizer == "nloptr") { if (!requireNamespace("nloptr", quietly=TRUE)) stop(mstyle$stop("Please install the 'nloptr' package to use this optimizer.")) } if (is.element(optimizer, c("hjk","nmk","mads"))) { if (!requireNamespace("dfoptim", quietly=TRUE)) stop(mstyle$stop("Please install the 'dfoptim' package to use this optimizer.")) } if (optimizer == "ucminf") { if (!requireNamespace("ucminf", quietly=TRUE)) stop(mstyle$stop("Please install the 'ucminf' package to use this optimizer.")) } if (optimizer == "optimParallel") { if (!requireNamespace("optimParallel", quietly=TRUE)) stop(mstyle$stop("Please install the 'optimParallel' package to use this optimizer.")) } if (!isTRUE(ddd$skiphes) && !requireNamespace("numDeriv", quietly=TRUE)) stop(mstyle$stop("Please install the 'numDeriv' package to compute the Hessian.")) ### drop redundant predictors tmp <- try(lm(yi ~ Z - 1), silent=TRUE) if (inherits(tmp, "lm")) { coef.na.Z <- is.na(coef(tmp)) } else { coef.na.Z <- rep(FALSE, NCOL(Z)) } if (any(coef.na.Z)) { warning(mstyle$warning("Redundant predictors dropped from the scale model."), call.=FALSE) Z <- Z[,!coef.na.Z,drop=FALSE] Z.f <- Z.f[,!coef.na.Z,drop=FALSE] } ### check whether intercept is included and if yes, move it to the first column (NAs already removed, so na.rm=TRUE for any() not necessary) is.int <- apply(Z, 2, .is.intercept) if (any(is.int)) { Z.int.incl <- TRUE int.indx <- which(is.int, arr.ind=TRUE) Z <- cbind(intrcpt=1, Z[,-int.indx, drop=FALSE]) ### this removes any duplicate intercepts Z.f <- cbind(intrcpt=1, Z.f[,-int.indx, drop=FALSE]) ### this removes any duplicate intercepts Z.intercept <- TRUE ### set intercept appropriately so that the predict() function works } else { Z.int.incl <- FALSE } q <- NCOL(Z) ### number of columns in Z (including the intercept if it is included) ### check whether model matrix is of full rank if (any(eigen(crossprod(Z), symmetric=TRUE, only.values=TRUE)$values <= con$evtol)) stop(mstyle$stop("Model matrix for scale part of the model not of full rank. 
Cannot fit model.")) ### check whether this is an intercept-only model is.int <- apply(Z, 2, .is.intercept) if ((q == 1L) && is.int) { Z.int.only <- TRUE } else { Z.int.only <- FALSE } ### checks on alpha argument if (missing(alpha) || is.null(alpha) || all(is.na(alpha))) { alpha <- rep(NA, q) } else { if (length(alpha) == 1L) alpha <- rep(alpha, q) if (length(alpha) != q) stop(mstyle$stop(paste0("Length of 'alpha' argument (", length(alpha), ") does not match actual number of parameters (", q, ")."))) } ### rescale Z matrix (only for models with moderators and models including an intercept term and when alpha[1] is not fixed) if (!Z.int.only && Z.int.incl && con$scaleZ && is.na(alpha[1])) { Zsave <- Z meanZ <- colMeans(Z[, 2:q, drop=FALSE]) sdZ <- apply(Z[, 2:q, drop=FALSE], 2, sd) ### consider using colSds() from matrixStats package is.d <- apply(Z, 2, .is.dummy) ### is each column a dummy variable (i.e., only 0s and 1s)? mZ <- rbind(c(intrcpt=1, -1*ifelse(is.d[-1], 0, meanZ/sdZ)), cbind(0, diag(ifelse(is.d[-1], 1, 1/sdZ), nrow=length(is.d)-1, ncol=length(is.d)-1))) imZ <- try(suppressWarnings(solve(mZ)), silent=TRUE) Z[,!is.d] <- apply(Z[, !is.d, drop=FALSE], 2, scale) ### rescale the non-dummy variables if (any(!is.na(alpha))) { if (inherits(imZ, "try-error")) stop(mstyle$stop("Unable to rescale starting values for the scale parameters.")) alpha <- diag(imZ) * alpha } } else { mZ <- NULL } if (k == 1L && Z.int.only) { if (link == "log") con$alpha.init <- -10000 if (link == "identity") con$alpha.init <- 0.00001 } ### set/transform/check alpha.init if (verbose > 1) message(mstyle$message("Extracting/computing initial values ...")) if (is.null(con$alpha.init)) { fit <- suppressWarnings(rma.uni(yi, vi, mods=X, intercept=FALSE, method="HE", skipr2=TRUE)) tmp <- rstandard(fit) if (link == "log") { tmp <- suppressWarnings(rma.uni(log(tmp$resid^2), 4/tmp$resid^2*tmp$se^2, mods=Z, intercept=FALSE, method="FE")) #tmp <- rma.uni(log(tmp$resid^2), 4/tmp$resid^2*tmp$se^2, mods=Z, intercept=FALSE, method="FE") #tmp <- rma.uni(log(tmp$resid^2), tmp$se^2, mods=Z, intercept=FALSE, method="FE") #tmp <- rma.uni(log(tmp$resid^2), 1, mods=Z, intercept=FALSE, method="FE") alpha.init <- coef(tmp) } if (link == "identity") { #tmp <- rma.uni(tmp$resid^2, 4*tmp$resid^2*tmp$se^2, mods=Z, intercept=FALSE, method="FE") tmp <- suppressWarnings(rma.uni(tmp$resid^2, tmp$se^2, mods=Z, intercept=FALSE, method="FE")) #tmp <- rma.uni(tmp$resid^2, 1, mods=Z, intercept=FALSE, method="FE") alpha.init <- coef(tmp) if (any(Z %*% alpha.init < 0)) alpha.init <- ifelse(is.int, fit$tau2+.01, 0) if (any(Z %*% alpha.init < 0)) stop(mstyle$stop("Unable to find suitable starting values for the scale parameters.")) } } else { alpha.init <- con$alpha.init if (!Z.int.only && Z.int.incl && con$scaleZ && is.na(alpha[1])) { if (inherits(imZ, "try-error")) stop(mstyle$stop("Unable to rescale starting values for the scale parameters.")) alpha.init <- c(imZ %*% cbind(alpha.init)) } if (link == "identity" && any(Z %*% alpha.init < 0)) stop(mstyle$stop("Starting values for the scale parameters lead to one or more negative tau^2 values.")) } if (length(alpha.init) != q) stop(mstyle$stop(paste0("Length of 'alpha.init' argument (", length(alpha.init), ") does not match actual number of parameters (", q, ")."))) if (anyNA(alpha.init)) stop(mstyle$stop("No missing values allowed in 'alpha.init'.")) if (verbose > 1) message(mstyle$message("Estimating scale parameters ...\n")) ### obtain initial values for beta (only need this when optimizing over 
beta and alpha jointly) #wi <- 1/vi #W <- diag(wi, nrow=k, ncol=k) #stXWX <- .invcalc(X=X, W=W, k=k) #beta.init <- stXWX %*% crossprod(X,W) %*% Y if (is.element(optimizer, c("optim","constrOptim"))) { par.arg <- "par" ctrl.arg <- ", control=optcontrol" } if (optimizer=="nlminb") { par.arg <- "start" ctrl.arg <- ", control=optcontrol" } if (is.element(optimizer, c("uobyqa","newuoa","bobyqa"))) { par.arg <- "par" optimizer <- paste0("minqa::", optimizer) ### need to use this since loading nloptr masks bobyqa() and newuoa() functions ctrl.arg <- ", control=optcontrol" } if (optimizer=="nloptr") { par.arg <- "x0" optimizer <- paste0("nloptr::nloptr") ### need to use this due to requireNamespace() ctrl.arg <- ", opts=optcontrol" } if (optimizer=="nlm") { par.arg <- "p" ### because of this, must use argument name pX for p (number of columns in X matrix) ctrl.arg <- paste(names(optcontrol), unlist(optcontrol), sep="=", collapse=", ") if (nchar(ctrl.arg) != 0L) ctrl.arg <- paste0(", ", ctrl.arg) } if (is.element(optimizer, c("hjk","nmk","mads"))) { par.arg <- "par" optimizer <- paste0("dfoptim::", optimizer) ### need to use this so that the optimizers can be found ctrl.arg <- ", control=optcontrol" } if (optimizer=="ucminf") { par.arg <- "par" optimizer <- paste0("ucminf::ucminf") ### need to use this due to requireNamespace() ctrl.arg <- ", control=optcontrol" } if (optimizer=="optimParallel") { par.arg <- "par" optimizer <- paste0("optimParallel::optimParallel") ### need to use this due to requireNamespace() ctrl.arg <- ", control=optcontrol, parallel=parallel" parallel$cl <- NULL if (is.null(cl)) { ncpus <- as.integer(ncpus) if (ncpus < 1L) stop(mstyle$stop("Control argument 'ncpus' must be >= 1.")) cl <- parallel::makePSOCKcluster(ncpus) on.exit(parallel::stopCluster(cl), add=TRUE) } else { if (!inherits(cl, "SOCKcluster")) stop(mstyle$stop("Specified cluster is not of class 'SOCKcluster'.")) } parallel$cl <- cl if (is.null(parallel$forward)) parallel$forward <- FALSE if (is.null(parallel$loginfo)) { if (verbose) { parallel$loginfo <- TRUE } else { parallel$loginfo <- FALSE } } } #return(list(con=con, optimizer=optimizer, optmethod=optmethod, optcontrol=optcontrol, ctrl.arg=ctrl.arg)) if (link == "log") { optcall <- paste(optimizer, "(", par.arg, "=alpha.init, .ll.rma.ls, ", ifelse(optimizer=="optim", "method=optmethod, ", ""), "yi=yi, vi=vi, X=X, Z=Z, reml=reml, k=k, pX=p, alpha.val=alpha, verbose=verbose, digits=digits, #hessian=TRUE, REMLf=con$REMLf, link=link, mZ=mZ", ctrl.arg, ")\n", sep="") } if (link == "identity") { optcall <- paste0("constrOptim(theta=alpha.init, f=.ll.rma.ls, grad=NULL, ui=Z, ci=rep(0,k), yi=yi, vi=vi, X=X, Z=Z, reml=reml, k=k, pX=p, alpha.val=alpha, verbose=verbose, digits=digits, REMLf=con$REMLf, link=link, mZ=mZ", ctrl.arg, ")\n") } #return(optcall) if (verbose) { opt.res <- try(eval(parse(text=optcall)), silent=!verbose) } else { opt.res <- try(suppressWarnings(eval(parse(text=optcall))), silent=!verbose) } #return(opt.res) if (optimizer == "optimParallel::optimParallel" && verbose) { tmp <- capture.output(print(opt.res$loginfo)) .print.output(tmp, mstyle$verbose) } if (inherits(opt.res, "try-error")) stop(mstyle$stop("Error during the optimization. 
Use verbose=TRUE and see help(rma) for more details on the optimization routines.")) ### convergence checks if (is.element(optimizer, c("optim","constrOptim","nlminb","dfoptim::hjk","dfoptim::nmk","optimParallel::optimParallel")) && opt.res$convergence != 0) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (is.element(optimizer, c("dfoptim::mads")) && opt.res$convergence > optcontrol$tol) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (is.element(optimizer, c("minqa::uobyqa","minqa::newuoa","minqa::bobyqa")) && opt.res$ierr != 0) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (ierr = ", opt.res$ierr, ")."))) if (optimizer=="nloptr::nloptr" && !(opt.res$status >= 1 && opt.res$status <= 4)) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (status = ", opt.res$status, ")."))) if (optimizer=="ucminf::ucminf" && !(opt.res$convergence == 1 || opt.res$convergence == 2)) stop(mstyle$stop(paste0("Optimizer (", optimizer, ") did not achieve convergence (convergence = ", opt.res$convergence, ")."))) if (verbose > 2) { cat("\n") tmp <- capture.output(print(opt.res)) .print.output(tmp, mstyle$verbose) } ### copy estimated values to 'par' so code below works if (optimizer=="nloptr::nloptr") opt.res$par <- opt.res$solution if (optimizer=="nlm") opt.res$par <- opt.res$estimate ### replace fixed alpha values in opt.res$par opt.res$par <- ifelse(is.na(alpha), opt.res$par, alpha) ### try to compute vcov matrix for scale parameter estimates H <- NA va <- matrix(NA_real_, nrow=q, ncol=q) hest <- is.na(alpha) if (any(hest) && !isTRUE(ddd$skiphes)) { if (verbose > 1) message(mstyle$message("\nComputing Hessian ...")) H <- try(numDeriv::hessian(func=.ll.rma.ls, x=opt.res$par, method.args=con$hessianCtrl, yi=yi, vi=vi, X=X, Z=Z, reml=reml, k=k, pX=p, alpha.val=alpha, verbose=FALSE, digits=digits, REMLf=con$REMLf, link=link, mZ=mZ), silent=TRUE) if (inherits(H, "try-error")) { warning(mstyle$warning("Error when trying to compute Hessian."), call.=FALSE) } else { H.hest <- H[hest, hest, drop=FALSE] iH.hest <- try(suppressWarnings(chol2inv(chol(H.hest))), silent=TRUE) if (inherits(iH.hest, "try-error") || anyNA(iH.hest) || any(is.infinite(iH.hest))) { warning(mstyle$warning("Error when trying to invert Hessian."), call.=FALSE) } else { va[hest, hest] <- iH.hest } } } ### get scale parameter estimates alpha.val <- alpha alpha <- cbind(opt.res$par) ### scale back alpha and va if (!Z.int.only && Z.int.incl && con$scaleZ && is.na(alpha.val[1])) { alpha <- mZ %*% alpha va[!hest,] <- 0 va[,!hest] <- 0 va <- mZ %*% va %*% t(mZ) va[!hest,] <- NA va[,!hest] <- NA Z <- Zsave } ### set/check 'att' argument att <- .set.btt(att, q, Z.int.incl, colnames(Z)) m.alpha <- length(att) ### number of alphas to test (m = q if all alphas are tested) ### ddf calculation if (test == "t") { ddf.alpha <- k-q } else { ddf.alpha <- NA } ### QM calculation QS <- try(as.vector(t(alpha)[att] %*% chol2inv(chol(va[att,att])) %*% alpha[att]), silent=TRUE) if (inherits(QS, "try-error")) QS <- NA se.alpha <- sqrt(diag(va)) rownames(alpha) <- rownames(va) <- colnames(va) <- colnames(Z) names(se.alpha) <- NULL zval.alpha <- c(alpha/se.alpha) if (test == "t") { QS <- QS / m.alpha QSdf <- c(m.alpha, k-q) QSp <- if (QSdf[2] > 0) pf(QS, df1=QSdf[1], df2=QSdf[2], lower.tail=FALSE) else NA pval.alpha <- if (ddf.alpha > 0) 
2*pt(abs(zval.alpha), df=ddf.alpha, lower.tail=FALSE) else rep(NA,q) crit.alpha <- if (ddf.alpha > 0) qt(level/2, df=ddf.alpha, lower.tail=FALSE) else NA } else { QSdf <- c(m.alpha, NA) QSp <- pchisq(QS, df=QSdf[1], lower.tail=FALSE) pval.alpha <- 2*pnorm(abs(zval.alpha), lower.tail=FALSE) crit.alpha <- qnorm(level/2, lower.tail=FALSE) } ci.lb.alpha <- c(alpha - crit.alpha * se.alpha) ci.ub.alpha <- c(alpha + crit.alpha * se.alpha) if (link == "log") tau2 <- exp(as.vector(Z %*% alpha)) if (link == "identity") tau2 <- as.vector(Z %*% alpha) } ### fixed-effects model (note: sets tau2 to zero even when tau2 value is specified) if (is.element(method, c("FE","EE","CE"))) tau2 <- 0 ######################################################################### ###### model fitting, test statistics, and confidence intervals if (verbose > 1) message(mstyle$message("\nModel fitting ...")) wi <- 1/(vi + tau2) W <- diag(wi, nrow=k, ncol=k) M <- diag(vi + tau2, nrow=k, ncol=k) if (weighted) { ######################### ### weighted analysis ### ######################### ### fit model with weighted estimation if (is.null(weights) || is.element(test, c("knha","adhoc"))) { ### if no weights are specified, use default inverse variance weights, that is, 1/vi or 1/(vi + tau2) ### also, even with weights, if test="knha" or "adhoc", need to run this to get RSS.knha ### if any vi = 0 and tau^2 is estimated to be 0 (or is set to 0 for a FE model), then get Inf for wi if (any(is.infinite(wi))) stop(mstyle$stop("Division by zero when computing the inverse variance weights.")) stXWX <- .invcalc(X=X, W=W, k=k) beta <- stXWX %*% crossprod(X,W) %*% Y vb <- stXWX RSS.f <- sum(wi*(yi - X %*% beta)^2) #P <- W - W %*% X %*% stXWX %*% crossprod(X,W) #RSS.f <- crossprod(Y,P) %*% Y RSS.knha <- RSS.f } if (!is.null(weights)) { ### if weights are specified, use them (note: RSS.f is recomputed if test="knha" or "adhoc") A <- diag(weights, nrow=k, ncol=k) stXAX <- .invcalc(X=X, W=A, k=k) beta <- stXAX %*% crossprod(X,A) %*% Y vb <- stXAX %*% t(X) %*% A %*% M %*% A %*% X %*% stXAX RSS.f <- sum(wi*(yi - X %*% beta)^2) #P <- W - W %*% X %*% stXAX %*% t(X) %*% A - A %*% X %*% stXAX %*% t(X) %*% W + A %*% X %*% stXAX %*% t(X) %*% W %*% X %*% stXAX %*% t(X) %*% A #RSS.f <- crossprod(Y,P) %*% Y } #return(list(beta=beta, vb=vb, se=sqrt(diag(vb)), RSS.f=RSS.f)) ### calculate scaling factor for Knapp & Hartung method ### note: catch cases where RSS.knha is extremely small, which is probably due to all yi being equal ### then set s2w to 0 (to avoid the strange looking output we would obtain if we don't do this) if (is.element(test, c("knha","adhoc"))) { if (RSS.knha <= .Machine$double.eps) { s2w <- 0 } else { s2w <- RSS.knha / (k-p) } } } else { ########################### ### unweighted analysis ### ########################### ### fit model with unweighted estimation ### note: 1) if user has specified weights, they are ignored ### 2) but if method="GENQ/GENQM", they were used to estimate tau^2 stXX <- .invcalc(X=X, W=diag(k), k=k) beta <- stXX %*% crossprod(X,Y) vb <- tcrossprod(stXX,X) %*% M %*% X %*% stXX RSS.f <- sum(wi*(yi - X %*% beta)^2) #P <- W - W %*% X %*% tcrossprod(stXX,X) - X %*% stXX %*% crossprod(X,W) + X %*% stXX %*% crossprod(X,W) %*% X %*% tcrossprod(stXX,X) #RSS.f <- crossprod(Y,P) %*% Y ### calculate scaling factor for Knapp & Hartung method if (is.element(test, c("knha","adhoc"))) { if (any(is.infinite(wi))) stop(mstyle$stop("Division by zero when computing the inverse variance weights.")) stXWX <- .invcalc(X=X, W=W, k=k) 
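### note: the inverse-variance weighted fit below is only needed to obtain RSS.knha, from which the Knapp & Hartung scaling factor s2w = RSS.knha / (k-p) is computed; the model coefficients themselves remain the unweighted estimates obtained above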
beta.knha <- stXWX %*% crossprod(X,W) %*% Y RSS.knha <- sum(wi*(yi - X %*% beta.knha)^2) #P <- W - W %*% X %*% stXWX %*% crossprod(X,W) #RSS.knha <- c(crossprod(Y,P) %*% Y) if (RSS.knha <= .Machine$double.eps) { s2w <- 0 } else { s2w <- RSS.knha / (k-p) } } } if (verbose > 1) message(mstyle$message("Conducting tests of the fixed effects ...")) ### the Knapp & Hartung method as described in the literature is for random/mixed-effects models if (is.element(method, c("FE","EE","CE")) && is.element(test, c("knha","adhoc"))) warning(mstyle$warning("Knapp & Hartung method is not meant to be used in the context of FE models."), call.=FALSE) ### Knapp & Hartung method with ad-hoc correction so that the scale factor is always >= 1 if (test == "adhoc") s2w[s2w < 1] <- 1 ### for Knapp & Hartung method, apply scaling to vb vb <- s2w * vb ### ddf calculation if (is.element(test, c("knha","adhoc","t"))) { ddf <- k-p } else { ddf <- NA } ### QM calculation QM <- try(as.vector(t(beta)[btt] %*% chol2inv(chol(vb[btt,btt])) %*% beta[btt]), silent=TRUE) if (inherits(QM, "try-error")) QM <- NA rownames(beta) <- rownames(vb) <- colnames(vb) <- colnames(X) se <- sqrt(diag(vb)) names(se) <- NULL zval <- c(beta/se) if (is.element(test, c("knha","adhoc","t"))) { QM <- QM / m QMdf <- c(m, k-p) QMp <- if (QMdf[2] > 0) pf(QM, df1=QMdf[1], df2=QMdf[2], lower.tail=FALSE) else NA pval <- if (ddf > 0) 2*pt(abs(zval), df=ddf, lower.tail=FALSE) else rep(NA,p) crit <- if (ddf > 0) qt(level/2, df=ddf, lower.tail=FALSE) else NA } else { QMdf <- c(m, NA) QMp <- pchisq(QM, df=QMdf[1], lower.tail=FALSE) pval <- 2*pnorm(abs(zval), lower.tail=FALSE) crit <- qnorm(level/2, lower.tail=FALSE) } ci.lb <- c(beta - crit * se) ci.ub <- c(beta + crit * se) ######################################################################### ### heterogeneity test (Wald-type test of the extra coefficients in the saturated model) if (verbose > 1) message(mstyle$message("Conducting heterogeneity test ...")) if (allvipos) { ### heterogeneity test (always uses inverse variance method) # note: this is unaffected by the 'weighted' argument, since under H0, the same parameters are # estimated and weighted estimation provides the most efficient estimates; therefore, also any # arbitrary weights specified by the user are not relevant here (different from what the metan # command in Stata does!) see also: Chen, Z., Ng, H. K. T., & Nadarajah, S. (2014). A note on # Cochran test for homogeneity in one-way ANOVA and meta-analysis. Statistical Papers, 55(2), # 301-310. This shows that the weights used are not relevant. 
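### note: in the block below, QE is computed as Y'PY with P based on W = diag(1/vi) and compared against a chi-square distribution with k-p degrees of freedom; the 'typical' sampling variance is vt = (k-p) / tr(P), and for random/mixed-effects models I^2 = 100 * tau2 / (vt + tau2) and H^2 = tau2 / vt + 1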
if (k > p) { wi <- 1/vi W.FE <- diag(wi, nrow=k, ncol=k) ### note: ll.REML below involves W, so cannot overwrite W stXWX <- .invcalc(X=X, W=W.FE, k=k) P <- W.FE - W.FE %*% X %*% stXWX %*% crossprod(X,W.FE) ### need P below for calculation of I^2 QE <- max(0, c(crossprod(Y,P) %*% Y)) #beta.FE <- stXWX %*% crossprod(X,W.FE) %*% Y #QE <- max(0, sum(wi*(yi - X %*% beta.FE)^2)) QEp <- pchisq(QE, df=k-p, lower.tail=FALSE) ### calculation of 'typical' sampling variance #vt <- (k-1) / (sum(wi) - sum(wi^2)/sum(wi)) ### this only applies to the RE model #vt <- 1/mean(wi) ### harmonic mean of vi's (see Takkouche et al., 1999) vt <- (k-p) / .tr(P) ### calculation of I^2 and H^2 if (is.element(method, c("FE","EE","CE"))) { I2 <- max(0, 100 * (QE - (k-p)) / QE) H2 <- QE / (k-p) } else { I2 <- 100 * tau2 / (vt + tau2) # vector for location-scale models H2 <- tau2 / vt + 1 # vector for location-scale models } } else { QE <- 0 QEp <- 1 I2 <- 0 H2 <- 1 vt <- 0 } } else { if (!vi0) warning(mstyle$warning(paste0("Cannot compute ", ifelse(int.only, "Q", "QE"), "-test, I^2, or H^2 when there are non-positive sampling variances in the data.")), call.=FALSE) vt <- NA } ######################################################################### ### compute pseudo R^2 statistic for mixed-effects models with an intercept (only for rma.uni models) if (!int.only && int.incl && !is.element(method, c("FE","EE","CE")) && model == "rma.uni" && !isTRUE(ddd$skipr2)) { if (verbose > 1) { message(mstyle$message("Fitting RE model for R^2 computation ...")) res.RE <- try(rma.uni(yi, vi, weights=weights, method=method, weighted=weighted, test=test, verbose=ifelse(verbose, TRUE, FALSE), control=con, digits=digits), silent=FALSE) } else { res.RE <- try(suppressWarnings(rma.uni(yi, vi, weights=weights, method=method, weighted=weighted, test=test, verbose=ifelse(verbose, TRUE, FALSE), control=con, digits=digits)), silent=TRUE) } if (!inherits(res.RE, "try-error")) { tau2.RE <- res.RE$tau2 if (identical(tau2.RE,0)) { R2 <- 0 } else { R2 <- max(0, 100 * (tau2.RE - tau2) / tau2.RE) } } else { R2 <- NA } } else { R2 <- NULL } ######################################################################### ###### fit statistics if (verbose > 1) message(mstyle$message("Computing fit statistics and log likelihood ...")) ### note: tau2 is not counted as a parameter when it was fixed by the user (same for fixed alpha values) parms <- p + ifelse(model == "rma.uni", ifelse(is.element(method, c("FE","EE","CE")) || tau2.fix, 0, 1), sum(is.na(alpha.val))) ll.ML <- -1/2 * (k) * log(2*base::pi) - 1/2 * sum(log(vi + tau2)) - 1/2 * RSS.f ll.REML <- -1/2 * (k-p) * log(2*base::pi) + ifelse(con$REMLf, 1/2 * determinant(crossprod(X), logarithm=TRUE)$modulus, 0) + -1/2 * sum(log(vi + tau2)) - 1/2 * determinant(crossprod(X,W) %*% X, logarithm=TRUE)$modulus - 1/2 * RSS.f if (k > p) { dev.ML <- -2 * (ll.ML - sum(dnorm(yi, mean=yi, sd=sqrt(vi), log=TRUE))) } else { dev.ML <- 0 } AIC.ML <- -2 * ll.ML + 2*parms BIC.ML <- -2 * ll.ML + parms * log(k) AICc.ML <- -2 * ll.ML + 2*parms * max(k, parms+2) / (max(k, parms+2) - parms - 1) dev.REML <- -2 * (ll.REML - 0) ### saturated model has ll = 0 when using the full REML likelihood AIC.REML <- -2 * ll.REML + 2*parms BIC.REML <- -2 * ll.REML + parms * log(k-p) AICc.REML <- -2 * ll.REML + 2*parms * max(k-p, parms+2) / (max(k-p, parms+2) - parms - 1) fit.stats <- matrix(c(ll.ML, dev.ML, AIC.ML, BIC.ML, AICc.ML, ll.REML, dev.REML, AIC.REML, BIC.REML, AICc.REML), ncol=2, byrow=FALSE) dimnames(fit.stats) <- 
list(c("ll","dev","AIC","BIC","AICc"), c("ML","REML")) fit.stats <- data.frame(fit.stats) ######################################################################### ###### prepare output if (verbose > 1) message(mstyle$message("Preparing output ...")) p.eff <- p k.eff <- k if (is.null(ddd$outlist)) { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, se.tau2=se.tau2, tau2.fix=tau2.fix, tau2.f=tau2, I2=I2, H2=H2, R2=R2, vt=vt, QE=QE, QEp=QEp, QM=QM, QMdf=QMdf, QMp=QMp, k=k, k.f=k.f, k.eff=k.eff, k.all=k.all, p=p, p.eff=p.eff, parms=parms, int.only=int.only, int.incl=int.incl, intercept=intercept, allvipos=allvipos, coef.na=coef.na, yi=yi, vi=vi, X=X, weights=weights, yi.f=yi.f, vi.f=vi.f, X.f=X.f, weights.f=weights.f, M=M, ai.f=ai.f, bi.f=bi.f, ci.f=ci.f, di.f=di.f, x1i.f=x1i.f, x2i.f=x2i.f, t1i.f=t1i.f, t2i.f=t2i.f, ni=ni, ni.f=ni.f, ids=ids, not.na=not.na, subset=subset, slab=slab, slab.null=slab.null, measure=measure, method=method, model=model, weighted=weighted, test=test, dfs=ddf, ddf=ddf, s2w=s2w, btt=btt, m=m, digits=digits, level=level, control=control, verbose=verbose, add=add, to=to, drop00=drop00, fit.stats=fit.stats, formula.yi=formula.yi, formula.mods=formula.mods, version=packageVersion("metafor"), call=mf) } if (!is.null(ddd$outlist)) { if (ddd$outlist == "minimal") { res <- list(b=beta, beta=beta, se=se, zval=zval, pval=pval, ci.lb=ci.lb, ci.ub=ci.ub, vb=vb, tau2=tau2, se.tau2=se.tau2, tau2.fix=tau2.fix, I2=I2, H2=H2, R2=R2, QE=QE, QEp=QEp, QM=QM, QMdf=QMdf, QMp=QMp, k=k, k.eff=k.eff, p=p, p.eff=p.eff, parms=parms, int.only=int.only, measure=measure, method=method, model=model, test=test, dfs=ddf, ddf=ddf, btt=btt, m=m, digits=digits, fit.stats=fit.stats) } else { res <- eval(parse(text=paste0("list(", ddd$outlist, ")"))) } } if (model == "rma.ls") { res$alpha <- alpha res$va <- va res$se.alpha <- se.alpha res$zval.alpha <- zval.alpha res$pval.alpha <- pval.alpha res$ci.lb.alpha <- ci.lb.alpha res$ci.ub.alpha <- ci.ub.alpha res$alpha.fix <- !is.na(alpha.val) res$q <- q res$alphas <- q res$link <- link res$Z <- Z res$Z.f <- Z.f res$tau2.f <- rep(NA, k.f) res$tau2.f[not.na] <- tau2 res$att <- att res$m.alpha <- m.alpha res$ddf.alpha <- ddf.alpha res$QS <- QS res$QSdf <- QSdf res$QSp <- QSp res$formula.scale <- formula.scale res$Z.int.incl <- Z.int.incl res$Z.intercept <- Z.int.incl res$Z.int.only <- Z.int.only res$H <- H } time.end <- proc.time() res$time <- unname(time.end - time.start)[3] if (.isTRUE(ddd$time)) .print.time(res$time) if (verbose || .isTRUE(ddd$time)) cat("\n") if (model == "rma.ls") { class(res) <- c("rma.ls", "rma.uni", "rma") } else { class(res) <- c("rma.uni", "rma") } return(res) } metafor/R/permutest.rma.uni.r0000644000176200001440000004100314036327240015672 0ustar liggesuserspermutest.rma.uni <- function(x, exact=FALSE, iter=1000, permci=FALSE, progbar=TRUE, retpermdist=FALSE, digits, control, ...) { mstyle <- .get.mstyle("crayon" %in% .packages()) .chkclass(class(x), must="rma.uni", notav=c("robust.rma", "rma.ls", "rma.uni.selmodel")) if (missing(digits)) { digits <- .get.digits(xdigits=x$digits, dmiss=TRUE) } else { digits <- .get.digits(digits=digits, xdigits=x$digits, dmiss=FALSE) } ddd <- list(...) 
.chkdots(ddd, c("tol", "time", "seed")) if (!is.null(ddd$tol)) # in case user specifies comptol in the old manner comptol <- ddd$tol if (.isTRUE(ddd$time)) time.start <- proc.time() ######################################################################### ### calculate number of permutations for an exact permutation test if (x$int.only) { ### for intercept-only models, 2^k possible permutations of the signs exact.iter <- 2^x$k } else { ### for meta-regression models, there are k! possible permutations of the rows of the model matrix #exact.iter <- round(exp(lfactorial(x$k))) ### note: without round(), not exactly an integer! ### however, when there are duplicated rows in the model matrix, the number of *unique* permutations ### is lower; the code below below determines the number of unique permutations ### order the X matrix X <- as.data.frame(x$X)[do.call(order, as.data.frame(x$X)),] ### determine groupings indices <- cumsum(c(TRUE, !duplicated(X)[-1])) ### this turns 1,1,1,2,2,3,4,4,4 into 1,1,1,4,4,6,7,7,7 so that the actual row numbers can be permutated indices <- rep(cumsum(rle(indices)$lengths) - (rle(indices)$lengths - 1), rle(indices)$lengths) ### determine exact number of unique permutations ### code below cancels largest ind.table value, which reduces overflow problems ind.table <- table(indices) exact.iter <- round(prod((max(ind.table)+1):x$k) / prod(factorial(ind.table[-which.max(ind.table)]))) ### cancel largest value in numerator and denominator #exact.iter <- round(factorial(x$k) / prod(factorial(ind.table))) ### definitional formula #exact.iter <- round(exp(lfactorial(x$k) - sum(lfactorial(ind.table)))) ### using log of definitional formula and then exp() } ### if 'exact=TRUE' or if the number of iterations for an exact test are smaller ### than what is specified under 'iter', then carry out the exact test if (exact || (exact.iter <= iter)) { exact <- TRUE iter <- exact.iter } if (iter == Inf) stop(mstyle$stop("Too many iterations required for exact permutation test.")) ######################################################################### ### generate seed (needed when exact=FALSE) if (!exact) { seed <- as.integer(runif(1)*2e9) } else { seed <- NA } ### set control parameters for uniroot() and possibly replace with user-defined values if (missing(control)) control <- list() con <- list(comptol=.Machine$double.eps^0.5, tol=.Machine$double.eps^0.25, maxiter=100, alternative="two.sided", p2defn="abs", stat="test", cialt="one.sided", seed=seed, distfac=1) con.pos <- pmatch(names(control), names(con)) con[c(na.omit(con.pos))] <- control[!is.na(con.pos)] if (exists("comptol", inherits=FALSE)) con$comptol <- comptol if (!exact) { if (!is.null(ddd$seed)) { set.seed(ddd$seed) } else { set.seed(con$seed) } } ######################################################################### if (progbar) cat(mstyle$verbose(paste0("Running ", iter, " iterations for ", ifelse(exact, "exact", "approximate"), " permutation test.\n"))) if (x$int.only) { ### permutation test for intercept-only models zval.perm <- try(rep(NA_real_, iter), silent=TRUE) if (inherits(zval.perm, "try-error")) stop(mstyle$stop("Number of iterations requested too large.")) beta.perm <- try(rep(NA_real_, iter), silent=TRUE) if (inherits(beta.perm, "try-error")) stop(mstyle$stop("Number of iterations requested too large.")) QM.perm <- try(rep(NA_real_, iter), silent=TRUE) if (inherits(QM.perm, "try-error")) stop(mstyle$stop("Number of iterations requested too large.")) if (progbar) pbar <- pbapply::startpb(min=0, 
max=iter) if (exact) { ### exact permutation test for intercept-only models signmat <- as.matrix(expand.grid(replicate(x$k, list(c(1,-1))), KEEP.OUT.ATTRS=FALSE)) for (i in seq_len(iter)) { res <- try(suppressWarnings(rma.uni(signmat[i,]*x$yi, x$vi, weights=x$weights, intercept=TRUE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, btt=1, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, skipr2=TRUE)), silent=FALSE) if (inherits(res, "try-error")) next beta.perm[i] <- coef(res) zval.perm[i] <- res$zval QM.perm[i] <- res$QM if (progbar) pbapply::setpb(pbar, i) } } else { ### approximate permutation test for intercept-only models i <- 1 while (i <= iter) { signs <- sample(c(-1,1), x$k, replace=TRUE) ### easier to understand (a tad slower for small k, but faster for larger k) #signs <- 2*rbinom(x$k,1,.5)-1 res <- try(suppressWarnings(rma.uni(signs*x$yi, x$vi, weights=x$weights, intercept=TRUE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, btt=1, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, skipr2=TRUE)), silent=FALSE) if (inherits(res, "try-error")) next beta.perm[i] <- coef(res) zval.perm[i] <- res$zval QM.perm[i] <- res$QM i <- i + 1 if (progbar) pbapply::setpb(pbar, i) } } ### the first random permutation is always the observed data (avoids possibility of p=0) if (!exact) { beta.perm[1] <- coef(x) zval.perm[1] <- x$zval QM.perm[1] <- x$QM } if (con$alternative == "two.sided") { if (con$p2defn == "abs") { ### absolute value definition of the two-sided p-value if (con$stat == "test") { pval <- mean(abs(zval.perm) >= abs(x$zval) - con$comptol, na.rm=TRUE) ### based on test statistic } else { pval <- mean(abs(beta.perm) >= abs(c(x$beta)) - con$comptol, na.rm=TRUE) ### based on coefficient } } else { ### two times the one-sided p-value definition of the two-sided p-value if (con$stat == "test") { if (x$zval > median(zval.perm, na.rm=TRUE)) { pval <- 2*mean(zval.perm >= x$zval - con$comptol, na.rm=TRUE) ### based on test statistic } else { pval <- 2*mean(zval.perm <= x$zval + con$comptol, na.rm=TRUE) } } else { if (c(x$beta) > median(beta.perm, na.rm=TRUE)) { pval <- 2*mean(beta.perm >= c(x$beta) - con$comptol, na.rm=TRUE) ### based on coefficient } else { pval <- 2*mean(beta.perm <= c(x$beta) + con$comptol, na.rm=TRUE) } } } } if (con$alternative == "less") { if (con$stat == "test") { pval <- mean(zval.perm <= x$zval + con$comptol, na.rm=TRUE) ### based on test statistic } else { pval <- mean(beta.perm <= c(x$beta) + con$comptol, na.rm=TRUE) ### based on coefficient } } if (con$alternative == "greater") { if (con$stat == "test") { pval <- mean(zval.perm >= x$zval - con$comptol, na.rm=TRUE) ### based on test statistic } else { pval <- mean(beta.perm >= c(x$beta) - con$comptol, na.rm=TRUE) ### based on coefficient } } pval[pval > 1] <- 1 QMp <- mean(QM.perm >= x$QM - con$comptol, na.rm=TRUE) ######################################################################### } else { ### permutation test for meta-regression models zval.perm <- try(suppressWarnings(matrix(NA_real_, nrow=iter, ncol=x$p)), silent=TRUE) if (inherits(zval.perm, "try-error")) stop(mstyle$stop("Number of iterations requested too large.")) beta.perm <- try(suppressWarnings(matrix(NA_real_, nrow=iter, ncol=x$p)), silent=TRUE) if (inherits(beta.perm, "try-error")) stop(mstyle$stop("Number of iterations requested too large.")) QM.perm <- try(rep(NA_real_, iter), silent=TRUE) if (inherits(QM.perm, "try-error")) stop(mstyle$stop("Number of iterations requested too large.")) if 
(progbar) pbar <- pbapply::startpb(min=0, max=iter) if (exact) { ### exact permutation test for meta-regression models #permmat <- .genperms(x$k) permmat <- .genuperms(indices) ### use recursive algorithm to obtain all unique permutations for (i in seq_len(iter)) { res <- try(suppressWarnings(rma.uni(x$yi, x$vi, weights=x$weights, mods=cbind(X[permmat[i,],]), intercept=FALSE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, btt=x$btt, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, skipr2=TRUE)), silent=FALSE) if (inherits(res, "try-error")) next beta.perm[i,] <- coef(res) zval.perm[i,] <- res$zval QM.perm[i] <- res$QM if (progbar) pbapply::setpb(pbar, i) } } else { ### approximate permutation test for meta-regression models i <- 1 while (i <= iter) { res <- try(suppressWarnings(rma.uni(x$yi, x$vi, weights=x$weights, mods=cbind(X[sample(x$k),]), intercept=FALSE, method=x$method, weighted=x$weighted, test=x$test, level=x$level, btt=x$btt, tau2=ifelse(x$tau2.fix, x$tau2, NA), control=x$control, skipr2=TRUE)), silent=FALSE) if (inherits(res, "try-error")) next beta.perm[i,] <- coef(res) zval.perm[i,] <- res$zval QM.perm[i] <- res$QM i <- i + 1 if (progbar) pbapply::setpb(pbar, i) } } ### the first random permutation is always the observed data (avoids possibility of p=0) if (!exact) { beta.perm[1,] <- coef(x) zval.perm[1,] <- x$zval QM.perm[1] <- x$QM } if (con$alternative == "two.sided") { if (con$p2defn == "abs") { ### absolute value definition of the two-sided p-value if (con$stat == "test") { pval <- rowMeans(t(abs(zval.perm)) >= abs(x$zval) - con$comptol, na.rm=TRUE) ### based on test statistics } else { pval <- rowMeans(t(abs(beta.perm)) >= abs(c(x$beta)) - con$comptol, na.rm=TRUE) ### based on coefficients } } else { ### two times the one-sided p-value definition of the two-sided p-value pval <- rep(NA_real_, x$p) if (con$stat == "test") { for (j in seq_len(x$p)) { if (x$zval[j] > median(zval.perm[,j], na.rm=TRUE)) { pval[j] <- 2*mean(zval.perm[,j] >= x$zval[j] - con$comptol, na.rm=TRUE) } else { pval[j] <- 2*mean(zval.perm[,j] <= x$zval[j] + con$comptol, na.rm=TRUE) } } } else { for (j in seq_len(x$p)) { if (c(x$beta)[j] > median(beta.perm[,j], na.rm=TRUE)) { pval[j] <- 2*mean(beta.perm[,j] >= c(x$beta)[j] - con$comptol, na.rm=TRUE) } else { pval[j] <- 2*mean(beta.perm[,j] <= c(x$beta)[j] + con$comptol, na.rm=TRUE) } } } } } if (con$alternative == "less") { if (con$stat == "test") { pval <- rowMeans(t(zval.perm) <= x$zval + con$comptol, na.rm=TRUE) ### based on test statistics } else { pval <- rowMeans(t(beta.perm) <= c(x$beta) + con$comptol, na.rm=TRUE) ### based on coefficients } } if (con$alternative == "greater") { if (con$stat == "test") { pval <- rowMeans(t(zval.perm) >= x$zval - con$comptol, na.rm=TRUE) ### based on test statistics } else { pval <- rowMeans(t(beta.perm) >= c(x$beta) - con$comptol, na.rm=TRUE) ### based on coefficients } } pval[pval > 1] <- 1 QMp <- mean(QM.perm >= x$QM - con$comptol, na.rm=TRUE) } if (progbar) pbapply::closepb(pbar) ######################################################################### ### permutation-based CI ci.lb <- x$ci.lb ci.ub <- x$ci.ub if (.isTRUE(permci) || is.numeric(permci)) { level <- ifelse(x$level == 0, 1, ifelse(x$level >= 1, (100-x$level)/100, ifelse(x$level > .5, 1-x$level, x$level))) ### check if it is even possible to reject at level if (1/iter > level / ifelse(con$cialt == "one.sided", 1, 2)) { permci <- FALSE warning(mstyle$warning("Cannot obtain ", 100*(1-x$level), "% permutation-based CI; 
number of permutations (", iter, ") too low."), call.=FALSE) } else { ### if permci is numeric, check if existing coefficients have been specified ### otherwise, CIs will be obtained for all model coefficients if (is.numeric(permci)) { coefs <- unique(round(permci)) if (any(coefs > x$p) || any(coefs < 1)) stop(mstyle$stop("Non-existent coefficients specified via 'permci'.")) permci <- TRUE } else { coefs <- seq_len(x$p) } ci.lb <- rep(NA, x$p) ci.ub <- rep(NA, x$p) for (j in coefs) { if (progbar) cat(mstyle$verbose(paste0("Searching for lower CI bound of coefficient ", j, ": \n"))) if (con$cialt == "one.sided") { con$alternative <- "greater" } else { con$alternative <- "two.sided" } #tmp <- try(uniroot(.permci, interval=c(x$ci.lb[j], coef(x)[j]), extendInt="upX", tol=con$tol, maxiter=con$maxiter, obj=x, j=j, exact=exact, iter=iter, progbar=progbar, level=level, digits=digits, control=con)$root, silent=TRUE) tmp <- try(uniroot(.permci, interval=c(x$ci.lb[j] - con$distfac*(coef(x)[j] - x$ci.lb[j]), coef(x)[j]), extendInt="no", tol=con$tol, maxiter=con$maxiter, obj=x, j=j, exact=exact, iter=iter, progbar=progbar, level=level, digits=digits, control=con)$root, silent=TRUE) if (inherits(tmp, "try-error")) { ci.lb[j] <- NA } else { ci.lb[j] <- tmp } if (progbar) cat(mstyle$verbose(paste0("Searching for upper CI bound of coefficient ", j, ": \n"))) if (con$cialt == "one.sided") { con$alternative <- "less" } else { con$alternative <- "two.sided" } #tmp <- try(uniroot(.permci, interval=c(coef(x)[j], x$ci.ub[j]), extendInt="downX", tol=con$tol, maxiter=con$maxiter, obj=x, j=j, exact=exact, iter=iter, progbar=progbar, level=level, digits=digits, control=con)$root, silent=TRUE) tmp <- try(uniroot(.permci, interval=c(coef(x)[j], x$ci.ub[j] + con$distfac*(x$ci.ub[j] - coef(x)[j])), extendInt="no", tol=con$tol, maxiter=con$maxiter, obj=x, j=j, exact=exact, iter=iter, progbar=progbar, level=level, digits=digits, control=con)$root, silent=TRUE) if (inherits(tmp, "try-error")) { ci.ub[j] <- NA } else { ci.ub[j] <- tmp } } } } ######################################################################### out <- list(pval=pval, QMdf=x$QMdf, QMp=QMp, beta=x$beta, se=x$se, zval=x$zval, ci.lb=ci.lb, ci.ub=ci.ub, QM=x$QM, k=x$k, p=x$p, btt=x$btt, m=x$m, test=x$test, dfs=x$dfs, ddf=x$ddf, int.only=x$int.only, digits=digits, exact.iter=exact.iter, permci=permci) if (retpermdist) { out$QM.perm <- QM.perm out$zval.perm <- data.frame(zval.perm) out$beta.perm <- data.frame(beta.perm) names(out$zval.perm) <- names(out$beta.perm) <- colnames(x$X) } if (.isTRUE(ddd$time)) { time.end <- proc.time() .print.time(unname(time.end - time.start)[3]) } class(out) <- "permutest.rma.uni" return(out) } metafor/NEWS.md0000644000176200001440000022136414060132454013014 0ustar liggesusers# metafor 3.0-2 (2021-06-09) - the `metafor` package now makes use of the `mathjaxr` package to nicely render equations shown in the HTML help pages - `rma()` can now also fit location-scale models - added `selmodel()` for fitting a wide variety of selection models (and added the corresponding `plot.rma.uni.selmodel()` function for drawing the estimated selection function) - `rma.mv()` gains `dfs` argument and now provides an often better way for calculating the (denominator) degrees of freedom for approximate t- and F-tests when `dfs="contain"` - added `tes()` function for the test of excess significance - added `regplot()` function for drawing scatter plots / bubble plots based on meta-regression models - added `rcalc()` for calculating the 
variance-covariance matrix of correlation coefficients and `matreg()` for fitting regression models based on correlation/covariance matrices - added convenience functions `dfround()` and `vec2mat()` - added `aggregate.escalc()` function to aggregate multiple effect sizes or outcomes within studies/clusters - `regtest()` now shows the 'limit estimate' of the (average) true effect when using `sei`, `vi`, `ninv`, or `sqrtninv` as predictors (and the model does not contain any other moderators) - `vif()` gains `btt` argument and can now also compute generalized variance inflation factors; a proper `print.vif.rma()` function was also added - `anova.rma()` argument `L` renamed to `X` (the former still works, but is no longer documented) - argument `order` in `cumul()` should now just be a variable, not the order of the variable, to be used for ordering the studies and must be of the same length as the original dataset that was used in the model fitting - similarly, vector arguments in various plotting functions such as `forest.rma()` must now be of the same length as the original dataset that was used in the model fitting (any subsetting and removal of `NA`s is automatically applied) - the various `leave1out()` and `cumul()` functions now provide `I^2` and `H^2` also for fixed-effects models; accordingly, `plot.cumul.rma()` now also works with such models - fixed `level` not getting passed down to the various `cumul()` functions - `plot.cumul.rma()` argument `addgrid` renamed to `grid` (the former still works, but is no longer documented) - `forest.default()`, `forest.rma()`, and `labbe()` gain `plim` argument and now provide more flexibility in terms of the scaling of the points - `forest.rma()` gains `colout` argument (to adjust the color of the observed effect sizes or outcomes) - in the various `forest()` functions, the right header is now suppressed when `annotate=FALSE` and `header=TRUE` - `funnel.default()` and `funnel.rma()` gain `label` and `offset` arguments - `funnel.default()` and `funnel.rma()` gain `lty` argument; the reference line is now drawn by default as a dotted line (like the line for the pseudo confidence region) - the `forest` and `funnel` arguments of `reporter.rma.uni()` can now also be logicals to suppress the drawing of these plots - added `weighted` argument to `fsn()` (for Orwin's method) - added some more transformation functions - `bldiag()` now properly handles ?x0 or 0x? 
matrices
- p-values are still given to 2 digits even when `digits=1`
- `summary.escalc()` also provides the p-values (of the Wald-type tests); but when using the `transf` argument, the sampling variances, standard errors, test statistics, and p-values are no longer shown
- `rma.uni()` no longer constrains a fixed tau^2 value to 0 when k=1
- slight speedup in functions that repeatedly fit `rma.uni()` models by skipping the computation of the pseudo R^2 statistic
- started using the `pbapply` package for showing progress bars, also when using parallel processing
- to avoid potential confusion, all references to 'credibility intervals' have been removed from the documentation; these intervals are now exclusively referred to as 'prediction intervals'; in the output, the bounds are therefore indicated now as `pi.lb` and `pi.ub` (instead of `cr.lb` and `cr.ub`); the corresponding argument names were changed in `addpoly.default()`; argument `addcred` was changed to `addpred` in `addpoly.rma()` and `forest.rma()`; however, code using the old argument names should continue to work
- one can now use `weights(..., type="rowsum")` for intercept-only `rma.mv` models (to obtain 'row-sum weights')
- `simulate.rma()` gains `olim` argument; renamed the `clim` argument in `summary.escalc()` and the various `forest()` functions to `olim` for consistency (the old `clim` argument should continue to work)
- show nicer network graphs for `dat.hasselblad1998` and `dat.senn2013` in the help files
- added 24 datasets (`dat.anand1999`, `dat.assink2016`, `dat.baskerville2012`, `dat.bornmann2007`, `dat.cannon2006`, `dat.cohen1981`, `dat.craft2003`, `dat.crede2010`, `dat.dagostino1998`, `dat.damico2009`, `dat.dorn2007`, `dat.hahn2001`, `dat.kalaian1996`, `dat.kearon1998`, `dat.knapp2017`, `dat.landenberger2005`, `dat.lau1992`, `dat.lim2014`, `dat.lopez2019`, `dat.maire2019`, `dat.moura2021`, `dat.obrien2003`, `dat.vanhowe1999`, `dat.viechtbauer2021`)
- the package now runs a version check on startup in interactive sessions; setting the environment variable `METAFOR_VERSION_CHECK` to `FALSE` disables this
- refactored various functions (for cleaner/simpler code)
- improved the documentation a bit

# metafor 2.4-0 (2020-03-19)

- version jump to 2.4-0 for CRAN release (from now on, even minor numbers for CRAN releases, odd numbers for development versions)
- the various `forest()` functions gain `header` argument
- `escalc()` gains `include` argument
- setting `verbose=3` in model fitting functions sets `options(warn=1)`
- `forest.rma()` and `forest.default()` now throw informative errors when misusing `order` and `subset` arguments
- fixed failing tests due to the `stringsAsFactors=FALSE` change in the upcoming version of R
- `print.infl.rma.uni()` gains `infonly` argument, to only show the influential studies
- removed `MASS` from `Suggests` (no longer needed)
- argument `btt` can now also take a string to grep for
- added `optimParallel` as possible optimizer in `rma.mv()`
- added (for now undocumented) option to fit models in `rma.glmm()` via the `GLMMadaptive` package (instead of `lme4`); to try this, use: `control=list(package="GLMMadaptive")`
- started to use numbering scheme for devel version (the number after the dash indicates the devel version)
- added `contrmat()` function (for creating a matrix that indicates which groups have been compared against each other in each row of a dataset)
- added `to.wide()` function (for restructuring long format datasets into the wide format needed for contrast-based analyses)
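As a small illustration of the `btt` entry above (a string that is grepped against the coefficient names), a minimal sketch; the use of `dat.bcg`, `escalc()`, and `anova()` here is one possible way to apply this and is not taken from the entry itself:

```r
library(metafor)
dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res <- rma(yi, vi, mods = ~ ablat + year, data=dat)
# omnibus test of all coefficients whose names match "ablat"
# (numeric specifications such as btt=2 continue to work)
anova(res, btt="ablat")
```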
- `I^2` and `H^2` are also shown in output for fixed-effects models
- argument `grid` in `baujat()` can now also be a color name
- added (for now undocumented) `time` argument to more functions that are computationally expensive
- added (for now undocumented) `textpos` argument to the various forest functions
- added a new dataset (`dat.graves2010`)
- added more tests

# metafor 2.1-0 (2019-05-13)

- added `formula()` method for objects of class `rma`
- `llplot()` now also allows for `measure="GEN"`; also, the documentation and y-axis label have been corrected to indicate that the function plots likelihoods (not log likelihoods)
- `confint.rma.mv()` now returns an object of class `list.confint.rma` when obtaining CIs for all variance and correlation components of the model; added corresponding `print.list.confint.rma()` function
- moved `tol` argument in `permutest()` to `control` and renamed to `comptol`
- added `PMM` and `GENQM` estimators in `rma.uni()`
- added `vif()` function to get variance inflation factors
- added `.glmulti` object for making the interaction with `glmulti` easier
- added `reporter()` and `reporter.rma.uni()` for dynamically generating analysis reports for objects of class `rma.uni`
- output is now styled/colored when `crayon` package is loaded (this only works on a 'proper' terminal with color support; also works in RStudio)
- overhauled `plot.gosh.rma()`; when `out` is specified, it now shows two distributions, one for the values when the outlier is included and one for the values when the outlier is excluded; dropped the `hcol` argument and added `border` argument
- refactored `influence.rma.uni()` to be more consistent internally with other functions; `print.infl.rma.uni()` and `plot.infl.rma.uni()` adjusted accordingly; functions `cooks.distance.rma.uni()`, `dfbetas.rma.uni()`, and `rstudent.rma.uni()` now call `influence.rma.uni()` for the computations
- `rstudent.rma.uni()` now computes the SE of the deleted residuals in such a way that it will yield identical results to a mean shift outlier model even when that model is fitted with `test="knha"`
- `rstandard.rma.uni()` gains `type` argument, and can now also compute conditional residuals (it still computes marginal residuals by default)
- `cooks.distance.rma.mv()` gains `cluster` argument, so that the Cook's distances can be computed for groups of estimates
- `cooks.distance.rma.mv()` gains `parallel`, `ncpus`, and `cl` arguments and can now make use of parallel processing
- `cooks.distance.rma.mv()` should be faster by using the estimates from the full model as starting values when fitting the models with the ith study/cluster deleted from the dataset
- `cooks.distance.rma.mv()` gains `reestimate` argument; when set to `FALSE`, variance/correlation components are not reestimated
- `rstandard.rma.mv()` gains `cluster` argument for computing cluster-level multivariate standardized residuals
- added `rstudent.rma.mv()` and `dfbetas.rma.mv()`
- smarter matching of elements in `newmods` (when using a named vector) in `predict()` that also works for models with interactions (thanks to Nicole Erler for pointing out the problem)
- `rma.uni()` and `rma.mv()` no longer issue (obvious) warnings when user constrains `vi` or `V` to 0 (i.e., `vi=0` or `V=0`, respectively)
- `rma.mv()` does more intelligent filtering based on `NA`s in `V` matrix
- `rma.mv()` now ensures strict symmetry of any (var-cov or correlation) matrices specified via the `R` argument
- fixed `rma.mv()` so checks on `R` argument run as intended; also
fixed an issue when multiple formulas with slashes are specified via `random` (thanks to Andrew Loignon for pointing out the problem) - suppressed showing calls on some warnings/errors in `rma.mv()` - `rma.mv()` now allows for a continuous-time autoregressive random effects structure (`struct="CAR"`) and various spatial correlation structures (`struct="SPEXP"`, `"SPGAU"`, `"SPLIN"`, `"SPRAT"`, and `"SPSPH"`) - `rma.mv()` now allows for `struct="GEN"` which models correlated random effects for any number of predictors, including continuous ones (i.e., this allows for 'random slopes') - in the various `forest()` functions, when `options(na.action="na.pass")` or `options(na.action="na.exclude")` and an annotation contains `NA`, this is now shown as a blank (instead of `NA [NA, NA]`) - the various `forest()` and `addpoly()` functions gain a `fonts` argument - the various `forest()` functions gain a `top` argument - the various `forest()` functions now show correct point sizes when the weights of the studies are exactly the same - `forest.cumul.rma()` gains a `col` argument - `funnel.default()` and `funnel.rma()` can now take vectors as input for the `col` and `bg` arguments (and also for `pch`); both functions also gain a `legend` argument - `addpoly()` functions can now also show prediction interval bounds - removed 'formula interface' from `escalc()`; until this actually adds some kind of extra functionality, this just makes `escalc()` more confusing to use - `escalc()` can now compute the coefficient of variation ratio and the variability ratio for pre-post or matched designs (`"CVRC"`, `"VRC"`) - `escalc()` does a bit more housekeeping - added (currently undocumented) arguments `onlyo1`, `addyi`, and `addvi` to `escalc()` that allow for more flexibility when computing certain bias corrections and when computing sampling variances for measures that make use of the `add` and `to` arguments - `escalc()` now sets `add=0` for measures where the use of such a bias correction makes little sense; this applies to the following measures: `"AS"`, `"PHI"`, `"RTET"`, `"IRSD"`, `"PAS"`, `"PFT"`, `"IRS"`, and `"IRFT"`; one can still force the use of the bias correction by explicitly setting the `add` argument to some non-zero value - added `clim` argument to `summary.escalc()` - added `ilim` argument to `trimfill()` - `labbe()` gains `lty` argument - `labbe()` now (invisibly) returns a data frame with the coordinates of the points that were drawn (which may be useful for manual labeling of points in the plot) - added a print method for `profile.rma` objects - `profile.rma.mv()` now check whether any of the profiled log-likelihood values is larger than the log-likelihood of the fitted model (using numerical tolerance given by `lltol`) and issues a warning if so - `profile.rma.uni()`, `profile.rma.mv()`, and `plot.profile.rma()` gain `cline` argument; `plot.profile.rma()` gains `xlim`, `ylab`, and `main` arguments - fixed an issue with `robust.rma.mv()` when the model was fitted with `sparse=TRUE` (thanks to Roger Martineau for noting the problem) - various method functions (`fitted()`, `resid()`, `predict()`, etc.) 
behave in a more consistent manner when model omitted studies with missings - `predict.rma()` gains `vcov` argument; when set to `TRUE`, the variance- covariance matrix of the predicted values is also returned - `vcov.rma()` can now also return the variance-covariance matrix of the fitted values (`type="fitted"`) and the residuals (`type="resid"`) - added `$<-` and `as.matrix()` methods for `list.rma` objects - fixed error in `simulate.rma()` that would generate too many samples for `rma.mv` models - added undocumented argument `time` to all model fitting functions; if set to `TRUE`, the model fitting time is printed - added more tests (also for parallel operations); also, all tests updated to use proper tolerances instead of rounding - reorganized the documentation a bit # metafor 2.0-0 (2017-06-22) - added `simulate()` method for `rma` objects; added `MASS` to `Suggests` (since simulating for `rma.mv` objects requires `mvrnorm()` from `MASS`) - `cooks.distance.rma.mv()` now works properly even when there are missing values in the data - `residuals()` gains `type` argument and can compute Pearson residuals - the `newmods` argument in `predict()` can now be a named vector or a matrix/data frame with column names that get properly matched up with the variables in the model - added `ranef.rma.mv()` for extracting the BLUPs of the random effects for `rma.mv` models - all functions that repeatedly refit models now have the option to show a progress bar - added `ranktest.default()`, so user can now pass the outcomes and corresponding sampling variances directly to the function - added `regtest.default()`, so user can now pass the outcomes and corresponding sampling variances directly to the function - `funnel.default()` gains `subset` argument - `funnel.default()` and `funnel.rma()` gain `col` and `bg` arguments - `plot.profile.rma()` gains `ylab` argument - more consistent handling of `robust.rma` objects - added a print method for `rma.gosh` objects - the (log) relative risk is now called the (log) risk ratio in all help files, plots, code, and comments - `escalc()` can now compute outcome measures based on paired binary data (`"MPRR"`, `"MPOR"`, `"MPRD"`, `"MPORC"`, and `"MPPETO"`) - `escalc()` can now compute (semi-)partial correlation coefficients (`"PCOR"`, `"ZPCOR"`, `"SPCOR"`) - `escalc()` can now compute measures of variability for single groups (`"CVLN"`, `"SDLN"`) and for the difference in variability between two groups (`"CVR"`, `"VR"`); also the log transformed mean (`"MNLN"`) has been added for consistency - `escalc()` can now compute the sampling variance for `measure="PHI"` for studies using stratified sampling (`vtpye="ST"`) - the `[` method for `escalc` objects now properly handles the `ni` and `slab` attributes and does a better job of cleaning out superfluous variable name information - added `rbind()` method for `escalc` objects - added `as.data.frame()` method for `list.rma` objects - added a new dataset (`dat.pagliaro1992`) for another illustration of a network meta-analysis - added a new dataset (`dat.laopaiboon2015`) on the effectiveness of azithromycin for treating lower respiratory tract infections - `rma.uni()` and `rma.mv()` now check if the ratio of the largest to smallest sampling variance is very large; results may not be stable then (and very large ratios typically indicate wrongly coded data) - model fitting functions now check if extra/superfluous arguments are specified via `...` and issues are warning if so - instead of defining own generic `ranef()`, import 
`ranef()` from `nlme` - improved output formatting - added more tests (but disabled a few tests on CRAN to avoid some issues when R is compiled with `--disable-long-double`) - some general code cleanup - renamed `diagram_metafor.pdf` vignette to just `diagram.pdf` - minor updates in the documentation # metafor 1.9-9 (2016-09-25) - started to use git as version control system, GitHub to host the repository (https://github.com/wviechtb/metafor) for the development version of the package, Travis CI as continuous integration service (https://travis-ci.org/wviechtb/metafor), and Codecov for automated code coverage reporting (https://codecov.io/github/wviechtb/metafor) - argument `knha` in `rma.uni()` and argument `tdist` in `rma.glmm()` and `rma.mv()` are now superseded by argument `test` in all three functions; for backwards compatibility, the `knha` and `tdist` arguments still work, but are no longer documented - `rma(yi, vi, weights=1, test="knha")` now yields the same results as `rma(yi, vi, weighted=FALSE, test="knha")` (but use of the Knapp and Hartung method in the context of an unweighted analysis remains an experimental feature) - one can now pass an `escalc` object directly to `rma.uni()`, which then tries to automatically determine the `yi` and `vi` variables in the data frame (thanks to Christian Roever for the suggestion) - `escalc()` can now also be used to convert a regular data frame to an `escalc` object - for `measure="UCOR"`, the exact bias-correction is now used (instead of the approximation); when `vtype="UB"`, the exact equation is now used to compute the unbiased estimate of the variance of the bias-corrected correlation coefficient; hence `gsl` is now a suggested package (needed to compute the hypergeometric function) and is loaded when required - `cooks.distance()` now also works with `rma.mv` objects; and since model fitting can take some time, an option to show a progress bar has been added - fixed an issue with `robust.rma.mv()` throwing errors when the model was fitted with `sparse=TRUE` - fixed an error with `robust.rma.mv()` when the model was fitted with user-defined weights (or a user-defined weight matrix) - added `ranef()` for extracting the BLUPs of the random effects (only for `rma.uni` objects at the moment) - reverted back to the pre-1.1-0 way of computing p-values for individual coefficients in `permutest.rma.uni()`, that is, the p-value is computed with `mean(abs(z_perm) >= abs(z_obs) - tol)` (where `tol` is a numerical tolerance) - `permutest.rma.uni()` gains `permci` argument, which can be used to obtain permutation-based CIs of the model coefficients (note that this is computationally very demanding and may take a long time to complete) - `rma.glmm()` continues to work even when the saturated model cannot be fitted (although the tests for heterogeneity are not available then) - `rma.glmm()` now allows control over the arguments used for `method.args` (via `control=list(hessianCtrl=list(...))`) passed to `hessian()` (from the `numDeriv` package) when using `model="CM.EL"` and `measure="OR"` - in `rma.glmm()`, default `method.args` value for `r` passed to `hessian()` has been increased to 16 (while this slows things down a bit, this appears to improve the accuracy of the numerical approximation to the Hessian, especially when tau^2 is close to 0) - the various `forest()` and `addpoly()` functions now have a new argument called `width`, which provides manual control over the width of the annotation columns; this is useful when creating complex forest 
plots with a monospaced font and we want to ensure that all annotations are properly lined up at the decimal point - the annotations created by the various `forest()` and `addpoly()` functions are now a bit more compact by default - more flexible `efac` argument in the various `forest()` functions - trailing zeros in the axis labels are now dropped in forest and funnel plots by default; but trailing zeros can be retained by specifying a numeric (and not an integer) value for the `digits` argument - added `funnel.default()`, which directly takes as input a vector with the observed effect sizes or outcomes and the corresponding sampling variances, standard errors, and/or sample sizes - added `plot.profile.rma()`, a plot method for objects returned by the `profile.rma.uni()` and `profile.rma.mv()` functions - simplified `baujat.rma.uni()`, `baujat.rma.mh()`, and `baujat.rma.peto()` to `baujat.rma()`, which now handles objects of class `rma.uni`, `rma.mh`, and `rma.peto` - `baujat.rma()` gains argument `symbol` for more control over the plotting symbol - `labbe()` gains a `grid` argument - more logical placement of labels in `qqnorm.rma.uni()`, `qqnorm.rma.mh()`, and `qqnorm.rma.peto()` functions (and more control thereof) - `qqnorm.rma.uni()` gains `lty` argument - added `gosh.rma()` and `plot.gosh.rma()` for creating GOSH (i.e., graphical display of study heterogeneity) plots based on Olkin et al. (2012) - in the (rare) case where all observed outcomes are exactly equal to each other, `test="knha"` (i.e., `knha=TRUE`) in `rma()` now leads to more appropriate results - updated datasets so those containing precomputed effect size estimates or observed outcomes are already declared to be `escalc` objects - added new datasets (`dat.egger2001` and `dat.li2007`) on the effectiveness of intravenous magnesium in acute myocardial infarction - `methods` package is now under `Depends` (in addition to `Matrix`), so that `rma.mv(..., sparse=TRUE)` always works, even under Rscript - some general code cleanup - added more tests (and used a more consistent naming scheme for tests) # metafor 1.9-8 (2015-09-28) - due to more stringent package testing, it is increasingly difficult to ensure that the package passes all checks on older versions of R; from now on, the package will therefore require, and be checked under, only the current (and the development) version of R - added `graphics`, `grDevices`, and `methods` to `Imports` (due to recent change in how CRAN checks packages) - the `struct` argument for `rma.mv()` now also allows for `"ID"` and `"DIAG"`, which are identical to the `"CS"` and `"HCS"` structures, but with the correlation parameter fixed to 0 - added `robust()` for (cluster) robust tests and confidence intervals for `rma.uni` and `rma.mv` models (this uses a robust sandwich-type estimator of the variance-covariance matrix of the fixed effects along the lines of the Eicker-Huber-White method) - `confint()` now works for models fitted with the `rma.mv()` function; for variance and correlation parameters, the function provides profile likelihood confidence intervals; the output generated by the `confint()` function has been adjusted in general to make the formatting more consistent across the different model types - for objects of class `rma.mv`, `profile()` now provides profile plots for all (non-fixed) variance and correlation components of the model when no component is specified by the user (via the `sigma2`, `tau2`, `rho`, `gamma2`, or `phi` arguments) - for `measure="MD"` and 
`measure="ROM"`, one can now choose between `vtype="LS"` (the default) and `vtype="HO"`; the former computes the sampling variances without assuming homoscedasticity, while the latter assumes homoscedasticity - multiple model objects can now be passed to the `fitstats()`, `AIC()`, and `BIC()` functions - check for duplicates in the `slab` argument is now done *after* any subsetting is done (as suggested by Michael Dewey) - `rma.glmm()` now again works when using `add=0`, in which case some of the observed outcomes (e.g., log odds or log odds ratios) may be `NA` - when using `rma.glmm()` with `model="CM.EL"`, the saturated model (used to compute the Wald-type and likelihood ratio tests for the presence of (residual) heterogeneity) often fails to converge; the function now continues to run (instead of stopping with an error) and simply omits the test results from the output - when using `rma.glmm()` with `model="CM.EL"` and inversion of the Hessian fails via the Choleski factorization, the function now makes another attempt via the QR decomposition (even when this works, a warning is issued) - for `rma.glmm()`, BIC and AICc values were switched around; corrected - more use of `suppressWarnings()` is made when functions repeatedly need to fit the same model, such as `cumul()`, `influence()`, and `profile()`; that way, one does not get inundated with the same warning(s) - some (overdue) updates to the documentation # metafor 1.9-7 (2015-05-22) - default optimizer for `rma.mv()` changed to `nlminb()` (instead of `optim()` with `"Nelder-Mead"`); extensive testing indicated that `nlminb()` (and also `optim()` with `"BFGS"`) is typically quicker and more robust; note that this is in principle a non-backwards compatible change, but really a necessary one; and you can always revert to the old behavior with `control=list(optimizer="optim", optmethod="Nelder-Mead")` - all tests have been updated in accordance with the recommended syntax of the `testthat` package; for example, `expect_equivalent(x,y)` is used instead of `test_that(x, is_equivalent_to(y))` - changed a few `is_identical_to()` comparisons to `expect_equivalent()` ones (that failed on Sparc Solaris) # metafor 1.9-6 (2015-05-07) - `funnel()` now works again for `rma.glmm` objects (note to self: quit breaking things that work!) 
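To make the optimizer notes in the 1.9-7 entry above concrete, a minimal sketch; the `control` syntax is the one quoted above, while the use of `dat.konstantopoulos2011` and the `district/school` nesting are simply a commonly shown multilevel example for that dataset and are not part of the entry itself:

```r
library(metafor)
# the default optimizer is now nlminb(); revert to the old Nelder-Mead behavior:
res <- rma.mv(yi, vi, random = ~ 1 | district/school, data = dat.konstantopoulos2011,
              control = list(optimizer = "optim", optmethod = "Nelder-Mead"))
res
```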
- `rma.glmm()` will now only issue a warning (and not an error) when the Hessian for the saturated model cannot be inverted (which is needed to compute the Wald-type test for heterogeneity, so the test statistic is then simply set to `NA`) - `rma.mv()` now allows for two terms of the form `~ inner | outer`; the variance components corresponding to such a structure are called `gamma2` and correlations are called `phi`; other functions that work with objects of class `rma.mv` have been updated accordingly - `rma.mv()` now provides (even) more optimizer choices: `nlm()` from the `stats` package, `hjk()` and `nmk()` from the `dfoptim` package, and `ucminf()` from the `ucminf` package; choose the desired optimizer via the control argument (e.g., `control=list(optimizer="nlm")`) - `profile.rma.uni()` and `profile.rma.mv()` now can do parallel processing (which is especially relevant for `rma.mv` objects, where profiling is crucial and model fitting can be slow) - the various `confint()` functions now have a `transf` argument (to apply some kind of transformation to the model coefficients and confidence interval bounds); coefficients and bounds for objects of class `rma.mh` and `rma.peto` are no longer automatically transformed - the various `forest()` functions no longer enforce that the actual x-axis limits (`alim`) encompass the observed outcomes to be plotted; also, outcomes below or above the actual x-axis limits are no longer shown - the various `forest()` functions now provide control over the horizontal lines (at the top/bottom) that are automatically added to the plot via the `lty` argument (this also allows for removing them); also, the vertical reference line is now placed *behind* the points/CIs - `forest.default()` now has argument `col` which can be used to specify the color(s) to be used for drawing the study labels, points, CIs, and annotations - the `efac` argument for `forest.rma()` now also allows two values, the first for the arrows and CI limits, the second for summary estimates - corrected some axis labels in various plots when `measure="PLO"` - axes in `labbe()` plots now have `"(Group 1)"` and `"(Group 2)"` added by default - `anova.rma()` gains argument `L` for specifying linear combinations of the coefficients in the model that should be tested to be zero - in case removal of a row of data would lead to one or more inestimable model coefficients, `baujat()`, `cooks.distance()`, `dfbetas()`, `influence()`, and `rstudent()` could fail for `rma.uni` objects; such cases are now handled properly - for models with moderators, the `predict()` function now shows the study labels when they have been specified by the user (and `newmods` is not used) - if there is only one fixed effect (model coefficient) in the model, the `print.infl.rma.uni()` function now shows the DFBETAS values with the other case diagnostics in a single table (for easier inspection); if there is more than one fixed effect, a separate table is still used for the DFBETAS values (with one column for each coefficient) - added `measure="SMCRH"` to the `escalc()` function for the standardized mean change using raw score standardization with heteroscedastic population variances at the two measurement occasions - added `measure="ROMC"` to the `escalc()` function for the (log transformed) ratio of means (response ratio) when the means reflect two measurement occasions (e.g., for a single group of people) and hence are correlated - added own function for computing/estimating the tetrachoric correlation coefficient (for 
`measure="RTET"`); package therefore no longer suggests `polycor` but now suggests `mvtnorm` (which is loaded as needed)
- element `fill` returned by `trimfill.rma.uni()` is now a logical vector (instead of a 0/1 dummy variable)
- `print.list.rma()` now also returns the printed results invisibly as a data frame
- added a new dataset (`dat.senn2013`) as another illustration of a network meta-analysis
- metafor now depends on at least version 3.1.0 of R

# metafor 1.9-5 (2014-11-24)

- moved the `stats` and `Matrix` packages from `Depends` to `Imports`; as a result, had to add `utils` to `Imports`; moved the `Formula` package from `Depends` to `Suggests`
- added `update.rma()` function (for updating/refitting a model); model objects also now store and keep the call
- the `vcov()` function now also extracts the marginal variance-covariance matrix of the observed effect sizes or outcomes from a fitted model (of class `rma.uni` or `rma.mv`)
- `rma.mv()` now makes use of the Cholesky decomposition when there is a `random = ~ inner | outer` formula and `struct="UN"`; this is numerically more stable than the old approach that avoided non-positive definite solutions by forcing the log-likelihood to be -Inf in those cases; the old behavior can be restored with `control = list(cholesky=FALSE)`
- `rma.mv()` now requires the `inner` variable in an `~ inner | outer` formula to be a factor or character variable (except when `struct` is `"AR"` or `"HAR"`); use `~ factor(inner) | outer` in case it isn't
- `anova.rma.uni()` function changed to `anova.rma()`, which now works for both `rma.uni` and `rma.mv` objects
- the `profile.rma.mv()` function now omits the number of the variance or correlation component from the plot title and x-axis label when the model only includes one of the respective parameters
- `profile()` functions now pass on the `...` argument also to the `title()` function used to create the figure titles (esp. relevant when using the `cex.main` argument)
- the `drop00` argument of the `rma.mh()` and `rma.peto()` functions now also accepts a vector with two logicals; the first applies when calculating the observed outcomes, the second when applying the Mantel-Haenszel or Peto's method
- `weights.rma.uni()` now shows the correct weights when `weighted=FALSE`
- argument `showweight` renamed to `showweights` in the `forest.default()` and `forest.rma()` functions (more consistent with the naming of the various `weights()` functions)
- added `model.matrix.rma()` function (to extract the model matrix from objects of class `rma`)
- `funnel()` and `radial()` now (invisibly) return data frames with the coordinates of the points that were drawn (may be useful for manual labeling of points in the plots)
- `permutest.rma.uni()` function now uses a numerical tolerance when making comparisons (>= or <=) between an observed test statistic and the test statistic under the permuted data; when using random permutations, the function now ensures that the very first permutation corresponds to the original data
- corrected some missing/redundant row/column labels in some output
- most `require()` calls replaced with `requireNamespace()` to avoid altering the search path (hopefully this won't break stuff ...)
- some non-visible changes including more use of some (non-exported) helper functions for common tasks
- dataset `dat.collins1985a` updated (including all reported outcomes and some more information about the various trials)
- oh, and guess what? I updated the documentation ...
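As a small illustration of the `drop00` entry in the 1.9-5 notes above, a sketch with made-up 2x2 table counts (the data are hypothetical; study 2 is a 'double-zero' study). With `drop00=c(TRUE, FALSE)`, such studies are excluded when computing the observed outcomes but still enter the Mantel-Haenszel analysis:

```r
library(metafor)
dat <- data.frame(ai = c(2, 0, 5, 1), bi = c(48, 50, 45, 49),
                  ci = c(4, 0, 8, 3), di = c(46, 50, 42, 47))
res <- rma.mh(measure = "OR", ai = ai, bi = bi, ci = ci, di = di,
              data = dat, drop00 = c(TRUE, FALSE))
res
```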
# metafor 1.9-4 (2014-07-30) - added `method="GENQ"` to `rma.uni()` for the generalized Q-statistic estimator of tau^2, which allows for used-defined weights (note: the DL and HE estimators are just special cases of this method) - when the model was fitted with `method="GENQ"`, then `confint()` will now use the generalized Q-statistic method to construct the corresponding confidence interval for tau^2 (thanks to Dan Jackson for the code); the iterative method used to obtain the CI makes use of Farebrother's algorithm as implemented in the `CompQuadForm` package - slight improvements in how the `rma.uni()` function handles non-positive sampling variances - `rma.uni()`, `rma.mv()`, and `rma.glmm()` now try to detect and remove any redundant predictors before the model fitting; therefore, if there are exact linear relationships among the predictor variables (i.e., perfect multicollinearity), terms are removed to obtain a set of predictors that is no longer perfectly multicollinear (a warning is issued when this happens); note that the order of how the variables are specified in the model formula can influence which terms are removed - the last update introduced an error in how hat values were computed when the model was fitted with the `rma()` function using the Knapp & Hartung method (i.e., when `knha=TRUE`); this has been fixed - `regtest()` no longer works (for now) with `rma.mv` objects (it wasn't meant to in the first place); if you want to run something along the same lines, just consider adding some measure of the precision of the observed outcomes (e.g., their standard errors) as a predictor to the model - added `"sqrtni"` and `"sqrtninv"` as possible options for the `predictor` argument of `regtest()` - more optimizers are now available for the `rma.mv()` function via the `nloptr` package by setting `control = list(optimizer="nloptr")`; when using this optimizer, the default is to use the BOBYQA implementation from that package with a relative convergence criterion of 1e-8 on the function value (see documentation on how to change these defaults) - `predict.rma()` function now works for `rma.mv` objects with multiple tau^2 values even if the user specifies the `newmods` argument but not the `tau2.levels` argument (but a warning is issued and the prediction intervals are not computed) - argument `var.names` now works properly in `escalc()` when the user has not made use of the `data` argument (thanks to Jarrett Byrnes for bringing this to my attention) - added `plot()` function for cumulative random-effects models results as obtained with the `cumul.rma.uni()` function; the plot shows the model estimate on the x-axis and the corresponding tau^2 estimate on the y-axis in the cumulative order of the results - fixed the omitted offset term in the underlying model fitted by the `rma.glmm()` function when `method="ML"`, `measure="IRR"`, and `model="UM.FS"`, that is, when fitting a mixed-effects Poisson regression model with fixed study effects to two-group event count data (thanks to Peter Konings for pointing out this error) - added two new datasets (`dat.bourassa1996`, `dat.riley2003`) - added function `replmiss()` (just a useful helper function) - package now uses `LazyData: TRUE` - some improvements to the documentation (do I still need to mention this every time?) 
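To illustrate the `method="GENQ"` entry at the top of the 1.9-4 notes, a minimal sketch based on the `dat.bcg` dataset (the dataset and the choice of weights are only an example; with inverse-variance weights the GENQ estimator coincides with the DerSimonian-Laird estimator, one of the special cases mentioned above):

```r
library(metafor)
dat <- escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg)
# generalized Q-statistic estimator of tau^2 with user-defined weights
res <- rma(yi, vi, weights = 1/vi, method = "GENQ", data = dat)
confint(res)   # CI for tau^2 via the generalized Q-statistic method (uses CompQuadForm)
```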
# metafor 1.9-3 (2014-05-05) - some minor tweaks to `rma.uni()` that should be user transparent - `rma.uni()` now has a `weights` argument, allowing the user to specify arbitrary user-defined weights; all functions affected by this have been updated accordingly - better handling of mismatched length of `yi` and `ni` vectors in `rma.uni()` and `rma.mv()` functions - subsetting is now handled as early as possible within functions with subsetting capabilities; this avoids some (rare) cases where studies ultimately excluded by the subsetting could still affect the results - some general tweaks to `rma.mv()` that should make it a bit faster - argument `V` of `rma.mv()` now also accepts a list of var-cov matrices for the observed effects or outcomes; from the list elements, the full (block diagonal) var-cov matrix `V` is then automatically constructed - `rma.mv()` now has a new argument `W` allowing the user to specify arbitrary user-defined weights or an arbitrary weight matrix - `rma.mv()` now has a new argument `sparse`; by setting this to `TRUE`, the function uses sparse matrix objects to the extent possible; this can speed up model fitting substantially for certain models (hence, the `metafor` package now depends on the `Matrix` package) - `rma.mv()` now allows for `struct="AR"` and `struct="HAR"`, to fit models with (heteroscedastic) autoregressive (AR1) structures among the true effects (useful for meta-analyses of studies reporting outcomes at multiple time points) - `rma.mv()` now has a new argument `Rscale` which can be used to control how matrices specified via the `R` argument are scaled (see docs for more details) - `rma.mv()` now only checks for missing values in the rows of the lower triangular part of the `V` matrix (including the diagonal); this way, if `Vi = matrix(c(.5,NA,NA,NA), nrow=2, ncol=2)` is the var-cov matrix of the sampling errors for a particular study with two outcomes, then only the second row/column needs to be removed before the model fitting (and not the entire study) - added five new datasets (`dat.begg1989`, `dat.ishak2007`, `dat.fine1993`, `dat.konstantopoulos2011`, and `dat.hasselblad1998`) to provide further illustrations of the use of the `rma.mv()` function (for meta-analyses combining controlled and uncontrolled studies, for meta-analyses of longitudinal studies, for multilevel meta-analyses, and for network meta-analyses / mixed treatment comparison meta-analyses) - added `rstandard.rma.mv()` function to compute standardized residuals for models fitted with the `rma.mv()` function (`rstudent.rma.mv()` to be added at a later point); also added `hatvalues.rma.mv()` for computing the hat values and `weights.rma.uni()` for computing the weights (i.e., the diagonal elements of the weight matrix) - the various `weights()` functions now have a new argument `type` to indicate whether only the diagonal elements of the weight matrix (default) or the entire weight matrix should be returned - the various `hatvalues()` functions now have a new argument `type` to indicate whether only the diagonal elements of the hat matrix (default) or the entire hat matrix should be returned - `predict.rma()` function now works properly for `rma.mv` objects (also has a new argument `tau2.levels` to specify, where applicable, the levels of the inner factor when computing prediction intervals) - `forest.rma()` function now provides a bit more control over the color of the summary polygon and is now compatible with `rma.mv` objects; also, has a new argument `lty`, which provides more 
control over the line type for the individual CIs and the prediction interval - `addpoly.default()` and `addpoly.rma()` now have a `border` argument (for consistency with the `forest.rma()` function); `addpoly.rma()` now yields the correct CI bounds when the model was fitted with `knha=TRUE` - `forest.cumul.rma()` now provides the correct CI bounds when the models were fitted with the Knapp & Hartung method (i.e., when `knha=TRUE` in the original `rma()` function call) - the various `forest()` functions now return information about the chosen values for arguments `xlim`, `alim`, `at`, `ylim`, `rows`, `cex`, `cex.lab`, and `cex.axis` invisibly (useful for tweaking the default values); thanks to Michael Dewey for the suggestion - the various `forest()` functions now have a new argument, `clim`, to set limits for the confidence/prediction interval bounds - `cumul.mh()` and `cumul.peto()` now get the order of the studies right when there are missing values in the data - the `transf` argument of `leave1out.rma.mh()`, `leave1out.rma.peto()`, `cumul.rma.mh()`, and `cumul.rma.peto()` should now be used to specify the actual function for the transformation (the former behavior of setting this argument to `TRUE` to exponentiate log RRs, log ORs, or log IRRs still works for back-compatibility); this is more consistent with how the `cumul.rma.uni()` and `leave1out.rma.uni()` functions work and is also more flexible - added `bldiag()` function to construct a block diagonal matrix from (a list of) matrices (may be needed to construct the `V` matrix when using the `rma.mv()` function); `bdiag()` function from the `Matrix` package does the same thing, but creates sparse matrix objects - `profile.rma.mv()` now has a `startmethod` argument; by setting this to `"prev"`, successive model fits are started at the parameter estimates from the previous model fit; this may speed things up a bit; also, the method for automatically choosing the `xlim` values has been changed - slight improvement to `profile.rma.mv()` function, which would throw an error if the last model fit did not converge - added a new dataset (`dat.linde2005`) for replication of the analyses in Viechtbauer (2007) - added a new dataset (`dat.molloy2014`) for illustrating the meta-analysis of (r-to-z transformed) correlation coefficients - added a new dataset (`dat.gibson2002`) to illustrate the combined analysis of standardized mean differences and probit transformed risk differences - computations in `weights.mh()` slightly changed to prevent integer overflows for large counts - unnecessary warnings in `transf.ipft.hm()` are now suppressed (cases that raised those warnings were already handled correctly) - in `predict()`, `blup()`, `cumul()`, and `leave1out()`, when using the `transf` argument, the standard errors (which are `NA`) are no longer shown in the output - argument `slab` in various functions will now also accept non-unique study labels; `make.unique()` is used as needed to make them unique - `vignettes("metafor")` and `vignettes("metafor_diagram")` work again (yes, I know they are not true vignettes in the strict sense, but I think they should show up on the CRAN website for the package and using a minimal valid Sweave document that is recognized by the R build system makes that happen) - `escalc()` and its `summary()` method now keep better track when the data frame contains multiple columns with outcome or effect size values (and corresponding sampling variances) for print formatting; also simplified the class structure a bit (and 

# metafor 1.9-2 (2013-10-07)

- added function `rma.mv()` to fit multivariate/multilevel meta-analytic models via appropriate linear (mixed-effects) models; this function allows for modeling of non-independent sampling errors and/or true effects and can be used for network meta-analyses, meta-analyses accounting for phylogenetic relatedness, and other complicated meta-analytic data structures (see the sketch following this list)
- added the AICc to the information criteria computed by the various model fitting functions
- if the value of tau^2 is fixed by the user via the corresponding argument in `rma.uni()`, then tau^2 is no longer counted as an additional parameter for the computation of the information criteria (i.e., AIC, BIC, and AICc)
- `rma.uni()`, `rma.glmm()`, and `rma.mv()` now use a more stringent check whether the model matrix is of full rank
- added `profile()` method functions for objects of class `rma.uni` and `rma.mv` (can be used to obtain a plot of the profiled log-likelihood as a function of a specific variance component or correlation parameter of the model)
- `predict.rma()` function now has an `intercept` argument that allows the user to decide whether the intercept term should be included when calculating the predicted values (rare that this should be changed from the default)
- for `rma.uni()`, `rma.glmm()`, and `rma.mv()`, the `control` argument can now also accept an integer value; values > 1 generate more verbose output about the progress inside of the function
- `rma.glmm()` has been updated to work with `lme4` 1.0.x for fitting various models; as a result, `model="UM.RS"` can only use `nAGQ=1` at the moment (hopefully this will change in the future)
- the `control` argument of `rma.glmm()` can now be used to pass all desired control arguments to the various functions and optimizers used for the model fitting (admittedly the use of lists within this argument is a bit unwieldy, but much more flexible)
- `rma.mh()` and `rma.peto()` also now have a `verbose` argument (not really needed, but added for sake of consistency across functions)
- fixed (silly) error that would prevent `rma.glmm()` from running for measures `"IRR"`, `"PLO"`, and `"IRLN"` when there are missing values in the data (lesson: add some missing values to datasets for the unit tests!)
- a bit of code reorganization (should be user transparent)
- vignettes (`"metafor"` and `"metafor_diagram"`) are now just 'other files' in the doc directory (as these were not true vignettes to begin with)
- some improvements to the documentation (as always)
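
A minimal sketch (not from the package documentation) of the kind of multilevel model that the new `rma.mv()` function can fit, together with the new `profile()` method; it assumes the `dat.konstantopoulos2011` dataset (added in 1.9-3, see above) with its `district` and `school` identifiers:

```r
library(metafor)

dat <- dat.konstantopoulos2011

# random effects for districts and for schools nested within districts
res <- rma.mv(yi, vi, random = ~ 1 | district/school, data=dat)
res

# profile likelihood plots for the two variance components
profile(res)
```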

# metafor 1.9-1 (2013-07-20)

- `rma.mh()` now also implements the Mantel-Haenszel method for incidence rate differences (`measure="IRD"`)
- when analyzing incidence rate ratios (`measure="IRR"`) with the `rma.mh()` function, the Mantel-Haenszel test for person-time data is now also provided
- `rma.mh()` has a new argument `correct` (default is `TRUE`) to indicate whether the continuity correction should be applied when computing the (Cochran-)Mantel-Haenszel test statistic
- renamed elements `CMH` and `CMHp` (for the Cochran-Mantel-Haenszel test statistic and corresponding p-value) to `MH` and `MHp`
- added function `baujat()` to create Baujat plots
- added a new dataset (`dat.pignon2000`) to illustrate the use of the `baujat()` function
- added function `to.table()` to convert data from vector format into the corresponding table format
- added function `to.long()` to convert data from vector format into the corresponding long format (see the sketch following this list)
- `rma.glmm()` now even runs when k=1 (yielding trivial results)
- for models with an intercept and moderators, `rma.glmm()` now internally rescales (non-dummy) variables to z-scores during the model fitting (this improves the stability of the model fitting, especially when `model="CM.EL"`); results are given after back-scaling, so this should be transparent to the user
- in `rma.glmm()`, default number of quadrature points (`nAGQ`) is now 7 (setting this to 100 was a bit overkill)
- a few more error checks here and there for misspecified arguments
- some improvements to the documentation
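
A small sketch of the new data-conversion helpers and the `baujat()` function, using the `dat.bcg` dataset bundled with the package (2x2 tables from the BCG vaccine trials); not part of the original changelog:

```r
library(metafor)

# convert the 2x2 count data from vector format into table / long format
to.table(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
dat.long <- to.long(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
head(dat.long)

# Baujat plot: contribution to heterogeneity vs. influence on the overall estimate
res <- rma(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
baujat(res)
```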

# metafor 1.9-0 (2013-06-21)

- vignette renamed to `metafor` so `vignette("metafor")` works now
- added a diagram to the documentation, showing the various functions in the metafor package (and how they relate to each other); can be loaded with `vignette("metafor_diagram")`
- `anova.rma.uni()` function can now also be used to test (sub)sets of model coefficients with a Wald-type test when a single model is passed to the function (see the sketch following this list)
- the pseudo R^2 statistic is now automatically calculated by the `rma.uni()` function and supplied in the output (only for mixed-effects models and when the model includes an intercept, so that the random-effects model is clearly nested within the mixed-effects model)
- component `VAF` is now called `R2` in the `anova.rma.uni()` function
- added function `hc()` that carries out a random-effects model analysis using the method by Henmi and Copas (2010); thanks to Michael Dewey for the suggestion and providing the code
- added new dataset (`dat.lee2004`), which was used in the article by Henmi and Copas (2010) to illustrate their method
- fixed missing x-axis labels in the `forest()` functions
- `rma.glmm()` now computes Hessian matrices via the `numDeriv` package when `model="CM.EL"` and `measure="OR"` (i.e., for the conditional logistic model with exact likelihood); so `numDeriv` is now a suggested package and is loaded within `rma.glmm()` when required
- `trimfill.rma.uni()` now also implements the `"Q0"` estimator (although the `"L0"` and `"R0"` estimators are generally to be preferred)
- `trimfill.rma.uni()` now also calculates the SE of the estimated number of missing studies and, for estimator `"R0"`, provides a formal test of the null hypothesis that the number of missing studies on a given side is zero
- added new dataset (`dat.bangertdrowns2004`)
- the `level` argument in various functions now accepts either a value representing a percentage or a proportion (values greater than 1 are assumed to be a percentage)
- `summary.escalc()` now computes confidence intervals correctly when using the `transf` argument
- computation of the Cochran-Mantel-Haenszel statistic in `rma.mh()` changed slightly to avoid integer overflow with very big counts
- some internal improvements with respect to object attributes that were getting discarded when subsetting
- some general code cleanup
- some improvements to the documentation
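
A minimal sketch of the new Wald-type test for subsets of coefficients and of the `hc()` function, again based on the bundled `dat.bcg` data; the `btt` indices refer to the second and third model coefficients (the two moderators):

```r
library(metafor)

dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res <- rma(yi, vi, mods = ~ ablat + year, data=dat)

anova(res, btt=2:3)        # joint Wald-type test of the 'ablat' and 'year' coefficients
hc(rma(yi, vi, data=dat))  # Henmi & Copas (2010) analysis of the random-effects model
```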

# metafor 1.8-0 (2013-04-11)

- added additional clarifications about the change score outcome measures (`"MC"`, `"SMCC"`, and `"SMCR"`) to the help file for the `escalc()` function and changed the code so that `"SMCR"` no longer expects argument `sd2i` to be specified (which is not needed anyway; thanks to Markus Kösters for bringing this to my attention; see the sketch following this list)
- sampling variance for the biserial correlation coefficient (`"RBIS"`) is now calculated in a slightly more accurate way
- `llplot()` now properly scales the log-likelihoods
- argument `which` in the `plot.infl.rma.uni()` function has been replaced with argument `plotinf`, which can now also be set to `FALSE` to suppress plotting of the various case diagnostics altogether
- labeling of the axes in `labbe()` plots is now correct for odds ratios (and transformations thereof)
- added two new datasets (`dat.nielweise2007` and `dat.nielweise2008`) to illustrate some methods/models from the `rma.glmm()` function
- added a new dataset (`dat.yusuf1985`) to illustrate the use of `rma.peto()`
- test for heterogeneity is now conducted by the `rma.peto()` function exactly as described by Yusuf et al. (1985)
- in `rma.glmm()`, default number of quadrature points (`nAGQ`) is now 100 (which is quite a bit slower, but should provide more than sufficient accuracy in most cases)
- the standard errors of the HS and DL estimators of tau^2 are now correctly computed when tau^2 is prespecified by the user in the `rma()` function; in addition, the standard error of the SJ estimator is also now provided when tau^2 is prespecified
- `rma.uni()` and `rma.glmm()` now use a better method to check whether the model matrix is of full rank
- I^2 and H^2 statistics are now also calculated for mixed-effects models by the `rma.uni()` and `rma.glmm()` functions; `confint.rma.uni()` provides the corresponding confidence intervals for `rma.uni` models
- various `print()` methods now have a new argument called `signif.stars`, which defaults to `getOption("show.signif.stars")` (which by default is `TRUE`) to determine whether the infamous 'significance stars' should be printed
- slight changes in wording in the output produced by the `print.rma.uni()` and `print.rma.glmm()` functions
- some improvements to the documentation
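
A small sketch of computing standardized mean changes with raw score standardization (`measure="SMCR"`), which, as noted above, no longer requires the `sd2i` argument; all values below are made up:

```r
library(metafor)

dat <- data.frame(m1i  = c(10.2,  9.8, 11.0),   # pre-treatment means
                  m2i  = c(12.1, 10.9, 12.6),   # post-treatment means
                  sd1i = c(2.1,   1.9,  2.4),   # pre-treatment SDs (used for standardizing)
                  ni   = c(30,    25,   40),    # sample sizes
                  ri   = c(0.55, 0.60, 0.50))   # pre-post correlations

dat <- escalc(measure="SMCR", m1i=m1i, m2i=m2i, sd1i=sd1i, ni=ni, ri=ri, data=dat)
rma(yi, vi, data=dat)
```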

# metafor 1.7-0 (2013-02-06)

- added `rma.glmm()` function for fitting of appropriate generalized linear (mixed-effects) models when analyzing odds ratios, incidence rate ratios, proportions, or rates; the function makes use of the `lme4` and `BiasedUrn` packages; these are now suggested packages and loaded within `rma.glmm()` only when required (this makes for faster loading of the `metafor` package); see the sketch following this list
- added several method functions for objects of class `rma.glmm` (not all methods yet implemented; to be completed in the future)
- `rma.uni()` now allows the user to specify a formula for the `yi` argument, so instead of `rma(yi, vi, mods=~mod1+mod2)`, one can specify the same model with `rma(yi~mod1+mod2, vi)`
- `rma.uni()` now has a `weights` argument to specify the inverse of the sampling variances (instead of using the `vi` or `sei` arguments); for now, this is all this argument should be used for (in the future, this argument may potentially be used to allow the user to define alternative weights)
- `rma.uni()` now checks whether the model matrix is not of full rank and issues an error accordingly (instead of the rather cryptic error that was issued before)
- `rma.uni()` now has a `verbose` argument
- `coef.rma()` now returns only the model coefficients (this change was necessary to make the package compatible with the `multcomp` package; see `help(rma)` for an example); use `coef(summary())` to obtain the full table of results
- the `escalc()` function now does some more extensive error checking for misspecified data and some unusual cases
- `append` argument is now `TRUE` by default in the `escalc()` function
- objects generated by the `escalc()` function now have their own class
- added `print()` and `summary()` methods for objects of class `escalc`
- added `[` and `cbind()` methods for objects of class `escalc`
- added a few additional arguments to the `escalc()` function (i.e., `slab`, `subset`, `var.names`, `replace`, `digits`)
- added `drop00` argument to the `escalc()`, `rma.uni()`, `rma.mh()`, and `rma.peto()` functions
- added `"MN"`, `"MC"`, `"SMCC"`, and `"SMCR"` measures to the `escalc()` and `rma.uni()` functions for the raw mean, the raw mean change, and the standardized mean change (with change score or raw score standardization) as possible outcome measures
- the `"IRFT"` measure in the `escalc()` and `rma.uni()` functions is now computed with `1/2*(sqrt(xi/ti) + sqrt(xi/ti+1/ti))`, which is more consistent with the definition of the Freeman-Tukey transformation for proportions
- added `"RTET"` measure to the `escalc()` and `rma.uni()` functions to compute the tetrachoric correlation coefficient based on 2x2 table data (the `polycor` package is therefore now a suggested package, which is loaded within `escalc()` only when required)
- added `"RPB"` and `"RBIS"` measures to the `escalc()` and `rma.uni()` functions to compute the point-biserial and biserial correlation coefficient based on means and standard deviations
- added `"PBIT"` and `"OR2D"` measures to the `escalc()` and `rma.uni()` functions to compute the standardized mean difference based on 2x2 table data
- added the `"D2OR"` measure to the `escalc()` and `rma.uni()` functions to compute the log odds ratio based on the standardized mean difference
- added `"SMDH"` measure to the `escalc()` and `rma.uni()` functions to compute the standardized mean difference without assuming equal population variances
- added `"ARAW"`, `"AHW"`, and `"ABT"` measures to the `escalc()` and `rma.uni()` functions for the raw value of Cronbach's alpha, the transformation suggested by Hakstian & Whalen (1976), and the transformation suggested by Bonett (2002) for the meta-analysis of reliability coefficients (see `help(escalc)` for details)
- corrected a small mistake in the equation used to compute the sampling variance of the phi coefficient (`measure="PHI"`) in the `escalc()` function
- the `permutest.rma.uni()` function now uses an algorithm to find only the unique permutations of the model matrix (which may be much smaller than the total number of permutations), making the exact permutation test feasible in a larger set of circumstances (thanks to John Hodgson for making me aware of this issue and to Hans-Jörg Viechtbauer for coming up with a recursive algorithm for finding the unique permutations)
- prediction interval in `forest.rma()` is now indicated with a dotted (instead of a dashed) line; ends of the interval are now marked with vertical bars
- completely rewrote the `funnel.rma()` function, which now supports many more options for the values to put on the y-axis; the `trimfill.rma.uni()` function was adapted accordingly
- removed the `ni` argument from the `regtest.rma()` function; instead, sample sizes can now be explicitly specified via the `ni` argument when using the `rma.uni()` function (i.e., when `measure="GEN"`); the `escalc()` function also now adds information on the `ni` values to the resulting data frame (as an attribute of the `yi` variable), so, if possible, this information is passed on to `regtest.rma()`
- added switch so that `regtest()` can also provide the full results from the fitted model (thanks to Michael Dewey for the suggestion)
- `weights.rma.mh()` now shows the weights in % as intended (thanks to Gavin Stewart for pointing out this error)
- more flexible handling of the `digits` argument in the various forest functions
- forest functions now use `pretty()` by default to set the x-axis tick locations (`alim` and `at` arguments can still be used for complete control)
- studies that are considered to be 'influential' are now marked with an asterisk when printing the results returned by the `influence.rma.uni()` function (see the documentation of this function for details on how such studies are identified)
- added additional extractor functions for some of the influence measures (i.e., `cooks.distance()`, `dfbetas()`); unfortunately, the `covratio()` and `dffits()` functions in the `stats` package are not generic; so, to avoid masking, there are currently no extractor functions for these measures
- better handling of missing values in some unusual situations
- corrected small bug in `fsn()` that would not allow the user to specify the standard errors instead of the sampling variances (thanks to Bernd Weiss for pointing this out)
- `plot.infl.rma.uni()` function now allows the user to specify which plots to draw (and the layout) and adds the option to show study labels on the x-axis
- added proper `print()` method for objects generated by the `confint.rma.uni()`, `confint.rma.mh()`, and `confint.rma.peto()` functions
- when the `transf` or `atransf` argument was a monotonically *decreasing* function, then confidence and prediction interval bounds were in reversed order; various functions now check for this and order the bounds correctly
- `trimfill.rma.uni()` now only prints information about the number of imputed studies when actually printing the model object
- `qqnorm.rma.uni()`, `qqnorm.rma.mh()`, and `qqnorm.rma.peto()` functions now have a new argument called `label`, which allows for labeling of points; the functions also now return (invisibly) the x and y coordinates of the points drawn
- `rma.mh()` with `measure="RD"` now computes the standard error of the estimated risk difference based on Sato, Greenland, & Robins (1989), which provides a consistent estimate under both large-stratum and sparse-data limiting models
- the restricted maximum likelihood (REML) is now calculated using the full likelihood equation (without leaving out additive constants)
- the model deviance is now calculated as -2 times the difference between the model log-likelihood and the log-likelihood under the saturated model (this is a more appropriate definition of the deviance than just taking -2 times the model log-likelihood)
- the naming scheme of the illustrative datasets bundled with the package has been changed; datasets are now called `dat.authoryear`; therefore, the datasets are now called (`old name -> new name`):
    - `dat.bcg -> dat.colditz1994`
    - `dat.warfarin -> dat.hart1999`
    - `dat.los -> dat.normand1999`
    - `dat.co2 -> dat.curtis1998`
    - `dat.empint -> dat.mcdaniel1994`
- but `dat.bcg` has been kept as an alias for `dat.colditz1994`, as it has been referenced under that name in some publications
- added new dataset (`dat.pritz1997`) to illustrate the meta-analysis of proportions (raw values and transformations thereof)
- added new dataset (`dat.bonett2010`) to illustrate the meta-analysis of Cronbach's alpha values (raw values and transformations thereof)
- added new datasets (`dat.hackshaw1998`, `dat.raudenbush1985`)
- (approximate) standard error of the tau^2 estimate is now computed and shown for most of the (residual) heterogeneity estimators
- added `nobs()` and `df.residual()` methods for objects of class `rma`
- `metafor.news()` is now simply a wrapper for `news(package="metafor")`
- the package code is now byte-compiled, which yields some modest increases in execution speed
- some general code cleanup
- the `metafor` package no longer depends on the `nlme` package
- some improvements to the documentation
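
A minimal sketch of the new `rma.glmm()` function and of the formula interface for `rma()`, using the bundled `dat.bcg` data; fitting the mixed-effects logistic model requires the suggested `lme4` package to be installed:

```r
library(metafor)

# unconditional mixed-effects logistic model with fixed study effects
res <- rma.glmm(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg,
                data=dat.bcg, model="UM.FS")
res

# the new formula interface: outcome formula first, sampling variances second
dat <- escalc(measure="OR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
rma(yi ~ ablat, vi, data=dat)
```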

# metafor 1.6-0 (2011-04-13)

- `trimfill.rma.uni()` now returns a proper object even when the number of missing studies is estimated to be zero
- added the (log transformed) ratio of means as a possible outcome measure to the `escalc()` and `rma.uni()` functions (`measure="ROM"`; see the sketch following this list)
- added new dataset (`dat.co2`) to illustrate the use of the ratio of means outcome measure
- some additional error checking in the various forest functions (especially when using the `ilab` argument)
- in `labbe.rma()`, the solid and dashed lines are now drawn behind (and not on top of) the points
- slight change to `transf.ipft.hm()` so that missing values in `targs$ni` are ignored
- some improvements to the documentation
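
A small sketch of the (log transformed) ratio of means, using `dat.co2` (later renamed `dat.curtis1998`; see the 1.7-0 notes above); the means, standard deviations, and sample sizes of the two conditions are assumed to be stored in columns `m1i`/`sd1i`/`n1i` and `m2i`/`sd2i`/`n2i`:

```r
library(metafor)

# column names below are assumed; adjust if the dataset uses different ones
dat <- escalc(measure="ROM", m1i=m1i, sd1i=sd1i, n1i=n1i,
                             m2i=m2i, sd2i=sd2i, n2i=n2i, data=dat.curtis1998)
res <- rma(yi, vi, data=dat)
predict(res, transf=exp)  # back-transform the pooled log ratio of means to a ratio
```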

# metafor 1.5-0 (2010-12-16)

- the `metafor` package now has its own project website at: http://www.metafor-project.org/
- added `labbe()` function to create L'Abbé plots
- the `forest.default()` and `addpoly.default()` functions now allow the user to directly specify the lower and upper confidence interval bounds (this can be useful when the CI bounds have been calculated with other methods/functions)
- added the incidence rate for a single group and for two groups (and transformations thereof) as possible outcome measures to the `escalc()` and `rma.uni()` functions (`measure="IRR"`, `"IRD"`, `"IRSD"`, `"IR"`, `"IRLN"`, `"IRS"`, and `"IRFT"`)
- added the incidence rate ratio as a possible outcome measure to the `rma.mh()` function
- added transformation functions related to incidence rates
- added the Freeman-Tukey double arcsine transformation and its inverse to the transformation functions
- added some additional error checking for out-of-range p-values in the `permutest.rma.uni()` function
- added some additional checking for out-of-range values in several transformation functions
- added `confint()` methods for `rma.mh` and `rma.peto` objects (only for completeness' sake; print already provides CIs)
- added new datasets (`dat.warfarin`, `dat.los`, `dat.empint`)
- some improvements to the documentation

# metafor 1.4-0 (2010-07-30)

- a paper about the package has now been published in the Journal of Statistical Software (https://www.jstatsoft.org/v36/i03/)
- added citation info; see: `citation("metafor")`
- the `metafor` package now depends on the `nlme` package
- added extractor functions for the AIC, BIC, and deviance
- some updates to the documentation

# metafor 1.3-0 (2010-06-25)

- the `metafor` package now depends on the `Formula` package
- made `escalc()` generic and implemented a default and a formula interface
- added the (inverse) arcsine transformation to the set of transformation functions

# metafor 1.2-0 (2010-05-18)

- cases where k is very small (e.g., k equal to 1 or 2) are now handled more gracefully
- added sanity check for cases where all observed outcomes are equal to each other (this led to division by zero when using the Knapp & Hartung method)
- the "smarter way to set the number of iterations for permutation tests" (see notes for the previous version below) now actually works like it is supposed to
- the `permutest.rma.uni()` function now provides more sensible results when k is very small; the documentation for the function has also been updated with some notes about the use of permutation tests under those circumstances (see the sketch following this list)
- made some general improvements to the various forest plot functions, making them more flexible, in particular when creating more complex displays; most importantly, added a `rows` argument and removed the `addrows` argument
- some additional examples have been added to the help files for the forest and addpoly functions to demonstrate how to create more complex displays with these functions
- added `showweight` argument to the `forest.default()` and `forest.rma()` functions
- corrected the `cumul()` functions not showing all of the output columns when using fixed-effects models
- `weights.rma.uni()` function now handles `NA`s appropriately
- `weights.rma.mh()` and `weights.rma.peto()` functions added
- `logLik.rma()` function now behaves more like other `logLik()` functions (such as `logLik.lm()` and `logLik.lme()`)
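
A minimal sketch of the permutation-test workflow touched on in the 1.2-0 notes above, using the bundled `dat.bcg` data; the number of iterations is chosen arbitrarily:

```r
library(metafor)

dat <- escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg, data=dat.bcg)
res <- rma(yi, vi, mods = ~ ablat, data=dat)

# approximate permutation test based on 1000 random permutations; with very few
# studies, an exact test (exact=TRUE) becomes feasible and is chosen automatically
# when it needs fewer iterations than requested
permutest(res, iter=1000)
```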

# metafor 1.1-0 (2010-04-28)

- `cint()` generic removed and replaced with `confint()` method for objects of class `rma.uni`
- slightly improved the code to set the x-axis title in the `forest()` and `funnel()` functions
- added `coef()` method for `permutest.rma.uni` objects
- added `append` argument to `escalc()` function
- implemented a smarter way to set the number of iterations for permutation tests (i.e., the `permutest.rma.uni()` function will now switch to an exact test if the number of iterations required for an exact test is actually smaller than the requested number of iterations for an approximate test)
- changed the way p-values for individual coefficients are calculated in `permutest.rma.uni()` to 'two times the one-tailed area under the permutation distribution' (more consistent with the way we typically define two-tailed p-values)
- added `retpermdist` argument to `permutest.rma.uni()` to return the permutation distributions of the test statistics
- slight improvements to the various transformation functions to cope better with some extreme cases
- p-values are now calculated in such a way that very small p-values stored in fitted model objects are no longer truncated to 0 (the printed results are still truncated depending on the number of digits specified)
- changed the default number of iterations for the ML, REML, and EB estimators from 50 to 100

# metafor 1.0-1 (2010-02-02)

- version jump in conjunction with the upcoming publication of a paper in the Journal of Statistical Software describing the `metafor` package
- instead of specifying a model matrix, the user can now specify a model formula for the `mods` argument in the `rma()` function (e.g., like in the `lm()` function)
- `permutest()` function now allows exact permutation tests (but this is only feasible when k is not too large)
- `forest()` function now uses the `level` argument properly to adjust the CI level of the summary estimate for models without moderators (i.e., for fixed- and random-effects models)
- `forest()` function can now also show the prediction interval as a dashed line for a random-effects model
- information about the measure used is now passed on to the `forest()` and `funnel()` functions, which try to set an appropriate x-axis title accordingly
- `funnel()` function now has more arguments (e.g., `atransf`, `at`) providing more control over the display of the x-axis
- `predict()` function now has its own `print()` method and has a new argument called `addx`, which adds the values of the moderator variables to the returned object (when `addx=TRUE`)
- functions now properly handle the `na.action` `"na.pass"` (treated essentially like `"na.exclude"`)
- added method for `weights()` to extract the weights used when fitting models with `rma.uni()`
- some small improvements to the documentation

# metafor 0.5-7 (2009-12-06)

- added `permutest()` function for permutation tests
- added `metafor.news()` function to display the `NEWS` file of the `metafor` package within R (based on the same idea in the `animation` package by Yihui Xie)
- added some checks for values below machine precision
- a bit of code reorganization (nothing that affects how the functions work)

# metafor 0.5-6 (2009-10-19)

- small changes to the computation of the DFFITS and DFBETAS values in the `influence()` function, so that these statistics are more in line
with their definitions in regular linear regression models - added option to the plot function for objects returned by `influence()` to allow plotting the covariance ratios on a log scale (now the default) - slight adjustments to various `print()` functions (to catch some errors when certain values were `NA`) - added a control option to `rma()` to adjust the step length of the Fisher scoring algorithm by a constant factor (this may be useful when the algorithm does not converge) # metafor 0.5-5 (2009-10-08) - added the phi coefficient (`measure="PHI"`), Yule's Q (`"YUQ"`), and Yule's Y (`"YUY"`) as additional measures to the `escalc()` function for 2x2 table data - forest plots now order the studies so that the first study is at the top of the plot and the last study at the bottom (the order can still be set with the `order` or `subset` argument) - added `cumul()` function for cumulative meta-analyses (with a corresponding `forest()` method to plot the cumulative results) - added `leave1out()` function for leave-one-out diagnostics - added option to `qqnorm.rma.uni()` so that the user can choose whether to apply the Bonferroni correction to the bounds of the pseudo confidence envelope - some internal changes to the class and methods names - some small corrections to the documentation # metafor 0.5-4 (2009-09-18) - corrected the `trimfill()` function - improvements to various print functions - added a `regtest()` function for various regression tests of funnel plot asymmetry (e.g., Egger's regression test) - made `ranktest()` generic and added a method for objects of class `rma` so that the test can be carried out after fitting - added `anova()` function for full vs reduced model comparisons via fit statistics and likelihood ratio tests - added the Orwin and Rosenberg approaches to `fsn()` - added H^2 measure to the output for random-effects models - in `escalc()`, `measure="COR"` is now used for the (usual) raw correlation coefficient and `measure="UCOR"` for the bias corrected correlation coefficients - some small corrections to the documentation # metafor 0.5-3 (2009-07-31) - small changes to some of the examples - added the log transformed proportion (`measure="PLN"`) as another measure to the `escalc()` function; changed `"PL"` to `"PLO"` for the logit (i.e., log odds) transformation for proportions # metafor 0.5-2 (2009-07-06) - added an option in `plot.infl.rma.uni()` to open a new device for plotting the DFBETAS values - thanks to Jim Lemon, added a much better method for adjusting the size of the labels, annotations, and symbols in the `forest()` function when the number of studies is large # metafor 0.5-1 (2009-06-14) - made some small changes to the documentation (some typos corrected, some confusing points clarified) # metafor 0.5-0 (2009-06-05) - first version released on CRAN metafor/MD50000644000176200001440000007144614060150153012226 0ustar liggesuserscb933699e0c853bf4b5232a9b7b5cc62 *DESCRIPTION 5de3c89c018ee97081f6c6fadab2d81b *NAMESPACE dc676128184b64278c8d08deb1a19006 *NEWS d3917c7aba0dede28443ebe5d88ca5d2 *NEWS.md 24db85dd05c8e7120c6ac0f0308bb1f5 *R/AIC.rma.r 7a4e9b442e8435829bda5c163d6d0eff *R/BIC.rma.r bb33d1a9016dc0fffd29963abeee97bc *R/addpoly.default.r f74a85ebb8b2bc60c9e4fe43b2c182ee *R/addpoly.r 1b0ed28c94b198f6c551c33ae0740b6e *R/addpoly.rma.r a8072017924c149501eee7df4b0b0d6d *R/aggregate.escalc.r c4669953c2de7c0ca2624ef7106364e1 *R/anova.rma.r 9b7c6d4219b7239aeb940a93f9b9f3ff *R/baujat.r bfbcd3205b9e220a385aa97bef10f740 *R/baujat.rma.r 
19d002c24f9d5e730a0bf3cd8c667842 *R/bldiag.r 30b9c7c5d4eaab44326f97bd3d2776af *R/blup.r c23ed181a7af5fda51ec8b1c481311e8 *R/blup.rma.uni.r 30b7ac1933d7a0444b4adb82a21e1d38 *R/coef.permutest.rma.uni.r 93876c4c1db491a3a318e8154200a4a0 *R/coef.rma.r 4310e86470beff01c5a20d0451637740 *R/coef.summary.rma.r a05604a0e6097b9faacd69c341e3a5f9 *R/confint.rma.glmm.r 7d3a2cc91122bd8c5d4782f281c20ed6 *R/confint.rma.ls.r 6e1eb62573f7df6c2f9580f54cd6f2c2 *R/confint.rma.mh.r 1f6123f4ad00a3be4ec081c7607500d1 *R/confint.rma.mv.r a126ddc0c15b13dd53fdedc5e646f9ce *R/confint.rma.peto.r ea89b80e4d6bd8535e9e58764de7d497 *R/confint.rma.uni.r 5b87437c87ef7447057ad469905df2f3 *R/confint.rma.uni.selmodel.r f08e6225b500c02903a0b12b6f5343d3 *R/contrmat.r 104680d4ebe1513f970d1d49fb3f08fa *R/cooks.distance.rma.mv.r 3e67396d29e02b21bd712216e6964794 *R/cooks.distance.rma.uni.r eab5a85465f21f2dc9a58055ba597d10 *R/cumul.r ab951e14ea7f513adc5d5e5f718e27c5 *R/cumul.rma.mh.r c8bcd382e58cdaadbda7be3759e5b87f *R/cumul.rma.peto.r 3d62a01393285d798f0e20b1b4d53615 *R/cumul.rma.uni.r 1058e013bca95edb2d7e7ad2f325c26b *R/deviance.rma.r 377b151eee408e788262f0082a471d40 *R/df.residual.rma.r 91520d6e28853893079ab6da348c0132 *R/dfbetas.rma.mv.r be52077ac7b9873d44c4594a0759610d *R/dfbetas.rma.uni.r b3db04f49f4898c739602f1d2c5bc666 *R/dfround.r a918a44a36090cdec92149eff4c19294 *R/escalc.r 23c9fbb02e78031973d913c70aae0b10 *R/fitstats.r b262cbabdf42af06822cd9610ec6a666 *R/fitstats.rma.r 0a5f9875f94f2ea43ec9e46b42a1dda4 *R/fitted.rma.r 15a46565ce5517b1c900566156e0435c *R/forest.cumul.rma.r db14384267353107049aa9d2ae559159 *R/forest.default.r f5148247e2bb84f6c3cd524c72cd06c6 *R/forest.r 6e5e8d84e89e072413e7d81f3eddddb0 *R/forest.rma.r 6d8d5aaf14c77664a30f297142570573 *R/formula.rma.r 9cedb344963e37cbdd99d02846a0dc93 *R/fsn.r 4032c9f027367aadc5d7207499f9ac44 *R/funnel.default.r d1cbb7d3ab050ed099679e78af50aaaa *R/funnel.r 326bc507b64a0dc46fa3d8065b9190f7 *R/funnel.rma.r 2573a8e03e9331881bef8089ac03af1d *R/gosh.r 3f3f6a442293e0c8891e6a59496c4f82 *R/gosh.rma.r 7825ee9e5f1b4764d1c1ee2658bb3822 *R/hatvalues.rma.mv.r 13d6ac7b3d073dae91c1bff1b701bc81 *R/hatvalues.rma.uni.r 41d410d818a21e7bbcb798cecc8b57d2 *R/hc.r c55e4db9cda819dd7f6d264a6f8e1f3d *R/hc.rma.uni.r a98d0a0177136ef471540c3e4b3d80bb *R/influence.rma.uni.r 79a9a5487b16c86b53c7c221028ad2f4 *R/labbe.r 2b4d64d35564a4af4b1d2b40dd1e4087 *R/labbe.rma.r e9309f3a06c035f07723140e4e29952e *R/leave1out.r 5a288bf43e99b0a1ba59e1e5535da878 *R/leave1out.rma.mh.r 571ad0ea9d9afe69bed9cb55ea5560bb *R/leave1out.rma.peto.r 7e185498098f11ea64be1cb0f3f5aac9 *R/leave1out.rma.uni.r b30fe377ee001e2cc316c04d3bdbd578 *R/llplot.r 2cc66899eed1618b3040a084c51db71c *R/logLik.rma.r 7f24183a29cfc889d140bc435901a5a5 *R/matreg.r 072c5707ff325600d0763dd1d40262b2 *R/metafor.news.r 73a91864a53c1d5bc1214282fb03e6f1 *R/methods.escalc.r 3f9611811cf5b2f6a51374342e616272 *R/methods.list.rma.r c5ed07c91cf5ccd15901057f7480fcd0 *R/misc.func.hidden.escalc.r 07f7a3abc14804ed7b564344e0d9b246 *R/misc.func.hidden.glmm.r 519b8bbad9005fcf3ad2beb226525758 *R/misc.func.hidden.mv.r 38be396e04c89c2ce42bb0895440a655 *R/misc.func.hidden.prof.r ff246b7bb864b2c70c0705502112c299 *R/misc.func.hidden.r 70aa82a1a481f1728e5aaa29917487e3 *R/misc.func.hidden.selmodel.r 4dba10a35b7e2169d7324960f24f73b9 *R/misc.func.hidden.uni.r 686a48de0f40cb3ee548db4362521594 *R/model.matrix.rma.r 6c444e7b706b46a7d4836920a0d058f4 *R/nobs.rma.r 3376538a20fd1ea72c6cc0c76abff14c *R/permutest.r 44b4ab2b18afcc1109ea0770a946be60 *R/permutest.rma.uni.r 
1b0a0b9a16b3e16a6c93aa81457e7f7d *R/plot.cumul.rma.r dcf4dba47e9befc6478b973aa2119b8e *R/plot.gosh.rma.r 32ea89f46d693068d24ba7f509200c90 *R/plot.infl.rma.uni.r aa11e2fa5787baafdedbb842aee16221 *R/plot.profile.rma.r 4157c16ae5f6ba991a68e83cab89b114 *R/plot.rma.glmm.r bf20419f7b3cc37503490699bc6c7bb8 *R/plot.rma.mh.r c59482623cb473d6fc5da2c5f28f84ee *R/plot.rma.peto.r f502d72abab2dd16467f01f0811f5267 *R/plot.rma.uni.r c47508af80458abb50d4571a0d4b677a *R/plot.rma.uni.selmodel.r 97869a2e48d4eb0fe2a8b4cb1389f18c *R/points.regplot.r 6deabe9946765807c931205ee59ede84 *R/predict.rma.ls.r b935b98991b725ce665bcc6c4c96be93 *R/predict.rma.r 8e0dd9df7a56c05c9c734a0c4dbdea8f *R/print.anova.rma.r d1185457e05ae14c75c7611d67e8b77b *R/print.confint.rma.r 93dc9bb0555ceed67d142bca0d1a259a *R/print.escalc.r 3e41a926044ad13b299651ef5ef4d873 *R/print.fsn.r efd8a7e879a05a5c1fe650699f0795d1 *R/print.gosh.rma.r 67885778315aa403daf99c0e19552aae *R/print.hc.rma.uni.r 18c3888f3ae7d7df2f9f03beea690854 *R/print.infl.rma.uni.r c31f88dbe62db58842075172786d5492 *R/print.list.confint.rma.r 1201f104ac5399d1b3d379dba7ec48b3 *R/print.list.rma.r e49151cb4191b67a9608bf50c38d696d *R/print.matreg.r e808fd0c92e1b8d6d2a99e40faa98254 *R/print.permutest.rma.uni.r 2a0a4ae76cc6258a4b0daa03054e21cf *R/print.profile.rma.r a52440a327b3eda44b69b33285973ffd *R/print.ranktest.r 48ce0e569dadb800d00b441298101513 *R/print.regtest.r bc72a8acbffb6b65b41ed8b6ae589586 *R/print.rma.glmm.r bf05a47ea98e90e531f06f1fcd6be873 *R/print.rma.mh.r d8a2ecd782e4450d44f9deb15685f378 *R/print.rma.mv.r 4560112f37088aca1d5f2409e2f7a616 *R/print.rma.peto.r eef1622fa3af19dd420ee870bf0303bc *R/print.rma.uni.r 0e1f5177e1ef1687c5f5b1f4d8e4c164 *R/print.robust.rma.r 11c3d363143a25342282937096a04094 *R/print.summary.rma.r 9769f02ce898f8d0d820f4244f2314da *R/print.tes.r a42d948ff89c90e3362be172fed490d0 *R/print.vif.rma.r 07d2872c04336b7fb5f58c8d0016398b *R/profile.rma.ls.r 57dfb60be7f3603409408462423bb84a *R/profile.rma.mv.r 74759ef634463b800cd13619cf1004fb *R/profile.rma.uni.r 8db07b15f01095836eaea17a05d21f22 *R/profile.rma.uni.selmodel.r b21968963ade773ffc54aeb10a96cda4 *R/qqnorm.rma.glmm.r fbc03c776abe1eb05015d7ddeeeea35a *R/qqnorm.rma.mh.r 550fb927d4cf60fa8359559a9fc9a5bf *R/qqnorm.rma.mv.r 2f1067da5a97f46d7584f9b91cea5ee2 *R/qqnorm.rma.peto.r 65730e90e6ff4a1608de7564da4a63b6 *R/qqnorm.rma.uni.r 708901230afb921438232ae1dae2f5de *R/radial.r 674b47b9a3e5872cf419fd6e13ba07fa *R/radial.rma.r e97ecd71133b048c8ae998735abb85e0 *R/ranef.rma.mv.r 87843fabc05eaab71592c95188d1eae5 *R/ranef.rma.uni.r 09693b424c8057331e75a4f5ebe7b1fb *R/ranktest.default.r 5691ae60009fa26e2ecf65a89ca06a38 *R/ranktest.r 85b250fd16fba2887a43ad4b68bd06f1 *R/ranktest.rma.r fb911351a0c54a0ced6e15dfd8f1b70a *R/rcalc.r 48adb556610afbd0ac47307d9346e443 *R/regplot.r b91d824390ab83b68465724f641df896 *R/regplot.rma.r df87428d108779be29fe136521bb0ed3 *R/regtest.default.r 90e49d4cd1211e7554e77adac3bdbe35 *R/regtest.r 872147cd9f842bfb137f338977707405 *R/regtest.rma.r 2f1f20bd89d42a7ba49e9284259e578b *R/replmiss.r 91777063e31189d96863dc3a8c9de213 *R/reporter.r 856b1a943427ace30b2129fc054ecff7 *R/reporter.rma.uni.r cb21640f752fc2f787bce133adb2831c *R/residuals.rma.r b5506d272845a5a9d29a278b386585ae *R/rma.glmm.r b4c79b55dfc156e24c92fbb2a4dafc05 *R/rma.mh.r 82b71dacb27690e1fdca234902aeacd0 *R/rma.mv.r 04db526a812e51f556efbef34e2a8d5f *R/rma.peto.r 0dd456c868b73a3a54c04c4625f29107 *R/rma.uni.r c7549400ef048f329f3bc18433c9a32a *R/robust.r 710fab4c3b21ae48e8f17cc15f7ec6e0 *R/robust.rma.mv.r 
6e743379670db2e3f6564578d0e23151 *R/robust.rma.uni.r 76b26904f1cd31186b2284d94cebb190 *R/rstandard.rma.mh.r e494653cce228233b507a0344588f3be *R/rstandard.rma.mv.r 35e85f6e717a9c3abe516ae96b3e1add *R/rstandard.rma.peto.r 00b11752fc8f56fcdba9266183071bb5 *R/rstandard.rma.uni.r 068e45f8a2507ec82f30fd89cd3d27de *R/rstudent.rma.mh.r 5ff4294b017cf81c6e09ecae8aeacd42 *R/rstudent.rma.mv.r 2a7b06472d9e61e02c0d55e852dcc149 *R/rstudent.rma.peto.r 07a1ef1722a6eab8f6522afd21ff9ac4 *R/rstudent.rma.uni.r d31b9a1f07c093dac4a38e92edb83040 *R/selmodel.r 4b30898de6b835f48d0767a8480c5c12 *R/selmodel.rma.uni.r cd71bc3f3bc64f0d15c72447101d6601 *R/simulate.rma.r 35e60bbab097df845353953868adc295 *R/summary.escalc.r 34f5310ec657b5921f3189a0502dfedc *R/summary.rma.r d7b80c1abe139f2f09ba20122d530a40 *R/tes.default.r b856894dd0f3b8544da2e792c67f7972 *R/tes.r bff05570717a66d5a20eaa2a4c4450e0 *R/tes.rma.r c5870901d4becdd94aab9aaed0744618 *R/to.long.r 4a6eee605136263eed8c39a9c7765ffb *R/to.table.r 4a0d505c8b54de1c7ba5fe9b328e9532 *R/to.wide.r bf1e19be28b62925fea39d59a61ab1eb *R/transf.r 362bf474245dd51425ef3b345f298778 *R/trimfill.r c134229f59fbd0abf79234364f9c0bc4 *R/trimfill.rma.uni.r ded170a359146f58e23f880908a06799 *R/update.rma.r e58ee6eacfdf168007ad32988fdb3353 *R/vcov.rma.r cd7c79766df6af7238f4e6c2bf3ca976 *R/vec2mat.r 728fdc925021f89708a4031cb4f0b5d1 *R/vif.r dfba25eb0963fa8436fefea495dea5ba *R/vif.rma.r 0de86d84a752b52051992c19e006b068 *R/weights.rma.glmm.r 35701195d44afc99b3fac9077955b340 *R/weights.rma.mh.r c73b0df95078d79384e654a3d95834b2 *R/weights.rma.mv.r a7680815a5a067253553798438b373f2 *R/weights.rma.peto.r 7f35d6989e6eb6de2fc2ce07ef7c85a5 *R/weights.rma.uni.r 71d856125236a2aa758ab6389d4de387 *R/zzz.r 2241b8fb61a8e33ccf518633b2089f50 *README.md 59718df4efb83367bf336cd750b205a7 *build/metafor.pdf ad9cf7617939cc3d99cb224f7b6f712d *build/vignette.rds b11613383c43f425a404d823b053a8d0 *data/dat.anand1999.rda 121b1a714b4808161baa741dacd991cf *data/dat.assink2016.rda dbe3046f8030e9a3eb3e111907039a9e *data/dat.bangertdrowns2004.rda 8c795e443991e2f740a5d02c495ddd08 *data/dat.baskerville2012.rda a8e2fa07c65cf884448653809d6e1d3f *data/dat.bcg.rda bafef1a7243e27b3511c994b55f3a114 *data/dat.begg1989.rda 636be024d1197a04aaa2eb9764020af3 *data/dat.berkey1998.rda 6883d222d6b8ea7d5b6a8f99722682b5 *data/dat.bonett2010.rda 4407b3c1ea17b4f3d7ece7deda8f3959 *data/dat.bornmann2007.rda 95bc4ddf3ae86c20b7b3e655d201b196 *data/dat.bourassa1996.rda f4915d00c0b15a016fafac8f12e9ae6a *data/dat.cannon2006.rda 7fd9d83bbaa7ea4d7f1539bacfcce4b9 *data/dat.cohen1981.rda fbfd6ed08efd53935894e1e3674176da *data/dat.colditz1994.rda ec6c5c4fbddfffa2ea9e5c77bb2f5798 *data/dat.collins1985a.rda a083e75774138af7bf3cb03b04081b06 *data/dat.collins1985b.rda f41f4cd9168f46fe9584c2deff9b51ee *data/dat.craft2003.rda c9498f34413e510af3e0f18dc82f490f *data/dat.crede2010.rda 789a386680d5eceed9669a46cc380f2f *data/dat.curtis1998.rda 2e53f7ff0da5876af982fd031fff12e9 *data/dat.dagostino1998.rda df3449e886e6f7a0394f407d8f3d7d59 *data/dat.damico2009.rda ac5cf4b283f10e8b28564e1919168eeb *data/dat.debruin2009.rda 12061ecce717aa932c9dd6d5cd2876dd *data/dat.dorn2007.rda 574286cee201f86f006aaaaa346c8fd4 *data/dat.egger2001.rda 9a4f95a219a269e8579e9c9e893a4e63 *data/dat.fine1993.rda 814885eee75506d1e3e3fd950bf39ae8 *data/dat.gibson2002.rda 2ab73dc47bb8de81452d9d0c402377f4 *data/dat.graves2010.rda fcd4d40fb4c4ad1898b035e4aaf0ce2b *data/dat.hackshaw1998.rda 135c8811e5ee4e50c195607c8c8939cb *data/dat.hahn2001.rda 35f25f94cc42526e0bd12b1b1d63550c *data/dat.hart1999.rda 
1cf52ee3dbb15cb8b02f3fc306b878da *data/dat.hasselblad1998.rda d5197798307e96ead5364df6e260173a *data/dat.hine1989.rda 0aaef4642127c0a1c03ed950d549e79d *data/dat.ishak2007.rda 21240e82b687891d80d342f8737971f9 *data/dat.kalaian1996.rda 9bc4d80a7c6503781baffb3926342832 *data/dat.kearon1998.rda 78bfad66c5f4b35eacd88ef802eee6f8 *data/dat.knapp2017.rda 4f9c0133c3cbc0c5fdeb015447eef2da *data/dat.konstantopoulos2011.rda e8665ae1ae2eb80206d3522e47ed0165 *data/dat.landenberger2005.rda 0cf7afddac0f2011382ff72dc0e8bb34 *data/dat.laopaiboon2015.rda d1807f1b92572474d39557b6e6beb507 *data/dat.lau1992.rda d3d4057833c5ef3ee6478cdc2b53364b *data/dat.lee2004.rda 868b986afd44a43c2f8502352316009a *data/dat.li2007.rda ef8dae199d5c98cb27eff455586c41a7 *data/dat.lim2014.rda 6b3d9c84790bf0e0f405526913f01a79 *data/dat.linde2005.rda 3b06a02b86dedf079458415e18df240d *data/dat.lopez2019.rda ee804bf21058a10babe6a764a992fcb4 *data/dat.maire2019.rda f5cbbe19df392a49d1517e335cc87c02 *data/dat.mcdaniel1994.rda 850fa1a0ce22870f8727d78e1201fbe5 *data/dat.molloy2014.rda 3b20f17a046a1b959a9c407cb7448e30 *data/dat.moura2021.rda f72e8cc6edfe28deb6f0a9c51e5806a5 *data/dat.nielweise2007.rda 2a71139c32c28290e32ceb44f768546e *data/dat.nielweise2008.rda d9cb69c45ba301fef4024a0fb3ec8f22 *data/dat.normand1999.rda 5666730f53899ad65033919e144c0b07 *data/dat.obrien2003.rda b001804122c7ddbc2ab7df392fd6410e *data/dat.pagliaro1992.rda 6e454ed1f7df21d396e25a448e1fb603 *data/dat.pignon2000.rda 39d9bc3e09b3e23f6ae784faa779b146 *data/dat.pritz1997.rda 5696b21fac60a0cb9703611748b19be0 *data/dat.raudenbush1985.rda d95a52ad22192dfe0897fdd72494238f *data/dat.riley2003.rda 61ccb8737e6a8739d0a7ec7eb15485e0 *data/dat.senn2013.rda eab17ef4c54e62df8162719b8efdcf23 *data/dat.vanhowe1999.rda 39c100bb92002c822c5d1509aaf8f581 *data/dat.viechtbauer2021.rda dad2d5f4139e921dfc9faa0b31f5369e *data/dat.yusuf1985.rda ed1dc59a0cb2de8d1bb09b13ba6b7d68 *inst/CITATION e8f7fea2a150beb5e5445cd9544f2af8 *inst/doc/diagram.pdf b438c769739c3d868cfd5766f72f7c2c *inst/doc/diagram.pdf.asis ab15425ad7959e47c27b548d9a427fa3 *inst/doc/metafor.pdf b22e3397f4cea09c48a2c785ff0cae6a *inst/doc/metafor.pdf.asis 674a1e37b84b08e97e1ab7a866d71189 *inst/reporter/apa.csl 7d3d4b866072fc4d82ded605d552d01c *inst/reporter/references.bib a1a1c77e0cbdb219be9b096becd6b940 *man/addpoly.Rd 8bf81c799c8a95a5afc601f8fd9b4f38 *man/addpoly.default.Rd b3b5da000543a585a95c11d5a55fc965 *man/addpoly.rma.Rd 18d3cae3c110a087e8cce83bbd7d47dd *man/aggregate.escalc.Rd 822738853c1f5358501ef6a8a9b6a44e *man/anova.rma.Rd 76ad6b644bc06c612bf1c5f43db767d1 *man/baujat.Rd 4cd5ce3f90a1135e82bc19eec737e4fb *man/bldiag.Rd a8a8b62e73cdb3c1a12fac9168fa94d8 *man/blup.Rd 74ea9c4d6e8cf507cbee63d256dd734e *man/coef.permutest.rma.uni.Rd e88e66e0e2318109cfd3beecdaf3b721 *man/coef.rma.Rd 5f1bdb113902d8a20c3dbdc0cd08f346 *man/confint.rma.Rd b34b1becfd39ff585c8dd89eecce8c76 *man/contrmat.Rd 77cdcf6dce130a393c7c713f8edb775e *man/cumul.Rd 69274776984289cb6909eaef7f0afc4b *man/dat.anand1999.Rd 3b0907c2ec99168ad0c3b6dfc36ef125 *man/dat.assink2016.Rd 3bcebb8e2efcc001ecd4afc265b65618 *man/dat.bangertdrowns2004.Rd a885058fc015fec4ca8431a6b113fd50 *man/dat.baskerville2012.Rd 188724e73e5adb8faba138040d965666 *man/dat.begg1989.Rd f5659ebd513abb2d4b9f9e59c7d3ebbf *man/dat.berkey1998.Rd 1ba741f2fb20d1cde278f4e9f0e17279 *man/dat.bonett2010.Rd 513165b0bb2300e302ed6f301c1cf594 *man/dat.bornmann2007.Rd 36b139e4e0cf61da966f5e02d3382a74 *man/dat.bourassa1996.Rd 6ce686f634c7e66f26de1bb0b59d18ed *man/dat.cannon2006.Rd 78ae7509e020df57a054580ac8130d89 
*man/dat.cohen1981.Rd aba6e03c69972af3159727210d85dc35 *man/dat.colditz1994.Rd 7fc6ba8ece06624a191080f7f5876e2c *man/dat.collins1985a.Rd 2e1a02ddb22e2b2b1a5374dc405ca3ac *man/dat.collins1985b.Rd f63dc9e10bd401b684c9db533990d24f *man/dat.craft2003.Rd 44e8f916cecc0d7cc607def97699502f *man/dat.crede2010.Rd af980186c00f40e02a2215e35a54c987 *man/dat.curtis1998.Rd affd16511a6196fe456f180b591238a2 *man/dat.dagostino1998.Rd 6e1482be49cc38545358571d8a2e0ed6 *man/dat.damico2009.Rd 85325e9de0c8ee99fded85f6c2556177 *man/dat.debruin2009.Rd 69807f0a70b354ebbb1c777cd9b29ad0 *man/dat.dorn2007.Rd b3946f29e7fbd75cc5ededf14dc3192d *man/dat.egger2001.Rd 3b4490f9e71dc8eaa0dae89bf6140174 *man/dat.fine1993.Rd af765f46ea0c3ac25deadd20b02ffbd8 *man/dat.gibson2002.Rd 9fd8acfb45493ae67f9078160665edd2 *man/dat.graves2010.Rd 741250bb47c06f637e64011a2f0c414f *man/dat.hackshaw1998.Rd f6afcf190b6b3309af1a51fd7bb8b6c0 *man/dat.hahn2001.Rd ea92875175892ea7b97dba160fa442e3 *man/dat.hart1999.Rd c2f2f347f95b66eba323a4b24faa14ce *man/dat.hasselblad1998.Rd 2783a2559e083c059b2d839e421d3297 *man/dat.hine1989.Rd 6bb002d6cf38e328ba84a4db7ffda879 *man/dat.ishak2007.Rd 02b44a30fd7b56bfa8703ca159f78ebe *man/dat.kalaian1996.Rd 5ebd555167271d8c87a72d97e5a1d430 *man/dat.kearon1998.Rd 2b7e38fa2cccb8053aa0bee598f3987d *man/dat.knapp2017.Rd 5b94c1a36818ec3b072a274961b16104 *man/dat.konstantopoulos2011.Rd 8980b9bf2fdc6304a326a2a66a9d0a90 *man/dat.landenberger2005.Rd 39bbdb73f946bd779d69477c32513ccc *man/dat.laopaiboon2015.Rd 565e16f8a0f65f92f99f8f2f78cfee3b *man/dat.lau1992.Rd 8161b3bcfc0c66fd8bd9ba41a460ff64 *man/dat.lee2004.Rd 5edca8a8559e7efa1e5d98c9177467b4 *man/dat.li2007.Rd 1f2ee498c03b266a05353bb0a6e2ca75 *man/dat.lim2014.Rd ff0b294072ad0e3c7ec9640a8126c431 *man/dat.linde2005.Rd 1ae7a71bd5ef9a8efbb03f71167adc91 *man/dat.lopez2019.Rd 485fe80edb55cb12b4af035ec246a71a *man/dat.maire2019.Rd d199da5d5217d297b602a8c079e18c41 *man/dat.mcdaniel1994.Rd dfa233ad5fd29e80769475f16fa028b1 *man/dat.molloy2014.Rd 41b076ab383d7156a6ecc83bcdad3afa *man/dat.moura2021.Rd 64e9eb87c7d5329846482154c19ea82e *man/dat.nielweise2007.Rd 5fa85549da706083189b026f2fcd58ce *man/dat.nielweise2008.Rd dda574d95d4f234ee16bd2b2d71822f1 *man/dat.normand1999.Rd 81475cff8c15b9c57eb08f10a7a03282 *man/dat.obrien2003.Rd a6aee380156ec0b23c30439a764469e7 *man/dat.pagliaro1992.Rd ad155bb8e9b16246a5a301c4b12744ac *man/dat.pignon2000.Rd 817b0b3f0dec2a2e5dab04c3ea2a6d27 *man/dat.pritz1997.Rd 62280d8200ec43b57b5f233305873622 *man/dat.raudenbush1985.Rd b21d3400f6a41f30bb48a2030faa56d2 *man/dat.riley2003.Rd 739b57825251299cbc9802d4d2551270 *man/dat.senn2013.Rd 4fdbf0547a3512fe7038f4f2d9120744 *man/dat.vanhowe1999.Rd b2c70c74a0bd1eed670aee074a1064af *man/dat.viechtbauer2021.Rd 876b25eda039c3287d956d876b18daae *man/dat.yusuf1985.Rd 0ca8da79b17c1ddb8200de4ad820f500 *man/dfround.Rd 53fe33bf7a10875e4f5d74c7057c0762 *man/escalc.Rd 5cdee013a55a4f6a5f66be5f49119f50 *man/figures/selmodel-beta.pdf 3a4ca1f3e42229a2d8d95236f52ee045 *man/figures/selmodel-beta.png c751b09df1e4a7c8aca935bb65995c36 *man/figures/selmodel-negexppow.pdf b7fa05d41d625cba6e91de3ea8721c73 *man/figures/selmodel-negexppow.png 29253b5adec558bbdd337bdd95f410e8 *man/figures/selmodel-preston-prec.pdf fd78c4ff2eabcc64f4fc3236d0ebc055 *man/figures/selmodel-preston-prec.png 441b2e3bcd25f716dc7a2b387c33caaa *man/figures/selmodel-preston-step.pdf e998effef795e089cc9b431a03ff60f2 *man/figures/selmodel-preston-step.png ec62c2ed690b06481ee8cb943dc9e90f *man/figures/selmodel-preston.pdf e2c8d75e23961e02d8ba9d975d2ccf22 
*man/figures/selmodel-preston.png 5664979843332a108b9b4ecf31aea781 *man/figures/selmodel-stepfun-fixed.pdf 0151efbb5aeba28b10d65d3f1d9f8225 *man/figures/selmodel-stepfun-fixed.png 23f4043b8715ac02a3a4ede231de5b24 *man/figures/selmodel-stepfun.pdf 417a38e4d94695e2d3ebb07911e4b796 *man/figures/selmodel-stepfun.png 3e7ec32203177f5e483b74a7c0c734cd *man/figures/structs1.png 7de0929dc70082ea7761dfa51518057e *man/figures/structs2.png d213554d74d9b7d01be6da34a328723a *man/fitstats.Rd ae03c5aa601978767ef563a4a098b9b5 *man/fitted.rma.Rd 6bd555ae0fd8c0a7bb86e70bc014da72 *man/forest.Rd 414084656adc798d815ea6308c88f136 *man/forest.cumul.rma.Rd 7795af250a7c80a24dbb07d8d14a3309 *man/forest.default.Rd 31633e97226d7ea41493ddffa9b25f76 *man/forest.rma.Rd a60d52150ad79c080bd00056524d8d90 *man/formula.rma.Rd f34e915c83a6d785de58c8c99719087d *man/fsn.Rd 4bd8411ccdd9cb5afacd106db7222bb4 *man/funnel.Rd ca31033458d6544afac31bf7d937b0f9 *man/gosh.Rd accc767e7f70c94abe9c25a6736edbb8 *man/hc.Rd d8d85a994fe0e6c57454c93ac8c521cb *man/influence.rma.mv.Rd e6f1a682d4b82248e22388b8df1b2b57 *man/influence.rma.uni.Rd 4a08e1d27375b07eedce0437e56474aa *man/labbe.Rd 52fc8139b96f851345d3dc1ec8cdb7c8 *man/leave1out.Rd 66c196bc155accafd9c9aac4d68732d1 *man/llplot.Rd 20563d95cbd152a9abe16faca5db037a *man/macros/metafor.Rd 6566f0b44067c53425d1984d248c6f94 *man/matreg.Rd 9dd82292bf3b0b612d24cd4a8c287457 *man/metafor-package.Rd d216f07152276baab220835697358006 *man/metafor.news.Rd 60db6391cfc430c8878bce41cec4baff *man/methods.escalc.Rd dc4a705f8a09981767cce5ee3e9d7a40 *man/methods.list.rma.Rd 3b4761b9ee8a56328aeabebe5c83dbb5 *man/model.matrix.rma.Rd b5f7401c7152f65b83468ee25742de4d *man/permutest.Rd 3a38f536828634533b7bd94b95af918b *man/plot.cumul.rma.Rd d1e03af4ea358261172691ee8148276b *man/plot.gosh.rma.Rd b9e9888057e21d2956c5d4b64b0fb628 *man/plot.infl.rma.uni.Rd 1efbd969eaa96ea1e677d714e4de8831 *man/plot.rma.Rd 6ecc97af1e3c4bb7602627d6781420a1 *man/plot.rma.uni.selmodel.Rd e54d208563c007d8c2b727ef71b84422 *man/predict.rma.Rd f4c62ffd189e7ea3300b3e160d5aec7b *man/print.anova.rma.Rd 0e6d85ec7389310c554402e1422058fc *man/print.confint.rma.Rd 440b4872c93ac789066424c2f49ef58a *man/print.escalc.Rd 25cbac616fcd38dc9ec73ba24e7f3b5d *man/print.fsn.Rd 24bf1e82119b5471bf37d1867190989e *man/print.gosh.rma.Rd a02b897f8d75454f2d9bc15ef3dce6f2 *man/print.hc.rma.uni.Rd 65ef50f36bc3733465dd0a9dfd020074 *man/print.list.rma.Rd 944c7802ac5504a3ea8a8ae3f50bbf3b *man/print.matreg.Rd 7d4bc51baf5a78deba0de7bac2f99038 *man/print.permutest.rma.uni.Rd d8bd7eec0bd4b85669c1f3f8152d5d8c *man/print.ranktest.rma.Rd 9f22e7d2bb2924a4442da4db70b622c6 *man/print.regtest.rma.Rd ba88682ea7b7e5d48c1cd23b9323749a *man/print.rma.Rd ca9b4a968522638e58bfe67dcb7d4be3 *man/print.robust.rma.Rd 77bda5f865a18f2cedbad1585603dbb7 *man/profile.rma.Rd 15b377e942d9bafefaac15dbc448fa7d *man/qqnorm.rma.Rd f4c18c807d5e000621e56f92a61a7ed0 *man/radial.Rd 9a5f82cd32aecbab45c1d4ef88c9e4ba *man/ranef.Rd b0cb61e45766246481d6be9428b3a09d *man/ranktest.Rd e2826111495e4769e6a20bde03ff4aea *man/rcalc.Rd c8d41f1e53b4c4f56af08231898cf2f8 *man/regplot.Rd 1ef047422c9ce720cc4e63e541ec9344 *man/regtest.Rd 4314a55fccffd5d9aea92bc5748c1273 *man/replmiss.Rd 5f7716c5cf631a863db54aeb0bf5ebe8 *man/reporter.Rd 58e106dd5948ac484e7e886d58d8a408 *man/residuals.rma.Rd 95a76b07cf325c3c65e4c61e5e76f020 *man/rma.glmm.Rd 0bfa4e6058b541b62d43617f141fc5c5 *man/rma.mh.Rd 0aa97fa00c98e77589690bc5ad7c8166 *man/rma.mv.Rd 78fa92d72a53351d7379e3f57d4eb276 *man/rma.peto.Rd 5bd330b5d0084771f5b5f388a186b8d8 *man/rma.uni.Rd 
c1c139b80e6075ab27eeea9896b7f181 *man/robust.Rd d4da02082e825bb6c3b3b7daf30fa382 *man/selmodel.Rd 54a77233a1cd1453a993e9a36997b249 *man/simulate.rma.Rd a7df414bc6dcf5cf0dfd8feca2abd6a5 *man/tes.Rd fbe4a0ac4aceb5040278a56fd7b7abe8 *man/to.long.Rd 713a124d2f0cb6764ecfc0946d9629b4 *man/to.table.Rd f3953c9e89b1a98903191b1e9270b463 *man/to.wide.Rd ab181663962bb8140cd5e6c65f40b0ba *man/transf.Rd 5df42097f568a647ddfae4215ed69b8c *man/trimfill.Rd 68e22e5c9954a5dbf7e7817714c2c0e4 *man/update.rma.Rd a2d7bb3493e6137d2ec658e4fb34a9db *man/vcov.rma.Rd 24c2c1bc11abd86ce44b647db4cd83b9 *man/vec2mat.Rd 8d444a6fda9a2c1070e1faeb1423778d *man/vif.Rd eb928fc8ccdec65262b570894a564d68 *man/weights.rma.Rd df74e87ff286619152915b3b02417856 *tests/testthat.R 774974aa4b8e559c464f3b307467f3cd *tests/testthat/test_analysis_example_berkey1995.r 16ce1fc4340567a5f1e612300e3a6bfb *tests/testthat/test_analysis_example_berkey1998.r 35b3d03e4cbed5a7024e12050d037a99 *tests/testthat/test_analysis_example_dersimonian2007.r 601417c02f38c4689b2fdc2105b9f5a5 *tests/testthat/test_analysis_example_gleser2009.r 74cda09a156e68e2cea18f6ee933cd71 *tests/testthat/test_analysis_example_henmi2010.r e091f25787cf4800d0ecf8526c5240fa *tests/testthat/test_analysis_example_ishak2007.r 5a0aef65184cb35fb412986840a38e22 *tests/testthat/test_analysis_example_jackson2014.r c2da191a8bb5296d9a74d08844513b48 *tests/testthat/test_analysis_example_konstantopoulos2011.r 5cda287d86e38b79b75b0a566d527c85 *tests/testthat/test_analysis_example_law2016.r 65223b699e25740323db827985a83cfe *tests/testthat/test_analysis_example_lipsey2001.r 6100f6a3911d0a92d905d447d1dd371f *tests/testthat/test_analysis_example_miller1978.r bf10d12cd2c39ef3a47681a391a357f7 *tests/testthat/test_analysis_example_morris2008.r b2cd8e93abac75b5c83afa31d52f083b *tests/testthat/test_analysis_example_normand1999.r 09f4b29e2b4a214e64bbe2e06226f2c6 *tests/testthat/test_analysis_example_raudenbush1985.r c73ee62b8399f62b2f784e92b10f922d *tests/testthat/test_analysis_example_raudenbush2009.r 4fed836247b7e8a616988aa5eb2c981a *tests/testthat/test_analysis_example_rothman2008.r f63a841b9ef50cab041063fb57be8f8a *tests/testthat/test_analysis_example_stijnen2010.r 0e3fa872264df115be2b3cdab42bc6c1 *tests/testthat/test_analysis_example_vanhouwelingen1993.r ac9cbefc3a30e3d8bfb77729ea7c2f7a *tests/testthat/test_analysis_example_vanhouwelingen2002.r 72177b5c19edb4b922a5eb00c6fc9a88 *tests/testthat/test_analysis_example_viechtbauer2005.r 3fb79e89089a916aa1a7bb2901473471 *tests/testthat/test_analysis_example_viechtbauer2007a.r e116529dc58fe341513edb4e907abb75 *tests/testthat/test_analysis_example_viechtbauer2007b.r da636cebad44a6526656dbfe8a428bc5 *tests/testthat/test_analysis_example_yusuf1985.r ac52cd866e2ea3d8675f4a016b95f8a8 *tests/testthat/test_misc_aggregate.r 9c2d677f9eb5124a801801fff70fe25e *tests/testthat/test_misc_anova.r 975e436a6681572c263eeb4a6259102c *tests/testthat/test_misc_confint.r 0406390cc03fa6737b09442b8e3f8775 *tests/testthat/test_misc_dfround.r 5d8104f48f7b769ba966d8fd3f418df5 *tests/testthat/test_misc_diagnostics_rma.mv.r df9ce17390476393a16bf32d3a529e11 *tests/testthat/test_misc_escalc.r 4cf2a68bb3842a44b53134ee5455d616 *tests/testthat/test_misc_fitstats.r fd390d2bf6e4e790fc8dd8850e6ec776 *tests/testthat/test_misc_formula.r 8c3290f3e166686edee462c12c34cafc *tests/testthat/test_misc_fsn.r 29763fc01f089ef4c22366219c844dd6 *tests/testthat/test_misc_funnel.r fa98deaeaa4ea63a6c31ee5946e69642 *tests/testthat/test_misc_handling_nas.r 2d38235ce2b3f9c03388af2525d721d2 
*tests/testthat/test_misc_handling_of_edge_cases_due_to_zeros.r bb2cebdc779b0d0407c0c1abddb250e8 *tests/testthat/test_misc_influence.r e97b8398bf329c576aa22d1c3e14d532 *tests/testthat/test_misc_list_rma.r e82a16374df51557d98ba26f349bae86 *tests/testthat/test_misc_matreg.r f2be42003e99d4e14292a044634bb439 *tests/testthat/test_misc_metan_vs_rma.mh_with_dat.bcg.r c466bfebd65e138038d0ea6e08540592 *tests/testthat/test_misc_metan_vs_rma.peto_with_dat.bcg.r 0f1a3b5317d7c4d44d0a6411ca33180d *tests/testthat/test_misc_metan_vs_rma.uni_with_dat.bcg.r 8a7a49fd09ec264c2dc0578679df7677 *tests/testthat/test_misc_pdfs.r 6c1c2ca006acd29c1b471c64a5df3d35 *tests/testthat/test_misc_permutest.r 45508df8b64d0db4ab67fe7e11ac3d4e *tests/testthat/test_misc_plot_rma.r 008ebc2e130492758c217d3fa3a5f017 *tests/testthat/test_misc_predict.r e3929b37c1f8f8cf1e6a72329219ec1a *tests/testthat/test_misc_pub_bias.r 53dc7172661268681b8d3a9f8f497acc *tests/testthat/test_misc_replmiss.r 8b3d2d23a0d836c004315701aab2a1e5 *tests/testthat/test_misc_reporter.r 2b805e660496f2a91055ad81d1c7264f *tests/testthat/test_misc_residuals.r 390854f70a9949e7e3e3c833ddb36173 *tests/testthat/test_misc_rma_error_handling.r 221426dde02a528074f2e419a8335687 *tests/testthat/test_misc_rma_glmm.r 7aa221fdf4930ffc5c68020f433947ea *tests/testthat/test_misc_rma_handling_nas.r 784fc6697dc720ec7af2fc8bd2f71a16 *tests/testthat/test_misc_rma_ls.r 13cdc5f416a00d0166224af55c7da09d *tests/testthat/test_misc_rma_mv.r 6eb88b3388d785bab14eca11e51723db *tests/testthat/test_misc_rma_uni.r e720c3dd4a59e25d3305e4ccced6dac4 *tests/testthat/test_misc_rma_uni_ls.r 77a3daaa4116f4586a0ae923fc008d83 *tests/testthat/test_misc_rma_vs_direct_computation.r d821dff143967b1d3e63df3030fc5691 *tests/testthat/test_misc_rma_vs_lm.r 7fccc90c29ed0f8ad8a633e9d9318312 *tests/testthat/test_misc_robust.r ff880bbc7e7d24335208d0f539ae9773 *tests/testthat/test_misc_selmodel.r 8d445c07b06aee3ec2439d0a59641778 *tests/testthat/test_misc_setlab.r f133b0f4131a6dc4213c980af40e585d *tests/testthat/test_misc_tes.r f3c3cae6cb012d115cb3b1daa6b65d68 *tests/testthat/test_misc_to_long_table_wide.r efe61264baaffde7dc74b66fd1f0bb3a *tests/testthat/test_misc_transf.r 86e392af827239bc5e3e2a261c09111b *tests/testthat/test_misc_update.r d975cc774cf3acf6eec4009172250c69 *tests/testthat/test_misc_vcov.r 75ffe6df86de89e31dce998226318d97 *tests/testthat/test_misc_vec2mat.r dd89f70854db6e5b231e0c78afaa9c3c *tests/testthat/test_misc_vif.r 8bb178fd3402e9fe0d1a2dbfeffeabdc *tests/testthat/test_misc_weights.r 3b58055b2331342fdb9f391398b25049 *tests/testthat/test_plots_baujat_plot.r 3807af72659ef02b3d4459b5a3cec0f8 *tests/testthat/test_plots_caterpillar_plot.r 4b62d008bb1e2407b9838f489bf246ec *tests/testthat/test_plots_contour-enhanced_funnel_plot.r 5a381141c4f50f86f9cabf8c4eb2494e *tests/testthat/test_plots_cumulative_forest_plot.r e7ecf0115feaff7f7e0f710648033c3f *tests/testthat/test_plots_forest_plot_with_subgroups.r cc4bce21f828057b1313bd2b47779ee5 *tests/testthat/test_plots_funnel_plot_variations.r edd60c6bda17118e97221101222a1941 *tests/testthat/test_plots_funnel_plot_with_trim_and_fill.r 98dc54ae9c8c8d2a6137136fa7e99197 *tests/testthat/test_plots_gosh.r 1e45d74c7bf65e01668d8abda5727f2a *tests/testthat/test_plots_labbe_plot.r 28f71f22f0ef752676b2f96923e0057c *tests/testthat/test_plots_llplot.r 4c766d4351a155ebfb94169771d75577 *tests/testthat/test_plots_meta-analytic_scatterplot.r 3daea0ed4567f818d54b470a4024c89b *tests/testthat/test_plots_normal_qq_plots.r 041c7256bfc4903e60aa8bda8262ecab 
*tests/testthat/test_plots_plot_of_cumulative_results.r 2b449df881fdcf729819d92a3585727c *tests/testthat/test_plots_plot_of_influence_diagnostics.r 7bfb64311acb37f15b66540395537e9c *tests/testthat/test_plots_radial_plot.r 03a91cbd4728361d4e7a0b84af1ac62a *tests/testthat/test_tips_regression_with_rma.r 4d1895200a4f7556253f163497fac5e2 *tests/testthat/test_tips_rma_vs_lm_and_lme.r f0bba91e3b359e9d38dcf47c0fc19a82 *tests/testthat/tolerances.r b438c769739c3d868cfd5766f72f7c2c *vignettes/diagram.pdf.asis b22e3397f4cea09c48a2c785ff0cae6a *vignettes/metafor.pdf.asis metafor/inst/0000755000176200001440000000000014060132612012657 5ustar liggesusersmetafor/inst/reporter/0000755000176200001440000000000013713320160014522 5ustar liggesusersmetafor/inst/reporter/references.bib0000644000176200001440000001706613725417530017346 0ustar liggesusers@article{begg1994, author = {Begg, C. B. and Mazumdar, M.}, year = {1994}, title = {Operating characteristics of a rank correlation test for publication bias}, journal = {Biometrics}, volume = {50}, number = {4}, pages = {1088-1101}, doi = {10.2307/2533446} } @article{berkey1995, author = {Berkey, C. S. and Hoaglin, D. C. and Mosteller, F. and Colditz, G. A.}, year = {1995}, title = {A random-effects regression model for meta-analysis}, journal = {Statistics in Medicine}, volume = {14}, number = {4}, pages = {395-411}, doi = {10.1002/sim.4780140406} } @article{brannick2019, author = {Brannick, Michael T. and Potter, Sean M. and Benitez, Bryan and Morris, Scott B.}, year = {2019}, title = {Bias and precision of alternate estimators in meta-analysis: Benefits of blending {Schmidt--Hunter} and {Hedges} approaches}, shorttitle = {Bias and Precision of Alternate Estimators in Meta-Analysis}, journal = {Organizational Research Methods}, volume = {22}, number = {2}, pages = {490--514}, doi = {10.1177/1094428117741966} } @article{cochran1954, author = {Cochran, W. G.}, year = {1954}, title = {The combination of estimates from different experiments}, journal = {Biometrics}, volume = {10}, number = {1}, pages = {101-129}, doi = {10.2307/3001666} } @article{dersimonian1986, author = {DerSimonian, R. and Laird, N.}, year = {1986}, title = {Meta-analysis in clinical trials}, journal = {Controlled Clinical Trials}, volume = {7}, number = {3}, pages = {177-188}, doi = {10.1016/0197-2456(86)90046-2} } @article{dersimonian2007, author = {DerSimonian, R. and Kacker, R.}, year = {2007}, title = {Random-effects model for meta-analysis of clinical trials: An update}, journal = {Contemporary Clinical Trials}, volume = {28}, number = {2}, pages = {105-114}, doi = {10.1016/j.cct.2006.04.004} } @article{hardy1996, author = {Hardy, R. J. and Thompson, S. G.}, year = {1996}, title = {A likelihood approach to meta-analysis with random effects}, journal = {Statistics in Medicine}, volume = {15}, number = {6}, pages = {619-629}, doi = {10.1002/(SICI)1097-0258(19960330)15:6<619::AID-SIM188>3.0.CO;2-A} } @article{hedges1983, author = {Hedges, L. V. and Olkin, I.}, year = {1983}, title = {Regression models in research synthesis}, journal = {American Statistician}, volume = {37}, number = {2}, pages = {137-140}, doi = {10.2307/2685874} } @book{hedges1985, author = {Hedges, L. V. and Olkin, I.}, title = {Statistical methods for meta-analysis}, publisher = {Academic Press}, address = {San Diego, CA}, keywords = {meta-analysis}, year = {1985} } @article{hedges1992, author = {Hedges, L. 
V.}, year = {1992}, title = {Meta-analysis}, journal = {Journal of Educational Statistics}, volume = {17}, number = {4}, pages = {279-296}, doi = {10.3102/10769986017004279} } @article{higgins2002, author = {Higgins, J. P. T. and Thompson, S. G.}, year = {2002}, title = {Quantifying heterogeneity in a meta-analysis}, journal = {Statistics in Medicine}, volume = {21}, number = {11}, pages = {1539-1558}, doi = {10.1002/sim.1186} } @book{hunter1990, author = {Hunter, J. E. and Schmidt, F. L.}, title = {Methods of meta-analysis: Correcting error and bias in research findings}, publisher = {Sage}, address = {Newbury Park, CA}, year = {1990} } @article{jackson2014, author = {Jackson, D. and Turner, R. and Rhodes, K. and Viechtbauer, W.}, year = {2014}, title = {Methods for calculating confidence and credible intervals for the residual between-study variance in random effects meta-regression models}, journal = {BMC Medical Research Methodology}, volume = {14}, pages = {103}, doi = {10.1186/1471-2288-14-103} } @article{knapp2003, author = {Knapp, G. and Hartung, J.}, year = {2003}, title = {Improved tests for a random effects meta-regression with a single covariate}, journal = {Statistics in Medicine}, volume = {22}, number = {17}, pages = {2693-2710}, doi = {10.1002/sim.1482} } @article{morris1983, author = {Morris, C. N.}, year = {1983}, title = {Parametric empirical {Bayes} inference: Theory and applications}, journal = {Journal of the American Statistical Association}, volume = {78}, number = {381}, pages = {47-55}, doi = {10.2307/2287098} } @article{paule1982, author = {Paule, R. C. and Mandel, J.}, year = {1982}, title = {Consensus values and weighting factors}, journal = {Journal of Research of the National Bureau of Standards}, volume = {87}, number = {5}, pages = {377-385}, doi = {10.6028/jres.087.022} } @incollection{raudenbush2009, author = {Raudenbush, S. W.}, year = {2009}, title = {Analyzing effect sizes: Random-effects models}, booktitle = {The handbook of research synthesis and meta-analysis}, editor = {Cooper, H. and Hedges, L. V. and Valentine, J. C.}, publisher = {Russell Sage Foundation}, address = {New York}, edition = {2nd}, pages = {295-315} } @manual{rcore2020, title = {R: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2020}, url = {https://www.R-project.org/}, } @article{riley2011, author = {Riley, R. D. and Higgins, J. P. T. and Deeks, J. J.}, year = {2011}, title = {Interpretation of random effects meta-analyses}, journal = {British Medical Journal}, volume = {342}, pages = {d549}, doi = {10.1136/bmj.d549} } @article{sidik2005, author = {Sidik, K. and Jonkman, J. N.}, year = {2005}, title = {Simple heterogeneity variance estimation for meta-analysis}, journal = {Applied Statistics}, volume = {54}, number = {2}, pages = {367-384}, doi = {10.1111/j.1467-9876.2005.00489.x} } @incollection{sterne2005, author = {Sterne, J. A. C. and Egger, M.}, year = {2005}, title = {Regression methods to detect publication and other bias in meta-analysis}, booktitle = {Publication bias in meta-analysis: Prevention, assessment and adjustment}, editor = {Rothstein, H. R. and Sutton, A. J. 
and Borenstein, M.}, publisher = {Wiley}, address = {Chichester}, pages = {99-110} } @article{viechtbauer2005, author = {Viechtbauer, W.}, year = {2005}, title = {Bias and efficiency of meta-analytic variance estimators in the random-effects model}, journal = {Journal of Educational and Behavioral Statistics}, volume = {30}, number = {3}, pages = {261-293}, doi = {10.3102/10769986030003261} } @article{viechtbauer2010a, author = {Viechtbauer, W.}, year = {2010}, title = {Conducting meta-analyses in {R} with the metafor package}, journal = {Journal of Statistical Software}, volume = {36}, number = {3}, pages = {1-48}, doi = {10.18637/jss.v036.i03} } @article{viechtbauer2010b, author = {Viechtbauer, W. and Cheung, M. W.-L.}, year = {2010}, title = {Outlier and influence diagnostics for meta-analysis}, journal = {Research Synthesis Methods}, volume = {1}, number = {2}, pages = {112-125}, doi = {10.1002/jrsm.11} } @article{viechtbauer2015, author = {Viechtbauer, W. and Lopez-Lopez, J. A. and Sanchez-Meca, J. and Marin-Martinez, F.}, year = {2015}, title = {A comparison of procedures to test for moderators in mixed-effects meta-regression models}, journal = {Psychological Methods}, volume = {20}, number = {3}, pages = {360-374}, doi = {10.1037/met0000023} } metafor/inst/reporter/apa.csl0000644000176200001440000021037313713314420015776 0ustar liggesusers metafor/inst/doc/0000755000176200001440000000000014060132612013424 5ustar liggesusersmetafor/inst/doc/diagram.pdf0000644000176200001440000110615114060132612015530 0ustar liggesusers
[binary PDF streams omitted: diagram.pdf, titled "An Overview of Functions in the metafor Package" by Wolfgang Viechtbauer, created 2021-05-02 with Microsoft PowerPoint 2010]
7"ۨsĹ]~om0H,T53*kwK`"jk4~w̝׿z53\*{;LC5؍hZEʥB|u> "ko;tro~OƇ-H\twVQU!77ҧi]wGʨvٍNiO'Dyxj Ş풘F_~GҾyTEИdԿ8=\z[izKpHoj1雹}ώ7oG%0egA C݆̏:63j^E032c?Foe,ىݥPڅ==GO͖lj]M>K|owXkβ:*Ɍ~e.-OfnwX/|w=Oagᶚޫ!R[ yPwy MiB,f4J z]ERk-Ը to4Sh;c.uГ2rrGoV- V=5'toug;\Tc U[>9ݿ4oe؍X_FTޙ:|Bc va,Q|7=ܽ>}{v`ot/+>ub.[v[#@6F7Cumb7 OJ:Kx|W祫: pL@/k2׌ `nN*Ae#\SHvQLzj 9t]*̬W#tc);)nMv"ŏMe죚] [r6#GŽ=6nZs] ?i?_2[TuLji]G |39v;/n,~~*G/rf7 t{9ϱf7~}rvHp/3e,+}H{cCѥ`w哓 ͦV\3weZ${ԾHOZwt0JNՊUޡԱj6S5 ;V2sxw#UOt$e׏źqaV-:ެ +.>jsL[:gij\07_BY]`HVY^H}; ҋK7ZiW=?l+єZdo,IZy½~~߼jVț-cD.3Y]I{a?NԆ}{7o3wSxQ;EܐXsh^O1>G'j(>P|TCS  O5j(<Ր!TCS rO5j=ؒ=@Y'Xh,wTdzt&MO7n2?dacsݹ8`ͽ'Z#xv ""[!=o"B=}zlqU<|s,ոU |57=l47,=<" Odiw+~Jϟvzbgݦ-M Yt{Vo6mn>re88岚g9֫kOO_<˱h;Uxo◇Y~>^gv&_'fb:pǜlI?|bcnפD(kKNjS]Rn~IcI'l:X}`#:ے[^i=% :矤|[uxa[q{ÞdڱO#ݠ8k%~[G86Co@ڡğދa'7=Ŧn?+?V8P)ݟ/ 7 S[/?16voDykƹs= |yXc=5_9ٝJOO3-uw'-1],|Vl{C> yiEnv=} aV޼~2Fd݌zpZ\{}KߧNh޸O+ ˜,n>) q?7Z ;R^|M?A `zXj5v}a{&?wS].N_m\:wȺ=J܏j:̔ͻ578kWCLw?ofk5)Oߎ-z-Ɨ]~ >x朴|}+&nr|x{*Cq/uzI͋nm)OK'ǃJKwn~r,[o^+[]8#\KAӹF.E~zQ1.CmXWtqӟ[sƟǴ~+R#i1]`tדQ3ߕXGR8mcصOcc0S y-:޼; ꅯh{ jK~47+Lج,~x}?ll z93ܥ-U`{7[=۾:W_aU3p+>4 y-t ` imړ>ێ=q R&fSvEKSyxr{#b1`QAB>O:}xxxvֿ{pʋ'*G|ܫ /KIlxkŏ],]|nL8?9q23`D<b9,OAgAGVM<Ι%5΀vBPRI@ųDgo NHZJb hXmoO{Y\zOޠeG_Gp(Q@q8X#fDeD=V2$F3l*Cb*`;js,*fe}Bd;r4z*Sf-q|P r:xpg@t|*'/^ [`~Ȍ)zm㪃_fZe$0DuZyITK n&] Y(S:MڜَsyU@\$Z3AO.Mw.+͙6 R/%x”a?ZD½ c̰eiD:&/iOvjFcMwڏ&zw#mӴQ!So y4̓(D[A=!-sToans f0hi]NH `ϣ΁Fa`hj(e ek(gl6$*KژpQ-^bU*k⸒Т1T%}mtE,wV2О-Iom\8B k8\#8ˎo+PsdڬZRal|AQ_cՃQ7,Yi`#+tx&Mt-qf=a b(e_]I!h,U vAR6B1:* gx}IUWkj@wL1A.nj.ڠIG4+{yYr[»d 4V);B"C U 3]]/ =8mMKC-zsP̟&~P+BUZz_QKS拴#ދpF T6~Y!5FϔQT%oZ/سqJ{0+ajhOx$֛ i\ oG\TEgtV=66muEa-1Q1+[6>P@n Z*k*F0Y YSYݞjaIHj !.(?7_d G@cCR}e-j+^Vx ϧOxk%i_j ݮn^c};wפit;ue/,.8AEBgBƛz9[^ò1t(@X f (Eَ֢<U沑 8-"B|o&% |ךyfإJO3l(\ח(;DY"Lv/H x/`|H(44+iy<)+YMEi,Cm!eyAP͛6pY2NG^Jgea3vZ gs8,₨cice0M, r`u.SMl9G(ɻ>!N'K̛DD!PX챈,mHԣW0V, [;% sQ91QE G#-a֣~-aw@*iT (]WYn؝fվٻ~mͻ{giWɅ, ;'Sh/K N/':bGrmTQf))2JJߤvioPaF^Vvjw]6]txE%M4AIڪd( ]j3jo7Y%`Hc͜G!;Ůl^R+$6CgPé?Ag,ץE ;hPҾqNlI+N8(;\Ȉ"G)xs8}4[hMNH}@;S:rg?G#1HPo#Qe=pFo]^]X bA/~h9.!شint|Ɠ}`Jk@"-0w‹GNE/û^FTUr5xC.ho& )iapӸFFM/(jX57'bi##`I@Qل,kTΪ .mXQ)ط RRTPR՟3 6 ]Y 2E:Q&Ɋ`1ލQg++r{VF"GQWuŠ@MF]~"焽p% hx[eTxEH#0 ZW ;C\wk3aez`Iʗ2s n;"U v[QxU цzkzʺڔǏ>SQ$s%~[4px;T[_Vi%/>4eZ魇$- 1$z"7$i 6PJ@Q^lbenʰi5PJ@ ʒޞ%4/)Ϝ MT7.%Z(]PQlq%o}>x5)d/ bJYS̢D@ST4;,?ӧ~<d'rGCz1a8Q֊eFC ^}wˋ~uݒ77~vxG #PRAbDbcF#s"2'@rEV󝤔(K=ߥdYu?=t'݋Ƴz3WOp\acz猿O)2EeX#Y4O ;O;p4@0Drc*Yj@C(적C$P mTYz ~7So;DRoPYEX֭6쩚Ch@*ʊ۰7:p|"x>Q}t"`gAFz/S_BI8Wv8WlH%nJ[ժճS.@E[쉕Y/6N8XB8+F6j }@. D sT!HS¶4x< p&mDzвb aBI%H0U ʶaHû9 ՙ@)иCPn+٪1\~,*XPIz:,Id%F{`9sڞ*` OQxOa'B6u@X y@X| ®&#`FDp3}/@iҠy5heξV 1[XI/3Z'΁ނ&[Ha@VI<%zTrȲ(ɧ=b/87H^[o9T'^1Ú!TZX]^q/n4} v+PzJfCQ E}UʏaAY&Df<85rtAE#'e5!V!BR4]VfNJwjMmk(v(#P@Ue0Z.lF՚.4@đDH5)A+%!l;헹v t'q!왡Op+U*FHZ8]:PG( yH Į5^u"Ƌd͹v`CSOp)ԠG, o6yAu^I GO)ݛEw m~\ǭ%'hRq F=J@Vmy=؉`&Y6j5EGm[1YY,(8)+NNV;cNG~!'_P=o4o@2tXy4H]OOpeJ!^wF%| }ǜ6l( ͈#tRE硥(5ъ=g~I\Aplک >ڒҲ;lc,[;ƑaQ;'-b5Ϲi 8=@I4!8Aְgjsԇjʂ616s!yz7bjj^wK\jE!Jp3cnf&1A1HM@n'.Xɔv#V gMЉlkON`My=982]A:P낣p*Rz<êYWՂEA4IVx-so6zVA ^ʀW9eev39aq{rO`^-uX3tN=3rcjip3z],~` k )u7Jº"289t$=\ 2-?sB1KUsT2(cW,\6 x# xl1]9^-$Df%V+<:gwY\M f bqySļ1g;XUj%Qa1s؄̋]?)z6zAo!bJ&u_I=昇ϔ8sp5ée4bh D6OҐE,Z ,|p0I%)~C㺚j),I8'AXO@MqO(<2"ICVlq\6WHY莲wDEe ]|$+E;G8@omV2&i~t/||~Fi 9[PiɹnEN}l~ NTFџ] FՕcA -`7Cxz!G_4 Zhťlf)Os(=":[\nAqkfƗyz2U1fY$nL9ABO޾qkMh1lSOiG4 iQR,9B]ߒ$9 9&!lFW$`K D4h 2/|g˂6"  'UœG |p%fY^2+f8?kTccQN܊Ńм(冦%B3F +l^h/"4~8KzӃN|E-XYo)mPqwlG#z!/"Yopk X,cY֫'+k&ߪ8Z%h1TeCجwTNJ )>*B=%Q"P6QU P]}(J#i`)W3`# d*Dq,b`^e'=zgYR8-hcyZl}hw1,#-DdtVlճs ~.YcuR٠^|hOL/.-O-L> g 6(c3zrx Ϭn= =E3݀_mov7!0֚ T hosK|x*ƓG=~v;? 
E=/Oout`E[r^+,V}3sIXgXG=홹{,>N\%, fqxQ9~hfqp{x`GCL ; :ϕ`sZ-@\3 Ge^f$BQL5l{ѿ e}K\X(R+0͠cV_ +9׵`uLh+ܲRH\'12%ǩְ1Fϵ+"bC?&pn9:ssuvfJ48HVO4ję )3 TȩahGeVoQGRvz5T g fxsgxPmcleiԄ,kt2?,,\t^m K딠rͪ1E B-ʟG ʮ1/3C[| XC3l/Dbu6f㪅? r,d}?s{覇Ȝ8i|WLl&c} 37hs,*]2ukmIS+y/i5KE|\iF9 ٓ B!J7?/# QP*12%8YI\9̯PHr'/LM@`6JbeJ|,fL0ֈS3!eo+p GDc~S†?n{[Tb1wl9\QgVև9df 1T'v3:3%{ {']` niM܏A7ညz恙wWm.ˇGwjӖ3ؑ$٩мF由 Bu~]R傪]z WƙtO{%lo(ll~[t=ClZ/"8P ŬAQ $@x6 ji!+LZű6#*##/V=|`aŠ<ݪuFI>(@cqτWmeӊvrؕkE|˒p$r8q A 181Ί@VNy!%u*~l=XYe7'lv;Qbry6gJ^xFcZWyզ94b, s [bY-ڜxmHpj:%fU%Ӛ}Bk9 U.4殠ZmZF̖V5  ];ʮi3u[ss0ϛd1'r8(W>veOhsS43s=5+My+LK4⸊ Cuc6YR--1Jsoyf'Y@׶,XߗwK;66k(Yԃ0VB|_ϏT"G"k 6zxĎ»)_hzVsS*Y܃wUQdt؀d%)oJeVtX?zQJz4]ۨQ%)XA%ҥ$)zD='JwsE~c8Q\QKx7GJ9ܓ<$JG.I+; !}vIyH}ښ2G'a:JL>E.ݿuY4F/Jnfʊ>}1^6R#C{/#f/ F݄4H;$yƈQ@kփ6M*WgQ“f121^ʲ(8Smi`$.+\h #".ZZΝ 䊠7F!cVn f0*:3X(RQe}9wXAdyNCyD;AMɹ߶^ rاĒ%@/N5p[AZV?K=ڤ޶hgA_[!XF WiW=V屆A:TQ=W10uc`3su{MTrnpmi_znjV^Zj& fV: 13܋A/Cav#VE' QO%1mJfd Ii%뒫%-!cvL{[9 zqRǪw<^bq)VygV+˾P&aC[h :Pd|0J@c_w8w"b|m, <6Fl*qd3ZfD7=JDY1i8dzG߰+bO5ᱻ*∘ǽoU&#<~60"qC,ʶආbj.1y";W Am܃71荠+"P\S=j6 +J&e}RIw8{pK')UPS~rϱQ6\L9β_V;B`5p{&jðIpc`=V[#&'~/`Cꣲmmm8A݆\5atߐ饤=v&$Wܜ`\&#=PkqSIN]Ib!mHWDǝ&w :'/"Z)HsTz uטzG$R>%z91c(w,̷aVsaj>{`;Xr38k90+Zg JgcDǢ9.p3)X[JKB?li|; Lͅlce.?Ʉ\m߰ vr쫽j(.4.BcbB= =+p5h+q]XT6m5lr[IUU`S ۙ#0oL`F '4\GeVT]`cZ@~#ᇥx6)cù&5JNfH|9֗~ժ&88pkhGOQq@#+3 O!<d.c~qR7< Bb!1艜K;u_M- Jo(Q%XI z4T'N%QSx6үE"O5H+:h(.%URk c n:nܗpduH;g'p2/Dp^E ǒV+vd2rLa;kO{@+*"2~W Tj-/@ϼϡυ^XuȞz:Q5` `(iIǜ Ù >fl`eUS>6aeUYYgg8d -F_t:w'B4,' ޙ|+N%;隅쓾DBrȆYS\RI2<<C¡Iaf&W$`/iəˀ!ޘMj̍&Ph݅.%#b0Z~g,mwg=GiwP[Sސ|O۸lƕ'=Ɍ}̬Sp@Ctq8yJ>mR/;UQToj#j#)V1ys]+yP$+PCMn{kz,܇]`ي2/hiO btJ@:I8?/f3i4M[~i:?#M|TӦ3ܑC9 1967/i3109)8Jx6 m uZ7:FC);eyJvঝsk]k3PrÙ|N.{-f63ÎBWmiPI/6e"O3 nyʒ=Gγmǯ}N?$"O~ YUu2% #]R5Lr[o2亪^JKjV"4 h#,=vAEeFu~ɫ zZ4{."֓ 'F,jsNۚgndG.3S a|ih !b<_^bȃ ahp)Z- )f3yb&p ]U9J^MeGMb(RVT ez~^ }zW M[j*]3MԎfGp"pi?EנӼFooP#~e$b&ZS& s˫F^vu=~m/<9%Jy\*+$px><^6j-|@ȭ]\6>-9t,5lYW{A&gY[Y .d4FgZ8,O-+8 i]}3=7xi6/UO۳^dUTGVOQ邘{He|9<郝8РsHl0.r[}r[׷j9rUҺbc}Hw?KO޶CL.fnWx22H/L':zo,H d^\A=7^Ak/l$F7 4i+)Vh+wL+UB ;j+<aV,ҨV3uT^8ܰ I6>Vx ~%6 SLeljTw}SxY\ g_stP&K i

ڱ^s|i|{ ů!{(:t \G{(cy!JCoZ˔poFC^>!NgvkeNnJF3w83'IK_. u2(ocƓx.Z\2Q,l_j.Ƴl.k(LsWU(~*;AVi7FRNkKd`HKyN6>!ԏdG 7 oȰ*G$iי{zUZH{[oq[+- G78ևGi=Z [wK~|G \PAJdQl$%t <EeAlyD RI^34Fs}g0 V"rjU[*Ɉ='‚Gǻ<&_z5i3Tɡ lIPh+ܖcDln^WᨶeyѺ<k*)*zpFbji16H Tݺ*~ۈK?ލݶ4Jg#h4KX?e5]IcD> ~d*=󬫬 0C!1!]*riKZ2 =sP ܖ@JRo[/s(9ZX1|ǺzxnjjCKV״k !m 1DgGwdP0A(1C3t ߬R.m|(1UX;&FV5O9fyr2N + H COTKeG|w?\a1E y,4O^޲RaĈͣȢj{15KyIK[I^/x_Ga`^z+Y J4۲Z(]Ԗ]6Co>b}|%}锍d#( *) knP5c((VJCԊ[>"˪} ^#+ZU/$֣-.b$ڻ>`9ϸoaGl$כ/zQt+mK<]8+>P+W<+&QlqcxZg/Uws9"7Gan|]`TuMh[Ci\SE ('$t,tޱ-whtk8ZzAWह:{-z$"T~Kۮzkm\<atjyA7߁6͂(Cpԑ<# 7[:.^^yp&]k_ 8x6]R]nx}gjn9zJiI[.ibVNK-l҇%$y8Bi )c)y,r%/ʴTLY![*}$[@Zj!K^y?1=id1ݘjߟ `E础GFp\khan7pش4^"&LqzڸM906LOh#{fnF)[l0q6n;./砫/xA] 䯿 gZ%q[l;SCb`ɣ~p\l& QыG AINB^k[뇰\3;ow= hh(\rPCQu*mpp 4ܶ6E T~PSC=ݺm0D!RvG ;ֲ{I`iam*Rh>Sآ5. 5n} (8ӅQC:ZkM^A dOXIk$>Mr]NGe 歉:LBAԝ/Dy|:) 7hd;|z~9CAj|ǚ>o)!2Io(x*gy\oEr<'s>W7d *ծ7zWpPSHYL|\E행۴;~w=Ϛ#$[l);kz,BN,N/4p`"ՂʜRdY[;PC;cc/z7UW_8}u {!T 1PEKӍ31rʿNAqgKh\-ׯV E4ঘDYG3a8MT(${~R,5Sr"3s4JƬ_Qs+[k6оsb!Ki2s&b:4onL "ʡ ~3TjYh'ff7f.~;@룠_5Gʔ }2(3 .X3+^vM]x,5aeoeNƞ8 Lj虒]228#ʝ:"Ls[i|5(xރ4>?E!jAJ3so,bͳ!ZP]+F댕4x jC0}L#^XϡJ{T*GI- mT6 ɀ=N7RVt}jJ~)Ifm]$3@=#IJb!r27_ /_b)?kL.<>w8jVѥL#}NLg~0LQpL+. /eG{RN>Q\VK t+JNz)W8STS69:g֋s]x]o5Z\S͏WtDJDd`7_@nSLr7Ohšʠq Y9mS8)ۍ6w:͓HgXwz^t!p.#%yctpLlCYtſK٠ź(yo~bn0/Zg*h ezw*jR rp^9u!2$Bes)GQDm:h3q[9s:=쾚$ڪQmSD8ai'mXzl$ʉ盪8\s4uJ'N]!zkm3PJ'>I]'7nc:gBL9@06QLؖ9F=U"y9`av]GE#찌z#ZihޱΦ#g}“̶$@HGfs H't}Ec_rp:ZCV|a;|5{{PMl:0CSJ䀺"<{:e*z,=,P׾d1z0K`)Í#B*i^se>H%tHs]j][ [2HV`~ '~:du=uЊJ;wk#]@3*4]𞅘&cg$8B1?}~Q2#})9tҭ1j$u妈&$ytab 4{(S=dZ=w%o /xD>ŹwsS祀\bY4I0[1yNY|{$Z36ҽ$rb^]g'M^ĥLH253W2Hf%|iv( d^;"F. 2qkUɭbKmUmjB32ˠީ\(U7C5?b*fWKNqua|e7։t҂nv$N'J=¸>[\R=:WRn:yUB3 eFYfޑ.:jwfO]Ss=TSL[iu<ŬH^b6 w'M CA.0#ֽ[>{`=gn=nxn7X5=&sim4﴿Y.?NwF:2FΑX'.j:&:e<7B P DٔXhQ8g^?Qp' sM"nX[*Iu2ʫr:5 ½ -Z^1-^ѿ bl*HV]FUXMߔEXrIn[[0Tg@N :)8/1N X, $8PHdrŝ3eWvM0д`nyKyZ{c΅2B~n|ZQej QU[n6]o˶ϯVągtxJд6f?:t}*Kс2%+-uqRv2wÅUOlU۪/TZhnqĞ}N*xQ+]˴g_73IzQpqut팏\h8l}z6tJă#7,ON;>UP\H橺ZO|zʂNoIp?# Se!#?!(q*L+(}}A*[ޖ_^xBGϾY] (^ϰ1}Zlmab,qNLx$̭XY-%M #8KrMB[fA+801hIF|ap"g?KW>o)(3z=S%4y Ha^tڏ/2gn[CW:+.9ڀ%{)]A#EeTֶźW=*Boy ~ѶT?p&ز}z|vϮꉋJzg Zc} ==UY:/>rF&}3-Ű>r8z9'bP}yO9ϼ먮q0_z>_u :{5s'pj쏏yfXldfuQYܗ$eh_Z}y݊/k kք,:~$2X|?g#imluK8Ŕg#8(/̭v>Mz̴S:8 |U]R4>I5DHgaG=K.? !=*+!,?H|~Kո~NY?GOx\<(= (mtϴtad@G1&#Vp1EU,!Y> !+T-4<73j2-֞4?[VrZC<0멃5f5wREb%ث?e]2O xƫ8\4v\ =x歉Z$6W/re]_\qtALxq0*~u$W"lC)˚38Jl əA[ߥPk6Ô[eAqSp?3b'k 8R*UfD~K֌f@Ӛ͠u9'4uQkE9ȲȘzҰg%|-\>9iQF P5Fd:qTl31uE")H0CPXu}Ϗ/*Gp*!pYꬦ*>JKI4,DDR O9J$VfZby]s1aK:H(-KɞO Qw2YJ]kxmmP1>7WSt2ݔ{\xhhZI1W,g(0D Zi& Հ34+"-8קG+0 g][{WhN.~pDO Adۚ:3.gkd@)e^%kjaK^3@_KY~I% e$|5|W@2 $Zq L ;o/w^crdFN`.o4gNr̄gJHPӕZwI~ڝ00ڑy[(u#ӧփ" Z)b<@a̠&;LFF#Xsi;DV]߬s;d۱jG/ -njz{jorSW3'߽xVt|CyѤddj8HUH*hn ;Uک}j@n+_GVYu[ٵUQF*wԬ%f)}\4P:_,Բ뤏 sanmK 1lnG߄=߻)m5U7;$K ,֤{?eʦ_4T\6޴̥v;dSCgR{'(JH8")>Evh 5\krt7]ob’<( ?'zxEtu4]6~zt'c󀬀fp,Η9ߴߌSK~iIaapLfhMq//) Ԫj)dJ = Π٫7o|IA/uV0) N,+1& n_LY%z9ݒf$˒g'%7H򚉲+NU(h3W,9Kyp<t1@Q2_ѣ/ܜL7O GPhs[K&*g71>j冀#zvc~1巤35ǡS{$iUFԵ|S5Ƨ۩Ԣ3^yǠ4$S{   ;,]t(c3ZH8mi&4l+ކӔ4uvhzrNdX%>:-_(v9 :sHk)[{wHNYM#v;lSk'wC?a=9{لlqI]BFF"ncZnkK{p2)vڡzi$ `GK1J ̵|o:[HFl)w#}k{$"\S[*_\ڊ۵gf"1j6rCud- G;\rhyKabS4y))q4w xkߙ\z^8:I*0VjRK H+{Lg*)`zճ|!}$MZ].7*Gv9~Is:Rq+nS7}Iᰍ-xx|qpdFs$߳qBM#Ȓ$c&_M3yta\֪01<0g:呭ACڤvCܦ㧁?i 9Me8h]6DzwpYui:k6^zִ~f~=¶ilI%n 'm+\Z!~C;j _ϽdFadz;yn#L$Llx|~2wq,֝VX%4A}ד£Q K9,_,tZ o#? 
Y[{s;HٚGV0`YAڿmѺ9n9UdƉ1+9+cku$Ya{Yg}80l~,QӲolH;ڶQ=~e00O  .`A9)cSIT(W~sML9Sǁ2)sJ9fDJxU]ooOxY2p\N:--|+B8d 'z`lX*<"_P|}Ļ>sPY̎`lYΗBD>]|..(Y3+RS+W޲B#dcxzJڃ_61 tE8xfr Rf@y_^3Y=+Phk6PuI̊h2E׬s$K֚`YAaܹ."qY7Vc \.(kK!s["/yAm+u:[Ϫл!08嚶Տeօ&҈S6Á֙E3'4sp9 c|:Ch,90[Wf_R>hC2Nhi4.Cڜp[t[RtD%M|)xVbEJc.+v,}J>8%F\2i{)~/qw[#k*I :y7B/ZRuY<N{4{/E5I{IL:#YͲ\7)YUfޒ(pǫ\m)1`z͞_)Tlh˦4qgS$]Vpj7Z8GcRf~QLZPkڒ&^9ܞ#6˓B=V6k1f]{|#h)؟|Ǜ_C)5<23+u AD9.3 X q}p9Bq)W+_ۙ8enN(|2we'y*Я>X7w,{oS 63_Yh-fY9}^Oмyxn~H;E yְl_E`x '֒{olK0wኂ|YH7__גT,-s4' ?h%i3W/Rf TrJ؂I,-V5֢ϖ<[vjZАYEL W( ~jxO j̚ntU>NRf*{INJ>+v1$ 0?sޠ< 6S-<1ZG>y)6v^ߋT,ʊ4zѥܩ5R@}cwb %Ǭ+ pi߽`˨7-!qbˏBpx}pwp FQ`thL0'>N%/t~@#БEj'')͟7dm -_f˥(؞LlgoSr^(lw( 4Sa RnMF໢p"*l[(瞥8^KفmP'(mT1u'7ʔi,3qD0~rdChFoԢi@pصKFp/9b(1^pEg '>E ųwtda V=2Ph|,䷸b9\]l{ƚm!簶uX60Uw53 >սkHײCPx\swVkJ섫v;AדpUemk?_T%n~lk !lJٴv1CՓA*ii5xk.FpVN%L.@sPc[EU^`Xw(6_ ^DW)ba^?7?(_e%)TStC,g*ogi)D)KE֑wxiYi~ᑲA$B3uRjF޴޵1!p$%ZPA-{3j.*Қ|q$=5-Qw />Is M({l7'"@I[O(qJx,i(B1V%y(M_x|ד?{⌻=%q"Z J$?b)-ɖ"=-~-bhٲ7R*y'!Mut  +â$|B)rU9WY<+lz?N!+TE?4&2qѡw'_D;b|}1P13:=BdhSi98z;`uH?or#YxN׋-ng?BJWe:|=\u\XU$#ArtN>+wFu Hzx1_#{4 mȒfliCeYn;-Rb@ "yM+QnſBk"bnW^Vز)0-/P-Ke-R%KK,bK=?{QtiǁڄYlI+>+K[u*y-|Fc"^YĽSn/kQ=Qv݂[vPHesh9Qhkĭ T?щZwzvIϮ.[ ܺ(]K#j?g;_((QFQGVR=RԘ?"sK[t"(jJ_Z ^;=RwZkmۋj~V[ҹm}}Ɋ7am:X۞m tBV@I0vgQ9,#"GM#Puv46kAݚ .OUiBdWcPocMXgBVҞ.lXJd΃l];T_۸Dk>Vf}v]Zp.TFieux³Z W$#[C4]rduEX- EEFZ~ۮ5cXVpjZ#;dc"uvklCOkO^zl,(I-)q] 7Y^ ٲd7,#e-Kj;sH98Vm[0l7T-x/~ZZvlÐܓR~y.fime}̂<`.|,lzx}Upk]{cP=vl-Dp}Xc?o`cj?,=fŻw`Cd\%Ά%yq9">HazFꅇCkMwUn (Het2SG'lS4R/KKŞځ su 3 Ѽ* UDMhٺ½^ijʙwuM3n\iONucLE6jqHl ԯ :8]&Of>X i %|=~HD+BHesaMw]^6qiY"s"q: ;nu8sdrms9' oƟ S>Tíoqvدv)}6 &wÑpM}1JNU䗅w+) دkq8|Q ٻ̆Cʢp[,FD]F&֡+6E<S^"׭3R{}f%{hMV..،Z?rokˎu^ɒKCY x˰Q%͙xcA~3?G^*v%.>u]Bm?eOMÿXsˆ,YwdH9_BqupiyƑclcПdmAHeu+Nf #u?[fv@WW]NhN}Ki!I\J+=jZ̲E򮬸PEۦq9o;_nwAS:qߖuQ$+E&oh]5J%:6=as'=y5r4a΢w3_=N|,Xt%Nl飚7(#nʙSШ)+Fe]%ic>k8P%/bϟe9x˒ϋ܂~>Y|~7gharOٟ(]%pH@)Y!$sI3->D8_ĨGf/bDZ3}Hi.֩gsn&X}pUļwC]v+;~z/D2lejT/bW!ў?g( ]{:@N{n{ruyϨQE!HMs{a;YlV6^5vpv;'uXue']fݧ3;Ή,EY#>&Nutj բ=SNx8^2c1mRս=ƈ^3)b􂻨 5T棛7n*匇*h~I fIl/-rm}3 ԺxCH/I[#]n%Z5W:vIT/n*Qt9oYQ=pղ90px 4^Jjg9l'P1*BI#}pMe -M( @phz9LNݫ7PM^/9󘻪_{iKNb{/BF]|poUh'8mԁ|bo'V,h}<~7@|hR]g\;ClyWF (9(@/zlz_]m|_ҽKPN/CokuBBWۺu{Mζ{ʨF>g pDƿOWnvb4"Z:]ʼ^ V~օ߾]kANs?{ zVA>S|k$ʿ% ˽Bty~,T>㛱d#Zj q.6d/Sq[/PUPʢdq*{4({פOVmWQ- Fu\meqToZxќe7oxY-G›. 
Ɯ }aGZ-ea^}XtqW :һnk84#~v<9ngg.;ٲרևob㶊V>#G_١Ku nNռz\,I-)G(=O4O*[#Uo7me$xApD`)蓲"9-_G\ 5a%!>HYYHh#](QzGOg OC> xӧUsx1 xHCûOMF v[_UD8T'M_hU@dexZJbEvxvnss,ă4wCw&9Nm, H13m/$'/gN2GЌ8jХCS.[Bq8ۘ@|=4/R9zۖG&~MѦe {YFWaI4WƸ6~S; wB'm"G4ouK,\ƾ /2%_􌾿[o>ƒD.U V9ƊgU@/oW)y gqPQeƉGM Jm<6OmC,QkE^Z 9 6J~dGj>'N EҰQ:D8]6ꝼ@A%5?.Oz\ھWz#RŲW _O"J(UVSDKgek,1N2nz,%gLKj^W1󵪽Gûf%MU*&6ۣP.¯qjNzIK퍽#Rc]V5b$CL˾ C'x:e2ʶn'hWv(Z{vd-4y_͖Sf [B/ ]o7%%]yK~M;P@څdkJfhlBP~-8VK묬[Ĵo wd/Y1LϘc|w];3Mƒ3d_Ƴh v{sQVku +7 >oP߳{#bUwy([hy%Z4\xL|/FGpbwN 3۷_1z[3l{G`j9G2g|Vmiel|Q%۩}S!-j'v(vГZ (|3"eEm=g xA*k*z:}W(z6Acќ(vxƮg޻bX Iް }F~Ȍi7hևJWvf :LmÚ^W}]\Od( 峭*lkFkgf:d {O!:͗$[u:'}:E7/MqS{#Ʒq1JzNbʱp)\_ I8EGkҾ/])N@5sz#3~zhD/ Mt3rlDlx{'&WnZhdvDBۍj {5{tj-{ si97E)3/\y[^pmI<%kSkJe9e8ڠaL}H0#ZJjm:ijԋgՌR\=mݳpQd`1fҩz"|l4˷7O?eߚ}5JVZORgm+gzH3DDq_ KL勰e-z>[D/RƂg7hE:!Xf iT-;ZR+Ύe{ͽ)OAc}a>Δ@V_}YuF*Bc%qߠIB'RtSC/U?.V%}QV$ʅ6- ^ qo5ۙ?w'R>Z^2㏧bACF[@{M84R@0k]Kmo@-*DOk'VjciH4癲@V,@Jpl{fʸh?eJܽ?jWo/ǩk7gZ5yQ l9K7MٖA_mxM{'"Rk$x&u^{cmɠ}'τlHg=UyoX+WcX/^ٜS'Ik ֪(|6gj}=S彳LK)Uv?J_NmXɕ_DM/%h c]g ̏zpK숹BᲱrH!ie1=g%/ZrًU;|jmApR jBvmq7%c^oRFK*<{ cfl_Q(dMJhe- 1aTuFSWV)%P^Դ{Fd> $p* ʖS~4@ C ңewwښ;;1Vv@n&$k]Z|Aٽ޺~vv# pb2{\ɴ7P>$-#4ӓM[朽0I&: #d3n߶V[$֍%G^~|h䠝1\'(QǍ#gHΉz%SվݱĀڱ`L3+͔=_ϓx4]R$y;%,"4*H@$olÞ8}l'Nӷ-{e|=J>-#Q?֙m+k,DII{߬:Jg͖B]AM3eHϖuI)ܵj[<}%fGf\-ATͺRufxZq4oy&SYS1Җ9[)%$d/sEiZoЗRS*ke~RoW{LV5ҭZ7xi+-qHC~:(}vU¾m/H 69瘜إּWի1i*Z9njɜAwsg􄊑JjXUv7_3\GIP9m%.fr_;%Sg>ysxT%i `r^ϬTDx/yϪd{{TLGIc]#ǚ˜A;񈜣{;r,f~5+:aKDj$ow\OJx+u%BQ\xJeG^Q ( U_BHef}7Lr&% >J"mCG=bYRRP5E竬RrUW)n{9[?IiUfQ2hkGO]rX^< Ǔ?~j*FlP5=˿HqIZu|̾ *dHY>TH>\i3{jl]+IGi_Ud}\Z,o\?-/npIK^\({CWE"JR~Y'Xdeoj?.s< ^M|)YzVwU7,8jV)$%< y%(P8E8o^ĝ).3\<ΨkwkBl+}+g@H/R2tm"a|һ;8ے>$ >;M**É3%6>gNCRU+$bQ;yY 1;$4XѳyJoAkǕ,FT~HWCNW6OԉqiK/B9SlN{)9;I1|)E1n[#(~7ͽ߅-tYV._uq ͠}@ɡ[#Z(-X(`>4:vIQu7Z+X-z$Qz^r(I{N9D҇2:#^/#֍W5g!C)TMSi*C)Ycgo8m()BZ:Ie^%ŝU:KK%̓RfBȋ>c=(|=늕KB<_c8_"rGA15"\E=#LrK w*E"{hVewds,sCl-QLjN^: LR?-+?t!F#׶YufI|@p0%rܵ[=G@@@p B ,;vvk1:\Gw>3o|HEň|\uAJGޅvwZ_0,QZNJu0RJ2ހX ;I;7LZR^.:IZ;A%Q'-y92tJÏ^jz+9g1iuQ])un%S҉li-W7f m( _Gi ϟB?f? #O=f ogLJS+ Z2(MI7^$% /YK9q&RU,.>vN252!PG*DVj*E| k~\~PpGY!0+/-UI[6@K;upΏ.A)}\5[_ƸBEp2u͏.8y%_|f=X@~GצDMۗG~3rvLj7_T Yκz^M/;9JwewagAH3+^YM/j$]"J^]BXX cA&]yqGߑѤǽ[kŮn ]}ռ-ֺLX*ޝegԍu9E%? 
5wPJѤ*-Տү}KiQ#h.-wћ:(e4K:%Jra}^K?qi>zDo,hݴwORjH+uP2rZmUӻa(%X3׊gWxcz|6S^eaI.QmYǒU\U*o: Noԫ%$'5g_ ﺧ4QNҤ, t ]:gS-G.9ϳ@Hң{/<Ӧ]-CطLhgͲ-(=}PB- V \3ڙ|t,-Ł}]^/d_|#8+[;\~ csƾO7k9[͚j$$\Z] :QoԮhRf>J#}o͞(]Scd;:o(tY#*0+Ytgօ/!Y)[q#w%{'>:*w/ 9_H:<'$%+/i%eJ.^K_WZmlS s2`f@>R!}F;I^欫 Btd]٠'וځ*?߿JK%:6E[<9+5wjp!ш1QeZA% G(=i쟍Vt\WY45Am-&hml*cbsC6Lh/oJJRh}4*຤/{%JlFEc)UdN1N}f%CؒԼTz/tk_T`oSU#]a^?J>̋*)vr͛-g>xA2.u$J6nm*k~a@ ](0g)p(Γ)M[\$"'̀>t㔏h~Q*J:tJ ~ ~?J65}]b}lfjsf=maCozxnˀ}:tqz#;tc 8p $Mp$ohFnTx=Y6{\5m~nͧ[KgDWOnSiqKg4ߟ\?O駬.j?ϿOIoZ_ۿa+~~ke_~BWeDdiDf1[)mE7_/[1/X-IV2 \.z4IRBafRĵ甙VvɅvxX#S7+ȖUM|ɢ-U_PM]9lWo!$AF3j/F+l*@itRu0-)E 8(%J{LJXY(,ny64i #S2N朽nK;#8MCN8W'ZJ`cDc0*:NVe(ҖCN4ҭ2W+bh=GJ[Y<覹/h.k=_E] uܻ_4koq% vf#Օ97W%{G+qb c!k+2R+իdWz1S;v:зP>JEyƚѕbN%Jxf|.zuY]WԂ,=15{ͳ`fmXo% .ݼ[GsRiHDMxhdѨJ0;c*~ӷ6[b$ء$.2TarH>–NʸTO^JT.5Qć:(}2%?&] T ffSg(e֧& VGMWiU׬FYF9u A崫K`[r1|!)3gQYж+3p"hmJ^ٹdiH׬\>r$}x~ߑs*@5x\1xP\Qw_(MenoUZPwK-/4 FuW~R#(Hy#ՍQ'f*@/GKJvWu]TQW'E<.# iAՈBj)E;—(e ύb˰y;x\ý 8zjekDJiP2ҷLWT z~Us_JWۉ<}Wwp_G+aH/4\Ts^UsՃv߬oKzdӥaorAK"%!Ei k;al%U؛YZXyȦUoTzSr55o|wFSHnƲF3e׍v*e՜rی7 :77VXkey <{mn*F˨ڪN;]̣[r$_L9zI{1cZ8_-az(oL>*4)߱xڃJGWU'-]t^׾UB5K痁օ{ տvY  TKm Wek?5ݨ8b)x]壍ny.4JrQ;22q^[k3j#tőr!Jp>7_u[QoWVqҫKƋ܌7p cYK-^ƍSJS }ְg7֥&uB.F.fqF;ڮм~W=ΉTz ݷ2H1l=Nd%^mÔ7ʳKtStE'Pu U뫤uRX% N\[Fhc\ 'vSqTVɦVw͝4Yj+\5En vC5w[g4e#*f!z.;YaxW:*KGWW%SV(a~=oYکUr7^Nd'>^g=մ!wdmIpi*zC 4=(cZSJ%`s+S~'Ο ]*ܾJ+hvpz0-n#'pښ>&Өncp~аGI!k$\s%Q3mZu[''3:{G,dcl&ߨ3jj!{SR!kR`dSQ.+n/:2osb)/jEVMMo|lAcL [ ᚋ(gle5t4(.lr c4 Pҵ=;ɘ/]W~-\W{А.BF?%W-K[Tc_.gtS~k^G F:Ε M'VMk6Ɩ 4ME1Sk0ŋB8϶S-0C[b$.蜹=4A`q]ZjxJpm[7@$Gn5WyeFv\y~g=CLjVgMfGeޥVP=MMOF㖂ijixAaOp>wWsWDځDŽg03h4*+Vڝ\$mK ْNcgZg4¼)F~\urC;u>Рj)j*xԹN4tVkhDeM+vw3;34!=qȞjI/J+V-ľwĮPT<"F@ %0f{ #<r.}*+[٣u3Ϫױ?:o@wp $}EЦpQI(I~$ K?y42YGٛ2S˃LHY6g#!W) \>Yq㫴]^먀jѮуc+ ɥٜA:Vg#tft pțy^ ÞGB&6x| 2gbںG.O=뼋8u…Ki-W$ay%m#^c@W>б gbQ:1?_$Ջ-DMF-~AHci7L7R2yj mc4 S"7g"l`regǎ?̎8G&>a.}t-EԬ5/~$`'j6f'bXo{3pކ =szyiZR%xÒ'}7 (ƁUn[\pt_ok;"w"L͜]doK|氣CZ4U-T㈃ÜkʽsZCъgY u كƗ)5ތ]Ef>*M-S=ДGDv3~~ݑ d&ȤKd,͵9ekSvưl+v]̈i\҇3sG _an?1kMzcvb\?;{:{Љ[]2)fXʾ>~NR8%·[vxLl8׾mErb/|s=ݪyfW\ݫO Z  ~Ygޫ߃e`oj*1ib"W_^mKKv\s+>Uy/d΄k./ZK ȟeb,/ɨ=+:}Ú%͢jfT>ڶ^sM['1w: SFHF )iO+%39UޚzZMԅ;y ~M늝K!%'PUlZy%ρhfJ\h\l_m6+h~Q3XꨞpBZ8뜐|OH{4TOYhWn_4AAANdF!4Pe6pZ≅3=&;40zh|QBk[䶖J7Ɓ*2h\;hLq YQckҟoa4Aюϸ"yX`<4AT~c-gu ^kDmTz>ֆ;~-T_~"J`1/Y"E'guWzDHe`)Ӧ>78mxEwf,ac9-Y""E1p($.E|=j%U&N_ʎMBЎ&_{t6}5X E&vMF#ZָK.i̶v1kmJWKvYTNv]wYCW RNֳuFDXۻ?'7O!&g$ehMe?.[2ݥ*i_oOVW(d[Y#&5[ȭ3KX3պ)9Nqh֜u0ߡe!KiWb5 T U G!zB\V%=I7tkN+HuM!^uhghu)&}.]no1}Mdjڮ4va-Ȳ@BVƶOuo|(N}Hm K-E[׍v͓)nIJڛ]VȂ=rhkz2fzGVAj'.-Qtd |?lؽnDҧKy﷬О ҥW+)8n ](.s~j,y~$ s2R[IB÷ZKACl9z F}թ-y=1Q4] Zܳi! qCVsQl>BG,:}b뙥[Ӆ #82tבi+ٻ T$4Sc>g_X4ܟ2Ook: ]'&8" )Qڟ$t'>mOl/3uHUCҫfɖ{y %.ꍘBhepb>+FLW)$CN;?<QD7WN7%IF LP] h&Y^ϑ>d-[FKEyD]R\WtZ-'7/[Ă|vfA Jse8FҠgW:sj)YV#ޡ{^뒛'&MEЪޔJռYŏũop |?.S8BߤoQ״R9*dTQoE)dl ; ^5/sԜetZ٭{ݽwY`K)#Ծ r{1YuyVtyMV*kMsBlE##c!w~#bڞnJH!B#F4Jgv(f~˵kya$t'-kDR9IܱqYȒYh֞_i3 i7ҧx,,"%pq5mۭ ]{gq}X'+F1JO@B}bOUZA-[]WQT&Z .4"qLɲ((={x5t5*W ^y 9pgF{9GhDxN͍?vy?Bq- Z9ʃ?7 YgO7_ZvZ]w v=M;&?Ў݋_%2 /gW.)xM.R ?G;2EN t%J#Qi'm3MJ L:]Qn)^ Ry=>6s4sTn+=>S-O֛:h賷BTǶ%崞@}k}6^Mݿ\[SUF1/yrW2Pk-52H\G(=]o< ulJ0_U,ݮ>7#.$~ܕz]~RFY^}wflt=_Au+8]=(7kIE;oa<_xb5p˵C deYlt*.rylZy(cOEf3 ^ܨwM+W<ӺB{u;7;@W:S[SEhV_*>C{A]yw/Դc7[8g_sZ.= F -׍P]V  j"@|ДkoUvWv;)Jwն}"ja)o{t"k+Hg~ķo߉s:לڽ;|9P߻?FgnXk-eXsg~5i-<%Z_wM֬O[1^*#9٭/zQrc94<[ޛ|#m :@fs[PheU*SsUh9O^} ~wg}{u*][ /TnKVkw@O1 }K<=Պ-GULtJ# "V'b*19C.$4pL+DX‚s%jt=ugȥpNm׶nMU&.a];9-)Z?rL>!=UcvuU}MWHuܖz/ˡr{ֺnk. '61|ik%)~qi}i:$nte6q5K1Y;Y􈜎d?{yUD|ql*M~e(m  RV'*7+'[hP?#M+jBEDV|ISOtr?wo )_(hh >_U6^x; QVL4.Txfw|Ԃe95bwwv[yp*xǂW}DTŅOXRte+/z3yUTyV^I$ՙ?RȮ׭ ڥ&2y6߮RX^e&17whk6L:Ǿ. 
a!/O(&jfU|XDzE/F[AlaDpÔW3)k"Ntj/I"nw)]߿XZ5GrQw;=r~~(5INff̮Y@#m#*#7QMȪ8!z {]B5o!iȎp~9\Wվ>!r$o_.m*_h-{a [8i9Ls5ʼn~s?jt{mtisz&TaUp뉪} ddsȇ0eilNb|N=1;%8tiE<|itk#J9$2-C eNf3mH+yKz]%> vY'5\m3./K!=+Z AȩoCz6 <+!'FLE;Ҝ&2Jc kM0PUDiT\%\UBq%>4-FYMs~Wo\T$gpk{}UwK5'Iz1@禡Z.ڨe@eN ׏z zuw#" {( }Y}Q (3;#>D  WK"kaG9s[GSS8tA}kynD灶@˺xª 'Oi ^WKEWZ{U[w%Xgr(kTJg @B!%@T<( vwH+  ɇVepڻQkn4k=P]{.Jk@2ѧdzk ֖_d-{wP?{!Bw>(]Ⱥ㫴.\gvcs\YAʅҁVp:\3k,q|_t.7U#6&Ph=NJߙɪY٭A3~oO򌚝_^:Uuv'}+U]y .TU[֣sMޜwPy,fdW[c2aOn9z*s]ፚk 9t׌wH kNסVYօerSOe߲Ahzy' ">f.jʠ]v$gg6i;#q|7WFo|Nm>5jzUDQ76&MVJ Ȋ 4YrQ%xbG3+T<.X;QWZC'|:ZF.+"Y/gFXprUdjHFοV Ջ҅x %p%BRYuA-C=Qg, ncJWY?/*5P 5Pw*(Y@5!EZsil&_ir.G]xԛ3$$3Vlk,U#Z2}h o"x/,,eƚyǣݵq&`k|?ϙ]vNl7emsȆa{z-~F-k"k3l\V| hs/uL7*_uW\}n7=B \g4293Vtf Tkf~1 E%d)jJRi&^wJ{'g|wy2Ͼ? ~[VTrxkҜ#NNюꟻ,M'K:?i\WVJ[҉vN4~ib=ʬϴN޳ۆA(^坚 ʠ#y杲ә?QiSI ]$MY˜|_N49=uj8K mS>1^ @D[u^ <\ӶO 0[ JgFzm)$ri'W~ g8[k>uliVy@'vɘ dx\{5*sVOblibhqSB~ʹs֥uiYGn#}ᵼvn9a6EGƭNA[s|Y;]roF1-MvBsxɧ&ћ%&$ޱQoIa!w2{Fr'=MɡUۅ\;4%3ěo`V)/ IHN?gˍXfd҅eh'Oz\Y/7z- \ V;ѴXkue\Iv(ٻbR[l~Kw^s4-+rT$iq$Dw<2E&vwCnjB! Ɓ:h Z*|Kw5bWRΈ>㥗ȶB!-%ޒG:Yo%xX/yrZxF4@ulqr3.Ϯ@j b+~tųllQO_\!U{%6xrm9ɶ;%2n]nqΔu&=+7*|P9S3A;\.ugY{Ƶܥວ5+NE{ $,qg䢖 /ju}QޔՠX^-3ZR%Pї~β&hEdTgM'km?4É][?{Db=X/I]g򊅞G$Ж|I?#iK[ oT-ݸ!q-55:  WU `yoq>V>$hT]2uc +o=G8$)qΗT@oDRdžgx*s=XDf|I.TP+4 ς4e*X z&fyD1bj>g-}/}{G""඄~V޺wpE Tx$_q O|{;DBm+ɴ=k#'OXY~3BN&&熧}{9kw\ /^ݯ?ke^eAQcԞu`x/͋~EyO ,)|Z1RِK:I\eѦl~Y zFُ¶gĩYSwg^B1-mawKU-\6{վj]3Ӌ5_PSZ_4o(.w;#fHyXV. /Kva/Sn]c57 k{hWX kʵc57~mlWsvNk -%c-agZ{ó_Nˑ2qrethϴ#57|Y2HIhIf- AŋGЁz=^!O)F_f8Z|YElQ{ ?u)Y\?lO -jiD=-BwL$ O-ÚO"W_qgQZ+jܡTGepSIrC5xc(aeogI댛O^p Z.T&ץϞj L~f/5azdO-V5ny-9Z]ҮZ^s'tQuq K+.#o?DIꦩjhpz(ȾgŰ`m)^eonwyҵ?{OvGO>+ k)s,i:[#?5HWxU^RԓӨ8H;>#,rxvȳw iڏU|'|E!Gk~s2ŽgǞخ'VL7<̧^ڜehwWCh#6w<`,ј'ij&|*2^dv;Tl#Pac\g/ۨl!hQݳ#77YS*ϴgvqeV_c;+zzėzwa84h?<Ӝc}yőaf98S)jkpWh|\W{Pg5uEՍ^µ2sE~)||ǫ(/Vyx5Ϛ[l/A_5߱ȫDopGb.{߃V2"GwIƴ aLhcf{<[V-ޡ\&3Vib&})̵UOs6 RMQVH*p 'He}/AzWb#=ԣFK[\WԗVH4VS"G5J{.Vז!ƂڸuiYA\ݒz߸@qpPAmNUay+l%ɨQqV[E|4Y(r&33&f2&np_DL*7J4Lhp )g->uRҚQ:FZ|LF23U[[ZT>oQSPhy&^F+z5J#Kb.~.vE[U\n5[ [Kw#Fu>DZ"v*+@#`/1x98#Xa7ÍS).Hٔ#+g}.jmqLd:|[g.Gxmi'u܎L}dk)]\dޫz,6hv,VȳF&soלp5fpб}lG|3j\=97=[ⲙ BXDPF!=7Y#a8o+OsEBN"WNi6#ϢVˡF2ɨ%>RLŠp?͘p zU8]k֯vzGUprvX*G;k4' ׼1{{ҙF0ywXC]WvaJ{kZ1 (v/Ud-wVGe qE,^qaeiSZ<l*z>x5({W^uux%퓯q7rKۥh=~)qg#Rnڼ>*7l~q .GZĉV*ZKĻ犾0A `#n+lA߱6鑜~5N9.H=R,0oc#Pyr վڐFG*z{^D-oiSeڢT@#TWDFqLK֭f W(&+$s-jfl!:6jE{TQGxI4Ґ  <lTFGNV|2"Z5#IB1zU@7Tޒ/D/ ATP"?Gc-qh߶fG&B~5+n ^DYTj]h]T뷹}xuf.hBI~jHGKOl^?֯6QAF6MiOϓ1:p6(AwjّKxƩl .;i6^N6Fcz,~K |$ \=e2W]TwgtfBGIn'XgNF @I5soNFڴE_x?Poua*yW|KVL˶sͱt{UFlKbX_Ĭ ʜ;Z70Ñ>ETT!|Ncϖ/Hք3і} ݳn 6^&ݯsrnUUs|N5?[ϫWSRCk 4Ezzhzƚl]j#Жt*,81WB-uoi%_+bb@с.#CnCaxQV9jp"ݱ8 >,nم=ґ{2k a#Yes,konp;;:ԙ7'ފ%B"3'Y-{5=o(HCҗdli->Jۛb} 8~g瑁ќt润5YVICʞ=qMPP32rX`AY].tOѰm4q&r!'W7'F;s$ R[V#q2ܣΦBw8W{_r#ʗS8  x񢺪R㜪UY>| -gyGZڹ~A2J[[Ŵuvz;߶LS-**eY:`tKk<(Io2>i1m2tZH œb-8FW$zr̓"7xqJ/6 GENuŦXGGeD}ʫJePwGErJ<*+Z^;c=wΌOt}ÕgDI>+4QNo ZY*+So P*(?4Dљ ;E`(/_I)|wUPP$W\Bo7xV JnMP[|k@Qʗ]֥h7?Zt2[\ c/} TH+^Q*JΠwvK#g;5]ղVatM4Vp\j/d3.-,-ڧ[_Pg=qge8HԢ;VfyL!_n*ջ#7~KzgO.*y\CmCxAN/^M%H^I}{|SB9x衬چN&9HS 2c2gO8F2v{Y"ut >½>qY9. 
tN wٺO 'n'=x+Epz8~ {`soiSi`7Wu<Jy&L?<'>(>;}n®d prO:(3rYRU4?j|ӷFmT8GrHuH{<7urAnAl6p%55_%G @ TL rBݺ[TƁ%׈pqScteFs Y1೫.zãm m:s4B*=5 +}jv0X8ާ{.WN$*EʚtϭKe1m!Zc[s⑱^o)5-;aTd~sH(7e3Vܴ+:{hg5:`G2ؑC:uVd&8d_ny-)y nk#Q??tesۣٴ緃;P7n"ro4G:zᝋ""oX+/!9>+~x^ po`o7{#k;蝓{',(gB틼zS{wQ WIW#%N qlOg\`+j.Kn8lF}Ƕt|m2;೵.Fe%屮#~\H͠t!fO:Cj*7/TAmGjy2=KeW)H4<-m9 ic~Sg[p"v}c6r7 න ~ƋL\tsâ&dof囷D)쵁ȉ7qZIWyb!ߨ=gABnNyOpA̵'yvLo9$Pߴ9m9P7"shݛDNn{PҜhוXotkJt ګw}b8e>J;#$t8c}z5YրЕ[XEұBZU;T홖y5_8}-se YDA'Rc~;DoRnEsX"?INyYMy ~<ݢ'¢N՞X+dV.=W?Xt+\@ ޴Zc* gQ]CYnf~h'RZm:Q{^<< 'zol0e-<%]_XV W5RvI.s<$WU ުSw> vbaMb,N)=$X*[Bn8<{)ȄKIE5[{-Qʲ;22?W\%ysUh+/G" K?wImu|+[{Wb́SsknCVj:YZ~{2GN=k9,YxЂal;=S:}-eCzwjg&ۃQCC[zxɻeNn~{'ݯcVnWrUVV?[k-ϻ`=!7I)Z NƼrr?gT^TO1Бԯ$ȉU^y[T'm>;(qAؼv/oWy6dt/~'6喼ke=~.n+O ͣnxv3Y$c=3_啷?ǭ{U[d4崤:ٰN6VRՙD)[Krǒ|C2v!Χ'rfcԮovρ᭳劂,y״Skڸk7@1r"'+K{Ao6n J(Ll]H )s. ۜ/2%N1q 6qNU9ykxxUޘlyTiuب܎m_VئmIMSVV0v_V,Ǵ(7eeUƀ"/Rwu!e*)8Zi&W|x6:8mw;Ʊ,AA`py;4_ږ65g5 \4^0m^*TT?,*a!ֶv=ڎ;-@*Jmٕ+=n T!|ݜ,4x9Zf']bfmkﭷi=4d7G)Q+ۄJ daO~9Mq[& [-p#3XGa tXTa-H}9a`*IV w m0HM[Dm(`gImJkF5n|'= tvǶb_3׺ 8BVƲ)I9kXHn]?k/%DF`Z>?67^y/qVN6oT_6n)IL4}Pca - LنhEKsٔ-.5_N~)*ɷaZص׮b %#^k2rs"gwj-s 'OqwZh<ȆP[VNVv[zrEZLxMOJ{|\f9~%'zέڕ@2FȦ(Gsk.I6k%Ý|diS_?P{0.LBA2e'ru.[^ caiM5txN Ofq-3F\\˻Z3,SVΓZF7JE^yfS+ne|J %3=%zQRycAQef,K|~QE[ L\V5+W/4b6" 7Fjےoi$VAnEfS ..pxio;>RxEbCsaFn]}╥9$Jm7bJt[iLdšDmlp\1'X+Y.WYqU̸{uS9j5oDzb;F}{.$KʹM Q రY>nj@W*nk,R%-r떭QrN(d9 @47URK.rhοE/<^@УnOmي: }{lP &E*iˉ:y\ecYsƼqTnntF?T#*A֎D%hB07^ dѸK}ߦ*߱$BaA]E:!rqXT^~M׾yw45I5LRރ@ut_R^(i[r.AÒ rޏ֜*yIIK ؍WRTYҹbYc-dO%E)maC)#wMW=:mx wL ZP3}|i2v|rjrmMhŇ1P9gsUu֔-He ®꼶'_( Mu~=v/̞d/4[^%4HHѓl˫/Ivʔ'\gp=pW@ 4@yXx<.U+tSZ]V5W}r_.fz s׺Q,ֳ]a> o-=޹ n8(葷<)N5[cy# \y̡!%Put^/q Hp$^ᴅ Wh_He? p1٢zmUw$"/:[FVWH/H˭L ,֋b#KԳDYQN U,e ƽҺ5~zc4^`3]\t.ẘCջWiBU}ڵ:`73ܐZ; w 2.Tdܐu}:iQWnۑP9|wlo׉DCXj9Y|},nnj]'u7*QJg,UUwMR6:N>, 8J Y y-UPU~[^%eP}9ӹ/m`5%SMֹJ/Z(:$ y%-c?n˂BKi0Bue]uڡïW'͡ڱËs6!R2 QѰ\n9>rsU˰F * IXQvnU{wvxf- U؉HTc#Pza*wh«OyɸV@*gEBs!vrv4(qiKe Ppk޷NCs Jmk d)1]hw^^moIRl3Vb?l[V2ܙh![:ToP MָMu'!u7*/@6E+բಋmJʿV-׌5PUPrތdZ @ri!o#s)B|Ï/sլ䉪Wjw(-lf t #]z\ҹ@!gӢ&Y^9w[&W9ڈk_MKk5n{+=?IHLe6R݅#gK̞c[u 0VktZ~eFbaB_s{dAdFy%ݶp / >g7"v&;A-BZĜ'Z#qO:oloͦn \҇V=koZcs}-^R g:6q/CLr|\_F-ԩ -Ci hUKyӶL^uRKmF-עz/JG=:.ݻS֙v 7pjFnDʉmn7-IFN5.I- ,x0{ ޥid첬#AeCMYAvˊVZm r6SVL=?^9RWQIhnb]1Gl7}Lj5\]T)7LoqR=Q,r U78"اyI@ǝWA Tyn17bQsFɓ%mƫйIK8/> $^ףCgo4^mxEdݡ̒ͫܪhѣ%۴RPyE^AQǴ\ ~="c߽g?vX4^yӣ3@)Ѭ>M#q6ʡ}.BRi$vOH!%т꽔 jN7Z;FzWr7g)o^!-կiwSd#Pר=Swn$R#X]5| Ձ)j|(,pYnVx ZZrVcV@!Jm #k;o-.‚opc #ٕh1QTTNYڼHkA52Ugɔ~">jEsu=zڗ?Hn qRfV{}^։XI\<,Q}Xiݼj&mLWK6M_r{lm%_ramm/-_=J2 5NPZfKIJc+-ef!mVb\*ȥ@+6ea3EtKlFIJS)|5F?qZElMGK;Ry8G~CY+rep6x9kğW+yGD7uGrGepLO.J]$AV:u7nƁ %2WIɿ, RO93}XAOIz]ymģ* ;owI[Z43pc=W8 ^yrjy0 n7k.nJUgQ/vɔ7*btw-V,_vZ>liWUJ@1wwXLd qH?x,Aaü?I@;:nM+nrݞmiG_A̺j55%LW>eQKzFnI{q@W-IKr]d!u7h͉A26yN>?x Ҽ S1nW}#flR67+[P>ANjWN=#kd.4F;:T.ڴbgWVD@"Wx_8/kq-PxnY7KDkdO1;lRGcvӶgd?n+y.*,6މxT d-ɆEftM޷x%Qjut8oXj]?2UFH(i 4.:@A5Z;t[tTˆM/Z+Ȟ]늶A;o"Z.xm:_XSx6Oh'J[K?o4mϑQ/2ŨmS=g;֠3瘊T^wCkS{b^݌A;Q'|tt k**Ց汤F:l9诃[}#Qer_er(_ocjt 5& \}*Fm_}iUynn7UzgE%Bfʖc~n?5x-j١'9#Nţ^y[PHoT%s ƹ׹747pق 5P~y~Sc~:t>p7Rf ~_(: g;~\ja5Bqh#5y;. z?]B.3lDiQ E%NJ,{jcjdkʛ՚Wh"!j(_Kri3mȅ%~v{rlfF\Nz@Тh_Y:wἲGFׁy|Wrf' J%tv|qg5#?@{{pJ XOP zbiЏx6D[#<4i&pH㗅ԥ_n15_1ѮL?tP͹4OE5론- 5тK $lR;RP)YŸdzXķ*U>=Ǣe Q Q4({b8@r\-mj܃.C$IqiÏ#б)6,ɌwbCϦHہӦ'B4zO0Cw?SvUSO4,cʶLk'h(Xlۊ8AN kpѲ>[%\9n( l"}9- Xv{> Y`WͽvEN<*@ȔtTΞc4_3/ۺ?Jqi{""Ӷ%"{n90<1_/!9ݑG\J]mdEv{ZZ2_&xI,h!1Kf{Z--YPsF4( b. 
Bχ}vψZ}"؎yVIT859Ϙ ۊ}aQ{.GQjw["caKq;pDh|$ #(jy}.Anyؕ\xm̘pR֤3Ԟ,)Oǭ%(9ڔ}|"@|ϛ"ij?8eBI[M:R:Pl/Rq'#~+A K!|m`LI:W3RZS2͉zmStbf{X:%ّ+2cF*/"o~]RgDewO>ox~O&g2f[|qڒP:ƌIDNPR'(ɜkE;e{]GYy8W<*@lK&m{ONz3nٵ;?FuqSNnx.(n[H }Er.$}!vE֘Uk'H R1*<ҠCdtVBt԰6ՌZ6N4fb>i̜~đQ)C{d}h)MuHcQ;g),@{Ԗ ;¶)5!ÿ+\n]SXkG^P|o-3i.+77_mVZ2.^*u!2e{:R8t-]ﱭtV|-WxHtca'Z<Vغ ʋZ#|8p[Yw]޴ގZ/^/7Sk=Ӫ{5^ {Za̤UqOXđqZQcͤ\ԉX.N>:"BkrbEȔ@Sgl0#0,B@:yy!;-gU)yQ|"5J)jF5$Lm9g&zp,^}t=HWgZzJλFPyS5޼#tXz%Z\H_>N[g+2=[l_T\k1O6ľi_n <}_6<m]K$eZq,4^pm~]1λ҉d$bd.T˂Wxr#oqI1m/&Ү4Izۊ`b%('R:ev4.DʹŪ-)eYQ*ɟ)Y$az"bs\8y0xiP+o}gpP=]6{wR=:-L}Qr=D uӅ`CbI[HW\j_R;,D9ª;oyePxoR$΁ <8[$ƋKg#ѝ XQw&Ӂ۔6 8͊>Gӱrƥs9t-Iu&T<|"hw[IBaOyF(~{~)5? w؋Q3)&.Vxȶ?<A%o%ux7U|{I1 a9l9 OllE,;~6ݷU}@g* mشn0Z㍪IvOU9~ʯ/Z6AD^ǃ;y{${X b5GOu3s*Q5ѫ!B>aAy΄65!UŬZR4 ¼7ym"(e!qx iNk-_(Y)/Vz0^GYIj{$.@; :kyKzHl P풒}НrF$۟0^3S+JI"8K;Thk}sӤ# ֹJmˆiiߠ/Z$ڱ'G<6E}ChcMѦ,GM-h Ǧ8W7E)JZ(M7E˹y@㋼8Xz:]ZGIwn^>(@T&;`i06W 0V́+O։ӏ}FW\5Ѿ}Lv=,&/pZLϋw-ܱ ZlFsևe*D婸L^)"ő3ZW9e.kԋ[:LxTy#SD~q*}kfhY纤xkÚrxh%^o>'vh+_/^;Gj͠PϨ[~ǶG,mIX6}gew*,w(DV`VTv?iSơq2%l 8#Udٱ#(V" 7#wUmM^9ᝁ~QZp8O'땷V;i4/+kE3Q(WkM%0jMߕ+VkOi)Wjz"LbVwz_\{ .Jk42zэt>&p5Vu@j(εV5=Xë 5E^(5i 쥘Kh9y+)w%=g6ӐՅȧH[#m[{ ݶ$A.7'dEn}3ZK:/Ϝ,+!1-WL㝼x3d>p^NY({$QpHTd(p bt 0nҹڮ;Oܶ@*(n;kV^|8>OmyV Q Mɻ)fYEϜƍVhΡ7T ){.?Pydy{xGߑO~mhyNE:i똼\I )I>+@Q^S U>sS OVW>]mph:ry^GۜS)W RFV-Om;=; ="o8Փ\pVKbE.8~Y7տ/Edߍ+oy gx/.Io_eVOr畾jQG=e]e\%pcCE?uVCl0Jx(-oNmcSg4+\W{+g9E}Uݹ'Է{>$mr T͠ O؋Щ_Q?vlL- 5Qt eUi[x޹/nuU&{O|=xʹƵ*.'Z%Ɨ6xf*>d|G^<oթ9-mBϲ6wӃڹ^ o.hTQZۮy~" %T2ҼU*+Qxh0^` ۏ3b#HE)v,JG( H hwXיִ3PlPˬ.H:[yfP {"j-MjX] Tn^]}Xlb+0qnUnIU37Ji4W6D{PdA7Уxe۲R;Vpܔ0IAҺZ^(;ӵRIs3]B7e(P0ޤ<qmE Eݿ+ۊ:^p(IiQXgךųk8CcԀUv1!][5o_s &ޟD”d-y%|d(J+Rxht K¢ٝ'P"%l&NE.ObްUrBD ӒЖyʹVZV[:nYJ^s*d4jV^ l;u>Pq9ZTH/c)>M~V:~~攽6,%Ӷm4lH[ndM2](k&?릨䋿kz-Jn^͙禸葷S%u^݂epR=QQ禸\]VbF]ܯY*2KߵnP,WI}۶"-bPo&ZXcSl7y< 6Gubt(~;i,PuTNh}TuFċuy@=|u~ؒhۉxoOQho;'ꏜzT4kSJ;LNGVQ);pD`9w͑]1#/['FEP,u]kIu263v1U*w줥q(w(8 o//nx=,7V$=+. g'"ZH7)տ^ߤ5c=9v+׮#z'p)7|Q@yn煋:jrGtՏ4m L5-Ø Ϯc{ťӔrWVI-E¾\RL9DM1,*9f}x$DRTΫh9g͹_+%-f98{QoW%{"GEn>vʻnDc5E O>O1~pGCK:):bE|)BGQ"rWwe#%?~otz&VҮW^ɩ2T33TkHF'R`.}1vş-ʩګb+ +QJ3/{DݿWk O"ma=f+$wVޞQ|(r7'M"X7,j;ln7G8Dobb5Th\DWkZ ֊p|2ʞ;^K"QXE{Q5UQ;q\ 6C%%R~iuJ޸ZA宥mobn=mmM-g;r׿m?q/:ocsl7Lc #5;53^ݵT+cŨ'!}_!jDaPRGJܚOBF]|-,վ4vs|^Y3[sGvf@[E.jckŻfrrTƟ#ޅ߫x+^֑P(9Ȼ$:*0<$%iIr|mrX %h9@P.z&>_KhLe{*W&t\aҴ7E >VtN8m("7vyn{ﵲK04 1wLE!j>*(24?mQp_vxU9\P~/O)V'lKgPB^Mbǭi|K'W9u꜖0ۺ QMnozzSN+eeD"ba1L2,P"om=ߖU~nk$.[F.AY8-7Y\h/ӹr(9]ף^ٸ?H<>XMUY=H6cȩxìfv#JshWIJUlfRe/^y9rIEy=_>Ѻ?ޜ}GD)UhrKLVdZ_(򺖰넼{]H,HQ=ŽЎۊ8@/{U7]A,kUB>6eٗBБJW<=*2I,_̖JO^*Nz`r4YG|+dɇFVkhf &쵱<2E+>4uT:o|Β+QvvzWҮH:?vlsj"&`ˍRX8rԘiWA9={,Y;xf$Z]JԲ=4vvG\8Fxm GkܧjLcSqK'E^E5|/}1*t'n\XwY8yiEtVd.u1('pE<bW2Dty" #_ kycU%q8=;|vM>qNSZikbQG]Zm(Fcrʠ]B9;=}Z!; K\˽ٰAssX됔ZX^LqGܵ㬡 -b>fUJIHvdPHBiuR7re0-O9f>u+ږ(N%^4 +?q. gԉ3٢K'6fltS5=';r")*΀jHuF-[~ 3(Ui\pBw#t}';Y-IC̐;6qq)y"K5F"'ݿܾ?~?}ul&Wߝmozg?Q`Yo~7?m?_}m_p8tJ~?|s&Y}۩~Hg>ur=SUSӏ?~uQـގgW'h{~Ogkm~ٮƴFlˏz jsZhHF'o7I6W]?4j֑?|c43Y~v E^#iuݶy5zz_ Yiاs',S0><8׈~Q')ݿr>LJ__,r' _ْ?% 2|PډZ1>8+(gcױk+9_X9(.HGƴӍiJRUOve)Znׇl?jF$+Vwˌ$~>gũO[Rs쒞+.);ˇv%!K~_I/ go+_vM'nxM5ta~3=N\:8rs؞AԷ?"o_Oؼ 8ڼtWL8 SEf~ԛ'OEJybʙSXΉrwZUӨ\k#xӍHxHvF1:O\Ƶ?݈VQկD>ԊARI"0+19bbb6MR>YĜfyW_AGԍWw߄xi&Y$<?_|ݠGfk.e9#jf?>|oEQε*\{~}UJ]qTX]e\e^»o,?EA^:8W38,> %}3g˟RiF`q9. 
Μ*G]}2EPFO/t/g޸JZh3ƴr~%:sՒ2" Bn-W bzkġi)YV)V|W/BEՑw$ylp ۛx"=pa~25BKk꟣5 Ҳob+m-?xb"IbKZYD)㱏K[~v\;OZkOǿq\wx ~XM$ұörȹ'Wso]ġIݓf0[g+'*GтOBjJPUHq~E_Ƀ b+qeG~hhf6苘ax5&;(Orx'M5X})W?kzT,zJ׿ 0'sًhJ^w6kz>_<>7i>[-~s-?|?(>59Da:~&WTчo [8{5b.._æ^1=?8\FgyNL|f*mcih'Գ&ػ._)YIxh\+uF@&}.2'n;j"̻O vЃ&cX-">~‹Q؎W?k\Anڥ7qj:=v$/OؿX7?|゙p+ Ǧp}_̯?|?]|?,5.lGi~O8LіO |e?lp+g9/>G~>Mۂ6W֯?}8>%q y|qp\,|/ok<^iɲ~D_8Q#bTeU:qY߿Z+Rk\ģ9{8ţO%G@x԰OzWoEՙwןDylp ޛ_c/F Vx2J ں Wȳq͠|>KSV[)R] ~? rZ2˩OǓ?xhP|K?z ?|mja=OGu͈"MIǶQL߾ P]S[$ @&{-[|6+y*O<`?`iuG80#˅A%eXmyGayé*i&gg)Y;poɦ] nSzb9#.q8|3mJHP36ط^Be }P -hїNe8oI>i!>I(KMW|m}[np>`#I3 Cjȕ3_OLuvp2G[h52LWV+2f*HM)uiW@mY[[m?mhu>U`G Z&0mzLp/+ߕ[d(E" @ .C)Z,J*SP +®cޚX4WA7K-:=;ɕsF[SU{=8lB> ȩv1x6V[uO%1endstream endobj 26 0 obj << /Filter /FlateDecode /Length 417 >> stream x]An0Eta-Elh43SD,B#YcD~*ۢ./?^Q_~m4/f-Y=ۼT9='0kuyٯV 6af}O˛UO/}-wa~ՓNӓ]8=ٵPnғ c7EF¤jB -RZ(2-U %G-Z e m<0}(^랪>endstream endobj 27 0 obj << /Filter /FlateDecode /Length 65 >> stream x`P```d````b``@ J V ' GP C#JB3%@yxHendstream endobj 28 0 obj << /Filter /FlateDecode /Length 12731 >> stream x|xT̽{/ZIJZI i;B HHT $eƱq nqIKqUb'Fܹ#Dɓ|y~Μ;sB*ӻn'v W.Z-?CHK"?4=} ^8BAab(>( !!x K.^CW"} Eh׈9rlG!m@hڌ녒pd؊cZ]v]]Uj7]Zt3rCft } 莳JoʿBwØ!nE>~`5jf W 6Cwi1Qkm{wbhGrN8BeY@-BOו2{12 9. 3x'V%;[c=w.bӒAG߃t|cXE!pbhAt#ʿђQt F8xAb%?'B, )xg/s)=+ rϣ/ѫX BKt`;\47R7onלٳ:;ۧMmP_W[S]UY)/\:ibIqQpvVfZKq;,F^V)rTsej=@wLg ztdb:hރ(uƸnr8;b#ّ˻}^rjſu=Y`}p{YW]M6#Z3{K:baʘWIOK;KbkI<5iI](䭁`>*@1[ܔ@1wBOt:} :]boyOu6;\Wx:8IxjWY  p,B4 Ψ2ɥU.oI.MRL1.~F& J,3* k;;9b  eT#TOoc(2ZMYG3rx1Ő Wc6bU lYan}7'V<0Ӳ@CŦ|=>Sgpd5݋':| }}.:62&42+|O_6W8jqsUݕpPʑRRH2!5MB8u4vD( e VQ G 2 -e7ɱL GnŻ;B6cWbls2ML[PS*Iy9)/2R.mC|n)PȅPIޓN/ 9Y1e|tCq]l{oivkNB8!b pFp pQ/! X) gC &muJM_S~HJƾ$DOHRCF 2H;3GW\{)N&v0q)ۙV&0MLld&60uL35Lbb%+X2&.f"&2L,bb! cL0<&2&f31N&: &LLgiL11V&0D3ML42D=uL2QD5ULT2QDr&ʘD)D L1Q& g"\&r3DL`"4&L3ʄ&Lxp3DLH`Ʉ ;6&LX03abȄ =:&LhP3bBɄ 92&LHL Q&8WL|LϙO0W&L|ğCL|L{L;L?0{&~06o1[&d &^g5&~į2 /3dE&^`y&cY&N2 &~3L<Ϙ)?a)&N0c&~ēLg &gL<1&e(La&3qLę`"L<ăL<&~2&abw3qw2q3m&2-&ncV&naf&nbF&n`z&cZ&0q W3+]Lf????????????????????????????????===E;E;E;E;E;E;E;E;E; 5Ǔ3Ǔ@;hxD4xh mFJē*6ēSZG[Ksk(I@+)Ŕ.'-bJ(-'V->JSMi.Ci6Y:)uPIi()MFi*VJS(PjD1jjTw5QjfjJU* z]R9dJI&K(S*THiR>%R.ZYR6.R& J(iJ~Zg*%Z^禔L)R"%x '%G<NF ,LDH(iSSRQRc JrJs*4lPi!GsPNh+J_R;}N3JR[1蓸4JS3(O ߣ.w(=۔ޢ~KMZ)F7_ܯ(z%n2h/)HJS,-  `?|އ{w[n6[7 :k~ "\+/k/v  x ڐysgI(v砟4i]v)" PߏO"# jVӬvӬq?Y> G!8v@ C՛>{}w ;@u} u} n p3&p P)TkU{TQwbN\[-Xŵi˦-%b6G7F7$>h!+R]w?*?qu?7{yhWVK&VCjpծZZC+...)^]|`Qtaq_thohOqwt^qWtYfE;;3ӣӣmiڢSS)|)X\m8P+@Q!ѓH$BK W"\$sw&};K;qUpns^\đY?gv9bOϮE6c㭤oWSΝ Xou[#[Ə sNztέۈrjZ#o#ZB 1:VvhUEUuVN-c @=Z|#E_hZSL1uv _H۬Κ1/={PReS,#ۗTNt$"M(v.]6$A\5k!ORG}Ӏ歁ZV1Cwvv.lllllll\XX,,\ s]9ـYN@`&` hLZS-f@j5j@PeɀR$D@ P(Ly\@ d2!@ ?  H$.@ p 0@ @ P@ T;P2< 8 % >| _| gCx?~o& +W/^"N~9Ӏ~ ) ?< 8x8G@ 0!?|=~ >݀w6`/[n p#\ p% ]b;ac?ac?ac?ac?a|`|`|`|`|`|`|`|`?ac>ac>aci?_O77hx Tx$G%MACZ64>lVd! xUIH( P|Soz6|!SIxxͷ ?k, ۹95 8"ds2nBAㄲ¢2>?/-#y̿,+/MN[2)0e Ir^. yZQeJ5)$BaJYSIu_EJr7IsSoD&Lv83&yfl0rQV=DRGJ:f|!*@wE#poPA窏" D ]+kHÙjܒ ?Ѩ5$Jm 4þ'|xƧ1%M3EQT^^n*) #Hca(Ϙu Ѻf &^^R"Llx_ ~oVJVR^e%&X3HI6Mt^QIO+JTIjzSȯzF>h0LDB`A ==9pyQgc8M@98{@9KC89+'rslPYšCՒ̑1F*pR%2oS֟_~ ۊΪu)DVZWW4-k r?bptkyWϱz2\:sɒhV˟ܼ*̘ =` Š)H={/VOI{(DTGEe&^ўtٔ0sŠ;ӑbQ(^kQ$(4rTQH~`"e6'y b%K0#ɩa -ԓ78QT8EEzIZئvصaGn̝掲![n}{IXè2L1<>L-`360q>̂Ed!{ n8W[,d 噮ŞT se֜6ܢ/on?8gj\_Op$+XndzQz(\!P`SV rP^ly#U>$7g.s>&N{|TL"Lp9V"ܐJp&V-llN)ӐQhr‚+SWZbC4Yڅ+S'|I SLJ%3[g׶WVuu_Bh2uܲ[(Bq"y^2CV;b!rYUi >htK@H[ȼ,K`6ݨͺY FEɍ6wjk jI4f1)Қ_=3!k,R\UQDPZ XBf? SvkvM6eT mYڻ ,_EWĕCdkOP\{b@ > 8ȩ#ڰ#*m;us̍dTIJe BCwQ V;_ J66L=T,mܺ`BŪ 3iۼҒ! 
Y(ctN)`'6N2$8tf)y<4sgG(5S<!]1c :.ծ2;K*ZB~ %dG¢¢ 2{k%ADAFr4TԵHS éEOR;v 죣TErcu{g^33-o ZwF7STU :Z2e.r-Y`S50voTXc*X Z"&L`6{3*II̘)7 ?\7đ0mqI1&S'!z3]r;.KD~=x[R' KmV3F6(5 ;fXY|5X(T :OœkWEZs\]8cUdKW]zKw%')K8.m0#ۚ`&YQ;沍z7g7/(" r6s^żKtg.q0fr2_5TXʩ4I7Wα]2ߘ <$尲t{ZVJM \Sl3_L7uRZo8_nL0z,$筞TsQ5Z"BfMUP'&([CVބ[]}/cu</wYbH1r.G~}/n]>Sٿ"d ++O!  'Cbo`c. nOc`y@lo F2\u2/%umR2+ ?Z9-W-;?Ԃ96׹h^ЕS/[k{ϓP^iA_s OОd3e'99le4,;͙ʖӳ sy:O͝X)Yi֊22|9]rgkf,l8dY'U&C- 59BP1V#Yf}HF3̄Dn^6|OW)gU?k dBreX{4ENU*5~U lfEafWsc~Hez,͜36Mlyq#..tp?66s,j%'@&'ѽaFؠܜ dajYPvLY`198Yś!EeY Y1,a,,w%lxK_2/C38XSWjVX2KԐc̪쒵UfJے ;s YmMu35OX_Y>2Lx^V&+r3|w0;H$BgM򰘬&?]H@)a}Gą,KUVc3D +41k-OFY0NiM"8̈́Ӎd%q9,>$KV'K&zrXUDm!沐 y޶YTd$7Q|&Oأ"U {f@zP+!4hhυI1MV8F$Vj +2?Yd1:λuu:iWroM`L7mnyq2&*t0'Id#*DF44)NI)85{,T/A=^^e4Z^엑^R.5x j׍;.a ?mm2V#$|?nů7aOJ iiNdY+n{Ϭ K/9'J# BgjLJp8xSJh?*5 SkÊE.b)Nwa:;p@WJ@\,ܠj&1d-h'Ig<EE@ _ܘg:Eo%xl?0&'XReObQ6|`j,:\"19VN+S+f z1&^B.Iasr95oykd:*4*,٥v&8SYg*X=<!Wo;j9ƵtXH(|>KoPGLs{J#Ns*eZJ4 M۩:4tծLOJS-BcP%drgb[5,ܦ#dĸzT~$qyo9}gmWxY08 ڳ| SGa@SBO[5~y4`Y]8@voLzԁ|( 6d6!܆Gd^ҥ'u˃;AҙUK/pll&M'Xŵzv ߏ_nqluEkJ7kN2Ә&]-nsy2J.v<+RP~]Ҁ;?xVN<&$+| EH{Hb˃O^wjHaj`l*,]UlEBr>䓬C8@Zs/V珶v*@iHk,Ҙ+%Z4Le9>zc@%zi qTɄ+mKOZ~f R%p3;smeEN53ssgv ^ ?%bI`7y[ anӟ 1G wFy]fK.iJ0kiʼniXiH tÝ:<Z҅/U^$2/S-zZEG&1#|$B-<-F!g*4M(q;mƜ`JHb霹sJ8CݚhTp97~BBIF3xO+ *hLrr\\7 -CH'*$AFxw ؋K7f<#iןhb>1YjOsRSRgٟB{hIo{įt$BI#}$k'&mR'K]T;餾_StFzI;GH/ӤܤSiai> stream x]N@{? NlOhh(@;^Y ˄gfWPIv?><.._Ӽ[y|l:/Ps~;?۰Va}\Keѿo{e,6,:5M:MS2my^zRidц]hdɢ=df1YlImEdg>չ>Y{ChChshϲY, ~3+kj™`pP`pP`pP@@CYa^<h(@@C 6n#F5 yCA0o7 Yü! k7d xao5 y7=uض\}|YtIoeծendstream endobj 30 0 obj << /Filter /FlateDecode /Length1 65104 /Length 24675 >> stream x|E?~fgݛ{'rC B ! HhJi@(V, **`1{XPQA Bggvby|}<ٜNݙ33gf]` (:1;.a Cc'&1T Dy;0t?Cq3gFx.[P,hH/)+i A\GX)vA@̇ey"qry>Q0F+ʋ:<2r gUU6ꢪ_X}u@ TKaIY^m4 n `:@$@􆾐#` :ep9@)TB@0_Hde0w (9P`%fxݡtp (0ÕP\k q cG!҄n 1XcC`(0 G+L% a>e< Ξ02` <>Xo/HA `,LTVQW+Op׿r}6EɵA\wj[]u2NzXY'q=Y\s]uueuam>K^^eƯFoz=q jpl;~\_SZQlϹ>w\ٚ*;pڏ\rݳ{3$s]Zq^}=Nz$k\\QOq}M)\;f5uU5!\:략hI-A\gp=q\Oz:})Pq #CǰwTk]SSs:>[+|D k[I{m-I+muNAEșSFB q'Hp'ۄϵ=Q;aQ"I$L",'kF#Q*QBeNyXyByE9WR' ]i?:NsB4sio]Va]VagR۰q_L Cڅc.<]] Ķ k߶6M{m8:]8U_tvWG8*ιU1V[𨅧.k,fᤶ躪m/ۆ.͟6ܣ(..V;'څkj!IkNn?9]}vQcڎbZ d=x5@l^6oW^4}l/{c$9NS@Pgl0 *†ifRsG7ef{ K&`8Fa=G,68U~xPaÍyOvBw|UMp|ᓖo9C~BjĎ`[bg_d#>/8*Q+Vί_[9e{G+SԅwDMLDwѻUk{j+"[emMlRVc!ɶ:[cՓۻs3|\r224nQWkk:,S:X,ER/͢7:Sօu,Q?֍u,%փ,% ̒i0Ka)4ch(lDXKY:K,eP7Ɔ6ͤX!+(VJh4+g4UJڙesi,cu g+[8-rv9ƮbWl[El5n`7Оfv3ְ54nֱu[v7eڏF`f:=aʶ4mc`mlA]l`ng,${e{:=˞y:^#K%:^٫U:^cٛM:ަٻ]:ޣ:di6;S)N4bh.@Ot;~qqsrE/L{|י3M QBtR:ĂCm` mۊJl%iUV ޶yy ka4v{=`i~d?%g3K<_H⾶ -Af`t)Vwb?o?11Cg;>-Rj?994LjmH'%f9A: 0}!,WͰxp~=;1<Xc!yqXXl Z,sIJgE,rԲȗE,e׸EϔM|cYeo-,rҲENY9mY"?Zٲ"XղoEYݲy",,a'q*2NjYw"?q5g l74-w{(:t:ZOt%j| ~A/Wkz~Co z~GiDv%c-nn t$ CTZBKNi58h-:Ó|:t..WI_}\] !T;[P;jƨ͞a~柮J볉83 ˈϮ -kk7?ecX[S5P R5TyQMP=TRz]*m)6,A`0-f J EO_WkYnؖ-t {E?{9Mk >AOѧ K/6Ƽt3 }kJbRױvf wZ/n-ـlNUp5p k:ynpkV n;`-;a=w=Ep?lM![`+<QhvNnhW< OӰ,za/K^F?*&oWy{>|A1#>O| 8 _W5o| '$|)8G ~3 Y8y82^LT&)de*Site2SD)JR)lD)U(*eJRT*U\erP9V>R>V>Q>U()+_(G/c7q[u*'郞|RN+?(?*?)?+g__ߔ9wrAiFDB)UکFԃD:N3,G\^E+-t-]OvMq}Eߦw}~@?!z~D?O_PSW?P?TG'3s zLF=~POߩ߫OW7zN]=^Pm.6XK׆hZ ՆiõHm6Zi DmM֦h9Z6UMfh3KYZUlJ9ڥZVUhZ6WjZNkBi%Rmvvv\JZ[ԮViji[5ڭmZmv^K[G۠ݫݧݯmԶh[GmڣZ]kvh;]cnI{\{B{R{J{Zۣ=՞՞Ӟi/h/j/iWW״׵77ww{ڇAvXHXDT;}}}վԾҾ֎ihǵoI;{vZAUM;~kf8I۬====~qw.p.t^\\\\\yJrUΫEb}T____/ׯW+kUujzF}~^K[Gߠ߫ߧ߯o7-#6QA߮7;.}OAQEU]CSK[GW?gsA հv q8n|k0NƏOW7q8o\0]".E]治4rtrv|]~.W+ vB]\aptE]1;]]wvu~F&f}R=(^:h?S48HgK0M?U >rLo/8w/߷1o}C8!N}~'is6/?ci9uגw+w*s%W'=޾wcw=U9ʙ@% G}Pa {xx*=aԯ~]I$7kdf,Ьu* ̉%I)^<ŻMJO )!p~ЮA3`/=z&yۼI jΒP;t_c_M7o! 
w8=I}}'o"ɰ﷿E'3%ퟒ|<'Ef<\%kI6PK%{]]ɳxG"yуHv$e3#͑F^u;k,Gy11EtLrL"o98SS;K]G|聏3Y@9%#g-9G}NrgϸJ.>MB%ϸ\YZZ%Ggs9J$_1 9N%'T Jg9;=O/'NŮ AZxhx0KKˁVx9a^:d@&0/S\Ùx@!^P@-X`\ OFC t$#;&8pz [N|F2RQ}hRGA YL㚾\]ȵ:Jn$7B 놖 Sz(=Ę$1Ce'(8E6<O=Q&3Ob iPgPa![q?woc8|3`/óp?~n7&>LW O%:| o8{|>''$2x-_fdX&o9?F]Y7ՈtcD` jS {גO\܈w܏7ݜ˟E{"xK[wW.!gŒC!!;CC<rr?s?dp?ĸ~ȋ!|~ȟ@2>g(I|HWғ#d'sHg+Jr=Yw@64=dyp|G~"gCQJLbc8N/ǙKHyd |2cIX+"2c1XJ8jb%q-D1[ݶ`9nahpq|8qxr9p`8*x>ͷ f(7<8R ߖ>uF ;)^>m}3)zΥ$ 4A9]x- LK6ȣIciXHvp,";9;L>v4{iGN/8"96;LGsL%w񻇏>r򑻏|61Gq3=G!s<~A! W_' ƏBO܋OHk|6k!-ĜIEr+\ߐ/P@pOpbiAp &SH.!I3wsaNYTnCγ ]λ878u߹};9_p|ɹFl4p<0{w{8w{wO{$wg{2߻;Gv;u>ߩzǝJ06#$Gɭ ܚ܎>܎܎QxϓyOg)洹*бo?z>5x|P>v>'O/>|<}x3g  >|O>OUְy*/ W:|l3Y3!birNJD*]p[6VjYdѬ3X7z$RX?6 bi,eal&+dŬJ6ձz-eJf7vclmdكamcf',{^b/WM6{>`a);NO*;M\*gb/;{~zW"c]뭚L{4X2P.bFϼ ~3zo%s xeLW WU[l~uR|z.z iweߠ2Mk}a_ͅ}n{^ͅVj{vlUuK]'kgOp{0myxv؞@;7kJZxӧMɞ4qqcnj5raC23NK4p@~}S$NL)<ӥ;=ݦR@|fd,wC̬5&rذnf82#ZEjpcTV< YAuK6õ{W]¼9 4 Vhke rQC|dFfC\$V6rB H-+ҽgG<6&ϊG{ &5b&LmB_Dٖk  爰C!-1.Ae)f2R|Vd9Ty%A 71@cfW*2#CmRNCZ<ۻ'bY؉R s#"EpcP:15 iYV ]U2Dͺ"<lOrIk!`JL檜Y8?9 ihȜ\s"E;RطvefZÝ\s0*2}&xp99$d6dm2Lf!B#r#?hR&[tU]^&qlm6;(UTjhvv*-c 9dƕq VãQ r78wNdQdn$Ρq9fL[91r9|Y2MHPD` (CpfŅa<.yLvrDʬ<Ҫܸyצ$Brgkj^j{ZڪY%:"3 uBЅ|`$9)[zѼ& \:DGVDf@IMD.h->̉ ĆU#uwuUrZЌtg!U Ν=sjl<,o2uS̿jC9/Z|xRE},c lq{x 1w@W/ȁWL>; -uLZ܀j?;TAJ@=M[gq4qSq* jq{x {xZZ=cŝd\K~=!NYPb枇sre'J1G)ϗRu &ʗb (ykXzdx/7G^yݼuXf0RiZ9ʭ{9J~"ـٗἯǟYy/x/~`J<ǔF"^ޥ)ZY1NZ`ޱE~!-nީ-OJH-=ma3q7o{կJn|iNhv浕p;Y#戉RgXވ6ζ`hU{-BмQs$cKlIu>cg2SדSjͱXK=k= L4riH\lͶuUKnsE|m򲕼ZJk/3p6ouְcM3m2W独/A̒|tkx kk`)G|^VZ+߃խR)6cM[}E[#XmX2 yz! Zͫ* kf6WJ~bEb)sِrO5}Qn˯v/&տ̞/+]}V]ySa66+ҢWWKo3g_5hk"\'𱪂{v==:"==tvIm{BQMQ„!yeեw]WQbwq^yiw}im.]]YWQXZ1]YkʱdE&=]\W[W]T.+s= j5y؂*f*s jUՕnX{YYe.-+uVk~`˰UY/+7-_K/-Jp[\.ϫX.΋vזyؗR6+wUgcLMB^[gv)]W].e$VT0hv]Y^u{'yEy՗=0[F[WҢQuy5]܅EՕ%Ue̞Xrvu^UɂĂʊ+ɋr+$ u5Exsl(./-*t/5SyǧND}IiAIeuX-VXZSU70mUU] 0WQEm[޻2<,GU2E[ijS@̗D՟7 S\.(k}Slsh)I9Id$HG%yJ'%yB%id$IK쐤Q4H$$yDlaIAId$$('ɽlI.IKr$$Y+.m*InfInFInzIVKr$JJk$Y) I*I䱇c"=D{<y!C䱇c"=D{<y!C䱇cD<y!C"?D<y!C"?D<y!C"?D<y!C"?D{<y!Ci"O;Dv<y!C0 ; 3scG+DƎRK;Eh,1l0ư! i"T#ZDm KGP)Bd)P&s (0[@qc "*P _@Y.0S!BL0U@SL-`&/`-`F4G.`XcCG"d6B0D@H,ʥ H (`_@?Q}$ -*KKS@EeDn *XE1EQ"tUGpr: A@Ɛ1C" D"Kd\ p i{c8[cxU4 "sΊDW8#ƠI?5MDQ~pZ)}'ऀ"[E7 ZW"˗"tT|&HT'"c 8,rP>Ac'#'|W;7D^WD/ xQ  x^|NW3"mES4E11 1`v M# "!Q˃ilQp -jKzvu C]np5"Qni7 AV NVV FJ+\蟇pU>rW6#\!FleFd%D,l/DX P/`:jDբ\U \@KP*ʕ-ZV, (9  0K%fN-.`TQuQ)ōE-L0AF4q~6{Lߕ!YF 2\ *" d6@h[0oBzO`iR j ޹k6F_)C4z $7zOE-ҒjG)rh6;ֽ\DnDe]t , F@ti(NQ[.(&P!f 5zDl!@?|DoQKDz `\ S9"CC&.rDNUDR"Қ=MY~޳0wP΢qb/(gP~F DNc(ߡ(bq r k/YIQ/P>G ~ 0!(. 
#}\e\1mW\[(ockWe]s_r* 5;|}{Y潨Aك17):I& 6q&.Lۉi;0e;JʣmGE[[%< (Q67"ޏrq~i=F~zwb]밮Xw;m(Af,wwsL α;gvn @U4:|9M _,-˲f/^eqC\|-/N;e/̾lg?\ UimVjOudKɨ#u^u:jfWglΆq˪ G&Φ;C;f!-vye̮ͮR]Q\=X2;d-)y)/I=sˌ)Sm=ONeRĔ2&{ ƏN=j)òo=4%+;;:;P/c:`K wM =z*TІн3$MzK$͞J'fdL1U3iG,OWK1U0Ƭ1nR+٩X]ISdݺM/;x}~6 F7F68Mk +':mȞ:-g;!n'ʐI ~K<|>!lbN#ݰ!,=wd2qlr,q3kjjgYS1DPibؼxȆpI \݀2fe9*W\r92(KP,B e!((PPjQjPTTT\2e6J1JJ!JJ>J,KPf@2 e*J.J((P&L@2e,(PF@2 e(JJ&Jt(i((P @/J Jd(I(Pz@鎒 %%+JX(1((Q((P"P((QP:xxx08QB9r ʇ(wQAy-7Q@y5WQ^Aye?K(/yPEً QBy QPv< e'4lGi@ye#([Q<ʃ`]BeFQCe=(w܅Nu(kQ@6[Q֠܂r3M(7܀r=jPEYr J(W\\?Op\?Op\?Op\?Op\?FA@}A@}A@}A@}A@}A@}A@}A@}Op\>Op\>Op\>n?A4-ƅ|G_0S:]32ݴ=7NxHykwHHj{8>LV+O=7rҧoIg|u ノ>;Y;›S4)A9&WI1㒒 zvTaB}*{ޮ,L1e)| 8-z@BF5;9>Fev:y8>aa6v;7D-;OOw8j7u ?bdO_/Uph>FlWw0/:?Ͳ@-PA|h_/OT~.T> Փ_4ہ9BO;IY= l|hn'SѮ< G=##:NJfҒHoӬ 7|.]I+ݾʺmYs-ݵ|ރ=kgq6?H=H$nX 7V F!0J7vн0=*ȃzHxLpgbPu5S\kc`7Ѳ%lG-c-c-c-c-cMcz 3-eȨ07_ԁwtqۍ&oXnL.jїoD9KGuʌ{3;+ޒb?I L`9IoC ̹4$-Epq|l5'G/ i~dT4o2:gTO#4,jNP//SaPs&>a% \8ul}4cP[Mvdu{һoD\3NzFcԈ0*="Vo3:i{g ?PW7wDʌ!Q='VLտIqSF 뚞1_򸤐T̙؋\:NAWhb#;׫Ϙ=z 4wyA>|=:DuON3guBY2 *¢h(bbL $1.LHp5S0HƘ$nj  dWOTo"df 3f̈͗ڙ$'Z=욲[e" ^uN"|8)o夘3 s dGYYŃGUJ;4,2dҾn]&guעu߳ͅ0ڕJɖKze/3D~M 3Oqng?qqg;{GMẌ,Q}n6ץJfXPS4|w@wz*qFP>vqĎ6,6)&evX4dPN`uݓ˳cu@Gm)Us%KJט+2(5煵 ( T i+0甹)v6iΖ1:7)z+ux5,NGa=⛈}\nƀՙ=]8*d[iF$V{ܵq3zygFwrjany{|.Qw3 {t bA>#b#voreN\T\7\.mkS=:熾kΎd$ ϥj-c%Z+62V9YzΡ*j~M427L>R۝S|j9\^=[f<:T^gVpݔ؞7]24/ܜS,H3jprՏiS۶4Ʋс|`lcK6 `l0vb]$1ȱqRRņ%I ل%%@6S}4N9߯G3z{Ջ6tGIEo8xOUwuZI|݈c-% TUِX86|Q!Y@Bכ/5vŗZ"Vq}.=Uq ׮r]~R6!\5;FbĐrnvj&n*n s-\:R.)}Uԅa:"dkaT8֐zlp(*qD !@x}PE4xC%'8MKHBcfFa+-}xSi+F`P]αeEYhxΙ/>]`=h=m0 k"jN0)VkGX5\HtqG-\TLaptW+R0-)8r@=bŮF{jy/2+u 6G x' xW& mNUz좶vc\x3ƃ~lPaQZ R~dNގ)ݲe-\2v -16p-DD*%"b}D:zQ b\zYA+0hC0o]$0_Ya9sYKcx8] m)+²T%O'E[b-d_2ug\C~[%M寮Zp-h}p,Y0YZ)hF.Kr:PŦ@ũ@dYZrgWdY R;Kϟ;¬U֫y9w|f"5˖Ցe,g̟?WΘCˉI^IfZeS¤ Q.:..a'?DQ$p5~v:N34I0=q+6$wu"Z+ ʂ GN74,=@<ꊩSsx%\0382wA,Zׄb1)OsZkZU-͓؜"H"kQ e_a+c]|S^.oH/+tGɪYU}&>;\a ,mt,)g{.mW(2+wwe\<ёhmbƕ.\W@8ɖt(xa륥z⒬$$sƚTk:[H"Se]` c9yEE*dJec˝NyHf{]VƟK{ewl5KL޳lK3̟jڱxq9cF%`J  +XV(Q^2·1! XRDRDRd!\#(*! ntXBe Q \EPsp`~VմH[ 5>S:ԺN=G47v^;CXsa:"a }3hG6.I]\_5S8MaoFݐ!OM颭s&Z2~؎OqT!ÔEQ9xaÔ)r ?Z!+=0+2VHKjkZQwUL/VӮ !8kΖc9hVCژ*e}Ĉj$ GLd('%Eq+k+WK0io3)KC_C%Av ҤnVWZ]M&L/ݪZUfXfL{ g`)pMr4\M%:UDsM a?A̼'Q  M֘~+0BBIc~GJK^pI 3*0B}`Ds!Inp<»{%lZd+O425 5I̕Mv{=~i>^"+pxS5_y&G>U}qPK>{͚2@@X20 ḆDŝ'o~\ e9hǿ{~/]/ yP'lo9Sj}^5,@}8#mnrX"ϒxӝeh_S p81gg-r^7~#H$?Gf+?Wٱ!`"DȞau]~LUUMy[K1א+p! M'bmZ/&fAX6fmsxfYKa&,`? x)-$Ѱlrz )xaiH9aEA FPIYGDh1)n"'"s<Ȟ2_VѲ8sUJYLGnulUܴمÍ\.c"áK|߹;WU_M]%c_ yb<ZaB{In@ 24P@34P@34|ȐMx|k!GL":\N㨺 -'*`c.|世ג8Y*֖! 31(d:&=vkOкED?ZL&fɪ02a\Zf̕ҽ"k zdd5},dy飻lV01_2 m6 L-VDݚq)$0Nu^`BrGg8܉/6WI`^hЖPH=e&FZ; "e #6EpvGњOw*5[ R8b]3 ͻ_3 7@R z<6(nҍh45+ANxۺaw?}BP& T6vey2>zYVyyw˳7S~'||283;s6̻R w|¼c0fAdAdAdAdAd'6)p葇X.N>p-?endstream endobj 31 0 obj << /Filter /FlateDecode /Length 389 >> stream x]n@D{]6I(J|\`, !E9wqerqyZrOc|%sBnܝMOַo/sM:4M!2n_W7xi](q pO̡3,:Y~&anBnP[4":z{:z{ Rh`4{ ^ck 0P-2Ne Љ']0ifօ숧P'"lLFF#CD\bkp5}._u|Wu:\]N_pu:}r.Aܭq͸ײ骥rYǩ|UT?endstream endobj 32 0 obj << /Filter /FlateDecode /Length1 59740 /Length 21700 >> stream x|E?|ff{nrHB:%[ 4SvDQb+>jl; $ylB{|?d3;mg9sU`EԴL?܂dzyMiL<@*_J+W0mAšۿν`v/,Kw̜E\b1i^]W^*ӛ5K-3l-peNpήkl K*.2*<ӛ6} |YC5/l< Ϋ.m6p:%:;!tB8R 0ۘ PUPpY *D@,A`+#`,\u @,zXA1xpA$ā?C 0J`P +Blh XI0f.(6e n! B [̀!0Fx qV`X k=vpA C!&@2\. f&,(q@a!@#,+1}lP`6tԯK`B,peyfc9.\AZ+A+/nbz,6Vg}A#(/g_z̠ [HASTQ]PtS*jjbA Z!bA]" J˕ B(}nt6\ k倠_zXUZ\ނ  XAM4G! 
ZPg3Ait ֲT_#Ղ^/-nAh9l1AA_tocU~*W~+1A7֔[APF (hfcczuyt@iIsH+iA7 z}B]H zTПݠ*&(oloT %h)f6$4O1NtѤP=2\ahA- XJ)'hNοH9 m#hL}2bF(j /SL#@2u258!eWs b)tC?qI:cz%&f`cD!$d<2T&rl&.$ǨB#8}1}~M3 eIlfl9[ֳp }٤β NK_qZir+.@%=Ҏi}SϽ;?-ZҳOKOӁ'^{{G<;rZ:G_\i4E+9L$1Qr7s0}xLMfbz"iMo.szS;zO+8m23OKg~[w:˷a$tN9NK^>z1H9J\ -ږRb_ V .?9VrzGQ P GG7 }l3R_97xLt F 81pTdz>i>7d3z E0|/PLt7 ȫXwb'wOgwyЬY fͮ';^d((J&,/"FaW-QGPP)^4Rez>+' W`ҋxJJ"`I$pIfŤjRG` 94d YWMp5|אϰ 'ZC5:jVTi h Mp3C-4N[imNiЋE^J/ZMo7t3}^`NԚY6ˆBB٭V”&vX-$Ri$Y$Re",Fci4K `yۺ tLujt(LFtV8+Wrhͼ7q̇'>̏'d,43x <B>A| C·a,{E\ˢxc.^ X4^+/ X,_8^kX$/c|%_R|֗_/f|5_|-WX&_òZͯ׳~F~#z֟oam66o obf wP~ p~?y6o[Y.Ʒ!G((l4w4gl ?ll&l&l æwl"gc6GX1?ʏ16aQyKa9N$h=,bY:uP 6CcNi"8 -Rf)Raeep""4Xdi.?cpX<y"O ē <By:O0ų !!Ň͇h 7!s rHBj^ IB2›xKHKRH _/L~)e2W+_ 9ooП`_ o7`~3V7 0o~xNwC. #ao子xv(󇡐vX$ 7^ؿ h;wDadZ)Skhm7N{{ggg`?l0y'x%W탺e!s̮$@GGZOZO C;_/Th_"UձHLje1I fJ1 ȅ10fya1ߗr \am</[>| _b%})0{ɾL`}%sW 6a Z |_B #-|G{F?1 ~_W NBt:NS4ZDЙtMйtOKi-. "ZEӳh5ӳFGߧCOg ~A_Ck =i;z~O?c'3J ;=I;h' (cLafe*1;̦lհEbv c7[X+ʶ=^c7؛-6a{ٻ=؇#1;>Q+C=;^]=e_@PHX9|||T>WPTR)_+(#ʷwQ{Gr\9T:N⫎PsՑj:JWGj:FSǫԉ$u:ENS X-Qgu:Oej9^x-īJ]V5jZ֫g jڤ6KuL]׹ >>>>PRwOϨϪ K+k[uO}_ݯ~~~~P?Q?U?S___ԯo[;z\=T;N؈zzzzzzzLIYűԱ̱q\ yU8.t\qvvB;O[.....Vkik˵+ڕUzfV6mQ]ۤݡmӶhZZMۮ==Ԟ֞ўviiW״׵77=;^=3sKvLIYEUM;~:NtSnѭgAs K+~X?տԏ??ICt89iuNt85t:q:g3 q:Üg3rF;cng3yVm ΍۝w87;tyx,g+T<9A]6͂}l.7ճz=1]ٍpPxυB/JCa|-Fvĸ-@Ed(:)ZzGcs܁y*%ޢħWI( %g-n|BPD:,uu`uG̃ys/߬_%x %O&i\R d!YYTA69@?RGr.zr+%ס6&XAjz!.ѵp5^׉7)Z{qƿ60w@wO ;#<;W٤_[|,>$gb+ 'aV ^f]K"WYבuIuVl`Yn'[w< 7Huu/kgOY?~LpopTZ;qop&Pu8acK"ORliY[-Gt,"8; ٛaOJ:4͢6G[NK tss=}V~߂9dpj=qOwepWp@#zSe|eLQkRI*d9F2Hn_ټPK%aDY\hKmY- DKK%ْbkIY-LKy!]G'C Og |A$_Ck 90Ea?_د7v`u;y PAZG< ƋA8^ J.9M㻴4l(AOAa^^P3q8/_ C+W'PE! h(DH (\u&D1bI5XLi&K +pM_F.>rr$Ր+x=iH#A^!@xޔ-V^SO抧N󻟅2EIE ;;<;1t )cAg=`Yc\X6As:_^ǻ;>ýy"5<Ap;$vЎ B?CϠ?ah `Gpݼd򒊼De u gEpdYq7T wog]|9_/V0 ]ǗCep*8Nw)czz4a,zx218*ͷp}V ;ijLJOw%H 8,!( C"cUEr q;B:u](qǓykpu x{b펧 Ob0_B ;.@\~*]?DŽ<.4 O !4);ѳOwAEnp {q_ ^& ]G'xB:8AB&mT}DWԌ@kкI=f$Ԍ4֝G#ޞwף86c;/--FnNŲCD!&"E!CC6a9҄҅y ;-쐯C~ ; PC!@3ȁN c(QƐ$I\2LѕŤ,d5\n$wIi';.ye+-9FR`IciJ7$!(g3p(p.$p,p>" XF ,'V3 "ѣXG& \o 2Py,KM3g n9>iwؼy ?ýg1郖 a\<Ԥ`XԥtC6 h!g" PZQB[! K.py@|2*lXJVm+v l""QE T==W飸Νs_7~pn>'= ƢO[Lk7N+D_?sq~2oՇme[X/_p߆g2*91N,Y{xĽ =AwO{S*|4ị.| ị%7z2WO| ffBS@h 3l|z#W̧O1b>| "3-kc5ּSzJR̼Wx7w%Vk !VddUhMbgr @Fݴ+[*- -UFKGs7 O}y<>{x.|. "^ټe|%?_W5|-_ïz~ of~|?i,?_/WM6c~?c~so.C\R=?>_W"Yl6[h :>˜hμ_pޏ#1o"L:VGxuxVz_xoz^;3^#O7hi=/O..Ur G +SӮx-LB 9}lOM˞&J_?1&cv -x.tD'+?G c9[` <> /#q,.3! xt\h'ۉ 㝇:<;S90ğC⫃7z+$6P8Jkcnh̞1;xP<;ÔV 7_oΆ=V`,z}1dat}q9F;;}Yn=8%C C2Rz!e޲75{a(Pf9ď "$4JEhӣ,ą%nK̲@qgHQX#G>$ M&$ $#@LL²t0 Я|E_Ѫlϛ;g%EӦN^NaV(QJvbFi.]5_Ts N5=5k vv16]j="E\ '&רEyV255ɢ5a{[5HJG0a5]$#"BG ڊrm+UZ:ir񨼰#E[֑hUe.wmMٹvo(W+JgRi fͥ>ɭ}y} F+[SyZ)VK۵'SjX#j-&, GEGceh]5X]PV:(UPd*};ژQ%[Wob/+_d`iw^۴VOF<&aD!ŭiVw.c[ZGrִQyƸ\ϓ4rO.~:lvm˂l(18'%~ԚQ*P?â[=%(wqe1Kn>hѣ y;vWes5*aĘ-p#qo.4f4wAW5Ŭaz 7(bƭ# ¢K00sLV[1{L?m kTe^jbl㤆,̎1]E,W.QlFdjIbwč:TlfZةg6dZ, S] :u0?9kZEzHw' N+.*vN]c46 d_Xzl\hn+Mi{窲5[=5/d.XZ<$LuJFW07mOV7Y=y:Q˺VO+n[5ˊuxD.5rL#2FKS0av"֕G<߅V|*0S&QfӜV{*6F\K5%N-)K طw#!JcKqFէf&R,ƠEK*: K5qflcU u;y{Yt)n9 abCɚ3Zg%Mk؜gAF_XqHv96x%@zYٷFW-"xҗ)+6R/06)&$3 )f\`j-4V1*,aefI_/̸nKW3C2;R3ΡZ5S=>`)(v3 av3nxb<ڌ0׌ @Ќ۾Ȍ;Ȥ4H/5:0N2n9h}b7,R2.,R2.,R2.,R2.,R2.,R2.,R2.|/ !alS\58z4W7Uc5 XI4oq֫Z\pப&WUGjaQS&TfBvnZWJ*do,q5` 1j9VoC,ZJjd_6*RE?OWn]ut 2ڿ%Ҋʚ҆ ^qDž(z#EP[U٘:<5iQSSԚRzZӲ 7-mj4YFf2Wsc%v2]8 5UMMebXƍhsҲ|Q{j˫+V]EUc}5v`H +cڦTWWu8U}\5eM|R565TKP$Va/h0Tg8R9RTdelDꛛPK+:*Oc̅ RK뗚g* 6?lv[[dDT;-2ᗶ%$,Q~𽄣yDa%/%|!s "ԧ>iE8q[DG>2O{ޕW;Hx[[ޔ%&W$,%틲 [svIxV3SSOHx\c. 
KxHv $I*,$]H-<^ p;%lpM%lAmnplf 7IQ pZ $\#op+ekWH\ IX-oT%.p %\pUVJ8O J8Gr $,"af M%4H8[B~j$TK8Kb UIX(aJ %I(0_< s%̑0[, 3%G(0Ct EI*a&I(aI+aB FKȗ0JBr%0\0 C% 0X ۂ" _B~%dIȔ!!]#mJJH,!IB $Kk +d(tL[ h%!RBp aB%H$!PB_'3}%H%KpJ%hMUfZ%X$(*HSB~pBq IU/[$3IQpTwpDa HZ! _IRE[s Qg>m m#>n CH‡>h -0} $'~W^;=ޖlMy^W%"eyK% ݲsv=#Gll$'$<.1v#%<$ah6 [elE}m @Ki p#0as[D; lj .lU6**ʲ[d͛ejy 7H-`uk%pdͫeͫ$\0ay%i/FͿaulK \?6Yɲ e d="/:OzvbxJՆa+V bx1܏a 0܋ wc Ý6c& c؈ac Ínp=0\ak0 EQWaZ W`a0 EEV6_C$4 l $JP-, % 0ۀAJ  $dKȒeit |$xK%8pRډ.A``9zf!~5C8cÇ>ᴼ] ;0< cx í8`h'*L g K$4K)!WaI*Y/πGcy.fEc9GT9S&K$a K'a1 %H-!_( yb$D$DI!!\BP !%A=7#;c 'W `Oag c>pg> ^20 c؍9 0<#8cxv 0l>=)eB¹|p+DIX(Ų@B $J/aH-aJ$K!a" $IH+!EB$ }$$JH/!NME@%" CP{1a1M o`xk(G1\.bQԨ VeUʂEmYQb C8gŖWX-X^tΖErԱh閖"K 5l>5W475_ۼ3ۛw5ΝW5_LB325TPԸHin5 7I (oݐi`go+XWWnCSuuW1F=uvg~mAM5a'lcǩ;$g R-ڲhAjEQ喊ԲER2hv̢Y[fSmV45urє-&N(S2hLjAQᖂIdtj~(">rUHEQA#D`G02P慄JrUȆC,^"zUgMsG>* ^z^R"Mxg^H3oOrF9=ӜlHss]$gjf?\x>9:vi7 ̆sD'dBic)c[mfխqS <պf*Jȕ%[ 9G/^"rǶFL-nc7F䖌m]e=4UJ66766%7�sWAd45VIF͢Rcfl 0Qdʟo'x\㛙Fb|K@o'In D^ޞg$ypVBaaW>+4~w#yw`ګ$#{x30Ȟ#'GNUjuǤ~ 9YYhxw "/;0IW0j D6d,]=xj$El,*;ޝjQlVf 9119#'?XJމ'WZ95jnM :pI'o r8B㌶N'b >1lOQv͋sw~4bq@5;.M|q= tt=!"t8FL-A}AΛ;'+tL6wNhY+.ݵ;GF38CF-#=9$.0P[V9s'rT7VY#ueFG/95ߪaY|ϐGH]YlRٽD94V v|q- 06t7{(T xe*9#ԯqOt߾dT˝g NΨĎ ^n`^fܲQQӇ%%(R';-mj^#sbq°>aiOwtzhA}G_MF-n\ٗKC]z2yϑAA% {_meϥAkݰVά᳚.{>8(/ae۽R +/+JIqiNnSvC|bqlӝÛZpwuD-p8+GMKG~ZO3"#D/2^ ֝da12:>q?k{mX*4qVoLPCIIZ.z ەG>].٥Ȣ%dkoԣ2"}ilwlv'FF&jf"QKIBuh kM!{4 $ ,nʐS59:gux;V$6 ]h` -8yݫj ]r|032K܀>a:.Fv;-&uXl9>Hb.<∦Y6Fd~ʐ(t;fd^DIO~'78j~]VHc뜉oD ~yx-# 1kKk}Es >\#:>qbdY힀zs[,vߊyeyeFe[f$H^>.[ѬusBFWŽU՛WX]0qllN$pSt}cxro<`(\oΫ#:q: [2U:/Zwi$g|q4y,pcC4Tnc2W豸`<}䶫k>B!Y}':2OCHKc H842+$=+'_ipˎK |)ռ,n(>mO19Μd5ւίY4 E$vQս 54Q#ڨHO+=]mƮLsk!ć mX{n6+{x]WPwWPtgK]5GzCpMA호(GXocюb@Δ6q:=Em8 a OR\{gvoe C.}FOsfu+[W}"]!ڨf/X[UvͼˇhB99|efdbԫ|ԨrC&u켂),?ѐۘNHx"/:xf)~)4,Y0uAN2oWFU1Sz\X'`{sø=X'X'Ky;bZ9gϝ|d[;n#Ie=I9#r+ ӽ캍Q4)eÖܻ~Âclּi!OM8gD_Ń|,lES6oZ,v4\!鍖0Y v:o[zRDl~t'5λ@1 |W]Yľoa>H)X{ t!ިV'$&(,!TÎ:;،ςYu[&''rSU78y#(1Tc/h[i#cg]6+ IkX: `#T3+]ec<;E!c>yQ'8SIT60((ض]BvDg]qnOHLMU~٥MKe29yPӃ /LNJNIv'/[=%i|4 xrRa܀8I .-4Z9{t6?=5xheS360"XȹCrgzJE41mpip9E} &$ʛvEzgfŧL/2ԢymItȤ.5n'=vO1!aF Z-T?nġxFl\F;0>_^ G\Z8k.''G?]9= C]^n27R0LHCEy](GDz3 nh Y~M+?h_&wmwxz,I,a-K۲焀eر4&2%i2 !@ $fJ˳M)Li'6mIKJ!Sd$ 3MSÖ!@g;NgD#~˚|VY4􃅞Pd/*+R-JOKpDG%QsBǟ`qE3PS$)$4k܎IL.Sϣ5+BP]'%f9 ;]ɶH#fo) A';gCN5kΖCζOȦXHbh^̙ Ԏ<ɞS0 2H3bW˱gƮDh홊DyK=c+i>~o;om)N8-CUΆ֡f~p/Y6:~ǭzkt6}DTXge64)<|(Ue\Jc]mI}H W fnJS!ѷ# }+o*fuC1.oMt]qeZ~WUMvXP6 IעRrM~HfQ WEJk)Bڎ su7^5j'aDXVP&?_2S P9[(]^7jI// xӱ2v4v9_VK,j/)8z#uXbڢS$=q'vkíY7Be'>B/Zˎ97{w AzX7(g5i[ "FrZtdoPS;+aJۃDc1:Z s#C԰d@ 91ϩz2YC;9,WunP1X]2i)~ Mˣ[ Id+: o&Ɉ&z/k%8 kvt2`ѡ0Q0Olx$+L{B'QV[mKr[,@Q 18 M=M}eD?|Ӷ|F{ӖrH1 lƆf&6q7ƈމF`bh" ='ŀItS[%dyɞ7>P4J ڄaә&G1mTU}]J=MxopL2_udkxn&u 8 xZ* 5iʼn3cC 0QAɲ?l!yeriHf[S#ilbS_3m{)nW'E^=}r=Jc@I:=Xᴠ׋(=@g] 1 cǪVW4HSܜ0>aSNHXyȱ<=5 &$O[2J 0ıɝOɭr=duuW\}~g1N4,D[{U-( G3nIco8y>GlX6;-C KC4&1~vs;'Jݻ0О s\`s`c.|CB߯-}g/V=sc n0o3KQ<,vC% x]b x![[ % (dL\:Ww2I@@@@@@@@@@@@@@@@@@@@@p)J_:k_#>(}^F:WWK> w2yYs6Q`07vx ߃߇{ax) &'2Zv=~fw5+i?[h6w)I? vGI߸x@b,Cv!$ \X20J_ Btlag!/3W@Bml\ )nmidsoew^eًc1>@ FKI7BHQ:Z(闁#$$$$$$\()CC`痒h\1 FzDZ1K > stream x]0>E߀B-8h{=kr?Ew9_eqOy,64.ɠqA̛q4oE]u|}"JJ@Mx4'~)3Z6tPJA Y ze%0JQ$=CzZCZgڬR60JQ$=CzIyݾ4KJf>WWcU_fendstream endobj 34 0 obj << /Filter /FlateDecode /Length1 52524 /Length 18035 >> stream x|UŶ?fgs>INH!!$ $H I 4Sh`G,xbbū"** AH${awzYk͚L@2P0tBnzZ'a1:{SJ1xtiE/:W>* Tw80|VVDKj} e<}s>CEnqZfה̅K *a=LTF0`:1aTFd(r`C ,2Xci:ll9~(/? wvB'D@;@ a4LBmgȅPp5Mr:aݡ L pi> !A`,L8.0J`jp hB!=! 
pӠ4HIPUPp%\ 7Ek2A z zA7YmntWQam = aA zSUUTTQ.hA -DAfՂtqqeUzNЛ]%轂>8H}Lg$[~$AbÅaA zSM-\$hA; 'hrM4[XFt\AT.2AWzzWWj OU nuAA %t=&iNڲYSP?AC 4ZεEՎ4Cт :k7Gu.2A&w Z}JЍnF = ='9A AGK +hA GЁ3k A :VЉNtfm}u^*hu.tW!kA[Ղ>*h!mO࿁Wwt%DЪ+s-5Тb0Aך"hMϛR!*v!N y?Pb@Ҍ7O%cBDH)hܛiy7>E{>xvz^8F BHMH)Y@d=yl'{1PBJrrM١$:i%^G憐6\?C6lUV[75㺡h2@'-汖-hhز9[U|]񠮭?*;m2U|E8ۘkZqa$UmhZXlyҝqmlߵe -Zq'-=jn*]x +Ɑi*eGZ[IﱖRM=!0)vDnh^F5fj^(Bƒ8̕i^g,e`$!r&A!_+ٕUSs Rh&#Rޟhh'i M]1QŽP 4`δ3PLq$ጂa''8` yyҕHGEEj%U|pM+b ƻ 4`,iyrqoX8_Ii!h6+.`bLK6ob7(۔sȹY<## #l 9!"ATu+n @<51G!JQvЎ %8K0j $ mL&SrRN*`9"U\@jCדp9J97q[S1VW۔ %nWBPXD)QpÝJWe,ܥ)2_E"xYY\()fJJxMI ^WnUn-jSx5Iړ4f@#FE"TS#VUZVEzhhZVKziK%$E[-%I+ vC'VPrڜh^,43w+Ox4(x^|gl45l6ZQ6ͥUP?VŪ?]@X- 6ͧl[DCbch{-v .c0vl9`+ eױh$[VӎQ(bh4{=NcؓI˞fONY,cϳi<{@;K4^]&4mch2{Gه}Ll;>gӞK%v4d;io}ne߱h?>~d?ҁ;D0McG:~w;N4bh&kd4 Сj4-ܖtGX:P8֍8ZF҈CS4P::su+`(T8pq-VUjѪj`>-vvj˴efYJY)9lrVAUB0fjX cuڳyll!t` !-aK -eK!]./] rv9t6:Jhv-b!=Nl [qֱuЙ=Ş { l$l#$ً6Е^n-tg[VeBOzG>eBo /З}ž~k5g߰o }˾l{~`?@`vt ϐ~a@&;ʎB;ƎPvlva4; -~laQHaF 7m Ơ4az|ڎNXAheshe'YpȳqȷgLhbuT Y0kV?|]F/8D|˽%pm;ѹU w7bnp/y7:?c'78?s|}mH7:?B#_?W6gv؜N3es[#{l|gs{#{m`sd͑6GO6G~9r/6G8眻G /r7#lns͑6GN9mJ $g\DrƥHθKqi#.]reH#.-9H˒qH|m95q5%G\#@W+XrNr^rjs͑0#6G6G":ڜ9ms&L͙N6Gmt9`s͑D+Dp$k+ȑd#]mt9H#lms͑6GI92@#l 9nsd͑ [W2mdٜjs&03I#=G qM=qwtyO>P#:Φs\ZK|.Ut^Cŧ][^G Dџa =B,v-y;[AGBG@i1-2pZu<< , M`ҋw;n[!sB@)ĥWT;QjƪpDw~7|Nhqv$cjSݬ7@ T`5DmWCXlKSUMujNեUS F}T_m)Q@0`o`R >H'f}no7[mz.7j}DzA~?Nmb3?`Gw{}~D?Ov~A_k~WSՏԏOOgWkuKVݭQSW?zHY=QU zJ=6jZ>Rcqx}IzO֧Sit}^3"X/gRL ]5tuz>O/ "}LXDTLݕr*}~~~~R^~~~~~~~~JCSK[GWO_Z@PHXDT_?C\BFV_?oП7///WWk M-m}MWO_@PHXDT߮пwo;{}O߯??/W~L]?O jf8 0 2܆7kuSiEKM]^umvzݵM[]_v~սu7oqouos~?qtֽ۽{^}?v>}wq S3ntn4}L_ 0 3 1ۙPnv23L6f_L5Af9L73L3jfs'ٓI$y=]=<==<==<)ޞ>~T@ Og'3ēdyz=<=#<#=<=cCP74FjZ&ޱwww"{ xkEqwkJrT3q? NF h/u.obY"tXw4>LLBi55`;&~j8:onF"51H&[dSyloQ5DjG`"q-rrED3Et3C((>J J?(J'He2 Te24g::;ŝ)M{l7; J@կcqϜ$v3."#(wո.vóAqŮ8O!O1Ela8Q'GęQ}NDMiT*ZDsg曓)Ts9ݜaL,6KYl,3sr¬4so{ecX6vwY.&a̴w]8 3Ҍ2cc6M{r?ݕq_f Vv{y QH >ʶ'q@ƒ߹Ǒl2z2R=\RHY}br3dU.N&LF%b,67R&e\a\A6;55dXI77wU*qqy߸׸|`<`40 ۝Ýg]]k\/] dkk#ُ7[#n;vgStݛBs'NYгSyUބg.,;f)! $O6=1t>g v}I ;I$ 7>9 -f*&Zj8-^%h]D-IKֺjݴZKKzk}Z? 
)N>#/ȗ+|Mvo.-Md/#*=J8=AOS4m8UTqcW AG:UԐ{]@dtrSѹ` :7d@&0I,ȇxJC):a)v~T{bŕH8 pqG&Bܘ⪝ ݎbFrRL ' ,!Kpe_Exr5:*Hu4$MeH&-ЍCށSORzx4]{*8FlF,9tS1EIscp<7Wq2 4<=sAwms'vpO w#cw{[{HњwnhãHAKњ'@|JhѓZn-{)_zf.osw[̥Qe,>ѨbFOyA2 w8ż\b^1/?1QqsP1GcGw;bܽp^|IbLSL.Rq< Ĭ3Ŭ #po'|FÔ9w#q}us$ź3)BJIڙ4mrT%y9 k/]|qwwBu`|,|ێ tpGDN5%x xf0\'p3pz!p ;/PC ;UhIۨW(EpIgҝ#d8cIj21:r}/y!.dZ!{XH.+8)8Qp@>r5z2RN6N3ndad ,0C48*~p/1FeXfXn 228"9"Jn1f |ØyV*i ts8" 2p=a_I ~nR4q)T.Kc1Kĥhq)|U|U5gן1?#?c$N"iw&TgwgqnvDPM*= H >W)b⧞;mqwe>F_H5AFn 12L#jdÌc1m1#5&<#I$䑉u7wG(Erf>BiZ"Zn 2tbe)Tc1ݘaLcQd%,cQjsx ֵ۵{^}x"݆vnq3kfp6dh<-CLw{Svg_t}}}h" MŤjz͎fkƙ.fn4S>(s9k3Ǜ3׼1k:ޜg7 Ebsy\f^l^b^j^f^n^a^i.72WWlay,MZ4Vf"[e*AJaZV]jKl6+csYbZVEl1-cl9[c٣18{=͞eϳK5Ͷ؇c}ξd;Nf߱l;~dav~g')hKti-eYh[P+̊"bYm55Y$`k0.?@{A8kvkڙj؆2@~捘Lv鍒BoK%`e2QW*hQ6ʿ7>7wJK珎醴rY[nƺù]9tr˵pȥnpgŭltsUMlfr-=?1ZBۛ?yG`x"ῗ#~{.݂Ϻ8>nM:>~ۑ]}Kt_|n7O5\\O _CKxw[H<>/*C7^0@N?0X1ƞMik<:ןp(oc!r2SxOg+<} q%T!7.0c\a),K0W_waM1vD "^ԥQ'sEԻ1ߢyĮ%oМ]X%9;fNs5==M{y(!s坟 7Ag_bf be'ïp 9ljCI$I$$H2 s`!uJ_*ԇGWaזQL<^+v򜻗uj+aO(bx7aKqIڢ5G֊7/e7Hcl{0}a yۂ_ .W\ (Oδ (ťz^M,%](8Wn,Ch^M7{SS~Aq@F9a'a "QCb 7I5b`A21t!)s|v+{ƈe4,X[s;iY3O:er~^n΄Ǝ=jòfef I6h+9)K\lLtTLj_v9 ݡT!%3*[РFeg'xT!&6K(hbRV2 Q۲dժd,v$BjbofakFw=<.fD{(VcEăH )6ofCּZkHԐWbXrcЍ$n %.Z|rnhLfaqqy" Ctіi5}`fAYU\85b4sŊ+|2)4tlHF?ib|+~|-S Gϯ|g؄Maq~|,WOiX6.Oƽ03t%'7(s=)4K YHS1%\< MŰh#4fH6ϢH'C Ǥ43cdi>xofIFhThvq*vXnʢ1r1MfDbzJPq^ 5bb஋6iEA)T<%R)oһ1'/rkHT'58k1ñP 0yhòB>uaEM ba Nli%DXeaaY~C~4,_OdGC6XQr bmRp\c y2%Ydnȋ0VhPR&Q-jgq{\ $l;/I-Fϗ+طOGیvf cWPyїy3hYEK:f7xbݘէmncLM;YPf.Q|s ϡbC SS="y s _ b;%q@V5?c;x&]ԛϳ  %* *o-10o0Tabi{;AMyvXyt6Nvإ{/7LT R;QnSo @wo;7ng *;3;ϙnu̱wa'8Wa{/7$8&:߲2fN7Tg|ag|ag|ag|ag|ag|ag|aG ݡ+t^eP5PgA P T Z)׽+! sC9:/ǴPy"VX!-ƒC^9ieXL+D_mŴJ' \c1T}2b`Fx%VthnKT}^cR+2Lu9czQ#RŨyaNrH)-"dzS/NX=JL6<뚍X-"m9vSr磪ם+^iϫJv(yvgĹ@ԓ$ͥIV!ZX(PoK9K\j6p=rY{3cm":м3R*:R-դE8B9t+ k m*iډgj7NX-3kVl{T)\G%/6KA@<) uDu誑| l_z2 }-F]-,TY+\[ڔ׶39HZ(tVNh}XẂ!2GLQ[l׭i#W[4_U$Vعq^%X/xX|F#E>_rMZX-fZilDPZϛ6<ӹFUϟGg[o^ ։qF{i=8g""mrӮRsƾ W),]TO} ͫ5)9/Y.V?j]]b4| }"IȪ<޵[/免ڪYu!U5U5ueUIeKjKjKj' ),/YS-z+Kj*^/UXQV;[[?[SU_Y\V9[EJ*fe6;;[SRX->jxk+ qEU*ʪ,Y[R'VTR[VaoyY%U5;lhXvTW+-IT(\-qוb%5826V,Wn٘R[U)zTȾ8J kp`%5IgX߯OozUy1N3a"rԻȳen YT<+(j\T,+MY_WX-.+<zIX%'aޑ𶄷$) [$.aW%"e rxw]`.sKxV3'%s]D"B %tUB-M1Ϻdĺ%𛄣~pD/K|]I.S KH‡c1=+ KxFSKU_}𴄻%|"N HmFµ v].JP!\\ sd\ $L0Q $pK+aQF 0BBP"%PB$K+G%IH0%%$HpJ$%uR$t* T7*{6I#a-RC^R ^ IX#uxdO/ Eb4H"PB ~|%H r (ᔄv~#a%/%|!9%l% a%l$dn7 +%(U $,@| K'^BZ 3!ai %R!nJ(,!IB %K#!ZBNr)RH>*ሄ_$C~𣄃H/a$$KgԺ.$t/!NB' 1$t)!BKS!A*C~𣄃H' {%+5rޓU;Rߒ7䂍uR$( 3$L&a)&Kȗ'ar%H aqJ#!ABgx q:I#!ZB"%"$Ei6?O?j;cߏ~E=P=ߍ[߆~+wп-o_YTg?~=yBJq wKKR"wHX% MBLrR HX2 K%\$a.HB $̗0OB: j$\ ZBJ % &6H@ $J/H-E"G%IH0%ErIpJ0ҒD>E C{( \9i8iLe4)RqI܋,]$5KrK/"\d͒/8g/ʽp͢\uQ"ŵ0{~5s9/>7~wzPS_\_WSG~s=]߸)ͯOe+@=xͲkrk5595Ր =Y ֤ՌdWVʭ̮ v\=J֨@yu✃-K[fvܒ5ŹEI3s rg$M˝fZԤɹSLO˝'&ɝ4.wqcFQI#rG;<);wؚܱdhRVn&M_uC᪻ :Lv(Vw8AYJKۯlO-$$"ڭlwO'iPoRWN_|U=cͰ~- <^b2:`UZǩOKeyFDx=4533CWzH'{V'S s9ä$͌R\MDKj "YqWB#S X%D[^ a#&䭣?ap#O^[_PP[nz-?)b}ϩ^-JEZ-o>Ǹ}/o!=ӟRӵtra"y:1(1HR0I(UeB֦JLoGeJ jWv|q+I'}N}rЯon]oLu#cһwJJ*z&)Q^=*QTXҝ'ЬS;,"L :ڎ~ ͛7l: {fW} z4ٿS 4rXg~l?<OQtd\BȠAGtp24/I>W{9۷oiv1M"`fڣ{PdƢ@p 7-V{+I > ˺`^x\~?*O笔P3$.ԱXdgJ~}f,2曮3(3}L\7$oFa|xJv,7@D%A*<ݒtۡsrc3 Q땕i 쾯sHsu8z(u8;7Gqfp^Mӧ%,L )}1,Cr半r88\sܓsYy >aLK/4]L\ݯ`eAv}.riNoRIM-OW>Ϯ%8x`fvIu!K' & 2$W Ɂ݀4?}#M*fF<,@EvKN?mXPа*t6iCS~)<)-zL6z< I>9|6)s`w*g.YFEQD\=Ml/ڣm>h 73!~!ueRdw|Y.M9lR[M]>JS.ݾggái=#ï.oLtC.M1 U%*|C\0e40)`Kg$&& 9)np079: ')2ASFge=1HM.A"ofݷ0Jɿ7Rq{$)]%!!0k([LtN:WuD%M33N&&w*sZtĈ1ɡ M+aYe}SΚѯattY.MC''?f݂mH,MЍQD/S洔1hRц(aRK)6j@4RAx<>?>(O 4&MFdwUBMYzvɍ3 >i@c%!&--<ٷ]q<81S2p?jzV:XflRu?#'-q_߸NÂH 9-w:UZ?Ӝ ji~A};m=֤B cE:1=k.S=s/x2.!Wpju-dޫR$NH7{; ]ӳ{N5{ڙm'Ll=}k߾ __%Wn9y7ĩ(맜~^tμWsұa 
ݭO)#M=onrVM%e_슈?=͊NtzTw7C +vǭinז y6ݑ):;Ϝϝ\ksm͵6\ksm͵6\ksm͵6\_H#~tG,CT GD-~SMmklDZ)o;q_@8ǧUCpEͷ6+G;*!yUɿ.6\ksm͵6\ksm͵6\ksm͵6CM 3_PpiS׺@|lendstream endobj 35 0 obj << /Filter /FlateDecode /Length 266 >> stream x]1n0 EwB7$f\%C č{Y'ڣU=*0б{T``a pc p88(+bd 0* ["9:ȬHFdVVe pA2IRI]rJ^Pv=e.:[endstream endobj 36 0 obj << /Filter /FlateDecode /Length1 54284 /Length 18962 >> stream x`?={f; !!$@@:j: 4;b^Zc!b#*v`Cل$[3󛙝33gs J&䌾}?|PO ss+ZDr}Պvﶵ?eP۲/&0p/iXU+ʟ@غ"6,xUL M,`ӃOoc54WU#"?l8%5MX뀷y؆jO]Ks[a8`ɳ֚[=8_c%@V6@֚epJCE{lȬ p0 .$HQ0a3B p lPa Ht `2b,eV8΂Ak1 b!C^=<( fC-4@+k4`iZ8}$$G?>wzބ`?H'd)%u\Jn$[ɓM/iR*KIWJIJKCHDҩtmkyJz}>׾&t6K/}Q;zq{]h2@%ƾistM~~_//at~K?߷?=7_zc4sZq?1ogdAh9D;䣕N^`?>; Sz}iғK7Rt\^Zw-O+~w~~f!SUb%s/SE" Jy2芩xD"XW<2L%eoyn, s\d/ً%b}dHG#Pv&;dv; =AͧI~}=PN@HcZP 4`&d=!I8 jq HDñ<ϼe(/@|Gx)˶J>d51)M+$LE4-/J1[`ǫ[9Vτ3a ꤄!@$ kK<51{CE۱wHNE,XGXϵ42,Ii 4Fd#磇 . ߑB 6_ɯp>p\pdJ&U|R)H ˤp)6K1R M" ˥tBj:AivH'J'i-<"&;331""x\WxBVzDMbIG!:aZH DW*YiTIҬ4re9)m$MY!:ePNVN&{ I~~FΘHs̳U5v͞N{clJ5-gԇ-c˨/kdԏ5fϖ46:X b+JNd'l-[KzS)4N 6 4mdΣZv-nf7v bh,In:~v?`{&ct({=Ag34=ϞE"^a{7ٛt{MGwٻ4bh:v !f=lbO(f_/h6}Eǰt,}KDZ}la?x3N`_h.JAv0-@$t%[2dJ -EaX:bZtoiVH[V0aZtnYkmEYQĊ+֊sZj[ʴ2fX)Ʊþd_x  wǾgC>l?v&oPCPdbZ&s7ۿh;uӄC9Ak;Ck; @k; B+[bckE("gY^A oZJ`ae"+ʂhk4d4ȥ`LwKq~",7…7ClGixކ ؇;{ .bϽ;8nw?06cw?q B=q/ nr/r~9nwk`9ns~(w?rUG~]fv9̼0aF>raOF>saKF:|0w#FwaGqpF|2~_F80ra䐣+3:ft"%N3, FtM0#. L0[G0:e3#`D聂=H0 FP&F"F8D9xFfbf9:9 v0002ad`DC8#!##FRFRFF89;d8d:d9vv02adx ##y;8Lt0S0ɱ5}6͟ѕtLZ.h+*ngэlz=?=c ~F?_/Wt/~C}{ѳ۵ȋEҕ@h1-NSjZ2ࢭ4NMW8]MWISCFt}<'yNU܅r%{hy#c8y=2яYدD8&簎xNhCOxD6X:^W=q9P9T,{7'ˊUYݲ.){d7>mvҮ#c``?Iz=Bo;cq}>EgsGc~Fa7/[6zh}!Lmy-lvz;E[Ozqp>t8vz6`=I Nt8΄ p8΃ Mp .p .+J kznl[V nNÝp C[q?lAxvh9<OڑgYxEx +*&6xރ]>|{c>s?_Wo[?ϰ~+F5& i4K-HsRT&͓K "iT.UHRT-HNJˤQjitKz_-} }(}$>>>>>W7ҷw>{G'git@UM:($TT.R :΢ H z&D/NzE"}L_ &}Mߡ.>M?G+k[;{.}yGXDTL\BRJ+-#+'˿_!)Tu:]Tgu:W-Uy|uP].V RRV]֩Ruڠ6MZ0bhW;JuzzZ]UשՓSS1nPR7gQ/T7U/U/S7S/WPTRVQCVN^AQIYݢޢުަޮޣޫnUSՇ#N1q I)iY9yE%eU5u M-m]=u[@PHݣ~~~~~~~~UVQUS߫????_oAjh.M4ͭ隡jzڥީޥޭRkMw;''ggGGcƓSƳs ƋK+ƫknC#cob0~5efM4L1}M3 4`3 5p3Œ4CDslife2Gs91Ǜ<3,0'B,6'{x|Sg=_x|nxϟЂ'")ߠ|x.Mߣ-v;ާ #z)p1[p)[q9 _p%"K S($?cOu}|VǑ$IP`2J{%~=z}Ca*6x3^x$}gJ9>g3r~F7pL)4Z T R* AI@~P?nc{ra;7'}fڬ.W3ٞs:̹ܛ-@tU#m(|c1z3^~]GFч_+N݇;{1_bߡpQ;fuAM]leMs9,5y|s\d.6 Ҭ2\b֙Rs`6MfrTo-Ʀu6̴;~8ʌ6cn'C{Ŭ}}swF$a$t2bwdD,H4Bi&'B&YM.<\ZvX#zit2lN΄s|B{~t֞_IMW70A4nCi2l)4vp*/JHB\\ sm3 wIkd2d*`׵H&]$uA2ڵ"szvvE\ŮO]Jח*UR5ruZ@"֬VΓP Rb8e QDe2LIRJPF*iJd*Y(e!WkuyE&w{dy&Gd|B>%KLeD/FC'Pd?o P d> 1h(-fca, L(`. `aj P!ڠaC(?*XqI"!#ߘ⪝݁|Ɛ[38}9 9@. P\Ǜa! 
C2y<)Y,OFwE"쩼NX225@JRpߘ.1O}cTƙL7Ε悂pg/믃}2bC NF4  A)$њG@ HC; hɇ@ZD;a=FUOhGoig,b}ƒ%eaY{DTw42G]>t>L_B6}0:z"zl(Fߔ' wq}Vc$3DHNODfۀrE\،|΋r^4΋w A|MΑGJ ޟ Ĺ["/A!C[X# 5xq:{}p ~G|`5~۹<5Aq ؁= FυGѫOq^؅;&3kc̅ӗƣ 0~3췷`y6o6ip|(u-$[ auۮ.k 7{^wajRj ԥ$d6M"SNq(nF6#C[8.$p\Dn帘Ʊαtr"wp ]ɝk]k!6mmیL6㓚}qIZ2G]n#8J>W񙻚5|g|ήsvgF>s7y9ќ x,g<3>3:#2@"dQ8n%'4m%'7]dZR6mگ/ȷ'mP q6B-"-b-cVBȱRıJ+QӊxF+XMXMؠpl8n֪lDmDj8>r|R[b#rVѭsԵ6"s8٫p=a^O4{%-="-˞Em=h{l{.1\jcYٳس٫^W2mfOe?m9|O+ן6|> /sb>|+W' kAvyl|w*7p.PK(Xf*xyO,IC~[$=:{wFs #\ߤa@ޗp_%qdz.gTm6RKҵ -SFilm6Vh\-O $P+Ҋm6MfjZ6Gje%e!u߹Z|!uHk t3v)JUJ5ʵTbkmH[kZ6^ҪV[iRm>_@54ma`6"V&)l*r#3cx8h2H)ԔM9dƙf9L2H34Sits9Ӝe6Kff sy<<\m1ך)i,sy6f٬yXk*X%yz2~*1 Ÿ#]*5@.HiYCɿ'Ȣw;`>r>6z0=_} e=6TDXfiC<9Wnt3 wf4=_B=|y s9/{sͿspy-a>w| G s?0s=ss6}|3g >|>C| y[N-ůW'9޶D\sk.+qͥN]ſWMa#|n'wZZr}jj#Lo۳ޖJ%fx/q |U[xFD#?_]$K1Z4*rMYSNfKX=[Y3[X[NdZNcg l#;]nf؝nv/=b'3y"{dow.}Og }+;[Ē-r[,`+ XQVkҭLk}5kHj`kf漹OCyW ېHzbO$iR&żi iT&-Ѣm~hwV!_(>X8?sPv^O@E<IW K0,ro/,g|O]qGb^lKw;6&7OKNwQ.G&K s:6CT© &脇q?V)`{{]Vل){$}}haC[]~e<|@giܝl+~>)ɨ%0e0 5b2BX PPZMxB3rXv'GOq$XU(O䱵?O#99˾yNw抷z.fϑzMM;u0uoo-k0'[?aI$d"õ_ۜpYrI+nf"c}yX|y>KqJ:g1٣Geef%' ;(f`TH1tJ{;;帘I ̨QŬe:弘o,Yۯd(Sx!{Po~V2oF)͋)v)<.XÛR$΂u; =7&F6 댏iď%<"珺w46s2NWn}w ݱ>PYhVTW,(Xi#߸N!1yCNCߙTb}b|ޯT89XC w=EG}9{kTbsRBex$'uJgK3T/*.s}wPdxI+l'x]ڙ gw O8z1-1DsP?WquvByS39?7cyV̌`w9FBݏΠ\յQըΜ2,̞!r_X[*2{0["fB6I{F'd{KI8të8%Xv0Acs'٧]5wRxtY.;}Rb;^m`FOukݡ!^Өtil. c ͞Iݧh,\̓eb{KcjbbPrc[<+xƼR>ێ3EtwBE,H V=INvnbgmq/ +L4 кTx}+^_񎜜-u6b 7*}Y&DR~PLgO6mτ;bȆw 0K"Rn;91{b PRjUc Pm"nl% x[R N :틖֗quI1pEJ}䲍~1|mRcϴ}Y"'x2AjbϫbTUٖjzȩA(CwN=,kxNw6츑d/I%V-+3xmN{׋J 3vчffl1'e;[Rt'o`NLfweͶN#7w;{bVE c;[1!>Tl(?s~ⰡZ\޸QK fwT\pN#zds&hZ`(<~ꕌ&ٿEyZTmS,M)RىAÜ!4݉0Wa]5Hp~;^ۉ=2`9N#]*w  NSӠw5U'.)]N\u ('D'hw P=ۉu2Z$`Bv'!;8t~Iv; / >JbUxf(o4+#}Fl3 sUE8{b ໛mwJȀ?YYӑvG33q/{3fm%/yǽGdv'F I\z`Z#oaÙ|3&_o_K+b-=}\iԉN8 1C+zfH6W6WaO*'EGnb~9a+(֧vޟjvߖO7oG[Ko)mkІ&,_j2x,Sۛ6Q<$zIN_ŵs u sm6k9m;^bz+c|a;6&Em/>jkuʇxnk3bmUsNZU|="mV_5?op1nk#mrNUå>/Vk<ֲgֆʞ+WMk9:z:6ػ-M~ŀ=1a}Hk5۵?๢4;RJ;u6أi.W?ֺ8&WHs'MR_\mnminhonJohά_RYVӺ:)[66W״6y*ڼx[[X߰ʻQPmmhoZmƢ5X[Tږ-lTwִy[k*x޶ AUE * -dSGcM+li y[Zv:츷[mǁ=*ކ&Vs~ oX\v\& spi/^ׯYm㰱bEž sO8*++Zŵl*Zc5I=ԏ&$MKHs gLA{kEuMcE2{5*ZN[GsnNUZ%Ha]:"m]Z" h9I@ `Y׀Kw P5 aZ5T BrqnE] ,0_<eJ0G@f !`ivE!L (0IDu"v!L0^@NWx18c'#-`Qf mf  H0R9B@"`dIƆꉢ^87D@dXQaQo(-+ J@]aS"wMC* D $2L0` 0]E,PpHA^&R 8 ,৮Y? +d6 N7{|%K_" L'>GGȇ>/` +o xKd) *QezI^|^sEɧ<)2 BD #< `lUd+w K!u.R UE7 I ": FpeW`%. B[W`5˺k.WXE6 +/ D6phQlEɳD ]%g.4QT) X'`5V 8I]h*qD+W}i&*/"Y@F ,׫PD@mW)5]VwC ŮrXd. X+T]#G'LuG" ( nt'0<収L0.)&bE"P$oQ$_d 0^|Kr|uC+`LF % 7!w(BFoB._B#ŅFHL0&2Y@0CEE ]0Xt"N@AbDdWt"J\/RQ2B@& T@(, Ht0P@觿_QG% "H]> .E.UK"Jʢ$"r#rq~13?#?XQϪΪo_/09)`xGۍ؅{x;x͖D <^5{<^%L OOzE=izܓNШG1?i9ó4!O}ԃvOJxl>syV5^-=nvF`O L;p;wS?+,*Lbi9t] y!D 2 ~!oe!I ؂.8.O`J딠+XQR~T ݾ|^,XaKʱŢdÌ气lqCr2 d1͐rq9ư&{ @5,{ *HB.ى[3;;ɆY̙1ӵJ/ R>saJWPVܹގa;X,qQ[G[bb"?L;hg1m᧽;݁?!dP}[Ta4wÂiu A% c mAflޮz9H{{_s~YY)Éo/?.W$)##=}ĈԱR$)f #.mX)c,)$v.}tAtRT~ART8 0U1Ӓ(UwI/j70oѨCwSPuoPPSdPP&*P؁} 5Wn"2rtR]dk{xpDc2?ϥqc>;$$Hs.`&5^}sIPh;Hչ+nph ov+ӸRdN?Gх aSĐq¦w_pIE ;w^߬AP1exYl7:#RrU5.'B ɠCA a>ج'oacSV._+ɍyssSИ'f42Y57{'$d.hyE4N7߇QT1{YkИ1 oA^T0e-̊Ȋ$_=+sepQ!69{:=t{r^ݛkLS|g/[AQ3e\;2W2: AAȣL_ J)ZvyS 2%w`thHL+̲\> 2dh2[ʋ}FKN˦)e>,8S<.7i!c7ͽ>Kڿ̂ΡOAO#"&r@ȟ?%yPPIrb u{apgVM^ĿZUP( (& .(0m,2xTNUHϼ 'hn ɞ 嚫S^R_4>mJE Vд$nRAlڸ,gd \Iݟ{R3\2J,kFҀ䭒''b\fd$D/%D( .d_佶!g  yqp"W+j8]v zEΆW/Z*U9ny4sԲ3ʆYyy7M/_=%÷ܒc cë{L=b@ 7g= Ϩ0aFƈq&d rx̡M4E9yt/7~1:_qNLa9AA9eH0?:12Q&MJ(gдDpp8dkg0zddǦy՞ؽcrL 9C#ۭM w4ݥF'i>exd`[|Gn2z@1kӣ .i >$r)o`pz-,cv< eI33b겿]RFTgyt)/;||$? 
}9kHT@$ZC " (9 Ck^_n^um{ͧPn/#G>W|#"I/cҡ%= :$4W$ (EBu=8.""65E3leh4U/~whab?Fz!+>,Ss#Rm%1~ɑv(_v=u|a>Я?YyÆ%e :p>Nj3|),L!h1pԼ,6bl /p8Tc̞-ۯwOSzzc?rXQ5 0*K两fEMw$2}\YURG,ݔM- 4fe 2sehj#r&YiΡ#BͤP{p@DDgMK<.!nBȘA,NQpRYK P^0+%N&z' g7?jکQb #Ǥ5/.C6-cs(>ʢF) -E+2KR0$Ci_C/ɟ 79G!u6{mRߟexnL_*]ׅg&ce L̮25sLڊ6vQFRE[(#t3#*[A:EE[ ':-yB+ 2t凿o|O׆MHǻggg 2gkC?r'FOD5`osYYLIB`wo!v&?w#kJI.`OjdWA=y3gfKBkM$F I&w`P~H;"٭ pnz9gޠU-nS}=ylA!ݎ.xh@JvjJ̠0Bd¡s|(G vDwP׎IBz@wq5L20oendstream endobj 37 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 85 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 1109 >> stream xᒛ8#B6HBzFbk'wUI5WI} 7_?ͫ,z}[)>=pw>.򁙔΂|HկSq'%~ q uBq6 Se: 8yE*Ӧ{⢈Aq=ey1 }mqo=sqX~Q˃5ϋpL,E*wPp}u}<#l1nF-Xp)j./EW.-CdW LxJLd|)vJ(+\ʑi&Αݠ72—nϴzOl'5ZKE4{:kVANvp~ϸry&R8U7ڀR8nɸގ7j҄hG?GčjvW-r.Sb+:wz('54Gt8Z,bBkVqy]y=0Geu\9Ogy:DFѫh6L+μlzB޿-N{x|֬7;V?/ {endstream endobj 38 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 85 /SMask 37 0 R /Subtype /Image /Width 238 /Length 251 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&D Vendstream endobj 39 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 149 /Predictor 15 >> /Filter /FlateDecode /Height 81 /Matte [ 0 0 0 ] /Subtype /Image /Width 149 /Length 1191 >> stream xr0 - nJ&!kgv!DZnh~Nn:UuvT֩ڭS[jP^Ͽҿ~_ZͷfmD[^I%U`FToT*;XN"EzU]WTeptM*[2+TY d@>?}V}ٗBeR%XJnI|Z!A \+ݗjN5/ 7*|/s knx"KKPV)ZXz@ TzM+T.|J fXD$JNqErK )v3U@eDZ")/uFԩLHR1"qr7@X =- OᥖŨng*j&qjKk6(T]b={T8].iV9 mի-&I=TQ8gڊzXw| TtZbtfzĆcPiW^]} BA21* M#6Zϔ+&zR-ORA} DSnJ: +" 1_T+sTŠ>/{*cU7A0PVʓP ,zfORE+SVo/jr*YJSTTBʊ薅ʣ}O[20]4l}k!QG>1٭]qy؂[7 gEڅ%Q55DPZ`)Yޯ98+n[O{tiyT_9ˣVMLhp8{tWOƤZOe2 t]ܾ>͎9M'YCBqĢ8wAˏ%ԌܜSI~Eߢ+},!0"/λۍ>0X0;D g~~@{Eb͇j7q;Fg8zKRzoS[jNn:UuvT֩ڭS[jIendstream endobj 40 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 149 /Predictor 15 >> /Filter /FlateDecode /Height 81 /SMask 39 0 R /Subtype /Image /Width 149 /Length 210 >> stream xA 0 ֿcCm_m_m_m_m_m_m_m_m_m_m_m_m_m_m_m_m_m_m_mvBRendstream endobj 41 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 136 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 1536 >> stream xvJ Di؄.ۙYg5UI!{JRP ُR׿׸&3/~(|;-/n%AMZz#!Ӈ5-^?0lHkB*b# g6KIjF%c<'-$! 
"'1%` sb@\pIeo$;ް}UxL%sy<9noL[P\Й!^ŝvF6%1Vx NewEmJxԺߟXw;G-{wV"3W8E?r%X/+uբdo\M*B{KB&S9gܽgQA|&%oK\.l.]2ݙ{A{Pã*Ji.u[["0>`/p(h sfzrF-wc6eS/tQl/hq\岬mZ677uxR |^s6mfGTv#{q%\rN Zi3-a`\WYVõ|i W5NKAiՏ|Lt ]kKk\ҽ {AWpQˑ:4T͈Ռ{;\5y^ \4/V}qJ}UpqYt0G%wcO}n#~G4h)_Nۚp)jqxLw𵋆s4d(t'鮶ŵt R/V(/5pKXtA=pGZW3w[7Ц)FŜG../`:ٓTy06yŒvk}\KyyNۮ3@,VM-L ˻Zy}/?]1qS/|&g`IKZjɟ~2K:}ov wRWp!ݧ1*zN,endstream endobj 42 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 136 /SMask 41 0 R /Subtype /Image /Width 238 /Length 385 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"%-endstream endobj 43 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /Filter /DCTDecode /Height 148 /Subtype /Image /Width 205 /Length 1303 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNK " }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz?((((((((((((((((%MH()@H"$t>")DDyG=*xM--CE Tے:S^܃Ҝ-^ Bs҄YX)ÁT`QEQEK HmH =g=P(wk-I"F@Lcf>cQEQEI`?,W p R%ֆGj4JԢeJ.Tg r=ijM3zҳNZEQE]Ɣ9CG}hiy4n4y֗>a}i7P)6E@NM%QEQE8)4Y> )ҤDې44Y4 O$$sbEWMơ(( 4m4m4m4ID'piU $ Ei4FE7i"M(VV~)d5'2:P=*f*VtVj6?e9-šsD*3l QNX@R\EQE^`*ȘnѶ T g޵L5j&TAGњ_4QIҳTKjQEQE=\;9ZAps֜nN: A'74# Hg4y>q;948T((((()CKI((((((((((((((((((((endstream endobj 44 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 205 /Predictor 15 >> /Filter /FlateDecode /Height 148 /SMask 43 0 R /Subtype /Image /Width 205 /Length 398 >> stream x1 04*V=:3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >3 >p endstream endobj 45 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 207 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2198 >> stream xג6PR #Q ΃ ݒ'TtMmwiw__ӓ1_!J~J7Ƈ7g*[`)kFO2ZќfGB?~ ԍeމ:=~0F0]$ʭb]nW kYr.K!,ʼnwJ_w ~Qvz[{R_{,e<2vKoIgP-߅WoB8Ts!W#lSPJcgo,cPx!SrS%hݫ%uC_np؋J91{{z;_|Q:w2#>Lr&ko]^z s'qqƌ| }ݢg&{DخE08}j&C{V=#Mc^wN 5vCm5=#d*Ǫq; \2:=+ۼ^rkVVE-k,_TܜKe^VU-3TͶs.W0rOksrXlš{B-C-g-t~9?rٺYnmH/94qRi.j šWܫ'onɽ[o!BןE6q0^NǕnhޮSvG^V3yevq>rSU3wŊ9po7C]`{䭭kq;[#'.R/ .Fs(pI\<abwevO\+fwے[YVƝbvn\:7psg8׹3\us :׹ys<ùun\:7psg8׹3\us :׹ys<ùun\:7psg8׹3\us :׹ys<ùun\:7psg8׹3\߹Mq⾿Kl)n]#w=nuqub嵇+lv]ŕK\jWۤ i]rvĝ/{^&}=pxt;߮\kyI{ﻦM3Nj݌]Ǭ9#*\{Wh^Ŭu.kE6s<{k΋ѯvS!c٬^uZA5k`R;?q7UL/w GQequ\]Hͫ(o/Ery>rMQ/ͻ{-ֺudU3fV;VsQ+U֭|chڰBMRzr nc++viJo}Prk)%Ws9rmXq8{Ͷ}G;sѹeՒ*L^7W6ݺԹV3\97X-2(eun-8Yʹ,)hoLn4V!3È^v\$e {A\soY Tkmٸ*g/6K~[6n۴>R깗\{N NlU!mntؕ9-v 1<lXvۇR^*}1k^){G;m:XNv-{UsƂNS2 [n7iLJ-V[4LU=>j{&/gnLqHb`%[sJ[!?i_b@-7Yoɑ(xҾ Jۯam nWfq7y $KkMqzS2f|WaعMy,!$@*:^?ZxL荣b̾2@ .XFUcKm:x"s7g[AZtK]|~OM> 9BL~)[-&gk G=)ҶZI/=\endstream endobj 46 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 207 /SMask 45 0 R /Subtype /Image /Width 238 /Length 571 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+q/endstream endobj 47 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 199 /Predictor 15 >> /Filter /FlateDecode /Height 214 /Matte 
[ 0 0 0 ] /Subtype /Image /Width 199 /Length 6270 >> stream x]8{ H!QWڛ8q۲2E?;g]8>YϺp|㳮?ucp5Goq^$Gޕfq2K Gs,o.Ӯ> pmA aw޵zt?yЀ^m' xej˗ͺ}h NKLq&d+{mI/l>DТݼE&}U>9M#~֩.D/Im8Ij Dxn-1z]mB[PR=Wt ca8&$&Ya"P04gl?`j *n~O8.톑x']aHRCp\uN9smW]8ZAOE'd`má; NMU $w s[CTGQ0)˴ߍæcBq> P1\m$| ġvKJF j6eiBvim8jtvyp3]p耎cD 9cЯ[ZO S'!XFU:2\ ,k9ަ8YpL}KqX6 Ӡմ#d̀\!8a]8t>G}37I,*E:ġP0lq٥68cZ _!ʹjY8*}[[-,pCg8l>lyQC)0PXJ(ܭ6o~I_ Ť7u7nT^Lz|{ٸ>Ǵ· $f̛lsU|yWtV-A0=ml  {21<^z).jmFR~r 0SwRdM@.`g=|JJiuwY³4c.A-3Wuq ni $W4- >}e^0f8o۱6DޭBw/FjLs?W,M :=6V%p1TU3-[ :3-7L&;"PY! j JZ{ޘV8RF0 i1iCsF-*w'6%燍nuM5 S{ڍ8SJ|D30t0GZgI]:94̋Wi|E\ic߽ 25fnTo=v73oSINt?p+j=k).WnyZ/=ȟZruf<{өҲmT;lëhZo0^N">A86hFm1ē̕7[Xa)/tMV'pK Rs GIͅ0YȈI$zXCoiH ~In?R KKm1}L᷼# >c=0ʜ׉'A / 'cWF.͉EQ_ejy N!& !=1YO(FU%!7д&xVLs6[Mq9rj5ׄ?UX"NҁP^M}}䃚"箺'G|y;i*  H8 ,*_U1NͶcdv@crR`B=ڢcq(]J~UbPi.I4*IY>kp|_auewn~S*^bp~t(#kڈ>0Cp- 3f7D(5M}B3}!~g4/6o,iu&Z[|$^|,X5 #@2\|;#&7mviw0HW)AHXk3tNq"C3Huw1dz(]"8(ϋJ͆@%SVds$~sro$tiHޅcϕ髍-f(?1{%Fё̶ye!4g)7kM~+UU KRntW;YnZr[U2:,nI}$)7EhzsJ7Cz?U#Zovca\3&9 "D򒕖EV 3[&g-PR.UD$m/鈱yAs^qpG/BqGOتvcUAoʋ8Shdۆq#8ܧ}j76A:W X#_"1vJ2>5.\Q5.lsT|b ?zw?^o^S" ׶6[|{ݸ' ;> hl:ptU 6kK,ob 9M1_`5k+`JwI8i#汮[7u8aFDw2o/^[87j֝C^8pN_/|H\ -a<58"/iaڰl2*"_{MqAfo&rE5W4LALT: ko`REr4Md M9-9q$i݇N) d6\ {IT{q G8F&Œ=?ƛ;ZvK;j*v99RsZ8NF8^ [ZyP1jjY=U'/{I[Lq(A4;M*jӷ_Q O#wXi"XY6 1~\ozWT~}>6W`7JMzPiR]B ]5/y=&wGۊ)Sn,oX1͂u@)OqCAPM5g> gqS|4$;k7R-9x۬Z=#n>!!)TrFۮƃEvHq><.=bΤoO=lagqAGB:y nYn@<@0K8I>K(]FW tp$L᭵iN談t&X_y8, ϚLXȡ0Z>qgS1 ':gw${dAyU<#E0GC[+ZprG$Y2|v5 (h۸^JMw<'[ym=ԉ4]; /8t69o1f`#GVHd]8xs[~+ڌ,[ˌiVlEۮq.VmR;:F1{sg+nD=o,ʆy?)~cu6(\>,nl 3G6S.ܝ5og!>XD%72Wҭ(*g^֕+8bDV3\G 8%'yzMŴ6!Op \m!cT{ y\Rs'ttx~D/8SzaZ*$9e2!L_kP`6 kcExTH1FN}fh8V㌑0q2g)\aJ%Ly0U2C7&TǀN諳p&<`j 9 ruG[VVTk+Yg8JDΆK}))N` 1 vh7Bds,S|QV18%d@Pyb8oՁ7[t,F4]So 6Cq, 9'n '__Q}}fx?Az7U("̩lź +;l;z=#la6{ft8mMmdix֦8B'CBG><vmz~g-ig#zbK>k5%s7*- vɍw&oBunG#ZưCzʝVnlIAGDC{R@f"ĽC%^<n#:!%u (o!:T6ZmxG7᫺ ,at-ZZ"@&,Y#n^Avӏy8 Џh̕y3I99Q#pq`S)Iֻ5'vid{pz؏sȋ:6xFH f+?cp{k++đςZ!y\~@\!.lۂ"x8*F#`WR( ET-1V u G~&5%Ҟ4(0J8x9#8tԷ4?+9,Zc/xD6n(yGqnLi[o/_7s8bO٥~{my%:u|W-qxqԋmD"ҷ~|qmF~97/E"}ԍS0:͎{G$W"@7j,Tv-ى;QJZاly,z9}nx|_٫GȆn~Hwr1$=kp;86]VkZ^yfr$,^o]Wt۸'ɑ(>:㳮?ug]8>YϺp|㳮?u/u{endstream endobj 48 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 199 /Predictor 15 >> /Filter /FlateDecode /Height 214 /SMask 47 0 R /Subtype /Image /Width 199 /Length 563 >> stream xA 0 տʝ<2jA*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*z*zendstream endobj 49 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 207 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2210 >> stream x[H }7S`vK3c 9[/hJ~_ 84aOyyN "_ygE#׷x=]}B=3)T$?@XP y1ԧS=3@V'8R{ Mx ~R [UTz㕃G ډ<>[~Y[od!w<? 
-PA إ9?W)PI1+oic6x`nzr ; ^) ,X"D92Vz'Eтm7fE򗃐gęp'Vw[ۚm)/y; ~w P)'6JkK~04).ѣA{O۲DͶ'h7銼(_4dО{2;f.-Z {^yo Re]L{,;^✇wFBm[ TҤa-y;]p9%n0m0,ywo";=qd[\UՠjMV FB2ez{=.TU[lSifZEv 8{{.̭aEeJxV0&qѐ MV',F4pٖq/mTڞR7y^7Cj.h/'ݦ)4aԸ|qeT.̽eڴ}8[iY|,rS2acKiVY,\f9}s]b>YGb5{5?f8te9yL"=.KY^M?>pH+^H%}i M'-{n r~~xiǕ̽O)]W(v~T\ ^өxKwqq9S !JW+f{R܃4f7GއQȀKw1C/!B507:tb^p5pvme 7Qn2\54ekip pӔ)5\MSk p 7Mn2\54ekip pӔ)5\MSk p 7Mn2\54ekip pӔ)5\MSk p 7Mn2\54ekip pwܰ)P¦c.;˅+Kap˳0h0.Fp9K{UgY .i[W^"eǕe8x(Jmta.O5nV"2ʹ7UstatpffEygF)Nr -ڬPgH:ΌsB;:R7`={=@;?bn>%J_ڬ؜{M|g=sQ2,j+V-^؛*j[]?JjPb`S3G˶ \DY*7{؜gV\MNs(;s\#BVu᧍dgw=A;"N7\m# 'ǩ>}(9$,_o* [Bƶ\sKF] 8bhqVoGYYﹻ:KV5 bĹ.+`g-[6eUiߺi@x^cv=ꩴb.`٥[[+Y 87sŠLZ XW"hKnj[9ޘGMRR4<%]3"8f-H+R AxѠ\1PYݯ Xо:_! .,UZi%+sW- 0=.(btr 0}Av/_hA.$7:|FipVŪ[yf=~i? d-4> /Filter /FlateDecode /Height 207 /SMask 49 0 R /Subtype /Image /Width 238 /Length 571 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+q/endstream endobj 51 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 162 /Predictor 15 >> /Filter /FlateDecode /Height 214 /Matte [ 0 0 0 ] /Subtype /Image /Width 162 /Length 5172 >> stream x] w<sۀ/Mi陙7Mod%3*>88888888v@S{Ayt=0 7) 0@t5n`<>vTs-zOG`=TAGBQ!q] H]ǰ_Bt j)q T[}JY7.EOd |[Tÿj/qU^nG.rN.kΨB C~Ubjӻ,2@\t^Ab ~܋dyѦy|&.95!.^(|;!4:*k`c]|.i:_a A\g&< )zATq"\Dw5-UY?]R]ǐ'f9de 0__Gg9Ęh7v5oqvXnT&hS%4׊W1וxX!}!p;X b|l| j|.'@,arALT J*D [fs HR )j,E|*Z]]JSmJ%Du)e:# ŎuwswFr=];@`]naWDUF@ [t:Pifa;VָbˌKaRl%; ~}L.E$0VJhXu-GZz,Ζ1hKdυMx k/ţ!wqL\iq[F-arx4ս\&Ha!JBPy22wA.:C4t>*~C*t8;1tmɎ-rс^CH9G_hb!DhVМ%nX\]!ԌQ!<]'h> ?Pg0P+p :D,MEhp#X/`̖!N.' KU)3Uσ =) f~dˇo]Ъګ h:lg({ޥO%SAaKbn"χ< P@0@l/ߚ-fOǎ[LDf7KXCdV;m1e  AՌaʺ]ToEՈ 6lL,I%~T}udAQSRp} i`CD[>/Cuߋ d/0>8,`d:G& X2yB;v&́!{+DyBCsD ͤUhq@u ^[ѝ-ˊP d +@@HeqG˸pb~!G3gͨFzKNRͤHsr[+ _2d 2L,x5Z2t/̐PjŶׯUo LC dJa؊=(䪀hF,/΅^_r; 6b#QAAuD1ep?#U"\;b<` ^R~n@ 0ȞH%[~ "=z& 43s3 J* "U^i| ;"1Q7Ix18K%T]b!ٸ;ƫdR"^` D^C|^>12K"!1)ى 7t#jʔ,){B,c,3/(=y(VJHjӴx,D$_2 ]?ȄI?T<IDInFc~#)ƀȴgMʻJƁ|5$ZИ0Wb&SA{s?CN tbS] ]gQz&,\*!6үw g4#&H&]*d~@3/>/T4,"Ymf;`6l'ږ!|<#WBjb4u)ĦPe:E .%DeU*rZl w.3e"65%P5BX9P+v љ~ ѩ eԾ!r| +ڵz͜"]%pUFtgec}^^,$?- $=q>+ڃ :1]Η4÷4FT]tD& Wʒ ]eZ%xP |y {brOASTM5"MIA;Lb{i j65&%@*e6]hvC<)fڂ)'4bM1)'MoēD+\ o6<$ehթDG -9 10J2bk^Ŋk s<%Jop3ȧ5·PiR:d0Fi' N$6B2BgB0$xwaAV>b,EQ/04S>zԂnp1&_"IdNdlZpCI4+V _4~*폆&eM0,1uC0 E犢2(<߻JqsFQ[I1I'rZelĥRoh<2]Yٚ{lݟq4Wݶr/0yt2ts#.V io}cy0Y<݋RR"Ibc=ibSJC=Rtst4fSX:XdWhTMrgL=@ %uuQ1Yn.m?ڷՎM%6}YY:]Fpl_:pe`5>|˄ cӞ ")'250|ܥ1qZ4"w 9m%>'D© `Pv LO1(),O*,~G6 IWjGڞBW[ūFLG%(tf /-1ts9DoR6={Wb͂3poUu}Qc;y:Uػf Pc\:A2m&C Fg%}toACf6^_!3ӆXH C ūr' bvsK5Qm} 18U% `>9Rݤf#[סNA):g6bV1*Um1R̵8AρbF+(t:]i'kZGKqNFxbզWs|"AXQFMXjl|OUtQe6hƨȞրwO3;2j ^x=;KoL(+屛4ٗUA #$)Uj ԖYHbߟgY <)-Tif#Ius#"5Caj>OIXQbGx@3;r`a6fvO&-y 9k'eLɣu{VǷYJ17Ğ12DZ_߸m5Dt>Ծ8t=kF{> /Filter /FlateDecode /Height 214 /SMask 51 0 R /Subtype /Image /Width 162 /Length 509 >> stream x1 0 ?؎"e 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s 6'؜`s endstream endobj 53 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 262 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2572 >> stream 
xډr6 Pے^dYNKHp$^gxP4_Eoϸ!˙/c~/"]c~y*=]`1߈> ܾuYMp\y1\|OX|dfiAyi1_Z,>gif~K=6˒PBDN8駈߂.IYf8.e㨟VqEݥQ DpTv0jxNefBग़CѮjG̱}=ulY%Rm_N} Ů_/f7/86귱 Dy/ ,ję\٪K^K/y 1{oRRې[}j-,0˹I+7PYR4SG}c%Kny`mb%gY#jTʼF0- ).9I X7z`qmyj)Sۢu+$qt1 c*Bm"`zQ}}zL\ܸyVXWz韦 [:m]sLEMBrfzWz&RSqiu٧w^łiЋܑ{{J$ZFru48E5#cΎ;P{PmU+ _t,ǥuo)3|7 a ]ԲVr8wH W[')jW3Sf6rRak(Kx;} \//[ f4/7]м[ MbSfܩ`J9&fV}۹Ѭ\r,pg>{Uz )&HUpo}rѻxm{{Hf.X#]<ſS% G[UUaLC.y󩸮w=W>+|e170qg׸ƍ3k\5q ׸qqk8ø5na\70qg׸ƍ3k\5q ׸qqk8ø5na\70qg׸ƍ3k\5q ׸qqk8ø5na\70qg׸ƍ3k\5q ׸qqk8ø5na\70qg׸ƍ3k\5q ׸qqk8ø5na-n}>잍IvL޶N}1?ܥpGbneEQ-d73zgN\RULҜ3 Rr &6ɕ֥eP{|x6")Ab&u}wTG$4D{H>cNiB{_9bJTLiU^Dy]E].w/e!2Õ2~ĕqXp`ԯߧʥsYCWYy be~)!}&w_?8T?JkW T$endstream endobj 54 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 262 /SMask 53 0 R /Subtype /Image /Width 238 /Length 716 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DXendstream endobj 55 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 195 /Predictor 15 >> /Filter /FlateDecode /Height 281 /Matte [ 0 0 0 ] /Subtype /Image /Width 195 /Length 9386 >> stream x]z<d'y⊨*%9f鞶-,Q8w0;^~xa? 0hڏzfs\e7`ȋKƭC|=0.30>nQ\?\CiȍScbq_>K ^@.C3 7iM|2i" qc-a8 <M <'1xȅ1"׈BJ0Q˩ćS ysWc\7>s a\b}9,qC,bp,-@薱,A5>`:,|ݏÈ|>a&^R{Cq 0t_V1X1pliʕ2y8Ea%d'Sr0Z6ZIc8"~±1k_nȈUVpekmcA(U>V܁(~Dg:Š <<.6o-8v# @Li IK/۸ 0p7;!A=p\0|~oITCk*]V1 ي@,&;1L1[:I-q-*:tܘZ. Kppc&" (,džC_$avh  9ba,`bӔRLvH.E"ho0^sH%_C_JFgΛ"@H%u_ي-(XK1C={xf0`а8tp4؀2.HJӻur-zhBL &ĬBE"ٌZDy8b4t7O/e~sgH`c=1'˽00Ca0X-Rs8/6a YJ ROV,Qq+ jLm;c 1(kU׸oF_ kV ޡ&}?nz<<4x׮\Euc .!yDA=IX+gđRf&uK:1>xEjXvA؇ _yP1]nÅ_hxA~V+eS$ lVj8dDp-M#$Zok*9h.KhN3+ÆCF#쫤ƐNeږ9L 9y?Fy\*A͜68CJ%`➎,DZh6h7 A:CiabX8)Uɥ%^0 G!m"jXXPH$@B-@Qw` $8&m~a0nu/rL=Y ho9 ER ~C m0Cm+{]!,9Wޅ6׿6cpfg$pSTdlnk%_zleO!=hsϟe(XI̤I,^C`@AJa8ؘGt-^(7ʓN'R-Vl(56fX ]aLODϙ6CTrh[Fa?qb^Q}`+Hc.]zVr1H$ _Dq.yd/^3-I\2 ȭZ؞Tm8YCĕhyf9Z 4;ˇ>?>i RĆQ0U%:WVlþnK Kᇼ/j[1e"kii(ڐߛ}H)#ˆD%a4ӞyB+?)krŦB18R3NPTKVD-ͫ@}K}FofK2w%&#4fkiLoh 5 ˞F$ΒTtv6Ů=<9`.StN.Y<,*5C`{]ލ@C5YKvLLXi|p@HqǡT.EEctxs^Z>6\ ۇ10p*)R"Ls}WP:p; bFI묹!]6u YQ~ҽi_G[Zq4Vnsps &7=W2s\c_=a,v`WA\$"nsYa.E" RBG6(':ΰrefvt||Sv.LաXϷ'52jB=I ].Dw 8mF(E1jMWti< x򧷈4RN?Q%Eɯteʟ^#\5? 
ṵp.ȜT(և9?OlQ7SM UgKqM[g%II{m}}v]}|}J7˼]0W {y8?F l³]zI/} ]+2>V..[& %4^+ܕJ@H6Nn djcx@ϻ9X^dYztj~Uq vg0xu4LZs/5`ibqn0J]=I5gv\M ;y2X vRD"$V][sT,Gv߱;d?QrM,5b~aH$3*g/u7ܜIM QOĘstfsf8\CX7o'朆Նt xc9 حEnL'O,8ѻ!`b$!Ob@qbܘ\UZHXCicMpBj̰ɔ3r'g1;7>=9^`hK kƊ{"u?xBV(ePibAe-l\91=J0TvVz*O Hֈ@ɓ~ C:A> fʈb# jhԞWg0f[e_x%aƀ4".@*Q$LrwbXұzg/E;X Ť.:ޠF (./C!݇[Zd4uUv* xQ\lXKў$iYcc5)#:O3KQf01_jkn:1kXń*[Ka!/z#$uO3'PO1p-U@ZGAsP5(!S5hm !2#ab_%;yejLS,-ۏ핏,gF RQDc߳Gx \VV{^{w'1q5㳔bRÑ%thm}:+K@7=ׄi 4^3RVyӜН᜜A_.vs3-dibhlhdJ5?jB4!HFmMΗJ0\W0A0ϚiUrnR~kO`ck=iDeAU R_؃$B*oq fM9"ck^IOkQ`ݑifq]]/b8!eإcQ1GVמf_ߚH qx]tGԪn9)~Rŕ7bWa@e1lMܹ+3^s߶Pv @@`3?77eՕo|U==*u:[ךH[-;qcHuO ?TXc%pU`HrdZz cCॎ_%MǫiJ 誥ꮠ[cO?Փl[.}|+*K支d^k,9g_MS4=*trgzqXcۆa[s,"ظ@c}Q& $۴5`b>C9ؒ.zy~bYOIirIh"ֲL*(R0?a}"oci`Dbj,*,Ci$E^M)xyђӪ${DX+4>~'# t쌛|gVvIP:~ܮd0-EԕeLZM FԘGLOR<ƀ#t2igokI~1èWc[` IFkhϡmQWbAY/ag.tOŌҢʞd`qk{{ǎOUA2S+z@ZĽ ~8A%j#\ܾmH5KF۶=>M( ) N0xO>Lļ "Xda>\7hڻxl3 浺-\yAfWBSJx} |X}[Ծ3ic"bt*44bh~Ǫs/EGF,H̡1Ă(0?NSa(/(7h _ 9 oʈd>ֆH|*̮$ Mn0 G<4ddYD46_dEE)jR6(;^=U+ ''m#!13{U~_hNx1 `SSTB\4P-P}yLka^٬.xFԳ凩0I2e' (NyM6bXC()hʩyj~8v v{ wKhz^ŭM!Hn氾h(3kW[*Y2^(psђf yB b^ޥ90lp0q쓑fJ=Hi/Pf[pI8bP, z,0^L4Mi<^#qeUHHYG[dWk3"S>B(7%ddZ<@eLZJ?p<SJH|t0cC`n@iek.LqmBe<5 ӑ1ӠH|8kˇS|=e6U RKT+DJ,H82Cu2>42>)vƳ|akY$]؂م\"2,bKEHOy0.:4|bo 3Qa].YM9b nFCbD+`mE";WvĀaYvt[G}fz:gɊ\r͋jmМ QkG߷}QxCApO.j걃d^b#RIAs(˖˶ti!#?t,L˫w*xEM#oP"06Cw1q|Cф\Z sئ*ʹ\Y!&fC?Nd'M 96"^?ׁjZPM>%0z4t26s Ti4:M yvS{;RUz'@&T2q:̽w Tqd?yXOY]9o[OY Uo!um=c0_yձi ߺw<okpƭK fvAsQT1;=m (Eځa6߇ 0;^~xa? w0,'endstream endobj 56 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 195 /Predictor 15 >> /Filter /FlateDecode /Height 281 /SMask 55 0 R /Subtype /Image /Width 195 /Length 731 >> stream xA 0 տʝ<2 N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$N$Nendstream endobj 57 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 335 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2681 >> stream xډrPR*Z\Md٩,np.xRIϻh48䏪ōo?ok5/O>}O]͛of~"/yN ӧ/%wJ W7=˧jǔAz߀xvQ)VꬿWITs~v=/~ FŵNV$K/ݢ\L?="fS 1ţkÚYYodͬY;X+!3gOC4#LQ풝8 8Vr͵,]zFxѼhgj{M+]>}6ּоzK›XkquVؒ2|xQ ~ݿi#iQ⣽|wPlZmn30ޛ>a D)¶1u Ƨ|wS+}\JrnUB2&m[^g[P+ Woo623d[!ۀj0 lnrm%ڐmTk + anV)FŘ:Duxn۶|m H1} eGx6w8nΕ;(l5\zIIr~5Fio.ڬ(|:&%޶ip|xWNe1R2Ho|^F+*/]!kĻp-K]^\  T~À|>ƻl=Vk+KxθB+w{ x1(s%ܲV\,^p8:k7@/繆[εKsa:Kk+uq1}!^Opo^^ַ>%w~^.qàTz yW)px]. 
.rtm^K7xMCHWCrflWpxWv ]sYK{{㶽F꾌^@pxw=r喥k&aM)7/q/Thnoٸ071tV%?pW{GvCL*#%p=TԚnxgJfM^v.vdʕw W^jmĝBܢ0 n!\mW\ifr}%\E.,r%gK.>\rYK"\r}%\E.,r%gK.>\rYK"\r}%\E.,r%gK.>\rYK"\r}%\E.,r%gK.>\rYK"\r}%\E.,r%gK.>\rYK"\r}%\E.,r%gK.>\rYK"\r}%\E.,r%gK.>\rYK"\r}r?{^=u=i3[š|>wD3ZMN,+ =/q?[4m=4R;/gwp1t{:Wӽ*(W]6^nUeNcʨjnD3߹ W½v=ҽsqx .DGb0=ܦŵQ,\{JCp-øreeyUTW2OGJ/w6pdJ|>@7 gww"pm^х~t7U[ܼfLa+ѵyʢWp+}tna\y͈ݬu8^zYֽsuİG7 f^Y eP5\ˋIg[cuo˝+-+F +wW̩ۈR^6dܺ쵝g[spzޜ\˰ruұܠ: 7q-^L+T%w#\Ѷc9/ŋӋȥ״o wM,^y͗xNmTױlI^77#WϺ_VE'9qWS]%ݓ6밒y5`6yK{Jcyx[)T"ӆN=ۧ;gZ=';ӧy NOZ1#ø#_endstream endobj 58 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 335 /SMask 57 0 R /Subtype /Image /Width 238 /Length 907 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+qPendstream endobj 59 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 172 /Predictor 15 >> /Filter /FlateDecode /Height 348 /Matte [ 0 0 0 ] /Subtype /Image /Width 172 /Length 9413 >> stream x]: d~Ĥs{o(GInG~j쫶_~j쫶_9v߾*_]Fz[_9Lg:vA^'/Wq9~1"BYOzr @@[ZX<$4+mXW^dg!~WlnӶvvu)ݹ)n9):}m&u]J)pvA[p hG!0_.Z/m!6˚J,~(mivsd=6kHv6u7`CuΨza{11>炝+e[A/ƙ7Jv(lv_3+4X~jAo`qkuVkAXm 狟 Ooc0@JMv+A)~q^ .m#rLx1hWtEn$ؕ`=]ڇPJYFe.RPHk Qea6f*-l8k26V* N4tҩ>s spf/}6ª:AZ_޾8&ɘ:bHgK{.2PvGF./+<3CqQ2;^N)PP[4rl5a16Z ֻ摓`'؎l2_K!=+hvgםl-xXr]#J(13``.h53#hO;&yxsk'G Lr7vOFbf{_C^Ukyi>lH'ow `$K՘8f,0s)lcPU# *,?i-b:EqTK9Ԝwȵ`+}A0ɕ[W4p"8* 1å=#>+&]tWC&8N­ڄBb .p#39o)Pd=z;}Pk`h6[\r"SF8&|ЕjiVSdp>N0 ]w5ٖky 2N[$j( {RJe _y-"uom nnAzcINRÒHi]PA {Lr16˘QW03|FE(-QvghE#4E5's6-32 !³%`ntW1[ð<ҝU%R(,i퓴d* ZI.;V'_%4>LmPS͉_g2` UAT#zfR#b-p?[l``$k8 Oⅴ [sf.25^ޚZa6Zc>I`92^F V<̓O΄*{Yx n8g削9r%Y܏m?uN>xXS7#5["KBW2k5QjbB=i<)O VUbAS``QP8%3AgǗQhճ,dT\*2J. |Rv`'?Eo&D\Abq,QD},:h<#"wS]d3Q]⑏JI2fG>2;HbWSXPV߂I(R|[Xފ1GO _-ct-[XD} keđ.13L1aV{.d'/ݓ}1k\q F]YZ2U@^V PDA={OEq@[&`cv`rS)YFD-vJ- }uPEi2CC q[#&(>_FD(М2D;Jr4]5.^"X0.b J9kZ:A\c+ԳZ΢"!B=dEO.N.aJ8"x{ץаuCs,%,gHae\,fUnư$w0e=N-}I f85=A*^EoPK EW(I{#(|Z}w XdMٝG[eGnkOJ.} +GOI\HV`? 
֏aMKѳ%ө/9rѓ-8H4[ЀF$r|v,SdC&zYV@BNڋ@-xcy;ɂZxm[jCk3M`KY,NU(Kjqĝ 8 ` "N`eP~X.RA ;͂#k;mi((rUF!4zd@*~ԡ kT'3g2nsЉ 6:9TEYQk$~r(Κ & #>L";%c5ܛ HJD&SZs|+K*2LJgEr5 Gd`/"Q,=L QPW@KŇbar #XKt֌6@=Y>ʵio  HGAfק͏l @c%`rz M9(|HVKS$kA#_{9|GBoҁu01+:ˬ"hO]Vh-lV?eh\& !N[ dR줮g]/ _Z$(B%ZGfr1ut ؊`[VZ鲂!Pkas;W.#ju+?~`(Xt}v8{jCx机<܎EE5ŏYG+;,0qlO~u5%k0<L%DҟEԡ݇(V{$02^]:yu 'nz7QNa29!q*_~O3BW\˞y,sMsεW}v?O~;r~FBm'qW+21\@㪡ݝ{-9zh5X;o;#| ogo.\}<2_OI7Atx/~ӘO|RelBJ?7{!;2Nu|/+h% pRrwڊDO鵫wJ , cڒzT93=X:D0fSMK9M,"%5C.; tOE|} :g6J߫hQܔgGݽ1ko\B VT=B+3:PK[a +SX 9SpȘިE^dqW]GE7Z9'F$K'dQ$-EJlY7!MXH6x4%` _s>TPVרm99ءOe҈9szRF- {/sZ˥맔g&Wwu;YܑLR8iXM>Ȱ j}l R?vJUg4v<\ȤqQhſlL•Tzi XWN% Oo_D=yi hG_KskJGBsv]k `$J&if o^JŻq5 6QO\^k$U2UcE|i@}#oy˞`;hUZ&G2`^T$\2Ď3XG* c ғx)CQ8n)uͯ.pwWDO E Mu2ԏVݨx˴ɀ*Ro, rړ!lYFJ `[$_vZN%mYHc ڽ64:;S2A(`ۊ>cF"f(qdЍXE'-zAS풏 A}X!sZj仃B2T].k7\ O{u`]hm,}EBFgXikiazw[^%+2Lw`M*Bx`kzcKHaKpZ.2nRأBHd =qV-6Z@#'iO8XgNZ|0nANW{478XZc ߾f:kћ^VEi& X# E^xQB8O/-=ks }Gr}b0mXPʚT|'ƝszsE?Dz+],ax7}4x;|z穉cTs&u{l:<12AJL?tR< d+NC qLڎ};|80ֻ6 k{|mьc厕&Gkr:^%U}2sG{'Z5,dx VC-`RG=\=TMm[.Cw:M*38ݥ0b}?^ޟKϗҴ עZe5p t纗lqc\*3~ .>ZYzhokzTyK4LTuZ`'CX\ئ!C+**^`1.x k2dғv*,P*,9l=P7o-*t"mK~ r)yaȊv`GFPz}4;qECP=DÁo$ rԋXֲdM˖3o3ϐ"se-uC k%~Dl/`'֛e4>K {h}7޹w%y %4kRkL1<G8fEbh"yAZԌqEޤ O{ؐR]w/yĨ'@Pŵ<Ė[b'}oVDaxV|P(I"uTJѱy7OXlrVa 5zWN j܃ Wq8CgJ1@ua`֙۰k \i̱]V,=fQU,~C='/3w™ug(܂p@`]QAku{Ɇ(yȎIu}-OeЙ.\2W~A3>?ԬzⒺ!2Ɇ憻JO@W~{} J1sTr/Fy7gu(lͲ\e֊"ǩFf'Ro`(`l93Č_5_0gUAȟm$]Dh=_=؏5\XSŞM12ZES)UT݁3cBO&UBXT Tq`_[$;ƒN[@lJj$T"SiȘ PU*~y FL$ݖMJ<jt!XF5Qg$(BY.?_m$}.SEXWB=Jq ,7Us30}W~ߔ>uB.C$O껳£zbOc=5/,gGϱ#'_~j쫶_~j쫶_~j_ayendstream endobj 60 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 172 /Predictor 15 >> /Filter /FlateDecode /Height 348 /SMask 59 0 R /Subtype /Image /Width 172 /Length 680 >> stream x10 0L|ogL d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  d2L@&  ]endstream endobj 61 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 235 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2509 >> stream x钣FAάtvth99lU~ 76oooo_'qpk^W~oi֯y<X({w_HE9.(7X`_M^|C~_ M7_>7XQ!/k )4pzyxZkEq8⋏:nE\% -#\Q챥yKҫ< еڠh(6Bw3 B [XEB_ "xWgާd*G IqѦ.j,Kc$F+K֪ E?&DZ rl޶ ۓx$F6n )BH4"Ϥ%\.EcUa"eh}GށnޞNu]<Ɔ,-y#4o~cRoSiq:+ .64V,D& DxD ̭c)D$_ƹk+Q6]YmFvmamhhnQ[1%QNxѮ0W5 1sHUOō {/s?l \)Y(뒜qnW}sk1.inQ{Q{9". ОYܦҥ ݹ.giw4͉#7(S3^^mV̭J+O89Ջ6+B%"067؋".Jq5˨d,yzKk([qbV\K{9{ddP2E*t{jx;FY5;.m4k.w 43li7Oá,k>#ix?x9y S̵ylЫn ԮMݟ>f&(.v6ȟ>*֜¼SiIdsHqwҘCkV ? 
2v.jWW=4fw`g^nUdIkw W* ݆yWkaq7a9:nr\u4帎iqqӔ:)u\MS븎q7M9:nr\u4帎iqqӔ:)u\MS븎q7M9:nr\u4帎iqqӔ:)u\MS븎q7M9:nr\u4帎iqqӔ:)u\MS븎q7M9=ݽE޷jO07n܉a|~ϸk >МݶaNf\+n'w7JqkyR}+¼Gbpw7SnE \Wnpmp˺{ m_i^ps,ѫ`t]lm[kU;-riQ*ŵI4wӫt٩.2tbՉ(.H٘GdNuJZpG{E%RpJoHbNJW`}}&f:$yN>Ӿ<D.68XUFB8fl %,r܍^A;:wn#5gCJ0jC~EsZG7{u檽`_~}jXs6۠++~ &.vz{Bk|^ay|[lĴRtL{F>}<.&;}ݏ4xo#ʪ*7_3endstream endobj 62 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 235 /SMask 61 0 R /Subtype /Image /Width 238 /Length 645 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2+>endstream endobj 63 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 185 /Predictor 15 >> /Filter /FlateDecode /Height 248 /Matte [ 0 0 0 ] /Subtype /Image /Width 185 /Length 6494 >> stream x]:Lҳ/VU&OHH9M;IBl[<}"/?"/? 䁢p[6򮕭uM2ǍM/pw&kC^.yē_ofB^za/6kW t<כVm^i* kg:@Er~ {$+s Xrt 3rW}q:8NuO"7ke͇7ԑ蔧C.67?3siFwGDU:0Ї& Ox|^zQz7Sʒ8#?C٢aY}̸*Y\UWuGȋ, 4ʼ>f 4gӫȩxܙ:gG.cq ;m"uN<|LS>MrÛ!SqGQϚz}#+ΔђBTe}V.d áBO3gr郃΄#u"?NJqq%ȔCK'sh.ȫIO0e}9ϸ翊 vjD!":#w\ -s'OfC])}ȿ \FcKyb֑&-dHÊ\گٺxx+5#O1(W\Lpt%[hԖfB#/D1wWhenu$n.j2nSy}U5+yאH5,:|,A[i}~Gꍪ 6X ?o5 Ԗwڧ`h82{{ d>ke3-&o_9&iz0:$h <ݴe-=w[Eȧ gsW'6!G;=4U=i8PD9F|rrkS<+ _lm+kl/|NCN!},u*ősTӒ$tCF)xV e5UKPˣ{UHCy(.x㯂28o9̈́)N뢎=xRX^fYJlZNp=젏skqgJ9[(e}+<(x01kEK$zAL}߽D 3yS?"qќ^/?*n_c/$GEarĤqWQq~. s.yS6-pw[,xn ze _ywDcG5|kx%N\e}uնnaqkw_O;a4Cg\dS45Mi=se_ ?o<"'Z !6kJ$woehL&DMjGh~7ю >[Ш&N4< Pl>:ob1b$W:,+\W:e]"Eʫeq^ITLu:w^]V cYJs!{dBxe$& (:Y yFpr NJhBNE;H=^ ,^"!~~/^t X"iӜDnBjBrѿ7_k< pp"&q(R0X518+_q)q^9%֩NYV+C;*Me=1EyFgP?\S+*+^W ^)#v-"ShNS)-1rIW%**h_8]^3B;'uN"S53VmY ڀ|V,s!p;lVPm4$>)sKӌ,o@~ƾJ؁[R2,#=41ܓ3y6?7Wme>@Y2sdrN3@aMz.BRj~rx84%F0_U*L;W8+$)(~h 6|ddBuۂT^An[fM2gI%i ?i%j`}NM`KE!gZ h]{#GStd%W1XR˧3cK$[YH,˾POWQR BJCn r9WH;2!GS4ܑ҉դeSQHiO-l!c -!NQL'z9\*/N[`d?bZU/)zˉNxb9S$+A%;3diZAD$$膛AP y C |F~r-y=}I #5]g :PAN}}eo!I=QuoE٩#>Ɋ]EG-X w8.a n=|Y[q-y M "8ʯ琥ŗd\zE~~? |I;]wub+3\cw$~qqiF}]3ɉz =l ,+m ybȉ;G_hB HIʖ|g&9r 7E+#G}7/׆F7aN@/݋CU6vЕȻ7}G+ Q3!\f&K /i[wrz. :T >`Y^$ /0r8n[i J/O=3AR*)_UL3QD 9~V.hI'B-K!ýUU盐Ýe:GnBأ]t~#*CE,6xk#8 Q"LJ y.n1SVPJ}žo+֦bQRrT]꾎C$uD[Gi!w.%`bKF=͓6"wzXrJ/랬G^ $'\O'nןol߄Ov1 2?cgLt@ "ҌXP<.ơC3ɤWbL8+ %+}Uf&˻dMI?i:kn**k\84 9l=SʼAlւ y3. 8YxΛaEkZ.w@~cTgT$gȼ#l =i.ŏ@ާR.0lOQ%ϰ=N_[Fc*OKy]&YFi=_4t(yuy !£˙^OK)ɣq6g–*{#q|H'?kf#j!JY=[n$*i琶^ې4s\&YnJ[xnq1{fZ<_12?J,QYǢIc csru(4BUc[;F*JLB/K)b4@ylyڌ _)'z:f`W][uƋk>.esb(_R5&f> Cȩ毸{pmZE%*\[k{7?A?`Ef9eHzO!NuL'"AXPRTTD$|~҃?Ei=6nSƉ0OX#DB踴+6dޖ6-`vp|qwgaWCYIxgJ"! OFQ(!vEVNvlZMv=JHxjJR6+ii- 9pvx>}.B{ZJ[Zh3E`LzKސ~cW# $"C.tVu]LoZ?|a\a$nGQ$pbX'lpXh\EIG@Ma?bYAw,<-brB-A8UP ].VESl~BM,G8bym(!= a4NM钏Jᢔs}3(Z[V_p!w^nْKIi5 }11P["K b MEQEӚ+]b$Dׁ˲K&rb j\9QPu]arYx/:"P! 
|4ʷ$ܻ .-ˏ"ˠMUzp\gK#H~ ڃ]|MEN<)=]*G /TI+@~cGCvIȷ?"/?"/?"EURendstream endobj 64 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 185 /Predictor 15 >> /Filter /FlateDecode /Height 248 /SMask 63 0 R /Subtype /Image /Width 185 /Length 617 >> stream x1 0 ?QWU> /Filter /FlateDecode /Height 207 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2213 >> stream xrJD%T.4UՒ퉙8[ LYʳ*5nw bN96m?Aeq̛'yy/yލOy`kܸ$POVB_r-Fk-o}7ϼϸl?׫xYVr6F2 v})AJ/6GK}eh:DM, K\#'@+=y[/X)/g66bյm.L\H{mmȔ CϐmzY V&mE+c"V` ܆<y ĥ-ҨQA0nԶ {+UEe&IkKy/7p* -2&mz5t]hAV\Nq1^^ ny (|EUVNV?ݠ ;o5\r읙 _)Jwsf -n!vGdo]"Ű qo/*{J g+u˗ wJ 5WFlYUŞnMGhŵYch[JJquYyrW\I]~IV08fKހ\nq̒73yy&aH?l@l*rJuq%yͽժ|)qGTڊF*[LUzJ?Xn+0<0[:DҌ3:FڷvGC̏$̡.UP}HJLp̷]k!w=:nḎi:븎f8:nḎi:븎f8:nḎi:븎f8:nḎi:븎f8:nḎi:븎f8:nḎi:븎f8:nḎi:븎f8=7Cn {k=W3gxb>-ޣ-C\1Mpoey+K-7ܿGZH;MZelW>lW~pwȺv,m{3[&6awO6gQXNpUᶢVKEvv"myWRwhiB$]6u,]W?jZppZuV4*iIݶQp "yS7Pn~Y#yx+"݋D\C7C nVySE^ne*+̑tqg= 8Ս^JmfVt3"a;6QQqӺr% X,l=6sUI&J˞ CBjB^B^V'9> /Filter /FlateDecode /Height 207 /SMask 65 0 R /Subtype /Image /Width 238 /Length 571 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+q/endstream endobj 67 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 201 /Predictor 15 >> /Filter /FlateDecode /Height 214 /Matte [ 0 0 0 ] /Subtype /Image /Width 201 /Length 5547 >> stream x] w ;ֳ`4N{6-3@0 U-f?$|^Cy?$|^CyH/}m/d>$;/rKBf84fWlMK\w4C VoPW&$h^7hmFӓpes$Ob2 KV .? b][_VU66&xK`e6yAHfkcP^D&f3fH S/ !a"yu{hz1&ڬML*aSe|1&ڔC$4Az,%h3uƌ2ئwc0!9SϠxC :8o^47 А/ Hh}^J広#[33}rӊt:|'RVs'mazr<}/ؔU T,2BcH꠼ FSx_EJlz7Yoўܑ(O#QGq4]Uwi'Ł(}/lH̾GrLބd9\ "5PIPb*S}QwE򪢼d/C bTG ˼Rx=dy"/\!Ъ +}Ԝݑ|>ٕ@"ahjMX>%l&7i. f0֜x6${]H!]KFEGg"BE1c.]jFWdH94:X\-,C!,тdIE2e+#$Ņ^"iN MB!7s,khHf"R"Jw4'MZWɗLwAB@DµHd$.ݯZ\ً*hD.HU(kݘ>6'k$~Hqrk{x*H WR֖v$#Ysēz T>gir=yv2 G<9r_PdP4#U|r T廆İVѐ|Ξ4Uϧ>`rip(]5/ӍG$sC"bHbd jg 65_kr~u"}gR̂1)VСX,0ot[V9B %b KULAܓF̖eaDJNvWCr^}UZ [54< \TTI>X؋lH:#@lA0 DxMxԻ fͣ߾I'^O-=pJo݅6׼/u҇rr%B\ָj"{t9G,QhMEiWH"QI,73([l^"z")XcH)h=HZrz+(͵)0]32u}Wf'W!*BcxjH?Þ0NĎۢi^}ilWMR%OˁT[:RX#fXa&:oOHJ^pHw$a*Cv ߑWy9nl66> D?,nG5LM$|F)Z$`' xpsEnl#[rUluUv9nF^.3o Ao! 6qYELr 'xO>§c"<#)x2Q`)"Qlg|(o{^V4~۔XK̮LOʔ :¼49ti{ ɮ,]+'нH <ŀHq p X̕~${+)evsɮHF[܆iJH;R*$tn $$\I]ܹrdם5P !FߔB"~:rP`1 I 1jv[]u1|k3^2 {~:W([amғ0aOw>{q뽕8$ixjbl@` 9(HJ\@H|@XimAw!xYܞȀ "'o!);?  
.z+돫o>,waAFw`nR/#~ۨ,G.w}坧 YTy'R?-X$L#5.$mշ[HQ/Yr GBVF$1kJӋ䇾޹Gɵn9 މk S>mu>mZ媩 C S){rx^ܥ9APϨٸ' UXX/?bxScЌ _ ]A/"U=,2"^9ucбa)qb3N&H'j BUb0g 2"y"b:i{M<8W~;8 h;|ꮄ| D$cQ =BI }1LR>}u"pQwml}Q).Lm*#HHf+Hԅ8ܯ_F\B$Jd/@|ٜ|ʼnKg3뮠NEym8֬ Dj*%Qz”pfߌʮ<ʍndi{h8c4;^9 ^22qfI͢lwgAz9 vҧ3cbH&#AύS/iDh=Dm9R!a)HٵBk$ʱJ$ #S&xlh<)紥2nr"+ *xC{G=cը&="&*;PLGadɩ`-W2 $pdljhWtahc2H`4eNPY!:OlDQ`$H0P Vqw(VwO?WO~P?V 69&,3˟6(d yXoĖZ," c/5>ֿb6 CD:+Xg1BOxN8gH&FD=4NvSf߆FwZ zR x}DqJdZ$Go _J\B=ɿaE0 ex8Z ¨q􀊽+h˕ήw$h#4 _&[V+kOoן v3k]jh%.4mԖP v>vB/ϸrWwm##s+,t!@7 $I&yry]CCtF.CJ)őv7%ؤmu'C)Et¢Jeؤ;BqhŶ}Z~~:U6 1 }&>g_tW-Ц*!D,E&N_tW-p 33.Q ('dA j4xRlч SPwQf;i28xҋRlчE`#{8q RO(lRlM]?˵Ĩ'_b>xb\Y*LC,riڎ>t[H옵8&|b0>%R )%/JEu9+l'1WuRpEbǓن:$sEsV2s3ď~4$(Fx~} :W|e`u<}Y}Q<:8XdNǭ[[+\{.&_hod瞀]f8nɵ[_14~`#bg=o7pSFb*AxӦ #!o;t`E"ӊ!G6{"l _+jq μE&gWC,9uɸۏtwG`Þܕ!|.q݆@savJ:i}v{g2fBR}7~(B"1=H!oW)أ<q烶ճ6!"yoCy?$|^Cy?$|^Cy$vYendstream endobj 68 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 201 /Predictor 15 >> /Filter /FlateDecode /Height 214 /SMask 67 0 R /Subtype /Image /Width 201 /Length 563 >> stream xA 0 տr ̃lpxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx endstream endobj 69 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 235 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 2442 >> stream xrH ` f`ϑ;nmę[]Z1yWEq3 o?9ǯߋ/Y?ݻx<XzwH%9R ߧ7,`zI>%!?D i&w/^\BG %W4%Y+|+6efI%yEIX%HNֳK$/ 7>]v7]Zff7Z{y~~vPLTbZnGHiR Fk2%(JZ)ijB3}}t"b(ػb՘+޾zִ$՛~K-(LJ9 GXxqF=ܞd(۔BF-I77EFr|KFo^`jZɇ`&1O\s lEm)Z[+Yc98^`[zQ%[ UUbVU@m۶yZ7_VYVrXF8+ܮe^yŕ#ȴ6 t6wU ejkI6#Vg煮\$\URJNJY=s@97Eί\I.KFTl\.aŋr޾| ]jyf̪+5qCZ 킋7X)!W*8ˬyg.[٭x;Ary&K==u8ʝQpbf+.+.ϡr0aRX$ʗVnnƟtR >pS_5x+Itjd5%=qg\WһŞUW ײ3n=\)f:7psf8׹M3\us :׹is4ùun\:7psf8׹M3\us :׹is4ùun\:7psf8׹M3\us :׹is4ùun\:7psf8׹M3\us :׹is4ùun\:7pͽ߽R3n;<2Nqٻe-Q;ᢘ;rknUmU.fx,^nCNUUU.U喁Q{OVIť,U}KY%c!](ghڬJѐ&.%ECkoS5Zlͨfi^EwۭY.+|jN>ClZ7p5R͉G,fi]&N]Z5sX]jkZ2fInಚi*W{Jm!Ŋ%'z-Y6*^Erq׵>3o(DeIn=;iz1jCY~HnK-~aP-\K/^oUٛw ㎱"*.˳޺{霚w֩RRX^^)gj9U!)pв[x]%wŕ±´*κ+qm1}$wer.Ψgr:-)WڵD^FK~a&#hn+A[J~a`,ж OZȋ~ YkAǥ_g"ܮC>7nھ/(OxPƛUj- i}snYJ~OJ ~`9czjvv ^)(gN( dm[VbԾ>~2%.G=5RȢ%wnh?=z5Bfzz"6C5- !I`b7? 
]rK[ڍu@U|q9DLV}Y:_nM0J5nVer%#{6vٵQ"^QUX-JFj;#ARˑ݀~𹕴{읟"kξ~2VBdk~} 8Dy<}nE5>m(eOL+i߽>θA8cӗb~6~Lժ*#nendstream endobj 70 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 235 /SMask 69 0 R /Subtype /Image /Width 238 /Length 645 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2+>endstream endobj 71 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /Filter /DCTDecode /Height 248 /Subtype /Image /Width 187 /Length 1791 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNK " }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz?((((((((((((()qIEbR4.M#QEQEQEMZ>\Fڥ[s %L ^xsEQEQETZ()N@=AҘP ՘P`T+Pl?zUIEQEQE _S {ԑ Z5H*O7`Pӌ7@(((NE(ԑAV>v$QO'h =&"EE$<LQEQEPjx-ڝ$GJi6UCqQ7>Yiv6zDIſX((75jA$!3l1Q`iE nj J_)faH)DG5)YiQEQV-+ ӎas֑QwZ\Sԩ2HE:cH3gz|M#Qp!EQEةwcKS|Ú %4;zI>cJdnXJ((3V"B)BJc"҈ɥ1MjX-O*FMQEQEֵ+ &R*n m4 ][V҉ \T_f"DR`t5PEQEQNCZ6ja0Aiu8|lfy+PH#j٪GQEQE d SJ'4yMh>rGzi'y֤34AEQEQJjAJosKp!4I *T)joiSmIHА*"1IEQE=naZn6tLJxv\*-"T@1ҢHQv*M c +T&\5EEQE^Mi)W9x'](p(L7 SkVeb)Vau5 QE 9vs֝IsII掴טUY_&Z{]ޙJ$w}&Yטm2M2((G4R4'&M;4yFniȠ4EQEQEV#}c߳SVS*#TLUSEQEQEQE>?ZPn@U+H͑PUU֊(((U85n+ SӃNWvs֕:ǽKVU QEQEQEQ\2i2irhɧsb|QEQEQEQJ4J`4 Jmϥ7>4V縩* gQEQEQE4xVQ+.iRf1N(y*iB_TwIEQEQEQNkB OZ Ӗb} *]օzӋsPܸT%QEQEQE.FHXѸӖB K皍-QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEendstream endobj 72 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 187 /Predictor 15 >> /Filter /FlateDecode /Height 248 /SMask 71 0 R /Subtype /Image /Width 187 /Length 617 >> stream x1 0 ?aUU<(f;cC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814q chC814~endstream endobj 73 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 104 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 1227 >> stream xms0-8a{wLG;'{:n7wWa{WT|Ǥ}p&6__ilί XX`7 .s|n2)%u >%@V*zdUJ`W:lgƬ5nbHZOڪыn ]ǂohiY]aY0yy2`6VU wA;@Fl]k㒻 sg\dk'\Tѹ#4xJonjewq05*ٵw.DF?aPwU{nOlpM Rmwg\-iy}!Z\m+G$Vs~G,_tZ\޻hboYRΨ1^fZ&s?}NT:mFd/㲽|Ue\0/WIrЗ{zxa'U3;qtMV13n? 
[]6^Yw8^AAVq.p|.֢S1.TWX w;nup3wϸZg?ĕb.x nU[p N܂[pu܂S\*T-W n-:Up nթ[p N܂[pu~ |e/fwoיM2$u\fa F/hՁ{%I?Ʀ\Um)=MU #܁K5hJ Ms(NFWsǭY.4fygGNxRjim M@,9%6#"ߥs<+؆l\Rat[הxo7BGxKK>N;/O݊sQb'%lI[/Zy%i`Os܌snpkFRƑ <rl;JEU:k(c<= )01&Fy 5j=#~C$.KN~47CFvYDdnE,K!pL4Z9a\qk ic=h36Ju {Wlz%8QrpbN{@2a#j˹9@dz$11zzb2c\QH[DeL/ߨv fVq\ 4endstream endobj 74 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 104 /SMask 73 0 R /Subtype /Image /Width 238 /Length 301 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"iendstream endobj 75 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /Filter /DCTDecode /Height 114 /Subtype /Image /Width 188 /Length 920 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNK r" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz?(((((((((((((((Q]RQEQEQEQJBHIJz})J>$>ARa'4QEQEQEf75i#oJ MdPi|i|TPi\ N)275QEQEQE[8aZ~jr*O[瞴zf LJ͔婔QEQEQE$oԍ9#D\nKѼѼӖB*Q9#H ɤ.)((PӼI4[yGғ4Q> YFiLfc$ՔV"4((n0ƭqi*ۭ)Z4n4 PqI`GJAnVJ-Ne r0^([}yˊpM#Lnhp=ighNYΠu$L\ZP/SG 5^()S@e'7>y֗>a>SJfoZC!4jW' >)Z~*L((((3Fh(((((((((((((((((((endstream endobj 76 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 188 /Predictor 15 >> /Filter /FlateDecode /Height 114 /SMask 75 0 R /Subtype /Image /Width 188 /Length 299 >> stream x1 0 ?aUUsendstream endobj 77 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 238 /Predictor 15 >> /Filter /FlateDecode /Height 135 /Matte [ 0 0 0 ] /Subtype /Image /Width 238 /Length 1523 >> stream xms@ =؀j%`l3LS-/~:ݹ3]ُRK\wDo_'O`YqwH8F,'\YsʎFܯ0 ր7ظp~|%,[YZ? E vpa'A.rǭ{9;*-$p{g7?2 eY2|9x3],lUK;G.)+newaq""rs6s'Kď˶, iW Ԥ^ƽFeGV\V @YMwF]aWUmӴm\ڏy jzěL޶^%A˗BKYU\)ڮm,uMkZ90|~¥R눷A)E"ļ(g%^.gU7%ڞ̭i9M=ImTÅxy+tUv} 6mRT^f½P{9+˸ :LvZ*g%J-SKQٶ^4+%{'f%wܶiV)*]V yKܶ}ѣO`K{Q{R-.ݦ, #OT3zH%}uYa_ڛ69yYb~SI_/>ZM7OJʵ1WuܾϱtWeΛpv1.gXx+fťb9=nr|ܻnp븆帎6帎6帎6帎6帎6帎6帎6帎6帎6帎6帎6帎6帎6帎6帎6帎Spg]uwƨM$A 9JLHѝ,|LE̛{}̼D#UH;Q"~8*8UJאh4\o\r43Dͥ;\iFD ~ k|\Z@%5E> 5(RuGJĮC{Sm)⠁hqF-s pQ nSڜ#0w jyK 8il%tZ"O#mdH-F|S^+y8bH>3Qs|O\Μ]2ϒ= m|Ii㊓K5[N=>k:6-':Q/kQIPq|an/"A>E|uu 6wda ċ+/FԻ&z4B>-q~݁(o'֘[.1Z az5Fy GDKs/ U3Eلv4a֞c0)ӵʋΚׂGt&&z:n <["cdxZgm tɬhW47s,*Y$⒭k4ufhUm#> /Filter /FlateDecode /Height 135 /SMask 77 0 R /Subtype /Image /Width 238 /Length 382 >> stream xA 0 ֿ < ;ް2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+ae"L2V&DX+qlendstream endobj 79 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceGray /DecodeParms << /Columns 194 /Predictor 15 >> /Filter /FlateDecode /Height 148 /Matte [ 0 0 0 ] /Subtype /Image /Width 194 /Length 3918 >> stream x\L;|-J6t4ON`pz_;wh>ޡ} C@xo |/6Ϣ}CK~GXҷI-`/C*q菷R'1&_t/Eg |+>bJik£p\yqo 'CƟmt@ Q?fmݝgGrtK.:u? z8E[Zqx2/azTlTr/rB^ Au\/ =|E oB17mU A8-I ]#l(kgl$QC̣)^ Np~]B^GFЭU7zE*IunծBΗZ!_orѤ3}ҊPMcp{=*G_c|jQwn 07S ~Bh]f@0/&T ^{U-eBVᅡG@$fi%Ap=,"&TI8m z?\@bB"! ۣë P:76v |R贸s`-2/j'HꭐxL;*Mη{\~ H[KZ/պj) GuD~)Ϊ7 `̆U d"t]5 Fߋ$P,cD<nc rVi! sq7gi.lܒČ@S2(4aC`XM}Eq- yyUb,!(v1c-*Z1sJ;5:+ Ufa Eҟfغy #l威Ğ$B* t+ %KVY L-#hq3ᜋRTI2Sa.ٞ ʀ9J~lF_0 &t>Xzv \ %Bqq\jp*z ϪΣpxALUDOg o EO>>2@>+tzʹO6/^^*pblzJP!b;Y&ltMGOl`zmrsh =cC9~S:9\3~sϝ.dy߅rB!h$v\3!dV[10:#] %I\Ҭyنpt\{$U`\f۩L`{ҙpH-s% iD/杂 ~ǙtQ! 
ٱe% cW*%m_J(}[h-^<, SgKHJiMm1NQ]A~\ߴuĶ̽AZBhu8y!NV_馅V.W`;:y[(1o|HYrRnn<#}B7yx}=aї3]$EW;DLۅP=Rۿ73&3pD$S_ߤS(C˰nWy'KDéSm57PpDay %-paI&+lzBRS,l W]#I{$mWy'a 9P7ZһU$]\Ù мJB4M}61> ٗva!/~5bQDK<[_zH.Yd| ~O7;V= {9`l ub9<ٖۛ-bb ύJ8L$s} IVB =M{坋]"iQujXgaVzsG. =;c3^lIP/v(S#SC%@&ߗV] #C@VcOL4m&8\b9 n_lJc‹+^e8ȃU@9JDO;Iȉ0~&3 dذTv w Uಹ f8#PQbE4s0Q%'_ׯk+n* DeYQhSJc~P%yA|;CLx< ~ñkZh-fIdgBa,D `S3!#i%CPMbdvܔ<: a='U|kUYEɗ>BVE Uh { XIa(֖p± OCXab[~mg%~^(Xo=i2ۂ%,G 5'i*h)L)DfqZ{XlX]E.%f8ڄH]'bC:[B( !#FUOV$F)ije4Qmus?) {OU03jBxs$Ou? G0< v/9fѵb!dVȫ&I p/N ׆iE3 ߼o(TecHG ̴0HDw c A<')skcj,Ԍ(9c'#NRdRBO}mUlgsTApjIBn/^mZlf}X$m Ss*^`k^. 4BǑ 짊SG&mJ` ֲaUrgn ,neMji4UVy rL`XCO م$+tP33aUhzB]"?3CmHR c[oޭ*PT}`.@n0t5IAO@HFIׄ,.'i7Xz'E 7-kcA%y"=ɿwӽmRL\tit=sɣKzÛsa>|iױ>?]T!wl>ޡ} C@x;wh>ޡQ Igendstream endobj 80 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 194 /Predictor 15 >> /Filter /FlateDecode /Height 148 /SMask 79 0 R /Subtype /Image /Width 194 /Length 399 >> stream xA 0 տh`096؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈؈lendstream endobj 81 0 obj << /Type /XRef /Length 210 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 82 /ID [<08138ee8c1f63dc815aee282d32bae51>] >> stream xcb&F~0 $8`v@6(%K@az ";@+X$,~Dɂ| XMHW6ͪ"*7H 9,œ"9 li) f k`]D`nxdTA`^֛ 6\ r"sDg[/dC0s`v endstream endobj startxref 297606 %%EOF metafor/inst/doc/metafor.pdf.asis0000644000176200001440000000015313150625652016523 0ustar liggesusers%\VignetteEngine{R.rsp::asis} %\VignetteIndexEntry{Conducting Meta-Analyses in R with the metafor Package} metafor/inst/doc/metafor.pdf0000644000176200001440000161472014060132612015567 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4748 /Filter /FlateDecode /N 90 /First 750 >> stream x}R>E?I^NE Hx4@|PCA_W5aP<='؞b4<5? no\o:*է)٣^O_k|0^.y~1/'Ղ-T8-ޡ?7M' 9jt:'q4Fg.}l>^\y.>2sX2sS*mߙ\NUCާyEMsop.p\!cKtNEj*x> $bʓ5L8) <3.Mɓ'w'ЃU=[x:;vMqbڕT0v {PP"}w4izb!#*dĔQIA: Tr iՠKTNSTj;K,$"!K,|",4hwDޭ=zn*1,Dr" q9NgRpq`wvZ;}.1{<Y̮R{Ҥ탷O/TLOGs BQm_T3Fɨf)R%E!"jsQjUzl@nWTTnxg=mldu`f ؟ ,u|@'``} }:;4a 뚂7 -ofh30~(eQ}4Ah^DO`7o#C_ ǫa>b[1fOS=g/{^]kvްCvĎ[+cbYŪEͪ˫z6&#;a'ٸN_^V씝Y=Y"V Vy];ÿjv9`]]6b?ؘ]d],e6e`Ʋ+v:j<.86+2y}9Jcן q, 5[|k}f__??lsTq.Iqu}dI=^| ˷'Czl4qbf  (s_#UE}+u[[ 'Y=wVEKIE(Z,0DO ޸ wUƚޜF G(\%4L+Kf/w[uT$@tKvɫ3C Lo^ێӗ]#zeu~rjӒ\k+\BҤآdVl"Qsv9oqҜwfa)v}s2-dTү~9$*)/JԂ E:PEE::G!r"Rsgy r N]9!xv9OVEiQbBNܟeW_V"ǃDȐ=W! Q2:em0uQ(E]j]O)0MT(=l]fsv9]ۣPw[C0Kca` F* ) P%:Cr :+IGuX I%LQ$)C-9A:-#{Fx,?MçbMlfoYb@Qo%V&WCnNgm ~N}xJ\t9r4"D}z4%K+qDuôT`Y#ud{ѕaw7wz뷍pTjR}-lʘKdQV(Qh|Z% `Ղ}dY/t:&$VIWz>62LWf 1` NIT-Iaͮ qU,z~כo»VU/n%{]~z* lM򶅢sY$7J~^ \Q/ 7í}zj6ōq/UuGu^o87sn;SjBj@[oG7.ָ;84tLQzn)[,5v]_ipRDtAhTp\U܁ٴ[&VמߢdO]F7.e}CdЁY<ҡɻѱI[VPQ llkKd)xsn7ZW^}IRHn~cr%c3SEM3u#nChvR>P?a(~k`{Q`ߌ1M⸢ {u^m .fm+E5XV`P+g%DV6uvwRk>LYyy~ Dٿ0ZK=endstream endobj 92 0 obj << /Subtype /XML /Type /Metadata /Length 1337 >> stream 2013-02-06T10:52:28+01:00 2013-02-06T10:52:28+01:00 David M. Jones CMR10 endstream endobj 93 0 obj << /Type /ObjStm /Length 3241 /Filter /FlateDecode /N 90 /First 829 >> stream x[r7}߯cR)~u⍕8&ZIL$RKR9 ICjE$r0Ff/AdE"шc.fa\‰0а4 'npZHOEaKh$acaČXB0xM"{GQe02 (`0JƐUH~͓Y- 7>;QSdaX-g)`=EjBDIJ&S1Q^WqIB EѳEF]Dȡ(#"YgQ HSY=Ki4d*̇F| N:א +FT^E+1wGp;zc-x4kG/}`2|2אXAr4Hh'8l+74K_ & (*v:Sۗpv7O0ѯFeK-NW*\僯|uW>x_c=VJWz ^BWyG5^B*XJ/$~Y1AOӶ<~{_:l0:n'?"g9::Ne8z<O L`:,ߴr6[P_ͬ@! 
p2|;;`?Z~;;[|$]#&빶N)wl&,`ԿK5PǪU'jHDML] En# UOSv:2'"<)$h`\~:m$5Q60.X:X b!^@!GQ2ME+<&[T!ToTwo w+dHVmcLTj/~!Y(2"%{dȉ1!@/8N'[K0)v@B]kd\R8ٰ6n"r~PYI#J#GUs@o_eU2  B`x^栠APp-}6a XdGHRF jIy1K\*"dj49:d2=.;9aeSZ[X 'ۤud1@][iy7߫Q,Ka0k@9,` /gźbdL'$,uJS+n * ۀ:*ʾϢX:8-+Q#wk$Yծm25i*q זNӹ?VOSzWաQP?Wjިcu<UauN?T3PTBh1.R]{ޞ̺֤Pl'DpV]_Mʢ|0=+K7{A}T6I+7.wW9ʕW'5Up"tzH0z*L4`;8YuP)J y qGd a@#uZOŀU8gPe-ֲ9Knb2~@Df] A&wjJ*[:CD)rD@ewj]6Dă]Ճu6kU5תxI6dyq7X L"g5b1B &8th'ҽb4ק-o7`rQr"t۱  ;1΃`mfBbPSv'Y12n\I7j}U_Jh*BŸ-v6hϵrQcU5 N&`vé-TΩ AtvϠ9A@}j@< P.>β3PGDaz%_+ TqZf.ԇdX*WRn)]phTucI ,X`Y'IĞݯXҝ%X b/}{uqK^=Kx6f/:8zhgϋ,okA.pq0.8R GҙQAh0+"Gl}jdp.d7*+{s2HB,nh\A~wj/.:F͆H6W2Ȳ I6 c0¤ LfTLIF0ʆ=&NdtBFӪl0O:WCmOnK[Ԝ~+f؀rQE ڔͣ^oXKl%K2d eäevw`:2w}`N7_w!D~k*7i,?Uj{: qr/'qok$8ϭ_W7xo''Gwi_ÙKX^g:9gH\ endstream endobj 184 0 obj << /Type /ObjStm /Length 3395 /Filter /FlateDecode /N 90 /First 835 >> stream x[ms6~7$N.Υv&զm58_ςDrJՑe"x> *:&iq ,8cd)QHzIEw94R *%˴x:9}{ 6H'0M@!2PJep#TXcH*3.* 3A',31P1+-]j 20keb6\R9eQR(xԥ4sP292={cɖ.A6̼>rh#{C57\<|Xz%5ZK<_k7W)2[K;fQ5cɗχ~ox Y٫谚Cx)/s'^T$jr)G[}9rXtѓ.t<]"OyE)Lg}⏺.m> }U/>-h0? OGYp6.fQPΫF2'jZBf'>dMzjTǗyPѝQr  "E{ix$w`[&b8Ԁˣŷ1TT)=o]=xqKp kw 'BZxg??;D??-wgKk;z/r+fٲyy31zj~s9bYE$4IѴ=x*Ł'q!.U3((v2o̕yGX^4C+-D4 7(IƂ3rl9CS]iɩ`S&M6~\Sۃѭ>."kNGۜ)=(d`*j(YX 5b6Q8 h^rl)tW$0nAT;2)HZ &xnS7R%ժ;~wDո=R š썃o޿ʤ0ϗ1 u4 N!°\ƨQˀit>.0Q4ekaabU=jw?yIɻ5F&VM7Jve[`>,˼Юgb3-zUEH{jz>(Z+Zo2!_,{ifq9ͷ>.ϰt?6o|0Ob>WC_o ojfa}68FvJ!ΐl5S4Cmt,oM.<ޗU@V y_pdf ro@BA=a`:dAd-P#(dTz͍Ubk'Jl|6"B3ws9Kv&X#+bp ~A6PKP.q# GBk)P{Pt[PGS-PI  c)QS,K b /AYi.@/v'(Se崲ӒAZ.A5vnה7Ae.&dO(^y#w fZb>O&8 H&7Sb[="\tJ*\@VeЛ(07I /F'z;AM.@Y7uV0DX?#Bi;O4 &/Rp;eWmPHW Pe (<@w(<ڀ2&qZ])(cmMd'V{_精ObpipZ㼚N&֢UVB]:J[|o7NXf OQnbi/>aosZ/h˵L3G\}y'Gu+/} m6]4>Fڕ^L)0$2z831]r!6e#F.iil4!}aK]6|m}}еև#'gx!ſ+8đ'ıĉ88gY5.{F\D\`VQu2KSYLLFٙKY\/f"6M=]竉v [KZĔ_jyQ~i%xԝ;wDf ,Zendstream endobj 275 0 obj << /Type /ObjStm /Length 2891 /Filter /FlateDecode /N 90 /First 829 >> stream xZYoG~_яXv \%)R!G__eJ(tU]]gd:#]#sJX2?K㪘R/fa9ze ֢ᘊɠRg ӖSoUR4+:'3~(Yfvh8fɣE 8O_Df!N'FJfh(fcVa@u90tG_y朥J{j$E`6J1$54Q01Qa*>`R|#|R' =TVЃB#dYp^e!DIgQ;PD>Eo 8j 3%u0`h4HJD1D}Wor[WnSC駍J?$tV؃NRғ(=K tŃҥ]++.-ti]f <(]v@}]=>,> _NgŬ!U߼.U6vп(vMcyRSO8\b \Cµ <¶{Tӻ*xSN>\?($p:ProSf\ ,.Rr k0R J+' 05֦:QbVq|&ɕ&L-&/8OI 6PG Rb7pp2R`B8%-&yD pLyRJIR@dzHy e%Iu ɨ>^]fk#1BԎmO/UqjQ}E~mC^& ,~vTl.UMuB7HVBip4)1{]fK\^TXl2='௟(ia*nnb{)6pQ&$g'R;3lY[@̽ ̳P5vGebwl%OXeV-%,9Do }LJnC\p&b'Ѽ~;;br4jlm|VVYweP/Ba9M?֭E1w}Ư_wq餿NouP,Ufu!^n/ʫk6Gx%^ıAoŏD'A*Fb4?/˪_hVŬgl4gy1..5scQuѮ_x:H\xA qϻX\S!.s+q5+kĵr{]LD)>1)')~o--yܪ'-f q;?şwӪ8y #b>ͯE%YQꯩ_"L~?jRذk5e67ɻMf?Pj\so5ߖ96giU|7b6Y~k6+ኙѲQZUw &N_k'Ӷ)߇ыi\n?GL# OZ[jmt0Φ'*.Ux4U["w)DG,,w &Bk'3{#>%֘~ MnKC`nTyb/i*d%iw?wvw=T篱OK˄UE%-͒vTY{}r7-&gpV=;&[#fLpK+.m\URvZ[M gD`hL.j^ښDR+y0v xhuۈM}dDq $-SN WW!Hfzmmwe,J4%rn~` qG-lѲnGlIGb0aq@-YN>3.,3 t)p夡Φpy-:unhodsmfMq8ew۰R_re<`t㡠~ڝSg4WtrɫK )CnN j92tr ,Z_k % yX| vxWT3}1TTh4[rzr:r  CaM>@|$ⰰbO%]wi!XTt*'Nz/pE@j|zzs`g:Ҹ.laHWm=SfV AOԔY:lVV=T Ou +&):X.owԩO>{ߧ= 5 5"ȆwBζ2S"2(rstq.eL7/n1(XT]i˰Tj9 En9S%"į" A9Q{}y<"&ߔoo3<[i5vWC˻|Zћ[s<hr˒X{:KvTtj﹄eОx{W?]0endstream endobj 366 0 obj << /Type /ObjStm /Length 2498 /Filter /FlateDecode /N 90 /First 824 >> stream x[ێ7}߯P o@ $1E[޻5ل?r:ǯ d&wxn`/qav@ݰfzx?w-ofYbz{n7߹? טԨ6־JW>>>=oE⇑(}>>>>~NB0߲9Aqct> ct~It~I'tzivzivziuraSI^$H{×o'>g?7pF1=R.; H).gGrH/hQ7ָ uΫ8 /Lwb]!*<>~oۃE=[OkTUw,~D,D,r:Jp[D}-j>g :W#ڈ~GڣBأr٨@Qs|#ԍm%}Z/f}-У O>Co ]s ywW$Ї|O hWj48)bS´WuN֚sQ| as9``Õ  ,TL>P@qLPVJ$'=?I Kט|. 
5Pi 2Wń(XAy׿.̏.R7}kEf^ &: Ŭlg=3?nRZa5_VDq[4}X J@`&0%^4]K3Xzk`~8Xo#c^!\FP$Jq5%)ZXzY E0liKs-[U*"PN&B܁b:~Bf4{ƞfIc}Sy퀮ΰtKkة#T0hVfQOBSfBB,x]L};QC]tePQa&#Жt"R&o0AZ ɬT_rū`bԄ>{ E!S?"VH?{JV7[*b'offitw0c bߺ902ro[;D;a}ECDo#Fp<[>BId7r ɵ=)-==)֔]ak&xĻjڎP}3 (.XqbZԎ&CZþC@"KUeV$Ϯ7Kڧ1{ X@q ղƂQgtDk̹j:6|UrŮF٩QnCO}|T wdG"v0kߕ ua P>aT CC#* ޾a-S="u@iHi U <G֠ƳgQ d&Yp1] ԍ 3X***^+PlzoHu/H"xS<}v'@I*֘*a1mg9s=i@}!-Oy'`S *&Dp\j85(5'jӔsƬy \jF[ϚVԮ|%|۠JhV_\ B:!" *WDDCy0bW5Ӯ)tSafFmkLdZ[޽o*Y`L>^Nd^@v(a> stream xśmo$ SȒ(PH.M4i_-¹$nɷzaw_g3#D)*kHrRস p-!RqC! njVCnqp:n$dѐ{jPmUR( > -5E >"h!bh h*i $JzE=SG BS<& p^*n:~%p&:d+n2>FS| 9248QhLN M=zAᦢA.4{ԃԌs b0 ^0pCM-ڠpnAmPU KľAUhse7zީO}j]D\$j{;T+IvgQ]NvŃF.I7^DнbLP>]r t'ڠe*Pvh*a&fUn|3۝ɠz> ?I׾5Oٛ뫻|v׿;G~К#/[X{AWq C]vtnqO}n1W2z*}su}WoPg k:uAdA4kWQtԧ>i"|T9*Ǟ_$^??e1~}__~1m~_oS2kWW5kWQ_-rGyt_;ͼh9ۄPs?šy ^TQ+!x{}Gf1`pWDG5ƫ3T\mzZ9³0m1[`U쾸yrHbN,|. s?wu]}<7-7[,6sRRd D|駻I^^wdz/jUcZG*@j\H̱'99X4CHr;E^ 62As,DIm3g{czy.Z:QTErL0u5Tp4y+BэQ ס Y2E r )r PspjE }\bB|[2 rl`CZ/t"0ZR o0m6DLt0z"!0: P9k=RSCr$R%'E,zIGB*6so Wq".:q)mmhqF|b%aۤ7@L\mxpM1MS\ԖmSri2H4CaLi3 `xcMs\N㖋):@NOK|K(Q&54{FγGf{x@LʦI]^Q Lh)BbM ?[|k 87#͸o?p "ޜj4CҰIt%ʁ`U+\SY T[lyـZwҥz/7yɉ ?Q=#|*[@SElቡ1m_IPY[z4YWWWloyo~u_Pm]~y˴l> xuje?2dOٙ=rcLy9o/Vr0FWh}.ٳ\u4k&kj޶ mQcj |+im; zTɫ;noQ!HcKq ,XJmHAcRY%dxrL[,U\HkVnYXC.\) io ZxP5(m[  sl'$0,d믂GBR Cb6;UbGxasj}]ej^ȓ8cXk=Rf$ʖMލ_owa~|Ρ7on,|ƻg.ƣ__dضˈ}@R #`dRȁ]H`Q%;&I^(́ #qJTzq:c87 rr>kcS|m,,r 9 !g: 2` s ,Yΐ: dg,Y+KuTg@2 Y΂Rg_}݅ȠO'RD׷jYŢ$}T0<0E"IK۫_`X+HcbH媃rHHMƚDѳGt9^;M.˶-o4U0 km~ƥTx}]5xwY7&__)83p-Cp6f|خr^|mӤxCHf$e"2{-~_m>S\gM$yEl2mZϛebI*,AQ̰@ ==ϼ)pEO6@ud9|\@HH5ӓ<~US ތq{5S_N\n"}#gJj2_˄q<;cV~>_߬xt)yycJ$T\!dbjq>krqS,$f$#EF`"KɝNP IfAr]o 1ɨ/!hG)R1N󶸪r⢛E{7%E#039PRDZU~s}iPzvӮhJy}@aHy'w A/+v6>&!O 1Kט{W@E,|X͇6sOA cxݔ먃s|b*85j:D8(x.Oؓ5K&}"N҂^GrڦIZy^<?Uy]pH*crhaрm^=N ٸCj(I9I q{Fʪޖp% Գ&k mt/`xX\W1.rf'p`58ȍ9_˝:QaR}6wIdNI߇Eqp;רh D-?e.iU!A'd7|Ov|o^@;6Aw >E~e Ҝza /<w,w^lmy~{(fpVU@zqQ O^}}퓝@8pSWEbl30\&ؾ.oXm7#wyV7ux!v& Xdqp'{OPsܖ .O^ڡ0b|^Vk'(1dvՁhAP t*VaI<(Xу4CPR@O+9zj^X4$mGdmcz{aBNx~y~ON~)զ{kdh%ϾC!DzKl)t:.f6w>o+ʫR8VՆ,Ǝ?U.=fY*D?bF߷<B._ A#E[f W5PHsT]q G=.| CymA|\\7oH*铂.NBY^1`hikO;ԴAՔ{=ۦ6@.vߟ$4¡ UG(/@昣nsoo?˜/GRZp31!s::iBj;&Z(A!ǼES$)8#=]L}aE}㢵T}VCk[[}a< ˨ 7cNO@tQ{PNeLT;Xj?Y@,QE};}l9-F آ xtOf%&Vl1Iޢp/Jon9"BXyGք e`g&݃RcR>w({T8HfzTa'4aM#B-{][e2) Еc <99~ :cRty)̠xP|Zck #cBۯ/~X4oN"q?ty3}N fr L@!PӃR Gb޻5Coa")M2q<>zCdT=&!Q)@؃À:2 j#z#91HI(}DŽlG,Ls.k/{ 'c\λ̠S`e>=@y-҉5$mM`R&}L,|źQ@)2-(*u+Toޅ>f a8٤GQF|{5G{d6ڬEoz=R+$endstream endobj 634 0 obj << /Filter /FlateDecode /Length 5924 >> stream x\KsGrҏg"8z?cZM`@@;jLCAfuUf֗_fǍF.>tp7S |מiBǛ?o/_jsSwa۷87SVYo޾&M4fR!o~vjR9Ĭxv:ژq&s_^ŏS ,rГIE# W NOlOvacsSJ[6qy'0L:a%Ϗnotrvl? n88?^Poj ^> 滂5x>L.a'3!lOULov{dtVEUrR^7q"|_;cT*lb,dNĜR kA0"-}10Zr@ 3ͬT ͬ'26hב:X31፾aM6m J+]ĘߠNʦ߈# qL֪W2t!.-?Hu &=!qS\~܁Y+FWS^ ufefT-'\rQh q''9`b~WRFL*ĶX Qi--{~CGu~·9jSP}T~īGX &mk pRo:N!{vl*G+;]:}&aA@lVU\%FNeBkR',g*D7 Tu}:C9ex?Q!]мY*]0bKۦ2h u !tK`Q{C<3Sp?ﲅM`Vk ?v#K #X  rmMB\yOsObw4uj?A"CT1ze |"ZT4LAG['` \鴣0byO1=A_ca3<7I%/2.4$[ȁ#D] % 1w?ǜt& AqŠKgh 8? Il`H~bL*I Y`.[V{C: 6-}*Kz> |<>׳ hsFd*Y23L;=${9ȔWc)磫W<)(j_] DhB&R}u)zhme }:&F1-?$1ygS*kSkl)^! 
F'JnO Z" <[e`vϴ˼3W W909#%qr`6ł+B9s8W|"} Aң[m|хՉ쮵e 8Hh!DPmI9Sv"mw6~ >EL7"uS@@PrrMp:A|Tbm ^Ge]3VM))XG/v_lK]ˠ3y_=~ ) HQ{,Fp˅iZMòDr 5-"[[KLPPJYp_r܁;Z8 Zpd[X$!{#-wE04 .}憲*m~26-+&->Rq;920QQrd"6.3$op?-x- :.DkE7Շ+ңw,F?Kd[ᚅ1n &4pc}j- P-Z= ﮃDl_wĐrc{ƂcܯO#<3e[m/K*N RA1Ij'ۀ"Zk"rx0P$9a5dtx^̉c̥מ VAZp|iCucrfP(5V|_P:Px,4:VߕN=%a]"&/~YoPW' d 7ۧ]X3EluP+ppS uY7Jt\pB |&[7hpq"po %]kᄊ(hZ_zj.#d5l(X~/lѯ9H12\{"(+y摿)&b-lNJ O D*Dg!jQ ָ!8NDwg]E(V$,$A/G4PmeRփt2,N {lm3#H"X˰6ِ*'*t~դOB=c8w《{Jߔɱrk@ |ȫ0B&Q@͊$Q6$y-%aMApCXH#1:zT Trynx?Gytt`>+æ7`B >JJIZ)+06լ)da̰'>%UBj12 K#W kt؍[tS(ML //,9f圏,j;=;TP_=3f8sq2o MƖ=Z փ^5I!u[2ߦxNs`t0^@)*UY%2kL%ȩEJ6"b> \@*5//('kGv`JсcҔLA_#Z\J\Mo@o( l`3&˅G!S:7~X% +MECw:y#էģ&MԪu y_2CY- }  dvCE6iu+L9~-<V_-04+]* aP f\6ȄL/B&(e܊:s,`|bNU^XNҋ1>2r2Tȱ/hęǫ:52yw3TeL:^gh85NT,^覈d/"- \yb:AZp&KL(j T;SJVsZx?0)Pbd =嬳d}Ȼݼh05 =;{>-j&$S*!^}Q!k1O~@,bYDKďGtgXa#G[:ۯ[w@oC9:3M~g}wKf2/o LQJ>b~䩣bCӱk=5|is5L&<]]-E?+Z9$h4T=$ax;Bp`-$y`,Jg|QJtPWYOYȎYҿ7Z pka#n{ v`eIrYV1X{P4Cc-TabgؾpYj,DZGMU& K30iJ\_X 7/YѰGehǑT_$;E??g6|5:rM*̓MSvVlt*ׁm8lpJ <I]R-b' -*(YNĦ crvseEf;['~9 ȃCy WoE%%0Re`2|`yMoVAkqXבsЏI]][X<.5ߚWU\ٷ*y5R :w}ű'd7b$M沙)A: toBr#S~U$d]bx+y*HdE`"}юR'}'s?o$kDN{ܽɼ+{%eNY2UT1(B!~S آ. Kti\-Y#,)hZ?@NFG䪬q'%#6EG<p20*Wqy02aZj+*%+>-Xגd1&(ڙ<=d/.;C' S9XMRvUv=lTɠYVwzh w"EW]sQ/{I8xO,;' 2L=.]mw``g[DZ",:W"aB Qbgi!'Pâ o2V/ba:3ƃ(7m zoiArE ǑW#=쎢J?r)xfyP=PdO ~Zvh 1e؇*{j=pvCpHq٦hA~L_I^u@Ik1 k֗-Hs+p A,NSdב3YMN+H8;vWlܳX]&Hd˺K槒^i/W:+z}4ێ[_W:zW} FuLq?wB^}66/Я⫥B AXr+uqCjCI^rD#Rtyg%kg)}WkC+:'м߽={kw^/`\}I @eN[)E}vCO#gF_, e/ endstream endobj 635 0 obj << /Filter /FlateDecode /Length 6426 >> stream x\[s7v~WaJl7R)zQlǑh+fFM&9ZQ!=0ݼDҦ ;߹LL_ݜ_?p%|QѪ8&06W~MrShWy 4%yQmNQ~\q~mqJ^=t4iq&ڤ㰻Ǩ԰f. o)LPS :Nzxe&pmW*JɎ zۭk4ûcTvodTYm?>•wrtң͘6O_<l>jsd? 5_C.xZ@R~x=&AwF.9*;\ a` fb&qر4MPßh)\8.G᷺b@8L?Y ߱.l)X!5N.E; J<䇓d+y,Mf0q4NnD 4QQԢ+% I6`9`=i. *mB C+wM"Ԍ,n:DTp^ ,)jfߑ<ႈlabF)GhX)A [؛§B,y'`qUAӨ)3j^㭶(j)$\Y~Ob`tpT1'0߷Zqd"ˮOZa JFYSWؘQ򃮟 q Q nTLp heE`@vQf88⍥¡V M0TY׊W=V@ģ`)N \*X؍!I+ma $*w ď}K'қX\Rc&o$^T~FV@TėwZųZ|[^U-^^lw1Lvc#)֦lKcb#n 7@)8n|c wuPP͑֨'4(r )ڽ|~ld@*Yq"@&#?xN#JrTYW #Brk+BGqO Ja8aWS#q .AK]*y Jz;֥V,Cݍ՘:Ec ෺+0_OQCa  ur7/ _@٬Ev*#5r@Sj-0xV œjXY-Uq1$d3 lm_gMAqiP0So)SuպP9j J+VGkb ֽTef;bƿ,Rzlӭ=fq1}Dir'ؤXY"l+0ѩtrh ؞ jGSGS(Rv/n+x]ů.C9EwxYИQuz٣ "O>NrMuvQrONfB۔(hO^O1V|Փ;aƠ R~=Pφ\p qR|54#1 ] =SǏ;==AXAÙwl@-ϕ_Wڱ\O tϰ;G *`7jE\=FkkI~,8A4Lj]17V('Ѩbܥ ny%E>q-~?.,u@EZ<뱬Gq.M]- uhnZElP`:k+-=GR9e#Wݎ\?*׏#ezJBch<[Et~V!WPLafeylw4 0+SH X>^tF, `XS򙤣D@+p-ɫ%J_Ϧ:p\ֵU1t*'\mm$\t-߫&xFJ8mqA83 e&y5FJ:EF%2 50n&T &g^ݯTTƺƨ+Q;b8ے&~U4m&"0a8&*Oe! \]ކt;:F9ONLr] WDOi=>@ɤ"C>ҏ:?2Փ MF#%P\'P lS 'ɒY|]/Uxfs)$3yRɡ!XB_Q3ٕ)MPK":7gȯz38"jٽbd8ܺCJ'\C WCEi ?0&ͫa`G-XrzLEA >djf)8 ?=? ~1؂LZaalû85 "qi\Ԝڊ`\\78T{zF}z4H?z ;zG9 H2VU+s(B=#!oaё/a`}*ncכ"?]J._HXW1-S$4voJcrOt/dv8oMvчz+qDz X־/ҨI~ .rVg/A)ɵ2 <[b&bT+7#]q>,4]bmnW͓Sw6 Id2J2;|AG1@ڬ"/E2xP Yu9\9,Y{ߴhtUS3Dreh4 &@pWgoFfnTTX9[@iam3SPA1plP`^6<ڽ}ס%/1{YB7QE)3WȨnJIV@.@-}_ J| 1MG;̽aP]#UWz8+.V1`dy/ό)!h&`?xMbC/rNieq5I/Ir3̷c010MLS7Yx; M4_Ēt[5wws*1 eܼߐM iS9f&z=؋ ڥ{ (?wg ]@4 qe!rݨU8?' )1_MuX^%L!u5Fͦ݇TA#]]Վ^p_6/bv/7?WhۿɍL"5o$^5rs՛rPpB:QQ}c?KP_s$|y%J9ci>oiMGLFF-FVB!8ȯk[)i;)K`Y4gℷ5}ܻ&wNxjV4%v{>[χ.K꽩b]qJRFャ3}:4dK] ЇK7XZၭX|K.1G.oJ6X%QZMtw//\K+I, 4H]l|tfi|j%E{pI}K!7`>ls,^pE3(zq0Y=C'Q%#74RL,ϯ7-֟@)֚T[zb6Zhs"AY6 =gf#l*'+9_oZWmQׇ['D_G 8t|-DƲ٩}k\.) 
sbxtYX\K6cl ֱ_ t9wp)ʭ±6Zy.[@=JjycpdBIwζi>)zaԇt] m#C L1I/Xkmٵ{Drl1&/";MfWŒ^K /O_񎁢D4bEo_ Ƃ*Qڽ@&;uˇWw3?Pk1Ҳ n=ЇuR4Xp2G\Mix|Zu9X&Q?mʋϪ!kjs{kqaT|М1*8ZꆳyHJ,1|d 04}1&Đ])cU9ɢ- _pb2H>Ã% 󩜻lR_O@ۮurcl-M@עH-שj lz$\Z S> stream x\r7}D70:B]*E%{fl$[;܇IQxI ~*$UMҗ"@rD7jo j`ܼ,]rzuFzom\%ѳ48`П@0Ɓ& MNwvpjC뺇&Q>tZWج'+HhhoyQ;yr1USwD+sk;XAàojq~˝br<if;9mb/WNAY&/[pb\$`W~+70|\'}>E KsRB4T` „:xAۆvcyJptFLf 5 J{%ޜ |/_Й6X}AZvQ {kAɇ3 TwZ9ֻRoaǥJk;8X]Wq"bK:?&h .y姴sK#NihMG[b6tnpaĊL\r3CM> im}*9\ij}\9G#J++y .-Q'@ la؀Kӽ؍&)X]rk~>^.j|[mj| }  ?HA+xR|Y&z@Y̼`X[9hsE)TǥxXץ϶ =)G(ưln8Qx6[7b7ܛae8=y9[+&g/l 9*|{S RrZ1q]#8~Ć&?cUv+2QNE"XN7aZ`迶Vi?xu0 @58yF ;Ͽ٩'0ÔfsY, zIw\wB:@٠SkFL䮸5$<8edPQ}u%NZ kbN[P !\4$zTƖ'lÂi\ŭlSy"Ouy3ؓ|ܗǣkȦ5㬾EI턣Tp&*(jb &y 8J@gvGe ʾf՗f`z@5c]=aiXpm*ͧ>ג3znlΝvSyJϑBpGh Pwi0gFzjRW=ՎܓOxM^pht^iYkČ8:po?sx%7PyFKlJGn}gSag[;W03&ٍCchp)?nH&iIft17f`ъzPk۫j~&T掼P9&˻MS?qӜ~+z:'T_<[ =$͘`#]~JƷ r~cZ4Q[mY5$Od7?;D5sRˬ@o `a1(#x$j m -ջ,daJ7[ `4lH@X|S  pXˇ\FɅ?s&̋(2TnqzcBa> U-]?|U(䣛}+ƀSe|1ց~J 2ToС=wQڀ`"Y6˲qENYn8TKi?6[ Vڜ܍PpfkhKXȈO[i+1Qɱ ͌s>Xrgg]?9; lr <,TVޗ]]SR#l$'L:dFa{0**Ayux[zZffi=,NxNz'xH_jvd+ov'G&ja ^Bl k1bk3=^2<;F<êPڊ0%5EL:-͠ EٔY 5)]F 5=a`()`cpI?p<K $Va b x(h8`HC1i檀]'c"2B+&"k5I GL}49o34(%I=pVj1BJE!>.G_o+ m)~d|*~JpjN`bOK/R(E>*EUlQ#SQE[}xTf6E#k΢!.MܵƟZ!4:? *idʳ{piIqOVYlx V֢-`|F$迩ЇBizSs/IKwЁ4>%8 ʨTM8"l൯+_&"Cq6+HGBD Ʌ# 4Jbn[L n#ԬuR@鸯 Pk qh%(V"( >jṔ}yROP([Vs+EYVߥ+<[ʼn*L\b+5yzjQι%dG{.;abayNua_^68MpV<\^Hc`FM/tK,wKBj YT1ek^*da"ĺdV=J0Ndd i0:ۺqI9B5`jJ8(RKc.eZm)0iX*GnQ0LlHS_z$o< ^CaKMí0(&\ju-&Y:gWz>hUa-cŌ <<5J.B7<_V0FMG`Ok u6&%&EIsֲk:ʕ/^Xc"/%^w'rL,N=@'+gnz[0j89? A%61ņC1B?*;LL5= > j >Ed74 L^xXPht#g^~ۭ* 4իBoIwt8gJ/}sLv`O} RV@QE_VSB %IknV͙\[2L?J>3 IsKD{[i7򵉞&+YnZF;*'V&I7zv.&ѶU,xSV(s]Z?h Koղ:Rl7`CRZ{ÏW a޲~/Q¿\ȄP3So[᡺l4 IxJ}jLfkS|aqSHZ橔n8"Ub\λ ">׀]1'.;-ƴ&_ꯈt %n|D'+#C o_;<?^2\(f!NⱰs[Νk}Uɟu:TSbD: Okc MŸٻ~NкFnٿKLD4[>:l3_z! j_{Duf:e2TZ<"!;]O݅`=rsT#^ՂILAuPںF?チ$DW09I(]P*|IHDڠL: ?Eh3,XAR'L ;?³#~8ۏԇq%1xLGiaz` v,qZ'9~"4عR㗉6 J UN+ ʺ횏, &ܰh+dې pXqVU<1/HPP\U:[M'tAhbR<_KYEun{P-'`úWӡRbX#7qHR~l2(&E P oa]s?T"7Alv dS&ElWvlT ḽRa",S~S!׈[>c%BƏHp?:S8a}Ϫ"V2awqK˝{㡏m'bTL@/}`*(nWx@=|.9g|Ŭu9ϲF&u()ɀYMzüڦF OaAF6/IX8E9͢ip_xp +Jsxq }*1C g^TG9-~rjv6Q';%0\}n ag,atgiI׼.邽I@C#5,wBTxܥ흚&SJ2SڈzKW>oRw=þ9Lg/T ody'B"͆Ȁ֘9֚>AY#i6D]qhx]FrƼ*_ג}({+!7$^ $y4~x8 "nDމ T_8 vUL99v<lŻOp.#P۞H8u:v;zR ` kךnDu3%- LcЄnXONScVo;v5>wdaa,F-wu~n:ݘl-endstream endobj 637 0 obj << /Filter /FlateDecode /Length 6152 >> stream x\Yr~׽o!tޗ6X G;l(SK]$gQAGTתGjG _^<;z}Gތ1wr:ͣ.kxSSG寗G?~=<G<>J(8f돞_<=`秏~.v{5S V98Sc8^caQlc4ka0흳ct:Fqʫ8ކ;Φ4#T~ ?V6z<5cN~xE\5a[G=hJa/}|Ip8HJkn5vuwLǴnR N{CƏAc/i;TQR עqvxF{-L-mHNÁ֘iMPa8s4ONSco>˘!)b-wq2.tNdN&)YS3So;8<g-50.cM[)=.xX%܂|`U4=@:v,YIٰLŬ8WU`װ^{ms!gfd耳sb^oIB  k}T9fzY[]BPk uqn95LUG{mGPvF"MIx#1Y8]A"R?Tm}, ZjNLgeZA8cSqt9207 vr6B\^70聎 x35XI)N5JK x#  5rs 3;i6[Krav?T!jFT쬃`ߍ  1.<->AМjG."[y{6V̮@+(ݭ i岔*Ězc*]j1YaANU ~u~/ ćD0&_%:<`e+(N5Xvw͍H@5"ROi7aq<8-Pq0^ AG*1cBxŔq9;+ֲ<?Ӆ*+a[2W(َ!C_W#EnMPY~g`^)%*_Qw C%}P_''%0 11=%$㦧 q<둬7"l7|7Bd_az:7p̃p8h3h'DCl]@@WtLp2 B ez 8*C( 5OթK `wq`+ kӾ[E8ɻ?-޼LoyPkQoRf8;)_W9XZkoj3n2}PApa@ZsAnDciVHX]Cr2L`dO`iv'jTz 1ufJP]ݧŧ؄<s~YUɊX5囼TAb}I$[L*m$m%b4ss%P  Q mSN4o -Co1 0sc)tk 1a\}4g %NeAy&%OE~)zC#m!U*hN3P+ Lqjtu3"Xl]"@'fL.^k@Y6&U?@HR7,R!ȉV! 3I225<@ջ,X68bHaɎ-1y5`5&1Nѩ({4k< ň2&7&ŸsֲFќM5=ME 9h"ema; 25dmF!,y@xȒ^EE2IQgB3saSޞlfn'-Bi59ؖq%%r@u-zOpk$q!`UV&}/7L! x,KezIA-qIuȤد6з)[= _'kg"I<2$B'Dd+fѦ45JVa64yh0Hpx<NI(ak oY{@'q5֨V`~+F:h0P䫱p+gf;ف7E'vz<Ė"#FSP?A S͖M)$Fܕtyi;{. 
pUn4ZТM 3$y֌`I>ttd*w`8a AIFz3|p݇9zO;հL@H 26kACܵ r't-i33Kɔq]H΅'&}ΉuzAb֘ڜt^&3)%1WL M +6b_[>z!ma"q؆HuցB3.2Vֆ%p(F Nk:xo4je 7:yPr,L afͺC8ˠ|h Lv3!\#E0`&/8pr"/PQXаd :* !xf(j鸪|ŕ(klՇ e[JO@{qg'3>]P,0();1y/Ek*cfQda~c|znܼnu!Rt`R0fN ˏ0l"23`Rsuhr3yIu(`B7s*\Q.Yヨ/8U{BBC²QKS3kCS(! #犍kfjs&qd 3*@WHURpfUZ,ͭƬk{b[{ՉFkn@uzmOdBSGVugzrYS.tfO(P/R5YY J]NM[A{ZmvRnXdTϒSVtݔjل\Rlt[}tJKYm@jL7xcӞazf#WcRG{İK:* Y \W^0LdB,ԆIMq`vrSvhX*RWo-e#Z^cŷ%u֏)TRG@y-\f NfM=!Т B :L@x Eo \Bitԅ SLy&okק0Ը_rLR꯵-@u''5:gՉYsZn`5N"vT#o8ĀET!C+!`j&^"Ձ|Inm) zbL[e"ڤf2e'^r$T:mA\͑*~󻮞nźn]=+6F^~UAM`b[*O54ܙ8vڭ0ɡnЛg,Taڶ{yE3eAF3tònKN4U v)~kE}uKENo@FΖ#e,:C evS&tK訽+e/ zc`\w,iFf'Ҝ5ČښUS s&rj3(cqv{Ԕ =VvWvǧQkmE96@UМ\.TՂ#P"E䴼9[M5/ޚٔ{[/bѷkGJB}BJ!Y~ClZ0U0Qs‹ =%FH{%jf_P.a`c0@arHsfI>,\AO 7g j:t*YXVWRA>5:[l 9V."<%wۨ\k, ~ެiyh+U :79ٌV Bw˴`j ,0: S{Ib~ "Y{/nDՄ7*IbLXfh,;aOz>n繴J=tm]) bPݢ'k<%KQO_[/1trr%Z\9e7\B _Ϲ¥H^TW{ ֽᘮ\ė5BYuޜx<:kj7FUCȇ[Uu x}=W4 IgHHNc_|j\ =J^yYjIc3?.*L*F7=Z+]M`9UJ|̏ #Ei;I8_5"S[2m';/2C;mb9Zs,v@+,/HsCc3[7K+|^*ykWFb^{JP rktr:.YG5S{5O-.OU66 M*%*w+SJv;&^ʙm* PQmaYx6bC׷EUdv,mbi},wTK_Xxc(O{E1? +&lKBeθ S Sj]R)B8gh!`-n50K:< td,V qW(_EI+y)@."DXNIE6njC56R-w$nul}+,8tA׋Z{ik(۷r&DH]M+hڭ`3ka\W|'?tCSn2/P? y[[2NG\V:o|Φ&Fv^|Nawob1)n W"R͆ @KW#se2f~ ` Hm 2W\z:go,V0M_xПRSendstream endobj 638 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8700 >> stream xzxWaМ`e ddJ轄B ƽބ-YHrE-dciL@N MHHH:ߕl|?<Tf=={ս%D V8a?~@o!{v{_&%#RƄt9q8N0a +##ɛ@!;#cG 9~|TT8q!#qwwNo/E!܃g}]*w\LQylYua؇"G..ݹ,}yNJϕ^W]nz N6N=ظq&L4yM>cw\ wܑl)j0A P3Zj.FS5H6Q|j Z@R q6Cj<@-&RKIRj2B-R+4j5MRAT*B77yT_ʎSoQ,6E)?5H)bTJF9R==-TwJ#/xͯ'­/~fMh(-&=~R/UێMy ٛތۭ/oܮE<>oeWm~>A|<0vwޑ~8̽;r spC %}wְ^ vdGL~Z^iۖf/ m+BUvāR=ۿf ~m#UU>{ni4Z!8QhpZ\ ݨ ~8nQY.WCBF>E6 lV06-Rvy_'>,6.2XBnԠ9*r|uNŠ4<s`cB~-gO4$ ?I*fM`TYn]H`Ϊpm#wRd*MEx=fJP`,ݯa̴r>xC h-W'<<f{<'Dm$em=4IhTZw][ eG=S $uԦAw$uI;3w۶ͤƗhI3'Q8ߍEj^}Co7qiX(y>/|1os+E72_I{%-W%{G}8:HV[,+rVeKPZC2nM3JlyO0 j]|g[_>(,}1h[$pvfz(F '*T:l`ʭ ֒TUClu@TՆij.r x)(-`0e̴/)vPe#+ C>k{_ԨBt)~_Wh6mK>!OŞ=<BӾEAv j.q›wcm} M6E At)d HJi|t48x*H[F3cZMv.{|oZp~%[*ijݚVmT1Uk6KgƵ8} 㷈EQ9%etEt#ǀz~2}>6[*I0-`77ಉ_ilDQ\ KmLldN(jwhi2PCCl&8]T6T,Wl2:=/ msJ ]H&BF5h&֯x!.K-ܿ&w*,^5*ta^|egNGvPw7ϳx!ZJ{O?I0kfoχ!T3hoet(Ńh F$&:3#D^ݻveFII34jBM: vyMԂI"Ά?'1pTY(n"Ju>+BczC 0&Qڴw[~@-A? 4 }ɢn4XpdeKHz氶ag퉝"%~]-lL)ͅY|x/s=[is7 !?=`s+J_(HU:"`6cR6#!RT ]c:y!֡j/p׫"!<5AYI_ ya }V6 !#7#"Oj<;͂f| D'v/Z1D"ݑ:G?\zDCk"@VtUyt o!яУOGXew-;j.l%%aI!\a51TpT(P[@^Г5՛U]Q5A*/1S=ۏA+:򠀍ߎ+B 3t/%tt_O&m[uᘦ^Q 8k1KbKVғa=""OA ʹ>"QytCȢNZt9#%LUtId41ȥӧ+9[~ `OLibz`Mr<ڷPʃ +җtqor[ߍ p_z%0\[8y?*eDg_9wH6M1* ;ߜzP}m9E=4'عM,37dr6rp,e)dD;"qrhAn_ Xo=AٶEJǍlHl^ (0򴵠1.GH^$+o7Xt_Ke!ZBd`Î=K`x6{7fݚ,OAJIBp %޶~C96yewՇU,vI0+MuJ£ ]iZ+a !аPcd#UjnPcw*"FޞoW{382.YFN_[gZ hDpAӏϿ*WK,?NەVc#,ܲ`Z#'ՍTPvvgkh9t;.>"oQ7*Xzw &%_֬x>mi%K1},ycˍ\r]R.O08C_LU>$$&*lө?qk#*#"+#jk++k!Ya@&嘄mfHm=+BSh(#S H`b$ <EͺCnX'+*'.'ExCKWn-}}%}A-!!:VM~W95q[H8jmzW\8֢5XDs2B=XMx<$BTB=BӯCi-J3ŃG{:n![ 3$*tzȅ} JA UoS?ئ 6B0}ض._'&2~*)۶M&EMյj:Ϡ-^tFt^R;z^+/Z`C/"m~b(Le!63kzqk&<˝׏]t!eI`y{-f H&~w%/j?D\J8O,w*k[4=SyZG$_:d 6{].H}3 >lСSn@CzoOȏEVTz}4$HpAyTD'p E*A(z(/"/|g+BҐfCNoH!AQa]5Fc я-'hS iED 4KS? 
V alD=H=硒@_X^QQ솖ypXa??^ݠIa'N:`-RU^h + m,)ZFXtLvzFl\su*.6{`=sOȻ,Nwm73Ja5(}̂2&6?j8_|]w9&b/zS7sEsk"RL0VN_,[?f]Թ|@/@ׄp[l%h#'C0ܪi/&]4 C샇b?oHz~{џ%z& xDoc[l3oGAWP\S嗪D`9c֖VJ/O<-AMJsMOK]JƣeҚ+ Mv-{?YO'ڏ~bVdS]=]kk:Cr҉Iam;Tu* RijHf2Y6rDw}PzOҐn61T:R$@OC  Fͼ;$h;"=q;-28)3:6>׾92Pac u0~~͌;,5rhק-gO ij(%Q)['lv6T$L2I:R .hlaĿ:='l;:Ǟ'TYkOK>(]"1?|+7Gk~⿡\A.eŪh4h!A`JIR١R@=92k#y.{O܎&c-F.\+%rsh:~ ,z,|_\' r9jz :'/9 Yq!Xi|`3 `KoDZAl?!8e_00X!¶*T3"A$BaAvM($fc"%.)ˡCp0;FIɜE|)/Q)hTu 4~;}b72?ir뼶MGZt guiqM\f;ޘS}8V9`ӌ >w◇;~?ԒeOc l @!:iwrrjKҸ%'2bIƸq.~5u?斊3 ~}6C&8 .+`;3}ǨɜuPY7hX-r݃WgB9祥o#Vuͩ ÙZhlM6'ۢ?6Su&4*lGUVVYi&,^:> ѥ; ]8 }>yG{*hz#|Y}ӯS!2R9F--D3ѬO ǏM 6I R,<\ִʮlF<*!߫7LYLYLL&Wp7[cc9B3̨(h`Bjm+PV9{ 8c{[{/YlI-'BcZCpRabĴ>@Pܽ+m䞵a7aPprrww__]^}6# vŏV3s_]9Z:sJD65:_YҸgi!L|~Vk2i"b)"ݲ2KaOpmmw oDBX77^WQ|ĵ.b}'9ƃeyde貭U9e\F )֌ $2^1O>5ڝzjxQ( iY@vϯ4TU œGbje;**,Cg>~{,] 2O[Ɖ݇ őh[2gYޘI20rBNdhydހMlQFy"+H|; !'CgD\zPoL#zx{WoE:ܴ>31لT#?=y&lw[>9R NMǪ,rMxu2R 4iŭi}%tad6KU j&"[{O?p> stream xy xTUue bTBL2( LBy2I\)PS!!! *EuC[Shi/_@ssk\8\.w [.^lv*}l+^26J`=؜GQ(S,NIJΝ67n޴K>7Ӌ-"-!;%.&}چ䄴\i[2RrMrnn沧*((X0#;y&Oۜ?icnȗiy 6d'ds8W2Uٯ[ f}aEq7&&%%oNٲ{h[qZE~K.;Ag'3ƙy333'$g|Ng%gg'UB*S8899k8k9pq:g g9F4CtN0a#G99! I)1ܱH8p<-oFu'wCcGce =l성>X<. ^|GwY!C',gc'-tx\1eԔܩWG~_̓zFqZߝQ2sLf-z}b^vM!ڊ/Ekr Xꦛ ^еนru+Aid$Z款OF|7^T#ˠPVfz={uö2)W8\h3>c0A(J-R1OЀo~0{Jax(imbR`ꭞ`nhPM8F+|*5hե%ZZ9oPʊ&W;އ[=;Z(UGRڣ! riq0S XˍF\{ T3źBy׫  厒j{m!|׃ydzI!_vXk=@[թK^|A yp >{e:N0Cu5*,:o(Iߺ#r'PhBNJIgd'#˲* Ve0r3ߵYL_ȲWjl4LO ^^*PCmO&<4s_':F?o^ēq9ͯƼԬ[Fyڋtr}yL`4`2Бn-7a:y~(|K53E8Se(Q3׫AG\`sM5- L2hWӅ-j{GӔj^a C)nT-5&27!IbIPwPiK>>>!_6UP N* )_p@Ձ!b L6H3VVҕzܯ)}TXTd=C{>o8PK F/Ls#coW'ruY9LI<*kSϞL?qv@] PT);I%[ _O* Z*ֆP;l"R)+Z&bt Xȁnl#Փbc}ݥ5 /"]otC]o E~IPB` (n U vKM͕U6ݘe [m=';>kB[l?3;K Ba\)D!S@"5iMe[w,Tyw: O UVczj͍ˑ,MdMBA9P\p6A%՜ofgߟ;pxOO{5])uw/0 XqRKsdcW "$[zJ74zzDw7=h'*JP+<-j[v-6f'ܯ0Rw GPy"[ؓzW?5N.lIEJԦ3iyXCLe1Mf$Yml m*U%j1>.6>|Mzyp'~A#lofK >{H/9Ȅ5Qo ['u@&%vC! Z=~7º7U6)sڡ2 "H1C׌Э9w *իJ6ISهwN;S#~f !;AOP,/viKeho9M5u^INNK%uNJc^.[vN>:sjmۘ@P{3J]~? *qWJt-|VԊ![9zTjtt4Q*Pifs17XB=GFj"9Mx[1O a-4Sp_3$YRJ-{ʻs%WZ𭄦)x)ui2 yI{^[F>-7*1؝= ; +h*pH3 2umĉrQEo?5Uc҇1|l?tud-qAh_~y7 D .L0'Ujf7\]*vPyh ߦ`2BkScJ,>wNY66H5yuҜ| %ɤat ]!TAu(=j!ɔW4D?t{ ,Cor;N?xtho5&ɑ 6SdwPHdy% ("'T_9ƟnK>C;mwVMqswO{--u:K ,kT(պRzYIZ^ЪRfty| 9ΟXW&bva~(sa4^ OS4;}tߗ=iIc*(?@0tշ7mUxo|&oWĎp2zb2[e#}{LP[jDF8k*]}1ERyucӕ"b9#E&3)s }{1K(5F6hv*}$Хh|/j-3r)J;Ԯ`N,u,U U8Z"!ZT͍FD3>(֋Ho~X } [<=7G~vy_}K#:>Kf*BvkjIK,cUIx"%=Ug^>Ц-K 5pq(n ~L4 ]0+h=5{ext4\+s$ZoIvz] BjdEzyPHڸ RR`<юK'OyÕ]ǩ<ҍrLU>q =ƍ8ǍpQendstream endobj 640 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4474 >> stream xXXT׶>0srגXP$ ФtbJoEԨI5EI3nrh|){߆2kګFX`$ 2o7̿I=!R -BLI2" }Dv j?fX3gN`L>[U.PoOU(~eV(1o{n!""b?dR`;c'Gzۯ w WiuI]?zۻzx0 07 i`аH.\W[z$oLioZ00=ƕ,gF1+*f5Ǭe1Lb0Bf2YLa0`1.t]f0>L??3 ĊqIe,dt==HKYh ϓ`9ti=zvXl`aa/^Dޓ{Q 竾[}}-UL1#:%qi&. 
`ФHF3[u4hӃ (|,B |UZ7a e['J>Ch}4oh9,#r ARfHŽx=Gb9,SCA>uVU`/=/; *Bf LXLJARXRyL&ӧ'q2N,7?>(5‡и$ ւ7Gxoĵ帤ݍs8ThMS* 8k咟h{j8F2W1ncƤs Л]m=uUhWehh\osOw_9iO %ο"})YUa2ޔ\ȺB#9QV{My0ʽ4AEh2(Ѣ!PBch* R+D(E>(+TwĎQYJ˧)OIqnOJgr61ŀ58 i%r7#CW}Q5uMe!ʀsb+K S#r8Z6yETV$AbYCG$ӣ|7L;Z:Vi !P\G]9_]ICӫU)h4c`/Yˋau ţc`dtPOGvMd QUPGn_@Ay(d@}Fj5ZEzz\ s+vIXj⼀Kqr=rق;D-OmN·b%9iAiQmpJyNbCYsFB2Pɉ'3Xlg>J@3L=l3:R`=z@MۇIνZhsz3<=6{ Oٰ-4a{24;ypPE Idzw]Ekǔ+諠Z [hRqe-ӥ&KuWawڦ# U]8m$Gv8%E1MKQ-YF-Fjq)E9`EcپFx:c=g:g C@=k&D3pˍg :F/$&pn뾨=E^-ԓpȹʫ_b.}zQ̈́P)%<#Ƿ32n ; 4 jC`08()!!øIM y)?t svOY%T.={ *Q{G:'a; [|;")*UD "\o:qz)L v f((dJOˇoMyq?4nr*rlN5]8EV"OGmo?_oyjo76u֣ #Lm4|'nC O: ˋ1}Au>T/ǽMm%p?+,IΌԪ㴊~ၰԗscRW c QΓH3gN%o)l"GaF)|~>> O;LWp\4*;%%%~]:~- y.Jrnnt!뺟]?^s9sIN@fy)F~*Ͻl Ml X ]\PFi,!dcw/+:[\P{Gd^Z.1b%[A!tS4D@BFlfZfB!י5a[W?]ήt:J/K0YQmbx~'H<1E$TMH%o'&ʾ3}5: c ZFjͪ&r+8^ٻt}KMis8N,Fm\-D^XZp7o/b 7LXڮ <.ot=U+(M΃RށX-hҒd'ޜI4~0L2!࿌X 0;EXL4,8OT٢SJ2ؙ]u>?9ZWU},Ȉ,#y!NK n[MHaoUͧG9Cz ~ߕv^ uqV8ۥxIk2:p#?J ~; əWKΉbi@ǕEǦOu.0qWYV~F(r ƢLL,__aR"5,v]\G%Q(/p@wo@KvmF)CW|M*Зjטd,0 oQ'mCK+eNXco8W%:ܞU9N8+ğD$&!0?t({ ]3`fJř n cnQ=qg~cNG+y 3]\0zOU O@]pODUcOSЦiRA i=wҷ' p28N+߭ @L@v'S/%[{D!uqTޥw5iTˁjt"#(21מI4DI14ȲW|蚋[ dS6kD`i1ܺgC5J{eX02endstream endobj 641 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3083 >> stream xVipSW~B{` !d6ӄ}_2%AxlKFW xòt/lwa&MB[&I'>\gfdә饪kҹ|ߑ1^#L͟6]$^!*}nW|z^=cpXF.M,cb|Y3#槚˗/Y0or11&J 2'RGf!&Qgx=lN]1wnFFƜ1d$4oL:c.VjЛ5RtOrs<4Ψ 3zaԫ; kB&sڛQY1a$l0Mr&lf1ә-L oVf31o30k9Lʬed2ELY2 0,1#>ϗ?ZP+:XMy{wGzQ|>ыG}, $QZ+,R7 Y$ ~Wc+gX͐kz,km]"r!P-vG 4"Xe]C/#pp)9 Y_]%KY\\ĻGt@e(~a89{jz ,kJmڻ!މLniT5 ΁ `ls_EC4M+?pើccÛarSdҖ0})tXDu2zZ6$c&YTc( Rةe,TiX scċd<l|JS9Oga,=~x]QMSپ`Ɋ;]})s>ԋcj1UUO\)@E2hYRa[{#v+u_>TQe,NQ\pluT5G؅\/ MnŐ2ȅBZb qӍ#ЯNoDbsg8Hio_䒬Ѷ,0vPTlBf Ewqo[cxN ȴ{kVe&7! c2FK˶ M筝n%e'K!|PJU8h+z 1>܄s  (H3;o_ y`,±cw Xmw_^PSk+;]i2 w`uoa<Ճ~\Q6/y5gO#*߇3?%ogCnK^{/m˟ $0AlKyZ Ϫ),ljKd@FCl~|NX׶z+'5B^`Rއd_"#DΨ ww%;<"H0ӭ?hp~tuaRK/6'-ͬJMub3q! W3ywIZ}QO^rډ!'c5ħ5*[ u߫zթS7#?*/‘3/;Q7b+8 則sK軩]jkIT)">l~C%GSei7ι}w?r'v{*JGÎh&i/9狤AyGncL,R=5C'%W@)T;.jaGY2v È}q5ʕe% _e(3 Ua[0HOjfrURxF8+XG"3J5kiwo2>_73endstream endobj 642 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2206 >> stream xE Pg{58g!A ^H18$1rOThT$Ҭe5bGn~Q.(L,]$*0`K*ˡxë*Wi5XI/b$J.f]fHOk|b|5!!s5h'e$R4td^z@>5&1No,Lޜ7/''_韚\MN>AY׬%i|YЬJH(mqF^#& ׃/~#^Sj)r)5⨗(ej IK O}'ʾ޸C.]FkF&hRނ^9|J. q'`¾={ :|ӞP`W pΚ.sYkV[1WXW!MZ$VR:)ROQh|!oQh6U˵S NXAQݺ~yŐ)쎶K[60٢`C< ƣo[Q {G8vә[KmFT!7OK6OߥHpo!fZE kF`W_z ^MdL. 
&]z`[л+ǞЬV݊mIzNt#`h 8A?gp.ozK(&<)3u\kExz-cg#Y5ކA7 BC-N}~XVFeHz=Tk[ùӹGLe|sU7Pu"xyI/yU:cwa 730#*ލvB9vi;pݨ4VdU>8eHDy$0(T?qh*>yjRsXJ ô!=Vc[qA݇F7v/vȜ8G9!Ϝ %x [/& aɔ?.8`6X̅|z(`pv1JFC!dxop,BJ|}$ w=I)xXp"p>&8 Ǣ3 C AA4 2,lXb3-߉ho;rLwS8/]O{ 3Irp ˑ1Ҧ{ O"M z>D5 Voj8~p~u{7|ej 9S є]m臊8އ~d$(ӥCE(lЫR^4gωog[Gʝ1Өa6[J--`dU#ņ8w((-xL\qz'Ij%E>Z>C}(0ŃMu=^CSl%'EN"#.ZYH#>"@#g1OqrS®@ao|/}}6OxW⾿&`U3{vwgp1k@:lyPё=%/SQ~ mIh8g7itg͆l_^oGo_׏v5y>`F:kpEA"+kŵ^CM-Ma/\]w\]nvhuUR%(yendstream endobj 643 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 763 >> stream x%{HSq׻-[˔V1#6_$Jj[Yخvթsv$(I=(0TSzh"{@/Vup·@0D*=3''.0y%)ARҐFq#X K`bȋ@$A8]ӝUuTd E1l\rr:6>66Mud39wpb(T9";/ֱ Xb29G)lYR6~/g3t% !*U9FPC (BXH,@j|T4J_)/ Z {-k)G6,M&)[צZtMTsCnVka Ly$KiX5WATJ̚^jgp%%޿P+nkeq'|衿L|A=]Iw(UoSp'%mWRf<o0%/Ua-:x]-`du|i gh"~Ŝfyz(`; aw߰݌UuApOr?:H/k|d2 wﭲ7D@=dݼyhCD8'e>hs^HXcynNS;o9sCh+ljs90Hs0mǒ~{eMYˇO9J>޲.Fj | ϩ2$f ffB Vendstream endobj 644 0 obj << /Filter /FlateDecode /Length 9007 >> stream x=ےu+߰ٔwh zĪr$&J!Z18D} fe* {q98z5Մ?'_DwuwOO|>,U'_]w1p8'4%s'<*O_=n~?Yfa'_~sJ1.{JхilC6E)y#>)0y{W>LkM~ :5Ngc=3㊦0K9.qG#}]X;S0M.žpୣ4)7b۟ a 'c`e8L\s {¿Em@#W|AM1y~[a.^X0}͎7`_ica7 #0๸Osf>4Fw ^c%(/YaSbYvZM s,\xR`7G2͓Y$BhD1(2- .-˃4&@H(B ޘ$ y:e%C|e۰^>57i 9/% bvrv_+1&05 KsTC36"fC wi#[1vaX3Àu s;~˿}jH !3 BQ2:5OL!"ǼK@f`<9Bfk` PpCe +:pS&DNLɦ<tbs$!K!h""H`XU0}GskHfgdAob1SZ Z: rAjӳVxT a w24gz5Ah ö2 h8h=Sq`TpxOY P)(oit#<k3;#k?4Jp + ޞp3NpI^TWJbd#߀:= Q4#U*:! fKax oIcGj@9_1qXb uN)h&w[O7Y‹,UQ#b6Z3^oۺ-eB)b e?9`xb7hz!`MV3j˳ahxpaѐDKM^(1[=l 6n"E(Xi{QSE_Ѧs,?3Yi}kC, i#gdCQy_S/T(eVP~Wr|O C@gW:<9оMgC qr(t}6һ*HJL`$z]W!=r\sQ" A{KF>D|+"0iLM}@Zmʋjj(u#SVByfV:࡭r MWE:/[e,œ\ŗD31L84qͻj~81z@2(VɆJ"@r+cˑ&rbqeB8< z.Yo{?7~C rtvA )ᄌrb~Z~&ˆ )(UJυA>C]ٲzV 8tn`7=Xĸlo'zѵ)?E Ey`s`'cTr6]D+ퟭhVF.k2Zc= j՗ ~_u5< <[HAN[:ȟWbqZ걱=k$))-s0u 3N#)b`;炔ÂeEJ$4: sp:RPY?–8^ChD|rHp)1:B5M| ޿`By`xEd#j-JL+K¿;͵ArrQZ &| 3&;Df2k^<?#-X=? uYj Z vvQX){ʀῪ, /-A+fSZ.%ENDPf h{o@aYl41`ϓb@;Cpn2RЏmy@k]6 mP꫺lyZDᙬn{½<k@D2gm(o4Zu#u8Nެ~PV͌9%\sRFR%e[/H="YΩX4%I1Tx\3yI+&zYH&v!((mX.bp(ѦCJ6JgY3zԟoy X(`KΞ siIF EIyh W`#{9fe2i?cO4 1AQ Ff]YoD' i Vw5@鴢BFFPEGς0Y BN. 
fLR0a4`F:$Gla%wD?Y:^ZC~#E$B X'0aZQMG2wJ^L&I'xnRr h'գ/8M&Iѩ,)Sf^fNؤF&8 ,Gt#U\(4Ay,@`CS}(Cec0ʜ ,"S(f g+~[y i>HO!SJ}_az8jyo4&SFY8%ehM8]Iw<  gotڅ2Ak̕i1JB=VID0V`MaUz3\_`Ӳ tf2F :ʸ_)6%GV[EI.&Ĕ,?DSa6* TH|+`p @1_Jb~ZH+fz咒]Lr9gȡ?aԯ-8\9f.oBYD5q4]zq\o+2ąJ5Mz0-0@0i7uqg,`8;z_)~R[s .'?X"2g6NʒGK`G  >  mb] 7S":By2c]m\{&T"_C,xIO& \/;eMQ;|P$bn?ZDE/(Mhjo53I2V@GÏN(B= rJl@Ʃx@~YZ3&¾<}?tì#{,Pn|lS&_Il6GU,0Z01CC=`+.[ey.-6)QM{+ U{[YLkv$K!ci  }ў\ea+k*g#>m.>_W<^-t "$sFeE,mS!.p 2c"YN brhP}H2urySN"Sw^jt1 ߷52?wQ7<1|WP (UmQ7:vA8 ^gB]i1R_V1Co1?y[*Kjb7 Gx3A/~?樓CfFzޗ!"??y_x] H~NI29rrX]mA ثLt?h4|w5VC/R8r;"f} ɤ~B.FK*="oYOֵMN-ӨxB0f\'MҐ.=:gD02VYy>8KA9f-6 x0rۍ{MjQ2iN봞΋dME嚔1ftfbukL~Q3Xqy`Nĩ?s`Ϳ; ^{*d(|[ (\ U7M0j.S2hE޸M4s]b'0^RMߑ%#yފ D0greܫ5ƇNΚ=`*U:Ľ CΉ', }ZДF3#hOG "e=b*P ֐V ָWΌT{9Ù$Eۺ~QB0sb~YxQ<2>sD s652-*yHlEM|%c7:$I,K21̼L8poJ:4ц𕢷~1ڠ L(#(y¤iI{<%AlO,KELi^`Ŕr® ҡYnK됏ҥӬlCޘM 4ᆰJˊQ}[fm3!K,*;6uӠÂnmVWˎK] W~`r]u[VE &vPh//_uPK/#tY`xҨJ (%4cn,2%U&+:qSP=JgE1w rQ,/Qk7EuC_W$ Lh_j٪3s[TEЂaː(/٨ GbD..G`ˎf%*"b/*VCTc*Z;gE_# ;T=HT{T1bF: ƿ1Xm]%2tu FQ1W-N:LvA j(I zM ,`;_!ޒ.ϊ{ԁ>$8Τ#ؕcY z-L9:olvP|` OjSV7{lўrlΑ9Zl4īYIRme#o¤Kx+hݜHqqWUOԮbbj4PtBz;#%,s UKIY .%F}EʅDӶ-RX( D2U~1:XMQޠen~Y= y)ef:6CV9r,Ւ _*(h*z`567Nd9`r`x_oSы)hm?FiVHGx3lA%檶mdJe{X5Qcݷ=*4Ehl7ܻ{HA3}2Uг(ikyC:(e6MV#1F86k|Nn[LZ^J '+䟮~<E,; #q~6So'$ A*H%7 nrz:{(EdܞhWB+=1&*I!yQVLT1K'~Qt0&i5[ux\Xot]Ġ4Zh,"mP;3VZ^ʱz^3Y܉.OB`cQ$_!uLї(y Ơ|{{-c6{g)۔~9q4 Șh##c~WEƬٛLl(|pX5 JDܿ;aN~mF7p \Pљdz=9EO_twVOD.bVqJzWܴ}j^w=mŸy4rǴgm ،: H^>NlB-Sw֋to:vT8ڎ83&/rxKM)ܫfÍóߵm$7(~ܭ0b0Knk=qToTfY',_1T(qFv^ZsW _151֦5<ڤ86yDT$n{%S5To{W-DYָvq5ѮjU.0̩ۖJLi'Nɋ"5)¬T!Vٖ)۾7ėFx+L#Pe"{Qɢ{㸓E׻TWwf? ߟ a=gʺy/E903vķ+NAÍlCu2]uN:8l\`pO~7ɰ4z%}U䷊u7㶁GD@%T U~b~b! kX#25jZ[g\4|'D&}lĂBrUѤwYMe;Š 5P{R> ip/8q+MaG~5Eӻv?2c^OR5M[Sl4"0>4ڦUFu/:umR ͧ6 U +m{ ]v :['QR{wWBF%k_"XMuk9*)v]_6NEծ췐bH{7q𙉘۪eC< ʖ=~:vQ/(ͯ5%0c 8QA=_N*-RrB<Li\X8KGtVޗkCj.-sӿPWiIb|KZ:x; BYKkk &u{v)fN\=:vӬlznGSE?C5]=t,-3~xtsn#&7h=k&YN?՞c%B(*bvnoX7u7P~[7{+7/7/QAt闅j mnSEC0 NvotAq΢cܦ j{-<`K:jԼز$/ ڶ.F&b}>wScW<<ͷ cʸ7\ Պ._0^>Ʈ`ؔ!06q nrQ̻?k b0<jev&5ѻdlHWyDc"Mm̝aLn=F83<_#pK60Faa?Q\pIɡE&m}PcNڱ >G dyC}럯K#MpA\ELlk|ըpGa%]LZ6quUaq϶ym[ۦb=imt5dO)/N~Prl NHb+mxvW@'E|$E:òIn*;>*Cꂽ@=`Y< Z]D[X`Nܪ ;T{ƽx6[1]v) ֯Vr#BqLN\8LLZΗb4D{o@VЀ,߻xy4۶&n|/.~&EG&-ޕN7bK0v]'_ lyxP6CqU͸۝||,]-^ &nendstream endobj 645 0 obj << /Filter /FlateDecode /Length 6236 >> stream x\[sr~WCjJ;Te>#1vNyXQIꐒY>} 3$%It==.}4{.Mvr:<%<875W9>^1:ElŽ_VW9k[h;eVkG\.M.mWkkM/mt _ܽV**mItg6Zِkd} pVv9]^N+oV&¬vV&Implu4q~rL6}P{/uή~>i[ Yzc>]*0&7Q1euR: ]Z' d߫tco+x3'c=zLۘ ϓ n9vHS{oyx'ۮ;?;:vJFCŌChm(JX*]ie;[\dk!j +!jICwx mFwF. Otu]HP0)TZYV ^ %P(NjH?i 6kq0,Nr)k5 a K{> ߪ/Qb텐gBG)])}4/c)ģ9=k#Ȍ](1cOGaQmx+vfֵ؟}B oZxJä`n J5OiS.pA5Dyq*A&%n3(],oR7|)Yy)@gȳ+}CR=\+›'!HeCLl}Ee 0>V>N:V4@``ԉhuH0)>|f[@1"ǂ=] uЀ2N&ۻrŅ!m캩]OFqd l"B}cFpq=lqԄȟc$ 'B@Qr/)5Kb#v9໺502k@bl̝EgA$`[3l+[Cd HOjQEvvsr^!lTK4aEACp̓" e[SlE3Fsyjն^(bՒlT-9YEna0pr}~D""BylђFWI E ]#G߈8œl"0bgUŦ1"!k3wrBlq|Y!`9ah$r3Ski\["=@_y_3?Z0)}~<3(UOC:9a8E!M/=27عM>s`@A"cʼ=q7"ID:at3A!wHc&nѲ)u4 X)'$&pos{;16Xpl|_6f~\n$ɜlL{ S&:-[V)K^X%mcƹCK)J.Ƨ kFX+>6&(I1c#TDIz/Yn!rCb22FTm1BgRM7GaSĩĶ@Ih58guD_3qͯy<D?;D:JcSa3;ū'+Rh=_M<";umnjs;Z6Zw%JLa4H$b[wh󖛈6?AG(:ف眝p:@yBvJ5|ڸZrV8|w ܇*JXNӯŔe$ wcm%_ֲb"8#\ %?-G@VPwq4F>io8j$EěD>˓EpNxxqq8k@n_=p\&\Wx/seb&ak1{< L4]<7{楊:o!Ho,ӤʚJ*"Y\NASi%U("ڡĆ'Z[bV1.VBz̥Jbt-; 9}zqm}6""P!$apR)ǯf=I낕1 wgqz0m"䷰6my*_>m[>L!\HE1",0 `%I54koT,`(yJ:/HRQ&~qjjy% NprKZ[Yw^{+*^\~p6\A7bUme| ӖqnXDyǿcbjb %luSnX-~z+ EI³P&vG.W߅7 d} x9^ƢDžK0 Q9,O2eȸ U cEb[t/p/UF}/W\NN-dljLKKޯ#/5?"k&Rs?{[;/Q tЩ4/GWxUGS"D}S=0z0'LnKW w#g 5x|] bx;^uYe,XEpU;}?pH>%C@p5/~1V*1N UH.Wjm^^Mړ}bz+?~9](z)<܈ Y|Mix";V*BqDaMfx%&K>,+ }? 
B( 7:27Þ4, p*-!2_SV:V$,#9?[+G:B9y]Ob.fA朆w^I 9 PR_?[8(쩎Z/xٴH  b6 dt0Qy U"jt!_||8;N$dh#@dJ\}B @OEXV.y`f)ވfڏd aR܄?fkjrH""uB~BBƭ?^.GL|w+Mr].^~ܮendstream endobj 646 0 obj << /Filter /FlateDecode /Length 6439 >> stream x\KsGrG/UA2%JKɻeGxH WF GuWVu`@al{嗏uˣϟE{|qs#l .<<9M:~GTZQ8I?{ YOҏy2k*ӝμ&GLΆyoHڴF Rۈ[8J;<>Kf!Ը(7 G )(NНx#QG=ŮJkFuNjJ^K@4jaZg3Qq˿neLe#Gk388^8`,gb<)H<ވ&Ug%aYgh;_FrȭVxe}iq'WRVi-Y7dI@F@tn#<9hScꡤ]7 a{@x?5Ƀ,8gJ5aMp:HHdx^` GrSNd ;Wg0v7ڃl&Z]4߁F_)γcd3@7Q KN\O=# 8ڡ XzC. |íp|y3}H7!^Ry ό<#*G3E#p>9o02R0L2=f1&3=1D2-@g;ZZc]ոtmURzyanA)jfz # b眦0h's+4ӻ/@ Ey8sň7"$Z^I&j,%Dӣ[ E)Ҩ] p|=hk̡ !هa+k!˜FƹG@c79M0ƶc" **#>K`)QEP강]`q/(FV>&G ̡EJ,~7B@ (^I|9 gcKX2.cG]z("#pkTx)"F~ 6(ǵTu.)̋Qk`ް;ȡ I%XIzϋu v_pN. 'oY5n8V67@+^I#3_`7S?YC׸TxBuDNdX PEϧZ@lu6`uk N/ir{d irIkǶig8;b!`""AQ"{+i^ Sg‘1._;ukqBצ:8Hӓghl8 1CLYrB^4RGEv]sZTJ@LKPNQ2]d\ ~>\CgBpϏ@oCO0/ܯkT)8`F{V2bx,^7Ώei;fP™Z6N5W0&$iCwǬN`Q#du '!2>JRA px"exE'}{\_]8cU:fVCP c8CXPx CY(* ,/ve RDu` 1lsX82@Gɣ'm߀C,Ǿj6=_ϸ6['M0 }p5sN/} 7VIU5R%CUwj3 o\ Υ`.FU.TP#n#xoU0JQ0@4QD vo *lnUɅתT wqxvYT<$P}VN?jZuZy`0LH!؎_bEv2H1kЭ^& rBŽ(eﹼ);Iu蛮b;ks)P,)m)"e Ufj4 ĺH:EFp Fv,*zI`Y"M)6T&{C-X Ն6}=bsǒt KvIyZC z}#&,UNdXq0甀kGtK!>lr|!UvItNq/*Yo7zĭm1w1L}ĺ8iZ4Xo_نR{,GVeI0,7 <|%L9Ǻ,c$c==d.sݭ@)Y}`'|l>f ۚ(_Vx =%{`vypg]:݉nAߥ?t^P/bZwZn5j WcpI,L8Y簭B :E}>bgojs Tdç לW 0BiUe|ΝLk]]ZJD)` 2e,KئgXaNkDlلnd'7 ="t("cuM m&.Ri<=nQQ*hWٽg8HeFmopR8Iu=+)JbȊ 1䩱m2Ȯ pe,XbSF`1p9s_G* %GbNR)^[~~(c .B\%/ŀNZڎW }TbzQyVU|E\*C2j)A٫OҌ7گ @3VT_4D-eEA$lyN~FFGx{/Jhge^8IWIWjIi**0X8,"{2vBNn'`3f,AuxՐ q[r `ܚEy&~s ff]2 d ]sWf/5zK=ݥxUUz(F&/[bzxR>&\#TlR`(QѨ|#bKaY<"wn(lf{H\bɔ3}`{OhM[|IIil"xYhg\C~T>ѯّ^&ͻVnzWٯI6N#m͙p4r(0CR(zLPSKsy.w*&h6?}; ^VLI\YM\[aB^WŶ_!qx(pP?c"h*C.i!X=te~3l ͳl|);{i1d:̐G*^o=+`,=wх_TˌWN,' 儮sS /?Mwdĺ;7- iBh#:lo)ҷED)[e゘Ձ L<|86aIE&cAPڽPQ)渓E?"d xKxMV9Du7 ~2ʍ5N^y r{Ù#1v] BLsـ8KR)Ոju&^p#a>qg}lax@8B(phO4b lxH)-M4Dgx-Ƀ>70X*7ʴÅh>dgM2MٶsETJZdU1I„}jMմ+cSg8B5$- 9dL ݂WW9lIG[cN_3usSNê:NDl ϧiR(2bװ1)1iVܲtO1WKIvVkڠ 3Us~G"s2UM-jV;2Q%lʖ(*ʷڌ_s3Rs7D\ؤ ?9endstream endobj 647 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1037 >> stream x_LSwEK-3A61CaC60Pn-*0&PZڞ,D SH1%aHEϕv]s{CSMi~Z ^c,[Jၵ/am~:G`3(>!99)?̟0EU|fXf0P ?|LM)qq%X+Kߎ-bc6k %|P%YEF^&]0jDJ *Xdhl*ʣҨ $EqT 7'&ɡ6m >zCqqocFt$Z.'1 ,:?A ? l$3Z1?8d?AZ P;}l*♫#sK+Gu]xB3ZuOFYOR˶SU<,*u(^+^gURmc0!`'P!ۯ iU؆L>[X='K7gݚ9OoOWo4\a<Z͇SxG?`tR+C7`r-:@27EOoH4r 7H|1953sz=V܎ps:go~ I6Qa;SMT=Y\,lux4. v.mZ&~ m]*E6ZX>03;꽲[r"g0G)#HNROxaVKƒHBMZJ0??C!T>/npCSs\VTOw%d&6j(2o3d?p_b|d(\UB'sk2HpVb]cR=f\6Io]9Edl(qunKy؜W:r^R5HQ?aɅ .T LVzJREQKendstream endobj 648 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3049 >> stream x}V pE؀qsi4Z'4\HHCcb%ɶdc[Voo,˶lk̫Iڒfh\h>RәIH+w!I37l~eBR 1 6NO$ſ;'0EU| yhߠ/3UhҗXrUO?bu:u@~i|CZohӳ4j)}blu_\tzPTWT 7K uM6ueF"}P]QJu |Ӂmꢃl3k bI|&^%v'v\%b2Dl&ۈTb!&aA$&{ 3. 
IIOsgfS GTO!SI+h fDɍ2i2CtrZV~p'͘_\X Z.D=;O#{)>gx+0C[B٘Ü›pcZ>iT|a0~8De3ߗ!3^&,ZfL 6mA .É|Bz88< ?mxzohIk^R:Om1QP~۸l׺:U4+;dbH'OTLc)P zR]_F)Z <=~u8`DqzUTdQ|F qO6ܢ:<2'ږfǁt@p9؄"%NAUa7OUu^ڱcwYzJrez (}gwB)ߐ84o JNݹ%CKj5q9Θ%>p=\,שRC^eQ5톈3g4vJS㹙hCŵ,1ɀV`Eٽ#]*iF-%|޿0ZkK]Pvxy VԳ!;@K#A#w~q+,,qdXW0gcwh* uhm9)KJ-PN{ t -Fcj2ڣиvy>tnM[a03T*aRS{X\ r,8ځgeԋ={֠j㛖ˑU>?F5>SԪ*+@/?ZҐ{p~T.M@995&eC)Ӏ]%zƏcWltd!Je| w j7V^$79M͡hostR`Qo.*)ɍ?J M3{%߿yY*Po+1K-GsBAiЦC;6hl&IH.&rHO0C dj/a ( =ORzxtm16]eT.W /6DQ53zo)u?K,3A4< /Ƴ?}΍Koe+R[Eү`j3Wi0><~p$P7;5d(uǨVѓNOOYkn q~[?GVV8U/_(MGZ(9nT]JE7+^9$ofUq9Qipô(g0Qé_3z͊էihIpG:V;j<Տn!7sPRhH4ICEN܃EdH _:F$f$oꗡU#rdtDx}Oޑfhm^3W CUbJS: !6J5F ¬kUVERRR﫧endstream endobj 649 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1039 >> stream x-RmL[Ux Lh/EP>FE 0KCq1h--愬Zz:pT27EBPL0p Dz,&n;޺>}ДRA4eJLᴸY!F0KJGXbJ9`(6Bz(@14mq4[U6!,ZHLI1!d*3 F[h7sY DUlG.lx!:FpT*|SZo/dkm+hG9\cLV!׼d(j]͸go~AaQld{]YY34yO?@I 7ɒHBm)]D;6qߍ?uMUX+obMuvx< [=;0b){q7>]-L,u׺x\#gMyq 7)w`;MwѶq78Q1lR7>c`k5&?{ψ2;(z/==֔ ^  ZTC6~qHe)4ȭ/N424A]`ķWgyn9" ӒcЏp$%B~jΗ $RG"^_^!y3_R\I_Hz.}b:stշ]~Ê빌tÖ=;Sq:@,pҙ0Xzb)|2 ky) ^!MR0bT/#y| eWFsXV[-D4J/e'eD,3R,J>s;e7=Y}9-Avc,DE8 1xդ?4q~SɈ7uwVҮ|O6m4M=CSZ"Ǧ^[]4Uk*Eendstream endobj 650 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7123 >> stream xy\SWͭR-^ꮶ]WU\ *W 0NQĺ'jmmms!}aݜs(>H$/pqu8(aHGx"G {tBڲiFX6H (8R0f;g]I&pp ltp #xE:u-W,YlNĴ8C/f1SZF}k 冀~;U p0hĠ2G* 77;ysٛmg*mkl͵˰koa8`7qCo[\>_u(s1oG- + HwE 腻j2"Ry׻4vJ⒒Q P*_ZԴK߬`]3KjQ% ]QJ csp@g3+XohСs<ՃV`Qvn>Q)w>dK;,%Gwti5g>/~lN#3ƽNOp% VfUMtנ~!5FYkЮ&i( գ2kqU8%{בW3-ǰg*,Cº=7wzo"N`pA7.#hX^>| u07P9Х%/\N;MWfVNi+d$|CB%?’ޅ>x*rwW\qyxy%tBN_W/Zr["#nQ'agɺ3^ / ̷v8U Lߙ 1cw^%x{jT zw^A3X\}Q[RoqQێ2KN<6lL)Sa,籸+(U;I>f~e L Up`MGyrWX|KBG[k4<(6&|-(EXo6Θ|knęR7\p7TZ.w}1V%]mHѲUWqO8l MBޢk2xOJ_^2B+duV7cN_4r+/bi)܇9]1 J{[tk0, _iPW>#fu˪XVNB>-˒fT>awui@H@bC텦(8w; J^-᫘wyu)\RCDb6gsm^Ӵp&5}=6c zݼuPg%xGDq.3͠X@N6;cvRJ{_,)ao!5K9QS2%%f6O*s.3:\ɻ=ژ[I†gF9sF=Nz^9q Pf5Rby<`y) |&3{`-E6m+f81~RFd_jPnS :NXLb3X^uH'lB\vNv*gjjiXx߲O7?{;C2hQ:Qg!l1!녢Htd6z>IZJ}RQ<)K1:8ifCaHv j/찬~?*=(ͪf2uJTZQVyv52ut6g[;@Ч?1jJ_|}},0j?]2݁F 㨹ϭ3tK4c@HI*\k7;TmR}_I]AvIrG':)'fgRd}:\$VDW.?QJGi1@WtJJipTdǞmώ]Q6ףn!(@O%rI1͎wǵ !OR"VSxs8r>C'cG {~Nzh҉C]X/U`R )c8-7VYm:5]J_DaL?^m]cJBq xE7'7p1yN@1(!ĖƕQ&,.dĎwpeAs+/@7 Vhm <fa-@Uq|BIĒÕ-wsϠQ RҌ2ܘӣ3ScP9ѓoJ֏d3*:KVN*r.XOaO2723ѥ%]}Qgꬼ3XngpdFZZb>s"]#AZ-x Q7p52ɘ貨_@ݎOܷy6JwedVkva0kugS2IӴ]^9R Ǧq'脄$ffswTr}mas^*;6`0HDdw:րz?OΦ}uv,3L-:/k!9+@('F$q-NT#`Y++h =|B 4 ޖ[[6^.B|$7p:C_?ߺy<q}?4_ M2ݗdsں8y"=" 1䲪s]3'{-7I&=2!lO\5WlGdSQoyV~Ǧ[˃8/ڏ^F_1`=6Ξ1m6BSS-N.QqeG:{_lhJ'Wӵ9d^4\ M<,۾W{an M}jbIYܮ] ^n_4+mUJ4BWrGO.Y|  srHs2J<~I/85*Bk+6W[79[]KJNJBJR 11%2 FA(Yt`8deܜ\cd Ű _/bD9tC:q$س=q[8 մoʏUH\S54X=$:.mvtii Bو|e tzxz^ZO|~ǩN:a9RpƎxFD-E\{0_wA]JHI呆".=eDUDLl!)w5|`| ew9~wmZ}>?UÑ6t7?=s _p_E,]1o'a<~G 77ӝ{|nN1~u?p:(0.˽eDgEhQʩ̨0}1:-ڂ~#ɿO ɱ(!AS[ ܷk5h$Upb'f\Bs1k E'; 0V\0|KdcxʣRHadπ͞^!A>AmFtHɏAlvafۅ& -JOKFrEUm^eQ6yaÆ+[W8z7e!+o8|*]L{ ;;7m\9OVowiʦʀmXC&kwIlQ^I>bj UjJ{x*)&QL~\a"|=JUYʫkȫy<NV7u">~;ؑ#~ ^}N ^vIv t^ kFo[Y%$\.Ux-c >?uy.\~h=AOBBwyDGUŚ:/lDJޓDa5'Ölʷ4zG"tZd%b/5/*>vXݻ&'Z.-MN䉈W$hAﲥd c}H+ѭd?C ;>Yâb<G`W^ʘduH ފpwtb۫kS7[l/Q&/ߖG'ޜi,!(1'XT>zN!IIh&!WV[ LnbOꠉn("L<[~fj 8^A P<%#s yo?8қRca_~_^/?z+gt[pGa~r \*a٧eu>;<(1ʲ؂ ^ZR.,F9> stream xWiteY AgN;KUE BXcIwN~{ߓtJ:Ng%"(Up.Ez{9_b8˽3ѧt}= JB٥:c֙x?B&spF;9G "ott4cM@zߨ]Y 룣d7¿9ttġBa,ް v,vv/Zv}vȜZm6,ۨuf,[ٍEЁ pT]?{}<2o6|\2T?Z2Af᪹C kk]ۋUM5 2CB[@\@^(/Zs;6r}] u}x@ ]ς:Af(@ySto|QJO(N @1]twլ*csG}zo XmJ,{LF]㤟 ךߗ? 
͍4[ 4fR ށKI3^3hQ(z [8Cc*VֳKG/0an+hM$`A(_W)Z4~A io+8Ӆ~ &xu/7%|B]i0Nr^Pyo'y =#GFG[kN7} %±=%hoVUUɫ5==hF2|F,; WTн!Y[x%ܑ7v甽Sv s˨$y>TIURMcmG2ٕ$mZ".߲a#*u ͆*Edz+M6hO7.:r^|+=e2q6ǾҨl|8qF ٮZav A~HPvI{=Dk5~GyD?,`C|HdM$"<׬PrQ{;ͤ7^o[z{;:ڻ ΨA Nc e*MJ=ms DXg~?Țwz.wt?AS.oS׿|fj3,/)dX(+EW 6L's~jGo6E!s͇J7>\9캈>m o(vp7Z[5no0<>$G1uϘ]l(Nۖ'Y^*!1ً&iila(i"$`uOX! cO](qPd.T>F*mg¢Kԩ+o}Vl1sh wAK}5P ;Je[; A?ΧGY_خm}&ZJۙ6E06AAf(Xﲍlmx$D,>A#׬L[Ў~EFh3]NsZpKerfɛki^! C<3('v,{7 ̣HRovJp9X~jNe:<x|StBnM6 z\nYѻŁ'8f gVoxҡp΍1)K'c>QHqB!DOjɍDbHJf#swc撶 F7t_endstream endobj 652 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1318 >> stream xyPSW_x$y"Z}"/QiT bqNq4@@ L@Ąb5@@AFEܐ.2jKLh 7sw9G<ae!7qsIqIQ?( IAf$a פgiSS8 q8(44D" Q˵4qU2U5r6K`K%2u6e"X*rN$Ф2\<ڒ#\Nʵ(M\FĔ>ӐP"wIل+Nn>n]z{g9tq)}Jr9h7~'k6MZslu>;VGOaөQ3R˻epno @an~D(78#v2#ѵq/\lKɐ6EH-& F[AKJL孢c:y7ᬪE^3,08[``n^?ԳkWarJ3 88$njϡPۓ|%:Eϸa!K!TCXJUsp5ǧAd(r;I$'s;B^Olֽ}f3^[jlMU[l|1KPUb)*)fmm=CpR 5DU |_PpKkZ>k{nB@$D^G D[N&{| lEE#$bП4<]cHZ i;[V>hl?~~A:˾؂$tWpҔT *tz Ne> stream xVkpSe>!m8.YZ.dA..qQJbH(Ioi6m.M/iHzޓMzI6%%-k(7[EuUtEvT"wםZf?̜}y?1)IR*d5˖.f-U=DRȗKd*d2;_U%#K2+ _lQ leB*/_-lɷ)2"DYbDl'vD Hl"D'q1R*.jީw/Lhzᨓ%&t AnGZ'tZlYPeW oĕjJ5 YlnzaYd||-02r6"sB/'m!hZxAT:0%Vt8KwH" Qp̷B* /ydE{qf]LɤSgAsZ /: 3eY ZӍh:^7Qc T CaԐf )G_~ JYGof=T̎Vqi'}:A6v$rP pNY34BDL Z|w󻃝({1w ȣ'N:{sR. cBok!QPV9;B\`l,=SY *A3XEO}~C;mA-.-n.y d^;hu@xyCYTzx "zBkxجg, %uZ zm>8 ?C-+kк^eh7|fU/y람<辦?V;X!Wm;8TI+] rP#cJc+jCd Oy37ѻxt{ՌKV {f^3JnX4~+I}ks*]Y2a忴=eg`/Ltnr@C{HB7ف~mCvUκ'YV]y2bޚAG)Dp,jnxQqY8v[iPixajX .'qY`<8氹 FgS4@n|=V~B4'H~(--Hyj"R|G$_}VC39oaè*(c/ K*'\$ cܿM2_8v(_quUPʛU/Є=q_BS.vY5ٓנ64P xRbgyZ~+muAz~fQ2Q&@1_c*Ze:(|enr ӡIH߿}4 }3ApɱzƛEy|qrMN5"f'mzd,Sœc*T[T@_̶f`,wG{ŠiPGy\0]6Aj\<8s1hFǔYpwkCze0uk'ץ Den5q$@+yWNA^#([|%9gᥘxhFk6)l 4Ž2**.b ^ktm@xχxl% D~~] -)fp)MjZa(`XQ5GO4P5UoWN } QOgrh QׇM]WI4 ?@?qrPp'eGո|q.ZQQ ɕ%4w;'i M.%7;ȴ&p8IۏAԐz(sċEo)wͰO`Z@\MH1M;P#CŝEmY yGRX7.6,Ɵe稼WNCc v7MF3G^It>0^Tfm~ZטRj>^G7翆+U(KA?`_57¢m  wClfn7JȴdC* XȻ.][p;ܶNۛB [v^ʋ&n'S0ȪNG,.L"M 7lpWGgU&@4vFnkܱM6Dc]^>F _ )PX[MGUl$.pH L)E-bܳ$c;Excjp(jzF=-PF8lIendstream endobj 654 0 obj << /Filter /FlateDecode /Length 7258 >> stream x]Kqú>l;zWBRXL!B҇!Xv ,@ GuWfu.;tа]||t0׏~u׷~Jw& <rqKCc^{7O`\=<:'rp&^Ņ'}~:L%ZgwO6鐦 fǰw%LS +ZQr)ϳN>\,lw:,|_3~}v}G$XSx3i:I@S SUL!0Z4? o@1u|?&qa#6"PIW/G{ŏCq?){c`/p%HE:-l-?uO0HJIL]ב}s9x웶0siFȲ; ~Ucwb9Ԙqz/;|^o'[!v<{m/:#ٱ8\ #^~0vNmi@K0:MYRgϚm>QMݛ&ԾW,d'ho-(O\+ɿ5}.-Ϟ'p1Fh)~ux 9@s,'hM74yL#ئyŋюxXzYrev'b<0"+`T~OSvcx fK<2tNObD`F6T)<}nbJy.u%HYg(fjtw EJ&Quĭ8riwWlQ*Wa׾of5&X[W!Yf{*x}#$Uu}$^:3s^K8C]6 &{1o2`*xY7>VO|!j?y8Հ߷̯ 'MY(| =V-(0`oǮmٌƃYKn[x$9:3u 8BJ4 J&v&."28>sHGCn##xY@3*941vW^5`{Xf!)DHh @$K{i?EXdX<txT'ܖP4^2zΏ-#L<6שaH Dl-}o]/w&_EM p͈@BDV ^KbG3~Qb}T:$oy2FE:׽T0N1nx4O(r:;b\Mlǥҁ0"#;B<k_e+D4$4T7h"Z 4) $X ~s nS100G"V3/" 1!/X6dաgju'We4`fWG:bKWZrvf=< 7KG_g~zQ.|b am5x&FA)K;/$҅"3tIqbT)e\*q9X>'(lr]r *x.zK)9&QK MGQeQVH+U@&Wd,GQJPk;CdDڋ)uLY"b(i*vH֭ 0O&:PYCMH&$Ty)Ӧ"g`o~;B ahh^0+`Apz]r׺2ZeMZU7V1 f!#|/ru2<\O}"0U@9JQw`\) Lv ;/7zy6(Tء#SW<,uG}#bqV:6sԄ2>2hwaήS<;A%H368. )/',#<}B-Je{'#?#wl߭9'KsŽUݮl#dqe)0G>V F2̎L,?_ sFnEjUP h!I'S"Vpf6 JG H. <@M uZO5P>XR[7e>R _:3do YBہ4zSNV٧X:". ^̼rJNΦm`" .I~P")QT2 ^TV}*HGf3QqtΉ%ãOxfݺg.;Qgw$&"$yT@`U_@[Vj˜gT ^1a(a/VGA!탰IQ4 Z\t4PB^R|_8987V0N^ͺbK"#质YH*FnNp2v -q}q%a1a%1o.YR8>+>z<{Ey>%'y;PTڀrw:5q) I3Viw'[/2TrrMϗ#X#ƨeSlV=qyxy[LaCWG.l< z\f۰j^._IuʝKin;.IzJ7 v,qkS?}ӛ񂀮eyV :qnyZ)>GtWӟ( ,Flӊ<4C c|Fkqe]Z& NTʺtMol (󮱙X ox!=[/CBKUDZ}tʄi*&1i˧8_l~2߷Jhٜ9KXơU@&@K|=`D?aY:M? 
\F{[tXw5aQxL N.!uF<:?mh']Y: Q{\+5RI]>X}X]\A0JKqVy\U3U2\2't7 }8x`P9a2λ?6>}c z7W'%;88Ko1~c붰t[8\@(n sE$ sC9gY%D/Д5"Z6~6?*sdÐCW#3Yu?J/da7p|ITT(?>\uK-UQ̌l{҈V781=cd7-I .g?^( )܄-B+YH}E=یr67c؋>")ڙEiMkSڅdff*lLM1Q$ @G^PtQ[_Ѐd`~9bkDr򧮁aKY?u.è.ȱ%* ڵsLf`~E1R@H7IX^q?"M8*D V_vO>ʟ>K%k BY˰ހoX]?^T9-Vv!̆&9݀4^j5dߖW!l//zɔaqO WB03= ]]Km-//v Jխf wX/%{oܗvrЦτwuK9wDY% !qDm)Y4,/Po-N_K> stream x][q~} '/ьw yvb%c=Xy.ȝ.)٩ (C.哣5{@P_Ϧ:7O:ڳ'}/nKOm4s~3zP9Ĵ^Ӏ@^ a;zDoe.epLZ7+K-Fq=G̎bڼ$p;c4 i%9"?tPqs#8'Sunᯛ-6LV'%9ȏὟ*=c4P'Zmi :Nz{D25rͤhim+%{s^p*_Q8~dX>l1Ǎ}u6!M |0J3Ý8YM̘E/wY #o&0KޗA/#VZ^*堀LT g!w0.^sZv9 ^V%'Shyq@?~uV4Jv-BCï,PUK] X R/ޯ`$Ola>(-e K:`tDBMc][Jjlґ!C;~.ʚMVfkź}a?{Wb*V{/n]t0ބݰ& ә7aU䯓fyn>͈>LtUcv]fsx^{~jh}:,#|n"!Pʛ`;yhH[m `yYiBehqZ2FkIo߂Lu]{YF@P:UeKy0d(ǻ- rժ+f$ؚkmywr,5;Py(5Ru5obւ5廮eƧ 3; AnrN =clUR1&V4͛/9@&r/{IjV{@5!mŦ90f/e}D7):Z5$ h Agz9i hJl ΜU [L z @Mo6hq; fh|ŠpT|io*+|زM24~b+ޘ.Tx>ШUDL$ah4#GEL`O;p- >jyҌlHvO=i)(dy]:Դa4AQv=A6ЖDRf~Dյ5;.d;-9=X7Qr)Y!Wkh>sExfA9j?1FyYآI|MK~!>s6O>ר:c1[ʧp@ܐZ_QKQxG+ʍeri0J{[H.bk+wI}ɯS΍\ >wA게nX v5^u ~Y-<^~Mjx(fcPԔMr.KLR.ʠL@|Qzp-|H@`Q쁎=QB3&`ctJCd1en^׿;}*U@u2:\"Y+0 gI (+M q 2y~2F¬- Tk0a=FKq#mP'n? z٨-4mִ%pXs~>M#; P6NҒM0UuXͬ`MDiyJ0#$\#o+5x"qŽzwWCkNP!V#Cf RB %X4A@? =Qx,sy^,60+9#eSFxƁ%(!Ԅ k XRK"m(yUBD]n'$=K'*(UK$dso;c:`U.xhyӻ̦f^lz޷iL =5B秵%ZQc^:0 q Z$@SDA2ݑ =*SѡY(BUr[=c_ID 抒ꇛj[nf<ͥeOGSnHitи|UڻFΒMd0Z;³HY\G#KjZ_TvD fJrZ`Ij_lb/1zz& 5%AB;Ǫ CWL&&JoAarطTr{}+ ,H>&qƌiVR~OY}P+Bj-wēLD:Y>EŇBujUJ WB;y4-nK7Vy+n&Y>Rfmy]7,ָEt1B D:g02v22!lIJm֩ ",׈^qP0' "-F|OPY1inxkydmzE) ,hˑ 6E0TDV')>vpDyp96y2Y1dzb8#,` A##.%pP_@;=-3SZp`dAT<),2y&@3D0Qd""ܤ9R).K@pQMJ#Gjf6em&cNE/b_U"5VtQ0^E9XfE&^a[B| JQSSy7@u\2_٩6Aa5}KV[>|R9xci Lh˯Aު0m5+ZGaH[0Y|aQo=ɓ0pb]rqvyAq$n谭e֣2<1(~YSa_ sBpm4X3$Ҹa^=[{MqW %heaBjCTOAd!ֽu`Xda AB*/t `et%δ lE*- 8Lyf Y3Fpr;'0?>%L{_=e0M0ǰ#I $cET\ds^fq"lK`4!L7ɷ)#=(2{ Z]( {eJ5B S>&LN_gm/XZs2{<? H޴pJ.#Kfnu)? . "^~K )υDH .S Qՠ.z$c"ܭ' 9ϔ(K ԺRDU9ߥ2(K)Wa8-'n7l^8생]wU211 5{ Oz3 -N^Qv+l?$`,EQ c ݁Uk#CisOH j ~]r/u8C`X(P] is W؇ڸ,CjF H!@'~d- ڱAr/صT[?.uo;~޿D6(g7 􋙸m/^]ګYpei0+f\X"^e6ZuJ$ҵ< c(aG1Q8 vg("]/"K-C=ڢdr䑅|ʼ;ۦsy>`~w2zK,1 CL`fFSW ]x]vyL%E*LubRM nвė;J|XD%(1w;ȋź}wq ü@}2̭"@ErI *0S9xzd-<>wDrWؖNrz +NeW/-$_*zzHyGc"VitYt6NY [t=`Ȟ P'-SL@"SAJ,$Ujgvu#t ҪC 4aMx6o_6?PV$?  ]uq_gδs"%!m|/IVfm $ {n x@m"F ~-voT6;'1J yNbbv"-r; dT{Euu8f(zU=OuքuI%ދ"9ҦB2G>Ógg+;8yGߗMXR08BCQ8^|gK ȸA]{7t7\sjؓH˺ -{DI=ӌA&,peF-QS̮RV{BYxǨ4ubը]b|\Sm}߮Z%b6֛|D=Vܭ}ԀEs֩>^Mij #vSYez|[oof}F,J&鰞D{f aZMo^yN7g>d 'Htg{4]8ôQ o8D+CF)6>R8c=2#߰~,xsOG?o X.](4? =B?k:rp&=3vo0.ý"EqB]Jj aqrԎfvO7=2 2ȹR:\G KW8G2ilku|qT:0C8uøN ifGj۰S:'VJ7s8fU۪}2R|^mki}Lt:7.\JI+Y]X$F{Kʦ VU|XN@k,Tfއ2N9):?5׌ľMrVzĴhH7x~'i2E^,MLy&{`ǻܾ-$N2^i%\ }TL.#5;>sB( $(PIg5FP&ԟgc$TU k}^-nVA=FaS*8 i],Il6)a Ky8)4 ?CV߄ZWA}$W4>Ph4k|_{_]>@ޱPHqq VkuK">lģ ^k]>F۹tqqף٨r?BΣ|c_VdKy'Fiֱ|} Qz/=ݯsgǿ}➽yl7|3\ּy֕s*ذv`ה}Qi_*6 SfuGV(iB)X'p?).H aiZ*Q SkaCQ ]tOYj: ]\i\#>mcp'@la0VT eEDxxy0WLF*fމЎ6Q1% kdq6*pT5g"q!MϷ &R>3ԩ}i&o߲\~8$ ۚ{U& z`J;j |~Mu;Y4/-e1zW6yERG+Hm֞6)kph'+iQf shK_ޤQwt6+Px![Vyeg7rau݈"K;QM3>KMn3TKm5rA~|Naam1x]H ϗhޮ/v4exSM)Jm,kR_uz endstream endobj 656 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2429 >> stream xUkp^auyL$Ӑh4<3 y%KkYXd=lɲ]=dmْc0&ghH M1@$mI;m\Qu&]C;Ͻ{=9[|p*^jë_#%]Z|T],Hڜ +| yb}wx_ޟ{|YZ|ו(ULZ%~i&@|!TA|]4rTh7H֓DLN%JڻXsoTj(ebUTh':bB"ʔ b)HT-9*R(h5!RH%%VIbN~S*Jd Uh5bL%X;娭ip52M(ؓXT7>HZR]©% L ,.JMA)ߢ#ԤTIݸK]>⿪Trݻʻi5np?K%e}a+jVE$&d+`2{;möc;nl#hX3gڲ\^<\5+_ UAƘ>瓼dA>jvAQ(R u:[As01N pcY#`nٵRZV9 pXKꪷ w;aOSFPd7Ϣ۳-"beEicUZ|d~h(7#047yis_;].8T rڦ+/,@coqWxod2z#g4UzdRސ1ox;?+p. 
|P 8L&[ y ~zjmkFϞ OpeLILJG{VeF0>K#J߸KלV /ƥ"3c8ݞq+E1N; @16Kb1۴jU &ƁtWbq.o0w@cҷ6p8)tL{f(V8 ǝf95˹oH'3ק>}NTMNCjv h%(pegs'+A[NEgϒn^(.NCYfd0o0fԣ>-w@^蕦jtгT5*P;.1 *إ~vO}aSrpPevZ4\M'G2ՖN\Og._#+>On-ϫkV;8[mm-ڭxAys s漬=rΊ ueP0kw@-MFiHn6ՉDb،7gRU[%# nPN_ CxWZjhEf&==p9EzZͨ&0v\hZ!*t\yܹw0AL~kxi`H[*mZeYsA̹[? |s>>m}LIg7F g/II]^gdZ5d,-D‡S}$K|9s{DUsDHXj>8&..| ]C3C^+X\C {oS"+%6/8^8}wؽEv=: 馜s!-2UrCڪbvjR+ Sd$zƟ {:X1Y;ox;x5?ær9mnYsA]ؘiocfMDړpE&Np?IV.Y!( uԶ_X+a5cWb06>/Hv ÁH}5go.yżU?vbVw zZ [nbI4~兩nÞYr(z<wmU[Ue=*_ܲccMurD%hٯEzx&]RE9Y;*@e!~ bh0[RJl.ɽЬxxcBL ՟-lq =➻w_cؿ,endstream endobj 657 0 obj << /Filter /FlateDecode /Length 5586 >> stream x\YsF~no'PF,}ǡmhڔH&GH8&T22_^W|qɭNo~.|89 T:x 9Wώp^W^z*gWGK룟fLs-&xߺF&B*'۵66n{mNևnֽRٚc4{ -2aGsOY.a݋:[wlJݱXwr0 V6f;]o5}No|.*u4V*m7/iAؿ߬ǭ4)XXJeS7M6'М=uuI=ݍ5ʬ6 f~]{Xr6dXt_&Db^E)x۝]rʞ6 zE^fן=w/GDԠ*H y 6̖mTGggu79REX;9c| n"ҮV;VpHFEX#5eg8LӦ*=< ^|2E՜ݡޅQFpz~rPc Mx߮ e;1Tm0m, CQ0mLAh*]%XS CsN!K?Mh;hTM{h+@"'Sv_@euoHɎ|ߔ"J  `0Iy8:C-NeXZ7Uжdo\B@b/aFsa[zidīj 1DS<%d^Q{.w$dS$2~I^&ƛ%`mn| 5<+}0I '(T,Xjp7i>⻊N !P1BiM3‰|E/ϸy/x`2g&;] ω)~/6t(88v6a(޲gڌL0z݈0VLs6Z٥r5b;  /wovoˆ0eoDS(~Û?qpUm(h><4Si'jkN^T}.D65}L ʳoaRZceWΊ 2&cϴy փ98Ȑ!بo-=l݃ۑPtSߒR|r${P/%< N듺nNA)z>ۭe@DJ(AWy% ({6fR(Hnxy<"fyˑ4:$:YQ'S=_X DŽ_|w B42k6B9`ܑ:EGngWu?~h^' _2rUs'nɇqOԠυe*sĖL=82&bg\g][Yuɮc%8QޔyN#7+oX8Dm]MhwN3qE@ɗkb^$ew:M.}j~ Ȉ<ߓlÄ*l=L9arᄽ1^iX!rOߊ1ѽD D(1SqL juƿS##=ƫ5Krmbq%^ePP56dmKN\?@~ރ+GO';d%{џdFDP9It ̱,H#>3o qc \O g;0R*PLȲXvEq!0C8jM ׄIR1L -V M(RUo([dhGczCXŖaI 5!D,TbL+>)ӱar1DCE=z'BoެGF$}§6u U&j|Cd\?v{,BoQzOB@&Jd!OɗDm޿ rB %}CH&p$3c@e\Wj!Ė9|aNY>EU16Ý">cN#`y'-CP Xz~NP0 QQg(!Ιi͆hCJNQ@XR&dY +JgTD=B)d4imiτ@2{bw{MDks\چ~sNRD>c139Q;*6nBvNV =-.A.j>{dH{Y],;3Ɖ2RC.(l{} 2cJDEȑiӱ c3qO͟E.,<πPO =ULyA%5r#w]vԽ=Z;<-7vq锧C/:iy$ 5Mӻ`@rt_MoFWķ&rFz v3gt=+mOE(ՕVb dEIBZZPh\Lxe \K| aozZ=tdp %ɕ@īٛ_WK *|i:~qos}a>)dF㼧E_ sK؊ng& ϣ}#O# | 0eG6$Fiߡm v2JK{ط`NmRzٛN7co) >h|^Ljq>ZTfL7᧳ :Yfs og{wux2{o+Π2{PKiE:uf-1:+B6 w~srѫG CfhK 4ذ`xryK*bV-Ms{o헏?f[y j,|Œ"׌JTNiKO>x"Vm߭m nTޮ,#n|d"K+"@&TÇ1gK1y׮k's7CԘ]Z|˭+qQ(Sjwrk~×ҶaL.qVCc~-g ?r^짟 ]"5)G}ՒيgBИVF׳~z A.q}u)NTZ81'KRBQ3R|w68oUA3 < Rh_ fB,>O |HގDj-ۦޯ*0.P&mFjPQ|b, %ZXQ> Ő7Ze׳::|m:hC98E̯8\jlzC#G,{7VtYYM2 /<=XO< %:S 461ꏴˏǮx}O{]->Ѐm@⑅*~$EF? ZR JW. m/jE(;,gK~LT.yЖee 'E?O407p5hZo?1ŝ-N-meW>IЃsj>`.~q?fq,Ҋ> stream x%}LSgmh'9o6!n|m3 2`,-%XJ(0+mqޑAf2eS3]^u99y~$xIޱI!Bro|0p7Gy"/?H2}W >IJc՚ RQg1aёLΝ̧*V'-aJw/L:O)W0AJ*vo fʕ&Uk r.3R8 XJSk$L-!B"" ŸXOl kFbA3^D 1M'Gx㸺u6 .~+AE'$(p0QX\`i.k\at9%S+DcɟڟQ"WV]U@5toaľn(䇽oױ8,bG=}?9w:P.~W{%؄U&ś<".8տ+;y Y{4͗r$l70Dti⥡(i7Z;ܦn"4zGGz5M.@{J*,0Y-p%Lr6#؇KCXC/ehC7Oc$]"Xa,%HtJn w_<,|~QYvsSbgͨ'ߘi2}TjVTeTLHo15@UAu_co>w-|o/ե˳+s!.ohf2 H mG[!AS,wР)wX\u:/7L4.^}y@_[[ Tio~\D x Vb3Fk͗h~y!yqh4 CB vyʔP4 A| JeNUTR7!V]㞈ڡ)!8 5jl8>@ X~yD D]v8.zX؟:Fl7R CȮYZK񊴉|-"R jendstream endobj 659 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5343 >> stream xXTT־+"x;()J4&^+(R4)3g0C c4Q/%1FcM4}e%yo\0u>0{1bβΦ/#%KĿI=Iσ,J V^zo 456ҟJ$!9AQ;||jt_kʬ`*f5Œd0k:f f19xf3lf1If2Y,f02KטerƊ `1֌ ӟ fӇXJ22T$_+JK?vwdtv5{+[˹X~y{zMoKq?}A}9_4n}C#n6 J$u]%|r*+b@CoTplSj4B=iNZ9h#C.]~y{NiZ$;@϶hT\e.ev|5GZ(ehZX(Zc UjgێQ۴B 1шtp4'zBAcp CMP+o#Lmc8 WZ7I H r.? hN_󾵳1BDNyH-opx9,Cp93b/tsk:V+g 0x;^Hr\編3XXG/ark1GY-1Hjn` ;xsd(AGF d V F4;ZW]Vn rupf388]=U,ag|ݙS%}rv2al˯yUև+Cv^bhGG,n %=p%LyM'y.bX] l_%%v(zTP֕t)Cnқhϯ}1m u/@{rMcߒ#Q&hX&&¸n+ݗ)~` ] yj"٪F )HQ'A[Z*wyghO=G ܆zsRrTb&Ƣ kJ1dLp ǃ\Z4$up1)>_Av\juBBRZ7凔%ߧ*k/;<˓q*G\,썒Al4$3"!-#7/=u{@`vb{DUx|Y!_zi:AIyCf1=' o[#N+t_'0H V3waI'.ֿwY ZY7e{x̞%E'1V֨˽&en*p Ҍ4p9W30ϓ?{K,cd&ZeJC,[믜&:t~%s% 5{oMDKA8Gkri2kad$d#>c(cĜ)FS`mW-mY͋2|LQMBe->żPvZ`KeQR"We3 lbnS+4[=D%GψReC6d2K}z}3*7PCktQ5}!5I~:m*i/{%4C3c&?" 
^JHRG`UrUTJU*?]_ψPɍVy J8pqT@y-͡ټ7W7:W G)F߁<>=.5 -7-1HOSCp&6Dԣna`'Wl7+>ݽƧ 8O姃ְJ Rsk>`#pS//*.lۚWUήXm%VVB9s~|Qs|M򫹄1olݲq|`uWświR&uкK`D^DrkBEɢ5e8KEԣTR;f~] ~d~iOqhfjI+3'f>Oee*A0=./OQKLNZ2o:t}4K1q5xtꎕ>RZPwZrrj}0NfCW \B!:OVgAWY vk>~ۙS.Vs0~*=t6:*ϳfqQlL%ZEgp6" _- .H(~Ϟ(ij{K4`p 3قd66qJ=B}Nd*jQO&}OZr!=;-l TIߥG<\tsMC@!SiղEhA[F껹gc>\m$'=j~fҬ.SheZݻ\#=xTߡ%s(D- wUyc@\|rۉ7h`ф'}?jo+-]gJ'TlId K$[r| 򛘋*t: *K/[cOS:-.Z'V/ZSrj9C(CUQqeׁ5o8)vuLJ{C x"s-Bpg>mGY0cCOnZK9gGOUԆov5BO\k|ۻBp_(^]`FdZazAv8˪tu*%U݂1 , 3hy}Lg:Usᆼ4ZcѤk1 vYq7gBl0a2?R*5ԏzSQѮѠ>Ș= {)RKǕd7;Z8*Znf{.==rK^UĢ ftIZM4#ť84Gw/>_;#d>9zT?/^~ꝍ*<06{ǣa*LMwri{KD,mҮuTK,b eP v 4kx1ʑ'WF_6 ܭ5PxN|>FC@Q^#K0q\@EtyUiqu#ZGX!*{(cv9)7^,=nBF wiq~sNJ2B.9#13khAHodpppqp@KrbpT-3&2Rp 3 )姖R"|j.+2bRq)yu 8'fLii)D\bg-oBeK&ތZbxNVrݴVMLL5- *}N,%nsɱ"#ߒ(Pgeg+X&vw􄕰12ԙ5ËwyLx"J?{["ټ)Wctq줴d55H -{ݩqr-OWS?A|tlhw20V9ig{ 7Ŋ*hµsk[ 1ъg&u~kWOE/2#7g+;0Dž {UhܒvLvd^ZZ=sa`nFl׳F˫}SK2Zc\rendstream endobj 660 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 P ]ZUm?eB:gׁm_Xց$i, x> stream x]Kq| taBa;lKɲeu}b(`"_|TWgUwp)9xlYY_oAoW͗MnspٛC MN 6?=~=<9dPoDͳ7[{ 4;Dg7߾Rؾ*'wꐴ1n/=Al>9hٿ8Nn=?k:gt8Uνm?p8gr00ki!mXۻZsoE\5a[G=hJ^,i@?$5 yWݤ`c*I=& Mhh[mNsvs,\?$hm4*w3:"LVfo[r ʛω|NX6 ᧜i76@2b15ݽAfoWq{w&BcaƀX؞n:ن@?FRLw {7{}6kf'[!p;?h}v&ˤ(:ly)R:+ʶE?4-Olf;h_rVau_XcR‹Y-)C$~zytkน 9-4FTY;Nv9KF@ޘ`R^Y֧e:P>6D/SĞ>6W2csmAz?=0bY;"ZPeg2WݤW;|1+7J,tƷ$2P&v #XYyh>rDnP0iɎTN7vLb8?|C)Е0x,^$9Nw=%&(_FG$Qd#2ZgX9 S`:7'R( F&l6 ^oNCɁj{<9I€Zjщgfn!y(h>{&Jd5Y""[8 ZckoE |7e<j:&~-"U[ G9Y>z>V,u݁Exy,;G0YB jN* gC'u6.\R]n%dzL ki}U=G4s(ơ,`I rvaBvU4Gy2~ \YR8"yкvr_ e3Dמ|- _ Ͻ`/x 6 +laxiBvۛYL@;yF`(fP| RTS'mJT Stw(.}rꗙ-53Ae4NAOPMGPf`_aW.E\w3S$ono/*a.,Z!:/#1s8lr1.`fp+0@"mnO45Iύ5i+yjlp+ k m_.t$"dq MD`qW7F4P 둯sb/ΰ]ɝ=Q @JZ5¿0{3^_$H⁗"~d,9dfrX5u&`j5$F΅??p+ 񙬹ՠ)W,T2U;Z,D\54i@2&9fSetռѳ-)v.STD~"b g0}|v͞(;~ǒɀeK̎jWIo9u8pD(_k +ZQ"ʻbN{.5Ӣ lÐNnJ'bJ)[/mfl^%m {W86G1^Dd a*範F1e.uyr+MɅ9gWȇ$MF#޳T=aF') ܸgxG35&F&3ԇDj:="em.K~8ޢ1'R V0$/e"9yWnd7qٌ݂),S)s27xS`dY҈%/ḟB$"Wg-<~XF[ OYzl#*J;"0iB=ց>,F;Y'}γcXѡA];'tYD׏A!BPz aʴW e"d霨@B{ʴψP΁5G3x($}$oQ2.q)xhJ7{X|-qq%Σ %;BP|83Hhj[Ryr9)6j}}Nr}HdNoonoty{cS}濇a7΃Z|W /V:\Iyk'uHwUto7Nv?HHQf Qt6 !A+ nP#we n l =z 4Πq`$ﷻApgsӪ'x(­H Ǫ|'&!\+8MӦ8N(<غ7[Zd: WL;(g'(RT23ez̸IǣB]ϐI6ՙצ麩I\g(!GWYgSMu6+@RGd#; ED.GZj]fƷ0[D?"(jK= :Z)1s,N)ZmI%u:9RZ-:p e`+_T_ DD5OGUc|l`kJOc YAV\"cIsȧj> &>QUopk$/ x~g.L铜˞8AJӀ 8l;SeZ$_}g=YgI@i[ǕQs=-m>=aJ}$EMrHka ` M-ua4l$NaO~߅JA@ Te1RtRr5=pͰyQ&S92xs#&[-fܔ4 R~^50;B\=7Q•ŊWHmILpĄLTu/KX,|˕HvfI8~ ". kHxj:4n-' 5Մ44s}揜jnŸjoT(}N!TJkaW Y>D} rӂ\@wj-:r#`.MxS|,?5<${.FI}3cBs \ 0zIb,gO;جl•lW\~0 hd|j[j FMo9"a&Jĉ.ZI6CWz-0bPS\8:9UmCx$&۬Qgeutlۆr.9? 
0r\T߉Vu[|ic{U+2/saF#Nұ%HDxD~0E^2f;IaDxuR1afR>1LEKlN/UqZx ku@`xc"U&Ƨc]ǏYfb&*jn.+ (|F`'CvUS+'%`(e=mծI7uI3+E$~(cv, ׍E Guq\PKԥ C9 1>kp]<՗ `L<_v\Y#FܖMɥXj~@7:- %-1;D.qjM(M3we#w%!ϲw)x3GJ3em`|;=ٖSqB4r}q$!mZ4rX>:u `{z.:$XO{@F;j3F4+m˴5 :UKzd@Y_$"-J#^)* `hQ; u6"{V.xȨrb M,Wy~Muy8ҢD 5Tkzf 3:Og3}5%,3 V5I' Z_͹o"{SSF S>/",&s)ِ[-hli2rD;O^pLy/{moxM(L|';+Fw \<;iپ+g{ԃikSh @<,5:, 涟rɠR'vR"4VW@d52$&vXWzq!Fzk߽XMph@iLՁlYWW@#Rk[u~,?Q}X\Uts;i+p\Oy|5"]pMyt-\ۏc{]JGw̵3o=`_հ*dQm;mO{.-pkztUִ>,\;I8 EFPj׺I4{\0LSJ\T4V &`pmxPwHaop\;"^361VݛhuФqMbJTY4H-]ؠt,q>.m-kb>=o4n;NI]'/QQP`G~'C~;._ڂ*l_g uBc&ژ HQн> :[teJ>sW&sLmnΗ?:MYwʷgo(4=8FyN9Z5ٝ3&KڥSDL#.ԉP @{{jYh o+yy tnYiO!˴KcݕS,$HO%Q!}RʤEs4l$U Ҫ[cIYYDu_W /҅o*'z` ފo{HH= #בܨP%T,"_SMz Ix6EolS*:5{˷&K(,D[ܶrˬݔu٫-mcjBRNSh0p}uѲ: |R&0[đϑ\AE'eGO.o}ޔ;eM+4 gEN9:w隀]} qO`Af+Ѻ?TCJE('}KJw*0!sFEM+W9cq+tImTƙ~/&&Ha\Qg)u9:KS:;ؓv l^#O@wJlߜCd}\ȶ4;~cV])j8N[XbSf~5/!nkwRX#q(͑jsO\`~_ E=;Ię%8IAk, UKOI6/u;bPlY̫.z߆6aؿKVHX'C4 t0|`X@É*Nf*mFq05:T_6ǩ&x`O*0Z"&agǒytU'z '=5ި ؠ%/隰:ќnvg<$1`9hXSȖY ',U,nQ4oM ( jݥZݔ+X#LוRݧ[XKa;[2tP!Gf/Q > 8aghb{0h9,jF2%}=)gcnW[l~\O_>Ĭ2 ;͓~YZ.L|Bʹ5FBsa$̧r?mipb3'5-|0c29DfCчlM'`B z}͟bqlnX;8 2dh5}"7<9@㧁~/{$>=Z .5Uhendstream endobj 663 0 obj << /Filter /FlateDecode /Length 8866 >> stream x}rɕ*K'f,ߗPL#M8z, I==0A:"=|y3g_{7%]=>x,9A.}f4_?5<7-y{쟟ø6xڳ篞̹6dzms759v7+о=0lL(kxKo/wb> hu֘l7Mɮvz/]ƤQN]YxҾw[z>B>J Ü=|"{v㾤OƫhzA's[tnZcףs[R 9p0u{ &#FG+r~ZN.@죽&@xO]ֳ{%$8^Xk.-%?HXS9Nhs}h~V |MsnhA㖇Vͭ;(6ڻe_#!DF/q*N **JZ.̧3XlyGo%8+91`D;~g%Jk)a,飓^{00"bku8K[ɜ+[*!x^$xP7+knGM띉x$i%VqjWm9&n@㾚Ath,--Xȣb.iۘ a2#>e G.Ʋ9*9O4qB"Uj!~ .j0AkLsŋL-!oҥx0:h'}+LLg!Vd +0晗&awCL =_ⶈ!PԺ @ A/?G`2MP*~T< ۟_dc_40Nģ\H|认۶es CuܵAG"4zeFdDTA/Aleo^s8&Dى<Џ}v4}i,M hzBQ-L3laDEzȣ *+у7Hw?HD}6F뮀QD1)~baAs P1 އKLhf9f:u>̘ -s~+"? &3bX~I` Rt!co0 ,!"pZX G%8 J&1煇3c&౤CjjC4|mom~K 0PEiRR)\&.K?WI_j.GU |6RpJ{=,[`;";]=4 * 6VҞ2ml7} ƞ.Coz,8-TwSqtlptxmVذC_ڙVήHjsHf<7^7ZȊ:QKq$?{x)uuGGѯΗ}|!c95U`$2kHfznz^usQLKaNސ}5WGj Bb6YZd+ /% WFwxP=}m%6NP6s~6 Çv5+/˶/臙 *OJgIg\')M]7vNp%t92NE3 Zk1 oGIMж /ǩ i#ZҀҠ74#lu95?mnQh+^- 6soi/yJ`Fm@ pߌdPap{B'caR@qdr?  t}- |FBqq|;)lJ(Ѕ ?^3>BCDI9N7!lLd@W`(^>ޚ)@n #TJZ0d.DFH]nSK8߈HNl,jqVy֢:Tn)!N1dwy/w(`?)t xe/$xk+ ,Twڃ/Q XEԍf}Uʃ%_`uSa1M&W уU=DPM c0+˾ߓh䥫qz$/1 u߀І t%s#&oP*>^$>Q傜QΰC@@נI#QUhTsZ$8 0O؎#r(ؐU\ U2,I 22V욤EoA BcaW+'SD6nvrJ"M+!r+͆M]!cLŞۂ{h@Eu``8p֦>obd`#3^M#_cM G"X!-jQ$R-6dZ t8`Bt~|\ pG;; Bs$#Xvc;T pXhLn?Ͱ} DEai죑+::tRA"r3X .r0vcR=N֊.`$jM(E~_M垨)M. ¤SxK(Qb1(IJ<3L)H@P%Uh7=v#X)]?-'jz`}Oj:˘ kǦ3Rk0|}Bfޓ07 A}Q65`*cRÔ`'W &e6fE[`e;划&]xאޠ8wp|K%]S$JJV̨CVx{gPVc L|is^>EBB1d+ †40H@@gEzk{-7l˅$%r8S .7zSSzVԉ`+ĮLTŻE +~Y1siEhM{1]ZںERKJbWHA:}q~$LXu+vē?FA 9@6M8,jU\E$*6}T& tvXPԪJ7kI#c:̛M)m55:nr> ZiEkLkڋqQt,E YqJus lk{k].e D/ΗO3)Z~ 7acAo$}?MJ~7V.IJQxAk:RRU֮ml|ᆁ(U,7.yLM%p]֫,&&6Q%r4`aϺAN/iuC. 
V 0 J>jݶD&YeGRMl$JI~ _ NP s)ى&&r9䌽,RH$O,SWFx_*#TZ ?7 +<]xv3Y+JgbF1 _k`;NL@z!qR3(K2%:dyZƒ@LJW>0ʅb=n޾B x}NIqR ;+^j\y}]D`G:9'Ħ?VZ@-: 9綉eU@ qMt̀0EA(6WT۝> x)qdpnb_yllSAeuJx#Y颓>Lo %srHڄGI 7}t\]i僊 @7`M_ D A$Z&Asx}Us0t 4pB?UJHvSL}T[ֻ;Z_j/}>Ju\ +_%ɾK:FU+BrpK܆%&*!r" ?Z KTx6jWT]6if{vLj"Irbfq|զH{L ֶ>`zU>E 1lf=tL-V!鲪zۣm@x~7hܾFkm\(Q{q+;be?R1̕8aw-rY!f9Yxú3un{49Ȼ5v+.9RgOVi2l2[ryv` {H|M{`G'$o:3 q :ڟeG"cotLwx&WLU&h~KEar"7ӊÎ8v͖:q潌SZ# |Rk6\gຑ/NО %{FP^U_v|'8sG}E)DF5}{/_Rƥyf_:JejVK~ⰀS1fcrӳDŽfͣdȪT!~g\OC)BF̀nl/3o :Tz7a*I/.M3)PAxO/bIS3ue"bE>/B_=%46hv71NI`U%``rB% '¤:n6NZ,񣬕b"=L>\&Nw7>kÑUpwPT{bFu(@U-rjwӑק6/_}$#+G@X{7SM=07 }m'\4>8x J[;!yZS۵ wQ>>]أOHa{B.+IIu ijV:̛B9y1BP5F6#~ov<ppL8xeTU-^Gpai > &-qc;pɩJ$mV|Z#k2F~Šd*ʂ)V =3 N=U~#eYwt/B;X: M؜ޚvAKra!^Q=o ˆѶmMrE_˽ 7=XL=F@=Ǖ" OY# @;>mo{EceyCo[(wȌ7z K`45Oegt,1x+:n dJ GX #9`OsZBLc?)%q""/@OW\a(N/?]KMu~>Z>㜷5yw f-%:-nl?KhC\lDM/Bu]МGv?O,{lNH԰ Bu Sr>9p`$aSV} ]կ/4VpuP6e#aw(v]S%fU$ArO| L6hІL"u=߯!enἩm u"S.o|U&򋒶 gt)|K~J,Z|8_χӚՙ ]+}·u-?T=j~ÏQ{zPrf&VuIGZ~U>+"Zi~˓L:2R}ߕ Ƭ^74{~:+R}1C$Qϡ{#=Q]t 0P#xB1f.k#GC5C>G;_S~ IoA湪GqFcF2zr¦7s8+`Ć >mI>^WuUs1#ʉwYqN.۴@[-.WvpG"GGNw.q܈" *k<ϯylKLɺ0huXW"&u} !9G~!h|f|E }7S Hg5b/0m- c-l 846\pz -1 4zہ}5TJ r7dR!4 Ks u$߿ 'Rfytǯ,931ʣC1!V(g9fP4 a+I{ܱ(oZ@"*uf/XhIl\j{UudN&; /HxcbVtC< Ŷ} q@h<؃/;[wu5Pn I BcϬ^v4>RmB 6acprQE%KX=D\aMh\ibX#/kY"Ŵ`•xaF0RqClIu8`pd!~n#:7<{)qoUħh p zdPiu\\L)v wLtcgT;qJO HNL Oqg* endstream endobj 664 0 obj << /Filter /FlateDecode /Length 6929 >> stream x]Ks$qҏ`Դ7$[Rر:>X$; H MrF=,|~H`z:Ot/N}7'=N\rz ڷSO |h˻_]~ҜŇPFu\qw';vWytv W'ݝϓN)F yJvI8;iѻOK)AX9ՓZ3ݿg*%pawUG=t&ݥ)0Ghj5w4Хٹ1zJ~߅yքg,9q?=Mg`gG&zc-s)sk|0›)&ǽ=7ܸ)@͞Yg*$ϳO8?d[HN6J# 1O^y?B.Gٹ0I;ۉd:mŴN^FO rna>.( TT);+kM轫`̳I୲, ~iI{('1rjSA Szܯtb) Y:$6;UrX7=@Pq"ju SI4)`pAV13SeZ T2{T>LWgb23lʴVw0}BAuЮ"`Ӏ*XKCpWOq96]l Nf:uYEYdygO| ? ZSMabMg؈\H6*-Ī:[mzN&eR3LfB) |YChcuf'nJ? wDZbR,T-VƗs~oս$Np{d@!a 3wߟ9G auB!..F"h+a^3zR! o/!M025S,, 7~w֩&bjlT.: .'6qfM 5 x%CJ7M|xӀ'v)b)DcM֓cKyK[B$C?۱"-dz E8`*#w 5aD.m뒮dQm4G#D{ɝ& 6]Anch"P6N{9\`1䤍Xr+sҿڗ">Pު)+(0!1JAJi_yM =InKm齠̀t=0+6aB.^4q~zd#O.#6€P-Ɔ!٘Y$&1K(q0w^,iXEVn%3p^3i|&PdZGPSqJWf@&Dz.͸%"j&di9 /'4NoCI Hؒb- ^I ai7Q0VEugYXw7 Ԩb{ҖfBdWi=ޭ5p:5L ZȤqֆ&k2RyE}8˴AN O:PpeIGa,Xft[ͫѱw<5PS뺘Ӷ9;E :F ;A5ϮIe#uA︽`NWz5C̑a߬J36XEiz.O/~p) \A]WS-YK̶tP@͕?1trlZ.Q|&`Dz&2yHE} '0n8GJG0ťM[T}HY*FTΨ0>mߦ¦D#r٩4#hQX3>4ڸԔr\s޹(i;^_Hu)"z|Zڅ:W̟@.nʭ` `A_BDFz qq^Iq YoE拦/":. eFblE>|@'#vFm)2&^:0&Y#R"(5Y@;CD_c-\rO76> X">iH2"?tɿ N'kN[.$ ɯzV*VP@¥[RE.p]3=l|<±u8W֋'V: guE2,<@7nR֙K'uS8'8D[}qlCQHY,t)<Ԛ ud:@} ZzE [L쁹/)-ZzBJf?VaŖ|E%qV=siMTܷyGk`r"[rԭ$3ZiUf`HA"T(V}s~$0̸Fljȇg.;8Շ}ZJj%Ch2-c6?g)Inj}h::t'M9} 5'= e#OU9F~A ~Y~N}-=|#lQRVpb%@ՆtN^XXV3FD5|O!BΫem%=!6P|b M0'ogBL0u&(Yiж[eL.a+d:f*\@{÷2C%]}xYS_GVTn+ֺ:T|'šFo{0I{+II>%une$I?QDYh<"dj:i} +k@bn6 M=9bCTӑ|!53vK`8aƭˮ`m\N2Y/կNI-͈<N:@H#wG-XV:®wtm+,TkR9h+Bys|~YIC0ʇXCߖ%ۼoɣw52csȂMQ]&{?֓C1PyF^рk R~GbcjۭSyS K~ 4yxI .%^*5ҠॵO#>\%-p?V"? /mb Oڑ4dsD;qTw=_&%6T#uF2za*HDM]Xgx졹+ ^F3f#xߣ #,Pc/fN@%VNfPA=8Ov5}SLt]};6ơCmeMƊa"ncL\'gJ4_'{ y摲&-&@FXT#VMB|4խl a7z>]yF#cpg9g9m zIg.ũ1^l?^AFiY^|rchr^(ޤ->m,cZ#r%-ǥɌ -Q,qS_+ A-/fo$27A5)rN`y@ތnВ?Bֲ x@SwՌ Sn7 II7f+;Tg3́Nѷ&KoJbNuC~JaK3J˅iz jݤKQQ*lljtuvvu*ޱGx)tag1B (H_Q -)^+oC|Կ->+= AWvHoudM[N ^B+Kkx݄P9聿Q$Dž;<.jX{U 3CJ>$s0a(T;DXf9oyuF8]endstream endobj 665 0 obj << /Filter /FlateDecode /Length 6983 >> stream x][\7r8>bbMlFM֌4z4-i$+K^SU9,=Z<ՅԫK1K_`.o/^]/.ljڷK;?|vp_J2i"GO/ҌR8i.Sы6ڊIDBz„*l۝j RnO''P:e 4^@olUjJ 1h6 0wb]ʤO ]oìAom~E-<)ǣ?!-'jrpobgi;ן68^ԴP{1Bm6?owguuCd6D\?z×sw携Cyx;wvs[(6g? 
>/ʹylIKXcEE:KD3nc8/c *w\2Y5l>ßU;˸>Mxx# \u e=-<$QSwpRlIFLKI@`6IJ'.Y{ڊZB|H˰p"/zwW:ma#GS^j4jӇ$4I=YP%$%p 乲 M$_ ;js348ܗ q?>Q.%}V+%dU҄H }-wE4uw2N+#3e}+.㥳 L|FvrwihXYܲ?t (ovL`(24Sy>Zq,1=x3R%7zRed#h/ql0DŽI"D6C·-ҏ1v|Q,Ը`}$>73"=#p1(ْ.TvK i,-.8H8۞r2r/jÃN+h^ 6w:kx3d=Ji;Ϊ(45jfpf$@l0Lb8k@[^3 ~o' bwMN[UX:αձ n(%YyZx:ѷ(sLΙy))lᄠ{@ѐ8"g`¹LpTx#MߡyZGaEs͐o`)e/v4Kmi$qŅ1 wQCh& m1誠tkpATW(r+J' RNE<>/}yRؘ Rla(Ɂ&)nuW)#Cc9'1tչ."RvdRqxfцQ4*͑ZILB4K&T8s:H('+_皐qp^I%3UoiXUEO`J}ĠF٩: [{>SQɏObcϾ̾u~;=#C8dG x?ay.{ZϚOlho 8:*ߕ湊D&Ap-C-B4=mipF LaU=Wh"Y+S2q׾1ٱ0+ (pq2D¬AIv2Wg!x#Yr-18&/yWymW4NUw>ˍPFÔ5>q A]Iqp0?K&ߔ;!1~<}5`G |^HNL.\c 0" L1a|S5GRӺ"p0tkARkrrY3vzO`=6B #t|,{oEڄiԒ [S"yẁ.e]1I )IM2 8{2(`(#F‹LBP}ီ-P)2KߵU?؜;\"kSN8(uϿ<%OEZ,Gɼ>T4p;bfㄈ,ѩM}u'˔#ě)W (ޱrʵ:k8蔯FKj ~;^ׂAMѩY˅Ju_[G7.Uco ܖ7joMaSjf|`8eǂѸSj}9 `4ô&BY~b>6ur ѨmbAN8FD ).~Q #"{>~QoNuy'So?Ƚ/M%46(,#t%oO:t9hYOdk͖]z}Sϧ+͓5+'Ò*'k%&0Dl};BY_I>hk]s43їX}o ;- $h$x; ^dj i YHIDpxY7{V 8|qߧ@Sr XALި@龖arq♲O+M?5밀ݞ-!}7ٕu{Xjv֜9]FI$1 ]tuh0~)PthFT# ~~0Уht`~L{΋0rK )XyMw+a:mGRuڔ(s^ڴ5V#H,+mTcZ<T񲣖er amlޝRq%l^ UDRN[銴Ȫyr僗J#ypʠ u'+^5Cβi78PhL# `ѥXvZe"R="C:Y[ޫP~\aʆTA9ѩ#.74АUG]ŬYU蠇6U*ayE\LYvܲ2Úa &o&3"zG_I"Z%+`\``8B:nFr|z8IK*M\hC-@m"t<+)'w0Z׻+,%q-j Gj8u R"Tp-,8}Lꑌ`Q!AttI` 78#ЄcjSڶd"6vw&BJ0L( SntsRހ4+ =I#a9=,0T[5HH`]v3Sʍ Wh1jEBzR>/XiidmU8'Um0P$544M5ՎhS:y͛#R 5FLk΅*n sh(x@m_F}K SmuĻ-R:7,zc0V!W CQ(yqhu6XM3t]6+q]Z|N胴H՘Ir GBIS= 퇾.G#$`|mmHtP5̾gոᨈl1ʮ@0ހ/r]NAުEF'.f1$H;o2? Wnе؏a :PC滠2m R5i˔'&ʊWvLY3E[W*k5^`) -@k/,>i2Ϊl[Y{7ј2\M5*jG Gi#o=0] +S77J"ce\ òͅzj,`_G},%Ĵ*NI]=q"&y%0:f֚_G_۪A4rCZI y.皸ߧ\KPxYk,8QX>͇ĊIްՇ[6j'W:H$edt/*(=)Njz~b,*嘖Հ<5!P.z"KbDD67g9Z$Rɏ!ןQ$9pN>/?bA.):9v_#6wr}[IX:x🷽`Q̗Uޖ^^%3q%!0Qۏ\vƵ_a&edwy}{2ͣUZ=Z| ot֖j֬*TƱ7Ai?,֛ƺ~ WHL0Ϟ=*R<5X9;CsG 1tSC\r Sw⑱Syp[e[&սZbzlrc_FNJm uNBɑ9P3oB`eyO3nhm8-GD}7"-?@.yl ŶuǞwkNKętț=8.&e 3L_z\/sTi-Bַ&"eWiQעWzu!ZhB_b>-̰&;lQݡ5^@ΗUASOF|Mnʀ ^&Q[j)Y]_衾4Զm.'l!TR,F3^~S׍ί/pUm@8UJՋ5XDU u!*z[%p}LlM$a;5e%ϖ޺D7,eBPZI-bku2b 0_cK*cVVcՔu@PفSs#f*_(C73tuHds0}O ?sC;JV/?d<>1ߐl~A/$w(U&xQ?zgE2M\$8t)|SsH 񶒓9~0,}$0#d;|NX$tG0 ̗q`èGI aey3/΃z4 Q_v.v>[8k9 Wy~pO4B 2l}Ij!sf1z_|J/`-y8hu*OKXꥅLAlK1f_ c G_ Q6qgG,8A~WJ걫ж<ϟLN'nɟJtl~prDKLF`zMV8 )?q;L4*BSi3'1l/l7P@*Բ\ߑ?p_J~L\um]/=O.^@=.h[lkekW)#XNe%uD* *xlO8E0< EzGM\cKKo-qNRp˛|6O 4?/\?W7'$βADjnendstream endobj 666 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 778 >> stream xU]HSawlXlX۹ ti]}QFTcr6;;vRΚdz/1u6cK/"EHAtS]FYҵt,:˒}è@0ƅU5upTi~s}`(+XKא+0ALUTV(WR4r:u [uXbP^Fȅ؉Y^(Y;&ZI9 Y!KBKWU٫E'[8hNt`F O Kg5'! >'`1ȑ92ҲV*#LdBt6edu0q*da8n R62V&uKZOg$wI<8%$b2d|l&ӘH H&LJxtw_ >1`# JZ AK y-9 CCVR>‰P,Cޘ-,(IEcr5#7b~!^@ ^eZy˔E4ڤ fo|拮[&LK{u:%㟢Yfqfs$akϖ#O:3}z0|Bє_b$E݃6=jz5 ~N46/#﹥S\Qn`ݹ.ܨJ~1Mendstream endobj 667 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1944 >> stream x} lmWHV5nٝVJ1膄"( ށp۱}qݝGb8$ІBC[(lP-+ԗ&Mጴ#mӝ~'BqIik?{p5?07wF"-WyǠP)]T=m@$*U^S_[G,rŲ˖Q55UJYiY'WTUM$W-]ܼJ]Ԯ^X\OZF''[RMU l nԹDhl"Yj\3׮kj.-o/l,l,$ (`PP f\px9OQnۖE?N爴0]I}^-Oހk D}qHL` P0Q=ADCso6^zWJBzB-hHro`=JxT(`}ARkukvA&8:>l"kM2zr^5 2LEIȢYNZQ0(O/­ 1 3L /zܲCEx]Ǣhs,|{HNliw)G oEe#{hfFAQQK:ZKC] @i?14f=iBoal:)L>[l{k{ h'Y46"DWЧo JXjrS`Gph-yffܤ~9`//"#b4kHΥVXo1]47gܞ6N?ne@8IxO%7tv`He|q(Yf^f!R^]#!}l̞<˴ka0`$mgZl >8#x[AR;qN}Y5`- @_/q@d:I!h5o^{Ωc-2RJpvpaTjvx'u-<V=o~T`.\[/Z.,1|0za7[RM{ЅwX3JbvΆUԧ>cg{~g*rHo2"lD07xyb }0~R41C_,mi%t2K;d>c&%!U= |!#Af~1G%S }5v6Kx;&ʥ#Jk ՗n: m›UꔀNg,@p~nݟ!E+٫"tj@c$JӐ?ԛyd+. Jw}Q$uX=8Ju5_7@P*2]D8M@XإR&(ʭ*ӹϧp_ ci2pm&>˯X`2<'zGz7n.WKjg@SuBQbGHP$z#'! 
Ouʁš=!+e6+uJ[U?<,|fO}Q:mendstream endobj 668 0 obj << /Filter /FlateDecode /Length 164 >> stream x]O0 VB,tahU8(Nп/ СY:ߝ|]FEehvK@Fˢ@[;'nʿ?`5]M$%-NRP> stream xcd`ab`ddds 4H3a!ì=<<<, /,={ #c^qSs~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWvt-(-I-ROI-+(Me```4``b`bddqӂO|܏,IzuOX4+>]֎vɪ=SL+^~ os_ļ 4liendstream endobj 670 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 361 >> stream xcd`ab`dddwu041H3a!Wu7kc7s7˲ ~!Ș_29(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMUOB9)槤悌M(a``` ``b`bddkCd73J?͍] S{/k[Y^u[on?lp4_u?$~(}g^r?6u/tug]9}.n9.|͓xxȇ8endstream endobj 671 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 675 >> stream xPKSqua )Z=B$DNR[e}`;tsknε!sNM,RECP|hr8<9(".@P;p "0$-RJN$l?QrJ4ZTUWdj}hSNEj)= QX+Q٨eY:RKZ#i!,:VK<[FKz8V{rdf)P)fAD}=iA}Gs12I+N^ģpv ftGpѽ& :z-9&;Xv7>Li,^V B\(2k4eku1j.l- /RNn:?"vPY?`a~mdԂ>09"`jtyNJ(rh11oY(nl>/gN=qunv1󫱭|8vS<O1? ^ ML*ͦ7Vp=:T2l"'JQbB > stream xY XSg>1ikrmժ[kmUҺ+첇ٗ/ Y! A@vYTPۂKWvwZӹw:?a@]>yB?}~81d};-'#C\P\[03 }{/{.Z'LJHEN9o瞊|z܅ b"WGREMJ8Q^E͝5;]hSI qYqœei5ѩqO7{eIzjF(N:=6NFܴK2˲D+WF!޷*/fu~lu 7$mܿ)9%5LqWA%'SF bAl!f[mb)b;E k2b.G &V7gUjb JL$҈b<13s1J%9/rcwXX[^ո_L5="p<&ѓNWz?u5~|+`ǃ,z롉z8."+x8~[\d@|O@{BU@`j4lWVh)ERƼ@(jAt&.Z+!j?Ni "ȔT3vPeM@@Tҋnm!A+ڤ>GLE}A~N`*_)@,B:AǨi#Dr F~ir}CqK뎾QUp;,b9idZh<{]<]?Mophv02wǞ(ЍƜE%g5jۯ$Qv:K<@hG'5S4Eof׆YlX&zmD #r9~~l.LI F տ6MdE]B!KV}/CRW(v;@]x=\D/>ꝯDStbCб[h(7V~Wq@ VZbZ@S yٙPf7X{*!<؋wFS~g٪o4h7H-z-{sZG h5+B/ϸEŨo*FwPdNo5}w\xJ1,M^IAD8 % ƕ١i4LH搃KPK_E;e?d54`*:uPL!0Y^ U^|`0 Jȧ_IPudn`[ piJ$QG=>B{8]Bo&L*hIsW4Ĵ'N_cSnIO Ȍg1 ]2og$lEX5o%~"Kw: K W:9V*]²z؏ceM8E4<3Kv4`4(BE6Xv^ޏ[vhrA+6䟏!ǒx;V3vQn |{vH LC1!RkYZ: Ҍ._WV  -֪`#촱j CuXg35~n ovd^%l'&rwn<,mlj17}B`r)[}7oYd 9䧋A,9o h!O{=s/&rEj*yqb:ٚu ?&u*>t<2W5@6Ofd Z jro 1@+3-?"qz-`4[n 4~4*7 L扅b¨eABffK n;E٪oMtrEI'zus'E|U> dasˎ7u:̎k<.H g|WdlkOk"W{=ɑ9wn2t:kmi?m`[8x6*!v"qHL⛜`T%bŽI%yqq1M{w5ݭsMwXWg]-̥c hBy;]s0֧Uz ,m(Egh $pYMeC&#TH6ԧ¦7ӥz O*Su^:^el+h[6[}Z@1olkw 5bp\ivAJlreo g >ػə=e_F%kL9'cy1xH>dHL5ICR$Ȇ6/nx eT[4)2ry9h1_h91q/_[ݟػZ0LRgn-ַI!@a9a~at-Ȣ@ BCQE\CǶ:iك8j6%PwvzؑN.Kʭ+et^KUrzm\$^ɺ1:`2[@HN2ru7BHRKYmٍ\6_JV0Wet>̮ Jy e0R4 ("v9Y,oi P%˔xxtUu@"y;){*ʷ4;.X_vw?+֬.v (3Sҫ䕾293@_ pOr|4̮ z&Op6b'1j!yTF |⛊֐\k-ׅAt[x\&pQj{~m9@4||V|c֝a;uҰڱ,ڷjbChS?"PW*O> stream xcd`ab`ddds 24H3a!.kc7s7W ~.ȘW_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*]&s JKR|SRYt+uwRꖦn߾l8tw wnp7ow ӈ} Zb^~SgwWܼwxE,1[w2|e ~8?u߉Wpb19 ϙ< hendstream endobj 674 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x`JSendstream endobj 675 0 obj << /Filter /FlateDecode /Length 5904 >> stream x\YsGr~ }qϮY~6:䐬bÖ0$@! P+g;3+z,TבǗGS1S/ޞ<>'퉍VAF1QNzz.MIc^= ǵd"ӳ'iBy䩷~ڞ=쿡t蕅O~nbT1 ln6b RiK|c Z {_gJ>Q)q5Fa&c> ++p^F>np:͌+)54څF8\nZ1;0z " -W4!_ 7CRy8LpژDkS_͢qmm]VE5oOڎw#fؽnax.(5l (ᗍC!&vDm&/qI Vq&AVǹEe ?/zG BUpFw +8ypN8|1FΣ: M<Up`ጰCwB F6jwD@)Q dK9d_5J#H ?9N Jiox=ca _0jlʹ{9`:Ǽ I ā$##(ͧW- 3o7Af.l%l-̑V<WU/gdW8z:|zAa+lc<\' -=|:A_eZ30꼭#N l*Q䔹*ůZBUzDou۴Pm xpW0өD7HEc򩠁zԱMm~_E Tɴmi+MXU̜4W:P_6='Nkx]ZE{4֟n~j' a"rǰh+ V0n>l,3/2 o/7@s8Y04x*ov7QLsHhD4(΋ػ/^_\\wIk9=?N6 ى&-5_TG%i#֔Uk$xPH%of чj5%%ц?vYEшh:%]s1 4z:?eP"0pсfPBt4sHBuS6:[~'(T".YjE,TZ>1N:R]B*Ym3fdSoPsT0bZXdBH EM+WᚓӁl үjmFsߧ1`V* J:7qAqa}J5TzF5Ůzz ? 
DQb5& B\ ltY-D`Cy,n n7D :jmUt( [u :p~:a&Ys)IQQ@>!C$3y$3vCA`TӔB8@ϻC }PvYxHJ$ErOoO?KF)X1$"0ZpVW2qϝԞ~:c:vHqۀwܰ-vxpIvY9Uk~ d'hAR pF5d|^vjDi8>Lcuׅ8a<%B+=6CG.3AiAEw~,kjfvЙI A`Ѵ6Z3J+MZI^ǛҼ-w9G\,tߔ_J@zn5Uu#<3g@4nksDp~-GIx 'pM5iL=j|LOWr1%FFNoJ"w!{*[>?2.[Dj|Y70L MԔʻLg*ڿEl6(a1qM>]<[6.;ƶKOܻr +I"ٺUCTeGCZ#]| 1 xP*!#ۥ#"zZd:/йwQ ?MSF{Ej vJݙzyL֑s(B>=OS46&tM"Ӣ@ އemȞܔmi+͋bv.;ըu4{|0AʞZtTK!kNѢ3Qa L%P"مeZqlJ?Wna)ucl׸ -GMyGC_@D@A\h;q(€H?3M?D)b fT?sy>g$n!FB1(eit*g?CI 2_`+-mtحI"CpJy%,j W8#yyy;0M{iã"RzCMũ5;ə,,% @b꓀ctr,7؟laT4[h0r^!´( se*m` |i:X7%&%>ȴV_GsR*9[&F4nNQfSFN\KN)`25+jO1?.uOXW~[^AS!C*tӷ 'k` UFF{8jܨf<w>UunʝܲLd&:tvA̾;IuI@*&͜dgU|Е Zm[7zK7Ōf~Kt s**Q]ʤ WhYPӝb2 )|( -ڱ T[3.Ak$1Zr-{-Bh#} eUL [ns-AJtK#=wy+-29DV#>M'ĸj(ɫr0 r0X|9Scy|@G4^/XT56k3s(΂YEڮ!:شpFJn@1@wd#敮Em `n0YZkս5a3IT}~d L{ @Ev*{Fqc>ceA#`]4RgL_q_+౅5v^ʀڤ# 83>q>(AXin2R/\O7SԽ{oA@4:Ol 0.>A1{gI*+zΊ֣RQͥ"iR\ `ʂZw睵ɪ܏R!ډ*,\Sj]oIp;n 庬^)tvq-ô:+B{m\jňs^v(Q-КK(%BhCEi+M5{ƼLvZ} LZ Vuq>췈U FP!Z a'S/JS8Ⱥ}H|#bBc!?5p=ŐO%.~-HJV4c=%kHЉ|;kXwF2/R3Ы~xK. )DHB!d~ M`_,T{X!cBsXԸ0L?7(mUi{*fnYְ+;Ն\2]/c`]Y%s]֜A61&ݗC~q*ŭW+@)Ry\ODn|OEʪr-x=PxO)v,]v9YaOF:}0 !k듲^}[/R.\djE4ʱJy(WLož̑SL2',KK@DWD0yFD>0PF bc.]KY=u掙O핪AXL~%#`I&6+BBrKY79Kz,)ǡ8q.F^1”B`y6Y;OaZ%.ܚq­Yu#[u()|'1=X([k.uN Jta?}8J1ik5R {$ jwh/}Qu Ǧ>J6l,^7 r+H3C䜱B~_^ꘝ=e3(XibTh]i}3UM}oDncK*a&FU9}a/f-k}rQ?Qt/~'/Uev0G(`gMi~{'TUاVd 7zR>x짒-B}̫QPs/U.gS@ԾmK?J(}LV|*M(_>=/J/s~‡xxdV)n!;SjI%X'ںVLm]V GipBWoۆ93=tY+j<(I{yz/iNR:|YeȆe\)2+ǰu˞bQeffbp&b(FYRfߗ&L;r܎%Ӥ0Ft<;^a+?0TlWUbM^JCzc4J?]ge~8#^hln~`+IPiesvpyb8r4ת$3عDV(]G[e#g8rHtY-WEo>l\}J T o TK}+Yu{9З&V蔕_fٽTb;/KiIY~~IAg%sɝyIJ22GS(t#k~⿭B'[q(@X#:Ǯ4 ޹B-@exq-g)K}ٻˈOqXG/>1v_\Tendstream endobj 676 0 obj << /Filter /FlateDecode /Length 5717 >> stream x\Yo$GrH BFa XDX%?hH6&5owDfVedvT79CzJyőTT?7'6ӫ'FF`|=aAᛓOoR5O˿oNde 3Y= =~}${SZ aE Xc KϖA~ۧ *m.r1jP,$YblV0NvD7:i|nav{6rKh?%Sl|;r4N5pYPdxm wONbӄ[:zYE^e崔.cE 6_TWz1ճ;p!! sUs<QØm-DR]CTݔln.*LylauSS7* e('p4T&|&fi=6HaoV0E[(L@xۇ$RDfDΎWv=Ie] ?ds(ӽNXMqJ4dQV+7RK2TP66ݑ}R.⋮B$ h|wy] mS4$TJ!)R䁛H6:G8M^Hښ&>tyQp42oG;ע9l@S2 ID\! `Mw: `sJ2 ݬrDcҦ ӡKX$>ĔL!$O-UL.(&ϖ`P(cdǤ$@w-?&,|ۓYxHBaz蟶SI"4+ 鲀7 )K*A}Wu,SFkb#\>=+x]nٟݰ }8}zI<7uHqO Zd:T,=(=>c ~˒v)d;ʑ#?w u=;)Cԥh.Ɯ+fL\f#3K?šŅJQ ^QGoR55^Ax 01%+wDrvs4.8m$X D+ ah+QpM`sp߽B"1(mP6Dg^2i :j%Rk mӃm) '"~ c=F1M^ b J'fTN;bR%h|)!芜j DgphME#j?-8)S _!zs*V"coknijșvNX+UuB!di :g'qrKP,Ax`?2nzFP$jlԸL /"_F--VsRc,U4&%t! -든v ,Vڈ;,!Ɲkj$ҔLU AgCuxUoݒׁw^I޸0ZX9!* #pE|?w1[<*79uss~vΕn#eF2 אA}ɡ*|1 *R2xQ1}oq‘"r@9T;Fl[&YFZI;9^mfƩ/v"`;+bX*%J t:8͹d8Q^Mb!448uS7od/6LVJAY)RZ N1VΖV~,5W}|_ Bw 9LT0+Js)dr/@ǭ^"=̉IEl%ƇN6nTq^!q1l[;;BȊm,qH,MIX:$uxW@w ?#oiڒ籽.e(+RSGGGIuCRm$j^h}0b!S9&S)Ky~r\u1O+hjhtb0ާU./j Nh/\d73Wl8ItT%PZb3DrItR7"$`^ XQRқ Hi i<H4*+6&@aATQ$ ? 
x~TFc30?YuX$`~y%Bg/9pendstream endobj 766 0 obj << /BBox [ 2541.92 5611.96 2575.02 5742.47 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 164 >> stream xeK@=3C9օ0ƴ&Յח>2!shToA2Au 8@XH&&D˚ڥ >)bϫI2S&|bPB$f_9k0sxe\'0AżY3P:9endstream endobj 767 0 obj << /BBox [ 2567.43 5611.96 2600.53 5834.77 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe=0 ݧ 9[B-RqKIEr>C{R"&G 2TINRp'0咃8MBQS,o HC7>q9&bSɺ>(Raepb9endstream endobj 768 0 obj << /BBox [ 2593.03 5611.96 2626.13 5857.88 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe 0 {"Oi'@tC6azu Y HNEЪ -dhUTYRp;0yJh%D|\ 9| ?}„B\Kg`*ym^ѤJ/L=o9endstream endobj 769 0 obj << /BBox [ 2618.53 5611.96 2651.63 5892.48 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 165 >> stream xe10 ݧ Lĉs$ hZ듖8?Ъ^0d A}&U*IX".=\ ,["$K*1RtQe7C"u.d|.=„L(2]] DVad-S36e:endstream endobj 770 0 obj << /BBox [ 2644.13 5611.96 2677.23 5957.78 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe 0 {"Oi'm| ل̓o7gw0|zhUv/@R2ʂZA}&U*ɉ".=\ L-xaŐ 5TtJ%F|;$R t{ [)2] $O°C.ʖh6h9endstream endobj 771 0 obj << /BBox [ 2669.63 5611.96 2702.73 5996.28 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe 0 {"Oi'm| ل̓o7gw0!~ZՀ &2!'8BPo$P%9Qb+K-;p1y*ƒJN)D)Rv89ATKGPKTq{<VXaQtl*=>:9endstream endobj 772 0 obj << /BBox [ 2695.23 5611.96 2728.34 6046.18 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 169 >> stream xeK @ }N$Mq]H+.cca@Cnؾ`1Q"Wڤ$8q.vO`J9YZАK*XZt;DrfSy`vsK%\T>QW7lJ_Y2tpjwK:endstream endobj 773 0 obj << /BBox [ 2720.84 5611.96 2753.94 6030.88 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe0;O Җ'0yn1MBtי0'iU{_jNx94G49Z,$F%b \!1ʻfRʶ.{>3.`LRaYƚd#b@9endstream endobj 774 0 obj << /BBox [ 2746.34 5611.96 2779.44 6173.09 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe;0 ݧ yeZe-"9ۇV5` $LEPM _e(q1vO`%;pIyRI)Eh> stream xeK @ }N$3 w  "кNk.|rhl]z8c |l/9endstream endobj 776 0 obj << /BBox [ 2797.44 5611.96 2830.54 6496.01 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe0;O O`mb6̓oiwB_shUv/@eA-BNp>*CsĹ/K|S)܁K,$5 :1Y$ `!Oet{ [)J] pP!U.=>l9endstream endobj 777 0 obj << /BBox [ 2823.04 5611.96 2856.15 6434.5 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xe0 {OPmݺ'0|&w.K/Tl&@R2ʂZ`A}&t$'JlCZ,[؀3)eXRI)EVEKOH!/往q>2n\ty_3#7vo"kXA_p:endstream endobj 778 0 obj << /BBox [ 2848.55 5611.96 2881.65 6469.11 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xe0 {OP۱nx>Q[ .K/ՈFP jUH]]r%..[n&++ej!,KBRTL ~Ai0kyеp)&%3tpj89endstream endobj 779 0 obj << /BBox [ 2874.15 5611.96 2907.25 6492.21 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 169 >> stream xeA@ }OvN;'0q.> stream xe 0 {"Oi'm| ل7p;h>Cj H)LP](4IdؤQbsvO`VSyu#%"2KF3!Kܡ2aB! 
0Weg~9sY3p9p9endstream endobj 781 0 obj << /BBox [ 2925.25 5611.96 2958.36 6565.21 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 165 >> stream xe 0 {"Oi'm| ل̓o6;R|:Ո f P jc$MJK2TĦg!X0,KB*.7C&eo<}z#(^a&*5i+Za\w]8c -|9`endstream endobj 782 0 obj << /BBox [ 2950.86 5611.96 2983.96 6772.82 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xe 0 {"OvI'm| ل7۰;Rh?C`$LEP](Nh,$X%QK2KnbëP-T Jb(wHM|?qYfbSɺ^֮g\ 3I%U.k:endstream endobj 783 0 obj << /BBox [ 2976.36 5611.96 3009.46 6830.52 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 169 >> stream xe 0 {"Ovi'm| ل7;Rh?'Щ6ؿa٨  $V J\\.\ LVW,KB " _7C&en ~@0? +0/r~ݽ#T+$ l-gl/^9endstream endobj 784 0 obj << /BBox [ 3001.96 5611.96 3035.06 6684.41 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 165 >> stream xeQ0N Bn1u/zՀeA-BNp>&SDs.#\ L)7> stream xe10 ݧ qr$ hZIZ8> stream xe10 ݧ Աs$ hZI[؊<;N 3S"'h4fXebA8vO`MtzJRbI(F9w0R&OӶl 9.c2[h l:endstream endobj 787 0 obj << /BBox [ 3078.57 5611.96 3111.67 6892.03 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xeA @ }N$4Z@EZ ?0֬ {P"#T2IZeXU86w`U $huI,59>.p'c2@P ?aB!.30Rar(IN°Bff6 4e:endstream endobj 788 0 obj << /BBox [ 3104.17 5611.96 3137.27 6953.53 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe0;O -O`mb6׷[cwB_sUoX@RdZ A&SD/|S|5Nd5TtJ%xnpD ? x*Ƶ#,(ĥLV*v\== Sh05re3vd:endstream endobj 789 0 obj << /BBox [ 3129.67 5611.96 3162.77 6938.13 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeK @ }Nd2 w  "P]x}.LH>z3eA+BNpoI$:1\e >)5@EC %Q` (*B ɘ}{Rn\*ٶ\z]cp+l05Y=*5g젃/f:endstream endobj 790 0 obj << /BBox [ 3155.27 5611.96 3188.38 7026.63 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 169 >> stream xeA@ }OvfJgN`\x&[ 6/͜ՄFLEP](4KQⲥo`%Z8V2T*Jb#1#eN xƹ3(^auY|d3D7w0ËC:b 5i^: endstream endobj 791 0 obj << /BBox [ 3180.88 5611.96 3213.98 6845.92 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 169 >> stream xe 0 {"Oi'9>l׷sL)?tטEl0T B! d#.U~M:8x Bjs,F%& + 1ǂ/<{4{QJ2,K׏&Y_!yT<7I'<d9endstream endobj 792 0 obj << /BBox [ 3206.38 5611.96 3239.48 7011.23 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe10 ݧ M|$ hZqS8<z7, )e2Au 8CHbJ K2-715@ ¥T J-KM(%|1aA! 
'k\筝֞q *Lo"[fA_]Z9endstream endobj 793 0 obj << /BBox [ 3231.98 5611.96 3265.08 6826.72 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe=0 ݧ 9[B-RaHlYJ>Coqxc,hEHg2k25*FRp'0Kx J* Q` (ʑ4wd>Onn$>׹T $uUjfA_cR9endstream endobj 794 0 obj << /BBox [ 3257.48 5611.96 3290.58 6907.43 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe10 ݧ ԉ}$ hZI[8q9f2a&/c~..`rBrkfB _kH: endstream endobj 795 0 obj << /BBox [ 3283.08 5611.96 3316.18 6822.82 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 169 >> stream xeM @ }Nd2?Z@EZVۅf>xkS=a)Ũ & #ĤDWV)Aqu6w`jQ 4X&"eXI3>.pB9V| ?ÄBf`>y\ڋ-拨J ]aX!>|3pxi=:endstream endobj 796 0 obj << /BBox [ 3308.58 5611.96 3341.68 6807.42 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe10 ݧ 7s$ hZI[زɳ7x*42)$qu;E&OlwLX)~|kw\@X!9+ev-gb9endstream endobj 797 0 obj << /BBox [ 3334.19 5611.96 3367.29 6769.02 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xeA@=aN`u1IuqB/:ՈfL.E jpPeb(vO`ʞcz!yDM%eRbI(4w0RS0.aF!.լ䥲uھg߾g\  ɒRU.q:endstream endobj 798 0 obj << /BBox [ 3359.69 5611.96 3392.79 6715.22 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe 0 {"Oi'm| ل7۰;h>Cj 3H΅LP] j0PeQbsvO`*Vb(yB #jTlKBa9wȤ'0.;ŒBլd^E:mۃm .P cEH*[fB _o:endstream endobj 799 0 obj << /BBox [ 3385.29 5611.96 3418.39 6669.02 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 170 >> stream xe 0 {"Oi'm| ل7L)4!u' 92AuFpHBJnDdށXCň#jTlKBh \!0.ÌBլd^E<-}'L/%V+B^d#p:#endstream endobj 800 0 obj << /BBox [ 3410.89 5611.96 3443.99 6588.31 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe 0 {"O.i'm| ل׷۰;PH>Cb$%,hE(NdX%1\e >ɳk?zhTŒJ(D-wHd̚twTX)r[]]w\@X!U.n: endstream endobj 801 0 obj << /BBox [ 3436.39 5611.96 3469.5 6507.61 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xe 0 {"Oi'm| ل7p;Ph?:ՈfP#8APH&&%...\ LV,˫#D,(%f2)s,0.aF!j*0/NtKn@$@E]8c -|i9endstream endobj 802 0 obj << /BBox [ 3462 5611.96 3495.1 6380.7 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe10 ݧ q|$ hZI[ 8C{bV F' X%*['=\ LKbJL0(ew0RS`O{ NX]u\g_▇-hF p9endstream endobj 803 0 obj << /BBox [ 3487.5 5611.96 3520.6 6326.9 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xeM@ &ZhӚT^_B{_x$ՄF : 8@h$q#Fr%6'kj'\ LJūL((j,*V̙!2'|# Wyu׋?U*W_)X;5K9endstream endobj 804 0 obj << /BBox [ 3513.1 5611.96 3546.2 6199.99 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xeM @ 9;ALf&s- vaB Nf͋Ҕ2UNS0_Xj$RYRi' 璃ƆilXSIጣheaFwJl"MC =iq.uj-Ϙ88 w G-}DKgH9(endstream endobj 805 0 obj << /BBox [ 3538.6 5611.96 3571.7 6130.79 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe10 Ew'0v' ZjJOZH;`˒|hUv3L )e2A-A IN p'0eg_^x JN)DT9ATKGPK0YoxEDVPugjAgl}&9endstream endobj 806 0 obj << /BBox [ 3564.2 5611.96 3597.3 6034.68 ] /Filter /FlateDecode 
/FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 165 >> stream xeM @ F9EN@p׺- o:vaB ZՀ &2A5CNp INYSO`%;EJN)D yopD Ade#L(6 bu3.\<ʑ*j`=>\9$endstream endobj 807 0 obj << /BBox [ 3589.71 5611.96 3622.81 5938.58 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeA@E0 3 Lܵ.<6FMZ^_ZۺB]{\J%cT'TX6rHN@j؁ URQx+>78uWA :cIWC2۪2Idqz8b |8xendstream endobj 808 0 obj << /BBox [ 3615.31 5611.96 3648.41 5938.58 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe10 ݧ I|$ hZ' j;(Ojn $L&E e%9Qb+o`ʖ(FQci%AY"" > stream xeA@ }Ovv:'0q.> stream xe0 {OP[nx>Q[`%j#H΅LP] j0IIDڥ >Xcj!$ 5y* VJK$>e2)s4|aD!`2Ks^p[֬PD{fXC h9endstream endobj 811 0 obj << /BBox [ 3692.02 5611.96 3725.12 5800.17 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xe 0 {"Oi'm| ل7L)4u' fP#8APHB5dށ)Cǣj 6J%dVq+)s,0.ÌBլTyN VYT-m:endstream endobj 812 0 obj << /BBox [ 3717.52 5611.96 3750.62 5746.37 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe;0 ݧ qr$ hZ'-} 8|S`1K9Lwv1'JlCZRN'|Y'gL)E"e8> F2~@R0r 0RIϽ_zBCwG٠+VPf9endstream endobj 813 0 obj << /BBox [ 3743.12 5611.96 3776.22 5707.87 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeK 1 }NĤm@tDatXPH>ݵ 7 fNEPP!i,$q*DK% N&/ Z䆚kRfNs`̩݁qv q0S:-~.#Xc 78b |9endstream endobj 814 0 obj << /BBox [ 3768.62 5611.96 3801.72 5704.07 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 165 >> stream xeK@=3CN`u1Iu>2! 
_shToA2 8@$q~ ^dM=j!Z œT ``^sTKe\ KdXC _O9endstream endobj 815 0 obj << /BBox [ 3794.22 5611.96 3827.32 5692.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe] 0 "'I, ߦ@@6a[7-Gw@̜BPPhH%QI}{ߌgFaMM`RjXL9p< >@pS ]ÀRLQ8i#S&].Zx9endstream endobj 816 0 obj << /BBox [ 3819.72 5611.96 3852.83 5704.07 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 162 >> stream xeK@=3aN`N]xmLkҺN /9\ )9eA+BAI&&)"kjn&Ϯ3=D҄VT F<> stream xeQ0wN  L|s>x[f2}ֹM!$~6j#H)F.U(4:I\_Qbodn)?-DKNl`P,9:pBʜ sw7[Q"skN0_o(  p{x 9endstream endobj 818 0 obj << /BBox [ 3870.93 5611.96 3904.03 5665.57 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 168 >> stream xeQ 0 "'I۴ 6MM%E]Z,[o)P4XRI)E!*7C"e_ x*sGPKr)q3G|f&AQh^z+ 9endstream endobj 819 0 obj << /BBox [ 3896.43 5611.96 3929.53 5650.27 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe]0 '&(Ā -hb%/wZ5`3R"&{ ߥ%9QbK>elf-RdmCwH̡ Ov8w}-T.knwgHq-B g7:9endstream endobj 820 0 obj << /BBox [ 3922.03 5611.96 3955.13 5638.66 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 164 >> stream xeQ 0 "'I۬ 6> stream xeQ0wN ڲn16M!)_5 ' 92Au 8@h$q~DeM}ށXIB4 Zy* B \!2'o8f# WɼyXDߜa,k 9endstream endobj 822 0 obj << /BBox [ 3973.14 5611.96 4006.24 5642.56 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 166 >> stream xe]0 '&(Ā -`be뗶jf$LEPM IIr۵K |S.9_R5ZWtJ%$99|Ua@!p 0Kr3=J t?{ - 9endstream endobj 823 0 obj << /BBox [ 3998.64 5611.96 4031.74 5642.56 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I۬ 6!m@ϰF5H9qendstream endobj 824 0 obj << /BBox [ 4024.24 5611.96 4057.34 5630.96 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I۬ 6.p3'܀qFb_a*<,^F q5@G79endstream endobj 825 0 obj << /BBox [ 4049.74 5611.96 4082.84 5642.56 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ0 '&(Ā -hbeۗͶkTOAr.dBApH&&9˚J'Sbͣ)j,* C$I >o8f# Wɼ·teg#ޠ诟a,k 9endstream endobj 826 0 obj << /BBox [ 4075.34 5611.96 4108.45 5627.16 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I, 6/2O<}Ʋ=(y L)Orvn# 8OZF6-9xendstream endobj 827 0 obj << /BBox [ 4100.95 5611.96 4134.05 5634.86 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeK @ }Nd& w  "v fL>9fH!'؃$~nĈs-.Vh3 ,ˤ<7C$c֌/<~# (ĥLTeiC<ۡ/"hT9endstream endobj 828 0 obj << /BBox [ 4126.45 5611.96 4159.55 5627.16 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I, 6> stream xeQ 0 "'I۬ 6> stream xeA @ E9ENd&s]H+.Z0!]}QT'Zj%Jr%.Nw`b2S QLȃ`PJ, EH*8] 5ǂO<܀Fb0e:/a qI 75*9Dendstream endobj 831 0 obj << /BBox [ 4203.16 5611.96 4236.26 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 157 >> stream xeK@ 00k]x55i]x}mBH ?]n=aI)SJH0$lHRqRG%78r2'S]:Eș 58{.T˾am4(ߎ#64endstream endobj 832 0 obj << /BBox [ 4228.66 5611.96 4261.76 5623.26 ] /Filter 
/FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I۬ 6> stream xeQ 0 "'I۴ 6> stream xeK@ &ZhiMZ^_5Q |TO@r6*ꄂIc!6(qqQ} N&+2ERո* AWȤ̩ w`aB!v `*nKq׹I׹V_\70vsB o5endstream endobj 835 0 obj << /BBox [ 4305.36 5611.96 4338.46 5623.26 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xe] 0 "'Io@tC6a:)G/', )e*jBNp&SDg+#L37g˱M%AY"zɮ\ 9| aA!rUz8[:떞׏x(6~PF6tc9endstream endobj 836 0 obj << /BBox [ 4330.96 5611.96 4364.07 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 157 >> stream xeK@@ 9օ0ZcZօחi(/|wj 3HJ!'8AP_H|#c#ɉ#/ K> lW.N)Dd\.pD @`vƪfbbuM6767|dr #vy4endstream endobj 837 0 obj << /BBox [ 4356.47 5611.96 4389.57 5623.26 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I۴ 6> stream xeA@E 3 Lܵ.<ӚT^_ڦ5Q^vj#H)FUPP i$q#FJ%NV՗`j)<)Y@ͮ*A)dd|^ 9U|{Q ܜeoeM/$yth5endstream endobj 839 0 obj << /BBox [ 4407.57 5611.96 4440.67 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 159 >> stream xeA@E sw `ƴ&Յwڦ5Q `ך)v/Arv*V b!7qdU} N&/yv*B1ʂ($d|^ Y APS{Qk0np`O/hLith4endstream endobj 840 0 obj << /BBox [ 4433.17 5611.96 4466.27 5623.26 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 167 >> stream xeQ 0 "'I 6> stream xeM@ ]1IKX Z3 #H΅9 8Z$q#Fr#N>/ TjX:PMԝ`0J,  \!1k8g#,z[ƴ-ߍ#65endstream endobj 842 0 obj << /BBox [ 4484.28 5611.96 4517.38 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 159 >> stream xeA@E 3pw `ƴ&m^i5Q `ת&fAJ1rAIč)A+'|hLdF\sUY22NB!eNO<ԺFaL^t[$nX=b 5"endstream endobj 843 0 obj << /BBox [ 4509.78 5611.96 4542.88 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 156 >> stream xeM @ 9EN@p׺i o4!|=CaI)ST# $9QbdW}.\r3MRd89| KGQ- Euoo_ ߍ36f5endstream endobj 844 0 obj << /BBox [ 4535.38 5611.96 4568.48 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 155 >> stream xeM @ 9;AL9uD+R @]cюԓƘ8+vy2kn%ѩBFnt“SNJ/v+*q  Rd/RJI0ERf {4> stream x255QH/*247г4T0*r[Er"Fz@.HWB&(+ Lendstream endobj 846 0 obj << /BBox [ 4586.48 5611.96 4619.59 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 82 >> stream x255QH/*247г4T0*r[Er"Fz@.HWB&(+ Lendstream endobj 847 0 obj << /BBox [ 4612.09 5611.96 4645.19 5619.46 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 82 >> stream x255QH/*247г4T0*r[Er"Fz@.HWB&(+ Lendstream endobj 848 0 obj << /BBox [ 2174.59 6938.33 2744.34 7176.54 ] /Filter /FlateDecode /FormType 1 /Group 526 0 R /Matrix [ 1 0 0 1 0 0 ] /Subtype /Form /Type /XObject /Length 161 >> stream xEA@=:3'0q׺FۘD]x}i#d'zw2A!)$MJRq2 sh bJ[%5Emș;rx7`\8JW`*Qymvq&cr2UI PA5endstream endobj 849 0 obj << /Filter /FlateDecode /Length 8719 >> stream x=˒\uYӮ|AS,zN GRY9.Wl&YY4969MqHIsiTJ ]By{#|c99^_‹RӜ\ ̫Wx/{EqxvT{fwN.>*I+k>i;gJ~`|޽GTT0U&){F+9Yv<F8+<Cڢ^.+X/MU}i>)ww8]:6<bKlC]<~ɟ]9/ߧvopsJӟid"̭>-n̤:Xv!<&}ЉOyG#C#L%mN͕6qш+ )X:' 9g=RuL]x]z]m&)@G"ѻíuCևD!X]5}lq'WV{gu-2}QnmF}d[ƶ#DNm wnc_7~"LNt"O#Zb:tG%717".E xDψӰKߧf@m;J߸x6_IP xqɡ 鈹3rTF:zO/h@¸۶o E%,7|zm{Ԫrllr 
?P]7LH\Uh_Ɏ,\3fEhH'jcmZ;-x%cmr$|:9 K"XR$dHl;PIV̈́uW 󈬬Ա ySGCƪ EjU(%0`e6l{qbDl{}'go_ص? \M3h09{yAaP:\|)4%t`gE$ ,}Np.Np0֒CN2(P@="XDT7 EoCTqs}ŝʙˣJ#]5)~$&@=3قJҡ8FOhnYO<9Z&9MIhgӹr Jf.,;߂cvOifxB DOȃ<8,' d!.uX3!A#@k3uT *JP+Of`PX^2\(%:弑|>||5y. sV=>vƦ;c ݎErI~؅;z27UFcVͭty #gz2݄)MeAItbyJ*V&Z8q63aG3͠BrӃ&cY$>T>6x-,QP^ ,^+K:%:be%oi+MQ&LlT~;^QϚJ:VUy;Iފ \&0ebۇKfb+8s06(hm"1<=oc__TE-#yW2PH MlXw;x:}}hڣi Oi{K7R2=~h6e{|7n:8]'t8Ǥ)K Uaw۴ ա=nt5lA5߳IQ, t$tS/˂'m> }?M=e@ cLL ]#(>B "ɐ2@QI5'S9#c(Thoy0ί` T >;$(0&Hꝅ]1hl\0Ѝ~&ޣv2 }i,b Y,켳B eO~ő5F LH$ Ÿ;S 4x%V6쭻-/d*؟P Dte AbWas"w}g +nYt6.}41`ó0ٯƨ1M|Śצ)$[.BL(wӱC!s!Nu{vIrpiGgDg$ClTg7{ƩjFhсGGaAkA\h}1l "CK^<0VoG-ڭ"s"tso湇l Huew!~}F!.W1ܕ  mr:,Om^,\EyNvK4o:BaIN"1Ij(p9≢`Se(OH5;ITqc2?/ɐpʧIa {[SY}'?'mǯ< Ux_7^pa0hA)&@1La hZ2,ɾ\;AJ§ǛgQ(Z!5^ h5sًz(: Rw'/YE7_!2hz_Mѻ%|d StS5+0Ѻ}- ;R߰:Y*R0ܛma >VSx`zrfʅLS:Avm1Bвc $e1]1^ٰ%VD{JA@5Yׇ#.%.MV~l9S1gW{>آ|ͪkz BhHSq8IC|*$0.$r}.w a`mE+k^֓y.#?VYUf(Y"=Wqy=S 0 1ٕ}oMV&sy:ݭ}d㇪t]DgK Ɨ/cYE\,܊AB Jף2fRFL,"Gqe!VP۷%P@}ml?/2yXxI:f=?+MsQr94 bqd刾I# שm\ s+ɵ1!+:}TCK{6f?l=]|e)Tg]*b:/U`қW6O=m˱|Z&:LaG6XDZ@=7|~2y<)H.G9Q*`=.w% i8csezF"쵳XgwdJ(*X˳>d՗(8j oDcVw6N!nPS;KS87=H@ZuSIa1HMsiu 5?AdFjЬͫkUj/] @ K\(O4X\XZbGJq ē 79>mאdy8?s@& j*|D6fdv G;B}5 ]LWR|\-]P2p" JdJ16V5r9! E͕pId"_TzIPT;2 daJqB )ЀU]- u_SIk{k=.}K 0˟ q EC\<> =̢`ս>UFTsDN+;l2x>sZ.$yl3elע%`ZSM A"vXeVL`Dž908emSwoSG;z|e(Z|Vt+Fwۇ^LMHcg (f(}hR]][)ۓx$Rp3NGqSJ辢N&X1dSͦ|Ꭽ,(#ė*JH6?&dU[CI1 )V//fYKO6٨Ɯ$`B4jn_ha[~\uf,lmSCP:Dmx%I$v9qhoߴGQ[bLvw+ v+pS] r~K9gu!{XoNn/5_n:#b}ު$Iw.$r h8:e) 馕jtEF(@%\NcwCo,EE[A%sQ~,0Vh87e/cY}ԋd]uN{̋W ݝ/'}&ĦQ&@۟RӔ⁚<< |NՏh*jy8IuSFn<A7ld1WH6-zcJ&)zg " `| `6|[omT]W: %N5LRЁu gx(rHإOon|Zq7֛clZ:GP!z\W:` dN.[|d^O)X2ئX=)pF44Gι=0OİJޛO|QOgg9JQ +O{џoKBg lZy}PuWL\I]i/Y-hT>#bC[O]H[vdpo4x>9ٽgVkhOP3 ZlOWi+'fzAɀ³}ego,V~VSQ$=uelokuNca5j$ G}xG na˵F8!n%ϹO @Xǽ~ hݬTR(`A[/vG^%FeJj @q免!gw>f;_20{Z$@8c׽Ji@EZ_m<_=\5 f^DN2{ZiDʊmlĸtp: D@mXdwF-P,ă7p{2߃u CQH# vu\%:LpŻRL^bQX 4vIfIu HӬ*BL-h/^7%#>|dzi8'5[O"5([< 3ҜAb)Hc-NÉ+tt͓<40xl?!p%_+䇵AWWÔl%Ajͣ[;U)R2d]̃҂ ]=`R,V߭g4=†Lcf|cqj#L W*ʥhI]B&.Yz(AC^:pSR^J o Jqin,uz1F,vG5EDrQ<`ndnq[9z|e@D xcPI/2Z7}+Yv]zK{J"p:$'xj4VYso-8U?̄yas>Vj'"kO{^Kơ7L79掋)13?JQZ$" A*7zxJBw팋>Qvp+W>//'5W*c&T3NW#YeW'uU +j攈~;)"h:jQD] ӝ"Pm:w fIc)%cgnj̫W~WU6ڋ0lqc,vqfKS|LD pƜq'#N$ݒH*C ~]y5 yKa9;vˮ+1@ӓ7rI>=`kPKշ [SqFXȎ]9x0~Կ=_}RSaD\=)OEc_f98T_P]cHa(['TCP\|H]ۣՅvFxG_6{{}ap #dƛMKT6*\}/r|pMO}ɕZ;p2Eg*p]*lŖr[.2`ޛ\0\hrDw* t[p8(,8qH=$qßлuԧkV@X_p2$)֎;49}zx$DͷeGYDe)na?bpr-#evA rcfp=H 3r8#=դv|tH9t W":zI\:(;t&Ind1K@7j!*Trs] ,C o@͊~H `Tw ZSJX{b1Z+ Rn:!Ȼm ͙03X%"k=,ɢEW7 7rAI,YcXc<]xƀ˕ő=vf:-Jٛ0H%XIW{e]cݫ&8h\ͲH& 9RHrjg¶Wa9⦞MQLcGMMeMA6PC{4o 7l)_˩У&ϟpr>h',d׬9ڛ&q^"+;ݐqcd=SfUI2M rEv.vymO|}{tھؖ\U"Oa vG KS'_txe(T+Ose~QmN'h˾m P H׌W;L7-F`^JV^!()rNf2>׃@R4]|K׆%vQ F:rJ *faYyBDrcc.8VNp#NQEOR̘Lr1q0՗ЉMB 5+rQgS FIr/J{'ID ŒH'@tt~k( FN'% CdMwO9J"Usd#|NTkbmT6bGUDډ; gF3lEj3wr*z8RHѢk 1 F<}Eendstream endobj 850 0 obj << /Filter /FlateDecode /Length 7739 >> stream x]IsuWpSCݪ3X^$[aIP8F>4 C DSI*A4 չ{kb: ??1E||cSÓ_+1/8n'2s\dsB:.~vV`tw/TOw阍u)N/+م;q4Ax9GoZ3w4l0=v4 {qSpF0Ӕ;Ķ avw?~Y-O.tnQMɯ=8qF/tpWtEầ2L 쭅cjOOL%䲵4.;~~{w@Cqo`&@4>&wtb:(G <Ze'z1ڃ-Q^-Nk6jF Wg'r]aö$a2psIdOS(~fpDH۬8gpٵj34b1ƬI)>~ML i|zSݟ!;c/$o/c;ɡg`ܿ𫦄A1%arWq^ ALx~4Ixj1by_t:MB$Ѕ;"dOGlC(ĂR'J3[41UиJ 0EL9'9w(Qs@dkW4vĤdWoR^.#T+oFT).Cr)s'FlĜ.2aQO| s $+@&? Z`uLO~)鄂L"N5kzdn 4?' 
[vpx[Uf-( %^6\R̽+dX`kYz_6 Nng HvtG|NHl~/$ m zW@Rihp|򺝧$i,ݷ=~@/}5`%@!#0@eX*2V(Xeψ׍/zN!΄P+p I _ӬɺT(ЀPTN$QB7:xv;iAJ- ]SJmL He$۬dk~cQ8<)UK/vi>;ހ~$t1W4n)@G*l HDdF, +aB- Dy:y߿4oxnVzg94#Z9]ǀ@ ɕ|:Y$3tTRg 7+Gƀ9__޽_Z0sX)eJ @Ch*na 6r zdiӼGeB/ Vвk̦8.?`FVD>: S:5+~v\ϩ ,mΈiI@BOr[q*mkvMc\E]M{'} a$*['4!h/G%5〦`K${~l$Cz[Z5ԽcGy҈O6X$R:^"D*VI\  CP0֚ts]lh$+&B@ 7ٕ[/x-CʣX#6X8d2ŝzhLƕ&Ams!3;Rqb R+1̀~j̠MJ@I})LP5O:,2 $U(mj\S#&ьJ:gr O +8AŃawk%8ŀ8ʼno/>#iAVܺ)X[ "eѷnW4:A!6O2!sl뺈4/( %$~Оxʃ%-O_p^6cÁv8Z>(o'XxNUM=~cMk}f &ꬢ<*%3_ش"%ng{f•pzD|'') y#=Q sqۅҧ7Ld1Dgk 3z=ѱFE1BlQIO$:wKymZ7|Wr/=/Қ_=+S `{{4#.yiO4l=o!'DLr,_?yᆳ xˮtR>L4?j/,_JQ̈́FU bl^}u ! uP5[x!Nͱ'&S$vU9ץY2B3 Պˉ,rS7Y>r;Ҕf ʶ2J TaiOhGسAyd;"wP}D|9=.7<'0Տo! $ v1 3/hO67ϯo$| L>5d#sٰ¡ ;cy>`(5!SG!z4S?`0*GR KBxyBo'BMq=tW"uT"]F׼NrnJnCmr{q IXWl) x%S+?.W%Sz"2ʾK0 ʰ[r3Í0Ph]rSew ]݈ H@w6>N:mM |$v Ծ_,óxų07ܗ/T96+;Н&B A[i;}h!#.4S^͒Eǁy%RRT5LE]gaI1T>_n⦠#>"{*ΏS;Kl 9C>ld\D)Bp~nށf*Jw~#eӫv*YW׃~LEE<+,ZX exubsp6|Xr[Q{E lu"5.Ho1CFA\6-x DyʀAVyߏ۔-rptA6XRio֍vl7-<,Z* A; [apuvءDM|eJ9O|z$! L`+и+L"F2Bd_Gi7gNEK~`R 1F* b9=fW: |1$v=zsw׼l ] X?lzL 1eBjGz1z7%a'nd?q:}\mg>@̋bDR3K^ 2:j`J.,[vx+3eΠKƍH+SPAW)[w  T_ȱX(k,O99R,C]⟫ o…&/,$NJY7C3-T6o!ټc}2Uc|W£yW)">#g00B^.0!曞OTy*aR+:g);Wx" Td͇섈~^ف:G@CLBDy$\Ҧ3xl˂ot9Ik~b?yS"#B~,`O ltS']H^83tb</ uQY5#4}.!v; ;HQW-.w}6WDFd uF@ SS7DgWOuy(W3ESCSϡ)%9FEI~ ,{Ji>MZ w6:.{YO ؇p_mيdŻ/z_Sc]c!ŵr:U[WsA["?_<(Uw4ʕn}"9j_zj%a3=P&t CkK˘v]>i(q,JL2w᷇xߴ/wy?ri"xr_WcjPG IehP#ɴbiokkitlp⏅Ė?P3EP*4^^U|^C{·;b$=`p>sG}!yڃ3Q=eg++7v| fa,d/}'b.)F݉[(i?k`Ucʺ*+^ѮҦYY&\0jr #d'_i:$QA|h͂~-\6]^NԷX6`epx$EV4|qNOFꢹІBwgk΢53yxSc6cR:#2w\Tf]S P>$8DEjwtص;?ߜc|u3*Tn)̡|A%'4+sQs_W€e4_ xWEt =!TI">> :{2ܬV aC߈.M $y6verV%d \2BQW &ݼƮ ^E=E+, 4Jj~}TWJQYD! 'JYV7nNMb-Aalqu.!1SҰfQF~H-vbMwIu򠤞/ψpQz[=Vyq~īNyuVWyz!xYk/݆D5sdW-Ajw.fh7SI+D3?(Pݪ~EcqCT3Om4jJ4endstream endobj 851 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 363 >> stream xcd`ab`ddt v04qH3a!Skc7s7˚ﷄ ~O XX_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*c8)槤1000103012ԽK2>}wy߯jcws~7yŞ>~nSg>;3wߛgO^-ym̾^];ǂXπ| wt\eXp~`:endstream endobj 852 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 196 /Subtype /Image /Width 269 /Length 6121 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK " }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?RP #8CB۽o̎RAX}E# S,@.5AC])#Fsҗ#kPtQP*}Blu/ֶHRj?f̣nè쯎ѷڏj̯?\Rְ_h)@2_kdQ ^#2> VҐGClSkLGp)PfrQj4{Q{3?Ҝ 8WA\t 8}+CbPTv~ iaK0iZA@JW!7'ޕ) J= %(8W=vړ>C<@/ p1j}TQZeªDzR1U,\Tz͒Xf.-u:WYO\jNf&BkR3JL#4+/hÑڌ iwH.WM|FzY&_?xȣ˩0)U*^c8 8 p^ѓvߥ9(ːԚڅ剃0;y~чDvSlmnE$Xx—r"A*jin Kh"22f٢}oeK |I w˟(6lL|Ou" lO}=Jn]-`.9Lr VبI%}fv#3u95bM<%O7#zK^cko⑲ kJVLI<֊.sL5+FRV"?V^MfxOI!RFp3\{ pū:VwB@ye6BIiuXjR4]!V݌~)֭Wa?% cu ZE DAQRR ڲhgq꫑SY)w1RDu!+H!\?4 IMRyMQg~#4)E3Oϰ{n8&$TsH&Ta{X$[r~arewCwj;! 2Hd gdһ)J\E<jr͈$J~5'$pGLEuu.$`wT6Ywnq  s2m- 9ҫi?k\B0$R&葥LI[wdU]03$VxmߴXT[ڼC a":=*utΊv#a*Q"Te#8djvil;G{rUGDV~%.74gbqM9{ϡn F/GƲ%82Yjrw#4﬐ܬխOw$-']r}j>ƽ^DyW2`};wesU{oIEAJ${ jldd/o1]I^t?FCq/C͸*3UrG3;պ,*Tb4cq/vmT ZREIe_k9%\:/m5tY* s5-,׵g2E]ih ?qꡥW,AVg[.NOjCp]ҭ<pyV~+$E|2Mdudzi5B>)zO+&Opx&,tHb4s[3'?SLj-YMX=+8 b(?ʼn&/mqzi"\A椱2-yd WO%"`6 _XV3ٛ-0u2qL$t1cv$hF[Ċ$GUc}iEbd* {mZƉD zbqxmF#Ӯ%p<>P1T)p\:#m8ښ'(x9>ƽ#e<#k2 ##{>{03RYbL_%ߞaY 򦗉P`zJ+[Хtz*͔}U3Zı'L(?1T.nFz[)t.WMEhrIIH`?O&lѺydv0=G.aȍ !+6+Vm~ÚN3Yp1`&Jٖ̍7l'L{fs7Y 8aFSrA!2,I$OSL)ǭ)Ԗ4 \@2QCU3KS@j#F; SWJǞK1*` 1Pg$fܝU<ՙOȾ?ʡIDzPUGju4@S™A^h %u4ZZwQs'D 0(H.<_KqUJ;ʇ.*œnݱU^ԬDi~~} GLlSBc_Zi5_"|9D~7m^if̸ uy "bL=?Tg{ޓ4iu!jnyT+74?5bF0F{5S4X FF$9E(4mg54XcEFZiӁbX.K\! EEQ)AKѻ4XDQNrmh&4sM@Z(w8q횒HT$?) 
Da՝o1=SҞzE?ZO*?O֌PhLymJsE 7MqH*y}bvHҌZ4P1sK)G4҃L?8q=2}EySB$qRM^Y$@+&[W>Uʲ'X%1HPZzsLCi J (٢i3IHx ҡmI\QXRCjc \Knc"@e_7' rHj\U_a)r'Rg33*^U݈֕1LҰ9߿'Ull0WE#[/嶶f#ɡ"I3XevHClc"NiSz@V+FП2HRRGzЖ%-!:W-4xUϮ(TZ1=T Ԟ ;M|<@1X*Ͷn+ڻ5FP7}}+XSs&@3Im$ێzUm٫SKr~o*6j )7FQ":Ef:lAFeP2@ѕwiIkiZ |²~_y3l~W>&VS2t3ig!d>aM,r=6Mqw?"9p+/b$ )h'$YJsZݩ6?!]D2rߦ9b0>mJVf,iȹچ -.n"GVݞGHGJ qUoHQN2jV#G[\Mq^jƘ*UA="S-W+)'\q7nJ_<˅T?JRRX|Csބ\vO`O[ =) `Gc2½֔E^Bccz)A.rX ˀ,0pj`~gLPkEa\r?_{NV4ǥS+fu%nZ]JNSB0 :|gJԿ(T\UYͼ;qM9bI9'.xTH)bڊRQKI@-%-0,8;@zQ\` J UžxLU% sY\ܚ`1vL~u~0zTyIE#;}j66B*>\~c}*>t9cZ.C*Au(Jv\ow hO8Sr\A໭]{v΢їF}rH91$nnzEW?TKY3JJFiƐPz+c6> ?hn CYu,O8aКΙ$fn?iD۔-P`dV!$ZūhQI4ߖ8Lr9:kM#E%f1nvHTTՁ9>Fi1b{Z"8/Q{ƪ"dFEBtJ\PPKIZ yq5RϜgQ{ QKq$1V* ubXG*I>ъey7_դn#ɥi֐y5KɆQԺZܷJQΚ#]Sk=nijrd`۳va?{y8JHtQDNȂElbҀh+94FKv3T= fC@vF#6*nf#-yG9CU{)bH9=MM[lzq: lJ*RzPҊRR(i -&iE+NBTOb"iAE2NEbZ͔F'֊+Ha(QVf%-PAE SEv14Q@1(QK((Q@z^Q@endstream endobj 853 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @2 0~ 8ʀ0%!tpw'0^GGpȍ%pu[ΖXpm!',31ܔ<݀wxJyɫӸzȺ;czLqJg4m *)J79l! 47I,|J K6Sendstream endobj 854 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 399 >> stream xcd`ab`ddds4H3a!Ckc7s7ᅣ ~.ȘW_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*]&s JKR|SR3Yfظ" g.qcu~.]RWW4f%+(?u)}Sv8*R5Di[̙}-s`nJ[k:[uweJuWqDL?aw]+k*Z##9*/?q֌U|e ~8O6{ Sq=b1ùjendstream endobj 855 0 obj << /Filter /FlateDecode /Length 4388 >> stream x[Ksʑ-`Ol;;CaEڤljW&)O743|I|04FJ'/>:GőVb#ၨoV;xpoʴ*oǰtQD:ሾ(WJQK֏Qob)+/1z!LvTUj RpG'_N8\P Ʊ^0VRh+\[Ǡh 0O9aSG-~_|z<|5Z1%.j.P HE5:PX F99 Wm`)S^*|(۩O8! ?Aک0-aґ kpst:2*Ӆb&G_1-`,h20E bB + ɾ>Ga)nMhCQ&jn! ؼϒ`c􁬅cSsvb &HmIC8yS~ 4i)|3!\Lڲ1z0t<$xa'm4biJrh^X~;AF& Y5\pN.z&8=|u,G _g{Y+M#=YqM# zL9 Br޺w&>6®X-=ņ `>)F[j3|2+mc ¡5a%%z37jܨE뵵 LfA>N_rxg P` dؚEKCRf(3b^Zɖ®ǯIyY'r=([w h ) +ѩfnv{C}T~譭Ё?f ^h'as'.9TNѽ45ߘ\jNIEQM׉Ծd2HTQ`q嬊[[Vа$0%X[&ÍzxY-9&gpMw&d7N}v9_2D\VC?٘y:!JbÀR[itBiN{~B8a$)@p? pܹΕCM@|vJ"_;1ܒ,&1JQBi8wU0ûxg(뇲˵ S hQ8732JPzV8g0CJ@ q>rlR(HN.`\ 2ׄq s$Sޞvy|AL>8i:CLyMQxIa8HaŕDD[Anjєezp09݁ iamg@` LD'!& ,j/gebKL[*CT# ˹dH:'Cj9a ~.)c3lb11 iA27`r˓GC`&V>n1HG[ٔ)0׳ e?j;Dz3/М)Z{ i"m:f*DVQɋn7FRjWfɌ=jH`™/6JV: "%Z#x )АN& Hc -LC}A01B`,PIm >y^OS5 b 2kSJϒz!_*o 8:̑$N|GdlAcĎdQ=( _|4VýѸ$k*_$@X qްQ'<c($_3$dLxUM4Pjc䕫iCT'E|%9"UOCߤ!iD@f-\)eU>;j)]\p]wl*X kPMDAVoϨ<޷Af3JwQ"t\:utFHՔ>--I\Me~I=<~T7 wF8( f-\aCoP?X5oHW'ЍԞf*ٷAKY9Dӓɀ9cJWƁhGYf{@nS: [9F#Ln5gi3NB1(x|X!dFL DmJt{(̴Me3gU%WM:þ(\V_3Vm`2y/AޓL .ˠ5As_cؔYf!1~N;mRM9\} ͪ8L+^t)Pvz[!話2TS-cemiӹXiL*# ߐ18/A?F[vik xURI \pm?T9a-5 !募O:J ؇ Ji3rL[K1< /{aCBIxi#%j; Aq$Pр]In)ڦEMɌPcV4[&f>C lY0yU"`큪.!v5i\dXmouݮJtъvaSOGܭA&%_lOy8{xUCȔ=2tފ{d۩'6ȒXHȖe>"gJ0 88G׌TQ{^\x1:U!}~CSYBA. 
[compressed binary PDF stream data and cross-reference table omitted]
metafor/inst/doc/diagram.pdf.asis0000644000176200001440000000014013150625652016466 0ustar liggesusers%\VignetteEngine{R.rsp::asis}
%\VignetteIndexEntry{Diagram of Functions in the metafor Package}
metafor/inst/CITATION0000644000176200001440000000131614052500052014013 0ustar liggesuserscitHeader("To cite the metafor package in publications, please use:")

citEntry(entry = "Article",
         title   = "Conducting meta-analyses in {R} with the {metafor} package",
         author  = personList(as.person("Wolfgang Viechtbauer")),
         journal = "Journal of Statistical Software",
         year    = "2010",
         volume  = "36",
         number  = "3",
         pages   = "1--48",
         url     = "https://doi.org/10.18637/jss.v036.i03",
         textVersion =
           paste("Viechtbauer, W. (2010).",
                 "Conducting meta-analyses in R with the metafor package.",
                 "Journal of Statistical Software, 36(3), 1-48.",
                 "https://doi.org/10.18637/jss.v036.i03")
)
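A minimal usage sketch (not part of the package sources): assuming metafor has been installed from CRAN, the CITATION entry above is what R's standard citation mechanism returns for the package.

## Hypothetical interactive session; requires an installed copy of metafor.
cit <- citation("metafor")    # utils::citation() reads inst/CITATION of the installed package
print(cit, style = "text")    # human-readable reference
toBibtex(cit)                 # BibTeX entry for use in LaTeX documents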