lava/0000755000176200001440000000000013520662622011201 5ustar liggesuserslava/NAMESPACE0000644000176200001440000003743413520655354012437 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method("%++%","function") S3method("%++%",character) S3method("%++%",default) S3method("%++%",lvm) S3method("%++%",matrix) S3method("+",estimate) S3method("+",lvm) S3method("Graph<-",lvm) S3method("Graph<-",lvmfit) S3method("Model<-",lvm) S3method("Model<-",lvmfit) S3method("Model<-",multigroup) S3method("Model<-",multigroupfit) S3method("[",sim) S3method("addvar<-",lvm) S3method("cancel<-",lvm) S3method("constrain<-",default) S3method("constrain<-",multigroupfit) S3method("covariance<-",lvm) S3method("covfix<-",lvm) S3method("distribution<-",lvm) S3method("edgelabels<-",graphNEL) S3method("edgelabels<-",lvm) S3method("edgelabels<-",lvmfit) S3method("exogenous<-",lvm) S3method("functional<-",lvm) S3method("heavytail<-",lvm) S3method("index<-",lvm) S3method("index<-",lvmfit) S3method("intercept<-",lvm) S3method("kill<-",lvm) S3method("labels<-",default) S3method("latent<-",lvm) S3method("nodecolor<-",default) S3method("nodecolor<-",lvm) S3method("nonlinear<-",lvm) S3method("ordinal<-",lvm) S3method("parameter<-",lvm) S3method("parameter<-",lvmfit) S3method("parfix<-",lvm) S3method("randomslope<-",lvm) S3method("regfix<-",lvm) S3method("regression<-",lvm) S3method("rmvar<-",lvm) S3method("transform<-",lvm) S3method("variance<-",lvm) S3method(Graph,lvm) S3method(Graph,lvmfit) S3method(Model,default) S3method(Model,lvm) S3method(Model,lvmfit) S3method(Model,multigroup) S3method(Model,multigroupfit) S3method(Weights,default) S3method(addattr,graphNEL) S3method(addattr,lvm) S3method(addattr,lvmfit) S3method(addvar,lvm) S3method(adjMat,lvm) S3method(adjMat,lvmfit) S3method(ancestors,lvm) S3method(ancestors,lvmfit) S3method(baptize,lvm) S3method(bootstrap,lvm) S3method(bootstrap,lvmfit) S3method(cancel,lvm) S3method(children,lvm) S3method(children,lvmfit) 
S3method(click,default) S3method(coef,effects) S3method(coef,estimate) S3method(coef,lvm) S3method(coef,lvm.mixture) S3method(coef,lvmfit) S3method(coef,multigroup) S3method(coef,multigroupfit) S3method(coef,multinomial) S3method(coef,ordreg) S3method(coef,pcor) S3method(coef,summary.estimate) S3method(coef,summary.lvmfit) S3method(coef,twostageCV) S3method(coef,zibreg) S3method(compare,default) S3method(confint,effects) S3method(confint,lvmfit) S3method(confint,multigroupfit) S3method(constrain,default) S3method(correlation,data.frame) S3method(correlation,lvmfit) S3method(correlation,matrix) S3method(covariance,formula) S3method(covariance,lvm) S3method(covfix,lvm) S3method(density,sim) S3method(deriv,"function") S3method(deriv,lvm) S3method(descendants,lvm) S3method(descendants,lvmfit) S3method(distribution,lvm) S3method(dsep,lvm) S3method(edgeList,lvm) S3method(edgeList,lvmfit) S3method(edgelabels,graphNEL) S3method(edgelabels,lvm) S3method(edgelabels,lvmfit) S3method(effects,lvmfit) S3method(endogenous,list) S3method(endogenous,lm) S3method(endogenous,lvm) S3method(endogenous,lvmfit) S3method(endogenous,multigroup) S3method(estimate,MAR) S3method(estimate,default) S3method(estimate,estimate.sim) S3method(estimate,formula) S3method(estimate,list) S3method(estimate,lvm) S3method(estimate,multigroup) S3method(estimate,twostage.lvm) S3method(exogenous,list) S3method(exogenous,lm) S3method(exogenous,lvm) S3method(exogenous,lvmfit) S3method(exogenous,multigroup) S3method(family,zibreg) S3method(finalize,lvm) S3method(formula,lvm) S3method(formula,lvmfit) S3method(functional,lvm) S3method(gof,lvmfit) S3method(heavytail,lvm) S3method(iid,data.frame) S3method(iid,default) S3method(iid,estimate) S3method(iid,glm) S3method(iid,lvm.mixture) S3method(iid,matrix) S3method(iid,multigroupfit) S3method(iid,multinomial) S3method(iid,numeric) S3method(index,lvm) S3method(index,lvmfit) S3method(information,data.frame) S3method(information,glm) S3method(information,lvm) 
S3method(information,lvm.missing) S3method(information,lvm.mixture) S3method(information,lvmfit) S3method(information,multigroup) S3method(information,multigroupfit) S3method(information,multinomial) S3method(information,table) S3method(information,zibreg) S3method(intercept,lvm) S3method(kappa,data.frame) S3method(kappa,multinomial) S3method(kappa,table) S3method(kill,lvm) S3method(labels,graphNEL) S3method(labels,lvm) S3method(labels,lvmfit) S3method(latent,list) S3method(latent,lvm) S3method(latent,lvmfit) S3method(latent,multigroup) S3method(logLik,lvm) S3method(logLik,lvm.missing) S3method(logLik,lvm.mixture) S3method(logLik,lvmfit) S3method(logLik,multigroup) S3method(logLik,multigroupfit) S3method(logLik,ordreg) S3method(logLik,pcor) S3method(logLik,zibreg) S3method(manifest,list) S3method(manifest,lvm) S3method(manifest,lvm.mixture) S3method(manifest,lvmfit) S3method(manifest,multigroup) S3method(merge,estimate) S3method(merge,glm) S3method(merge,lm) S3method(merge,lvm) S3method(merge,lvmfit) S3method(merge,multinomial) S3method(model.frame,estimate) S3method(model.frame,lvm.mixture) S3method(model.frame,lvmfit) S3method(model.frame,multigroupfit) S3method(model.frame,multinomial) S3method(modelPar,lvm) S3method(modelPar,lvmfit) S3method(modelPar,multigroup) S3method(modelPar,multigroupfit) S3method(modelVar,lvm) S3method(modelVar,lvmfit) S3method(moments,lvm) S3method(moments,lvm.missing) S3method(moments,lvmfit) S3method(nonlinear,lvm) S3method(nonlinear,lvmfit) S3method(nonlinear,twostage.lvm) S3method(ordinal,lvm) S3method(parents,lvm) S3method(parents,lvmfit) S3method(parfix,lvm) S3method(parpos,default) S3method(parpos,lvm) S3method(parpos,lvmfit) S3method(parpos,multigroup) S3method(parpos,multigroupfit) S3method(pars,default) S3method(pars,glm) S3method(pars,lvm) S3method(pars,lvm.missing) S3method(pars,survreg) S3method(path,graphNEL) S3method(path,lvm) S3method(path,lvmfit) S3method(plot,estimate) S3method(plot,lvm) S3method(plot,lvm.mixture) 
S3method(plot,lvmfit) S3method(plot,multigroup) S3method(plot,multigroupfit) S3method(plot,mvn.mixture) S3method(plot,sim) S3method(plot,twostage.lvm) S3method(predict,lvm) S3method(predict,lvm.missing) S3method(predict,lvm.mixture) S3method(predict,lvmfit) S3method(predict,multinomial) S3method(predict,ordreg) S3method(predict,twostage.lvmfit) S3method(predict,twostageCV) S3method(predict,zibreg) S3method(print,Combine) S3method(print,CrossValidated) S3method(print,bootstrap.lvm) S3method(print,effects) S3method(print,equivalence) S3method(print,estimate) S3method(print,estimate.sim) S3method(print,fix) S3method(print,gkgamma) S3method(print,gof.lvmfit) S3method(print,lvm) S3method(print,lvm.mixture) S3method(print,lvm.predict) S3method(print,lvmfit) S3method(print,lvmfit.randomslope) S3method(print,modelsearch) S3method(print,multigroup) S3method(print,multigroupfit) S3method(print,multinomial) S3method(print,mvn.mixture) S3method(print,offdiag) S3method(print,ordinal.lvm) S3method(print,ordreg) S3method(print,pcor) S3method(print,sim) S3method(print,summary.estimate) S3method(print,summary.lvm.mixture) S3method(print,summary.lvmfit) S3method(print,summary.multigroupfit) S3method(print,summary.ordreg) S3method(print,summary.sim) S3method(print,summary.zibreg) S3method(print,transform.lvm) S3method(print,twostage.lvm) S3method(print,twostageCV) S3method(print,zibreg) S3method(profile,lvmfit) S3method(randomslope,lvm) S3method(randomslope,lvmfit) S3method(regfix,lvm) S3method(regression,formula) S3method(regression,lvm) S3method(residuals,lvm) S3method(residuals,lvmfit) S3method(residuals,multigroupfit) S3method(residuals,zibreg) S3method(rmvar,lvm) S3method(roots,lvm) S3method(roots,lvmfit) S3method(score,glm) S3method(score,lm) S3method(score,lvm) S3method(score,lvm.missing) S3method(score,lvm.mixture) S3method(score,lvmfit) S3method(score,multigroup) S3method(score,multigroupfit) S3method(score,ordreg) S3method(score,pcor) S3method(score,survreg) 
S3method(score,zibreg) S3method(sim,default) S3method(sim,lvm) S3method(sim,lvmfit) S3method(sim,mvn.mixture) S3method(simulate,lvm) S3method(simulate,lvmfit) S3method(sinks,lvm) S3method(sinks,lvmfit) S3method(stack,estimate) S3method(stack,glm) S3method(stack,lvmfit) S3method(subset,lvm) S3method(summary,CrossValidated) S3method(summary,effects) S3method(summary,estimate) S3method(summary,lvm) S3method(summary,lvm.mixture) S3method(summary,lvmfit) S3method(summary,multigroup) S3method(summary,multigroupfit) S3method(summary,ordreg) S3method(summary,sim) S3method(summary,twostageCV) S3method(summary,zibreg) S3method(totaleffects,lvmfit) S3method(tr,matrix) S3method(transform,lvm) S3method(twostage,estimate) S3method(twostage,lvm) S3method(twostage,lvm.mixture) S3method(twostage,lvmfit) S3method(twostage,twostage.lvm) S3method(variance,formula) S3method(variance,lvm) S3method(vars,graph) S3method(vars,list) S3method(vars,lm) S3method(vars,lvm) S3method(vars,lvmfit) S3method(vcov,effects) S3method(vcov,estimate) S3method(vcov,lvm.mixture) S3method(vcov,lvmfit) S3method(vcov,multigroupfit) S3method(vcov,multinomial) S3method(vcov,ordreg) S3method(vcov,pcor) S3method(vcov,zibreg) export("%++%") export("%ni%") export("Graph<-") export("Missing<-") export("Model<-") export("addvar<-") export("cancel<-") export("categorical<-") export("constrain<-") export("covariance<-") export("covfix<-") export("distribution<-") export("edgelabels<-") export("eventTime<-") export("exogenous<-") export("functional<-") export("heavytail<-") export("index<-") export("intercept<-") export("intfix<-") export("kill<-") export("labels<-") export("latent<-") export("nodecolor<-") export("nonlinear<-") export("offdiag<-") export("ordinal<-") export("parameter<-") export("parfix<-") export("randomslope<-") export("regfix<-") export("regression<-") export("revdiag<-") export("rmvar<-") export("timedep<-") export("transform<-") export("variance<-") export(By) export(CoefMat) 
export(CoefMat.multigroupfit) export(Col) export(Combine) export(Diff) export(Expand) export(GM2.lvm) export(GM3.lvm) export(Gamma.lvm) export(Graph) export(Grep) export(IV) export(Identical) export(Inverse) export(Missing) export(Model) export(NA2x) export(NR) export(OR) export(PD) export(Range.lvm) export(Ratio) export(Specials) export(Weights) export(aalenExponential.lvm) export(addattr) export(addhook) export(addvar) export(adjMat) export(ancestors) export(as.sim) export(backdoor) export(baptize) export(beta.lvm) export(binomial.lvm) export(binomial.rd) export(binomial.rr) export(blockdiag) export(bootstrap) export(cancel) export(categorical) export(children) export(chisq.lvm) export(click) export(closed.testing) export(colorbar) export(colsel) export(commutation) export(compare) export(complik) export(confband) export(confpred) export(constrain) export(constraints) export(contr) export(correlation) export(covariance) export(covfix) export(coxExponential.lvm) export(coxGompertz.lvm) export(coxWeibull.lvm) export(csplit) export(curly) export(cv) export(decomp.specials) export(density.sim) export(descendants) export(describecoef) export(devcoords) export(diagtest) export(distribution) export(dmvn0) export(dsep) export(edgeList) export(edgelabels) export(endogenous) export(equivalence) export(estimate) export(eventTime) export(exogenous) export(expit) export(finalize) export(fixsome) export(foldr) export(forestplot) export(fplot) export(functional) export(gaussian.lvm) export(gaussian_logLik.lvm) export(getMplus) export(getSAS) export(gethook) export(getoutcome) export(gkgamma) export(gof) export(graph2lvm) export(heavytail) export(idplot) export(igraph.lvm) export(iid) export(images) export(index) export(information) export(intercept) export(intfix) export(kill) export(ksmooth2) export(latent) export(lava) export(lava.options) export(loggamma.lvm) export(logit) export(logit.lvm) export(lognormal.lvm) export(lvm) export(makemissing) export(manifest) 
export(measurement) export(measurement.error) export(mixture) export(modelPar) export(modelVar) export(modelsearch) export(moments) export(multigroup) export(multinomial) export(mvn.lvm) export(mvnmix) export(na.pass0) export(nonlinear) export(normal.lvm) export(odds) export(offdiag) export(offdiags) export(ones.lvm) export(ordinal) export(ordreg) export(p.correct) export(parameter) export(parents) export(pareto.lvm) export(parfix) export(parlabels) export(parpos) export(pars) export(parsedesign) export(partialcor) export(path) export(pcor) export(pdfconvert) export(plot.sim) export(plotConf) export(poisson.lvm) export(predictlvm) export(probit.lvm) export(procformula) export(randomslope) export(regfix) export(regression) export(reindex) export(revdiag) export(riskcomp) export(rmvar) export(rmvn0) export(roots) export(rot2D) export(rot3D) export(rotate2) export(rsq) export(scheffe) export(score) export(sequence.lvm) export(sim) export(sinks) export(spaghetti) export(starter.multigroup) export(startvalues) export(startvalues0) export(startvalues1) export(startvalues2) export(student.lvm) export(summary.sim) export(surface) export(threshold.lvm) export(tigol) export(timedep) export(toformula) export(totaleffects) export(tr) export(trim) export(twostage) export(twostageCV) export(uniform.lvm) export(updatelvm) export(variance) export(variances) export(vars) export(vec) export(wait) export(weibull.lvm) export(wkm) export(wrapvec) export(x2NA) export(zibreg) importFrom(grDevices,col2rgb) importFrom(grDevices,colorRampPalette) importFrom(grDevices,colors) importFrom(grDevices,gray.colors) importFrom(grDevices,heat.colors) importFrom(grDevices,palette) importFrom(grDevices,rainbow) importFrom(grDevices,rgb) importFrom(grDevices,topo.colors) importFrom(grDevices,xy.coords) importFrom(graphics,abline) importFrom(graphics,axis) importFrom(graphics,box) importFrom(graphics,contour) importFrom(graphics,contour.default) importFrom(graphics,curve) importFrom(graphics,identify) 
importFrom(graphics,image) importFrom(graphics,layout) importFrom(graphics,lines) importFrom(graphics,locator) importFrom(graphics,matplot) importFrom(graphics,mtext) importFrom(graphics,par) importFrom(graphics,plot) importFrom(graphics,plot.new) importFrom(graphics,plot.window) importFrom(graphics,points) importFrom(graphics,polygon) importFrom(graphics,rect) importFrom(graphics,rug) importFrom(graphics,segments) importFrom(graphics,text) importFrom(graphics,title) importFrom(methods,as) importFrom(methods,new) importFrom(stats,AIC) importFrom(stats,addmargins) importFrom(stats,approxfun) importFrom(stats,as.formula) importFrom(stats,coef) importFrom(stats,confint) importFrom(stats,confint.default) importFrom(stats,cor) importFrom(stats,cov) importFrom(stats,cov2cor) importFrom(stats,density) importFrom(stats,deriv) importFrom(stats,dnorm) importFrom(stats,effects) importFrom(stats,family) importFrom(stats,fft) importFrom(stats,formula) importFrom(stats,get_all_vars) importFrom(stats,glm) importFrom(stats,lm) importFrom(stats,logLik) importFrom(stats,model.frame) importFrom(stats,model.matrix) importFrom(stats,model.weights) importFrom(stats,na.omit) importFrom(stats,na.pass) importFrom(stats,nlminb) importFrom(stats,p.adjust) importFrom(stats,pchisq) importFrom(stats,pnorm) importFrom(stats,predict) importFrom(stats,printCoefmat) importFrom(stats,pt) importFrom(stats,qchisq) importFrom(stats,qf) importFrom(stats,qnorm) importFrom(stats,qt) importFrom(stats,quantile) importFrom(stats,rbinom) importFrom(stats,rchisq) importFrom(stats,residuals) importFrom(stats,rgamma) importFrom(stats,rlnorm) importFrom(stats,rmultinom) importFrom(stats,rnorm) importFrom(stats,rpois) importFrom(stats,rt) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,simulate) importFrom(stats,terms) importFrom(stats,uniroot) importFrom(stats,update) importFrom(stats,update.formula) importFrom(stats,var) importFrom(stats,vcov) importFrom(survival,is.Surv) importFrom(utils,combn) 
importFrom(utils,data) importFrom(utils,getFromNamespace) importFrom(utils,getTxtProgressBar) importFrom(utils,glob2rx) importFrom(utils,head) importFrom(utils,methods) importFrom(utils,modifyList) importFrom(utils,packageVersion) importFrom(utils,read.csv) importFrom(utils,setTxtProgressBar) importFrom(utils,stack) importFrom(utils,tail) importFrom(utils,txtProgressBar) importFrom(utils,write.table) lava/demo/0000755000176200001440000000000013520655354012131 5ustar liggesuserslava/demo/inference.R0000644000176200001440000000014213520655354014207 0ustar liggesusersexample(gof) example(effects) example(estimate.default) example(modelsearch) example(predict.lvm) lava/demo/simulation.R0000644000176200001440000000004013520655354014432 0ustar liggesusersexample(sim) example(eventTime) lava/demo/estimation.R0000644000176200001440000000006513520655354014431 0ustar liggesusersexample(estimate) example(constrain) example(zigreg) lava/demo/lava.R0000644000176200001440000000013313520655354013174 0ustar liggesusersdemo(lava:::model) demo(lava:::simulation) demo(lava:::estimation) demo(lava:::inference) lava/demo/model.R0000644000176200001440000000015213520655354013352 0ustar liggesusersexample(lvm) example(regression) example(covariance) example(intercept) example(labels) example(plot.lvm) lava/demo/00Index0000644000176200001440000000015313520655354013262 0ustar liggesuserslava All demos model Model specification simulation Simulation estimation Estimation inference Inference lava/data/0000755000176200001440000000000013520655354012116 5ustar liggesuserslava/data/calcium.rda0000644000176200001440000001412213520655365014225 0ustar liggesusers\xUUD J-) >P%j 2)JQ OEEPDDDp@zMnQZěγM欻.k_nɽ]\ B5ƅ?ZdȠCFL#DxQ ~kM͆rF~\w.=]o6 Lyi׆~999}3FsiPxLl*#{ۀ}A#p750붭݌\sfF;wxjF:sdkB#\g:ojc^#9D#/kcIF63f_2٧_HEOf.='_̝V+?x{F#P1,9Uw.]}*Eß>G*<*a;Z6,˝=\9| cg$kbOW`5i}@ϓcFiwF-+\:W6UDž XnkdnIF6ֶzh #~h8l%0[0bѺyx-{<~| 
-7N51Ku#{i;g>d3Zs}ҺzNw.&D~s:Fa#gF'E?osU߳ӌvZmod[ۼW&ȱgY?Fe9Rs:_lu`Oߘa癕.#yHqG*Hg/3D0@Rދ:1%Z*M!:a%Hksnf/av"WkX';(?~).@c[0C?aAsoGz~/!>s^k]g٧8H>|~Y/ _(hJ<Լxqg6p^#BB=-!{{I \?N9t?ѿkxu~mgwsi_;7~KԸ_r߃d/]:˟9?|=3_=4kqV_[pOw[Q+OjZ3;R nUTFgtا;k͋s|\=gK&+L q|l~Ma=a0fcVa>MjA{4O?Y'y{w+oѷNQ%2f(Og…m/+r8{\zز[犾5GZ56Kv~`k)OԷwMTzYFЪa%'Pv~?Pa&codnx=.##4jzAZf*F5?|~ڦ˻*"qz<ۺԚ8Oa;?X؊pHy=w| R$<w؇K\?~JԽT]}naTOk.%uu2Hc序CuR#}q?kGTY FLL]M>Umz]x3 mu=k?QmC>uR'o ߻)^5z1yƃEVn۽YS~ew}*ں2oyuuŷBq:rޥ;6:a^2Sn k!sG~E7}O}Ϥ ..gˎ=~hיq{?q]8Yx~8p 6﹎t^Nö{ ]= ['uOl1Jð?7.٦6>{ %,slL-1a\3xF_mp3?|_a3m pa x$w16)L}@h9&*ؖ?:`S\3s4Q,m+Ѓ>%f lm?֞MgAx'dQoI;4պ8DӉ|oN泽B~Oc1m6+?pyMķq|ˠ<>Jzd8HR|8Ѝ|Ӓr{'#1IZ| K315(Ug@^NZ|jK{{19ؠU ؓrbiʕĩ@Hnuɋ5 Vk$lkH*Ka[ A7l )&GJGޔ[񏡸?d0. iķyy7_1߆xNM\0cw ~dJ9:74!LIGc9TIMq\xd>m3m]) ɫ))7XoGQ&m'iR[[HWҀHO֑^\@QtfƼ5<86I*'к'+w +wXhT~?-sq|ocGL JoM1fh:~?Wx/%C\ s{.p=\ s{.Dj 5P].Dj t.^Kx/].\.[.pQ8G] u:#CGlava/data/serotonin.rda0000644000176200001440000006526113520655366014643 0ustar liggesusersͻ 8k?dH! JvBQDD"C%!i Ihl3ey{d~{}]9=k[={^gvZ[W`0&V/̈a0c8Pev aG;[٥R@fl$-"x+@Sy`+)roڊ;rxvh o6d¸wLz_vMWԿ Xhq=R$̦~8 +C[3z/LW9&BwW` s 6L)DLQlr!d˭qͅm*@#?bc:UW0+z5mi [Tu0.{ţhUn50'6{ LV#%Ь=a楛cEy (%>8O\&}xlfJ:0kgZ#P39n< 0Bc.~kHz@לoQ.O:Ln-PpMôO$9w.LUlBv%A+n02㋛j>¤H4Էfb*1t_ rx8Le9L>ʀ׋KU)@brq9U0#rq:O#6M*Ce!9ʨ)(b j~]v@V!N]TG&@cLfC)5)~ nHJ.zKr(r𜨅&7% q}1Aܝ@>yRdlA݃]^^Gel{rkY1L#_d&1 qeV݅ni^ۨuUʈ&F ͏TȦc0fw2LؓnKy%s7.p_##^ =l0%4g8 xHָ0zk={+^ gJa&9?ƵnSJiCkaKkC Ln(wU`rץtz2hJӑ0Uc&@FV@,swPNgu\LT{ 'dxƫ)sWiO4X4"[頛01kűbCO:A7S+%y6LK?jwFSԞݼ蹵}C ق,0OםY"G|LȷHu~9ƒϹ'wc WUzrMqDg5t<6>Hm`F c4.=TɎ]{`2j0hF5Gz''!#b0arV{T*0)oNu)mHVŊ{>gq016AbL 5W _H9ݐ1Pq9'W=:V{,K^Xcq0^W oƟ_; PB2v+5:./}{8΃m0qPOM˻fxh,yfRU02=ΡFOfkf`S LZ$5/Ѥ7hu TȀ11N٥q!?|RƽhtvJɧ41AǏ0~z6<0yzJ[eX"1Z u]xMs4ֆxmLW Hkp(,8S͂Ǿ 0m ldň͈^L?Hqf`X, ;L [5m864ȡ 4/ F{^@gCw\2c_sh LEl0Wuc&߅0B/`LHw$D|6M [7PC 8-q ]l]ӇQ3'ƤFU8I-%X;ͬ_T;.Ν`YPs/1it]SQqT5Y?wߟ|K(>|ϴB87xڰ%tagSZ/r|jc%Y/a.٠zzI)gOUm;3$мQ ( C|2x|g4!a+7IDqGq!h|O-^7rFì_0ZWSA&:q0=aPDۗnE͓0TMEzi_C,@mPV8q {\ z oq4ZA/^s}oTo`94(-Z*'Ի}6i'LAcZc׎ZD ي٣ugAc03j 
5sa2XƾI7L%NJf)}amXȪx̾m>}HVz^kI/zUNSpH[=xPt"z@ԗk'\}}V!\&'^fh G-dз\m@zX)p f] )@BUM%$Yij GzlGFC^h+;/[} ȉ9nMϨ0〫|xa8Akr09d9c}ݝ _큮SSž' è9>jǭ`VgKh iڌaZ n_B9k1fXS7ۏ3R0z|%.Ƀ@ٵ)H*_몁ńX #Lq*z-GUہP.Vpͫv0PcpitGy2a~Cy҃0y_kQ =qssb U3(@r`јUYhnY"lcg3,́^!+5[`|KD߸1y δu1dW]_Ms!SZϦ-P$=)Fof߂Q}Z`1LI@8gUV [yrCdGy#*_%u:m;t8bHC& &001> 'Z:]3 {y|y&%gl1+}_VaaWj,V9Y `\oKRa)P=4>ڝ BIE| '|eUj`QLnjdX̢M17Tmr dtaƚ0)`\:%jVzSȝCذb q\\shMßE G,n,;"gCZg Z#@iy Li5pl~C-Dt߰0mxe{fd5!7g{#xܖ|hF-;O9è.Ek. O}=Sy򹰘T+Ʒp;7`ko;F2=T C0;>mji'oh"K+ף!b 3UoрVyh8v D!ǃף?wOB8S*eQE͏y@ۘct8Xz6ނ~ {2dtlw>r{n~4Qzlt\^,/ HvÜ{7,ܯ:&fTw %ߝ{'qs OLd\ɳC0}T uT)E^3Jѹ܁jqn<~N)0/yzTS3['s-Yqed)V]#0*jc,lE{FuԸF@)<ݬ% ؼ\׾C}0g+W}kث8]>ĩgD`pװ0~Oa= 8¬R) 46s S05c{o`$|)aXj/#~yl O e\R"魘tMQR@ю=v쫵#z>EK@nx&Bs1Ȇ 7`rL9 ocD;ʌ@݄S O)!J˝=ؖ Ժ/osDYM,L]k ?Z^F^=hMɢ/>N8h`&_baުd-Y5LK|r2FUhSrR*|>1M T qU 0?`w 'թVsQk^o~#0b\fC/77~ObXIwז86bux;z(J0m0e,2t[raPbAf,zVC F'Yiгh= lM_V[|c:&\ټ '%Ȓn `kӶc707Wx {/rh]_O#ݼ&@5TIDAcafB4v{u~~6ZW̟LsĀ2[h7}L[P}Tg+:Yf<}Pl[q߸N@$>٫@ wЬ2KS м#{(;e&_x;ޭ 3 A6h}n!9R`vhTwhѥ"ƍ@0\MLno}k $3L2w0nkph L[ ?(g{h p?d4Lnr#?sA{#|` L c'PJy c0a:7^<&SM?,"/KNAo|%ï";xu L OªRO49=nک.⣭DR2O}EZzfsO")[m=]0F3YOfϲ|c?;}I0EڮHUZakǺg(5}zu8LYXS>n487Ō/~h#0aU5v(ٯ6}3eF-o\ TX 4͡؂'̔7L txql0:eHųCiaƴR&*Nacnq8#mNa^o0:Όd;OuI|@$ IE}Ѻ\t6LdÏꈵL0y0\S6l~!z, N9c? fOr5nt2jNqq;p<}p#h=ea>e‰Q F,z09s.x3_8·!ÎbZޡ[F[P8χ%(¨lZu d:@f{#HTN=skk`*^Cx`Áhq3²z4FƜqrY8ލ r3Lݙ'6 3O1Îg8X,oNwlȤQ[ hO|.:MTёqQ9&Xν}DvYθǬx%NƩFΥ_hd;D_7WOܦKKs; Sz' Q]H˩&QG{iZ _rF: +Pe;kj5$8t5htRDoǞP,ۅǂ}b2gl(Wpdzr*q*E[,>}ulR V@KzR R&Zxj7Ab&[Tk ?l%~`"oLi#?X@K^d'VT -d6U<_SV_tUG,OXrVH7W]$ʣM,2nXF|6̔mF>/?6 <{Y7`(0Nd< NtQ ˃xš ,&K6ڗVBI>g\ zMgg|pPɬuS֊bMߦ{i?G>_}.HAu;&V~?rzi8κ )~t;}5p,WM*vQ|r8첤o[7O/Q,H)5%mnLBzx 'q4g_lWaYd^DO8yf4{dlTl]ⳤdf]d+Bmp ~jѿH=xyo@sYMˋ]U"u 9{e]Sc4* n@_E2%NK0ޯ7Jt,c3E܁Y81Q&wA5@L0BK#LL~UX"g&<Ê 2d5N^F) a|4ɾʼ;LS`׬?TCfG?o )ê'R1mm}F۰ A6pd` Peޅ & ,)J;W! 
^Geڶ&Om34~ͭ8o41?)S?cw{bWf0 `m)tGob9Na*|abb ̜|:&6][@>{(E8WqLy;MB[_Ne``0N%`o/J`~uN+$ v[y :3ANS8A(N7[HIJTiҗb{%r&հiv2=큣qHɵG.>(vz~LO5~H,hZ8Fݏa92r[sp n,,KEMr~53@n3dftwk@z-嵙Ƿ!iފ0-|J$I$/+-[{Z ;-/šZ@Qz4'>~s^(Hda8Hg^ jҖ[>Ϟ{95ߜ̶ލ4*jb'x 0;?CU7x=_ zMrA-0$6TD&VY~N Zn(.I.5 7]dFF@a|1 W 6! @xxA;Y[ElsR8I' Qv< 1(|^tU;_sYIeNrgj.#<}\r` MA<:0 aK QQȣv(X;DOQ0:l(HЃOtF2Eyllp'^(ϥ(O!NΖIޏ|@hwxC6GpØbV[ $C~t|SFֈ{@ ޱ7fmAYC?>]ƝFs|QrMy}'710(,;ݎ3k;P152ᶽ@f> /|e;V ˗ N@2>kѾ`RXRuJØ0Vxs+?ձ7Vuj,5bi'fGѺaPY]<ʀ؂^` d#B'V%x_p} Df0F4^[qZCӆq0O1Iۺ5y&n, ~3kMasCm8``cX*)*=>mr l9 G<)RO"1p&_V)\As'{o,&e2MYG?]= :=sN}mAf'nFNqOU0?ۑp&z-;S%a$W@ʸ _8hIh|ձ9сCŋTa]S֡39@0f1݉ Α䎻^ӮgXAoL͇կx8 ޫ0ή˵(C۶Ø dS')b"@:gRqR͛i/dk06L{ù^u0*xAz [^9^1rނ (^}`$"8&C bگ@!ݕ]O6Q^5 hfgr+7\QM m0"3@<.dۿQl_ۥToqV3F{/kBwc#}=c mu$lgz Ypwa>60|~Б)S'CZ<'H8jXx7%D$$5AogV(7o1i؍oB!-#BWqQG&~({D -=O_KBZZ<ʱ+Gtˆy"Q /zԻ;:0tͪsw  Sm0+i;]je@j^uƉW4Z cĹ۳0"! J20vPe; .: ӥe5,^';ъD@=o$kM?Aªf`}pߏ:?V^[ oPD\oPax gOi3i ]ԛ:8]ݟk.=-LK0kb,{ L \) mMĜ;{V~Kq3[ u5Q{\P WM°[or$!xPim|"@W~ { ⭅a@*έ[KIXtc|a`tr f;z˵6 ugڦNO=e ]2TM/ 4?4 7=?Ú-`|[[+[^juCg'<~ͪNg`0_ק]B^0fkF4}2b!mOx0 PtdAhb ʹ0ңk״ZR1bD4~=V'Dr)(@7o^;hu?-ŏ-`关z K0D$Pl^D<+r]aѳ%k=A(my\41[R:=_BZ( 3W2G<=h 0(lΖ7y22vk:PR}u0x\h\k ]πψ(7}^ ;,`Sٯ)R@RG:in@0Ef F&;Uv3: ϸ N_u}ܳ )́0}RDȬ9QvFN0:/". cI+jER `?<'tHG*V'PԘF\o2t<{Ȱ_~—}^ޗD1sim׋5QY_x=wFjNh='1)zh7v-6hZQf==(T?nC깳&aHH8Y(<ԯbpG/jL%WbkN!|L! {Y&u'KL(tWw'yj* TsdNH$4,> :4/L  Dq&YLoevRf+Ng3{:8Ad0&abF,Py@j7mxrjJqjU`,w}57 ]L _y_$?vꭵ:2^]H$XQeEs dJd67G粶ɚnKv/xQ:2wpI!>t_O5#qFe3~%޽?d3@dxhm۪L伴 @` $<فNBv'Z'y[kS55y/md/ɼu0bLq(P\+<{x}ݗO )dC5Mh/ЙMaP5V8T_ix\hD#跃3b`L$߫X>EljۊGu"b1Qc dr}WQ;! !":'Gae4. o!NuATOQy]l'On-5?,5یk1a8_.:8Wn9qq+[( B7We_C~9A p:;\Y3k^[o~B#R>q:׎a!NAo@&辖huͿ/.Ag(<6Q1!@6#v=EotjuRͦhP"r [JάBq2jkdIvG/d3!Z_n 4)CqGI@?P&7luRtn83F9uCU}?"oas@ޘRJf1bP}R6 nsx}% 1:yz)0kW7}Jq6Ȣ>J0п.AcwV(b?n(=PFccvJ0o/G5~ZO @:'Ń}Y_ׅܦ@dJNkFǯ.f"ˏƻOQk]P}>ce@p}4! 
MX`?u{{ تC'hTvrba+N0>Sv0cW{@9N)YS"#vci x6_m0:<1 ݧ}@%}AEw`B0a`6ɳ0ti17焱@6GQrZq_N蝬U]2`4VXϓbY{g´( s=ne v*DbkTSV'uX)?5+BϷ}#$Dz"l(Hi>=ZpV_Qo qW# ?+*tgjsRM oޕjtL7³@lxB#0pqk@L@p/~ _53U.MqW R&b%;`0X;3+U/aDB꧳Z'; 7dj=5Pt ZXԿ\QnW6}!\ 'Fz=qZ?2`1!BRF468<r_ ~i+>gIL6vWeD{=gW t:P1/kPyJ&Jd@)^?9F&LR]5HbPHGM؂NeP웧o`pb/W?~4xXDſ"~Ͷ0s` T|5+>kOvQ}mc3 xF6}JO馎?xxY% c8tyz(Ғ^~@jhzb%lCu=o5a 9z+ Ro<T>܂Klo܄+ 3=ŒBOϱ;0o\Y8=a--;Ͻn= hwϔ@5G).va7y`K׷( OlQxjrG i)Q{| xq:/x;(^7u|hj# si\ʥ~#Q*m8t*< 4O׺}\agd*f {Y|CcjkB A{G+X7/t5 r81dh:)bܱ v 'UithS9m?(e@N0N۸XnAL[4~$0N>;9 NaM^3Vi}2SN&O[:tta}|SI㊋b㦳SbqSrM}D7dd#:R^D6xȏ xZH_ Day1('rwȻ"݋fx$?R?zfoip$[pT5B;uοm؊c/y0GZ m]$K^ @m [y! Wڶ~ajB㽷rjHDsj1d&zifRi 8)A/* dRz&Ԇg"p*?&;.43/ N3"<8P<[ry_e!RrrWyVnW\Ex.uo  K3OF6ϋPx5`t5;Rv- ȋ,^n0e pׂnFEVDڞ{EhZ;q,_4YA#-z_ՌQ.v8/NVL,&GXͶ~jcu$R g Lj}ZuxCS/'LfMw}Cߌ`~@O+H.c` ]D.<h Cɳ$Б{ t'n`D#ߍpG-^0zo֟g7lc?e|mڣ .$ϓPI ƭ%1nS~j#nmakʹ2vC)-2+p8 #őԬ7_CGcdR{1}ؑpK}=ۡd! NM} sܫkoo.ऄbNBMHߩm=p l2#BSQ2^CP,͊ s|-6B-Lf6)dsAVGe,(n]rfd0|dĸ6A Y4οe)QL73F7?0+`t{RۑbW ]XwQ,q,IU#gBhF 0Nm1dXA{$aVe}k#ltNLg;2|=0\ ӷ3dꑱCŮX&ȧ@30ubj膵 X~cK[b)m!I"͉쏯®cF$[kέߥ1]C(1rA{Y:qPm벱qVlt˼j*H>Hi\W #^߰/p;пu^;ia;\e{ot@8L6tԶK#]9}HbΗ*0H132}'$ ]nHL0~a[n&,ӻE0wWg9>ƿ^;TK9]$yN׸ R+IGqXCJМ{;Nje)X,ұ}T6]QK r7p/8Qiw1e] 3p=N5}9?L{wt,O6{y Vs2 f bj1OUAeBVcXN@sS]|M,ԛO͒d:+dpB-iT%mftTXxsB iL}~7cE.>Ժ|$ òiU:oTc*}XN0f+ӽ w~[ 34ϸ>[3DZn#vD;f*X͟q2?qNk"#o5 HT(;LˏB n :v7+a ϙQVȴiH!,K!;.E!Gזtc9 g[㱢[d쾚TcYm!^|vxS%M{OCc`,+p[ Bihf^hS&Xݷ 2jCrlBu+NBcv tW8*tсեwaX WgÎ Dz$"t1 'LҽoX`tN6byyؔEpWA6<@Ӏ9+ $,#=4T=ڥivPHVND J\4=pGV?^]lPxaKɍ'f1H5%7=Y(K7I`/n84f2LW,i sFC"ڽWHS c 2*p.kWX୬BJG[ K-#}oJAf7p?D\r7Nń03H-g]@j/DIm:֢E)h~9ujBy"+7XyndR|44z Pt&QFFW q6b#ǵM18 Fe B.E$kK!{c H-ЃHn9LJ@v"S9uV5_jdhEfĝܭ>ӖН]"x0B6(xN,̫;VQ[E\% 2V*v!M}'#\|]d2Y>!]GDqs"dȀ՟!0S*16ر!9x![h۩5Ԕ뾈G*ͳ/&" &BF#B/eʚZv.;sSfZuO,!hH)*e@O }=ò H__../4L<-/1@hmrϟGEla1oe" W%b?c*#Y7|a;7Pm2lə'Pg=N1 b1 2h=H1|.x?\x˥LdТv Z#*s7!GX!~F/1k B<-^BZ?m}~yMmw%T; /*"=@r2Ӳ!tߟwAM"<+.8l1T1#>};bOՂiKwÛ}R[ mܾ'L ݞ/_NAR??\ح6vgh zk`P)"5))9-m6k:y3AQ(0-G`]_VhKfw_"cHզuNbT<$8i&*DRK3j ?#Q?lմ 
yFկ`YХ@;Z21w7%L3"X?w"s{<9ζ8G(ʹdcP2[qqV/ҿ>m(RUi64G1]b`[i4nE(trͷH S.%]znh%/\Ato02Oxczw/s~?Pv>tt,]OBƯYr Z(Ilq O ')rhFm)R:s=wt|-q;d&aݓОfpp[~<,z,KZ*B <.fTmZxXφ^ƫWBW?Sv[BOiHǸ?O*YULP$ `}Mo"߫ec'"o)% #.y*|']/^_*H.#Ҷ0 .g@uqv~U `K\3ejaEՆoj̠h?06 !}ʶȄVwe2y2}.%B4n!&C}{2tl`J਎I1=kڡZ@X' so# ZBdx#6,,<z_2SJ\cHi5q-#ȢE$ϥy%I @Z.ۦ{d=kXäz~{UFJ_;T w>p5+sݍND @cទa!^+ 1no{*tXnw:9m0mM5,2P BL1 v $L|uT>_"W@V?ߋ\ Ҕq=ޮ{Yj: ½=u}L*⪾3$NCdN_("'8[w 4rgȸhx)V 9ƦNr+g b)!Pyz[ v u~ɿ+zW(ߝ﯎9}yN?{{ZWGJJ>~6N?i|ߊ gn.O_i^-iT?*ǯv]Ww/߾o͞cK ;=b=vվcͶopA # Kv#ԫ{>0/o"=:<3Xp@:m7(9!3]ycx3%lP=l& 1fZiՌ#Ps#/2ݙRI()Ұ3؊ZM:d_),7nunBI߇e+ְO}l :0ٔX(RR$3sBjf.AtB^~31`DTAzЭjNa}# ׿Ҏm ;[U0Eڌy-wӛ1J<]Stf M>CfOb" 0P´׆|Q{WDTbֈ[ġ|G!ƪy'%f."2kkAó60Ţz.2U<_}NaOib2]Ųm C&=]Ů9 !ځ>s*/z0i k2|p{0+nS~fa1P Wʒ6Z 0a*}|XlWaJBy54[E,8Vy $2y)BR{dvBZ! D p ^`1ۏF 8< cYsԍ#M]ruG | ⡝0k֝ы8FM8f}ּȀ骟90Qr% ^˫' TK5JHw_ _$X̎z*6MHq+Zo>{NHKBL:+jBR`@V R=n3龷XÈ"G/Q>朤 R~w~i_["3PY|c$9878?{ӑ.WvH mmoű9O'õsdw\cS!HKmf@롵1v~Ц,Z/Z+egߘBߴSx[Vq@:R,szr>x=H"9|c]dtֈPꃗ71>z Rq3dv[FÛ.G##{X^^%s)PX{ t)%.[rԼ7YB5YS{~)m'k,kphR_S09mB=kž+zfSSHcARsNϏBVzꛫЛe4Z%.d̿YLq{(b2*DF&:_ny-jgxfPcTK0~yF[qȤdK^; Yh+P"Z2b1dn֧Me]8&.;֟Zzm>ȴ^mtr^-cpK7 :wF=xǗNz.moT?7ҸRM-ͳYG{izy,]wvzFK-Y㸴ɀ=~^Kԣ{,9ݗ{`˿gK,_7̿\j;.G.[wۥq%ݗfKry-|߶6:/QuK$g7l_0^)]8.]VwI ~^K/-s\ZKJK`.ۻҼ帢wis,O/ociR8v_?//>K|ձ~%{Y%9=-8{67>}~u[Kqw{e?w\9,wR]M~qm~-/R{9.,+YW]7S]C[?[+WX9g[vu߽~W>[w{_u̟WR2g~oo\f{ܝ_wTP/Ո&k"2tLɋ,e*cXbjN-&޿]v_Yןy˳FBJU {=4mRGsLQ,0_zϟ^ sH8z\ӟ*$bɩ~]U"J Gsj=S)wԵ]6 t]k~:UGGn]E<אz5ݣ2GK`دVԱhM_VN=ݾj&]Bv?9>?jP烋!~؍;:.aя/n)[{S ͷS^<*hMJKri|_*uw?'`4{[XvtőZߺ?GS;]gWr|߲V~~o030ٺ{/G`s홳OGur3}_]hK >oh,B{oW;KKvˢ3,zyzx89sC3REgz-hs'm-~nlava/data/twindata.rda0000644000176200001440000016302413520655366014432 0ustar liggesusers7zXZi"6!X<])TW"nRʟA,ʿBJƋ_ib, (}:S葰R5+2||"QN7R+U $G|We$=_DǍƗLQ)ejɓ.&8ᐾp% zձnzˮw:IU0tSOUt`8<|йfMڨ d䢇KK+q4I-ɶwHX~_ f9-lfs#>،K@\Qn6!Mޥ}tѱ^|l|geJomS aNj+/ qi~6gq^/Q T S>)55dtnTt?^Eyf |&7Qp'y xo1ރvno4iT:14F3bcx{ fױ||E^E;K˛e(A+Fޚ QϦfn:KuסS9]L%!UxxFYݯvJgFpaiD;܆CىHsyl+.K${ #BWU Cd# ֻ*f:pi?ʨVz*v<Xl\qt&B08!4MF6h+V{?p  [3V,N}ZiwNP *s7Dr$k%dF:Nt\)~RxE2L(t'3 
ݘ34 귧x#A1y*pz̗#kƯZ^HѪS!"Q/(Xdy5S|ԃv4 'NSetVAJŤUBZ1PXݫz־hT\d8?cOSQiP4 $)tF(86|8C,j}6QS*ʈSCK_):qO0$A9z|UQCD 3PݸبGܡ9t{5 FOL΃Vb@ujPv_9jW9\ 1$2nl TF9X0~])%wGeVYĖ+o0DbE+A]_ɽBN,8\)OQ΍p;tW$w$d*+C=dtt b/"0"=*:2%tK 9pvyBu\u.gp3BH6UIZH8Wu(Jȟ#xt/t~zKT IxNhyl(gW`U 'W6ܾqmÖ&F/ĐXbÖYH#LiE)es*P/~gBu?zݟeu燦͚@ij[#-&^v_:%ty_x"Š̍9Vu7[ԇuO-9Q22L-#W7*ݤF4ARjF K]Sc,dmmy8KAom9"Z!@!yuկkq`fd]z!,"y%N(ihx7̎&fۯq{Sc>vݴv gۆcLGrxCDmg8ݳ%^zʨH1_L4*˖."yI#ORzð: Ed?Uh H^v|"kKXAWԊ=6Xk? f-#1q)T6*I޴GԑHCF {`ZJ-+!J%rvrпZ(?}wH r8!^O`b.H$#2mKo/t6N]EjW'\*T/#I |{lڐ.)TUJȧ7:6\)S:и:=O] (?}@ ZXWqgTkNrk'Vq]x$̎*G2Чٸ@)oU6d$`^.^yN ZN8`ϥ_6 fG)|S_= Fxo@r@P~Eaaj9 9X>Xk|C]-v3F!7RT+W&z;GaU`n=PZP'dFXs5C1Uّ-|>MADm˛K4v G{k0D3o#)}Psw0-, g@ m=̠Qr j+_]|Ά{[P>ZJ2% Pd3CI,v'(k'Mk5‚ ˆp[ܐ)mʦ^;;kI{/ բyRR՞[^ϧb5?}LMx ٶٯ@<|S-xYrܴ7AꟐNVݫDŽa YkL}ɔT N nAg۵Փ&O?ąщ O/GTQLGADh|6/LuF]S%86䉸 XW+]:=Oܨh 9LAduZub2$pj3KQ"`3F9X R]R h*~hYl Ml6;"7 t#L(m^# S zپg mSG|qqx} ٤*|B2DK+16N^{ q \놔.>N Ivp0}_1|}tb&5v&wx[xXրШ'9 S@=D?}mZo0^A%LtB˫M_Z=@6l}'hl@V;Z1<:TbbIMVB=INjYwvE).ߦ5i@ ,/@gsM/}X- [5f?m͑=H89A GyBgpKt&A˫RAizUk%`_N$ɀLo]hԹ~s&„qhGqnLqU Bh,Tbiz-Btq >$>UTQ2ߐ5O$3'(}xreh)j; +b`E e{`>cz6B^s.٤]Fߝm-MxNP|iwǼ؃1A8Brr쩵i L ,htnʡ >r>Nّw79c. 
nu}4Noو[}]EZ#u/="XE/p/(@bkb+o{كI>nG~ 0mC\5<ߩll9OT럨HP[(6B ,\RV{y8񐒩bk>i)PHs+.3fevHW,KB&f1WvU̐/i@[aA0|/ڦ1qX#lGh-秼1,G9|A|T$\K㛌/ ,b{o8N4tnigY=qd@R2]wOeVvLTRXֽ_Zā b;ؓ*Vrv̍MD8v7/( lYlZTP8gt5TY-`\b:5"+Lz\ז=C49Y$w7qHrz\P,\mUϼBlդ-x{myz\m1ڬ椢&g|iXh$FOK3W#&/F`sUrR$0Ϊt;I<6<,Ⱥ<K2cR5u8z& n2D@-¨_K[9#I"7ӻUkY7z^^3O`(so6G#qa{"5''4oQ`}7b w^Q3z²:%EYC|8vWK֙{‰( JHL3t>\Ҕʥ-ZA5#[{t0af/CIR}} b`|<)q$_kyjh$6~(|cD؟lϕFc|AC"N[ׁ Y3Q6؃i_UH1T>4 sQ4"Akz8fUavmCl:LMՎ) JDl@{ üD]^ܣU-x/Єw![Ίbt:r![BoA'm J3eAw7h+/$hX/̹p5 \ڢ 1G:RYu:Ix^׳JA$FSw@&ˊ8Z>S0E6cn- ~rC6Yhj@&P^bY_j1j}rsUG~0*ax\3j%I J7Gz<Lj65=ԝ%hjc +rlN~IK-\_lS[K!ׂ;EIpΉJńY >,)h"!5;/wcyc-Ï=Q1j@{gv%?`w #Uų٧/r.Q3wr{2|SͬEqg6T zXrr.ر8ڦ1mٌ[nt"8E\+~)>d>p$ v'r._Z)xf:[v>]k${4kA7/*gGxGb& ՘8IÔZ]RuY}]j;槂mK < fWcm:g ._]4/NEG ؙaXqqqLߧ 93 Y: MXM5gF\A Y+w^!D-}봱tF%#3B,^SKTyU>>FD+Gm.8˷bB5-PJ=-[sp͖FqyJ{yփNwnu28 Lf qXb*jZ|^tV3W21Kgf>S"ʳECWE4/e*ztI$\I d߯bL췉:/8;g7xEUjПuTcHAv3+&$ s ף$k&)6&? ܑKub/vū}jǬHpzGXQ7@8#~_XZ"sKƅW=TpB",[A 3 ]C:F<"=!gqJ4 yómXx4 ŹPnJ>\= A5.;}<h2ޓ u 6{̣Tٚ|]Β½PCvX.Y;jv>6@F/.J6>W4(n$B !%¾_[,l0Jm2Jp78G^ p\yrsҞqdPn. N '4VȔלoMaHn5$-dCzv9|'A|Lԏ R[؋x U/IU3(IJ5* ڱ'E1]n%XpǨ TihLX0Fy77~HsDCI  k#o%jɒS_Z\PY2-1y0;c^rK44-q7/q OdG)#7.0@-?Nh4-ؽA[Əw%$}AZIwY -4 kV +4xJMǟ O`tbRATC|v,Ы ߒ`j2}Uu Mf]eG[Ǒ\wXP4A E IJZV;Z %j XcӁR$+h;gp`Cб,f Ţf\^ -d @2*s4v`w̦ oT6=犏G{? ~N)((U-x 29ͨ )iVӸ.A լ@WgiaqZ132֭a2Uc8!\b`zT«cY>lGEiiq”LU+ 8eJJL@@N ?=>#gej(O/EWhmv37bJ3,RpQ .c7$g2[ӑz`w,NEZ:J #&9KpN!m׺r?Kb܊Ky`}C{$j/ {9v.7a` bf *CkυY%) O6j*+jhCl0e#@r=srFCe? ,g0`8,X%c픠M h㸧u̘:ɢ:h!fxMf1A3h z{g[Ef/~OGsRUZ'RD!z`/<'‚۱o\k^&)!ŅϧCGly4#5,wֿwN/NN,1hL8JPr2~Ct=y=+530 :@DOD?iH>/ӆo~1DKawy'eӵpʿJZ7 LYh9'2lF&[+ΡYL5$wF4$V? ̛Imt f>{m@ ︾ooH^+7{%nAr3KN=V ؏I+iVIOޏnr0^A5W3} 2B9TU'{y" ,_%w%'K2.*dM:q\ PEUXb]K{HMZ8<=s8 XeI@~6]p; NftgCj)J9|jb7esr |fP~#jPaI"REbcWvt9'%^Q;VCEM {$:@ $yQBnO Ѭ!DԻwZ '.9T!8'6-gcw%gҚ-B'UFsZ6%#w[QX!=SVgy Ly̿k_v2L^_3ilV:I)-5 oÎm7湇(1×Y%WT_<&ZT$N8D-@jJuߍ1m֣&i]9?>\K"k2po >Lopm$L^*EWs['CD Br;+{P܉5 3@ e;nZ?GGqpi|Vujnp@Pan}b۰2[ Iy]L$F>oNRî~0 x\-w󏿄82A⴮Xɜz.>$g ha&/. 
uwg~nuXf5?qw0\QKc3Xg;|+h0 Jy<&(dʡXJbb7;'6>CsT6ONaOf x(p!|U7G`e&'܋ j:5 Aj Z <a[:8@71ە9+öCeLmb2.0Ņi;ijC_э:յ1& `AXF괣-3dyE:AU3:TMSQFZYпӷ\.28KG]Rl湘[>-=Rm>,f@'̅hXO&L  L"O}I/Ŋ .'.{vD&yuoUi7nAR[R[s/)uK99eSnZrH?4uL>)=jmn\N >[)聥A+%v- hz00,zKBDj"ϯgJ9rѧaXMiM #LD=" M$3(` c rűsHS5Xv\w!|QXd@JL BWvu:5JlOuspWaVruZyY=2=oP' (I1vs:?4OaU?>hzIE2>ح4U't>c$0ƅ3\—H(2RoV/7CF+Fd+³3c?$M *Ϻ^j5}ꨅjn樀(E ɪz['.Ӳq Q}ql։)mwىIGŽwQkF'aH5/)/<grBNB}%ʋ89fJEgsլπ)mrw+!^關ֈSƒ9H ^Ε"^qYHmBXP\۶Mhk Ժb!D$^fdW=(t \?/h:gR|y6(N FE/)NAnC!M洱=¹ ɉFQHpg]S:w0C&t.@V|D8nGɤC)#M`2jd0Xb .[zF `j>r#a:(-K/җgB6#蒍Q'Z]%eI6M,z{n^a űM52 CCBrc F@)/>KH( OˎKܔ MkΗl܇˳=6~Tw6r~.F֭h|ԯ8[C餓@U8/'{׍y8D V!UXDJ}y?~ni0|K]"Kүo8Uj=e{3d]=WYsVzsXbLc"L9ҿ_Y+^*9@ +\NVzohbCN bBX+O6F=<7k+z^X3Y>ǒנs¯';AYLmzڑdfNb.BvS*/>кE [ J΢k*')C r4͒A?_[Z$ge)~{7hpR ?3!'ݬho XH+ݺPŹdq?Dd %7cT I$}IP/0=i @OTln_!E_@x#c-\_]Wiֲ3+ˣ'88dLFJЦ51Z[ e/n89;+ZlP8M&i  =_ckNs?DX +qE@d/ f-a,͌s;jc`tPN'2"HeAg?=9ёE|aPނR]\'D *>@+* ]- D4@= ĕ19d?0K~/-aRbr)T*X^ h7ӌQ{Oi 2LxI݁aA;Ao}Wz饛(GlYZgW 7;h;nolCD%.9nq%')hr' ?wGyq.C$I+<3^40%L7ugVpI6eFW^ũjex4zQM4)QX@<ϊ-[Zfζ3pl)&"_:]4g@x͘V>@F_^&s{K`xNHogWRǼr4PkEF 6(mbJS+t52SeA]F=[1&_H76n|^h HVl^\V, 1T/)@mpX'?('R;oQG!669zZ'v1BUפA kaSTIb8_*2ߡ@,yY5o#gLjIƟ3_g,W˻ +ӣùCif6Y9D*O/l0A,1>f\k* =_!-0?$Ң8/'; Ɩ)WX,*JYd/̳Oy^Ii=$R8w Ez)66ޑ:8. b5NW6ԬMETNZd-l +VˆAqu(|Y@@7#3K*k|:s:w(.0}Vϡa*Q5ӄU9zobgJ]CDaEZ~ң/WMvwM3fiKae=qy//S}rJv,oS|YvlKM@|Kޓ c>vQJ+KgE㹾@hA@ήb:oɷ"vSm&lz2jZCF7oSs)Xп@.7$WGUW.c((9ұir0K0)u yN"ü51ܕ{bn7u6 Z9Ǘ]?VJuȗc(ޤ7J˱g *G [.U vg>x;r kr 5۽ 7o%+? 
< 4D]lnqQ)K= cMĞp0loE>,L"+W\g ˰w5)S94*(0 w!ϐ-?܋,/Ytpsk_'X+e .e[=YG=Yܔ^3BT#Ҝ"AHR5`9TF裓<˚_1CsZ .+m98@s~XHeQuߍ$҄VnhH'Hjc\Ab!JײGsN=mh/,l(q'1rD=^;IEd"hCR/McS S}.Rx_jnϒ!w'V./ [;X3W&=HfYzpÍωAhZ|ʻ a[dXuBe: rMu_P#A!]咓IԳyҠ\Ҵ]QIbԟ)^RC("{gښ#YpT,Лct3Hİ(@B٘Ϩۓ>M=G5ng JⰍel +Xf8#* 8lЌW;o90HXM, t>łd yC{˹W42kMx\uEV9A#@Q Y:tb6,T?0)<"Yn_P'`qS@u(gUgyˍ@IC$d_89BZ`IJ6j)[$9vHpbnc2/oLrEAC@r9=Pu_7 )ϧDtpktީ{2}0OpD*d+[mnS02>2ؠ.l 4RA]{?"+M.\]r,Z RC(4b5e )yK+C;6U=YϤ׬T'&tMpw~;k(}^rOHLlktU*9 a et9w+?:G=qJ-E<eOaY+Cs`l"QMGy;BG`Bmb@-W(jq=ѿ{4|kCb1$rki[j&o$!Nn/G(X=#Fb[Qw.~[J JzQ[ 1:JXj6A9e$ GkFѠJX?,ڥ8_UgЋ$Da3k\ugB) Wű(y bM:DꆻȼJ9!NDHm{@J-n g@Tu 1('ܓ 9B\XKC̒fʱ^_%%5`o}yxXvuY bOwɣON2ObJYn,^YOgL/t)&8.l-Kc*D(%CmG4+a$躾ˋN\19%y%L6ð*t@>*p9MdX`2 m eQ?[h`Ifqp9yZFmڎjB1bN3 òS> j7I$871A. ({͚0%3ޅW]ĦU8E`jO˵zT9 HkؤN B:m 9@e ;j"aJ?8ՁF źCKAK!i?QF?CK*5ccjl+n"'IJbE#x CCp-H."Ͱ\@ j:)bII'svbŃH8YFMYHM`+7_B8sʭ0_zt3㊐ݣ6@/KP]Y0<o jەH*7[)4ϛU5Nsg !n1P˖wo4ǬW*b1MzrGhQdͨd_GNi~=1URCf(K=Qdkij0ʽԢ$ @[;Ú)˝2m:ׁasƁOPǎEO|r9n kn.p;?LX{ezSާ8f;OA\N{fW] tj6*ˊ. Zn&ɥ8 XX}ipbzD 8g͜]T<GHAldړ|U/F D>[m4HG(!FR8tS({y2HVF09y0ee>N]yq&2[mx)Lf\#d݇޸A^y ҽJ~tR8KaݖT*E)/q$@UZipw47%Ҿ}L&=zS v.G#~(-9f+Fw0Fphkǭ4εTNEg x|SWYfL Ax1*զ~e-!js^?%%;b>H ZIu$M)bInYbgC?O2_Zf/Ւ,p8oB b˙>7}5GJPs\JtLW /^CgLG}ss{= e:dS(N;=gRnyA$' E m .]о:z& "O/yӯ>"a%ѦO *B4n y/fH!q;HBj>xɿ'Se ~#-jkGUq;im;|,s]fㅯv~k|!D)/KⲀ/[Ԇg֝#?`& QU .8MFZ8Q7px0f2rrI^QCך:|0'Q\dkbUe@YS#+RT9(Tj89YgBD~ @8`wGT,!<{$Rf㡭cזD w m4$Eb:;W }AEYX#lhQ~EAKl=ԫ۬Y/;t*flJ| 1.Z F=w}{Sm ťx!M'hڅ;L`j'LCsSB+#)HZ.o/SݲNu9S/|N)P J1,orwZQr漿Op,363.X˳ _+SES7 ۨώ,dȩ)_uSj9:p{u< JHDB<< 9DOyA>%8}ϕ+"d' bzl/rFɺ$e};Q +@nl s>ݙ'Q^j8"[ ߞ AӋBMoUNi 3.=/M{.Xro&|b0$ie҆_c{5~RBE{4{mS|0>hNC^jOԮ֔T!6S.0|Qzx^6WD=P̐n$D ,/aMqM q:{A'_sQj55~+-ƷAّ϶5 d =dSz'P2.`י [T.h9I^2. #l+ql-&P~vI6/Z8rB3m/>\`&T"6TFO]Au|G^۾CcZuG 2+`[|`#1ZzO-Alu΀*æbi(KnfѽDCF&)kSڼdh! 
̸G^N3N^IUx@bѥɚE׿7&$dtyP?:|f8eޙ>gc6~od%trpg=x7I!4rdo*zDi<\SaF߆PQyCʹh+bU8 XNh`!`-TjK9!k;.ϬZ}ss1&NPo "zQcx<[Ƭ93%5Y0r9ix_ ֿm3_!N9@80wAb>{yo?u%uX`/k g1ޒŞ<^!*]M ۟%ʢeN qE;>葠r޲Mu}0eVKJ$ ؁:{.aJʎK+RASTt ?cfOY- glǷ4X}AcjU_tx'cIf._2Eci%hvQ3<3/}Da=!Y rhbM6KioU6L?q x 1&kk;v;I{X n WƑQpۘS͎zoYJU3r'S*z.Uo,l֨=Z{aa9y@6z>oTP`QbjĚ /v&Mw<yqh'b q?{xqhC=!ܸ_BaJndThUeޓ^1xzmve'V:F%pq@DJe0gh^i Zc9TF0m֔ﲖ2B2FSh3]5B$N%FFP T.VWZf+; sZCCp1Xxocm>ѣf^v?x6kOsJUViq]j11 J#)m0ٸ~8lh#7=x%<4m!%2G<@b)3t]Y~29҆g7 xTuMbN-Y(s}||腒e ?+-j[/ffG8eJmxؔD8cS⬶5bA7Š65wxL 0WaW(pTNe=>!%@`WBYpwygK=̠ӻCH~ۭTu\;ސX4xe짳 i(7JsƪL'@OF@X}ZFZJe,jtrG\f.LpePU5zK.M$[|@`r\ aඛ%ZNO1'u |UdvVAg.Ly_D eP#uƛ]MVG3 IqEK#,כG0Fh\82:+`4# AxoCC+E((+;p9P\o$h%*g\Z64ć*%k^SHwssBmZG0 oQ{ڰ8w(m͠3TL-}W mɂa0=(L -~[Âi,ADRs=Υ'ܲ_yӆJT3W(xX,}ey Ϝ!.o9w^ٴy`:4LfmM)mk8IK#/Olƿ?^)lHAyOCCzv0$ZPDT,[MyY 2>r'Ȥk:ҁ%j7nAdMr/RaTpm .jK|<τ"POC ;Ï:sx}* 7<,dnNA:콘13pO$1YeRoV%zj:;{1x*xn*0hC߉!vΕ pjSW]0^jć'܉5 mXNZ/$[+/W>ph8 膦%+pMބrfD9zѻWx꾉x,6X ?qC9t= +N@(ܙo]HnaX+h$N7pdG_`8XUAp\e[`эOSKN w**퍺sU hRCl֍ޢemID]RIN1Yٵq!?d7#sd6r.&{_ (F}6ϨPkFl[K]uiolqV`u%Mw?VN3Lc\# h!D{6M0; ϱ%4yNgsvs?sk^GBf? 
eM(h*a%mqkܐ>Sy'_b0h$V >Yj3eu^ԄS>Eq'rW\T` nNpbOφz}oz[Wt{J6A\f13(vM6Tq=V5+g3$GM0W%oڰ:RA'SM(J`V5lۜv0G:cdc7#z /9v ǭ"ݮ4ϨE{De488"*,Y/#W$K:`X5cFm(5Ln/*Ǟ+gAu8 / DwNGn[3Ye,֝./MrV˕čM)N Fa*8 9<Ԇ"1M>AټF@AOA'N!v.VݥkNծ[f )7{ZT|HW%ùǟ wۢ|&%<{m(s)-F h^^ŌySb;a1?6ڃ~_iQ[1bNwU< PZճGH>5i.cp涣ƠU[׃m9.l_^+c/۶ H.&|[*Oynȡ `:O%CEא1S\f?}Pn0i χǵ;juwpAeEw4EI~`ZfCmClKbQ+{KPg&+sn;H/K h>Ot(DZևWF@N<{!:·7>i'.=WRY:I Ӧ_hG?1} V;dyiHt}U`St~B)HsdXby*!k,6rXOS}&+'ӄDD۵!Kڏu@0@[ Ol1x~ Mz[ʶU~4TɇnnV x.%Ce4̆8c_?t"0z T@W*P،5"O*i'=-[ u=71h BU~YKP;/=7#P'NE VEؘ`Y/aoLūf:ɾ-〲}1S xdx"C%ؒ{ Q:*ؿth 0#2<\a"r/U:r;ynwi)W~qTK_hIq1w?\͗5Gi5]KPœأ7ړ#v@{D*0<wx"@Q AY&"EohF{mFN>ni4O )8+f7W*\\']Jו8G: BUo*jz Ɣ Bs4Lq5VmLT(6w3`?"S/nC%Pұ|x;B0b&SCPasԳ*f2k{]"kNkl,?[,CR 8?fwBҾ@fi'a5:<B\Q2DPORKR]2WwѳTL?>w]iX>ԦG756h[ xtV*"Yd!UN h\R0'S͡]LNVo[,][ $;]Pi~) 49uwTLTTw0Մq+i?L"[,ND[^tsn0P\M:# Vd7m\#>x" ka!%UKrʸ^Hr+qD&^57B۫*Y S%g3]砆Մ W( Qc6V dQTZ.eVw070&.oTqScɅ>uV!·XE))TsaR^ۻhe=j`^4BHbaJMG{5穒p9\)nOf5(>Vƈޫ39CPoy%9uoDqUҐ͙S+LvNq>a{ۋNٛiQ_ue o6"mB^s\6 {+3aMM]*`R*\7c>d105}{ܙ.S\C ghNfdPYT3Qu(/҂I@x0LQ8dv]H/dɘT] 9p%kBNף6N\&z~PS8AP[u E#`KnH\}<̈́$K]}&VLb 4NEvf^ye)ѝ(}8!w+iU0f'uo`k>!#%1Rf^m 2C5),ׇtzZ#쁺uęyY,rl)#ZOx!>DRwCelc0=yDK~u]zZjx%&QR _cxR`# z @"ؐ_ɇ?3h!^y&]3K_/Ui*KHTe:,I)bZY9Z Jxcqr3t'aS_Hj)(JE Hz0d wNqx[F5uJْF>&))e >w A3%<5ADð/mq=fAYWVŭVُ3ޮ$ "˵y2;!}1@xKK:}>>*"VIGB'g`^FM7L1`9yEC5 =Έ v?#-}*S2~5n5QFQ[Jq?X eB`MO}oR^MImޛ䄲\b|:<޴nRI0PPwgM,? tRSjYO;j͓d- @%Sf"־rUtx@Rڢ`pzKQ⽚ӱ~'_~ ګEDbUl`=yyG5\yeش_7ӈEDu+HDdA_XWVO.Ӫb/'^ERyAnNI6K!(] @ ")W jfP'}Zݷ8?(eƏ{H_QRyٱJ\]?grV8~В,sţ,(=+/O~*[>Q b4AũJU>>|)\x |c#04m͍+B<чk'PսJ}R#I;ΕĚ{_> 5zgΪ9Mm){7>B%4.9L6])/fϹ?`n;:{iubA3HxcThm# v_=4g\Mw%j˶byP3Qj\%T-TM7(wĚK1 [DbԁpsA YFJ0 ;a]6;o2YUЕ bGծGA%d[hO7}tiU+)*VJi"ӖDZmq^e=@vqHo"[^qP,J+DgcڐTq&,b$.a?N֮},>n?`dg_PBvdʝ%pjUBpO kEDˤ p8Z p^yqw.2tRSZ$Z/Br&, ?K6<q#ƀ!>U?N ZA{G/50[2Q%2DZIeNz1C3V \ݬ@¶:KAsݲ0SNyХn]z8mE~RlLK:T=)~%3؇N,ugZeI]%'Dו98)J"+y5.\ QΙA-oVRubl=8=xuS.I0ƆGեI@KzjhJ|JLpuhE!XYcBb p8O9rr]_ꤽ;ef"nY{ɏýX]f$:,`RH<Qg4g1 4$Lt; ћ3#YLz. 
h1󘘓9Lg8-#+b}ylvSXa۳>gBu|tZK8(>9NCTCUn 2nf]}n'K";D=r j1İ(7Ϩ4H}x#4z:)[;Ko|7}{fߟFrxﰔ f?r-:f M'RO^%S9[zzJ85 Ě117 U.uP|6X/S܍|`t_Ip YsgBsGue rK&-۽8 7DDgA?0E1j/Kaʹ[?Io dd.+rN[JŦu͹ ZjWa1+A%sB;'2\N -VڃGy3]2$Q`<.08-3J;9v.?wHORAlLO@/7pw3g$/%enY8+qa(M /1H_ڰ΀ rWB &+ta@=f3*ȃ{Chxm$jz)I݃9/ èU$w|{aUO=ʭw~! =9 Mft?)}nﴫ}+Y׋uJL3Ɣ&%"1hC2kK^? xSដQ 04,mTu)w:Cvؾ`au'тZxm&q]ЮϫM~Qmj8xMj`/#L}1Wi[K .{qH1Dy$h , x0_!dT jT RHqpkN:( 1eZbl-3ɻl'ĉUFJ8PhcV<}H(ބ:ޝ$388wL΁(po/kAf{֩UGfl*"el+oܿ{qQWC4ٟi%D9VuBFn{L缍XB&MtHTuO&2~DȜ6Z's A̕Ϳ:w(Е-&eHw-$N`3]!y?d-p^V7RÏ!%[ch>IhN}] qZZq{fq!XJVa\=ʭOܲR0]w& Rd%&B5. 62gSȐ55Xn:V^LǷ߇ q>ý3HTGCW/x*s@ST.% Uā"Y:@39iH!p FDYj߱\aDSx ZbVS{B,H4ӛ2RS .cUx 6%$Io'gg&c.kOhl`dsb XFo;Y:0EG]7Sw8h3^; 9*>oWo콝y:#sDs'~t B#Џս?IsnЈ|e n+eL?Qmlop 7hpJ*[] b8|%5"DSȫxyg;ik X| nʄkr:^Lh?!.TKn _"@tLˢɬzJt3#Q+i;ؽsP,*>v&$3/@aŠ%,:B3edCozR#ߣ4QO^X@g-὞m}3qێ]gä°4ڶ [g!~ k;"Vf@*!G39m5ômլKaH-2z]ƝX5b߇`MߪzP߸++JӜ`HK# f,;˦#f[ˊGDͧ_AAű<"_Ex"}Lk®Kd<:)'c"_Eq vmP^Y̖ZaǓ6 bNpA͸,\Z6WD2: vVop?X0f {ƌi۽4Chvhf6i co@E_W^P#GI!~_x#5e\U GZy9DFst;Q-#2ܪ=TX_ 2g}BT ;jGJ]G$Rq3g&MOV,83kBn8U.:E\Q샔^[ e+|R+{e@j-T`[nH:@cp'lZ aLm{1U]DOIg3ֶH;![[@MYh'C6}fwM8L2) ::GAQIByZ ~N7SoKRAΠ#wН%C] B.I4xqh*eMN*J$cRo5۱?Խ}>9spr{)ݾ=ک0~SJ!NkHxDI#9+?CmJuFA sVg SbV8=qZf:g}!@б(p~!;֠t]R',h~W RCNQr/DËs5ulalWjXjK c9F*Iar,P9` kW1+/nlK Mܦ_>/ 8~2 YQroϲ[Hֻ&k ;gFOAzzl-K3rgc-%[@NH Bj;FdQn?Nm.b%,*m1D~M^]7T*SC.P@n&}%vEƤk13 aLaWuS:S`t9OPf} 5$4OC3kvqQsg;^YJ7D|:ʬd$̵My֜P5vix/>.T.y!Z[^hF˺2^1k־of3uͣ]Ǎ *Ed'9 _ Evf-e%p5'R.hamz?d~Ԙ>i%@bb(Q=5;RyQBB_"zO9)=^|$vj5Chֱ b+U$b!gцbq g?A'\đQ?akSȫSܼ-׾KT݋CPe&t{o|XZA4 /!01R׎aYvr+olU- 5.ޖ=\W\mr-|o)g$@3_§9٢oqp|Ӻv\[qGF7qLvm$_oњ5Bۯe` Ew չ0F*,.:!L)L~ Wr@pȌ6zNF9dq އv]0S~h%TQE> :O/P\MDr@(eB<.g:X0be9pT:R+!"qyJm,C I"8aE򁅄?\ϕ-GHЋf@yVP؀4߭3>`"x#;s9Wg>36T`Ɋ3Dw~BI~w:-43)RoE)ZSX{uy !:ZJ}@߿UVFѮTjpډU8F@ydPsfJJK"#ZQZ]@IupkJVrc-EGCFYI"WbYv9$Y^^'w$~|3htS7MhφKԹAc+HisM&-Ol3?!2ܫO%iS{JoF"-%s_{Mcˍ3M` 7W[87_*"yL+:xHS31Z^ʏn-4ZӶ<&vu.tsw&C]ЁWi6{ A=nO j-6^Bp/q9t4֬t?z =|@HhLjQdyI3S-!:=L%V+]wl:n}|EN\iQ>(xNm]ܙhUX87$ֳv4NNŨ㙆\egՂ?JRw|x"/4^ۓ n8W5gy!DeH 
|+0#ELrf_-aޅSvoDm\&?LC[m:hG7m/ca#hhƹs6T/$?7^g،ʿOľCa[T:ccfLAgq4KV#.@F/5 ~t5e{K5F(:|FV"aV\BhZnKR#QӗdU8j\)'qtEbHv$BYvY٥5yE{XI"I YTj >5Bk'NXҡ8t@GS&9]h4ҚQ7yQ4^˛\P&~_Fؘ}Ӛ EJ9,4+b? 6P7u"}xҨrthBE8D |{ $cb&M*Pa>Qan[wV Mnn&=kH9} s<KIȹʘPZ9`)^c yփmTgFcxZE,j罌Ey3v , ^7d"˽-ҺU/}2t>u㍾ w <'Wx'M#E7 = WM?fOrjV~;6QmD RNނ8ڏF`)`<ϡbPz357G#¤ !Ɇ]aɣdh󠫏P<~,Bݒib+zɯbW22,|nX^JEj39ssbxm C+\ eB6p >ɞkퟶe$Ri:]Gg^1̖Ak$:AQYn63Aa^XV}ܲG Q׮o!Cg_z/<%C#h{\d\z?}UdB Rz^ h&g;;Qٳ]VtyɆ+'A=#9Jk5LVn:LkGlPl/€tiÑ|yW)l^r(OıBE{ݱf%NMKR%6׉+qa .^*ɘ0c1 m+g >ˢl` 2/ [GMt=i1c#Uoқ) ]ׁddž~e A w+8JYjS(S{c(U*,֏▸x~ݨ Dѻ}rDgjt`+5]@)ƢlUB?'C~xRߘPbobF p: ,.xӀy\ɟ*ם#(O3̈ }8F.-" wUnKdժO`ݺ5I q4m%UojE' 1A_~>8%y<,7GS0?F2$ YR[ da z|7b;Ѐ("@Q{WTR= 򩵸@.7v ðgT=s7COTy+VU ES?5*RRy)&"ӑBQRNB \fMU܉KH=4|ӀsD]':B0ϼ1׹;aCZq6ָ3Ǧ@0'UYzjqW94tg6=tS/ҍƳHܚB1Vˀtf q]b)9P<-R:ZAd0]O_ӬR!E`+^3bN**$LԧY%}y~ő1ɞ?E23a8 0kޔ |KJK GmEh$ ;I)P9UƱ:Ǭ:I1iwqm0SxźzNJXW?.LgY{3MF bWnjG@l!~Ǧp3LZ^sM_YHEմEܛ̪ E'\ BcľIo8.s{CKKЀ£aGO^`- f2\gP}f1w\YT~$k^+O>~%R{7MAo=4F6V!>_ȡĘ.I:O ז `Ynj؉jO%}x cUB<-V}pjە9l>| t.t,qz2Rh1J)6G&QmJpBKʨ3&M.Wt.ג pdLl$REV=VGappJZ%6;SO@%j;sz*0s8ިs+?[њl)`̆"6XKڪo ^=x 떜tؽN_r⦊!"͜뚒M.mbYԎsF8׹䇠TsU}N(Q=\cl7XW2yHE/jeoY'f)*KMאi%6ǖ[4ɫCd}INnlA#'kЧ}l Ml09CgN,¼gBG| ՙ~އIU`0l ?EES"72@uFKܼ:tL;X^ Q_Ȋz }1郈67 !2c`t_YZ` Jʭ$/"PWSHg» ~^:9$AH\\< @Qer\R], b$hx#*ȟp6Ed[*-avs$1LW@2E1RE26p'͐4s{3+ j we+񠸩|ykPz2|S@@-^ۈT3V!N&lz#f@r82Vޭğ#f)D97PBkB2$6ٻí놔c036s>(1GPiՊ_5mWBs#%?{|NTo@<_T$,oW+!]ik9Ask](B!^ĞB(#FMDZ\| (hqavk{^Ҥ[j%OXR c U.G2[Ҿ%7iʅH`_\660@%<E9 5{MމL J cGT X@Mo:pXنuWsр6Dh48M:V[IfW+*f4F;2.~<›.-~a5sٺ}Uy K +;בA}/,LL!x5KG*a |8߆%%:+ ~4!3P](Dƅ9틁tx k p^^w;`ӝn[5{XW,2-V3%[J낎;Zt F+%#vFi^ۖ` \(2 () ޛ\KWTt)MX2CiryѲ=M^Zҩ1A%q>SkS1{9kP*wyʹ*Aw3 Ho6- ˾aOW*@B?dZwa,=fi=ӎ!A:.]m/][jI?jw@fܒo/7/֎/wimv"B٪6E\B<-[+ɠJOG`ub۶C'Uި' HÖ;KZ.g`Qq~ٕckY¤KhGQLCtZ<8'h!,\1_c8~wܾ,&:C=Z)e$ ׬P]g2;@ S:j流eUb@\bh8c:3<Ǿ2x!# 6"/ڌ7ɶakH-c F;Qvd{xC1W$TXjm'41n<}s&Gj@XWĄ.BYφ C>VOk& !~K?t1? 
0*j6Me7$&Tn¸, _+jd#|,[\5?x1ڣ{fn1pxDF[ ri@zL >WH9D` 5 bn0Oh0-?cKpE4`wGn$`oBI+}sk+, @ SJ]Np)ِ6fR0cRF/ M:Ω^].@Wc|z>=碤O޸SavHQ,YX!al~(ݰ/O>B94ݼKkM`ݢ֌V24w1C.?<\qt8Z 8n;Fa9cׅQ|}KQ\̞#^K:e V:/D㢔",eג6ߺFmV8,΂ӱ ~lw,G)wʊ~sr)KV{˅Ef QcZRjSIxrϷiX+0θ9R}cP.dò-z S; 1VwGU#amB ^aKP9>1ɬ@Ce+Ya_{{p/c ,8a4;!v^h Jb3>o"+cjՎ AP8 tMdoFzǽ9% ş~P] l?إS "8ǤnϞ6G(虄[Bius%aO2 $+AǛ3 xT_j}h'yTr i\^ٰJO$ʴvԤ2 .70vWt -1 <ֶ`9b{!ژ@mÖkؚE.M*bIҾM(q,8، Cc+{ NI}^֯e[q[`s)E>]:k82C*UYUw2 Eނ'nUΛ:>(]WIF *Ld-EY7ۊjr&cjdn6Jn6˜J$Ne?d>Bhg+eD8 Ja2pq*_1~qA}wxM=N]ptF0\r옗ϩK%HVӘ;X퍨 n![St'!KcU$zw'/oPpz{cv"QĻ9{@F,dv-g .@m`WE,b (CUktCξ싪QG.\jY|y|jKP!A|du!| Ȭ!McڹS{f}4Ø,cwޏ(↑=IW6¡c&W( " b0Zq|&\G \  _)t-W Nj\^#b\:lދ?5J=Wӑ' o>+¾f0H$^/.a$;I'~3@{\Mk*\73EХ}ldòF@0AhG@h8H ࢬ 2J!Fw+v\&$)i?$"dUq*TR.걓3ܼw뾻Q#҉YJS0Uק~y3kE ;n{(*E|,OhKmYMI4#[e@d-Nh9^(-֩=xຫkƇy#W%9^6yɅ`A`+,XCLz-2MxA&<[Weo x&;>uϖT& ȐW5wQe;ks0񞱐ֻOB{R9?XnvbLF七o,xd݄dTȶЁ aOv>v@wX)fW%Aq4:Z]E¼[l˺Y9'̓ORL\Lbk{tԑuC@ OROvui4~Y{YʪAuwx4жS%-˫5(V[50E M6f.#Y Spd"7g)vUUU2&q,ҢJɷgyL6ޟI!]ڡS)xgp3_YRZH˯|=iJt޲cuYPZFQ`ZIj/-'&qd=I X>nqp,w1:sQ1sg԰>a/R0hgk(6a`gfSol װ9Gw .kO54+tB+$h“щ's\ ϱZ#Cި@$a7..gO.Aܡ"hhĉ22Z%~U, ߁{dՌw9H| 61c$hfCRBl^->Ts/ԪIu9$ldj3{z^4bݿu{88w( \)WG'Z˚ ?K*d'b"送q5.-|Zְ(0v9~ZsHN91`{; ._즿@[Nm*bעآPX_a3bxRw5+o-+M.YQJpO?Hgk<HW1:wc^RO̜ARGqQNicDvo^͎RL^vo}A}ܞrp <7~Xn𿀃?Jq~'ax!QsYDD34$y;IhJ ׫exE:+}? ~Y%涘iWXzbS~v8,SNfLKi@>9B9 K'l{ZؓjB@D{ U6Y`M=X ܺ2;:&.z:q\#UeǞ˷.bT,uvNv4P{=jH/ s v6?k(jF70.X OїW1&EH1ÏǏ{M.珉뒥whu`716=q_'*ʐX W}|7bYIp e71R~'o=;~8 oF,o `zLGcA$=C㼕Rƽڛ3a Si@wUPBw!O#E> I⊳n[Ӈ)/s#z*|[?PqFi/+3Q۞!@s]/J<&E۴pJrjš%"$%&bX9IÚr M앏x/7Ž)s"AoWk(\PZڄYc/5^6u:%6gכJO6.}2yV7M]$B%, 1OAXɀ!>s1SWA{J ܢ=Ù{W[bcM&&Qɀ6`q&KWWorf]h)i,|εtiT~fsb)DJGMp1~έX' SeZWC]t毁1FDw~j ..Xnޝ)7KTq&G+o7{wfG Zw%iǧk. 
" a ʁFaYD>?'g޴`SvZܕve /677:icq41)m 㽆5$QkC)zGeߞ9J7Qq< Hc -yQP*CАlBtlr/^LJ=3 :DE) PKiÌBTa1*v#aʧ &!T#3g;}&ڋPSW8YtTtXcѣ%Va*]WD=<3?5g+/ F2,)Yz$]@ 圂_0>ԽhrbU^#qOS?@#="uasӠ鰎R <(a=!O%Z(F VMj5T&S@)2&=%NKv>۟'ncLo`>R|=i2 y T R@ccOL!<\RTb:XO))I) #/ߛ="ً:1nL * 9_K]V4n<)i$ȊwX'\ fsQ3h-OdvfhfvI0Q!}xSdy3& ,KL6Lna 橙p'tY'}8t,d!:Ղ\L3yE>X:_lNN^2bH [Ljv(r3sﶮ{Tkơ]v2Tg_ pJ_bXFBx>ĕ1x2}TJdKgw77q?t6U$͘i_Ӫ(H(#fuV;8.;P%S[mb3Y8,q@m=⡩2a wҲBQ»h 7g- g "`zKo:k.6SvqWt_-;.D HOPE';?*T<,tӥe% O .R{tH#qI^meV(5)d²hp$BBrdp9xަcV5Ĝj] :Cw*[ɟkwd;#Pϸ=lX{:oYr0Wy ÂAJ98JG&so3; ZLO+Ojv/Ze4E *d(턁wEVa[nz14E_y&E~0W4c'#U ^rE}3dI& pvd'Rut Zw2z[h\qGI<8'2Z9T,9YL $Qz7wČ}sVno1{$ZcdeJXZuJwX{E]&_,=)L7 nX~}?*q1V Ԁ/L;T=ûN&kwZ ⼻"pɝmEI-MoӔ\{4n=Fg~t *j^51Ѥe.Uhi?q2/8RǶZ" jxG+/hL s DjfBPV#r-,xEۡRz.XTiKbz93sh ?i+s.#3t#b7wr( #V埱韷%)* ?}:wF~M>X9@]Fau+0ϖޣ<5yrDUIClKpa1;qaC jfHиS jľ3x0s&aPsh yg VfEo=!~m/?;h0gTIJ p<琀25~O 嵩ηhub Ll+~mʰЮXAX9ֲqA 6VL KKѣƮi6'ur),Pj 0X#(ZLP2ޕLtf! MoY6hOeGU*z o\,i;F!#B~ *^o8I!rdBrRT3Sxo{Rh9k 6{:/j%#8奆?SO+ܯCڃ][(bLė HҸ:LbG dwWBIUpE&sk; !ꥢt͢높= o]f^4h,-՞;;?-T"3o):ncLm {cy;Rܸz%@Grh,h(L!IU?/ߟ(PlLd~jT 1]1%"֥k҃<娂ˆe!nz'WG%Wl?40|Ʌrd`t', [jJm*-$fji/1!A{Sz@`: }ᵄ𻨌s0˩!BLE602Xw!&2K J.ֹ/e#[y)uDt(~HPo(k@&K`7.-%U:cʴn n#<*Ta#ɾ{W J :3k=k_u0L,jmh܎k J.11.^OR=yteBW: 2Wrcp1J5 JsK`)/USg&-osPȁ6unpe=#BF>oA IMd~zf)LU ]&܅m (H!;2JEiCJ cWX#/m}K߈]u֣xFBvR=z]e`C<3ߺSdɼ7|&u_VLɰko/y@OI^t`D*>;j"F Ā`XQ3 }3ܩD[vh` $h{aW*xk@1J% }M6^#YH+r>' }D wd;V-+K_,AD3J.ma[]0^n}#~'pū)#y)A~%!{q\owF}"fo b4z(( -WQ5(Ch\V iS^+A#"xcĨjJ졽eZjb@u'<GTOrU4mD׎=:19'/amq^ǫilnt2vH>W|Y]eM|Ʌ62k{P(+"&*\OXz\VZn0P<->Ż~-(L9M1)ePU1??bD!v Tn<=H $+Fv mLU5M6lRe ]\Å'?E<#;3`TB$E3Hs5Œ,NDduBnWԑtɘ6Ujخ1zl7tƽ48 @i|84k{1*gYtc);B<<)0NT jh\g؝\__-=۶V`LnK7)Y}P!w}8* H Ҳ~m2z~kf}:KuDVߓJ,$La؆89.(cr2Ϫ=rp_{ G21KZy4pg1Riu"O>D?\R_WRFhtLn>blQ]BJS0Aڳ,=q`172?UzOĊQ7y]sX^1Os$w`Ye% 2P@l]HwF|)*AE`cjrBI1 $G2\rn=tP0AjI!cMo aSFh!!ZL? 
pF }@#61rˍUyEW)e <]u$!+# >T ?zQpgmǧ&:'.gۗk(K{]~-i @4rAF g hƉp!e/P+ťF="ҙ4R]gBjHzA93J ;AVF b nxMB#f5ߓi{F1¸09WO⧏R]pػ)鑂x90fF:# s+ Zh"i_UQ*4=aPZiܡ:ii`a+)}{{B)LVM \АnF2н[r™{~GG7=^g}rdu* Zμl4WDmpWw4h7'U! ]E^ґ&\j?(Ả|?mfI;"@Q 6{)6y3r>KT lxشgҴg:KB/ Ul~K'mO>I4YíM,E-.(o.5\ܧ>X/ XO<鯡.Qh^#&Iv&.GW%ϣjcB20QտRfNhalbżeLEALR l[24E8BHeq $Wism8KJ.u[v5s~إ{s햰R®+oA H>gN2׻nR?!na ᐝNyt*P05͖3SĜ]XӪJ]HdH;x"?WYp([R"# 9mF ;d9B- yN# W%9wzmJq[$ @A/RIK(&_PL]dQĜU:*\.>MCl:ؠ{/3$a~S5yZԥ7[M1 .N@MzD~Hp3ݧLv4{P^9cCsh _2WK||QTGI؍c #*֔@wjOH92JS9FTPڪA3^K2Q6@P!n=h %ybZzߣ_xpǤoch\>IUΞ4q *U_ǁኢGH?n2;kсVqOs!"/I-psBYbD2\UM>3k&q;j5QAq1yoJ R(Zİ0 s+-_EuQ]lȢ"ȿ}HV s_'U96_I/D4WVN,s|l־6٥c P'p{ J#oÏq6 @u7pLM s>YBe@@~BR*} ޤsG5>Wp!Ky5 {9&d8ILʒen/una?zӘ]yM e9 c[O#ocdQnu_`壟[Q*|ӮTH*9N!Q\ھ; APڟAN`-?(J!AP4\2Cx5U )4泝-7;T۹>2C8NeVZoFSFq=FO%O-ڶ:s3| wV@!]ӸBВoQvz*48_M/8 =8qUzod#y|<H[ j)Qʚ#e$A*4:@ <֠ Pp:|Gdcp330Ǝm7+PULsdZ)bT@ɋH\Fiϗ3.)}  x>W>A@ 4 /@q`'nb6S;).ahlzjm(g0ɇ ]dvڬZ=#CbV)لYy`!j"Y h"ecm#>!IvXvEmHƦ*֠ퟡs%G+WW,KlpX>>'tCjfǡY­CF zA_LnSUsT&uŸu?5A!2s|F,C%= fH, qBwE%Q)uPVlHh:\w8ҍj M.^[}uR%n-a95t"(JTj=qJO Qbgƿhj<`6s :Maj6pzGOQ雏4f\:FsE|{; ϩϧǯ'UNf5?VuIYnc|chCC;PDdxDQ`I*=`TH;Ia6w?t]4Jd`_{)Dѻj`%)kW.8E)%E=:"p>#P;6Pe됋esE+#.+X ԟ-&"{$LyQW[z4WGoM=yu ]ʛuEsdJ\gO1j\9ۆBTDYg<2q!LƋqJdF³q6+3aqܟVEW8u}\dMddqj'cH1K Z8KI=Sj~=uAŤ&U X#򄀲d/\l{my_?D'$dQ;X9<18..3,H!Jft'66}>0 YZlava/data/hubble.rda0000644000176200001440000000103413520655365014047 0ustar liggesusersuTKavgt%+:DApB#o V*ڙqf]QL@;'# DBI)cW-BݢA}=|3<20s?ąQ]zT"B5c,sЮJuJ% `9uw7]|'nI)Zk?(':Ŝ,A?~ܙF&YN~eX:..i_KW~){ǰ)1|wwpBT:Fܽc tL@/"ǟDrn;܄xZkAIgT朿%ň3RBP.U5'u+>su/?yJoMay3_Wt3b,nA ϴO]R X}%H\EsvvRW 3vΝlava/data/nsem.rda0000644000176200001440000006507113520655366013564 0ustar liggesusers]y4U|,RBB%=ЬB(CH%E%$P2I"4IBH1 gX@ Yv BOa`^[3k+: #?o]F0,k Z4Xw;.>fR#]z(cthTXExCg~;= e#ò=Aj" ~n!_T 1&y#&5XfX#8HДڽd'w)1ƻ+}@;'S,~?i/g#^hƙ׍5puPO_ƲhJdC\_43G׌2[&r p$),'%8[ , 
'qytݜFJeot?W8aMGIz=lgp02qvDz}~\_p.Rn,J$4;'u6U_L,y%+"\')oǠ@+\&sU!{.h<%5Vmu_C8e#`02R{{1&zXApׇ3[l@~|)Jl0NwXڒ0m4P+3OP_^Br?8V-2J+kHz?%!p5.w_#W[Gpҭ`Y.SU~`)rQ~bmŧ9BRwky̦+9,6d:х>W$SJol;~FNe4\d#XLw^sDy rΛ2$z)e&vtn罸|! '\Cdơg63xYJdXWrl[U|pB8Hv=L88o 6vo1RḔ7 UûZbz!&,шv:5c*^4M{c몲7D3ۆcB0E=f>8Nً緾d!V}Re]%qilehg҇KEBߡSkͫ^+-`H]C/y!갤I5{{k~Qj9 'ogЕ=ZaFy!o vpDw|=N~6!5+ʓZڛ-֣3F5:N,~%-9\H 2yyIǯb%#sv-\Hi, +حk }ە]" ظ^a_1} 6)Uwz ʗ1-$w]q 6 q(8Z9+y\;c74##}* N͇ŷIo'8,ns^AuktD15= ݹq&*=̀dHָ-ٿ$g̢Д|+AXx_Rbr vB**\ڝ{z18E-%캫tG>2>+u`Y߲- o45'nwljWmUpu_mT^#Nҩ: FR$=[gF~{I<~u[Sn5Ê558Q%'nK>Iq/NjY"apEQ>wO$8ʞZs@im.;$t)˃wݞZWw r+_2.ioP, σfn2FcQl8LI?}*4_]f U$J:aA݄(gέx.r]kX@tm/,UHcR[8_ٺ1#|b֦!l5P]Ԇxua=\*ݫEOgJ5&xxysh$@~"p.SNzKR~p; wM^6s.f֯X~+V;zJQ' z{0e~9VQ~eQ7PO^Lf=9>wedeBcL+p_d+[o2p~CʃbuޕBg茲<(|/f+ñG6]DCΎ/'X9Ϫ}2si_ulнAyf'zP";"K3va/Cf3/QY<lavșahm| s^ÌX&}'fAguA%?Xs l{k|eS-$+ߛѩ0zbq?wͰ(8}XM,%'}B 0^kQ]OsCqdWػCNk=2 +OwC凲T%cu}kW6?&,zC$e7fp/sH1$v͠)Ө7}ʵrsq )EMwQpe8pMDL%b'4mdjH&LۨV cvOT`j?fŖB2f+;HRf7V<] xlй{az&_*>KT%BCX4by.MOx8aPzq,5?_P)M?CQnn0G)l4'!U+d؊4$cud[Q;f9OrSR`A:MVy=EYҶ6nS9"vE%/v+]J>ޒ &ג{Z m?N ɓLfLÿ`N kG /NRMZfdA~x^O;As(#L@uqN#$>ͯ{u'vlTUf*&`vQpAvS%C%NN4S?2w$FWt7xrv1La't:Ԯ37A Cշu7#Nr|}e 'uX$dᬄJyuEH͑Huf*Гv?'$g/YsT"ζ(ǹ9c=E{,Woo-#x%PQa1 FSY`׸ȑ30hy#<ĠQ,ܝc'NY8)ZHtMMt/.31L]s0!u6tDY wԠ%VՍ?{ڕyud~ F/162!# u. t[u|]?}ʒ6Lp> Kx&eXV:ߚ58x vVʼnQ_8oR8( ay;"I4 '>< xF"v8`RD. 
v*;E6'~?X\˧Lcеm|.z<Дy1#oX:GÐ7'A2 XxU X+7' aȸ{Yg_%J26:Dҝ;b6nC^9]9BvH`L:^{$CaBHOҳϮIuB~aYbX=TLX{$sfzË-/`wS w cJD>թǕMw,gW~o3;]q-dH V:PtNco o1{["z:`nZNNe傞zл>[[XJ>} $2Nc5]w 釄E;{ :L%|4֑!Ӛ*D-B+U~ypZF{ql"Ӝ~|: J w=\x۱1D9f ?`мM'ɾG2)|gˢ蔕GayV趝N#/{OzO0/vY A /;YS4y*H(I;qRfc@6я Gp$cT,cE>7 {Ul|9OTb{@2A?ǺOLq5c gw4C㡣黧#8d>$}oOGtISmQQ_̳W9Fa##Ӊ^Suč(9~Jl:JП7H,yLEha<`EPԈz0t=۝ݓmεNFBޝO|1o#i=0F*=:bArI({ݮcwnNT78%̩X!?N3e_M="~AAUVW<҉:wY7ZD%Lv-sŔ7q{d}qw ɔΛ#aW5'%d[5skP0 D}nD iV}-b*g#xzRsoT=vdaخ6)18z+,n&vukq%YUzgI/ٞU<6 4:qY\'$>'8/6{v3r/tMԆƭ]B_[<8NXJ U"j#`g*Cױ㱢]\i{B51*0)_ ]>yqΙF9u |*jj.yx [A[@sȪHsm-"5NwiY%XPyֈ/_2 mCV<p  )M0vAws$66lPM2YKX}c>03}C1={yK`ߘccEaFMX~U dE4lq1ڄq%ld |h[c&M}Ol Yx>(wbt ̦HM XnK<{=)a~%u{Kco8ɨ L.pR}`>Wu- G{eDBb֖ 5c^{  ^S_~ac/<${obMwlJ-n}h}7Y![}G(= `jm`LU| UgWGzYr'ZH&=k5;5X yO4)UA*%h=kjէaUc>q۪e[F2pAI⦠%gLrq^`t"q>cY[҇W"'FI dY7UfJ#rrwl8ynr +? RDV&ml|v y̕oc/&y[GWhj<W@4H~jn{XV`R*_˻khRqZ$ Fˇ`h${ sP)5/_ \eT- ӱ/I?o0p{+)i>3N¥roEسvx dU<-7=p& 6KfB'ݫ $6anc85@?qf$v¬3.>+V84z/ //x]5/taLBx?V g&OuJ~f= :N5ݻpX,sϼWg^$\~)o ik"5V߉_^nU!t롟]8yIweRC0 z?,^:3u+0ڼ*PNн>> pLaI t>aa 3t.ꥌ̳JwLBގGWUGJkiMJ>Fڢȥ]1IQՃ!=z%X2xI?[ւ;45pUL S$"/(ZW,-7m:Y ovevK^lۺ꙽fW#dο3ni$caFa\5XoG|Ȫi I礚l(\cWzDaA34tfߋO0~}E̗<͠ycXW 0Bpd.QO8hC8LZH˾x~wПr5ݩ}iG)󳗺K\tu*'X0MReHlp_8,Iy>:?;g6_p]tY'?YKoR7\8(Ne*L;նq,: ~ ('4Y.M#K9S![-^wG \wlŎ4:'A8p :_H?^ 87( ?/,Z[¤&MCN# cXbMQ4iiOf6|PP(R9d<#JEԳ@ZW${%PvT<<%^n 䞾 P23]ߛa>%!Mquǹs4|dҷ ,JaE=Yk9%DCm0< *y԰7sPjbӏH`GYl3u=9&N|s &r]%[oeY<] =#,mh?%8Xc t$;Z H OmGql:s9h}{O<`)mqC:/(ٽNF\J?_W 0{L}{V]'O0&Jt_d= -ݻzˏIy?wضks|Ś##EWXf$˼< :!ǎI)$1~fm"Hy5.YmQy)cLS r'X_;e*/7ւ ޮ}U!{ry-,eЯͲct]遢Q Uk>|UF޹a~z)dkpwp;z6!e8q@")|0&Fq{._WCe `q0&ƪ76e=͎|rN_s硯8'I*{`ed|8AmуSh ߉Pv@E# $(u mؕ=ڭMյ/8L q䭉QVTxIZĝ\N%%>ٗ)09b8$Qm3魗twT?%=$Џwpy|mk%.Ɵf_PqQr OyM@H:Bh8Pw|"7$CyyP**ad#?#hogu(H *: aQ#dž 51J77R1Or;~8|i8sc=otJa}߻X!=Gr| 秚ev,onaɣ;\$=g-tF EVٱ*Oz'B)J5BF"?ߗ;l6lJgCWLRrNw_C*,\˫1jI )gh 8ʹn&Ve$߁UDgw`MdQRϸ؏k9A0s*R'Pk3G\\ jBs,kY:gg ;SaH8uƷ>ZdyQHߐ2&YXB¬C`8 |ccU'Y2չougq6שR Лv3SlmBCc) pQ0u1H[oAnӨI6_W¥ww0ⴂ_c,Nwd͢L_jܒq""qf'H '~q'+e zIst 
pwK7m-9{:6>17qT$M,A<:yZh$`Ls^{?v*_W_%8S| ?kOa6DS<, t\rswUJ,]Nw =dHYU_`z0x3LmUk"3,s>(ǯr`>?;e gGY8; ӲW߽ 3xTt.AUW`j}If|WFI#rh04<֮It7#|Yùl>$z`lAeO¡QM0Gpd2Qp*"/= \wOG}vaTd.{Ͽ DPE!JG[_ADb:, od[Վgڸa޴ycX& +]ecB/ء(*y~=W85G1|JpXBKy:Yplk=SE4vlr!-FWlIhwW/+)UY/;ޙQRĝ Lj |Y͖܅#`VG8Ƚc?ew]zqhX:q}2Uż-!5秶ےhNҳ69O\ʯ $c: 'Rg2E>$W9ۉLNr$U]'+ڞ@sa+I$ioU{ F$:ob#z̩&K#]yhu(FCRP]sF9P~> S^tdr%aR{#7"vi&nX=Y_OK0 c #ۘcP"~'`ljí ZJ"LFn{>%X8/dd6h ƩV-EH{ӣ*hW> ;uܞ+Q$}_5 K{-xG8m,p}Jl hV1"Ytk" {{ƥe@ttu<~9azrC|4zb ,N#Pd.wU|ʉo";EöM%&hC糠AHû_,Bo%'N 3j ^՝G=?qiX'LP:u. bC4%~# 6`c2dž0VqN=ޕ܂#׍;۠=W$62'vVtEn[狰#$/RRZ=E޺w$qvuW^)ė ݃쉁Nӷ|$)R>O*30ƙgUAY0m{܆Sw#/[Yl1P:.)0-wjJo/ʂʰbAXՠRqM27((:jv-8@0)x<)`F|w}w=7ܓpvd 04$X :ۇ3-K4xgKa8i׾l!{Ih> d}7~`lwI߀O@uip'Ǽzua(֫/2] &+0Tw3=~0vO"8Ri@0OY^/O\?t'{XכǾ)\V#:S94[8Bd؞|ܫЃO`γV#̸KLmh$Ӥ =$" 0#pDfNܓ2]dlS$ys 9qne@6fx}+Cfؿς ,(ayHUjԏ0A穐YG>m?-*]޾'Zm}x¡L|_U?]/][lNӫGVgnճtzF-t)`apaO~y$4ltdqI.RGRj'l4t^VeѢxl;\X{&u5yp7?EjTU7+;)X/1|4Iϩ3\> ph޾Zg*hni}&F'>e|`iʸ5wd/]+[`ԙh=I<|$WN;].'SA5sw PHO:-T~) snۀY] R нDk juOFoԨ0ˮ?MU%Kz#?fZzLԯto2Y-ڮf$nt?p:F{옪*Jqsxї^Lf+{.'XU*D&UinuVx_ 1ǞiVlzNz}s9X-cAq&Vy y+m zF[C>qN[0&͝Y^5>g.-ݐ;X}O-q_kq 4 g\ xƄ\%a¦ͣIJ|BsU .6?ZqhG9f6X8$FMZTt`55dJ,95k8$F,aՂJX^o;C0*i}] ^:{CCH9O0 +y䫸ݮk53yu>ʽ2ܟi@xo*qՑ+ԩoSU>]+@~DsIOMIH(^I0 \H3}Y&t[G0y[-tk*x9Hۛ`єda WÚ5_ #8zFwLhB&[tv>8}jE"P-4yq~Ǐ;_6_z'a0X!FXJ2zP&V[O>9;=4e4$ 94C~7^4LRI^KB ^3S CR@i1o_#qz"vPdߟDZmKU³L >i0]̮0ND|YSf=|PW`lzANȶH];!z؁Wn>53p}8MuM5((Yӧ`+%lK}`en4R{]X3zɇ`k !a]$=l:K@ij=)XhKp'fnCfidz`' Igr'q l>eLQL{$EJ b1-5?ޱKB_zC6$UC{B'XT<`o:}]\ >mK 7{pBr1c)D{Xqxn~߷zVLՓsopS(ޘb=ytLf|t] J3sW`L^-4A8.I Sxx`x4<[=3KݾH0RKX~JswIK\c}S^v7L*t2ۥe}{g?pW\,W f4jq~;aJǖw@S=_.t.W.foW&F#mqajΘ?8p߲2t1vhE5UU>A߄wui?=q1@P[4x`yW:Tҷnq^NW1qI:*qOpCߴY1TOHi6SpAhD=R6I|%0Je#c8jv`Mwv덏 F309#tm^5 U0EUn4m/؍;5bw0ɐ%|0Z :B¼VOp8[ݍ'QT ,?{|kr!{e)}MiU0žEv8vXokylT}#n==+sv|җ@Zǟ_$f%0wDs}auc ]RnMkxRu 3R{b6 ߰o-Gqa~_qV5鼤wga6fG@LEJ?=qakx?n>{o &-eܴ؜'DKUQ fa36&|WOAM !Qj6eb5\xue,o~G]꧇I@5~[SDt |_R2_%`ҍP0i~TpUKEH5!֔RJP&}ER$x߽TYĪ/]+?")Fa!ytVҜG5vpRxeX _[Vՙ%rFM0w=(Ԃr^SHyWٱƨ2^EmĈd:YM?' 
kZ'9/=#tUKsZl3nm{`MwI!έ>b\GM'`UK{ } T#eay\o AI:r;NZ##v$s{7yF.iN@_3_'L4yTkԤ@)8Cۮоfw aPzL6YyK C߽]rpm)Ϟsn;g˿#Cɫڒk7I¥[ЙYr 3Ĵ6"Aɇ${R=4{aWnP= Swv~&lc ǎ^@9;8 _J z0}o0LQ+l 2ϖ@w9~%a!~{QW^ e,w]7#%qRÕ'PD'i(SZ*6v# Y<i\Ԛfϸ_3k3"z9m#WK ^Ea|{햎;0|٧k&zKb0q"(;l]WZ'fl쵕yMI/g; F[oSb. :,xf5N9d]<~`t}^Jm/8F[~A+.3D﫜* z^o3U9Fj5T+l3 L!~FW1;# II Wc Q9G~ñf?Ec *o֌SBCgʠ=~cXj>|[dC] KB/i2] /7SvlNjM8z4@y^«]i>ij.=bo]nnwKpҨ.xk">a2w/jm{=U@Zx~6`\`9V0~k$9H4IgߕbV}q5M>IbK}j9 ڹ$eFEAW/5)~'UB!1j5<I#d|RlYSݽ4^C{yi2e˪KByz 0{Iw`\ڣu 񤊠tfa\{l;ILM::02+[K}s)1JvGP.-AӴyso`| ɱ%(qJg~TA%W]]СQSgt_C*EBwjB+{^2ᵻBLhxxU Jͷ~'([Ω3FeD4Js誯 g^K2qjUCsm G$A7{Aa-`Kgj5oXwX3_^{uo!YI%lٸSoqi-oN$(*{B>k׃۱%Z gǼalC/Tǎф+`F_t4Ο`7P3nڇ:=~t>O}U+(P]֥}[>EX@5gc_pBlV: W鯬Sa߬`Xy5 $Md}9낰Ց8!ͷ 6K3oyEefDlE,t~ 3-ǂN%ΰ9d$,6d{ޜ;,47O۔g<'O28` _;k3b:[;|/.jW.z')cB,pۖVӳ|V#:u2xO6f7[;I)cPP`Y! R:6Ԡøx̹Qf'p8 cr\k :GN[6XpKU~&f,cn0imߨeehcc]Ԝ]Ci{4}J7} WiL{gzM?o7U{Gj[E{i݃|W 8lo2Xakrm3Zp(TX 9EWRe !#[' =fUop{O=ӊe~Μy!@TiG&C[^!XMzSW &t$tB${Ɯ4r#`p+l>P5mP6ޯ3ޖacWwLbP~T5'P~ZNw;LSPhM+*w@d @ۨlt\=8^btiH >y"kR:pIf$:bÈ?{'ҷԡ21܁S  azD(lw[q[3/.>}5z:iֳ$?/矢:,}#d_9’'TqNIX-nwd6Ihq>v LH~k.oKW&tާkŝ?r7c?6\ɺ5Q+G=R/NgʠqFɔ`|ٖx,jXHf4 H7wI*8-fG* deH%n.=R ̓~cY蛨{{~M %n˼.ḿ3 8]!DR4vKޝNNِqM ,dϸЅe[]0gbOFTpVN g=U9!)80?K*s7j7:O!r:b?\džF#8XtA{g\YⲚ~B5/[р|ylUޱwu+fIH{?ny!M=T2T=+1#fćmkDG"uسIBp4\טl_Lֻ#"^uoJ>ݬxf&oqչLy~>t2UVb;ߜC~CONPSD9īx;&zOYq:?bpA29(&LdQiϙ8Xbq{G6&ɍ#se_v{k0aȽűz12OZc+m!G6ؽ%6o|"9?m>}BF, u3 /SeΟ&isDIh?aV7uφ(A]'knYZɮQlR, zFLL(/$(^yufSNnyCaߓwzzL-)k_#_={"Ainac28Ux@2νxgyNt<(T<ɿAC׼#u'p$5 Wg9B+׊#{1{}獮>d#H |8 wj tfٌgRcM9_W-$i68x ~%3PۏXE܄P)VvӠqןT烨qsy̼Oɬ7KE3A{U2}0=96 'f{Gm—~a@Q~==(VBm6DA[1OD)0ȓujK J'ZHzTcW2 xr )GVDh;q=ۭ=Yf :9_7`]m.3hSx3W:C]+}"0sʅY6{`k߬vYcYB))ȶ}~!X37TEѷ/„N.+E~'4{^{&T[NN2.ILz6ou?P\G4f>fKSILg}bI"9U-݇!Ӧ" $]ŕϵ%tp= =]E't57զ ^5?֤<4Bk^:Ґ 莰vCE>Hۢ5$@z.?yCԄn{HGO\õqgF;?hlw,)/ZN0>9iQ{Rs+;W C e} E#UC76<>y3q>Y%BF{ldnrx-e~'C{xB4}lԷ0:mƆ $睖R4ȾpG1 L#&-Iq:rn2čE c%rBطA3BrpȃصC͎8C(і-(,n5F׮{rlQ7dJ##m{(A_Sa^E[9;uD[Gf}-Sg=U !EƧD[uVs]T(B=ش;WwGxksspQ#ug8)-x#+tWwԛ,X\ݒtw=>s%?; =9.ࢹK}|n',C9~--ݥy&=e?Uβ ^LuǺ6Q=&[jgHɞV 
Xgo,Io1붟9=ꚧF>B㗗Pnx̣_INvO#N̗eVyS oO'㢚ydCehb ~x aTPb9r[|˫Cg0A=U3wpht^3rBZgKliEz,bnBc2?[ 0l6+bpb)xhHl|a0zڙ4v_[qOG6zr]ytsEdd7$E2[7|2n8"3Y1{Ա(9鏋1uy[`ݫ>Hnt` q4ݥ t[ %=ụPy{*(qLe< eN {Ղ#g5|uχ!f,X2- XѪ+&IڔW|, ٗW#B=Ҡ7[;`ש#wƄ fo^ gcӏCKH'n0&8axl~* ML69P[lϛ܆![/ <%8\Ձ *|#[CpQ<:G.W_ꎽogx/f2ԇ>)d'p5=~P9*vbu`cHeӗHp}iQg:,?ozIe'(8} fS*`d)q fFΆUx-caݧH84bOݶnO>[%sTKi]i^߯_ω-F @:>FJaEs]XghCQ|;3 F1~(=_A X?9B 1w%`t[kqM̸($Mا=gŠ햢j3 L hu`!EiK[1'ʖOü5kdrq3 BFΘ`g Dq,}yW YтbZ4lH 'nx{)>0Q}o"ޥ?8 ZX02y;gghdbM}H9Oc֔}"ck8q)# ?vKSPa=g=5 .0WUWζ^ZӀVi<=$I,u|dg4$y.1ċJ-֐ GDbǯ> ӑ?lc~ڥ[kѶo'=E*v7r_J][J[`[N+85gsqWOy`uusg?enlava/data/semdata.rda0000644000176200001440000003715013520655366014235 0ustar liggesusers7zXZi"6!XBx>)])TW"nRʟA,ʿBJƅ&@B"d"R(]krFSЕŐ+0hKDZIK\O qL~b5ڻ yU-v%Yʋ@mN_CHN1ipڽ$Ry/o*vo"TQOT0wf%L˗̿qo9ssH˒n L -1FݸC5.8(9ؼ$]PFl9VID _|d*wۦCƌes6~Ǻ#?J^uX1 yc7<)aq?O@y?)7^n(k <'l$}m7i?m0Pżq) G3oޑ>d Pр|3A"SƺYS1:FfI$_[bL b,`(O&0;$}í(FO`ap/Ĺ Ű]M}gm ߪy@ld) ݋hGΚRU_H瘩dNKS6sĥêG}Jt9݁e@?bW 4\=rW-7v.כtXc&Pn' Ӄ "M aK)ݼc]HɲFO zgycOtϿR6rC2~5ӒbvW6AX4y8څP2H*A9aZ%˴AR&}7 ; (a mɱ h߶_C]_ޱۯ.`1aٮw}F%cȸK9y>^|qXפ2j; sfJ %%v W Dd@Eu(4T΢d1g m]dw~T nLܗ΍726M#_2D`BHApϒ7y!>!aPNE#ܳ.'iUh:Ï}T.#3aQ x-f7Q: ?gN\F\qsk,i?%vLF!T1&Ԃ|jac:рƋ<_}#>Iz=(ՈY,[Y!C~S/A}ʽྷ.usbm'w=[q?|?>0{w>U`Rp CήS$y%Aa- 3Ӊ\nTT-py*eyYZ8i’=,"% S2돡쯸CH#ArwFjC2x Mw=ԭH f NK(p4VLI[jBҡAҥBYN<#{d0]Rey2Ӡ z{ |=Q{#+`CR_{k1."3'Nޘ.m e]eٻ1OTce0'f}3M+6ɾItۏd^fI=[;:6EgM]ṝgT,, #3 VI)qBHՍ6;˪̢:axRNOsU0n^LVpLcMŏ!jHb<Ar׋_ U Czcfyw WWLi]`aA%ڏkn^U!*Bl(DltT莜4?H9m\6 apX`Ш LXBnp~Zܵ,i-{ {wYݲ`ŁjRC}T l#M=;]C?)ASLaSEL¡̕LhH\3nuE-s%B񆞛!)3*Hmf Zoų1`C&e{(Gifd-`i,$-..`I-i65~3X^Q6U6Y )&..$oMoƜBT,  eR(ͺp__ tϊk.m˓9* ^H\D|dա#(fu\A}*\4\]_ڌDf &G 5q>XVvjiڶ;CjQBx'Q5k}A/tύ@Jr1S}\!#(M腙#_ ;l`p f}s70)lMx䣱7om2,Y\7)|  ۝a;j.'_q%U *s`V2ks6 ٳ2,7:\+q/_vvM J_@rkGgaNu!:ŝZ-WsN/m|')̄A{ߓ ,Z|ҩV!]ڛIs$^F^<#;%94ul)5S랫\0<<15GnNڛnأ8U=6~r>} dy2-/~Гɚډ~eJLhe3"gHr^5$(%]p2Oz,W=T•<7@E+E6j *mQo>ZhBPN)}]|rq.4HV%i& \jta8[^uz"|a˷;5.g+%,rY~xRWv[8 /fޡ9>Zj<$}`J>ᣢh]֪p lz"5Phx܂&`vvWoXp?l`h@>] ӛS@&6Bl|ĺRXhcCB2}mT렃H5ewawhypԪ׀a|">zn/3Nȴ^SVJz~D %>UUy=,Sf 
hWOdi*]0P񟻻KP8WLaʽ ≯LY:Z'Bh7MH%: y9 Jx& vL1 AY>y`Av /CaúS70l;)?@ ~(zd/HA)2 qvl,(2>߉NEZX 9#W~ֲM\sLRl; rYaU^XnDp A3KF>:aw L?ͯܛ^^pJkOHGYr`ARUJ)xkGml=2afKa%-M)M/ ^"]0yJ GJ^{$a'%2}Np)U98XE\9BTcT\h׫njuLsz(/k5KL}O+VϺ>t1(*R!m_4)j.НT¢Լ9lzAbxٷ2& U}qiP>vҙ@ !daLJ8ujBԷ1&Dɘ #HX HQ,Vޯ ]I=Km> JSQt|vs[W[4ψg a<+COj~9"~[>Lŧ3C`iaS9C3 L;"l&M,8珳 id ziZvh 8mV@5?ݲOm)z-WJ+]̌9pLh+K*ȗ[by S[+B]GITg0#ޱlTf|0!R=A)Akȵ ^dJo>ly)UsZ~Db}L*|Ue<,*J{Jpq{AdtE+h 2C-O@*ݒ@Tmx*\JL{,\'vF7j4ݹF-ƶ)/>#HQXM 89@K wRFC pI+||ֽ:Fqn'}JlD{2L]ºxuߎG}4QjZ t$P7MD`ѝS? @ɼ|zF{Ep^դbsI&S DF͋ 6D*OMUڄI{&3+aM ngm*3 H֑]=:d4.()!o3TX=wפsv~w&W ׂXMfvs8/)ް`~tkk!fo?η;~ZM*(z]E08,+Q^Opԕ pE$J2D* >oghC|7Ly)'b7v+nmZ@b@2,z<FElF""/Sѩ&xH!C<-bU_T3[hS^p1A:Dɣ [9n=Umi)[wfْ׻.T1.סb!!w C9%G[$Y;\Wj?Ys|w5H4Kp a+v>U*YL][˔"$\ +?f*.FԣvbzK5T #>oTrZ?!.ܯ"~Etn+YH55Bq,WzV\ yl*"Ȳnlx [#_oo5w,jǽȳ"YKXt yAx@d ԭ@H#t%A$1D&_hbO`+9l6%"w)k^,/P\۲܎eΚqɋ|貏, /S Az86kUE.q)yW):Dh& dm3gD"pT7Yb\`\ָt!BZnX5_LӷBVn6o~ 'vÖr0aX7րi~rWBM'ܗY k t2taB ="[7MTT*$I}azȩ~R3w2֩[U`}B1ıd-taZ=xu 7A[^n]PT:hoO{J*6"GOZYj pTFeX7AQw"1_z[whuс)? 
֍5$}K| HJ'џ(ս,G4Ff[a lsˡhb2`_"xx^uR d]_1 M7ޢ*i‰\(r0T祁Lbѕy ky^zg"o&Z.2:gTXXs8TۯmRÙ8e|.S dO@ֺޗb_%#xlV@IX`VZ.e=F)g$%uҢ҆8P>CBb% dI=OǑX'\Ý6>d0+1AC )MNc9XIrSl hT\Aѱb#?3_˺w>Kr8OD ey[n,cu#IHPZ >:x YF7nֱ ]n z/T4 Sa􏛐ߦ6`vL= zfB>cUBO@2@Ē$?a: Yޭ*U`Q&L-ہhC =7UF%*ib~v*~6EpuKM>?C6=qz9GeD35n5j6n#סG3ƙ3&F[K+2Q`2QQ 푴ߩtًCcL,Ƹ%^wgBp:2*"8=P뾁ʖ湡P^2e7zmWI=jUd~PF^bb;3' E8ͥgv(g=J0 ludr5`i[g4!4dbT]Oc̸!lg9'[ )_N6eYC5z+Vᨨtd`׵9D]ºl)jc9.vPp>D#%K*&_=ǙRaƯ<^/:L|i0]gr"zW} T0.ZuąʽA?l%LKs E='`A?7/m|a\&S[1*ek1І_Vim?<5[g#ezkx'}E^*?]䅲盝yPv^f0`uV'9Qwo`=diR l>:Y~?_o9c]>(G2 P5ɆYea_>9|@$f.ھT dObtFݸbitF'Nvo.c?m&~^߂W(tKL—x]Z-m Dӳq!'/25?7SMS*1 4>}w-d3hђΊUT 3`Z4X@LLWO%/,Qp D0=O4av~r xFoOُ !ҟTkYZ%BIJG}㢤97wbc_+j5O^'9 _b %h-& %4z3Q?7 X$!i*W跭Ff@9DAkSuN)#v\k$ǘֵٶ j:]]mBXK$LƍbɬĎ.b-'v(dJY!8 ǣQh)*鱻p0ʗ.dk!x<8fU܎mށȢ!, M Rz!:s^3n% Yk4Iс?62̯cYlZX&V'ZGw 0}xp`{2 B",4䀹>V la^<?j| ܷ"HP'𮿨:Ӥ5Y3 ~<&1mEFGGSoeFGڢkWo^5>F׻z]쓊(T8P0UA51}=e5_%jB -IO^/}20G:6;vx?J zy)^|5V-d ,/U(ܹ;va%pw&]zk5`(V:,75".=Ae%1'yB[琼CIfhҟAm=ÝAOמolqKF/,Z!@ 9B'7e T?:qLbA"1r_ ˜#{5M RżĔ. LFvOuЀcpWE-u j͓Xu/`uIpc_D چoe5窢 YqS'`$(Wg cO4>ؐ%M'ꅡQ BMd&L52xa ų"[k ՒZѿ3W[j~![w1{t&t5:CGcKVhoO=Ϯ@]` ~ tJh*+:^ؒOl!KtȦIG3Hܕ)>9/k CRG(GH-(j+yȗU2(ϥa/N|2ISTfria'BĊ7# L5c\M=~żQ(QW?ohz\_Z|b4d8Nz_d;Mq#|F0˵N #Q ZƓu=bJS;isϹ=*gYl5+b LU@ Ti̩c? XnMMaNTfS:_"{2C@8)S8^gRxs۲T "E.骵%_xT5#&M'jz Ŷkڇ9$'Zf.7XޑvDt:m B ۦg ?] >گ/0n{<@ ҐӠrZ_jhC5o\XvvMիt,rP՝> +cHLK-DT_`CYqR- VQG01AY$x+E-Q0i7]ZZwc$*.A-2Bȑ٪\ ΝЍpCi:I-QFį.qLӉ5^U7aɱ_,DAd.1h4֊X"ސzhT>ST%p7inFUrʺ/JW0T-l=#I; vz41^|md + ! 
3P#e?Ac_ tk%x;1 ?ݵS4|n%8/YLcM13b_u(}FnK\JG,5۰*TVw}E~ uLDSkcm-t`aNz;;gVV8vmS"xngK{Z DŽޚ4|kӁQq8oш"me@q8&,6aOCu#b|S[07HuŃ9BT @NxՒ[$*JͦXu>y]xM6+7ƭ_"IIG;ޜ?"}=eLvDNu69΢,$y&pj]{q[H] xʆv# Y:6E9_!g[Z5UppgWzـp#sש6 0kYհ8\`c3\ռj@dޝB´P] S?G Wy'QT?e{+N#NϠf| ׀HĮlܙ::Y'fk7^hd-zf;&uE¸;Vrqb:8o!_UccǞBWѧŽ9tPwaznk"P8'_ԳR|w#ĢZe~/--2acj|vD.8܄}L3">w{!`Zx'j5x8&ڢM_Vta厍-"K$ a#'$a9Y1ZL9v\%TK~e=Ft1W|2(vV˧6r\5+6[id[0S#0;VAcEȾa97V=V gkt8uɻ&GfNsSplJ=3{ware٣*E^e ZE\tIH=ȸwKo> ㌤/ȥ]>BeY/R@[!Vµ間m.]ЖBqFg|9ۍ.ɁFtas#Ocq 7O @Ij5Bupt" =A0P=NjaPQ&aoYxĸjGv!?(O ZoXU0N5e^T8#i.ߠ7ߺ2E`)gYI5gF@5e\O/j٥īw(bč0(x\Yt)m{T>aAΆz`q@fXi`@bpń/OO:Oz!{TRY4㯿]Z9'zp= nw,|=6hhBQҍlf_#T6 !ߟ{ӟs_j\g b t+^nRuxw󆅣fìOG]K1hiPqmj.Va%6}\*Y~@B bY"!OF_Vs}}N۠dEU@eFI23.neq?r#S%~33tWU?|6 E>0 YZlava/data/hubble2.rda0000644000176200001440000000113513520655365014133 0ustar liggesusers]So@>m  D <#N:J C7jU)IxvAHHYҕt 6Cw:=--!. {pG\2Bc+ż7[ 8 OƁA*p8LSI@= %lZ?B?n]֣wfxPzw3>}68hhxۚX(lʩM߰kQ3 X,GFݦLfY =Y[{༑LiCְi-aAÖ0Js7Q;nZcyBH/@G$ 帯Rbv``_+.{ȿ,I s?K]b~Ł,+?+H^}M #ڎKY9ߗ5)iACS{=̢U-Y&h,,?]nD>^Oo3JgϵI3|vۋ6ZqZ7t9Q/.wPxY~ZWh9e9ZVuh95h9Zjn/05370^h3=a^KMjzP ?λglava/data/bmidata.rda0000644000176200001440000011567413520655365014227 0ustar liggesusersy4?$Q2+IBB9!E"D*E%II2E>yyySz>纯k؏}yE=-z[((i6~Rnb9ƕ-6W(wl넰[" i/̪H8Bv<G.ҩm0n&5VW {ʏR} Si+$<>#v4//`}ПˢavH "tglЯ"#v4ewI7Kz,}mD+&G\'t8It/=H֑Mzn]~J}…yҞFá7wY7 "B ?r簥f3z^~%֓*r /OI"v]lq Nۧh5Ճ;,/RÐC8:XeKh pO+UO*O<5n _.JԡTlI7p&Z1x33DONWRj> #XihrpG;Ra]/S=VRxfamBI5T8@Rӗ|V>q'+*a=Bڍnpe mEP(@žG7c㡠c_O4 @6Xh߿dq$9c47R\GaMǛqFo7iw<ӛ'I,|ͳXta;Nn}b>y'~q<āWC2EZL-6ɒ>(r϶󯍟lkY5,4w?>mOqkxxWԫboER۽TQ(Yi)^Gu}}Hϸx{;ybϪ3Q@V^v>hFT,Y<6cS/4>8ۨ9m䯴n~0Btjg!,J놥P˥R`JY77xOd"vzŝ5]Eo= X[=cn K;g)$j1P|Sv/vBU/(^W:$hn݆sm_`rvuMW\_TƗDL*X 1PDβ)53Eaǵc8S>*qJ&4lŠ]c՝_[HM%:]z ϫiDa7 'л|zRZ];$%Ff/('~q7;196t'l0v'勵*8h^δ#Zdt2݆&gB&Ъ+d۱sJy(6+2애e lV!CٿzK|M'Emל0:;KaӨuVLp|]8Z,sȎه^Г?2`b77VoKK3.sN{ eߗ 'уN8%S)N? 
G_/">V'RX”,:|!Bj4lNAӊ^]*8]]z%-^)&Y}DBt惑ܿ(0؁`,/Gh7M18?nm MO̸5G2ݹ.Tw?/ 5Gc7xG]+0j\.`-ô>4snb"@ U:o+lޙu#@.W:;sp?s,ptC?Xjއ̭)=dܤ\n)Ӗi`:1b_5i}6;G*)#ql{K4{vtFT[)69͟aa@-cp_(LH\ <ӧ&+oi/a~*S0x*},9 ίxpGV0NJ J60Ec.8,Xk; K~<#+?X@9ٓe5;9jwo L-&POE+ѧG^@՟0|qРpKs/v5#8 tX)bL態!ya8m+W繼FLxa:SsW~7Nn򣀲})\MynNgZ|Q!^sZbAJحJy!OsSn$G*hM| rv0P~kGI}OxO5A8͘>R-x_sn+%oW6bB/w]&: C^Q;eNfBx3fXfK޷t fϫCNBe-rIՃ_){b\pV48QCin:Ţ8^~zRkvQH v5=T7A2]vSVxiMK'[o]9ocǃBs"\TyX 'nz;_vcNnV {m&kD=tPT儦$#R=S-9NUaoz"y''0_lY >wCt4 NxƯۡ2s6pN?(8/%}bfpܶf 3+Dra5,XBozk&HGHw͈DR{N˿{)ESF)Dy_'씝jp Ā45ƚR$zB٧b2ը L+ꪐ`dބV6~4KMuk/?b uh!"]=lSn- 8Ri[sceԼ=;p%s:"C }q.Cz@z&X'PCFN'̗xH-.|Pj<ZjL8dki'n~VA\#4UʄkwN0Ү0d,- ZZW7woga&ҼTz >t-iGq10_t`{96#yEtnLC-`6MvPC9쓽MH?F)L@Ws/f6rhk`Ɋ#vH__7]JUSgrm89gYC"P2p TaĦC/B]Qo.k?\ypE=m`M66a.Y`lmj _0bQ,CR}]hPv]&՜ Z~VX m6۴$dy"f"yN {dKOĢj/ Sw-h2מ;foUVcs>ǿqx}|1_=OϩL siO'7|!|L#Pj ulsݼ<7_SI ufGc'8/>Mn[N]7'C'KXlo6c3ߺ߼͸,M~K@߸22 ׺.և:'Λ?_O!-ˮdpw3a~aؗ+x7t߅7n׏y-\د ՝Uoov?q/6M6>/qM<{_?|?O6eYHWo;y}n?8~>/}ǚ ښ[ozp_/~7hS7lڻYo3.jnKK3uҿWls󺙗O?7y??'^cqg3O6}r湛vl߸lkӯ#'*hl&/xf=>,o3M|mSNMOɧ?MoϿٗ7&op?9lf|M~Gw37_7oޜ_{7E[IS~l5f^6Ѧ~&6ݼn6y=[ˏ.M9gSwkna7t/'_7ʦ?L0~ճq;73G7OGߣ 7ʊ0Pi~2" z+N.H1ߟrqM=I|>*Kf_ b4kAXK] >9H ,9e?:-ht8yxdW/=-%`(sJ ,O%m.ȱdh(d8s`/ gr:pہ ?Dq3FBV'00Kf~HYͽ0q^'&f~`@܂O#`0o-{L`&Xw;MTocT :v:"9gӲ_+@Xa Il͛4g .3ۣZ4 _ey詰h (0Y0kM 6Fmq^9Ng"PoZrg;vz^,t69Ce'kJb$?Kh"܋SQlԂp0{yR:`g'y2ľ&zq vcUǶR[Tw ]U?99 5k=H9 !{Mn ^F͞ g07hiOúC?#w_T۠lƇ"('=`a*#4s?OK;8b.XFyYat!S^^4*7{5~ c+휺!ux1$?O'OEg&bNqBS=];m"=k߮YQSF(+RGÌ +#l~mx fkaJnǷr)TE5i ;}\n㈾; $-n.YՆ6;L`̡P`8gZa% d++ѪE|ݗ"o 5_OQ0B˷,e s9W>+T d[|K}`ܦl!Y̝H[M4[2"ꓵg}Cguf >ԛ^[(610Aoqgma~4vݿ'6cS.S *N쾲_ 5#Éd_4ҩn]Bȯ[aqȳY,w|7Ƿ}@*IoaXN6]jLLgKò8:~`U]1IÚ^$OnV ,Hw :=ɲC䘪Ҥ ]}c0 fKXw 8u0~6 rczxV}pe&:v6,;!@ʏk#a䂷\@ $\Pe[;&r |zoT,c郁1?MsIu ܬ1qjr`0{Z% gr}X g-RwV\t0IofVtx= aNq=ϵHRT97w3CEPl|wuFGJƛ-0T3Q$~8g+%3 CCwvh! 
!eRb?5_`*vx'}?eG_&lsc ?JN28CbHC,^-T"P(J0aqT!j".!Cd}/@okLʦ =OHqS")7ƓGG`<ۉՉԞ **WK0Y fv5P{O7c*趷b嘅gJd,us˻i 5znmi$Jb_e%;4{$ LB3X_oR ߘL2`1l@EnWDŽ@GfِTiiy:grG"GH=2r_Rm!RJhd|G /dƓVcĕBmTÆor^gI$?"6~|ۿz-6(T CcuK+,$R%a𤳞 ;COSl.\j{ԋpN76pMD W4{A͘<0)HC/.AooCR[>Ng`$w;4 Ml8F_:f;瀹1]7J@Bh~ g{e\uౝKrOdW&V goƴ YqϒPoP, L<֞[S8~Urۻө{G6 yG}}iX+Rq[C(n/lV FppU_1|[Ɇ,0n,\+˙l0S=+Ҽ K/BWF3J9feq6LIXgVg_.>!PR:(J]2e eLʵ ER-w*Z7k^#]XBβBS;g{7' etm{'Y:};4^.Є%(QeuzIղNT 0E%[J$17aŋ<6T0QRbPv._ٱ;1Kzqa),idMW=)a(1H 6ܰ$ [^jqL/rOWXŹ* $3qwRc0Ϝtwб݃ŹAMǼ$vVb4B2^[Pyڹ 1)IrepxD4Jv9, !-]U옟SU*)uI]}\'? ܣ٘RCh2| \#َUBSkHYZG=(qW),i8 oAuܷBA'@ SY?g' {/l0u'v* 1q;`A?ff(ۈ8ttgC.Y(Q*{L 6)[DД;9O Iy;W 0Yk7.|RlX"T&F1^71腇AggӏdsC?[K]jQ$?ekO? BDͱ*st]wi*)ƹO8cCYz@j Psh !3[Hv0=u Zhu>)œ^/e0P?x-'yvI8| iC uAԾhP:.P|K(wU&O0"rEl{|K֟i?-c2qeK)d ݯO0\@/9_K=D fEo9ZYrJd2B?YakE+.۬3UBBsZY%v/=ӓB(a)bb_$x*To8Cz)88oҙ# #;:e4΁0Hai\=Ygʉ<#Rp堀9 u95cXYUqR.I|D- /ȋ嬁BPt1I#:q.ZVqTX~njT.`$ _]گleIXUeqvTN%@J#eK2PSufDeJ ux2m.H^c'|@-_ݿt>@~lnֈ M@b߾O"Pٟ?u #`(S&.ax1O" QUHKD(1Ba|\>a gPw.avP=" h<=f/Y9Ab$xAzˍ%Ps:KM"`Y>^z ߟ76 Bɜ`0CV:Ӝe&L0g=# 9ƪ6Y{t <;-0W_C4&NiN iuB,֝&(K;F$S0 f[a} "}n'=}tj]J9cShOF*lNA/VWa)ȷbl\[Zeǫ2%4K8yB2]: zh~O)U0UQytK~*?SMnq{ as<!F&f}q0dI2Bgn@ϗ&PdlƆ VAO]y41R~Ŭ&Wvjn7*+IkzNr#5vXMzN~q!%9+Kzkt{r=y$Ττ`%2XVB;/ S?q.@82+*|W^4`C9۫BM&tLXLL7>U s" P-K*p)t<=?-m=E7*}*f<,/wwk=GOv@{N\/ u{ NYwB2}^`! h%儕T>,ŸO<leL@  M~]`ti@NW`YI4:c7\^I_|}*pSഺ+8[S <ԋ ؀u,|3O.HjJ\{R*,b۠j-%\W DҼiqܽJ2r OcnY˽"ױ3c]Xe%-}V:f1;[z@tWHX=k:[ ƒiw'/:x-~ EzSH {(/(-e'1`B2X>,i86E&o70Y|`Xd~h5oDtJMKLs<N{.B+[v|Y; z۩BtkCfƴ)j\1fyұJ*~ eV,rE.m9?[Ѯ.; zK#9Փ.nNB|Ӛs#&8ˏ<sбF,u7d!_}0=3i93߅`ڙ1F_v .=A odlvv, ;I`) - qhkbʸLa䱐N-ԐR:HI$/^ _Kʂ~~6@䰫SսNϷa>Z$CK*п5&တtAb0݋޶޼JLXS.h3Nlmr% F^j7U>sA8Ixt"vQ5k@p:t3X܋+2wDG g!, ŴNR[?׽!6z\8ht!WY~b3.H8~0 ӿz^w[ /.P@rgU•,W06_|YvE]f`@ p_yk[aɲhQ? 
D G" %רڿ^\vAoӡfr;{g?{Om_;ڝ_Pyg%Azz0B'CW2tG `GYÜg5.L&f9,DR鿇FA0 {w yi.)&Q?+gەQSf|[c4/6L'ݾai)K~ls&q!v $N^d+ۑFqF]B)εi<B,6ˏ Y\;)Z'vo9J# $w;8 ep "{;RgnCxdNVxx_'TI$T6.NᰱO8\zX C巶'w>V2IO#2Âɵե8n2',;}Loani68'a}@>:Q"6?Vm@*kam?:o&h2v*|$e75­$LCwѽ cxS +VU(BWv~TD nSH}B_!ŒK<58~|'׆P hY8p1rOS=MH^#Z*Td‚[i4Qj$`GlfseTqzLoX{`%L9H3rJt8&kd`7[?#!Bz1W? q4Uϱ='\Kv8lOVny^*^p1괷<t!YaN/6M)?@{[=iC dxwx2$dR5/NO>Y PwS:Xan+r#vf?_wVđEb7 d)|!\O']f-ᤤ=gU A96V< >ߟljKɽ8sin 7K?%ȝM-' \8F]Cy?ZPs5-w_:@G ɥwU_@;޼J/m-!в'Oy'yє^]F3-j #yf6Ec0;dvٞ6W{2NE\'_Pj5)楨! oh;ZNйh 7NXvb Wcڽ)]epR~Ov : _Wo=36]OR7&l+[T8x&yc]BoE v<;VCq 8jewJs*44rZ"J 0S& _2e߃ٯ)r[ VN\V=<@v'=V?ƚZaBulbtW] pުfC01b6i+}n˝8Air™Hqy"o+uz9Tpq)&;{DI~0L^D?:P폫2ozsh ݝE^{NZߊ-#`rrW'qֈޱ7 ؘze0<nX'5~N߆?_~IJ"G0ywʒEz,IaL.iS*[q\u£[G/LK$D{y`UvQ{e M~ qe'v4.d*4<` mkXeB] zlwқqNppM!uֱ-H>wg8ֳܩ/|ʽŏY/!y%4-5.wv/cɼp1Xn} [esw{x@TP'a]7}ȼ`YT4k'zNJ bd,QaߓL{r@euHzhˑl;a69]pHFbpg!wpraO@*,~#q{ Ztf0xq|`巣݊}4lǾh'ah|{b0E@H)V.Ğ㺃IGءoBV|/ ^Q5+hܛqMpY6'-6@T2Ysdzauq&it.\2ҖW*}Ongc_USV\O|u+gǏXi285W;mBwI?t֨܊S.eŨ^Bbi|w%;_ƥ{ '޽{WEXi!.E^riʼn+`ТV4ȒscؾZ KCAw/}WW -ׄ'=q~4?fIhaA񳸁BP0%JQY_ (yѢ&_̲O X!v5ƍ#T[ ɫ3AVΊGSPI8gk1GAvU⻒hb+zU][yS0[V*F.m>`])rE,œPPNv[p%RJ…mٙ=ø%)"=rS+< Qw١-^Aɢ q{fE3}2ѰT}*^г9iO6K><#%8q% HN;:c"&txsUoݱCC'„Nm,'g=r 4|rN\mCw,nUb_wc{D_Q)t-Nz60Ľ^MNq>ցrX茦:׏#Am`5@Z ın9O\HQz@>3ICSoM~lcan'ag}ɘWDŽ}Ua Ïn諯 ;05kF $c̛rє#h5zv~<3 Fv}wk'՞ǁg+׵^cC~w ]UbI_d!χaHQa*9Leޝ" s~ڈXȲMIu֫đE:3o`_zmhsA9zT{a%h^H /aՄ3s_Y<7"QUn?1Oq7,kIPY#fta8$R0l)<7x׏g8cC.u2^no+i)+"G\!(yah0 ?~Ez^Zs?+Hg9ZDK/jap p1Lsyz!KZ&XДrƯK6곏L4u&K'.c $y,Cly-2JY\rBg¶*.$~áU a53^-ɋd>wz0٭;.lР35`=B܆~eآBhޣ%Xc;6n]m7bcmQS6O|xydaܭ8v'~92,x&'pґݯa7‚5v]|"vһhf/ m U{ `/dN4X?]f@O0SJ-9~Icoq'ߍSH |'w[{͎c%jexf+y b~ӅŚM2!}^|ĺ]D刢lm}<[`;%*s CuVZka-̟U;Dž?Ԫ]1;P;Cl?RM1̞Nl}'?N@ݐS]z8-a˞ Nj/P$b %th˜ϊ6Li οjȽj+xijHO|Y^e~x ;m2c&-9r%oGd*Hf:+{VYb9 {qFcv]Ī9[>rXK]/+=5C.I]u I3sfBW/{ dlt7niL ='&p1/pr6IOٕ͆kM X2({2A>;UGV3 x;m=0Emp8~Lޜ~|MƮ06dO_L862ӯ i mf|6ƉCNJ2F| a4} 3V425xDJ %>b/<7oLVӳec>`h3 \Hi"ꋸor6̏Ja]׏qX#c<v6G]_يU!ux%Obi`P=! 
ϥ|׌K#5'Ƙī9C'_XbGR}UyhTu'զO-f` ) DZvf%Tdx:tABvPl̷LGHCwa,1:Ԇ-2R:$n>{1^F12%@Ϭ+gUf>G}f:j~3n]wTg "L[5^:_YlTjy^peK&z}$;n}2+Y!vK3cKV_G 66kdЂ-l ٍpiEZgih3n)bρL8qQ''e3]F^ʓU%cm|͸~yo"@q.mr@OBc:XsS`"񄜽0+4q oOk0ccȾ/1lƙң|`6Z8=f%i]5:,f@uu߿S38_'7SA|C'dRBqXu?v8r&s=GiS 5jd|],VԧɊ`X]s%UgmyLcf>[4\E2{(vUgaS=ЂsvP; G0S)N=&y*aXfZK.*ø+!..N$@8_QJ)ѱ[\[a!&`cfX[| {Z"+M9|5Pb/!4~Ew蝎C.`K ՟ZjB0ft)kWa+gE]TP¸LkS`tV)観,ޢ 3[&xC'G'CF dL4cZ1BnnItp/sM؂ ̣`gN$ [u|(\ #" 3;Ft(Nq:t$Kjي=߿EW).}w}I|>E Zl"P?ey6l=_#9ԏKN AIkq)$hOZi+p}/7̶>tw‰I˒Ձ$PrGz*|u^!-U]>n? (nsbC'6ipޚ>3PzZ"FWQlkXϿpeC銌 h4*\5}-$),D-5)Ab(SSļkT7`jDŽ|w rnz^6Ӆn9ޕHIzj vDP36fږΎTү3Ұ_'+xTiLJm Akd7pœ ̯d4Cm601L*8EکK84$kD;s ~iyaBpAiɢT|bT"?D^+*"Fuēd4~}={l{Õ;뽆/Wj pBzdo" Kݴ~W jB- Q$\Idcy@O¸wC=gāFТM4)& [jv0{+M 7^1م)|m1G0ʯ@ek> o  aY+g0J-4T%Kpȓ'lї+ՙz2L7 C玑 wBM &lqc}.PEPܒs%L|^$~ԖCckf 0ryD4?Meřq`R +9fYIﲨ?AJmcWE?\v+i:X5߳Hh} -Y%75H'G)M73kqqcy-^uݶ?\1>rϹƄSE!y*cr+|ZfeB"y`|Ph1 I/+NMl❋'?${A{K _OkU.W|%8/J'.>ʢ׮d)(<uAwCHfc.$C1@骰-B,XuWNSx.)UGDr"=SY`b_A 9;0̟vH).I[fXt yےtn;]l0pk}pi fڕ^Xm+O2А{,ɳO߲3s ޡ8wo5X5ߞ7cGy)i:KOHMMEryj%3;絵(H}6u;~@gQ&˾\eRYCsG`vn/}IMe%g`GXh)=ԃcn\e#Cykqa='CggTmB-z=WM>]l#.۝@5uVmf^W2Aύ87/曊7'+`x&9hZpiJg~v ־RoiZ ,/u 0eD GQYBG-SZ S_+ wؽlq_!'t]Ŏ/5MQu([q<.Ddv6EBqtЅSDz1-WOZ@YtH=7M? rO;sPkkB"j;Zgg`*+p<`CWIxy|ڛ=U?ix)'YvKULL7tzк3$;)j8Sm jw0_[A)*v(|4+H~˔d'<(ђjWqX +jp8md$,|Qo; ﺷgaVNnOimI|q/>[IПc'nu5ϬUcs~fF>"Pًyo20 9W \ ٗ9u(}[œp{>BM\X':=bjYT9D>]@ W~KH~V m8xGUfFEќD_XC=VrHy[ ,CN_!/ aM`8EPdV|xmSX>@afCF{d^َ~>'d`ZMW0iÏEg_ߛ&^;iĨ(Ըu`#/x:S6n6>{zCj" Jf{ih^yo(t3]`+F̱:ч5qWߕӺ6$T0vud!LJ=Ǟ|l=E _OUepϋֿi]㙨`f^B^ag@C~D<;BH'6 WǡyLtu:nK]o'cdT"qԃ]y4\!vB.竝[胢p3"s}&avjFk6C욕~*IĆ!3S,@6=liCwyWqro>&P-! 
稛 p ux"Loo{IyǢ n[8zf¤S}|0:Ҟ?tftD aHuh_ONo#lЁ)a`rutCXH> syn.#RW5*/a # 齰0tS xDZ]l6^\"g?cq{ sb|O3d*);Z)uܽ4C)]հ@I864 \g8mao~Γ\4[`ql>+X^ΨϱIg<XښSMnֶൊ¢ 7H>~[j]!Ѹgjxs)OSЕ|bݷ0dwɷ)@KttFsW)9Xd%J IbG\($Ѱ%I_H<̓hzY|cnSᢊ^7@>;lׄU뒀,U N5뀕R4WcR3Si7k`3~ B}bq^M4, r6MJȼ"ݶmF/~dQы`{ kӆss>7ns,vi 4m: kNTUB3c+S]xw*S,ߺ^e,Vls46:$/ J@t86C 8ZF*/,Rp 3\vr+S*;aSv˽$⼓tFLНt-69&ϠG4n +댜H#gg< utM!H{>\Uøc{`^Uq-7#*E=αf#9:l 7yY~+C\U`qQ>=R{K{]D ai3(bߠtr7Q8gWOY70 sӻs`칄ISs'9W5H)dٹCRtwa`D=g8qѪ/G]_hC瀖h_RT |F60~2nYM뚳t!x^&f$HP/],= )8SL›r&852RlG3ShNbUH&.6YAQ`JLwA` |O#dNA QK׉_ddz4?雃E[h,&[^5P}%1 & oYew|Cpmy )~Ow@Q]PG;qvP]زodfÂ[FPhȖ 58ᑾ"tܾ$%3;7򑨸33D(0篕č֓dZGb:Аuw6hOzk 7K ј i{ywq]; K>d7_WrWI\@eq%0V&nopȹGh 6Q^8΍(E~4'_E޾Oͷi(S_|6~}!ɣckuq\<~ \+ j0Zղ6*lu,%K;op!^Q̰&*ڔgס;#$ 1e`ۋ /aA1'b!>fHXden/* D%$~Zcy]>r; Ump KBZ~-?{mi~Hχ6.b[Z-ʅZy%xpi9wC&sۈ uEBi.qn'T$ZVtp̸.9n`2Ŧ'i7Y BkW Rio~@Cj[;9hxDC݊oC+G_PS@x ;LdvpW9Xze3/_#|R!M#_.A;Gt~/c%جiuCK^dMK80L<,3Y`KA^1'mIp7SL֭䪻X {B# qD!-@%.4 2'>mr8bZ{Lr?R)]:scIv%ìK)KL8u h<'v8A](Ft9yeykyar5:I,)-$Z6Ghq7QЯcgOxu.0FX} u CM\fѥa6y=.ZK\H]⦏o(atSO~[u[q%9p2ꎽdGASUʾq$O F-4H.Gy7"Y0 T(# 矞,N3w+$BB񌦊FӿvAzԪ4.H}&A5քNOxX9k 3Ϋm;{x~*: NI$og{ iCW`QLJsOW@br=t/5g/6> W(3ɢ G%$| W2l|J+3B C d6{Ijv-y 4ZzhヌēnWlyv61UL HߦqѕfwOz#NȘZbkJ}З4vt6f16-]s4,-'V=mw{LTy4sYقQ\vj"G\SAW 1E& Ai>q?`M'IboF=Ɛ ҏNC&QyXsJdo?쎀ŕ꾥k+_W~S݄Z޷Ru]Q|kS&OLPKb8et6Ld n}{scyػ-Guvn]z(!o+<ga )Gs;\vn"fiCOb-avYжYNIo {Ԅ`Ud\jvׄ:Bd%C}0ƯyيS-o1;FuG ֚ ۻpeChlU>ڎS62t( =]H\<{'w%O+_㳜{Sv1A/߰ޟX3̙w=[ƨc7u7H@ i'wQwo`ǩ; V+d༁nvl0y7VĽ C^m5caխQh91RxH)~弤g]ŞɵL+ޡq87&MZ\ROkLιGA #$UG+|цW~y¢o#spgm;z.)H`X #_4;1*%1\OrAset(@Ar,^jp5]Ya@K6EUs;aB ^eZ;c_<'xa潽2X;|$|i 1< -pQCH7)xN_Gk)!ob{gMP=Z$+y'B'#m~Xnow"LPt҅$NJU5&kc;N[Q*aɬ;&Ii]  U_cl)cDra.Жw$k8gà\2.G Xoͫd:[ Kbʃ=Э2 &YyU ?͏sBNSmϰ4{XQK  oƐs46gL%Wu89,|8W]Еsqt?oZ%8_-G;x{}vZ$b ۡcwO4ֽ-Y`K~>{KX]ܕO,a ge k\.Bsv xQNSN TC;V2  ~-ڭK Wk$oUZ9θ\f /dͰBtܰ\=?.e㯿m(Yg?i2];P6d5Qb2h/i'8ݻ:l~V'X|Z O_g Ķ*!rviZ}Ni?kח70XVf$PCV3kɽ$v qz; 2lnuʮ};&z_;;-nhbw0wlpJbmP|zz!]#x;"YEjCiVԝ:a;FĆQUCM*%u\O1 g>4a4b`ˬTnm;a'z"d3ZūRבŔH:y-iW#SÙU~W8^)+ufVU,$SSE"UxNcwJa͖=QוBdSL=޳o\ }50zSWK݃kD|'   
Gfʂ@;9+һz. +|F5~o<e܄mqc|qBzGƶ1!Nʠw)nDYSгX=7ސba#x~킭F8x/`tZzO|ه.ʪJ%R?\jHluˆΓ!N|lL(k!vjW@f} ׵ '>b*׊'`VsdJAHc1j,+U^4I+0^Krhu?)QX$Q~c\8ZVZkipP\v,= @\m pdG貼!not.Z#'a!-}IXǿkKya[ 6hβ{Qp7WUB|9?^},yog0EFé8Er9&_ D6Ɩ#FwJlS [Ise<`znŻ0sK^B5˼Oƕ+P܁#ki lWYH n OĖ;'m,fo;]ꅩѱ7ⰪC~SO../ Q4aȶ b_|S@ީ##,Zተv;qDZ}Iwj*]\vu Xi͈~c:@pl+\?F40C,9 vŒmD$2+}4ƱR 09] c:t03d#LX*[Oy_[TDž'pefZvF| 2&P&<}ź'[CO~_oD&aD̤% ^0<95"W, P8|,{A2\u86d R()38FzRn"*n3k=]Kb&g}y^Eʕ(εÕǰmo cgSm&?IF=@˜MGgIcQ5s,hnȳ'R"Hu; Y XH{o ,{蛞3mS{F[&`q֊8,+ Ю߃zb쒨e*4nOcZOYMP!9|MjVjq*fʹK1yQ+ <\OooIw;u |- ۖn N4UbԁEWrֶEo4܂>(!Ci;ٍcBM|Pm?0Ŷk)m!B+PI"h /E#oɰ3+KWr_O~rU32A|D-aH?R+#xdH yfRatƃGU:;eM)Y7q5{jj. WMO[BX0$>F_]sy);"?)HcQ%KzFCx?ctA8 $`[ZZm<1%؉ίf¡-y>l$}}d(LVi a*>`f! 뵃faFFi3h1iXؑL.w:z Cnaofh4Z\w #۴鼠OԊV% Wv>;}B U->G `F/Sfa¨ gR7]]=L0M%-VZ^PALMQЁ Ntg,'!qMfXrԻB;D&ECfj +'*llj9Qwtk(mU0#hղ2'+&*Sf\0}k'ΦI`&ʹ=4u9,\KXP`g-C]Mkv`Q d}GTPT< CZM҇P{/˿ޮJ;RכG ,c/zeo[ pzHQ5B3ȠRj"{CH9Bɻ~Z03G}L`MjVr^Gq%hboޱFr^d 2#՟T?P| oi@^ԻQGmE{n;IR=G~c=+>"Yt<&*ɹ3P&}?2aHi/Ue}ލ:{_ZK;?ΝR;=~`مoa4]>^F1.HsC%4 [0T̤}Բ6Ez־[0NHA(uhH; TpN\fA@8U %*4AO^9eQ܄Ox!,V;%{XLw~f_/-vghpX[&^qv/8"4nv h(Ԭw+Ea4(.ZnR!rmhhXӋ#;@52el |@#(PT;Öri 0uPM2m"4MD R{o)!Ha,U:8av_hl{}^I* )+S_I1`S1Eil3M;7 4${y!Ѹ~„ݫߢQ(,pv0y ^|뱠x:蒙 W"2=oX6?d+꒓@eۖDz{k85ZI3{pO2 E~mViZO!WSmEI5t+/>$Gߋ%]jzWqHr&QhS<@?d a@hW~k/*aMbP|[f9lwNeBQdjN4K+.IªJt[6vz|m$ٕ`5[(;ՏAg)U^E~xoEF煓Cgu`mGG]0e4Y쒱OW1Lisp)\4A` ?mb.6 >:l;|yR/3a(^f2 (l-ʷt t/aWFl=z^A!׽C_6)mV H+鎍P:} {YixX7 e(賹rYiHF֭W #92x/];!@3o2aWؠCd̝yh .O% bU~h ilѿ?~i1J8宓q\WYTmP][S'pu Y?y"j'ZK ns|g9;k!S~2ɣX/~m+q2 Mǥq"})\aIQ(V7ht 2)y5h6zBҞ%H=M6-jD M+$+g'[Dii+GDwS7%C; oC3l#߁gY̅ڼ#0Mr*0۷!]O&l.u.mV^]t1>na}͋Z4Hl#iaPw4`qrnd/<}OTgؚcCUD,?QXչo-^ZХ얧T#9r{^*̰dy[@Nc6pUǎ-{I*7D_镽@A縏9(Vgtdta\p'h; >h{Ew'ù9l4{t(^8f1`:%W7} V-&0s9Ʈ n χܬ(n!No,;+9zxY;(7`-% ete;d%zozOAG\>=R )1p2wLXKvwKSpuBL=n _ܟOANez$)q_ ӔJ!@Yꑟ [Djag24_t~}5/b'Y #K.XCkU"?fa0v &L#T#a;,f>H$T70~*=+W65~I<ƚIɝ'q)ᓏIIikCtT>%޹&֫<\[[K\4ޘFff܋n3~kϚػ3Gs3qOay!Se@XQ.-[aӴ"{[9,BQ]]z,Ih]S!G؃=L|anf2imZѫ3*?)nN&d%wLS_:aЗ@ $ \t1T1nܾqt}>S{sSM#!fzr?L86||I=@Q|Q/-s#IM3h(fN9 m!FP 
tA`l4,Iڀ&iJ.h =ƫ~&[b,|71[^mz%w K3 !DX0>w7-nɄ{/ &o=riWi\×' ywh0oV`b؃okoߐKcRָ s^ q4=yJ67J!@\3V0/M&CPvo/+mLB`0u_qckBgi[eLX7Z/*>ŀ=%-0_u|KtN? NԶ@?9;}Ո|E&2PQ1@HtɄPɧaBR.pMn5} wl;d}Oj}yQ;< ԙ_׌ޑZ#8륫RsP &FGT z9\1(슑8op5dR**U51  M $`UXv<#4)w^ܱ Y LVl;[S"-! g&ňtr_+',Ic^{v ƹ.D;c{Rg`Gu_  l})IXĘ\}ݬ0'R?wוE}ݔ{ml[qzԋ5(U5NEZᶧ0Lq %@ ,m'X izMiüАƼ{pg9,N+}/PzjO*+ +1sAZǨ) hp-eMΏ$肪xyCT5TxN=je6H_-Р_}uu¼{],`HnARkbXɎf5f 'vʬ *~}yO}M^> |98 Nj|dr80_iAnR*+)OwNB}GF$ Z)Jjms^a-_Kl@>r47/@)0lx0ԭn&@aG#4z7=CY{F"C~)6_`bB3hϻyrMvv0a;6,e-alo)0 =0_ec wzck yuZ_m~Oէ3|գOŪfx{@Swc|`l/Bδw8L^oYs)/Ǡ-Ue#+ky" co4b후B[%vx|IZuT<9sX{ӝ4,jjcz*k\ 2UwgŎ؀PeHoOCcVXA)3h\Jii>u"qꉑ]k8I沊 Q}j,i6y\6y@lBm6! s%l[vʬJ D*qPOSOCa3X胔;$ ?fX4[oP_ws`2j&^(B}.gB [^U7ERx<1 KM)QoXfL9,TfV nY+5U!alP !|jkSuq4#'U$L:B.uqR%JA ?O ͍/P2w6R, RNEŰ-L! 咳!}R/_$y#O#>UD;+3BhǓlava/data/bmd.rda0000644000176200001440000000400713520655365013353 0ustar liggesusersX U~#H*"""*J (G611ƨ1ںwc 1cts7c1̻EmZx#%߻/?;9͟:V5~Iv5vk}K?P_)Uc*TA7Xbg 0{+Q WO1RbhŁ1c+Q8Lqb I#SSG*)~8J+tů3G+f*Q7c)f)f+W8Qqd)9yS)NWX8SqblE"o*(S,U)+4> ++*V+:kk?.]ىOy_%o6i֝Rn42$mE%ͧ}:Cz>4/\"!"J/ x_Hc?/\RzÖ3ߏzVbGw,ңOz9?*n͏aD,˷6Ιu_mf5VW>]b%RŚ"  ;@"g73zI?$jqCꗾ~IG?I=5A7H2{LI H h~E'I ;~")LqߒTI/u`v&u%(j9(-=։Wy)>Rxa9Ij+? ]&z9C?gJ:!x: f-g |*ܩ'3^Hr3%>-IIݩ v\Hr̛)fy"O-폽}"H<տ1ao{[w7@bw{un|^%O O/ ~`7u:r?ay/jd=GxRw^I{/a*_2 zaM}xBSoE/O|=gO_ηuj? Oä>o@Y~'u~$_ GqɠGxzG1R@or<O['t@ oXO?m+Xoŭ@0o[?[#+>;g5S]?hyh: jvֱ~Ɔx2OX_oۜG<}j8,ơ +}y˪?QWCo}DqU=uki}J Թ{MWy~Ao>_q`?[Vy} oV%Zy_/طRWU>OS>uyU"!gvznG@~ܡ/t:]^α2?gb嶙|>yA_rq\{я7uQ_ 7Qw ؙ0]q.ayXw;Ӭއ@?Q߫>:<'9y;8k7+=;8bysC/JmY雔}̟`mt,p@3>E/ֶ]vpxg;[gGweGuȼk+Ku5mUæ1)>O^qk/jmto[Mi[K&Dǿkm֭;bܺ6lava/data/serotonin2.rda0000644000176200001440000006747413520655366014735 0ustar liggesusersT.6EDD@ DD (&DDAD1 * ""AI&TKFr9g{^={?vSflQSXI$Da\ғW?H$՚k7^U$(|=\};dx '̄}*J xGk| cL_YMDIɃ>Fǟ{! A~Y9p'IʶWS"5܉ux\klk߿9n8Ft4g#7=H>㫸EFb&+8 .uc&qŕ? 
S/gN~$8wZ<~\/M>}KNHsU÷NrP,R_ms𾫙"HyDݵ vZE[+8(>I!u>l)ۧn!!f^,lq*}m=b滴sdF?~V4\[x"Z{.ig8z>#A+ 99:j24; gyY7궝\g}c#;İq'Ó!e' ~r̂q9bD|kx!~۹% W<HDR'_ $H+de#}uy?KJA\GJP/]țoKae"ZvȿqvO_߉/I&kx KgT K<zpW1y-J݈r)&<Ko)F 97q5)(sVK0'ܫ{J'ꭅ*|0! ;ȈH< ܍$n0w`'ұZ)0_H lg]Μ_쇋 $IܘГ+ǜYHA:8Klշp-"zLp 4繳Eْ7>DshMA a6}{VR]b~DjZSWKTzB(;6BKryn/ѝiF X) .㎃] Hy܀ïo2 ~C#o!7]!\jסɐ"Ad1X>Ml_:JLk%֨ -C'@ v#hXGX O4zb1a|΄\}2 mL7S _Rz15c|D;bwW][h`h6q /\SUt` oIʷ1o`[u_1xHے} 54 lӹ+mIV: 3q^U/(]xl R'CaYGځ`}db*VU o=yDaƦycD/X 0Pط6^.~z%AvX!|=y^2-BĹ%Z;ZHm?† g^A]nU1~S;}ͽ'"uc^Agԫ^Ê]9 :=#Hn½!neQy9,=_3TR u7hi`{Ƅ 8KxVJk[=s}2<68}Po() 2ޖl~ D|_jIץ=!dIu ǽ}K]3} rk_^x&et~){caV)KyfvzxŒOwLAOZ=şqO{F^x[$W~Gjwx2W"ȿj!fUw-Upm'8qݍ$Yxx+qK1,6d _Q*LzVK]"'#:3y(+U ֧ʇⒾDÈ5X+(,1i³[Sc&$弍EXC1{]w3hy'2pVVݜ2>^lj.ĺ@ģߜZ&%z#'ۏ`WL+gr@`6 ]byGwPqZW{3C{ K뚐iI?)+-,DESDI?Cqtӯ\7ǭNu ?Pi^d[qV#ߓ܃̈KpD.ͺV{GR`j|wX̥O`*IMϽ_k<{kL;d,NSVVɬV5l9Ԧ|r*qI;MsʭŁթH] 'ӟM-a{c9(p T|L; NszT ށ2?S>yu3uok+Wq0 mZs [h"&d#D2#`54$$`}ZT9c W^ELQeIyB!t 괷q=u;>)"/%po!1~Mr «|yU6`J-ŚAf*a8vjAp8'a>d g |Z; R؞/PͦLXurXnT) n}aCgK-}y[;J?QN߲rnOƳhǹY|iF<uTjzvcZi<ڤ$1wU}%ѪZ\wjCfBpI<]DrVC%o4~nb cMQ";XȮKC-^NC66w:Due ވU,"|zޘ [*d [B->I$wy ۡK;`ۋr+縳ԍg%1,MDb5N:q1% >AX詭܅8vĽQ$7}E ~ 1V {͉ܻ) -o!M';_ .+naӢB7%367l^='PT6XRp7_ى8tNT Rnb@;o/D׃ ^n#\$^o_z]ׅl2yG [G,ĪvJP[gE;m#vj΂j\fx+eG+\s Si(sLKߒOV*%υ lEH.e:❰&%1C/8~Nߌq7YαknsUOEbZ7QԺ)̈z=BrԖVA8n L/ҙܱ^*m>xDCW2м%nqihvU ݈)݁}< f7%nm :w~yay T;}z;n;eLԍ`TEkܿf-9 MG^j%0՚uf'xh1A>,0Ɉ{k*=!6Y ờ :j剏 SNEȁ|9^ZQx<4h!/wdN^D٣LL6z ήuJer+A$`+FQ WE:^#(}a=TP}K-rgd6N۲"uۂcsV&,^ڌ3Uga\+RGqpxE浧0x(>"ߺv}6ْnTZKKHAdo|N7A)@? oy vv{3ZsՍ u7Dwa.g|:untPɐ\<"b>j+SkکŮj+XO8DZ6LX 3#魻Lg^ѡ19Iui>sl=sAO=/ӥWN9`%$~9VDQ(Α;gy}v10Gc8SΘbqX؜oHٌ܇a"İǽ&P9=X Zq)V<-D_$Xu--%&I0_.L;2G若,qވ!G$|Z1 khаz^4FY:JZ`=H/B69N'90:M1i{íFKHU ,FobB9n>W tIZcWG0A7n3zbh$kDU_p 3qzL3rI7jB lۗ-mt804[@ɜ! 
&2T`!\4b}\ GϺ6qy'#S cJw3/w5T @SgF(H.3p9(u3GKr/N~>)4Օ5Ǔ1P{m9pY mx5z ߊnDk;3n>$XCd|Btj>&@0\<:>^kڨz%{G8 soWt`5"klwAf5̅:_WRUޜ]sAxb1pa Th^߈'uI!ҩ{9 z#L,yMC!wmfL1.08=ԘzMCC\o2y&8٢`b=A,oNk[@;$QtP = /9?L~:[S>I#5c5fZV%Έ,Iy[%Թ=+rf4_nl睵Oƃy~0c{Iќj?{y^(SWzn*-pȫ5cFG)Gik&Č7AlC'h@UX _*S:[-"c]6l`GU<ٿ|䞊m2Qƃ?z8Q-h#Ze6%)AKtxT1\<:/rPGNn8;6-{Bt뙼r}xYOtq-6u6̬3y% ?fEe=q>|xf <#0bḽz P*IҌojj>kY=Lj-AgZhgѸPn'Szϯ{YP*:HIRR1A1ZEyɝ0'K-' ?s"py؁qՎMx(ٶ~aP?iTƝbOTԩ`D|v21I{;ǿW$_IA2Lt\I2(l#TcqV>n3zY^SڏkH}^O/4ӿ,+MEdMh$m7 C4Q\K⛯"vƣ=GmiBGvݬ2u`׾to*/M ?Z+) "ՆkQ=hx}q+s2 9 xϽK}B:tq,SM;݃o7ߝIچEgc nGgsi+-cGܶ?LtEi7ajZR=")'i1ak,5 I C#QD&=mV$@ xb-憆3wsPѫN]wyỺ =YMɍL{pokTO_Oma C1<0].-"`n߻sQLL;bcu:} g_XTo4Y a4d=<؋wL~s@y%z| Zy"_c%`vz!z+Da~tϙ~5 ;ן$Xu]V\$G5@aQ?´^3q 3ӬTqڳG+~0~w'ܙwuAK @d$Kj';*q~{niqM#`=L߯׎G*ȑ+GL^j/?sD‡^s0ՁhL=|9Sy*#iI^x}\b̫GFL*]G' p# %7y_@R`2&}-b?zPj$H}϶tŘ+R9پ؇~C18<M6|qYU7XS-`ԫ*2x)!SY> 1 q}O.uGO#kTW'KpCUkxFbĞx8R{1.g|(9;B#bOp.{,y%tYb/.N/5/юq5>TbyɚmN^[]a8}Prr2ya''1AWyWD.")0PTN_aϚ&Юv$S"_o#Nn70SuP,e7 -{뿹£&`zd#=y<gnʿގ',JkrB겜Z.Wj7e܃ A%0q2JmXu5ܓ[Y,KupW6k;1D])Ǖkx-\&#:xpɭ51S-8_'=Vʡq"gLfϥô&Smط;9MǞ\h_LMٵMխd U0#6V|O;ko2Y\dìavrhUw>1:up0}e"|-mb_O΅p&o=SL&%^gZz@3;=Yy2MGሁb)MMF4l`. La+k 7乧p|h5jb:JZм.0}LyjHu^͑!u2Fb>`Ֆ?n/| -FJ]ImChi gsnM_* Sf]CtH5YZmǥaBDfcyp1y}o/j\-{1Q<$zJiװ+yyxPp+ ކ;43S$~ BQUʂ9_&K\l9xz:43"7[pcrַgv;50(sS;$g,$-R}N´wL>j' UfǧxSTVJzs Gag;`mpVؖgʎrǢkW-~T>cgZ797$aj/62LU DގTC+뼞eP3P48k)0i7_Jr{S1.הɧs}C Iy70Y-P-.%*q*_!<@J,V߃l6-^vBݦ o⾿H0/){Ƶѓc>`vH-MР!<56__; =%n?`bnP'ko;6 }, Lw* `똙z2K`N*s:ʻh9ר]"wJEӺk$W vZ 5v^YߪtjDW8#jΰ"fŃKI092ML ⿼R6󹶫;.0<u9.DV8`dǐRṽnNyKgY R?@zЭag\^eN?0?lu³X?gnÊ SERD)_o&qi;BYpC!;^I(γ7ۛ(_H -OŒHרTD/k7k)3ϟtq\b[L>ZˌH-fU 0| .,K=xz Β>0CZq nj[ z.6Fti֗3ǕtsT.yz3j'?qU>\;_2m%/#J7lko4C!]0O4D{G(rI9;nxr2i),]NzC7 4I7=NP~s1e#*+]qD.I8ZcMU/\%nMtLMCi jt? 
3S)$B nOA|m4O%Ij˿VW޵10e<mիamݐ/)MPjJBs`|6(k./2I: ˣQMf;[l) 92)I:0mA[|e>{בO38%C8E9x9uMSSA8UpF$c87B %;}ΤkNmOýGɱ!:ܽK`:L:W~gnpæ1v~dT#zwi^[ lc^=(έ ޳ϔk%˛C66rCˬ+4 yN܇ TIxǢ]@teP wuy)'" N%zR#؜hP{=NwE"tax2S6$sHJgi/TwʑxdI\ b,t0WgoXwk&\gMigAYp=~lYcYzFM0ULiFϗpD^fE2w֠C+Oe~ě :$a<}K GF&pQ5-d2Z42;W (j$Lб:WmץՐmp(.iL+%8*bHl߃N}^ KB$Ϳv ryDFHl*'?܀sx[ɌGQ> -5 I.Xpm.жԭT V]7wmpo7mX*kob51l)(."蓥U4ѩ{ m?}y#@rx2s4%)#-?4DƇfEx|DCbR]c 1\m9 -Lu`,0ߏ/h5/26h6ɝxКeObISymo7|^ïӽ% S9D=;I%& Yl'|2fo;DZc?,dnuKsˣ_h4TTj!?72!t|4/ʁL_t\{-!n;x}ַQaQi+j~jw^ qNW$ Dۼ>##L<^5qg?VUJR@E!ݿtn G. 1voe7uW u=pOC8\s[ZL[.P+vy3'N &|47EL k M񔨿pz[{=44C.xF,غ  Ffm>i$Rq #{K,ԶT6Ck;e?#P|7W=-ޝX'Bk3# ;qཤ,xčR/.[-Vf6nw9M|XNٵ6O*'CQGU@:ӶWœq VtKwz'X2'O=ߦ nxa5G̃mv;F`tfiLlF~XXDƗq? Ztز`⢙]qS:+r;D6 OR>L6kMF8\;N"+"e `B'âK4{Zp{fDofԘLlb0KJ~ Tg+wFI;k[%@U ,К~0K j?o ߦ9w(``WBRYvV [6z^khuSLZs7_O|uNO- Y8 \ `bpu³x:63ӟ5dXwne_lf6\…Vo9`B%&hilc7(̮hs >m6!޷cCFq'%3غa#g}rÂ}Ze[B pJ^&-Npc-FxvB :U4rE#wbR?5uw5 ?DœIN3= PgO9;BoB.|(yG6!tqb 9o1kPgx۵t˃xt!Q'++%#/"K`q*ۆgSOe3'{~ kRωxK['֮o`Oo`q~H;xIwʁoJя P[a`5q_6kۤCuf0\AaHNj* 1Y(,Fo',޵-T҂pGhn x z }eA٬Sk1A3D3҄-bF3_dc5YDc`uhx}~5XV<E%w#vP@s]Umk p%vY37 S݇fdX#q:;>~Dzzn7Qo8!:yg\m0r<#E*g=u;?n;XdEʏ~c5֟۵!f=ܧ;YH;  I#ØZ qAdޫxإ0exхNHjoE \2"5qՈp7O>ץwb[0+=0 3p zL *]R8o*FP/ġ]l8֎cJH0_U'\3~0APaNS;X>b- qWWf 9t_Gd"wxz-7?!ES'9͙f|<8#z\_S!]I$"k į6'$]3;+v_3*loj5/{zZ1V76E$~uvXG/n 0 h_Ϫ2kN/:<e7Ag_rvo0e0dZ Jl+qFlx|EgО*sOݑY[nE wX%kbE[Ǯa&(tca8<ˌHO-K3#Y7 %McJ#LWdc`wJNoR#D|\Grl3!-ӟ0,SS0JԴʕvFìq^^@ܽ|J{: V M?|Fm*PpE)3XSIRdmxYF72bL 8yQ"AQ|[ LVFt- 7`:nG<*Ej{uנW1'{lۗ;+sy7QÒù0t>t '/' W;+q+AzZ;tS :yֳ‡evR8I+k{SFNe7p%}ͽjW~< wuO| ` ul,D?N~Tha]3DW35"a&[/l4L>G7Biihҍ2)* ]/~)@ԍ`Ƭ!jzrmXPp*Zsc.ms&,\}HAe+4Pjt0UArJ/Fu_ ɖێzڹǤ.K#t?BMf oSS98-] {v|bwE;=ϣ\GcӋ~m(=ȗ2JrQ#ŽYok woTYND6q'jrF?3어$\Qimy8_"1^o:st~8Fԟ[;,ƷvxQ1e㙄nX=zd7Fg`P%XJ?^ߞHG/w6yN¬mJ+='*'\d@d*eF)^]{_Tu(5O?0qJ6USB ][N"y]`Y'Ċ[9Vt"f[ 0j~:N冲 {/9#ގJ1Н[련 &>@㢦Mnn>J9nZb)s>ly*s kTIʸ-\0"X'\z &.{M#'3fiwA3~'Et2)"8˙dbZ8sg+@/R*O"9_z ĈlLN!#ˆ>땍ˮ@ĨEgvܱRk E)b;pn#Ӫ^M?ggDXca 01KS CK*n9W.mљ2T4:Y75Glw3ܣ}YݳQÄ'dQ7O] 
.I~i'%Bo4F\iRѻ졝CXZɣ;ۯB5'wbf)]mB9V㜉:AX`k)E0t.@ >RYwT81 0H6CCFz~6$3g|݈3ÒcpYzDtQ|ݕDs}#imG" PJn)"F;1ele o4w"JYv^ no>'8:{0c6LhSg1j0˥5so4=qG$ k+eʱT%UoT\5񉓁oCV=f_^F6"Z\F~lڧIBe3:s Gw/@f;sI@WZu=Yʫ_xJE%bANL߄(i%-?FbARN~Zak.<tu3;dy,,nP8JA%;zgMw(¤ӦN0qMG*}mN^eT#86%LX{9R:]qtZQv(Agj7r{eXᩲgL4Jp;oc|n wuZcl$sq|܅֌hbjKh!x1/]a JvƷUcy6 *7'XG0Ի輗(#Hv\jah>9V6/CbC`.v2T=/a;vxŎL,'tXk}i F%Cz8ޮG[i80ıƙ8dDz1흽0u7I 8чsj}10R|JbaGl1+yQ֎!KCͯ2A0^s#Gs)HZzBE+hO-~| N^?̽K-x|IՉL)jƳ{^L6D+m 7?igbPOr8 L-!}MO\ØG1ISyaVQ_iW/[=Lͼsl#0XG^^g^+Wp}'(eؓkeqyJgQ\v^m5MF‹cgBAAݍ:՚PlsM˜U26> 43#' c5m%a;m6,i9?a0^B %KiA+OծȈЩ8ilgpJ[0 =CneTX8HτqǓ3s\Eқ cڗp{(⦞ͷ;OpKS52'Grނ "xbBz(n l2ٽ jNF:.i))ٍӻT% _C{޽]Vm܀;7)ݹw>7w۞V*KF0rq>πJ:Tʅ+ s;XlDڷ[ [qG1N{}MZR}o惞;{#Y!bW0!2 o6VÔ`<-cE0.S/ Y֒'ӗzgV4_8kTI0#d ^4j@gJ؎kbY̜`*>WmbF &&9Lhޜd3i}@ ڗry%; bxW;X8Jˆ1K;EfM<׀#.t Ѽa/H0Gt)xH𦛸i79$<">xāE1ng"f@_OHh$ON h x[Èի7>e@/P-L5oЗYd?X* /agr@/ hT8M #Pq5A"V{aD:y|Wd }ÝhnP81mv ]ptSo '.9eyL),mc!s~"Zsiꛅg{Ha'Tzaʹ;~8q/3|c@\%C}⍠o^ȏi{6)+jQ^.,n/@lgdb_@٧GUp#(ÏnEm[%+mHZ;|vRSyo0%{{# dWo`VyUaKeܶab^4 u7b7 s_GRya9Bjz^2-̈RLcQ,^^։'Ggna*Ӏ0b+;iރ$5o!^9:L,͖v^#&>,u6 B;qp|J3|X\p{օ5Џ{8 \jښӘ+C˫0L*L8^Rd͜gpރkLik|wHpx1r{}Ǜ6[!/n-SU:)Q Uw}ZL7RңJ<|o0)>_Z]>s'A9*LkSwSλaREHWŲ_GA{BXs]USbv t>[⅏_KlN$S"= WJEن '8g&* ru-)6]݋7F5*xbLd}S @iExn SBqڽGF4nVBps>|t^kdjx\"Wgm#kylӕ{jI+z|([aKFy~H<)pX3r=Q Yדp*WuP4t ?9Ӹ. &)w=Sxތ4eS5הgcq+9΋iq#c;Y=T> /Д8r. 
LdcŶFw_rI sďnD uv:p=q(Z1{ swCr= <x UxOxN?d Yi&p;~G>\4{oN NשoG9x3_-*uv~޿K~y<]Yw3?wǟY?ټW_=zW_㯮?3~YߍWº~_F3w?/=ٺFߥWq.;S3y ߵ/zݿ7)|Ο_mtͩ-fTd>*!s-o#\ViWo?ۀ>(վ߭+ LВ@0[O s%gK┚Zݴ`+s!fH4,(жȅ޳ҖGxE@oģޯ;5U;&)tnPO6]\t׭%xD.,ృf&g oq`u_>pق(NgmFf;[{qcKנ6> =rK[ B'7'^-4}&.n;yt2gFLwbRp6ݭ &5\Č^bU@7W sX戜?]ҏkp$ >"[(.טue Eۖ=@8oES0TqJu&)-5 rEuU9qVrAܜ*gF ln\U%`ږq& w)q{_r:"Gz%Iy)``Ox̯_vMp:!GVvx6dsy՞xXW3g۞ 3}S쇩d09qC&ooo%/lw ,@a NV<ϼP/9/7؄1Ta"3<%Qw[q}Owi?9B[G;L`Xz'Jʟ:z oO<%5gMy~^sz7R5tK3}#Pν^bB<-F(/WV>N@wa|9eIv=bM.ߝ]oxW_J9yٺ _}S6KiE(F V=dq[V7qe8zM]҈QAn e]~&\<[pSE;}alݶ0k{J2~'E~-|"^0riL-kn>ܖ;{|(#}C1e/([l34\Z 0f}=fhOiNàK+_ŕ0]khxIIh*^.Y_<#ˁEDL-ftU[9r¼M}0ym7U0Wbtv;ɻaV!ڮ0ysN`/Ac/7u;;ޙ˗DX6o&hZ#>TDdq[}<]1{U<OKL]@:\#UWQUO{^B-ǿgmD0K0D!S(`ܡrҾg JYޥsl7ֵ&jM4$|7oS>1]f`jdv)驡C>;CX-0X|*Ts9HGhys0>zӧ?;@w@}|RQNk=I{;[o矍Uܿ{O?3tEww?)߰Zs\GHu rsy95wղ qlhk'}7doIQLnpvd7IlŸ]]_5MD;`ERo{M%"mz;JuOQ"+a寙c|uC-$:i ^Km_P{_aw=+RG[CWX1wjk?);p?%9oOt#R竬yڪ]azX46?_kJ4RWs| 6pt<<]RZq}ʿ҉do\i A'*xU .camה*;EsfzSHy/Oh{#VB#u^MԡMs^585WЕ_3HʡQK$gon*ڑԚb/Wsc|I˯]Ư.U8_? qwrWw˿k_.=tj>g]Wqy_zГW?H$՚vՇuRn҅]DŽ?^ɷ٭t޴c歛xlm+ݭ?,/^MJoqFf7oeq??8q˅f8tZ4tc~?,4%Ylava/data/indoorenv.rda0000644000176200001440000004463613520655365014630 0ustar liggesusers7?~Bf҄4I%iРTHAe TD)QO!JeJD}2옏g>>|/9kuu}4r\P(&+Ӫ̸za0S֬~cideu$:vUW[gnf0Rk_f9:+KKrfԗŷXS۩goXy"u)?~)8r3 <_ݱҢw9\RobaORw"`J[*77td(إT>!9@vaLVŮ"$+ &z&k[4HWnF+E/d|G7*RUpۥDQpY`kqPfĊ#i!Y ^k^.=`pgyqzpܠ]*>~׺d;Ef) KF>{peh4`|lbg3dG5߼Y:~cKs4LiuΌ"!, c{eʏ=ugfI+wR^7RJȂe0q=]yRjĵeiᰍK6?yW}M)/N'={ ˊ_q`w?sl֖J39.9_o3T $LZq \wQ-3|]+{b\;Z λZifC)/jwR*8.I(D3kJy4)b6V{p5/.F3!͚ `urAf?>撗%J_찰xP+줆Gbg Fr]u. wpR郛AoãH˶%@Ml1d`KO;Woāobk CbOc)ؐ޿5KZ*⧢K KV`R%.Id|.KXJ~ư>yjуX.@,^,AuS)p,5k=փ KЕvK6v,AtR5jdQ1(fMX 3-KZJmؤz3aH'S"ot!~ڊcWg{6 z88U&:&5FbpH\ -.# ~x /M'xpQհ)k0_#j=ٕ\Ø,y ;^eҒ"쵷$Ka`%xO[ᢺtl-,y u'LMqJ~J3K8pU@<4DcNE84We35A5NpS5Z`ιrT!tmN2*X16?^b5~ n>\_J A_: y"Y/P:ɰ.R+ 4zszƮlE ;[Ul\:{γ[܍sq?r. DwN8i{6#-1~" <9a;?T?bgdFZ.ʦY᡾y[? 
Fa 0vߵcLul^l:\, hU`#(&?CWv WzL6 # 8 4\~::Y+kpBsXo~0-h;>8x'R_)ًI2LՐ7hPՑHE\_GT`荢%)jSKqP1-2%}.0> ,srV~ !LeC:.P+Udc殾:gq`)MAo cluԙ|X.?,aVQ_A1obcOPBH`W.=Nf U~ڤ +pQ==ܛid4̼m8 8âV4ߖ@uZoX=@Zh{m*N}%=$^yy9F# 50,7cJ_̟J>h}H|zfFHlȒX{6FMPqc.ɜ2tq0]$ NR 'Yj.8jL$[ZtyL0ZmvOK/򾊭,2$dt,0,lƇ6 2xC6r[׸#rWÎ=:3VSX-z-G.ty;`sOFK1єx` ͭy̲|`WHie ^` ]Giܺ 'SDlIu >;z|c'6-En?z@2\;&R`4ˉ?+jVq'\ xčujr~%b&1qԾ~vz#~> 55g$}܉d.T'/;AQڟ:D443n>gҽ4z(ޡYmt4aYX$;HFC& 0!X`;7̌1~y+n CXzѕ*vW|5 q&Xs[8ӿyP `Ī:ߢ; fزrő ObzZ$dCo5YClc2VzMo_{\93嶛1'sŭ `T>$3x:}w*1jr:WGhCb_N}F/ { %~'N޿CJ˝]3ֲ^~0󌜉m G`;i~M Nͽ;J +T~ D ~U$ Ca0޴2L<` y 7Kw ^*b0Гʽ="8G617'¯z %$rZݦ}ltr*]H3dv4uvA (G)33O*^4‰]u qfsQ-Epk 'ֆi֋ƝI|WyO=OmY} E\ +sخ'ŏm#TE/qL0nJ<9=M37B[;D1iXW[4%=0uPeq(N`}I{˴*f6L-Za[*ެi`= sl5?{*{E:`ǂ? [ wuħ R#j./_k:Wydˁ7#Syd:B_)yi_aT֠9GO4co} Hm[{`<&A.)lbooîJ 8K-l.| ⿑"mМhuUf6{޵ׄy;C.CuPhRݭq]!@{ ;5?wub|1ؗ$*m"TFRzH!xBnS\2IEWx#+7̋τ&΁H?۔k(wCr#ga0QxVX ĂdPslfڐ{s^#gVY S%r/S^c(nczhJ|DJ⼀rM$OG;fnl &?J6߃R-_'HbLld'x1p䧇$Kn/$UلCZŌ =1*Oum/I2OU ]#znMFwe3e|.67z'X %$XIUҔр& ;XAX%3X3oUű_\!=udr[_W iSā)GHVP"߼7~m=pa}MgdX>t1k"9ӍŅ'qTuvx׵3%a[ıʫƼ_?&1B0uv+}zC`;UK4X?=8'܍YG@;c}X*+p*9E/(62!o _(7=,˯Aj q,vZ,Efe=#٬|Z^J|J3t 075TJm*)!k]^ÛͻkXuGzrJJ:3~qC(րì 'I_JiVz='e~gM1L'~=փ-kԗl)H=mGlFŘ>ᬗ: |BoyT1ű@icnd .9gٵQ&s ;+(DCjk:acfM^>~ҞKݐNZw3dt6:>*7OfsasdcRoU*lZOa$Tk.vH밮ܷ"M4`qPD0,$3 h~KDF[hl3)g 0)Gz[`8+84>3Zmn|-jw\ǹ2ږU޸$ Aف#/usb+F ~/ 6MxVNj ] Ƙ?ǁ`Qbzj< =mGvE q *tp:ګ[fqiMWb~Ikw?+x+8-3`C-Pa%A{2p/?}[atNJ5\jyHf6}L knɏ0&GջSٞrguNXy+bݣlVygȸfb+ԙ5]Be)@=yPNb*,䢟#X=mqkt i-J\mb8wY!'Jhc{ھ|9]x?lɷ.u5Шc+O0n;14ɨ.|V!6!)Gn _~y"GѠ)IS{QB׾p8`F۳06u*O I bʭaI'7h4翏Fa&(w3` Ao#Ǯ=Y{֒'qQE!S\} 2;x5;aQnF^wO԰XAI;x O3/6[eBmWq_uyKvE:}@7pS({QlOp[Èx䐔pswJo/qg`ITn#.B±o_aTuƮ^ôxP+Ŷ{\ڗ2s+L]Lڻ'd޷VyFcV<-Ʌp6e%i7~CaFxTP0TɃ3I}I0qMQthTI8he3QM0Q?i C`>$Ehk8TtVvZ2mi0mw]ݭI$qU6;^ӝG-?R m=,/Je >Efޤd¯T1.jb|OpYv63i8loJ%ǃ`,,0Bhf%߳"oB$Gn%n \h~`fk \M' C)ip߃&ۃMp偵U %30pyhz㰈,>#+u*x_՜V8PQGv|89_ sJqu0/uW:&33I)tH=}nz4ikn1w;z[ "_Lbc9$L~8.w]ı-,({`;i:b~2[ [ a@2Te6Nޥ[LlgQT 7vwH> іt[]umzNpf&R\ÉNB+][-k* N yEc[r6 SN2u`q]?"9mNXrN[+o瀿=$ͳ0ͳġ 6.ya- p߳"S^4_kݙ}=hڍ^4| 
,s߹m?U%)0(q]X4)\c*G`UJ1Nn#zz,p`[mqO*ezw=u;F HǼ3'HPMCz7D޳3R U*T ƛCU{y;fSL%vp KD.DH$^|| EҌ?< ]p+q k` Њ;j&8CEa<}(A8'' kޘhH0T7[~NR<$ei2. Y)Q-Ćj3.Yl'c\rKo^;+Q Ưᨁ@8$M(o,`  A-̰{=`{xa4h>t&> ^Ԧu xDߊū#{A%ga^~F>K .uYei}eʠe|Ƹ=&d*Q"/,ϿǥhvɦIs*q)}u>>F-8xǬ,dc R!.إ]y N8ku%ߋYIş@O!G`KtHU9fjJ]H"5la, {:VZ'{X*q C#f&UfEE{sN}&Vdc^]w 7"'d)b1SR lAfH/v, wv{CA!+Xdhsw'UZ$t^8~3_vo\]ن[w;+8{>|j#V+.so"JÒߎEEhW&;guopwԔ<ݦ0g_Jz4*֥'=IqHvvQ'\7N| O9tW-6W{s Ծ{;V\fs6/{&"|y`;A^Qƚ<`5n8yQ:/L8;'a/ŴI[cyaJiı; ayƛB]m8X0zz5Tބ79{}R%>% u3BCBoB`AtWa|vn]L6]};VeipJD>epJs*R_]2~=^ԡ$TdZ;V1X a_ :neQH@vmXqt>?Ie,ƌfp^ 9nhY%ȺBT082å8GglL+pUۻlj5{"79)y6]rլe ] `&𰫚7o~-;֫ycqkq#cCZ/[o$gR_Bd4|J·1'JS|Z C9 횇{ӷ\rǾ6c{ʗ;b|H!Dr\!7W7W 43n=k&Bӎ [a.l8mc 򝄩gO4K"&nޕ_g \pBr2{Խǰm`U5ײqT$e;r5aSzuPx܍ȟwj= 4o:8YoIњ\0~O\vt~XĞowKr,:T keA_s 0hayn~qxz ܲ0u|5gAMwk9 VO[fWnS gKAE$-Q pnP mZۿʿJ#۽03[K:ڷOvK?`AQ'~V9Q A|XJ07dxݶ`Y7d6>r˙}fֵG&($|cWM9bJX";[Wxz9:{I&wpZ=PUxM P`jZܣ;v[]Z<C e {z-w1*N9gYt>YHY4x 2Úv]I)*\j=p ya{oCWESS\˟˰mT)L,WZʜd#6.EO\p)'׬j;Y6Y#d#^ *yg$$Wvdؼ+i6Q<ۖk|ΘB -V7q7of&߉9KBwO p|{f{5N,6=ŅsߏOlT` 2}ӵ$V7Ur{UcqZwv.ͥLTp%*ۿ'qq$3Kt> L2 )hV/^Y!C:IQx#]yzxn %aaA^}[g/N>$9/ U,!>gȝ&/t3ƹKБ֡F2wP&R-`eW_9FVaхq~`t|ޟ=GSpv(Y)'zXwyMRsЇ;quŪw5ujIX:8}vfj"q>uEHBV\.cgy{qPò'rTќH?a) ^k}@9~m9+e{i|sWFO⼭ff0Fة 6{:\xn}ŮpWyyc E'<5&7cN}4ڣ8ɽGkSRCUg9?@+$mL T{mz#(h+Ap0-Z壄ݎ{f{J8,ìbwv!Xm2qUoeDr˙-Ù묵8tfyHI#V4!Ғ+&G/Mu|/B7C_>LԬB?jWmZ\pAK8N3|`Ule"ud<wC<ƅo %.GHK,ks.)OBK,޸TzW0f!GqTo\f?fW$X E5 LtX@C^=.:$Q >”#֬0p~Q7Jiw ls^-~,9?p^X{;`ޫ YOfX#^&TZMۧd8"vvqcεK1!kNPp?1a8e^2lpfc!~[M6jc:S3)J%yfw(b}Փs8u u㚦!Puë5Zj:yqv@-gjL:u C[S V~KMo'-a7N0$LBЄ5v\{ }ǹq/V-)SzKx-ejLb֍rߴd9m|ۓT_Pþ~MNxKomZ͇W}O?An=qv2QBTܻ14l nli.>%iK/":T6]i(]o\(Lpv7p~MfWq$4VX+9ކSv]ai8Bۗq{ZsNoUw:]I@WGo3O\ )hz6-ɺhF0>ߛ*o6+bf.j;8pEuc̩uJѠDgnPjmq>jpC!g8z>lgWN)UWm˅xaGL3M{{`t!^,'~E0Q{O$sVH?]bM֤.:]tJ[$L{5?;D)4N*:ZY "rtfD R$äD~Kj DhrPVjKzϡ"8L\PK‹ aX#ٻ@sTRvvvHe{=_5fa<1l+u74s1d'NM}ageTj0OXФ& mQӍ[JRW`2狟SٳW='$v&!_CыȝT8trʌ}j9J Vtq Զ* s7 3Kӝa`T=8 ։ː싎%'H&Q?)ic!`8>9&;3tнq~(ɛ$N299An<̤^C68k0\ *?4'c ӛ: |m=wp ݡొoۦ>YCEA+}\~0-0{Ԗ%.tYZ[ 
ąƟVHϖOIxk<7U{{}9fNhʪu{̀?=qv"&aޑ5/66]7JyCofPl?v'3ul9ow#zX햐3UkTYW LˠҢ2PSg0=] |Z {5 )-к2&>@IuC6倮5\z?LXG5KXs:if &jR8:ረ+RٙuJpO9^̅@X3 )?w3rlMa>#@V i'9^o܀s|{GqrM ܽm B)B]v2^'B{ o|/{Wd_c^ sߢV@pSzLm͐+x7E21>\ouT-DF|i+?g~l##_W'NB#Q)C{|XW3gMp?:Xc8Vx_+r.=Y ITҠ9F\B&+D'Aؓ4Pa&g\' a -Ǣ4;e ?޼Er>8ZᴊG[B#сmϩpЌMkb,Ҋn:ScUAh(e=-sYy 0@S@W}ts9Z*dn^ քj")4n,҅&8RHu8_]Sw_sƩS"{I7({G}Z`Vhٴ+̘â]OHBHj2^Ӳ!>ӰfY}.hdUScgi 6H8.um0^ͳ9?5=Mr^[PJTG.+_bwҶJkm0 vt 0wrHĭ:Eu3nJ iA(;a>L=;y+HE2^o=-W dx`!+-!rF'xF/\V(}УW~ ֎X]M0< JZ)|:Ay/YMuKR#-u;ꈀߕVa Xt?@\5@޾ ly!ZA2GAڱsOlI q%2ƃEC!B0Gv4w~) 0*TǙ;Le| KdWpUaYNAb%½cP3?ޮ7mwʅ0 vH.[0̱'׎mr+Á~~3h'퓵K*pB۽(6X}o3z܍uwgqsm懺JGLckcېHl}VRi!=+3J `Ӝ* tҷ_'y %ҧ>y"-Eaԃ%1}!Q.-0ש UM_9͢OdeXl)>$&++z }GNuS#;xzkő_i854>.Tgrݛco^s!IE+vOד|)÷!3-;7yl0{V _kw[ UF.oM>NsAmXʞ]3wa}M|JPXa&qeAB4ftW~\I:g&75,vbOŽ;Hyނъ'lBqIz,(P 1K]OI.8O LƜ1+sч͛k>TI |,] ҫs1$ _mLe?qX;m'e{fͅUz-(0߽ 0վ$z,.u2ɢgf`+}$G՗ǽg5;w>E²3=a\gU 01?y;I뚟諪0?1pO7JT!Wh{H"Z\80qz \S'hݙ}D7ƻ `afR?2QaՆ#V)]_RsfI2Ypcdwͼ ~M\yi+NdO`tn$ԇ5@s$NeŮƗl÷:ӥ#SO]`u~pҝˎ,w+B֞.BVTE0g$ԈYZbǽNb*xEU\6v,| .L\2C5oxtyUx8l%qo&8Gi:Eu3y* {w._rj=q9 $q9}4nǡ_ōvK[[}0ά ]7*NŐN0li$;d_*Ҥww-.l-*kbkf+kUc*%y>{.]sS90|4Rsd܃o"4(搴iǝK~y7Ͻs }Cj:vś*q̧-|s8v9 .tdY[F*:~Ǧl(o\9 ]Y=JSHO6Yqd-Ǹ%6uS%FP8ݿ~ql0JSOeMN!N5G&6Bא/ǫX+<&cC ґ.y`.;JIFЃq cN_/fM#nxema<cCnv(YmMRx+K{XNQq}gft9J:ŠfM.3  ,Wv=SVT#QgehFS;  Xzs&' 5kliSgery]/9Bm u BRh[f{0[u~KҀq0+”zB>.$Vt 'Iמ+8G{^ 7})hT^mR%|"~8kk YwXY *Α~@U%}f ?_K6 0k>N0bQ Cٰ,4 =jaPv9ߓ8u)I1T4渏?vIMQ mI =ޚ`@8rFc牣LpX.wCMtqtw$1Zcu`rV0S]dfMrxtI]`:H6_O`N0Mkߪ%9E'a W]Iw(M M.53TG9l vMl!ĺ(x; Ɖv78xޅP~ Y ǕHۗE{z`ld)Q*&޶813&ן AH6HBuiz~iݺ 5^Cˌ8vBLҒXCn> #9&[ڞ0kc 46U:OEuoY3}^eV7| -Ιʼn}5U$}`%C{yofɞUP[u(M;msUv2oEسN mjRG5j*kJJJz`5mit.HV_~;]p]()?STBPlava/data/brisa.rda0000644000176200001440000003724413520655365013722 0ustar liggesusers7zXZi"6!X>f])TW"nRʟA,ʿBJxߵeh;<)-Vʻ~$?^8`BNawg0 u酊8<53uXMweFI)!Y) #86-DcFEj>9O`<Q-8ʣӫl?̇͆OR9b<ٵ󗋃g_*JwWRSp\7YkpϳCsMa{>ˀ+cBjIZ䍷ɨɘt8%j2'ٶ)Kጙ (XF@ktO(VƹbU|sheO%[7~24]RKG˧N+7AO@eFtH?)nj]J*هgNĢ)GB~p H*{}YW*|qvZq*8HZ@ "Zowj3k2/"*Ԛ$J!x}ι)nb]K`@2tN {^;[ |2"8'\ 
`ݙy;4N|ڵ2hJt)GgmݴD]{aZOgQd1{*\FJ qe{RMshz1,BDVg~uEV3JeU&pLOh8I+{ 7hIV٨Br%0۝WgH;srظ;TK3'mhxIƣ*;SqcT#&u"^G[ǻvn=UF@qKvB'2 cʪH$lJ1Xotah"V7aK ܌'Xu#{P1"^)]%Z4!?bVvAVD 8=odLloj!B&32+]k2EY 4nTMy6wPa1Uг}I^Io>`3[IYˋ{;ue{0wݿq[oz,J2;x/7{r&ڼ 9yޱU& ~+}BCO5f~ð%XInm8Erali $ #4*,{6&"ǧGA'/I6P$ERoE hN}xf)-AB]xbdN9y(/k0EW-A9r^aE-w䝫$%: ݤ dw]-,W|{n slW 5TJX\5l8S֮4[QcTF:݆#1uw,! '+h%i('81z*t8g oޮeG`߂z9a'{;U<`$G by Mlw'g$EPV#lcvXF PBJ31}M.{(:j9 IQMY+"j/IKaρϜ9:0coS,Y77,A?@X ]>/xz7E!@юuÜ$(fY:Vۂm^L̷<ʎYr9CE)<)oңai*obp.h9u@ѨSOV|r/=P'spΆx4c\I۲8tVv6P:ؾVkl7n o+ I܄Jl'B )Vp}sӭMj[‹rp(uR.J;/-:IZT- HAq*N:EՑ e5}aE'GX /AIķ8yw!9-56f}Jab/hn{e7^_!r7C~vK0Ќ`3r%^H&-JVơ-p$ H55qANTĻQ0\&8x?v/` )7J+X=%3娙'|v|\ YuCDKY3'gKo.Hkcĝh63^ܦ(_.4/<>N@J8)g[6Ss7LOFBOt2)ɸMMDZEjG\L'^+p~ %Y|߮THRM 7+@9dnYUO-ü)CQo,$Ñ,Khgt~o J ĪcHLI0*z,5ym`3 #z;(z?,G`` ׺x!K*\ *-.}DO3tYF0%DhNO̕+ 1zfuec+s=Ay\iԳM3 L~O]@N){+ez#w0'[}ckˈ <{OB[r K75Q]&i`h[D! yLh?d|w*W@nJdsl3Z$aLeB nSBv&-yL(0MVK:/OzӃ/*s U#%13olZܪti-V+Y=Î48tޡXo[AUqa)` }z h۔;@Hp,:9l*j@Mp6a\Y@?ARBj V,J!hk {&r7LĹV{g-9Tn-Wb)tFp>7(ڂ!ӀxLr*!!?Zxo +2?&ƌMyn]CR80HLT̙b+W=xȶsVEr|k?\\fd%o-;ĪOp5>< A.~]  :|tAG 2)΁JR#aW f".Ɲ-^  *4I.j#<7k]~~EDJKZ=Zxm>O5P~Lۅ ՛4:uz;rB׮ds͚nrLV `nj~mw$V2{9ߙ1Z jްFំy[Si y$ZPޔ(s 4=" d}-W-++cyl]O|Oq1ʊlDѡl!Q W) σ%f;ZS.}Cfo!ٱ+hm`VXH%(]9CF%C~;I{%iS[1׀%ߋC ˸+'+hH,EN򦖃؊pDwuw=%U{YYZg_S*b"G~@{j5 D݌HC[;NrEr>w:Ba7Tϥ6/?$d"FLW+̍G3ZM]]GrykxxOslv !SzeOe˴y(Z^;OL7WxI?H[rfYϴL騆E&Ĭ`ed& 2tt;~47%yʼn{F累K$Fc|1IT2B9^_+?V,/s.a;J&<n:̛JJ#]pZFm;oXv=xȡoٗ uS.}za] qElr!y9U k)E/@txt0 0ӡ*j5$4_NŤJ" "d:XnM9_|l~p&HDmW%*U%U PZ9>Ұ7lrHQ79A..t 08a;{&rؽeƹRT^) .34HeJBMS<%2_ؠO&`XE$imp w4Rڥ*#V\(J_O}7K.q|߼7OZT y{S%Dƌ2\"mvwbLJ5BqSrSd7I##,.$WjE*1~8_5u :lٮowuK 3D4GN˲JZ`S/tTz?Wypд8:0L!Ng#82T#kA0;[PĥؿΟQ7׋]TNIütS9,CM/d i17~~iGC6O=z-VHG>9Ȕֶ)3Z=&x6Q_IP[ꁱ*,SA gl:Dq *,kw?Uڵ#C>D;f2Nu0a:)~@"|:VP ϿulqT`[K49Fm}oφD:uy 4}\w]0 ϾMs Χ/_9b'"DB,88۪&5@ ])g>`xvmq?THQ"Zܽ!&A9l>1);#=K\"fbRːh~..JVwQ2Jp%N!ӶEC=Dr(M P:afLmR/.&vPb׽!_n駦?[ڰ9yS acxEE&\2FqaNK~T.3dB(+|~]`yk dߤ ._oigFoTuvu vZn@vne "O |[517'"C筢PP[ꟺC!ڐ܍\޴l{`P0I'{+?/ޑleT{ x^[ 
\HYwgs&wx$%e/%eKXv)[lvA B'S4NvzUQ1EW]}QxO3'ZZa!bux TMɬ_v]vk8i-N|JyZ*Zf^ѩH.n^t]Of NPw7@_ԌZ\O  -8@i"V6-Bd2C+PAs;"#ai9l^B,.iDZNɆ@a,c2&sPd!rZEc(q^sؙ$_y1:L9&Yc 4+p~E/D+OV^ƾkE<@X>^v4 CVCW2Ԓ {Ono6⋬]#Oz"QTBXq-'2Ƿ̆Tauz hx/d.u , luv!XZ]9[{:}i{`9φNƆR@Y`/vZ6>!?$enZ6)XA:m+VJ[#!,s_e0?^(B,rfDnxPy[tb ܘ')qBUmb * QCaȀU)"[A7&.}}tW]dz@VwLxRMT)" l}8kHĵQ>N;8pLHѪc L;ͰG)ڐ6gb!҆e?GV4iP|1GNL/9)o${1 ~!ݚK-ɷD$mz2891.RhJ{ X5S\ր}ICùC z?|HN#51@aڴ16ᇁ D s5W,bcۏf":mmweqSe"ˈ1](]} icޛ"ny42<3 jVޝaZPDOUYA @.>ʘWE..N"O6 tD/+puW$/Ȧ*=ljg+`ɾ/5hkЦ|<,%¤ڑN04YUq)~mכzNebj\5kF뻱3?x65o~#Tu#1Xs 9Rͭp|ۣ+Ѐ+uAJ&<5_$G 7A}?施J}Y&{iֽ)W;~'&Oʔ̳WI/N;Y,Aѧ?m\w,+XbLjH3uG_dW/q8*>cݐdYmm.k 븘 8@U+Ⱥ&#`aiZ MԝS kĈ'ek4]3%̱u)zbArd?nQb, &]4f5G3J/;+Qgwz޺_%x)U#):F+W)+R6E`RƝ㻣-&*sn%i 7lvk_=O N#vx=퍔_Yܑլ23Թqe ڦ"=Yo>2 E!4=ZJ,h5MLDdi3>qu }e3BK9݈ptNȞ遗ҏ󰖽=Pkcu+d ?CQ[ ֲ:$_Pxss$IEY]mf0y5xM,+I(1=Wv9!?B #ihGoa;J{>{]^fk}ܓv7/$3n=g+ y]IٛلI{Sb͸KzyKЮoQ4'uU-"Ҧ~V~8|z0y[7// ~~.mD 8ڞNAn"2œc6W"=D?spj]Lf!0^\@0o$ s97<֪-oCxWCW'gf2D=;TE0@.) ^W\H ٕnfqYB=8_;?$CDx=Xec5U3P if[ˀą_mi+5L@8PJͰ‰#*] ϴH NikYNRzК59뾕Ek[J# .a<^C@e~`Jc AZyN6uŝM'uMեJ3IzPh9RȬsY@j!͌ĝ&`а0? 
+BU@'bguE\uxRӲƭ"uKtA5ͅKP /KÿEWon҅틅fq2BuWpCJ[$j}DpTiˣ6nҶNN`hAz7q^1w3C5QOձ1vY`~%3t~=՘u VJ\ Ǒe)-[*i֕$?"y uidJ^]Ԓ:I(ޡ@bbaTqV1{V@00291EΛ"Z^4*b~ehi:z4Baچs>aMݢZ\31XQ׭v;g2wgd,LuՑ1W)ŲmIznx+c]{cTfJ&)d _PE r#\v\1-How#ҾJ j}H6DOex{!BBP;u-KPaZhfERkh$mffD-tT帷 '2t|?)l>5)A ^xC!A%:Tc2=)cƁt%ɂ$sɣ:^F<:jD|MJ 2&1_ɫih4@@-hnfξR7Q?9WmY:kTcBxm!7< 8r"ënaE9=)ҕCWݳ\j 4[t4TbYhNv7HQ7A\DWSf >BL;sN* 7d@> -ZM "ߢ퀄gR XVJ덊쒤n?ͲewzuS҄< {!3?вIjՌPv ұ:ցve~,;I f `~˫C&58ͦD8;J(ngJI&teDiDvgTD ƖmUKCE/Kt rH3j.J:[Lz|R)\?]Puok4sP v.+mL}*z1S W]5{j숯m/"@\>W}j>K:1pV\fJ0v1.lc8D;}B&JB:@XhHc-(΅T#/(?\oc.$#Qh8yX| &+K`”B\:x<_ɚ+9|\so ah{6vLz $-ԫ%׌py觻a~wrUPҪ#D˞~ȸ*WʚEbŷہ 8 c{n*uFӖ9;Cg_ ͪ"Qꗶ<ܲۡq[@\x<.XmϤ4H2})\nRKNz\u.e*49M q{ww]hMyr~1"/Gߍzv3u2V91+M3sê} .p ЯgyƲnYv[{.2~í/]Cu 6X<'.~!r9F9y"0t,ZfenE,4$RV,k7 ~ rv,O5G k dk9gޓhppWu\LWv^8<{Ah#Yf~S"lߝWqh7c;a'Ǣ/Zyw\+o괽yl[دGNyZP8db%`$ecLCzeQ.|):ZG @6xG׈~IMhb+b= rSԗ4\ /4塔éГz} .u-{/1Vr_a%Xw\L=a0uDK[ZCv\ךLco_Хt?r<XZrŏc"6rn{C ` HҸ^X Ą?nAn&N9dsB5 AxxBu Н= efJlB85~kT4*0?TUo>x6Xs` o#56#:2QRo_D97U*BrʍiLavr44*=XI\BV*|%a% ,\Nފ%o,>(XQ)WDq1*L>${5L0źs+ ii᝺|LJa.ߠms:j¾/U,D9 Zq,֗GN[1Mfi9JZS-a8E=7?9_"eTu}z!ǃ$(Xs N/+a_P f'ЊѫfeehztE4 ޱ Ʉx 媊eO(,JRZb(sNJh7 ?^IZ'Ċz'{j&7M%lbc,%d1PP de~5+j*Dz֢xd3V˜6'Phx9׀BU8UY0]dBBsnW5ƾDzũS"4Pں/njj Ս!& sl[LkeHE>oE(> Aa I (B[Mc =8_{ *YQ٬[9A6^n./')K7oSq dȚ&ˈ9Uk2J@%8H <{CJpClYm-OH `Q6zRPP,yOBFU JCbGAdMAx_bn(Do _ý9hn%GdJ1D}c1.N2Dp&õ1ږXxPb(+7 pmpQ{8&'Qnuu8CW|>O:nz(kWtQ_Å5d"m5|Sے&[/y}>C Nvz#0sRU2v@ȶn~Y KS}gXG(4k10jrf0Jq&e?%YEd8r[ nό-Cqtgԗͬ v./{NLg:|AJ'}i f*Ϛ hgڿyp5l&Cv od]N ؉oSB! 
",5V)6cLM|1`JkhO}(r:<h@[w?U7&-!3_Z lTӱpCFeC/PQT2Jy0A$L ʀ$(V[jԍ}6ץ=vț:l¼TR>VwTݍ]$iV?pk't2p2XcY-GE_pمѓ\{euc0$ <'GE DDϹSg{u];$&NM OFV>N*f{Ap0(p vŻAͪ>ȀA_+E}J_nyd%Clj먥sݒ%P#^){GAw#FTDoI.k6'?n|a07:1IAVHƻ2-.uٜXf}ct΃RMz~ !w?V,k#(C9l1YrF" cy%$O Pu4>$BJDsу\}5ŸM`*9^g\rؑ;(x0 vs҆?,U9P#v0WƧ'Pg@{IgNV?{։zxϟT/wJ[kΓ 'VKД)Knr[UZ|q'} AdU!3iZp/pgtO9OVNlF`2 th5V"ЛP oQ8VʓO>'е8OUp(lWոkJ af$7Ҙ5x}߀To?Y(OɾAX4e<>]`_y/];tVdMj%xfGw ۡgIqL8(ŀw,{!I9?CAzguիv_P2iОhaO$$KNp3+bc#5r;13ճ}3u?B7 ѳ[гt1@c&1TM|·f֨>0 YZlava/data/missingdata.rda0000644000176200001440000010030013520655366015106 0ustar liggesusers7zXZi"6!Xn])TW"nRʟA,ʿBJƛT#(O N, m*Y9B/5 xgd\-Yxׇٛ( ^( wPyjc\s@U npJ0%Q#D"jWd3%88zc咱\tX*nU"*V"c:w ]Iq#qۮ@PHzTEHU)-8WM0pJh"2.Ya7?[\H}څf#ͅi=4?G{3\{'Pf'ق:SN7r BɾR]BlEC3ZuPC>u;E+f[#hgrL;#zp~8E//:NΠO4Bp⤲|v&LzIɌQ/@uʟyصĬ#؎< #+%lzv[M$wGiBcCLQxIv9$4kXSC)x/MjDH{#qzo̽:|_e @(_O?v>u&u݄`\ǞU7P:}@ʃ5h>͜}v]=r!Ř93M`uh5 Y\qR&nܟ>/ b}[Zr hT #<~` -N+']Qp 8G|w4΂hLhlΡcyMvÀ׏O8}w@۳t4֧Ԍm@^`"IؼڔmӖG?p<foz`bZ_Qr\-9^YAT1[lJ2&Xï-~lU_5mHy|CrU^P ZtCW`ܱ -mM3OhښÞ ن4Q^{0XF8j&RkKaC/ hi58h4z/ 2-IK?(]ja?2|R>N  jsE($'xf7k-?g8ДE%g*Q8;=4&Ύ\P1i`XD*Ѭ\buݼ}I**i'̍ERCSwNOlR+UX~"ĪW1ŇWcfBOz,xg4sUr/fѶ~F}ɗ4y R"$۫Y\4Ɣ$(Y/k۾NDirV+c4mp3!zh=(goK<U ]M ;"11td FemU/OH?-fY,N%Oq?zXXR˭D/ gqu$ V̍ley=B d#2nFШLPhȕ+#b ؔw]\埊 w7[́Cg al^ِ-٩_b^Tw n8^쌤@* y .0okA-nRAld%գ: .2OPh7 u(=H6d];K9=9mY˟}Gv5Pa0ULm~Ąl6o4GQl+ lLxDV/WNY8O6 UR{깤5> >'aK8Ԇ|!*[CW ?B1HôX(!S{˄M3^b35Wȟ+9!v $ vY;V-iK5K%-S/r[锚N; Y#< Rh~FQbIqC¢^d&sF.&꺘zP=֮^2): 2@J|.:?Es׵>2!.|'~ t_2GuFYy Q^>ʦL (l=+cm9o5pG߼ƴPC2ذQ$7{#ҠbZL)vZii|D3{WiըɡdӒ8`Y^Fh_\/)-Đ|kp3dJEٿmӧX7UJSNTQg͕!C>A9 llKTsj^p++SUr OQƱ(~։lQHwn/*}ĸw.8ԑ=cc݌ Wp*zvU+C2P.5Jt'E2AՈZ,jvh-$NboJ5g!Z6(ݍ5 '8%Jp>J#[XA=-ݔ }ü-S/ '%|<5v Q@N@B+^֘jZ]!&Ԡ/1第%{UtS:TgBr1nӷ' ϗpM_Vž3KEwvADUO4ceF̺΢jc ? 
uG 4ba%p7K.6}ސ[۬^>;b#AEOqvaB7/G2 7]ʃk{f=4FӇ5܄29 qN=@@}{v^ϛD1)z 6`kղ~Z_fb巨nMCpBDFf":>q'W) fƉWO_PNF xr ϠI(6C/!S _󯾨wVMG)vX#A§^BJW9śػ;Vtg)i*p>RoEmSzeꢬlD 1mZwշؾ,[vn-ǹv5=m6E-a,ոMfPM3ZJo|B%Ɵ SuS(n1d !򯡵D$nu`J]f$l>kI wٌ.دK"Gܕ?vsqc ^Y(Kw 6ik4ZM#èfW/)]Nٽ7 o.z-KRXD[+lX?o5su[Iـh;i'Cy'ZO󰟅d׹rBiN/eU}ƃ IfrZF gkorJ6^LXOak`ᾊa%9Q B.ǡl"a_Ȓ]=Hi2G|/6R8k~GC"0pH pgx gJ`M+ ObكIײ 4qp!44*AYQ <\%#f@̓ 6'> q1 , ;W44\ЀL&+İ:N(u͚YTn]gUO.Eu#}UUΗLd&F`f(Q knC1Լk/|..-} k&E &%PX&Jâ-mGȟ 4*ٗsͽj>k tz`@!c}%]ޘ@Ԍ!NF@էjE {r_%i򡊼pxz!9:ЗИbc9T_ y06g%m8!;%UC)ͤtZgq%,AG08 \{NeS ݲ挛5~GRۃ&,}Gx[F]tĩȐf}IaiU(F%PiFѴ Q&z g󆗦lÉAV܏m v]s]R1Wc|BS`#fBEiuHP] f`=!!փ]溴F{.Q8&.2ɍdrWfB2hvo[ƞ ,VBS]ƿc,#dY[~*+-#n; <@bQ9D8;!2k;n8Ys)No^?Xȑn7H0 QI_ /FxVF2zOKN;\@0;\xڽ#\s6Ր>^zY݂Ti2$438k# 1ێ{7`?0@_~ڤꤟL"^f\Ȃ'<*k!J!OJ;ٗBoo.ύ:#7B6J4tjh |66cUlԆ62=",MPXr]%C`0VhOp lBleo6Ma%kqmjq\<~AjѪ3 NÊkorPyI"\˘_:n7 YKQ$5u1*QT#GwBӠ#36@Fz39PU 32U8q ܨ MĆ9cuBōP }1Q$2v>yi]09\ջ؛ H@/%36 ;Sj@}竱qE/kũ@Tsޭ2֡|jlzف~ؑ DYwXg8j蜥_L21{2/%H iH͠7ʄW}N1EaC8K9v3SK؀stΒ K LVhx5'J$_kæE,uDY/$\AvF? -MQ99 q AC9g 5S_NkV+I7H5Wo]_qF";pOoK"OAe\g3|-bNg)A̻ \mcac-#W@| hD@o^YQE"k!Sװ,D)cК"Ayؙvh@^:UܮiDO6fTq)T拲hjܢ(&ac+K#v"O\nI/7;G(J,ڍom,.Nˬ]/v:d n(<ߴݱ"e%x@?ULpȋC˖duD^DH:ŷ5. !|Cˣ_FIDH"aŹ*:&?^XYQDIC1zh@G(m8!h{>Nr}]%nL߯)|.MfA>sa9Q:QR9g2IH Ds&.Xd4ۉ= )1j7rir0pK!iO|>n%aGgt)?w$C1h|pzϘ8Ѱa=iM}gi$Ү3Ij^}dMp:uwẗ0sb35BX%' yJdbhD2r@\? 
Oٷո  #8{ rsZx$m> d~^Vtm7fH9)8x Xe- Aj yFcsHKXh@+Xk=LW5J+1QӽQ.,44 <߄j W?;NɁ:&*aW.$at>Qyv~b&_ЉWb3P?b޴ YAiWǩkoF:!j;oaLhw71~mC455?3Qȳ0Hz4H XsM[RW42Z~0rMM750|$kʉOd*={W}Bu*4V2~(mvt>JOzGgGIT fM%U{G` =!Y+aLLu^WUe1nNRfi~3t[n{%?H4qr_7E$qlc,Ss8]0ݚyS^Z /O-WDz#C,+CޭtD+t\#rC˧2ÝvA " ܛ(lmK5͞hʲpqEq1c⹓UJɀ 34nR D= ch+FJ&thcA0w= ulك#IX@gΕ\c>Ƚ{:>>{ռ <%8zgíP..'o@#fE~ ʹ{Ci;EӒg*ờ *ܰ9u/|]a{xgj&Ι>h2a,mjI0AnslC<a(44rB=ҊJȅio^ny_ [ttkHt*%J$mTt IT`Fir nV88FBgn-P 78P h2]K4R7{(RzN,1x砎j*O4!,!NoA8bҵܦ'g6P4jC cRa#Ɨ]uɝd۪#mN1T1'X(~q[RF{yarܬx=Y}j2H $3N*ZiNSb#XvK+?){ݘ!($|>@sV T/ ߎ BR/7|sadE34LJTrAs利C7ExWL|VGSX Z ]}vB1aaιĪa5(R>d=' դz$9ȆuPyKLņJ|Y5Dԯerjarcuu0QF-"5g+U'TE '/#4k}Ho@; |؝$^߉tlK۫N.ع@v7ŔPvl?ˡWCI)t.)W񮟸e e-c'6|>;tU@; 2\1L-hpE+[jĕ gXzTVC[1K^i{L0CȠpKsZa(3-;ͪ=zE(cZ0tEK۱MrtG3IӐճeب%3U F+)6!{kv̰-q"Ӥg3x ]/@M{bU8<6ʆXoR>@W!JW[-<=v8 ƃ|1(Ehdk\?-`~]-@do} D{+Mu9!,~Ę}&5[xxy9ci*qAnJ)Hx2pwSH?G[7ǪQ)ZIDz!3~h|`1i@f/Om|?oA^[hgi;~Dm>[n4P+dqVpxgBؽ_C=dsYZO#.8?zNÊ_^a{gLA~wln,%Zwn^[J?3[bx|#b.MƁ8xb6]K7E M@ɞmc%MoW& +lۇ?86V,A|d1D1iRF+._ݪ%{;@}nAY;t^y2MOCMl BOɫޮ'ѽ8vڣ;8~s3;z"0a@65-mܯ3+/w>B5yHo/mbVWO/9{T!3R9NW6a+z0rDRB^ʏ ᕠ|*kIԄG)z RBaړH?e7O]k'6a28u\'k  x9Ca*M8hˤD+et؏ [Nѓ dc %tk+Z}wf,ជ3Vj9`,n+%Xa[{9]&r'V9v]G-uKTma)Z?)ӑZ5r sq>w;>sYM>/={AkE \E]bJMTtC*`pi|YXSԚ~]MJ΁LrhՒrߥq&z}ԭ MVLNJh4Ns_V[d;YiZB0GA=^iP`.xEl={J=|#"겔׫XK/KX1$vV\'[Dq+re;A~)d4_*YFg$c"85OݙAh?i #.x(K:")+.rQ XB^,,o8ڮڅ'f5't/\,)\9tRdu{gqOЇ$G9[5]0~3(Fͤ<~TMCk%%4hhVث.~2SO(~hœ: q,2}i63?L Lƈjp<J$g(^j/kS[}AYO`d-XۋXe*nCn. 
AEŤ[!Xvnfb(B;8C;T"  ܖ HztRltBiHrHh؏/K^4~?&a}UY30l!u>d*`5K'(!m Jhfe ;~(Ёlyat6<34N^fE_^Ox)}u2j3y<Ӗx<0HkL [ W3WiņC"Ff^D!8gN\[7 fPp?6 h:g̮-*0dQ,[7\ {w#}|SlVxttd;~:o3KJQ3.{v`CWGx|d،zJSOݦ/&P3 Pك]Ky'Xk_5Sp:DW9"8^`}%QIP[#x\z>|ѩ˛/GL7 帪2M?M绒~imR夦+ɇ## S lS.b;`_2&bR,MOwqd>?O5+VRA&xD*'0vuU5"˻o&Nki\pHh}3a3XRcU}?P"j[ k"!j&G1xVt O?޸aq @8!S|/2L[-gTWuJh:4 8gbxSI,[~վ&z\0D+wKo~N v}ۘs.OBF7+pd6"F nIL1ҷ-F}9b&#q(TU]M88k0mkfY YC2a>T_5P!rk!M[ѭ 큏:q3B4~w}išḶs!ϭw1?A:-!ݘ3ըEv˚oo7LUx ᠣ3rW1^}9ZXoԍt+"ilͰ|E@ەB,>4huJӮD/kߺT.TKWDХ$ )\D3CYePR][C5фeJ?-UB1`<D'O2&己5捄'(Uze>:}@5dc(l"dW|& 1H ~ amO1 GŗzʙEF4C|4d=`D\9K*#1oU4(e|WPƸ~by|LeMf|Z fv X9>-T%]2 _Vd&[.EL ' Y\Gg&5'FF)`VwICP^%_q&'Ä53BMdVŊ笃]o5a WZ5Y\| XkAeO3l ]shhr\ü,Q U/ڸ@~>"s+bjkI$vlj6.^PM&\6LTJ-?~!X ,Mjr8~q+ O0fo.Jw+\{ɳ"9Ź|ڔ|Iॕ8bÛjZue],+1#8֠6  <$ KY{ca,  Q`pЃ$I>6Fi q؊rWI`UAH?g*WG\mj!*r)N:xe2Ẃx1V">h_D߲(@ض13k駈 ?:5:0=)LX6c5:/*IkeFFpvqfc۰Sd#DK֠S<ڍNwS?hU}m60/tJl@ :p~rjž('cb3^TcمIUH_ o'A~w #kF)O+k-y҆⛶(WM,}m=`ӧ#yVALA ˺S{tEd&&>w*s@Xyʳ ?< Hވ&.^A"`2|-_ObgG!{㟪8g3Ƈ=bְJ&/RQݠ e%ENN؀402!UD>|ɩ`éfZ"3ZAlےD24ӴW?+D>uF61Vɥ^"_tU_6`QC7<ٌ6fH2Bu#lB㷈hݥ{;Dl?V ms\وn†rn'Ƿ;9v0Tƒϑ-g:k$<Cq VkAA ڨޝ^\ HkfdmY[a yAFVGx!Ŵ{!,\nzM&NS"'!\T ݡq ߩ,#GH9CQKc5w!w6n #饍\=mr:5PH"A؛͙f޾C\r>z?!YX!qDn, xݠ{srXgn%'Z0mo~ MU6lH6. 5|c3'a$h5 !U,.R/ $ؙYCzmL+f#9wKUՇF6go^Ӱ+&c:A8 ̼'X#yjm@gVQĢvv9HSߨ螳AvǮr+(uH P:n(Yf)H?4Fڛ8n ǖ#љ%c=pH~^'2Z9$ 0ŭw?Y`WB7*er@1OjvWXЉ5[uN5&[)72+F$Gdmvۛ;n# ʉmdFC_~N&^r L>%L< ]4;>~"a,o+ h2@9x9Jtp? 
n Q;=TVrXI,o)%ْdTF i!(O8G8e-" U10UlӶ@_߲踪̬35%P7e*"D<q^cJL 5,YTc@?P¦nœ6:\Vx3SZyP `bS`̟\$;}c/ߑ!uDZ;k0 ޱgBC7"P/X[HCr{}bVQ\T4/ 5ˮCĞO~׍ؾPͯXX]I1#a}-UF&۳ ~_j8mr=f4ޚ|~1&PBk]qYː bX6MB']h&)ўlyYӶE?ZY#DJ+B EHWP/mٔZ"ʉ9^wH ]{$]4Cyy$a%}\7P<'d=a_dz9fOOw[a/p9@ا0GuX*U@Y6sn'\l65oo+ טּ10(_2Vhp.N0S*͢v\k 1`q${~*eED݀y661&]2z_'kPy vc{DDmυچ J:Q(6W/(LtyYMLP&Cn]h~l޶=\hẌ́sTl=46_݃o} &ce"{m Xj=ZNQ'{ʼ-%Wc6.6w yʜ>~A d>A%~`d\٨YtsV)DiCl2r 7d@|{Ο1go(Ʋ-VVrbYN@_7jLȹ­qnkIWҧ]C"4`PYWiȁx;Gܯ(7D z`썰_}&>ؙbj[>2?">Oqƅ*t\@P9\)zgW uqv:4S3ZO':@8h#C%0 G1ZeՕ%4~Z^|""wdLԿY29:#\|'_$.-mȒY@.l.moCrnV' (g!OvW'@&U_~/NkHQ Lx',*[(_eC2c|.drb paX-¬0؞zI4F{=k:Gz4IE$O*BzgJ6*R9)yPvPxw[ ٚ`Z [4ӯ!K*"Eu<jX4"dVBsңcr?[huz]?ȇTi u-XwzYMˀ_uJpw5Zd'.ڷмXI &#E1U܃* (-TK=đe,Q aRR(dYy˱ї·.-rr-)NA?WHJx5HT GyaDE/^AU~c6Ӧl[~%/Ȁ~ -brow"Sb.De7j*8R2Huuv\ T߇Xӏ?3 Y9B6!yb1yGlm&!["6yo]B|SL 4Qb͕ Wm{Xfie<< .aOoR"jwgdںi?+y,T]y V7ĉ[4uo~+bbZB7ɦAmtB!4SG6FTI; ͘CZ m(!nc#\r2GZɾ-BXۙO3H {&9½ES_|Ҳq$z?ɼf볏̺ƪe{Z Ok.}vAyP,P(#1R-]kE}U:I7OH!9NO<iKsDmDD*c # UWW)K7v2[VEIC9&zH/{8T XɻB4uql-yiBA94>쯒zWF2ۗ?1pStuWjQ *r-dcvĠ:E Xn0hRCtl,s %+}g&.-Z meg9ii ުom,N;v> a cTM}@б9`Vى|so+m,j#8 9vNA嵍6q,0%p2N#S#_(77nk/RDetg:ܩֹyQV_v>[m۵Ƥ:)NQkYԖ`>t84f{0^/G1Mט)P%(RQe4w#Dx9٠ C hmfP`s5%c{I(;ӍAD8CtH&N)@OvK/)1<$),%&,QH'׳uR|JYztaىN`u7>(^ˁkUzl E&rMCQ{cTd1Eze؀Y>h$mэ,/8yrD&8msO$F<"Y6VPAģ#2~iӲ0Ts􄣶w\|hZ˰/Q? 4$q5h9 Wɗ~ Ưpš,>I5U {诌; 0)"~1ek31$5;W>3ic颮<*jUHO%& ߒ&2wK4 61Vrz5;jƻUj4q<ֆb^]τ=i=uo|1| W0wv>T7a+G%dZklģtKb(E%kâ,܉R|q Gܻ;PYjE$zR|!Iv[l0*| [A`,!>;0U ,{1WxarS-2$u+ibMޞ) |J謃 (:j}l^8w2PR\~0$9$_lPol]ÃUk94ť^鉅VMiS.C:#*+xat"Z̧::Ows2 HUf9&eNsĝUm8^p '2Wx_DDQv\v{$Pe@J ցGXP`ѭ[yGBRQȪ#G~qzZ ߼][Rw~ u$Ys蠛9q2U^R5JCkA zcQͪPI/:LD0V"`W5JaׄDoe&KԜju? 
`5M6>=â.^Ƀ6.nܚبm: [%c1늑"Z`rgLP?1Ij(9Tq4s"OYEz#F$P[ہ(PQ5%Zjrs Rm: O֠AtYW 1s^o#/MfQBe ֿ$<16Cn\&&(xH M9w|RRgmvԋuM#^& *aɤȋd0Ӥ'&إ>>@>X9X"䐌~HǨUSGG)DJd8#]3d&5 \o8^f:Hیo1b^3Bܰ Ig(Z:Yq?MlwsB"ogxQQvt`,哺Bw4}~bg\Jʃ t bjJYћXxNIV+D͸isK< T$Q|yfIMbyX@'">mqf:^XK&c'F PdJ\RR/p^cZDԜ֨s1C0&N\em @>Ϫ|Y; FixXS91#8wq`'{SOIR”V &}jdv6=<> FG 1^έF :u`g]l"X0U$& ,P] U"OmVqs2?FJ%-qO  4pTwtq7~w~I*43 ':O;Dj>$ܒTnx?CTDjB4 05}=n.ayߦtPj 8-W%tEm9:lpq^f=F-y"b^KJ @צc WdEQ|Ѓ.}2'8kq}pYQAB 7k k{Pa&溺QThԐs57"5 _|&՞@ ܨPY傏ISj]܎2n '9e /)Fo F\gfk T ն)ݴC|c=И} juͭ1q! "3s}OWvT2$KÝn8aSdxX̫8z7tsu$(/},Ch9qՏ/N֞x=)wD &H^AgzV'yw/W"^d`!mYf5RUaqe$h@'wsƌӠYA} +e'{mwk^!%i S?w2-!H+g*{{Ƒx/ɿDPK0ㇻϭ xn4Aek8Օ.(Xv.ݳ̾nvMC췡w~ eg8cT;'2^MSE4G 2~@muq5]8'IVQ_ )8gETWŲ6 t9EfCϞ# O!IVrUo6Y-;Kjh'ۮAORm|~6ݫP#ABW*]C*qD~$ {=<.[ X?!.&WILqֲ=$ p}c z) ^J62ohnSIbtLOVUgQV)QO5Cě)o=:"EK38%&Q?>a÷jHa5'sq1泐O>|&?X{U"|˅LX %YkkY_s}o)}]XSO=w߻|x24I2`&mw\)ڈl * 0YpZ`TeŪЀB;k/5ޯXPrq֬ Q%RW;D#gL{Q@h ܜԻs84#&JR' `D= 0aTxD,3{r20ﮯ3aPT4f&߇4u+VLTj;|efgUjN# քA#| K5x< ,:npz%* {az; Zɳݑ-U!Fu`l>}^~ K1 [o͍/$~hȽnگ6{ɠȽMQurf浥}gD2D!/1i]mMI;[o&lmHlR*tAMg|o?&PRw:NeEw]#g_W7c*ekq\wZ* uѢt)STBu*Xb~24 VD$.(X[P(85?Նci˾7I֦ق+Un7c⧏|M)6 ,çSA79a▰ *{0Rf?ϗ:ԏ*_Z/켟8HA_%\(W`sbc3-=p8A53@DZ!xĤe(e>C!'4bqi v ^+ ~ĻqΪ:}9U5SzQ9]?.*D:6-.` kMvĚx-`FLu'ۻ*z$`%B˟i:C9t=x;Iagx*fRӰaZq<pK艖b@ E65 Ep Swߨ^&|Q1hCC, $ITw8k 󮽯3<GT~% C7F<2%6;1;; Tʾe14;+Dh)N)siUjӅ>ix32}u"IVcnrqDTK(WCAhˋ/wEULͲ؏ RH1`pXe_-;ŀ$:8']ț5K"_ %/HCĦ8rolβgeW3C %RZzWa?:]Qw gv7[Jw{[>`>q!?F jm0kMxFj b*"Vzs5 mނE F0km>bE6L;*mr=r7wT]JIO5q[ i:g+Y:')8|LTcU7 ;ujMk=%F37k#V?!pg0L;IUksK) Ju~F˖Z<6ObcϢZ䠅LE¯Ƒm} ]j8_9W*եԪe#(GmYg [.^`Ntg [k>ݟ\fǢ=V Z Fvzz|^Éު]9:{0+x.4yQT2 Uq_94 v)IW8W0eM-& U.Ey䎟c"ԓT4+|v\F(oh2ِT'q2(YRv;`|.5kj2c wV?#I<%HIykkg'6,eck]˘YpD(CiC'0+Pr]*Q|*h*RBLT9زusxbrǯeIHmL N^f@…tkǃ/(( 㚺|@Q䡆O-pu63ĤP4vWs_& h;Ιd$ؙ:v<#C BSfKG^;JR՚ $@Hb9y#Nt40,(9xxID_ rY~Zӣd.I$*?۴ %&H[PJ"fy{w[s j-OܦX!.J|JUT3Os$W~?Ŧ+xb-"zYQ`@#}'ޛ`)Z D}JnO78EhYEh?U Lj3"q, 0aZ;, V:TOxG=Trd@ȝi0ûNJ1ib0*; |c zEw{\vEpsɧUffw}LSL(O6bu|LtG0|7G|^Ud9|µM} ;maSyb,bjpDz QA,~ 
?/F,`|`"Dz8/-1en])zV9|cQ0eShq|o86d6?VkŃ6as̓zT)C>se\6%ESie#^[跫OMxeʼoȅ ȌNm |x\f 'CsZї LG! Uޝ`eLF`=5X53s_ X|YRH\>^= bk'k*-^%ޓ4||Ʀk8MDY!0oGldMƄNAW4l:) ^!Gͧ 1\>7,Î])w`Ok@1*ق/KeSK*p?J@pז&4<< I.o0^u-ى0q5\ JAV V߅hH!Qu$簤{8ڃwՒ#D>&_eJBHŒo'tZ9 %@m^+]ovYIO졊uT#ޥ_W񠳩#.38 oMxHiCwI~ [jZ` FqH~f%R$ *# AH r V7.Y 6ۑ)lԗѡ3&IBJ茙 rͼEwAW*u 9@藎oK $yXnGQ9:w#4QK-^gOqO'>Bh)qO)͍ CU |R)+E&y' [ѴP"("5g#fҎq 6DCm{-9kB᪾h*5/W"<+ bb(ZE6^aۗ4S\Yy6ߋlƅtuZ%}8'u!; iiK;gpkm֘AA\hR`9BYnB$(q|ۖp&7E)Z7z†-Hz 4(0 \"'e451-t( ZLt鰍OoW, ypO#Kв;3ΒnWa$g#&tƥQUɸ8U+{5ᅫUJ|6N^kh$Ad 5U5RIΡf-}m'TۿJ*P*߿.|s?e*d>_3 5u>~!Na7+[#4Aod4*# ҘaۄhvsZ(L:?0ww 1a9{ )Vc?`y\4Ԟ0ޔiq)ۚ)].vH6IMhN%ߵ-YA.So$l<\K̃Ro!A^:\j&]\Ӏ)*Q8񼑎D~tnT\wč>c. ڐiɫE4QΏ MCwy}:’)$kG@«,_f&JGS/`/ 2̢`ڶz6\4e*Gk;]M 9`Xe(cDф|tLoښ%fQc7 _OTV Uzzȝg>HTֻs";̺GQ7[G< L\VEK?1/gqPUb+Ԝ8h$n帓ߏc5ˆD4to5FyT E5J"Km {,4C7kΟ#qwՙq)XbfwN w]%u>JQ]@SI׀)u#K}sڷzK(EgIg{,".DT 5fTBGjة+{:[ЇO"LQBMlQD] O%ԁZک-8!b $e0H0AxҘ[%i؉ZӪiob=Ӷ̫qmd- >1 $䶈xd4ޛ+rцR?9Pз#S;hkvm л40݅ * ?uN8`} Tm?61X,d+aD@Lsث`T;aп!yZyT=*.BPtOF[iʍ;Ƭd7O?v2S_3">'.WHzwܮsƋVA'AOubKsfc"%g0a6)L =XT^uE"Ѣ+a%>4*NXee}zQ (HQ?IY\W|(5W>+4`P:ֹ#a^bV–ŞR9 JW]P*']?@--,7-\G%jtMN**}T? bw0+R`恸z+b%L#.܄ 6NH h5*ؚ-[(N*#iP8U~服wobVfq/拋WqVӚ(G)]tJ$#1cOsGħ",oC'kL$VgMj:wCpufNH}=޳bdh`&=ww{-i{'@E?Ҙ*r WMf R/[0PV<4+ɴ\ yCLw}/Bt? />vA]qxE[vs2 KLJI7vJv#;>{.Mdqp4oZm^:*t\t!όג0}$=HO N#;% M Ow>0 YZlava/man/0000755000176200001440000000000013520655354011760 5ustar liggesuserslava/man/confpred.Rd0000644000176200001440000000231213520655354014045 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confpred.R \name{confpred} \alias{confpred} \title{Conformal prediction} \usage{ confpred(object, data, newdata = data, alpha = 0.05, mad, ...) 
} \arguments{ \item{object}{Model object (lm, glm or similar with predict method) or formula (lm)} \item{data}{data.frame} \item{newdata}{New data.frame to make predictions for} \item{alpha}{Level of prediction interval} \item{mad}{Conditional model (formula) for the MAD (locally-weighted CP)} \item{...}{Additional arguments to lower level functions} } \value{ data.frame with fitted (fit), lower (lwr) and upper (upr) predictions bands. } \description{ Conformal predicions } \examples{ set.seed(123) n <- 200 x <- seq(0,6,length.out=n) delta <- 3 ss <- exp(-1+1.5*cos((x-delta))) ee <- rnorm(n,sd=ss) y <- (x-delta)+3*cos(x+4.5-delta)+ee d <- data.frame(y=y,x=x) newd <- data.frame(x=seq(0,6,length.out=50)) cc <- confpred(lm(y~splines::ns(x,knots=c(1,3,5)),data=d), data=d, newdata=newd) if (interactive()) { plot(y~x,pch=16,col=lava::Col("black"),ylim=c(-10,10),xlab="X",ylab="Y") with(cc, lava::confband(newd$x,lwr,upr,fit, lwd=3,polygon=TRUE,col=Col("blue"),border=FALSE)) } } lava/man/rotate2.Rd0000644000176200001440000000077513520655354013640 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rotation.R \name{rotate2} \alias{rotate2} \alias{rot2D} \alias{rot3D} \title{Performs a rotation in the plane} \usage{ rotate2(x, theta = pi) } \arguments{ \item{x}{Matrix to be rotated (2 times n)} \item{theta}{Rotation in radians} } \value{ Returns a matrix of the same dimension as \code{x} } \description{ Performs a rotation in the plane } \examples{ rotate2(cbind(c(1,2),c(2,1))) } \author{ Klaus K. 
Holst } \keyword{hplot} lava/man/estimate.lvm.Rd0000644000176200001440000001126713520655354014666 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate.lvm.R \name{estimate.lvm} \alias{estimate.lvm} \title{Estimation of parameters in a Latent Variable Model (lvm)} \usage{ \method{estimate}{lvm}(x, data = parent.frame(), estimator = NULL, control = list(), missing = FALSE, weights, weightsname, data2, id, fix, index = !quick, graph = FALSE, messages = lava.options()$messages, quick = FALSE, method, param, cluster, p, ...) } \arguments{ \item{x}{\code{lvm}-object} \item{data}{\code{data.frame}} \item{estimator}{String defining the estimator (see details below)} \item{control}{control/optimization parameters (see details below)} \item{missing}{Logical variable indiciating how to treat missing data. Setting to FALSE leads to complete case analysis. In the other case likelihood based inference is obtained by integrating out the missing data under assumption the assumption that data is missing at random (MAR).} \item{weights}{Optional weights to used by the chosen estimator.} \item{weightsname}{Weights names (variable names of the model) in case \code{weights} was given as a vector of column names of \code{data}} \item{data2}{Optional additional dataset used by the chosen estimator.} \item{id}{Vector (or name of column in \code{data}) that identifies correlated groups of observations in the data leading to variance estimates based on a sandwich estimator} \item{fix}{Logical variable indicating whether parameter restriction automatically should be imposed (e.g. 
intercepts of latent variables set to 0 and at least one regression parameter of each measurement model fixed to ensure identifiability.)} \item{index}{For internal use only} \item{graph}{For internal use only} \item{messages}{Control how much information should be printed during estimation (0: none)} \item{quick}{If TRUE the parameter estimates are calculated but all additional information such as standard errors are skipped} \item{method}{Optimization method} \item{param}{set parametrization (see \code{help(lava.options)})} \item{cluster}{Obsolete. Alias for 'id'.} \item{p}{Evaluate model in parameter 'p' (no optimization)} \item{...}{Additional arguments to be passed to lower-level functions} } \value{ A \code{lvmfit}-object. } \description{ Estimate parameters. MLE, IV or user-defined estimator. } \details{ A list of parameters controlling the estimation and optimization procedures is parsed via the \code{control} argument. By default Maximum Likelihood is used assuming multivariate normal distributed measurement errors. A list with one or more of the following elements is expected: \describe{ \item{start:}{Starting value. The order of the parameters can be shown by calling \code{coef} (with \code{mean=TRUE}) on the \code{lvm}-object or with \code{plot(..., labels=TRUE)}. Note that this requires a check that it is actual the model being estimated, as \code{estimate} might add additional restriction to the model, e.g. through the \code{fix} and \code{exo.fix} arguments. The \code{lvm}-object of a fitted model can be extracted with the \code{Model}-function.} \item{starterfun:}{Starter-function with syntax \code{function(lvm, S, mu)}. 
Three builtin functions are available: \code{startvalues}, \code{startvalues0}, \code{startvalues1}, ...} \item{estimator:}{ String defining which estimator to use (Defaults to ``\code{gaussian}'')} \item{meanstructure}{Logical variable indicating whether to fit model with meanstructure.} \item{method:}{ String pointing to alternative optimizer (e.g. \code{optim} to use simulated annealing).} \item{control:}{ Parameters passed to the optimizer (default \code{stats::nlminb}).} \item{tol:}{ Tolerance of optimization constraints on lower limit of variance parameters. } } } \examples{ dd <- read.table(header=TRUE, text="x1 x2 x3 0.0 -0.5 -2.5 -0.5 -2.0 0.0 1.0 1.5 1.0 0.0 0.5 0.0 -2.5 -1.5 -1.0") e <- estimate(lvm(c(x1,x2,x3)~u),dd) ## Simulation example m <- lvm(list(y~v1+v2+v3+v4,c(v1,v2,v3,v4)~x)) covariance(m) <- v1~v2+v3+v4 dd <- sim(m,10000) ## Simulate 10000 observations from model e <- estimate(m, dd) ## Estimate parameters e ## Using just sufficient statistics n <- nrow(dd) e0 <- estimate(m,data=list(S=cov(dd)*(n-1)/n,mu=colMeans(dd),n=n)) rm(dd) ## Multiple group analysis m <- lvm() regression(m) <- c(y1,y2,y3)~u regression(m) <- u~x d1 <- sim(m,100,p=c("u,u"=1,"u~x"=1)) d2 <- sim(m,100,p=c("u,u"=2,"u~x"=-1)) mm <- baptize(m) regression(mm,u~x) <- NA covariance(mm,~u) <- NA intercept(mm,~u) <- NA ee <- estimate(list(mm,mm),list(d1,d2)) ## Missing data d0 <- makemissing(d1,cols=1:2) e0 <- estimate(m,d0,missing=TRUE) e0 } \seealso{ estimate.default score, information } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/bmd.Rd0000644000176200001440000000102513520655354013007 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{bmd} \alias{bmd} \title{Longitudinal Bone Mineral Density Data (Wide format)} \format{data.frame} \source{ Vonesh & Chinchilli (1997), Table 5.4.1 on page 228. 
} \description{ Bone Mineral Density Data consisting of 112 girls randomized to receive calcium og placebo. Longitudinal measurements of bone mineral density (g/cm^2) measured approximately every 6th month in 3 years. } \seealso{ calcium } \keyword{datasets} lava/man/covariance.Rd0000644000176200001440000000650213520655354014364 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/covariance.R \name{covariance} \alias{covariance} \alias{covariance<-} \alias{covariance.lvm} \alias{covariance<-.lvm} \alias{covfix<-} \alias{covfix} \alias{covfix<-.lvm} \alias{covfix.lvm} \alias{variance} \alias{variance<-} \alias{variance.lvm} \alias{variance<-.lvm} \title{Add covariance structure to Latent Variable Model} \usage{ \method{covariance}{lvm}(object, var1=NULL, var2=NULL, constrain=FALSE, pairwise=FALSE,...) <- value } \arguments{ \item{object}{\code{lvm}-object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{var1}{Vector of variables names (or formula)} \item{var2}{Vector of variables names (or formula) defining pairwise covariance between \code{var1} and \code{var2})} \item{constrain}{Define non-linear parameter constraints to ensure positive definite structure} \item{pairwise}{If TRUE and \code{var2} is omitted then pairwise correlation is added between all variables in \code{var1}} \item{value}{List of parameter values or (if \code{var1} is unspecified)} } \value{ A \code{lvm}-object } \description{ Define covariances between residual terms in a \code{lvm}-object. } \details{ The \code{covariance} function is used to specify correlation structure between residual terms of a latent variable model, using a formula syntax. For instance, a multivariate model with three response variables, \deqn{Y_1 = \mu_1 + \epsilon_1} \deqn{Y_2 = \mu_2 + \epsilon_2} \deqn{Y_3 = \mu_3 + \epsilon_3} can be specified as \code{m <- lvm(~y1+y2+y3)} Pr. default the two variables are assumed to be independent. 
To add a covariance parameter \eqn{r = cov(\epsilon_1,\epsilon_2)}, we execute the following code \code{covariance(m) <- y1 ~ f(y2,r)} The special function \code{f} and its second argument could be omitted thus assigning an unique parameter the covariance between \code{y1} and \code{y2}. Similarily the marginal variance of the two response variables can be fixed to be identical (\eqn{var(Y_i)=v}) via \code{covariance(m) <- c(y1,y2,y3) ~ f(v)} To specify a completely unstructured covariance structure, we can call \code{covariance(m) <- ~y1+y2+y3} All the parameter values of the linear constraints can be given as the right handside expression of the assigment function \code{covariance<-} if the first (and possibly second) argument is defined as well. E.g: \code{covariance(m,y1~y1+y2) <- list("a1","b1")} \code{covariance(m,~y2+y3) <- list("a2",2)} Defines \deqn{var(\epsilon_1) = a1} \deqn{var(\epsilon_2) = a2} \deqn{var(\epsilon_3) = 2} \deqn{cov(\epsilon_1,\epsilon_2) = b1} Parameter constraints can be cleared by fixing the relevant parameters to \code{NA} (see also the \code{regression} method). The function \code{covariance} (called without additional arguments) can be used to inspect the covariance constraints of a \code{lvm}-object. } \examples{ m <- lvm() ### Define covariance between residuals terms of y1 and y2 covariance(m) <- y1~y2 covariance(m) <- c(y1,y2)~f(v) ## Same marginal variance covariance(m) ## Examine covariance structure } \seealso{ \code{\link{regression<-}}, \code{\link{intercept<-}}, \code{\link{constrain<-}} \code{\link{parameter<-}}, \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} } \author{ Klaus K. 
Holst } \keyword{models} \keyword{regression} lava/man/op_concat.Rd0000644000176200001440000000131513520655354014214 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/operators.R \name{\%++\%} \alias{\%++\%} \title{Concatenation operator} \usage{ x \%++\% y } \arguments{ \item{x}{First object} \item{y}{Second object of same class} } \description{ For matrices a block-diagonal matrix is created. For all other data types he operator is a wrapper of \code{paste}. } \details{ Concatenation operator } \examples{ ## Block diagonal matrix(rnorm(25),5)\%++\%matrix(rnorm(25),5) ## String concatenation "Hello "\%++\%" World" ## Function composition f <- log \%++\% exp f(2) } \seealso{ \code{blockdiag}, \code{\link{paste}}, \code{\link{cat}}, } \author{ Klaus K. Holst } \keyword{misc} \keyword{utilities} lava/man/calcium.Rd0000644000176200001440000000136413520655354013670 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{calcium} \alias{calcium} \title{Longitudinal Bone Mineral Density Data} \format{A data.frame containing 560 (incomplete) observations. The 'person' column defines the individual girls of the study with measurements at visiting times 'visit', and age in years 'age' at the time of visit. The bone mineral density variable is 'bmd' (g/cm^2).} \source{ Vonesh & Chinchilli (1997), Table 5.4.1 on page 228. } \description{ Bone Mineral Density Data consisting of 112 girls randomized to receive calcium og placebo. Longitudinal measurements of bone mineral density (g/cm^2) measured approximately every 6th month in 3 years. 
} \keyword{datasets} lava/man/gof.Rd0000644000176200001440000000543313520655354013027 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gof.R \name{gof} \alias{gof} \alias{gof.lvmfit} \alias{moments} \alias{moments.lvm} \alias{information} \alias{information.lvmfit} \alias{score} \alias{score.lvmfit} \alias{logLik.lvmfit} \title{Extract model summaries and GOF statistics for model object} \usage{ gof(object, ...) \method{gof}{lvmfit}(object, chisq=FALSE, level=0.90, rmsea.threshold=0.05,all=FALSE,...) moments(x,...) \method{moments}{lvm}(x, p, debug=FALSE, conditional=FALSE, data=NULL, latent=FALSE, ...) \method{logLik}{lvmfit}(object, p=coef(object), data=model.frame(object), model=object$estimator, weights=Weights(object), data2=object$data$data2, ...) \method{score}{lvmfit}(x, data=model.frame(x), p=pars(x), model=x$estimator, weights=Weights(x), data2=x$data$data2, ...) \method{information}{lvmfit}(x,p=pars(x),n=x$data$n,data=model.frame(x), model=x$estimator,weights=Weights(x), data2=x$data$data2, ...) } \arguments{ \item{object}{Model object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{x}{Model object} \item{p}{Parameter vector used to calculate statistics} \item{data}{Data.frame to use} \item{latent}{If TRUE predictions of latent variables are included in output} \item{data2}{Optional second data.frame (only for censored observations)} \item{weights}{Optional weight matrix} \item{n}{Number of observations} \item{conditional}{If TRUE the conditional moments given the covariates are calculated. Otherwise the joint moments are calculated} \item{model}{String defining estimator, e.g. 
"gaussian" (see \code{estimate})} \item{debug}{Debugging only} \item{chisq}{Boolean indicating whether to calculate chi-squared goodness-of-fit (always TRUE for estimator='gaussian')} \item{level}{Level of confidence limits for RMSEA} \item{rmsea.threshold}{Which probability to calculate, Pr(RMSEA } \keyword{package} lava/man/internal.Rd0000644000176200001440000000257413520655354014073 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \name{startvalues} \alias{startvalues} \alias{startvalues0} \alias{startvalues1} \alias{startvalues2} \alias{startvalues3} \alias{starter.multigroup} \alias{addattr} \alias{modelPar} \alias{modelVar} \alias{matrices} \alias{pars} \alias{pars.lvm} \alias{pars.lvmfit} \alias{pars.glm} \alias{score.glm} \alias{procdata.lvmfit} \alias{reorderdata} \alias{graph2lvm} \alias{igraph.lvm} \alias{subgraph} \alias{finalize} \alias{index.lvm} \alias{index.lvmfit} \alias{index} \alias{reindex} \alias{index<-} \alias{rmvn0} \alias{dmvn0} \alias{logit} \alias{expit} \alias{tigol} \alias{randomslope} \alias{randomslope<-} \alias{lisrel} \alias{variances} \alias{offdiags} \alias{describecoef} \alias{parlabels} \alias{rsq} \alias{stdcoef} \alias{CoefMat} \alias{CoefMat.multigroupfit} \alias{deriv} \alias{updatelvm} \alias{checkmultigroup} \alias{profci} \alias{estimate.MAR} \alias{missingModel} \alias{Inverse} \alias{Identical} \alias{gaussian_logLik.lvm} \alias{addhook} \alias{gethook} \alias{multigroup} \alias{Weights} \alias{fixsome} \alias{parfix} \alias{parfix<-} \alias{merge} \alias{IV} \alias{parameter} \alias{Specials} \alias{procformula} \alias{getoutcome} \alias{decomp.specials} \alias{na.pass0} \title{For internal use} \description{ For internal use } \author{ Klaus K. 
Holst } \keyword{utilities} lava/man/mixture.Rd0000644000176200001440000000450513520655354013750 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mixture.R \name{mixture} \alias{mixture} \title{Estimate mixture latent variable model.} \usage{ mixture(x, data, k = length(x), control = list(), vcov = "observed", names = FALSE, ...) } \arguments{ \item{x}{List of \code{lvm} objects. If only a single \code{lvm} object is given, then a \code{k}-mixture of this model is fitted (free parameters varying between mixture components).} \item{data}{\code{data.frame}} \item{k}{Number of mixture components} \item{control}{Optimization parameters (see details) #type Type of EM algorithm (standard, classification, stochastic)} \item{vcov}{of asymptotic covariance matrix (NULL to omit)} \item{names}{If TRUE returns the names of the parameters (for defining starting values)} \item{...}{Additional arguments parsed to lower-level functions} } \description{ Estimate mixture latent variable model } \details{ Estimate parameters in a mixture of latent variable models via the EM algorithm. The performance of the EM algorithm can be tuned via the \code{control} argument, a list where a subset of the following members can be altered: \describe{ \item{start}{Optional starting values} \item{nstart}{Evaluate \code{nstart} different starting values and run the EM-algorithm on the parameters with largest likelihood} \item{tol}{Convergence tolerance of the EM-algorithm. The algorithm is stopped when the absolute change in likelihood and parameter (2-norm) between successive iterations is less than \code{tol}} \item{iter.max}{Maximum number of iterations of the EM-algorithm} \item{gamma}{Scale-down (i.e. 
number between 0 and 1) of the step-size of the Newton-Raphson algorithm in the M-step} \item{trace}{Trace information on the EM-algorithm is printed on every \code{trace}th iteration} } Note that the algorithm can be aborted any time (C-c) and still be saved (via on.exit call). } \examples{ \donttest{ m0 <- lvm(list(y~x+z,x~z)) distribution(m0,~z) <- binomial.lvm() d <- sim(m0,2000,p=c("y~z"=2,"y~x"=1),seed=1) ## unmeasured confounder example m <- baptize(lvm(y~x, x~1)); intercept(m,~x+y) <- NA set.seed(42) M <- mixture(m,k=2,data=d,control=list(trace=1,tol=1e-6)) summary(M) lm(y~x,d) estimate(M,"y~x") ## True slope := 1 } } \seealso{ \code{mvnmix} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/estimate.default.Rd0000644000176200001440000001342413520655354015511 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate.default.R \name{estimate.default} \alias{estimate.default} \alias{estimate} \alias{estimate.estimate} \alias{merge.estimate} \title{Estimation of functional of parameters} \usage{ \method{estimate}{default}(x = NULL, f = NULL, ..., data, id, iddata, stack = TRUE, average = FALSE, subset, score.deriv, level = 0.95, iid = robust, type = c("robust", "df", "mbn"), keep, use, regex = FALSE, contrast, null, vcov, coef, robust = TRUE, df = NULL, print = NULL, labels, label.width, only.coef = FALSE, back.transform = NULL, folds = 0, cluster, R = 0, null.sim) } \arguments{ \item{x}{model object (\code{glm}, \code{lvmfit}, ...)} \item{f}{transformation of model parameters and (optionally) data, or contrast matrix (or vector)} \item{...}{additional arguments to lower level functions} \item{data}{\code{data.frame}} \item{id}{(optional) id-variable corresponding to iid decomposition of model parameters.} \item{iddata}{(optional) id-variable for 'data'} \item{stack}{if TRUE (default) the i.i.d. 
decomposition is automatically stacked according to 'id'} \item{average}{if TRUE averages are calculated} \item{subset}{(optional) subset of data.frame on which to condition (logical expression or variable name)} \item{score.deriv}{(optional) derivative of mean score function} \item{level}{level of confidence limits} \item{iid}{if TRUE (default) the iid decompositions are also returned (extract with \code{iid} method)} \item{type}{type of small-sample correction} \item{keep}{(optional) index of parameters to keep from final result} \item{use}{(optional) index of parameters to use in calculations} \item{regex}{If TRUE use regular expression (perl compatible) for keep,use arguments} \item{contrast}{(optional) Contrast matrix for final Wald test} \item{null}{(optional) null hypothesis to test} \item{vcov}{(optional) covariance matrix of parameter estimates (e.g. Wald-test)} \item{coef}{(optional) parameter coefficient} \item{robust}{if TRUE robust standard errors are calculated. If FALSE p-values for linear models are calculated from t-distribution} \item{df}{degrees of freedom (default obtained from 'df.residual')} \item{print}{(optional) print function} \item{labels}{(optional) names of coefficients} \item{label.width}{(optional) max width of labels} \item{only.coef}{if TRUE only the coefficient matrix is return} \item{back.transform}{(optional) transform of parameters and confidence intervals} \item{folds}{(optional) aggregate influence functions (divide and conquer)} \item{cluster}{(obsolete) alias for 'id'.} \item{R}{Number of simulations (simulated p-values)} \item{null.sim}{Mean under the null for simulations} } \description{ Estimation of functional of parameters. Wald tests, robust standard errors, cluster robust standard errors, LRT (when \code{f} is not a function)... } \details{ iid decomposition \deqn{\sqrt{n}(\widehat{\theta}-\theta) = \sum_{i=1}^n\epsilon_i + o_p(1)} can be extracted with the \code{iid} method. 
} \examples{ ## Simulation from logistic regression model m <- lvm(y~x+z); distribution(m,y~x) <- binomial.lvm("logit") d <- sim(m,1000) g <- glm(y~z+x,data=d,family=binomial()) g0 <- glm(y~1,data=d,family=binomial()) ## LRT estimate(g,g0) ## Plain estimates (robust standard errors) estimate(g) ## Testing contrasts estimate(g,null=0) estimate(g,rbind(c(1,1,0),c(1,0,2))) estimate(g,rbind(c(1,1,0),c(1,0,2)),null=c(1,2)) estimate(g,2:3) ## same as cbind(0,1,-1) estimate(g,as.list(2:3)) ## same as rbind(c(0,1,0),c(0,0,1)) ## Alternative syntax estimate(g,"z","z"-"x",2*"z"-3*"x") estimate(g,z,z-x,2*z-3*x) estimate(g,"?") ## Wilcards estimate(g,"*Int*","z") estimate(g,"1","2"-"3",null=c(0,1)) estimate(g,2,3) ## Usual (non-robust) confidence intervals estimate(g,robust=FALSE) ## Transformations estimate(g,function(p) p[1]+p[2]) ## Multiple parameters e <- estimate(g,function(p) c(p[1]+p[2],p[1]*p[2])) e vcov(e) ## Label new parameters estimate(g,function(p) list("a1"=p[1]+p[2],"b1"=p[1]*p[2])) ##' ## Multiple group m <- lvm(y~x) m <- baptize(m) d2 <- d1 <- sim(m,50) e <- estimate(list(m,m),list(d1,d2)) estimate(e) ## Wrong estimate(e,id=rep(seq(nrow(d1)),2)) estimate(lm(y~x,d1)) ## Marginalize f <- function(p,data) list(p0=lava:::expit(p["(Intercept)"] + p["z"]*data[,"z"]), p1=lava:::expit(p["(Intercept)"] + p["x"] + p["z"]*data[,"z"])) e <- estimate(g, f, average=TRUE) e estimate(e,diff) estimate(e,cbind(1,1)) ## Clusters and subset (conditional marginal effects) d$id <- rep(seq(nrow(d)/4),each=4) estimate(g,function(p,data) list(p0=lava:::expit(p[1] + p["z"]*data[,"z"])), subset=d$z>0, id=d$id, average=TRUE) ## More examples with clusters: m <- lvm(c(y1,y2,y3)~u+x) d <- sim(m,10) l1 <- glm(y1~x,data=d) l2 <- glm(y2~x,data=d) l3 <- glm(y3~x,data=d) ## Some random id-numbers id1 <- c(1,1,4,1,3,1,2,3,4,5) id2 <- c(1,2,3,4,5,6,7,8,1,1) id3 <- seq(10) ## Un-stacked and stacked i.i.d. 
decomposition iid(estimate(l1,id=id1,stack=FALSE)) iid(estimate(l1,id=id1)) ## Combined i.i.d. decomposition e1 <- estimate(l1,id=id1) e2 <- estimate(l2,id=id2) e3 <- estimate(l3,id=id3) (a2 <- merge(e1,e2,e3)) ## If all models were estimated on the same data we could use the ## syntax: ## Reduce(merge,estimate(list(l1,l2,l3))) ## Same: iid(a1 <- merge(l1,l2,l3,id=list(id1,id2,id3))) iid(merge(l1,l2,l3,id=TRUE)) # one-to-one (same clusters) iid(merge(l1,l2,l3,id=FALSE)) # independence ## Monte Carlo approach, simple trend test example m <- categorical(lvm(),~x,K=5) regression(m,additive=TRUE) <- y~x d <- simulate(m,100,seed=1,'y~x'=0.1) l <- lm(y~-1+factor(x),data=d) f <- function(x) coef(lm(x~seq_along(x)))[2] null <- rep(mean(coef(l)),length(coef(l))) ## just need to make sure we simulate under H0: slope=0 estimate(l,f,R=1e2,null.sim=null) estimate(l,f) } lava/man/constrain-set.Rd0000644000176200001440000001364713520655354015053 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/constrain.R \name{constrain<-} \alias{constrain<-} \alias{constrain} \alias{constrain.default} \alias{constrain<-.multigroup} \alias{constrain<-.default} \alias{constraints} \alias{parameter<-} \title{Add non-linear constraints to latent variable model} \usage{ \method{constrain}{default}(x,par,args,...) <- value \method{constrain}{multigroup}(x,par,k=1,...) <- value constraints(object,data=model.frame(object),vcov=object$vcov,level=0.95, p=pars.default(object),k,idx,...) } \arguments{ \item{x}{\code{lvm}-object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{value}{Real function taking args as a vector argument} \item{par}{Name of new parameter. 
Alternatively a formula with lhs specifying the new parameter and the rhs defining the names of the parameters or variable names defining the new parameter (overruling the \code{args} argument).} \item{args}{Vector of variables names or parameter names that are used in defining \code{par}} \item{k}{For multigroup models this argument specifies which group to add/extract the constraint} \item{object}{\code{lvm}-object} \item{data}{Data-row from which possible non-linear constraints should be calculated} \item{vcov}{Variance matrix of parameter estimates} \item{level}{Level of confidence limits} \item{p}{Parameter vector} \item{idx}{Index indicating which constraints to extract} } \value{ A \code{lvm} object. } \description{ Add non-linear constraints to latent variable model } \details{ Add non-linear parameter constraints as well as non-linear associations between covariates and latent or observed variables in the model (non-linear regression). As an example we will specify the follow multiple regression model: \deqn{E(Y|X_1,X_2) = \alpha + \beta_1 X_1 + \beta_2 X_2} \deqn{V(Y|X_1,X_2) = v} which is defined (with the appropiate parameter labels) as \code{m <- lvm(y ~ f(x,beta1) + f(x,beta2))} \code{intercept(m) <- y ~ f(alpha)} \code{covariance(m) <- y ~ f(v)} The somewhat strained parameter constraint \deqn{ v = \frac{(beta1-beta2)^2}{alpha}} can then specified as \code{constrain(m,v ~ beta1 + beta2 + alpha) <- function(x) (x[1]-x[2])^2/x[3] } A subset of the arguments \code{args} can be covariates in the model, allowing the specification of non-linear regression models. 
As an example the non-linear regression model \deqn{ E(Y\mid X) = \nu + \Phi(\alpha + \beta X)} where \eqn{\Phi} denotes the standard normal cumulative distribution function, can be defined as \code{m <- lvm(y ~ f(x,0)) # No linear effect of x} Next we add three new parameters using the \code{parameter} assigment function: \code{parameter(m) <- ~nu+alpha+beta} The intercept of \eqn{Y} is defined as \code{mu} \code{intercept(m) <- y ~ f(mu)} And finally the newly added intercept parameter \code{mu} is defined as the appropiate non-linear function of \eqn{\alpha}, \eqn{\nu} and \eqn{\beta}: \code{constrain(m, mu ~ x + alpha + nu) <- function(x) pnorm(x[1]*x[2])+x[3]} The \code{constraints} function can be used to show the estimated non-linear parameter constraints of an estimated model object (\code{lvmfit} or \code{multigroupfit}). Calling \code{constrain} with no additional arguments beyound \code{x} will return a list of the functions and parameter names defining the non-linear restrictions. The gradient function can optionally be added as an attribute \code{grad} to the return value of the function defined by \code{value}. In this case the analytical derivatives will be calculated via the chain rule when evaluating the corresponding score function of the log-likelihood. If the gradient attribute is omitted the chain rule will be applied on a numeric approximation of the gradient. 
} \examples{ ############################## ### Non-linear parameter constraints 1 ############################## m <- lvm(y ~ f(x1,gamma)+f(x2,beta)) covariance(m) <- y ~ f(v) d <- sim(m,100) m1 <- m; constrain(m1,beta ~ v) <- function(x) x^2 ## Define slope of x2 to be the square of the residual variance of y ## Estimate both restricted and unrestricted model e <- estimate(m,d,control=list(method="NR")) e1 <- estimate(m1,d) p1 <- coef(e1) p1 <- c(p1[1:2],p1[3]^2,p1[3]) ## Likelihood of unrestricted model evaluated in MLE of restricted model logLik(e,p1) ## Likelihood of restricted model (MLE) logLik(e1) ############################## ### Non-linear regression ############################## ## Simulate data m <- lvm(c(y1,y2)~f(x,0)+f(eta,1)) latent(m) <- ~eta covariance(m,~y1+y2) <- "v" intercept(m,~y1+y2) <- "mu" covariance(m,~eta) <- "zeta" intercept(m,~eta) <- 0 set.seed(1) d <- sim(m,100,p=c(v=0.01,zeta=0.01))[,manifest(m)] d <- transform(d, y1=y1+2*pnorm(2*x), y2=y2+2*pnorm(2*x)) ## Specify model and estimate parameters constrain(m, mu ~ x + alpha + nu + gamma) <- function(x) x[4]*pnorm(x[3]+x[1]*x[2]) \donttest{ ## Reduce Ex.Timings e <- estimate(m,d,control=list(trace=1,constrain=TRUE)) constraints(e,data=d) ## Plot model-fit plot(y1~x,d,pch=16); points(y2~x,d,pch=16,col="gray") x0 <- seq(-4,4,length.out=100) lines(x0,coef(e)["nu"] + coef(e)["gamma"]*pnorm(coef(e)["alpha"]*x0)) } ############################## ### Multigroup model ############################## ### Define two models m1 <- lvm(y ~ f(x,beta)+f(z,beta2)) m2 <- lvm(y ~ f(x,psi) + z) ### And simulate data from them d1 <- sim(m1,500) d2 <- sim(m2,500) ### Add 'non'-linear parameter constraint constrain(m2,psi ~ beta2) <- function(x) x ## Add parameter beta2 to model 2, now beta2 exists in both models parameter(m2) <- ~ beta2 ee <- estimate(list(m1,m2),list(d1,d2),control=list(method="NR")) summary(ee) m3 <- lvm(y ~ f(x,beta)+f(z,beta2)) m4 <- lvm(y ~ f(x,beta2) + z) e2 <- 
estimate(list(m3,m4),list(d1,d2),control=list(method="NR")) e2 } \seealso{ \code{\link{regression}}, \code{\link{intercept}}, \code{\link{covariance}} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/regression-set.Rd0000644000176200001440000001056113520655354015223 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/regression.R \name{regression<-} \alias{regression<-} \alias{regression} \alias{regression<-.lvm} \alias{regression.lvm} \alias{regfix} \alias{regfix<-} \alias{regfix.lvm} \alias{regfix<-.lvm} \title{Add regression association to latent variable model} \usage{ \method{regression}{lvm}(object = lvm(), to, from, fn = NA, messages = lava.options()$messages, additive=TRUE, y, x, value, ...) \method{regression}{lvm}(object, to=NULL, quick=FALSE, ...) <- value } \arguments{ \item{object}{\code{lvm}-object.} \item{\dots}{Additional arguments to be passed to the low level functions} \item{value}{A formula specifying the linear constraints or if \code{to=NULL} a \code{list} of parameter values.} \item{to}{Character vector of outcome(s) or formula object.} \item{from}{Character vector of predictor(s).} \item{fn}{Real function defining the functional form of predictors (for simulation only).} \item{messages}{Controls which messages are turned on/off (0: all off)} \item{additive}{If FALSE and predictor is categorical a non-additive effect is assumed} \item{y}{Alias for 'to'} \item{x}{Alias for 'from'} \item{quick}{Faster implementation without parameter constraints} } \value{ A \code{lvm}-object } \description{ Define regression association between variables in a \code{lvm}-object and define linear constraints between model equations. } \details{ The \code{regression} function is used to specify linear associations between variables of a latent variable model, and offers formula syntax resembling the model specification of e.g. \code{lm}. 
For instance, to add the following linear regression model, to the \code{lvm}-object, \code{m}: \deqn{ E(Y|X_1,X_2) = \beta_1 X_1 + \beta_2 X_2} We can write \code{regression(m) <- y ~ x1 + x2} Multivariate models can be specified by successive calls with \code{regression}, but multivariate formulas are also supported, e.g. \code{regression(m) <- c(y1,y2) ~ x1 + x2} defines \deqn{ E(Y_i|X_1,X_2) = \beta_{1i} X_1 + \beta_{2i} X_2 } The special function, \code{f}, can be used in the model specification to specify linear constraints. E.g. to fix \eqn{\beta_1=\beta_2} , we could write \code{regression(m) <- y ~ f(x1,beta) + f(x2,beta)} The second argument of \code{f} can also be a number (e.g. defining an offset) or be set to \code{NA} in order to clear any previously defined linear constraints. Alternatively, a more straightforward notation can be used: \code{regression(m) <- y ~ beta*x1 + beta*x2} All the parameter values of the linear constraints can be given as the right hand side expression of the assignment function \code{regression<-} (or \code{regfix<-}) if the first (and possibly second) argument is defined as well. E.g: \code{regression(m,y1~x1+x2) <- list("a1","b1")} defines \eqn{E(Y_1|X_1,X_2) = a1 X_1 + b1 X_2}. The rhs argument can be a mixture of character and numeric values (and NA's to remove constraints). The function \code{regression} (called without additional arguments) can be used to inspect the linear constraints of a \code{lvm}-object. For backward compatibility the "$"-symbol can be used to fix parameters at a given value. E.g. to add a linear relationship between \code{y} and \code{x} with slope 2 to the model \code{m}, we can write \code{regression(m,"y") <- "x$2"}. Similarly we can use the "@"-symbol to name parameters. E.g. in a multiple regression we can force the parameters to be equal: \code{regression(m,"y") <- c("x1@b","x2@b")}. Fixed parameters can be reset by fixing (with \$) them to \code{NA}. 
} \note{ Variables will be added to the model if not already present. } \examples{ m <- lvm() ## Initialize empty lvm-object ### E(y1|z,v) = beta1*z + beta2*v regression(m) <- y1 ~ z + v ### E(y2|x,z,v) = beta*x + beta*z + 2*v + beta3*u regression(m) <- y2 ~ f(x,beta) + f(z,beta) + f(v,2) + u ### Clear restriction on association between y and ### fix slope coefficient of u to beta regression(m, y2 ~ v+u) <- list(NA,"beta") regression(m) ## Examine current linear parameter constraints ## ## A multivariate model, E(yi|x1,x2) = beta[1i]*x1 + beta[2i]*x2: m2 <- lvm(c(y1,y2) ~ x1+x2) } \seealso{ \code{\link{intercept<-}}, \code{\link{covariance<-}}, \code{\link{constrain<-}}, \code{\link{parameter<-}}, \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/vars.Rd0000644000176200001440000000563613520655354013234 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vars.R \name{vars} \alias{vars} \alias{vars.lvm} \alias{vars.lvmfit} \alias{latent} \alias{latent<-} \alias{latent.lvm} \alias{latent<-.lvm} \alias{latent.lvmfit} \alias{latent.multigroup} \alias{manifest} \alias{manifest.lvm} \alias{manifest.lvmfit} \alias{manifest.multigroup} \alias{exogenous} \alias{exogenous<-} \alias{exogenous.lvm} \alias{exogenous<-.lvm} \alias{exogenous.lvmfit} \alias{exogenous.multigroup} \alias{endogenous} \alias{endogenous.lvm} \alias{endogenous.lvmfit} \alias{endogenous.multigroup} \title{Extract variable names from latent variable model} \usage{ vars(x,...) endogenous(x,...) exogenous(x,...) manifest(x,...) latent(x,...) \method{exogenous}{lvm}(x, xfree = TRUE,...) <- value \method{exogenous}{lvm}(x,variable,latent=FALSE,index=TRUE,...) \method{latent}{lvm}(x,clear=FALSE,...) 
<- value } \arguments{ \item{x}{\code{lvm}-object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{variable}{list of variables to alter} \item{latent}{Logical defining whether latent variables without parents should be included in the result} \item{index}{For internal use only} \item{clear}{Logical indicating whether to add or remove latent variable status} \item{xfree}{For internal use only} \item{value}{Formula or character vector of variable names.} } \value{ Vector of variable names. } \description{ Extract exogenous variables (predictors), endogenous variables (outcomes), latent variables (random effects), manifest (observed) variables from a \code{lvm} object. } \details{ \code{vars} returns all variables of the \code{lvm}-object including manifest and latent variables. Similarly \code{manifest} and \code{latent} returns the observed resp. latent variables of the model. \code{exogenous} returns all manifest variables without parents, e.g. covariates in the model, however the argument \code{latent=TRUE} can be used to also include latent variables without parents in the result. By default \code{lava} will not include the parameters of the exogenous variables in the optimisation routine during estimation (likelihood of the remaining observed variables conditional on the covariates), however this behaviour can be altered via the assignment function \code{exogenous<-} telling \code{lava} which subset of (valid) variables to condition on. Finally \code{latent} returns a vector with the names of the latent variables in \code{x}. The assignment function \code{latent<-} can be used to change the latent status of variables in the model. 
} \examples{ g <- lvm(eta1 ~ x1+x2) regression(g) <- c(y1,y2,y3) ~ eta1 latent(g) <- ~eta1 endogenous(g) exogenous(g) identical(latent(g), setdiff(vars(g),manifest(g))) } \seealso{ \code{\link{endogenous}}, \code{\link{manifest}}, \code{\link{latent}}, \code{\link{exogenous}}, \code{\link{vars}} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/eventTime.Rd0000644000176200001440000001066613520655354014220 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eventTime.R \name{eventTime} \alias{eventTime} \alias{eventTime<-} \title{Add an observed event time outcome to a latent variable model.} \usage{ eventTime(object, formula, eventName = "status", ...) } \arguments{ \item{object}{Model object} \item{formula}{Formula (see details)} \item{eventName}{Event names} \item{\dots}{Additional arguments to lower levels functions} } \description{ For example, if the model 'm' includes latent event time variables are called 'T1' and 'T2' and 'C' is the end of follow-up (right censored), then one can specify } \details{ \code{eventTime(object=m,formula=ObsTime~min(T1=a,T2=b,C=0,"ObsEvent"))} when data are simulated from the model one gets 2 new columns: - "ObsTime": the smallest of T1, T2 and C - "ObsEvent": 'a' if T1 is smallest, 'b' if T2 is smallest and '0' if C is smallest Note that "ObsEvent" and "ObsTime" are names specified by the user. } \examples{ # Right censored survival data without covariates m0 <- lvm() distribution(m0,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) distribution(m0,"censtime") <- coxExponential.lvm(rate=10) m0 <- eventTime(m0,time~min(eventtime=1,censtime=0),"status") sim(m0,10) # Alternative specification of the right censored survival outcome ## eventTime(m,"Status") <- ~min(eventtime=1,censtime=0) # Cox regression: # lava implements two different parametrizations of the same # Weibull regression model. 
The first specifies # the effects of covariates as proportional hazard ratios # and works as follows: m <- lvm() distribution(m,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) distribution(m,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) m <- eventTime(m,time~min(eventtime=1,censtime=0),"status") distribution(m,"sex") <- binomial.lvm(p=0.4) distribution(m,"sbp") <- normal.lvm(mean=120,sd=20) regression(m,from="sex",to="eventtime") <- 0.4 regression(m,from="sbp",to="eventtime") <- -0.01 sim(m,6) # The parameters can be recovered using a Cox regression # routine or a Weibull regression model. E.g., \dontrun{ set.seed(18) d <- sim(m,1000) library(survival) coxph(Surv(time,status)~sex+sbp,data=d) sr <- survreg(Surv(time,status)~sex+sbp,data=d) library(SurvRegCensCov) ConvertWeibull(sr) } # The second parametrization is an accelerated failure time # regression model and uses the function weibull.lvm instead # of coxWeibull.lvm to specify the event time distributions. # Here is an example: ma <- lvm() distribution(ma,"eventtime") <- weibull.lvm(scale=3,shape=0.7) distribution(ma,"censtime") <- weibull.lvm(scale=2,shape=0.7) ma <- eventTime(ma,time~min(eventtime=1,censtime=0),"status") distribution(ma,"sex") <- binomial.lvm(p=0.4) distribution(ma,"sbp") <- normal.lvm(mean=120,sd=20) regression(ma,from="sex",to="eventtime") <- 0.7 regression(ma,from="sbp",to="eventtime") <- -0.008 set.seed(17) sim(ma,6) # The regression coefficients of the AFT model # can be tranformed into log(hazard ratios): # coef.coxWeibull = - coef.weibull / shape.weibull \dontrun{ set.seed(17) da <- sim(ma,1000) library(survival) fa <- coxph(Surv(time,status)~sex+sbp,data=da) coef(fa) c(0.7,-0.008)/0.7 } # The Weibull parameters are related as follows: # shape.coxWeibull = 1/shape.weibull # scale.coxWeibull = exp(-scale.weibull/shape.weibull) # scale.AFT = log(scale.coxWeibull) / shape.coxWeibull # Thus, the following are equivalent parametrizations # which produce exactly the same random 
numbers: model.aft <- lvm() distribution(model.aft,"eventtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) distribution(model.aft,"censtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) set.seed(17) sim(model.aft,6) model.cox <- lvm() distribution(model.cox,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) distribution(model.cox,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) set.seed(17) sim(model.cox,6) # The minimum of multiple latent times one of them still # being a censoring time, yield # right censored competing risks data mc <- lvm() distribution(mc,~X2) <- binomial.lvm() regression(mc) <- T1~f(X1,-.5)+f(X2,0.3) regression(mc) <- T2~f(X2,0.6) distribution(mc,~T1) <- coxWeibull.lvm(scale=1/100) distribution(mc,~T2) <- coxWeibull.lvm(scale=1/100) distribution(mc,~C) <- coxWeibull.lvm(scale=1/100) mc <- eventTime(mc,time~min(T1=1,T2=2,C=0),"event") sim(mc,6) } \author{ Thomas A. Gerds, Klaus K. Holst } \keyword{models} \keyword{regression} \keyword{survival} lava/man/commutation.Rd0000644000176200001440000000055213520655354014610 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/commutation.R \name{commutation} \alias{commutation} \title{Finds the unique commutation matrix} \usage{ commutation(m, n = m) } \arguments{ \item{m}{rows} \item{n}{columns} } \description{ Finds the unique commutation matrix K: \eqn{K vec(A) = vec(A^t)} } \author{ Klaus K. 
Holst } lava/man/spaghetti.Rd0000644000176200001440000000434613520655354014246 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spaghetti.R \name{spaghetti} \alias{spaghetti} \title{Spaghetti plot} \usage{ spaghetti(formula, data, id = "id", group = NULL, type = "o", lty = 1, pch = NA, col = 1:10, alpha = 0.3, lwd = 1, level = 0.95, trend.formula = formula, tau = NULL, trend.lty = 1, trend.join = TRUE, trend.delta = 0.2, trend = !is.null(tau), trend.col = col, trend.alpha = 0.2, trend.lwd = 3, trend.jitter = 0, legend = NULL, by = NULL, xlab = "Time", ylab = "", add = FALSE, ...) } \arguments{ \item{formula}{Formula (response ~ time)} \item{data}{data.frame} \item{id}{Id variable} \item{group}{group variable} \item{type}{Type (line 'l', stair 's', ...)} \item{lty}{Line type} \item{pch}{Plot symbol} \item{col}{Colour} \item{alpha}{transparency (0-1)} \item{lwd}{Line width} \item{level}{Confidence level} \item{trend.formula}{Formula for trendline} \item{tau}{Quantile to estimate (trend)} \item{trend.lty}{Trend line type} \item{trend.join}{Trend polygon} \item{trend.delta}{Length of limit bars} \item{trend}{Add trend line} \item{trend.col}{Colour of trend line} \item{trend.alpha}{Transparency} \item{trend.lwd}{Trend line width} \item{trend.jitter}{Jitter amount} \item{legend}{Legend} \item{by}{make separate plot for each level in 'by' (formula, name of column, or vector)} \item{xlab}{Label of X-axis} \item{ylab}{Label of Y-axis} \item{add}{Add to existing device} \item{...}{Additional arguments to lower level functions} } \description{ Spaghetti plot for longitudinal data } \examples{ if (interactive() & requireNamespace("mets")) { K <- 5 y <- "y"\%++\%seq(K) m <- lvm() regression(m,y=y,x=~u) <- 1 regression(m,y=y,x=~s) <- seq(K)-1 regression(m,y=y,x=~x) <- "b" N <- 50 d <- sim(m,N); d$z <- rbinom(N,1,0.5) dd <- mets::fast.reshape(d); dd$num <- dd$num+3 spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), 
trend.formula=~factor(num),trend=TRUE,trend.col="darkblue") dd$num <- dd$num+rnorm(nrow(dd),sd=0.5) ## Unbalance spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), trend=TRUE,trend.col="darkblue") spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), trend.formula=~num+I(num^2),trend=TRUE,trend.col="darkblue") } } \author{ Klaus K. Holst } lava/man/plot.lvm.Rd0000644000176200001440000000567313520655354014035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.lvm} \alias{plot.lvm} \alias{plot.lvmfit} \title{Plot path diagram} \usage{ \method{plot}{lvm}(x, diag = FALSE, cor = TRUE, labels = FALSE, intercept = FALSE, addcolor = TRUE, plain = FALSE, cex, fontsize1 = 10, noplot = FALSE, graph = list(rankdir = "BT"), attrs = list(graph = graph), unexpr = FALSE, addstyle = TRUE, plot.engine = lava.options()$plot.engine, init = TRUE, layout = lava.options()$layout, edgecolor = lava.options()$edgecolor, graph.proc = lava.options()$graph.proc, ...) } \arguments{ \item{x}{Model object} \item{diag}{Logical argument indicating whether to visualize variance parameters (i.e. 
diagonal of variance matrix)} \item{cor}{Logical argument indicating whether to visualize correlation parameters} \item{labels}{Logical argument indicating whether to add labels to plot (Unnamed parameters will be labeled p1,p2,...)} \item{intercept}{Logical argument indicating whether to add intercept labels} \item{addcolor}{Logical argument indicating whether to add colors to plot (overrides \code{nodecolor} calls)} \item{plain}{if TRUE strip plot of colors and boxes} \item{cex}{Fontsize of node labels} \item{fontsize1}{Fontsize of edge labels} \item{noplot}{if TRUE then return \code{graphNEL} object only} \item{graph}{Graph attributes (Rgraphviz)} \item{attrs}{Attributes (Rgraphviz)} \item{unexpr}{if TRUE remove expressions from labels} \item{addstyle}{Logical argument indicating whether additional style should automatically be added to the plot (e.g. dashed lines to double-headed arrows)} \item{plot.engine}{default 'Rgraphviz' if available, otherwise visNetwork,igraph} \item{init}{Reinitialize graph (for internal use)} \item{layout}{Graph layout (see Rgraphviz or igraph manual)} \item{edgecolor}{if TRUE plot style with colored edges} \item{graph.proc}{Function that post-process the graph object (default: subscripts are automatically added to labels of the nodes)} \item{...}{Additional arguments to be passed to the low level functions} } \description{ Plot the path diagram of a SEM } \examples{ if (interactive()) { m <- lvm(c(y1,y2) ~ eta) regression(m) <- eta ~ z+x2 regression(m) <- c(eta,z) ~ x1 latent(m) <- ~eta labels(m) <- c(y1=expression(y[scriptscriptstyle(1)]), y2=expression(y[scriptscriptstyle(2)]), x1=expression(x[scriptscriptstyle(1)]), x2=expression(x[scriptscriptstyle(2)]), eta=expression(eta)) edgelabels(m, eta ~ z+x1+x2, cex=2, lwd=3, col=c("orange","lightblue","lightblue")) <- expression(rho,phi,psi) nodecolor(m, vars(m), border="white", labcol="darkblue") <- NA nodecolor(m, ~y1+y2+z, labcol=c("white","white","black")) <- NA plot(m,cex=1.5) d 
<- sim(m,100) e <- estimate(m,d) plot(e) m <- lvm(c(y1,y2) ~ eta) regression(m) <- eta ~ z+x2 regression(m) <- c(eta,z) ~ x1 latent(m) <- ~eta plot(lava:::beautify(m,edgecol=FALSE)) } } \author{ Klaus K. Holst } \keyword{hplot} \keyword{regression} lava/man/missingdata.Rd0000644000176200001440000000160513520655354014554 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{missingdata} \alias{missingdata} \title{Missing data example} \format{list of data.frames} \source{ Simulated } \description{ Simulated data generated from model \deqn{E(Y_i\mid X) = X, \quad cov(Y_1,Y_2\mid X)=0.5} } \details{ The list contains four data sets 1) Complete data 2) MCAR 3) MAR 4) MNAR (missing mechanism depends on variable V correlated with Y1,Y2) } \examples{ data(missingdata) e0 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[1]]) ## No missing e1 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]]) ## CC (MCAR) e2 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]],missing=TRUE) ## MCAR e3 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]]) ## CC (MAR) e4 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]],missing=TRUE) ## MAR } \keyword{datasets} lava/man/plot.sim.Rd0000644000176200001440000001033013520655354014011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.sim.R \name{plot.sim} \alias{plot.sim} \alias{density.sim} \title{Plot method for simulation 'sim' objects} \usage{ \method{plot}{sim}(x, estimate, se = NULL, true = NULL, names = NULL, auto.layout = TRUE, byrow = FALSE, type = "p", ask = grDevices::dev.interactive(), col = c("gray60", "orange", "darkblue", "seagreen", "darkred"), pch = 16, cex = 0.5, lty = 1, lwd = 0.3, legend, legendpos = "topleft", cex.legend = 0.8, plot.type = c("multiple", "single"), polygon = TRUE, density = 0, angle = -45, cex.axis = 0.8, alpha = 0.2, main, cex.main = 1, equal = FALSE, delta = 1.15, ylim = 
NULL, xlim = NULL, ylab = "", xlab = "", rug = TRUE, rug.alpha = 0.5, line.col = scatter.col, line.lwd = 1, line.lty = 1, line.alpha = 1, scatter.ylab = "Estimate", scatter.ylim = NULL, scatter.xlim = NULL, scatter.alpha = 0.5, scatter.col = col, border = col, true.lty = 2, true.col = "gray70", true.lwd = 1.2, density.plot = TRUE, scatter.plot = FALSE, running.mean = scatter.plot, ...) } \arguments{ \item{x}{sim object} \item{estimate}{columns with estimates} \item{se}{columns with standard error estimates} \item{true}{(optional) vector of true parameter values} \item{names}{(optional) names of estimates} \item{auto.layout}{Auto layout (default TRUE)} \item{byrow}{Add new plots to layout by row} \item{type}{plot type} \item{ask}{if TRUE user is asked for input, before a new figure is drawn} \item{col}{colour (for each estimate)} \item{pch}{plot symbol} \item{cex}{point size} \item{lty}{line type} \item{lwd}{line width} \item{legend}{legend} \item{legendpos}{legend position} \item{cex.legend}{size of legend text} \item{plot.type}{'single' or 'multiple' (default)} \item{polygon}{if TRUE fill the density estimates with colour} \item{density}{if non-zero add shading lines to polygon} \item{angle}{shading lines angle of polygon} \item{cex.axis}{Font size on axis} \item{alpha}{Semi-transparent level (1: non-transparent, 0: full)} \item{main}{Main title} \item{cex.main}{Size of title font} \item{equal}{Same x-axis and y-axis for all plots} \item{delta}{Controls the amount of space around axis limits} \item{ylim}{y-axis limits} \item{xlim}{x-axis limits} \item{ylab}{y axis label} \item{xlab}{x axis label} \item{rug}{if TRUE add rug representation of data to x-axis} \item{rug.alpha}{rug semi-transparency level} \item{line.col}{line colour (running mean, only for scatter plots)} \item{line.lwd}{line width (running mean, only for scatter plots)} \item{line.lty}{line type (running mean, only for scatter plots)} \item{line.alpha}{line transparency} \item{scatter.ylab}{y label 
for density plots} \item{scatter.ylim}{y-axis limits for density plots} \item{scatter.xlim}{x-axis limits for density plots} \item{scatter.alpha}{semi-transparency of scatter plot} \item{scatter.col}{scatter plot colour} \item{border}{border colour of density estimates} \item{true.lty}{true parameter estimate line type} \item{true.col}{true parameter colour} \item{true.lwd}{true parameter line width} \item{density.plot}{if TRUE add density plot} \item{scatter.plot}{if TRUE add scatter plot} \item{running.mean}{if TRUE add running average estimate to scatter plot} \item{...}{additional arguments to lower level functions} } \description{ Density and scatter plots } \examples{ n <- 1000 val <- cbind(est1=rnorm(n,sd=1),est2=rnorm(n,sd=0.2),est3=rnorm(n,1,sd=0.5), sd1=runif(n,0.8,1.2),sd2=runif(n,0.1,0.3),sd3=runif(n,0.25,0.75)) plot.sim(val,estimate=c(1,2),true=c(0,0),se=c(4,5),equal=TRUE,scatter.plot=TRUE) plot.sim(val,estimate=c(1,3),true=c(0,1),se=c(4,6),xlim=c(-3,3), scatter.ylim=c(-3,3),scatter.plot=TRUE) plot.sim(val,estimate=c(1,2),true=c(0,0),se=c(4,5),equal=TRUE, plot.type="single",scatter.plot=TRUE) plot.sim(val,estimate=c(1),se=c(4,5,6),plot.type="single",scatter.plot=TRUE) plot.sim(val,estimate=c(1,2,3),equal=TRUE,scatter.plot=TRUE) plot.sim(val,estimate=c(1,2,3),equal=TRUE,byrow=TRUE,scatter.plot=TRUE) plot.sim(val,estimate=c(1,2,3),plot.type="single",scatter.plot=TRUE) plot.sim(val,estimate=1,se=c(3,4,5),plot.type="single",scatter.plot=TRUE) density.sim(val,estimate=c(1,2,3),density=c(0,10,10),angle=c(0,45,-45)) } lava/man/bootstrap.Rd0000644000176200001440000000063213520655354014265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap.R \name{bootstrap} \alias{bootstrap} \title{Generic bootstrap method} \usage{ bootstrap(x, ...) 
} \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments} } \description{ Generic method for calculating bootstrap statistics } \seealso{ \code{bootstrap.lvm} \code{bootstrap.lvmfit} } \author{ Klaus K. Holst } lava/man/ksmooth2.Rd0000644000176200001440000000266313520655354014024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ksmooth.R \name{ksmooth2} \alias{ksmooth2} \alias{surface} \title{Plot/estimate surface} \usage{ ksmooth2(x, data, h = NULL, xlab = NULL, ylab = NULL, zlab = "", gridsize = rep(51L, 2), ...) } \arguments{ \item{x}{formula or data} \item{data}{data.frame} \item{h}{bandwidth} \item{xlab}{X label} \item{ylab}{Y label} \item{zlab}{Z label} \item{gridsize}{grid size of kernel smoother} \item{...}{Additional arguments to graphics routine (persp3d or persp)} } \description{ Plot/estimate surface } \examples{ ksmooth2(rmvn0(1e4,sigma=diag(2)*.5+.5),c(-3.5,3.5),h=1, rgl=FALSE,theta=30) if (interactive()) { ksmooth2(rmvn0(1e4,sigma=diag(2)*.5+.5),c(-3.5,3.5),h=1) ksmooth2(function(x,y) x^2+y^2, c(-20,20)) ksmooth2(function(x,y) x^2+y^2, xlim=c(-5,5), ylim=c(0,10)) f <- function(x,y) 1-sqrt(x^2+y^2) surface(f,xlim=c(-1,1),alpha=0.9,aspect=c(1,1,0.75)) surface(f,xlim=c(-1,1),clut=heat.colors(128)) ##play3d(spin3d(axis=c(0,0,1), rpm=8), duration=5) } if (interactive()) { surface(function(x) dmvn0(x,sigma=diag(2)),c(-3,3),lit=FALSE,smooth=FALSE,box=FALSE,alpha=0.8) surface(function(x) dmvn0(x,sigma=diag(2)),c(-3,3),box=FALSE,specular="black")##' } if (!inherits(try(find.package("fields"),silent=TRUE),"try-error")) { f <- function(x,y) 1-sqrt(x^2+y^2) ksmooth2(f,c(-1,1),rgl=FALSE,image=fields::image.plot) } } lava/man/vec.Rd0000644000176200001440000000063413520655354013027 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vec.R \name{vec} \alias{vec} \title{vec operator} \usage{ vec(x, matrix = FALSE, sep = ".", ...) 
} \arguments{ \item{x}{Array} \item{matrix}{If TRUE a row vector (matrix) is returned} \item{sep}{Seperator} \item{...}{Additional arguments} } \description{ vec operator } \details{ Convert array into vector } \author{ Klaus Holst } lava/man/nldata.Rd0000644000176200001440000000043313520655354013512 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{nldata} \alias{nldata} \title{Example data (nonlinear model)} \format{data.frame} \source{ Simulated } \description{ Example data (nonlinear model) } \keyword{datasets} lava/man/Graph.Rd0000644000176200001440000000102413520655354013305 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/graph.R \name{Graph} \alias{Graph} \alias{Graph<-} \title{Extract graph} \usage{ Graph(x, ...) Graph(x, ...) <- value } \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{value}{New \code{graphNEL} object} } \description{ Extract or replace graph object } \examples{ m <- lvm(y~x) Graph(m) } \seealso{ \code{\link{Model}} } \author{ Klaus K. Holst } \keyword{graphs} \keyword{models} lava/man/twostage.Rd0000644000176200001440000000061513520655354014106 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/twostage.R \name{twostage} \alias{twostage} \title{Two-stage estimator} \usage{ twostage(object, ...) } \arguments{ \item{object}{Model object} \item{...}{Additional arguments to lower level functions} } \description{ Generic function. 
} \seealso{ twostage.lvm twostage.lvmfit twostage.lvm.mixture twostage.estimate } lava/man/twostageCV.Rd0000644000176200001440000000311113520655354014331 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/twostage.R \name{twostageCV} \alias{twostageCV} \title{Cross-validated two-stage estimator} \usage{ twostageCV(model1, model2, data, control1 = list(trace = 0), control2 = list(trace = 0), knots.boundary, mc.cores = 1, nmix = 1:4, df = 1:9, fix = TRUE, std.err = TRUE, nfolds = 5, rep = 1, messages = 0, ...) } \arguments{ \item{model1}{model 1 (exposure measurement error model)} \item{model2}{model 2} \item{data}{data.frame} \item{control1}{optimization parameters for model 1} \item{control2}{optimization parameters for model 1} \item{knots.boundary}{boundary points for natural cubic spline basis} \item{mc.cores}{number of cores to use for parallel computations} \item{nmix}{number of mixture components} \item{df}{spline degrees of freedom} \item{fix}{automatically fix parameters for identification (TRUE)} \item{std.err}{calculation of standard errors (TRUE)} \item{nfolds}{Number of folds (cross-validation)} \item{rep}{Number of repeats of cross-validation} \item{messages}{print information (>0)} \item{...}{additional arguments to lower level functions} } \description{ Cross-validated two-stage estimator for non-linear SEM } \examples{ \donttest{ ## Reduce Ex.Timings m1 <- lvm( x1+x2+x3 ~ u1, latent= ~u1) m2 <- lvm( y ~ 1 ) m <- functional(merge(m1,m2), y ~ u, value=function(x) sin(x)+x) distribution(m, ~u1) <- uniform.lvm(-6,6) d <- sim(m,n=500,seed=1) nonlinear(m2) <- y~u1 val <- twostageCV(m1, m2, data=d, std.err=FALSE, df=2:6, nmix=1:2, nfolds=2, mc.cores=1) val } } lava/man/Missing.Rd0000644000176200001440000000355313520655354013666 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Missing.R \name{Missing} \alias{Missing} \alias{Missing,} \alias{Missing<-} \title{Missing 
value generator} \usage{ Missing(object, formula, Rformula, missing.name, suffix = "0", ...) } \arguments{ \item{object}{\code{lvm}-object.} \item{formula}{The right hand side specifies the name of a latent variable which is not always observed. The left hand side specifies the name of a new variable which is equal to the latent variable but has missing values. If given as a string then this is used as the name of the latent (full-data) name, and the observed data name is 'missing.data'} \item{Rformula}{Missing data mechanism with left hand side specifying the name of the observed data indicator (may also just be given as a character instead of a formula)} \item{missing.name}{Name of observed data variable (only used if 'formula' was given as a character specifying the name of the full-data variable)} \item{suffix}{If missing.name is missing, then the name of the oberved data variable will be the name of the full-data variable + the suffix} \item{...}{Passed to binomial.lvm.} } \value{ lvm object } \description{ Missing value generator } \details{ This function adds a binary variable to a given \code{lvm} model and also a variable which is equal to the original variable where the binary variable is equal to zero } \examples{ library(lava) set.seed(17) m <- lvm(y0~x01+x02+x03) m <- Missing(m,formula=x1~x01,Rformula=R1~0.3*x02+-0.7*x01,p=0.4) sim(m,10) m <- lvm(y~1) m <- Missing(m,"y","r") ## same as ## m <- Missing(m,y~1,r~1) sim(m,10) ## same as m <- lvm(y~1) Missing(m,"y") <- r~x sim(m,10) m <- lvm(y~1) m <- Missing(m,"y","r",suffix=".") ## same as ## m <- Missing(m,"y","r",missing.name="y.") ## same as ## m <- Missing(m,y.~y,"r") sim(m,10) } \author{ Thomas A. 
Gerds } lava/man/confint.lvmfit.Rd0000644000176200001440000000407213520655354015212 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confint.R \name{confint.lvmfit} \alias{confint.lvmfit} \alias{confint.multigroupfit} \title{Calculate confidence limits for parameters} \usage{ \method{confint}{lvmfit}(object, parm = seq_len(length(coef(object))), level = 0.95, profile = FALSE, curve = FALSE, n = 20, interval = NULL, lower = TRUE, upper = TRUE, ...) } \arguments{ \item{object}{\code{lvm}-object.} \item{parm}{Index of which parameters to calculate confidence limits for.} \item{level}{Confidence level} \item{profile}{Logical expression defining whether to calculate confidence limits via the profile log likelihood} \item{curve}{if FALSE and profile is TRUE, confidence limits are returned. Otherwise, the profile curve is returned.} \item{n}{Number of points to evaluate profile log-likelihood in over the interval defined by \code{interval}} \item{interval}{Interval over which the profiling is done} \item{lower}{If FALSE the lower limit will not be estimated (profile intervals only)} \item{upper}{If FALSE the upper limit will not be estimated (profile intervals only)} \item{\dots}{Additional arguments to be passed to the low level functions} } \value{ A 2xp matrix with columns of lower and upper confidence limits } \description{ Calculate Wald og Likelihood based (profile likelihood) confidence intervals } \details{ Calculates either Wald confidence limits: \deqn{\hat{\theta} \pm z_{\alpha/2}*\hat\sigma_{\hat\theta}} or profile likelihood confidence limits, defined as the set of value \eqn{\tau}: \deqn{logLik(\hat\theta_{\tau},\tau)-logLik(\hat\theta)< q_{\alpha}/2} where \eqn{q_{\alpha}} is the \eqn{\alpha} fractile of the \eqn{\chi^2_1} distribution, and \eqn{\hat\theta_{\tau}} are obtained by maximizing the log-likelihood with tau being fixed. 
} \examples{ m <- lvm(y~x) d <- sim(m,100) e <- estimate(lvm(y~x), d) confint(e,3,profile=TRUE) confint(e,3) \donttest{ ## Reduce Ex.timings B <- bootstrap(e,R=50) B } } \seealso{ \code{\link{bootstrap}{lvm}} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/binomial.rd.Rd0000644000176200001440000000156313520655354014452 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/binomial.rrw.R \name{binomial.rd} \alias{binomial.rd} \alias{binomial.rr} \title{Define constant risk difference or relative risk association for binary exposure} \usage{ binomial.rd(x, response, exposure, target.model, nuisance.model, exposure.model = binomial.lvm(), ...) } \arguments{ \item{x}{model} \item{response}{response variable (character or formula)} \item{exposure}{exposure variable (character or formula)} \item{target.model}{variable defining the linear predictor for the target model} \item{nuisance.model}{variable defining the linear predictor for the nuisance model} \item{exposure.model}{model for exposure (default binomial logit link)} \item{...}{additional arguments to lower level functions} } \description{ Set up model as defined in Richardson, Robins and Wang (2017). } lava/man/children.Rd0000644000176200001440000000075613520655354014047 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/children.R \name{children} \alias{children} \alias{parents} \alias{ancestors} \alias{descendants} \alias{roots} \alias{sinks} \alias{adjMat} \alias{edgeList} \title{Extract children or parent elements of object} \usage{ children(object, ...) } \arguments{ \item{object}{Object} \item{\dots}{Additional arguments} } \description{ Generic method for memberships from object (e.g. a graph) } \author{ Klaus K. 
Holst } lava/man/Model.Rd0000644000176200001440000000120513520655354013305 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model.R \name{Model} \alias{Model} \alias{Model<-} \title{Extract model} \usage{ Model(x, ...) Model(x, ...) <- value } \arguments{ \item{x}{Fitted model} \item{\dots}{Additional arguments to be passed to the low level functions} \item{value}{New model object (e.g. \code{lvm} or \code{multigroup})} } \value{ Returns a model object (e.g. \code{lvm} or \code{multigroup}) } \description{ Extract or replace model object } \examples{ m <- lvm(y~x) e <- estimate(m, sim(m,100)) Model(e) } \seealso{ \code{\link{Graph}} } \author{ Klaus K. Holst } \keyword{models} lava/man/trim.Rd0000644000176200001440000000064713520655354013231 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trim.R \name{trim} \alias{trim} \title{Trim tring of (leading/trailing/all) white spaces} \usage{ trim(x, all = FALSE, ...) } \arguments{ \item{x}{String} \item{all}{Trim all whitespaces?} \item{\dots}{additional arguments to lower level functions} } \description{ Trim tring of (leading/trailing/all) white spaces } \author{ Klaus K. Holst } lava/man/bootstrap.lvm.Rd0000644000176200001440000000461113520655354015063 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap.R \name{bootstrap.lvm} \alias{bootstrap.lvm} \alias{bootstrap.lvmfit} \title{Calculate bootstrap estimates of a lvm object} \usage{ \method{bootstrap}{lvm}(x,R=100,data,fun=NULL,control=list(), p, parametric=FALSE, bollenstine=FALSE, constraints=TRUE,sd=FALSE,messages=lava.options()$messages, parallel=lava.options()$parallel, mc.cores=NULL, ...) \method{bootstrap}{lvmfit}(x,R=100,data=model.frame(x), control=list(start=coef(x)), p=coef(x), parametric=FALSE, bollenstine=FALSE, estimator=x$estimator,weights=Weights(x),...) 
} \arguments{ \item{x}{\code{lvm}-object.} \item{R}{Number of bootstrap samples} \item{data}{The data to resample from} \item{fun}{Optional function of the (bootstrapped) model-fit defining the statistic of interest} \item{control}{Options to the optimization routine} \item{p}{Parameter vector of the null model for the parametric bootstrap} \item{parametric}{If TRUE a parametric bootstrap is calculated. If FALSE a non-parametric (row-sampling) bootstrap is computed.} \item{bollenstine}{Bollen-Stine transformation (non-parametric bootstrap) for bootstrap hypothesis testing.} \item{constraints}{Logical indicating whether non-linear parameter constraints should be included in the bootstrap procedure} \item{sd}{Logical indicating whether standard error estimates should be included in the bootstrap procedure} \item{messages}{Control amount of messages printed} \item{parallel}{If TRUE parallel backend will be used} \item{mc.cores}{Number of threads (if NULL foreach::foreach will be used, otherwise parallel::mclapply)} \item{\dots}{Additional arguments, e.g. choice of estimator.} \item{estimator}{String definining estimator, e.g. 'gaussian' (see \code{estimator})} \item{weights}{Optional weights matrix used by \code{estimator}} } \value{ A \code{bootstrap.lvm} object. } \description{ Draws non-parametric bootstrap samples } \examples{ m <- lvm(y~x) d <- sim(m,100) e <- estimate(lvm(y~x), data=d) \donttest{ ## Reduce Ex.Timings B <- bootstrap(e,R=50,parallel=FALSE) B } } \seealso{ \code{\link{confint.lvmfit}} } \author{ Klaus K. 
Holst } \keyword{models} \keyword{regression} lava/man/twostage.lvmfit.Rd0000644000176200001440000000706313520655354015412 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/twostage.R \name{twostage.lvmfit} \alias{twostage.lvmfit} \alias{twostage.lvm} \alias{twostage.lvm.mixture} \alias{twostage.estimate} \alias{nonlinear} \alias{nonlinear<-} \title{Two-stage estimator (non-linear SEM)} \usage{ \method{twostage}{lvmfit}(object, model2, data = NULL, predict.fun = NULL, id1 = NULL, id2 = NULL, all = FALSE, formula = NULL, std.err = TRUE, ...) } \arguments{ \item{object}{Stage 1 measurement model} \item{model2}{Stage 2 SEM} \item{data}{data.frame} \item{predict.fun}{Prediction of latent variable} \item{id1}{Optional id-variable (stage 1 model)} \item{id2}{Optional id-variable (stage 2 model)} \item{all}{If TRUE return additional output (naive estimates)} \item{formula}{optional formula specifying non-linear relation} \item{std.err}{If FALSE calculations of standard errors will be skipped} \item{...}{Additional arguments to lower level functions} } \description{ Two-stage estimator for non-linear structural equation models } \examples{ m <- lvm(c(x1,x2,x3)~f1,f1~z, c(y1,y2,y3)~f2,f2~f1+z) latent(m) <- ~f1+f2 d <- simulate(m,100,p=c("f2,f2"=2,"f1,f1"=0.5),seed=1) ## Full MLE ee <- estimate(m,d) ## Manual two-stage \dontrun{ m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 e1 <- estimate(m1,d) pp1 <- predict(e1,f1~x1+x2+x3) d$u1 <- pp1[,] d$u2 <- pp1[,]^2+attr(pp1,"cond.var")[1] m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta e2 <- estimate(m2,d) } ## Two-stage m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta pred <- function(mu,var,data,...) 
cbind("u1"=mu[,1],"u2"=mu[,1]^2+var[1]) (mm <- twostage(m1,model2=m2,data=d,predict.fun=pred)) if (interactive()) { pf <- function(p) p["eta"]+p["eta~u1"]*u + p["eta~u2"]*u^2 plot(mm,f=pf,data=data.frame(u=seq(-2,2,length.out=100)),lwd=2) } ## Splines f <- function(x) cos(2*x)+x+-0.25*x^2 m <- lvm(x1+x2+x3~eta1, y1+y2+y3~eta2, latent=~eta1+eta2) functional(m, eta2~eta1) <- f d <- sim(m,500,seed=1,latent=TRUE) m1 <- lvm(x1+x2+x3~eta1,latent=~eta1) m2 <- lvm(y1+y2+y3~eta2,latent=~eta2) mm <- twostage(m1,m2,formula=eta2~eta1,type="spline") if (interactive()) plot(mm) nonlinear(m2,type="quadratic") <- eta2~eta1 a <- twostage(m1,m2,data=d) if (interactive()) plot(a) kn <- c(-1,0,1) nonlinear(m2,type="spline",knots=kn) <- eta2~eta1 a <- twostage(m1,m2,data=d) x <- seq(-3,3,by=0.1) y <- predict(a, newdata=data.frame(eta1=x)) if (interactive()) { plot(eta2~eta1, data=d) lines(x,y, col="red", lwd=5) p <- estimate(a,f=function(p) predict(a,p=p,newdata=x))$coefmat plot(eta2~eta1, data=d) lines(x,p[,1], col="red", lwd=5) confband(x,lower=p[,3],upper=p[,4],center=p[,1], polygon=TRUE, col=Col(2,0.2)) l1 <- lm(eta2~splines::ns(eta1,knots=kn),data=d) p1 <- predict(l1,newdata=data.frame(eta1=x),interval="confidence") lines(x,p1[,1],col="green",lwd=5) confband(x,lower=p1[,2],upper=p1[,3],center=p1[,1], polygon=TRUE, col=Col(3,0.2)) } \dontrun{ ## Reduce timing ## Cross-validation example ma <- lvm(c(x1,x2,x3)~u,latent=~u) ms <- functional(ma, y~u, value=function(x) -.4*x^2) d <- sim(ms,500)#,seed=1) ea <- estimate(ma,d) mb <- lvm() mb1 <- nonlinear(mb,type="linear",y~u) mb2 <- nonlinear(mb,type="quadratic",y~u) mb3 <- nonlinear(mb,type="spline",knots=c(-3,-1,0,1,3),y~u) mb4 <- nonlinear(mb,type="spline",knots=c(-3,-2,-1,0,1,2,3),y~u) ff <- lapply(list(mb1,mb2,mb3,mb4), function(m) function(data,...) 
twostage(ma,m,data=data,st.derr=FALSE)) a <- cv(ff,data=d,rep=1,mc.cores=1) a } } lava/man/curly.Rd0000644000176200001440000000261613520655354013412 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/curly.R \name{curly} \alias{curly} \title{Adds curly brackets to plot} \usage{ curly(x, y, len = 1, theta = 0, wid, shape = 1, col = 1, lwd = 1, lty = 1, grid = FALSE, npoints = 50, text = NULL, offset = c(0.05, 0)) } \arguments{ \item{x}{center of the x axis of the curly brackets (or start end coordinates (x1,x2))} \item{y}{center of the y axis of the curly brackets (or start end coordinates (y1,y2))} \item{len}{Length of the curly brackets} \item{theta}{angle (in radians) of the curly brackets orientation} \item{wid}{Width of the curly brackets} \item{shape}{shape (curvature)} \item{col}{color (passed to lines/grid.lines)} \item{lwd}{line width (passed to lines/grid.lines)} \item{lty}{line type (passed to lines/grid.lines)} \item{grid}{If TRUE use grid graphics (compatability with ggplot2)} \item{npoints}{Number of points used in curves} \item{text}{Label} \item{offset}{Label offset (x,y)} } \description{ Adds curly brackets to plot } \examples{ if (interactive()) { plot(0,0,type="n",axes=FALSE,xlab="",ylab="") curly(x=c(1,0),y=c(0,1),lwd=2,text="a") curly(x=c(1,0),y=c(0,1),lwd=2,text="b",theta=pi) curly(x=-0.5,y=0,shape=1,theta=pi,text="c") curly(x=0,y=0,shape=1,theta=0,text="d") curly(x=0.5,y=0,len=0.2,theta=pi/2,col="blue",lty=2) curly(x=0.5,y=-0.5,len=0.2,theta=-pi/2,col="red",shape=1e3,text="e") } } lava/man/getSAS.Rd0000644000176200001440000000122213520655354013372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zgetsas.R \name{getSAS} \alias{getSAS} \title{Read SAS output} \usage{ getSAS(infile, entry = "Parameter Estimates", ...) 
} \arguments{ \item{infile}{file (csv file generated by ODS)} \item{entry}{Name of entry to capture} \item{\dots}{additional arguments to lower level functions} } \description{ Run SAS code like in the following: } \details{ ODS CSVALL BODY="myest.csv"; proc nlmixed data=aj qpoints=2 dampstep=0.5; ... run; ODS CSVALL Close; and read results into R with: \code{getsas("myest.csv","Parameter Estimates")} } \seealso{ getMplus } \author{ Klaus K. Holst } lava/man/ordreg.Rd0000644000176200001440000000134713520655354013536 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ordreg.R \name{ordreg} \alias{ordreg} \title{Univariate cumulative link regression models} \usage{ ordreg(formula, data = parent.frame(), offset, family = stats::binomial("probit"), start, fast = FALSE, ...) } \arguments{ \item{formula}{formula} \item{data}{data.frame} \item{offset}{offset} \item{family}{family (default proportional odds)} \item{start}{optional starting values} \item{fast}{If TRUE standard errors etc. will not be calculated} \item{...}{Additional arguments to lower level functions} } \description{ Ordinal regression models } \examples{ m <- lvm(y~x) ordinal(m,K=3) <- ~y d <- sim(m,100) e <- ordreg(y~x,d) } \author{ Klaus K. Holst } lava/man/brisa.Rd0000644000176200001440000000037113520655354013350 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{brisa} \alias{brisa} \title{Simulated data} \format{data.frame} \source{ Simulated } \description{ Simulated data } \keyword{datasets} lava/man/csplit.Rd0000644000176200001440000000160113520655354013543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/csplit.R \name{csplit} \alias{csplit} \alias{foldr} \title{Split data into folds} \usage{ csplit(x, p = NULL, replace = FALSE, return.index = FALSE, k = 2, ...) 
} \arguments{ \item{x}{Data or integer (size)} \item{p}{Number of folds, or if a number between 0 and 1 is given two folds of size p and (1-p) will be returned} \item{replace}{With or with-out replacement} \item{return.index}{If TRUE index of folds are returned otherwise the actual data splits are returned (default)} \item{k}{(Optional, only used when p=NULL) number of folds without shuffling} \item{...}{additional arguments to lower-level functions} } \description{ Split data into folds } \examples{ foldr(5,2,rep=2) csplit(10,3) csplit(iris[1:10,]) ## Split in two sets 1:(n/2) and (n/2+1):n csplit(iris[1:10,],0.5) } \author{ Klaus K. Holst } lava/man/op_match.Rd0000644000176200001440000000075313520655354014046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/operators.R \name{\%ni\%} \alias{\%ni\%} \title{Matching operator (x not in y) oposed to the \code{\%in\%}-operator (x in y)} \usage{ x \%ni\% y } \arguments{ \item{x}{vector} \item{y}{vector of same type as \code{x}} } \value{ A logical vector. } \description{ Matching operator } \examples{ 1:10 \%ni\% c(1,5,10) } \seealso{ \code{\link{match}} } \author{ Klaus K. Holst } \keyword{misc} \keyword{utilities} lava/man/toformula.Rd0000644000176200001440000000111513520655354014255 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/toformula.R \name{toformula} \alias{toformula} \title{Converts strings to formula} \usage{ toformula(y = ".", x = ".") } \arguments{ \item{y}{vector of predictors} \item{x}{vector of responses} } \value{ An object of class \code{formula} } \description{ Converts a vector of predictors and a vector of responses (characters) i#nto a formula expression. } \examples{ toformula(c("age","gender"), "weight") } \seealso{ \code{\link{as.formula}}, } \author{ Klaus K. 
Holst } \keyword{models} \keyword{utilities} lava/man/closed.testing.Rd0000644000176200001440000000171013520655354015173 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multipletesting.R \name{closed.testing} \alias{closed.testing} \alias{p.correct} \title{Closed testing procedure} \usage{ closed.testing(object, idx = seq_along(coef(object)), null = rep(0, length(idx)), ...) } \arguments{ \item{object}{estimate object} \item{idx}{Index of parameters to adjust for multiple testing} \item{null}{Null hypothesis value} \item{...}{Additional arguments} } \description{ Closed testing procedure } \examples{ m <- lvm() regression(m, c(y1,y2,y3,y4,y5,y6,y7)~x) <- c(0,0.25,0,0.25,0.25,0,0) regression(m, to=endogenous(m), from="u") <- 1 variance(m,endogenous(m)) <- 1 set.seed(2) d <- sim(m,200) l1 <- lm(y1~x,d) l2 <- lm(y2~x,d) l3 <- lm(y3~x,d) l4 <- lm(y4~x,d) l5 <- lm(y5~x,d) l6 <- lm(y6~x,d) l7 <- lm(y7~x,d) (a <- merge(l1,l2,l3,l4,l5,l6,l7,subset=2)) if (requireNamespace("mets",quietly=TRUE)) { p.correct(a) } as.vector(closed.testing(a)) } lava/man/nsem.Rd0000644000176200001440000000040513520655354013210 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{nsem} \alias{nsem} \title{Example SEM data (nonlinear)} \format{data.frame} \source{ Simulated } \description{ Simulated data } \keyword{datasets} lava/man/labels-set.Rd0000644000176200001440000000365713520655354014315 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/labels.R \name{labels<-} \alias{labels<-} \alias{labels} \alias{labels<-.default} \alias{labels.lvm} \alias{labels.lvmfit} \alias{labels.graphNEL} \alias{edgelabels} \alias{edgelabels<-} \alias{edgelabels<-.lvm} \alias{nodecolor} \alias{nodecolor<-} \alias{nodecolor<-.default} \title{Define labels of graph} \usage{ \method{labels}{default}(object, ...) 
<- value \method{edgelabels}{lvm}(object, to, ...) <- value \method{nodecolor}{default}(object, var=vars(object), border, labcol, shape, lwd, ...) <- value } \arguments{ \item{object}{\code{lvm}-object.} \item{\dots}{Additional arguments (\code{lwd}, \code{cex}, \code{col}, \code{labcol}), \code{border}.} \item{value}{node label/edge label/color} \item{to}{Formula specifying outcomes and predictors defining relevant edges.} \item{var}{Formula or character vector specifying the nodes/variables to alter.} \item{border}{Colors of borders} \item{labcol}{Text label colors} \item{shape}{Shape of node} \item{lwd}{Line width of border} } \description{ Alters labels of nodes and edges in the graph of a latent variable model } \examples{ m <- lvm(c(y,v)~x+z) regression(m) <- c(v,x)~z labels(m) <- c(y=expression(psi), z=expression(zeta)) nodecolor(m,~y+z+x,border=c("white","white","black"), labcol="white", lwd=c(1,1,5), lty=c(1,2)) <- c("orange","indianred","lightgreen") edgelabels(m,y~z+x, cex=c(2,1.5), col=c("orange","black"),labcol="darkblue", arrowhead=c("tee","dot"), lwd=c(3,1)) <- expression(phi,rho) edgelabels(m,c(v,x)~z, labcol="red", cex=0.8,arrowhead="none") <- 2 if (interactive()) { plot(m,addstyle=FALSE) } m <- lvm(y~x) labels(m) <- list(x="multiple\\nlines") if (interactive()) { op <- par(mfrow=c(1,2)) plot(m,plain=TRUE) plot(m) par(op) d <- sim(m,100) e <- estimate(m,d) plot(e,type="sd") } } \author{ Klaus K. Holst } \keyword{aplot} \keyword{graphs} lava/man/getMplus.Rd0000644000176200001440000000066413520655354014055 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zgetmplus.R \name{getMplus} \alias{getMplus} \title{Read Mplus output} \usage{ getMplus(infile = "template.out", coef = TRUE, ...) } \arguments{ \item{infile}{Mplus output file} \item{coef}{Coefficients only} \item{\dots}{additional arguments to lower level functions} } \description{ Read Mplus output files } \seealso{ getSAS } \author{ Klaus K. 
Holst } lava/man/modelsearch.Rd0000644000176200001440000000243313520655354014537 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/modelsearch.R \name{modelsearch} \alias{modelsearch} \title{Model searching} \usage{ modelsearch(x, k = 1, dir = "forward", type = "all", ...) } \arguments{ \item{x}{\code{lvmfit}-object} \item{k}{Number of parameters to test simultaneously. For \code{equivalence} the number of additional associations to be added instead of \code{rel}.} \item{dir}{Direction to do model search. "forward" := add associations/arrows to model/graph (score tests), "backward" := remove associations/arrows from model/graph (wald test)} \item{type}{If equal to 'correlation' only consider score tests for covariance parameters. If equal to 'regression' go through direct effects only (default 'all' is to do both)} \item{...}{Additional arguments to be passed to the low level functions} } \value{ Matrix of test-statistics and p-values } \description{ Performs Wald or score tests } \examples{ m <- lvm(); regression(m) <- c(y1,y2,y3) ~ eta; latent(m) <- ~eta regression(m) <- eta ~ x m0 <- m; regression(m0) <- y2 ~ x dd <- sim(m0,100)[,manifest(m0)] e <- estimate(m,dd); modelsearch(e,messages=0) modelsearch(e,messages=0,type="cor") } \seealso{ \code{\link{compare}}, \code{\link{equivalence}} } \author{ Klaus K. Holst } \keyword{htest} lava/man/predictlvm.Rd0000644000176200001440000000171213520655354014421 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predictlvm} \alias{predictlvm} \title{Predict function for latent variable models} \usage{ predictlvm(object, formula, p = coef(object), data = model.frame(object), ...) 
} \arguments{ \item{object}{Model object} \item{formula}{Formula specifying which variables to predict and which to condition on} \item{p}{Parameter vector} \item{data}{Data.frame} \item{...}{Additional arguments to lower level functions} } \description{ Predictions of conditinoal mean and variance and calculation of jacobian with respect to parameter vector. } \examples{ m <- lvm(c(x1,x2,x3)~u1,u1~z, c(y1,y2,y3)~u2,u2~u1+z) latent(m) <- ~u1+u2 d <- simulate(m,10,"u2,u2"=2,"u1,u1"=0.5,seed=123) e <- estimate(m,d) ## Conditional mean given covariates predictlvm(e,c(x1,x2)~1)$mean ## Conditional variance of u1,y1 given x1,x2 predictlvm(e,c(u1,y1)~x1+x2)$var } \seealso{ predict.lvm } lava/man/Range.lvm.Rd0000644000176200001440000000055513520655354014105 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/constrain.R \name{Range.lvm} \alias{Range.lvm} \title{Define range constraints of parameters} \usage{ Range.lvm(a = 0, b = 1) } \arguments{ \item{a}{Lower bound} \item{b}{Upper bound} } \value{ function } \description{ Define range constraints of parameters } \author{ Klaus K. 
Holst } lava/man/serotonin2.Rd0000644000176200001440000000041413520655354014350 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{serotonin2} \alias{serotonin2} \title{Data} \format{data.frame} \source{ Simulated } \description{ Description } \seealso{ serotonin } \keyword{datasets} lava/man/fplot.Rd0000644000176200001440000000146413520655354013400 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fplot.R \name{fplot} \alias{fplot} \title{fplot} \usage{ fplot(x, y, z = NULL, xlab, ylab, ..., z.col = topo.colors(64), data = parent.frame(), add = FALSE, aspect = c(1, 1), zoom = 0.8) } \arguments{ \item{x}{X variable} \item{y}{Y variable} \item{z}{Z variable (optional)} \item{xlab}{x-axis label} \item{ylab}{y-axis label} \item{...}{additional arggument to lower-level plot functions} \item{z.col}{color (use argument alpha to set transparency)} \item{data}{data.frame} \item{add}{if TRUE use current active device} \item{aspect}{aspect ratio} \item{zoom}{zoom level} } \description{ Faster plot via RGL } \examples{ if (interactive()) { data(iris) fplot(Sepal.Length ~ Petal.Length+Species, data=iris, size=2, type="s") } } lava/man/path.Rd0000644000176200001440000000453113520655354013206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/path.R \name{path} \alias{path} \alias{effects} \alias{path.lvm} \alias{effects.lvmfit} \alias{totaleffects} \title{Extract pathways in model graph} \usage{ \method{path}{lvm} (object, to = NULL, from, all=FALSE, ...) \method{effects}{lvmfit} (object, to, from, ...) } \arguments{ \item{object}{Model object (\code{lvm})} \item{\dots}{Additional arguments to be passed to the low level functions} \item{to}{Outcome variable (string). 
Alternatively a formula specifying response and predictor in which case the argument \code{from} is ignored.} \item{from}{Response variable (string), not necessarily directly affected by \code{to}.} \item{all}{If TRUE all simple paths (in undirected graph) is returned on/off.} } \value{ If \code{object} is of class \code{lvmfit} a list with the following elements is returned \item{idx}{ A list where each element defines a possible pathway via a integer vector indicating the index of the visited nodes. } \item{V }{ A List of covariance matrices for each path. } \item{coef }{A list of parameters estimates for each path} \item{path }{A list where each element defines a possible pathway via a character vector naming the visited nodes in order. } \item{edges }{Description of 'comp2'} If \code{object} is of class \code{lvm} only the \code{path} element will be returned. The \code{effects} method returns an object of class \code{effects}. } \description{ Extract all possible paths from one variable to another connected component in a latent variable model. In an estimated model the effect size is decomposed into direct, indirect and total effects including approximate standard errors. } \note{ For a \code{lvmfit}-object the parameters estimates and their corresponding covariance matrix are also returned. The \code{effects}-function additionally calculates the total and indirect effects with approximate standard errors } \examples{ m <- lvm(c(y1,y2,y3)~eta) regression(m) <- y2~x1 latent(m) <- ~eta regression(m) <- eta~x1+x2 d <- sim(m,500) e <- estimate(m,d) path(Model(e),y2~x1) parents(Model(e), ~y2) children(Model(e), ~x2) children(Model(e), ~x2+eta) effects(e,y2~x1) ## All simple paths (undirected) path(m,y1~x1,all=TRUE) } \seealso{ \code{children}, \code{parents} } \author{ Klaus K. 
Holst } \keyword{graphs} \keyword{methods} \keyword{models} lava/man/equivalence.Rd0000644000176200001440000000205213520655354014547 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence.R \name{equivalence} \alias{equivalence} \title{Identify candidates of equivalent models} \usage{ equivalence(x, rel, tol = 0.001, k = 1, omitrel = TRUE, ...) } \arguments{ \item{x}{\code{lvmfit}-object} \item{rel}{Formula or character-vector specifying two variables to omit from the model and subsequently search for possible equivalent models} \item{tol}{Define two models as empirical equivalent if the absolute difference in score test is less than \code{tol}} \item{k}{Number of parameters to test simultaneously. For \code{equivalence} the number of additional associations to be added instead of \code{rel}.} \item{omitrel}{if \code{k} greater than 1, this boolean defines wether to omit candidates containing \code{rel} from the output} \item{\dots}{Additional arguments to be passed to the lower-level functions} } \description{ Identifies candidates of equivalent models } \seealso{ \code{\link{compare}}, \code{\link{modelsearch}} } \author{ Klaus K. Holst } lava/man/rmvar.Rd0000644000176200001440000000142613520655354013401 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kill.R \name{rmvar} \alias{rmvar} \alias{rmvar<-} \alias{kill} \alias{kill<-} \title{Remove variables from (model) object.} \usage{ rmvar(x, ...) 
<- value } \arguments{ \item{x}{Model object} \item{\dots}{additional arguments to lower level functions} \item{value}{Vector of variables or formula specifying which nodes to remove} } \description{ Generic method for removing elements of object } \examples{ m <- lvm() addvar(m) <- ~y1+y2+x covariance(m) <- y1~y2 regression(m) <- c(y1,y2) ~ x ### Cancel the covariance between the residuals of y1 and y2 cancel(m) <- y1~y2 ### Remove y2 from the model rmvar(m) <- ~y2 } \seealso{ \code{cancel} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/plot.estimate.Rd0000644000176200001440000000162313520655354015041 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.estimate.R \name{plot.estimate} \alias{plot.estimate} \title{Plot method for 'estimate' objects} \usage{ \method{plot}{estimate}(x, f, idx, intercept = FALSE, data, confint = TRUE, type = "l", xlab = "x", ylab = "f(x)", col = 1, add = FALSE, ...) } \arguments{ \item{x}{estimate object} \item{f}{function of parameter coefficients and data parsed on to 'estimate'. If omitted a forest-plot will be produced.} \item{idx}{Index of parameters (default all)} \item{intercept}{include intercept in forest-plot} \item{data}{data.frame} \item{confint}{Add confidence limits} \item{type}{plot type ('l')} \item{xlab}{x-axis label} \item{ylab}{y-axis label} \item{col}{color} \item{add}{add plot to current device} \item{...}{additional arguments to lower-level functions} } \description{ Plot method for 'estimate' objects } lava/man/correlation.Rd0000644000176200001440000000057013520655354014572 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/correlation.R \name{correlation} \alias{correlation} \title{Generic method for extracting correlation coefficients of model object} \usage{ correlation(x, ...) 
} \arguments{ \item{x}{Object} \item{\dots}{Additional arguments} } \description{ Generic correlation method } \author{ Klaus K. Holst } lava/man/iid.Rd0000644000176200001440000000156613520655354013024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iid.R \name{iid} \alias{iid} \alias{iid.default} \title{Extract i.i.d. decomposition (influence function) from model object} \usage{ iid(x,...) \method{iid}{default}(x,bread,id=NULL,folds=0,maxsize=(folds>0)*1e6,...) } \arguments{ \item{x}{model object} \item{...}{additional arguments} \item{id}{(optional) id/cluster variable} \item{bread}{(optional) Inverse of derivative of mean score function} \item{folds}{(optional) Calculate aggregated iid decomposition (0:=disabled)} \item{maxsize}{(optional) Data is split in groups of size up to 'maxsize' (0:=disabled)} } \description{ Extract i.i.d. decomposition (influence function) from model object } \examples{ m <- lvm(y~x+z) distribution(m, ~y+z) <- binomial.lvm("logit") d <- sim(m,1e3) g <- glm(y~x+z,data=d,family=binomial) crossprod(iid(g)) } lava/man/sim.Rd0000644000176200001440000002154113520655354013042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim.lvm.R \name{sim} \alias{sim} \alias{sim.lvmfit} \alias{sim.lvm} \alias{simulate.lvmfit} \alias{simulate.lvm} \alias{transform<-} \alias{transform<-.lvm} \alias{transform.lvm} \alias{functional} \alias{functional<-} \alias{functional.lvm} \alias{functional<-.lvm} \alias{distribution} \alias{distribution<-} \alias{distribution.lvm} \alias{distribution<-.lvm} \alias{heavytail} \alias{heavytail<-} \alias{weibull.lvm} \alias{binomial.lvm} \alias{poisson.lvm} \alias{uniform.lvm} \alias{multinomial.lvm} \alias{beta.lvm} \alias{normal.lvm} \alias{mvn.lvm} \alias{lognormal.lvm} \alias{gaussian.lvm} \alias{GM2.lvm} \alias{GM3.lvm} \alias{probit.lvm} \alias{logit.lvm} \alias{pareto.lvm} \alias{student.lvm} \alias{chisq.lvm} 
\alias{coxGompertz.lvm} \alias{coxWeibull.lvm} \alias{coxExponential.lvm} \alias{aalenExponential.lvm} \alias{Gamma.lvm} \alias{gamma.lvm} \alias{loggamma.lvm} \alias{categorical} \alias{categorical<-} \alias{threshold.lvm} \alias{ones.lvm} \alias{sequence.lvm} \title{Simulate model} \usage{ \method{sim}{lvm}(x, n = NULL, p = NULL, normal = FALSE, cond = FALSE, sigma = 1, rho = 0.5, X = NULL, unlink=FALSE, latent=TRUE, use.labels = TRUE, seed=NULL, ...) } \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments to be passed to the low level functions} \item{n}{Number of simulated values/individuals} \item{p}{Parameter value (optional)} \item{normal}{Logical indicating whether to simulate data from a multivariate normal distribution conditional on exogenous variables hence ignoring functional/distribution definition} \item{cond}{for internal use} \item{sigma}{Default residual variance (1)} \item{rho}{Default covariance parameter (0.5)} \item{X}{Optional matrix of fixed values of variables (manipulation)} \item{unlink}{Return Inverse link transformed data} \item{latent}{Include latent variables (default TRUE)} \item{use.labels}{convert categorical variables to factors before applying transformation} \item{seed}{Random seed} } \description{ Simulate data from a general SEM model including non-linear effects and general link and distribution of variables. 
} \examples{ ################################################## ## Logistic regression ################################################## m <- lvm(y~x+z) regression(m) <- x~z distribution(m,~y+z) <- binomial.lvm("logit") d <- sim(m,1e3) head(d) e <- estimate(m,d,estimator="glm") e ## Simulate a few observation from estimated model sim(e,n=5) ################################################## ## Poisson ################################################## distribution(m,~y) <- poisson.lvm() d <- sim(m,1e4,p=c(y=-1,"y~x"=2,z=1)) head(d) estimate(m,d,estimator="glm") mean(d$z); lava:::expit(1) summary(lm(y~x,sim(lvm(y[1:2]~4*x),1e3))) ################################################## ### Gamma distribution ################################################## m <- lvm(y~x) distribution(m,~y+x) <- list(Gamma.lvm(shape=2),binomial.lvm()) intercept(m,~y) <- 0.5 d <- sim(m,1e4) summary(g <- glm(y~x,family=Gamma(),data=d)) \dontrun{MASS::gamma.shape(g)} args(lava::Gamma.lvm) distribution(m,~y) <- Gamma.lvm(shape=2,log=TRUE) sim(m,10,p=c(y=0.5))[,"y"] ################################################## ### Beta ################################################## m <- lvm() distribution(m,~y) <- beta.lvm(alpha=2,beta=1) var(sim(m,100,"y,y"=2)) distribution(m,~y) <- beta.lvm(alpha=2,beta=1,scale=FALSE) var(sim(m,100)) ################################################## ### Transform ################################################## m <- lvm() transform(m,xz~x+z) <- function(x) x[1]*(x[2]>0) regression(m) <- y~x+z+xz d <- sim(m,1e3) summary(lm(y~x+z + x*I(z>0),d)) ################################################## ### Non-random variables ################################################## m <- lvm() distribution(m,~x+z+v+w) <- list(sequence.lvm(0,5),## Seq. 
0 to 5 by 1/n ones.lvm(), ## Vector of ones ones.lvm(0.5), ## 0.8n 0, 0.2n 1 ones.lvm(interval=list(c(0.3,0.5),c(0.8,1)))) sim(m,10) ################################################## ### Cox model ### piecewise constant hazard ################################################ m <- lvm(t~x) rates <- c(1,0.5); cuts <- c(0,5) ## Constant rate: 1 in [0,5), 0.5 in [5,Inf) distribution(m,~t) <- coxExponential.lvm(rate=rates,timecut=cuts) \dontrun{ d <- sim(m,2e4,p=c("t~x"=0.1)); d$status <- TRUE plot(timereg::aalen(survival::Surv(t,status)~x,data=d, resample.iid=0,robust=0),spec=1) L <- approxfun(c(cuts,max(d$t)),f=1, cumsum(c(0,rates*diff(c(cuts,max(d$t))))), method="linear") curve(L,0,100,add=TRUE,col="blue") } ################################################## ### Cox model ### piecewise constant hazard, gamma frailty ################################################## m <- lvm(y~x+z) rates <- c(0.3,0.5); cuts <- c(0,5) distribution(m,~y+z) <- list(coxExponential.lvm(rate=rates,timecut=cuts), loggamma.lvm(rate=1,shape=1)) \dontrun{ d <- sim(m,2e4,p=c("y~x"=0,"y~z"=0)); d$status <- TRUE plot(timereg::aalen(survival::Surv(y,status)~x,data=d, resample.iid=0,robust=0),spec=1) L <- approxfun(c(cuts,max(d$y)),f=1, cumsum(c(0,rates*diff(c(cuts,max(d$y))))), method="linear") curve(L,0,100,add=TRUE,col="blue") } ## Equivalent via transform (here with Aalens additive hazard model) m <- lvm(y~x) distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) distribution(m,~z) <- Gamma.lvm(rate=1,shape=1) transform(m,t~y+z) <- prod sim(m,10) ## Shared frailty m <- lvm(c(t1,t2)~x+z) rates <- c(1,0.5); cuts <- c(0,5) distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) distribution(m,~z) <- loggamma.lvm(rate=1,shape=1) \dontrun{ mets::fast.reshape(sim(m,100),varying="t") } ################################################## ### General multivariate distributions ################################################## \dontrun{ m <- lvm() distribution(m,~y1+y2,oratio=4) 
<- VGAM::rbiplackcop ksmooth2(sim(m,1e4),rgl=FALSE,theta=-20,phi=25) m <- lvm() distribution(m,~z1+z2,"or1") <- VGAM::rbiplackcop distribution(m,~y1+y2,"or2") <- VGAM::rbiplackcop sim(m,10,p=c(or1=0.1,or2=4)) } m <- lvm() distribution(m,~y1+y2+y3,TRUE) <- function(n,...) rmvn0(n,sigma=diag(3)+1) var(sim(m,100)) ## Syntax also useful for univariate generators, e.g. m <- lvm(y~x+z) distribution(m,~y,TRUE) <- function(n) rnorm(n,mean=1000) sim(m,5) distribution(m,~y,"m1",0) <- rnorm sim(m,5) sim(m,5,p=c(m1=100)) ################################################## ### Regression design in other parameters ################################################## ## Variance heterogeneity m <- lvm(y~x) distribution(m,~y) <- function(n,mean,x) rnorm(n,mean,exp(x)^.5) if (interactive()) plot(y~x,sim(m,1e3)) ## Alternaively, calculate the standard error directly addvar(m) <- ~sd ## If 'sd' should be part of the resulting data.frame constrain(m,sd~x) <- function(x) exp(x)^.5 distribution(m,~y) <- function(n,mean,sd) rnorm(n,mean,sd) if (interactive()) plot(y~x,sim(m,1e3)) ## Regression on variance parameter m <- lvm() regression(m) <- y~x regression(m) <- v~x ##distribution(m,~v) <- 0 # No stochastic term ## Alternative: ## regression(m) <- v[NA:0]~x distribution(m,~y) <- function(n,mean,v) rnorm(n,mean,exp(v)^.5) if (interactive()) plot(y~x,sim(m,1e3)) ## Regression on shape parameter in Weibull model m <- lvm() regression(m) <- y ~ z+v regression(m) <- s ~ exp(0.6*x-0.5*z) distribution(m,~x+z) <- binomial.lvm() distribution(m,~cens) <- coxWeibull.lvm(scale=1) distribution(m,~y) <- coxWeibull.lvm(scale=0.1,shape=~s) eventTime(m) <- time ~ min(y=1,cens=0) if (interactive()) { d <- sim(m,1e3) require(survival) (cc <- coxph(Surv(time,status)~v+strata(x,z),data=d)) plot(survfit(cc) ,col=1:4,mark.time=FALSE) } ################################################## ### Categorical predictor ################################################## m <- lvm() ## categorical(m,K=3) <- "v" 
categorical(m,labels=c("A","B","C")) <- "v" regression(m,additive=FALSE) <- y~v \dontrun{ plot(y~v,sim(m,1000,p=c("y~v:2"=3))) } m <- lvm() categorical(m,labels=c("A","B","C"),p=c(0.5,0.3)) <- "v" regression(m,additive=FALSE,beta=c(0,2,-1)) <- y~v ## equivalent to: ## regression(m,y~v,additive=FALSE) <- c(0,2,-1) regression(m,additive=FALSE,beta=c(0,4,-1)) <- z~v table(sim(m,1e4)$v) glm(y~v, data=sim(m,1e4)) glm(y~v, data=sim(m,1e4,p=c("y~v:1"=3))) transform(m,v2~v) <- function(x) x=='A' sim(m,10) ################################################## ### Pre-calculate object ################################################## m <- lvm(y~x) m2 <- sim(m,'y~x'=2) sim(m,10,'y~x'=2) sim(m2,10) ## Faster } \author{ Klaus K. Holst } \keyword{datagen} \keyword{models} \keyword{regression} lava/man/NA2x.Rd0000644000176200001440000000103013520655354013011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NA2x.R \name{NA2x} \alias{NA2x} \alias{x2NA} \title{Convert to/from NA} \usage{ NA2x(s, x = 0) } \arguments{ \item{s}{The input vector (of arbitrary class)} \item{x}{The elements to transform into \code{NA} resp. what to transform \code{NA} into.} } \value{ A vector with same dimension and class as \code{s}. } \description{ Convert vector to/from NA } \examples{ ##' x2NA(1:10, 1:5) NA2x(x2NA(c(1:10),5),5)##' } \author{ Klaus K. Holst } \keyword{manip} lava/man/intercept.Rd0000644000176200001440000000414413520655354014247 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fix.R \name{intercept} \alias{intercept} \alias{intercept<-} \alias{intercept.lvm} \alias{intercept<-.lvm} \alias{intfix} \alias{intfix<-} \alias{intfix.lvm} \alias{intfix<-.lvm} \title{Fix mean parameters in 'lvm'-object} \usage{ \method{intercept}{lvm}(object, vars, ...) 
<- value } \arguments{ \item{object}{\code{lvm}-object} \item{\dots}{Additional arguments} \item{vars}{character vector of variable names} \item{value}{Vector (or list) of parameter values or labels (numeric or character) or a formula defining the linear constraints (see also the \code{regression} or \code{covariance} methods).} } \value{ A \code{lvm}-object } \description{ Define linear constraints on intercept parameters in a \code{lvm}-object. } \details{ The \code{intercept} function is used to specify linear constraints on the intercept parameters of a latent variable model. As an example we look at the multivariate regression model \deqn{ E(Y_1|X) = \alpha_1 + \beta_1 X} \deqn{ E(Y_2|X) = \alpha_2 + \beta_2 X} defined by the call \code{m <- lvm(c(y1,y2) ~ x)} To fix \eqn{\alpha_1=\alpha_2} we call \code{intercept(m) <- c(y1,y2) ~ f(mu)} Fixed parameters can be reset by fixing them to \code{NA}. For instance to free the parameter restriction of \eqn{Y_1} and at the same time fixing \eqn{\alpha_2=2}, we call \code{intercept(m, ~y1+y2) <- list(NA,2)} Calling \code{intercept} with no additional arguments will return the current intercept restrictions of the \code{lvm}-object. } \note{ Variables will be added to the model if not already present. } \examples{ ## A multivariate model m <- lvm(c(y1,y2) ~ f(x1,beta)+x2) regression(m) <- y3 ~ f(x1,beta) intercept(m) <- y1 ~ f(mu) intercept(m, ~y2+y3) <- list(2,"mu") intercept(m) ## Examine intercepts of model (NA translates to free/unique paramete##r) } \seealso{ \code{\link{covariance<-}}, \code{\link{regression<-}}, \code{\link{constrain<-}}, \code{\link{parameter<-}}, \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} } \author{ Klaus K. 
Holst } \keyword{models} \keyword{regression} lava/man/confband.Rd0000644000176200001440000000521513520655354014024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confband.R \name{confband} \alias{confband} \alias{forestplot} \title{Add Confidence limits bar to plot} \usage{ confband(x, lower, upper, center = NULL, line = TRUE, delta = 0.07, centermark = 0.03, pch, blank = TRUE, vert = TRUE, polygon = FALSE, step = FALSE, ...) } \arguments{ \item{x}{Position (x-coordinate if vert=TRUE, y-coordinate otherwise)} \item{lower}{Lower limit (if NULL no limits is added, and only the center is drawn (if not NULL))} \item{upper}{Upper limit} \item{center}{Center point} \item{line}{If FALSE do not add line between upper and lower bound} \item{delta}{Length of limit bars} \item{centermark}{Length of center bar} \item{pch}{Center symbol (if missing a line is drawn)} \item{blank}{If TRUE a white ball is plotted before the center is added to the plot} \item{vert}{If TRUE a vertical bar is plotted. Otherwise a horizontal bar is used} \item{polygon}{If TRUE polygons are added between 'lower' and 'upper'.} \item{step}{Type of polygon (step-function or piecewise linear)} \item{...}{Additional low level arguments (e.g. 
col, lwd, lty,...)} } \description{ Add Confidence limits bar to plot } \examples{ plot(0,0,type="n",xlab="",ylab="") confband(0.5,-0.5,0.5,0,col="darkblue") confband(0.8,-0.5,0.5,0,col="darkred",vert=FALSE,pch=1,cex=1.5) set.seed(1) K <- 20 est <- rnorm(K) se <- runif(K,0.2,0.4) x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2)) x[c(3:4,10:12),] <- NA rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse=""))) rownames(x)[which(is.na(est))] <- "" signif <- sign(x[,2])==sign(x[,3]) forestplot(x,text.right=FALSE) forestplot(x[,-4],sep=c(2,15),col=signif+1,box1=TRUE,delta=0.2,pch=16,cex=1.5) forestplot(x,vert=TRUE,text=FALSE) forestplot(x,vert=TRUE,text=FALSE,pch=NA) ##forestplot(x,vert=TRUE,text.vert=FALSE) ##forestplot(val,vert=TRUE,add=TRUE) z <- seq(10) zu <- c(z[-1],10) plot(z,type="n") confband(z,zu,rep(0,length(z)),col=Col("darkblue"),polygon=TRUE,step=TRUE) confband(z,zu,zu-2,col=Col("darkred"),polygon=TRUE,step=TRUE) z <- seq(0,1,length.out=100) plot(z,z,type="n") confband(z,z,z^2,polygon="TRUE",col=Col("darkblue")) set.seed(1) k <- 10 x <- seq(k) est <- rnorm(k) sd <- runif(k) val <- cbind(x,est,est-sd,est+sd) par(mfrow=c(1,2)) plot(0,type="n",xlim=c(0,k+1),ylim=range(val[,-1]),axes=FALSE,xlab="",ylab="") axis(2) confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2) plot(0,type="n",ylim=c(0,k+1),xlim=range(val[,-1]),axes=FALSE,xlab="",ylab="") axis(1) confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2,vert=FALSE) } \seealso{ \code{confband} } \author{ Klaus K. Holst } \keyword{iplot} lava/man/complik.Rd0000644000176200001440000000267613520655354013720 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/complik.R \name{complik} \alias{complik} \title{Composite Likelihood for probit latent variable models} \usage{ complik(x, data, k = 2, type = c("nearest", "all"), pairlist, messages = 0, estimator = "normal", ...) 
} \arguments{ \item{x}{\code{lvm}-object} \item{data}{data.frame} \item{k}{Size of composite groups} \item{type}{Determines number of groups. With \code{type="nearest"} (default) only neighboring items will be grouped, e.g. for \code{k=2} (y1,y2),(y2,y3),... With \code{type="all"} all combinations of size \code{k} are included} \item{pairlist}{A list of indices specifying the composite groups. Optional argument which overrides \code{k} and \code{type} but gives complete flexibility in the specification of the composite likelihood} \item{messages}{Control amount of messages printed} \item{estimator}{Model (pseudo-likelihood) to use for the pairs/groups} \item{\dots}{Additional arguments parsed on to lower-level functions} } \value{ An object of class \code{clprobit} inheriting methods from \code{lvm} } \description{ Estimate parameters in a probit latent variable model via a composite likelihood decomposition. } \examples{ m <- lvm(c(y1,y2,y3)~b*x+1*u[0],latent=~u) ordinal(m,K=2) <- ~y1+y2+y3 d <- sim(m,50,seed=1) e1 <- complik(m,d,control=list(trace=1),type="all") } \seealso{ estimate } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/serotonin.Rd0000644000176200001440000000276613520655354014302 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{serotonin} \alias{serotonin} \title{Serotonin data} \format{data.frame} \source{ Simulated } \description{ This simulated data mimics a PET imaging study where the 5-HT2A receptor and serotonin transporter (SERT) binding potential has been quantified into 8 different regions. The 5-HT2A cortical regions are considered high-binding regions measurements. 
These measurements can be regarded as proxy measures of the extra-cellular levels of serotonin in the brain \tabular{rll}{ day \tab numeric \tab Scan day of the year \cr age \tab numeric \tab Age at baseline scan \cr mem \tab numeric \tab Memory performance score \cr depr \tab numeric \tab Depression (mild) status 500 days after baseline \cr gene1 \tab numeric \tab Gene marker 1 (HTR2A) \cr gene2 \tab numeric \tab Gene marker 2 (HTTTLPR) \cr cau \tab numeric \tab SERT binding, Caudate Nucleus \cr th \tab numeric \tab SERT binding, Thalamus \cr put \tab numeric \tab SERT binding, Putamen \cr mid \tab numeric \tab SERT binding, Midbrain \cr aci \tab numeric \tab 5-HT2A binding, Anterior cingulate gyrus \cr pci \tab numeric \tab 5-HT2A binding, Posterior cingulate gyrus \cr sfc \tab numeric \tab 5-HT2A binding, Superior frontal cortex \cr par \tab numeric \tab 5-HT2A binding, Parietal cortex \cr } } \keyword{datasets} lava/man/partialcor.Rd0000644000176200001440000000143213520655354014407 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/partialcor.R \name{partialcor} \alias{partialcor} \title{Calculate partial correlations} \usage{ partialcor(formula, data, level = 0.95, ...) } \arguments{ \item{formula}{formula speciying the covariates and optionally the outcomes to calculate partial correlation for} \item{data}{data.frame} \item{level}{Level of confidence limits} \item{...}{Additional arguments to lower level functions} } \value{ A coefficient matrix } \description{ Calculate partial correlation coefficients and confidence limits via Fishers z-transform } \examples{ m <- lvm(c(y1,y2,y3)~x1+x2) covariance(m) <- c(y1,y2,y3)~y1+y2+y3 d <- sim(m,500) partialcor(~x1+x2,d) } \author{ Klaus K. 
Holst } \keyword{models} \keyword{regression} lava/man/Combine.Rd0000644000176200001440000000123013520655354013617 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine.R \name{Combine} \alias{Combine} \title{Report estimates across different models} \usage{ Combine(x, ...) } \arguments{ \item{x}{list of model objects} \item{...}{additional arguments to lower-level functions} } \description{ Report estimates across different models } \examples{ data(serotonin) m1 <- lm(cau ~ age*gene1 + age*gene2,data=serotonin) m2 <- lm(cau ~ age + gene1,data=serotonin) m3 <- lm(cau ~ age*gene2,data=serotonin) Combine(list(A=m1,B=m2,C=m3),fun=function(x) c("_____"="",R2=" "\%++\%format(summary(x)$r.squared,digits=2))) } \author{ Klaus K. Holst } lava/man/Expand.Rd0000644000176200001440000000126613520655354013473 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Expand.R \name{Expand} \alias{Expand} \title{Create a Data Frame from All Combinations of Factors} \usage{ Expand(`_data`, ...) } \arguments{ \item{_data}{Data.frame} \item{...}{vectors, factors or a list containing these} } \description{ Create a Data Frame from All Combinations of Factors } \details{ Simple wrapper of the 'expand.grid' function. If x is a table then a data frame is returned with one row pr individual observation. } \examples{ dd <- Expand(iris, Sepal.Length=2:8, Species=c("virginica","setosa")) summary(dd) T <- with(warpbreaks, table(wool, tension)) Expand(T) } \author{ Klaus K. Holst } lava/man/addvar.Rd0000644000176200001440000000055413520655354013514 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/addvar.R \name{addvar} \alias{addvar} \alias{addvar<-} \title{Add variable to (model) object} \usage{ addvar(x, ...) 
} \arguments{ \item{x}{Model object} \item{\dots}{Additional arguments} } \description{ Generic method for adding variables to model object } \author{ Klaus K. Holst } lava/man/measurement.error.Rd0000644000176200001440000000307613520655354015732 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/measurement.error.R \name{measurement.error} \alias{measurement.error} \title{Two-stage (non-linear) measurement error} \usage{ measurement.error(model1, formula, data = parent.frame(), predictfun = function(mu, var, data, ...) mu[, 1]^2 + var[1], id1, id2, ...) } \arguments{ \item{model1}{Stage 1 model} \item{formula}{Formula specifying observed covariates in stage 2 model} \item{data}{data.frame} \item{predictfun}{Predictions to be used in stage 2} \item{id1}{Optional id-vector of stage 1} \item{id2}{Optional id-vector of stage 2} \item{...}{Additional arguments to lower level functions} } \description{ Two-stage measurement error } \examples{ m <- lvm(c(y1,y2,y3)~u,c(y3,y4,y5)~v,u~~v,c(u,v)~x) transform(m,u2~u) <- function(x) x^2 transform(m,uv~u+v) <- prod regression(m) <- z~u2+u+v+uv+x set.seed(1) d <- sim(m,1000,p=c("u,u"=1)) ## Stage 1 m1 <- lvm(c(y1[0:s],y2[0:s],y3[0:s])~1*u,c(y3[0:s],y4[0:s],y5[0:s])~1*v,u~b*x,u~~v) latent(m1) <- ~u+v e1 <- estimate(m1,d) pp <- function(mu,var,data,...) { cbind(u=mu[,"u"],u2=mu[,"u"]^2+var["u","u"],v=mu[,"v"],uv=mu[,"u"]*mu[,"v"]+var["u","v"]) } (e <- measurement.error(e1, z~1+x, data=d, predictfun=pp)) ## uu <- seq(-1,1,length.out=100) ## pp <- estimate(e,function(p,...) 
p["(Intercept)"]+p["u"]*uu+p["u2"]*uu^2)$coefmat if (interactive()) { plot(e,intercept=TRUE,line=0) f <- function(p) p[1]+p["u"]*u+p["u2"]*u^2 u <- seq(-1,1,length.out=100) plot(e, f, data=data.frame(u), ylim=c(-.5,2.5)) } } \seealso{ stack.estimate } lava/man/diagtest.Rd0000644000176200001440000000230513520655354014053 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagtest.R \name{diagtest} \alias{diagtest} \alias{odds} \alias{riskcomp} \alias{OR} \alias{Ratio} \alias{Diff} \title{Calculate diagnostic tests for 2x2 table} \usage{ diagtest(table, positive = 2, exact = FALSE, p0 = NA, confint = c("logit", "arcsin", "pseudoscore", "exact"), ...) } \arguments{ \item{table}{Table or (matrix/data.frame with two columns)} \item{positive}{Switch reference} \item{exact}{If TRUE exact binomial proportions CI/test will be used} \item{p0}{Optional null hypothesis (test prevalenc, sensitivity, ...)} \item{confint}{Type of confidence limits} \item{...}{Additional arguments to lower level functions} } \description{ Calculate prevalence, sensitivity, specificity, and positive and negative predictive values } \details{ Table should be in the format with outcome in columns and test in rows. Data.frame should be with test in the first column and outcome in the second column. 
} \examples{ M <- as.table(matrix(c(42,12, 35,28),ncol=2,byrow=TRUE, dimnames=list(rater=c("no","yes"),gold=c("no","yes")))) diagtest(M,exact=TRUE) } \author{ Klaus Holst } lava/man/Col.Rd0000644000176200001440000000156313520655354012771 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Col.R \name{Col} \alias{Col} \title{Generate a transparent RGB color} \usage{ Col(col, alpha = 0.2, locate = 0) } \arguments{ \item{col}{Color (numeric or character)} \item{alpha}{Degree of transparency (0,1)} \item{locate}{Choose colour (with mouse)} } \value{ A character vector with elements of 7 or 9 characters, '"\#"' followed by the red, blue, green and optionally alpha values in hexadecimal (after rescaling to '0 ... 255'). } \description{ This function transforms a standard color (e.g. "red") into an transparent RGB-color (i.e. alpha-blend<1). } \details{ This only works for certain graphics devices (Cairo-X11 (x11 as of R>=2.7), quartz, pdf, ...). } \examples{ plot(runif(1000),cex=runif(1000,0,4),col=Col(c("darkblue","orange"),0.5),pch=16) } \author{ Klaus K. Holst } \keyword{color} lava/man/multinomial.Rd0000644000176200001440000000370413520655354014605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multinomial.R \name{multinomial} \alias{multinomial} \alias{kappa.multinomial} \alias{kappa.table} \alias{gkgamma} \title{Estimate probabilities in contingency table} \usage{ multinomial(x, data = parent.frame(), marginal = FALSE, transform, vcov = TRUE, iid = TRUE, ...) 
} \arguments{ \item{x}{Formula (or matrix or data.frame with observations, 1 or 2 columns)} \item{data}{Optional data.frame} \item{marginal}{If TRUE the marginals are estimated} \item{transform}{Optional transformation of parameters (e.g., logit)} \item{vcov}{Calculate asymptotic variance (default TRUE)} \item{iid}{Return iid decomposition (default TRUE)} \item{...}{Additional arguments to lower-level functions} } \description{ Estimate probabilities in contingency table } \examples{ set.seed(1) breaks <- c(-Inf,-1,0,Inf) m <- lvm(); covariance(m,pairwise=TRUE) <- ~y1+y2+y3+y4 d <- transform(sim(m,5e2), z1=cut(y1,breaks=breaks), z2=cut(y2,breaks=breaks), z3=cut(y3,breaks=breaks), z4=cut(y4,breaks=breaks)) multinomial(d[,5]) (a1 <- multinomial(d[,5:6])) (K1 <- kappa(a1)) ## Cohen's kappa K2 <- kappa(d[,7:8]) ## Testing difference K1-K2: estimate(merge(K1,K2,id=TRUE),diff) estimate(merge(K1,K2,id=FALSE),diff) ## Wrong std.err ignoring dependence sqrt(vcov(K1)+vcov(K2)) ## Average of the two kappas: estimate(merge(K1,K2,id=TRUE),function(x) mean(x)) estimate(merge(K1,K2,id=FALSE),function(x) mean(x)) ## Independence ##' ## Goodman-Kruskal's gamma m2 <- lvm(); covariance(m2) <- y1~y2 breaks1 <- c(-Inf,-1,0,Inf) breaks2 <- c(-Inf,0,Inf) d2 <- transform(sim(m2,5e2), z1=cut(y1,breaks=breaks1), z2=cut(y2,breaks=breaks2)) (g1 <- gkgamma(d2[,3:4])) ## same as \dontrun{ gkgamma(table(d2[,3:4])) gkgamma(multinomial(d2[,3:4])) } ##partial gamma d2$x <- rbinom(nrow(d2),2,0.5) gkgamma(z1~z2|x,data=d2) } \author{ Klaus K. Holst } lava/man/pcor.Rd0000644000176200001440000000066313520655354013217 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pcor.R \name{pcor} \alias{pcor} \title{Polychoric correlation} \usage{ pcor(x, y, X, start, ...) 
} \arguments{ \item{x}{Variable 1} \item{y}{Variable 2} \item{X}{Optional covariates} \item{start}{Optional starting values} \item{...}{Additional arguments to lower level functions} } \description{ Maximum likelhood estimates of polychoric correlations } lava/man/colorbar.Rd0000644000176200001440000000251413520655354014054 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zcolorbar.R \name{colorbar} \alias{colorbar} \title{Add color-bar to plot} \usage{ colorbar(clut = Col(rev(rainbow(11, start = 0, end = 0.69)), alpha), x.range = c(-0.5, 0.5), y.range = c(-0.1, 0.1), values = seq(clut), digits = 2, label.offset, srt = 45, cex = 0.5, border = NA, alpha = 0.5, position = 1, direction = c("horizontal", "vertical"), ...) } \arguments{ \item{clut}{Color look-up table} \item{x.range}{x range} \item{y.range}{y range} \item{values}{label values} \item{digits}{number of digits} \item{label.offset}{label offset} \item{srt}{rotation of labels} \item{cex}{text size} \item{border}{border of color bar rectangles} \item{alpha}{Alpha (transparency) level 0-1} \item{position}{Label position left/bottom (1) or top/right (2) or no text (0)} \item{direction}{horizontal or vertical color bars} \item{\dots}{additional low level arguments (i.e. 
parsed to \code{text})} } \description{ Add color-bar to plot } \examples{ \dontrun{ plotNeuro(x,roi=R,mm=-18,range=5) colorbar(clut=Col(rev(rainbow(11,start=0,end=0.69)),0.5), x=c(-40,40),y.range=c(84,90),values=c(-5:5)) colorbar(clut=Col(rev(rainbow(11,start=0,end=0.69)),0.5), x=c(-10,10),y.range=c(-100,50),values=c(-5:5), direction="vertical",border=1) } } lava/man/stack.estimate.Rd0000644000176200001440000000255513520655354015175 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stack.R \name{stack.estimate} \alias{stack.estimate} \title{Stack estimating equations} \usage{ \method{stack}{estimate}(x, model2, D1u, inv.D2u, propensity, dpropensity, U, keep1 = FALSE, propensity.arg, estimate.arg, na.action = na.pass, ...) } \arguments{ \item{x}{Model 1} \item{model2}{Model 2} \item{D1u}{Derivative of score of model 2 w.r.t. parameter vector of model 1} \item{inv.D2u}{Inverse of deri} \item{propensity}{propensity score (vector or function)} \item{dpropensity}{derivative of propensity score wrt parameters of model 1} \item{U}{Optional score function (model 2) as function of all parameters} \item{keep1}{If FALSE only parameters of model 2 is returned} \item{propensity.arg}{Arguments to propensity function} \item{estimate.arg}{Arguments to 'estimate'} \item{na.action}{Method for dealing with missing data in propensity score} \item{...}{Additional arguments to lower level functions} } \description{ Stack estimating equations (two-stage estimator) } \examples{ m <- lvm(z0~x) Missing(m, z ~ z0) <- r~x distribution(m,~x) <- binomial.lvm() p <- c(r=-1,'r~x'=0.5,'z0~x'=2) beta <- p[3]/2 d <- sim(m,500,p=p) m1 <- estimate(r~x,data=d,family=binomial) d$w <- d$r/predict(m1,type="response") m2 <- estimate(z~1, weights=w, data=d) (e <- stack(m1,m2,propensity=TRUE)) } lava/man/NR.Rd0000644000176200001440000000320013520655354012561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/optims.R 
\name{NR} \alias{NR} \title{Newton-Raphson method} \usage{ NR(start, objective = NULL, gradient = NULL, hessian = NULL, control, args = NULL, ...) } \arguments{ \item{start}{Starting value} \item{objective}{Optional objective function (used for selecting step length)} \item{gradient}{gradient} \item{hessian}{hessian (if NULL a numerical derivative is used)} \item{control}{optimization arguments (see details)} \item{args}{Optional list of arguments parsed to objective, gradient and hessian} \item{...}{additional arguments parsed to lower level functions} } \description{ Newton-Raphson method } \details{ \code{control} should be a list with one or more of the following components: \itemize{ \item{trace} integer for which output is printed each 'trace'th iteration \item{iter.max} number of iterations \item{stepsize}: Step size (default 1) \item{nstepsize}: Increase stepsize every nstepsize iteration (from stepsize to 1) \item{tol}: Convergence criterion (gradient) \item{epsilon}: threshold used in pseudo-inverse \item{backtrack}: In each iteration reduce stepsize unless solution is improved according to criterion (gradient, armijo, curvature, wolfe) } } \examples{ # Objective function with gradient and hessian as attributes f <- function(z) { x <- z[1]; y <- z[2] val <- x^2 + x*y^2 + x + y structure(val, gradient=c(2*x+y^2+1, 2*y*x+1), hessian=rbind(c(2,2*y),c(2*y,2*x))) } NR(c(0,0),f) # Parsing arguments to the function and g <- function(x,y) (x*y+1)^2 NR(0, gradient=g, args=list(y=2), control=list(trace=1,tol=1e-20)) } lava/man/makemissing.Rd0000644000176200001440000000145213520655354014560 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makemissing.R \name{makemissing} \alias{makemissing} \title{Create random missing data} \usage{ makemissing(data, p = 0.2, cols = seq_len(ncol(data)), rowwise = FALSE, nafun = function(x) x, seed = NULL) } \arguments{ \item{data}{data.frame} \item{p}{Fraction of missing data in each 
column} \item{cols}{Which columns (name or index) to alter} \item{rowwise}{Should missing occur row-wise (either none or all selected columns are missing)} \item{nafun}{(Optional) function to be applied on data.frame before return (e.g. \code{na.omit} to return complete-cases only)} \item{seed}{Random seed} } \value{ data.frame } \description{ Generates missing entries in data.frame/matrix } \author{ Klaus K. Holst } \keyword{utilities} lava/man/hubble.Rd0000644000176200001440000000057513520655354013517 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{hubble} \alias{hubble} \title{Hubble data} \format{data.frame} \source{ Freedman, W. L., et al. 2001, AstroPhysicalJournal, 553, 47. } \description{ Velocity (v) and distance (D) measures of 36 Type Ia super-novae from the Hubble Space Telescope } \keyword{datasets} lava/man/pdfconvert.Rd0000644000176200001440000000152213520655354014421 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pdfconvert.R \name{pdfconvert} \alias{pdfconvert} \title{Convert pdf to raster format} \usage{ pdfconvert(files, dpi = 300, resolution = 1024, gs, gsopt, resize, format = "png", ...) } \arguments{ \item{files}{Vector of (pdf-)filenames to process} \item{dpi}{DPI} \item{resolution}{Resolution of raster image file} \item{gs}{Optional ghostscript command} \item{gsopt}{Optional ghostscript arguments} \item{resize}{Optional resize arguments (mogrify)} \item{format}{Raster format (e.g. png, jpg, tif, ...)} \item{\dots}{Additional arguments} } \description{ Convert PDF file to print quality png (default 300 dpi) } \details{ Access to ghostscript program 'gs' is needed } \seealso{ \code{dev.copy2pdf}, \code{printdev} } \author{ Klaus K. 
Holst } \keyword{iplot} lava/man/mvnmix.Rd0000644000176200001440000000270313520655354013567 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mvnmix.R \name{mvnmix} \alias{mvnmix} \title{Estimate mixture latent variable model} \usage{ mvnmix(data, k = 2, theta, steps = 500, tol = 1e-16, lambda = 0, mu = NULL, silent = TRUE, extra = FALSE, n.start = 1, init = "kmpp", ...) } \arguments{ \item{data}{\code{data.frame}} \item{k}{Number of mixture components} \item{theta}{Optional starting values} \item{steps}{Maximum number of iterations} \item{tol}{Convergence tolerance of EM algorithm} \item{lambda}{Regularisation parameter. Added to diagonal of covariance matrix (to avoid singularities)} \item{mu}{Initial centres (if unspecified random centres will be chosen)} \item{silent}{Turn on/off output messages} \item{extra}{Extra debug information} \item{n.start}{Number of restarts} \item{init}{Function to choose initial centres} \item{...}{Additional arguments parsed to lower-level functions} } \value{ A \code{mixture} object } \description{ Estimate mixture latent variable model } \details{ Estimate parameters in a mixture of latent variable models via the EM algorithm. } \examples{ data(faithful) set.seed(1) M1 <- mvnmix(faithful[,"waiting",drop=FALSE],k=2) M2 <- mvnmix(faithful,k=2) if (interactive()) { par(mfrow=c(2,1)) plot(M1,col=c("orange","blue"),ylim=c(0,0.05)) plot(M2,col=c("orange","blue")) } } \seealso{ \code{mixture} } \author{ Klaus K. Holst } \keyword{models} \keyword{regression} lava/man/baptize.Rd0000644000176200001440000000052113520655354013703 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/baptize.R \name{baptize} \alias{baptize} \title{Label elements of object} \usage{ baptize(x, ...) } \arguments{ \item{x}{Object} \item{\dots}{Additional arguments} } \description{ Generic method for labeling elements of an object } \author{ Klaus K. 
Holst } lava/man/cv.Rd0000644000176200001440000000223613520655354012662 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cv.R \name{cv} \alias{cv} \title{Cross-validation} \usage{ cv(modelList, data, K = 5, rep = 1, perf, seed = NULL, mc.cores = 1, shared = NULL, ...) } \arguments{ \item{modelList}{List of fitting functions or models} \item{data}{data.frame} \item{K}{Number of folds (default 5, 0 splits in 1:n/2, n/2:n with last part used for testing)} \item{rep}{Number of repetitions (default 1)} \item{perf}{Performance measure (default RMSE)} \item{seed}{Optional random seed} \item{mc.cores}{Number of cores used for parallel computations} \item{shared}{function applied to each fold with results send to each model} \item{...}{Additional arguments parsed to models in modelList and perf} } \description{ Cross-validation } \details{ Generic cross-validation function } \examples{ f0 <- function(data,...) lm(...,data) f1 <- function(data,...) lm(Sepal.Length~Species,data) f2 <- function(data,...) lm(Sepal.Length~Species+Petal.Length,data) x <- cv(list(m0=f0,m1=f1,m2=f2),rep=10, data=iris, formula=Sepal.Length~.) x2 <- cv(list(f0(iris),f1(iris),f2(iris)),rep=10, data=iris) } \author{ Klaus K. Holst } lava/man/By.Rd0000644000176200001440000000154513520655354012626 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/By.R \name{By} \alias{By} \title{Apply a Function to a Data Frame Split by Factors} \usage{ By(x, INDICES, FUN, COLUMNS, array = FALSE, ...) 
} \arguments{ \item{x}{Data frame} \item{INDICES}{Indices (vector or list of indices, vector of column names, or formula of column names)} \item{FUN}{A function to be applied to data frame subsets of 'data'.} \item{COLUMNS}{(Optional) subset of columns of x to work on} \item{array}{if TRUE an array/matrix is always returned} \item{...}{Additional arguments to lower-level functions} } \description{ Apply a Function to a Data Frame Split by Factors } \details{ Simple wrapper of the 'by' function } \examples{ By(datasets::CO2,~Treatment+Type,colMeans,~conc) By(datasets::CO2,~Treatment+Type,colMeans,~conc+uptake) } \author{ Klaus K. Holst } lava/man/blockdiag.Rd0000644000176200001440000000071713520655354014173 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/blockdiag.R \name{blockdiag} \alias{blockdiag} \title{Combine matrices to block diagonal structure} \usage{ blockdiag(x, ..., pad = 0) } \arguments{ \item{x}{Matrix} \item{\dots}{Additional matrices} \item{pad}{Vyalue outside block-diagonal} } \description{ Combine matrices to block diagonal structure } \examples{ A <- diag(3)+1 blockdiag(A,A,A,pad=NA) } \author{ Klaus K. Holst } lava/man/timedep.Rd0000644000176200001440000000431613520655354013702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/timedep.R \name{timedep} \alias{timedep} \alias{timedep<-} \title{Time-dependent parameters} \usage{ timedep(object, formula, rate, timecut, type = "coxExponential.lvm", ...) } \arguments{ \item{object}{Model} \item{formula}{Formula with rhs specifying time-varying covariates} \item{rate}{Optional rate parameters. If given as a vector this parameter is interpreted as the raw (baseline-)rates within each time interval defined by \code{timecut}. 
If given as a matrix the parameters are interpreted as log-rates (and log-rate-ratios for the time-varying covariates defined in the formula).} \item{timecut}{Time intervals} \item{type}{Type of model (default piecewise constant intensity)} \item{...}{Additional arguments to lower level functions} } \description{ Add time-varying covariate effects to model } \examples{ ## Piecewise constant hazard m <- lvm(y~1) m <- timedep(m,y~1,timecut=c(0,5),rate=c(0.5,0.3)) \dontrun{ d <- sim(m,1e4); d$status <- TRUE dd <- mets::lifetable(Surv(y,status)~1,data=d,breaks=c(0,5,10)); exp(coef(glm(events ~ offset(log(atrisk)) + -1 + interval, dd, family=poisson))) } ## Piecewise constant hazard and time-varying effect of z1 m <- lvm(y~1) distribution(m,~z1) <- ones.lvm(0.5) R <- log(cbind(c(0.2,0.7,0.9),c(0.5,0.3,0.3))) m <- timedep(m,y~z1,timecut=c(0,3,5),rate=R) \dontrun{ d <- sim(m,1e4); d$status <- TRUE dd <- mets::lifetable(Surv(y,status)~z1,data=d,breaks=c(0,3,5,Inf)); exp(coef(glm(events ~ offset(log(atrisk)) + -1 + interval+z1:interval, dd, family=poisson))) } ## Explicit simulation of time-varying effects m <- lvm(y~1) distribution(m,~z1) <- ones.lvm(0.5) distribution(m,~z2) <- binomial.lvm(p=0.5) #variance(m,~m1+m2) <- 0 #regression(m,m1[m1:0] ~ z1) <- log(0.5) #regression(m,m2[m2:0] ~ z1) <- log(0.3) regression(m,m1 ~ z1,variance=0) <- log(0.5) regression(m,m2 ~ z1,variance=0) <- log(0.3) intercept(m,~m1+m2) <- c(-0.5,0) m <- timedep(m,y~m1+m2,timecut=c(0,5)) \dontrun{ d <- sim(m,1e5); d$status <- TRUE dd <- mets::lifetable(Surv(y,status)~z1,data=d,breaks=c(0,5,Inf)) exp(coef(glm(events ~ offset(log(atrisk)) + -1 + interval + interval:z1, dd, family=poisson))) } } \author{ Klaus K. 
Holst } lava/man/scheffe.Rd0000644000176200001440000000151413520655354013653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scheffe.R \name{scheffe} \alias{scheffe} \title{Calculate simultaneous confidence limits by Scheffe's method} \usage{ scheffe(model, newdata = model.frame(model), level = 0.95) } \arguments{ \item{model}{Linear model} \item{newdata}{new data frame} \item{level}{confidence level (0.95)} } \description{ Function to compute the Scheffe corrected confidence interval for the regression line } \examples{ x <- rnorm(100) d <- data.frame(y=rnorm(length(x),x),x=x) l <- lm(y~x,d) plot(y~x,d) abline(l) d0 <- data.frame(x=seq(-5,5,length.out=100)) d1 <- cbind(d0,predict(l,newdata=d0,interval="confidence")) d2 <- cbind(d0,scheffe(l,d0)) lines(lwr~x,d1,lty=2,col="red") lines(upr~x,d1,lty=2,col="red") lines(lwr~x,d2,lty=2,col="blue") lines(upr~x,d2,lty=2,col="blue") } lava/man/predict.lvm.Rd0000644000176200001440000000261313520655354014500 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predict.lvm} \alias{predict.lvm} \alias{predict.lvmfit} \title{Prediction in structural equation models} \usage{ \method{predict}{lvm}(object, x = NULL, y = NULL, residual = FALSE, p, data, path = FALSE, quick = is.null(x) & !(residual | path), ...) 
} \arguments{ \item{object}{Model object} \item{x}{optional list of (endogenous) variables to condition on} \item{y}{optional subset of variables to predict} \item{residual}{If true the residuals are predicted} \item{p}{Parameter vector} \item{data}{Data to use in prediction} \item{path}{Path prediction} \item{quick}{If TRUE the conditional mean and variance given covariates are returned (and all other calculations skipped)} \item{\dots}{Additional arguments to lower level function} } \description{ Prediction in structural equation models } \examples{ m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u d <- sim(m,100) e <- estimate(m,d) ## Conditional mean (and variance as attribute) given covariates r <- predict(e) ## Best linear unbiased predictor (BLUP) r <- predict(e,vars(e)) ## Conditional mean of y3 giving covariates and y1,y2 r <- predict(e,y3~y1+y2) ## Conditional mean gives covariates and y1 r <- predict(e,~y1) ## Predicted residuals (conditional on all observed variables) r <- predict(e,vars(e),residual=TRUE) } \seealso{ predictlvm } lava/man/Grep.Rd0000644000176200001440000000164713520655354013154 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Grep.R \name{Grep} \alias{Grep} \title{Finds elements in vector or column-names in data.frame/matrix} \usage{ Grep(x, pattern, subset = TRUE, ignore.case = TRUE, ...) } \arguments{ \item{x}{vector, matrix or data.frame.} \item{pattern}{regular expression to search for} \item{subset}{If TRUE returns subset of data.frame/matrix otherwise just the matching column names} \item{ignore.case}{Default ignore case} \item{...}{Additional arguments to 'grep'} } \value{ A data.frame with 2 columns with the indices in the first and the matching names in the second. } \description{ Pattern matching in a vector or column names of a data.frame or matrix. 
} \examples{ data(iris) head(Grep(iris,"(len)|(sp)")) } \seealso{ \code{\link{grep}}, and \code{\link{agrep}} for approximate string matching, } \author{ Klaus K. Holst } \keyword{misc} \keyword{utilities} lava/man/twindata.Rd0000644000176200001440000000132413520655354014062 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lava-package.R \docType{data} \name{twindata} \alias{twindata} \title{Twin menarche data} \format{data.frame} \source{ Simulated } \description{ Simulated data \tabular{rll}{ id \tab numeric \tab Twin-pair id \cr zyg \tab character \tab Zygosity (MZ or DZ) \cr twinnum \tab numeric \tab Twin number (1 or 2) \cr agemena \tab numeric \tab Age at menarche (or censoring) \cr status \tab logical \tab Censoring status (observed:=T,censored:=F) \cr bw \tab numeric \tab Birth weight \cr msmoke \tab numeric \tab Did mother smoke? (yes:=1,no:=0) \cr } } \keyword{datasets} lava/man/ordinal-set.Rd0000644000176200001440000000111313520655354014464 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ordinal.R \name{ordinal<-} \alias{ordinal<-} \alias{ordinal} \title{Define variables as ordinal} \usage{ ordinal(x, ...) <- value } \arguments{ \item{x}{Object} \item{...}{additional arguments to lower level functions} \item{value}{variable (formula or character vector)} } \description{ Define variables as ordinal in latent variable model object } \examples{ if (requireNamespace("mets")) { m <- lvm(y + z ~ x + 1*u[0], latent=~u) ordinal(m, K=3) <- ~y+z d <- sim(m, 100, seed=1) e <- estimate(m, d) } } lava/man/wrapvec.Rd0000644000176200001440000000051113520655354013713 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wrapvec.R \name{wrapvec} \alias{wrapvec} \title{Wrap vector} \usage{ wrapvec(x, delta = 0L, ...) 
} \arguments{ \item{x}{Vector or integer} \item{delta}{Shift} \item{...}{Additional parameters} } \description{ Wrap vector } \examples{ wrapvec(5,2) } lava/man/zibreg.Rd0000644000176200001440000000374413520655354013541 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zib.R \name{zibreg} \alias{zibreg} \title{Regression model for binomial data with unkown group of immortals} \usage{ zibreg(formula, formula.p = ~1, data, family = stats::binomial(), offset = NULL, start, var = "hessian", ...) } \arguments{ \item{formula}{Formula specifying} \item{formula.p}{Formula for model of disease prevalence} \item{data}{data frame} \item{family}{Distribution family (see the help page \code{family})} \item{offset}{Optional offset} \item{start}{Optional starting values} \item{var}{Type of variance (robust, expected, hessian, outer)} \item{...}{Additional arguments to lower level functions} } \description{ Regression model for binomial data with unkown group of immortals (zero-inflated binomial regression) } \examples{ ## Simulation n <- 2e3 x <- runif(n,0,20) age <- runif(n,10,30) z0 <- rnorm(n,mean=-1+0.05*age) z <- cut(z0,breaks=c(-Inf,-1,0,1,Inf)) p0 <- lava:::expit(model.matrix(~z+age) \%*\% c(-.4, -.4, 0.2, 2, -0.05)) y <- (runif(n) Description: A general implementation of Structural Equation Models with latent variables (MLE, 2SLS, and composite likelihood estimators) with both continuous, censored, and ordinal outcomes (Holst and Budtz-Joergensen (2013) ). Mixture latent variable models and non-linear latent variable models (Holst and Budtz-Joergensen (2019) ). The package also provides methods for graph exploration (d-separation, back-door criterion), simulation of general non-linear latent variable models, and estimation of influence functions for a broad range of statistical models. 
URL: https://github.com/kkholst/lava BugReports: https://github.com/kkholst/lava/issues License: GPL-3 LazyLoad: yes Depends: R (>= 3.0) Imports: grDevices, graphics, methods, numDeriv, stats, survival, SQUAREM, utils Suggests: KernSmooth, Matrix, Rgraphviz, data.table, ellipse, fields, foreach, geepack, gof (>= 0.9), graph, igraph (>= 0.6), lava.tobit (>= 0.4.7), lme4, mets (>= 1.1), nlme, optimx, polycor, quantreg, rgl, testthat (>= 0.11), visNetwork, zoo ByteCompile: yes Encoding: UTF-8 RoxygenNote: 6.1.1 NeedsCompilation: no Packaged: 2019-08-01 21:34:45 UTC; klaus Repository: CRAN Date/Publication: 2019-08-01 22:20:02 UTC lava/tests/0000755000176200001440000000000013520655354012347 5ustar liggesuserslava/tests/test-all.R0000644000176200001440000000013113520655354014212 0ustar liggesusers#library("lava") suppressPackageStartupMessages(library("testthat")) test_check("lava") lava/tests/testthat/0000755000176200001440000000000013520655354014207 5ustar liggesuserslava/tests/testthat/test-multigroup.R0000644000176200001440000000547013520655354017524 0ustar liggesuserscontext("Multiple Group") test_that("Multiple group I", { m <- lvm(y~x) set.seed(1) d <- sim(m,100) ## Just a stratified analysis e <- estimate(list("Group A"=m,"Group B"=m),list(d,d)) testthat::expect_true(mean((coef(e)[c(1,3)]-coef(lm(y~x,d)))^2)<1e-9) testthat::expect_true(mean((coef(e)[c(2,5)]-coef(lm(y~x,d)))^2)<1e-9) }) test_that("Multiple group II", { m <- baptize(lvm(y~x)) set.seed(1) d <- sim(m,100) ## Just a standard linear regression (single group) e <- estimate(list(m,m),list(d,d)) testthat::expect_identical(coef(e,type=2)[[1]],coef(e,type=2)[[2]]) testthat::expect_true(mean((coef(e,type=2)[[1]][1:2,1]-coef(lm(y~x,cbind(d,d))))^2)<1e-9) }) context("Missing data") test_that("Missing data analysis", { ## Random intercept model m <- lvm(c(y1,y2,y3)~x+u); latent(m) <- ~u set.seed(1) ## Missing on first two outcomes d <- makemissing(sim(m,200),p=0.3,cols=c("y1","y2")) e <- 
estimate(m,d,missing=TRUE) testthat::expect_true("lvm.missing"%in%class(e)) testthat::expect_true(sum(unlist(lapply(e$estimate$model$data,nrow)))==200) ## Convergence: g <- gof(e) testthat::expect_true(mean(score(e))<1e-3) testthat::expect_true(g$rankV==length(pars(e))) }) test_that("Multiple group, missing data analysis", { m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u m <- baptize(fixsome(m)) regression(m,u~x) <- NA covariance(m,~u) <- NA set.seed(1) ## Missing on all outcomes d1 <- makemissing(sim(m,500),cols=c("y1","y2"),p=0.3) d2 <- makemissing(sim(m,500),cols=c("y1","y2"),p=0.3) e <- estimate(list(m,m),list(d1,d2),missing=TRUE) g <- gof(e) testthat::expect_true(g$n==1000) testthat::expect_true(mean(score(e))<1e-3) testthat::expect_true(g$rankV==length(pars(e))) }) test_that("Multiple group, constraints", { m1 <- lvm(y ~ f(x,beta)+f(z,beta2)) m2 <- lvm(y ~ f(x,psi) + z) ## And simulate data from them set.seed(1) d1 <- sim(m1,100) d2 <- sim(m2,100) ## Add 'non'-linear parameter constraint constrain(m2,psi ~ beta2) <- function(x) x ## Add parameter beta2 to model 2, now beta2 exists in both models parameter(m2) <- ~ beta2 ee <- estimate(list(m1,m2),list(d1,d2)) m <- lvm(y1 ~ x1 + beta2*z1) regression(m) <- y2 ~ beta2*x2 + z2 d <- cbind(d1,d2); names(d) <- c(paste0(names(d1),1),paste0(names(d1),2)) e <- estimate(m,d) b1 <- coef(e,2,labels=TRUE)["beta2",1] b2 <- constraints(ee)[1] testthat::expect_true(mean((b1-b2)^2)<1e-5) ## "Multiple group, constraints (non-linear in x) m <- lvm(y[m:v] ~ 1) addvar(m) <- ~x parameter(m) <- ~a+b constrain(m,m~a+b+x) <- function(z) z[1]+z[2]*z[3] ee <- estimate(list(m,m),list(d1[1:5,],d1[6:10,])) b1 <- coef(lm(y~x,d1[1:10,])) b2 <- coef(ee)[c("a@1","b@1")] testthat::expect_true(mean(b1-b2)^2<1e-4) }) lava/tests/testthat/test-estimate_default.R0000644000176200001440000000356013520655354020632 0ustar liggesuserscontext("Inference") test_that("estimate.default", { m <- lvm(c(y1,y2)~x+z,y1~~y2) ## set.seed(1) d <- sim(m,20) dd <- 
mets::fast.reshape(d) l1 <- lm(y1~x+z,d) l2 <- lm(y2~x+z,d) ll <- merge(l1,l2) testthat::expect_equivalent(ll$coefmat[,1],c(coef(l1),coef(l2))) e1 <- estimate(l1) f1 <- estimate(l1,function(x) x^2, use=2) testthat::expect_true(coef(l1)["x"]^2==f1$coefmat[1]) e1b <- estimate(NULL,coef=coef(l1),vcov=vcov(estimate(l1))) e1c <- estimate(NULL,coef=coef(l1),iid=iid(l1)) testthat::expect_equivalent(vcov(e1b),vcov(e1c)) testthat::expect_equivalent(crossprod(iid(e1)),vcov(e1b)) f1b <- estimate(e1b,function(x) x^2) testthat::expect_equivalent(f1b$coefmat[2,,drop=FALSE],f1$coefmat) h1 <- estimate(l1,cbind(0,1,0)) testthat::expect_true(h1$coefmat[,5]==e1$coefmat["x",5]) ## GEE if (requireNamespace("geepack",quietly=TRUE)) { l <- lm(y~x+z,dd) g1 <- estimate(l,id=dd$id) g2 <- geepack::geeglm(y~x+z,id=dd$id,data=dd) testthat::expect_equivalent(g1$coefmat[,c(1,2,5)], as.matrix(summary(g2)$coef[,c(1,2,4)])) } ## Several parameters e1d <- estimate(l1, function(x) list("X"=x[2],"Z"=x[3])) testthat::expect_equivalent(e1d$coefmat,e1$coefmat[-1,]) testthat::expect_true(rownames(estimate(l1, function(x) list("X"=x[2],"Z"=x[3]),keep="X")$coefmat)=="X") testthat::expect_true(rownames(estimate(l1, labels=c("a"), function(x) list("X"=x[2],"Z"=x[3]),keep="X")$coefmat)=="a") a0 <- estimate(l1,function(p,data) p[1]+p[2]*data[,"x"], average=TRUE) a1 <- estimate(l1,function(p,data) p[1]+p[2]*data[,"x"]+p[3], average=TRUE) a <- merge(a0,a1,labels=c("a0","a1")) estimate(a,diff) testthat::expect_equivalent(estimate(a,diff)$coefmat,e1$coefmat[3,,drop=FALSE]) stack }) lava/tests/testthat/test-model.R0000644000176200001440000000743513520655354016420 0ustar liggesuserscontext("Model specification") test_that("Basic model building blocks", { m <- lvm(y[m]~x) covariance(m) <- y~z testthat::expect_true(covariance(m)$rel["z","y"]==1) testthat::expect_true(regression(m)$rel["x","y"]==1) ## Children parent,nodes testthat::expect_match(children(m,~x),"y") testthat::expect_match(parents(m,~y),"x") 
testthat::expect_equivalent(parents(m),vars(m)) testthat::expect_equivalent(children(m),vars(m)) ## Remove association cancel(m) <- y~z+x testthat::expect_true(covariance(m)$rel["z","y"]==0) testthat::expect_true(regression(m)$rel["x","y"]==0) ## Remove variable kill(m) <- ~x testthat::expect_equivalent(vars(m),c("y","z")) testthat::expect_true(intercept(m)["y"]=="m") m <- lvm(c(y1,y2,y3)~x) d <- sim(m,50) e <- estimate(m,d) ## Equivalence ##equivalence(e,silent=TRUE) ## formula f <- formula(m,all=TRUE) testthat::expect_true(length(f)==length(vars(m))) testthat::expect_true(all(unlist(lapply(f,function(x) inherits(x,"formula"))))) ## Parametrization m <- lvm(c(y1,y2,y3)~u) latent(m) <- ~u m2 <- fixsome(m,param=NULL) testthat::expect_true(all(is.na(regression(m2)$values))) m2 <- fixsome(m,param="relative") testthat::expect_true(regression(m2)$values["u","y1"]==1) testthat::expect_true(intercept(m2)[["y1"]]==0) m2 <- fixsome(m,param="hybrid") testthat::expect_true(regression(m2)$values["u","y1"]==1) testthat::expect_true(intercept(m2)[["u"]]==0) m2 <- fixsome(m,param="absolute") testthat::expect_true(all(is.na(regression(m2)$values))) testthat::expect_true(intercept(m2)[["u"]]==0) testthat::expect_true(covariance(m2)$values["u","u"]==1) ## Merge m1 <- lvm(c(y1,y2,y3)~1*u1[m1:v1]) latent(m1) <- ~u1 m2 <- lvm(c(y1,y2,y3)~2*u2[m2:v2]) latent(m2) <- ~u2 mm <- m1%++%m2 testthat::expect_true(covariance(mm)$labels["u1","u1"]=="v1") testthat::expect_true(intercept(mm)[["u2"]]=="m2") ## LISREL mm <- fixsome(mm) L <- lisrel(mm,rep(1,length(coef(mm)))) testthat::expect_equivalent(L$B,matrix(0,2,2)) testthat::expect_equivalent(L$Theta,diag(3)) testthat::expect_equivalent(L$Psi,diag(2)) }) test_that("Linear constraints", { m <- lvm(c(y[m:v]~b*x)) constrain(m,b~a) <- base::identity }) if (requireNamespace("graph",quietly = TRUE)) test_that("Graph attributes", { m <- lvm(y~x) g1 <- graph::updateGraph(plot(m,noplot=TRUE)) m1 <- graph2lvm(g1) testthat::expect_equivalent(m1$M,m$M) col 
<- "blue"; v <- "y" g1 <- lava::addattr(g1,"fill",v,col) testthat::expect_match(col,graph::nodeRenderInfo(g1)$fill[v]) nodecolor(m,v) <- "blue" g2 <- Graph(m,add=TRUE) testthat::expect_true(inherits(g2,"graph")) testthat::expect_match(col,graph::nodeRenderInfo(g2)$fill[v]) testthat::expect_match(addattr(g2,"fill")["y"],"blue") graph::graphRenderInfo(g2)$rankdir <- "LR" Graph(m) <- g2 testthat::expect_true(graph::graphRenderInfo(Graph(m))$rankdir=="LR") ## Labels labels(m) <- c(y="Y") addattr(Graph(m,add=TRUE),"label") testthat::expect_true(addattr(finalize(m),"label")[["y"]]=="Y") labels(g2) <- c(y="Y") testthat::expect_true(!is.null(graph::nodeRenderInfo(g2)$label["y"])) edgelabels(m,y~x) <- "a" testthat::expect_true(!is.null(edgelabels(finalize(m)))) }) test_that("Categorical variables", { m <- lvm() categorical(m,K=3,p=c(0.1,0.5)) <- ~x d1 <- simulate(m,10,seed=1) categorical(m,K=3) <- ~x d2 <- simulate(m,10,seed=1) testthat::expect_false(identical(d1,d2)) regression(m,additive=FALSE,y~x) <- c(0,-5,5) d <- simulate(m,100,seed=1) l <- lm(y~factor(x),d) testthat::expect_true(sign(coef(l))[2]==-sign(coef(l))[3]) }) lava/tests/testthat/test-sim.R0000644000176200001440000001377713520655354016116 0ustar liggesuserscontext("Simulation") test_that("Constrain, transform I", { m <- lvm(,~y+x) distribution(m,~x) <- sequence.lvm() transform(m,y~x) <- function(x) x with(sim(m,10),testthat::expect_equivalent(y,x)) m <- lvm(y~1,~x) distribution(m,~x) <- sequence.lvm() intercept(m,~y) <- "ym" covariance(m,~y) <- 0.001 constrain(m,ym~x) <- function(x) x d <- simulate(m,200) testthat::expect_true(mean((d$y-d$x)^2)<0.1) }) test_that("Missing", { m <- lvm(y~1) m <- Missing(m,y~1,r~x) set.seed(1) d <- simulate(m,1e3,seed=1) testthat::expect_equal(sum(d$r),sum(!is.na(d$y0))) g <- glm(r~x,data=d,family=binomial) testthat::expect_true(all.equal(coef(g),c(0,1),tolerance=0.2,check.attributes=FALSE)) }) test_that("sim.default I", { m <- lvm(y~x+e) distribution(m,~y) <- 0 
distribution(m,~x) <- uniform.lvm(a=-1.1,b=1.1) transform(m,e~x) <- function(x) (1*x^4)*rnorm(length(x),sd=1) onerun <- function(iter=NULL,...,n=2e3,b0=1,idx=2) { d <- sim(m,n,p=c("y~x"=b0)) l <- lm(y~x,d) res <- c(coef(summary(l))[idx,1:2], confint(l)[idx,], estimate(l,only.coef=TRUE)[idx,2:4]) names(res) <- c("Estimate","Model.se","Model.lo","Model.hi", "Sandwich.se","Sandwich.lo","Sandwich.hi") res } val <- sim(onerun,R=2,b0=1,n=10,messages=0,mc.cores=1) testthat::expect_true(nrow(val)==2) val <- sim(val,R=2,b0=1,n=10,type=0) ## append results testthat::expect_true(nrow(val)==4) s1 <- summary(val,estimate=c(1,1),confint=c(3,4,6,7),true=c(1,1),names=c("Model","Sandwich")) testthat::expect_true(length(grep("Coverage",rownames(s1)))>0) testthat::expect_equivalent(colnames(s1),c("Model","Sandwich")) val <- sim(onerun,R=2,cl=TRUE,seed=1,messages=0,mc.cores=2) testthat::expect_true(val[1,1]!=val[1,2]) onerun2 <- function(a,b,...) { return(cbind(a=a,b=b,c=a-1,d=a+1)) } R <- data.frame(a=1:2,b=3:4) dm <- capture.output(val2 <- sim(onerun2,R=R,messages=1,mc.cores=2)) testthat::expect_true(all(R-val2[,1:2]==0)) res <- summary(val2) testthat::expect_equivalent(res["Mean",],c(1.5,3.5,0.5,2.5)) testthat::expect_output(print(val2[1,]),"a b c d") testthat::expect_output(print(val2[1,]),"1 3 0 2") res <- summary(val2,estimate="a",se="b",true=1.5,confint=c("c","d")) testthat::expect_true(res["Coverage",]==1) testthat::expect_true(res["SE/SD",]==mean(val2[,"b"])/sd(val2[,"a"])) }) test_that("distributions", { m <- lvm(y1~x) distribution(m,~y1) <- binomial.lvm("probit") distribution(m,~y2) <- poisson.lvm() distribution(m,~y3) <- normal.lvm(mean=1,sd=2) distribution(m,~y3) <- lognormal.lvm() distribution(m,~y3) <- pareto.lvm() distribution(m,~y3) <- loggamma.lvm() distribution(m,~y3) <- weibull.lvm() distribution(m,~y3) <- chisq.lvm() distribution(m,~y3) <- student.lvm(mu=1,sigma=1) testthat::expect_output(print(distribution(m)$y2),"Family: poisson") 
testthat::expect_output(print(distribution(m)$y1),"Family: binomial") latent(m) <- ~u testthat::expect_output(print(m),"binomial\\(probit\\)") testthat::expect_output(print(m),"poisson\\(log\\)") ## Generator: m <- lvm() distribution(m,~y,TRUE) <- function(n,...) { res <- exp(rnorm(n)); res[seq(min(n,5))] <- 0 return(res) } d <- sim(m,10) testthat::expect_true(all(d[1:5,1]==0)) testthat::expect_true(all(d[6:10,1]!=0)) m <- lvm() distribution(m,~y,parname="a",init=2) <- function(n,a,...) { rep(1,n)*a } testthat::expect_true(all(sim(m,2)==2)) testthat::expect_true(all(sim(m,2,p=c(a=10))==10)) testthat::expect_equivalent(sim(m,2,p=c(a=10)),sim(m,2,a=10)) ## Multivariate distribution m <- lvm() rmr <- function(n,rho,...) rmvn0(n,sigma=diag(2)*(1-rho)+rho) distribution(m,~y1+y2,rho=0.9) <- rmr testthat::expect_equivalent(c("y1","y2"),colnames(d <- sim(m,5))) ## Special 'distributions' m <- lvm() distribution(m,~x1) <- sequence.lvm(int=TRUE) distribution(m,~x2) <- sequence.lvm(a=1,b=2) distribution(m,~x3) <- sequence.lvm(a=NULL,b=2) distribution(m,~x4) <- sequence.lvm(a=2,b=NULL) ex <- sim(m,5) testthat::expect_equivalent(ex$x1,1:5) testthat::expect_equivalent(ex$x2,seq(1,2,length.out=5)) testthat::expect_equivalent(ex$x3,seq(-2,2)) testthat::expect_equivalent(ex$x4,seq(2,6)) m <- lvm() distribution(m,~x1) <- ones.lvm() distribution(m,~x2) <- ones.lvm(p=0.5) distribution(m,~x3) <- ones.lvm(interval=c(0.4,0.6)) ex <- sim(m,10) testthat::expect_equivalent(ex$x1,rep(1,10)) testthat::expect_equivalent(ex$x2,c(rep(0,5),rep(1,5))) testthat::expect_equivalent(ex$x3,c(0,0,0,1,1,1,0,0,0,0)) m <- lvm() testthat::expect_error(distribution(m,~y) <- threshold.lvm(p=c(0.5,.75))) distribution(m,~y) <- threshold.lvm(p=c(0.25,.25)) set.seed(1) testthat::expect_equivalent(1:3,sort(unique(sim(m,200))[,1])) ## distribution(m,~y) <- threshold.lvm(p=c(0.25,.25),labels=letters[1:3]) ## testthat::expect_equivalent(c("a","b","c"),sort(unique(sim(m,200))[,1])) }) test_that("eventTime", { m <- 
lvm(eventtime~x) distribution(m,~eventtime) <- coxExponential.lvm(1/100) distribution(m,~censtime) <- coxWeibull.lvm(1/500) eventTime(m) <- time~min(eventtime=1,censtime=0) set.seed(1) d <- sim(m,100) testthat::expect_equivalent((d$time0) { ## At least major version 1 x <- transform(data.frame(lava:::rmvn0(1000,sigma=0.5*diag(2)+0.5)), X1=as.numeric(cut(X1,breaks=3))-1,X2=as.numeric(cut(X2,breaks=3))-1) m <- covariance(lvm(),X1~X2) ordinal(m,K=3,constrain=list("t1","t2")) <- ~X1 ordinal(m,K=3,constrain=list("t1","t2")) <- ~X2 ## e <- estimate(m,x) e <- estimate(list(m,m),list(x[1:500,],x[501:1000,]),estimator="normal") estimate(e) } } }) test_that("Multiple group constraints I", { m1 <- lvm(y[m:v] ~ f(x,beta)+f(z,beta2)) d1 <- sim(m1,500,seed=1); d2 <- sim(m1,500,seed=2) ##coef(estimate(m1,d1)) constrain(m1,beta2~psi) <- function(x) 2*x m2 <- lvm(y[m:v] ~ f(x,beta2) + z) constrain(m2,beta2~psi) <- function(x) 2*x mg <- multigroup(list(m1,m2),list(d1,d2)) ee <- estimate(mg) testthat::expect_true(length(coef(ee))==5) testthat::expect_equivalent(constraints(ee)[1],2*coef(ee)["psi@1"]) # Est testthat::expect_equivalent(constraints(ee)[2],2*coef(ee,2)[[1]]["psi",2]) # Std.Err }) test_that("Multiple group constraints II", { data("twindata",package="lava") twinwide <- reshape(twindata,direction="wide", idvar="id",timevar="twinnum") l <- lvm(~bw.1+bw.2) covariance(l) <- bw.1 ~ bw.2 e <- estimate(l,subset(twinwide,zyg.1=="MZ"),control=list(method="NR")) B <- cbind(1,-1); colnames(B) <- c("bw.1,bw.1","bw.2,bw.2") colnames(B) <- gsub(",",lava.options()$symbols[2],colnames(B)) lava::compare(e,contrast=B) B2 <- rbind(c(1,-1,0,0),c(0,0,1,-1)) colnames(B2) <- c("bw.1","bw.2","bw.1,bw.1","bw.2,bw.2") colnames(B2) <- gsub(",",lava.options()$symbols[2],colnames(B2)) lava::compare(e,contrast=B2) l <- lvm(~bw.1+bw.2) covariance(l) <- bw.1 ~ bw.2 intercept(l,~bw.1+bw.2) <- "m" covariance(l,~bw.1+bw.2) <- "s" covariance(l,bw.1~bw.2) <- "r1" l2 <- l covariance(l2,bw.1~bw.2) <- "r2" DZ <- 
subset(twinwide,zyg.1=="MZ") MZ <- subset(twinwide,zyg.1=="DZ") ## e <- estimate(l,MZ) ## e2 <- estimate(l2,DZ) parameter(l) <- ~r2 parameter(l2) <- ~r1 ee <- estimate(list(MZ=l,DZ=l2),list(MZ,DZ),control=list(method="NR",tol=1e-9,constrain=FALSE)) testthat::expect_true(mean(score(ee)^2)<1e-9) constrain(ee,h~r2+r1) <- function(x) 2*(x[1]-x[2]) ce <- constraints(ee) testthat::expect_equivalent(constraints(ee)[1],2*diff(coef(ee)[3:4])) testthat::expect_true(length(coef(ee))==4) testthat::expect_true(nrow(ce)==1) testthat::expect_true(all(!is.na(ce))) }) lava/tests/testthat/test-plot.R0000644000176200001440000001205513520655354016270 0ustar liggesuserscontext("Graphics functions") test_that("color", { cur <- palette() old <- lava:::mypal() testthat::expect_equivalent(col2rgb(cur),col2rgb(old)) testthat::expect_equivalent(col2rgb(palette()),col2rgb(lava:::mypal(set=FALSE))) testthat::expect_equivalent(Col("red",0.5),rgb(1,0,0,0.5)) testthat::expect_equivalent(Col(c("red","blue"),0.5),rgb(c(1,0),c(0,0),c(0,1),0.5)) testthat::expect_equivalent(Col(c("red","blue"),c(0.2,0.5)),rgb(c(1,0),c(0,0),c(0,1),c(0.2,0.5))) testthat::expect_equivalent(Col(rgb(1,0,0),0.5),rgb(1,0,0,0.5)) plot(0,xlim=c(0,1),ylim=c(0,1),type="n",ann=FALSE,axes=FALSE) devc1 <- devcoords() par(mar=c(0,0,0,0)) plot(0,xlim=c(0,1),ylim=c(0,1),type="n",ann=FALSE,axes=FALSE) devc2 <- devcoords() figx <- c("fig.x1","fig.x2","fig.y1","fig.y2") devx <- c("dev.x1","dev.x2","dev.y1","dev.y2") testthat::expect_equivalent(devc1[figx],devc2[devx]) }) if (requireNamespace("visualTest",quietly=TRUE)) { gropen <- function(resolution=200,...) { tmpfile <- tempfile(fileext=".png") png(file=tmpfile,width=200,height=200) res <- dev.cur() return(structure(tmpfile,dev=res)) } grcompare <- function(file1,file2,...) { res <- visualTest::isSimilar(file1,file2,...) 
unlink(c(file1,file2)) return(res) } test_that("plotConf", { set.seed(1) x <- rnorm(50) y <- rnorm(50,x) z <- rbinom(50,1,0.5) d <- data.frame(y,z,x) l <- lm(y~x*z) d1 <- gropen() par(mar=c(0,0,0,0)) plotConf(l,var1="x",var2="z",col=c("black","blue"),alpha=0.5,legend=FALSE) dev.off() newd <- data.frame(x=seq(min(x),max(x),length.out=100)) l0 <- lm(y~x,subset(d,z==0)) ci0 <- predict(l0,newdata=newd,interval="confidence") l1 <- lm(y~x,subset(d,z==1)) ci1 <- predict(l1,newdata=newd,interval="confidence") d2 <- gropen() par(mar=c(0,0,0,0)) plot(y~x,col=c("black","blue")[z+1],pch=16,ylim=c(min(ci0,ci1,y),max(ci0,ci1,y))) lines(newd$x,ci0[,1],col="black",lwd=2) lines(newd$x,ci1[,1],col="blue",lwd=2) confband(newd$x,lower=ci0[,2],upper=ci0[,3],polygon=TRUE,col=Col("black",0.5),border=FALSE) confband(newd$x,lower=ci1[,2],upper=ci1[,3],polygon=TRUE,col=Col("blue",0.5),border=FALSE) points(y~x,col=c("black","blue")[z+1],pch=16) dev.off() testthat::expect_true(grcompare(d1,d2,threshold=5)) d1 <- gropen() par(mar=c(0,0,0,0)) l <- lm(y~z) plotConf(l,var2="z",var1=NULL,jitter=0,col="black",alpha=0.5,xlim=c(.5,2.5),ylim=range(y)) dev.off() d2 <- gropen() par(mar=c(0,0,0,0)) plot(y~I(z+1),ylim=range(y),xlim=c(0.5,2.5),pch=16,col=Col("black",0.5)) l0 <- lm(y~-1+factor(z)) confband(1:2,lower=confint(l0)[,1],upper=confint(l0)[,2],lwd=3, center=coef(l0)) dev.off() testthat::expect_true(grcompare(d1,d2,threshold=10)) }) test_that("forestplot", { set.seed(1) K <- 20 est <- rnorm(K); est[c(3:4,10:12)] <- NA se <- runif(K,0.2,0.4) x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2)) rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse=""))) rownames(x)[which(is.na(est))] <- "" signif <- sign(x[,2])==sign(x[,3]) forestplot(x) ## TODO }) test_that("plot.sim", { onerun2 <- function(a,b,...) 
{ return(cbind(a=a,b=b,c=a-1,d=a+1)) } R <- data.frame(a=1:2,b=3:4) val2 <- sim(onerun2,R=R,type=0) plot(val2) plot(val2,plot.type="single") density(val2) ## TODO }) test_that("spaghetti", { K <- 5 y <- "y"%++%seq(K) m <- lvm() regression(m,y=y,x=~u) <- 1 regression(m,y=y,x=~s) <- seq(K)-1 regression(m,y=y,x=~x) <- "b" d <- sim(m,5) dd <- mets::fast.reshape(d); dd$num <- dd$num+rnorm(nrow(dd),sd=0.5) ## Unbalance spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4),trend=TRUE,trend.col="darkblue") ## TODO }) test_that("ksmooth", { ## TODO }) test_that("plot.lvm", { ## TODO m <- lvm(y~1*u[0:1],u~1*x) latent(m) <- ~u plot(m) d <- sim(m,20,seed=1) e <- estimate(m,d) plot(e) plot(lava:::beautify(m)) g <- igraph.lvm(m) testthat::expect_true(inherits(g,"igraph")) }) test_that("images", { ## TODO }) test_that("labels,edgelabels", { ## TODO }) test_that("colorbar", { ## TODO }) test_that("fplot", { ## TODO }) test_that("interactive", { ## TODO }) test_that("pdfconvert", { ## TODO }) test_that("plot.estimate", { ## TODO }) test_that("logo", { lava(seed=1) }) } lava/tests/testthat/test-graph.R0000644000176200001440000000107713520655354016415 0ustar liggesuserscontext("Inference") test_that("d-separation",{ m <- lvm(x5 ~ x4+x3, x4~x3+x1, x3~x2, x2~x1) testthat::expect_true(dsep(m,x5~x1|x3+x4)) testthat::expect_false(dsep(m,x5~x1|x2+x4)) testthat::expect_true(dsep(m,x5~x1|x2+x3+x4)) testthat::expect_false(dsep(m,~x1+x2+x3|x4)) testthat::expect_true(setequal(ancestors(m,~x5),setdiff(vars(m),"x5"))) testthat::expect_true(setequal(ancestors(m,~x1),NULL)) testthat::expect_true(setequal(descendants(m,~x5),NULL)) testthat::expect_true(setequal(descendants(m,~x1),setdiff(vars(m),"x1"))) }) lava/tests/testthat/test-inference.R0000644000176200001440000004144313520655354017253 0ustar liggesuserscontext("Inference") test_that("Effects",{ m <- lvm() regression(m) <- c(y1,y2,y3)~u; latent(m) <- ~u regression(m) <- c(z1,z2,z3)~v; latent(m) <- ~v regression(m) <- u~v regression(m) <- 
c(u,v,z3,y1)~x d <- sim(m,100,seed=1) start <- c(rep(0,6),rep(1,17)) suppressWarnings(e <- estimate(m,d,control=list(iter.max=0,start=start))) f <- summary(ef <- effects(e,y1~x))$coef testthat::expect_true(all(f[,2]>0)) ## Std.err testthat::expect_equal(f["Total",1],3) testthat::expect_equal(f["Direct",1],1) f2 <- summary(effects(e,u~v))$coef testthat::expect_equal(f2["Total",1],1) testthat::expect_equal(f2["Direct",1],1) testthat::expect_equal(f2["Indirect",1],0) testthat::expect_output(print(ef),"Mediation proportion") testthat::expect_equivalent(confint(ef)["Direct",], confint(e)["y1~x",]) testthat::expect_equivalent(totaleffects(e,y1~x)[,1:4],f["Total",]) ##g <- graph::updateGraph(plot(m,noplot=TRUE)) ##testthat::expect_equivalent(path(g,y1~x),path(m,y1~x)) }) test_that("Profile confidence limits", { m <- lvm(y~b*x) constrain(m,b~psi) <- identity set.seed(1) d <- sim(m,100,seed=1) e <- estimate(m, d) ci0 <- confint(e,3) ci <- confint(e,3,profile=TRUE) testthat::expect_true(mean((ci0-ci)^2)<0.1) }) test_that("IV-estimator", { m <- lvm(c(y1,y2,y3)~u); latent(m) <- ~u set.seed(1) d <- sim(m,100,seed=1) e0 <- estimate(m,d) e <- estimate(m,d,estimator="iv") ## := MLE testthat::expect_true(mean((coef(e)-coef(e0))^2)<1e-9) }) test_that("glm-estimator", { m <- lvm(y~x+z) regression(m) <- x~z distribution(m,~y+z) <- binomial.lvm("logit") set.seed(1) d <- sim(m,1e3,seed=1) head(d) e <- estimate(m,d,estimator="glm") c1 <- coef(e,2)[c("y","y~x","y~z"),1:2] c2 <- estimate(glm(y~x+z,d,family=binomial))$coefmat[,1:2] testthat::expect_equivalent(c1,c2) }) test_that("gaussian", { m <- lvm(y~x) d <- simulate(m,100,seed=1) S <- cov(d[,vars(m),drop=FALSE]) mu <- colMeans(d[,vars(m),drop=FALSE]) f <- function(p) lava:::gaussian_objective.lvm(p,x=m,S=S,mu=mu,n=nrow(d)) g <- function(p) lava:::gaussian_score.lvm(p,x=m,n=nrow(d),data=d,indiv=TRUE) s1 <- numDeriv::grad(f,c(0,1,1)) s2 <- g(c(0,1,1)) testthat::expect_equal(s1,-colSums(s2),tolerance=0.1) }) test_that("Association 
measures", { P <- matrix(c(0.25,0.25,0.25,0.25),2) a1 <- lava:::assoc(P) testthat::expect_equivalent(-log(0.25),a1$H) testthat::expect_true(with(a1, all(c(kappa,gamma,MI,U.sym)==0))) p <- lava:::prob.normal(sigma=diag(nrow=2),breaks=c(-Inf,0),breaks2=c(-Inf,0))[1] testthat::expect_equal(p[1],0.25) ## q <- qnorm(0.75) ## m <- ordinal(lvm(y~x),~y, K=3)#, breaks=c(-q,q)) ## normal.threshold(m,p=c(0,1,2)) }) test_that("equivalence", { m <- lvm(c(y1,y2,y3)~u,u~x,y1~x) latent(m) <- ~u d <- sim(m,100,seed=1) cancel(m) <- y1~x regression(m) <- y2~x e <- estimate(m,d) ##eq <- equivalence(e,y1~x,k=1) dm <- capture.output(eq <- equivalence(e,y2~x,k=1)) testthat::expect_output(print(eq),paste0("y1",lava.options()$symbol[2],"y3")) testthat::expect_true(all(c("y1","y3")%in%eq$equiv[[1]][1,])) }) test_that("multiple testing", { testthat::expect_equivalent(lava:::holm(c(0.05/3,0.025,0.05)),rep(0.05,3)) ci1 <- scheffe(l <- lm(1:5~c(0.5,0.7,1,1.3,1.5))) ci2 <- predict(l,interval="confidence") testthat::expect_equivalent(ci1[,1],ci2[,1]) testthat::expect_true(all(ci1[,2]ci2[,3])) }) test_that("modelsearch and GoF", { m <- lvm(c(y1,y2)~x) d <- sim(m,100,seed=1) e <- estimate(lvm(c(y1,y2)~1,y1~x),d) e0 <- estimate(lvm(c(y1,y2)~x,y1~~y2),d) s1 <- modelsearch(e,silent=TRUE,type="correlation") testthat::expect_true(nrow(s1$res)==2) s1b <- modelsearch(e,silent=TRUE,type="regression") testthat::expect_true(nrow(s1b$res)==4) s2 <- modelsearch(e0,silent=TRUE,dir="backward") testthat::expect_true(nrow(s2$res)==3) e00 <- estimate(e0,vcov=vcov(e0))$coefmat ii <- match(s2$res[,"Index"],rownames(e00)) testthat::expect_equivalent(e00[ii,5],s2$test[,2]) s3 <- modelsearch(e0,dir="backward",k=3) testthat::expect_true(nrow(s3$res)==1) ee <- modelsearch(e0,dir="backstep",messages=FALSE) testthat::expect_true(inherits(ee,"lvm")) ## TODO gof(e,all=TRUE) r <- rsq(e)[[1]] testthat::expect_true(abs(summary(lm(y1~x,d))$r.square-r["y1"])<1e-5) }) test_that("Bootstrap", { y <- rep(c(0,1),each=5) x <- 1:10 e <- 
estimate(y~x,lvm=TRUE) B1 <- bootstrap(e,R=2,silent=TRUE,mc.cores=1,sd=TRUE) B2 <- bootstrap(e,R=2,silent=TRUE,bollenstine=TRUE,mc.cores=1) testthat::expect_false(B1$bollenstine) testthat::expect_true(B2$bollenstine) testthat::expect_true(nrow(B1$coef)==2) testthat::expect_output(print(B1),"Standard errors:") dm <- capture.output(B3 <- bootstrap(e,R=2,fun=function(x) coef(x)[2]^2+10)) testthat::expect_true(all(mean(B3$coef)>10)) y <- rep(c(0,1),each=5) x <- 1:10 m <- lvm(y~b*x) constrain(m,alpha~b) <- function(x) x^2 e <- estimate(m,data.frame(y=y,x=x)) b <- bootstrap(e,R=1,silent=TRUE) testthat::expect_output(print(b),"alpha") }) test_that("Survreg", { m <- lvm(y0~x) transform(m,y~y0) <- function(x) pmin(x[,1],2) transform(m,status~y0) <- function(x) x<2 d <- simulate(m,100,seed=1) require('survival') m <- survreg(Surv(y,status)~x,data=d,dist='gaussian') s <- score(m) testthat::expect_true(length(pars(m))==length(coef(m))+1) testthat::expect_true(abs(attr(score(m,pars(m)),'logLik')-logLik(m))<1e-9) testthat::expect_true(mean(colSums(s)^2)<1e-6) testthat::expect_equivalent(vcov(m),attr(s,'bread')) }) test_that("Combine", { ## Combine model output data(serotonin) m1 <- lm(cau ~ age*gene1 + age*gene2,data=serotonin) m2 <- lm(cau ~ age + gene1,data=serotonin) cc <- Combine(list('model A'=m1,'model B'=m2),fun=function(x) c(R2=format(summary(x)$r.squared,digits=2))) testthat::expect_true(nrow(cc)==length(coef(m1))+1) testthat::expect_equivalent(colnames(cc),c('model A','model B')) testthat::expect_equivalent(cc['R2',2],format(summary(m2)$r.squared,digits=2)) }) test_that("zero-inflated binomial regression (zib)", { set.seed(1) n <- 1e3 x <- runif(n,0,20) age <- runif(n,10,30) z0 <- rnorm(n,mean=-1+0.05*age) z <- cut(z0,breaks=c(-Inf,-1,0,1,Inf)) p0 <- lava::expit(model.matrix(~z+age) %*% c(-.4, -.4, 0.2, 2, -0.05)) y <- (runif(n)0 df <- function(x) 2*x*log(x) + x df2 <- function(x) 2*log(x) + 3 op <- NR(5,f,df,df2,control=list(tol=1e-40)) ## Find root 
testthat::expect_equivalent(round(op$par,digits=7),.6065307) op2 <- estfun0(5,gradient=df) op3 <- estfun(5,gradient=df,hessian=df2,control=list(tol=1e-40)) testthat::expect_equivalent(op$par,op2$par) testthat::expect_equivalent(op$par,op3$par) }) if (requireNamespace("nlme",quietly = TRUE)) test_that("Prediction with missing data, random intercept", { ## Random intercept model m <- lvm(c(y1,y2,y3)~u[0]) latent(m) <- ~u regression(m) <- y1~x1 regression(m) <- y2~x2 regression(m) <- y3~x3 d <- simulate(m,1e3,seed=1) dd <- reshape(d,varying=list(c('y1','y2','y3'),c('x1','x2','x3')),direction='long',v.names=c('y','x')) ##system.time(l <- lme4::lmer(y~x+(1|id), data=dd, REML=FALSE)) system.time(l <- nlme::lme(y~x,random=~1|id, data=dd, method="ML")) m0 <- lvm(c(y1[m:v],y2[m:v],y3[m:v])~1*u[0]) latent(m0) <- ~u regression(m0,y=c('y1','y2','y3'),x=c('x1','x2','x3')) <- rep('b',3) system.time(e <- estimate(m0,d)) mytol <- 1e-6 mse <- function(x,y=0) mean(na.omit(as.matrix(x)-as.matrix(y))^2) testthat::expect_true(mse(logLik(e),logLik(l))500)]) d0 <- transform(datasets::CO2,conc500=conc>500) t1 <- by(d0[,"uptake"],d0[,c("Treatment","Type","conc500")],mean) t2 <- By(datasets::CO2,~Treatment+Type+I(conc>500),colMeans,~uptake) testthat::expect_true(inherits(t2,"array")) testthat::expect_equivalent(sort(t2),sort(t1)) }) test_that("Expand", { dd <- Expand(iris, Sepal.Length=2:8, Species=c("virginica","setosa")) testthat::expect_identical(levels(iris$Species),levels(dd$Species)) testthat::expect_true(nrow(dd)==14) d0 <- datasets::warpbreaks[,c("wool","tension")] T <- table(d0) d1 <- Expand(T) testthat::expect_identical(dim(d0),dim(d1)) testthat::expect_identical(table(d1),T) testthat::expect_identical(expand.grid(1:2,1:2),Expand(1:2,1:2)) testthat::expect_identical(expand.grid(a=1:2,b=1:2),Expand(a=1:2,b=1:2)) }) test_that("formulas", { f <- toformula(c('y1','y2'),'x'%++%1:5) ff <- getoutcome(f) testthat::expect_equivalent(trim(ff,all=TRUE),"c(y1,y2)") 
testthat::expect_true(length(attr(ff,'x'))==5) }) test_that("trim", { testthat::expect_true(length(grep(" ",trim(" test ")))==0) testthat::expect_true(length(gregexpr(" ",trim(" t e s t "))[[1]])==3) testthat::expect_true(length(grep(" ",trim(" t e s t ",all=TRUE)))==0) }) test_that("Matrix operations:", { ## vec operator testthat::expect_equivalent(vec(diag(3)),c(1,0,0,0,1,0,0,0,1)) testthat::expect_true(nrow(vec(diag(3),matrix=TRUE))==9) ## commutaion matrix A <- matrix(1:16 ,ncol=4) K <- commutation(A) testthat::expect_equivalent(K%*%as.vector(A),vec(t(A),matrix=TRUE)) ## Block diagonal A <- diag(3)+1 B <- blockdiag(A,A,A,pad=NA) testthat::expect_equivalent(dim(B),c(9,9)) testthat::expect_true(sum(is.na(B))==81-27) }) test_that("wrapvev", { testthat::expect_equivalent(wrapvec(5,2),c(3,4,5,1,2)) testthat::expect_equivalent(wrapvec(seq(1:5),-1),c(5,1,2,3,4)) }) test_that("matrix functions", { A <- revdiag(1:3) testthat::expect_equivalent(A,matrix(c(0,0,1,0,2,0,3,0,0),3)) testthat::expect_equivalent(1:3,revdiag(A)) revdiag(A) <- 4 testthat::expect_equivalent(rep(4,3),revdiag(A)) diag(A) <- 0 offdiag(A) <- 5 testthat::expect_true(sum(offdiag(A))==6*5) A <- matrix(0,3,3) offdiag(A,type=3) <- 1:6 B <- crossprod(A) testthat::expect_equivalent(solve(A),Inverse(A)) testthat::expect_equivalent(det(B),attr(Inverse(B,chol=TRUE),"det")) }) test_that("All the rest", { testthat::expect_false(lava:::versioncheck(NULL)) testthat::expect_true(lava:::versioncheck("lava",c(1,4,1))) op <- lava.options(debug=TRUE) testthat::expect_true(lava.options()$debug) lava.options(op) A <- diag(2); colnames(A) <- c("a","b") testthat::expect_output(printmany(A,A,2,rownames=c("A","B"),bothrows=FALSE),"a b") testthat::expect_output(printmany(A,A[1,,drop=FALSE],2,rownames=c("A","B"),bothrows=FALSE),"a b") testthat::expect_output(printmany(A,A,2,rownames=c("A","B"),name1="no.1",name2="no.2", bothrows=TRUE),"no.1") ##printmany(A,A,2,name1="no.1",name2="no.2",bothrows=T) }) 
lava/NEWS0000644000176200001440000004133313520655354011710 0ustar liggesusers# -*- mode: org -*- * Version 1.6.6 <2019-08-01 Thu> - Weighted kmeans++ (wkm). Gaussian mixture models (mvnmix) are now initialized by default using kmeans++. - sim method implemented for mvnmix models. - Bug fix: Newton-Raphson method (lava::NR) used a numerical approximation of the Hessian even when submitted as attribute to the objective function. * Version 1.6.5 <2019-02-07 Thu> - Maintenance release. * Version 1.6.4 <2018-11-18 Mon> - New simulation distributions: constant relative risk and risk difference models as in Richardson, Robins and Wang, 2017): binomial.rd, binomial.rr. Base on new hook 'simulate.multiple.inputs' which allows the distribution to depend non-linearly on multiple different input variables. - sim.lvm: 'X' argument can now fix (manipulate) any variable and not only exogenous variables. - Summary function for sim.default updated (the 'estimate' argument can now be a list with each element being the estimate position and optionally standard error and true value). - Starting values updated for mixture models. The parameter names can be obtained with mixture(...,names=TRUE) and set with mixture(...,control=list(start=...))). - Naming conventions for multigroup parameters: 'par@g' (par: name of parameter, g: first group number where 'par' is observed). Starting values can be specified with estimate(...,control(list(start=...))). - New print and summary methods for mixture models. - Renamed (weighted) K-means function 'km' to 'wkm'. - Derivative method deriv.function based on complex step derivatives. - twostageCV: estimation of mixture models are now parallelized if mc.cores>1. 
* Version 1.6.3 <2018-08-03 Fri> - Fixed problems with plots (Rgraphviz) - Better print method for twostageCV - Improved M-step in mixture.method * Version 1.6.2 <2018-07-02 Mon> - twostageCV: cross-validate two-stage estimator - rmvn, dmvn moved to mets package (C++ implementation, old versions renamed to lava::rmvn0, lava::dmvn0) - mediation proportion handled correctly when direct effect is zero - unit tests clean-up (namespace) - merge.lvm now correctly handles fixed covariance parameters * Version 1.6.1 <2018-03-28 Wed> - Newton-raphson algorithm made more robust. - New sim.as method. plot.sim method now by default only plots density estimates - Compatibility fix with Matrix library * Version 1.6.0 <2018-01-11 Thu> - Mixture Latent variable models (mixture). Fast version requires 'mets' packages; Gaussian mixture models (mvnmix); weighted k-means (km) - estimate.default: 'keep', 'use' arguments can be specified as regular expressions (with argument regex=TRUE). Summary method now returns Wald test (null: all parameters being zero). - makemissing: seed argument added. - Global change: 'silent' argument renamed to 'messages' - New utility functions: Grep, Na2x, x2NA, wait, waitclick, rotation, Rot2d, Rot3d - Condition numbers calculated via SVD - na.pass0: returns data.frame with original number of rows but with zeros (or first level of factors) in the rows with missing data. - stack: 'weights' argument renamed to 'propensity'. If propensity=TRUE, the first argument (model) will be treated as propensity score model (glm) and 'predict' method will be used for the predictions. - estimate.formula now by default wraps glm such that 'iid' method return matrix of same size as full data (with zero rows where data are missing). - Updated output functions for class 'sim' (print method and plot).. Plot method: density.alpha is applied to each standard error ('se') level. - composite likelihood (complik) refactored + new example. 
'ordinal' method now cleans up properly when variables are removed (rmvar, subset). - twostage: fixed for mixture model (class 'lvm.mixture'). New help page + examples. Predict function updated (newdata argument where covariate levels can be specified). * Version 1.5.1 <2017-09-25 Mon> - conformal predictions: confpred - warnings (char2num used instead of coersion via as.numeric) - %++% for function compositon - New summary.effects methods with mediation proportion in the output - New hook: remove.hooks (see example ordinal.lvm) - constrain methods now handled more robustly in sim.lvm allowing both vectorized and non-vectorized functions - Non-linear associations can now be specified with 'nonlinear' method. Estimation via the 'twostage' function. - Robust standard errors added to the IV estimator (2SLS) - New cross-validation function: cv (and csplit function for creating random sets). * Version 1.5.0 <2017-03-16 Thu> - lava.tobit is longer required for ordinal and censored responses. Default is now to use the implementation in the 'mets' package. - Composite likelihood method (complik) updated - weight argument renamed to weights in agreement with lm, glm, coxph, ... - sim.default: new argument 'arg' passed on to simulation function - sim.default: new argument 'iter'. If TRUE the iteration number is passed to function call as first argument (default FALSE) - estimate.default: Wildcards/global expressions can now be used for specifying contrasts based on the syntax of the functions 'contr', 'parsedesign'. See examples on the help-page. The argument transform.ci has been renamed to back.transform. - correlation methods for matrices and data.frames (either pairwise or full MLE). All methods can now return the influence functions. 
- revdiag: dimnames are kept - Combine: output updated - forestplot: point estimates shown by default - backdoor now works without conditioning set (yields all possible conditioning sets) - New formula syntax: y+x~v+z same as c(y,x)~v+z - spaghetti: trend.formula can now contain a factor statement on the rhs * Version 1.4.7 <2017-01-26 Wed> - Maintenance release - models can now be specified as y1+y2~x1+x2 instead of c(y1,2y)~x1+x2 - sim method now has a seed argument * Version 1.4.6 <2016-12-14 Wed> - New backtrace algorithms for Newton-Raphson optimization routine. - 'diagtest' updated. * Version 1.4.5 <2016-10-25 Tue> - New graph functions: dsep: check for d-separation (conditional independence). backdoor: check backdoor criterion of a graph (lvm-object). adjMat: return adjaceny matrix. edgeList: return edge list. ancestors: return ancenstors of nodes. descendants: return descendants of nodes. - All simple paths in a graph can now be extracted with: path(...,all=TRUE) - Covariance parameters are now reference with ~~ instead of ,. Applies to setting starting values in 'estimate', parameters in 'sim','compare','estimate',... To use the old syntax set 'lava.options(symbol=c("~",","))' - 'layout' argument added to lava.options (default 'dot') - visNetwork support, new 'plot.engine' argument added to plot methods. - bootstrap.lvmfit now default returns original estimates. - print, transform methods updated (transform output). - '+' operator overloaded for lvm and estimate objects (merge). - New composite likelihood function: complik. - New functions for simple association measures: riskcomp, rdiff, rratio,... - New argument 'latent' in simulate method. If FALSE the latent variables are dropped from the returned data.frame. - modelsearch by default now shows both directional or undirectional associations (type='all' vs type='cor'). - sim.default now stores timings. New print functions (data.table like output). 
- lvm model can now be updated with the 'sim' function, for instance setting parameter values for the simulation only once: m <- sim(m,p=p,...), with faster subsequent calls sim(m,n=n). - estimate.default can now simulate p-values ('R' argument). Returns an object which can also be used as input for 'estimate'. - Bug fixes: NR optimization with back-tracing; fixed matrices.lvm when called without variance parameters; fixed a bug in r-square computations. - Contrast matrix can be specified with the function 'contr'. * Version 1.4.4 <2016-08-13 Sat> - estimate.default will now use the id-variable of an 'estimate' object if the 'id' argument is left unspecified. For multinomial,gkgamma,kappa additional arguments (...) are now parsed on the 'estimate.default' (including id). - Updated print/summary methods for 'estimate.default'. Sample/cluster-size added to output. - Code clean-up and optimization. Smarter calculations of kronecker products, and some regular expressions updated. - New function 'predictlvm' which return jacobian. - Intercepts can now be specified via parantheses, e.g., y ~ (-2) + x - 'getoutcome' with sep argument for splitting '|' statements in formulas. - Partial gamma, gkgamma, updated (probability interpretation, homogeneity tests removed) - 'moments' function now returns conditional mean with multiple rows. Side effect fixed across multiple functions - twostage function with support for mixture models - Beta (Beta.lvm) and Finite Gaussian (GM2.lvm,GM3.lvm) Mixtures added. - 'sim': parameters can now be specified as part of '...' - summary.sim: calculate Wald CI if confint=TRUE, otherwise use the user supplied confidence limits. - Clopper-pearson intervals and exact binomial tests added to 'diagtest'. - Interval censoring with 'normal' estimator, which now also works with 'binary' definitions. - default plot style updated. 
* Version 1.4.3 <2016-04-11 Mon> - partial gamma coefficients (gkgamma) - Unit tests works with new testthat version - Avoid trying to fork new processes on windows (bootstrap,sim.default) * Version 1.4.2 <2016-04-05 Wed> - Code optimization and minor bug fixes - Travis-CI, unit-tests - glm estimator update (censored regression) - polychoric correlations (pcor) - New utility functions: wrapvec, offdiag - simulation: regression design on parameters (see weibull + variance hetereogeneity example in help('sim')) - Byte compile by default * Version 1.4.1 <2015-06-13 Sat> - New plot.estimate method - Documentation and examples updated * Version 1.4.0 <2015-02-15 Sun> - Linear measurement error model: 'measurement.error' - Diagnostic tests: 'diagtest' - 'plotConf' updated with support for special function terms (I, poly, ns, ...). Old version is available (not in namespace) as lava:::plotConf0 - Pareto distribution: 'pareto.lvm' - Code clean-up/optimization: 'EventTime', 'stack' - 'estimate.default' new syntax for contrast specification (parsedesign) - 'regression.lvm' with y,x argument (as alias for to,from) - plot longitudinal data: 'spaghetti' - Examples updated * Version 1.3.0 <2014-11-18 Tue> - New syntax for categorical predictors (method 'categorical' and argument 'additive=FALSE' with 'regression method) - Argument 'intervals' added to 'ones.lvm' for piece-wise constant effects - Argument 'average=TRUE' now needed for empirical averages in estimate.default - Fixed a bug in score.glm (with weights and offset) introduced in version 1.2.6 - estimate.default: - small-sample corrections - Default id from row names in estimate.default (used with merge method) - iid decompostion also returned for hypothesis contrasts - keep argument added to estimate.default and merge - labels argument added to estimate.default - 'images' function for visualization of tabular data added to namespace - 'ksmooth' and 'surface' for surface estimation and visualization of bivariate data and 
functions - 'dsort': Sort data.frames - general multivariate distributions in simulations. see example in 'sim' - 'or2prob', 'tetrachoric' for conversion from OR to probabilities (and tetrachoric correlations). 'prob.normal': calculates probabilities from threshold model given thresholds and variance See also mets:::assoc for calculations of kappa, gamma, uncer.coef. 'normal.threshold': returns thresholds,variance,mu from model with categorical outcomes. - Multiple testing routines: closed.testing, p.correct, ... - 'Missing' method updated with a simple 'suffix' argument - Back-tracing updated in Newton-Raphson routine * Version 1.2.6 <2014-05-07 Wed> - New 'stack' function for two-stage estimation (via 'estimate' objects) - New 'blocksample' function for resampling clustered data. - New function 'Missing' to generate complex missing data patterns - Weibull parametrization of 'coxWeibull.lvm' rolled back (ver. 1.2.4). The function 'weibull.lvm' now leads to Accelerated Failure Time model (see examples of 'eventTime') - iid function cleanup (new 'bread' attribute). iid.glm now gives correct estimated influence functions for 'quasi' link (constant variance) - Parameter constraints on (co)variance parameters now possible with the syntax lvm(...,y~~a*x) (corresponding to covariance(...,y~x)<-"a") - Some additional utilities: pdfconvert, scheffe, images, click. confband updated with 'polygon' argument. - New function getMplus: Import results from Mplus - New function getSAS: Import SAS ODS - New 'edgecolor' argument of plot-function * Version 1.2.5 <2014-03-13 Thu> - 'merge' method added for combining 'estimate' objects - Adjustments to starting values - Function 'categorical' for adding categorical predictors to simulation model - Improved flexibility in simulations with 'transform','constrain' (ex: categorical predictors) - Added 'dataid' argument to estimate.default allowing different id for 'data' and i.i.d. decomposition of model parameter estimates. 
With the argument 'stack=FALSE' influence functions within clusters will not be stacked together. - R-squared values (+ approximate standard errors/i.i.d. decomposition) via 'rsq(model,TRUE)' - New infrastructure for adding additional parameters to models (no user-visible changes). - multinomial function for calculating influence curves for multinomial probabilities. 'gammagk' and 'kappa' methods for calculating Goodman-Kruskals gamma and Cohens kappa coefficients. - ordreg function for univariate ordinal regression models - iid methods for data.frames/matrices (empirical mean and variance) - Support for using 'mets::cluster.index' in GEE-type models (much faster). - plotConf updated (vcov argument added and more graphical arguments parsed to plotting functions) - Additional unit-tests implemented - New 'forestplot' and 'Combine' functions - Covariance structure may now be specified using '~~', e.g. 'lvm(c(y,v)~~z+u)' specifies correlation between residuals of (y,z),(y,u),(v,z),(v,u). * Version 1.2.4 <2013-12-01 Sun> - Avoid estimating IC in 'estimate.default' when 'vcov' argument is given. - New default starting values - Time-varying effects via 'timedep' - R-squared added to summary - alias: covariance->variance - added size argument to binomial.lvm; * Version 1.2.3 <2013-10-27 Sun> - 'subset' argument added to estimate.default. Calculates empirical averages conditional on subsets of data - Improved output from compare/estimate functions - Minor bug fixes (plot, predict) - sim: Piecewise constant rates with coxEponential.lvm. New aalenExponential.lvm function for additive models. Functions ones.lvm and sequence.lvm for deterministic variables. * Version 1.2.2 <2013-07-10 Wed> - Regression parameters are now by default referenced using '~', e.g. "y~x" instead of "y<-x". Applies to setting starting values in 'estimate', parameters in 'sim','compare','estimate',.... 
To use the old syntax set 'lava.options(symbol=c("<-","<->"))' - Newton-Raphson/scoring procedure updated - Search-interval for profile likelihood CI improved (for variance parameters) - 'estimate.default' updated (LRT) - 'iid' updated (variance now obtained as tensor product of the result) - progress bar for 'bootstrap' and 'modelsearch' - various minor bug fixes - new functions: Expand (expand.grid wrapper), By (by wrapper) * Version 1.2.1 <2013-05-10 Fri> - Optimization + minor bug fixes * Version 1.2.0 <2013-03-28 Thu> - New method 'iid' for extracting i.i.d. decomposition (influence functions) from model objects (e.g. glm, lvm, ...) - Method 'estimate' can now be used on model objects to transform parameters (Delta method) or conduct Wald tests. Average effects, i.e. averaging functionals over the empirical distribution is also possible including calculation of standard errors. - 'curereg' function for estimating mixtures of binary data. - Instrumental Variable (IV) estimator (two-stage least-squares) optimized. - New distributions: Gamma.lvm, coxWeibull.lvm, coxExponential.lvm, coxGompertz.lvm. New method 'eventTime' (for simulation of competing risks data) lava/R/0000755000176200001440000000000013520655354011406 5ustar liggesuserslava/R/residuals.R0000644000176200001440000000224013520655354013522 0ustar liggesusersIsqrt <- function(X) { eX <- eigen(X); with(eX, vectors %*% diag(1/sqrt(values),nrow=length(values)) %*% t(vectors)) } ##' @export residuals.multigroupfit <- function(object,data=model.frame(object),p=coef(object), k, ...) { pp <- modelPar(object,p,...) if (!missing(k)) return(residuals(object$model$lvm[[k]],data=data[[k]],p=pp$p[[k]],...)) res <- c() for (i in seq(length(pp$p))) { res <- c(res, list(residuals(object$model$lvm[[i]],data=data[[i]],p=pp$p[[i]],...))) } return(res) } ##' @export residuals.lvmfit <- function(object,data=model.frame(object),p=coef(object),...) { residuals(Model(object), data=data, p=p, ...) 
} ##' @export residuals.lvm <- function(object,data=model.frame(object),std=FALSE,p=coef(object),...) { Y <- setdiff(manifest(object), exogenous(object)) Pr <- predict(object,p=p,data=data) PrY <- Pr[,Y,drop=FALSE] ## y <- endogenous(object)[match(endogenous(object),manifest(object))] r <- as.matrix(data[,Y,drop=FALSE]-(PrY)) res <- r if (std) { S <- attributes(Pr)$cond.var; if (length(Y)>1) { res <- r%*%Isqrt(S) } else res <- 1/sqrt(S[1,1])*r } colnames(res) <- colnames(r) res } lava/R/iid.R0000644000176200001440000001004013520655354012271 0ustar liggesusers##' Extract i.i.d. decomposition (influence function) from model object ##' ##' Extract i.i.d. decomposition (influence function) from model object ##' @export ##' @usage ##' ##' iid(x,...) ##' ##' \method{iid}{default}(x,bread,id=NULL,folds=0,maxsize=(folds>0)*1e6,...) ##' ##' @aliases iid.default ##' @param x model object ##' @param id (optional) id/cluster variable ##' @param bread (optional) Inverse of derivative of mean score function ##' @param folds (optional) Calculate aggregated iid decomposition (0:=disabled) ##' @param maxsize (optional) Data is split in groups of size up to 'maxsize' (0:=disabled) ##' @param ... additional arguments ##' @examples ##' m <- lvm(y~x+z) ##' distribution(m, ~y+z) <- binomial.lvm("logit") ##' d <- sim(m,1e3) ##' g <- glm(y~x+z,data=d,family=binomial) ##' crossprod(iid(g)) ##' iid <- function(x,...) UseMethod("iid") ##' @export iid.default <- function(x,bread,id=NULL,folds=0,maxsize=(folds>0)*1e6,...) { if (!any(paste("score",class(x),sep=".") %in% methods("score"))) { warning("Not available for this class") return(NULL) } if (folds>0 || maxsize>0 || (!missing(id) && lava.options()$cluster.index)) { if (!requireNamespace("mets",quietly=TRUE)) stop("Requires 'mets'") } if (folds>0) { U <- Reduce("rbind",mets::divide.conquer(function(data) score(x,data=data,...), id=id, data=data,size=round(nrow(data)/folds))) } else { U <- score(x,indiv=TRUE,...) 
} pp <- pars(x) if (!missing(bread) && is.null(bread)) { bread <- suppressWarnings(vcov(x)) } if (missing(bread)) bread <- attributes(U)$bread if (is.null(bread)) { bread <- attributes(x)$bread if (is.null(bread)) bread <- x$bread if (is.null(bread)) { if (maxsize>0) { ff <- function(p) colSums(Reduce("rbind",mets::divide.conquer(function(data) score(x,data=data,p=p,...), data=data,size=maxsize))) I <- -numDeriv::jacobian(ff,pp,method=lava.options()$Dmethod) } else { I <- -numDeriv::jacobian(function(p) score(x,p=p,indiv=FALSE,...),pp,method=lava.options()$Dmethod) } bread <- Inverse(I) } } iid0 <- U%*%bread if (!missing(id)) { N <- nrow(iid0) if (!lava.options()$cluster.index) { iid0 <- matrix(unlist(by(iid0,id,colSums)),byrow=TRUE,ncol=ncol(bread)) } else { iid0 <- mets::cluster.index(id,mat=iid0,return.all=FALSE) } attributes(iid0)$N <- N } colnames(iid0) <- colnames(U) return(structure(iid0,bread=bread)) } ##' @export iid.multigroupfit <- function(x,...) iid.default(x,combine=TRUE,...) ##' @export iid.matrix <- function(x,...) { p <- NCOL(x) n <- NROW(x) mu <- colMeans(x,na.rm=TRUE); S <- var(x,use="pairwise.complete.obs")*(n-1)/n iid1 <- t(t(x)-mu) iid2 <- matrix(ncol=(p+1)*p/2,nrow=n) pos <- 0 nn <- c() cc <- mu for (i in seq(p)) for (j in seq(i,p)) { pos <- pos+1 cc <- c(cc,S[i,j]) iid2[,pos] <- (iid1[,i]*iid1[,j])-cc[length(cc)] nn <- c(nn,paste(colnames(x)[c(i,j)],collapse=lava.options()$symbols[2])) } colnames(iid1) <- colnames(x); colnames(iid2) <- nn names(cc) <- c(colnames(iid1),colnames(iid2)) iid1[is.na(iid1)] <- 0 iid2[is.na(iid2)] <- 0 structure(cbind(iid1/n,iid2/n), coef=cc, mean=mu, var=S) } ##' @export iid.numeric <- function(x,...) { n <- length(x) mu <- mean(x); S <- var(x)*(n-1)/n iid1 <- t(t(x)-mu) structure(cbind(mean=iid1/n,var=(iid1^2-S)/n),coef=c(mean=mu,var=S),mean=mu,var=S) } ##' @export iid.data.frame <- function(x,...) 
{ if (!all(apply(x[1,,drop=FALSE],2,function(x) inherits(x,c("numeric","integer"))))) stop("Don't know how to handle data.frames of this type") iid(as.matrix(x)) } lava/R/revdiag.R0000644000176200001440000000410313520655354013150 0ustar liggesusers##' Create/extract 'reverse'-diagonal matrix or off-diagonal elements ##' @title Create/extract 'reverse'-diagonal matrix or off-diagonal elements ##' @aliases revdiag revdiag<- offdiag offdiag<- ##' @usage ##' revdiag(x,...) ##' offdiag(x,type=0,...) ##' ##' revdiag(x,...) <- value ##' offdiag(x,type=0,...) <- value ##' @param x vector ##' @param value For the assignment function the values to put in the diagonal ##' @param type 0: upper and lower triangular, 1: upper triangular, 2: lower triangular, 3: upper triangular + diagonal, 4: lower triangular + diagonal ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export revdiag <- function(x,...) { if (NCOL(x)==1) { res <- matrix(0,length(x),length(x)) revdiag(res) <- x return(res) } n <- max(ncol(x),nrow(x)) x[cbind(rev(seq(n)),seq(n))] } ##' @export "revdiag<-" <- function(x,...,value) { n <- max(ncol(x),nrow(x)) x[cbind(rev(seq(n)),seq(n))] <- value x } ##' @export offdiag <- function(x,type=0,...) 
{ ##if (NCOL(x)==1) return(NULL) if (type%in%c(1,3)) { ii <- which(upper.tri(x,diag=(type==3))) } else if (type%in%c(2,4)) { ii <- which(lower.tri(x,diag=(type==4))) } else { ii <- c(which(lower.tri(x,diag=FALSE)),which(upper.tri(x,diag=FALSE))) } res <- x[ii] class(res) <- c("offdiag",class(res)) attributes(res) <- c(attributes(res),list(type=type,dimension=dim(x),index=ii,nam=dimnames(x))) return(res) } ##' @export "offdiag<-" <- function(x,type=0,...,value) { if (type%in%c(1,3)) { ii <- which(upper.tri(x,diag=(type==3))) } else if (type%in%c(2,4)) { ii <- which(lower.tri(x,diag=(type==4))) } else { ii <- c(which(lower.tri(x,diag=FALSE)),which(upper.tri(x,diag=FALSE))) } x[ii] <- value return(x) } ##' @export print.offdiag <- function(x,...) { ## type <- attr(x,"type") nn <- attr(x,"dimension") M <- matrix(NA,nn[1],nn[2]) M[attr(x,"index")] <- x dimnames(M) <- attr(x,"nam") print(M,na.print="",...) } lava/R/iv.R0000644000176200001440000002471613520655354012161 0ustar liggesusers###{{{ Objective IV_method.lvm <- NULL IV_objective.lvm <- function(x,p,data,...) { IV2(x,data,...) } IV_variance.lvm <- function(x,p,data,opt,...) { opt$vcov } IV0_method.lvm <- NULL IV0_objective.lvm <- function(x,p,data,...) { IV2(x,data,type="non-robust",...) } IV0_variance.lvm <- function(x,p,data,opt,...) { opt$vcov } IV1_method.lvm <- NULL IV1_objective.lvm <- function(x,p,data,...) { IV(x,data) } IV1_variance.lvm <- function(x,p,data,opt,...) 
{
    opt$vcov
}
###}}} Objective

## Conditional covariance of the variables NOT indexed by 'idx' given those in
## 'idx': the Schur complement S11 - S12 %*% solve(S22) %*% t(S12) of S.
CondVar <- function(S, idx) {
    rest <- setdiff(seq_len(ncol(S)), idx)
    S11 <- S[rest, rest]
    S22 <- S[idx, idx]
    S12 <- S[rest, idx]
    S11 - S12 %*% solve(S22) %*% t(S12)
}

## Moment-based variance estimate of all variables (manifest and latent) of the
## model 'x' evaluated in the data set 'data', returned on the "residual" scale
## (I - A') S (I - A')' where A is the (IV-estimated) regression parameter matrix.
varest <- function(x, data) {
    ## IV (2SLS) point estimates plugged into the model object
    p <- IV(x, data)$estimate
    idx <- match(names(p), coef(x, mean=TRUE))
    x0 <- parfix(Model(x), idx, p)
    index(x0) <- reindex(x0, zeroones=TRUE, deriv=TRUE)
    ## Adjacency (A) and fixed-parameter (Afix) structure of the model
    A <- t(index(x)$A)
    Afix <- A
    Afix[t(index(x)$M0)==1] <- 0
    A[A!=0] <- 1
    k <- nrow(A)
    I <- diag(nrow=k)
    Ap <- modelVar(x)$A ## Estimated parameter matrix
    ## Indicator variables (exactly one parent, not exogenous); retained as in
    ## the original implementation even though only 'y.indicators' is used below
    indicators <- setdiff(vars(x)[rowSums(A)==1], exogenous(x))
    responses <- endogenous(x, top=TRUE)
    y.indicators <- responses[rowSums(A[responses,])==1]
    Sigma <- var(data[, manifest(x)])
    ## Variance of each latent variable estimated from pairwise covariances of
    ## its (scaled) indicators
    var.eta <- c()
    for (eta in latent(x)) {
        reachable <- acc(x$M, eta)
        ys <- intersect(names(reachable), y.indicators)
        lambdas <- c()
        for (y in ys) {
            ## Total effect of eta on y: sum over all paths of the products of
            ## the edge coefficients along each path
            pp <- path(Model(x), from=eta, to=y)
            lambda1 <- 0
            for (i in seq_along(pp)) {
                lambda <- 1
                for (j in seq_len(length(pp[[i]])-1))
                    lambda <- lambda*Ap[pp[[i]][j], pp[[i]][j+1]]
                lambda1 <- lambda1+lambda
            }
            lambdas <- c(lambdas, lambda1)
        }
        val <- outer(1/lambdas, 1/lambdas)*Sigma[ys, ys]
        var.eta <- c(var.eta, mean(val[upper.tri(val)]))
    }
    ## Stack manifest and latent variances in model-variable order
    S <- rep(0, k)
    S[match(manifest(x), vars(x))] <- diag(Sigma)
    S[match(latent(x), vars(x))] <- var.eta
    names(S) <- vars(x)
    I <- diag(nrow=k)
    IA <- (I-t(Ap))
    IA %*% cbind(S) %*% t(IA)
}

## Instrumental Variable Estimator / 2SLS
##' @export
IV <- function(m,data,R2thres=0,type="robust", ...)
{ if (length(constrain(m))>0) stop("Nonlinear constrains not supported!") if (inherits(m,"lvmfit")) { m <- Model(m) } R2 <- cor(data[,manifest(m)])^2 A <- t(index(m)$A) Afix <- A; Afix[t(index(m)$M0)==1] <- 0 A[A!=0] <- 1 P <- index(m)$P k <- nrow(A) I <- diag(nrow=k) B <- rbind(I,solve(I-A)) VV <- B%*%P%*%t(B) u.var <- index(m)$vars lat.idx <- with(index(m), which(vars%in%latent)) if (length(lat.idx)==0) stop("Estimator only defined for models with latent variable") y.var <- endogenous(m) y.idx <- which(index(m)$vars%in%y.var) x.idx <- which(vars(m)%in%exogenous(m)) ## Set of Indicator variables: indicators <- c() for (i in seq_len(nrow(A))) { ifix <- (Afix[i,]==1) if ((sum(ifix)==1) & all(A[i,-which(ifix)]==0)) indicators <- c(indicators, i) } y.indicators <- intersect(indicators,y.idx) y.scale <- list() for (eta in lat.idx) { pred.eta <- intersect(y.idx, which(Afix[,eta]==1)) ## Candidates for ## eta = y-epsilon if (length(pred.eta)<1) pred.eta <- intersect(lat.idx, which(Afix[,eta]==1)) myidx <- c() for (y in pred.eta) { y.pred <- setdiff(eta,which(A[y,]==1)) ## No other variables predicting y? if (length(y.pred)==0) myidx <- c(myidx,y) } y.scale <- c(y.scale, list(myidx)) } if (any(unlist(lapply(y.scale, function(x) length(x)))<1)) stop("At least one scale-measurement pr. latent variable") vv <- setdiff(seq_len(k),c(unlist(y.scale),x.idx)) Ecor <- list() eta.surrogate <- c() latno <- 0 for (e in lat.idx) { latno <- latno+1 y0 <- y.scale[[latno]][1] if (!(y0%in%lat.idx)) { eta.surrogate <- c(eta.surrogate,vars(m)[y0]) Ecor <- c(Ecor,list(y0)) } else { v0 <- vars(m)[-c(e,indicators)] ##m.sub <- subset(m,vars(m)[c(e,indicators)]) m.sub <- rmvar(m,v0) i <- 0 while (i0) { Debug(vars(m)[v]) pred.lat <- intersect(pred,lat.idx) # Any latent predictors? 
lpos <- match(v,lat.idx) lppos <- match(pred.lat,lat.idx) ecor <- c(v,unlist(Ecor[lppos])) if (!is.na(lpos)) { v0 <- match(eta.surrogate[lpos],vars(m)) ecor <- c(ecor,Ecor[[lpos]]) } else { v0 <- v } ecor <- unique(c(v0,ecor)) XX <- vars(m)[A[v,]==1] intpred <- exogenous(m) newf <- c() if (length(pred.lat)>0) { intpred <- vars(m) for (i in seq_along(pred.lat)) { uncor <- which(colSums(VV[ecor,k+seq_len(k),drop=FALSE])==0) uncor <- setdiff(uncor,c(lat.idx)) mypred <- vars(m)[uncor] XX[XX==vars(m)[pred.lat[i]]] <- eta.surrogate[lppos[i]] intpred <- intersect(intpred,mypred) f <- toformula(eta.surrogate[lppos[i]],mypred) ff <- c(ff, f) f2 <- list(f) names(f2) <- vars(m)[i] newf <- c(newf,f2) } } intpred <- intersect(intpred,manifest(m)) R2max <- apply(R2[XX,intpred,drop=FALSE],2,max) if (any(R2max=R2thres] newf <- list(intpred); names(newf) <- vars(m)[v] instruments <- c(instruments, newf) covariates <- unique(c(setdiff(colnames(A)[A[v,]==1],latent(m)),intpred)) if (length(covariates)==0) stop("No instruments") Z <- model.matrix(toformula("",c("1",XX)),data) Y <- as.matrix(data[,vars(m)[v0]]) V <- model.matrix(toformula("",c("1",unique(covariates))),data) count <- count+1 V. <- c(V.,list(V)) Z. <- c(Z.,list(Z)) Y. <- c(Y.,list(Y)) XX <- vars(m)[A[v,]==1 & Afix[v,]!=1] parname <- c(parname, c(vars(m)[v0],paste(vars(m)[v],XX,sep=lava.options()$symbol[1]))) } else { if (vars(m)[v]%in%latent(m)) { lpos <- match(v,lat.idx) v0 <- match(eta.surrogate[lpos],vars(m)) Y <- matrix(data[,vars(m)[v0]],ncol=1) Y. <- c(Y.,list(Y)) V. <- c(V.,list(cbind(rep(1,nrow(Y))))) Z. 
<- c(Z.,list(cbind(rep(1,nrow(Y))))) parname <- c(parname, names(eta.surrogate)[lpos]) } } } LS <- function(X) { with(svd(X), v%*%diag(1/d,nrow=length(d))%*%t(u)) } ##projection <- function(X) X%*%LS(X) P0 <- lapply(V.,LS) Zhat <- list(); for (i in seq_along(Z.)) Zhat <- c(Zhat, list(V.[[i]]%*%(P0[[i]]%*%Z.[[i]]))) ZhatLS <- lapply(Zhat,LS) theta <- list(); for (i in seq_along(Y.)) theta <- c(theta, list(ZhatLS[[i]]%*%Y.[[i]])) u <- c() for (i in seq_along(Y.)) u <- cbind(u, Y.[[i]]-Z.[[i]]%*%theta[[i]]) covu <- crossprod(u)/nrow(u) theta.npar <- unlist(lapply(theta,length)) theta.ncum <- c(0,cumsum(theta.npar)) vartheta <- matrix(0,ncol=sum(theta.npar),nrow=sum(theta.npar)) for (i in seq_along(theta)) { for (j in seq(i,length(theta))) { idx1 <- seq_len(theta.npar[i]) + theta.ncum[i] idx2 <- seq_len(theta.npar[j]) + theta.ncum[j] if (type=="robust") { zi <- ZhatLS[[i]] for (k in seq(nrow(zi))) zi[k,] <- zi[k,]*u[,i] zj <- ZhatLS[[j]] for (k in seq(nrow(zj))) zj[k,] <- zj[k,]*u[,j] uZZ <- zi%*%t(zj) } else { uZZ <- covu[i,j]* (ZhatLS[[i]]%*%t(ZhatLS[[j]])) } vartheta[idx1,idx2] <- uZZ if (i!=j) { vartheta[idx2,idx1] <- t(uZZ) } } } parname[which(parname%in%eta.surrogate)] <- names(eta.surrogate)[which(eta.surrogate%in%parname)] coef <- cbind(unlist(theta),diag(vartheta)^0.5); rownames(coef) <- parname; colnames(coef) <- c("Estimate","Std.Err") res <- list(estimate=coef[,1], vcov=vartheta) attributes(res)$surrogates <- eta.surrogate attributes(res)$instruments <- instruments return(res) } IV2 <- function(m,data,control=list(),...) { if (is.null(control$R2thres)) control$R2thres <- 0 res <- IV(m,data,R2thres=control$R2thres,...) 
p <- res$estimate idx <- match(names(p),coef(m,mean=TRUE)) x0 <- parfix(m,idx,p) index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) idx0 <- order(idx) p0 <- p[idx0] V0 <- res$vcov[idx0,idx0] if (is.null(control$variance) || control$variance) { suppressWarnings(e0 <- estimate(x0,data,...,messages=0,quick=TRUE)) p0 <- c(p0,e0) V0 <- V0%++%matrix(0,ncol=length(e0),nrow=length(e0)) } R2 <- noquote(formatC(cor(data[,manifest(m)])^2)) colnames(R2) <- rownames(R2) <- manifest(m) l1 <- noquote(rbind(paste(latent(m),collapse=","), paste(attributes(res)$surrogates,collapse=","), "")) rownames(l1) <- c("Latent variables","Surrogate variables:","") colnames(l1) <- "" ii <- attributes(res)$instruments I <- noquote(matrix(NA,ncol=2,nrow=length(ii))) rownames(I) <- rep("",nrow(I)) colnames(I) <- c("Response","Instruments") for (i in seq_along(ii)) { I[i,] <- c(names(ii)[i],paste(ii[[i]],collapse=",")) } mymsg <- list(l1,I); list(estimate=p0,vcov=V0,summary.message=function(...) { mymsg }) } lava/R/regression.R0000644000176200001440000002262713520655354013722 0ustar liggesusers ##' Add regression association to latent variable model ##' ##' Define regression association between variables in a \code{lvm}-object and ##' define linear constraints between model equations. ##' ##' ##' The \code{regression} function is used to specify linear associations ##' between variables of a latent variable model, and offers formula syntax ##' resembling the model specification of e.g. \code{lm}. ##' ##' For instance, to add the following linear regression model, to the ##' \code{lvm}-object, \code{m}: ##' \deqn{ E(Y|X_1,X_2) = \beta_1 X_1 + \beta_2 X_2} ##' We can write ##' ##' \code{regression(m) <- y ~ x1 + x2} ##' ##' Multivariate models can be specified by successive calls with ##' \code{regression}, but multivariate formulas are also supported, e.g. 
##'
##' \code{regression(m) <- c(y1,y2) ~ x1 + x2}
##'
##' defines
##' \deqn{ E(Y_i|X_1,X_2) = \beta_{1i} X_1 + \beta_{2i} X_2 }
##'
##' The special function, \code{f}, can be used in the model specification to
##' specify linear constraints. E.g. to fix \eqn{\beta_1=\beta_2},
##' we could write
##'
##' \code{regression(m) <- y ~ f(x1,beta) + f(x2,beta)}
##'
##' The second argument of \code{f} can also be a number (e.g. defining an
##' offset) or be set to \code{NA} in order to clear any previously defined
##' linear constraints.
##'
##' Alternatively, a more straightforward notation can be used:
##'
##' \code{regression(m) <- y ~ beta*x1 + beta*x2}
##'
##' All the parameter values of the linear constraints can be given as the
##' right-hand side expression of the assignment function \code{regression<-} (or
##' \code{regfix<-}) if the first (and possibly second) argument is defined as
##' well. E.g.:
##'
##' \code{regression(m,y1~x1+x2) <- list("a1","b1")}
##'
##' defines \eqn{E(Y_1|X_1,X_2) = a1 X_1 + b1 X_2}. The rhs argument can be a
##' mixture of character and numeric values (and NA's to remove constraints).
##'
##' The function \code{regression} (called without additional arguments) can be
##' used to inspect the linear constraints of a \code{lvm}-object.
##'
##' For backward compatibility the "$"-symbol can be used to fix parameters at
##' a given value. E.g. to add a linear relationship between \code{y} and
##' \code{x} with slope 2 to the model \code{m}, we can write
##' \code{regression(m,"y") <- "x$2"}. Similarly we can use the "@@"-symbol to
##' name parameters. E.g. in a multiple regression we can force the parameters
##' to be equal: \code{regression(m,"y") <- c("x1@@b","x2@@b")}. Fixed parameters
##' can be reset by fixing (with \$) them to \code{NA}.
##'
##' @aliases regression regression<- regression<-.lvm regression.lvm regfix
##' regfix regfix<- regfix.lvm regfix<-.lvm
##' @param object \code{lvm}-object.
##' @param value A formula specifying the linear constraints or if ##' \code{to=NULL} a \code{list} of parameter values. ##' @param to Character vector of outcome(s) or formula object. ##' @param from Character vector of predictor(s). ##' @param fn Real function defining the functional form of predictors (for ##' simulation only). ##' @param messages Controls which messages are turned on/off (0: all off) ##' @param additive If FALSE and predictor is categorical a non-additive effect is assumed ##' @param y Alias for 'to' ##' @param x Alias for 'from' ##' @param quick Faster implementation without parameter constraints ##' @param \dots Additional arguments to be passed to the low level functions ##' @usage ##' \method{regression}{lvm}(object = lvm(), to, from, fn = NA, ##' messages = lava.options()$messages, additive=TRUE, y, x, value, ...) ##' \method{regression}{lvm}(object, to=NULL, quick=FALSE, ...) <- value ##' @return A \code{lvm}-object ##' @note Variables will be added to the model if not already present. ##' @author Klaus K. 
Holst ##' @seealso \code{\link{intercept<-}}, \code{\link{covariance<-}}, ##' \code{\link{constrain<-}}, \code{\link{parameter<-}}, ##' \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} ##' @keywords models regression ##' @examples ##' ##' m <- lvm() ## Initialize empty lvm-object ##' ### E(y1|z,v) = beta1*z + beta2*v ##' regression(m) <- y1 ~ z + v ##' ### E(y2|x,z,v) = beta*x + beta*z + 2*v + beta3*u ##' regression(m) <- y2 ~ f(x,beta) + f(z,beta) + f(v,2) + u ##' ### Clear restriction on association between y and ##' ### fix slope coefficient of u to beta ##' regression(m, y2 ~ v+u) <- list(NA,"beta") ##' ##' regression(m) ## Examine current linear parameter constraints ##' ##' ## ## A multivariate model, E(yi|x1,x2) = beta[1i]*x1 + beta[2i]*x2: ##' m2 <- lvm(c(y1,y2) ~ x1+x2) ##' ##' @export "regression<-" <- function(object,...,value) UseMethod("regression<-") ##' @export regression.formula <- function(object,...) regression(lvm(),object,...) ##' @export "regression<-.lvm" <- function(object, to=NULL, quick=FALSE, ..., value) { dots <- list(...) if (length(dots$additive)>0 && !dots$additive && !inherits(value,"formula")) { regression(object,beta=value,...) <- to return(object) } if (!is.null(to) || !is.null(dots$y)) { regfix(object, to=to, ...) <- value return(object) } else { if (is.list(value)) { for (v in value) { regression(object,...) <- v } return(object) } if (inherits(value,"formula")) { fff <- procformula(object,value,...) object <- fff$object lhs <- fff$lhs xs <- fff$xs ys <- fff$ys res <- fff$res X <- fff$X if (fff$iscovar) { ## return(covariance(object,var1=decomp.specials(lhs[[1]]),var2=X)) covariance(object) <- toformula(decomp.specials(lhs[[1]]),X) return(object) } if (!is.null(lhs) && nchar(lhs[[1]])>2 && substr(lhs[[1]],1,2)=="v(") { v <- update(value,paste(decomp.specials(lhs),"~.")) covariance(object,...) 
<- v return(object) } if (length(lhs)==0) { index(object) <- reindex(object) return(object) } for (i in seq_len(length(ys))) { y <- ys[i] for (j in seq_len(length(xs))) { if (length(res[[j]])>1) { regfix(object, to=y[1], from=xs[j],...) <- res[[j]][2] } else { object <- regression(object,to=y[1],from=xs[j],...) } } } object$parpos <- NULL return(object) } if (!is.list(value) | length(value)>2) stop("Value should contain names of outcome (to) and predictors (from)") if (all(c("to","from")%in%names(value))) { xval <- value$x; yval <- value$y } else { yval <- value[[1]]; xval <- value[[2]] } regression(object, to=yval, from=xval,...) } } ##' @export `regression` <- function(object,to,from,...) UseMethod("regression") ##' @export `regression.lvm` <- function(object=lvm(),to,from,fn=NA,messages=lava.options()$messages, additive=TRUE, y,x,value,...) { if (!missing(y)) { if (inherits(y,"formula")) y <- all.vars(y) to <- y } if (!missing(x)) { if (inherits(x,"formula")) x <- all.vars(x) from <- x } if (!additive) { if (!inherits(to,"formula")) to <- toformula(to,from) x <- attributes(getoutcome(to))$x K <- object$attributes$nordinal[x] if (is.null(K) || is.na(K)) { K <- list(...)$K if (is.null(K)) stop("Supply number of categories, K (or use method 'categorical' before calling 'regression').") object <- categorical(object,x,...) } dots <- list(...); dots$K <- K dots$x <- object dots$formula <- to dots$regr.only <- TRUE object <- do.call("categorical",dots) return(object) } if (missing(to)) { return(regfix(object)) } if (inherits(to,"formula")) { if (!missing(value)) { regression(object,to,messages=messages,...) <- value } else { regression(object,messages=messages,...) <- to } object$parpos <- NULL return(object) } if (is.list(to)) { for (t in to) regression(object,messages=messages,...) 
<- t object$parpos <- NULL return(object) } sx <- strsplit(from,"@") xx <- sapply(sx, FUN=function(i) i[1]) ps <- sapply(sx, FUN=function(i) i[2]) sx <- strsplit(xx,"$",fixed=TRUE) xs <- sapply(sx, FUN=function(i) i[1]) fix <- char2num(sapply(sx, FUN=function(i) i[2])) allv <- index(object)$vars object <- addvar(object, c(to,xs), messages=messages, reindex=FALSE) for (i in to) for (j in xs) { object$M[j,i] <- 1 if (!is.na(fn)) functional(object,j,i) <- fn } if (lava.options()$exogenous) { newexo <- setdiff(xs,c(to,allv)) exo <- exogenous(object) if (length(newexo)>0) exo <- unique(c(exo,newexo)) exogenous(object) <- setdiff(exo,to) } if (lava.options()$debug) { print(object$fix) } object$fix[xs,to] <- fix object$par[xs,to] <- ps object$parpos <- NULL index(object) <- reindex(object) return(object) } lava/R/print.R0000644000176200001440000001550013520655354012666 0ustar liggesusers###{{{ print.lvm ##' @export `print.lvm` <- function(x, ..., print.transform=TRUE,print.exogenous=TRUE) { res <- NULL myhooks <- gethook("print.hooks") for (f in myhooks) { res <- do.call(f, list(x=x,...)) } if (is.null(res)) { k <- length(vars(x)) L <- rep(FALSE,k); names(L) <- vars(x); L[latent(x)] <- TRUE cat("Latent Variable Model\n") ##;" \n\twith: ", k, " variables.\n", sep=""); if (k==0) { cat("\nEmpty\n") return() } ff <- formula(x,char=TRUE,all=TRUE) R <- Rx <- Rt <- c() exo <- exogenous(x) for (f in ff) { oneline <- as.character(f); y <- strsplit(f,"~")[[1]][1] y <- trim(y) { col1 <- as.character(oneline) D <- attributes(distribution(x)[[y]])$family Tr <- x$attributes$transform[[y]] col2 <- tryCatch(x$attributes$type[[y]],error=function(...) 
NULL) if (is.null(col2) || is.na(col2)) { if (!is.na(x$covfix[y,y]) && x$covfix[y,y]==0L) { col2 <- "deterministic" } else { col2 <- "gaussian" } } if (!is.null(Tr)){ col1 <- paste0(y,' ~ ',paste0(Tr$x,collapse="+"),sep="") Rt <- rbind(Rt, c(col1,"")) } if (!is.null(D$family)) { col2 <- paste0(D$family) } if (!is.null(D$link)) col2 <- paste0(col2,"(",D$link,")") if (!is.null(D$par)) col2 <- paste0(col2,"(",paste(D$par,collapse=","),")") if (is.list(distribution(x)[[y]]) && is.vector(distribution(x)[[y]][[1]])) col2 <- "fixed" if (L[y]) col2 <- paste0(col2,", latent") if (y%in%exo) { Rx <- rbind(Rx,c(col1,col2)) } else { if (is.null(Tr)) { R <- rbind(R,c(col1,col2)) } } } } if (length(R)>0) { rownames(R) <- paste(" ",R[,1]," "); colnames(R) <- rep("",ncol(R)) print(R[,2,drop=FALSE],quote=FALSE,...) } if (print.exogenous && length(Rx)>0) { cat("\nExogenous variables:") rownames(Rx) <- paste(" ",gsub("~ 1","",Rx[,1])," "); colnames(Rx) <- rep("",ncol(Rx)) print(Rx[,2,drop=FALSE],quote=FALSE,...) } if (print.transform && length(Rt)>0) { cat("\nTransformations:") rownames(Rt) <- paste(" ",gsub("~ 1","",Rt[,1])," "); colnames(Rt) <- rep("",ncol(Rt)) print(Rt[,2,drop=FALSE],quote=FALSE,...) } } cat("\n") invisible(x) } ###}}} print.lvm ###{{{ print.lvmfit ##' @export `print.lvmfit` <- function(x,type=2,labels=FALSE,...) { print(CoefMat(x,labels=labels,type=type,...),quote=FALSE,right=TRUE) minSV <- attr(vcov(x),"minSV") if (!is.null(minSV) && minSV<1e-12) { warning("Small singular value: ", format(minSV)) } pseudo <- attr(vcov(x),"pseudo") if (!is.null(pseudo) && pseudo) warning("Singular covariance matrix. Pseudo-inverse used.") invisible(x) } ###}}} print.lvmfit ###{{{ print.lvmfit.randomslope ##' @export print.lvmfit.randomslope <- function(x,labels=FALSE,type=2,...) { print(CoefMat(x,labels=labels,type=type,...),quote=FALSE,right=TRUE) invisible(x) } ###}}} ###{{{ print.multigroupfit ##' @export print.multigroupfit <- function(x,groups=NULL,...) 
{
    if (is.null(groups)) {
        if (x$model$missing) {
            ## Missing data: the fit consists of one sub-model per
            ## missing-data pattern. For each original group pick the
            ## pattern-group with the fewest missing variables to display.
            modelclass <- attributes(x$model0)$modelclass
            nmis <- attributes(x$model0)$nmis
            orggroup <- unique(modelclass)
            groupn <- unlist(lapply(orggroup, function(i) sum(modelclass==i)))
            cumsumgroup <- cumsum(c(0,groupn))
            groups <- unlist(lapply(orggroup, function(i) which.min(nmis[which(modelclass==i)])+cumsumgroup[i]))
            for (i in seq_along(groups)) {
                if (nmis[groups[i]]>0)
                    warning("No complete cases in group ",i,
                            ". Showing results of group with max number of variables. All coefficients can be extracted with 'coef'. All missing pattern groups belonging to this sub-model can be extracted by calling: coef(..., groups=c(",
                            paste(which(modelclass==i),collapse=","),"))")
            }
            ## BUG FIX: the condition previously tested 'x$model$mnameses' --
            ## a field that is never set (typo of 'mnames') -- so the group
            ## names were never copied over.
            if (!is.null(x$model$mnames)) x$model$names <- x$model$mnames
        } else {
            groups <- seq_len(length(x$model$lvm))
        }
    }
    res <- coef(x, type=2, groups=groups, ...)
    dots <- list(...)
    dots$groups <- groups
    ## Default coefficient-matrix type (previously assigned to an unused
    ## local 'type'; the dead assignment has been removed)
    if (is.null(dots$type)) {
        dots$type <- 2
        ## dots$type <- ifelse("lvmfit.randomslope"%in%class(x),2,9)
    }
    myargs <- c(list(x=x), dots)
    myargs$groups <- groups
    CC <- do.call("CoefMat.multigroupfit", myargs)
    ## Print one coefficient table per (selected) group
    counter <- 0
    for (cc in res) {
        counter <- counter+1
        cat(rep("_",52),"\n",sep="")
        cat("Group ", counter, sep="")
        myname <- x$model$names[counter]
        if (!is.null(myname) && !is.na(myname)) cat(": ",myname,sep="")
        if (!x$model$missing)
            cat(" (n=",nrow(Model(x)$data[[groups[counter]]]), ")", sep="")
        cat("\n")
        print(CC[[counter]], quote=FALSE, right=TRUE)
    }
    cat("\n")
    invisible(x)
}
###}}} print.multigroupfit

###{{{ print.multigroup

##' @export
print.multigroup <- function(x,...)
{ cat("\n") cat("Number of groups:", x$ngroup, "\n") cat("Number of free parameters (not counting mean-parameters):", x$npar,"\n") ## cat("Parameter-vector:", unlist(x$parlist), "\n\n") cat("Number of free mean parameters:", length(grep("m",x$mean)),"\n") ## cat("Mean-vector:", x$mean, "\n\n") invisible(x) } ###}}} print.multigroup ###{{{ printmany printmany <- function(A,B,nspace=1,name1=NULL,name2=NULL,digits=3,rownames=NULL,emptystr=" ",bothrows=!is.table(A),right=TRUE,print=TRUE,...) { A <- format(A, digits=digits, right=right, ...) B <- format(B, digits=digits, right=right, ...) nA <- nrow(A); nB <- nrow(B) if (nrow(A)0 && any(htidx!=0)) { res <- htidx[htidx>0] attributes(res)$couple <- unlist(x$attributes$heavytail.couple)[htidx>0] return(res) } return(NULL) } couples <- attributes(heavytail(x))$couple newval <- 1 if (length(couples)>0) newval <- max(couples)+1 x$attributes$heavytail.couple[var] <- newval x$attributes$heavytail[var] <- df return(x) } heavytail.init.hook <- function(x,...) { x$attributes$heavytail <- list() x$attributes$heavytail.couple <- list() return(x) } heavytail.sim.hook <- function(x,data,...) 
{ n <- nrow(data) hvar <- heavytail(x) if (length(hvar)==0) return(data) couples <- unique(attributes(hvar)$couple) h.type <- list() for (j in couples) h.type <- c(h.type, list( hvar[(which(attributes(hvar)$couple==j))])) for (i in seq_along(couples)) { df <- hvar[[i]][1] Z <- rchisq(n,df=df)/df for (v in names(h.type[[i]])) { data[,v] <- data[,v]/sqrt(Z) } } return(data) } lava/R/scheffe.R0000644000176200001440000000220513520655354013133 0ustar liggesusers##' Function to compute the Scheffe corrected confidence ##' interval for the regression line ##' ##' @title Calculate simultaneous confidence limits by Scheffe's method ##' @param model Linear model ##' @param newdata new data frame ##' @param level confidence level (0.95) ##' @export ##' @examples ##' x <- rnorm(100) ##' d <- data.frame(y=rnorm(length(x),x),x=x) ##' l <- lm(y~x,d) ##' plot(y~x,d) ##' abline(l) ##' d0 <- data.frame(x=seq(-5,5,length.out=100)) ##' d1 <- cbind(d0,predict(l,newdata=d0,interval="confidence")) ##' d2 <- cbind(d0,scheffe(l,d0)) ##' lines(lwr~x,d1,lty=2,col="red") ##' lines(upr~x,d1,lty=2,col="red") ##' lines(lwr~x,d2,lty=2,col="blue") ##' lines(upr~x,d2,lty=2,col="blue") scheffe <- function(model,newdata=model.frame(model),level=0.95) { df <- model$df.residual p <- model$rank alpha <- 1-level ## Scheffe value uses 1-tailed F critical value scheffe.crit <- sqrt(p*qf(1-alpha,p,df)) ci <- predict(model,newdata,interval="confidence",level=level) delta <- scheffe.crit/qt(1-alpha/2,df) ci[,2] <- ci[,1] -(ci[,1]-ci[,2])*delta ci[,3] <- ci[,1] +(ci[,3]-ci[,1])*delta return(ci) } lava/R/interactive.R0000644000176200001440000000624113520655354014051 0ustar liggesusers##' @export colsel <- function(locate,...) 
{
    ## 657 = number of named colors returned by colors(); they are laid out
    ## on a 26x26 grid (26 = ceiling(sqrt(657))) with cell width 1/26.
    step <- 1/26
    ytop <- rep(seq(step,1,by=step),each=26)[1:657]
    ybottom <- rep(seq(0,1-step,by=step),each=26)[1:657]
    xleft <- rep(seq(0,1-step,by=step),times=26)[1:657]
    xright <- rep(seq(step,1,by=step),times=26)[1:657]
    ## Label in black on light cells and white on dark cells for readability
    pall <- round(col2rgb(colors())/256)
    pall <- colSums(pall)
    pall2 <- character(0)
    pall2[pall>0] <- "black"
    pall2[pall==0] <- "white"
    par(mar=c(0,0,1,0))
    plot.new()
    title(main="Palette of colors()")
    rect(xleft,ybottom,xright,ytop,col=colors())
    text(x=xleft+(step/2), y=ytop-(step/2), labels=1:657, cex=0.55, col=pall2)
    if (missing(locate)) return(invisible(NULL))
    ## Interactive part: let the user click 'locate' cells and return the
    ## corresponding color names.
    ## FIX: byrow=TRUE instead of byrow=T ('T' is an ordinary variable that
    ## can be reassigned, only 'TRUE' is a reserved constant).
    colmat <- matrix(c(1:657,rep(NA,26^2-657)),byrow=TRUE,ncol=26,nrow=26)
    cols <- NA
    for (i in seq_len(locate)) {
        h <- locator(1)
        if (any(h$x<0,h$y<0,h$x>1,h$y>1))
            stop("locator out of bounds!")
        else {
            cc <- floor(h$x/step)+1
            rr <- floor(h$y/step)+1
            cols[i] <- colors()[colmat[rr,cc]]
        }
    }
    return(cols)
}

##' Extension of the \code{identify} function
##'
##' For the usual 'X11' device the identification process is
##' terminated by pressing any mouse button other than the first. For
##' the 'quartz' device the process is terminated by pressing either
##' the pop-up menu equivalent (usually second mouse button or
##' 'Ctrl'-click) or the 'ESC' key.
##' @title Identify points on plot
##' @usage
##' \method{click}{default}(x, y=NULL, label=TRUE, n=length(x), pch=19, col="orange", cex=3, ...)
##' idplot(x,y,...,id=list())
##' @aliases idplot click.default click colsel
##' @param x X coordinates
##' @param y Y coordinates
##' @param label Should labels be added?
##' @param n Max number of inputs to expect
##' @param pch Symbol
##' @param col Colour
##' @param cex Size
##' @param id List of arguments parsed to \code{click} function
##' @param \dots Additional arguments parsed to \code{plot} function
##' @author Klaus K.
Holst ##' @seealso \code{\link{idplot}}, \code{identify} ##' @examples ##' if (interactive()) { ##' n <- 10; x <- seq(n); y <- runif(n) ##' plot(y ~ x); click(x,y) ##' ##' data(iris) ##' l <- lm(Sepal.Length ~ Sepal.Width*Species,iris) ##' res <- plotConf(l,var2="Species")## ylim=c(6,8), xlim=c(2.5,3.3)) ##' with(res, click(x,y)) ##' ##' with(iris, idplot(Sepal.Length,Petal.Length)) ##' } ##' @keywords iplot ##' @export click <- function(x,...){ UseMethod("click") } ##' @export click.default <- function(x, y=NULL, label=TRUE, n=length(x), pch=19, col="orange", cex=3, ...) { xy <- xy.coords(x, y); x <- xy$x; y <- xy$y sel <- rep(FALSE, length(x)); res <- integer(0) while(sum(sel) < n) { ans <- identify(x[!sel], y[!sel], n=1, plot=FALSE, ...) if(!length(ans)) break ans <- which(!sel)[ans] points(x[ans], y[ans], pch = pch, col=col, cex=cex) if (label) text(x[ans], y[ans], ans) sel[ans] <- TRUE res <- c(res, ans) } res } ##' @export idplot <- function(x,y,...,id=list()) { plot(x,y,...) id$x <- x; id$y <- y do.call("click",id) } lava/R/score.survreg.R0000644000176200001440000000243113520655354014340 0ustar liggesusers##' @export pars.survreg <- function(x,...) { c(coef(x),scale=x$scale) } ##' @export score.survreg <- function(x,p,scale=TRUE,logscale=FALSE,indiv.logLik=FALSE,...) 
{ npar <- NROW(x$var) m <- model.frame(x) X <- model.matrix(terms(x), m) hasscale <- npar>length(x$coefficients) if (!missing(p)) { if (hasscale) sigma <- tail(p,1) p <- p[seq(length(p)-1)] x$linear.predictors <- as.vector(X%*%p) x$coefficients <- p x$scale <- sigma } derivatives <- residuals(x, type = "matrix") w <- model.weights(m) if (is.null(w)) w <- 1 dldLP <- w*derivatives[,"dg"] ## Derivative wrt linear-predictor p=Xbeta S <- apply(X,2,function(x) x*dldLP) if (!is.null(x$naive.var)) { V <- x$naive.var } else { V <- x$var } if (hasscale && scale) { ds <- cbind("logsigma"=derivatives[,"ds"]) if (!logscale) { ds <- ds/x$scale names(ds) <- "sigma" } S <- cbind(S,ds) } if (hasscale && !scale) { V <- V[-npar,-npar,drop=FALSE] } attributes(S)$logLik <- if (indiv.logLik) derivatives[,"g"] else sum(derivatives[,"g"]) attributes(S)$bread <- V return(S) } lava/R/diagtest.R0000644000176200001440000002211113520655354013332 0ustar liggesusers##' @export logit <- function(p) log(p/(1-p)) ##' @export expit <- function(z) 1/(1+exp(-z)) ##' @export tigol <- expit ##' Calculate prevalence, sensitivity, specificity, and positive and ##' negative predictive values ##' ##' @title Calculate diagnostic tests for 2x2 table ##' @aliases diagtest odds riskcomp OR Ratio Diff ##' @param table Table or (matrix/data.frame with two columns) ##' @param positive Switch reference ##' @param exact If TRUE exact binomial proportions CI/test will be used ##' @param p0 Optional null hypothesis (test prevalenc, sensitivity, ...) ##' @param confint Type of confidence limits ##' @param ... Additional arguments to lower level functions ##' @author Klaus Holst ##' @details Table should be in the format with outcome in columns and ##' test in rows. Data.frame should be with test in the first ##' column and outcome in the second column. 
##' @examples
##' M <- as.table(matrix(c(42,12,
##' 35,28),ncol=2,byrow=TRUE,
##' dimnames=list(rater=c("no","yes"),gold=c("no","yes"))))
##' diagtest(M,exact=TRUE)
##' @export
diagtest <- function(table,positive=2,exact=FALSE,p0=NA,confint=c("logit","arcsin","pseudoscore","exact"),...) {
    if (!inherits(table,c("table","data.frame","matrix","multinomial")))
        stop("Expecting a table or data.frame.")
    ## Determine the outcome levels (2nd margin/column of the input)
    if (is.table(table)) {
        lev <- dimnames(table)[[2]]
    }
    if (inherits(table,"multinomial")) {
        lev <- dimnames(table$P)[[2]]
    }
    if (!is.table(table) & (is.matrix(table) || is.data.frame(table))) {
        if (is.factor(table[,2])) {
            lev <- levels(table[,2])
        } else lev <- unique(table[,2])
    }
    if (is.character(positive)) {
        positive <- match(positive,lev)
    }
    if (!(positive%in%c(1,2))) stop("Expecting an index of 1 or 2.")
    negative <- positive%%2+1L
    if (!is.null(confint) && confint[1]=="exact") exact <- TRUE
    if (exact) {
        ## Exact binomial confidence limits/tests for each summary statistic
        if (!is.table(table) && (is.matrix(table) || is.data.frame(table))) {
            table <- base::table(table[,c(1,2),drop=FALSE])
            ##names(dimnames(table)) <- colnames(table)[1:2]
        }
        if (!is.table(table) || nrow(table)!=2 || ncol(table)!=2) stop("2x2 table expected")
        n <- sum(table)
        nc <- colSums(table)
        nr <- rowSums(table)
        test <- TRUE
        if (is.na(p0)) { ## No null value supplied: p-values are blanked below
            test <- FALSE
            p0 <- 0.5
        }
        ## Prevalence
        p1 <- with(stats::binom.test(nc[positive],n,p=p0),c(estimate,conf.int,p.value))
        ## Test marginal
        p2 <- with(stats::binom.test(nr[positive],n,p=p0),c(estimate,conf.int,p.value))
        ## Sensitivity/Specificity
        sens <- with(stats::binom.test(table[positive,positive],nc[positive],p=p0),c(estimate,conf.int,p.value))
        spec <- with(stats::binom.test(table[negative,negative],nc[negative],p=p0),c(estimate,conf.int,p.value))
        ## PPV,NPV
        ppv <- with(stats::binom.test(table[positive,positive],nr[positive],p=p0),c(estimate,conf.int,p.value))
        npv <- with(stats::binom.test(table[negative,negative],nr[negative],p=p0),c(estimate,conf.int,p.value))
        ## Accuracy
        acc <- with(stats::binom.test(table[positive,positive]+table[negative,negative],n,p=p0),c(estimate,conf.int,p.value))
        ## Symmetry (McNemar):
        ## number of discordant pairs under null: b~bin(b+c,0.5)
        sym <- with(stats::binom.test(table[positive,negative],table[positive,negative]+table[negative,positive],p=0.5),c(estimate,conf.int,p.value))
        coefmat <- rbind(Prevalence=p1,
                         Test=p2,
                         Sensitivity=sens,
                         Specificity=spec,
                         PositivePredictiveValue=ppv,
                         NegativePredictiveValue=npv,
                         Accuracy=acc,
                         Homogeneity=sym)
        if (!test) coefmat[seq(nrow(coefmat)-1),4] <- NA
        ## Insert empty Std.Err column for compatibility with the 'estimate' layout
        coefmat <- cbind(coefmat[,1,drop=FALSE],NA,coefmat[,-1,drop=FALSE])
        colnames(coefmat) <- c("Estimate","Std.Err","2.5%","97.5%","P-value")
        res <- list(table=table,
                    prop.table=table/sum(table),
                    coefmat=coefmat)
    } else {
        ## Asymptotic inference via the multinomial model (delta-method)
        if (inherits(table,"table")) M <- multinomial(table)
        else {
            if (inherits(table,"multinomial")) {
                M <- table
                table <- round(M$P*nrow(M$data))
            } else {
                M <- multinomial(table[,1:2],...)
                table <- base::table(table)
            }
        }
        ## Map cell probabilities p (2x2, column-major) to the diagnostic summaries
        calc_diag <- function(p,...)
        {
            P <- matrix(p[1:4],2)
            p1 <- sum(P[,positive])
            p2 <- sum(P[positive,])
            res <- c(Prevalence=p1, ##(p[1]+p[2]),
                     Test=p2, ##(p[1]+p[3]),
                     Sensitivity=P[positive,positive]/p1, ## p[1]/(p[1]+p[2]), # Prob test + | given (true) disease (True positive rate)
                     Specificity=P[negative,negative]/(1-p1), ## p[4]/(1-p[1]-p[2]), # Prob test - | given no disease (True negative rate)
                     PositivePredictiveValue=P[positive,positive]/p2, ## p[1]/(p[1]+p[3]), # Prob disease | test +
                     NegativePredictiveValue=P[negative,negative]/(1-p2), ## p[4]/(1-p[1]-p[3]), # Prob disease free | test -
                     Accuracy=(P[1,1]+P[2,2])/sum(P),
                     Homogeneity=P[negative,positive]-P[positive,negative]
                     )
            if (!is.null(confint)) {
                ## Variance-stabilizing transform (all rows except Homogeneity)
                if (tolower(confint[1])=="logit") {
                    res[seq(length(res)-1)] <- logit(res[seq(length(res)-1)])
                } else if (tolower(confint[1])=="arcsin") {
                    res[seq(length(res)-1)] <- asin(sqrt(res[seq(length(res)-1)]))
                }
            }
            return(res)
        }
        names(dimnames(table)) <- paste0(c("Test:","Outcome:"),names(dimnames(table)))
        prfun <- function(x,...) {
            printCoefmat(x$coefmat[,c(-2)],na.print="",...)
            printline()
            cat("\n")
            cat("Prevalence: Prob( outcome+ )\n")
            cat("Test: Prob( test+ )\n")
            cat("Sensitivity (True positive rate): Prob( test+ | outcome+ )\n")
            cat("Specificity (True negative rate): Prob( test- | outcome- )\n")
            cat("Positive predictive value (Precision): Prob( outcome+ | test+ )\n")
            cat("Negative predictive value: Prob( outcome- | test- )\n")
            cat("Accuracy: Prob( correct classification )\n")
            cat("Homogeneity/Symmetry: Prob( outcome+ ) - Prob( test+ )\n")
        }
        ## Back-transformation matching the chosen CI scale
        btransform <- NULL
        if (!is.null(confint)) {
            if (tolower(confint[1])=="logit") {
                btransform <- function(x) {
                    rbind(expit(x[seq(nrow(x)-1),,drop=FALSE]),
                          x[nrow(x),,drop=FALSE])
                }
            } else if (tolower(confint[1])=="pseudoscore") {
                ## TODO, agresti-ryu, biometrika 2010
            } else if (tolower(confint[1])=="arcsin") {
                btransform <- function(x) {
                    rbind(sin(x[seq(nrow(x)-1),,drop=FALSE])^2,
                          x[nrow(x),,drop=FALSE])
                }
            }
        }
        res <- estimate(M,calc_diag,print=prfun,null=c(rep(p0,7),0),back.transform=btransform,...)
    }
    CI <- confint[1]
    if (exact) CI <- "exact"
    if (is.null(CI)) CI <- "wald"
    res <- structure(c(res, list(table=table,
                                 prop.table=table/sum(table),
                                 confint=CI,
                                 positive=positive,
                                 negative=negative,
                                 levels=dimnames(table)
                                 )),
                     class=c("diagtest","estimate"))
    res$call <- match.call()
    rownames(res$coefmat) <- gsub("\\[|\\]","",rownames(res$coefmat))
    names(res$coef) <- rownames(res$coefmat)
    return(res)
}

## Print method: call, CI type, observed/relative tables, and coefficient matrix
print.diagtest <- function(x,...) {
    cat("Call: "); print(x$call)
    cat("Confidence limits: ", x$confint,"\n",sep="")
    printline()
    printmany(x$table,x$prop.table,nspace=2,...)
    cat("\nPositive outcome: '", x$levels[[2]][x$positive],"'\n",sep="")
    ##cat("\tNegative outcome: '", x$levels[[2]][x$positive%%2+1],"'\n",sep="")
    printline()
    printCoefmat(x$coefmat[,c(-2)],na.print="",...)
    printline()
    cat("\n")
    cat("Prevalence: Prob( outcome+ )\n")
    cat("Test: Prob( test+ )\n")
    cat("Sensitivity (True positive rate): Prob( test+ | outcome+ )\n")
    cat("Specificity (True negative rate): Prob( test- | outcome- )\n")
    cat("Positive predictive value (Precision): Prob( outcome+ | test+ )\n")
    cat("Negative predictive value: Prob( outcome- | test- )\n")
    cat("Accuracy: Prob( correct classification )\n")
    if (x$confint=="exact") {
        cat("Homogeneity/Symmetry: Prob( outcome+, test- | discordant ), H0: p=0.5 \n")
    } else {
        cat("Homogeneity/Symmetry: Prob( outcome+ ) - Prob( test+ ), H0: p=0\n")
    }
    cat("\n")
}

## Summary method: drop function-valued/bookkeeping elements before display
summary.diagtest <- function(x,...) {
    x[c("iid","print","id","compare")] <- NULL
    return(x)
}
lava/R/matrices.R0000644000176200001440000004151113520655354013342 0ustar liggesusers###{{{

`matrices` <- function(x,...) UseMethod("matrices")

###{{{ matrices.lvm

mat.lvm <- function(x,ii=index(x),...) {
    A <- ii$A ## Matrix with fixed parameters and ones where parameters are free
    M1 <- ii$M1 ## Index of free and _unique_ regression parameters
    P <- ii$P ## Matrix with fixed variance parameters and ones where parameters are free
    P1 <- ii$P1 ## Index of free and _unique_ regression parameters
    constrain.par <- names(constrain(x))
    parval <- list()
    parBelongsTo <- list(mean=seq_len(ii$npar.mean),
                         reg=seq_len(ii$npar.reg)+ii$npar.mean,
                         cov=seq_len(ii$npar.var)+ii$npar.mean+ii$npar.reg,
                         epar=seq_len(ii$npar.ex)+with(ii,npar.reg+npar.var+npar.mean),
                         cpar=numeric())
    idxA <- which(M1==1)
    pidxA <- parBelongsTo$reg
    if (ii$npar.reg>0) {
        A[idxA] <- pidxA
        for (p in ii$parname) {
            idx <- which((x$par==p))
            newval <- A[idx[1]]
            attributes(newval)$reg.idx <- idx
            attributes(newval)$reg.tidx <- which(t(x$par==p))
            parval[[p]] <- newval
            if (length(idx)>1) {
                idxA <- c(idxA,idx[-1])
                pidxA <- c(pidxA,rep(A[idx[1]],length(idx)-1))
                A[idx] <- A[idx[1]]
            }
        } ## duplicate parameters
    }
    pars.var <- parBelongsTo$cov
    idxdiag <- (seq(ncol(P1))-1)*ncol(P1) + seq(ncol(P1))
    idxP <- idxdiag[which(P1[idxdiag]==1)]
    ## Assign parameter indices to the free diagonal variance entries
    pidxP <- pars.var[seq_len(length(idxP))]
    P[idxP] <- pidxP
    pars.off.diag <- pars.var
    if (length(pidxP)>0) {
        pars.off.diag <- pars.off.diag[-seq_len(length(pidxP))]
    }
    ## Walk the lower triangle and place free covariance parameters symmetrically
    counter <- 0
    if (length(pars.off.diag)>0 & ncol(P)>1)
        for (i in seq_len(ncol(P1)-1))
            for (j in seq(i+1,nrow(P1))) {
                if (ii$P1[j,i]!=0) {
                    counter <- counter+1
                    pos <- c(j+(i-1)*ncol(P1), i+(j-1)*ncol(P1))
                    P[j,i] <- P[i,j] <- pars.off.diag[counter]
                    idxP <- c(idxP,pos); pidxP <- c(pidxP,P[j,i],P[i,j])
                }
            }
    ## Named (labelled) covariance parameters: tie duplicates to the first occurrence,
    ## and share indices with regression parameters carrying the same label
    if (length(ii$covparname)>0)
        for (p in ii$covparname) {
            idx <- which(x$covpar==p)
            isOffDiag <- !(idx[1]%in%idxdiag)
            if (!(p%in%ii$parname)) {
                parval[[p]] <- P[idx[1]]
            }
            attributes(parval[[p]])$cov.idx <- idx
            if (length(idx)>1+isOffDiag) {
                P[idx[-seq(1+isOffDiag)]] <- parval[[p]]
            }
            if (ii$npar.reg>0 && p%in%ii$parname) {
                parBelongsTo$reg <- c(parBelongsTo$reg,p)
                idx.reg <- which(x$par==p)
                P[idx] <- A[idx.reg[1]]
                atr <- attributes(parval[[p]])
                parval[[p]] <- A[idx.reg[1]]
                attributes(parval[[p]]) <- atr
                idxP <- c(idxP,idx)
                pidxP <- c(pidxP,rep(P[idx[1]],length(idx)))
            } else {
                idxP <- c(idxP,idx[-seq(1+isOffDiag)])
                pidxP <- c(pidxP,rep(P[idx[1]],length(idx)-1-isOffDiag))
            }
        } ## duplicate parameters
    ## Mean parameters: fixed numeric values are filled in, free ones indexed
    idxM <- c()
    pidxM <- seq_len(ii$npar.mean)
    v <- NULL
    ## named <- sapply(x$mean, function(y) is.character(y) & !is.na(y))
    fixed <- sapply(x$mean, function(y) is.numeric(y) & !is.na(y))
    v <- rep(0,length(x$mean))
    names(v) <- colnames(P)
    if (ii$npar.mean>0) {
        idxM <- which(ii$v1==1)
        v[idxM] <- pidxM
    }
    if (any(fixed)) v[fixed] <- unlist(x$mean[fixed])
    for (p in ii$mparname) {
        idx <- which(x$mean==p)
        if (!(p%in%c(ii$parname,ii$covparname))) {
            if (length(idx)>1) {
                pidxM <- c(pidxM,rep(v[idx[1]],length(idx)-1))
                idxM <- c(idxM,idx[-1])
            }
            parval[[p]] <- v[idx[1]]
            v[idx] <- parval[[p]]
        }
        attributes(parval[[p]])$m.idx <- idx
        if (p %in% ii$covparname & !(p %in% ii$parname)) {
            parBelongsTo$cov <- c(parBelongsTo$cov,p)
            idx.2 <- which(x$covpar==p)
            v[idx] <- P[idx.2[1]]
            pidxM <- c(pidxM,rep(P[idx.2[1]],length(idx)))
            idxM <- c(idxM,idx)
        }
        if (p %in% ii$parname) {
            parBelongsTo$reg <- c(parBelongsTo$reg,p)
            idx.2 <- which(x$par==p)
            v[idx] <- A[idx.2[1]]
            pidxM <- c(pidxM,rep(A[idx.2[1]],length(idx)))
            idxM <- c(idxM,idx)
        }
    }
    ## Ex-parameters
    idxE <- NULL
    pidxE <- parBelongsTo$epar
    ## named <- sapply(x$exfix, function(y) is.character(y) & !is.na(y))
    fixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y))
    epar <- rep(0,length(x$exfix))
    names(epar) <- names(x$expar)
    if (!(ii$npar.ex==0)) {
        idxE <- which(ii$e1==1)
        epar[idxE] <- pidxE
    }
    if (any(fixed)) epar[fixed] <- unlist(x$exfix[fixed])
    for (p in ii$eparname) {
        idx <- which(x$exfix==p)
        if (!(p%in%c(ii$parname,ii$covparname,ii$mparname))) {
            if (length(idx)>1) {
                idxE <- c(idxE,idx[-1])
                pidxE <- c(pidxE,rep(epar[idx[1]],length(idx)-1))
            }
            parval[[p]] <- epar[idx[1]]
        }
        attributes(parval[[p]])$e.idx <- idx
        if (length(idx)>1) epar[idx[-1]] <- parval[[p]]
        ## Labels shared with covariance/regression/mean parameters inherit their index
        if (p %in% setdiff(ii$covparname,c(ii$parname,ii$mparname))) {
            parBelongsTo$cov <- c(parBelongsTo$cov,p)
            idx.2 <- which(x$covpar==p)
            epar[idx] <- P[idx.2[1]]
            pidxE <- c(pidxE,rep(P[idx.2[1]],length(idx)))
            idxE <- c(idxE,idx)
        }
        if (p %in% setdiff(ii$parname,ii$mparname)) {
            parBelongsTo$reg <- c(parBelongsTo$reg,p)
            idx.2 <- which(x$par==p)
            epar[idx] <- A[idx.2[1]]
            pidxE <- c(pidxE,rep(A[idx.2[1]],length(idx)))
            idxE <- c(idxE,idx)
        }
        if (p %in% ii$mparname) {
            parBelongsTo$mean <- c(parBelongsTo$mean,p)
            idx.2 <- which(x$mean==p)
            epar[idx] <- v[idx.2[1]]
            pidxE <- c(pidxE,rep(v[idx.2[1]],length(idx)))
            idxE <- c(idxE,idx)
        }
    }
    ee <- cbind(idxE,pidxE); rownames(ee) <- names(x$expar)[ee[,1]]
    ## Constrained...
    constrain.par <- names(constrain(x))
    constrain.idx <- NULL
    if (length(constrain.par)>0) {
        constrain.idx <- list()
        for (p in constrain.par) {
            reg.tidx <- reg.idx <- cov.idx <- m.idx <- e.idx <- NULL
            myc <- constrain(x)[[p]]
            xargs <- manifest(x)[na.omit(match(attributes(myc)$args,manifest(x)))]
            if (length(xargs)>0) {
                parval[xargs] <- 0
            }
            if (p%in%ii$parname.all) {
                reg.idx <- which(x$par==p)
                reg.tidx <- which(t(x$par==p))
            }
            if (p%in%ii$covparname.all) {
                cov.idx <- which(x$covpar==p)
            }
            if (p%in%ii$mparname.all) {
                m.idx <- which(x$mean==p)
            }
            if (p%in%ii$eparname.all) {
                e.idx <- which(x$exfix==p)
            }
            constrain.idx[[p]] <- list(reg.idx=reg.idx,reg.tidx=reg.tidx,cov.idx=cov.idx,m.idx=m.idx,e.idx=e.idx)
        }
    }
    parBelongsTo <- lapply(parBelongsTo,function(x) sort(unique(x)))
    return(list(mean=cbind(idxM,pidxM),
                reg=cbind(idxA,pidxA),
                cov=cbind(idxP,pidxP),
                epar=ee,
                parval=parval,
                constrain.idx=constrain.idx,
                parBelongsTo=parBelongsTo))
}

## Build model matrices (A,P,v,e) from a parameter vector using the
## precomputed index tables from mat.lvm (stored in index(x))
matrices.lvm <- function(x,pars,meanpar=NULL,epars=NULL,data=NULL,...) {
    ii <- index(x)
    ## pp: full parameter vector (mean slots filled below)
    pp <- c(rep(NA,ii$npar.mean),pars,epars)
    ##v <- NULL
    v <- ii$v0
    if (!is.null(meanpar) && length(meanpar)>0) {
        pp[seq(ii$npar.mean)] <- meanpar
        v[ii$mean[,1]] <- meanpar[ii$mean[,2]]
    }
    A <- ii$A
    A[ii$reg[,1]] <- pp[ii$reg[,2]]
    P <- ii$P
    P[ii$cov[,1]] <- pp[ii$cov[,2]]
    e <- NULL
    if (length(x$expar)>0) {
        e <- rep(0,length(x$expar))
        fixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y))
        if (any(fixed)) e[fixed] <- unlist(x$exfix[fixed])
        if (nrow(ii$epar)>0) e[ii$epar[,1]] <- pp[ii$epar[,2]]
        names(e) <- names(x$expar)
    }
    parval <- lapply(ii$parval,function(x) { res <- pp[x]; attributes(res) <- attributes(x); res })
    ## Constrained...
    constrain.par <- names(constrain(x))
    constrain.idx <- NULL
    cname <- constrainpar <- c()
    if (length(constrain.par)>0 && is.numeric(c(pars,meanpar))) {
        constrain.idx <- list()
        for (p in constrain.par) {
            cname <- c(cname,p)
            myc <- constrain(x)[[p]]
            xargs <- manifest(x)[na.omit(match(attributes(myc)$args,manifest(x)))]
            if (length(xargs)>0) {
                if (!is.null(data)) {
                    parval[xargs] <- (data)[xargs]
                } else parval[xargs] <- 0
            }
            val <- unlist(c(parval,constrainpar,x$mean,e)[attributes(myc)$args])
            cpar <- myc(val); constrainpar <- c(constrainpar,list(cpar)); names(constrainpar) <- cname
            if (p%in%ii$parname.all) {
                if (!is.null(val)) A[ii$constrain.idx[[p]]$reg.idx] <- cpar
            }
            if (p%in%ii$covparname.all) {
                if (!is.null(val)) P[ii$constrain.idx[[p]]$cov.idx] <- cpar
            }
            if (p%in%ii$mparname.all) {
                if (!is.null(val)) v[ii$constrain.idx[[p]]$m.idx] <- cpar
            }
            if (p%in%ii$eparname.all) {
                if (!is.null(val)) e[ii$constrain.idx[[p]]$e.idx] <- cpar
            }
        }
    }
    return(list(A=A, P=P, v=v, e=e,
                parval=parval,
                constrain.idx=ii$constrain.idx,
                constrainpar=constrainpar))
}

###}}} matrices.lvm

###{{{ matrices.multigroup

## Build matrices for each sub-model of a multigroup object
matrices.multigroup <- function(x, p, ...) {
    pp <- modelPar(x,p)
    res <- list()
    for (i in seq_len(x$ngroup))
        res <- c(res, list(matrices2(x$lvm[[i]],pp$p[[i]])))
    return(res)
}

###}}}

## Split a stacked parameter vector (mean, model, ex) and dispatch to matrices()
matrices2 <- function(x,p,...) {
    m0 <- p[seq_len(index(x)$npar.mean)]
    p0 <- p[with(index(x),seq_len(npar)+npar.mean)]
    e0 <- p[with(index(x),seq_len(npar.ex)+npar.mean+npar)]
    matrices(x,p0,m0,e0,...)
}

###{{{ matrices, to be superseeded by above definition

matrices.lvm <- function(x,pars,meanpar=NULL,epars=NULL,data=NULL,...)
{
    ## Obsolete implementation kept for reference: rebuilds A/P/v/e by scanning
    ## parameter labels on every call instead of using precomputed index tables.
    ii <- index(x)
    A <- ii$A ## Matrix with fixed parameters and ones where parameters are free
    M1 <- ii$M1 ## Index of free and _unique_ regression parameters
    P <- ii$P ## Matrix with fixed variance parameters and ones where parameters are free
    P1 <- ii$P1 ## Index of free and _unique_ regression parameters
    constrain.par <- names(constrain(x))
    parval <- list()
    if (ii$npar.reg>0) {
        A[which(M1==1)] <- pars[seq_len(ii$npar.reg)]
        for (p in ii$parname) {
            idx <- which((x$par==p))
            newval <- A[idx[1]]
            attributes(newval)$reg.idx <- idx
            attributes(newval)$reg.tidx <- which(t(x$par==p))
            parval[[p]] <- newval
            if (length(idx)>1) {
                A[idx[-1]] <- parval[[p]]
            }
        } ## duplicate parameters
    }
    ## Remaining parameters are variance/covariance parameters
    if (ii$npar.reg==0) {
        pars.var <- pars
    } else {
        pars.var <- pars[-seq_len(ii$npar.reg)]
    }
    diag(P)[ii$which.diag] <- pars.var[seq_along(ii$which.diag)]
    pars.off.diag <- pars.var
    if (length(ii$which.diag)>0) pars.off.diag <- pars.off.diag[-seq_along(ii$which.diag)]
    counter <- 0
    if (length(pars.off.diag)>0 & ncol(P)>1)
        for (i in seq_len(ncol(P1)-1))
            for (j in seq(i+1,nrow(P1))) {
                if (ii$P1[j,i]!=0) {
                    counter <- counter+1
                    P[j,i] <- pars.off.diag[counter]
                }
            }
    if (length(ii$covparname)>0)
        for (p in ii$covparname) {
            idx <- which(x$covpar==p)
            if (!(p%in%ii$parname)) {
                parval[[p]] <- P[idx[1]]
            }
            attributes(parval[[p]])$cov.idx <- idx
            if (length(idx)>1) {
                P[idx[-1]] <- parval[[p]]
            }
            if (ii$npar.reg>0 && p%in%ii$parname) {
                idx.reg <- which(x$par==p)
                P[idx] <- A[idx.reg[1]]
                atr <- attributes(parval[[p]])
                parval[[p]] <- A[idx.reg[1]] ###?????
                attributes(parval[[p]]) <- atr
            }
        } ## duplicate parameters
    P[upper.tri(P)] <- t(P)[upper.tri(P)] ## Symmetrize...
    ## Mean parameters: fill free slots from meanpar, fixed values from x$mean
    v <- NULL
    {
        ## named <- sapply(x$mean, function(y) is.character(y) & !is.na(y))
        fixed <- sapply(x$mean, function(y) is.numeric(y) & !is.na(y))
        v <- rep(0,length(x$mean))
        names(v) <- colnames(P)
        if (!(is.null(meanpar) | ii$npar.mean==0))
            v[ii$v1==1] <- meanpar
        if (any(fixed)) v[fixed] <- unlist(x$mean[fixed])
        for (p in ii$mparname) {
            idx <- which(x$mean==p)
            if (!(p%in%c(ii$parname,ii$covparname))) {
                parval[[p]] <- v[idx[1]]
            }
            attributes(parval[[p]])$m.idx <- idx
            if (length(idx)>1) v[idx[-1]] <- parval[[p]]
            if (p %in% ii$covparname & !(p %in% ii$parname)) {
                idx.2 <- which(x$covpar==p)
                v[idx] <- P[idx.2[1]]
            }
            if (p %in% ii$parname) {
                idx.2 <- which(x$par==p)
                v[idx] <- A[idx.2[1]]
            }
        }
    }
    ## Ex-parameters
    e <- NULL
    {
        ## named <- sapply(x$exfix, function(y) is.character(y) & !is.na(y))
        fixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y))
        e <- rep(0,length(x$exfix))
        names(e) <- names(x$expar)
        if (!(is.null(epars) | ii$npar.ex==0))
            e[which(ii$e1==1)] <- epars
        if (any(fixed)) e[fixed] <- unlist(x$exfix[fixed])
        for (p in ii$eparname) {
            idx <- which(x$exfix==p)
            if (!(p%in%c(ii$parname,ii$covparname,ii$mparname))) {
                parval[[p]] <- e[idx[1]]
            }
            attributes(parval[[p]])$e.idx <- idx
            if (length(idx)>1) e[idx[-1]] <- parval[[p]]
            if (p %in% setdiff(ii$covparname,c(ii$parname,ii$mparname))) {
                idx.2 <- which(x$covpar==p)
                e[idx] <- P[idx.2[1]]
            }
            if (p %in% setdiff(ii$parname,ii$mparname)) {
                idx.2 <- which(x$par==p)
                e[idx] <- A[idx.2[1]]
            }
            if (p %in% ii$mparname) {
                idx.2 <- which(x$mean==p)
                e[idx] <- v[idx.2[1]]
            }
        }
    }
    ## Constrained...
    constrain.idx <- NULL
    cname <- constrainpar <- c()
    if (length(constrain.par)>0 && is.numeric(c(pars,meanpar,e))) {
        constrain.idx <- list()
        for (p in constrain.par) {
            cname <- c(cname,p)
            reg.tidx <- reg.idx <- cov.idx <- m.idx <- e.idx <- NULL
            myc <- constrain(x)[[p]]
            xargs <- manifest(x)[na.omit(match(attributes(myc)$args,manifest(x)))]
            if (length(xargs)>0) {
                if (!is.null(data)) {
                    parval[xargs] <- (data)[xargs]
                } else parval[xargs] <- 0
            }
            val <- rbind(unlist(c(parval,constrainpar,x$mean,e)[attributes(myc)$args]))
            cpar <- myc(val); constrainpar <- c(constrainpar,list(cpar)); names(constrainpar) <- cname
            if (p%in%ii$parname.all) {
                reg.idx <- which(x$par==p)
                reg.tidx <- which(t(x$par==p))
                if (!is.null(val)) A[reg.idx] <- cpar##myc(val)
            }
            if (p%in%ii$covparname.all) {
                cov.idx <- which(x$covpar==p)
                if (!is.null(val)) P[cov.idx] <- cpar##myc(val)
            }
            if (p%in%ii$mparname.all) {
                m.idx <- which(x$mean==p)
                if (!is.null(val)) v[m.idx] <- cpar##myc(val)
            }
            if (p%in%ii$eparname.all) {
                e.idx <- which(x$exfix==p)
                if (!is.null(val)) e[e.idx] <- cpar##myc(val)
            }
            constrain.idx[[p]] <- list(reg.idx=reg.idx,reg.tidx=reg.tidx,cov.idx=cov.idx,m.idx=m.idx,e.idx=e.idx)
        }
    }
    if (x$index$sparse & !is.character(class(pars)[1])) {
        A <- as(A,"sparseMatrix")
        P <- as(P,"sparseMatrix")
        v <- as(v,"sparseMatrix")
    }
    return(list(A=A, P=P, v=v, e=e,
                parval=parval,
                constrain.idx=constrain.idx,
                constrainpar=constrainpar))
}

###}}} matrices Obsolete
lava/R/lmers.R0000644000176200001440000000472313520655354012661 0ustar liggesusers##v <- lmerplot(l1,varcomp=TRUE,colorkey=TRUE,lwd=0,col=rainbow(20))
lmerplot <- function(model,x,id,y,transform,re.form=NULL,varcomp=FALSE,colorbar=TRUE,mar=c(4,4,4,6),col,index=seq(50),...)
{
    if (varcomp) {
        ## Marginal correlation structure implied by the random effects:
        ## V = Z L Z' + sigma^2 I, shown as an image of cov2cor(V)
        Z <- lme4::getME(model,"Z")
        nn <- unlist(lapply(lme4::getME(model,"Ztlist"),nrow))
        ve <- lme4::getME(model,"sigma")^2
        vu <- varcomp(model,profile=FALSE)$varcomp
        L <- Matrix::Diagonal(sum(nn),rep(vu,nn))
        V <- Z%*%L%*%(Matrix::t(Z))
        Matrix::diag(V) <- Matrix::diag(V)+ve
        cV <- Matrix::cov2cor(V)
        if (!is.null(index)) { ## restrict display to a subset of observations
            index <- intersect(seq(nrow(cV)),index)
            cV <- cV[index,index,drop=FALSE]
        }
        if (colorbar) {
            opt <- par(mar=mar)
        }
        if (missing(col)) col <- rev(gray.colors(16,0,1))
        image(seq(nrow(cV)),seq(ncol(cV)),as.matrix(cV),xlab="",ylab="",col=col,zlim=c(0,1),...)
        if (colorbar) {
            uu <- devcoords()
            xrange <- c(uu$fig.x2,uu$dev.x2)
            xrange <- diff(xrange)/3*c(1,-1)+xrange
            yrange <- c(uu$fig.y1,uu$fig.y2)
            colorbar(direction="vertical",x.range=xrange,y.range=yrange,clut=col,values=seq(0,1,length.out=length(col)),srt=0,position=2)
            par(opt)
        }
        return(invisible(V))
    }
    ## Spaghetti plot of observed vs fitted trajectories per cluster
    if (missing(y)) y <- model.frame(model)[,1]
    yhat <- predict(model)
    if (!is.null(re.form)) ymean <- predict(model,re.form=re.form)
    if (!missing(transform)) {
        yhat <- transform(yhat)
        if (!is.null(re.form)) ymean <- transform(ymean)
        y <- transform(y)
    }
    plot(y ~ x, col=Col(id,0.3), pch=16,...)
    if (!is.null(re.form)) points(ymean ~ x, pch="-",cex=4);
    for (i in unique(id)) {
        idx <- which(id==i)
        lines(yhat[idx]~x[idx],col=i)
    }
}

## Extract fixed effects with Wald (and optionally profile) confidence limits
## together with the variance components of an lme4 fit
varcomp <- function(x,profile=TRUE,...) {
    cc <- cbind(lme4::fixef(x),diag(as.matrix(vcov(x)))^.5)
    cc <- cbind(cc,cc[,1]-qnorm(0.975)*cc[,2],cc[,1]+qnorm(0.975)*cc[,2],
                2*(1-pnorm(abs(cc[,1])/cc[,2])))
    pr <- NULL
    if (profile) pr <- confint(x)
    colnames(cc) <- c("Estimate","Std.Err","2.5%","97.5%","p-value")
    vc <- lme4::VarCorr(x)
    res <- structure(list(coef=lme4::fixef(x),
                          vcov=as.matrix(vcov(x)),
                          coefmat=cc,
                          confint=pr,
                          varcomp=vc[[1]][,],
                          residual=attributes(vc)$sc^2
                          ), class="estimate.lmer")
    res
}
lava/R/Col.R0000644000176200001440000000237713520655354012255 0ustar liggesusers## Install (or return) the package colour palette; returns the previous palette
mypal <- function(set=TRUE,...)
{
    oldpal <- palette()
    col <- c("black","darkblue","darkred","goldenrod","mediumpurple",
             "seagreen","aquamarine3","violetred1","salmon1",
             "lightgoldenrod1","darkorange2","firebrick1","violetred1",
             "gold")
    if (!set) return(col)
    palette(col)
    invisible(oldpal)
}

##' This function transforms a standard color (e.g. "red") into a
##' transparent RGB-color (i.e. alpha-blend<1).
##'
##' This only works for certain graphics devices (Cairo-X11 (x11 as of R>=2.7), quartz, pdf, ...).
##' @title Generate a transparent RGB color
##' @param col Color (numeric or character)
##' @param alpha Degree of transparency (0,1)
##' @param locate Choose colour (with mouse)
##' @return A character vector with elements of 7 or 9 characters, '"\#"'
##' followed by the red, blue, green and optionally alpha values in
##' hexadecimal (after rescaling to '0 ... 255').
##' @author Klaus K. Holst
##' @examples
##' plot(runif(1000),cex=runif(1000,0,4),col=Col(c("darkblue","orange"),0.5),pch=16)
##' @keywords color
##' @export
Col <- function(col,alpha=0.2,locate=0) {
    if (locate>0) return(colsel(locate))
    mapply(function(x,alpha) do.call(rgb,as.list(c(col2rgb(x)/255,alpha))),
           col,alpha)
}
lava/R/combine.R0000644000176200001440000000434713520655354013155 0ustar liggesusers
## Format a single model fit as aligned strings: estimate, optional SE,
## optional CI and a "p=..." p-value column
excoef <- function(x,digits=2,p.digits=3,format=FALSE,fun,se=FALSE,ci=TRUE,pvalue=TRUE,...) {
    cc <- coef(summary(x))
    res <- round(cbind(cc[,1:3,drop=FALSE],confint(x)),max(1,digits))
    pvalround <- round(cc[,4], max(1, p.digits))
    if (format) {
        res <- base::format(res,digits=digits,...)
    }
    ## p-values below the display resolution are shown as e.g. "p<0.001"
    pval <- paste0("p=",pvalround)
    pval[which(pvalround<10^(-p.digits))] <- paste0("p<0.",paste(rep("0",p.digits-1),collapse=""),"1")
    res <- cbind(res,pval)
    nc <- apply(res,2,function(x) max(nchar(x))-nchar(x))
    res2 <- c()
    for (i in seq(nrow(res))) {
        row <- paste0(if(res[i,1]>=0) " " else "", res[i,1],
                      paste(rep(" ",nc[i,1]),collapse=""),
                      if (res[i,1]<0) " ",
                      if (se) paste0(" (", res[i,2], ")",
                                     paste(rep(" ",nc[i,2]), collapse="")),
                      if (ci) paste0(" [", res[i,4], ";", res[i,5], "]",
                                     paste(rep(" ",nc[i,4]+nc[i,5]),collapse="")),
                      if (pvalue) paste0(" ", res[i,6]))
        res2 <- rbind(res2," "=row)
    }
    names(res2) <- names(coef(x))
    if (!missing(fun)) {
        res2 <- c(res2,fun(x))
    }
    res2
}

##' Report estimates across different models
##'
##' @title Report estimates across different models
##' @param x list of model objects
##' @param ... additional arguments to lower-level functions
##' @author Klaus K. Holst
##' @examples
##' data(serotonin)
##' m1 <- lm(cau ~ age*gene1 + age*gene2,data=serotonin)
##' m2 <- lm(cau ~ age + gene1,data=serotonin)
##' m3 <- lm(cau ~ age*gene2,data=serotonin)
##'
##' Combine(list(A=m1,B=m2,C=m3),fun=function(x)
##' c("_____"="",R2=" "%++%format(summary(x)$r.squared,digits=2)))
##' @export
Combine <- function(x,...) {
    ll <- lapply(x,excoef,...)
    nn <- lapply(ll,names)
    n0 <- unique(unlist(nn,use.names=FALSE))
    ## Align coefficients by name across models (NA where absent)
    res <- matrix(NA,ncol=length(ll),nrow=length(n0))
    colnames(res) <- seq(length(ll))
    rownames(res) <- n0
    for (i in seq(length(ll))) {
        res[match(names(ll[[i]]),n0),i] <- ll[[i]]
    }
    colnames(res) <- names(ll)
    class(res) <- c("Combine","matrix")
    return(res)
}

##' @export
print.Combine <- function(x,...) {
    print(as.table(x),...)
}
lava/R/wrapvec.R0000644000176200001440000000054513520655354013204 0ustar liggesusers##' Wrap vector
##'
##' Wrap vector
##' @param x Vector or integer
##' @param delta Shift
##' @param ... Additional parameters
##' @export
##' @examples
##' wrapvec(5,2)
wrapvec <- function(x,delta=0L,...)
{
    ## A single positive integer is expanded to the sequence 1:x before shifting
    if (length(x)==1 && floor(x)==x && x>0) {
        x <- seq(x)
    }
    if (delta==0L) return(x)
    ## Circular shift of the elements by 'delta' positions
    x[(seq_along(x)+delta-1L)%%length(x)+1L]
}
lava/R/napass0.R0000644000176200001440000000251213520655354013076 0ustar liggesusers
## Replace missing rows/positions of a vector, matrix or factor with a
## type-appropriate neutral value (FALSE / first factor level / 0 / first
## observed value for character vectors)
impute0 <- function(object,rows,idx,na.action=na.omit,value,...) {
    if (missing(rows) && missing(idx)) {
        df <- na.action(object,...)
        rows <- attr(df,"na.action")
    }
    ## obs1: index of the first complete observation, used to pick a fill value
    if (!missing(idx)) {
        obs1 <- setdiff(seq(length(object)),idx)[1]
    } else {
        obs1 <- setdiff(seq(NROW(object)),rows)[1]
    }
    if (missing(value)) {
        fobs <- object[obs1]
        if (is.logical(fobs)) value <- FALSE
        else if (is.character(fobs)) value <- fobs
        else if (is.factor(fobs)) value <- levels(fobs)[1]
        else value <- 0
    }
    if (!missing(idx)) {
        object[idx] <- value
        return(object)
    }
    if (is.matrix(object)) {
        object[rows,] <- value
    } else {
        object[rows] <- value
    }
    return(object)
}

##' @export
na.pass0 <- function(object,all=TRUE,na.action=na.omit, ...) {
    ## Fill in "zeros" in the design matrix where we have missing data
    df <- na.action(object,...)
    idx <- attr(df,"na.action")
    if (is.matrix(object) || is.vector(object)) {
        object <- impute0(object,rows=idx,...)
    } else {
        ## data.frame/list: impute each column separately
        for (i in seq_len(NCOL(object))) {
            object[[i]] <- impute0(object[[i]],rows=idx,...)
        }
    }
    ## Record the imputed row indices so downstream code can identify them
    if (!is.null(idx))
        return(structure(object,na.action=structure(idx,class="pass0")))
    return(object)
}
lava/R/zgetsas.R0000644000176200001440000000216713520655354013217 0ustar liggesusers##' Run SAS code like in the following:
##'
##' ODS CSVALL BODY="myest.csv";
##' proc nlmixed data=aj qpoints=2 dampstep=0.5;
##' ...
##' run;
##' ODS CSVALL Close;
##'
##' and read results into R with:
##'
##' \code{getsas("myest.csv","Parameter Estimates")}
##'
##' @title Read SAS output
##' @param infile file (csv file generated by ODS)
##' @param entry Name of entry to capture
##' @param \dots additional arguments to lower level functions
##' @author Klaus K. Holst
##' @export
##' @seealso getMplus
getSAS <- function(infile,entry="Parameter Estimates",...)
{
    con <- file(infile, blocking = FALSE)
    inp <- readLines(con)
    close(con)
    linestart <- 1; lineend <- length(inp)
    ## Locate the (unique) line containing the requested entry
    idx <- sapply(inp,function(x) length(grep(entry, x))>0)
    if (sum(idx)==1) {
        linestart <- which(idx)
        ## Capture lines up to the first blank line after the entry header
        ## NOTE(review): if no blank line follows, lineend stops one short of
        ## the last line — confirm whether the final line should be included
        for (i in seq(linestart,length(inp))) {
            lineend <- i-1
            if (inp[i]=="") break;
        }
    } else {
        stop("No match or duplicate entries!")
    }
    subinp <- inp[(linestart+1):(lineend)]
    con <- textConnection(subinp)
    res <- read.csv(con,header=TRUE)
    close(con)
    return(res)
}
lava/R/baptize.R0000644000176200001440000000312513520655354013170 0ustar liggesusers##' Generic method for labeling elements of an object
##'
##' @title Label elements of object
##' @param x Object
##' @param \dots Additional arguments
##' @author Klaus K. Holst
##' @export
`baptize` <- function(x,...) UseMethod("baptize")

##' @export
baptize.lvm <- function(x,labels,overwrite=FALSE,unique=FALSE,...) {
    ## Assign labels to every unlabelled free parameter (regression,
    ## covariance and mean) of the model; default labels are built from the
    ## variable names joined by lava.options()$symbols
    p <- describecoef(x, mean=TRUE)
    sym <- lava.options()$symbols
    MeanFix <- intfix(x)
    RegFix <- regfix(x)
    CovFix <- covfix(x)
    count <- 0
    curlab <- parlabels(x)
    ## NOTE(review): return value of coef(x) is discarded — possibly a
    ## leftover debug call or relied-upon side effect; confirm before removing
    coef(x)
    for (i in seq_along(p)) {
        p0 <- p[[i]]
        if (attributes(p0)$type=="reg") {
            curfix <- RegFix$values[p0[2],p0[1]]
            curlab <- RegFix$labels[p0[2],p0[1]]
            if (all(is.na(c(curfix,curlab))) | overwrite) {
                count <- count+1
                st <- ifelse(missing(labels),paste(p0[1],p0[2],sep=sym[1]),labels[count])
                regfix(x,from=p0[2],to=p0[1]) <- st
            }
        } else if (attributes(p0)$type=="cov") {
            curfix <- CovFix$values[p0[2],p0[1]]
            curlab <- CovFix$labels[p0[2],p0[1]]
            if (all(is.na(c(curfix,curlab))) | overwrite) {
                count <- count+1
                st <- ifelse(missing(labels),paste(p0[1],p0[2],sep=sym[2]),labels[count])
                covfix(x,p0[2],p0[1],exo=FALSE) <- st
            }
        } else { ## Mean parameter
            curfix <- MeanFix[[p0]]
            if (length(curfix)>0)
                if (is.na(curfix) | overwrite) {
                    count <- count+1
                    st <- ifelse(missing(labels),p0,labels[count])
                    intfix(x,p0) <- st
                }
        }
    }
    ## Unlabelled extra parameters are baptized with their own names
    if (index(x)$npar.ex>0) {
        x$exfix[is.na(x$exfix)] <- names(x$exfix)[is.na(x$exfix)]
        index(x) <- reindex(x)
    }
    return(x)
}
lava/R/vars.R0000644000176200001440000000653113520655354012511 0ustar liggesusers##' Extract variable names from latent variable model
##'
##' Extract exogenous variables (predictors), endogenous variables (outcomes),
##' latent variables (random effects), manifest (observed) variables from a
##' \code{lvm} object.
##'
##' \code{vars} returns all variables of the \code{lvm}-object including
##' manifest and latent variables. Similarly \code{manifest} and \code{latent}
##' returns the observed resp. latent variables of the model.
##' \code{exogenous} returns all manifest variables without parents, e.g.
##' covariates in the model, however the argument \code{latent=TRUE} can be used
##' to also include latent variables without parents in the result. By default
##' \code{lava} will not include the parameters of the exogenous variables in
##' the optimisation routine during estimation (likelihood of the remaining
##' observed variables conditional on the covariates), however this behaviour
##' can be altered via the assignment function \code{exogenous<-} telling
##' \code{lava} which subset of (valid) variables to condition on. Finally
##' \code{latent} returns a vector with the names of the latent variables in
##' \code{x}. The assignment function \code{latent<-} can be used to change the
##' latent status of variables in the model.
##'
##' @aliases vars vars.lvm vars.lvmfit latent latent<- latent.lvm latent<-.lvm
##' latent.lvmfit latent.multigroup manifest manifest.lvm manifest.lvmfit
##' manifest.multigroup exogenous exogenous<- exogenous.lvm exogenous<-.lvm
##' exogenous.lvmfit exogenous.multigroup endogenous endogenous.lvm
##' endogenous.lvmfit endogenous.multigroup
##' @usage
##'
##' vars(x,...)
##'
##' endogenous(x,...)
##'
##' exogenous(x,...)
##'
##' manifest(x,...)
##'
##' latent(x,...)
##'
##' \method{exogenous}{lvm}(x, xfree = TRUE,...) <- value
##'
##' \method{exogenous}{lvm}(x,variable,latent=FALSE,index=TRUE,...)
##'
##' \method{latent}{lvm}(x,clear=FALSE,...) <- value
##'
##' @param x \code{lvm}-object
##' @param variable list of variables to alter
##' @param latent Logical defining whether latent variables without parents
##' should be included in the result
##' @param index For internal use only
##' @param clear Logical indicating whether to add or remove latent variable
##' status
##' @param xfree For internal use only
##' @param value Formula or character vector of variable names.
##' @param \dots Additional arguments to be passed to the low level functions
##' @return Vector of variable names.
##' @author Klaus K. Holst
##' @seealso \code{\link{endogenous}}, \code{\link{manifest}},
##' \code{\link{latent}}, \code{\link{exogenous}}, \code{\link{vars}}
##' @keywords models regression
##' @examples
##'
##' g <- lvm(eta1 ~ x1+x2)
##' regression(g) <- c(y1,y2,y3) ~ eta1
##' latent(g) <- ~eta1
##' endogenous(g)
##' exogenous(g)
##' identical(latent(g), setdiff(vars(g),manifest(g)))
##'
##' @export
`vars` <- function(x,...) UseMethod("vars")

##' @export
`vars.graph` <- function(x,...) {
    graph::nodes(x)
}

##' @export
`vars.lvm` <- function(x,...) {
    ## Variable names are the column names of the adjacency matrix
    colnames(x$M)
}

##' @export
`vars.lvmfit` <- function(x,...) {
    vars(Model(x),...)
}

##' @export
vars.list <- function(x,...) {
    ## Union of variables across a list of models (order of first appearance)
    varlist <- c()
    for (i in seq_along(x)) {
        varlist <- c(varlist, vars(x[[i]]))
    }
    varlist <- unique(varlist)
    return(varlist)
}

##' @export
`vars.lm` <- function(x,...) {
    c(endogenous(x),exogenous(x))
}
lava/R/vcov.R0000644000176200001440000000074013520655354012507 0ustar liggesusers##' @export
vcov.lvmfit <- function(object,...) {
    ## Variance-covariance matrix of the parameter estimates with
    ## parameter names attached as dimnames
    res <- object$vcov
    if (inherits(object,"lvm.missing")) {
        resnames <- names(coef(object))
    } else {
        resnames <- coef(Model(object),fix=FALSE,
                         mean=object$control$meanstructure)
    }
    colnames(res) <- rownames(res) <- resnames
    return(res)
}

##' @export
vcov.multigroupfit <- function(object,...)
{ res <- object$vcov colnames(res) <- rownames(res) <- object$model$name return(res) } lava/R/binomial.rrw.R0000644000176200001440000000767613520655354014154 0ustar liggesusers##' Define constant risk difference or relative risk association for binary exposure ##' ##' Set up model as defined in Richardson, Robins and Wang (2017). ##' @param x model ##' @param response response variable (character or formula) ##' @param exposure exposure variable (character or formula) ##' @param target.model variable defining the linear predictor for the target model ##' @param nuisance.model variable defining the linear predictor for the nuisance model ##' @param exposure.model model for exposure (default binomial logit link) ##' @param ... additional arguments to lower level functions ##' @aliases binomial.rd binomial.rr ##' @export binomial.rd <- function(x,response,exposure, target.model,nuisance.model, exposure.model=binomial.lvm(),...) { binomial.rrw(x,response,exposure,target.model,nuisance.model,exposure.model,type="rd",...) } ##' @export binomial.rr <- function(x,response,exposure, target.model,nuisance.model, exposure.model=binomial.lvm(),...) { binomial.rrw(x,response,exposure,target.model,nuisance.model,exposure.model,type="rr",...) } binomial.rrw <- function(x, response, exposure, target.model, nuisance.model, exposure.model=binomial.lvm(), type="rd", ...) 
{ if (inherits(response,"formula")) { vars <- all.vars(response) if (length(vars)==1L) { response <- vars } else { yf <- getoutcome(response, sep="|") exposure <- attr(yf,"x")[[1]] if (length(attr(yf,"x"))>1) target.model <- attr(yf,"x")[[2]] if (length(attr(yf,"x"))>2) nuisance.model <- attr(yf,"x")[[3]] response <- yf[1] } } if (inherits(exposure,"formula")) exposure <- all.vars(exposure) if (inherits(target.model,"formula")) target.model <- all.vars(target.model) if (inherits(nuisance.model,"formula")) nuisance.model <- all.vars(nuisance.model) if (type=="rd") { val <- list(list(input=c(exposure,target.model,nuisance.model), fun=simulate.binomial.rd, type="Binomial regression (exposure | risk-difference | odds-product)")) } else { val <- list(list(input=c(exposure,target.model,nuisance.model), fun=simulate.binomial.rr, type="Binomial regression (exposure | relative-risk | odds-product)")) } if (is.null(distribution(x)[[exposure]])) distribution(x, exposure) <- binomial.lvm(link="logit") covariance(x,c(target.model,nuisance.model)) <- 0 distribution(x,exposure) <- exposure.model names(val) <- response x$attributes$multiple.inputs <- val return(x) } simulate.binomial.rd <- function(x,data,inputs,...) { exposure <- data[,inputs[1]] lp1 <- data[,inputs[2]] lp2 <- data[,inputs[3]] rd <- tanh(lp1) op <- exp(lp2) pp <- RD_OP(rd,op) pp <- pp[,1]*(1-exposure) + pp[,2]*exposure y <- rbinom(NROW(data), 1, pp) } simulate.binomial.rr <- function(x,data,inputs,...) 
{
    exposure <- data[,inputs[1]]
    lp1 <- data[,inputs[2]]
    lp2 <- data[,inputs[3]]
    rr <- exp(lp1)
    op <- exp(lp2)
    pp <- RR_OP(rr,op)
    pp <- pp[,1]*(1-exposure) + pp[,2]*exposure
    y <- rbinom(NROW(data), 1, pp)
}
##' @export
Identical <- function(x, y=1, tolerance=.Machine$double.eps^0.5) {
    ## TRUE when 'x' equals 'y' up to numerical 'tolerance'
    ## (uses Mod so complex input is handled as well).
    Mod(x - y) < tolerance
}
## Map (risk-difference, odds-product) to the pair of risks (p0, p1).
## Given rd = p1-p0 and op = p0*p1/((1-p0)*(1-p1)), p0 solves the quadratic
##     (1-op)*p0^2 + (rd + op*(2-rd))*p0 - op*(1-rd) = 0
## and we take the root lying in (0,1).
## NOTE(review): this definition and the comparison inside Identical() were
## garbled in the extracted source; the coefficients are reconstructed from
## the constraint above and agree with the op==1 fallback retained below
## (cf. Richardson, Robins & Wang, 2017).
RD_OP <- function(rd, op) {
    a <- 1 - op
    b <- rd + op*(2 - rd)
    p0 <- (-b + sqrt(b^2 + 4*a*op*(1 - rd)))/(2*a)
    ## The quadratic degenerates when op==1; use the closed-form root there.
    op1 <- which(sapply(op, function(x) Identical(x, 1)))
    if (length(op1) > 0) p0[op1] <- 0.5*(1 - rd[op1])
    p1 <- p0 + rd
    cbind(p0, p1)
}
## Map (relative-risk, odds-product) to the pair of risks (p0, p1).
## Given rr = p1/p0 and op = p0*p1/((1-p0)*(1-p1)), p0 solves
##     rr*(1-op)*p0^2 + op*(1+rr)*p0 - op = 0 (root in (0,1)).
RR_OP <- function(rr, op) {
    b <- op*(1+rr)
    a <- rr*(1-op)
    p0 <- (-b + sqrt(b^2 + 4*a*op))/(2*a)
    ## The quadratic degenerates when op==1; use the closed-form root there.
    op1 <- which(sapply(op, function(x) Identical(x, 1)))
    if (length(op1) > 0) p0[op1] <- 1/(1+rr)
    p1 <- p0*rr
    cbind(p0, p1)
}
lava/R/mvnmix.R0000644000176200001440000002316613520655354013057 0ustar liggesusers## Pack component means, covariances and mixture probabilities into one
## vector: theta = (mu_1,...,mu_k, vec(Sigma_1),...,vec(Sigma_k), p_1,...,p_{k-1}).
toTheta <- function(mu, Sigma, p) {
    c(as.vector(t(mu)), as.vector(t(Sigma)), p[-nrow(mu)])
}
## Unpack a theta vector into mu (k x D), Sigma (k rows, each vec(D x D))
## and the full probability vector p (length k, last entry 1-sum(rest)).
toPar <- function(theta, D, k) {
    mus <- Sigmas <- c()
    for (j in 1:k) {
        muj.idx <- (1+((j-1)*D)):(j*D)
        mus <- rbind(mus, theta[muj.idx])
        Sigmaj.start <- k*D+1 + ((j-1)*D^2)
        Sigmaj.idx <- Sigmaj.start + 1:D^2-1
        Sigmas <- rbind(Sigmas, theta[Sigmaj.idx])
    }
    ps <- tail(theta, k-1)
    ps <- c(ps, 1-sum(ps))
    list(mu=mus, Sigma=Sigmas, p=ps)
}
## Mean vector and covariance matrix of mixture component 'k' (all
## components when 'k' is missing), optionally at EM iteration 'iter'.
## NOTE(review): inside with(object,...) the symbol 'k' resolves to
## object$k, not the argument; the argument 'k' is only used for the
## final res[[k]] subset.
getMeanVar <- function(object, k, iter, ...) {
    if (missing(iter)) {
        pp <- with(object, toPar(pars, D, k))
    } else {
        pp <- with(object, toPar(thetas[iter,], D, k))
    }
    res <- list()
    for (i in 1:object$k) {
        mu <- pp$mu[i,]
        V <- matrix(pp$Sigma[i,], ncol=object$D)
        res <- c(res, list(list(mean=mu, var=V)))
    }
    if (missing(k)) return(res) else return(res[[k]])
}
#' Estimate mixture latent variable model
#'
#' Estimate mixture latent variable model
#'
#' Estimate parameters in a mixture of latent variable models via the EM
#' algorithm.
#'
#' @param data \code{data.frame}
#' @param k Number of mixture components
#' @param theta Optional starting values
#' @param steps Maximum number of iterations
#' @param tol Convergence tolerance of EM algorithm
#' @param lambda Regularisation parameter.
Added to diagonal of covariance matrix (to avoid #' singularities) #' @param mu Initial centres (if unspecified random centres will be chosen) #' @param silent Turn on/off output messages #' @param extra Extra debug information #' @param n.start Number of restarts #' @param init Function to choose initial centres #' @param ... Additional arguments parsed to lower-level functions #' @return A \code{mixture} object #' @author Klaus K. Holst #' @seealso \code{mixture} #' @keywords models regression #' @examples #' #' data(faithful) #' set.seed(1) #' M1 <- mvnmix(faithful[,"waiting",drop=FALSE],k=2) #' M2 <- mvnmix(faithful,k=2) #' if (interactive()) { #' par(mfrow=c(2,1)) #' plot(M1,col=c("orange","blue"),ylim=c(0,0.05)) #' plot(M2,col=c("orange","blue")) #' } #' #' @export mvnmix mvnmix <- function(data, k=2, theta, steps=500, tol=1e-16, lambda=0, mu=NULL, silent=TRUE, extra=FALSE, n.start=1, init="kmpp", ... ) { if (k<2) stop("Only one cluster") ## theta = (mu1, ..., muk, Sigma1, ..., Sigmak, p1, ..., p[k-1]) if (is.vector(data)) data <- matrix(data,ncol=1) if (is.data.frame(data)) data <- as.matrix(data) i <- 0 E <- tol D <- ncol(data) yunique <- unique(data) if (n.start>1) extra <- FALSE logllmax <- -Inf for (ii in seq(n.start)) { if (ii>1) mu <- NULL if (missing(theta)) { mus <- c() if (!is.null(mu)) { mus <- mu } else { if (!exists(init)) { ## Random select centres idx <- sample(NROW(data),k) } else { idx <- do.call(init, list(data, k)) } mus <- unlist(lapply(idx, function(i) cbind(data)[i,,drop=TRUE])) } Sigmas <- rep(as.vector(cov(data)),k) ps <- rep(1/k,k-1) theta <- c(mus,Sigmas,ps) } theta0 <- theta if (!silent) cat(i,":\t", paste(formatC(theta0),collapse=" "),"\n") thetas <- members <- c() while ((i=tol)) { if (extra) thetas <- rbind(thetas, theta) pp <- toPar(theta,D,k) mus <- pp$mu; Sigmas <- pp$Sigma; ps <- pp$p ## E(xpectation step) lphis <- c() for (j in 1:k) { C <- matrix(Sigmas[j,],ncol=D); diag(C) <- diag(C)+lambda ## Assure C is not singular lphis 
<- cbind(lphis, lava::dmvn0(data,mus[j,],C,log=TRUE)) } gammas <- c() ## denom <- t(ps%*%t(phis)) for (j in 1:k) { gammas <- cbind(gammas, log(ps[j]) + lphis[,j]) } ## llmax <- apply(gammas,1,max) ## for (j in 1:k) { ## gammas[,j] <- gammas[,j]-llmax ## } gammas <- exp(gammas) # denom <- rowSums(gammas) for (j in 1:k) gammas[,j] <- gammas[,j]/denom # Posterior sqrtgammas <- sqrt(gammas) ## M(aximization step) mus.new <- c() Sigmas.new <- c() for (j in 1:k) { mu.new <- colSums(gammas[,j]*data)/sum(gammas[,j]) mus.new <- rbind(mus.new, mu.new) wcy <- sqrtgammas[,j]*t(t(data)-mus.new[j,]) Sigma.new <- t(wcy)%*%wcy/sum(gammas[,j]) Sigmas.new <- rbind(Sigmas.new, as.vector(Sigma.new)) }; ps.new <- colMeans(gammas) theta.old <- theta if (extra) members <- cbind(members, apply(gammas,1,function(x) order(x,decreasing=TRUE)[1])) theta <- toTheta(mus.new,Sigmas.new,ps.new) E <- sum((theta-theta.old)^2) i <- i+1 iter <- i if (!silent) cat(i,":\t", paste(formatC(theta),collapse=" "), ",\t\te=",formatC(E), "\n",sep="") } if (n.start>1) { logll <- sum(log(denom)) if (logll>logllmax) { logllmax <- logll theta.keep <- theta gammas.keep <- gammas E.keep <- E } } } if (n.start>1) { theta <- theta.keep gammas <- gammas.keep E <- E.keep } myvars <- colnames(data) if (is.null(myvars)) myvars <- colnames(data) <- paste("y",1:NCOL(data),sep="") data <- as.data.frame(data) m <- lvm(myvars,silent=TRUE); m <- covariance(m,myvars,pairwise=TRUE) models <- datas <- c() for (i in 1:k) { models <- c(models, list(m)) datas <- c(datas, list(data)) } membership <- apply(gammas,1,function(x) order(x,decreasing=TRUE)[1]) res <- list(pars=theta, thetas=thetas , gammas=gammas, member=membership, members=members, k=k, D=D, data=data, E=E, prob=rbind(colMeans(gammas)), iter=iter, models=models, multigroup=multigroup(models,datas) ) class(res) <- c("mvn.mixture","lvm.mixture") parpos <- c() npar1 <- D+D*(D-1)/2 for (i in 1:k) parpos <- c(parpos, list(c(seq_len(D)+(i-1)*D, k*D + seq_len(npar1)+ 
(i-1)*(npar1)))) theta <- c(unlist(lapply(getMeanVar(res),function(x) x$mean)), unlist(lapply(getMeanVar(res),function(x) c(diag(x$var),unlist(x$var[upper.tri(x$var)]))))) res$theta <- rbind(theta) res$parpos <- parpos res$opt <- list(estimate=theta) res$vcov <- solve(information(res,type="E")) return(res) } ##' @export print.mvn.mixture <- function(x,...) { par <- toPar(x$pars,x$D,x$k) space <- paste(rep(" ",12),collapse="") for (i in 1:x$k) { cat("Cluster ",i," (p=",formatC(par$p[i]),"):\n",sep="") cat(rep("-",50),"\n",sep="") cat("\tcenter = \n ",space,paste(formatC(par$mu[i,]),collapse=" "),sep="") cat("\n\tvariance = \t"); V <- matrix(formatC(par$Sigma[i,],flag=" "),ncol=x$D); colnames(V) <- rep("",x$D); rownames(V) <- rep(space,x$D) print(V, quote=FALSE) cat("\n") } invisible(par) } ##' @export plot.mvn.mixture <- function(x, label=2,iter,col,alpha=0.5,nonpar=TRUE,...) { opts <- list(...) ## cols <- opts$col; if(is.null(cols)) cols <- 1:gmfit$k if (missing(col)) col <- 1:x$k lwd <- opts$lwd; if (is.null(lwd)) lwd <- 2 cex <- opts$cex; if(is.null(cex)) cex <- 0.9 y <- as.matrix(x$data) if (is.vector(y)) y <- matrix(y,ncol=1) pp <- getMeanVar(x,iter=iter) D <- ncol(y) pi <- colSums(x$gammas)/nrow(x$gammas) if (D==1) { if (nonpar) plot(density(as.vector(y)), main="", ...) else plot(density(y), main="", type="n", col="lightgray", ...) if (!is.null(label)) { for (i in 1:x$k) { rug(y[x$member==i], col=col[i]) } } else rug(y) cc <- par("usr") { mycurve <- function(xx) { a <- 0; for (i in 1:(x$k)) a <- a+pi[i]*dnorm(xx,pp[[i]]$mean,sqrt(pp[[i]]$var[1])) a } curve(mycurve, from=cc[1], to=cc[2], add=TRUE, lwd=lwd,...) } } if (D==2) { if (!requireNamespace("ellipse")) stop("ellipse required") plot(y, type="n", ...) 
for (i in 1:x$k) { C1 <- with(pp[[i]], ellipse::ellipse(var, centre=mean)) lines(C1, col=col[i], lwd=lwd) } if (!is.null(label)) { for (i in 1:x$k) { if (label==1 | missing(iter)) { pot <- y[which(x$member==i),] } else { pot <- y[which(x$members[,iter]==i),] } points(pot, cex=cex, pch=16, col=Col(col[i],alpha)) } } else points(y, cex=cex) } if (D==3) { if (!requireNamespace("rgl")) stop("rgl required") rgl::plot3d(y, type="n", box=FALSE) for (i in 1:x$k) { pot <- y[which(x$member==i),] rgl::plot3d(pot, type="s", radius=0.1, col=col[i], add=TRUE) ee <- rgl::ellipse3d(pp[[i]]$var,centre=pp[[i]]$mean) rgl::plot3d(ee, col=col[i], alpha=alpha, add = TRUE) } } } ##' @export sim.mvn.mixture <- function(x,n,...) { pars <- getMeanVar(x) K <- length(pars) p <- tail(coef(x),K-1); p <- c(p,1-sum(p)) ng <- as.vector(rmultinom(1,n,p)) res <- c() for (i in seq(K)) { res <- rbind(res, mets::rmvn(ng[i],pars[[i]]$mean,pars[[i]]$var)) } return(res) } lava/R/deriv.R0000644000176200001440000001712713520655354012652 0ustar liggesusers##' @export deriv.function <- function(expr, parameter_, ..., parameter_.increment =.Machine$double.xmin) { p <- length(parameter_) f0 <- expr(parameter_) z0 <- numeric(p) res <- matrix(NA,nrow=length(f0),ncol=p) for (i in seq(p)) { z <- z0; z[i] <- parameter_.increment*1i res[,i] <- Im(expr(parameter_+z,...))/parameter_.increment } res } ##' @export deriv.lvm <- function(expr, p, mom, conditional=FALSE, meanpar=TRUE, mu=NULL, S=NULL, second=FALSE, zeroones=FALSE, all=!missing(mom),...) { if (missing(mom) & !missing(p)) { mom <- modelVar(expr,p,conditional=conditional,...) 
all <- TRUE if (mom$npar==length(p)) meanpar <- NULL } ii <- index(expr) npar.total <- npar <- ii$npar; npar.reg <- ii$npar.reg npar.mean <- ifelse(is.null(meanpar),0,ii$npar.mean) npar.ex <- ii$npar.ex meanpar <- seq_len(npar.mean) nn <- expr$parpos if (is.null(nn)) { nn <- matrices2(expr, seq_len(npar+npar.mean+npar.ex)); nn$A[ii$M0!=1] <- 0 nn$P[ii$P0!=1] <- 0 nn$v[ii$v0!=1] <- 0 nn$e[ii$e0!=1] <- 0 } regr.idx <- seq_len(npar.reg) + npar.mean var.idx <- seq_len(npar-npar.reg) + (npar.mean + npar.reg) mean.idx <- seq_len(npar.mean) npar.total <- npar+length(mean.idx) epar.idx <- seq_len(npar.ex)+npar.total npar.total <- npar.total+length(epar.idx) if (zeroones | is.null(ii$dA)) { dimA <- length(ii$A) if (ii$sparse) { ## Not used yet... if (!requireNamespace("Matrix",quietly=TRUE)) stop("package Matrix not available") dP <- dA <- Matrix::Matrix(0, nrow=dimA, ncol=npar.total) } else { dP <- dA <- matrix(0, nrow=dimA, ncol=npar.total) } if (npar.reg>0) { dA[,regr.idx] <- sapply(regr.idx, function(i) izero(which(t(nn$A)==i),nrow(dA)) ) } if (npar>npar.reg) { dP[,var.idx] <- sapply(var.idx, function(i) izero(which(nn$P==i),nrow(dA)) ) } res <- list(dA=dA, dP=dP) { if (ii$sparse) { dv <- Matrix::Matrix(0, nrow=length(expr$mean), ncol=npar.total) } else { dv <- matrix(0, nrow=length(expr$mean), ncol=npar.total) } if (!is.null(meanpar) & npar.mean>0) dv[,mean.idx] <- sapply(mean.idx, function(i) izero(which(nn$v==i),length(expr$mean)) ) res <- c(res, list(dv=dv)) } } else { res <- with(ii, list(dA=dA, dP=dP, dv=dv)) for (pp in nn$parval) { res$dP[attributes(pp)$cov.idx,pp] <- 1 res$dv[attributes(pp)$m.idx,pp] <- 1 } } if (!all) return(res) ## Non-linear constraints: cname <- constrainpar <- c() if (!missing(p) && length(index(expr)$constrain.par)>0) { for (pp in index(expr)$constrain.par) { myc <- constrain(expr)[[pp]] if (!is.null(myc)) { parval <- mom$parval vals <- c(parval,constrainpar,mom$v,mom$e)[attributes(myc)$args] fval <- try(myc(unlist(vals)),silent=TRUE) fmat 
<- inherits(fval,"try-error") if (fmat) fval <- myc(rbind(unlist(vals))) if (!is.null(attributes(fval)$grad)) { if (fmat) { Gr <- attributes(fval)$grad(rbind(unlist(vals))) } else { Gr <- attributes(fval)$grad(unlist(vals)) } } else { if (fmat) { Gr <- as.numeric(numDeriv::jacobian(myc, rbind(unlist(vals)))) } else { Gr <- as.numeric(numDeriv::jacobian(myc, unlist(vals))) } } mat.idx <- mom$constrain.idx[[pp]] cname <- c(cname,pp) attributes(fval)$grad <- Gr attributes(fval)$vals <- vals constrainpar <- c(constrainpar,list(fval)); names(constrainpar) <- cname for (jj in seq_len(length(vals))) { allpars <- c(nn$A[attributes(vals[[jj]])$reg.idx[1]], nn$P[attributes(vals[[jj]])$cov.idx[1]], nn$v[attributes(vals[[jj]])$m.idx[1]], nn$e[attributes(vals[[jj]])$e.idx[1]] ) if (!is.null(mat.idx$cov.idx)) res$dP[mat.idx$cov.idx,allpars] <- Gr[jj] if (!is.null(mat.idx$reg.idx)) res$dA[mat.idx$reg.tidx,allpars] <- Gr[jj] if (!is.null(res$dv) & !is.null(mat.idx$m.idx)) res$dv[mat.idx$m.idx,allpars] <- Gr[jj] } } } } if (is.null(ii$Kkk)) { nobs <- nrow(mom$J) ii$Ik <- diag(nrow=nobs) ii$Im <- diag(nrow=ncol(ii$A)) ## ii$Kkk <- commutation(nobs,sparse=FALSE) } K <- nobs ## if (N>10) { if (!lava.options()$devel) { dG <- with(mom, kronprod(t(IAi),G,res$dA)) G3 <- with(mom, kronprod(G,G,res$dP)) GP <- with(mom,G%*%P) G1 <- with(mom, kronprod(GP,ii$Ik,dG)) G2 <- G1[as.vector(matrix(seq_len(K^2),K,byrow=TRUE)),] dS <- G1+G2+G3 } else { dG <- with(mom, kronprod(t(IAi),G,res$dA[,ii$parBelongsTo$reg,drop=FALSE])) G3 <- with(mom, kronprod(G,G,res$dP[,ii$parBelongsTo$cov,drop=FALSE])) GP <- with(mom,G%*%P) G1 <- with(mom, kronprod(GP,ii$Ik,dG)) G2 <- G1[as.vector(matrix(seq_len(K^2),K,byrow=TRUE)),] dS <- matrix(0,nrow=nrow(G1),ncol=ncol(res$dA)) dS[,ii$parBelongsTo$reg] <- G1+G2; dS[,ii$parBelongsTo$cov] <- G3 } res <- c(res, list(dG=dG, dS=dS)) if (!is.null(mom$v)) { if (lava.options()$devel) { dG <- with(mom, kronprod(t(IAi),G,res$dA[,with(ii$parBelongsTo,c(mean,reg)),drop=FALSE])) } 
dxi <- with(mom, kronprod(rbind(v),dG)) ## with(mom, kronprod(rbind(v),ii$Ik,dG)) if (is.matrix(mom$v) && nrow(mom$v)>1) { ## reorder k <- nrow(dxi)/nrow(mom$v) idx0 <- seq(nrow(mom$v))*k-k+1 idx <- unlist(lapply(1:k,function(x) idx0+x-1)) dxi <- dxi[idx,,drop=FALSE] } if (!is.null(res$dv)) { if (!(lava.options()$devel)) { if (is.matrix(mom$v) && nrow(mom$v)>1) { dxi <- dxi + (mom$G%*%res$dv)%x%cbind(rep(1,nrow(mom$v))) } else { dxi <- dxi+ mom$G%*%res$dv } } else { dxi <- dxi+ mom$G%*%res$dv[,with(ii$parBelongsTo,c(mean,reg))] } } res <- c(res, list(dxi=dxi)) if (!is.null(mu)) { muv <- rbind(mu-mom$xi) dT <- suppressMessages(-t(ii$Ik%x%muv + muv%x%ii$Ik) %*% dxi) res <- c(res, list(dT=dT)) } } if (second) { k <- nrow(ii$A) K <- ii$Kkk ## commutation(k,k) I <- ii$Ik ## diag(k) I2 <- diag(nrow=k*k) d2S1 <- t( (I %x% K %x% I) %*% ( ( I2 %x% as.vector(mom$G) )%*% dG + ( as.vector(mom$P) %x% I2 )%*% (dP) ) %*% t(dG) ) d2S2 <- K%*%d2S1 d2S3 <- t( (I %x% K %x% I) %*% ( ( I2 %x% as.vector(mom$G) )%*% dG + ( as.vector(mom$G) %x% I2 )%*% dG ) %*% t(dP) ) vec.d2S <- d2S1+d2S2+d2S3 res <- c(res, list(d2vecS=vec.d2S)) } return(res) } lava/R/plot.sim.R0000644000176200001440000003433513520655354013306 0ustar liggesusers##' @export ##' @export density.sim density.sim <- function(x,...,plot.type="single") { plot.sim(x,...,scatter.plot=FALSE,plot.type=plot.type) } ##' Plot method for simulation 'sim' objects ##' ##' Density and scatter plots ##' @examples ##' n <- 1000 ##' val <- cbind(est1=rnorm(n,sd=1),est2=rnorm(n,sd=0.2),est3=rnorm(n,1,sd=0.5), ##' sd1=runif(n,0.8,1.2),sd2=runif(n,0.1,0.3),sd3=runif(n,0.25,0.75)) ##' ##' plot.sim(val,estimate=c(1,2),true=c(0,0),se=c(4,5),equal=TRUE,scatter.plot=TRUE) ##' plot.sim(val,estimate=c(1,3),true=c(0,1),se=c(4,6),xlim=c(-3,3), ##' scatter.ylim=c(-3,3),scatter.plot=TRUE) ##' plot.sim(val,estimate=c(1,2),true=c(0,0),se=c(4,5),equal=TRUE, ##' plot.type="single",scatter.plot=TRUE) ##' 
plot.sim(val,estimate=c(1),se=c(4,5,6),plot.type="single",scatter.plot=TRUE) ##' plot.sim(val,estimate=c(1,2,3),equal=TRUE,scatter.plot=TRUE) ##' plot.sim(val,estimate=c(1,2,3),equal=TRUE,byrow=TRUE,scatter.plot=TRUE) ##' plot.sim(val,estimate=c(1,2,3),plot.type="single",scatter.plot=TRUE) ##' plot.sim(val,estimate=1,se=c(3,4,5),plot.type="single",scatter.plot=TRUE) ##' ##' density.sim(val,estimate=c(1,2,3),density=c(0,10,10),angle=c(0,45,-45)) ##' @aliases density.sim plot.sim ##' @export ##' @export plot.sim ##' @param x sim object ##' @param estimate columns with estimates ##' @param se columns with standard error estimates ##' @param true (optional) vector of true parameter values ##' @param names (optional) names of estimates ##' @param auto.layout Auto layout (default TRUE) ##' @param byrow Add new plots to layout by row ##' @param type plot type ##' @param ask if TRUE user is asked for input, before a new figure is drawn ##' @param col colour (for each estimate) ##' @param pch plot symbol ##' @param cex point size ##' @param lty line type ##' @param lwd line width ##' @param legend legend ##' @param legendpos legend position ##' @param cex.legend size of legend text ##' @param plot.type 'single' or 'multiple' (default) ##' @param polygon if TRUE fill the density estimates with colour ##' @param density if non-zero add shading lines to polygon ##' @param angle shading lines angle of polygon ##' @param cex.axis Font size on axis ##' @param alpha Semi-transparent level (1: non-transparent, 0: full) ##' @param main Main title ##' @param cex.main Size of title font ##' @param equal Same x-axis and y-axis for all plots ##' @param delta Controls the amount of space around axis limits ##' @param ylim y-axis limits ##' @param xlim x-axis limits ##' @param ylab y axis label ##' @param xlab x axis label ##' @param rug if TRUE add rug representation of data to x-axis ##' @param rug.alpha rug semi-transparency level ##' @param line.col line colour (running mean, only for 
scatter plots) ##' @param line.lwd line width (running mean, only for scatter plots) ##' @param line.lty line type (running mean, only for scatter plots) ##' @param line.alpha line transparency ##' @param scatter.ylab y label for density plots ##' @param scatter.ylim y-axis limits for density plots ##' @param scatter.xlim x-axis limits for density plots ##' @param scatter.alpha semi-transparency of scatter plot ##' @param scatter.col scatter plot colour ##' @param border border colour of density estimates ##' @param true.lty true parameter estimate line type ##' @param true.col true parameter colour ##' @param true.lwd true parameter line width ##' @param density.plot if TRUE add density plot ##' @param scatter.plot if TRUE add scatter plot ##' @param running.mean if TRUE add running average estimate to scatter plot ##' @param ... additional arguments to lower level functions plot.sim <- function(x,estimate,se=NULL,true=NULL, names=NULL, auto.layout=TRUE, byrow=FALSE, type="p", ask=grDevices::dev.interactive(), col=c("gray60","orange","darkblue","seagreen","darkred"), pch=16,cex=0.5,lty=1,lwd=0.3, legend, legendpos="topleft", cex.legend=0.8, plot.type=c("multiple","single"), polygon=TRUE, density=0, angle=-45, cex.axis=0.8, alpha=0.2, main, cex.main=1, equal=FALSE, delta=1.15, ylim=NULL, xlim=NULL, ylab="", xlab="", rug=TRUE, rug.alpha=0.5, line.col=scatter.col, line.lwd=1, line.lty=1, line.alpha=1, scatter.ylab="Estimate", scatter.ylim=NULL, scatter.xlim=NULL, scatter.alpha=0.5, scatter.col=col, border=col, true.lty=2,true.col="gray70",true.lwd=1.2, density.plot=TRUE, scatter.plot=FALSE, running.mean=scatter.plot, ...) { if (missing(estimate)) { estimate <- seq(ncol(x)) } if (is.null(estimate)) { av <- apply(x[,drop=FALSE],2,function(z) cumsum(z)/seq(length(z))) graphics::matplot(x,type="p",pch=pch, cex=cex, col=col,...) graphics::matlines(av,type="l",col=col,lty=lty,...) if (!is.null(true)) abline(h=true,lty=true.lty,...) 
if (missing(legend)) legend <- colnames(x) if (!is.null(legend)) graphics::legend(legendpos,legend=legend,bg="white", col=scatter.col,lty=lty,pch=pch,...) return(invisible(NULL)) } if (is.character(estimate)) { estimate <- match(estimate,colnames(x)) } K <- length(estimate) est <- tru <- c() if (length(se)>0) { if (K==1 && !is.list(se)) se <- list(se) else se <- as.list(se) } else { est <- estimate; tru <- true } for (i in seq_along(estimate)) { est <- c(est,list(rep(estimate[i],length(se[[i]])))) if (!is.null(true)) tru <- c(tru,list(rep(true[i],length(se[[i]])))) } if (length(se)>0) { for (i in seq_along(se)) { if (is.character(se[[i]])) se[[i]] <- match(se[[i]],colnames(x)) } } ss <- summary.sim(x,estimate=unlist(est),se=unlist(se),true=unlist(tru),names=names) oldpar <- NULL on.exit({ par(oldpar) return(invisible(ss)) }) single <- tolower(plot.type[1])=="single" if (auto.layout) { nc <- (scatter.plot || running.mean) + density.plot nr <- min(6,K) if (single) nr <- 1 oma.multi = c(2, 0, 2, 0) mar.multi = c(1.5, 4.1, 1, 1) oldpar <- par(mar=mar.multi, oma=oma.multi, cex.axis=cex.axis,las=1, ask=FALSE) if (byrow) { par(mfrow=c(nr,nc)) } else { par(mfcol=c(nc,nr)) } } dys <- c() maxdy <- 0 if (density.plot) for (i in seq(K)) { ii <- estimate[i] y <- as.vector(x[,ii]) dy <- stats::density(y,...) 
dys <- c(dys,list(dy)) maxdy <- max(maxdy,dy$y) } if (equal || single) { if (is.null(scatter.ylim)) { rg <- range(x[,estimate]) rg <- rg+c(-1,1)*abs(diff(rg)*(delta-1)) scatter.ylim <- rep(list(rg),K) } if (density.plot) { if (is.null(ylim)) ylim <- rep(list(c(0,maxdy*delta)),K) if (is.null(xlim)) xlim <- scatter.ylim } } if (!is.null(ylim)) { if (!is.list(ylim)) ylim <- list(ylim) ylim <- rep(ylim,length.out=K) } ylab <- rep(ylab,length.out=K) if (!is.null(scatter.ylim)) { if (!is.list(scatter.ylim)) scatter.ylim <- list(scatter.ylim) scatter.ylim <- rep(scatter.ylim,length.out=K) } if (!is.null(xlim)) { if (!is.list(xlim)) xlim <- list(xlim) xlim <- rep(xlim,length.out=K) } if (missing(main)) { main <- NULL if (!missing(names)) main <- names else if (K>1 && !single) main <- colnames(ss) } if (!is.null(main)) main <- rep(main,length.out=K) if (missing(lty)) { lty <- rep(1,K) if (single || !polygon) { lty <- 1:20 } } my.scatter.sim <- function(i,add=FALSE,colors,...) { ii <- estimate[i] if (!missing(colors)) { scatter.col <- line.col <- true.col <- colors[1] } y <- as.vector(x[,ii]) args <- list(y,ylab=scatter.ylab[i],col=Col(scatter.col[1],scatter.alpha),cex=cex,pch=pch,type=type) if (!is.null(scatter.ylim)) args <- c(args,list(ylim=scatter.ylim[[i]])) if (scatter.plot) { if (!add) { do.call(graphics::plot,args) } else { do.call(graphics::points,args) } } if (running.mean) { lines(cumsum(y)/seq_along(y),col=line.col[1],lwd=line.lwd,lty=line.lty) if (!is.null(true)) abline(h=true[i],lty=true.lty,col=true.col[1],lwd=true.lwd) } } my.density.sim <- function(i,add=FALSE,colors, alphas=alpha, auto.legend=TRUE, densities=NULL, angles=angle, ...) 
{ ii <- estimate[i] y <- as.vector(x[,ii]) if (!missing(colors)) { col <- border <- colors col <- true.col <- colors } if (density.plot) { dy <- stats::density(y) if (is.null(ylim)) { density.ylim0 <- c(0,max(dy$y)*delta) } else { density.ylim0 <- ylim[[i]] } if (is.null(xlim)) { density.xlim0 <- range(dy$x) } else { density.xlim0 <- xlim[[i]] } if (!add) graphics::plot(0,0,type="n",main="",ylab=ylab,xlab=xlab,ylim=density.ylim0,xlim=density.xlim0) if (polygon) { with(dy, graphics::polygon(c(x,rev(x)),c(y,rep(0,length(y))),col=Col(col[1],alpha=alphas[1]),border=NA,density=densities[1],angle=angles[1])) if (!is.null(border)) with(dy, lines(x,y,col=border[1],lty=lty[1],lwd=lwd[1])) } else { graphics::lines(dy,main="",lty=lty[1],col=col[1],lwd=lwd[1]) } if (rug) graphics::rug(y,col=Col(col[1],rug.alpha[1])) if (!is.null(main) && !(running.mean || scatter.plot)) { title(main[i],cex.main=cex.main) } if (!is.null(true)) { abline(v=true[i],lty=true.lty,col=true.col,lwd=true.lwd) } if (!is.null(se)) { se.pos <- match(se[[i]],unlist(se)) ns <- length(se.pos)+1 se.alpha <- rep(alphas,length.out=ns)[-1] se.border <- rep(border,length.out=ns)[-1] se.col <- rep(col,length.out=ns)[-1] se.lty <- rep(lty,length.out=ns)[-1] se.lwd <- rep(lwd,length.out=ns)[-1] xx <- dy$x for (j in seq_along(se.pos)) { if (polygon) { yy <- dnorm(xx,mean=ss["Mean",se.pos[j]],sd=ss["SE",se.pos[j]]) if (se.alpha[j]>0) graphics::polygon(c(xx,rev(xx)),c(yy,rep(0,length(yy))),col=Col(se.col[j],alpha=se.alpha[j]),border=NA,density=densities[j],angle=angles[j]) if (!is.null(border)) lines(xx,yy,col=se.border[j],lty=se.lty[j],lwd=se.lwd[j]) } else { graphics::curve(dnorm(x,mean=ss["Mean",se.pos[j]],sd=ss["SE",se.pos[j]]),lwd=se.lwd[j],lty=se.lty[j],col=se.col[j],add=TRUE) } } if (auto.legend) legend <- c("Kernel",colnames(ss)[se.pos]) if (!is.null(legend)) { if (polygon) { dcol <- c(col[1],se.col) fill <- Col(dcol,alpha) fill[which(alpha==0)] <- NA border[which(alpha==0)] <- NA dcol[which(alpha!=0)] <- NA 
graphics::legend(legendpos,legend, fill=fill, lty=1, col=dcol, border=border, cex=cex.legend) } else { graphics::legend(legendpos,legend, col=c(col[1],se.col), lty=c(lty[1],se.lty), lwd=c(lwd[1],se.lwd), cex=cex.legend) } } } } } if (single) { nk <- unlist(lapply(se,length)) col <- rep(col,length.out=K) for (i in seq(K)) { my.scatter.sim(i,add=(i>1),colors=col[i]) } if (!is.null(main) && !byrow) { title(main[1],cex.main=cex.main) } if (missing(legend)) legend <- colnames(x)[estimate] legendold <- legend legend <- NULL alpha <- rep(alpha,length.out=K) density <- rep(density,length.out=K) angle <- rep(angle,length.out=K) for (i in seq_len(K)) { alphas <- if (K==1L) alpha else alpha[i] densities <- density[i] if (!is.null(densities) && densities<1) densities <- NULL if (length(se)>0) alphas <- c(alphas,rep(0,nk[i])) my.density.sim(i,add=(i>1),colors=col[i],alphas=alphas, densities=densities, angles=angle[i], auto.legend=FALSE) } if (!is.null(legendold)) { legend <- rep(legendold,length.out=K) graphics::legend(legendpos,legend, fill=Col(col,alpha),border=col,cex=cex.legend) } } else { for (i in seq(K)) { my.scatter.sim(i) if (!is.null(main) && !byrow && scatter.plot) { title(main[i],cex.main=cex.main) } my.density.sim(i,auto.legend=missing(legend)) if (i==1 && ask) par(ask=ask) } } } lava/R/toformula.R0000644000176200001440000000157013520655354013544 0ustar liggesusers##' Converts strings to formula ##' ##' Converts a vector of predictors and a vector of responses (characters) i#nto ##' a formula expression. ##' ##' ##' @param y vector of predictors ##' @param x vector of responses ##' @return An object of class \code{formula} ##' @author Klaus K. 
Holst
##' @seealso \code{\link{as.formula}}
##' @keywords models utilities
##' @examples
##'
##' toformula(c("age","gender"), "weight")
##'
##' @export
toformula <- function(y = ".", x = ".") {
    ## Right-hand side: terms of 'x' joined by " + ".
    rhs <- x[1]
    if (length(x) > 1) {
        for (term in x[-1]) rhs <- paste(rhs, "+", term)
    }
    ## Left-hand side: a single name, or c(y1, y2, ...) when 'y' has
    ## several elements.
    lhs <- y[1]
    if (length(y) > 1) {
        lhs <- paste0("c(", lhs)
        for (resp in y[-1]) lhs <- paste0(lhs, ", ", resp)
        lhs <- paste0(lhs, ")")
    }
    as.formula(paste(lhs, "~", rhs))
}
lava/R/measurement.error.R0000644000176200001440000000550113520655354015207 0ustar liggesusers##' Two-stage (non-linear) measurement error
##'
##' Two-stage measurement error
##' @param model1 Stage 1 model
##' @param formula Formula specifying observed covariates in stage 2 model
##' @param data data.frame
##' @param predictfun Predictions to be used in stage 2
##' @param id1 Optional id-vector of stage 1
##' @param id2 Optional id-vector of stage 2
##' @param ... Additional arguments to lower level functions
##' @seealso stack.estimate
##' @export
##' @examples
##' m <- lvm(c(y1,y2,y3)~u,c(y3,y4,y5)~v,u~~v,c(u,v)~x)
##' transform(m,u2~u) <- function(x) x^2
##' transform(m,uv~u+v) <- prod
##' regression(m) <- z~u2+u+v+uv+x
##' set.seed(1)
##' d <- sim(m,1000,p=c("u,u"=1))
##'
##' ## Stage 1
##' m1 <- lvm(c(y1[0:s],y2[0:s],y3[0:s])~1*u,c(y3[0:s],y4[0:s],y5[0:s])~1*v,u~b*x,u~~v)
##' latent(m1) <- ~u+v
##' e1 <- estimate(m1,d)
##'
##' pp <- function(mu,var,data,...) {
##' cbind(u=mu[,"u"],u2=mu[,"u"]^2+var["u","u"],v=mu[,"v"],uv=mu[,"u"]*mu[,"v"]+var["u","v"])
##' }
##' (e <- measurement.error(e1, z~1+x, data=d, predictfun=pp))
##'
##' ## uu <- seq(-1,1,length.out=100)
##' ## pp <- estimate(e,function(p,...)
p["(Intercept)"]+p["u"]*uu+p["u2"]*uu^2)$coefmat ##' if (interactive()) { ##' plot(e,intercept=TRUE,line=0) ##' ##' f <- function(p) p[1]+p["u"]*u+p["u2"]*u^2 ##' u <- seq(-1,1,length.out=100) ##' plot(e, f, data=data.frame(u), ylim=c(-.5,2.5)) ##' } measurement.error <- function(model1, formula, data=parent.frame(), predictfun=function(mu,var,data,...) mu[,1]^2+var[1], id1, id2, ...) { if (!inherits(model1,c("lvmfit","lvm.mixture"))) stop("Expected lava object ('lvmfit','lvm.mixture',...)") if (missing(formula)) stop("formula needed for stage two (right-hand side additional covariates)") p1 <- coef(model1,full=TRUE) uhat <- function(p=p1) { P <- predict(model1,p=p,x=manifest(model1)) cbind(predictfun(P,attributes(P)$cond.var,data)) } if (missing(id1)) id1 <- seq(nrow(model.frame(model1))) if (missing(id2)) id2 <- seq(nrow(model.frame(model1))) if (!inherits(model1,"estimate")) e1 <- estimate(NULL,coef=p1,id=id1,iid=iid(model1)) u <- uhat() X0 <- model.matrix(formula, data) Y <- model.frame(formula,data)[,1] X <- cbind(X0,u) stage.two <- lm(Y~-1+X) names(stage.two$coefficients) <- colnames(X) if (!inherits(stage.two,"estimate")) e2 <- estimate(stage.two, id=id2) U <- function(alpha=p1,beta=coef(stage.two)) { X <- cbind(X0,uhat(alpha)) r <- (Y-X%*%beta)/summary(stage.two)$sigma^2 apply(X,2,function(x) sum(x*r)) } Ia <- -numDeriv::jacobian(function(p) U(p),p1) stacked <- stack(e1,e2,Ia) res <- c(stacked,list(naive=e2,lm=coef(summary(stage.two)),fun=predictfun)) structure(res,class=c("measurement.error","estimate")) } lava/R/addattr.R0000644000176200001440000000241313520655354013154 0ustar liggesusers##' @export `addattr` <- function(x,...) UseMethod("addattr") ##' @export `addattr.lvmfit` <- function(x,...) addattr(Model(x),...) ##' @export `addattr.lvm` <- function(x, attr, var=NULL, val=TRUE, fun=graph::nodeRenderInfo,debug=FALSE,...) 
{ if (!is.null(var)) { Graph(x) <- addattr(Graph(x), attr=attr, var=var, val=val, fun=fun, debug=debug) return(x) } else { addattr(Graph(x), attr=attr, var=var, val=val, fun=fun) } } ##' @export `addattr.graphNEL` <- function(x, attr, var=NULL, val=TRUE,fun="graph::nodeRenderInfo",debug=FALSE,...) { if (is.null(var)) { ff <- strsplit(fun,"::")[[1]] if (length(ff)>1) { ff <- getFromNamespace(ff[2],ff[1]) } f <- do.call(ff,list(x)) if (is.null(val) || !is.logical(f[[attr]])) attrvar <- f[[attr]] else attrvar <- names(f[[attr]])[which(val==f[[attr]])] return(attrvar) } if (is.character(val)) myexpr <- paste0("list(",attr,"=c(", paste0("\"",var,"\"=\"",val,"\"" , collapse=", "), "))") else myexpr <- paste0("list(",attr,"=c(", paste0("\"",var,"\"=",val, collapse=", "), "))") Debug(list("str=",myexpr),debug) eval(parse(text=paste0(fun,"(x) <- ",myexpr))) return(x) } lava/R/categorical.R0000644000176200001440000000341713520655354014013 0ustar liggesusers##' @export categorical <- function(x,formula,K,beta,p,liability=FALSE,regr.only=FALSE,exo=TRUE,...) { if (is.character(formula)) { regr <- FALSE X <- formula } else { y <- getoutcome(formula) X <- attributes(y)$x regr <- TRUE if (length(y)==0) regr <- FALSE if (length(attributes(y)$x)==0) { X <- y; regr <- FALSE } } if (!missing(p)) { if (!missing(K)) { if (!(K==length(p) || K==length(p)+1)) stop("Wrong dimension of 'p'") if (length(K)==length(p)) { if (!identical(sum(p),1.0)) stop("Not a probability vector") p <- p[-length(p)] } } if (is.numeric(p) && sum(p)>1) warning("'p' sum > 1") if (is.logical(all.equal(1.0,sum(p)))) p <- p[-length(p)] } if (missing(K)) { if (!is.null(list(...)$labels)) K <- length(list(...)$labels) if (!missing(beta)) K <- length(beta) if (!missing(p)) K <- length(p)+1 } if (!regr.only) { if (missing(p)) p <- rep(1/K,K-1) pname <- names(p) if (is.null(pname)) pname <- rep(NA,K-1) ordinal(x,K=K,liability=liability,p=p,constrain=pname,exo=exo,...) 
<- X if (!regr) return(x) } if (missing(beta)) beta <- rep(0,K) fname <- paste(gsub(" ","",deparse(formula)),seq(K)-1,sep=":") fpar <- names(beta) if (is.null(fpar)) fpar <- fname parameter(x,fpar,start=beta) <- fname val <- paste0("function(x,p,...) p[\"",fpar[1],"\"]*(x==0)") for (i in seq(K-1)) { val <- paste0(val,"+p[\"",fpar[i+1],"\"]*(x==",i,")") } functional(x,formula) <- eval(parse(text=val)) return(x) } ##' @export 'categorical<-' <- function(x,...,value) categorical(x,value,...) lava/R/utils.R0000644000176200001440000002553113520655354012677 0ustar liggesuserschar2num <- function(x,...) { idx <- grep("^[-]*[0-9\\.]+",x,perl=TRUE,invert=TRUE) if (length(idx)>0) x[idx] <- NA as.numeric(x) } ###{{{ substArg substArg <- function(x,env,...) { if (!missing(env)) { a <- with(env,substitute(x)) } else { a <- substitute(x) } myclass <- tryCatch(class(eval(a)),error=function(e) NULL) if (is.null(myclass) || myclass=="name") { res <- unlist(sapply(as.character(a), function(z) { trimmed <- gsub(" ","",z,fixed=TRUE) val <- strsplit(trimmed,"+",fixed=TRUE) if (val[1]=="") val <- NULL val })); attributes(res)$names <- NULL return(res) } return(eval(a)) } ## g <- function(zz,...) { ## env=new.env(); assign("x",substitute(zz),env) ## substArg(zz,env=env) ## } ## h <- function(x,...) { ## env=new.env(); assign("x",substitute(x),env) ## substArg(x,env=TRUE) ## } ###}}} ###{{{ procrandomslope procrandomslope <- function(object,data=object$data,...) 
{ Xfix <- FALSE xfix <- myfix <- list() xx <- object for (i in seq_len(object$ngroup)) { x0 <- object$lvm[[i]] data0 <- data[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x0,exo=TRUE))] xfix <- c(xfix, list(xfix0)) if (length(xfix0)>0) { ## Yes, random slopes Xfix<-TRUE } xx$lvm[[i]] <- x0 } if (Xfix) { for (k in seq_len(object$ngroup)) { x0 <- object$lvm[[k]] data0 <- data[[k]] nrow <- length(vars(x0)) xpos <- lapply(xfix[[k]],function(y) which(regfix(x0)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix0 <- list(var=xfix[[k]], col=colpos, row=rowpos) myfix <- c(myfix, list(myfix0)) for (i in seq_along(myfix0$var)) for (j in seq_along(myfix0$col[[i]])) regfix(x0, from=vars(x0)[myfix0$row[[i]][j]],to=vars(x0)[myfix0$col[[i]][j]]) <- colMeans(data0[,myfix0$var[[i]],drop=FALSE],na.rm=TRUE) index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) object$lvm[[k]] <- x0 } object <- multigroup(object$lvm,data,fix=FALSE,exo.fix=FALSE) } return(list(model=object,fix=myfix)) } ###}}} procrandomslope ###{{{ kronprod ## ' Calculate matrix product with kronecker product ## ' ## ' \deqn{(A\crossprod B) Y} ## ' @title Calculate matrix product with kronecker product ## ' @param A ## ' @param B ## ' @param Y ## ' @author Klaus K. Holst kronprod <- function(A,B,Y) { if (missing(Y)) { ## Assume 'B'=Identity, (A otimes B)Y k <- nrow(B)/ncol(A) res <- rbind(apply(B,2,function(x) matrix(x,nrow=k)%*%t(A))) return(res) } rbind(apply(Y,2,function(x) B%*%matrix(x,nrow=ncol(B))%*%t(A))) } ###}}} kronprod ###{{{ izero izero <- function(i,n) { ## n-1 zeros and 1 at ith entry x <- rep(0,n); x[i] <- 1 x } ###}}} ###{{{ Debug `Debug` <- function(msg, cond=lava.options()$debug) { if (cond) print(paste(msg, collapse=" ")) } ###}}} ###{{{ categorical2dummy categorical2dummy <- function(x,data,messages=0,...) 
{ x0 <- x X <- intersect(index(x)$exogenous,colnames(data)) catX <- c() for (i in X) { if (!is.numeric(data[,i])) catX <- c(catX,i) } if (length(catX)==0) return(list(x=x,data=data)) f <- as.formula(paste("~ 1+", paste(catX,collapse="+"))) opt <- options(na.action="na.pass") M <- model.matrix(f,data) options(opt) Mnames <- colnames(M) Mpos <- attributes(M)$assign A <- index(x)$A F <- regfix(x) count <- 0 for (i in catX) { count <- count+1 mnames <- Mnames[Mpos==count] kill(x0) <- i Y <- colnames(A)[A[i,]==1] if (length(mnames)==1) { fix <- as.list(F$labels[i,]) fixval <- F$values[i,] fix[which(!is.na(fixval))] <- fixval[na.omit(fixval)] regression(x0,to=Y,from=mnames,messages=messages) <- fix[Y] } else { x0 <- regression(x0,to=Y,from=mnames,messages=messages) } } index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) return(list(x=x0,data=cbind(data,M))) } ###}}} ###{{{ procdata.lvm `procdata.lvm` <- function(x,data,categorical=FALSE, na.method=ifelse(any(is.na(data[,intersect(colnames(data),manifest(x))])),"complete.obs","pairwise.complete.obs"), missing=FALSE ) { if (is.numeric(data) & !is.list(data)) { data <- rbind(data) } if (is.data.frame(data) | is.matrix(data)) { nn <- colnames(data) data <- as.data.frame(data); colnames(data) <- nn; rownames(data) <- NULL obs <- setdiff(intersect(vars(x), colnames(data)),latent(x)) Debug(obs) mydata <- subset(data, select=obs) if (NROW(mydata)==0) stop("No observations") for (i in seq_len(ncol(mydata))) { if (inherits(mydata[,i],"Surv")) mydata[,i] <- mydata[,i][,1] if (is.character(mydata[,i]) | is.factor(mydata[,i])) mydata[,i] <- as.numeric(as.factor(mydata[,i]))-1 } S <- NULL n <- nrow(mydata) if (n==1) { S <- diag(nrow=ncol(mydata)); colnames(S) <- rownames(S) <- obs } if (na.method=="complete.obs" && !missing) { mydata0 <- na.omit(mydata) n <- nrow(mydata0) mu <- colMeans(mydata0) if (is.null(S) && n>2) S <- (n-1)/n*cov(mydata0) ## MLE variance matrix of observed variables rm(mydata0) } nS <- is.null(S) || any(is.na(S)) 
if (na.method=="pairwise.complete.obs" || nS) { mu <- colMeans(mydata,na.rm=TRUE) if (nS) { n <- nrow(mydata) S <- (n-1)/n*cov(mydata,use="pairwise.complete.obs") S[is.na(S)] <- 1e-3 } } } else if (is.list(data)) { if ("cov"%in%names(data)) data$S <- data$cov if ("var"%in%names(data)) data$S <- data$var if ("mean"%in%names(data)) data$mu <- data$mean n <- data$n S <- reorderdata.lvm(x,data$S) mu <- reorderdata.lvm(x,data$mu) ## if (is.null(n)) stop("n was not specified"); } else stop("Unexpected type of data!"); if (nrow(S)!=ncol(S)) stop("Wrong type of data!"); return(list(S=S,mu=mu,n=n)) } ###}}} ###{{{ reorderdata.lvm `reorderdata.lvm` <- function(x, data) { if (is.vector(data)) { nn <- names(data) ii <- na.omit(match(index(x)$manifest, nn)) data[ii,drop=FALSE] } else { nn <- colnames(data) ii <- na.omit(match(index(x)$manifest, nn)) data[ii,ii,drop=FALSE] } } ###}}} ###{{{ symmetrize `symmetrize` <- function(M, upper=TRUE) { if (length(M)==1) return(M) if (!is.matrix(M) | ncol(M)!=nrow(M)) stop("Only implemented for square matrices.") if (upper) { for (i in seq_len(ncol(M)-1)) for (j in seq(i+1,nrow(M))) M[i,j] <- M[j,i] return(M) } else { for (i in seq_len(ncol(M))) for (j in seq_len(nrow(M))) if (M[i,j]==0) M[i,j] <- M[j,i] else M[j,i] <- M[i,j] return(M) } } ###}}} ###{{{ naiveGrad naiveGrad <- function(f, x, h=1e-9) { nabla <- numeric(length(x)) for (i in seq_along(x)) { xh <- x; xh[i] <- x[i]+h nabla[i] <- (f(xh)-f(x))/h } return(nabla) } ###}}} ###{{{ CondMom # conditional on Compl(idx) CondMom <- function(mu,S,idx,X) { idxY <- idx idxX <- setdiff(seq_len(ncol(S)),idxY) SXX <- S[idxX,idxX,drop=FALSE]; SYY <- S[idxY,idxY,drop=FALSE] SYX <- S[idxY,idxX,drop=FALSE] iSXX <- solve(SXX) condvar <- SYY-SYX%*%iSXX%*%t(SYX) if (missing(mu)) return(condvar) muY <- mu[,idxY,drop=FALSE] muX <- mu[,idxX,drop=FALSE] if (is.matrix(mu)) Z <- t(X-muX) else Z <- apply(X,1,function(xx) xx-muX) SZ <- t(SYX%*%iSXX%*%Z) ## condmean <- matrix( if (is.matrix(mu)) condmean <- 
SZ+muY else condmean <- t(apply(SZ,1,function(x) muY+x)) ## ,ncol=ncol(SZ),nrow=nrow(SZ))
    return(list(mean=condmean,var=condvar))
}
###}}} CondMom

###{{{ Depth-First/acc (accessible)

## Depth-first search in the adjacency matrix 'M' starting from node index
## 'v'; returns the indices of all nodes reachable from 'v' (including 'v'
## itself). 'explored' accumulates visited nodes across recursive calls.
DFS <- function(M,v,explored=c()) {
    explored <- union(explored,v)
    incident <- M[v,]
    for (v1 in setdiff(which(incident==1),explored)) {
        explored <- DFS(M,v1,explored)
    }
    return(explored)
}

## Names of the nodes accessible from node 'v' (itself excluded);
## 'v' may be supplied as a node name or a column index of 'M'.
acc <- function(M,v) {
    if (is.character(v)) v <- which(colnames(M)==v)
    colnames(M)[setdiff(DFS(M,v),v)]
}

###}}} Depth-First/acc (accessible)

## Total number of free parameters of a lvm object
## (regression/covariance + mean + extra parameters from the model index).
npar.lvm <- function(x) {
    return(index(x)$npar+ index(x)$npar.mean+index(x)$npar.ex)
}

## Element-wise conversion of a list to numeric where possible;
## elements that do not parse as numbers are left unchanged.
as.numeric.list <- function(x,...) {
    lapply(x,function(y) ifelse(is.na(as.numeric(y)),y,as.numeric(y)))
}

## Split edge names of the form "a~b" into character pairs c("a","b").
edge2pair <- function(e) {
    sapply(e,function(x) strsplit(x,"~"))
}

numberdup <- function(xx) { ## Convert to numbered list
    ## Assigns a running number to each distinct value of 'xx'; duplicates
    ## receive the number of their first occurrence.
    dup.xx <- duplicated(xx)
    ## dups <- xx[dup.xx]
    xx.new <- numeric(length(xx))
    count <- 0
    for (i in seq_along(xx)) {
        if (!dup.xx[i]) {
            count <- count+1
            xx.new[i] <- count
        } else {
            xx.new[i] <- xx.new[match(xx[i],xx)[1]]
        }
    }
    return(xx.new)
}

## Decompose a formula into its outcome ('y'), term labels ('x') and the
## set of all variables appearing in it ('all').
extractvar <- function(f) {
    yy <- getoutcome(f)
    xx <- attributes(terms(f))$term.labels
    myvars <- all.vars(f)
    return(list(y=yy,x=xx,all=myvars))
}

##' @export
getoutcome <- function(formula,sep,...) {
    ## Return the deparsed left-hand side of 'formula' (NULL when there is
    ## no response), with the term labels attached as attribute 'x'.
    ## When 'sep' is supplied, the first term is additionally split on '|'
    ## into a list of one-sided formulas.
    aa <- attributes(terms(formula,...))
    if (aa$response==0) {
        res <- NULL
    } else {
        res <- paste(deparse(formula[[2]]),collapse="")
    }
    if (!missing(sep) && length(aa$term.labels)>0) {
        attributes(res)$x <- lapply(strsplit(aa$term.labels,"\\|")[[1]],
                                    function(x) as.formula(paste0("~",x)))
    } else {
        attributes(res)$x <- aa$term.labels
    }
    return(res)
}

##' @export
Specials <- function(f,spec,split2="+",...)
{ tt <- terms(f,spec) pos <- attributes(tt)$specials[[spec]] if (is.null(pos)) return(NULL) x <- rownames(attributes(tt)$factors)[pos] st <- gsub(" ","",x) res <- unlist(strsplit(st,"[()]"))[2] if (is.null(split2)) return(res) unlist(strsplit(res,"+",fixed=TRUE)) } ##' @export decomp.specials <- function(x,pattern="[()]",pattern2=NULL, pattern.ignore=NULL, sep="[,\\+]",perl=TRUE,reverse=FALSE,...) { st <- gsub(" |^\\(|)$","",x) # Remove white space and leading/trailing parantheses if (!is.null(pattern.ignore)) { if (grepl(pattern.ignore,st,perl=perl,...)) return(st) } if (!is.null(pattern)) { st <- rev(unlist(strsplit(st,pattern,perl=perl,...)))[1] } if (!is.null(pattern2)) { st <- (unlist(strsplit(st,pattern2,perl=perl,...))) if (reverse) st <- rev(st) } unlist(strsplit(st,sep,perl=perl,...)) } Decomp.specials <- function(x,pattern="[()]") { st <- gsub(" ","",x) st <- gsub("\n","",st) mysplit <- rev(unlist(strsplit(st,pattern))) type <- mysplit[2] vars <- mysplit[1] res <- unlist(strsplit(vars,",")) if (type=="s" | type=="seq") { return(paste0(res[1],seq(char2num(res[2])))) } unlist(strsplit(vars,",")) } printline <- function(n=70) { cat(rep("_", n), "\n", sep=""); } lava/R/gof.R0000644000176200001440000003417013520655354012311 0ustar liggesusers##' @export rsq <- function(x,stderr=FALSE) { if (stderr) { v <- endogenous(x) vpar <- paste(v,v,sep=lava.options()$symbol[2]) iid.v <- iid(model.frame(x)[,v]) iid.mod <- iid(x) coef0 <- c(attributes(iid.v)$coef[vpar], coef(x)[vpar]) iid0 <- cbind(iid.v[,vpar],iid.mod[,vpar]) p <- length(v) idx <- seq_len(p); ee <- estimate(NULL,data=NULL, function(x) { res <- (x[idx]-x[idx+p])/x[idx] names(res) <- v as.list(res) }, print=function(x,...) 
{ cat("\nR-squared:\n\n") print(x$coefmat) }, coef=coef0, iid=iid0) res <- ee ##res <- list(ee) ## for (lat in latent(x)) { ## v <- intersect(children(x,lat),endogenous(x)) ## vpar <- paste(v,v,sep=lava.options()$symbol[2]) ## lpar <- paste(lat,lat,sep=lava.options()$symbol[2]) ## rpar <- paste(v,lat,sep=lava.options()$symbol[1]) ## fix <- c(x$model$fix[lat,v,drop=TRUE],x$model$covfix[lat,lat]) ## pp <- coef(x) ## idx <- x$model$parpos$A[lat,v] ## idx2 <- x$model$parpos$P[lat,lat] ## p0 <- c(idx,idx2) ## p1 <- setdiff(unique(p0),0) ## p2 <- match(p0,p1) ## k <- length(v) ## coef0 <- c(pp[p1],attributes(iid.v)$coef[vpar]) ## iid0 <- cbind(iid.mod[,p1],iid.v[,vpar]) ## ee <- estimate(NULL,data=NULL, ## function(p) { ## p. <- p[p2] ## p.[is.na(p.)] <- fix[is.na(p.)] ## res <- p.[seq_len(k)]^2*p.[k+1]/tail(p,k) ## names(res) <- v ## as.list(res) ## }, ## print=function(x,...) { ## cat("\nVariance explained by '", lat,"':\n\n",sep="") ## print(x$coefmat) ## },coef=coef0,iid=iid0) ## res <- c(res,list(ee)) ## } return(res) } v <- c(endogenous(x),setdiff(latent(x),parameter(Model(x)))) res <- coef(x,9,std="yx") idx <- with(attributes(res), which(type=="variance" & (var==from))) nam <- attributes(res)$var[idx] res <- 1-res[idx,5] names(res) <- nam res <- list("R-squared"=res) ## M <- moments(x,coef(x)) ## v <- setdiff(vars(x),exogenous(x)) ## vvar <- M$Cfull[cbind(v,v)] ## rsq <- (vvar-M$P[cbind(v,v)])/vvar if (length(latent(x))>0) { M <- moments(x,coef(x)) nn <- names(res) for (lat in latent(x)) { v <- intersect(children(x,lat),endogenous(x)) varl <- M$Cfull[lat,lat] varv <- M$Cfull[cbind(v,v)] fix <- c(x$model$fix[lat,v,drop=TRUE]) pp <- coef(x) if (inherits(x,"lvm.missing")) { mp <- match(coef(x$model),names(coef(x))) pp <- pp[mp] } idx1 <- x$model$parpos$A[lat,v] ##idx2 <- x$model$parpos$P[lat,lat] ##idx3 <- x$model$parpos$P[cbind(v,v)] p0 <- c(idx1) p1 <- setdiff(unique(p0),0) p2 <- match(p0,p1) p <- pp[p1] p. 
<- p[p2] p.[is.na(p.)] <- fix[is.na(p.)] k <- length(v) val <- (p.^2*varl)/varv; names(val) <- v res <- c(res,list(val)) nn <- c(nn,paste0("Variance explained by '",lat,"'")) } names(res) <- nn } res } satmodel <- function(object,logLik=TRUE,data=model.frame(object), control=list(trace=1), weights=Weights(object),estimator=object$estimator, missing=inherits(object,"lvm.missing"), regr=FALSE, ...) { if (object$estimator=="gaussian" & logLik & !missing) { if (class(object)[1]%in%c("multigroupfit","multigroup")) { ll <- structure(0,nall=0,nobs=0,df=0,class="logLik") for (i in seq_len(Model(object)$ngroup)) { l0 <- logLik(Model(Model(object))[[i]],data=model.frame(object)[[i]],type="sat") ll <- ll+l0 for (atr in c("nall","nobs","df")) attributes(ll)[[atr]] <- attributes(ll)[[atr]]+attributes(l0)[[atr]] } } return(logLik(object, type="sat")) } covar <- exogenous(object) y <- endogenous(object) m0 <- Model(object) if (length(covar)>0) suppressWarnings(m0 <- regression(m0,y,covar)) if (length(latent(m0))>0) kill(m0) <- latent(m0) cancel(m0) <- y if (!regr) suppressWarnings(covariance(m0) <- y) else { if (length(y)>1) { for (i in seq_len(length(y)-1)) for (j in seq(i+1,length(y))) { m0 <- regression(m0,y[i],y[j]) } } exogenous(m0) <- covar } if (is.null(control$start)) { mystart <- rep(0,with(index(m0), npar.mean+npar)) mystart[variances(m0,mean=TRUE)] <- 1 control$start <- mystart } message("Calculating MLE of saturated model:\n") e0 <- estimate(m0,data=data,weights=weights,estimator=estimator,messages=0,control=control,missing=missing,...) if (logLik) return(logLik(e0)) return(e0) } condition <- function(x) { a <- svd(x) return(max(a$d)/min(a$d)) } ##' Extract model summaries and GOF statistics for model object ##' ##' Calculates various GOF statistics for model object including global ##' chi-squared test statistic and AIC. Extract model-specific mean and variance ##' structure, residuals and various predicitions. 
##' ##' ##' @aliases gof gof.lvmfit moments moments.lvm information information.lvmfit ##' score score.lvmfit logLik.lvmfit ##' @param object Model object ##' @param x Model object ##' @param p Parameter vector used to calculate statistics ##' @param data Data.frame to use ##' @param latent If TRUE predictions of latent variables are included in output ##' @param data2 Optional second data.frame (only for censored observations) ##' @param weights Optional weight matrix ##' @param n Number of observations ##' @param conditional If TRUE the conditional moments given the covariates are ##' calculated. Otherwise the joint moments are calculated ##' @param model String defining estimator, e.g. "gaussian" (see ##' \code{estimate}) ##' @param debug Debugging only ##' @param chisq Boolean indicating whether to calculate chi-squared ##' goodness-of-fit (always TRUE for estimator='gaussian') ##' @param level Level of confidence limits for RMSEA ##' @param rmsea.threshold Which probability to calculate, Pr(RMSEA0) { ## SRMR.endo <- mean(c(R[idx,idx][upper.tri(R[idx,idx],diag=TRUE)],R2[idx])^2)^0.5 ## res <- c(res,list("SRMR(endogenous)"=SRMR.endo)) ## } } if (rnkV==ncol(vcov(object)) && (!is.null(minSV) && minSV>1e-12)) { rmseafun <- function(...) { epsilon <- function(lambda) sapply(lambda,function(x) ifelse(x>0 & qdf>0,sqrt(x/(qdf*(n))),0)) ## n-1,n vs. 
n-df opf <- function(l,p) suppressWarnings(p-pchisq(q,df=qdf,ncp=l)) alpha <- (1-level)/2 RMSEA <- epsilon(q-qdf) B <- max(q-qdf,0) lo <- hi <- list(root=0) if (RMSEA>0 && opf(0,p=1-alpha)<0) { hi <- uniroot(function(x) opf(x,p=1-alpha),c(0,B)) } if (opf(B,p=alpha)<0) { lo <- uniroot(function(x) opf(x,p=alpha),c(B,n)) } ci <- c(epsilon(c(hi$root,lo$root))) RMSEA <- c(RMSEA=RMSEA,ci); names(RMSEA) <- c("RMSEA",paste0(100*c(alpha,(1-alpha)),"%")) pval <- pchisq(q,qdf,(n*qdf*rmsea.threshold^2),lower.tail=FALSE) res <- list(aa=((q-qdf)/(2*qdf)^0.5),RMSEA=RMSEA, level=level, rmsea.threshold=rmsea.threshold, pval.rmsea=pval) return(res) } rmseaval <- tryCatch(rmseafun(),error=function(e) NULL) res <- c(res,rmseaval) } } else { res <- list(n=n, logLik=loglik, BIC=myBIC, AIC=myAIC) } res <- c(res, L2score=l2D, rankV=rnkV, cond=condnum, k=nrow(vcov(object))) class(res) <- "gof.lvmfit" return(res) } ##' @export print.gof.lvmfit <- function(x,optim=TRUE,...) { if (!is.null(x$n)) { with(x, cat("\n Number of observations =", n, "\n")) } if (is.null(x$fit)) { with(x, cat(" Log-Likelihood =", logLik, "\n")) } with(x, cat(" BIC =", BIC, "\n", "AIC =", AIC, "\n")) if (!is.null(x$fit)) with(x, cat(" log-Likelihood of model =", fit$estimate[1], "\n\n", "log-Likelihood of saturated model =", fit$estimate[2], "\n", "Chi-squared statistic: q =", fit$statistic, ", df =", fit$parameter, "\n P(Q>q) =", fit$p.value, "\n")) if (!is.null(x$RMSEA)) { rr <- round(x$RMSEA*10000)/10000 rmsea <- paste0(rr[1]," (",rr[2],";",rr[3],")") cat("\n RMSEA (",x$level*100,"% CI): ", rmsea,"\n",sep="") cat(" P(RMSEA<",x$rmsea.threshold,")=", x$pval.rmsea,"\n",sep="") } for (i in c("TLI","CFI","NFI","SRMR","SRMR(endogenous)")) if (!is.null(x[[i]])) cat("", i,"=",x[[i]],"\n") if (optim) { cat("\nrank(Information) = ",x$rankV," (p=", x$k,")\n",sep="") cat("condition(Information) = ",x$cond,"\n",sep="") cat("mean(score^2) =",x$L2score,"\n") } invisible(x) } ## gof.multigroupfit <- function(object,...) 
{ ## L0 <- logLik(object); df0 <- attributes(L0)$df ## L1 <- logLik(object,type="sat"); df1 <- attributes(L1)$df ## df <- df1-df0; names(df) <- "df" ## Q <- -2*(L0-L1); attributes(Q) <- NULL; names(Q) <- "chisq"; ## pQ <- pchisq(Q,df,lower.tail=FALSE) ## values <- c(L0,L1); names(values) <- c("log likelihood (model)", "log likelihood (saturated model)") ## res <- list(statistic = Q, parameter = df, ## p.value=pQ, method = "Likelihood ratio test", ## estimate = values) ## class(res) <- "htest" ## return(res) ## } lava/R/subset.R0000644000176200001440000000326113520655354013040 0ustar liggesusers##' Extract subset of latent variable model ##' ##' Extract measurement models or user-specified subset of model ##' ##' ##' @aliases measurement ##' @param x \code{lvm}-object. ##' @param vars Character vector or formula specifying variables to include in ##' subset. ##' @param \dots Additional arguments to be passed to the low level functions ##' @return A \code{lvm}-object. ##' @author Klaus K. Holst ##' @keywords models regression ##' @examples ##' ##' m <- lvm(c(y1,y2)~x1+x2) ##' subset(m,~y1+x1) ##' ##' @export ##' @method subset lvm subset.lvm <- function(x, vars, ...) { if (missing(vars)) return(x) if (inherits(vars,"formula")) vars <- all.vars(vars) if (!all(vars%in%vars(x))) stop("Not a subset of model") latentvars <- intersect(vars,latent(x)) ## g0 <- subGraph(vars, Graph(x)) ## res <- graph2lvm(g0) res <- lvm(vars) M <- t(x$M[vars,vars,drop=FALSE]) for (i in seq_len(nrow(M))) { if (any(M[,i]==1)) { res <- regression(res, y=rownames(M)[M[,i]==1], x=rownames(M)[i], ...) 
}
    if (length(latentvars)>0) latent(res) <- latentvars
    res$cov[vars,vars] <- x$cov[vars,vars]
    ## Fixed parameters:
    res$par[vars,vars] <- x$par[vars,vars]
    res$fix[vars,vars] <- x$fix[vars,vars]
    res$covpar[vars,vars] <- x$covpar[vars,vars]
    res$covfix[vars,vars] <- x$covfix[vars,vars]
    res$mean[vars] <- x$mean[vars]
    ## Carry over node attributes, restricted to the selected variables
    res$attributes <- x$attributes
    for (i in seq_along(x$attributes)) {
        val <- x$attributes[[i]]
        if (length(val)>0) {
            val <- val[intersect(vars,names(val))]
            res$attributes[[i]] <- val
        }
    }
    index(res) <- reindex(res)
    return(res)
}
lava/R/parsedesign.R0000644000176200001440000000574413520655354014043 0ustar liggesusers
## Split a linear-combination expression such as "2*a+b-c" into an
## alternating character vector of coefficients and names,
## e.g. c("2","a","1","b","-1","c"); a missing coefficient becomes "1"
## and a bare minus sign becomes -1.
sumsplit <- function(x,...) {
    plus <- strsplit(x,"\\+")[[1]]
    spl <- unlist(lapply(plus, function(x) {
        val <- strsplit(x,"\\-")[[1]]
        val[-1] <- paste0("-",val[-1])
        setdiff(val,"")
    }))
    res <- c()
    for (st in spl) {
        st <- gsub(" ","",st)
        ## name part: strip the leading numeric/sign/'*' prefix
        st0 <- gsub("^[-0-9\\*]*","",st)
        ## coefficient part: the stripped prefix without the '*'
        val <- gsub("\\*","",regmatches(st,gregexpr("^[-0-9\\*]*",st))[[1]])
        if (val=="") val <- "1"
        val <- switch(val,"-"=-1,val)
        res <- c(res,val,st0)
    }
    return(res)
}

##' @export
parsedesign <- function(coef,x,...,regex=FALSE,diff=TRUE) {
    ## Build a contrast matrix from expressions ('x', ...) referring to
    ## parameter names (glob patterns unless regex=TRUE) or indices of
    ## 'coef'; each expression contributes one or more rows.
    if (!is.vector(coef)) coef <- stats::coef(coef)
    if (is.numeric(coef) && !is.null(names(coef))) coef <- names(coef)
    dots <- lapply(substitute(list(...)),function(x) x)[-1]
    ## 'x' given as an unquoted expression (evaluation fails) vs. a value
    expr <- suppressWarnings(inherits(try(x,silent=TRUE),"try-error"))
    if (expr) {
        ee <- c(deparse(substitute(x)), unlist(lapply(dots, deparse)))
    } else {
        ee <- c(deparse(x), sapply(dots, function(x) deparse(x)))
    }
    if (!expr && is.numeric(x)) {
        return(do.call(contr, list(c(list(x),list(...)),n=length(coef),diff=diff)))
    }
    res <- c()
    diff <- rep(diff,length.out=length(ee))
    count <- 0
    for (e in ee) {
        count <- count+1
        diff0 <- FALSE
        Val <- rbind(rep(0,length(coef)))
        if (grepl('\"',e)) {
            ## quoted parameter names: split the expression on the quotes
            diff0 <- diff[count] && grepl("^c\\(",e)
            e0 <- gsub(" |\\)$|^c\\(","",e)
            ff <- strsplit(e0,'\"')[[1L]]
        } else {
            ff <- sumsplit(e)
        }
        ## 'ff' alternates coefficient, parameter-name pairs
        for (i in seq(length(ff)/2)) {
            val0 <- gsub("[*()]","",ff[2*(i-1)+1])
            val <-
char2num(val0) if (is.na(val)) { val <- switch(val0,"-"=-1,1) } par0 <- ff[2*i] par0int <- as.integer(char2num(par0)) if (!regex) par0 <- glob2rx(par0) if (is.na(par0int)) par0int <- grep(par0,coef) if (length(par0int)>1) { diff0 <- diff[count] for (k in seq_along(par0int)) { if (par0int[k]<=length(Val)) { if (diff[count]) { Val[par0int[k]] <- val } else { Val0 <- Val; Val0[] <- 0 Val0[par0int[k]] <- val res <- rbind(res,Val0) } } } } else { if (length(par0int)>0 && par0int<=length(Val)) Val[par0int] <- val } } if (diff0) { n <- sum(Val!=0) if (n>1) { Val0 <- Val ii <- which(Val0!=0) Val <- matrix(0,nrow=n-1,ncol=length(Val)) for (i in seq(n-1)) { Val[i,ii[c(1,i+1)]] <- Val0[ii[c(1,i+1)]]*c(1,-1) } } } if (any(Val!=0)) res <- rbind(res,Val) } res } lava/R/finalize.R0000644000176200001440000002205013520655354013331 0ustar liggesusers##' @export `finalize` <- function(x,...) UseMethod("finalize") ##' @export `finalize.lvm` <- function(x, diag=FALSE, cor=FALSE, addcolor=TRUE, intercept=FALSE, plain=FALSE, cex, fontsize1=10, cols=lava.options()$node.color, unexpr=FALSE, addstyle=TRUE, ...) 
{ g <- as(new("graphAM",adjMat=x$M,"directed"),"graphNEL") graph::nodeRenderInfo(g)$fill <- NA graph::nodeRenderInfo(g)$label <- NA graph::nodeRenderInfo(g)$label[vars(x)] <- vars(x) graph::nodeRenderInfo(g)$shape <- x$graphdef$shape Lab <- NULL for (i in seq_len(length(x$noderender))) { nn <- unlist(x$noderender[[i]]) if (length(nn)>0) { R <- list(as.list(x$noderender[[i]])); names(R) <- names(x$noderender)[i] if (names(x$noderender)[i]!="label") graph::nodeRenderInfo(g) <- x$noderender[i] else Lab <- R[[1]] } } if (!is.null(Lab)) { ## Ugly hack to allow mathematical annotation nn <- names(graph::nodeRenderInfo(g)$label) LL <- as.list(graph::nodeRenderInfo(g)$label) LL[names(Lab)] <- Lab if (any(unlist(lapply(LL,function(x) is.expression(x) || is.name(x) || is.call(x))))) { graph::nodeRenderInfo(g) <- list(label=as.expression(LL)) } else graph::nodeRenderInfo(g) <- list(label=LL) names(graph::nodeRenderInfo(g)$label) <- nn ii <- which(names(graph::nodeRenderInfo(g)$label)=="") if (length(ii)>0) graph::nodeRenderInfo(g)$label <- graph::nodeRenderInfo(g)$label[-ii] } graph::edgeDataDefaults(g)$futureinfo <- x$edgerender$futureinfo graph::edgeRenderInfo(g)$lty <- x$graphdef$lty graph::edgeRenderInfo(g)$lwd <- x$graphdef$lty graph::edgeRenderInfo(g)$col <- x$graphdef$col graph::edgeRenderInfo(g)$textCol <- x$graphdef$textCol graph::edgeRenderInfo(g)$arrowhead <- x$graphdef$arrowhead graph::edgeRenderInfo(g)$dir <- x$graphdef$dir graph::edgeRenderInfo(g)$arrowtail <- "none" graph::edgeRenderInfo(g)$cex <- x$graphdef$cex graph::edgeRenderInfo(g)$label <- x$graphdef$label for (i in seq_len(length(x$edgerender))) { ee <- x$edgerender[[i]] if (length(ee)>0 && names(x$edgerender)[i]!="futureinfo") { graph::edgeRenderInfo(g)[names(x$edgerender)[i]][names(ee)] <- ee } } opt <- options(warn=-1) var <- rownames(covariance(x)$rel) if (unexpr) { mylab <- as.character(graph::edgeRenderInfo(g)$label); names(mylab) <- names(graph::edgeRenderInfo(g)$label) g@renderInfo@edges$label <- 
as.list(mylab) } if (intercept) { ## mu <- intfix(x) ## nNA <- sum(is.na(mu)) ## if (nNA>0) ## mu[is.na(mu)] <- paste("m",seq_len(nNA)) ## mu <- unlist(mu) ## x <- addNode(mu,x) ## for (i in seq_along(mu)) { ## print(mu[i]) ## x <- addEdge(var[i], var[i], x) ## } ## x <- addattr(x,attr="shape",var=mu,val="none") } allEdges <- graph::edgeNames(g) regEdges <- c() feedback <- c() A <- index(x)$A if (index(x)$npar.reg>0) for (i in seq_len(nrow(A)-1)) for (j in (i+1):(ncol(A))) { if(A[i,j]==1 & A[j,i]==1) feedback <- c(feedback, paste0(var[i],"~",var[j]), paste0(var[j],"~",var[i])) if (A[j,i]==0 & x$M[j,i]!=0) { g <- graph::removeEdge(var[j],var[i],g) } if (A[i,j]==1) regEdges <- c(regEdges,paste0(var[i],"~",var[j])) if (A[j,i]==1) regEdges <- c(regEdges,paste0(var[j],"~",var[i])) } varEdges <- corEdges <- c() delta <- ifelse(diag,0,1) if (cor | diag) { for (r in seq_len(nrow(covariance(x)$rel)-delta) ) { for (s in (r+delta):ncol(covariance(x)$rel) ) { if (cor | r==s) if (covariance(x)$rel[r,s]==1 & (!any(c(var[r],var[s])%in%exogenous(x)))) { newedges <- c() if (A[r,s]!=1) { g <- graph::addEdge(var[r],var[s], g) newedges <- paste0(var[r],"~",var[s]) } else { if (A[s,r]!=1) { g <- graph::addEdge(var[s],var[r], g) newedges <- c(newedges,paste0(var[s],"~",var[r])) } } if (r==s) varEdges <- c(varEdges, newedges ) if (r!=s) corEdges <- c(corEdges,newedges) } } } } if (length(x$edgerender$futureinfo)>0) { estr <- names(x$edgerender$futureinfo$label) estr <- estr[which(unlist(lapply(estr,nchar))>0)] revestr <- sapply(estr, function(y) paste(rev(unlist(strsplit(y,"~"))),collapse="~")) revidx <- which(revestr%in%graph::edgeNames(g)) count <- 0 for (i in estr) { count <- count+1 for (f in names(x$edgerender$futureinfo)) { if (count%in%revidx) { g@renderInfo@edges[[f]][[revestr[count]]] <- x$edgerender$futureinfo[[f]][[i]] } else { g@renderInfo@edges[[f]][[i]] <- x$edgerender$futureinfo[[f]][[i]] } } } } allEdges <- unique(c(regEdges,corEdges,varEdges)) corEdges <- 
setdiff(corEdges,regEdges) for (e in allEdges) { dir <- "forward"; lty <- 1; arrowtail <- "none" if (e %in% feedback) { dir <- "none"; lty <- 1; arrowtail <- "closed" } if (e %in% varEdges) { dir <- "none"; lty <- 2; arrowtail <- "none" } if (e %in% corEdges) { dir <- "none"; lty <- 2; arrowtail <- "closed" } arrowhead <- "closed" estr <- e for (f in c("col","cex","textCol","lwd","lty")) { if (!(estr%in%names(graph::edgeRenderInfo(g)[[f]])) || is.na(graph::edgeRenderInfo(g)[[f]][[estr]])) g <- addattr(g,f,var=estr, val=x$graphdef[[f]], fun="graph::edgeRenderInfo") } if (addstyle) { g <- addattr(g,"lty",var=estr,val=lty,fun="graph::edgeRenderInfo") g <- addattr(g,"direction",var=estr,val=dir,fun="graph::edgeRenderInfo") g <- addattr(g,"dir",var=estr,val=dir,fun="graph::edgeRenderInfo") g <- addattr(g,"arrowhead",var=estr,val=arrowhead,fun="graph::edgeRenderInfo") g <- addattr(g,"arrowtail",var=estr,val=arrowtail,fun="graph::edgeRenderInfo") g <- addattr(g,attr="fontsize",var=estr,val=fontsize1,fun="graph::edgeRenderInfo") } if (is.null(graph::edgeRenderInfo(g)$label)) graph::edgeRenderInfo(g)$label <- expression() if (!missing(cex)) if (!is.null(cex)) graph::nodeRenderInfo(g)$cex <- cex } if (plain) { g <- addattr(g,attr="shape",var=vars(x),val="none") } else { if (addcolor) { if (is.null(x$noderender$fill)) notcolored <- vars(x) else notcolored <- vars(x)[is.na(x$noderender$fill)] nodecolor(g, intersect(notcolored,exogenous(x))) <- cols[1] nodecolor(g, intersect(notcolored,endogenous(x))) <- cols[2] nodecolor(g, intersect(notcolored,latent(x))) <- cols[3] if (!is.null(trv <- x$attributes$transform)) { nodecolor (g, names(trv)) <- cols[4] } ## nodecolor(x, intersect(notcolored,survival(x))) <- cols[4] myhooks <- gethook("color.hooks") count <- 3 for (f in myhooks) { count <- count+1 res <- do.call(f, list(x=x,subset=notcolored)) if (length(cols)>=count) { nodecolor(g,res$vars) <- cols[count] } else { nodecolor(g, res$vars) <- res$col } } } } options(opt) 
attributes(g)$feedback <- (length(feedback)>0) return(g) } lava/R/confint.R0000644000176200001440000000604213520655354013173 0ustar liggesusers##' Calculate Wald og Likelihood based (profile likelihood) confidence intervals ##' ##' Calculates either Wald confidence limits: \deqn{\hat{\theta} \pm ##' z_{\alpha/2}*\hat\sigma_{\hat\theta}} or profile likelihood confidence ##' limits, defined as the set of value \eqn{\tau}: ##' \deqn{logLik(\hat\theta_{\tau},\tau)-logLik(\hat\theta)< q_{\alpha}/2} ##' ##' where \eqn{q_{\alpha}} is the \eqn{\alpha} fractile of the \eqn{\chi^2_1} ##' distribution, and \eqn{\hat\theta_{\tau}} are obtained by maximizing the ##' log-likelihood with tau being fixed. ##' ##' @title Calculate confidence limits for parameters ##' @param object \code{lvm}-object. ##' @param parm Index of which parameters to calculate confidence limits for. ##' @param level Confidence level ##' @param profile Logical expression defining whether to calculate confidence ##' limits via the profile log likelihood ##' @param curve if FALSE and profile is TRUE, confidence limits are ##' returned. Otherwise, the profile curve is returned. ##' @param n Number of points to evaluate profile log-likelihood in ##' over the interval defined by \code{interval} ##' @param interval Interval over which the profiling is done ##' @param lower If FALSE the lower limit will not be estimated (profile intervals only) ##' @param upper If FALSE the upper limit will not be estimated (profile intervals only) ##' @param \dots Additional arguments to be passed to the low level functions ##' @return A 2xp matrix with columns of lower and upper confidence limits ##' @author Klaus K. 
Holst ##' @seealso \code{\link{bootstrap}{lvm}} ##' @keywords models regression ##' @examples ##' ##' m <- lvm(y~x) ##' d <- sim(m,100) ##' e <- estimate(lvm(y~x), d) ##' confint(e,3,profile=TRUE) ##' confint(e,3) ##' \donttest{ ## Reduce Ex.timings ##' B <- bootstrap(e,R=50) ##' B ##' } ##' @aliases confint.multigroupfit ##' @export ##' @method confint lvmfit confint.lvmfit <- function(object,parm=seq_len(length(coef(object))),level=0.95,profile=FALSE,curve=FALSE,n=20,interval=NULL,lower=TRUE,upper=TRUE,...) { if (is.character(parm)) { parm <- parpos(Model(object),p=parm) parm <- parm[attributes(parm)$ord] } if (!profile) { return(confint.default(object,parm=parm,level=level,...)) } res <- c() for (i in parm) { res <- rbind(res, profci.lvmfit(object,parm=i,level=level,profile=profile,n=n,curve=curve,interval=interval,lower=lower,upper=upper,...)) if (curve) return(res) } rownames(res) <- names(coef(object))[parm] colnames(res) <- paste((c(0,1) + c(1,-1)*(1-level)/2)*100,"%") return(res) } ##' @export confint.multigroupfit <- function(object,parm=seq_along(pars(object)),level=0.95, estimates=TRUE,...) { p <- 1-(1-level)/2 res <- cbind(pars(object),pars(object)) + qnorm(p)*cbind(-1,1)%x%diag(vcov(object))^0.5 colnames(res) <- paste0(c(1-p,p)*100,"%") rownames(res) <- parpos(object); rownames(res)[is.na(rownames(res))] <- "" if (estimates) res <- cbind(coef(object,type=0)[,c(1,2,4)],res) res[parm,,drop=FALSE] } lava/R/zgetmplus.R0000644000176200001440000002150613520655354013567 0ustar liggesusers##' Read Mplus output files ##' ##' @title Read Mplus output ##' @param infile Mplus output file ##' @param coef Coefficients only ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export ##' @seealso getSAS `getMplus` <- function(infile="template.out",coef=TRUE,...) 
{ if (coef) { start <- "MODEL RESULTS" end1 <- "R-SQUARE" res0 <- findfileseg(infile,start)[-c(seq(5))] res <- sapply(res0,function(x) { val <- strsplit(x," ")[[1]]; val[val!=""] }) res <- res[unlist(lapply(res, length))!=0] coef.idx <- unlist(lapply(res, length))>3 lab.idx <- which(!coef.idx) count <- 0 mycoef <- c() myrownames <- c() for (i in seq_along(res)) { if (i %in% lab.idx) { count <- count+1 } else { val <- char2num(res[[i]][-1]) if (length(val)<5) val <- c(val,rep(0,5-length(val))) mycoef <- rbind(mycoef, val) myrownames <- c(myrownames, paste(paste(res[[lab.idx[count]]],collapse=" "),res[[i]][1]) ) } } rownames(mycoef) <- myrownames colnames(mycoef) <- c("Estimate","Std.Err","Z-value","Std","StdYX") return(mycoef) } start <- "Estimate S.E. Est./S.E." end1 <- "MODEL RESULTS" ## end2 <- "QUALITY OF NUMERICAL RESULTS" res <- findfileseg(infile,start,end1); cat(paste(res,"\n")) res <- findfileseg(infile, "TESTS OF MODEL FIT", "Chi-Square Test of Model Fit for the Baseline Model") cat(paste(res,"\n")) } `findfileseg` <- function(infile, startstring, endstring,nlines) { con <- file(infile, blocking = FALSE) inp <- readLines(con) close(con) linestart <- 1; lineend <- length(inp) mycmd1 <- paste0("grep -n \"",startstring,"\" ", infile); a1 <- system(mycmd1,intern=TRUE); if (length(a1)>0) linestart <- char2num(strsplit(a1,":")[[1]][1]) nn <- length(inp) if (!missing(nlines)) nn <- linestart+nlines if (missing(endstring)) { for (i in seq(linestart,nn)) { lineend <- i-1 if (inp[i]==inp[i-1]) break; } } else { mycmd2 <- paste0("grep -n \"",endstring,"\" ", infile); a2 <- system(mycmd2,intern=TRUE); if (length(a2)>0) lineend <- char2num(strsplit(a2,":")[[1]][1]) } res <- inp[linestart:lineend-1] return(res) } ################################################## ### Generate code and run mplus... ################################################## `mplus` <- function(file="template.mplus",wait=TRUE,intern=TRUE,...) 
{ if (!file.exists(file)) file <- paste0(file,".mplus") if (!file.exists(file)) stop("File does not exist") if (!exists("winecmd")) winecmd <- "wine" if (!exists("mplus.directory")) mplus.directory <- "" mycmd <- paste0(winecmd, " \"", mplus.directory, "mplus.exe\" ", file) system(mycmd, wait=wait, intern=TRUE) prefix <- strsplit(file, ".", fixed=TRUE)[[1]][1] return(getMplus(paste0(prefix,".out"),coef=TRUE)) } `toMplus.data.frame` <- function(x, datafile="data.tab", mplusfile="template.mplus", na.string=".", model="!f1 by x1;", analysis=NULL, categorical=NULL, group, run=FALSE, techout=FALSE,missing=TRUE,...) { write.table(x, file=datafile, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE, na=na.string) varnames <- c() ngroups <- ceiling(ncol(x)/4) for (i in seq_len(ngroups)) { newline <- c("\t",colnames(x)[((i-1)*4+1):min(ncol(x), (i*4))],"\n") varnames <- c(varnames, newline) } zz <- file(mplusfile, "w") # open an output file connection cat(file=zz, "TITLE: ...\n") cat(file=zz, "!-----------------------------------------------------\n") cat(file=zz,"DATA:\n\tFILE=\"", datafile, "\";\n") cat(file=zz,"VARIABLE:\n\tNAMES ARE\n") cat(file=zz, varnames, ";\n") if (!missing(group)) { groups <- unique(x[,group]) mygroupdef <- paste("(",paste(groups,groups,sep="=",collapse=","),")") cat(file=zz, "GROUPING IS ", group, mygroupdef, ";\n", sep="") } else { cat(file=zz, "!GROUPING IS g (1=male, 2=female);\n") } cat(file=zz, "USEVARIABLES=\n", varnames,";\n") if (!is.null(categorical)) cat(file=zz, paste("CATEGORICAL=",paste(categorical,collapse=" "),";\n")) cat(file=zz, "MISSING=",na.string,";\n",sep="") cat(file=zz, "!IDVARIABLE=?;\n") cat(file=zz, "!DEFINE: define new variables here;\n") cat(file=zz, "!SAVEDATA: save data and/or results;\n\n") if (is.null(analysis)) { cat(file=zz, "ANALYSIS: TYPE=MEANSTRUCTURE"); if (missing) cat(file=zz, " MISSING;\n") else cat(file=zz,";\n") cat(file=zz, "ESTIMATOR=ML;\n") cat(file=zz, "INFORMATION=EXPECTED;\n") cat(file=zz, 
"ITERATIONS=5000;\n") cat(file=zz, "CONVERGENCE=0.00005;\n\n") } else { cat(file=zz,"ANALYSIS:\n") cat(file=zz, analysis,"\n") } cat(file=zz, "!-----------------------------------------------------\n") cat(file=zz, "MODEL:\n") cat(file=zz, model,"\n") cat(file=zz, "!-----------------------------------------------------\n") if (!techout) cat(file=zz, "OUTPUT: STANDARDIZED;\n") else cat(file=zz, "OUTPUT: MODINDICES(0); TECH1; TECH2; TECH5; STANDARDIZED;\n") cat(file=zz, "!\tSAMPSTAT;RESIDUAL;CINTERVAL;MODINDICES(0);\n") cat(file=zz, "!Other output options are:\n") cat(file=zz, "!\tSTANDARDIZED; !Standardized coefficients\n") cat(file=zz, "!\tH1SE; !Standard errors for the H1 model\n") cat(file=zz, "!\tH1TECH3; !Estimated covar,corr matrices for par. estimates\n") cat(file=zz, "!\tPATTERNS; !Summary of missing data patterns\n") cat(file=zz, "!\tFSCOEFFICIENT; !Factor score coefficients and posterior covar matrix\n") cat(file=zz, "!\tFSDERTERMINACY; !Factor score determinacy for each factor\n") cat(file=zz, "!\tTECH1; !Parameter specifications and starting values\n") cat(file=zz, "!\tTECH2 !Parameter derivatives;\n") cat(file=zz, "!\tTECH3; !Covar and Corr matrices for estimates\n") cat(file=zz, "!\tTECH4; !Estimated means and covar for the latent variables\n") cat(file=zz, "!\tTECH5; !Optimization matrix\n") cat(file=zz, "!\tTECH6; !Optimization for categorical variables\n") cat(file=zz, "!\tTECH7; !output for type Mixture\n") cat(file=zz, "!\tTECH8; !Output for type mixture\n") cat(file=zz, "!\tTECH9; !Error messages for MC study\n") cat(file=zz, "!\tMONTECARLO: File is\n") close(zz) if (run & exists("mplus")) { res <- mplus(mplusfile) outfile <- paste0(strsplit(mplusfile,".",fixed=TRUE)[[1]][1],".out") getMplus(outfile) return(res) } } `toMplus.lvmfit` <- function(x, model=NULL, data=model.frame(x), run=TRUE, categorical=NULL,##binary(Model(x)), mplusfile="template.mplus", ...) 
{ mymodel <- "" M <- index(x)$M P <- index(x)$P nn <- vars(x) p <- length(nn) lat.var <- latent(x) lat.idx <- match(lat.var, vars(x)) for (i in seq_len(p)) { for (j in seq_len(p)) { if (M[i,j]!=0) { var1 <- nn[i]; var2 <- nn[j]; if (i %in% lat.idx & !(j %in% lat.idx)) {## & !(j %in% lat.idx)) { mymodel <- paste0(mymodel, "\n", var1, " by ", var2, ";") } else { mymodel <- paste0(mymodel, "\n", var2, " on ", var1, ";") } } } } for (i in seq_len(p-1)) { for (j in ((i+1):p)) { if (P[i,j]!=0) { var1 <- nn[i]; var2 <- nn[j]; mymodel <- paste0(mymodel, "\n", var1, " with ", var2, ";") } } } if (is.null(model)) model <- mymodel mydata <- subset(as.data.frame(data), select=setdiff(nn,lat.var)) toMplus.data.frame(mydata,model=mymodel,run=run, mplusfile=mplusfile, ...) } lava/R/optims.R0000644000176200001440000002372313520655354013053 0ustar liggesusersnlminb2 <- function(start,objective,gradient,hessian,...) { nlminbcontrols <- c("eval.max","iter.max","trace","abs.tol","rel.tol","x.tol","step.min") dots <- list(...) control <- list(...)$control control <- control[names(control)%in%nlminbcontrols] dots$control <- control if (length(dots$trace)>0 && dots$trace>0) cat("\n") mypar <- c(list(start=start,objective=objective,gradient=gradient,hessian=hessian),dots) mypar["debug"] <- NULL do.call("nlminb", mypar) } nlminb1 <- function(start,objective,gradient,hessian,...) { nlminb2(start,objective,gradient=gradient,hessian=NULL,...) } nlminb0 <- function(start,objective,gradient,hessian,...) { nlminb2(start,objective,gradient=NULL,hessian=NULL,...) } ################################################################################ estfun <- function(start,objective,gradient,hessian,NR=FALSE,...) { myobj <- function(x,...) { S <- gradient(x,...) 
crossprod(S)[1] } if (!missing(hessian) && !is.null(hessian)) { mygrad <- function(x) { H <- hessian(x) S <- gradient(x) 2*S%*%H } } else { hessian <- function(x) numDeriv::jacobian(gradient,x,method=lava.options()$Dmethod) mygrad <- function(x) { H <- hessian(x) S <- gradient(x) 2*S%*%H } } if (NR) { op <- lava::NR(start,gradient=gradient,hessian=hessian,...) } else { op <- nlminb2(start,myobj,mygrad,hessian=NULL,...) } return(op) } estfun0 <- function(...,hessian=NULL) estfun(...,hessian=hessian) ################################################################################ ## Newton-Raphson/Scoring ################################################################################ ##' @title Newton-Raphson method ##' ##' @param start Starting value ##' @param objective Optional objective function (used for selecting step length) ##' @param gradient gradient ##' @param hessian hessian (if NULL a numerical derivative is used) ##' @param control optimization arguments (see details) ##' @param args Optional list of arguments parsed to objective, gradient and hessian ##' @param ... 
additional arguments parsed to lower level functions ##' @details ##' \code{control} should be a list with one or more of the following components: ##' \itemize{ ##' \item{trace} integer for which output is printed each 'trace'th iteration ##' \item{iter.max} number of iterations ##' \item{stepsize}: Step size (default 1) ##' \item{nstepsize}: Increase stepsize every nstepsize iteration (from stepsize to 1) ##' \item{tol}: Convergence criterion (gradient) ##' \item{epsilon}: threshold used in pseudo-inverse ##' \item{backtrack}: In each iteration reduce stepsize unless solution is improved according to criterion (gradient, armijo, curvature, wolfe) ##' } ##' @export ##' @examples ##' # Objective function with gradient and hessian as attributes ##' f <- function(z) { ##' x <- z[1]; y <- z[2] ##' val <- x^2 + x*y^2 + x + y ##' structure(val, gradient=c(2*x+y^2+1, 2*y*x+1), ##' hessian=rbind(c(2,2*y),c(2*y,2*x))) ##' } ##' NR(c(0,0),f) ##' ##' # Parsing arguments to the function and ##' g <- function(x,y) (x*y+1)^2 ##' NR(0, gradient=g, args=list(y=2), control=list(trace=1,tol=1e-20)) ##' ##' NR <- function(start,objective=NULL,gradient=NULL,hessian=NULL,control,args=NULL,...) 
{ control0 <- list(trace=0, stepsize=1, lambda=0, ngamma=0, gamma2=0, backtrack=TRUE, iter.max=200, tol=1e-6, stabil=FALSE, epsilon=1e-9) if (!missing(control)) { control0[names(control)] <- control # Backward compatibility: if (!is.null(control0$gammma)) control0$stepsize <- control0$gamma } ## conditions to select the step length if(control0$backtrack[1] == "armijo"){ control0$backtrack <- c(1e-4,0) # page 33 } if(control0$backtrack[1] == "curvature"){ control0$backtrack <- c(0,0.9) # page 34 } if(control0$backtrack[1] == "wolfe"){ control0$backtrack <- c(1e-4,0.9) } if(!is.logical(control0$backtrack) || length(control0$backtrack)!=1){ if(length(control0$backtrack) != 2){ stop("control$backtrack must have length two if not TRUE or FALSE \n") } if(any(!is.numeric(control0$backtrack)) || any(abs(control0$backtrack)>1)){ stop("elements in control$backtrack must be in [0,1] \n") } if(control0$backtrack[2]==0){ control0$backtrack[2] <- +Inf # no Wolfe condition } } obj <- objective grad <- gradient hess <- hessian if (!is.null(args)) { if (!is.list(args)) args <- list(args) if (!is.null(objective)) obj <- function(p) do.call(objective, c(list(p),args)) if (!is.null(gradient)) grad <- function(p) do.call(gradient, c(list(p),args)) if (!is.null(hessian)) hess <- function(p) do.call(hessian, c(list(p),args)) } if (control0$trace>0) { cat("\nIter=0\t") if (!is.null(obj)) cat("Objective=",obj(as.double(start))) cat(";\t\n \tp=", paste0(formatC(start), collapse=" "),"\n") } gradFun = !is.null(grad) if (!gradFun & is.null(hess)) { hess <- function(p) { ff <- obj(p) res <- attributes(ff)$hess if (is.function(res)) { res <- res(p) attributes(res)$grad <- as.vector(attributes(ff)$grad(p)) } else { attributes(res)$grad <- as.vector(attributes(ff)$grad) } return(res) } grad <- function(p) { if (control0$trace>0) print("Numerical gradient") numDeriv::jacobian(obj,p) } } oneiter <- function(p.orig,Dprev,return.mat=FALSE,iter=1) { D <- I <- NULL # Place-holders for gradient and 
negative hessian if (!is.logical(control0$backtrack)) { # Back-tracking based on objective function evaluations objective.origin <- obj(p.orig) D <- attributes(objective.origin)$grad I <- attributes(objective.origin)$hess if (!is.null(I)) I <- -I } if (is.null(D) || is.null(I)) { if (!is.null(hess)) { H <- hess(p.orig) } if (is.null(hess) || is.null(H)) { if (control0$trace>0) print("Numerical Hessian") I <- -numDeriv::jacobian(grad,p.orig,method=lava.options()$Dmethod) } else { I <- -H } D <- attributes(I)$grad if (is.null(D)) { D <- grad(p.orig) } } if (return.mat) return(list(D=D,I=I)) if (control0$stabil) { if (control0$lambda!=0) { if (control0$lambda<0) { sigma <- (t(D)%*%(D))[1] } else { sigma <- control0$lambda } sigma <- min(sigma,10) I <- I+control0$gamma2*sigma*diag(nrow=nrow(I)) } else { sigma <- ((D)%*%t(D)) I <- I+control0$gamma2*(sigma) } } iI <- Inverse(I, symmetric=TRUE, tol=control0$epsilon) Delta <- control0$stepsize*tryCatch(solve(I, cbind(as.vector(D))), error=function(...) 
{ ## Fall back to Pseudo-Inverse using SVD: iI%*%cbind(as.vector(D))}) Lambda <- 1 ## Initial step-size if (identical(control0$backtrack, TRUE)) { mD0 <- mean(Dprev^2) mD <- mean(D^2) p <- p.orig + as.vector(Lambda*Delta) while (mD>=mD0) { if (gradFun) { D = grad(p) } else { DI <- oneiter(p,return.mat=TRUE) D = DI$D } mD = mean(D^2) if (is.nan(mD)) mD=mD0 Lambda <- Lambda/2 if (Lambda<1e-4) break; p <- p.orig + as.vector(Lambda*Delta) } } else if(identical(control0$backtrack, FALSE)) { p <- p.orig + Lambda*Delta } else { # objective(p.orig) - obj(p) <= mu*Lambda*grad(p.orig)*Delta ## curvature c_D.origin_Delta <- control0$backtrack * c(rbind(D) %*% Delta) p <- p.orig + as.vector(Lambda*Delta) mD0 <- c(objective.origin + Lambda * c_D.origin_Delta[1], abs(c_D.origin_Delta[2]))# objective.new <- obj(p) grad.new <- attributes(objective.new)$grad if (is.null(grad.new)) { grad.new <- grad(p) } mD <- c(objective.new, abs(grad.new %*% Delta)) count <- 0 while (any(mD>mD0) || any(is.nan(mD))) { count <- count+1 Lambda <- Lambda/2 if (Lambda<1e-4) break; p <- p.orig + Lambda*Delta objective.new <- obj(p) grad.new <- attributes(objective.new)$grad if(!is.infinite(mD0[1])){ mD0[1] <- objective.origin + Lambda * c_D.origin_Delta[1]# mD[1] <- objective.new } if(!is.infinite(mD0[2])){ if (is.null(grad.new)) { grad.new <- grad(p) } mD[2] <- abs(grad.new %*% Delta) } } } return(list(p=p,D=D,iI=iI)) } count <- count2 <- 0 thetacur <- start stepsizecount <- 0 Dprev <- rep(Inf,length(start)) for (jj in seq_len(control0$iter.max)) { stepsizecount <- stepsizecount+1 count <- count+1 count2 <- count2+1 newpar <- oneiter(thetacur,Dprev,iter=jj) Dprev <- newpar$D thetacur <- newpar$p if (!is.null(control0$nstepsize) && control0$nstepsize>0) { if (control0$nstepsize<=stepsizecount) { control0$stepsize <- sqrt(control0$stepsize) stepsizecount <- 0 } } if (count2==control0$trace) { cat("Iter=", count,"\t",sep="") if (!is.null(obj)) cat("Objective=",obj(as.double(newpar$p))) cat(";\n\tD=", 
paste0(formatC(newpar$D), collapse = " "), "\n") cat("\tp=", paste0(formatC(thetacur), collapse = " "), "\n") count2 <- 0 } if (mean(newpar$D^2)^.5tol) if (omitrel) { ## Don't save models including 'rel' keep <- c() if (length(Equiv)>0) { for (i in seq_len(length(Equiv))) { newvars <- s$var[[Equiv[i]]] if (!any(apply(newvars,1,function(z) all(z%in%myvars)))) keep <- c(keep,Equiv[i]) } Equiv <- keep } keep <- c() if (length(Improve)>0) { for (i in seq_len(length(Improve))) { newvars <- s$var[[Improve[i]]] if (!any(apply(newvars,1,function(z) all(z%in%myvars)))) keep <- c(keep,Improve[i]) } Improve <- keep } } eqvar <- ivar <- NULL models <- list() if (length(Equiv)>0){ for (i in seq_len(length(Equiv))) { xnew <- x0 newvars <- s$var[[Equiv[i]]] for (j in seq_len(nrow(newvars))) { exo.idx <- which(newvars[j,]%in%index(x0)$exogenous) if (length(exo.idx)>0) { xnew <- regression(xnew,from=newvars[j,exo.idx],to=newvars[j,setdiff(1:2,exo.idx)]) } else { covariance(xnew) <- newvars } } models <- c(models,list(xnew)) } eqvar <- s$var[Equiv] } if (length(Improve)>0) { for (i in seq_len(length(Improve))) { xnew <- x0 newvars <- s$var[[Improve[i]]] for (j in seq_len(nrow(newvars))) { exo.idx <- which(newvars[j,]%in%index(x0)$exogenous) if (length(exo.idx)>0) { xnew <- regression(xnew,from=newvars[j,exo.idx],to=newvars[j,setdiff(1:2,exo.idx)]) } else { covariance(xnew) <- newvars } } models <- c(models,list(xnew)) } ivar <- s$var[Improve] } res <- list(equiv=eqvar, improve=ivar, scoretest=s, models=models, I=Improve, E=Equiv, T0=T0, vars=myvars) class(res) <- "equivalence" return(res) } ##' @export print.equivalence <- function(x,...) 
{ cat(" 0)\t ",paste0(x$vars,collapse=lava.options()$symbol[2])," (",formatC(x$T0),")\n") cat("Empirical equivalent models:\n") if (length(x$E)==0) cat("\t none\n") else for (i in seq_len(length(x$E))) { cat(" ",i,")\t ", x$scoretest$res[x$E[i],"Index"], " (",x$scoretest$res[x$E[i],1],")", "\n",sep="") } cat("Candidates for model improvement:\n") if (length(x$I)==0) cat("\t none\n") else for (i in seq_len(length(x$I))) { cat(" ",i,")\t ", x$scoretest$res[x$I[i],"Index"], " (",x$scoretest$res[x$I[i],1],")", "\n",sep="") } invisible(x) } holm <- function(p) { k <- length(p) w <- 1/k ii <- order(p) po <- p[ii] qs <- min(1,po[1]/w) for (i in 2:k) { qs <- c(qs, min(1, max(qs[i-1],po[i]*(1-w*(i-1))/w))) } return(qs) } lava/R/effects.R0000644000176200001440000001657613520655354013167 0ustar liggesusers ##' @export totaleffects <- function(object,...,value) UseMethod("totaleffects") ##' @export totaleffects.lvmfit <- function(object,to,...,level=0.95) { p <- (1-level)/2 q <- qnorm(p) res <- c() if (inherits(to,"formula")) { if (substr(deparse(to[3]),1,1)==".") { trim <- function(x) sapply(x,function(z) gsub(" ","",z,fixed=TRUE)) to <- trim(strsplit(deparse(to),"~")[[1]][1]) } else { to <- list(to) } } if (is.null(list(...)$from) & is.character(to)[1]) { to <- lapply(paste(to,setdiff(vars(object),to),sep="~"),FUN=as.formula) } ef <- function(tt) { f <- effects(object,tt,...) rbind(with(f$totalef,c(est,sd,est/sd,2*(pnorm(abs(est/sd),lower.tail=FALSE)),est+q*sd,est-q*sd))) } if (is.list(to)) { for (tt in to) { res <- rbind(res,ef(tt)) } } else res <- ef(to) colnames(res) <- c("Estimate","Std.Err","z value","Pr(>|z|)", paste0(c(1-p,p)*100,"%")) rownames(res) <- to res } ##' @export effects.lvmfit <- function(object,to,from,...) { if (missing(to)) { return(summary(object)) } P <- path(object,to=to,from=from,...) 
if (is.null(P$path)) { if (inherits(to,"formula")) { f <- extractvar(to) to <- f$y; from <- f$x } } else { from <- P$path[[1]][1] to <- tail(P$path[[1]],1) } cc <- coef(object,type=9,labels=FALSE) ## All parameters (fixed and variable) cc0 <- cbind(coef(object)) ## Estimated parameters i1 <- na.omit(match(rownames(cc),rownames(cc0))) idx.cc0 <- which(rownames(cc)%in%rownames(cc0)); ## Position of estimated parameters among all parameters S <- matrix(0,nrow(cc),nrow(cc)); rownames(S) <- colnames(S) <- rownames(cc) V <- object$vcov npar.mean <- index(object)$npar.mean S[idx.cc0,idx.cc0] <- V[i1,i1] ## "Covariance matrix" of all parameters cclab <- rownames(coef(object,type=9,labels=TRUE)) ## Identify equivalence constraints cctab <- table(cclab) equiv <- which(cctab>1) for (i in seq_len(length(equiv))) { orgpos <- which(cclab==(names(equiv)[i])) pos <- orgpos[-1] for (p in pos) S[p,-orgpos[1]] <- S[-orgpos[1],p] <- S[orgpos[1],-p] } idx.orig <- unique(unlist(P$idx)) coefs.all <- cc[idx.orig] S.all <- S[idx.orig,idx.orig] idx.all <- numberdup(unlist(P$idx)) pos <- 1; idx.list <- P$idx; for (i in seq_len(length(idx.list))) { K <- length(idx.list[[i]]) idx.list[[i]] <- idx.all[pos:(pos+K-1)]; pos <- pos+K } margef <- list() if (length(coefs.all)==1 && is.na(coefs.all)) { totalef <- list(est=0,sd=0) margef <- c(margef,list(est=0,sd=NA)) } else { totalef <- prodsumdelta(coefs.all, idx.list, S.all,...) for (i in seq_len(length(idx.list))) { margef <- c(margef, list(prodsumdelta(coefs.all, idx.list[i], S.all,...))) } } directidx <- which(lapply(P$path,length)==2) inef.list <- idx.list if (length(directidx)==0) { directef <- list(est=0, sd=NA) } else { inef.list <- inef.list[-directidx] directef <- margef[[directidx]] } if (length(inef.list)==0) { totalinef <- list(est=0,sd=NA,grad=NA,hess=NA) } else { totalinef <- prodsumdelta(coefs.all, inef.list, S.all,...) 
} nn <- c("total","direct","indirect") for (i in seq_len(length(margef))) { if (length(P$path[[i]])>2) { nn <- c(nn,paste(rev(P$path[[i]]),collapse=lava.options()$symbol[1])) } } b <- c(totalef$est,directef$est,totalinef$est,totalinef$b) names(b) <- nn D <- t(cbind(totalef$grad,directef$grad,totalinef$grad,totalinef$D)) V <- D%*%S.all%*%t(D) val <- list(coef=b, vcov=V, grad=D, paths=P$path, totalef=totalef, directef=directef, totalinef=totalinef, margef=margef, from=from, to=to) class(val) <- "effects" val } ##' @export print.effects <- function(x,digits=4,...) { s <- summary(x,...) print(s$coef,digits=digits,...) cat("\n") print(s$medprop$coefmat[,c(1,3,4),drop=FALSE],digits=digits,...) return(invisible(x)) } ##' @export coef.effects <- function(object,...) { object$coef } ##' @export vcov.effects <- function(object,...) { object$vcov } ##' @export summary.effects <- function(object,...) { totalef <- with(object$totalef, cbind(est,sd[1])) directef <- with(object$directef, cbind(est,sd[1])) totindirectef <- with(object$totalinef, cbind(est,sd[1])) rownames(totalef) <- "Total" rownames(directef) <- "Direct" rownames(totindirectef) <- "Indirect" nn <- indirectef <- c() K <- seq_len(length(object$margef)) for (i in K) { if (length(object$paths[[i]])>2) { nn <- c(nn,paste(rev(object$paths[[i]]),collapse=lava.options()$symbol[1])) indirectef <- rbind(indirectef, with(object$margef[[i]], c(est,sd))) } }; rownames(indirectef) <- nn mycoef <- rbind(totalef,directef,totindirectef,indirectef) mycoef <- cbind(mycoef,mycoef[,1]/mycoef[,2]) mycoef <- cbind(mycoef,2*(pnorm(abs(mycoef[,3]),lower.tail=FALSE))) colnames(mycoef) <- c("Estimate","Std.Err","z value","Pr(>|z|)") medprop <- NULL if (totindirectef[1]!=0) { if (abs(coef(object)[2])<1e-12) { medprop <- estimate(NULL,coef=c("Mediation proportion"=1),vcov=matrix(NA)) } else { medprop <- estimate(object, function(x) list("Mediation proportion"=1-x[2]/x[1])) ##medprop <- estimate(object, function(x) list("Mediation 
proportion"=logit(x[3]/x[1])),back.transform=expit) } } list(coef=mycoef,medprop=medprop) } ##' @export confint.effects <- function(object,parm,level=0.95,...) { mycoef <- summary(object)$coef p <- 1-(1-level)/2 res <- mycoef[,1] + + qnorm(p)*cbind(-1,1)%x%mycoef[,2] colnames(res) <- paste0(c(1-p,p)*100,"%") rownames(res) <- rownames(mycoef) res } prodtrans <- function(betas) { k <- length(betas) res <- prod(betas) nabla <- numeric(k) for (i in seq_len(k)) nabla[i] <- prod(betas[-i]) H <- matrix(0,k,k) if (k>1) for (i in seq_len(k-1)) for (j in (i+1):k) H[j,i] <- H[i,j] <- prod(c(1,betas[-c(i,j)])) attr(res,"gradient") <- nabla attr(res,"hessian") <- H return(res) } prodsumdelta <- function(betas,prodidx,S,order=1) { ## Delta-method k <- length(prodidx) p <- length(betas) if (p==1) { return(list(est=betas, sd=sqrt(S), grad=0, beta=betas, D=0, hess=0)) } val <- 0; grad <- numeric(p) D <- matrix(0,nrow=p,ncol=k) beta <- numeric(k) H <- matrix(0,p,p) for (i in seq_len(k)) { ii <- prodidx[[i]] myterm <- prodtrans(betas[ii]); if (order>1) { H0 <- attributes(myterm)$hessian Sigma <- S[ii,ii] print(sum(diag(Sigma%*%H0))/2) val <- val + (myterm + sum(diag(Sigma%*%H0))/2) } else { val <- val + myterm beta[i] <- myterm } D[ii,i] <- attributes(myterm)$gradient grad[ii] <- grad[ii] + attributes(myterm)$gradient }; grad <- matrix(grad,ncol=1) return(list(est=val, sd=sqrt(t(grad)%*%S%*%grad), grad=grad, b=beta, D=D, hess=H)) } lava/R/pars.R0000644000176200001440000000240713520655354012501 0ustar liggesusers##' @export `pars` <- function(x,...) UseMethod("pars") ##' @export pars.default <- function(x,...) { if (!is.null(x$opt$estimate)) return(x$opt$estimate) if (!is.null(x$opt$par)) return(x$opt$par) if (!is.null(x$coef)) return(x$coef) return(coef(x)) } ##' @export pars.lvm.missing <- function(x,reorder=FALSE,...) 
{ res <- pars.default(x) if (reorder) { idx <- match(coef(Model(x)),names(coef(x))) return(res[idx]) } return(res) } ###{{{ pars.multigroupfit ## pars.multigroupfit <- function(x,...) { ## res <- pars.default(x) ## lapply(ee$model$lvm,coef)) ## coef() ##} ###}}} ###{{{ pars.lvm ##' @export pars.lvm <- function(x, A, P, v, e, ...) { parres <- A[index(x)$M1==1] diagcorfree <- diag(P)[diag(index(x)$P1)==1] parres <- c(parres, diagcorfree) if (ncol(A)>1) for (i in seq_len(ncol(index(x)$P1)-1)) for (j in seq(i+1,nrow(index(x)$P1))) { if (index(x)$P1[j,i]!=0) { parres <- c(parres, P[j,i]) } } if (length(parres)>0) names(parres) <- paste0("p",seq_len(length(parres))) if (!missing(v)) { parres <- c( v[which(index(x)$v1==1)], parres) } if (!missing(e)) { parres <- c( parres, e[which(index(x)$e1==1)] ) } return(parres) } ###}}} pars.lvm lava/R/endogenous.R0000644000176200001440000000174613520655354013707 0ustar liggesusers##' @export `endogenous` <- function(x,...) UseMethod("endogenous") ##' @export `endogenous.lvmfit` <- function(x,...) { endogenous(Model(x),...) } ##' @export `endogenous.lvm` <- function(x,top=FALSE,latent=FALSE,...) { observed <- manifest(x) if (latent) observed <- vars(x) if (top) { M <- x$M res <- c() for (i in observed) if (!any(M[i,]==1)) res <- c(res, i) return(res) } exo <- exogenous(x) return(setdiff(observed,exo)) } ##' @export endogenous.list <- function(x,...) { endolist <- c() for (i in seq_along(x)) { endolist <- c(endolist, endogenous(x[[i]])) } endolist <- unique(endolist) return(endolist) } ##' @export `endogenous.multigroup` <- function(x,...) { endogenous(Model(x)) } ##' @export `endogenous.lm` <- function(x,...) { getoutcome(formula(x))[1] } lava/R/estimate.lvm.R0000644000176200001440000010227613520655354014151 0ustar liggesusers###{{{ estimate.lvm ##' Estimation of parameters in a Latent Variable Model (lvm) ##' ##' Estimate parameters. MLE, IV or user-defined estimator. 
##' ##' A list of parameters controlling the estimation and optimization procedures ##' is parsed via the \code{control} argument. By default Maximum Likelihood is ##' used assuming multivariate normal distributed measurement errors. A list ##' with one or more of the following elements is expected: ##' ##' \describe{ ##' \item{start:}{Starting value. The order of the parameters can be shown by ##' calling \code{coef} (with \code{mean=TRUE}) on the \code{lvm}-object or with ##' \code{plot(..., labels=TRUE)}. Note that this requires a check that it is ##' actual the model being estimated, as \code{estimate} might add additional ##' restriction to the model, e.g. through the \code{fix} and \code{exo.fix} ##' arguments. The \code{lvm}-object of a fitted model can be extracted with the ##' \code{Model}-function.} ##' ##' \item{starterfun:}{Starter-function with syntax ##' \code{function(lvm, S, mu)}. Three builtin functions are available: ##' \code{startvalues}, \code{startvalues0}, \code{startvalues1}, ...} ##' ##' \item{estimator:}{ String defining which estimator to use (Defaults to ##' ``\code{gaussian}'')} ##' ##' \item{meanstructure}{Logical variable indicating ##' whether to fit model with meanstructure.} ##' ##' \item{method:}{ String pointing to ##' alternative optimizer (e.g. \code{optim} to use simulated annealing).} ##' ##' \item{control:}{ Parameters passed to the optimizer (default ##' \code{stats::nlminb}).} ##' ##' \item{tol:}{ Tolerance of optimization constraints on lower limit of ##' variance parameters. } } ##' ##' @param x \code{lvm}-object ##' @param data \code{data.frame} ##' @param estimator String defining the estimator (see details below) ##' @param control control/optimization parameters (see details below) ##' @param missing Logical variable indiciating how to treat missing data. ##' Setting to FALSE leads to complete case analysis. 
In the other case ##' likelihood based inference is obtained by integrating out the missing data ##' under assumption the assumption that data is missing at random (MAR). ##' @param weights Optional weights to used by the chosen estimator. ##' @param weightsname Weights names (variable names of the model) in case ##' \code{weights} was given as a vector of column names of \code{data} ##' @param data2 Optional additional dataset used by the chosen ##' estimator. ##' @param id Vector (or name of column in \code{data}) that identifies ##' correlated groups of observations in the data leading to variance estimates ##' based on a sandwich estimator ##' @param fix Logical variable indicating whether parameter restriction ##' automatically should be imposed (e.g. intercepts of latent variables set to ##' 0 and at least one regression parameter of each measurement model fixed to ##' ensure identifiability.) ##' @param index For internal use only ##' @param graph For internal use only ##' @param messages Control how much information should be ##' printed during estimation (0: none) ##' @param quick If TRUE the parameter estimates are calculated but all ##' additional information such as standard errors are skipped ##' @param method Optimization method ##' @param param set parametrization (see \code{help(lava.options)}) ##' @param cluster Obsolete. Alias for 'id'. ##' @param p Evaluate model in parameter 'p' (no optimization) ##' @param ... Additional arguments to be passed to lower-level functions ##' @return A \code{lvmfit}-object. ##' @author Klaus K. 
Holst ##' @seealso estimate.default score, information ##' @keywords models regression ##' @export ##' @method estimate lvm ##' @examples ##' dd <- read.table(header=TRUE, ##' text="x1 x2 x3 ##' 0.0 -0.5 -2.5 ##' -0.5 -2.0 0.0 ##' 1.0 1.5 1.0 ##' 0.0 0.5 0.0 ##' -2.5 -1.5 -1.0") ##' e <- estimate(lvm(c(x1,x2,x3)~u),dd) ##' ##' ## Simulation example ##' m <- lvm(list(y~v1+v2+v3+v4,c(v1,v2,v3,v4)~x)) ##' covariance(m) <- v1~v2+v3+v4 ##' dd <- sim(m,10000) ## Simulate 10000 observations from model ##' e <- estimate(m, dd) ## Estimate parameters ##' e ##' ##' ## Using just sufficient statistics ##' n <- nrow(dd) ##' e0 <- estimate(m,data=list(S=cov(dd)*(n-1)/n,mu=colMeans(dd),n=n)) ##' rm(dd) ##' ##' ## Multiple group analysis ##' m <- lvm() ##' regression(m) <- c(y1,y2,y3)~u ##' regression(m) <- u~x ##' d1 <- sim(m,100,p=c("u,u"=1,"u~x"=1)) ##' d2 <- sim(m,100,p=c("u,u"=2,"u~x"=-1)) ##' ##' mm <- baptize(m) ##' regression(mm,u~x) <- NA ##' covariance(mm,~u) <- NA ##' intercept(mm,~u) <- NA ##' ee <- estimate(list(mm,mm),list(d1,d2)) ##' ##' ## Missing data ##' d0 <- makemissing(d1,cols=1:2) ##' e0 <- estimate(m,d0,missing=TRUE) ##' e0 `estimate.lvm` <- function(x, data=parent.frame(), estimator=NULL, control=list(), missing=FALSE, weights, weightsname, data2, id, fix, index=!quick, graph=FALSE, messages=lava.options()$messages, quick=FALSE, method, param, cluster, p, ...) 
{ cl <- match.call() if (!base::missing(param)) { oldparam <- lava.options()$param lava.options(param=param) on.exit(lava.options(param=oldparam)) } if (!base::missing(method)) { control["method"] <- list(method) } Optim <- list( iter.max=lava.options()$iter.max, trace=ifelse(lava.options()$debug,3,0), gamma=lava.options()$gamma, gamma2=1, ngamma=lava.options()$ngamma, backtrack=lava.options()$backtrack, lambda=0.05, abs.tol=1e-9, epsilon=1e-10, delta=1e-10, rel.tol=1e-10, S.tol=1e-5, stabil=FALSE, start=NULL, constrain=lava.options()$constrain, method=NULL, starterfun="startvalues0", information="E", meanstructure=TRUE, sparse=FALSE, tol=lava.options()$tol) defopt <- lava.options()[] defopt <- defopt[intersect(names(defopt),names(Optim))] Optim[names(defopt)] <- defopt if (length(control)>0) { Optim[names(control)] <- control } if (is.environment(data)) { innames <- intersect(ls(envir=data),vars(x)) data <- as.data.frame(lapply(innames,function(x) get(x,envir=data))) names(data) <- innames } if (length(exogenous(x)>0)) { catx <- categorical2dummy(x,data) x <- catx$x; data <- catx$data } if (!lava.options()$exogenous) exogenous(x) <- NULL redvar <- intersect(intersect(parlabels(x),latent(x)),colnames(data)) if (length(redvar)>0) warning(paste("Latent variable exists in dataset",redvar)) ## Random-slopes: xfix <- setdiff(colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))],latent(x)) if (base::missing(fix)) { fix <- ifelse(length(xfix)>0,FALSE,TRUE) } Debug(list("start=",Optim$start)) if (!base::missing(cluster)) id <- cluster ## Weights... 
if (!base::missing(weights)) { if (is.character(weights)) { weights <- data[,weights,drop=FALSE] if (!base::missing(weightsname)) { colnames(weights) <- weightsname } else { yvar <- index(x)$endogenous nw <- seq_len(min(length(yvar),ncol(weights))) colnames(weights)[nw] <- yvar[nw] } } weights <- cbind(weights) } else { weights <- NULL } if (!base::missing(data2)) { if (is.character(data2)) { data2 <- data[,data2] } } else { data2 <- NULL } ## Correlated clusters... if (!base::missing(id)) { if (is.character(id)) { id <- data[,id] } } else { id <- NULL } Debug("procdata") val <- try({ dd <- procdata.lvm(x,data=data,missing=missing) S <- dd$S; mu <- dd$mu; n <- dd$n var.missing <- setdiff(vars(x),colnames(S)) }, silent=TRUE) if (inherits(val,"try-error")) { var.missing <- setdiff(vars(x),colnames(data)) S <- NULL; mu <- NULL; n <- nrow(data) } ## if (fix) { if (length(var.missing)>0) {## Convert to latent: new.lat <- setdiff(var.missing,latent(x)) if (length(new.lat)>0) x <- latent(x, new.lat) } ##} ## Run hooks (additional lava plugins) myhooks <- gethook() for (f in myhooks) { res <- do.call(f, list(x=x,data=data,weights=weights,data2=data2,estimator=estimator,optim=Optim)) if (!is.null(res$x)) x <- res$x if (!is.null(res$data)) data <- res$data if (!is.null(res$weights)) weights <- res$weights if (!is.null(res$data2)) data2 <- res$data2 if (!is.null(res$optim)) Optim <- res$optim if (!is.null(res$estimator)) estimator <- res$estimator rm(res) } if (is.null(estimator)) { if (!missing(weights) && !is.null(weights)) { estimator <- "normal" } else estimator <- "gaussian" } checkestimator <- function(x,...) 
{ ffname <- paste0(x,c("_objective","_gradient"),".lvm") exists(ffname[1])||exists(ffname[2]) } if (!checkestimator(estimator)) { ## Try down/up-case version estimator <- tolower(estimator) if (!checkestimator(estimator)) { estimator <- toupper(estimator) } } ObjectiveFun <- paste0(estimator, "_objective", ".lvm") GradFun <- paste0(estimator, "_gradient", ".lvm") if (!exists(ObjectiveFun) & !exists(GradFun)) stop("Unknown estimator.") Method <- paste0(estimator, "_method", ".lvm") if (!exists(Method)) { Method <- "nlminb1" } else { Method <- get(Method) } NoOptim <- "method"%in%names(control) && is.null(control$method) if (is.null(Optim$method) && !(NoOptim)) { Optim$method <- if (missing && Method!="nlminb0") "nlminb1" else Method } if (index) { ## Proces data and setup some matrices x <- fixsome(x, measurement.fix=fix, S=S, mu=mu, n=n,debug=messages>1) if (messages>1) message("Reindexing model...\n") if (length(xfix)>0) { index(x) <- reindex(x,sparse=Optim$sparse,zeroones=TRUE,deriv=TRUE) } else { x <- updatelvm(x,sparse=Optim$sparse,zeroones=TRUE,deriv=TRUE,mean=TRUE) } } if (is.null(estimator) || estimator==FALSE) { return(x) } if (length(index(x)$endogenous)==0) stop("No observed outcome variables. Check variable names in model and data.") if (!Optim$meanstructure) { mu <- NULL } nparall <- index(x)$npar + ifelse(Optim$meanstructure, index(x)$npar.mean+index(x)$npar.ex,0) ## Get starting values if (!missing(p)) { start <- p Optim$start <- p } else { myparnames <- coef(x,mean=TRUE) paragree <- FALSE paragree.2 <- c() if (!is.null(Optim$start)) { paragree <- myparnames%in%names(Optim$start) paragree.2 <- names(Optim$start)%in%myparnames } if (sum(paragree)>=length(myparnames)) Optim$start <- Optim$start[which(paragree.2)] if (! 
(length(Optim$start)==length(myparnames) & sum(paragree)==0)) if (is.null(Optim$start) || sum(paragree)0 | (length(xconstrain)>0 & XconstrStdOpt | !lava.options()$test)) { ## Yes x0 <- x if (length(xfix)>0) { myclass <- c("lvmfit.randomslope",myclass) nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) x0 <- x for (i in seq_along(myfix$var)) for (j in seq_len(length(myfix$col[[i]]))) regfix(x0, from=vars(x0)[myfix$row[[i]][j]], to=vars(x0)[myfix$col[[i]][j]]) <- colMeans(data[,myfix$var[[i]],drop=FALSE]) x0 <- updatelvm(x0,zeroones=TRUE,deriv=TRUE) x <- x0 ## Alter start-values/constraints: new.par.idx <- which(coef(mymodel,mean=TRUE,fix=FALSE)%in%coef(x0,mean=TRUE,fix=FALSE)) if (length(Optim$start)>length(new.par.idx)) Optim$start <- Optim$start[new.par.idx] lower <- lower[new.par.idx] if (Optim$constrain) { constrained <- match(constrained,new.par.idx) } } mydata <- as.matrix(data[,manifest(x0)]) myObj <- function(pp) { if (Optim$constrain) { pp[constrained] <- exp(pp[constrained]) } myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { x0$fix[cbind(rowpos[[i]],colpos[[i]])] <- index(x0)$A[cbind(rowpos[[i]],colpos[[i]])] <- data[ii,xfix[i]] } if (is.list(data2)) { res <- do.call(ObjectiveFun, list(x=x0, p=pp, data=mydata[ii,], n=1, weights=weights[ii,], data2=data2[ii,])) } else { res <- do.call(ObjectiveFun, list(x=x0, p=pp, data=mydata[ii,], n=1, weights=weights[ii,], data2=data2)) } return(res) } sum(sapply(seq_len(nrow(data)),myfun)) } myGrad <- function(pp) { if (Optim$constrain) { pp[constrained] <- exp(pp[constrained]) } myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { x0$fix[cbind(rowpos[[i]],colpos[[i]])] <- index(x0)$A[cbind(rowpos[[i]],colpos[[i]])] <- data[ii,xfix[i]] } if (is.list(data2)) { rr <- do.call(GradFun, 
list(x=x0, p=pp, data=mydata[ii,,drop=FALSE], n=1, weights=weights[ii,], data2=data2)) } else { rr <- do.call(GradFun, list(x=x0, p=pp, data=mydata[ii,,drop=FALSE], n=1, weights=weights[ii,], data2=data2[ii,])) } return(rr) } ss <- rowSums(rbind(sapply(seq_len(nrow(data)),myfun))) if (Optim$constrain) { ss[constrained] <- ss[constrained]*pp[constrained] } return(ss) } myInfo <- function(pp) { myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { x0$fix[cbind(rowpos[[i]],colpos[[i]])] <- index(x0)$A[cbind(rowpos[[i]],colpos[[i]])] <- data[ii,xfix[i]] } if (is.list(data2)) { res <- do.call(InformationFun, list(p=pp, obj=myObj, x=x0, data=data[ii,], n=1, weights=weights[ii,], data2=data2)) } else { res <- do.call(InformationFun, list(p=pp, obj=myObj, x=x0, data=data[ii,], n=1, weights=weights[ii,], data2=data2[ii,])) } return(res) } L <- lapply(seq_len(nrow(data)),function(x) myfun(x)) val <- apply(array(unlist(L),dim=c(length(pp),length(pp),nrow(data))),c(1,2),sum) if (!is.null(attributes(L[[1]])$grad)) { attributes(val)$grad <- colSums ( matrix( unlist(lapply(L,function(i) attributes(i)$grad)) , ncol=length(pp), byrow=TRUE) ) } return(val) } ################################################## } else { ## No, standard model ## Non-linear parameter constraints involving observed variables? (e.g. 
nonlinear regression) xconstrain <- c() for (i in seq_len(length(constrain(x)))) { z <- constrain(x)[[i]] xx <- intersect(attributes(z)$args,manifest(x)) if (length(xx)>0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(x))[i] y <- names(which(unlist(lapply(intercept(x),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain <- c(xconstrain,list(el)) } } yconstrain <- unlist(lapply(xconstrain,function(x) x$endo)) iconstrain <- unlist(lapply(xconstrain,function(x) x$idx)) MkOffset <- function(pp,grad=FALSE) { if (length(xconstrain)>0) { Mu <- matrix(0,nrow(data),length(vars(x))); colnames(Mu) <- vars(x) M <- modelVar(x,p=pp,data=data) M$parval <- c(M$parval, x$mean[unlist(lapply(x$mean,is.numeric))]) for (i in seq_len(length(xconstrain))) { pp <- unlist(M$parval[xconstrain[[i]]$warg]); myidx <- with(xconstrain[[i]],order(c(wargidx,exoidx))) D <- cbind(rbind(pp)%x%cbind(rep(1,nrow(Mu))), data[,xconstrain[[i]]$exo,drop=FALSE])[,myidx,drop=FALSE] mu <- try(xconstrain[[i]]$func(D),silent=TRUE) if (is.data.frame(mu)) mu <- mu[,1] if (inherits(mu,"try-error") || NROW(mu)!=NROW(Mu)) { ## mu1 <- with(xconstrain[[i]], ## apply(data[,exo,drop=FALSE],1, ## function(x) func(unlist(c(pp,x))[myidx]))) mu <- apply(D,1,xconstrain[[i]]$func) } Mu[,xconstrain[[i]]$endo] <- mu } offsets <- Mu%*%t(M$IAi)[,endogenous(x)] return(offsets) } return(NULL) } myObj <- function(pp) { if (Optim$constrain) { pp[constrained] <- exp(pp[constrained]) } offset <- MkOffset(pp) mu0 <- mu; S0 <- S; x0 <- x if (!is.null(offset)) { x0$constrain[iconstrain] <- NULL data0 <- data[,manifest(x0)] data0[,endogenous(x)] <- data0[,endogenous(x)]-offset pd <- procdata.lvm(x0,data=data0) S0 <- pd$S; mu0 <- pd$mu x0$mean[yconstrain] <- 0 } do.call(ObjectiveFun, list(x=x0, p=pp, data=data, S=S0, mu=mu0, 
n=n, weights=weights ,data2=data2, offset=offset )) } myGrad <- function(pp) { if (Optim$constrain) pp[constrained] <- exp(pp[constrained]) S <- do.call(GradFun, list(x=x, p=pp, data=data, S=S, mu=mu, n=n, weights=weights , data2=data2##, offset=offset )) if (Optim$constrain) { S[constrained] <- S[constrained]*pp[constrained] } if (is.null(mu) & index(x)$npar.mean>0) { return(S[-c(seq_len(index(x)$npar.mean))]) } if (length(S)0) { return(I[-seq_len(index(x)$npar.mean),-seq_len(index(x)$npar.mean)]) } return(I) } } myHess <- function(pp) { p0 <- pp if (Optim$constrain) pp[constrained] <- exp(pp[constrained]) I0 <- myInfo(pp) attributes(I0)$grad <- NULL D <- attributes(I0)$grad if (is.null(D)) { D <- myGrad(p0) attributes(I0)$grad <- D } if (Optim$constrain) { I0[constrained,-constrained] <- apply(I0[constrained,-constrained,drop=FALSE],2,function(x) x*pp[constrained]); I0[-constrained,constrained] <- t(I0[constrained,-constrained]) if (sum(constrained)==1) { I0[constrained,constrained] <- I0[constrained,constrained]*outer(pp[constrained],pp[constrained]) - D[constrained] } else { I0[constrained,constrained] <- I0[constrained,constrained]*outer(pp[constrained],pp[constrained]) - diag(D[constrained],ncol=length(constrained)) } } return(I0) } if (is.null(tryCatch(get(InformationFun),error = function (x) NULL))) myInfo <- myHess <- NULL if (is.null(tryCatch(get(GradFun),error = function (x) NULL))) myGrad <- NULL if (messages>1) message("Optimizing objective function...") if (Optim$trace>0 & (messages>1)) message("\n") ## Optimize with lower constraints on the variance-parameters if ((is.data.frame(data) | is.matrix(data)) && nrow(data)==0) stop("No observations") if (!missing(p)) { opt <- list(estimate=p) } else { if (!is.null(Optim$method)) { optarg <- list(start=Optim$start, objective=myObj, gradient=myGrad, hessian=myHess, lower=lower, control=Optim, debug=debug) if (length(Optim$method)>1) { Optim$optimx.method <- Optim$method } if (!is.null(Optim$optimx.method)) { 
Optim$method <- "optimx" } if (Optim$method%in%c("optimx","optim")) { optimcontrolnames <- c("trace", "follow.on", "save.failures", "maximize", "all.methods", "kkt", "kkttol", "kkt2tol", "starttests", "dowarn", "badval", "usenumDeriv", "fnscale", "parscale", "ndeps", "maxit", "abstol", "reltol", #"alpha","beta","gamma", "REPORT", "type", "lmm", "factr", "pgtol") if (!is.null(optarg$control)) { optarg$control[names(optarg$control)%ni%optimcontrolnames] <- NULL } args <- names(formals(get(Optim$method))) names(optarg)[1] <- "par" if (is.null(optarg$upper)) optarg$upper <- Inf if (!is.null(optarg[["objective"]])) names(optarg)[2] <- "fn" if (!is.null(optarg[["gradient"]])) names(optarg)[3] <- "gr" ##if (!is.null(optarg[["hessian"]])) names(optarg)[4] <- "hess" optarg$hessian <- NULL optarg[names(optarg)%ni%args] <- NULL } if (!is.null(Optim$optimx.method)) optarg$method <- Optim$optimx.method opt <- do.call(Optim$method, optarg) if (inherits(opt,"optimx")) { opt <- list(par=coef(opt)[1,]) } if (is.null(opt$estimate)) opt$estimate <- opt$par if (Optim$constrain) { opt$estimate[constrained] <- exp(opt$estimate[constrained]) } if (XconstrStdOpt & !is.null(myGrad)) opt$gradient <- as.vector(myGrad(opt$par)) else { opt$gradient <- numDeriv::grad(myObj,opt$par) } } else { if (!NoOptim) { opt <- do.call(ObjectiveFun, list(x=x,data=data,control=control,...)) opt$gradient <- rep(0,length(opt$estimate)) } else { opt <- list(estimate=Optim$start, gradient=rep(0,length(Optim$start))) } } } if (!is.null(opt$convergence)) { if (opt$convergence != 0) warning("Lack of convergence. Increase number of iteration or change starting values.") } else if (!is.null(opt$gradient) && mean(opt$gradient)^2>1e-3) warning("Lack of convergence. 
Increase number of iteration or change starting values.") if (quick) { return(opt$estimate) } ## Calculate std.err: pp <- rep(NA,length(coefname)); names(pp) <- coefname pp.idx <- NULL if (!is.null(names(opt$estimate))) { pp[names(opt$estimate)] <- opt$estimate pp.idx <- na.omit(match(coefname,names(opt$estimate))) } else { pp[] <- opt$estimate pp.idx <- seq(length(pp)) } ## TODO: ## if (length(pp.idx)!=length(pp)) { ## pp <- rep(NA,length(coefname)); names(pp) <- coefname ## pp[] <- opt$estimate ## pp.idx <- seq(length(pp)) ## } suppressWarnings(mom <- tryCatch(modelVar(x, pp, data=data),error=function(x)NULL)) if (NoOptim) { asVar <- matrix(NA,ncol=length(pp),nrow=length(pp)) } else { if (messages>1) message("\nCalculating asymptotic variance...\n") asVarFun <- paste0(estimator, "_variance", ".lvm") if (!exists(asVarFun)) { if (is.null(myInfo)) { if (!is.null(myGrad)) myInfo <- function(pp) numDeriv::jacobian(myGrad,pp,method=lava.options()$Dmethod) else myInfo <- function(pp) numDeriv::hessian(myObj,pp) } I <- myInfo(opt$estimate) asVar <- tryCatch(Inverse(I), error=function(e) matrix(NA, length(opt$estimate), length(opt$estimate))) } else { asVar <- tryCatch(do.call(asVarFun, list(x=x,p=opt$estimate,data=data,opt=opt)), error=function(e) matrix(NA, length(opt$estimate), length(opt$estimate))) } if (any(is.na(asVar))) { warning("Problems with asymptotic variance matrix. 
Possibly non-singular information matrix!") } if (!is.null(attributes(asVar)$pseudo) && attributes(asVar)$pseudo) { warning("Near-singular covariance matrix, using pseudo-inverse!") } diag(asVar)[diag(asVar)==0] <- NA } mycoef <- matrix(NA,nrow=nparall,ncol=4) mycoef[pp.idx,1] <- opt$estimate ## Will be finished during post.hooks res <- list(model=x, call=cl, coef=mycoef, vcov=asVar, mu=mu, S=S, ##A=A, P=P, model0=mymodel, ## Random slope hack estimator=estimator, opt=opt,expar=x$expar, data=list(model.frame=data, S=S, mu=mu, C=mom$C, v=mom$v, n=n, m=length(latent(x)), k=length(index(x)$manifest), data2=data2), weights=weights, data2=data2, cluster=id, pp.idx=pp.idx, graph=NULL, control=Optim) class(res) <- myclass myhooks <- gethook("post.hooks") for (f in myhooks) { res0 <- do.call(f,list(x=res)) if (!is.null(res0)) res <- res0 } if(graph) { res <- edgelabels(res,type="est") } return(res) } ###}}} estimate.lvm lava/R/cancel.R0000644000176200001440000000231713520655354012761 0ustar liggesusers##' Generic cancel method ##' ##' @title Generic cancel method ##' @param x Object ##' @param \dots Additioal arguments ##' @author Klaus K. Holst ##' @aliases cancel<- ##' @export "cancel" <- function(x,...) UseMethod("cancel") ##' @export "cancel<-" <- function(x,...,value) UseMethod("cancel<-") ##' @export "cancel<-.lvm" <- function(x, ..., value) { cancel(x,value,...) } ##' @export cancel.lvm <- function(x,value,...) 
{ if (inherits(value,"formula")) { lhs <- getoutcome(value) if (length(lhs)==0) yy <- NULL else yy <- decomp.specials(lhs) xf <- attributes(terms(value))$term.labels if(identical(all.vars(value),xf)) return(cancel(x,xf)) res <- lapply(xf,decomp.specials) xx <- unlist(lapply(res, function(z) z[1])) for (i in yy) { for (j in xx) cancel(x) <- c(i,j) } index(x) <- reindex(x) return(x) } for (v1 in value) for (v2 in value) if (v1!=v2) { if (all(c(v1,v2)%in%vars(x))) { x$M[v1,v2] <- 0 x$par[v1,v2] <- x$fix[v1,v2] <- x$covpar[v1,v2] <- x$covfix[v1,v2] <- NA x$cov[v1,v2] <- 0 } } x$parpos <- NULL index(x) <- reindex(x) return(x) } lava/R/lvm.R0000644000176200001440000000631713520655354012336 0ustar liggesusers##' Initialize new latent variable model ##' ##' Function that constructs a new latent variable model object ##' ##' @aliases lvm print.lvm summary.lvm ##' @param x Vector of variable names. Optional but gives control of the ##' sequence of appearance of the variables. The argument can be given as a ##' character vector or formula, e.g. \code{~y1+y2} is equivalent to ##' \code{c("y1","y2")}. Alternatively the argument can be a formula specifying ##' a linear model. ##' @param \dots Additional arguments to be passed to the low level functions ##' @param latent (optional) Latent variables ##' @param messages Controls what messages are printed (0: none) ##' @return Returns an object of class \code{lvm}. ##' @author Klaus K. Holst ##' @seealso \code{\link{regression}}, \code{\link{covariance}}, ##' \code{\link{intercept}}, ... 
##' @keywords models regression ##' @export ##' @examples ##' ##' m <- lvm() # Empty model ##' m1 <- lvm(y~x) # Simple linear regression ##' m2 <- lvm(~y1+y2) # Model with two independent variables (argument) ##' m3 <- lvm(list(c(y1,y2,y3)~u,u~x+z)) # SEM with three items ##' lvm <- function(x=NULL, ..., latent=NULL, messages=lava.options()$messages) { M <- C <- par <- fix <- numeric(); mu <- list() noderender <- list( fill=c(), shape=c(), label=c() ) edgerender <- list(lty=c(), lwd=c(), col=c(), textCol=c(), est=c(), arrowhead=c(), dir=c(), cex=c(), futureinfo=list()) graphrender <- list(recipEdges="distinct") graphdefault <- list( "fill"="white", "shape"="rectangle", "label"=expression(NA), "lty"=1, "lwd"=1, "col"="black", "textCol"="black", "est"=0, "arrowhead"="open", "dir"="forward", "cex"=1.5, "label"=expression(), "futureinfo"=c()) modelattr <- list( randomslope=list(), survival=list(), parameter=list(), categorical=list(), distribution=list(), nonlinear=list(), functional=list(), label=list()) res <- list(M=M, par=par, cov=C, covpar=C, fix=fix, covfix=fix,latent=list(), mean=mu, index=NULL, exogenous=NA, constrain=list(), constrainY=list(), attributes=modelattr, noderender=noderender, edgerender=edgerender, graphrender=graphrender, graphdef=graphdefault) class(res) <- "lvm" myhooks <- gethook("init.hooks") for (f in myhooks) { res <- do.call(f, list(x=res)) } myvar <- NULL if (!is.list(x)) x <- list(x,...) 
for (myvar in x) { if (inherits(myvar,"formula")) { regression(res,messages=messages) <- myvar } if (is.character(myvar)) { res <- addvar(res, myvar, messages=messages) } } if (!is.null(myvar)) { index(res) <- reindex(res,zeroones=TRUE) } if (!is.null(latent)) { latent(res) <- latent } return(res) } lava/R/zcolorbar.R0000644000176200001440000000513413520655354013531 0ustar liggesusers##' Add color-bar to plot ##' ##' @title Add color-bar to plot ##' @param clut Color look-up table ##' @param x.range x range ##' @param y.range y range ##' @param values label values ##' @param digits number of digits ##' @param label.offset label offset ##' @param srt rotation of labels ##' @param cex text size ##' @param border border of color bar rectangles ##' @param alpha Alpha (transparency) level 0-1 ##' @param position Label position left/bottom (1) or top/right (2) or no text (0) ##' @param direction horizontal or vertical color bars ##' @param \dots additional low level arguments (i.e. parsed to \code{text}) ##' @export ##' @examples ##' \dontrun{ ##' plotNeuro(x,roi=R,mm=-18,range=5) ##' colorbar(clut=Col(rev(rainbow(11,start=0,end=0.69)),0.5), ##' x=c(-40,40),y.range=c(84,90),values=c(-5:5)) ##' ##' colorbar(clut=Col(rev(rainbow(11,start=0,end=0.69)),0.5), ##' x=c(-10,10),y.range=c(-100,50),values=c(-5:5), ##' direction="vertical",border=1) ##' } colorbar <- function(clut=Col(rev(rainbow(11,start=0,end=0.69)),alpha), x.range=c(-.5,.5),y.range=c(-.1,.1), values=seq(clut),digits=2,label.offset,srt=45, cex=0.5,border=NA, alpha=0.5, position=1, direction=c("horizontal","vertical"),...) 
{ nlut <- length(clut) X <- length(agrep(tolower(direction[1]),"horizontal"))>0 scale <- ifelse(X,diff(x.range),diff(y.range))/nlut barsize <- ifelse(X,diff(y.range),diff(x.range)) if (missing(label.offset)) label.offset <- barsize/3 delta <- ifelse(X,x.range[1],y.range[1]) if (!is.null(values)) dM <- diff(range(values))/(nlut-1) for (i in seq_len(nlut+1)-1) { pos <- delta + (i-1)*scale if (X) { x1 <- pos; x2 <- pos+scale; y1 <- y.range[1]; y2 <- y.range[2] } else { y1 <- pos; y2 <- pos+scale; x1 <- x.range[1]; x2 <- x.range[2] } if (i>0) rect(x1,y1,x2,y2, col=clut[i], border=border, xpd=TRUE) } if (!is.null(values)) { for (i in seq_len(nlut+1)-1) { pos <- delta + (i-1)*scale rund <- format(round(min(values)+dM*i,max(1,digits)),digits=digits) x0 <- pos+(1+0.5)*scale; y0 <- y.range[2]+label.offset if (!X) { y0 <- x0; if (position==1) x0 <- x.range[1]-label.offset if (position==2) x0 <- x.range[1]+label.offset*5 if (position==3) x0 <- x.range[1]+label.offset*1 } if (i0) { if (lava.options()$sparse) { requireNamespace("Matrix",quietly=TRUE) newNA <- newM <- Matrix::Matrix(0,k,k) newNAc <- newNA; diag(newNAc) <- NA newcov <- Matrix::Diagonal(k) } else { newM <- matrix(0,k,k) newcov <- diag(k) } newNA <- matrix(NA,k,k) colnames(newM) <- rownames(newM) <- colnames(newcov) <- rownames(newcov) <- colnames(newNA) <- rownames(newNA) <- new newmean <- as.list(rep(NA,k)) N <- nrow(x$cov) if (is.null(N)) { N <- 0 x$M <- newM x$cov <- newcov; x$covfix <- x$fix <- x$par <- x$covpar <- newNA x$mean <- newmean } else { if (lava.options()$sparse) { x$M <- Matrix::bdiag(x$M, newM) ## Add regression labels.R x$cov <- Matrix::bdiag(x$cov, newcov) ## Add covariance x$par <- Matrix::bdiag(x$par, newNA) ## Add regression labels x$covpar <- Matrix::bdiag(x$covpar, newNA) ## Add covariance labels x$fix <- Matrix::bdiag(x$fix, newNA) x$covfix <- Matrix::bdiag(x$covfix, newNA) } else { x$M <- blockdiag(x$M, newM, pad=0) ## Add regression labels x$cov <- blockdiag(x$cov, newcov, pad=0) ## Add 
covariance x$par <- blockdiag(x$par, newNA, pad=NA) ## Add regression labels x$covpar <- blockdiag(x$covpar, newNA, pad=NA) ## Add covariance labels x$fix <- blockdiag(x$fix, newNA, pad=NA) ## x$covfix <- blockdiag(x$covfix, newNA, pad=NA) ## } x$mean <- c(x$mean, newmean) } names(x$mean)[N+seq_len(k)] <- colnames(x$M)[N+seq_len(k)] <- rownames(x$M)[N+seq_len(k)] <- colnames(x$covfix)[N+seq_len(k)] <- rownames(x$covfix)[N+seq_len(k)] <- colnames(x$fix)[N+seq_len(k)] <- rownames(x$fix)[N+seq_len(k)] <- colnames(x$covpar)[N+seq_len(k)] <- rownames(x$covpar)[N+seq_len(k)] <- colnames(x$par)[N+seq_len(k)] <- rownames(x$par)[N+seq_len(k)] <- colnames(x$cov)[N+seq_len(k)] <- rownames(x$cov)[N+seq_len(k)] <- new if (messages>1) { if (k==1) message("\tAdded '", new, "' to model.\n", sep="") else message("\tAdded ",paste(paste("'",new,"'",sep=""),collapse=",")," to model.\n", sep="") } exogenous(x) <- c(new,exogenous(x)) } if (reindex) index(x) <- reindex(x) return(x) } lava/R/predict.R0000644000176200001440000002637613520655354013201 0ustar liggesusers##' @export predict.lvmfit <- function(object,x=NULL,y=NULL,data=model.frame(object),p=coef(object),...) { predict(Model(object),x=x,y=y,p=p,data=data,...) } ##' @export predict.lvm.missing <- function(object,x=NULL,y=NULL,data=model.frame(object),p=coef(object),...) { idx <- match(coef(Model(object)),names(coef(object))) xx <- exogenous(object) p <- p[idx] if (!is.null(x)) { if (inherits(x,"formula")) { xy <- getoutcome(x) if (length(xy)>0) { if (is.null(y)) y <- decomp.specials(xy) } x <- attributes(xy)$x } x <- intersect(x,endogenous(object)) if (is.null(y)) y <- setdiff(vars(object),c(x,xx)) } obs0 <- !is.na(data[,x,drop=FALSE]) data[,xx][which(is.na(data[,xx]),arr.ind=TRUE)] <- 0 pp <- predict.lvmfit(object,x=x,y=y,data=data,p=p,...) 
if (all(obs0)) return(pp) if (!requireNamespace("mets",quietly=TRUE)) stop("Requires 'mets'") obs <- mets::fast.pattern(obs0) res <- matrix(nrow=nrow(data),ncol=NCOL(pp)) for (i in seq(nrow(obs$pattern))) { jj <- which(obs$pattern[i,]==1) ii <- which(obs$group==i-1) if (length(jj)==0) { res[ii,] <- NA } else { res[ii,] <- predict.lvmfit(object,...,p=p,x=x[jj],y=y,data=data[ii,,drop=FALSE])[,colnames(pp),drop=FALSE] } } attributes(res) <- attributes(pp) return(res) } ##' Prediction in structural equation models ##' ##' Prediction in structural equation models ##' @param object Model object ##' @param x optional list of (endogenous) variables to condition on ##' @param y optional subset of variables to predict ##' @param residual If true the residuals are predicted ##' @param p Parameter vector ##' @param data Data to use in prediction ##' @param path Path prediction ##' @param quick If TRUE the conditional mean and variance given covariates are returned (and all other calculations skipped) ##' @param \dots Additional arguments to lower level function ##' @seealso predictlvm ##' @examples ##' m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u ##' d <- sim(m,100) ##' e <- estimate(m,d) ##' ##' ## Conditional mean (and variance as attribute) given covariates ##' r <- predict(e) ##' ## Best linear unbiased predictor (BLUP) ##' r <- predict(e,vars(e)) ##' ## Conditional mean of y3 giving covariates and y1,y2 ##' r <- predict(e,y3~y1+y2) ##' ## Conditional mean gives covariates and y1 ##' r <- predict(e,~y1) ##' ## Predicted residuals (conditional on all observed variables) ##' r <- predict(e,vars(e),residual=TRUE) ##' ##' @method predict lvm ##' @aliases predict.lvmfit ##' @export predict.lvm <- function(object,x=NULL,y=NULL,residual=FALSE,p,data,path=FALSE,quick=is.null(x)&!(residual|path),...) 
{ ## data = data.frame of exogenous variables if (!quick && !all(exogenous(object)%in%colnames(data))) stop("data.frame should contain exogenous variables") m <- moments(object,p,data=data,...) if (quick) { ## Only conditional moments given covariates ii <- index(object) P.x <- m$P; P.x[ii$exo.idx, ii$exo.idx] <- 0 Cy.x <- (m$IAi%*% tcrossprod(P.x,m$IAi))[ii$endo.idx,ii$endo.idx,drop=FALSE] X <- ii$exogenous mu.0 <- m$v; mu.0[ii$exo.idx] <- 0 if (length(X)>0) { mu.x <- matrix(0,ncol=nrow(data),nrow=length(mu.0)) mu.x[ii$exo.idx,] <- t(data[,X,drop=FALSE]) xi.x <- t(m$IAi[ii$endo.idx,,drop=FALSE]%*%(mu.0 + mu.x)) } else { xi.x <- m$xi%x%rep(1,nrow(data)) colnames(xi.x) <- ii$endogenous ##xi.x <- matrix(as.vector(m$IAi[ii$endo.obsidx,]%*%mu.0),ncol=nrow(data),nrow=length(mu.0)) ##rownames(xi.x) <- names(mu.0) } return(structure(xi.x,cond.var=Cy.x, p=m$p, e=m$e)) } X <- exogenous(object) Y <- setdiff(manifest(object), X) if (path) { X <- colnames(data) Y <- setdiff(Y,X) idx <- which(vars(object)%in%X) if (length(Y)==0) stop("New data set should only contain exogenous variables and a true subset of the endogenous variables for 'path' prediction.") A <- t(m$A) A[,idx] <- 0 ## i.e., A <- A%*%J IAi <- solve(diag(nrow=nrow(A))-t(A)) mu.0 <- m$v; mu.0[X] <- 0 mu.x <- matrix(0,ncol=nrow(data),nrow=length(mu.0)) mu.x[idx,] <- t(data[,vars(object)[idx],drop=FALSE]) pred <- t(IAi%*%(mu.0 + mu.x)) return(pred) ## Y <- endogenous(object,top=TRUE) ## X <- setdiff(manifest(object),Y) } IAi <- m$IAi eta.idx <- match(latent(object),vars(object)) obs.idx <- match(manifest(object),vars(object)) X.idx.all <- match(X, vars(object)) Y.idx.all <- match(Y, vars(object)) ## Calculation of conditional variance given X=x P.x <- m$P; P.x[X.idx.all, X.idx.all] <- 0 C.x <- (IAi%*% P.x %*%t(IAi)) Cy.x <- C.x[Y.idx.all,Y.idx.all,drop=FALSE] ## Calculation of conditional mean given X=x mu.0 <- m$v; mu.0[X.idx.all] <- 0 if (length(X)>0) { xs <- data[,X,drop=FALSE] mu.x <- apply(xs, 1, FUN=function(i) 
{res <- rep(0,length(mu.0)); res[X.idx.all] <- i; res}) xi.x <- (IAi%*%(mu.0 + mu.x)) } else { xi.x <- matrix(as.vector(IAi%*%mu.0),ncol=nrow(data),nrow=length(mu.0)) rownames(xi.x) <- names(mu.0) } attr(xi.x,"cond.var") <- Cy.x if (path) return(t(xi.x)) Ey.x <- xi.x[Y.idx.all,,drop=FALSE] Eeta.x <- xi.x[eta.idx,,drop=FALSE] Cy.epsilon <- P.x%*%t(IAi) ## Covariance y,residual ##Czeta.y <- Cy.epsilon[eta.idx,index(object)$endo.idx] A <- m$A IA <- diag(nrow=nrow(A))-t(A) y0 <- intersect(Y,colnames(data)) ys <- data[,y0,drop=FALSE] y0.idx <- match(y0,Y) ry <- t(ys)-Ey.x[y0.idx,,drop=FALSE] if (!is.null(x)) { if (inherits(x,"formula")) { xy <- getoutcome(x) if (length(xy)>0) { if (is.null(y)) y <- decomp.specials(xy) } x <- attributes(xy)$x } if (length(x)==0) { if (!is.null(y)) { xi.x <- xi.x[y,,drop=FALSE] attr(xi.x,"cond.var") <- Cy.x[y,y,drop=FALSE] } return(t(xi.x)) } x <- intersect(x,endogenous(object)) if (is.null(y)) y <- setdiff(vars(object),c(x,exogenous(object))) if (length(x)>0) { E.x <- xi.x[y,,drop=FALSE] + C.x[y,x]%*%solve(C.x[x,x])%*%ry[x,,drop=FALSE] } else { E.x <- xi.x[y,,drop=FALSE] } if (residual) { Vhat <- matrix(0, nrow(data), length(vars(object))); colnames(Vhat) <- vars(object) Vhat[,obs.idx] <- as.matrix(data[,manifest(object),drop=FALSE]) Vhat[,y] <- t(E.x) return(t((IA%*%t(Vhat)-m$v))) } res <- t(E.x); colnames(res) <- y if (length(x)>0) { attr(res,"cond.var") <- C.x[y,y,drop=FALSE]-C.x[y,x,drop=FALSE]%*%solve(C.x[x,x,drop=FALSE])%*%C.x[x,y,drop=FALSE] } else { attr(res,"cond.var") <- C.x[y,y,drop=FALSE] } return(res) } ys <- data[,Y,drop=FALSE] ry <- t(ys)-Ey.x if (length(eta.idx)>0) { Ceta.x <- C.x[eta.idx,eta.idx] Lambda <- A[Y.idx.all,eta.idx,drop=FALSE] ##, ncol=length(eta.idx)) Cetay.x <- Ceta.x%*%t(Lambda) KK <- Cetay.x %*% solve(Cy.x) Eeta.y <- Eeta.x + KK %*% ry Ceta.y <- Ceta.x - KK%*% t(Cetay.x) } else { Eeta.y <- NA Ceta.y <- NA } Vhat <- matrix(0, nrow(data), length(vars(object))); colnames(Vhat) <- vars(object) Vhat[,obs.idx] 
<- as.matrix(data[,manifest(object)]) if (length(eta.idx)>0) Vhat[,latent(object)] <- t(Eeta.y) epsilonhat <- (t( IA%*%t(Vhat) - m$v ))[,c(endogenous(object),latent(object)),drop=FALSE] if (residual) { return(epsilonhat) } mydata <- matrix(0,ncol=ncol(A),nrow=nrow(data)); colnames(mydata) <- vars(object) mydata[,manifest(object)] <- as.matrix(data[,manifest(object)]) for (i in latent(object)) mydata[,i] <- m$v[i] res <- cbind(t(Ey.x)) ## Conditional mean attr(res, "cond.var") <- Cy.x attr(res, "blup") <- t(Eeta.y) attr(res, "var.blup") <- Ceta.y attr(res, "Ey.x") <- Ey.x attr(res, "eta.x") <- Eeta.x attr(res, "epsilon.y") <- epsilonhat attr(res, "p") <- m$p attr(res, "e") <- m$e class(res) <- c("lvm.predict","matrix") return(res) } ##' @export print.lvm.predict <- function(x,...) print(x[,]) ##' Predict function for latent variable models ##' ##' Predictions of conditinoal mean and variance and calculation of ##' jacobian with respect to parameter vector. ##' @export ##' @param object Model object ##' @param formula Formula specifying which variables to predict and which to condition on ##' @param p Parameter vector ##' @param data Data.frame ##' @param ... Additional arguments to lower level functions ##' @seealso predict.lvm ##' @examples ##' m <- lvm(c(x1,x2,x3)~u1,u1~z, ##' c(y1,y2,y3)~u2,u2~u1+z) ##' latent(m) <- ~u1+u2 ##' d <- simulate(m,10,"u2,u2"=2,"u1,u1"=0.5,seed=123) ##' e <- estimate(m,d) ##' ##' ## Conditional mean given covariates ##' predictlvm(e,c(x1,x2)~1)$mean ##' ## Conditional variance of u1,y1 given x1,x2 ##' predictlvm(e,c(u1,y1)~x1+x2)$var predictlvm <- function(object,formula,p=coef(object),data=model.frame(object),...) 
{ model <- Model(object) if (!missing(formula)) { yx <- getoutcome(formula) y <- decomp.specials(yx) x <- attr(yx,"x") x <- setdiff(x,index(model)$exogenous) } else { y <- index(model)$latent x <- index(model)$endogenous } endo <- with(index(model),setdiff(vars,exogenous)) idxY <- match(y,endo) idxX <- match(x,endo) ny <- length(y) if (ny==0) return(NULL) m <- modelVar(model,p,conditional=TRUE,data=data,latent=TRUE) D <- deriv.lvm(model,p,conditional=TRUE,data=data,latent=TRUE) N <- nrow(data) ii0 <- seq(N) iiY <- sort(unlist(lapply(idxY,function(x) ii0+N*(x-1)))) k <- ncol(m$xi) J <- matrix(seq(k^2),k) if (length(idxX)==0) { ## Return conditional mean and variance given covariates M <- m$xi[,idxY,drop=FALSE] dM <- D$dxi[iiY,,drop=FALSE] V <- m$C[idxY,idxY,drop=FALSE] dV <- D$dS[as.vector(J[idxY,idxY]),,drop=FALSE] } else { iiX <- sort(unlist(lapply(idxX,function(x) ii0+N*(x-1)))) X <- as.matrix(data[,x,drop=FALSE]) rX <- X-m$xi[,idxX,drop=FALSE] dX <- D$dxi[iiX,,drop=FALSE] ic <- solve(m$C[idxX,idxX,drop=FALSE]) c2 <- m$C[idxY,idxX,drop=FALSE] B <- c2%*%ic ## Conditional variance V <- m$C[idxY,idxY,drop=FALSE]-B%*%t(c2) dV <- D$dS[as.vector(J[idxY,idxY]),,drop=FALSE] - ( (B%x%diag(nrow=ny))%*%D$dS[as.vector(J[idxY,idxX]),,drop=FALSE] + -(B%x%B)%*%D$dS[as.vector(J[idxX,idxX]),,drop=FALSE] + (diag(nrow=ny)%x%B)%*%D$dS[as.vector(J[idxX,idxY]),,drop=FALSE] ) ## Conditional mean M <- m$xi[,idxY,drop=FALSE]+rX%*%t(B) dB <- (ic%x%diag(nrow=ny))%*%D$dS[as.vector(J[idxY,idxX]),,drop=FALSE]+ -(ic%x%B)%*%D$dS[as.vector(J[idxX,idxX]),,drop=FALSE] ## Find derivative of transposed matrix n0 <- as.vector(matrix(seq(prod(dim(B))),ncol=nrow(B),byrow=TRUE)) dB. <- dB[n0,,drop=FALSE] dM <- D$dxi[iiY,,drop=FALSE] + ((diag(nrow=ny)%x%rX)%*%dB.) 
- kronprod(B,dX) } colnames(M) <- y dimnames(V) <- list(y,y) return(list(mean=M,mean.jacobian=dM,var=V,var.jacobian=dV)) } lava/R/confpred.R0000644000176200001440000000505513520655354013336 0ustar liggesusers##' Conformal predicions ##' ##' @title Conformal prediction ##' @param object Model object (lm, glm or similar with predict method) or formula (lm) ##' @param data data.frame ##' @param newdata New data.frame to make predictions for ##' @param alpha Level of prediction interval ##' @param mad Conditional model (formula) for the MAD (locally-weighted CP) ##' @param ... Additional arguments to lower level functions ##' @return data.frame with fitted (fit), lower (lwr) and upper (upr) predictions bands. ##' @examples ##' set.seed(123) ##' n <- 200 ##' x <- seq(0,6,length.out=n) ##' delta <- 3 ##' ss <- exp(-1+1.5*cos((x-delta))) ##' ee <- rnorm(n,sd=ss) ##' y <- (x-delta)+3*cos(x+4.5-delta)+ee ##' d <- data.frame(y=y,x=x) ##' ##' newd <- data.frame(x=seq(0,6,length.out=50)) ##' cc <- confpred(lm(y~splines::ns(x,knots=c(1,3,5)),data=d), data=d, newdata=newd) ##' if (interactive()) { ##' plot(y~x,pch=16,col=lava::Col("black"),ylim=c(-10,10),xlab="X",ylab="Y") ##' with(cc, ##' lava::confband(newd$x,lwr,upr,fit, ##' lwd=3,polygon=TRUE,col=Col("blue"),border=FALSE)) ##' } ##' @export confpred <- function(object,data,newdata=data,alpha=0.05,mad,...) 
{ ## Split algorithm if (inherits(object,"formula")) { object <- do.call("lm",list(object,data=data,...)) } dd <- csplit(data,0.5) muhat.new <- predict(object,newdata=newdata) ## New predictions muhat.1 <- predict(object,newdata=dd[[1]]) ## Training muhat.2 <- predict(object,newdata=dd[[2]]) ## Ranking R2 <- abs(dd[[2]][,1]-muhat.2) if (missing(mad)) mad <- formula(object) if (is.null(mad)) { mad.new <- rep(1,nrow(newdata)) } else { ## Locally-weighted conformal ffinference if (names(dd[[2]])[1] %ni% names(newdata)) { newdata <- cbind(0,newdata); names(newdata)[1] <- names(dd[[2]])[1] } X0 <- model.matrix(mad,data=newdata) if (inherits(mad,"formula")) { X2 <- model.matrix(mad,dd[[2]]) mad.obj <- stats::lm.fit(x=X2,y=R2) mad2 <- X2%*%mad.obj$coefficients mad.new <- X0%*%mad.obj$coefficients } else { mad.obj <- do.call(mad,list(y=R2,x=dd[[2]])) mad2 <- predict(mad.obj,newdata=dd[[2]]) mad.new <- predict(mad.obj,newdata=newdata) } R2 <- R2/mad2 } k <- ceiling((nrow(data)/2+1)*(1-alpha)) if (k==0) k <- 1 if (k>length(R2)) k <- length(R2) q <- sort(R2)[k] ## 1-alpha quantile lo <- muhat.new - q*mad.new up <- muhat.new + q*mad.new data.frame(fit=muhat.new,lwr=lo,upr=up) } lava/R/estimate.list.R0000644000176200001440000000044113520655354014315 0ustar liggesusers ##' @export estimate.list <- function(x,...) { if (inherits(x[[1]],"lvm")) return(estimate.lvmlist(x,...)) res <- lapply(x,function(x) estimate(x,...)) class(res) <- c("estimate.list","list") res } coef.estimate.list <- function(object,...) { lapply(object,coef) } lava/R/predict.mixture.R0000644000176200001440000000267713520655354014673 0ustar liggesusers##' @export predict.lvm.mixture <- function(object,p=coef(object,full=TRUE),model="normal",predict.fun=NULL,...) 
{ p0 <- coef(object,full=FALSE) pp <- p[seq_along(p0)] pr <- p[length(p0)+seq(length(p)-length(p0))]; if (length(pr)geq[i]) return(TRUE) if (xyz[i]=length(geq)) return(TRUE) return(FALSE) } lava.env <- new.env() assign("init.hooks",c(),envir=lava.env) assign("remove.hooks",c(),envir=lava.env) assign("estimate.hooks",c(),envir=lava.env) assign("color.hooks",c(),envir=lava.env) assign("sim.hooks",c(),envir=lava.env) assign("post.hooks",c(),envir=lava.env) assign("print.hooks",c(),envir=lava.env) assign("plot.post.hooks",c(),envir=lava.env) assign("plot.hooks",c(),envir=lava.env) assign("options", list( trace=0, tol=1e-6, gamma=1, backtrack="wolfe", ngamma=0, iter.max=300, eval.max=250, constrain=FALSE, allow.negative.variance=FALSE, progressbarstyle=3, itol=1e-16, cluster.index=versioncheck("mets",c(0,2,7)), tobit=versioncheck("lava.tobit",c(0,5)), Dmethod="simple", ##"Richardson" messages=ifelse(interactive(), 1, 0), parallel=TRUE, param="relative", sparse=FALSE, test=TRUE, coef.names=FALSE, constrain=TRUE, graph.proc="beautify", regex=FALSE, min.weight=1e-3, exogenous=TRUE, plot.engine="Rgraphviz", node.color=c(exogenous="lightblue",endogenous="orange", latent="yellowgreen",transform="lightgray"), edgecolor=FALSE, layout="dot", ## symbols=c("<-","<->"), symbols=c("~","~~"), devel=FALSE, debug=FALSE), envir=lava.env) lava/R/distribution.R0000644000176200001440000003242213520655354014253 0ustar liggesusers ###{{{ distribution ##' @export "distribution<-" <- function(x,...,value) UseMethod("distribution<-") ##' @export "distribution" <- function(x,...,value) UseMethod("distribution") ##' @export "distribution<-.lvm" <- function(x,variable,parname=NULL,init,mdist=FALSE,...,value) { if (inherits(variable,"formula")) variable <- all.vars(variable) dots <- list(...) 
if (!missing(value)) { for (obj in c("variable","parname","init","mdist")) if (!is.null(attr(value,obj)) && eval(substitute(missing(a),list(a=obj)))) assign(obj,attr(value,obj)) if (is.function(value) & mdist & is.null(parname)) parname <- TRUE } if (!is.null(parname) || length(dots)>0) { if (length(parname)>1 || (is.character(parname))) { if (missing(init)) { parameter(x,start=rep(1,length(parname))) <- parname } else { parameter(x,start=init) <- parname } gen <- function(n,p,...) { args <- c(n,as.list(p[parname]),dots) names(args) <- names(formals(value))[seq(length(parname)+1)] do.call(value,args) } } else { gen <- value if ("..."%ni%names(formals(gen))) formals(gen) <- c(formals(gen),alist(...=)) formals(gen) <- modifyList(formals(gen),dots) } distribution(x,variable,mdist=TRUE) <- list(gen) return(x) } if (length(variable)==1 && !mdist) { addvar(x) <- as.formula(paste("~",variable)) if (is.numeric(value)) value <- list(value) if (!is.null(attributes(value)$mean)) intercept(x,variable) <- attributes(value)$mean if (!is.null(attributes(value)$variance)) variance(x,variable,exo=TRUE) <- attributes(value)$variance x$attributes$distribution[[variable]] <- value ## Remove from 'mdistribution' vars <- which(names(x$attributes$mdistribution$var)%in%variable) for (i in vars) { pos <- x$attributes$mdistribution$var[[i]] x$attributes$mdistribution$fun[pos] <- NULL x$attributes$mdistribution$var[which(x$attributes$mdistribution$var==pos)] <- NULL above <- which(x$attributes$mdistribution$var>pos) if (length(above)>0) x$attributes$mdistribution$var[above] <- lapply(x$attributes$mdistribution$var[above],function(x) x-1) } return(x) } if (is.list(value) && length(value)==1 && (is.function(value[[1]]) || is.null(value[[1]]))) { addvar(x) <- variable ## Multivariate distribution if (is.null(x$attributes$mdistribution)) x$attributes$mdistribution <- list(var=list(), fun=list()) vars <- x$attributes$mdistribution$var if (any(ii <- which(names(vars)%in%variable))) { num <- 
unique(unlist(vars[ii])) vars[which(unlist(vars)%in%num)] <- NULL newfunlist <- list() numleft <- unique(unlist(vars)) for (i in seq_along(numleft)) { newfunlist <- c(newfunlist, x$attributes$mdistribution$fun[[numleft[i]]]) ii <- which(unlist(vars)==numleft[i]) vars[ii] <- i } K <- length(numleft) x$attributes$mdistribution$var <- vars x$attributes$mdistribution$fun <- newfunlist } else { K <- length(x$attributes$mdistribution$fun) } if (length(distribution(x))>0) distribution(x,variable) <- rep(list(NULL),length(variable)) x$attributes$mdistribution$var[variable] <- K+1 x$attributes$mdistribution$fun <- c(x$attributes$mdistribution$fun,value) return(x) } if ((length(value)!=length(variable) & length(value)!=1)) stop("Wrong number of values") for (i in seq_along(variable)) if (length(value)==1) { distribution(x,variable[i],...) <- value } else { distribution(x,variable[i],...) <- value[[i]] } return(x) } ##' @export "distribution.lvm" <- function(x,var,value,multivariate=FALSE,...) { if (!missing(value)) { distribution(x,var,...) <- value return(x) } if (multivariate) return(x$attributes$mdistribution) x$attributes$distribution[var] } ###}}} distribution ###{{{ normal/gaussian ##' @export normal.lvm <- function(link="identity",mean,sd,log=FALSE,...) { rnormal <- if(log) rlnorm else rnorm fam <- stats::gaussian(link); fam$link <- link f <- function(n,mu,var,...) rnormal(n,fam$linkinv(mu),sqrt(var)) if (!missing(mean)) attr(f,"mean") <- mean if (!missing(sd)) attr(f,"variance") <- sd^2 attr(f,"family") <- fam return(f) } ##' @export gaussian.lvm <- normal.lvm ##' @export lognormal.lvm <- function(...) structure(normal.lvm(...,log=TRUE),family=list(family="log-normal",...)) ###}}} normal/gaussian ###{{{ poisson ##' @export poisson.lvm <- function(link="log",lambda,...) { fam <- stats::poisson(link); fam$link <- link f <- function(n,mu,...) 
{ if (missing(n)) { return(fam) } rpois(n,fam$linkinv(mu)) } if (!missing(lambda)) attr(f,"mean") <- fam$linkfun(lambda) attr(f,"family") <- fam attr(f,"var") <- FALSE return(f) } ###}}} poisson ###{{{ pareto ## @examples ## m <- lvm() ## categorical(m,K=3) <- ~x ## distribution(m,~y) <- pareto.lvm(lambda=1) ## regression(m,additive=FALSE) <- y~x ## regression(m) <- y~z ## d <- sim(m,1e4,p=c("y~x:0"=1,"y~x:1"=1,"y~x:2"=exp(1))) ## ## X <- model.matrix(y~-1+factor(x)+z,data=d) ## mlogL <- function(theta) { ## lambda <- exp(theta[1]) ## mu <- exp(X%*%theta[-1]) ## -sum(log(lambda*mu*(1+mu*d$y)^{-lambda-1})) ## } ## nlminb(rep(0,ncol(X)+1),mlogL) ##' @export pareto.lvm <- function(lambda=1,...) { ## shape: lambda, scale: mu ## Density f(y): lambda*mu*(1+mu*y)^{-lambda-1} ## Survival S(y): (1+mu*y)^{-lambda} ## Inverse CDF: u -> ((1-u)^{-1/lambda}-1)/mu f <- function(n,mu,var,...) { ((1-runif(n))^(-1/lambda)-1)/exp(mu) } attr(f,"family") <- list(family="pareto", par=c(lambda=lambda)) return(f) } ###}}} pareto ###{{{ threshold ##' @export threshold.lvm <- function(p,labels=NULL,...) { if (sum(p)>1 || any(p<0 | p>1)) stop("wrong probability vector") ; if (!is.null(labels)) return(function(n,...) { return(cut(rnorm(n),breaks=c(-Inf,qnorm(cumsum(p)),Inf),labels=labels)) }) function(n,...) cut(rnorm(n),breaks=c(-Inf,qnorm(cumsum(p)),Inf)) } ###}}} threshold ###{{{ binomial ##' @export binomial.lvm <- function(link="logit",p,size=1) { if (substitute(link)==quote(identity)) { link <- "identity" } fam <- stats::binomial(link); fam$link <- link f <- function(n,mu,var,...) { if (missing(n)) { return(fam) } rbinom(n,size,fam$linkinv(mu)) } attr(f,"family") <- fam attr(f,"var") <- FALSE if (!missing(p)) attr(f,"mean") <- fam$linkfun(p) ## f <- switch(link, ## logit = ## function(n,mu,var,...) rbinom(n,1,tigol(mu)), ## cloglog = ## function(n,mu,var,...) rbinom(n,1,1-exp(-exp(1-mu))), ## function(n,mu,var=1,...) rbinom(n,1,pnorm(mu,sd=sqrt(var))) ## ### function(n,mu=0,var=1,...) 
(rnorm(n,mu,sqrt(var))>0)*1 ## ) ##} return(f) } ##' @export logit.lvm <- binomial.lvm("logit") ##' @export probit.lvm <- binomial.lvm("probit") ###}}} binomial ###{{{ Gamma ##' @export Gamma.lvm <- function(link="inverse",shape,rate,unit=FALSE,var=FALSE,log=FALSE,...) { fam <- stats::Gamma(link); fam$link <- link rgam <- if (!log) rgamma else function(...) log(rgamma(...)) if (!missing(shape) & !missing(rate)) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=rate) if (!missing(shape) & missing(rate)) { if (unit) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=shape) else if (var) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=sqrt(shape/var)) else f <- function(n,mu,var,...) rgam(n,shape=shape,rate=shape/fam$linkinv(mu)) } if (missing(shape) & !missing(rate)) { if (unit) f <- function(n,mu,var,...) rgam(n,shape=shape,rate=rate) else if (var) f <- function(n,mu,var,...) rgam(n,shape=rate^2*var,rate=rate) else f <- function(n,mu,var,...) rgam(n,shape=rate*fam$linkinv(mu),rate=rate) } if (missing(shape) & missing(rate)) { if (var) f <- function(n,mu,var,...) rgam(n,shape=var,rate=1) else f <- function(n,mu,var,...) rgam(n,shape=fam$linkinv(mu),rate=1) } attr(f,"family") <- fam attr(f,"var") <- FALSE return(f) } ##' @export loggamma.lvm <- function(...) Gamma.lvm(...,log=TRUE) ###}}} Gamma ###{{{ chisq ##' @export chisq.lvm <- function(df=1,...) { function(n,mu,var,...) mu + rchisq(n,df=df) } ###}}} chisq ###{{{ student (t-distribution) ##' @export student.lvm <- function(df=2,mu,sigma,...) { f <- function(n,mu,var,...) mu + sqrt(var)*rt(n,df=df) if (!missing(mu)) attr(f,"mean") <- mu if (!missing(sigma)) attr(f,"variace") <- sigma^2 return(f) } ###}}} student (t-distribution) ###{{{ uniform ##' @export uniform.lvm <- function(a,b) { if (!missing(a) & !missing(b)) f <- function(n,mu,var,...) mu+runif(n,a,b) else f <- function(n,mu,var,...) 
(mu+(runif(n,-1,1)*sqrt(12)/2*sqrt(var))) return(f) } ###}}} uniform ###{{{ weibull ## see also eventTime.R for coxWeibull ##' @export weibull.lvm <- function(scale=1,shape=2) { ## accelerated failure time (AFT) regression ## parametrization. ## ## We parametrize the Weibull distribution (without covariates) as follows: ## hazard(t) = 1/shape * exp(-scale/shape) * t^(1/shape-1) ## The hazard is: ## - rising if shape > 1 ## - declining if shape <1 ## - constant if shape=1 ## ## AFT regression ## hazard(t|Z) = 1/shape * exp(-scale/shape) * t^(1/shape-1) exp(-beta/shape*Z) ## scale^(-1/shape) = exp(a0+a1*X) ## PH regression ## scale = exp(b0+ b1*X) f <- function(n,mu,var,...) { (- log(runif(n)) * exp(log(scale)/shape) * exp(mu/shape))^{shape} ## scale * (-log(1-runif(n)))^{1/shape} ## (- (log(runif(n)) / (1/scale)^(shape) * exp(-mu)))^(1/shape) } attr(f,"family") <- list(family="weibull", regression="AFT", par=c(shape=shape,scale=scale)) return(f) } ###}}} weibull ###{{{ sequence ##' @export sequence.lvm <- function(a=0,b=1,integer=FALSE) { if (integer) { f <- function(n,...) seq(n) return(f) } if (is.null(a) || is.null(b)) { if (!is.null(a)) { f <- function(n,...) seq(a,length.out=n) } else { f <- function(n,...) seq(n)-(n-b) } } else { f <- function(n,...) seq(a,b,length.out=n) } return(f) } ###}}} sequence ###{{{ ones ##' @export ones.lvm <- function(p=1,interval=NULL) { f <- function(n,...) { if (!is.null(interval)) { val <- rep(0L,n) if (!is.list(interval)) interval <- list(interval) for (i in seq_along(interval)) { ii <- interval[[i]] lo <- round(ii[1]*n) hi <- round(ii[2]*n) val[seq(lo,hi)] <- 1L } return(val) } if (p==0) return(rep(0L,n)) val <- rep(1L,n) if (p>0 && p<1) val[seq(n*(1-p))] <- 0L val } return(f) } ###}}} ones ###{{{ beta ##' @export beta.lvm <- function(alpha=1,beta=1,scale=TRUE) { ## CDF: F(x) = B(x,alpha,beta)/B(alpha,beta) ## Mean: alpha/(alpha+beta) ## Var: alpha*beta/((alpha+beta)^2*(alpha+beta+1)) if (scale) f <- function(n,mu,var,...) 
{ m <- alpha/(alpha+beta) v <- alpha*beta/((alpha+beta)^2*(alpha+beta+1)) y <- stats::rbeta(n,shape1=alpha,shape2=beta) mu+(y-m)*sqrt(var/v) } else f <- function(n,mu,var,...) stats::rbeta(n,shape1=alpha,shape2=beta) return(f) } ###}}} beta ##' @export mvn.lvm <- function(N=2,rho=0.5,sigma=NULL,parname="rho") { f <- function(n,rho) { if (is.null(sigma)) { sigma <- diag(nrow=N)*(1-rho) + rho } rmvn0(n,sigma=sigma) } if (!is.null(sigma)) parname <- TRUE structure(f,parname=parname,init=rho,mdist=TRUE) } ###{{{ Gaussian mixture ##' @export GM2.lvm <- function(...,parname=c("Pr","M1","M2","V1","V2"),init=c(0.5,-4,4,1,1)) { f <- function(n,pr,m1,m2,v1,v2) { y1 <- rnorm(n,m1,v1^0.5) if (pr>=1) return(y1) z <- rbinom(n,1,pr) y2 <- rnorm(n,m2,v2^0.5) return(z*y1+(1-z)*y2) } structure(f,parname=parname,init=init) } ##' @export GM3.lvm <- function(...,parname=c("Pr1","Pr2","M1","M2","M3","V1","V2","V3"),init=c(0.25,0.5,-4,0,4,1,1,1)) { f <- function(n,pr1,pr2,m1,m2,m3,v1,v2,v3) { p <- c(pr1,pr2,1-pr1-pr2) y1 <- rnorm(n,m1,v1^0.5) y2 <- rnorm(n,m2,v2^0.5) y3 <- rnorm(n,m3,v3^0.5) z <- stats::rmultinom(n,1,p) rowSums(cbind(y1,y2,y3)*t(z)) } structure(f,parname=parname,init=init) } ###}}} Gaussian mixture lava/R/onload.R0000644000176200001440000000126313520655354013007 0ustar liggesusers'.onLoad' <- function(libname, pkgname="lava") { addhook("heavytail.init.hook","init.hooks") addhook("glm.estimate.hook","estimate.hooks") addhook("ordinal.estimate.hook","estimate.hooks") addhook("cluster.post.hook","post.hooks") addhook("ordinal.sim.hook","sim.hooks") addhook("color.ordinal","color.hooks") addhook("ordinal.remove.hook","remove.hooks") } '.onAttach' <- function(libname, pkgname="lava") { #desc <- utils::packageDescription(pkgname) #packageStartupMessage(desc$Package, " version ",desc$Version) lava.options(cluster.index=versioncheck("mets",c(0,2,7)), tobit=versioncheck("lava.tobit",c(0,5))) } lava/R/eventTime.R0000644000176200001440000003444513520655354013503 0ustar 
liggesusers##' Add an observed event time outcome to a latent variable model. ##' ##' For example, if the model 'm' includes latent event time variables ##' are called 'T1' and 'T2' and 'C' is the end of follow-up (right censored), ##' then one can specify ##' ##' \code{eventTime(object=m,formula=ObsTime~min(T1=a,T2=b,C=0,"ObsEvent"))} ##' ##' when data are simulated from the model ##' one gets 2 new columns: ##' ##' - "ObsTime": the smallest of T1, T2 and C ##' - "ObsEvent": 'a' if T1 is smallest, 'b' if T2 is smallest and '0' if C is smallest ##' ##' Note that "ObsEvent" and "ObsTime" are names specified by the user. ##' ##' @author Thomas A. Gerds, Klaus K. Holst ##' @keywords survival models regression ##' @examples ##' ##' # Right censored survival data without covariates ##' m0 <- lvm() ##' distribution(m0,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' distribution(m0,"censtime") <- coxExponential.lvm(rate=10) ##' m0 <- eventTime(m0,time~min(eventtime=1,censtime=0),"status") ##' sim(m0,10) ##' ##' # Alternative specification of the right censored survival outcome ##' ## eventTime(m,"Status") <- ~min(eventtime=1,censtime=0) ##' ##' # Cox regression: ##' # lava implements two different parametrizations of the same ##' # Weibull regression model. The first specifies ##' # the effects of covariates as proportional hazard ratios ##' # and works as follows: ##' m <- lvm() ##' distribution(m,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' distribution(m,"censtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' m <- eventTime(m,time~min(eventtime=1,censtime=0),"status") ##' distribution(m,"sex") <- binomial.lvm(p=0.4) ##' distribution(m,"sbp") <- normal.lvm(mean=120,sd=20) ##' regression(m,from="sex",to="eventtime") <- 0.4 ##' regression(m,from="sbp",to="eventtime") <- -0.01 ##' sim(m,6) ##' # The parameters can be recovered using a Cox regression ##' # routine or a Weibull regression model. 
E.g., ##' \dontrun{ ##' set.seed(18) ##' d <- sim(m,1000) ##' library(survival) ##' coxph(Surv(time,status)~sex+sbp,data=d) ##' ##' sr <- survreg(Surv(time,status)~sex+sbp,data=d) ##' library(SurvRegCensCov) ##' ConvertWeibull(sr) ##' ##' } ##' ##' # The second parametrization is an accelerated failure time ##' # regression model and uses the function weibull.lvm instead ##' # of coxWeibull.lvm to specify the event time distributions. ##' # Here is an example: ##' ##' ma <- lvm() ##' distribution(ma,"eventtime") <- weibull.lvm(scale=3,shape=0.7) ##' distribution(ma,"censtime") <- weibull.lvm(scale=2,shape=0.7) ##' ma <- eventTime(ma,time~min(eventtime=1,censtime=0),"status") ##' distribution(ma,"sex") <- binomial.lvm(p=0.4) ##' distribution(ma,"sbp") <- normal.lvm(mean=120,sd=20) ##' regression(ma,from="sex",to="eventtime") <- 0.7 ##' regression(ma,from="sbp",to="eventtime") <- -0.008 ##' set.seed(17) ##' sim(ma,6) ##' # The regression coefficients of the AFT model ##' # can be tranformed into log(hazard ratios): ##' # coef.coxWeibull = - coef.weibull / shape.weibull ##' \dontrun{ ##' set.seed(17) ##' da <- sim(ma,1000) ##' library(survival) ##' fa <- coxph(Surv(time,status)~sex+sbp,data=da) ##' coef(fa) ##' c(0.7,-0.008)/0.7 ##' } ##' ##' ##' # The Weibull parameters are related as follows: ##' # shape.coxWeibull = 1/shape.weibull ##' # scale.coxWeibull = exp(-scale.weibull/shape.weibull) ##' # scale.AFT = log(scale.coxWeibull) / shape.coxWeibull ##' # Thus, the following are equivalent parametrizations ##' # which produce exactly the same random numbers: ##' ##' model.aft <- lvm() ##' distribution(model.aft,"eventtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) ##' distribution(model.aft,"censtime") <- weibull.lvm(scale=-log(1/100)/2,shape=0.5) ##' set.seed(17) ##' sim(model.aft,6) ##' ##' model.cox <- lvm() ##' distribution(model.cox,"eventtime") <- coxWeibull.lvm(scale=1/100,shape=2) ##' distribution(model.cox,"censtime") <- 
coxWeibull.lvm(scale=1/100,shape=2) ##' set.seed(17) ##' sim(model.cox,6) ##' ##' # The minimum of multiple latent times one of them still ##' # being a censoring time, yield ##' # right censored competing risks data ##' ##' mc <- lvm() ##' distribution(mc,~X2) <- binomial.lvm() ##' regression(mc) <- T1~f(X1,-.5)+f(X2,0.3) ##' regression(mc) <- T2~f(X2,0.6) ##' distribution(mc,~T1) <- coxWeibull.lvm(scale=1/100) ##' distribution(mc,~T2) <- coxWeibull.lvm(scale=1/100) ##' distribution(mc,~C) <- coxWeibull.lvm(scale=1/100) ##' mc <- eventTime(mc,time~min(T1=1,T2=2,C=0),"event") ##' sim(mc,6) ##' ##' ##' @export ##' @aliases eventTime<- ##' @param object Model object ##' @param formula Formula (see details) ##' @param eventName Event names ##' @param \dots Additional arguments to lower levels functions eventTime <- function(object,formula,eventName="status",...) { if (missing(formula)) return(object$attributes$eventHistory) if (inherits(eventName,"formula")) eventName <- all.vars(eventName) ff <- as.character(formula) timeName <- all.vars(update.formula(formula,"~1")) if (length(timeName)==0){ timeName <- "observedTime" rhs <- ff[[2]] }else{ rhs <- ff[[3]] } ## rhs <- tolower(rhs) latentTimes <- strsplit(rhs,"[(,)]")[[1]] if (latentTimes[1]!="min") stop(paste("Formula ",formula," does not have the required form, ", "e.g. 
~min(T1=1,T2=2,C=0), see (examples in) help(eventTime).")) latentTimes <- latentTimes[-1] NT <- length(latentTimes) events <- vector(NT,mode="character") for (lt in seq_len(NT)){ tmp <- strsplit(latentTimes[lt],"=")[[1]] stopifnot(length(tmp) %in% c(1,2)) if (length(tmp)==1){ events[lt] <- as.character(lt) latentTimes[lt] <- tmp } else{ events[lt] <- tmp[2] latentTimes[lt] <- tmp[1] } } events <- gsub(" ","",events) eventnum <- char2num(events) if (all(!is.na(eventnum))) { events <- eventnum } else { events <- gsub("\"","",events) } addvar(object) <- timeName eventTime <- list(names=c(timeName,eventName), latentTimes=gsub(" ","",latentTimes), events=events ) transform(object, y=eventTime$names, x=eventTime$latentTimes) <- function(z) { idx <- apply(z,1,which.min) cbind(z[cbind(seq(NROW(z)),idx)], eventTime$events[idx]) } if (is.null(object$attributes$eventHistory)) { object$attributes$eventHistory <- list(eventTime) names(object$attributes$eventHistory) <- timeName } else { object$attributes$eventHistory[[timeName]] <- eventTime } return(object) } ##' @export "eventTime<-" <- function(object,...,value) { eventTime(object,value,...) } ## addhook("color.eventHistory","color.hooks") ## color.eventHistory <- function(x,subset=vars(x),...) { ## return(list(vars=intersect(subset,binary(x)),col="indianred1")) ## } addhook("plothook.eventHistory","plot.post.hooks") plothook.eventHistory <- function(x,...) { eh <- x$attributes$eventHistory for (f in eh) { x <- regression(x,to=f$names[1],from=f$latentTimes) latent(x) <- f$latentTimes kill(x) <- f$names[2] } timedep <- x$attributes$timedep for (i in seq_len(length(timedep))) { x <- regression(x,to=names(timedep)[i],from=timedep[[i]]) } return(x) } addhook("colorhook.eventHistory","color.hooks") colorhook.eventHistory <- function(x,subset=vars(x),...) { return(list(vars=intersect(subset,unlist(x$attributes$timedep)),col="lightblue4")) } addhook("print.eventHistory","print.hooks") print.eventHistory <- function(x,...) 
{ eh <- x$attributes$eventHistory timedep <- x$attributes$timedep if (is.null(eh) & is.null(timedep)) return(NULL) ehnames <- unlist(lapply(eh,function(x) x$names)) cat("Event History Model\n") ff <- formula(x,char=TRUE,all=TRUE) R <- c() for (f in ff) { oneline <- as.character(f); y <- gsub(" ","",strsplit(f,"~")[[1]][1]) if (!(y %in% ehnames)) { col1 <- as.character(oneline) D <- attributes(distribution(x)[[y]])$family col2 <- "Normal" if (!is.null(D$family)) col2 <- paste0(D$family) if (!is.null(D$link)) col2 <- paste0(col2,"(",D$link,")") if (!is.null(D$par)) col2 <- paste0(col2,"(",paste(D$par,collapse=","),")") R <- rbind(R,c(col1," ",col2)) } } for (y in names(eh)) { col1 <- paste0(y, " = min(",paste(eh[[y]]$latentTimes,collapse=","),")") eh[[y]]$names[2] col2 <- paste0(eh[[y]]$names[2], " := {",paste(eh[[y]]$events,collapse=","),"}") R <- rbind(R,c(col1,"",col2)) } rownames(R) <- rep("",nrow(R)); colnames(R) <- rep("",ncol(R)) print(R,quote=FALSE,...) cat("\n") for (i in seq_len(length(timedep))) { cat("Time-dependent covariates:\n\n") cat(paste("",names(timedep)[i],"~", paste(timedep[[i]],collapse="+")),"\n") } TRUE } ## addhook("simulate.eventHistory","sim.hooks") ## simulate.eventHistory <- function(x,data,...){ ## if (is.null(eventTime(x))) { ## return(data) ## } ## else{ ## for (eh in eventTime(x)) { ## if (any((found <- match(eh$latentTimes,names(data),nomatch=0))==0)){ ## warning("Cannot find latent time variable: ", ## eh$latentTimes[found==0],".") ## } ## else{ ## for (v in seq_along(eh$latentTimes)) { ## if (v==1){ ## initialize with the first latent time and event ## eh.time <- data[,eh$latentTimes[v]] ## eh.event <- rep(eh$events[v],NROW(data)) ## } else{ ## now replace if next time is smaller ## ## in case of tie keep the first event ## eh.event[data[,eh$latentTimes[v]] 1 ## - declining if shape <1 ## - constant if shape=1 ## ## scale = exp(b0 + b1*X) f <- function(n,mu,Scale=scale,Shape=shape,...) 
{ (- log(runif(n)) / (Scale * exp(mu)))^(1/Shape) } ff <- formals(f) expr <- "(- log(runif(n)) / (Scale * exp(mu)))^{1/Shape}" if (inherits(scale,"formula")) scale <- all.vars(scale)[1] if (is.character(scale)) { names(ff)[3] <- scale expr <- gsub("Scale",scale,expr) } if (inherits(shape,"formula")) shape <- all.vars(shape)[1] if (is.character(shape)) { names(ff)[4] <- shape expr <- gsub("Shape",shape,expr) } formals(f) <- ff e <- parse(text=expr) body(f) <- as.call(c(as.name("{"), e)) attr(f,"family") <- list(family="weibull", regression="PH", par=c(shape=shape,scale=scale)) return(f) } ##' @export coxExponential.lvm <- function(scale=1,rate,timecut){ if (missing(rate)) rate=1/scale if (missing(scale)) scale=1/rate if (missing(timecut)) { return(coxWeibull.lvm(shape=1,scale)) } if (NROW(rate)>length(timecut)) stop("Number of time-intervals (cuts) does not agree with number of rate parameters (beta0)") par <- paste(timecut,rate,sep=":") if (is.matrix(rate)) par <- "..." timecut <- c(timecut,Inf) f <- function(n,mu,...) 
{ Ai <- function() { vals <- matrix(0,ncol=length(timecut)-1,nrow=n) ival <- numeric(n) if (is.matrix(rate)) { mu <- cbind(mu[,1],cbind(1,as.matrix(mu[,-1]))%*%t(rate)) rate <- rep(1,length(timecut)-1) } for (i in seq(length(timecut)-1)) { u <- -log(runif(n)) ##rexp(n,1) if (NCOL(mu)>1) { vals[,i] <- timecut[i] + u*exp(-mu[,1]-mu[,i+1])/(rate[i]) } else { vals[,i] <- timecut[i] + u*exp(-mu)/(rate[i]) } idx <- which(vals[,i]<=timecut[i+1] & ival==0) ival[idx] <- vals[idx,i] } ival } Ai() } attributes(f)$family <- list(family="CoxExponential",par=par) return(f) } ##' @export aalenExponential.lvm <- function(scale=1,rate,timecut=0){ if (missing(rate)) rate=1/scale if (missing(scale)) scale=1/rate if (missing(timecut)==1) { return(coxWeibull.lvm(shape=1,scale)) } if (length(rate)>length(timecut)) stop("Number of time-intervals (cuts) does not agree with number of rate parameters (beta0)") par <- paste(timecut,rate,sep=":") if (is.matrix(rate)) par <- "..." timecut <- c(timecut,Inf) f <- function(n,mu,...) { Ai <- function() { vals <- matrix(0,ncol=length(timecut)-1,nrow=n) ival <- numeric(n) if (is.matrix(rate)) { mu <- cbind(mu[,1],cbind(1,as.matrix(mu[,-1]))%*%t(rate)) rate <- rep(1,length(timecut)-1) } for (i in seq(length(timecut)-1)) { u <- -log(runif(n)) ##rexp(n,1) if (NCOL(mu)>1) { vals[,i] <- timecut[i] + u/(rate[i]+mu[,1]+mu[,i+1]) } else { vals[,i] <- timecut[i] + u/(rate[i]+mu) } idx <- which(vals[,i]<=timecut[i+1] & ival==0) ival[idx] <- vals[idx,i] } ival } Ai() } attributes(f)$family <- list(family="aalenExponential",par=par) return(f) } ##' @export coxGompertz.lvm <- function(shape=1,scale) { f <- function(n,mu,var,...) { (1/shape) * log(1 - (shape/scale) * (log(runif(n)) * exp(-mu))) } attr(f,"family") <- list(family="gompertz",par=c(shape,scale)) return(f) } lava/R/weights.R0000644000176200001440000000017613520655354013207 0ustar liggesusers##' @export `Weights` <- function(x,...) UseMethod("Weights") ##' @export Weights.default <- function(x,...) 
eval(x$weights) lava/R/img.R0000644000176200001440000001213713520655354012311 0ustar liggesusersimg <- function(x,idx,col=list(gray.colors(10,1,0.2)), ylab="Item",xlab="Subject",lab=TRUE, border=1,rowcol=FALSE,plotfun=NULL, axis1=TRUE,axis2=TRUE,yaxs="r",xaxs="r",cex.axis=0.4,...) { x0 <- seq(nrow(x)) y0 <- seq(ncol(x)) image(x=x0,y=y0,as.matrix(x),col=col[[1]],axes=FALSE,ylab=ylab,xlab=xlab,xaxs=xaxs,yaxs=yaxs,...) if (axis1) { axis(1,at=seq(nrow(x)),lwd=0.5,cex.axis=cex.axis,las=3) if (lab) suppressWarnings(title("",xlab=xlab,...)) } if (axis2) { axis(2,at=seq(ncol(x)),lwd=0.5,cex.axis=cex.axis,las=1) if (lab) suppressWarnings(title("",ylab=ylab,...)) } if (!is.null(plotfun)) { plotfun(...) } if (!missing(idx)) { if (rowcol) { for (i in seq_len(length(idx))) image(x=x0,y=idx[[i]],as.matrix(x[,idx[[i]]]),col=col[[i]],add=TRUE,xaxs=xaxs,yaxs=yaxs,...) } else for (i in seq_len(length(idx))) image(x=idx[[i]],y=y0,as.matrix(x[idx[[i]],]),col=col[[i]],add=TRUE,xaxs=xaxs,yaxs=yaxs,...) } } ##' Visualize categorical by group variable ##' ##' @title Organize several image calls (for visualizing categorical data) ##' @param x data.frame or matrix ##' @param group group variable ##' @param ncol number of columns in layout ##' @param byrow organize by row if TRUE ##' @param colorbar Add color bar ##' @param colorbar.space Space around color bar ##' @param label.offset label offset ##' @param order order ##' @param colorbar.border Add border around color bar ##' @param main Main title ##' @param rowcol switch rows and columns ##' @param plotfun Alternative plot function (instead of 'image') ##' @param axis1 Axis 1 ##' @param axis2 Axis 2 ##' @param mar Margins ##' @param col Colours ##' @param ... 
Additional arguments to lower level graphics functions ##' @author Klaus Holst ##' @examples ##' X <- matrix(rbinom(400,3,0.5),20) ##' group <- rep(1:4,each=5) ##' images(X,colorbar=0,zlim=c(0,3)) ##' images(X,group=group,zlim=c(0,3)) ##' \dontrun{ ##' images(X,group=group,col=list(RColorBrewer::brewer.pal(4,"Purples"), ##' RColorBrewer::brewer.pal(4,"Greys"), ##' RColorBrewer::brewer.pal(4,"YlGn"), ##' RColorBrewer::brewer.pal(4,"PuBuGn")),colorbar=2,zlim=c(0,3)) ##' } ##' images(list(X,X,X,X),group=group,zlim=c(0,3)) ##' images(list(X,X,X,X),ncol=1,group=group,zlim=c(0,3)) ##' images(list(X,X),group,axis2=c(FALSE,FALSE),axis1=c(FALSE,FALSE), ##' mar=list(c(0,0,0,0),c(0,0,0,0)),yaxs="i",xaxs="i",zlim=c(0,3)) ##' @export images <- function(x,group,ncol=2,byrow=TRUE,colorbar=1,colorbar.space=0.1,label.offset=0.02, order=TRUE,colorbar.border=0,main,rowcol=FALSE,plotfun=NULL, axis1,axis2,mar, col=list(c("#EFF3FF", "#BDD7E7", "#6BAED6", "#2171B5"), c("#FEE5D9", "#FCAE91", "#FB6A4A", "#CB181D"), c("#EDF8E9", "#BAE4B3", "#74C476", "#238B45"), c("#FEEDDE", "#FDBE85", "#FD8D3C", "#D94701")), ...) 
{ if (is.data.frame(x) || is.matrix(x)) x <- list(x) K <- length(x) lout <- matrix(seq(K),ncol=ncol,byrow=byrow) hei <- rep(1,nrow(lout))/nrow(lout) wid <- rep(1,ncol)/ncol if (colorbar==1) { wid <- c(rep(1,ncol)/ncol*(1-colorbar.space),colorbar.space) lout <- cbind(lout,K+1) } if (colorbar==2) { hei <- c(rep(1,nrow(lout))/nrow(lout)*(1-colorbar.space),colorbar.space) lout <- rbind(lout,K+1) } if (missing(group)) { group <- rep(1,nrow(x[[1]])) } if (missing(main)) main <- rep("",K) if (!is.list(col)) col <- list(col) group <- factor(group) idxs <- lapply(levels(group), function(x) which(group==x)) layout(lout,widths=wid,heights=hei) ##if (missing(mar)) par(mar=c(4,4,3,0)) if (missing(axis2)) axis2 <- c(TRUE,rep(FALSE,K-1)) if (missing(axis1)) axis1 <- rep(TRUE,K) for (i in seq(length(x))) { ## if (!missing(mar)) par(mar=mar[[i]]) img(x[[i]],idxs,col,axis2=axis2[i],axis1=axis1[i],main=main[i],rowcol=rowcol,plotfun=plotfun[[i]],...) ## if (missing(mar)) par(mar=c(4,2,3,2)) } G <- nlevels(group) M <- length(col[[1]]) if (colorbar==1) { par(mar=c(0,0,0,2)) plot.new(); plot.window(xlim=c(0,1),ylim=c(0,1)) for (i in seq(G)) { lava::colorbar(col[[i]],values=seq(M)-1,direction="horizontal", y.range=c(1-i/(G+1),1-i/(G+1)+label.offset), border=colorbar.border,x.range=c(0,1),srt=0,cex=0.6) text(0.5,1-i/(G+1)-label.offset, levels(group)[i]) } } if (colorbar==2) { par(mar=c(0,0,0,0)) plot.new(); plot.window(xlim=c(0,1),ylim=c(0,1)) for (i in seq(G)) { xr <- c(1-i/(G+1),1-i/(G+1)+.1)-.1/2 lava::colorbar(col[[i]],values=seq(M)-1,direction="horizontal", x.range=xr, border=colorbar.border,y.range=c(0.3,0.5),srt=0,cex=0.6) text(mean(xr),.1, levels(group)[i]) } } } lava/R/cv.R0000644000176200001440000001054313520655354012144 0ustar liggesusersrmse1 <- function(fit,data,response=NULL,...) { yhat <- predict(fit,newdata=data,...) 
if (is.null(response)) response <- endogenous(fit) y <- data[,response] c(RMSE=mean(as.matrix(y-yhat)^2)) } ##' Cross-validation ##' ##' Generic cross-validation function ##' @title Cross-validation ##' @param modelList List of fitting functions or models ##' @param data data.frame ##' @param K Number of folds (default 5, 0 splits in 1:n/2, n/2:n with last part used for testing) ##' @param rep Number of repetitions (default 1) ##' @param perf Performance measure (default RMSE) ##' @param seed Optional random seed ##' @param mc.cores Number of cores used for parallel computations ##' @param shared function applied to each fold with results send to each model ##' @param ... Additional arguments parsed to models in modelList and perf ##' @author Klaus K. Holst ##' @examples ##' f0 <- function(data,...) lm(...,data) ##' f1 <- function(data,...) lm(Sepal.Length~Species,data) ##' f2 <- function(data,...) lm(Sepal.Length~Species+Petal.Length,data) ##' x <- cv(list(m0=f0,m1=f1,m2=f2),rep=10, data=iris, formula=Sepal.Length~.) ##' x2 <- cv(list(f0(iris),f1(iris),f2(iris)),rep=10, data=iris) ##' @export cv <- function(modelList, data, K=5, rep=1, perf, seed=NULL, mc.cores=1, shared=NULL, ...) { if (is.vector(data)) data <- cbind(data) if (missing(perf)) perf <- rmse1 if (!is.list(modelList)) modelList <- list(modelList) nam <- names(modelList) if (is.null(nam)) nam <- paste0("model",seq_along(modelList)) args0 <- list(...) args <- args0 if (!is.null(shared)) { sharedres <- shared(data,...) 
args <- c(args, sharedres) } ## Models run on full data: if (is.function(modelList[[1]])) { fit0 <- lapply(modelList, function(f) do.call(f,c(list(data),args))) } else { fit0 <- modelList } ## In-sample predictive performance: perf0 <- lapply(fit0, function(fit) do.call(perf,c(list(fit,data=data),args))) namPerf <- names(perf0[[1]]) names(fit0) <- names(perf0) <- nam n <- NROW(data) M <- length(perf0) # Number of models P <- length(perf0[[1]]) # Number of performance measures if (!is.null(seed)) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } if (K==0) { rep <- 1 K <- 1 folds <- list(csplit(seq(n))) } else { folds <- foldr(n,K,rep) } arg <- expand.grid(R=seq(rep),K=seq(K)) #,M=seq_along(modelList)) dim <- c(rep,K,M,P) PerfArr <- array(0,dim) dimnames(PerfArr) <- list(NULL,NULL,nam,namPerf) ff <- function(i) { R <- arg[i,1] k <- arg[i,2] fold <- folds[[R]] dtest <- data[fold[[k]],,drop=FALSE] dtrain <- data[unlist(fold[-k]),,drop=FALSE] args <- args0 if (!is.null(shared)) { sharedres <- shared(dtrain,...) args <- c(args, sharedres) } if (is.function(modelList[[1]])) { fits <- lapply(modelList, function(f) do.call(f,c(list(dtrain),args))) } else { fits <- lapply(modelList, function(m) do.call(update,c(list(m,data=dtrain),args))) } perfs <- lapply(fits, function(fit) do.call(perf,c(list(fit,data=dtest),args))) do.call(rbind,perfs) } if (mc.cores>1) { val <- parallel::mcmapply(ff,seq(nrow(arg)),SIMPLIFY=FALSE,mc.cores=mc.cores) } else { val <- mapply(ff,seq(nrow(arg)),SIMPLIFY=FALSE) } for (i in seq(nrow(arg))) { R <- arg[i,1] k <- arg[i,2] PerfArr[R,k,,] <- val[[i]] } structure(list(cv=PerfArr, call=match.call(), names=nam, rep=rep, folds=K, fit=fit0), class="CrossValidated") } ##' @export summary.CrossValidated <- function(object,...) 
{ return(coef(object)) } ##' @export print.CrossValidated <- function(x,...) { res <- coef(x) print(res,quote=FALSE) } coef.CrossValidated <- function(object,...) { res <- apply(object$cv,3:4,function(x) mean(x)) if (length(object$names)==nrow(res)) rownames(res) <- object$names res } lava/R/gkgamma.R0000644000176200001440000001056513520655354013144 0ustar liggesusersgoodmankruskal_gamma <- function(P,...) { nr <- nrow(P); nc <- ncol(P) Pconc <- 0 for (i in seq_len(nr-1)) { h <- seq(i+1,nr) for (j in seq_len(nc-1)) { k <- seq(j+1,nc) Pconc <- Pconc+2*P[i,j]*sum(P[h,k]) } } Pdisc <- 0 for (i in seq_len(nr-1)) { h <- seq(i+1,nr) for (j in (seq_len(nc-1)+1)) { k <- seq(1,j-1) Pdisc <- Pdisc+2*P[i,j]*sum(P[h,k]) } } list(C=Pconc,D=Pdisc,gamma=(Pconc-Pdisc)/(Pconc+Pdisc)) } ##' @export gkgamma <- function(x,data=parent.frame(),strata=NULL,all=FALSE,iid=TRUE,...) { if (inherits(x,"formula")) { xf <- getoutcome(x,sep="|") xx <- attr(xf,"x") if (length(xx)==0) stop("Not a valid formula") yx <- update(as.formula(paste0(xf,"~.")),xx[[1]]) if (length(xx)>1) { strata <- interaction(model.frame(xx[[2]],data=data)) x <- yx } else { x <- model.frame(yx,data=data) } } if (!is.null(strata)) { dd <- split(data,strata) gam <- lapply(dd,function(d,...) gkgamma(x,data=d,...), ..., iid=TRUE, keep=1:2) mgam <- Reduce(function(x,y,...) merge(x,y,...),gam) ps <- estimate(multinomial(strata),data=data,...) mgam <- merge(mgam,ps) psi <- 2*length(gam)+seq(length(coef(ps))) res <- estimate(mgam,function(p,...) 
{ k <- length(p)/3 cd <- lapply(seq(k),function(x) p[(1:2)+2*(x-1)]) dif <- unlist(lapply(cd,function(x) x[1]-x[2])) tot <- unlist(lapply(cd,function(x) x[1]+x[2])) gam <- dif/tot ## Conditional gammas given Z=z px2 <- p[psi]^2 pgamma <- sum(dif*px2)/sum(tot*px2) #weights <- px2*tot/sum(px2*tot) #pgamma <- sum(weights*gam) c(gam,pgamma=pgamma) },labels=c(paste0("\u03b3:",names(dd)),"pgamma"), iid=iid) if (!iid) { for (i in seq_along(gam)) gam[[i]][c("iid","id")] <- NULL } homtest <- estimate(res,lava::contr(seq_along(gam),length(gam)+1),iid=FALSE) attributes(res) <- c(attributes(res), list(class=c("gkgamma","estimate"), cl=match.call(), strata=gam, homtest=homtest)) return(res) } if (is.table(x) || is.data.frame(x) || is.matrix(x)) { x <- multinomial(x) } if (!inherits(x,"multinomial")) stop("Expected table, data.frame or multinomial object") structure(estimate(x,function(p) { P <- x$position; P[] <- p[x$position] goodmankruskal_gamma(P) },iid=iid,data=data,...), cl=match.call(), class=c("gkgamma","estimate")) } ##' @export print.gkgamma <- function(x,call=TRUE,...) 
{ if (call) { cat("Call: ") print(attr(x,"cl")) printline(50) } n <- x$n if (!is.null(attr(x,"strata"))) { cat("Strata:\n\n") for (i in seq_along(attr(x,"strata"))) { with(attributes(x), cat(paste0(names(strata)[i], " (n=",strata[[i]]$n, if (strata[[i]]$ncluster0) return(names(latentidx)) else return(NULL) } if (inherits(var,"formula")) var <- all.vars(var) if (clear) { x$noderender$shape[var] <- "rectangle" x$latent[var] <- NULL ## intfix(x,var) <- NA } else { if (!all(var%in%vars(x))) { addvar(x,messages=messages,reindex=FALSE,) <- setdiff(var,vars(x)) } x$noderender$shape[var] <- "ellipse" x$latent[var] <- TRUE ord <- intersect(var,ordinal(x)) if (length(ord)>0) ordinal(x,K=NULL) <- ord } xorg <- exogenous(x) exoset <- setdiff(xorg,var) if (length(exoset) to continue...")) res <- try(scan("", what=0, quiet=TRUE, nlines=1), silent=TRUE) } waitclick <- function() if(is.null(locator(1))) invisible(NULL) lava/R/glmest.R0000644000176200001440000002543713520655354013037 0ustar liggesusers glm.estimate.hook <- function(x,estimator,...) { yy <- c() if (length(estimator)>0 && estimator=="glm") { for (y in endogenous(x)) { fam <- attributes(distribution(x)[[y]])$family if (is.null(fam)) fam <- stats::gaussian() if (!(tolower(fam$family)%in% c("gaussian","gamma","inverse.gaussian","weibull"))) { yy <- c(yy,y) } } if (length(yy)>0) covariance(x,yy) <- 1 } return(c(list(x=x,estimator=estimator,...))) } GLMest <- function(m,data,control=list(),...) 
{ yvar <- endogenous(m) res <- c() count <- 0 V <- NULL mymsg <- c() iids <- c() breads <- c() et <- eventTime(m) yvar.et <- rep(NA,length(yvar)) names(yvar.et) <- yvar if (!is.null(et)) { for (i in seq_along(et)) { ## if (!survival::is.Surv(data[,et[[i]]$names[1]])) ## data[,et[[i]]$names[1]] <- with(et[[i]], ## survival::Surv(data[,names[1]],data[,names[2]])) yvar <- setdiff(yvar,c(et[[i]]$latentTimes[-1],et[[i]]$names)) yvar.et[et[[i]]$latentTimes[1]] <- et[[i]]$names[1] } } for (y in yvar) { count <- count+1 xx <- parents(m,y) fam <- attributes(distribution(m)[[y]])$family if (is.null(fam)) fam <- stats::gaussian() if (!is.null(fam$link)) { mymsg <- c(mymsg, with(fam, paste0(family,"(",link,")"))) } else { mymsg <- c(mymsg, with(fam, paste0(family))) } if (length(xx)==0) xx <- 1 nn0 <- paste(y,xx,sep=lava.options()$symbol[1]) y0 <- y ## isEventTime <- !is.na(yvar.et[y]) ## if (isEventTime) { ## y <- yvar.et[y] ## } #nn0 <- paste(y,xx,sep=lava.options()$symbol[1]) f <- as.formula(paste0(y,"~",paste(xx,collapse="+"))) isSurv <- inherits(data[1,y],"Surv") if (isSurv) { g <- survival::survreg(f,data=data,dist=fam$family,...) } else { g <- glm(f,family=fam,data=data,...) 
} p <- pars(g) ii <- iid(g) V0 <- attr(ii,"bread") iids <- cbind(iids,ii) y <- y0 names(p)[1] <- y if (length(p)>1) { nn <- paste(y,xx,sep=lava.options()$symbol[1]) names(p)[seq_along(nn)+1] <- nn0 if (length(p)>length(nn)+1) names(p)[length(p)] <- paste(y,y,sep=lava.options()$symbol[2]) } if (tolower(fam$family)%in%c("gaussian","gamma","inverse.gaussian") && !isSurv) { iids <- cbind(iids,0) null <- matrix(0); dimnames(null) <- list("scale","scale") V0 <- blockdiag(V0,null,pad=0) } breads <- c(breads,list(V0)) res <- c(res, list(p)); } coefs <- unlist(res) idx <- na.omit(match(coef(m),names(coefs))) coefs <- coefs[idx] ##V <- Reduce(blockdiag,breads)[idx,idx] V <- crossprod(iids[,idx]) ##V <- crossprod(iids[,idx]) mymsg <- noquote(cbind(mymsg)) colnames(mymsg) <- "Family(Link)"; rownames(mymsg) <- paste(yvar,":") list(estimate=coefs,vcov=V,breads=breads,iid=iids[,idx],summary.message=function(...) { mymsg }, dispname="Dispersion:") } GLMscore <- function(x,p,data,indiv=TRUE,logLik=FALSE,...) { yvar <- endogenous(x) S <- pnames <- c() count <- 0 breads <- c() L <- 0 for (y in yvar) { count <- count+1 xx <- parents(x,y) pname <- c(y,paste0(y,sep=lava.options()$symbol[1],xx),paste(y,y,sep=lava.options()$symbol[2])) pidx <- na.omit(match(pname,coef(x))) fam <- attributes(distribution(x)[[y]])$family if (is.null(fam)) fam <- stats::gaussian() if (length(xx)==0) xx <- 1 f <- as.formula(paste0(y,"~",paste(xx,collapse="+"))) isSurv <- inherits(data[1,y],"Surv") if (inherits(data[,y],"Surv")) { g <- survival::survreg(f,data=data,dist=fam$family) } else { g <- glm(f,family=fam,data=data) } p0 <- p[pidx] if (!isSurv) L0 <- logL.glm(g,p=p0,indiv=TRUE,...) if (tolower(fam$family)%in%c("gaussian","gamma","inverse.gaussian") && !isSurv) { p0 <- p0[-length(p0)] S0 <- score(g,p=p0,indiv=TRUE,pearson=TRUE,...) 
V0 <- attr(S0,"bread") r <- attr(S0,"pearson") ## dispersion <- mean(r^2) S0 <- cbind(S0,scale=0) null <- matrix(0); dimnames(null) <- list("scale","scale") V0 <- blockdiag(V0,null,pad=0) } else { S0 <- score(g,p=p0,indiv=TRUE,...) if (isSurv) L0 <- attr(S0,"logLik") V0 <- attr(S0,"bread") } L <- L+sum(L0) breads <- c(breads,list(V0)) S <- c(S,list(S0)) pnames <- c(pnames, list(pname)); } coefs <- unlist(pnames) idx <- na.omit(match(coefs,coef(x))) idx <- order(idx) V <- Reduce(blockdiag,breads)[idx,idx] S1 <- Reduce(cbind,S)[,idx,drop=FALSE] colnames(S1) <- coef(x) attributes(S1)$bread <- V attributes(S1)$logLik <- structure(L,nobs=nrow(data),nall=nrow(data),df=length(p),class="logLik") if (!indiv) S1 <- colSums(S1) return(S1) } ##' @export score.lm <- function(x, p=coef(x), data, indiv=FALSE, y, X, offset=NULL, weights=NULL, dispersion=TRUE, ...) { if (missing(data)) { X <- model.matrix(x) y <- model.frame(x)[,1] } else { X <- model.matrix(formula(x),data=data) y <- model.frame(formula(x),data=data)[,1] } if(any(is.na(p))) warning("Over-parameterized model") Xbeta <- X%*%p if (is.null(offset)) offset <- x$offset if (!is.null(offset)) Xbeta <- Xbeta+offset r <- y-Xbeta if (is.null(weights)) weights <- x$weights if (!is.null(weights)) { sigma2 <- sum(r^2*weights)/(length(r)-length(p)) r <- r*weights } else { sigma2 <- sum(r^2)/(length(r)-length(p)) } if (!dispersion) sigma2 <- 1 ##sigma2 <- suppressWarnings(summary(x)$sigma^2) A <- as.vector(r)/sigma2 S <- apply(X,2,function(x) x*A) if (!indiv) return(colSums(S)) suppressWarnings(attributes(S)$bread <- vcov(x)) return(S) } ##' @export score.glm <- function(x,p=coef(x),data,indiv=FALSE,pearson=FALSE, y,X,link,dispersion,offset=NULL,weights=NULL,...) 
{ if (inherits(x,"glm")) { link <- family(x) if (missing(data)) { X <- model.matrix(x) y <- model.frame(x)[,1] } else { X <- model.matrix(formula(x),data=data) y <- model.frame(formula(x),data=data)[,1] } offset <- x$offset } else { if (missing(link)) stop("Family needed") if (missing(data)) stop("data needed") X <- model.matrix(formula(x),data=data) y <- model.frame(formula(x),data=data)[,1] } if (is.character(y) || is.factor(y)) { y <- as.numeric(as.factor(y))-1 } ## g <- link$linkfun ginv <- link$linkinv dginv <- link$mu.eta ## D[linkinv] ##dg <- function(x) 1/dginv(g(x)) ## Dh^-1 = 1/(h'(h^-1(x))) canonf <- do.call(link$family,list()) ## caninvlink <- canonf$linkinv canlink <- canonf$linkfun Dcaninvlink <- canonf$mu.eta Dcanlink <- function(x) 1/Dcaninvlink(canlink(x)) ##gmu <- function(x) g(caninvlink(x)) ##invgmu <- function(z) canlink(ginv(z)) h <- function(z) Dcanlink(ginv(z))*dginv(z) if(any(is.na(p))) stop("Over-parameterized model") Xbeta <- X%*%p if (!is.null(offset)) Xbeta <- Xbeta+offset if (missing(data) && !is.null(x$offset) && is.null(offset) ) Xbeta <- Xbeta+x$offset pi <- ginv(Xbeta) r <- y-pi if (!is.null(x$prior.weights) || !is.null(weights)) { if (is.null(weights)) weights <- x$prior.weights } else { weights <- !is.na(r) } a.phi <- 1 r <- r*weights rpearson <- as.vector(r)/link$variance(pi)^.5 if (length(p)>length(coef(x))) { a.phi <- p[length(coef(x))+1] } else if (tolower(family(x)$family)%in%c("gaussian","gamma","inverse.gaussian")) { suppressWarnings(a.phi <- summary(x)$dispersion) ##a.phi <- sum(rpearson^2)*x$df.residual/x$df.residual^2 } A <- as.vector(h(Xbeta)*r)/a.phi S <- apply(X,2,function(x) x*A) if (!indiv) return(colSums(S)) if (pearson) attr(S,"pearson") <- rpearson suppressWarnings(attributes(S)$bread <- vcov(x)) if (x$family$family=="quasi" && x$family$link=="identity" && x$family$varfun=="constant") attributes(S)$bread <- -Inverse(information.glm(x)) return(S) } ##' @export pars.glm <- function(x,...) 
{ if (tolower(family(x)$family)%in%c("gaussian","gamma","inverse.gaussian")) { res <- c(coef(x),suppressWarnings(summary(x)$dispersion)) names(res)[length(res)] <- "Dispersion" return(res) } return(coef(x)) } logL.glm <- function(x,p=pars.glm(x),data,indiv=FALSE,...) { if (!missing(data)) { x <- update(x,data=data,...) } f <- family(x) ginv <- f$linkinv X <- model.matrix(x) n <- nrow(X) ##disp <- 1; p0 <- p if (tolower(family(x)$family)%in%c("gaussian","gamma","inverse.gaussian")) { if (length(p)==ncol(X)) { ##disp <- suppressWarnings((summary(x)$dispersion)) } else { ##disp <- tail(p,1) p0 <- p[-length(p)] } } if(any(is.na(p))) { warning("Over-parametrized model") } Xbeta <- X%*%p0 if (!is.null(x$offset)) Xbeta <- Xbeta+x$offset y <- model.frame(x)[,1] mu <- ginv(Xbeta) w <- x$prior.weights dev <- f$dev.resids(y,mu,w) if (indiv) { } loglik <- length(p)-(f$aic(y,n,mu,w,sum(dev))/2+x$rank) structure(loglik,nobs=n,df=length(p),class="logLik") } ##' @export iid.glm <- function(x,...) { ## if (x$family$family=="quasi" && x$family$link=="identity" && x$family$varfun=="constant") { ## return(iid.default(x,information.glm,...)) ## } iid.default(x,...) } hessian.glm <- function(x,p=coef(x),...) { numDeriv::jacobian(function(theta) score.glm(x,p=theta,indiv=FALSE,...),p) } ##' @export information.glm <- function(x,...) hessian.glm(x,...) robustvar <- function(x,id=NULL,...) { U <- score(x,indiv=TRUE) II <- unique(id) K <- length(II) J <- 0 if (is.null(id)) { J <- crossprod(U) } else { for (ii in II) { J <- J+tcrossprod(colSums(U[which(id==ii),,drop=FALSE])) } J <- K/(K-1)*J } iI <- vcov(x) V <- iI%*%J%*%iI return(V) } glm_logLik.lvm <- function(object,...) { attr(GLMscore(object,...),"logLik") } glm_method.lvm <- NULL glm_objective.lvm <- function(x,p,data,...) { GLMest(x,data,...) } glm_gradient.lvm <- function(x,p,data,...) { -GLMscore(x,p,data,...) } glm_variance.lvm <- function(x,p,data,opt,...) 
{ opt$vcov } lava/R/sim.lvm.R0000644000176200001440000007633513520655354013134 0ustar liggesusers##' Simulate model ##' ##' Simulate data from a general SEM model including non-linear effects and ##' general link and distribution of variables. ##' ##' @aliases sim sim.lvmfit sim.lvm ##' simulate.lvmfit simulate.lvm ##' transform<- transform<-.lvm transform.lvm ##' functional functional<- functional.lvm functional<-.lvm ##' distribution distribution distribution<- distribution.lvm distribution<-.lvm ##' heavytail heavytail<- ##' weibull.lvm ##' binomial.lvm ##' poisson.lvm ##' uniform.lvm ##' multinomial.lvm ##' beta.lvm ##' normal.lvm mvn.lvm ##' lognormal.lvm ##' gaussian.lvm ##' GM2.lvm ##' GM3.lvm ##' probit.lvm ##' logit.lvm ##' pareto.lvm ##' student.lvm ##' chisq.lvm ##' coxGompertz.lvm ##' coxWeibull.lvm ##' coxExponential.lvm ##' aalenExponential.lvm ##' Gamma.lvm gamma.lvm ##' loggamma.lvm ##' categorical categorical<- ##' threshold.lvm ##' ones.lvm ##' sequence.lvm ##' @usage ##' \method{sim}{lvm}(x, n = NULL, p = NULL, normal = FALSE, cond = FALSE, ##' sigma = 1, rho = 0.5, X = NULL, unlink=FALSE, latent=TRUE, ##' use.labels = TRUE, seed=NULL, ...) 
##' @param x Model object ##' @param n Number of simulated values/individuals ##' @param p Parameter value (optional) ##' @param normal Logical indicating whether to simulate data from a ##' multivariate normal distribution conditional on exogenous variables hence ##' ignoring functional/distribution definition ##' @param cond for internal use ##' @param sigma Default residual variance (1) ##' @param rho Default covariance parameter (0.5) ##' @param X Optional matrix of fixed values of variables (manipulation) ##' @param unlink Return Inverse link transformed data ##' @param latent Include latent variables (default TRUE) ##' @param use.labels convert categorical variables to factors before applying transformation ##' @param seed Random seed ##' @param \dots Additional arguments to be passed to the low level functions ##' @author Klaus K. Holst ##' @keywords models datagen regression ##' @export ##' @examples ##' ################################################## ##' ## Logistic regression ##' ################################################## ##' m <- lvm(y~x+z) ##' regression(m) <- x~z ##' distribution(m,~y+z) <- binomial.lvm("logit") ##' d <- sim(m,1e3) ##' head(d) ##' e <- estimate(m,d,estimator="glm") ##' e ##' ## Simulate a few observation from estimated model ##' sim(e,n=5) ##' ################################################## ##' ## Poisson ##' ################################################## ##' distribution(m,~y) <- poisson.lvm() ##' d <- sim(m,1e4,p=c(y=-1,"y~x"=2,z=1)) ##' head(d) ##' estimate(m,d,estimator="glm") ##' mean(d$z); lava:::expit(1) ##' summary(lm(y~x,sim(lvm(y[1:2]~4*x),1e3))) ##' ################################################## ##' ### Gamma distribution ##' ################################################## ##' m <- lvm(y~x) ##' distribution(m,~y+x) <- list(Gamma.lvm(shape=2),binomial.lvm()) ##' intercept(m,~y) <- 0.5 ##' d <- sim(m,1e4) ##' summary(g <- glm(y~x,family=Gamma(),data=d)) ##' \dontrun{MASS::gamma.shape(g)} ##' 
args(lava::Gamma.lvm) ##' distribution(m,~y) <- Gamma.lvm(shape=2,log=TRUE) ##' sim(m,10,p=c(y=0.5))[,"y"] ##' ################################################## ##' ### Beta ##' ################################################## ##' m <- lvm() ##' distribution(m,~y) <- beta.lvm(alpha=2,beta=1) ##' var(sim(m,100,"y,y"=2)) ##' distribution(m,~y) <- beta.lvm(alpha=2,beta=1,scale=FALSE) ##' var(sim(m,100)) ##' ################################################## ##' ### Transform ##' ################################################## ##' m <- lvm() ##' transform(m,xz~x+z) <- function(x) x[1]*(x[2]>0) ##' regression(m) <- y~x+z+xz ##' d <- sim(m,1e3) ##' summary(lm(y~x+z + x*I(z>0),d)) ##' ################################################## ##' ### Non-random variables ##' ################################################## ##' m <- lvm() ##' distribution(m,~x+z+v+w) <- list(sequence.lvm(0,5),## Seq. 0 to 5 by 1/n ##' ones.lvm(), ## Vector of ones ##' ones.lvm(0.5), ## 0.8n 0, 0.2n 1 ##' ones.lvm(interval=list(c(0.3,0.5),c(0.8,1)))) ##' sim(m,10) ##' ################################################## ##' ### Cox model ##' ### piecewise constant hazard ##' ################################################ ##' m <- lvm(t~x) ##' rates <- c(1,0.5); cuts <- c(0,5) ##' ## Constant rate: 1 in [0,5), 0.5 in [5,Inf) ##' distribution(m,~t) <- coxExponential.lvm(rate=rates,timecut=cuts) ##' \dontrun{ ##' d <- sim(m,2e4,p=c("t~x"=0.1)); d$status <- TRUE ##' plot(timereg::aalen(survival::Surv(t,status)~x,data=d, ##' resample.iid=0,robust=0),spec=1) ##' L <- approxfun(c(cuts,max(d$t)),f=1, ##' cumsum(c(0,rates*diff(c(cuts,max(d$t))))), ##' method="linear") ##' curve(L,0,100,add=TRUE,col="blue") ##' } ##' ################################################## ##' ### Cox model ##' ### piecewise constant hazard, gamma frailty ##' ################################################## ##' m <- lvm(y~x+z) ##' rates <- c(0.3,0.5); cuts <- c(0,5) ##' distribution(m,~y+z) <- 
list(coxExponential.lvm(rate=rates,timecut=cuts), ##' loggamma.lvm(rate=1,shape=1)) ##' \dontrun{ ##' d <- sim(m,2e4,p=c("y~x"=0,"y~z"=0)); d$status <- TRUE ##' plot(timereg::aalen(survival::Surv(y,status)~x,data=d, ##' resample.iid=0,robust=0),spec=1) ##' L <- approxfun(c(cuts,max(d$y)),f=1, ##' cumsum(c(0,rates*diff(c(cuts,max(d$y))))), ##' method="linear") ##' curve(L,0,100,add=TRUE,col="blue") ##' } ##' ## Equivalent via transform (here with Aalens additive hazard model) ##' m <- lvm(y~x) ##' distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) ##' distribution(m,~z) <- Gamma.lvm(rate=1,shape=1) ##' transform(m,t~y+z) <- prod ##' sim(m,10) ##' ## Shared frailty ##' m <- lvm(c(t1,t2)~x+z) ##' rates <- c(1,0.5); cuts <- c(0,5) ##' distribution(m,~y) <- aalenExponential.lvm(rate=rates,timecut=cuts) ##' distribution(m,~z) <- loggamma.lvm(rate=1,shape=1) ##' \dontrun{ ##' mets::fast.reshape(sim(m,100),varying="t") ##' } ##' ################################################## ##' ### General multivariate distributions ##' ################################################## ##' \dontrun{ ##' m <- lvm() ##' distribution(m,~y1+y2,oratio=4) <- VGAM::rbiplackcop ##' ksmooth2(sim(m,1e4),rgl=FALSE,theta=-20,phi=25) ##' m <- lvm() ##' distribution(m,~z1+z2,"or1") <- VGAM::rbiplackcop ##' distribution(m,~y1+y2,"or2") <- VGAM::rbiplackcop ##' sim(m,10,p=c(or1=0.1,or2=4)) ##' } ##' m <- lvm() ##' distribution(m,~y1+y2+y3,TRUE) <- function(n,...) rmvn0(n,sigma=diag(3)+1) ##' var(sim(m,100)) ##' ## Syntax also useful for univariate generators, e.g. 
##' m <- lvm(y~x+z) ##' distribution(m,~y,TRUE) <- function(n) rnorm(n,mean=1000) ##' sim(m,5) ##' distribution(m,~y,"m1",0) <- rnorm ##' sim(m,5) ##' sim(m,5,p=c(m1=100)) ##' ################################################## ##' ### Regression design in other parameters ##' ################################################## ##' ## Variance heterogeneity ##' m <- lvm(y~x) ##' distribution(m,~y) <- function(n,mean,x) rnorm(n,mean,exp(x)^.5) ##' if (interactive()) plot(y~x,sim(m,1e3)) ##' ## Alternaively, calculate the standard error directly ##' addvar(m) <- ~sd ## If 'sd' should be part of the resulting data.frame ##' constrain(m,sd~x) <- function(x) exp(x)^.5 ##' distribution(m,~y) <- function(n,mean,sd) rnorm(n,mean,sd) ##' if (interactive()) plot(y~x,sim(m,1e3)) ##' ## Regression on variance parameter ##' m <- lvm() ##' regression(m) <- y~x ##' regression(m) <- v~x ##' ##distribution(m,~v) <- 0 # No stochastic term ##' ## Alternative: ##' ## regression(m) <- v[NA:0]~x ##' distribution(m,~y) <- function(n,mean,v) rnorm(n,mean,exp(v)^.5) ##' if (interactive()) plot(y~x,sim(m,1e3)) ##' ## Regression on shape parameter in Weibull model ##' m <- lvm() ##' regression(m) <- y ~ z+v ##' regression(m) <- s ~ exp(0.6*x-0.5*z) ##' distribution(m,~x+z) <- binomial.lvm() ##' distribution(m,~cens) <- coxWeibull.lvm(scale=1) ##' distribution(m,~y) <- coxWeibull.lvm(scale=0.1,shape=~s) ##' eventTime(m) <- time ~ min(y=1,cens=0) ##' if (interactive()) { ##' d <- sim(m,1e3) ##' require(survival) ##' (cc <- coxph(Surv(time,status)~v+strata(x,z),data=d)) ##' plot(survfit(cc) ,col=1:4,mark.time=FALSE) ##' } ##' ################################################## ##' ### Categorical predictor ##' ################################################## ##' m <- lvm() ##' ## categorical(m,K=3) <- "v" ##' categorical(m,labels=c("A","B","C")) <- "v" ##' regression(m,additive=FALSE) <- y~v ##' \dontrun{ ##' plot(y~v,sim(m,1000,p=c("y~v:2"=3))) ##' } ##' m <- lvm() ##' 
categorical(m,labels=c("A","B","C"),p=c(0.5,0.3)) <- "v" ##' regression(m,additive=FALSE,beta=c(0,2,-1)) <- y~v ##' ## equivalent to: ##' ## regression(m,y~v,additive=FALSE) <- c(0,2,-1) ##' regression(m,additive=FALSE,beta=c(0,4,-1)) <- z~v ##' table(sim(m,1e4)$v) ##' glm(y~v, data=sim(m,1e4)) ##' glm(y~v, data=sim(m,1e4,p=c("y~v:1"=3))) ##' ##' transform(m,v2~v) <- function(x) x=='A' ##' sim(m,10) ##' ##' ################################################## ##' ### Pre-calculate object ##' ################################################## ##' m <- lvm(y~x) ##' m2 <- sim(m,'y~x'=2) ##' sim(m,10,'y~x'=2) ##' sim(m2,10) ## Faster ##' "sim" <- function(x,...) UseMethod("sim") ##' @export sim.lvmfit <- function(x,n=nrow(model.frame(x)),p=pars(x),xfix=TRUE,...) { m <- Model(x) if ((nrow(model.frame(x))==n) & xfix) { X <- exogenous(x) mydata <- model.frame(x) for (pred in X) { distribution(m, pred) <- list(mydata[,pred]) } } sim(m,n=n,p=p,...) } ##' @export sim.lvm <- function(x,n=NULL,p=NULL,normal=FALSE,cond=FALSE,sigma=1,rho=.5, X=NULL,unlink=FALSE,latent=TRUE,use.labels=TRUE,seed=NULL,...) 
{ if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } v.env <- c("A","M","P","PP","PPdiag","xx","vv","mdist","mdistnam","mii", "nn","mu","xf","xfix","X", "vartrans","multitrans","multitrans.idx", "X.idx","ii.mvn","xconstrain.idx","xconstrain", "xconstrain.par","covparnames","exo_constrainY") setup <- is.null(n) && is.null(X) ## Save environment (variables v.env) and return sim object loadconfig <- !is.null(x$sim.env) && !setup && (length(list(...))==0 && length(p)==0) Yfix <- NULL if (loadconfig) { for (v in setdiff(v.env,"X")) assign(v, x$sim.env[[v]]) if (is.null(X)) X <- x$sim.env[['X']] } else { if (!is.null(n) && n<1) return(NULL) p <- c(p,unlist(list(...))) xx <- exogenous(x) if (!is.null(X)) { if (is.null(n)) n <- nrow(X) if (!is.null(colnames(X))) { yfix <- setdiff(colnames(X),xx) if (length(yfix)>0) Yfix <- X[,yfix,drop=FALSE] xx0 <- intersect(xx,colnames(X)) if (length(xx0)>0) X <- as.matrix(X[,xx0,drop=FALSE]) } } else { if (!is.null(p)) { i1 <- unique(na.omit(c(match(names(p),xx), match(names(p),paste0(xx,lava.options()$symbol[2],xx))))) covariance(x) <- xx[i1] } } ## index(x) <- reindex(x) vv <- vars(x) nn <- setdiff(vv,parameter(x)) mu <- unlist(lapply(x$mean, function(l) ifelse(is.na(l)|is.character(l),0,l))) xf <- intersect(unique(parlabels(x)),xx) xfix <- c(randomslope(x),xf); if (length(xfix)>0) normal <- FALSE ## Match parameter names if ((!is.null(names(p)) && all(!is.na(names(p)))) || length(p)!=(index(x)$npar+index(x)$npar.mean+index(x)$npar.ex) | is.null(names(p))) { nullp <- is.null(p) p0 <- p ep <- NULL ei <- which(index(x)$e1==1) if (length(ei)>0) ep <- unlist(x$expar)[ei] p <- c(rep(1, index(x)$npar+index(x)$npar.mean),ep) p[seq_len(index(x)$npar.mean)] <- 0 
p[index(x)$npar.mean + variances(x)] <- sigma p[index(x)$npar.mean + offdiags(x)] <- rho if (!nullp) { c1 <- coef(x,mean=TRUE,fix=FALSE) c2 <- coef(x,mean=TRUE,fix=FALSE,labels=TRUE) idx1 <- na.omit(match(names(p0),c1)) idx11 <- na.omit(match(names(p0),c2)) idx2 <- na.omit(which(names(p0)%in%c1)) idx22 <- na.omit(which(names(p0)%in%c2)) if (length(idx1)>0 && !any(is.na(idx1))) p[idx1] <- p0[idx2] if (length(idx11)>0 && !any(is.na(idx11))) p[idx11] <- p0[idx22] } } M <- modelVar(x,p,data=NULL) A <- M$A; P <- M$P if (!is.null(M$v)) mu <- M$v ## Square root of residual variance matrix PP <- with(svd(P), v%*%diag(sqrt(d),nrow=length(d))%*%t(u)) ## Multivariate distributions mdist <- distribution(x,multivariate=TRUE)$var mdistnam <- names(mdist) mii <- match(mdistnam,vars(x)) if (length(distribution(x))>0 ) { ii <- match(names(distribution(x)),vv) ii.mvn <- setdiff(seq(ncol(P)),c(ii,mii)) } else { ii.mvn <- seq(ncol(P)) } PPdiag <- sum(abs(offdiag(PP[ii.mvn,ii.mvn,drop=FALSE])^2))<1e-20 } if (!setup) { E <- matrix(0,ncol=ncol(P),nrow=n) if (length(ii.mvn)>0) { ## Error term for conditional normal distributed variables if (PPdiag) { for (i in ii.mvn) E[,i] <- rnorm(n,sd=PP[i,i]) } else { E[,ii.mvn] <- matrix(rnorm(length(ii.mvn)*n),ncol=length(ii.mvn))%*%PP[ii.mvn,ii.mvn,drop=FALSE] } } if (length(mdistnam)>0) { fun <- distribution(x,multivariate=TRUE)$fun for (i in seq_along(fun)) { mv <- names(which(unlist(mdist)==i)) ii <- match(mv,vv) E[,ii] <- distribution(x,multivariate=TRUE)$fun[[i]](n,p=p,object=x) # ,...) 
} } ## Simulate exogenous variables (covariates) res <- matrix(0,ncol=length(nn),nrow=n) colnames(res) <- nn } if (!loadconfig) { vartrans <- names(x$attributes$transform) multitrans <- multitrans.idx <- NULL if (length(x$attributes$multitransform)>0) { multitrans <- unlist(lapply(x$attributes$multitransform,function(z) z$y)) for (i in (seq_along(x$attributes$multitransform))) { multitrans.idx <- c(multitrans.idx,rep(i,length(x$attributes$multitransform[[i]]$y))) } } xx <- unique(c(exogenous(x, latent=FALSE, index=TRUE),xfix)) xx <- setdiff(xx,vartrans) X.idx <- match(xx,vv) } if (!setup) { res[,X.idx] <- t(mu[X.idx]+t(E[,X.idx])) if (is.null(X) || NCOL(X)0) for (i in seq_along(xx)) { mu.x <- mu[X.idx[i]] dist.x <- distribution(x,xx[i])[[1]] if (is.list(dist.x) && is.function(dist.x[[1]])) dist.x <- dist.x[[1]] if (is.list(dist.x)) { dist.x <- dist.x[[1]] if (length(dist.x)==1) dist.x <- rep(dist.x,n) } if (is.function(dist.x)) { res[,X.idx[i]] <- dist.x(n=n,mu=mu.x,var=P[X.idx[i],X.idx[i]]) } else { if (is.null(dist.x) || is.na(dist.x)) { } else { if (length(dist.x)!=n) stop("'",vv[X.idx[i]], "' fixed at length ", length(dist.x)," != ",n,".") res[,X.idx[i]] <- dist.x ## Deterministic } } } } if (!is.null(X)) { ii <- match(colnames(X),vv) res0 <- res for (i in seq(ncol(X))) res[,ii[i]] <- X[,i] } } simuled <- c(xx) resunlink <- NULL if (unlink) { resunlink <- res } if ( normal | ( is.null(distribution(x)) & is.null(functional(x)) & is.null(constrain(x))) ) { if(cond) { ## Simulate from conditional distribution of Y given X mypar <- pars(x,A,P,mu) Ey.x <- predict(x, mypar, data.frame(res)) Vy.x <- attributes(Ey.x)$cond.var PP <- with(svd(Vy.x), v%*%diag(sqrt(d),nrow=length(d))%*%t(u)) yy <- Ey.x + matrix(n*ncol(Vy.x),ncol=ncol(Vy.x))%*%PP res <- cbind(yy, res[,xx]); colnames(res) <- c(colnames(Vy.x),xx) return(res) } ## Simulate from sim. 
distribution (Y,X) (mv-normal) I <- diag(nrow=length(nn)) IAi <- Inverse(I-t(A)) colnames(E) <- vv dd <- t(apply(heavytail.sim.hook(x,E),1,function(x) x+mu)) res <- dd%*%t(IAi) colnames(res) <- vv } else { if (!loadconfig) { xc <- index(x)$vars xconstrain.idx <- unlist(lapply(lapply(constrain(x),function(z) attributes(z)$args),function(z) length(intersect(z,xc))>0)) xconstrain <- intersect(unlist(lapply(constrain(x),function(z) attributes(z)$args)),xc) xconstrain.par <- names(xconstrain.idx)[xconstrain.idx] covparnames <- unique(as.vector(covariance(x)$labels)) exo_constrainY <- intersect(exogenous(x),names(x$constrainY)) } if (setup) { sim.env <- c() sim.env[v.env] <- list(NULL) for (v in v.env) if (!is.null(get(v))) sim.env[[v]] <- get(v) x$sim.env <- sim.env return(x) } if (length(xconstrain)>0) for (i in which(xconstrain.idx)) { ff <- constrain(x)[[i]] myargs <- attributes(ff)$args D <- matrix(0,n,length(myargs)) for (j in seq_len(ncol(D))) { if (myargs[j]%in%xconstrain) D[,j] <- res[,myargs[j]] else D[,j] <- M$parval[[myargs[j]]] } val <- try(apply(D,1,ff),silent=TRUE) if (inherits(val,"try-error") || NROW(val)0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(x))[i] y <- names(which(unlist(lapply(intercept(x),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain <- c(xconstrain,list(el)) } } yconstrain <- unlist(lapply(xconstrain,function(x) x$endo)) for (i in exo_constrainY) { cc <- x$constrainY[[i]] args <- cc$args args <- if (is.null(args) || length(args)==0) res[,i] else res[,args] res[,i] <- cc$fun(args,p) # ,...) 
} res <- data.frame(res) if (length(vartrans)>0) { parvals <- parpos(x)$parval parvalsnam <- setdiff(names(parvals),xx) if (length(parvalsnam)>0) { Parvals <- p[unlist(parvals)]; res <- cbind(res, cbind(rep(1,nrow(res)))%x%rbind(Parvals)) colnames(res)[seq(length(Parvals))+ncol(res)-length(Parvals)] <- names(parvals) } } leftovers <- c() itercount <- 0 while (length(simuled)0) stop("Infinite loop (feedback).") itercount <- itercount+1 } for (i in leftovers) { if (length(Yfix)>0 && i %in% colnames(Yfix)) { if (NROW(Yfix) == 1) { res[,i] <- rep(Yfix[,i],length.out=nrow(res)) } else res[,i] <- rep(Yfix[,i,drop=TRUE],length.out=nrow(res)) simuled <- c(simuled,i) next } if (i%in%vartrans) { xtrans <- x$attributes$transform[[i]]$x if (all(xtrans%in%c(simuled,names(parvals)))) { xtr <- res[,xtrans,drop=FALSE] if (use.labels) { lb <- x$attributes$labels lb.idx <- na.omit(match(names(lb),xtrans)) ## For categorical variables turn them into factors so we can ## use the actual labels in function calls/transform if (length(lb.idx)>0) { xtr <- as.data.frame(xtr) for (lb0 in lb.idx) { lab <- lb[[names(xtr)[lb0]]] xtr[,lb0] <- factor(xtr[,lb0],levels=seq_along(lab)-1,labels=lab) } } } suppressWarnings(yy <- with(x$attributes$transform[[i]], fun(xtr))) ##fun(res[,xtrans]))) if (NROW(yy) != NROW(res)) { ## apply row-wise res[,i] <- with(x$attributes$transform[[i]], ##apply(res[,xtrans,drop=FALSE],1,fun)) apply(xtr,1,fun)) } else { colnames(yy) <- NULL res[,i] <- yy } simuled <- c(simuled,i) } } else if (i%in%multitrans) { idx0 <- match(i,multitrans) idx <- multitrans.idx[idx0] mtval <- x$attributes$multitransform[[idx]] if (all(mtval$x%in%simuled)) { res[,mtval$y] <- mtval$fun(res[,mtval$x]) simuled <- c(simuled,mtval$y) break; } } else { ipos <- which(yconstrain%in%i) if (length(ipos)==0 || all(xconstrain[[ipos]]$exo%in%simuled)) { pos <- match(i,vv) relations <- colnames(A)[A[,pos]!=0] simvars <- x$attributes$simvar[[i]] dist.i <- distribution(x,i)[[1]] ## User-specified 
distribution function dist.xx <- NULL if (is.function(dist.i)) { dist.args0 <- names(formals(dist.i)) dist.args <- setdiff(dist.args0,c("n","mean","mu","var","...")) dist.xx <- intersect(names(res),dist.args) ## Variables influencing distribution } if (all(c(relations,simvars,dist.xx)%in%simuled)) { ## Only depending on already simulated variables if (x$mean[[pos]]%in%xconstrain.par && length(ipos)==0) { mu.i <- res[,x$mean[[pos]] ] } else { mu.i <- mu[pos] } if (length(ipos)>0) { pp <- unlist(M$parval[xconstrain[[ipos]]$warg]) myidx <- with(xconstrain[[ipos]],order(c(wargidx,exoidx))) ## myidx <- with(xconstrain[[ipos]], ## match(attr(func,"args"), c(warg,exo))) X <- with(xconstrain[[ipos]], if (length(pp)>0) cbind(rbind(pp)%x%cbind(rep(1,nrow(res))), res[,exo,drop=FALSE]) else res[,exo,drop=FALSE]) yy <- try(with(xconstrain[[ipos]], func(X[,myidx])),silent=TRUE) if (NROW(yy) != NROW(res)) { ## apply row-wise mu.i <- #mu.i + with(xconstrain[[ipos]], apply(res[,exo,drop=FALSE],1, function(x) func( unlist(c(pp,x))[myidx]))) } else { mu.i <- ##mu.i+ yy } } for (From in relations) { f <- functional(x,i,From)[[1]] if (!is.function(f)) f <- function(x,...) x reglab <- regfix(x)$labels[From,pos] if (reglab%in%c(xfix,xconstrain.par)) { if (is.function(f)) { if (length(formals(f))>1) { mu.i <- mu.i + res[,reglab]*f(res[,From],p) } else { mu.i <- mu.i + res[,reglab]*f(res[,From]) } } else mu.i <- mu.i + res[,reglab]*res[,From] } else { if (is.function(f)) { if (length(formals(f))>1) { mu.i <- mu.i + A[From,pos]*f(res[,From],p) } else { mu.i <- mu.i + A[From,pos]*f(res[,From]) } } else mu.i <- mu.i + A[From,pos]*res[,From] } } if (!is.function(dist.i)) { res[,pos] <- mu.i + E[,pos] if (unlink) resunlink[,pos] <- res[,pos] } else { if (length(simvars)>0) { ## Depends on mu and also on other variables (e.g. 
time-depending effect) if (length(mu.i)==1) mu.i <- rep(mu.i,n) mu.i <- cbind("m0"=mu.i,res[,simvars,drop=FALSE]) } new.args <- list(n=n) mu.arg <- intersect(c("mean","mu"),dist.args0) if (length(mu.arg)>0) { new.args <- c(new.args,list(mu.i)) names(new.args)[length(new.args)] <- mu.arg[1] } var.arg <- intersect(c("var"),dist.args0) if (length(var.arg)>0) { new.args <- c(new.args,list(P[pos,pos])) names(new.args)[length(new.args)] <- var.arg[1] } for (jj in dist.xx) { new.args <- c(new.args,list(res[,jj,drop=TRUE])) names(new.args)[length(new.args)] <- jj } res[,pos] <- do.call(dist.i,new.args) if (unlink) resunlink[,pos] <- mu.i } if (length(x$constrainY)>0 && i%in%names(x$constrainY)) { cc <- x$constrainY[[i]] args <- cc$args args <- if (is.null(args) || length(args)==0) res[,pos] else { ii <- intersect(names(M$parval),args) args0 <- args args <- res[,intersect(args,colnames(res)),drop=FALSE] if (length(ii)>0) { pp <- rbind(unlist(M$parval[ii]))%x%cbind(rep(1,n)) colnames(pp) <- ii args <- cbind(res,pp)[,args0,drop=FALSE] } } res[,pos] <- cc$fun(args,p) # ,...) } simuled <- c(simuled,i) } } } } } res <- res[,nn,drop=FALSE] } res <- as.data.frame(res) myhooks <- gethook("sim.hooks") for (f in myhooks) { res <- do.call(f, list(x=x,data=res,p=p,modelpar=M)) } if (unlink) res <- resunlink res <- as.data.frame(res) self <- x$attributes$selftransform for (v in names(self)) { res[,v] <- self[[v]](res[,v]) } if (!latent && length(latent(x))>0) return(subset(res[,-which(colnames(res)%in%latent(x))])) return(res) } ##' @export simulate.lvm <- function(object,nsim,seed=NULL,...) { sim(object,nsim,seed=seed,...) } ##' @export simulate.lvmfit <- function(object,nsim,seed=NULL,...) { sim(object,nsim,seed=seed,...) } lava/R/mixture.R0000644000176200001440000005045413520655354013236 0ustar liggesusers###{{{ mixture #' Estimate mixture latent variable model. 
#' #' Estimate mixture latent variable model #' #' Estimate parameters in a mixture of latent variable models via the EM #' algorithm. #' #' The performance of the EM algorithm can be tuned via the \code{control} #' argument, a list where a subset of the following members can be altered: #' #' \describe{ \item{start}{Optional starting values} \item{nstart}{Evaluate #' \code{nstart} different starting values and run the EM-algorithm on the #' parameters with largest likelihood} \item{tol}{Convergence tolerance of the #' EM-algorithm. The algorithm is stopped when the absolute change in #' likelihood and parameter (2-norm) between successive iterations is less than #' \code{tol}} \item{iter.max}{Maximum number of iterations of the #' EM-algorithm} \item{gamma}{Scale-down (i.e. number between 0 and 1) of the #' step-size of the Newton-Raphson algorithm in the M-step} \item{trace}{Trace #' information on the EM-algorithm is printed on every \code{trace}th #' iteration} } #' #' Note that the algorithm can be aborted any time (C-c) and still be saved #' (via on.exit call). #' #' @param x List of \code{lvm} objects. If only a single \code{lvm} object is #' given, then a \code{k}-mixture of this model is fitted (free parameters #' varying between mixture components). #' @param data \code{data.frame} #' @param k Number of mixture components #' @param control Optimization parameters (see details) #' #type Type of EM algorithm (standard, classification, stochastic) #' @param vcov of asymptotic covariance matrix (NULL to omit) #' @param names If TRUE returns the names of the parameters (for defining starting values) #' @param ... Additional arguments parsed to lower-level functions #' @author Klaus K. 
Holst #' @seealso \code{mvnmix} #' @keywords models regression #' @examples #' #' \donttest{ #' m0 <- lvm(list(y~x+z,x~z)) #' distribution(m0,~z) <- binomial.lvm() #' d <- sim(m0,2000,p=c("y~z"=2,"y~x"=1),seed=1) #' #' ## unmeasured confounder example #' m <- baptize(lvm(y~x, x~1)); #' intercept(m,~x+y) <- NA #' #' set.seed(42) #' M <- mixture(m,k=2,data=d,control=list(trace=1,tol=1e-6)) #' summary(M) #' lm(y~x,d) #' estimate(M,"y~x") #' ## True slope := 1 #' } #' #' @export mixture mixture <- function(x, data, k=length(x), control=list(), vcov="observed", names=FALSE, ...) { MODEL <- "normal" ##type=c("standard","CEM","SEM"), ## type <- tolower(type[1]) ## if (type[1]!="standard") { ## return(mixture0(x,data=data,k=k,control=control,type=type,...)) ## } optim <- list( rerun=TRUE, ## EM (accelerated): K=1, ## K=1, first order approx, slower but some times more stable square=TRUE, step.max0=1, step.min0=1, mstep=4, kr=1, objfn.inc=2, keep.objfval=TRUE, convtype= "parameter", ## convtype = "objfn", maxiter=1500, tol=1e-5, trace=0, ## Starting values: start=NULL, startbounds=c(-2,2), startmean=FALSE, nstart=1, prob=NULL, ## Newton raphson: delta=1e-2, constrain=TRUE, stopc=2, lbound=1e-9, stabil=TRUE, gamma=0.5, gamma2=1, newton=10, lambda=0 # Stabilizing factor (avoid singularities of I) ) if (!missing(control)) optim[base::names(control)] <- control if ("iter.max"%in%base::names(optim)) optim$maxiter <- optim$iter.max if (k==1) { if (is.list(x)) res <- estimate(x[[1]],data,...) else res <- estimate(x,data,...) 
return(res) } start0 <- NULL xx <- x if (inherits(x,"lvm")) { xx <- rep(list(x), k) } mg <- multigroup(xx, rep(list(data),k), fix=FALSE) ppos <- parpos(mg) parname <- attr(ppos,"name") naparname <- which(is.na(parname)) parname[naparname] <- mg$name[naparname] if (names) { return(parname) } if (class(x)[1]=="lvm") { index(x) <- reindex(x,zeroones=TRUE,deriv=TRUE) if ((is.null(optim$start) || length(optim$start)0) lower[vpos] <- optim$lbound ## Setup optimization constraints } lower <- c(rep(-Inf,mg$npar.mean), lower) constrained <- which(is.finite(lower)) if (!any(constrained)) optim$constrain <- FALSE mymodel <- list(multigroup=mg,k=k,data=data,parpos=ParPos); class(mymodel) <- "lvm.mixture" if (any(is.na(optim$start)) || length(optim$start)0) start[mg$npar.mean + offdiagpos] <- 0 if (optim$nstart>1) { myll <- constrLogLikS(start) for (i in 1:optim$nstart) { newstart <- runif(Npar,optim$startbounds[1],optim$startbounds[2]); newmyll <- constrLogLikS(newstart) if (newmyllNpar) { optim$prob <- optim$start[Npar+seq_len(k-1)] optim$start <- optim$start[seq_len(Npar)] } if (is.null(optim$prob)) optim$prob <- rep(1/k,k-1) thetacur <- optim$start probcur <- with(optim, c(prob,1-sum(prob))) if (optim$constrain) { thetacur[constrained] <- log(thetacur[constrained]) } PosteriorProb <- function(pp,priorprob,constrain=FALSE) { if (!is.list(pp)) { if (constrain) { pp[constrained] <- exp(pp[constrained]) } if (missing(priorprob)) priorprob <- pp[seq(Npar+1,length(pp))] pp <- lapply(ParPos,function(x) pp[x]) } priorprob <- pmax(priorprob,1e-16) priorprob <- priorprob/sum(priorprob) k <- length(pp) logff <- sapply(seq(k), function(j) (logLik(mg$lvm[[j]],p=pp[[j]],data=data,indiv=TRUE,model=MODEL))) logplogff <- t(apply(logff,1, function(z) z+log(priorprob))) ## Log-sum-exp (see e.g. 
NR) zmax <- apply(logplogff,1,max) logsumpff <- log(rowSums(exp(logplogff-zmax)))+zmax gamma <- exp(apply(logplogff,2,function(y) y - logsumpff)) ## Posterior class probabilities return(gamma) } negLogLik <- function(p) { if (optim$constrain) { p[constrained] <- exp(p[constrained]) } myp <- lapply(ParPos,function(x) p[x]) K <- length(myp) prob <- p[seq(Npar+1,Npar+K-1)]; prob <- c(prob,1-sum(prob)) logff <- sapply(1:length(myp), function(j) (logLik(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE,model=MODEL))) ## logff <- sapply(1:length(myp), ## function(j) -normal_objective.lvm(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE)) logplogff <- t(apply(logff,1, function(y) y+log(prob))) zmax <- apply(logplogff,1,max) logsumpff <- log(rowSums(exp(logplogff-zmax)))+zmax loglik <- sum(logsumpff) return(-loglik) } ObjEstep <- function(p,gamma,pr) { if (optim$constrain) { p[constrained] <- exp(p[constrained]) } myp <- lapply(ParPos,function(x) p[x]) loglik = 0; for (j in seq_along(myp)) loglik = loglik + gamma[,j]*(logLik(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE,model=MODEL)) ## logff <- sapply(1:length(myp), function(j) gamma[,j]*(logLik(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE,model=MODEL))) ## zmax <- apply(logff,1,max) ## ffz <- apply(logff,2,function(x) exp(x-zmax)) ## logsff <- log(rowSums(ffz))+zmax ## loglik <- sum(logsff) return(-sum(loglik)) } GradEstep <- function(p,gamma,pr) { if (optim$constrain) { p[constrained] <- exp(p[constrained]) } myp <- lapply(ParPos,function(x) p[x]) ## logff <- sapply(1:length(myp), function(j) log(gamma[,j])+(logLik(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE,model=MODEL))) ## Exp-sum normalization: exp(xi)/sum(exp(xi)) = exp(xi-b)*exp(b)/sum(exp(xi-b)exp(b)) ## = exp(xi-b)/sum(exp(xi-b)), b=max(xi) ## zmax <- apply(logff,1,max) ## ffz <- apply(logff,2,function(x) exp(x-zmax)) ## sffz <- rowSums(ffz) D <- lapply(1:length(myp), function(j) { ## K <- ffz[,j]/sffz val <- score(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE,model=MODEL) 
apply(val,2,function(x) x*gamma[,j]) }) D0 <- matrix(0,nrow(data),length(p)) for (j in 1:k) D0[,ParPos[[j]]] <- D0[,ParPos[[j]]]+D[[j]] S <- colSums(D0) if (optim$constrain) { S[constrained] <- S[constrained]*p[constrained] } return(-as.vector(S)) } Information <- function(p,gamma,pr) { if (optim$constrain) { p[constrained] <- exp(p[constrained]) } myp <- lapply(ParPos,function(x) p[x]) D <- lapply(1:length(myp), function(j) { ## K <- ffz[,j]/sffz val <- score(mg$lvm[[j]],p=myp[[j]],data=data,indiv=TRUE,model=MODEL) apply(val,2,function(x) x*gamma[,j]) }) D0 <- matrix(0,nrow(data),length(p)) for (j in 1:k) D0[,ParPos[[j]]] <- D0[,ParPos[[j]]]+D[[j]] if (optim$constrain) { for (j in constrained) D0[,j] <- D0[,j]*p[j] } S <- colSums(D0) structure(crossprod(D0), grad=S) } EMstep <- function(p,all=FALSE) { thetacur <- p[seq(Npar)] gamma <- PosteriorProb(p,constrain=optim$constrain) probcur <- colMeans(gamma) ## I <- function(p) { ## I <- Information(p,gamma,probcur) ## D <- attr(I, "grad") ## res <- -Inverse(I) ## res <- -I ## attributes(res)$grad <- D ## res ## } D <- function(p) GradEstep(p,gamma,probcur) ## if (optim$newton>0) { ## newpar <- NR(thetacur, gradient=D) ## } ## if (mean(newpar$gradient^2)>optim$tol) { newpar <- nlminb(thetacur,function(p) ObjEstep(p,gamma,probcur), D) ## } thetacur <- newpar$par thetacur0 <- thetacur if (optim$constrain) { thetacur0[constrained] <- exp(thetacur[constrained]) } p <- c(thetacur,probcur) if (all) { res <- list(p=p,gamma=gamma, theta=rbind(thetacur0), prob=rbind(probcur)) return(res) } return(p) } em.idx <- match(c("K","method","square","step.min0","step.max0","mstep", "objfn.inc","kr", ##"keep.objfval","convtype", "maxiter","tol","trace"),base::names(optim)) em.control <- optim[na.omit(em.idx)] if (!is.null(em.control$trace)) em.control$trace <- em.control$trace>0 p <- c(thetacur,probcur) opt <- SQUAREM::squarem(p,fixptfn=EMstep,##objfn=negLogLik, control=em.control) ## opt2 <- nlminb(opt$par, function(p) negLogLik(p=p), 
control=list(trace=1)) val <- EMstep(opt$par,all=TRUE) delta <- 1e-6 if (any(val$prob|z|)") Types <- rep("other",length(parname)) Variable <- rep(NA,length(parname)) From <- rep(NA,length(parname)) Latent <- c() for (i in 1:length(mm)) { cc.idx <- order(coef(mm[[i]],p=seq_along(p[[i]]),type=2)[,1]) cc <- coef(mm[[i]],p=p[[i]],vcov=vcov(object)[myp[[i]],myp[[i]]],data=NULL,labels=labels,type=2) Latent <- union(Latent,attr(cc,"latent")) Coefs[ParPos[[i]],] <- cc[cc.idx,,drop=FALSE] Types[ParPos[[i]]] <- attr(cc,"type")[cc.idx] Variable[ParPos[[i]]] <- attr(cc, "var")[cc.idx] From[ParPos[[i]]] <- attr(cc, "from")[cc.idx] cc <- CoefMat(mm[[i]],p=p[[i]],vcov=vcov(object)[myp[[i]],myp[[i]]],data=NULL,labels=labels) coefs <- c(coefs, list(cc)) ncluster <- c(ncluster,sum(object$member==i)) } rownames(Coefs) <- parname res <- list(coef=coefs, coefmat=Coefs, coeftype=Types, type=type, var=Variable, from=From, latent=Latent, ncluster=ncluster, prob=tail(object$prob,1), AIC=AIC(object), s2=sum(score(object)^2)) class(res) <- "summary.lvm.mixture" return(res) } ##' @export print.summary.lvm.mixture <- function(x,...) 
{ if (x$type>0) { cc <- x$coefmat attr(cc,"type") <- x$coeftype attr(cc,"latent") <- x$latent attr(cc,"var") <- x$var attr(cc,"from") <- x$from cat("Mixing parameters:\n") cat(" ", paste(as.vector(formatC(x$prob))),"\n") print(CoefMat(cc), quote=FALSE) return(invisible()) } for (i in 1:length(x$coef)) { cat("Cluster ",i," (n=",x$ncluster[i],", Prior=", formatC(x$prob[i]),"):\n",sep="") cat(rep("-",50),"\n",sep="") print(x$coef[[i]], quote=FALSE) if (i0 && label) { ## nn[ii] <- paste0("p",seq_along(ii)) ## } nn <- object$parname colnames(res) <- nn if (class) return(object$gammas) if (list) { res <- list() for (i in 1:object$k) { nn <- coef(object$multigroup$lvm[[i]]) cc <- coef(object)[object$parpos[[i]]] base::names(cc) <- nn res <- c(res, list(cc)) } return(res) } if (full) { pp <- object$prob[,seq(ncol(object$prob)-1),drop=FALSE] colnames(pp) <- paste0("pr",seq(ncol(pp))) res <- cbind(res,pp) } if (prob) { res <- object$prob } if (missing(iter)) return(res[N,,drop=TRUE]) else return(res[iter,]) } ###}}} coef ##' @export model.frame.lvm.mixture <- function(formula,...) { return(formula$data) } ##' @export iid.lvm.mixture <- function(x,...) { bread <- vcov(x) structure(t(bread%*%t(score(x,indiv=TRUE))),bread=bread) } ##' @export manifest.lvm.mixture <- function(x,...) { manifest(x$multigroup,...) 
} lava/R/multipletesting.R0000644000176200001440000000410713520655354014764 0ustar liggesuserspzmax <- function(alpha,S) { ##P(Zmax > z) Family wise error rate, Zmax = max |Z_i| if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") k <- nrow(S) z <- qnorm(1-alpha/2) 1-mets::pmvn(lower=rep(-z,k),upper=rep(z,k),sigma=cov2cor(S)) } ##' @export p.correct <- function(object,idx,alpha=0.05) { S <- vcov(object); if (!missing(idx)) S <- S[idx,idx,drop=FALSE] f <- function(a) pzmax(a,S)-alpha uniroot(f,lower=0,upper=0.05)$root } ##' Closed testing procedure ##' ##' Closed testing procedure ##' @aliases closed.testing p.correct ##' @param object estimate object ##' @param idx Index of parameters to adjust for multiple testing ##' @param null Null hypothesis value ##' @param ... Additional arguments ##' @export ##' @examples ##' m <- lvm() ##' regression(m, c(y1,y2,y3,y4,y5,y6,y7)~x) <- c(0,0.25,0,0.25,0.25,0,0) ##' regression(m, to=endogenous(m), from="u") <- 1 ##' variance(m,endogenous(m)) <- 1 ##' set.seed(2) ##' d <- sim(m,200) ##' l1 <- lm(y1~x,d) ##' l2 <- lm(y2~x,d) ##' l3 <- lm(y3~x,d) ##' l4 <- lm(y4~x,d) ##' l5 <- lm(y5~x,d) ##' l6 <- lm(y6~x,d) ##' l7 <- lm(y7~x,d) ##' ##' (a <- merge(l1,l2,l3,l4,l5,l6,l7,subset=2)) ##' if (requireNamespace("mets",quietly=TRUE)) { ##' p.correct(a) ##' } ##' as.vector(closed.testing(a)) ##' closed.testing <- function(object,idx=seq_along(coef(object)),null=rep(0,length(idx)),...) 
{ B <- diag(nrow=length(idx)) e <- estimate(object,keep=idx) combs <- pvals <- c() for (i in seq_along(idx)) { co <- combn(length(idx),i) pp <- numeric(ncol(co)) for (j in seq_along(pp)) { pp[j] <- compare(e,contrast=B[co[,j],,drop=FALSE],null=null[co[,j]],...)$p.value } combs <- c(combs,list(co)) pvals <- c(pvals,list(pp)) } pmax <- c() for (k in seq_along(idx)) { pk <- c() for (i in seq_along(idx)) { cols <- apply(combs[[i]],2,function(x) k%in%x) pk <- c(pk,pvals[[i]][which(cols)]) } pmax <- c(pmax,max(pk)) } return(structure(pmax,comb=combs,pval=pvals)) } lava/R/spaghetti.R0000644000176200001440000002353313520655354013527 0ustar liggesusers##' Spaghetti plot for longitudinal data ##' ##' @title Spaghetti plot ##' @param formula Formula (response ~ time) ##' @param data data.frame ##' @param id Id variable ##' @param group group variable ##' @param type Type (line 'l', stair 's', ...) ##' @param lty Line type ##' @param pch Colour ##' @param col Colour ##' @param alpha transparency (0-1) ##' @param lwd Line width ##' @param level Confidence level ##' @param trend.formula Formula for trendline ##' @param tau Quantile to estimate (trend) ##' @param trend.lty Trend line type ##' @param trend.join Trend polygon ##' @param trend.delta Length of limit bars ##' @param trend Add trend line ##' @param trend.col Colour of trend line ##' @param trend.alpha Transparency ##' @param trend.lwd Trend line width ##' @param trend.jitter Jitter amount ##' @param legend Legend ##' @param by make separate plot for each level in 'by' (formula, name of column, or vector) ##' @param xlab Label of X-axis ##' @param ylab Label of Y-axis ##' @param add Add to existing device ##' @param ... Additional arguments to lower level arguments ##' @author Klaus K. 
Holst ##' @export ##' @examples ##' if (interactive() & requireNamespace("mets")) { ##' K <- 5 ##' y <- "y"%++%seq(K) ##' m <- lvm() ##' regression(m,y=y,x=~u) <- 1 ##' regression(m,y=y,x=~s) <- seq(K)-1 ##' regression(m,y=y,x=~x) <- "b" ##' N <- 50 ##' d <- sim(m,N); d$z <- rbinom(N,1,0.5) ##' dd <- mets::fast.reshape(d); dd$num <- dd$num+3 ##' spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), ##' trend.formula=~factor(num),trend=TRUE,trend.col="darkblue") ##' dd$num <- dd$num+rnorm(nrow(dd),sd=0.5) ## Unbalance ##' spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), ##' trend=TRUE,trend.col="darkblue") ##' spaghetti(y~num,dd,id="id",lty=1,col=Col(1,.4), ##' trend.formula=~num+I(num^2),trend=TRUE,trend.col="darkblue") ##' } spaghetti <- function(formula,data,id="id",group=NULL, type="o",lty=1,pch=NA,col=1:10,alpha=0.3,lwd=1, level=0.95, trend.formula=formula,tau=NULL, trend.lty=1,trend.join=TRUE,trend.delta=0.2, trend=!is.null(tau),trend.col=col, trend.alpha=0.2,trend.lwd=3, trend.jitter=0, legend=NULL, by=NULL, xlab="Time",ylab="",add=FALSE,...) { ##spaghetti <- function(formula,data,id,type="l",lty=1,col=Col(1),trend=FALSE,trend.col="darkblue",trend.alpha=0.2,trend.lwd=3,xlab="Time",ylab="",...) { if (!lava.options()$cluster.index) stop("mets not available? 
Check 'lava.options()cluster.index'.") if (!is.null(by)) { if (is.character(by) && length(by==1)) { by <- data[,by] } else if (inherits(by,"formula")) { ##by <- model.matrix(update(by,~-1+.), model.frame(~.,data,na.action=na.pass)) by <- model.frame(by,data,na.action=na.pass) } cl <- match.call(expand.dots=TRUE) cl$by <- NULL datasets <- split(data,by) res <- c() for (d in datasets) { cl$data <- d res <- c(res, eval(cl,parent.frame())) } return(invisible(res)) } if (!is.null(group)) { if (is.character(group) && length(group==1)) { M <- data[,group] } else if (inherits(group,"formula")) { ##M <- model.matrix(update(group,~-1+.),data) M <- model.frame(group,data,na.action=na.pass) } else { M <- group } if (!add) plot(formula,data=data,xlab=xlab,ylab=ylab,...,type="n") dd <- split(data,M) K <- length(dd) if (length(type)0))) nn <- widenames[idx] ord <- order(char2num(unlist(lapply(nn,function(x) gsub(vname,"",x))))) idx[ord] } if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (length(x)==0) { data <- data[,c(id,y),drop=FALSE] wide <- mets::fast.reshape(data,id=id,varying=y,...) yidx <- Idx(y,names(wide)) Y <- wide[,yidx,drop=FALSE] X <- NULL matplot(t(Y),type=type,lty=lty,pch=pch,lwd=lwd,col=Col(col[1],alpha[1]),xlab=xlab,ylab=ylab,...) } else { data <- data[,c(id,x,y),drop=FALSE] wide <- mets::fast.reshape(data[order(data[,id],data[,x]),],id=id,varying=c(y,x),...) yidx <- Idx(y,names(wide)) xidx <- Idx(x,names(wide)) Y <- wide[,yidx,drop=FALSE] X <- wide[,xidx,drop=FALSE] matplot(t(X),t(Y),type=type,pch=pch,lty=lty,lwd=lwd,col=Col(col[1],alpha[1]),xlab=xlab,ylab=ylab,add=add,...) if (trend) { if (is.numeric(trend.formula)) { trend.formula <- sort(trend.formula) tf <- toformula(y,"1") res <- c() if (!is.null(tau)) { if (length(trend.alpha)0) confband(trend.formula,res[,j],line=FALSE,col=trend.col[j],lty=trend.lty[j],lwd=trend.lwd[j],delta=trend.delta,...) 
} } else { confband(trend.formula,res[,2],res[,3],res[,1],col=Col(trend.col,trend.alpha),lty=trend.lty,lwd=trend.lwd,polygon=trend.join,...) } } else { tf <- getoutcome(trend.formula) if (is.list(tf)) { trend.formula <- update(trend.formula,toformula(y,".")) } if (!is.null(tau)) { ##if (!require(quantreg)) stop("Install 'quantreg'") suppressWarnings(r1 <- quantreg::rq(trend.formula,data=data,tau=tau)) newdata <- data.frame(seq(min(X,na.rm=TRUE),max(X,na.rm=TRUE),length.out=100)) names(newdata) <- x pr <- predict(r1,newdata=newdata,interval="confidence",level=level) ##confband(xx,pr[,3],pr[,2],polygon=TRUE,col=Col(trend.col,trend.alpha),border=FALSE) for (i in seq_along(tau)) lines(newdata[,1],pr[,i],col=trend.col,lwd=trend.lwd,lty=trend.lty) } else { l1. <- lm(trend.formula,data) l1 <- estimate(l1.,id=data[,id],level=level) xy <- plotConf(l1.,vcov=vcov(l1),data=data,partres=FALSE,plot=FALSE,level=level,...) xx <- xy$x pr <- xy$predict$fit if (is.factor(xx)) { xx <- char2num(as.character(xx)) if (trend.jitter>0) xx <- jitter(xx,trend.jitter) confband(xx,pr[,3],pr[,2],pr[,1],col=trend.col,lwd=2) } else { confband(xx,pr[,3],pr[,2],polygon=TRUE,col=Col(trend.col,trend.alpha),border=FALSE) lines(xx,pr[,1],col=trend.col,lwd=trend.lwd,lty=trend.lty) } } } } } return(invisible(list(Y,X))) } lava/R/children.R0000644000176200001440000000712113520655354013322 0ustar liggesusers##' Generic method for memberships from object (e.g. a graph) ##' ##' @title Extract children or parent elements of object ##' @export ##' @aliases children parents ancestors descendants roots sinks adjMat edgeList ##' @param object Object ##' @param \dots Additional arguments ##' @author Klaus K. Holst "children" <- function(object,...) UseMethod("children") ##' @export "parents" <- function(object,...) UseMethod("parents") ##' @export "roots" <- function(object,...) UseMethod("roots") ##' @export "sinks" <- function(object,...) UseMethod("sinks") ##' @export "descendants" <- function(object,...) 
UseMethod("descendants") ##' @export "ancestors" <- function(object,...) UseMethod("ancestors") ##' @export "adjMat" <- function(object,...) UseMethod("adjMat") ##' @export "edgeList" <- function(object,...) UseMethod("edgeList") ##' @export adjMat.lvm <- function(object,...) t(object$M) ##' @export adjMat.lvmfit <- function(object,...) adjMat(Model(object),...) ##' @export edgeList.lvmfit <- function(object,...) edgeList(Model(object),...) ##' @export edgeList.lvm <- function(object,labels=FALSE,...) { edgelist <- data.frame(from=NULL,to=NULL) A <- adjMat(object) for (i in 1:nrow(A)) { ii <- which(A[,i]>0) if (length(ii)>0) edgelist <- rbind(edgelist,data.frame(from=ii,to=i)) } if (labels) edgelist <- as.data.frame(apply(edgelist,2,function(x) vars(object)[x])) edgelist } ##' @export parents.lvmfit <- function(object,...) parents(Model(object),...) ##' @export children.lvmfit <- function(object,...) children(Model(object),...) ##' @export descendants.lvmfit <- function(object,...) descendants(Model(object),...) ##' @export ancestors.lvmfit <- function(object,...) ancestors(Model(object),...) ##' @export roots.lvmfit <- function(object,...) roots(Model(object),...) ##' @export sinks.lvmfit <- function(object,...) sinks(Model(object),...) ##' @export parents.lvm <- function(object,var,...) { A <- index(object)$A if (missing(var)) { return(rownames(A)) } if (inherits(var,"formula")) var <- all.vars(var) res <- lapply(var, function(v) rownames(A)[A[,v]!=0]) res <- unique(unlist(res)) if (length(res)==0) res <- NULL res } ##' @export children.lvm <- function(object,var,...) { A <- index(object)$A if (missing(var)) { return(rownames(A)) } if (inherits(var,"formula")) var <- all.vars(var) res <- lapply(var, function(v) rownames(A)[A[v,]!=0]) res <- unique(unlist(res)) if (length(res)==0) res <- NULL res } ##' @export ancestors.lvm <- function(object,x,...) 
{ if (inherits(x,"formula")) x <- all.vars(x) res <- c() left <- setdiff(vars(object),x) count <- 0 child <- x while (length(x)>0) { count <- count+1 x <- parents(object,child) child <- intersect(x,left) res <- union(res,child) left <- setdiff(left,child) } if (length(res)==0) res <- NULL return(res) } ##' @export descendants.lvm <- function(object,x,...) { if (inherits(x,"formula")) x <- all.vars(x) res <- c() left <- setdiff(vars(object),x) count <- 0 parent <- x while (length(x)>0) { count <- count+1 x <- children(object,parent) parent <- intersect(x,left) res <- union(res,parent) left <- setdiff(left,parent) } if (length(res)==0) res <- NULL return(res) } ##' @export roots.lvm <- function(object,...) { return(exogenous(object,index=FALSE,...)) } ##' @export sinks.lvm <- function(object,...) { return(endogenous(object,top=TRUE,...)) } lava/R/subgraph.R0000644000176200001440000000114513520655354013345 0ustar liggesuserssubgraph <- function(g,from,to,Tree=new("graphNEL",node=c(to,from),edgemode="directed"),...) { adjnodes <- graph::adj(g,from)[[1]] if (length(adjnodes)==0) return(Tree) for (v in adjnodes) { if (v==to) { Tree <- graph::addEdge(from, v, Tree) } re1 <- graph::acc(g,v)[[1]] ## Reachable nodes from v if ((to %in% names(re1)[re1>0])) { if (!(v %in% graph::nodes(Tree))) Tree <- graph::addNode(v,Tree) Tree <- graph::addEdge(from, v, Tree) Tree <- path(g,v,to,Tree) } } return(Tree) } lava/R/measurement.R0000644000176200001440000000115213520655354014055 0ustar liggesusers##' @export `measurement` <- function(x, ...) 
{ M <- x$M latent.idx <- match(latent(x),vars(x)) obs.idx <- match(manifest(x),vars(x)) if (length(latent.idx)==0) return(NULL) measurementmodels <- c() for (i in seq_along(latent.idx)) { ii <- latent.idx[i] relation <- M[ii,obs.idx]==1 byNodes <- names(relation)[relation] newnodes <- c(latent(x)[i],byNodes) lvm1 <- subset(x,newnodes) measurementmodels <- c(measurementmodels, list(lvm1)) } measurementmodels } lava/R/multinomial.R0000644000176200001440000001643313520655354014072 0ustar liggesusers ##' Estimate probabilities in contingency table ##' ##' @title Estimate probabilities in contingency table ##' @aliases multinomial kappa.multinomial kappa.table gkgamma ##' @param x Formula (or matrix or data.frame with observations, 1 or 2 columns) ##' @param data Optional data.frame ##' @param marginal If TRUE the marginals are estimated ##' @param transform Optional transformation of parameters (e.g., logit) ##' @param vcov Calculate asymptotic variance (default TRUE) ##' @param iid Return iid decomposition (default TRUE) ##' @param ... 
Additional arguments to lower-level functions
##' @export
##' @examples
##' set.seed(1)
##' breaks <- c(-Inf,-1,0,Inf)
##' m <- lvm(); covariance(m,pairwise=TRUE) <- ~y1+y2+y3+y4
##' d <- transform(sim(m,5e2),
##'                z1=cut(y1,breaks=breaks),
##'                z2=cut(y2,breaks=breaks),
##'                z3=cut(y3,breaks=breaks),
##'                z4=cut(y4,breaks=breaks))
##'
##' multinomial(d[,5])
##' (a1 <- multinomial(d[,5:6]))
##' (K1 <- kappa(a1)) ## Cohen's kappa
##'
##' K2 <- kappa(d[,7:8])
##' ## Testing difference K1-K2:
##' estimate(merge(K1,K2,id=TRUE),diff)
##'
##' estimate(merge(K1,K2,id=FALSE),diff) ## Wrong std.err ignoring dependence
##' sqrt(vcov(K1)+vcov(K2))
##'
##' ## Average of the two kappas:
##' estimate(merge(K1,K2,id=TRUE),function(x) mean(x))
##' estimate(merge(K1,K2,id=FALSE),function(x) mean(x)) ## Independence
##'
##'
##' ## Goodman-Kruskal's gamma
##' m2 <- lvm(); covariance(m2) <- y1~y2
##' breaks1 <- c(-Inf,-1,0,Inf)
##' breaks2 <- c(-Inf,0,Inf)
##' d2 <- transform(sim(m2,5e2),
##'                 z1=cut(y1,breaks=breaks1),
##'                 z2=cut(y2,breaks=breaks2))
##'
##' (g1 <- gkgamma(d2[,3:4]))
##' ## same as
##' \dontrun{
##' gkgamma(table(d2[,3:4]))
##' gkgamma(multinomial(d2[,3:4]))
##' }
##'
##' ##partial gamma
##' d2$x <- rbinom(nrow(d2),2,0.5)
##' gkgamma(z1~z2|x,data=d2)
##' @author Klaus K. Holst
multinomial <- function(x,data=parent.frame(),marginal=FALSE,transform,vcov=TRUE,iid=TRUE,...) {
    ## Estimate cell probabilities of a one- or two-dimensional
    ## contingency table together with their asymptotic variance and an
    ## iid (influence function) decomposition.  'x' may be a formula, a
    ## factor/vector, a table, or a matrix/data.frame with 1-2 columns.
    formula <- NULL
    if (inherits(x,"formula")) {
        trm <- terms(x)
        ## Several predictors: collapse them into a single interaction
        ## factor so the result is an ordinary two-way table
        if (length(attr(trm,"term.labels"))>1) {
            x <- update(x,as.formula(paste0(".~ interaction(",
                  paste0(attr(trm,"term.labels"),collapse=","),")")))
            trm <- terms(x)
        }
        formula <- x
        x <- as.matrix(model.frame(trm,data))
        ## Move the response to the last column
        if (ncol(x)>1) x <- x[,c(seq(ncol(x)-1)+1,1),drop=FALSE]
    } else {
        trm <- NULL
    }
    if (!vcov) iid <- FALSE
    ## The iid decomposition needs individual observations, so expand a
    ## table of counts into the corresponding raw data
    if (is.table(x) && iid) x <- lava::Expand(x)
    if (NCOL(x)==1) { ## One-dimensional case
        if (!is.table(x)) {
            x <- as.factor(x)
            lev <- levels(x)
            k <- length(lev)
            n <- length(x)
            P <- table(x)/n
        } else {
            n <- sum(x)
            P <- x/n
            lev <- names(x)
            k <- length(lev)
        }
        if (iid) {
            ## Influence functions: column i is (1{x=lev_i}-p_i)/n
            iid <- matrix(0,n,k)
            for (i in seq(k)) {
                iid[,i] <- (1*(x==lev[i])-P[i])/n
            }
            varcov <- crossprod(iid)
        } else {
            iid <- varcov <- NULL
            if (vcov) {
                ## Analytic multinomial covariance:
                ##   Cov(p_i,p_j) = -p_i*p_j/n (i!=j), Var(p_i) = p_i(1-p_i)/n
                ## BUG FIX: off-diagonal terms must be negative (the
                ## iid-based branch above already yields the negative sign)
                varcov <- -tcrossprod(cbind(P))/n
                diag(varcov) <- P*(1-P)/n
            }
        }
        coefs <- as.vector(P); names(coefs) <- paste0("p",seq(k))
        res <- list(call=match.call(),
                    coef=coefs,P=P,vcov=varcov,iid=iid,position=seq(k),
                    levels=list(lev),data=x,
                    terms=trm)
        class(res) <- "multinomial"
        return(res)
    }
    ## Two-dimensional case
    if (!is.table(x)) {
        if (NCOL(x)!=2L) stop("Matrix or data.frame with one or two columns expected")
        x <- as.data.frame(x)
        x[,1] <- as.factor(x[,1])
        x[,2] <- as.factor(x[,2])
        lev1 <- levels(x[,1])
        lev2 <- levels(x[,2])
        k1 <- length(lev1)
        k2 <- length(lev2)
        M <- table(x)
        n <- sum(M)
    } else {
        lev1 <- rownames(x)
        lev2 <- colnames(x)
        k1 <- length(lev1)
        k2 <- length(lev2)
        M <- x
        n <- sum(x)
    }
    Pos <- P <- M/n
    if (iid) {
        ## Column (j-1)*k1+i of 'iid' is the influence function of cell
        ## (i,j); 'Pos' records that column-major position
        iid <- matrix(0,n,k1*k2)
        for (j in seq(k2)) {
            for (i in seq(k1)) {
                pos <- (j-1)*k1+i
                iid[,pos] <- (x[,1]==lev1[i])*(x[,2]==lev2[j])-P[i,j]
                Pos[i,j] <- pos
            }
        }
        iid <- iid/n
    } else {
        ## NOTE(review): with vcov=TRUE and iid=FALSE a two-way table
        ## yields a NULL 'vcov' (no analytic fallback as in the 1-dim
        ## branch) -- confirm whether that combination is supported
        iid <- varcov <- NULL
    }
    coefs <- as.vector(P)
    names(coefs) <- as.vector(outer(seq(k1),seq(k2),function(...) paste0("p",...)))
    position1 <- position2 <- NULL
    if (marginal) {
        ## Append the marginal probabilities (row and column sums) and
        ## their influence functions (sums of the relevant cell columns)
        p1 <- rowSums(P)
        p2 <- colSums(P)
        names(p1) <- paste0("p",seq(k1),".")
        names(p2) <- paste0("p",".",seq(k2))
        coefs <- c(coefs,p1,p2)
        position1 <- length(P)+seq(k1)
        position2 <- length(P)+k1+seq(k2)
        if (!is.null(iid)) {
            iid1 <- apply(Pos,1,function(x) rowSums(iid[,x]))
            iid2 <- apply(Pos,2,function(x) rowSums(iid[,x]))
            iid <- cbind(iid,iid1,iid2)
            colnames(iid) <- names(coefs)
        }
    }
    if (!missing(transform) && !is.null(iid)) {
        ## Delta method for an (assumed element-wise) transformation
        f <- function(p) do.call(transform,list(p))
        D <- diag(numDeriv::grad(f,coefs),ncol=length(coefs))
        coefs <- f(coefs)
        iid <- iid%*%t(D)
    }
    if (vcov && !is.null(iid)) varcov <- crossprod(iid)
    ## BUG FIX: removed duplicated 'call=match.call()' element
    res <- list(call=match.call(), formula=formula,
                coef=coefs,P=P,vcov=varcov,iid=iid,
                position=Pos,
                levels=list(lev1,lev2), data=x,
                position1=position1,position2=position2, ## Position of marginals
                terms=trm
                )
    class(res) <- "multinomial"
    if (length(list(...))>0) {
        res <- structure(estimate(res,...),class=c("multinomial","estimate"))
    }
    res
}

##' @export
## Return the data underlying the fit
model.frame.multinomial <- function(formula,...) {
    formula$data
}

##' @export
## Influence-function decomposition (n x p matrix, or NULL)
iid.multinomial <- function(x,...) {
    x$iid
}

##' @export
## Estimated cell (and optionally marginal) probabilities
coef.multinomial <- function(object,...) {
    object$coef
}

##' @export
## Asymptotic variance of the estimates
vcov.multinomial <- function(object,...) {
    object$vcov
}

##' @export
## Conditional probabilities P(col|row) ("prob"), or the most probable
## column label per row ("map"/"class")
predict.multinomial <- function(object,newdata,type=c("prob","map"),...) {
    if (missing(newdata) || is.null(newdata)) newdata <- object$data
    if (!is.null(object$formula) && is.data.frame(newdata)) {
        trm <- terms(object$formula)
        newdata <- model.frame(trm,newdata)[,-1]
    }
    px <- rowSums(object$P)              # marginal row probabilities
    idx <- match(trim(as.character(newdata)),trim(rownames(object$P)))
    pcond <- object$P
    for (i in seq(nrow(pcond))) pcond[i,] <- pcond[i,]/px[i]
    pr <- pcond[idx,,drop=FALSE]
    if (tolower(type[1])%in%c("map","class")) {
        pr <- colnames(pr)[apply(pr,1,which.max)]
    }
    return(pr)
}

## logLik.multinomial <- function(object,...) {
## }

##' @export
print.multinomial <- function(x,...) {
    cat("Call: "); print(x$call)
    cat("\nJoint probabilities:\n")
    print(x$P,quote=FALSE)
    if (length(dim(x$P))>1) {
        cat("\nConditional probabilities:\n")
        print(predict(x,newdata=rownames(x$P)),quote=FALSE)
    }
    cat("\n")
    print(estimate(NULL,coef=coef(x),vcov=vcov(x)))
    ## stderr <- diag(vcov(x))^.5
    ## StdErr <- x$position
    ## StdErr[] <- stderr[StdErr]
    ## cat("\nStd.Err:\n")
    ## print(StdErr,quote=FALSE)
    ## cat("\nPosition:\n")
    ## print(x$position,quote=FALSE)
}
lava/R/manifest.R0000644000176200001440000000120213520655354013332 0ustar liggesusers
##' @export
`manifest` <- function(x,...) UseMethod("manifest")

##' @export
## Manifest (observed) variables: all variables minus the latent ones
`manifest.lvm` <- function(x,...) {
    if (length(vars(x))>0)
        setdiff(vars(x),latent(x))
    else
        NULL
}

##' @export
`manifest.lvmfit` <- function(x,...) {
    manifest(Model(x))
}

##' @export
## Concatenation of the manifest variables of each model in the list
## (duplicates are NOT removed -- note commented-out 'unique' below)
manifest.list <- function(x,...) {
    manifestlist <- c()
    for (i in seq_along(x)) {
        manifestlist <- c(manifestlist, manifest(x[[i]]))
    }
    ## endolist <- unique(manifestlist)
    return(manifestlist)
}

##' @export
`manifest.multigroup` <- function(x,...) {
    manifest(Model(x))
}
lava/R/backdoor.R0000644000176200001440000000570013520655354013317 0ustar liggesusers
##' Backdoor criterion
##'
##' Check backdoor criterion of a lvm object
##' @param object lvm object
##' @param f formula. Conditioning, z, set can be given as y~x|z
##' @param cond Vector of variables to condition on
##' @param ...
Additional arguments to lower level functions ##' @param return.graph Return moral ancestral graph with z and effects from x removed ##' @examples ##' m <- lvm(y~c2,c2~c1,x~c1,m1~x,y~m1, v1~c3, x~c3,v1~y, ##' x~z1, z2~z1, z2~z3, y~z3+z2+g1+g2+g3) ##' ll <- backdoor(m, y~x) ##' backdoor(m, y~x|c1+z1+g1) ##' @export backdoor <- function(object, f, cond, ..., return.graph=FALSE) { y <- getoutcome(f, sep = "|") x <- attr(y, "x") if (length(x) > 1) { cond <- all.vars(x[[2]]) } x <- all.vars(x[[1]]) nod <- vars(object) des <- descendants(object, x) ch <- children(object, x) g0 <- cancel(object, toformula(x, ch)) if (!base::missing(cond)) { val <- dsep(g0, c(y, x), cond = cond) && !any(cond %in% des) if (return.graph) { res <- dsep(g0, c(y, x), cond = cond, return.graph=TRUE) attr(res,"result") <- val return(res) } return(val) } cset <- base::setdiff(nod, c(des, x, y)) ## possible conditioning set pp <- path(g0,from=x,to=y,all=TRUE) ## All backdoor paths M <- adjMat(g0) Collider <- function(vec) { M[vec[2],vec[1]] & M[vec[2],vec[3]] } blockList <- collideList <- c() for (i in seq_along(pp)) { p0 <- pp[[i]] blocks <- c() collide <- c() for (j in seq(length(p0)-2)) { if (Collider(p0[0:2 + j])) { collide <- c(collide,p0[1+j]) } else { blocks <- c(blocks,p0[1+j]) } } blockList <- c(blockList,list(blocks)) collideList <- c(collideList,list(collide)) } res <- list(blockList) ## Paths with colliders: col <- unlist(lapply(collideList,function(x) !is.null(x))) if (length(col)>0) col <- which(col) ## List of variables which are not on path between x and y: optional <- setdiff(cset,c(unlist(collideList),unlist(blockList))) callrecurs <- function(col,res=list()) { if (length(col)==0) return(res) blockList0 <- blockList blockList0[col] <- NULL blockList0 <- lapply(blockList0, function(x) setdiff(x,unlist(collideList[col]))) if (!any(unlist(lapply(blockList0,is.null)))) { res <- c(res, list(blockList0)) } for (i in seq_along(col)) { col0 <- col[-i] if (length(col0)>0) res <- 
callrecurs(col0,res) } return(res) } if (length(col)>0) res <- c(res,callrecurs(col)) ## Any element can be included from 'optional' For a given element ## in 'include' at least one element in each member of the list ## must be included in the conditioning set. return(list(optional=optional, include=res)) } lava/R/nonlinear.R0000644000176200001440000001133013520655354013514 0ustar liggesusers##' @export "nonlinear<-" <- function(object,...,value) UseMethod("nonlinear<-") ##' @export "nonlinear" <- function(object,...) UseMethod("nonlinear") naturalcubicspline <- function(x, knots=stats::median(x,na.rm=TRUE), boundary=range(x,na.rm=TRUE)) { ## C2 functions, piecewise cubic breaks <- c(boundary[1],knots,boundary[2]) K <- length(breaks) g <- function(x,tau) (x-tau)^3*((x-tau)>0) gg <- matrix(0,nrow=length(x),ncol=K) for (i in seq(K)) { gg[,i] <- g(x,breaks[i]) } B <- matrix(0,nrow=length(x),ncol=K-2) for (i in seq(K-2)) { B[,i] <- gg[,i] - (breaks[K]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K-1] + (breaks[K-1]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K] } cbind(x,B) } ncspred <- function(mu, var, knots=c(-5,0,5)) { breaks <- knots K <- length(breaks) v <- as.vector(var) k <- sqrt(v/(2*pi)) g <- function(x,tau) { x0 <- (x-tau) x2 <- x0^2 p0 <- 1-pnorm(-x0/sqrt(v)) # P(x>tau|...) k*(2*v + x2)*exp(-(x0/(sqrt(2*v)))^2) + x0*(x2+3*v)*p0 } n <- NROW(mu) gg <- matrix(0,nrow=n,ncol=K) for (i in seq(K)) { gg[,i] <- g(mu,breaks[i]) } B <- matrix(0,nrow=n,ncol=K-2) for (i in seq(K-2)) { B[,i] <- gg[,i] - (breaks[K]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K-1] + (breaks[K-1]-breaks[i])/(breaks[K]-breaks[K-1])*gg[,K] } cbind(mu,B) } ##' @export nonlinear.lvm <- function(object, to, from=NULL, type=c("quadratic"), knots=c(-5,0,5), names, ...) 
{ if (missing(to)) { return(object$attributes$nonlinear) } if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) myvars <- all.vars(to) from <- setdiff(myvars,yy) to <- yy } if (length(to)>1) stop("Supply only one response variable") if (length(from)>1) stop("Supply only one explanatory variable") object <- cancel(object, c(from,to)) variance(object) <- to f <- pred <- NULL if (tolower(type)[1]%in%c("ncs","spline","naturalspline","cubicspline","natural cubic spline")) { if (is.null(knots)) stop("Need cut-points ('knots')") if (length(knots)<3) { warning("Supply at least three knots (one interior and boundaries)") ## Fall-back to linear type <- "linear" } if (missing(names)) names <- paste0(from,"_",seq(length(knots)-1)) f <- function(p,x) { B <- cbind(1,naturalcubicspline(x,knots=knots[-c(1,length(knots))],boundary=knots[c(1,length(knots))])) colnames(B) <- c("(Intercept)",names) as.vector(B%*%p) } pred <- function(mu,var,...) { B <- ncspred(mu,var,knots=knots) structure(B,dimnames=list(NULL,names)) } } if (tolower(type)[1]=="linear") { if (missing(names)) names <- from f <- function(p,x) p[1] + p[2]*x pred <- function(mu,var,...) { structure(cbind(mu[,1]),dimnames=list(NULL,names)) } } if (tolower(type)[1]=="quadratic") { if (missing(names)) names <- paste0(from,"_",1:2) f <- function(p,x) p[1] + p[2]*x + p[3]*(x*x) pred <- function(mu,var,...) { structure(cbind(mu[,1],mu[,1]^2+var[1]),dimnames=list(NULL,names)) } } if (tolower(type)[1]%in%c("piecewise","piecewise linear","linear")) { if (is.null(knots)) stop("Need cut-points ('knots')") } if (tolower(type)[1]%in%c("exp","exponential")) { if (missing(names)) names <- paste0(from,"_",1:2) f <- function(p,x) p[1] + p[2]*x + p[3]*exp(x) pred <- function(mu,var,...) { structure(cbind(mu[,1], exp(0.5*var[1] + mu[,1])),dimnames=list(NULL,names)) } } if (tolower(type)[1]%in%c("exp0")) { if (missing(names)) names <- paste0(from,"_",1) f <- function(p,x) p[1] + p[2]*exp(x) pred <- function(mu,var,...) 
{ structure(cbind(exp(0.5*var[1] + mu[,1])),dimnames=list(NULL,names)) } } object$attributes$nonlinear[[to]] <- list(x=from, p=length(names)+1, newx=names, f=f, pred=pred, type=tolower(type[1])) return(object) } ##' @export nonlinear.lvmfit <- function(object, to, ...) { if (missing(to)) { return(Model(object)$attributes$nonlinear) } Model(object) <- nonlinear(Model(object),to=to,...) return(object) } ##' @export nonlinear.twostage.lvm <- function(object, ...) { return(object$nonlinear) } ##' @export nonlinear.lvmfit <- function(object, ...) { return(object$nonlinear) } ##' @export `nonlinear<-.lvm` <- function(object, ..., type="quadratic", value) { nonlinear(object,to=value,type=type,...) } lava/R/formula.R0000644000176200001440000000106513520655354013200 0ustar liggesusers##' @export formula.lvm <- function(x,char=FALSE,all=FALSE,...) { A <- index(x)$A res <- c() for (i in seq_len(ncol(A))) { if (all || !(colnames(A)[i]%in%c(index(x)$exogenous,parameter(x)) )) { f <- paste(colnames(A)[i],"~ 1") if (any(A[,i]!=0)) { f <- (paste(colnames(A)[i],"~",paste(colnames(A)[A[,i]!=0],collapse="+"))) } if (!char) f <- formula(f) res <- c(res, list(f)) } } return(res) } ##' @export formula.lvmfit <- formula.lvm lava/R/multipleinput.R0000644000176200001440000000152713520655354014451 0ustar liggesuserssimulate.multiple.inputs <- function(x,data,...) { minp <- x$attributes$multiple.inputs if (length(minp)>0) { for (i in seq_along(minp)) { outcome <- names(minp[i]) inp <- minp[[i]]$input fun <- minp[[i]]$fun data[,outcome] <- fun(x, data, inp) } } return(data) } addhook("simulate.multiple.inputs","sim.hooks") printhook.multiple.inputs <- function(x,...) 
{ minp <- x$attributes$multiple.inputs if (length(minp)>0) { outcomes <- names(minp) for (i in seq_along(minp)) { cat(minp[[i]]$type, ":\n\n") st <- paste0(outcomes[i]," ~ ", paste0(minp[[i]]$input,collapse=" | ")) cat(" ", st, "\n") cat("\n") } } return(NULL) } addhook("printhook.multiple.inputs","print.hooks") lava/R/estimate.multigroup.R0000644000176200001440000006335213520655354015563 0ustar liggesusers###{{{ estimate.multigroup ##' @export `estimate.multigroup` <- function(x, control=list(), estimator=NULL, weights, weightsname, data2, id=NULL, messages=lava.options()$messages, quick=FALSE, param, cluster, ...) { cl <- match.call() Optim <- list( iter.max=lava.options()$iter.max, trace=ifelse(lava.options()$debug,3,0), gamma=lava.options()$gamma, ngamma=lava.options()$ngamma, backtrace=TRUE, gamma2=1, lambda=0.05, abs.tol=1e-9, epsilon=1e-10, delta=1e-10, S.tol=1e-6, stabil=FALSE, start=NULL, constrain=lava.options()$constrain, method=NULL, starterfun=startvalues0, information="E", meanstructure=TRUE, sparse=FALSE, lbound=1e-9, reindex=FALSE, tol=lava.options()$tol) if (!missing(param)) { oldparam <- lava.options()$param lava.options(param=param) on.exit(lava.options(param=oldparam)) } if (!missing(cluster)) id <- cluster defopt <- lava.options()[] defopt <- defopt[intersect(names(defopt),names(Optim))] Optim[names(defopt)] <- defopt if (length(control)>0) { Optim[names(control)] <- control } Debug("Start values...") if (!is.null(Optim$start) & length(Optim$start)==(x$npar+x$npar.mean)) { mystart <- Optim$start } else { if (messages>1) cat("Obtaining starting value...") if (is.null(control$starterfun) && lava.options()$param!="relative") Optim$starterfun <- startvalues0 mystart <- with(Optim, starter.multigroup(x,meanstructure=meanstructure,starterfun=starterfun,messages=messages,fix=FALSE)) if (!is.null(Optim$start)) { pname <- names(Optim$start) ppos <- parpos.multigroup(x,p=pname,mean=TRUE) if (any(!is.na(ppos))) mystart[ppos] <- 
Optim$start[na.omit(match(attributes(ppos)$name,pname))] } if (messages>1) cat("\n") } Debug(mystart) Debug("Constraints...") ## Setup optimization constraints lower <- rep(-Inf, x$npar); for (i in seq_len(x$ngroup)) { vpos <- sapply(x$parlist[[i]][variances(x$lvm[[i]],mean=FALSE)], function(y) char2num(substr(y,2,nchar(y)))) if (length(vpos)>0) lower[vpos] <- Optim$lbound } if (Optim$meanstructure) lower <- c(rep(-Inf,x$npar.mean), lower) if (any(Optim$constrain)) { if (length(Optim$constrain)!=length(lower)) constrained <- is.finite(lower) else constrained <- Optim$constrain constrained <- which(constrained) lower[] <- -Inf Optim$constrain <- TRUE mystart[constrained] <- log(mystart[constrained]) } if (!missing(weights)) { if (is.character(weights)) { stweights <- weights weights <- list() for (i in seq_along(x$data)) { newweights <- as.matrix(x$data[[i]][,stweights]) colnames(newweights) <- index(x$lvm[[i]])$endogenous[seq_len(ncol(newweights))] weights <- c(weights, list(newweights)) } } } else { weights <- NULL } if (!missing(data2)) { if (is.character(data2)) { stdata2 <- data2 data2 <- list() for (i in seq_along(x$data)) { newdata <- as.matrix(x$data[[i]][,stdata2,drop=FALSE]) dropcol <- apply(newdata,2,function(x) any(is.na(x))) newdata <- newdata[,!dropcol,drop=FALSE] colnames(newdata) <- index(x$lvm[[i]])$endogenous[seq_len(ncol(newdata))] data2 <- c(data2, list(newdata)) } } } else { data2 <- NULL } ### Run hooks (additional lava plugins) myhooks <- gethook() newweights <- list() newdata2 <- list() newoptim <- newestimator <- NULL for (f in myhooks) { for ( i in seq_len(x$ngroup)) { res <- do.call(f, list(x=x$lvm[[i]],data=x$data[[i]],weights=weights[[i]],data2=data2[[i]],estimator=estimator,optim=Optim)) if (!is.null(res$x)) x$lvm[[i]] <- res$x if (!is.null(res$data)) x$data[[i]] <- res$data if (!is.null(res$weights)) newweights <- c(newweights,list(res$weights)) if (!is.null(res$data2)) newdata2 <- c(newdata2,list(res$data2)) if (!is.null(res$optim)) 
newoptim <- res$optim if (!is.null(res$estimator)) newestimator <- res$estimator } if (!is.null(newestimator)) estimator <- newestimator if (!is.null(newoptim)) Optim <- newoptim if (!is.null(res$weights)) if (!any(unlist(lapply(newweights,is.null)))) { weights <- newweights } if (!is.null(res$data2)) if (!any(unlist(lapply(newdata2,is.null)))) { data2 <- newdata2 } } if (is.null(estimator)) { if (!missing(weights) && !is.null(weights)) { estimator <- "normal" } else estimator <- "gaussian" } checkestimator <- function(x,...) { ffname <- paste0(x,c("_objective","_gradient"),".lvm") exists(ffname[1])||exists(ffname[2]) } if (!checkestimator(estimator)) { ## Try down/up-case version estimator <- tolower(estimator) if (!checkestimator(estimator)) { estimator <- toupper(estimator) } } Method <- paste0(estimator, "_method", ".lvm") if (!exists(Method)) Method <- "nlminb1" else Method <- get(Method) if (is.null(Optim$method)) { Optim$method <- Method } ## Check for random slopes Xfix <- FALSE Xconstrain <- FALSE xfix <- list() for (i in seq_len(x$ngroup)) { x0 <- x$lvm[[i]] data0 <- x$data[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x0,exo=TRUE))] xconstrain0 <- intersect(unlist(lapply(constrain(x0),function(z) attributes(z)$args)),manifest(x0)) xfix <- c(xfix, list(xfix0)) if (length(xfix0)>0) Xfix<-TRUE ## Yes, random slopes if (length(xconstrain0)>0) Xconstrain <- TRUE ## Yes, nonlinear regression } ## Non-linear parameter constraints involving observed variables? (e.g. 
nonlinear regression) XconstrStdOpt <- TRUE xconstrainM <- TRUE xconstrain <- c() if (Xconstrain) for (i in seq_len(x$ngroup)) { x0 <- x$lvm[[i]] data0 <- x$data[[i]] constr0 <- lapply(constrain(x0), function(z)(attributes(z)$args)) xconstrain0 <- intersect(unlist(constr0), manifest(x0)) xconstrain <- c(xconstrain, list(xconstrain0)) if (length(xconstrain0)>0) { constrainM0 <- names(constr0)%in%unlist(x0$mean) for (i in seq_len(length(constr0))) { if (!constrainM0[i]) { if (xconstrain0%in%constr0[[i]]) { xconstrainM <- FALSE } } } if (xconstrainM & ((is.null(control$method) || Optim$method=="nlminb0") & (lava.options()$test & estimator=="gaussian")) ) { XconstrStdOpt <- FALSE Optim$method <- "nlminb0" if (is.null(control$constrain)) control$constrain <- TRUE } } } ## Define objective function and first and second derivatives ObjectiveFun <- paste0(estimator, "_objective", ".lvm") GradFun <- paste0(estimator, "_gradient", ".lvm") if (!exists(ObjectiveFun) & !exists(GradFun)) stop("Unknown estimator.") InformationFun <- paste0(estimator, "_hessian", ".lvm") parord <- modelPar(x,seq_len(with(x,npar+npar.mean)))$p mymodel <- x parkeep <- c() myclass <- c("multigroupfit","lvmfit") myfix <- list() if (Xfix | (Xconstrain & XconstrStdOpt | !lava.options()$test)) { ## Model with random slopes: ############################################################# if (Xfix) { myclass <- c(myclass,"lvmfit.randomslope") for (k in seq_len(x$ngroup)) { x1 <- x0 <- x$lvm[[k]] data0 <- x$data[[k]] nrow <- length(vars(x0)) xpos <- lapply(xfix[[k]],function(y) which(regfix(x0)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix0 <- list(var=xfix[[k]], col=colpos, row=rowpos) myfix <- c(myfix, list(myfix0)) for (i in seq_along(myfix0$var)) for (j in seq_along(myfix0$col[[i]])) regfix(x0, from=vars(x0)[myfix0$row[[i]][j]],to=vars(x0)[myfix0$col[[i]][j]]) <- colMeans(data0[,myfix0$var[[i]],drop=FALSE],na.rm=TRUE) index(x0) <- 
reindex(x0,zeroones=TRUE,deriv=TRUE) x$lvm[[k]] <- x0 parkeep <- c(parkeep, parord[[k]][coef(x1,mean=TRUE,fix=FALSE)%in%coef(x0,mean=TRUE,fix=FALSE)]) } parkeep <- sort(unique(parkeep)) ## Alter start-values: if (length(mystart)!=length(parkeep)) mystart <- mystart[parkeep] lower <- lower[parkeep] x <- multigroup(x$lvm,x$data,fix=FALSE,exo.fix=FALSE) } parord <- modelPar(x,seq_along(mystart))$p mydata <- list() for (i in seq_len(x$ngroup)) { mydata <- c(mydata, list(as.matrix(x$data[[i]][,manifest(x$lvm[[i]])]))) } myObj <- function(theta) { if (Optim$constrain) theta[constrained] <- exp(theta[constrained]) pp <- modelPar(x,theta)$p res <- 0 for (k in seq_len(x$ngroup)) { x0 <- x$lvm[[k]] data0 <- x$data[[k]] if (Xfix) { xfix0 <- xfix[[k]] myfix0 <- myfix[[k]] } p0 <- pp[[k]] myfun <- function(ii) { if (Xfix) for (i in seq_along(myfix0$var)) { x0$fix[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- index(x0)$A[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- data0[ii,xfix0[i]] } if (is.list(data2[[k]][ii,])) { res <- do.call(ObjectiveFun, list(x=x0, p=p0, data=data0[ii,manifest(x0),drop=FALSE], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]])) } else { res <- do.call(ObjectiveFun, list(x=x0, p=p0, data=data0[ii,manifest(x0),drop=FALSE], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]][ii,])) } return(res) } res <- res + sum(sapply(seq_len(nrow(mydata[[k]])),myfun)) } res } myGrad <- function(theta) { if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p D0 <- res <- rbind(numeric(length(mystart))) for (k in seq_len(x$ngroup)) { if (Xfix) { myfix0 <- myfix[[k]] } x0 <- x$lvm[[k]] myfun <- function(ii) { if (Xfix) for (i in seq_along(myfix0$var)) { x0$fix[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- index(x0)$A[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- x$data[[k]][ii,xfix[[k]][i]] } if (is.list(data2[[k]][ii,])) { } else { val <- do.call(GradFun, list(x=x0, p=pp[[k]], data=mydata[[k]][ii,,drop=FALSE], n=1, S=NULL, 
weights=weights[[k]][ii,], data2=data2[[k]][ii,])) } return(val) } D <- D0; D[parord[[k]]] <- rowSums(sapply(seq_len(nrow(mydata[[k]])),myfun)) res <- res+D } if (Optim$constrain) { res[constrained] <- res[constrained]*theta[constrained] } return(as.vector(res)) } myInformation <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p I0 <- res <- matrix(0,length(theta),length(theta)) grad <- grad0 <- numeric(length(theta)) for (k in seq_len(x$ngroup)) { x0 <- x$lvm[[k]] if (Xfix) { myfix0 <- myfix[[k]] } myfun <- function(ii) { if (Xfix) for (i in seq_along(myfix0$var)) { x0$fix[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- index(x0)$A[cbind(myfix0$row[[i]],myfix0$col[[i]])] <- x$data[[k]][ii,xfix[[k]][i]] } I <- I0 J <- do.call(InformationFun, list(x=x0, p=pp[[k]], data=mydata[[k]][ii,], n=1, S=NULL, weights=weights[[k]][ii,], data2=data2[[k]][ii,], type=Optim$information ) ) D <- grad0 if (!is.null(attributes(J)$grad)) { D[ parord[[k]] ] <- attributes(J)$grad attributes(I)$grad <- D } I[ parord[[k]], parord[[k]] ] <- J return(I) } L <- lapply(seq_len(nrow(x$data[[k]])),function(x) myfun(x)) if (!is.null(attributes(L[[1]])$grad)) grad <- grad + rowSums(matrix((unlist(lapply(L,function(x) attributes(x)$grad))),ncol=length(L))) res <- res + apply(array(unlist(L),dim=c(length(theta),length(theta),nrow(x$data[[k]]))),c(1,2),sum) } if (!is.null(attributes(L[[1]])$grad)) attributes(res)$grad <- grad return(res) } } else { ## Model without random slopes: ########################################################### ## Non-linear parameter constraints involving observed variables? (e.g. 
nonlinear regression) yconstrain <- c() iconstrain <- c() xconstrain <- c() for (j in seq_len(x$ngroup)) { x0 <- x$lvm[[j]] data0 <- x$data[[j]] xconstrain0 <- c() for (i in seq_len(length(constrain(x0)))) { z <- constrain(x0)[[i]] xx <- intersect(attributes(z)$args,manifest(x0)) if (length(xx)>0) { warg <- setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(x0))[i] y <- names(which(unlist(lapply(intercept(x0),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain0 <- c(xconstrain0,list(el)) } } yconstrain0 <- unlist(lapply(xconstrain0,function(x) x$endo)) iconstrain0 <- unlist(lapply(xconstrain0,function(x) x$idx)) xconstrain <- c(xconstrain, list(xconstrain0)) yconstrain <- c(yconstrain, list(yconstrain0)) iconstrain <- c(iconstrain, list(iconstrain0)) } MkOffset <- function(pp,x,data,xconstrain,grad=FALSE) { if (length(xconstrain)>0) { Mu <- matrix(0,nrow(data),length(vars(x))); colnames(Mu) <- vars(x) M <- modelVar(x,p=pp,data=data) M$parval <- c(M$parval, x$mean[unlist(lapply(x$mean,is.numeric))]) for (i in seq_len(length(xconstrain))) { pp <- unlist(M$parval[xconstrain[[i]]$warg]); myidx <- with(xconstrain[[i]],order(c(wargidx,exoidx))) mu <- with(xconstrain[[i]], apply(data[,exo,drop=FALSE],1, function(x) func( unlist(c(pp,x))[myidx]))) Mu[,xconstrain[[i]]$endo] <- mu } offsets <- Mu%*%t(M$IAi)[,endogenous(x)] return(offsets) } return(NULL) } myObj <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p res <- c() for (i in seq_len(x$ngroup)) { offset <- MkOffset(pp[[i]],x$lvm[[i]],x$data[[i]],xconstrain[[i]]) x0 <- x$lvm[[i]] data0 <- x$data[[i]][,index(x$lvm[[i]])$manifest,drop=FALSE] S <- x$samplestat[[i]]$S mu <- x$samplestat[[i]]$mu n <- x$samplestat[[i]]$n if 
(!is.null(offset)) { x0$constrain[iconstrain[[i]]] <- NULL pd <- procdata.lvm(x0,data0[,endogenous(x0),drop=FALSE]-offset) S[endogenous(x0),endogenous(x0)] <- pd$S mu[endogenous(x0)] <- pd$mu n <- pd$n x0$mean[yconstrain[[i]]] <- 0 } res <- c(res, do.call(ObjectiveFun, list(x=x0, p=pp[[i]], data=data0, S=S, mu=mu, n=n, weights=weights[[i]], data2=data2[[i]], offset=offset))) } sum(res) } if (!exists(GradFun)) { myGrad <- NULL } else { myGrad <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p D0 <- res <- rbind(numeric(length(theta))) for (i in seq_len(x$ngroup)) { repval <- with(x$samplestat[[i]], do.call(GradFun, list(x=x$lvm[[i]],p=pp[[i]], data=x$data[[i]][,index(x$lvm[[i]])$manifest,drop=FALSE], S=S,mu=mu,n=n, weights=weights[[i]], data2=data2[[i]]))) D <- D0; D[ parord[[i]] ] <- repval res <- res + D } if (Optim$constrain) { res[constrained] <- res[constrained]*theta[constrained] } return(as.vector(res)) } } myInformation <- function(theta) { theta0 <- theta if (Optim$constrain) { theta[constrained] <- exp(theta[constrained]) } pp <- modelPar(x,theta)$p I0 <- res <- matrix(0,length(theta),length(theta)) for (i in seq_len(x$ngroup)) { I <- I0; I[ parord[[i]], parord[[i]] ] <- with(x$samplestat[[i]], do.call(InformationFun, list(p=pp[[i]], x=x$lvm[[i]], data=x$data[[i]], S=S, mu=mu, n=n, weights=weights[[i]], data2=data2[[i]], type=Optim$information))) res <- res + I } D <- myGrad(theta0) if (Optim$constrain) { res[constrained,-constrained] <- apply(res[constrained,-constrained,drop=FALSE],2,function(x) x*theta[constrained]); res[-constrained,constrained] <- t(res[constrained,-constrained]) if (sum(constrained)==1) { res[constrained,constrained] <- res[constrained,constrained]*outer(theta[constrained],theta[constrained]) - (D[constrained]) } else { res[constrained,constrained] <- res[constrained,constrained]*outer(theta[constrained],theta[constrained]) - 
diag(D[constrained],nrow=length(constrained)) } } attributes(res)$grad <- D return(res) } } ############################################################## if (!exists(InformationFun)) myInformation <- NULL else if (is.null(get(InformationFun))) myInformation <- NULL if (is.null(get(GradFun))) myGrad <- NULL if (messages>1) cat("Optimizing objective function...\n") if (lava.options()$debug) { print(lower) print(Optim$constrain) print(Optim$method) } opt <- do.call(Optim$method, list(start=mystart, objective=myObj, gradient=myGrad, hessian=myInformation, lower=lower, control=Optim)) opt$estimate <- opt$par if (Optim$constrain) { opt$estimate[constrained] <- exp(opt$estimate[constrained]) } if (quick) return(list(opt=opt,vcov=NA)) if (is.null(myGrad) | !XconstrStdOpt ) { ## if (!requireNamespace("numDeriv")) { ## opt$gradient <- naiveGrad(myObj, opt$estimate) ## } else { opt$gradient <- numDeriv::grad(myObj, opt$par, method=lava.options()$Dmethod) } else { opt$gradient <- myGrad(opt$estimate) } if (!is.null(opt$convergence)) { if (opt$convergence!=0) warning("Lack of convergence. Increase number of iteration or change starting values.") } else if (!is.null(opt$gradient) && mean(opt$gradient)^2>1e-3) warning("Lack of convergence. Increase number of iteration or change starting values.") if (!XconstrStdOpt) { myInformation <- function(theta) information(x,p=theta) } else { if (is.null(myInformation)) { ## if (!requireNamespace("numDeriv")) stop("I do not know how to calculate the asymptotic variance of this estimator. 
## For numerical approximation please install the library 'numDeriv'.") if (!is.null(myGrad) & XconstrStdOpt) myInformation <- function(theta) numDeriv::jacobian(myGrad, theta, method=lava.options()$Dmethod) else { myInformation <- function(theta) numDeriv::hessian(myObj, theta) } } } I <- myInformation(opt$estimate) asVar <- tryCatch(Inverse(I), error=function(e) matrix(NA, length(mystart), length(mystart))) res <- list(model=x, model0=mymodel, call=cl, opt=opt, meanstructure=Optim$meanstructure, vcov=asVar, estimator=estimator, weights=weights, data2=data2, cluster=id) class(res) <- myclass myhooks <- gethook("post.hooks") for (f in myhooks) { res0 <- do.call(f,list(x=res)) if (!is.null(res0)) res <- res0 } return(res) } ###}}} ###{{{ estimate.list estimate.lvmlist <- function(x, data, messages=lava.options()$messages, fix, missing=FALSE, ...) { if (base::missing(data)) { return(estimate(x[[1]],x[[2]],missing=missing,...)) } nm <- length(x) if (nm==1) { return(estimate(x[[1]],data,missing=missing,...)) } if (!all(unlist(lapply(x, function(y) inherits(y,"lvm"))))) stop ("Expected a list of 'lvm' objects.") if (is.data.frame(data)) { warning("Only one dataset - going for standard analysis on each submodel.") res <- c() for (i in seq_len(nm)) { res <- c(res, list(estimate(x[[i]],data=data,messages=0,missing=missing, ...))) } return(res) } if (nm!=length(data)) stop("Supply dataset for each model") Xfix <- FALSE xfix <- list() for (i in seq_along(x)) { data0 <- data[[i]] xfix0 <- colnames(data0)[(colnames(data0)%in%parlabels(x[[i]],exo=TRUE))] xfix <- c(xfix, list(xfix0)) if (length(xfix0)>0) { ## Yes, random slopes Xfix<-TRUE } } if (base::missing(fix)) { fix <- ifelse(Xfix,FALSE,TRUE) } mg <- multigroup(x,data,fix=fix,missing=missing,...) res <- estimate(mg,...) return(res) } ###}}} lava/R/exogenous.R0000644000176200001440000000461313520655354013551 0ustar liggesusers##' @export `exogenous` <- function(x,...) 
UseMethod("exogenous") ##' @export "exogenous<-" <- function(x,...,value) UseMethod("exogenous<-") ##' @export `exogenous<-.lvm` <- function(x, xfree=TRUE, ...,value) { if (inherits(value,"formula")) { value <- all.vars(value) } not.in <- !(value%in%vars(x)) if (any(not.in)) { addvar(x,reindex=FALSE) <- value[not.in] } xorg <- exogenous(x) x$exogenous <- value if (!is.null(value) & xfree) { notexo.idx <- xorg[which(!(xorg%in%value))] if (length(notexo.idx)>0) { ## & mom) { if (length(notexo.idx)>1) { covariance(x,notexo.idx,pairwise=TRUE,exo=TRUE) <- NA } covariance(x,notexo.idx,vars(x),exo=TRUE) <- NA intercept(x,notexo.idx) <- x$mean[notexo.idx] } } index(x) <- reindex(x) return(x) } ##' @export `exogenous.lvm` <- function(x,variable,latent=FALSE,index=TRUE,...) { if (!missing(variable)) { exogenous(x) <- variable return(x) } if (!index) { if (latent) { allvars <- vars(x) } else { allvars <- manifest(x) } M <- x$M res <- c() for (i in allvars) if (!any(M[,i]==1) & !any(is.na(x$cov[i,]))) # & any(M[i,]==1)) res <- c(res, i) return(res) } if (is.null(x$exogenous)) return(x$exogenous) if (all(!is.na(x$exogenous)) & !latent) { return(x$exogenous[x$exogenous%in%index(x)$manifest]) } if (!latent) return(index(x)$exogenous) return(exogenous(x,latent=latent,index=FALSE,...)) } ##' @export `exogenous.lvmfit` <- function(x,...) { exogenous(Model(x),...) } ##' @export exogenous.list <- function(x,...) { exolist <- c() endolist <- c() for (i in seq_along(x)) { exolist <- c(exolist, exogenous(x[[i]])) endolist <- c(endolist, endogenous(x[[i]])) } endolist <- unique(endolist) exolist <- unique(exolist) return(exolist[!(exolist%in%endolist)]) } ##' @export `exogenous.multigroup` <- function(x,...) { exogenous(Model(x)) } ##' @export `exogenous.lm` <- function(x,...) 
{ attr(getoutcome(formula(x)),"x") } lava/R/confband.R0000644000176200001440000002260013520655354013303 0ustar liggesusers##' Add Confidence limits bar to plot ##' ##' @title Add Confidence limits bar to plot ##' @param x Position (x-coordinate if vert=TRUE, y-coordinate otherwise) ##' @param lower Lower limit (if NULL no limits is added, and only the ##' center is drawn (if not NULL)) ##' @param upper Upper limit ##' @param center Center point ##' @param line If FALSE do not add line between upper and lower bound ##' @param delta Length of limit bars ##' @param centermark Length of center bar ##' @param pch Center symbol (if missing a line is drawn) ##' @param blank If TRUE a white ball is plotted before the center is ##' added to the plot ##' @param vert If TRUE a vertical bar is plotted. Otherwise a horizontal ##' bar is used ##' @param polygon If TRUE polygons are added between 'lower' and 'upper'. ##' @param step Type of polygon (step-function or piecewise linear) ##' @param ... Additional low level arguments (e.g. col, lwd, lty,...) ##' @seealso \code{confband} ##' @export ##' @keywords iplot ##' @aliases confband forestplot ##' @author Klaus K. 
Holst ##' @examples ##' plot(0,0,type="n",xlab="",ylab="") ##' confband(0.5,-0.5,0.5,0,col="darkblue") ##' confband(0.8,-0.5,0.5,0,col="darkred",vert=FALSE,pch=1,cex=1.5) ##' ##' set.seed(1) ##' K <- 20 ##' est <- rnorm(K) ##' se <- runif(K,0.2,0.4) ##' x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2)) ##' x[c(3:4,10:12),] <- NA ##' rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse=""))) ##' rownames(x)[which(is.na(est))] <- "" ##' signif <- sign(x[,2])==sign(x[,3]) ##' forestplot(x,text.right=FALSE) ##' forestplot(x[,-4],sep=c(2,15),col=signif+1,box1=TRUE,delta=0.2,pch=16,cex=1.5) ##' forestplot(x,vert=TRUE,text=FALSE) ##' forestplot(x,vert=TRUE,text=FALSE,pch=NA) ##' ##forestplot(x,vert=TRUE,text.vert=FALSE) ##' ##forestplot(val,vert=TRUE,add=TRUE) ##' ##' z <- seq(10) ##' zu <- c(z[-1],10) ##' plot(z,type="n") ##' confband(z,zu,rep(0,length(z)),col=Col("darkblue"),polygon=TRUE,step=TRUE) ##' confband(z,zu,zu-2,col=Col("darkred"),polygon=TRUE,step=TRUE) ##' ##' z <- seq(0,1,length.out=100) ##' plot(z,z,type="n") ##' confband(z,z,z^2,polygon="TRUE",col=Col("darkblue")) ##' ##' set.seed(1) ##' k <- 10 ##' x <- seq(k) ##' est <- rnorm(k) ##' sd <- runif(k) ##' val <- cbind(x,est,est-sd,est+sd) ##' par(mfrow=c(1,2)) ##' plot(0,type="n",xlim=c(0,k+1),ylim=range(val[,-1]),axes=FALSE,xlab="",ylab="") ##' axis(2) ##' confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2) ##' plot(0,type="n",ylim=c(0,k+1),xlim=range(val[,-1]),axes=FALSE,xlab="",ylab="") ##' axis(1) ##' confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2,vert=FALSE) confband <- function(x,lower,upper,center=NULL,line=TRUE,delta=0.07, centermark=0.03, pch,blank=TRUE,vert=TRUE,polygon=FALSE,step=FALSE,...) 
{ if (polygon) { if (step) { x1 <- rep(x,each=2)[-1] y1 <- rep(lower, each=2); y1 <- y1[-length(y1)] x2 <- rep(rev(x),each=2); x2 <- x2[-length(x2)] y2 <- rep(rev(upper),each=2)[-1] xx <- c(x1,x2) if (!is.null(center)) center <- rep(center,each=2)[-1] yy <- c(y1,y2) } else { xx <- c(x,rev(x)) yy <- c(lower,rev(upper)) } polygon(xx,yy,...) if (line && !is.null(center)) { mlines <- function(x,y,...,border,fillOddEven) lines(x,y,...) mlines(xx[seq(length(xx)/2)],center,...) } return(invisible(NULL)) } if (vert) { if (line && !missing(lower) && !missing(upper)) segments(x,lower,x,upper,...) if (!missing(lower)) segments(x-delta,lower,x+delta,lower,...) if (!missing(upper)) segments(x-delta,upper,x+delta,upper,...) if (!is.null(center)) { if (!missing(pch)) { if (blank) points(x,center,pch=16,col="white") points(x,center,pch=pch,...) } else { segments(x-centermark,center,x+centermark,center,...) } } } else { if (line && !missing(lower) && !missing(upper)) segments(lower,x,upper,x,...) if (!missing(lower)) segments(lower,x-delta,lower,x+delta,...) if (!missing(upper)) segments(upper,x-delta,upper,x+delta,...) if (!is.null(center)) { if (!missing(pch)) { if (blank) points(center,x,pch=16,col="white") points(center,x,pch=pch,...) } else { segments(center,x-centermark,center,x+centermark,...) } } } if (missing(lower)) lower <- NULL if (missing(upper)) upper <- NULL invisible(c(x,lower,upper,center)) } ##' @export forestplot <- function(x,lower,upper,line=0,labels, text=TRUE,text.right=text,text.fixed=NULL,text.vert=TRUE, adj=NULL, delta=0,axes=TRUE,cex=1,pch=15, xlab="",ylab="",sep,air, xlim,ylim,mar,box1=FALSE,box2=FALSE, vert=FALSE,cex.axis=1,cex.estimate=0.6, add=FALSE, reset.par=FALSE,...) 
{ if (is.matrix(x)) { lower <- x[,2]; upper <- x[,3] if (ncol(x)>3) cex <- x[,4] x <- x[,1] } if (missing(mar) && !add) { if (vert) { mar <- c(8,4,1,1) } else { mar <- c(4,8,1,1) } } if (missing(labels)) labels <- names(x) K <- length(x) onelayout <- FALSE if (!add) { def.par <- par(no.readonly=TRUE) if (reset.par) on.exit(par(def.par)) if (text.right) { if (vert) { layout(rbind(1,2),heights=c(0.2,0.8)) } else { layout(cbind(2,1),widths=c(0.8,0.2)) } } else { onelayout <- TRUE layout(1) } } if (vert) { if (missing(ylim)) { if (missing(air)) air <- max(upper-lower,na.rm=TRUE)*0.4 ylim <- range(c(x,lower-air,upper+air),na.rm=TRUE) } if (missing(xlim)) xlim <- c(1,K) } else { if (missing(ylim)) ylim <- c(1,K) if (missing(xlim)) { if (missing(air)) air <- max(upper-lower,na.rm=TRUE)*0.4 xlim <- range(c(x,lower-air,upper+air),na.rm=TRUE) } } args0 <- list(...) formatCargsn <- names(formals(args(formatC)))[-1] nn <- setdiff(names(args0),formatCargsn) plotargs <- args0[nn] mainplot <- function(...) 
{ par(mar=mar) ## bottom,left,top,right do.call("plot",c(list(x=0,type="n",axes=FALSE,xlab=xlab,ylab=ylab,xlim=xlim,ylim=ylim),plotargs)) if (box1) box() if (axes) { if (vert) { axis(2,cex.axis=cex.axis) } else { axis(1,cex.axis=cex.axis) } } } if (onelayout && !add) mainplot() if (text) { xpos <- upper if (text.right && !add) { if (vert) { par(mar=c(0,mar[2],0,mar[4])) } else { par(mar=c(mar[1],0,mar[3],0)) } plot.new() if (vert) { plot.window(xlim=xlim,ylim=c(0,0.5)) } else { plot.window(ylim=ylim,xlim=c(0,0.5)) } if (box2) box() xpos[] <- 0 } if (!is.null(text.fixed)) { if (is.logical(text.fixed) && text.fixed) text.fixed <- max(xpos) xpos <- rep(text.fixed,length.out=K) } nn <- intersect(names(args0),formatCargsn) args <- args0[nn] for (i in seq_len(K)) { st <- c(do.call(formatC,c(list(x=x[i]),args)), paste0("(", do.call(formatC,c(list(x=lower[i]),args)),"; ", do.call(formatC,c(list(x=upper[i]),args)),")")) if (text.vert) { st <- paste0(" ",st[1]," ",st[2],collapse="") st <- paste(" ", st) } if (vert) { if (!is.na(x[i])) { if (!text.vert) { if (text.right) xpos[i] <- xpos[i]+0.025 graphics::text(i,xpos[i],paste(st,collapse="\n"),xpd=TRUE, offset=3, cex=cex.estimate, adj=adj) } else { if (!is.na(x[i])) graphics::text(i,xpos[i],st,xpd=TRUE, srt=90, offset=0, pos=4, cex=cex.estimate, adj=adj) } } } else { if (!is.na(x[i])) graphics::text(xpos[i],i,st,xpd=TRUE,pos=4,cex=cex.estimate, adj=adj) } } } if (!onelayout && !add) mainplot() if (!is.null(line)) { if (vert) { abline(h=line,lty=2,col="lightgray") } else { abline(v=line,lty=2,col="lightgray") } } if (!missing(sep)) { if (vert) { abline(v=sep+.5,col="gray") } else { abline(h=sep+.5,col="gray") } } do.call("confband", c(list(x=seq(K),lower=lower,upper=upper,x, pch=pch,cex=cex,vert=vert,blank=FALSE), plotargs)) if (!add) { if (is.null(adj)) adj <- NA if (vert) { mtext(labels,1,at=seq(K),las=2,line=1,cex=cex.axis, adj=adj) } else { mtext(labels,2,at=seq(K),las=2,line=1,cex=cex.axis, adj=adj) } } } 
lava/R/trim.R0000644000176200001440000000077713520655354012517 0ustar liggesusers##' Trim tring of (leading/trailing/all) white spaces ##' @title Trim tring of (leading/trailing/all) white spaces ##' @param x String ##' @param all Trim all whitespaces? ##' @param \dots additional arguments to lower level functions ##' @author Klaus K. Holst ##' @export trim <- function(x,all=FALSE,...) { ## y <- gsub("^ .", "", x) # remove leading white space ## y <- gsub(". $", "", x) # remove trailing white space if (!all) return(gsub("^\\s+|\\s+$", "", x)) return(gsub("\\s","",x)) } lava/R/logo.R0000644000176200001440000000202413520655354012467 0ustar liggesusersgfilter <- function(x,sigma=1) { gridfn <- function(fn,width,height,center=TRUE) { jx <- seq_len(height) jy <- seq_len(width) if (center) { jx <- jx/height-0.5 jy <- jy/width-0.5 } outer(jx, jy, FUN=fn) } width <- ncol(x); height <- nrow(x) oscunits <- gridfn(function(x,y) ((-1)^(x+y)),height=height,width=width,center=FALSE) x0 <- x*oscunits ## translate origo to center of image X <- fft(x0) d <- gridfn(function(x,y) (x^2+y^2),height=height,width=width,center=TRUE) Gn <- exp(-2*(base::pi*sigma)^2*d) # frequency response H <- X*Gn res <- Re(fft(H,inverse=TRUE))/(width*height)*oscunits return(res) } ##' @export lava <- function(seed,w=128,h=w,bw=4,sigma=5000,bg=20000,numcol=128,col=grDevices::heat.colors(numcol),...) { if (!missing(seed)) set.seed(seed) x <- matrix(rnorm(w*h,bg,sigma),nrow=h, ncol=w) x0 <- gfilter(x,sigma=bw) y <- (x0-min(x0)+1)^1.2 opt <- graphics::par(mai=c(0,0,0,0)) graphics::image(y,axes=FALSE,col=col) graphics::par(opt) invisible(y) } lava/R/lava-package.R0000644000176200001440000001757013520655354014057 0ustar liggesusers ##' Estimation and simulation of latent variable models ##' ##' Framwork for estimating parameters and simulate data from Latent Variable ##' Models. 
##'
##' @name lava-package
##' @importFrom graphics plot lines points abline points text layout
##' par plot.new plot.window title rect locator segments image
##' mtext box axis polygon matplot contour contour.default
##' identify rug curve
##' @importFrom grDevices xy.coords col2rgb rgb colors rainbow
##' topo.colors gray.colors palette colorRampPalette heat.colors
##' @importFrom utils stack combn read.csv getTxtProgressBar
##' setTxtProgressBar txtProgressBar head tail modifyList
##' getFromNamespace packageVersion write.table methods data
##' glob2rx
##' @importFrom stats density deriv effects lm family simulate vcov
##' var cov cor coef model.frame model.weights as.formula
##' model.matrix rnorm rchisq runif rlnorm pnorm qnorm na.omit AIC
##' terms logLik qt pt update update.formula confint approxfun
##' pchisq confint.default formula fft uniroot rbinom predict sd
##' addmargins residuals dnorm quantile qf cov2cor qchisq
##' get_all_vars p.adjust rpois rt rmultinom rgamma printCoefmat
##' glm nlminb na.pass na.omit
##' @importFrom survival is.Surv
##' @importFrom methods new as
##' @aliases lava-package lava
##' @docType package
##' @author Klaus K. Holst Maintainer:
##' @keywords package
##' @examples
##'
##' lava()
##'
NULL

##' Longitudinal Bone Mineral Density Data
##'
##' Bone Mineral Density Data consisting of 112 girls randomized to receive
##' calcium or placebo. Longitudinal measurements of bone mineral density
##' (g/cm^2) measured approximately every 6th month in 3 years.
##'
##'
##' @name calcium
##' @docType data
##' @format A data.frame containing 560 (incomplete) observations. The 'person'
##' column defines the individual girls of the study with measurements at
##' visiting times 'visit', and age in years 'age' at the time of visit. The
##' bone mineral density variable is 'bmd' (g/cm^2).
##' @source Vonesh & Chinchilli (1997), Table 5.4.1 on page 228.
##' @keywords datasets NULL ##' Longitudinal Bone Mineral Density Data (Wide format) ##' ##' Bone Mineral Density Data consisting of 112 girls randomized to receive ##' calcium og placebo. Longitudinal measurements of bone mineral density ##' (g/cm^2) measured approximately every 6th month in 3 years. ##' @name bmd ##' @docType data ##' @source Vonesh & Chinchilli (1997), Table 5.4.1 on page 228. ##' @format data.frame ##' @keywords datasets ##' @seealso calcium NULL ##' Simulated data ##' ##' Simulated data ##' @name brisa ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Data ##' ##' Description ##' @name bmidata ##' @docType data ##' @format data.frame ##' @keywords datasets NULL ##' Hubble data ##' ##' Velocity (v) and distance (D) measures of 36 Type Ia super-novae from the Hubble ##' Space Telescope ##' @name hubble ##' @docType data ##' @format data.frame ##' @source Freedman, W. L., et al. 2001, AstroPhysicalJournal, 553, 47. ##' @keywords datasets NULL ##' Hubble data ##' ##' @name hubble2 ##' @seealso hubble ##' @docType data ##' @format data.frame ##' @keywords datasets NULL ##' Data ##' ##' Description ##' @name indoorenv ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Missing data example ##' ##' Simulated data generated from model ##' \deqn{E(Y_i\mid X) = X, \quad cov(Y_1,Y_2\mid X)=0.5} ##' ##' The list contains four data sets ##' 1) Complete data ##' 2) MCAR ##' 3) MAR ##' 4) MNAR (missing mechanism depends on variable V correlated with Y1,Y2) ##' @examples ##' data(missingdata) ##' e0 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[1]]) ## No missing ##' e1 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]]) ## CC (MCAR) ##' e2 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[2]],missing=TRUE) ## MCAR ##' e3 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]]) ## CC (MAR) ##' e4 <- estimate(lvm(c(y1,y2)~b*x,y1~~y2),missingdata[[3]],missing=TRUE) ## 
MAR ##' @name missingdata ##' @docType data ##' @format list of data.frames ##' @source Simulated ##' @keywords datasets NULL ##' Example data (nonlinear model) ##' ##' @name nldata ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Example SEM data (nonlinear) ##' ##' Simulated data ##' @name nsem ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Example SEM data ##' ##' Simulated data ##' @name semdata ##' @docType data ##' @source Simulated ##' @format data.frame ##' @keywords datasets NULL ##' Serotonin data ##' ##' This simulated data mimics a PET imaging study where the 5-HT2A ##' receptor and serotonin transporter (SERT) binding potential has ##' been quantified into 8 different regions. The 5-HT2A ##' cortical regions are considered high-binding regions ## 'which are a priori known to yield quite similar and highly correlated ##' measurements. These measurements can be regarded as proxy measures of ##' the extra-cellular levels of serotonin in the brain ##' \tabular{rll}{ ##' day \tab numeric \tab Scan day of the year \cr ##' age \tab numeric \tab Age at baseline scan \cr ##' mem \tab numeric \tab Memory performance score \cr ##' depr \tab numeric \tab Depression (mild) status 500 days after baseline \cr ##' gene1 \tab numeric \tab Gene marker 1 (HTR2A) \cr ##' gene2 \tab numeric \tab Gene marker 2 (HTTTLPR) \cr ##' cau \tab numeric \tab SERT binding, Caudate Nucleus \cr ##' th \tab numeric \tab SERT binding, Thalamus \cr ##' put \tab numeric \tab SERT binding, Putamen \cr ##' mid \tab numeric \tab SERT binding, Midbrain \cr ##' aci \tab numeric \tab 5-HT2A binding, Anterior cingulate gyrus \cr ##' pci \tab numeric \tab 5-HT2A binding, Posterior cingulate gyrus \cr ##' sfc \tab numeric \tab 5-HT2A binding, Superior frontal cortex \cr ##' par \tab numeric \tab 5-HT2A binding, Parietal cortex \cr ##' } ##' @name serotonin ##' @docType data ##' @format data.frame ##' 
@source Simulated ##' @keywords datasets NULL ##' Data ##' ##' Description ##' @seealso serotonin ##' @name serotonin2 ##' @docType data ##' @format data.frame ##' @source Simulated ##' @keywords datasets NULL ##' Twin menarche data ##' ##' Simulated data ##' \tabular{rll}{ ##' id \tab numeric \tab Twin-pair id \cr ##' zyg \tab character \tab Zygosity (MZ or DZ) \cr ##' twinnum \tab numeric \tab Twin number (1 or 2) \cr ##' agemena \tab numeric \tab Age at menarche (or censoring) \cr ##' status \tab logical \tab Censoring status (observed:=T,censored:=F) \cr ##' bw \tab numeric \tab Birth weight \cr ##' msmoke \tab numeric \tab Did mother smoke? (yes:=1,no:=0) \cr ##' } ##' @name twindata ##' @docType data ##' @format data.frame ##' @keywords datasets ##' @source Simulated NULL ##' For internal use ##' ##' @title For internal use ##' @name startvalues ##' @rdname internal ##' @author Klaus K. Holst ##' @keywords utilities ##' @export ##' @aliases ##' startvalues0 startvalues1 startvalues2 startvalues3 ##' starter.multigroup ##' addattr modelPar modelVar matrices pars pars.lvm ##' pars.lvmfit pars.glm score.glm procdata.lvmfit modelPar modelVar ##' matrices reorderdata graph2lvm igraph.lvm subgraph finalize ##' index.lvm index.lvmfit index reindex index<- ##' rmvn0 dmvn0 logit expit tigol ##' randomslope randomslope<- lisrel variances offdiags describecoef ##' parlabels rsq stdcoef CoefMat CoefMat.multigroupfit deriv updatelvm ##' checkmultigroup profci estimate.MAR missingModel Inverse Identical ##' gaussian_logLik.lvm addhook gethook multigroup Weights fixsome ##' parfix parfix<- merge IV parameter index index<- ##' Specials procformula getoutcome decomp.specials ##' na.pass0 NULL lava/R/contr.R0000644000176200001440000000235513520655354012663 0ustar liggesusers##' Create contrast matrix ##' ##' Create contrast matrix typically for use with 'estimate' (Wald tests). 
##' @export ##' @param p index of non-zero entries (see example) ##' @param n Total number of parameters (if omitted the max number in p will be used) ##' @param diff If FALSE all non-zero entries are +1, otherwise the second non-zero element in each row will be -1. ##' @param ... Additional arguments to lower level functions ##' @aliases contr parsedesign ##' @examples ##' contr(2,n=5) ##' contr(as.list(2:4),n=5) ##' contr(list(1,2,4),n=5) ##' contr(c(2,3,4),n=5) ##' contr(list(c(1,3),c(2,4)),n=5) ##' contr(list(c(1,3),c(2,4),5)) ##' ##' parsedesign(c("aa","b","c"),"?","?",diff=c(FALSE,TRUE)) contr <- function(p,n,diff=TRUE,...) { if (missing(n)) n <- max(unlist(p)) if (is.character(p)) { return(parsedesign(n,p,...)) } if (is.list(p)) { return(Reduce(rbind,lapply(p, function(x) do.call(contr, list(x,n,diff[1L]))))) } if (is.character(n)) n <- length(n) if (!is.numeric(n)) { try(n <- length(coef(n)),silent=TRUE) } B <- matrix(0,ncol=n,nrow=max(1L,length(p)-1L)) B[,p[1]] <- 1L if (length(p)>1L) B[cbind(seq(nrow(B)),p[-1])] <- ifelse(diff[1L],-1,1) return(B) } lava/R/cluster.hook.R0000644000176200001440000000502113520655354014147 0ustar liggesuserscluster.post.hook <- function(x,...) { if (class(x)[1]=="multigroupfit") { if (is.null(x$cluster)) return(NULL) if (any(unlist(lapply(x$cluster,is.null)))) return(NULL) allclusters <- unlist(x$cluster) uclust <- unique(allclusters) K <- length(uclust) G <- x$model$ngroup S0 <- lapply(score(x,indiv=TRUE), function(x) { x[which(is.na(x))] <- 0; x }) S <- matrix(0,length(pars(x)),nrow=K) for (i in uclust) { for (j in seq_len(G)) { idx <- which(x$cluster[[j]]==i) if (length(idx)>0) S[i,] <- S[i,] + colSums(S0[[j]][idx,,drop=FALSE]) } } J <- crossprod(S) I <- information(x,type="hessian",...) iI <- Inverse(I) asVar <- iI%*%J%*%iI x$vcov <- asVar return(x) } ## lvmfit: if (!is.null(x$cluster)) { uclust <- unique(x$cluster) K <- length(uclust) S <- score(x,indiv=TRUE) #,...) I <- information(x,type="hessian") #,...) 
iI <- Inverse(I) S0 <- matrix(0,ncol=ncol(S),nrow=K) count <- 0 for (i in uclust) { count <- count+1 S0[count,] <- colSums(S[which(x$cluster==i),,drop=FALSE]) }; adj1 <- K/(K-1) ## p <- ncol(S) ## adj1 <- K/(K-p) ## Mancl & DeRouen, 2001 J <- adj1*crossprod(S0) col3 <- sqrt(diag(iI)); ## Naive se nn <- c("Estimate","Robust SE", "Naive SE", "P-value") asVar <- iI%*%J%*%iI } else { asVar <- x$vcov } diag(asVar)[diag(asVar)==0] <- NA mycoef <- x$opt$estimate x$vcov <- asVar SD <- sqrt(diag(asVar)) Z <- mycoef/SD pval <- 2*(pnorm(abs(Z),lower.tail=FALSE)) if (is.null(x$cluster)) { col3 <- Z nn <- c("Estimate","Std. Error", "Z-value", "P-value") } newcoef <- cbind(mycoef, SD, col3, pval); nparall <- index(x)$npar + ifelse(x$control$meanstructure, index(x)$npar.mean,0) if (!is.null(x$expar)) { nparall <- nparall+length(x$expar) } mycoef <- matrix(NA,nrow=nparall,ncol=4) mycoef[x$pp.idx,] <- newcoef colnames(mycoef) <- nn mynames <- c() if (x$control$meanstructure) { mynames <- vars(x)[index(x)$v1==1] } if (index(x)$npar>0) { mynames <- c(mynames,paste0("p",seq_len(index(x)$npar))) } if (!is.null(x$expar)) { mynames <- c(mynames,names(x$expar)) } rownames(mycoef) <- mynames x$coef <- mycoef return(x) } lava/R/modelPar.R0000644000176200001440000000556413520655354013306 0ustar liggesusers ##' @export `modelPar` <- function(x,p,...) UseMethod("modelPar") ###{{{ modelPar.lvmfit ##' @export modelPar.lvmfit <- function(x, p=pars(x), ...) modelPar(Model(x),p=p,...) ###}}} modelPar.lvmfit ###{{{ modelPar ##' @export modelPar.lvm <- function(x,p, ...) { npar <- index(x)$npar npar.mean <- index(x)$npar.mean if (length(p)!=npar & length(p)<(npar+npar.mean)) stop("Wrong dimension of parameter vector!") p2 <- NULL if (length(p)!=npar) { ## if meanstructure meanpar <- p[seq_len(npar.mean)] p. <- p if (length(meanpar)>0) { p. <- p[-seq_len(npar.mean)] } else meanpar <- NULL p <- p.[seq_len(npar)] if (npar>0) { p2 <- p.[-seq_len(npar)] } else p2 <- p. 
} else { meanpar <- NULL p2 <- NULL } return(list(p=p,meanpar=meanpar,p2=p2)) } ###}}} modelpar.lvm ###{{{ modelPar.multigroupfit ##' @export modelPar.multigroupfit <- function(x,p=pars(x),...) { modelPar(Model(x),p,...) } ###}}} ###{{{ modelPar.multigroup ##' @export modelPar.multigroup <- function(x,p, ...) { if (length(p)==x$npar) { pp <- lapply(x$parposN,function(z) p[z]) res <- list(p=pp, par=pp, mean=NULL) return(res) } ppos <- x$parposN pp <- lapply(ppos,function(z) p[z+x$npar.mean]) if (length(pp)==0) pp <- lapply(seq_len(x$ngroup),function(x) logical()) mm <- lapply(x$meanposN,function(x) p[x]) if (is.null(mm)) mm <- lapply(seq_len(x$ngroup),logical()) pm <- mm for (i in seq_len(length(pm))) pm[[i]] <- c(pm[[i]],pp[[i]]) res <- list(p=pm,par=pp,mean=mm) return(res) } ###}}} modelPar2.multigroup <- function(x,p, ...) { npar <- x$npar npar.mean <- x$npar.mean k <- x$ngroup if (length(p)!=npar & length(p)!=(npar+npar.mean)) stop("Wrong dimension of parameter vector!") if (length(p)!=npar) { ## if meanstructure meanpar <- p[seq_len(npar.mean)] p. <- p[-seq_len(npar.mean)] } else { meanpar <- NULL p. 
<- p } parlist <- list(); for (i in seq_len(k)) parlist[[i]] <- numeric(length(x$parlist[[i]])) if (!is.null(meanpar)) { meanlist <- list(); for (i in seq_len(k)) meanlist[[i]] <- numeric(length(x$meanlist[[i]])) } if (length(p.)>0) for (i in seq_along(p.)) { for (j in seq_len(k)) { idx <- match(paste0("p",i), x$parlist[[j]]) if (!is.na(idx)) parlist[[j]][idx] <- p.[i] if (!is.null(meanpar)) { midx <- match(paste0("p",i), x$meanlist[[j]]) if (!is.na(midx)) meanlist[[j]][midx] <- p.[i] } } } if (!is.null(meanpar)) { for (i in seq_along(meanpar)) { for (j in seq_len(k)) { idx <- match(paste0("m",i), x$meanlist[[j]]) if (!is.na(idx)) meanlist[[j]][idx] <- meanpar[i] } } } else { meanlist <- NULL } p0 <- parlist for (i in seq_along(p0)) p0[[i]] <- c(meanlist[[i]],parlist[[i]]) return(list(p=p0, par=parlist, mean=meanlist)) } lava/R/assoc.R0000644000176200001440000001434513520655354012650 0ustar liggesusersnormal.threshold <- function(object,p=coef(object),...) { M <- moments(object,p=p) ord <- ordinal(Model(object)) K <- attributes(ord)$K cK <- c(0,cumsum(K-1)) breaks.orig <- list() for (i in seq(K)) { breaks.orig <- c(breaks.orig,list(M$e[seq(K[i]-1)+cK[i]])) } breaks <- lapply(breaks.orig, ordreg_threshold) names(breaks) <- names(K) ii <- match(names(K),vars(object)) sigma <- M$Cfull[ii,ii] list(breaks=breaks,sigma=sigma,mean=M$v[ii],K=K) } prob.normal <- function(sigma,breaks,breaks2=breaks) { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (ncol(sigma)!=2 || missing(breaks)) stop("Wrong input") P <- matrix(ncol=length(breaks2)-1, nrow=length(breaks)-1) for (i in seq(length(breaks)-1)) for (j in seq(length(breaks2)-1)) P[i,j] <- mets::pmvn(lower=c(breaks[i],breaks2[j]),upper=c(breaks[i+1],breaks2[j+1]),sigma=sigma) return(P) } assoc <- function(P,sigma,breaks,...) { if (missing(P)) P <- prob.normal(sigma,breaks,...) 
Agree <- sum(diag(P)) marg.row <- rowSums(P) marg.col <- colSums(P) Chance <- sum(marg.row*marg.col) kap <- (Agree-Chance)/(1-Chance) gam <- goodmankruskal_gamma(P)$gamma inf <- information_assoc(P) res <- c(list(kappa=kap,gamma=gam),inf) if (!missing(sigma)) res <- c(res,rho=sigma[1,2]) return(res) } ################################################## ### Risk comparison ################################################## ## or:= riskcomp(x,scale=odds) // OR ##' @export riskcomp <- function(x,...,scale,op="/",type=1,struct=FALSE) { val <- c(x,unlist(list(...))) if (!missing(scale)) val <- do.call(scale,list(val)) if (!struct && length(val)==2) { if (type==2) { return(do.call(op,list(val[2],val[1]))) } else if (type==1) { return(do.call(op,list(val[1],val[2]))) } return(c(do.call(op,list(val[2],val[1])), do.call(op,list(val[1],val[2])))) } outer(val,val,op) offdiag(outer(val,val,op) ,type=type) } ##' @export Ratio <- function(x,...) riskcomp(x,...,op="/") ##' @export Diff <- function(x,...) riskcomp(x,...,op="-") ################################################## ## Odds ratio ################################################## ##' @export odds <- function(x) x/(1-x) logor <- function(x) { c(log(prod(diag(x))/prod(revdiag(x))),sum(1/x)^.5) } ##' @export OR <- function(x,tabulate=FALSE,log=FALSE,...) { if (!inherits(x,c("multinomial","table"))) { val <- riskcomp(x,...,scale=odds) if (log) val <- base::log(val) return(val) } if (inherits(x,"multinomial")) { M <- x } else { M <- multinomial(x) } pos <- M$position if (ncol(pos)!=2 & ncol(pos)!=2) stop("Only for 2x2 tables") orfun <- function(p,...) { list(logOR=sum(log(p[diag(pos)]))-sum(log(p[revdiag(pos)]))) } estimate(M,orfun,back.transform=exp) } ################################################## ## Information theoretical measures ################################################## information_assoc <- function(P,base=exp(1),...) 
{
    ## Marginal distributions of the two-way probability table P
    P.row <- rowSums(P)
    P.col <- colSums(P)
    H.row <- H.col <- H <- 0
    ## Column-marginal entropy; '+(P.col[j]==0)' keeps log() finite (the
    ## branch is only entered for positive probabilities anyway)
    for (j in seq_along(P.col))
        if (P.col[j]>0)
            H.col <- H.col - P.col[j]*log(P.col[j]+(P.col[j]==0),base=base)
    ## Row-marginal entropy and joint entropy in a single pass
    for (i in seq_along(P.row)) {
        if (P.row[i]>0)
            H.row <- H.row - P.row[i]*log(P.row[i]+(P.row[i]==0),base=base)
        for (j in seq_along(P.col)) {
            if (P[i,j]>0)
                H <- H - P[i,j]*log(P[i,j],base=base)
        }
    }
    ## Mutual information and uncertainty coefficients (MI normalized by
    ## the row/column/average entropy)
    I <- H.row+H.col-H
    return(list(MI=I,H=H,H.row=H.row,H.col=H.col,
                U.row=I/H.row,U.col=I/H.col,U.sym=2*I/(H.row+H.col)))
}

##' @export
information.data.frame <- function(x,...) {
    information(multinomial(x,marginal=TRUE),...)
}

##' @export
information.table <- function(x,...) {
    information(multinomial(x,marginal=TRUE),...)
}

##' @export
information.multinomial <- function(x,...) {
    estimate(x,function(p,object,...) {
        ## Fill the probability table from the parameter vector via the
        ## position index of the multinomial object
        P <- object$position; P[] <- p[object$position]
        information_assoc(P)},...)
}

##################################################
## Independence tests
##################################################

independence <- function(x,...)
{ if (is.table(x) || is.data.frame(x) || is.matrix(x)) { x <- multinomial(x) } if (!inherits(x,"multinomial")) stop("Expected table, data.frame or multinomial object") if (length(x$levels)!=2) stop("Data from two categorical variables expected") f <- function(p) { P <- x$position; P[] <- p[x$position] n <- nrow(x$iid) k1 <- length(x$levels[[1]]) k2 <- length(x$levels[[2]]) A1 <- matrix(0,ncol=length(p),nrow=k1) for (i in seq(k1)) A1[i,x$position[i,]] <- 1 A2 <- matrix(0,ncol=length(p),nrow=k2) for (i in seq(k2)) A2[i,x$position[,i]] <- 1 P1 <- A1%*%p P2 <- A2%*%p I <- P1%*%t(P2) Q <- P-I # Q <- sum(n*P*(log(I[1,1])-P1 sum((P-I)^2) ##V <- sqrt(sum((P*n-I*n)^2/I/n) /(n*(min(k1,k2)-1))) V <- sqrt(sum((P-I)^2/I) / ((min(k1,k2)-1))) return(V) sum(n*Q^2/I)^0.25 return((sum((P-I)^2))^.5) ## V } ## M <- P*n ## O2 <- colSums(M) ## O1 <- rowSums(M) ## M[1,1]-O1[1]*O2[1]/200 ## M[2,2]-O1[2]*O2[2]/200 ## sum((M-I*n)^2/(I*n)) ## sum((P*n-I*n)^2/I/n) ## sum(Q) ## sum(Q^2) ## M <- P ## chisq.test(M,correct=FALSE) return(estimate(x,function(p) list(cramersV=f(p)),iid=TRUE,...)) e <- estimate(x,f,iid=TRUE,print=function(x,...) { cat("\tTest for independence\n\n") cat("Test statistc:\t ", formatC(x$coefmat[1]/x$coefmat[2]), "\nP-value:\t ", x$coefmat[5],"\n\n") print(estimate(x)) },...) 
return(list(p.value=e$coefmat[5])) ## Q <- sum((a$coefmat[1,1]/a$coefmat[1,2])) ## df <- nrow(a$coefmat) ## res <- list(##data.name=hypothesis, ## statistic = Q, parameter = df, ## p.value=pchisq(Q,df=1,lower.tail=FALSE), ## method = "Test for independence") ## class(res) <- "htest" ## res } ## independence(x) ## chisq.test(table(dd)) lava/R/labels.R0000644000176200001440000003224613520655354013002 0ustar liggesusers###{{{ labels ##' Define labels of graph ##' ##' Alters labels of nodes and edges in the graph of a latent variable model ##' ##' ##' @aliases labels<- labels labels<-.default labels.lvm labels.lvmfit ##' labels.graphNEL edgelabels edgelabels<- edgelabels<-.lvm nodecolor ##' nodecolor<- nodecolor<-.default ##' @author Klaus K. Holst ##' @export ##' @keywords graphs aplot ##' @examples ##' m <- lvm(c(y,v)~x+z) ##' regression(m) <- c(v,x)~z ##' labels(m) <- c(y=expression(psi), z=expression(zeta)) ##' nodecolor(m,~y+z+x,border=c("white","white","black"), ##' labcol="white", lwd=c(1,1,5), ##' lty=c(1,2)) <- c("orange","indianred","lightgreen") ##' edgelabels(m,y~z+x, cex=c(2,1.5), col=c("orange","black"),labcol="darkblue", ##' arrowhead=c("tee","dot"), ##' lwd=c(3,1)) <- expression(phi,rho) ##' edgelabels(m,c(v,x)~z, labcol="red", cex=0.8,arrowhead="none") <- 2 ##' if (interactive()) { ##' plot(m,addstyle=FALSE) ##' } ##' ##' m <- lvm(y~x) ##' labels(m) <- list(x="multiple\nlines") ##' if (interactive()) { ##' op <- par(mfrow=c(1,2)) ##' plot(m,plain=TRUE) ##' plot(m) ##' par(op) ##' ##' d <- sim(m,100) ##' e <- estimate(m,d) ##' plot(e,type="sd") ##' } ##' @param object \code{lvm}-object. ##' @param value node label/edge label/color ##' @param to Formula specifying outcomes and predictors defining relevant ##' edges. ##' @param \dots Additional arguments (\code{lwd}, \code{cex}, \code{col}, ##' \code{labcol}), \code{border}. ##' @param var Formula or character vector specifying the nodes/variables to ##' alter. 
##' @param border Colors of borders ##' @param labcol Text label colors ##' @param shape Shape of node ##' @param lwd Line width of border ##' @usage ##' \method{labels}{default}(object, ...) <- value ##' \method{edgelabels}{lvm}(object, to, ...) <- value ##' \method{nodecolor}{default}(object, var=vars(object), ##' border, labcol, shape, lwd, ...) <- value `labels<-` <- function(object,...,value) UseMethod("labels<-") ##' @export `labels<-.default` <- function(object,...,value) { labels(object,value) } ##' @export labels.graphNEL <- function(object,lab=NULL,...) { if (is.null(lab)) return(graph::nodeRenderInfo(object)$label) graph::nodeRenderInfo(object) <- list(label=lab) names(graph::nodeRenderInfo(object)$label) <- graph::nodes(object); return(object) } ##' @export labels.lvmfit <- function(object,lab=NULL,...) { if (is.null(lab)) return(object$noderender$label) object$noderender$label <- lab return(object) } ##' @export `labels.lvm` <- function(object,lab=NULL,...) { if (is.null(lab)) return(object$noderender$label) if (is.null(object$noderender$label)) object$noderender$label <- lab else object$noderender$label[names(lab)] <- lab return(object) } ###}}} labels ###{{{ edgelabels ##' @export "edgelabels<-.lvmfit" <- function(object,to,from,est=TRUE,edges=NULL,cex=1,...,value) { if (is.null(edges)) { if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) from <- setdiff(all.vars(to),yy) to <- yy } edges <- paste(from,to,sep="~") } edges. 
<- paste0("\"", edges, "\"") fromto <- edge2pair(edges) val <- c() for (i in seq_along(edges)) { val <- c(val, formatC(effects(object,from=fromto[[i]][1],to=fromto[[i]][2],messages=0)$directef[[1]]) ) } if (est) mytext <- paste("c(", paste(paste0(edges.,"=expression(",as.character(value),"==\"",val,"\")"),collapse=","),")") else mytext <- paste("c(", paste(paste0(edges.,"=expression(",as.character(value),")"),collapse=","),")") graph::edgeRenderInfo(Graph(object))$label <- eval(parse(text=mytext)) graph::edgeRenderInfo(Graph(object))$cex[edges] <- cex return(object) } ##' @export edgelabels.lvmfit <- function(object,value,type,pthres,intercept=FALSE,format.fun=formatC,...) { if (!missing(value)) { edgelabels(object,...) <- value return(object) } if (missing(type)) return(graph::edgeRenderInfo(Graph(object))$label) Afix <- index(object)$A ## Matrix with fixed parameters and ones where parameters are free ##Pfix <- index(object)$P ## Matrix with fixed covariance parameters and ones where param ##mfix <- index(object)$v0 npar.mean <- index(object)$npar.mean Par <- object$coef mpar <- c() if (npar.mean>0) { mpar <- do.call(format.fun,list(Par[seq_len(npar.mean)])) Par <- Par[-seq_len(npar.mean),,drop=FALSE] } Par <- switch(type, sd = paste0(do.call(format.fun,list(Par[,1,drop=FALSE])), " (", do.call(format.fun,list(Par[,2,drop=FALSE])), ")"), est = do.call(format.fun,list(Par[,1,drop=FALSE])), pval = do.call(format.fun,list(Par[,4,drop=FALSE])), name = rownames(Par), none = "" ) AP <- matrices(Model(object), Par,mpar) ## Ignore expar A <- AP$A; P <- AP$P P[exogenous(object),exogenous(object)] <- NA gr <- finalize(Model(object), ...) 
Anz <- A; Anz[Afix==0] <- NA gr <- edgelabels(gr, lab=Anz) Pnz <- P; Pnz[Model(object)$cov==0] <- NA if (intercept) { idx <- which(!is.na(diag(Pnz))) diag(Pnz)[idx] <- paste(paste0("[",AP$v[idx],"]"),diag(Pnz)[idx],sep="\n") } gr <- edgelabels(gr, lab=Pnz, expr=!intercept) Graph(object) <- gr return(object) } ##' @export `edgelabels` <- function(object, ...) UseMethod("edgelabels") ##' @export `edgelabels<-` <- function(object,...,value) UseMethod("edgelabels<-") ##' @export `edgelabels<-.lvm` <- function(object,to,...,value) { edgelabels(object,to=to, lab=value,...) } ##' @export `edgelabels<-.graphNEL` <- function(object,...,value) { edgelabels(object,lab=value,...) } ##' @export `edgelabels.graphNEL` <- function(object, lab=NULL, to=NULL, from=NULL, cex=1.5, lwd=1, lty=1, col="black", labcol="black", arrowhead="closed", expr=TRUE, debug=FALSE,...) { if (is.null(lab)) { return(graph::edgeRenderInfo(object)$label) } if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) from <- all.vars(to[[3]])##setdiff(all.vars(to),yy) if (length(from)==0) from <- yy to <- yy } M <- as(object, Class="matrix") nodes <- graph::nodes(object) if (is.null(graph::edgeRenderInfo(object)$label)) graph::edgeRenderInfo(object)$label <- expression() if (!is.null(lab)) { if (!is.null(from) & !is.null(to)) { estr <- paste0("\"",from,"~",to,"\"") estr2 <- paste0(from,"~",to) if (length(lab)!=length(estr2)) lab <- rep(lab,length(estr2)) if (length(col)!=length(estr2)) col <- rep(col,length(estr2)) if (length(cex)!=length(estr2)) cex <- rep(cex,length(estr2)) if (length(lwd)!=length(estr2)) lwd <- rep(lwd,length(estr2)) if (length(lty)!=length(estr2)) lty <- rep(lty,length(estr2)) if (length(arrowhead)!=length(estr2)) arrowhead <- rep(arrowhead,length(estr2)) if (length(labcol)!=length(estr2)) labcol <- rep(labcol,length(estr2)) curedges <- names(graph::edgeRenderInfo(object)$label) Debug(estr,debug) estr2.idx <- which(estr2%in%curedges) newstr.idx <- 
setdiff(seq_along(estr2),estr2.idx) newstr <- estr2[newstr.idx] estr2 <- estr2[estr2.idx] if (length(estr2)>0) { if (!is.null(lab)) graph::edgeRenderInfo(object)$label[estr2] <- lab[estr2.idx] if (!is.null(cex)) graph::edgeRenderInfo(object)$cex[estr2] <- cex[estr2.idx] if (!is.null(col)) graph::edgeRenderInfo(object)$col[estr2] <- col[estr2.idx] if (!is.null(lwd)) graph::edgeRenderInfo(object)$lwd[estr2] <- lwd[estr2.idx] if (!is.null(lty)) graph::edgeRenderInfo(object)$lty[estr2] <- lty[estr2.idx] if (!is.null(labcol)) graph::edgeRenderInfo(object)$textCol[estr2] <- labcol[estr2.idx] if (!is.null(arrowhead)) graph::edgeRenderInfo(object)$arrowhead[estr2] <- arrowhead[estr2.idx] } if (length(newstr)>0) { if (!is.null(lab)) graph::edgeDataDefaults(object)$futureinfo$label[newstr] <- lab[newstr.idx] if (!is.null(cex)) graph::edgeDataDefaults(object)$futureinfo$cex[newstr] <- cex[newstr.idx] if (!is.null(col)) graph::edgeDataDefaults(object)$futureinfo$col[newstr] <- col[newstr.idx] if (!is.null(lwd)) graph::edgeDataDefaults(object)$futureinfo$lwd[newstr] <- lwd[newstr.idx] if (!is.null(lty)) graph::edgeDataDefaults(object)$futureinfo$lty[newstr] <- lty[newstr.idx] if (!is.null(labcol)) graph::edgeDataDefaults(object)$futureinfo$textCol[newstr] <- labcol[newstr.idx] if (!is.null(arrowhead)) graph::edgeDataDefaults(object)$futureinfo$arrowhead[newstr] <- arrowhead[newstr.idx] } return(object) } ## Used by "edgelabels.lvmfit" for (r in seq_len(nrow(M))) for (s in seq_len(ncol(M))) { if (M[r,s]!=0 & !is.na(lab[r,s])) { estr <- paste0("\"",nodes[r],"~",nodes[s],"\"") estr2 <- paste0(nodes[r],"~",nodes[s]) Debug(estr, debug) if (expr) st <- eval(parse(text=paste0("expression(",lab[r,s],")"))) else st <- lab[r,s] graph::edgeRenderInfo(object)$label[estr2] <- st } } } return(object) } ##' @export `edgelabels.lvm` <- function(object, lab=NULL, to=NULL, from=NULL, cex=1.5, lwd=1, lty=1, col="black", labcol="black", arrowhead="closed", expr=TRUE, debug=FALSE,...) 
{ if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) from <- all.vars(to[[3]]) if (length(from)==0) from <- yy to <- yy } if (is.null(lab)) { res <- c(object$edgerender$label,object$edgerender$futureinfo$label) if (!is.null(to) && !is.null(from)) { estr <- apply(Expand(from,to),1,function(x) paste0(x,collapse="~")) res <- res[estr] } return(res) } M <- object$M nodes <- colnames(M) if (is.null(object$edgerender$label)) object$edgerender$label <- expression() if (!is.null(lab)) { if (!is.null(from) & !is.null(to)) { estr <- paste0("\"",from,"~",to,"\"") estr2 <- paste0(from,"~",to) if (length(lab)!=length(estr2)) lab <- rep(lab,length(estr2)) if (length(col)!=length(estr2)) col <- rep(col,length(estr2)) if (length(cex)!=length(estr2)) cex <- rep(cex,length(estr2)) if (length(lwd)!=length(estr2)) lwd <- rep(lwd,length(estr2)) if (length(lty)!=length(estr2)) lty <- rep(lty,length(estr2)) if (length(labcol)!=length(estr2)) labcol <- rep(labcol,length(estr2)) if (length(arrowhead)!=length(estr2)) arrowhead <- rep(arrowhead,length(estr2)) curedges <- names(object$edgerender$label) Debug(estr,debug) estr2.idx <- which(estr2%in%curedges) newstr.idx <- setdiff(seq_along(estr2),estr2.idx) newstr <- estr2[newstr.idx] estr2 <- estr2[estr2.idx] if (length(estr2)>0) { if (!is.null(lab)) object$edgerenderlabel[estr2] <- lab[estr2.idx] if (!is.null(cex)) object$edgerender$cex[estr2] <- cex[estr2.idx] if (!is.null(col)) object$edgerender$col[estr2] <- col[estr2.idx] if (!is.null(lwd)) object$edgerender$lwd[estr2] <- lwd[estr2.idx] if (!is.null(lty)) object$edgerender$lty[estr2] <- lty[estr2.idx] if (!is.null(labcol)) object$edgerender$textCol[estr2] <- labcol[estr2.idx] if (!is.null(arrowhead)) object$edgerender$arrowhead[estr2] <- arrowhead[estr2.idx] } if (length(newstr)>0) { if (!is.null(lab)) object$edgerender$futureinfo$label[newstr] <- lab[newstr.idx] if (!is.null(cex)) object$edgerender$futureinfo$cex[newstr] <- cex[newstr.idx] if (!is.null(col)) 
object$edgerender$futureinfo$col[newstr] <- col[newstr.idx] if (!is.null(lwd)) object$edgerender$futureinfo$lwd[newstr] <- lwd[newstr.idx] if (!is.null(lty)) object$edgerender$futureinfo$lty[newstr] <- lty[newstr.idx] if (!is.null(labcol)) object$edgerender$futureinfo$textCol[newstr] <- labcol[newstr.idx] if (!is.null(arrowhead)) object$edgerender$futureinfo$arrowhead[newstr] <- arrowhead[newstr.idx] } return(object) } ## Used by "edgelabels.lvmfit" for (r in seq_len(nrow(M))) for (s in seq_len(ncol(M))) { if (M[r,s]!=0 & !is.na(lab[r,s])) { estr <- paste0("\"",nodes[r],"~",nodes[s],"\"") estr2 <- paste0(nodes[r],"~",nodes[s]) Debug(estr, debug) if (expr) st <- eval(parse(text=paste0("expression(",lab[r,s],")"))) else st <- lab[r,s] object$edgerender$label[estr2] <- st } } } return(object) } ###}}} edgelabels lava/R/NA2x.R0000644000176200001440000000110213520655354012273 0ustar liggesusers##' Convert to/from NA ##' ##' Convert vector to/from NA ##' @aliases NA2x x2NA ##' @param s The input vector (of arbitrary class) ##' @param x The elements to transform into \code{NA} resp. what to transform ##' \code{NA} into. ##' @return A vector with same dimension and class as \code{s}. ##' @author Klaus K. 
Holst ##' @keywords manip ##' @examples##' ##' x2NA(1:10, 1:5) ##' NA2x(x2NA(c(1:10),5),5)##' ##' @export NA2x <- function(s,x=0) { sapply(s, function(y) ifelse(is.na(y),x,y) ) } ##' @export x2NA <- function(s,x=0) { sapply(s, function(y) ifelse(y%in%x,NA,y) ) } lava/R/csplit.R0000644000176200001440000000321013520655354013023 0ustar liggesusers##' Split data into folds ##' ##' @title Split data into folds ##' @param x Data or integer (size) ##' @param p Number of folds, or if a number between 0 and 1 is given two folds of size p and (1-p) will be returned ##' @param replace With or with-out replacement ##' @param return.index If TRUE index of folds are returned otherwise the actual data splits are returned (default) ##' @param k (Optional, only used when p=NULL) number of folds without shuffling ##' @param ... additional arguments to lower-level functions ##' @export ##' @aliases csplit foldr ##' @examples ##' foldr(5,2,rep=2) ##' csplit(10,3) ##' csplit(iris[1:10,]) ## Split in two sets 1:(n/2) and (n/2+1):n ##' csplit(iris[1:10,],0.5) ##' @author Klaus K. Holst csplit <- function(x,p=NULL,replace=FALSE,return.index=FALSE,k=2,...) 
{ if (length(x)==1 & is.numeric(x)) x <- seq(x) N <- NROW(x) if (is.null(p)) { ## K <- base::round(N/k) idx <- split(seq(N),sort(rep(seq(k),length.out=N,each=K))) } else { if (p<1) { ## two folds (size N*p and N*(1-p)) idx1 <- base::sample(N,base::round(p*N),replace=replace) idx <- list(idx1, base::sample(setdiff(seq(N),idx1),replace=replace)) } else { ## Number of folds (equal size) idx <- split(sample(seq(N)), rep(seq(p), length=N)) } } if (return.index) return(idx) if (!is.vector(x)) { return(lapply(idx,function(ii) x[ii,,drop=FALSE])) } return(lapply(idx,function(ii) x[ii])) } ##' @export foldr <- function(n,K=5,rep=10) { replicate(rep,split(sample(seq(n)), rep(seq(K), length=n)),simplify=FALSE) } lava/R/variances.R0000644000176200001440000000116113520655354013503 0ustar liggesusers### Return position of variance elements in the parameter vector (without mean parameters) ### Optimization constraints are needed on these parameters ##' @export variances <- function(x,mean=FALSE) { ## if (is.null(x$parpos)) ## x$parpos <- parpos(x) x$parpos <- parpos(Model(x),mean=TRUE) res <- diag(x$parpos$P)[which(diag(index(x)$P0)==1)] if (!mean) { return(res - index(x)$npar.mean) } return(res) } ## And the off-diagonal (covariance) parameters ##' @export offdiags <- function(x,mean=FALSE) { parpos <- parpos(x,mean=mean) pp <- parpos$P pp[lower.tri(pp)][(index(x)$P0)[lower.tri(pp)]==1] } lava/R/logLik.R0000644000176200001440000002725713520655354012767 0ustar liggesusers###{{{ logLik.lvm ##' @export logLik.lvm <- function(object,p,data,model="gaussian",indiv=FALSE,S,mu,n,debug=FALSE,weights=NULL,data2=NULL,...) 
{ cl <- match.call() xfix <- colnames(data)[(colnames(data)%in%parlabels(object,exo=TRUE))] constr <- lapply(constrain(object), function(z)(attributes(z)$args)) xconstrain <- intersect(unlist(constr), manifest(object)) xconstrainM <- TRUE if (length(xconstrain)>0) { constrainM <- names(constr)%in%unlist(object$mean) for (i in seq_len(length(constr))) { if (!constrainM[i]) { if (any(constr[[i]]%in%xconstrain)) xconstrainM <- FALSE } } } Debug(xfix,debug) if (missing(n)) { n <- nrow(data) if (is.null(n)) n <- data$n } lname <- paste0(model,"_logLik.lvm") logLikFun <- get(lname) if (length(xfix)>0 | (length(xconstrain)>0 & !xconstrainM & !lava.options()$test & model!="gaussian")) { ##### Random slopes! x0 <- object if (length(xfix)>0) { Debug("random slopes...",debug) nrow <- length(vars(object)) xpos <- lapply(xfix,function(y) which(regfix(object)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(myfix$var)) for (j in seq_along(myfix$col[[i]])) { regfix(x0, from=vars(x0)[myfix$row[[i]][j]],to=vars(x0)[myfix$col[[i]][j]]) <- data[1,myfix$var[[i]]] } index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) } myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { index(x0)$A[cbind(myfix$row[[i]],myfix$col[[i]])] <- data[ii,myfix$var[[i]]] } return(logLikFun(x0,data=data[ii,,drop=FALSE], p=p,weights=weights[ii,,drop=FALSE],data2=data2[ii,,drop=FALSE], model=model,debug=debug,indiv=indiv,...)) } loglik <- sapply(seq_len(nrow(data)),myfun) if (!indiv) { loglik <- sum(loglik) n <- nrow(data) attr(loglik, "nall") <- n attr(loglik, "nobs") <- n attr(loglik, "df") <- length(p) class(loglik) <- "logLik" } return(loglik) } if (xconstrainM) { xconstrain <- c() for (i in seq_len(length(constrain(object)))) { z <- constrain(object)[[i]] xx <- intersect(attributes(z)$args,manifest(object)) if (length(xx)>0) { warg <- 
setdiff(attributes(z)$args,xx) wargidx <- which(attributes(z)$args%in%warg) exoidx <- which(attributes(z)$args%in%xx) parname <- names(constrain(object))[i] y <- names(which(unlist(lapply(intercept(object),function(x) x==parname)))) el <- list(i,y,parname,xx,exoidx,warg,wargidx,z) names(el) <- c("idx","endo","parname","exo","exoidx","warg","wargidx","func") xconstrain <- c(xconstrain,list(el)) } } if (length(xconstrain)>0) { yconstrain <- unlist(lapply(xconstrain,function(x) x$endo)) iconstrain <- unlist(lapply(xconstrain,function(x) x$idx)) Mu <- matrix(0,nrow(data),length(vars(object))); colnames(Mu) <- vars(object) M <- modelVar(object,p=p,data=data) M$parval <- c(M$parval, object$mean[unlist(lapply(object$mean,is.numeric))]) for (i in seq_len(length(xconstrain))) { pp <- unlist(M$parval[xconstrain[[i]]$warg]); myidx <- with(xconstrain[[i]],order(c(wargidx,exoidx))) mu <- with(xconstrain[[i]], apply(data[,exo,drop=FALSE],1, function(x) { func(unlist(c(pp,x))[myidx]) })) Mu[,xconstrain[[i]]$endo] <- mu } offsets <- Mu%*%t(M$IAi)[,endogenous(object),drop=FALSE] object$constrain[iconstrain] <- NULL object$mean[yconstrain] <- 0 loglik <- do.call(lname, c(list(object=object,p=p,data=data,indiv=indiv,weights=weights,data2=data2,offset=offsets),list(...))) } else { cl[[1]] <- logLikFun loglik <- eval.parent(cl) } } else { loglik <- 0 if (length(xconstrain)>0 && NROW(data)>1) { for (ii in seq(nrow(data))) { cl$data <- data[ii,] cl$weights <- weights[ii,] cl$data2 <- data2[ii,] loglik <- loglik+eval.parent(cl) } } else { cl[[1]] <- logLikFun loglik <- eval.parent(cl) } } if (is.null(attr(loglik,"nall"))) attr(loglik, "nall") <- n if (is.null(attr(loglik,"nobs"))) attr(loglik, "nobs") <- n##-length(p) if (is.null(attr(loglik,"df"))) attr(loglik, "df") <- length(p) class(loglik) <- "logLik" return(loglik) } ###}}} ###{{{ gaussian_loglik ##' @export gaussian_logLik.lvm <- function(object,p,data, type=c("cond","sim","exo","sat","cond2"), weights=NULL, indiv=FALSE, S, mu, n, 
offset=NULL, debug=FALSE, meanstructure=TRUE,...) { exo.idx <- with(index(object), exo.obsidx) endo.idx <- with(index(object), endo.obsidx) if (type[1]=="exo") { if (length(exo.idx)==0 || any(is.na(exo.idx))) return(0) } cl <- match.call() if (type[1]=="cond") { cl$type <- "sim" L0 <- eval.parent(cl) cl$type <- "exo" L1 <- eval.parent(cl) loglik <- L0-L1 return(loglik) } if (missing(n)) { if (is.vector(data)) n <- 1 else n <- nrow(data) } k <- length(index(object)$manifest) if (!is.null(offset) && type[1]!="exo") { data[,colnames(offset)] <- data[,colnames(offset)]-offset } if (type[1]=="sat") { if (missing(S)) { d0 <- procdata.lvm(object,data=data) S <- d0$S; mu <- d0$mu; n <- d0$n } if (missing(p)) p <- rep(1,length(coef(object))) L1 <- logLik(object,p,data,type="exo",meanstructure=meanstructure) ## Sigma <- (n-1)/n*S ## ML = 1/n * sum((xi-Ex)^2) Sigma <- S loglik <- -(n*k)/2*log(2*base::pi) -n/2*(log(det(Sigma)) + k) - L1 P <- length(endo.idx) k <- length(exo.idx) npar <- P*(1+(P-1)/2) if (meanstructure) npar <- npar+ (P*k + P) attr(loglik, "nall") <- n attr(loglik, "nobs") <- n attr(loglik, "df") <- npar class(loglik) <- "logLik" return(loglik) } myidx <- switch(type[1], sim = seq_along(index(object)$manifest), cond = { endo.idx }, cond2 = { endo.idx }, exo = { exo.idx } ) mom <- moments(object, p, conditional=(type[1]=="cond2"), data=data) if (!lava.options()$allow.negative.variance && any(diag(mom$P)<0)) return(NaN) C <- mom$C xi <- mom$xi if (type[1]=="exo") { C <- C[exo.idx,exo.idx,drop=FALSE] xi <- xi[exo.idx,drop=FALSE] } Debug(list("C=",C),debug) k <- nrow(C) iC <- Inverse(C,det=TRUE, symmetric = TRUE) detC <- attributes(iC)$det if (!is.null(weights)) { weights <- cbind(weights) K <- length(exo.idx)+length(endo.idx) if (ncol(weights)!=1 & ncol(weights)!=K) { w.temp <- weights weights <- matrix(1,nrow=nrow(weights),ncol=K) weights[,endo.idx] <- w.temp } if (type=="exo") weights <- NULL } notdatalist <- (!is.list(data) | is.data.frame(data)) if 
(missing(n)) if (!missing(data)) n <- NROW(data) if (!missing(n)) if (notdatalist & (n<2 | indiv | !is.null(weights))) { if (n==1) data <- rbind(data) res <- numeric(n) data <- data[,index(object)$manifest,drop=FALSE] loglik <- 0; for (i in seq_len(n)) { ti <- as.numeric(data[i,myidx]) if (meanstructure) { ti <- cbind(ti-as.numeric(xi)) } if (!is.null(weights)) { W <- diag(weights[i,],nrow=length(weights[i,])) val <- -k/2*log(2*base::pi) -1/2*log(detC) - 1/2*(t(ti)%*%W)%*%iC%*%(ti) } else { val <- -k/2*log(2*base::pi) -1/2*log(detC) - 1/2*t(ti)%*%iC%*%(ti) } if (indiv) res[i] <- val loglik <- loglik + val } if (indiv) return(res) } else { if (missing(S)) { d0 <- procdata.lvm(object,data=data) S <- d0$S; mu <- d0$mu; n <- d0$n } S <- S[myidx,myidx,drop=FALSE] mu <- mu[myidx,drop=FALSE] T <- S if (meanstructure) { W <- crossprod(rbind(mu-xi)) T <- S+W } loglik <- -(n*k)/2*log(2*base::pi) -n/2*(log(detC) + tr(T%*%iC)) } return(loglik) } ###}}} ###{{{ logLik.lvmfit ##' @export logLik.lvmfit <- function(object, p=coef(object), data=model.frame(object), model=object$estimator, weights=Weights(object), data2=object$data$data2, ...) { logLikFun <- paste0(model,"_logLik.lvm") if (!exists(logLikFun)) { model <- "gaussian" } l <- logLik.lvm(object$model0,p,data,model=model,weights=weights, data2=data2, ...) return(l) } ###}}} logLik.lvmfit ###{{{ logLik.lvm.missing ##' @export logLik.lvm.missing <- function(object, p=pars(object), model=object$estimator, weights=Weights(object$estimate), ...) { logLik(object$estimate$model0, p=p, model=model, weights=weights, ...) } ###}}} ###{{{ logLik.multigroup ##' @export logLik.multigroup <- function(object,p,data=object$data,weights=NULL,type=c("cond","sim","exo","sat"),...) 
{ res <- procrandomslope(object) pp <- with(res, modelPar(model,p)$p) if (type[1]=="sat") { n <- 0 df <- 0 loglik <- 0 for (i in seq_len(object$ngroup)) { m <- Model(object)[[i]] L <- logLik(m,p=pp[[i]],data=object$data[[i]],type="sat") df <- df + attributes(L)$df loglik <- loglik + L n <- n + object$samplestat[[i]]$n } attr(loglik, "nall") <- n attr(loglik, "nobs") <- n##-df attr(loglik, "df") <- df class(loglik) <- "logLik" return(loglik) } n <- 0 loglik <- 0; for (i in seq_len(object$ngroup)) { n <- n + object$samplestat[[i]]$n val <- logLik(object$lvm[[i]],pp[[i]],data[[i]],weights=weights[[i]],type=type,...) loglik <- loglik + val } attr(loglik, "nall") <- n attr(loglik, "nobs") <- n##-length(p) attr(loglik, "df") <- length(p) class(loglik) <- "logLik" return(loglik) } ###}}} logLik.multigroup ###{{{ logLik.multigroupfit ##' @export logLik.multigroupfit <- function(object, p=pars(object), weights=Weights(object), model=object$estimator, ...) { logLik(object$model0,p=p,weights=weights,model=model,...) } ###}}} logLik.multigroup lava/R/kill.R0000644000176200001440000000441013520655354012463 0ustar liggesusers##' Generic method for removing elements of object ##' ##' @title Remove variables from (model) object. ##' @aliases rmvar rmvar<- kill kill<- ##' @param x Model object ##' @param value Vector of variables or formula specifying which nodes to ##' remove ##' @param \dots additional arguments to lower level functions ##' @usage ##' rmvar(x, ...) <- value ##' @seealso \code{cancel} ##' @author Klaus K. Holst ##' @keywords models regression ##' @export ##' @examples ##' m <- lvm() ##' addvar(m) <- ~y1+y2+x ##' covariance(m) <- y1~y2 ##' regression(m) <- c(y1,y2) ~ x ##' ### Cancel the covariance between the residuals of y1 and y2 ##' cancel(m) <- y1~y2 ##' ### Remove y2 from the model ##' rmvar(m) <- ~y2 ##' "rmvar" <- function(x, ...) UseMethod("rmvar") ##' @export "kill" <- function(x, ...) 
UseMethod("kill") ##' @export "kill<-" <- function(x, ..., value) UseMethod("kill<-") ##' @export "rmvar<-" <- function(x, ..., value) UseMethod("rmvar<-") ##' @export "kill<-.lvm" <- function(x, ..., value) { kill(x,value) } ##' @export "rmvar<-.lvm" <- get("kill<-.lvm") ##' @export "kill.lvm" <- function(x, value, ...) { if (inherits(value,"formula")) value <- all.vars(value) idx <- which(names(x$exfix)%in%value) if (length(idx)>0) { x$attributes$parameter[idx] <- x$expar[idx] <- x$exfix[idx] <- NULL if (length(x$exfix)==0) { x$exfix <- x$expar <- x$attributes$parameter <- NULL } index(x) <- reindex(x) } idx <- which(vars(x)%in%value) if (length(idx)!=0){ vv <- vars(x)[idx] keep <- setdiff(seq_along(vars(x)),idx) x$M <- x$M[keep,keep,drop=FALSE] x$par <- x$par[keep,keep,drop=FALSE] x$fix <- x$fix[keep,keep,drop=FALSE] x$covpar <- x$covpar[keep,keep,drop=FALSE] x$covfix <- x$covfix[keep,keep,drop=FALSE] x$cov <- x$cov[keep,keep,drop=FALSE] x$mean <- (x$mean)[-idx] x$exogenous <- setdiff(exogenous(x),vv) x$latent[vv] <- NULL } else{ ## remove variables that cannot be accessed by vars in the hook vv <- value } myhooks <- gethook("remove.hooks") for (f in myhooks) { x <- do.call(f, list(x=x,var=vv,...)) } index(x) <- reindex(x) return(x) } ##' @export "rmvar.lvm" <- get("kill.lvm") lava/R/correlation.R0000644000176200001440000000643713520655354014064 0ustar liggesusers##' Generic correlation method ##' ##' @title Generic method for extracting correlation coefficients of model object ##' @param x Object ##' @param \dots Additional arguments ##' @author Klaus K. Holst ##' @export "correlation" <- function(x,...) UseMethod("correlation") ##' @export correlation.lvmfit <- function(x,z=TRUE,iid=FALSE,back.transform=TRUE,...) 
{ pp <- matrices2(Model(x), with(index(x),seq_len(npar+npar.mean+npar.ex)))$P pos <- pp[lower.tri(pp)][(index(x)$P0)[lower.tri(pp)]==1] if (length(pos)<1) return(NULL) pp0 <- pp pp0[index(x)$P0!=1] <- 0; pp0[lower.tri(pp0)] <- 0 mynames <- vars(x) n <- nrow(pp0) ff <- function(p) { res <- numeric(length(pos)) nn <- character(length(pos)) for (i in seq_along(pos)) { p0 <- pos[i] idx <- which(pp0==p0) rowpos <- (idx-1)%%n + 1 colpos <- ceiling(idx/n) coefpos <- c(p0,pp0[rbind(c(rowpos,rowpos),c(colpos,colpos))]) pval <- pp[rbind(c(rowpos,rowpos),c(colpos,colpos))] phi.v1.v2 <- numeric(3); newval <- p[coefpos] phi.v1.v2[coefpos!=0] <- newval phi.v1.v2[coefpos==0] <- pval[tail(coefpos==0,2)] rho <- atanh(phi.v1.v2[1]/sqrt(prod(phi.v1.v2[-1]))) res[i] <- rho nn[i] <- paste(mynames[c(rowpos,colpos)],collapse="~") } structure(res,names=nn) } V <- NULL if (!iid) V <- vcov(x) if (back.transform) { back.transform <- tanh } else { back.transform <- NULL } estimate(x,coef=coef(x),vcov=V,f=ff,back.transform=back.transform,iid=iid,...) } ##' @export correlation.matrix <- function(x,z=TRUE,back.transform=TRUE,mreg=FALSE,return.all=FALSE,...) { if (mreg) { m <- lvm() covariance(m,pairwise=TRUE) <- colnames(x) try(e <- estimate(m,as.data.frame(x),...),silent=TRUE) res <- correlation(e,...) 
if (return.all) { return(list(model=m,estimate=e,correlation=res)) } return(res) } if (ncol(x)==2) { ii <- iid(x) ee <- estimate(coef=attributes(ii)$coef[3:5], iid=ii[,3:5]) if (z) { if (back.transform) { ee <- estimate(ee, function(x) atanh(x[2]/sqrt(x[1]*x[3])), back.transform=tanh) } else { ee <- estimate(ee, function(x) atanh(x[2]/sqrt(x[1]*x[3]))) } } else { ee <- estimate(ee, function(x) x[2]/sqrt(x[1]*x[3])) } return(ee) } e <- c() R <- diag(nrow=ncol(x)) dimnames(R) <- list(colnames(x),colnames(x)) for (i in seq(ncol(x)-1)) for (j in seq(i+1,ncol(x))) { e <- c(e,list(correlation(x[,c(i,j)],z=z,back.transform=FALSE,...))) R[j,i] <- coef(e[[length(e)]]) if (z) R[j,i] <- tanh(R[j,i]) } R <- R[-1,-ncol(R),drop=FALSE] res <- do.call(merge, c(e, paired=TRUE)) if (z && back.transform) { res <- estimate(res,back.transform=tanh, print=function(x,digits=1,...) { print(x$coefmat[,-2,drop=FALSE],...) cat("\n") print(offdiag(R,type=4),digits=digits,...) }) } return(res) } ##' @export correlation.data.frame <- function(x,...) { correlation(as.matrix(x),...) } lava/R/By.R0000644000176200001440000000256113520655354012107 0ustar liggesusers##' Apply a Function to a Data Frame Split by Factors ##' ##' Simple wrapper of the 'by' function ##' @title Apply a Function to a Data Frame Split by Factors ##' @param x Data frame ##' @param INDICES Indices (vector or list of indices, vector of column names, or formula of column names) ##' @param FUN A function to be applied to data frame subsets of 'data'. ##' @param COLUMNS (Optional) subset of columns of x to work on ##' @param array if TRUE an array/matrix is always returned ##' @param ... Additional arguments to lower-level functions ##' @author Klaus K. Holst ##' @export ##' @examples ##' By(datasets::CO2,~Treatment+Type,colMeans,~conc) ##' By(datasets::CO2,~Treatment+Type,colMeans,~conc+uptake) By <- function(x,INDICES,FUN,COLUMNS,array=FALSE,...) 
{ if (inherits(INDICES,"formula")) { INDICES <- as.list(model.frame(INDICES,x)) } else { if (is.character(INDICES) && length(INDICES)!=nrow(x)) { INDICES <- as.list(x[,INDICES,drop=FALSE]) } } if (!missing(COLUMNS)) { if (inherits(COLUMNS,"formula")) { x <- model.frame(COLUMNS,x) } else { x <- x[,COLUMNS,drop=FALSE] } } a <- by(x, INDICES, FUN=FUN, ...) if (NCOL(x)==1 && !array) { ##DimElem <- length(a[rep(1,length(dim(a)))][[1]]) a <- a[] attr(a,"call") <- NULL } return(a) } lava/R/transform.R0000644000176200001440000000620213520655354013544 0ustar liggesusers##' @export "transform<-" <- function(`_data`,...,value) UseMethod("transform<-") ##' @export "transform<-.lvm" <- function(`_data`,formula=NULL,...,value) { transform(`_data`,formula,value,...) } ##' @export print.transform.lvm <- function(x,...) { for (i in seq_along(x)) { cat("Variable: ", names(x)[i],"\n",sep="") cat("Transformation: (",paste0(x[[i]]$x,collapse=","),") -> ",sep="") print(x[[i]]$fun) cat("\n") } invisible(x) } ##' @export "transform.lvm" <- function(`_data`,formula,value,post=TRUE,y,x,...) { if (missing(formula)) { if (length(tr <- `_data`$attributes$transform)==0) { return(NULL) } return(structure(`_data`$attributes$transform,class="transform.lvm")) } if (!missing(y) && !missing(x)) { xx <- x } else { if (is.character(formula)) { y <- NULL; xx <- formula } else { y <- getoutcome(formula) xx <- attributes(y)$x } } if (length(xx)==0) { xx <- y; y <- NULL } if (length(y)==0) { if (post) { `_data`$constrainY[xx] <- NULL `_data`$constrain[xx] <- NULL if (is.null(`_data`$attributes$selftransform)) `_data`$attributes$selftransform <- list() `_data`$attributes$selftransform[[xx]] <- value return(`_data`) } `_data`$attributes$selftransform[xx] <- NULL constrain(`_data`,xx,y,...) 
<- value return(`_data`) } `_data`$attributes$selftransform[y] <- NULL addvar(`_data`) <- y intercept(`_data`,y) <- 0; covariance(`_data`,y) <- 0 if (is.null(`_data`$attributes$transform)) `_data`$attributes$transform <- list() if (is.null(value)) `_data`$attributes$transform[y] <- NULL else { if (length(y)>1) { if (is.null(`_data`$attributes$multitransform)) `_data`$attributes$multitransform <- list() `_data`$attributes$multitransform for (yi in y) { `_data`$attributes$transform[yi] <- NULL } rmidx <- c() for (i in seq_along(`_data`$attributes$multitransform)) { l <- `_data`$attributes$multitransform[[i]] if (any(y%in%letters)) rmidx <- c(rmidx,i) } if (length(rmidx)>0) `_data`$attributes$transform[rmidx] <- NULL `_data`$attributes$multitransform <- c(`_data`$attributes$multitransform, list(list(fun=value,y=y,x=xx))) } else { `_data`$attributes$transform[[y]] <- list(fun=value,x=xx) } } return(`_data`) } addhook("plothook.transform","plot.post.hooks") plothook.transform <- function(x,...) { trans <- x$attributes$transform transnames <- names(trans) for (v in transnames) { xx <- trans[[v]][["x"]] if (length(xx)>0) { x <- regression(x,x=xx,y=v) edgelabels(x,from=xx,to=v,col="gray70") <- "" } } return(x) } lava/R/twostage.R0000644000176200001440000004650213520655354013375 0ustar liggesuserstwostagelvm <- function(object, model2, formula=NULL, model.object=FALSE, predict.fun=NULL, type="quadratic",...) 
{ if (!inherits(model2,c("lvm"))) stop("Expected lava object ('lvm',...)") if (!is.null(formula)) { model2 <- nonlinear(model2, formula, type=type) } nonlin <- NULL val <- nonlinear(model2) if (is.null(formula) && length(val)==0 && length(nonlinear(object))>0) { val <- nonlinear(object) } xnam <- c() if (length(val)>0) { predict.fun <- NULL for (i in seq_along(val)) { if (!all(val[[i]]$newx%in%xnam)) { xnam <- union(xnam,val[[i]]$newx) predict.fun <- c(predict.fun, list(val[[i]]$pred)) } model2$attributes$nonlinear <- NULL if (inherits(object,"lvmfit")) { object$model$attributes$nonlinear <- NULL } model2 <- regression(model2, to=names(val)[i], from=val[[i]]$newx) } nonlin <- val } if (model.object) { model <- Model(object) %++% model2 cl <- match.call(expand.dots=TRUE) cl[[1]] <- twostage cl$object <- object cl$model2 <- model2 cl$predict.fun <- predict.fun cl["model.object"] <- NULL return(structure(list(model=model, nonlinear=nonlin, call=cl), class="twostage.lvm")) } res <- c(list(object=object, model2=model2), list(...)) res$predict.fun <- predict.fun res$nonlinear <- val return(res) } uhat <- function(p=coef(model1), model1, data=model.frame(model1), nlobj) { if (!is.function(nlobj)) { predict.fun <- lapply(nlobj, function(x) x[["pred"]]) } else { predict.fun <- nlobj } if (inherits(model1, "lvm.mixture")) { if (is.list(predict.fun)) { unams <- lapply(nlobj,function(x) x$newx) unam <- unique(unlist(unams)) res <- matrix(0, NROW(data), ncol=length(unam)) colnames(res) <- unam for (i in seq_along(predict.fun)) { res[, unams[[i]]] <- predict(model1, p=p, data=data, predict.fun=predict.fun[[i]]) } return(res) } else { Pr <- cbind(predict(model1, p=p, data=data, predict.fun=predict.fun)) return(Pr) } ##P <- list(mean=Pr, var=attr(Pr,"cond.var")) } else { P <- predictlvm(model1, p=p, data=data) } if (is.list(predict.fun)) { unams <- lapply(nlobj,function(x) x$newx) unam <- unique(unlist(unams)) args <- list(P$mean, P$var, data) res <- matrix(0, NROW(data), 
ncol=length(unam)) colnames(res) <- unam for (i in seq_along(predict.fun)) { res[, unams[[i]]] <- do.call(predict.fun[[i]], args) } return(res) } return(cbind(predict.fun(P$mean, P$var, model.frame(model1)))) } ##' Two-stage estimator ##' ##' Generic function. ##' ##' @seealso twostage.lvm twostage.lvmfit twostage.lvm.mixture twostage.estimate ##' @export ##' @param object Model object ##' @param ... Additional arguments to lower level functions "twostage" <- function(object,...) UseMethod("twostage") ##' Two-stage estimator (non-linear SEM) ##' ##' Two-stage estimator for non-linear structural equation models ##' @export ##' @param object Stage 1 measurement model ##' @param model2 Stage 2 SEM ##' @param data data.frame ##' @param predict.fun Prediction of latent variable ##' @param id1 Optional id-variable (stage 1 model) ##' @param id2 Optional id-variable (stage 2 model) ##' @param all If TRUE return additional output (naive estimates) ##' @param formula optional formula specifying non-linear relation ##' @param std.err If FALSE calculations of standard errors will be skipped ##' @param ... Additional arguments to lower level functions ##' @aliases twostage.lvmfit twostage.lvm twostage.lvm.mixture twostage.estimate nonlinear nonlinear<- ##' @examples ##' m <- lvm(c(x1,x2,x3)~f1,f1~z, ##' c(y1,y2,y3)~f2,f2~f1+z) ##' latent(m) <- ~f1+f2 ##' d <- simulate(m,100,p=c("f2,f2"=2,"f1,f1"=0.5),seed=1) ##' ##' ## Full MLE ##' ee <- estimate(m,d) ##' ##' ## Manual two-stage ##' \dontrun{ ##' m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 ##' e1 <- estimate(m1,d) ##' pp1 <- predict(e1,f1~x1+x2+x3) ##' ##' d$u1 <- pp1[,] ##' d$u2 <- pp1[,]^2+attr(pp1,"cond.var")[1] ##' m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta ##' e2 <- estimate(m2,d) ##' } ##' ##' ## Two-stage ##' m1 <- lvm(c(x1,x2,x3)~f1,f1~z); latent(m1) <- ~f1 ##' m2 <- lvm(c(y1,y2,y3)~eta,c(y1,eta)~u1+u2+z); latent(m2) <- ~eta ##' pred <- function(mu,var,data,...) 
##' cbind("u1"=mu[,1],"u2"=mu[,1]^2+var[1]) ##' (mm <- twostage(m1,model2=m2,data=d,predict.fun=pred)) ##' ##' if (interactive()) { ##' pf <- function(p) p["eta"]+p["eta~u1"]*u + p["eta~u2"]*u^2 ##' plot(mm,f=pf,data=data.frame(u=seq(-2,2,length.out=100)),lwd=2) ##' } ##' ##' ## Splines ##' f <- function(x) cos(2*x)+x+-0.25*x^2 ##' m <- lvm(x1+x2+x3~eta1, y1+y2+y3~eta2, latent=~eta1+eta2) ##' functional(m, eta2~eta1) <- f ##' d <- sim(m,500,seed=1,latent=TRUE) ##' m1 <- lvm(x1+x2+x3~eta1,latent=~eta1) ##' m2 <- lvm(y1+y2+y3~eta2,latent=~eta2) ##' mm <- twostage(m1,m2,formula=eta2~eta1,type="spline") ##' if (interactive()) plot(mm) ##' ##' nonlinear(m2,type="quadratic") <- eta2~eta1 ##' a <- twostage(m1,m2,data=d) ##' if (interactive()) plot(a) ##' ##' kn <- c(-1,0,1) ##' nonlinear(m2,type="spline",knots=kn) <- eta2~eta1 ##' a <- twostage(m1,m2,data=d) ##' x <- seq(-3,3,by=0.1) ##' y <- predict(a, newdata=data.frame(eta1=x)) ##' ##' if (interactive()) { ##' plot(eta2~eta1, data=d) ##' lines(x,y, col="red", lwd=5) ##' ##' p <- estimate(a,f=function(p) predict(a,p=p,newdata=x))$coefmat ##' plot(eta2~eta1, data=d) ##' lines(x,p[,1], col="red", lwd=5) ##' confband(x,lower=p[,3],upper=p[,4],center=p[,1], polygon=TRUE, col=Col(2,0.2)) ##' ##' l1 <- lm(eta2~splines::ns(eta1,knots=kn),data=d) ##' p1 <- predict(l1,newdata=data.frame(eta1=x),interval="confidence") ##' lines(x,p1[,1],col="green",lwd=5) ##' confband(x,lower=p1[,2],upper=p1[,3],center=p1[,1], polygon=TRUE, col=Col(3,0.2)) ##' } ##' ##' \dontrun{ ## Reduce timing ##' ## Cross-validation example ##' ma <- lvm(c(x1,x2,x3)~u,latent=~u) ##' ms <- functional(ma, y~u, value=function(x) -.4*x^2) ##' d <- sim(ms,500)#,seed=1) ##' ea <- estimate(ma,d) ##' ##' mb <- lvm() ##' mb1 <- nonlinear(mb,type="linear",y~u) ##' mb2 <- nonlinear(mb,type="quadratic",y~u) ##' mb3 <- nonlinear(mb,type="spline",knots=c(-3,-1,0,1,3),y~u) ##' mb4 <- nonlinear(mb,type="spline",knots=c(-3,-2,-1,0,1,2,3),y~u) ##' ff <- 
lapply(list(mb1,mb2,mb3,mb4), ##' function(m) function(data,...) twostage(ma,m,data=data,st.derr=FALSE)) ##' a <- cv(ff,data=d,rep=1,mc.cores=1) ##' a ##'} twostage.lvmfit <- function(object, model2, data=NULL, predict.fun=NULL, id1=NULL, id2=NULL, all=FALSE, formula=NULL, std.err=TRUE, ...) { if (!is.null(predict.fun)) { object$attributes$nonlinear <- list() model2$attributes$nonlinear <- list() } val <- twostagelvm(object=object,model2=model2,predict.fun=predict.fun, id1=id1, id2=id2, all=all, formula=formula, ...) object <- val$object model2 <- val$model2 predict.fun <- val$predict.fun p1 <- coef(object) if (length(val$nonlinear)==0) { val$nonlinear <- predict.fun } pp <- uhat(p1,object,nlobj=val$nonlinear) newd <- data newd[,colnames(pp)] <- pp model2 <- estimate(model2,data=newd,...) p2 <- coef(model2) if (std.err) { if (is.null(id1)) id1 <- seq(nrow(model.frame(object))) if (is.null(id2)) id2 <- seq(nrow(model.frame(model2))) model1 <- object if (!inherits(object,"estimate")) { model1 <- estimate(NULL,coef=p1,id=id1,iid=iid(object)) } e2 <- estimate(model2, id=id2) U <- function(alpha=p1,beta=p2) { pp <- uhat(alpha,object,nlobj=val$nonlinear) newd <- model.frame(model2) newd[,colnames(pp)] <- pp score(model2,p=beta,data=newd) } Ia <- numDeriv::jacobian(function(p) U(p),p1) stacked <- stack(model1,e2,Ia) } else { e2 <- estimate(coef=p2,vcov=NA) } coef <- model2$coef res <- model2 res$estimator <- "generic" if (std.err) { res[names(stacked)] <- stacked cc <- stacked$coefmat[,c(1,2)]; cc <- cbind(cc,cc[,1]/cc[,2],stacked$coefmat[,5]) coef[,] <- cc res$coef <- coef res$vcov <- vcov(stacked) if (all) { res$naive <- model2 res$naive.robust <- e2 } } else { res$coef[,-1] <- NA } res$fun <- predict.fun res$estimate1 <- object res$estimate2 <- model2 res$nonlinear <- val$nonlinear structure(res,class=c("twostage.lvmfit","measurement.error","lvmfit","estimate")) } ##' @export estimate.twostage.lvm <- function(x,data,...) 
{ if (missing(data)) stop("'data' needed") m1 <- x$call$object m2 <- x$call$model2 nl <- x$nonlinear if (!inherits(m1,"lvmfit")) { args <- c(list(x=m1, data=data), list(...)) args <- args[intersect(names(as.list(base::args(estimate.lvm))),names(args))] m1 <- do.call(estimate, args) } m2$attributes$nonlinear <- nl twostage(object=m1,model2=m2,data=data,predict.fun=nl[[1]]$pred,...) } ##' @export twostage.twostage.lvm <- function(object,...) estimate.twostage.lvm(object,...) ##' @export twostage.lvm <- function(object,model2,data=NULL, ...) { if (is.null(data)) { return(twostagelvm(object=object, model2=model2, model.object=TRUE, ...)) } args <- c(list(x=object, data=data), list(...)) args <- args[intersect(names(as.list(base::args(estimate.lvm))),names(args))] e1 <- do.call(estimate, args) twostage(object=e1,model2=model2,data=data, ...) } ##' @export twostage.lvm.mixture <- twostage.lvmfit ##' @export twostage.estimate <- twostage.lvmfit ##' @export print.twostage.lvm <- function(x,...) { printline() cat("Model 1:\n") print(Model(x$call$object)) printline() cat("Model 2:\n") print(Model(x$call$model2)) } ##' @export plot.twostage.lvm <- function(x,...) { model <- x$model m1 <- Model(x$call$object) m2 <- x$call$model2 nl <- nonlinear(x) model <- regression(model, to=nl[[1]]$newx, from=nl[[1]]$x) elist <- edgeList(m1) vlist <- vars(m1) model <- beautify(model) for (i in seq_len(nrow(elist))) { e <- toformula(y=vlist[elist[i,2]],x=vlist[elist[i,1]]) edgelabels(model, e, cex=0.7) <- 1 } elist <- edgeList(m2) vlist <- vars(m2) for (i in seq_len(nrow(elist))) { e <- toformula(vlist[elist[i,2]],vlist[elist[i,1]]) edgelabels(model, e, cex=0.7) <- 2 } nodecolor(model, nl[[1]]$newx) <- "gray" for (xx in nl[[1]]$newx) { e <- toformula(y=names(nl)[1],x=xx) edgelabels(model,e,col="gray", cex=0.7, lty=1) <- 2 } for (xx in nl[[1]]$newx) { e <- toformula(y=xx,x=nl[[1]]$x) edgelabels(model,e,col="gray", cex=0.7, lty=2) <- "" } plot(model, ...) 
} ##' @export predict.twostage.lvmfit <- function(object, newdata, variable=names(nonlinear(object)), p=coef(object), type=c("model2","latent"), x=NULL, ...) { if (missing(newdata)) stop("provide data for prediction") nl <- nonlinear(object) unam <- unique(unlist(lapply(nl,function(x) x$x))) if (is.vector(newdata) || all(unam%in%colnames(newdata))) type <- "latent" if (tolower(type[1])%ni%c("latent")) { p1 <- coef(object$estimate1) pred1 <- uhat(p1, data=newdata, object$estimate1, nlobj=nl) if (tolower(type[1])==c("model1")) return(pred1) newdata <- as.data.frame(newdata) newdata[,colnames(pred1)] <- pred1 pred <- predict(object$estimate2,x=x,p=p,data=newdata,...) attr(pred,"p") <- NULL attr(pred,"e") <- NULL attr(pred,"cond.var") <- NULL return(pred) } ## Association between predicted latent variables and child nodes: if (is.numeric(variable)) { variable <- names(nonlinear(object))[variable] } nl <- nl[variable] res <- matrix(nrow=NROW(newdata),ncol=length(nl)) colnames(res) <- names(nl) ##unam <- unique(unlist(lapply(nl, function(x) x$newx))) ##colnames(res) <- unam if (!is.null(x)) { newd <- newdata m0 <- Model(object$estimate2) p0 <- p } for (i in seq_along(nl)) { pnam <- c(variable,paste0(variable,"~",nl[[i]]$newx)) pidx <- match(pnam,names(coef(object))) b <- p[pidx] F <- nl[[i]]$f if (is.vector(newdata)) { res[,i] <- F(b,newdata) } else { res[,i] <- F(b,newdata[,nl[[i]]$x]) if (!is.null(x)) { newd[,nl[[i]]$newx] <- 0 latent(m0,clear=TRUE) <- names(nl)[i] regression(m0, y=names(nl)[i], x=paste0(names(nl)[i],".offset")) <- 1 p0[pidx] <- 0 } } } if (!is.null(x) && !is.vector(newdata)) { latentres <- res colnames(latentres) <- paste0(colnames(res),".offset",collapse="") newd <- cbind(newd,latentres) res <- predict(m0,data=newd,p=p0,x=x,...) 
attr(res,"cond.var") <- NULL } return(res) } ##' Cross-validated two-stage estimator ##' ##' Cross-validated two-stage estimator for non-linear SEM ##' @export ##' @param model1 model 1 (exposure measurement error model) ##' @param model2 model 2 ##' @param data data.frame ##' @param control1 optimization parameters for model 1 ##' @param control2 optimization parameters for model 1 ##' @param knots.boundary boundary points for natural cubic spline basis ##' @param mc.cores number of cores to use for parallel computations ##' @param nmix number of mixture components ##' @param df spline degrees of freedom ##' @param fix automatically fix parameters for identification (TRUE) ##' @param std.err calculation of standard errors (TRUE) ##' @param nfolds Number of folds (cross-validation) ##' @param rep Number of repeats of cross-validation ##' @param messages print information (>0) ##' @param ... additional arguments to lower level functions ##' @examples ##' \donttest{ ## Reduce Ex.Timings ##' m1 <- lvm( x1+x2+x3 ~ u1, latent= ~u1) ##' m2 <- lvm( y ~ 1 ) ##' m <- functional(merge(m1,m2), y ~ u, value=function(x) sin(x)+x) ##' distribution(m, ~u1) <- uniform.lvm(-6,6) ##' d <- sim(m,n=500,seed=1) ##' nonlinear(m2) <- y~u1 ##' val <- twostageCV(m1, m2, data=d, std.err=FALSE, df=2:6, nmix=1:2, ##' nfolds=2, mc.cores=1) ##' val ##' } twostageCV <- function(model1, model2, data, control1=list(trace=0), control2=list(trace=0), knots.boundary, mc.cores=1, nmix=1:4, df=1:9, fix=TRUE, std.err=TRUE, nfolds=5, rep=1, messages=0, ...) 
{
    ## Nonlinear specification must be present in model2 (set via 'nonlinear<-')
    F <- nonlinear(model2)
    if (length(F)==0) {
        stop("Specify at least one nonlinear association in 'model2' (using the 'nonlinear' method)")
    }
    ## Formula linking the response of the first nonlinear association to the
    ## latent predictor from model 1
    form <- as.formula(paste(names(F)[1], "~", x=F[[1]]$x))
    op <- options(warn=-1)  ## suppress warnings; restored via options(op) below
    if (fix) {
        ## Automatic identification constraints: fix parameters (relative
        ## parametrization) and free the latent intercepts
        model1 <- baptize(fixsome(model1, param="relative"))
        intercept(model1, latent(model1)) <- NA
    }
    ## Stage 1: fit the exposure measurement model
    e1a <- estimate(model1, data=data, control=control1)
    if (missing(knots.boundary))
        knots.boundary <- range(predict(e1a,vars(e1a)))
    ## Starting values for mixture models
    plab <- parlabels(model1)
    pfree <- setdiff(coef(model1),plab)
    pfree.idx <- match(pfree,coef(model1)) ## Index of unlabeled parameters
    intpos <- setdiff(parpos(model1)$v,0)
    pfree.int <- intersect(pfree.idx,intpos) ## Free intercept parameters
    p0 <- coef(e1a)
    pint <- p0[setdiff(intpos,pfree.int)]
    ## Starting values for a n-component mixture: spread the latent
    ## intercepts evenly over the boundary interval
    startf <- function(n) {
        u0 <- seq(knots.boundary[1],knots.boundary[2],length.out=n)
        names(u0) <- paste0("p",seq_along(u0))
        c(pint,u0,p0[-intpos])
    }
    control1$start <- NULL
    ee <- list(e1a)
    nmix <- setdiff(nmix,1)  ## the 1-component model is 'e1a' itself
    if (mc.cores>1) {
        val <- parallel::mclapply(as.list(nmix), function(k) {
            if (messages>0) cat("Fitting mixture model with", k, "components\n")
            mixture(model1, k=k, data=data, control=c(control1,list(start=startf(k))))
        }, mc.cores=mc.cores)
        ee <- c(ee, val)
    } else {
        for (k in nmix) {
            if (messages>0) cat("Fitting mixture model with", k, "components\n")
            ee <- c(ee, list(mixture(model1, k=k, data=data, control=c(control1,list(start=startf(k))))))
        }
    }
    ## Select the number of mixture components by AIC
    AIC1 <- unlist(lapply(ee,AIC))
    names(AIC1) <- c(1,nmix)
    ii <- which.min(AIC1)
    ## Exposure measurement model
    e1 <- ee[[ii]] ## Selected model by AIC
    ## Candidate stage-2 models: linear association plus natural cubic
    ## splines with increasing degrees of freedom
    MM <- list(nonlinear(model2, form, type="linear"))
    df <- setdiff(df, 1)  ## df=1 corresponds to the linear model above
    Knots <- list()
    for (i in df) {
        knots <- seq(knots.boundary[1],knots.boundary[2],length.out=i+1)
        Knots <- c(Knots, list(knots))
        MM <- c(MM, list(nonlinear(model2, form, type="spline", knots=knots)))
    }
    ## Shared stage-1 fit re-estimated within each cross-validation fold
    if (!inherits(e1, "lvm.mixture")) {
        f0 <- function(data)
            list(e0=estimate(model1,data=data,control=c(control1,list(start=coef(e1)))))
    } else {
        f0 <- function(data)
            list(e0=mixture(model1,data=data,k=e1$k,control=c(control1,list(start=coef(e1)))))
    }
    ## BUG FIX: the argument was misspelled 'std.derr', which was silently
    ## swallowed by '...', so standard errors were needlessly computed in
    ## every cross-validation fold.
    ff <- lapply(MM, function(m) function(data,e0,...) twostage(e0,m,data=data,std.err=FALSE))
    a <- cv(ff,data=data,K=nfolds,rep=rep,mc.cores=mc.cores,shared=f0)
    M <- MM[[which.min(coef(a))]]  ## model with smallest cross-validated risk
    e2 <- twostage(e1,M,data,control=control2, std.err=std.err)
    options(op)
    res <- list(AIC1=cbind(AIC1), model1=e1, model2=e2, cv=coef(a),
                knots=c(list(NA),Knots), nfolds=nfolds, rep=rep)
    structure(res, class="twostageCV")
}

## Print method: mixture-model AIC table, cross-validation results and
## coefficients of the selected stage-2 model.
##' @export
print.twostageCV <- function(x,...) {
    printline(70)
    i1 <- which.min(x$AIC1)
    nmix <- rownames(x$AIC1)[i1]
    cat("Selected mixture model: ",nmix," component", ifelse(i1>1, "s",""),"\n", sep="")
    print(x$AIC1)
    printline(70)
    i2 <- which.min(x$cv)
    ## Linear model is stored as a single NA in the knots list (df=1)
    splinedf <- unlist(lapply(x$knots,function(x) if (any(is.na(x))) return(1) else length(x)-1))
    cat("Selected spline model degrees of freedom: ", splinedf[i2] ,"\n", sep="")
    knots <- rbind(x$knots[[i2]])
    ## BUG FIX: 'knots' is a 1 x m matrix, so 'if (is.na(knots))' is an
    ## error in R >= 4.2 when m > 1; only the linear model stores one NA.
    if (all(is.na(knots))) knots <- "none"
    cat("Knots:", paste(formatC(knots,...) , collapse=" "), "\n\n")
    rmse <- x$cv
    rownames(rmse) <- paste0("df:",splinedf)
    colnames(rmse) <- paste0("RMSE(nfolds=",x$nfolds,", rep=",x$rep,")")
    print(rmse)
    printline(70)
    cat("\n")
    print(CoefMat(x$model2,...),quote=FALSE)
}
##' @export
coef.twostageCV <- function(object,...) {
    ## Coefficients of the selected stage-2 model
    coef(Model(object),...)
}
vcov.twostageCV <- function(object,...) {
    vcov(Model(object),...)
}
iid.twostageCV <- function(object,...) {
    iid(Model(object),...)
}
Model.twostageCV <- function(x,...) {
    ## The selected stage-2 model
    x$model2
}
##' @export
summary.twostageCV <- function(object,...) {
    with(object, list(model1=summary(model1), AIC1=AIC1, cv=cv, knots=knots,
                      model2=summary(model2)))
}
##' @export
predict.twostageCV <- function(object,... ) {
    predict(Model(object),...)
}
lava/R/normal.R0000644000176200001440000001416113520655354013024 0ustar liggesusers
## Solve p = n*(n-1)/2 for integer n, i.e. the positive root of
## x^2 - x - 2*p = 0; returns NA when no integer solution exists.
introotpn <- function(p) {
    ## Find integer root of x^2-x-2*p=0
    n <- 0.5*(1+sqrt(1+8*p))
    if (floor(n)!=n) n <- NA
    return(n)
}

## Build a correlation matrix from a vector of off-diagonal correlation
## coefficients. A scalar 'rho' yields the 2x2 equi-correlation matrix;
## otherwise length(rho) must equal p*(p-1)/2 for some integer p.
## 'offdiag<-' is a package helper (defined elsewhere in this file set)
## assigning the lower/upper triangles.
rho2sigma <- function(rho) {
    if (length(rho)==1) return(diag(2)*(1-rho)+rho)
    p <- introotpn(length(rho))
    if (is.na(p)) stop("Unexpected length of correlation coefficients (p=n*(n-1)/2).")
    sigma <- diag(nrow=p)
    offdiag(sigma,type=2) <- rho
    offdiag(sigma,type=3) <- offdiag(t(sigma),type=3)
    return(sigma)
}

## Draw n samples from a multivariate normal distribution with mean 'mu'
## and covariance 'sigma' (or correlation structure 'rho'). The matrix
## square root of 'sigma' is obtained from its SVD. 'mu' may be a vector
## or a matrix conforming with the simulated n x k result.
##' @export
rmvn0 <- function(n,mu,sigma,rho,...) {
    if (!missing(rho)) sigma <- rho2sigma(rho)
    if (!missing(mu) && missing(sigma)) sigma <- diag(nrow=length(mu))
    if (missing(sigma)) sigma <- matrix(1)
    if (is.vector(sigma)) sigma <- diag(sigma,ncol=length(sigma))
    if (missing(mu)) mu <- rep(0,ncol(sigma))
    ## sigma^(1/2) via SVD: v diag(sqrt(d)) u'
    PP <- with(svd(sigma), v%*%diag(sqrt(d),ncol=length(d))%*%t(u))
    res <- matrix(rnorm(ncol(sigma)*n),ncol=ncol(sigma))%*%PP
    ## row-wise mean matrix vs. common mean vector
    if (NROW(mu)==nrow(res) && NCOL(mu)==ncol(res)) return(res+mu)
    return(res+cbind(rep(1,n))%*%mu)
}

## Multivariate normal density evaluated at the rows of 'x'.
## Returns log-density when log=TRUE; NaN log-densities are mapped to
## -Inf when nan.zero=TRUE. NOTE(review): the 'norm' argument is not
## used anywhere in the body. 'Inverse' is a package helper returning
## the inverse with a "det" attribute.
##' @export
dmvn0 <- function(x,mu,sigma,rho,log=FALSE,nan.zero=TRUE,norm=TRUE,...) {
    if (!missing(rho)) sigma <- rho2sigma(rho)
    if (!missing(mu) && missing(sigma)) sigma <- diag(nrow=length(mu))
    if (missing(sigma)) sigma <- matrix(1)
    if (is.vector(sigma)) sigma <- diag(sigma,ncol=length(sigma))
    if (missing(mu)) mu <- rep(0,ncol(sigma))
    if (length(sigma)==1) {
        k <- 1
        isigma <- structure(cbind(1/sigma),det=as.vector(sigma))
    } else {
        k <- ncol(sigma)
        isigma <- Inverse(sigma)
    }
    if (!missing(mu)) {
        ## center 'x'; 'mu' may be a conformable matrix or a vector
        if (NROW(mu)==NROW(x) && NCOL(mu)==NCOL(x)) {
            x <- x-mu
        } else {
            x <- t(t(x)-mu)
        }
    }
    logval <- -0.5*(base::log(2*base::pi)*k+
                    base::log(attributes(isigma)$det)+
                    rowSums((x%*%isigma)*x))
    if (nan.zero) logval[is.nan(logval)] <- -Inf
    if (log) return(logval)
    return(exp(logval))
}

## Default optimizer for the 'normal' estimator
normal_method.lvm <- "nlminb0"

## Objective (negative log-likelihood) for the 'normal' estimator;
## body continues on the next line of this dump.
normal_objective.lvm <- function(x,p,data,weights=NULL,data2=NULL,indiv=FALSE,...)
{
    ## Negative log-likelihood for a (possibly ordinal/censored) lvm,
    ## delegated to mets::loglikMVN. Returns the per-observation negative
    ## log-likelihood when indiv=TRUE, otherwise the sum.
    if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required")
    ## Preserve the RNG state across the forced set.seed(1) below
    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1)
    save.seed <- get(".Random.seed", envir = .GlobalEnv)
    on.exit(assign(".Random.seed", save.seed, envir = .GlobalEnv))
    set.seed(1)
    ii <- lava::index(x)
    x.idx <- ii$exo.idx
    y <- ii$endogenous
    ## Ordinal responses (status code 2); keep attributes of the full set
    ord <- lava::ordinal(x)
    atr <- attributes(ord)
    ord <- intersect(y,ord)
    attributes(ord) <- atr
    status <- rep(0,length(y))
    status[match(ord,y)] <- 2
    ## All responses ordinal and no covariates: collapse to the table of
    ## unique response patterns and weight by pattern frequency below
    Table <- (length(y)==length(ord)) && (length(x.idx)==0)
    if (Table) {
        pat <- mets::fast.pattern(data[,y,drop=FALSE],categories=max(data[,y,drop=FALSE])+1)
        data <- pat$pattern
        colnames(data) <- y
    }
    mu <- predict(x,data=data,p=p)
    S <- attributes(mu)$cond.var
    class(mu) <- "matrix"
    ## Threshold matrix (one row per response); thresholds are
    ## parametrized as cumsum(c(theta1, exp(theta2), ...)) to be increasing
    thres <- matrix(0,nrow=length(y),max(1,attributes(ord)$K-1)); rownames(thres) <- y
    for (i in seq_len(length(attributes(ord)$fix))) {
        nn <- names(attributes(ord)$idx)[i]
        ii <- attributes(ord)$idx[[nn]]
        val <- (attributes(mu)$e[ii])
        thres[nn,seq_len(length(val))] <- cumsum(c(val[1],exp(val[-1])))
    }
    yl <- yu <- as.matrix(data[,y,drop=FALSE])
    if (!inherits(yl[1,1],c("numeric","integer","logical")) ||
        !inherits(yu[1,1],c("numeric","integer","logical")))
        stop("Unexpected data (normal_objective)")
    ## NOTE(review): 'data2' appears to supply upper interval endpoints
    ## (status code 1 = censored/interval observation) -- confirm against
    ## the callers of this objective.
    if (!is.null(data2)) {
        yu[,colnames(data2)] <- data2
        status[match(colnames(data2),y)] <- 1
    }
    l <- mets::loglikMVN(yl,yu,status,mu,S,thres)
    if (!is.null(weights)) {
        ##if (is.matrix(weights)) weights <- weights[,1]
        l <- l*weights
    }
    if (Table) {
        ## Expand pattern log-likelihoods back to one value per observation
        l <- l[pat$group+1]
    }
    if (indiv) return(-l)
    return(-sum(l))
}

## Log-likelihood: simply the negated objective
normal_logLik.lvm <- function(object,p,data,data2=NULL,...) {
    res <- -normal_objective.lvm(x=object,p=p,data=data,data2=data2,...)
    return(res)
}

## Gradient for the 'normal' estimator; body continues on the next line
## of this dump.
normal_gradient.lvm <- function(x,p,data,weights=NULL,data2=NULL,indiv=FALSE,...)
{ if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) save.seed <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", save.seed, envir = .GlobalEnv)) if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") if (is.null(ordinal(x)) && is.null(data2) && is.null(weights)) { D <- deriv.lvm(x,p=p) M <- moments(x,p) Y <- as.matrix(data[,manifest(x)]) mu <- M$xi%x%rep(1,nrow(Y)) ss <- -mets::scoreMVN(Y,mu,M$C,D$dxi,D$dS) if (!indiv) return(colSums(ss)) return(ss) } if (indiv) { return(numDeriv::jacobian(function(p0) normal_objective.lvm(x,p=p0,data=data,weights=weights,data2=data2,indiv=TRUE,...),p,method=lava.options()$Dmethod)) } numDeriv::grad(function(p0) normal_objective.lvm(x,p=p0,data=data,weights=weights,data2=data2,...),p,method=lava.options()$Dmethod) } normal_hessian.lvm <- function(x,p,outer=FALSE,data2=NULL,...) { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") dots <- list(...); dots$weights <- NULL if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) save.seed <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", save.seed, envir = .GlobalEnv)) if (!outer) { f <- function(p) { set.seed(1) do.call("normal_objective.lvm", c(list(x,p=p,indiv=FALSE,data2=data2),dots)) } g <- function(p) { set.seed(1) do.call("normal_gradient.lvm", c(list(x,p=p,indiv=FALSE,data2=data2),dots)) } if (is.null(ordinal(x)) && is.null(data2)) return(numDeriv::jacobian(g,p)) else { return(numDeriv::hessian(f,p)) } } ## Else calculate outer product of the score (empirical variance of score) S <- normal_gradient.lvm(x,p=p,indiv=TRUE,...) 
J <- t(S)%*%S attributes(J)$grad <- colSums(S) return(J) } ##normal_gradient.lvm <- normal_hessian.lvm <- NULL lava/R/parlabels.R0000644000176200001440000000067613520655354013507 0ustar liggesusers##' @export parlabels <- function(x,exo=FALSE) { res <- c(unlist(intfix(x)[unlist(lapply(intfix(x), function(y) !is.na(y) & !is.numeric(y)))]), regfix(x)$labels[!is.na(regfix(x)$labels)], covfix(x)$labels[!is.na(covfix(x)$labels)]) if (!is.null(x$exfix)) res <- c(res, unlist(x$exfix[!is.na(x$exfix) & !is.numeric(x$exfix)])) if (exo) res <- intersect(res,index(Model(x))$exogenous) return(res) } lava/R/index.sem.R0000644000176200001440000002715413520655354013434 0ustar liggesusers##' @export updatelvm <- function(x,mean=TRUE,...) { index(x) <- reindex(x,mean=mean,...) x$parpos <- parpos(x,mean=mean,...) return(x) } ##' @export "index" <- function(x,...) UseMethod("index") ##' @export "index<-" <- function(x,...,value) UseMethod("index<-") ##' @export "index.lvm" <- function(x,...) { x$index } ##' @export "index.lvmfit" <- function(x,...) 
{ index(Model(x)) } ##' @export "index<-.lvm" <- function(x,...,value) { x$index <- value; return(x) } ##' @export "index<-.lvmfit" <- function(x,...,value) { Model(x)$index <- value; return(x) } ### A ## Matrix with fixed parameters and ones where parameters are free ### J ## Manifest variable selection matrix ### M0 ## Index of free regression parameters ### M1 ## Index of free and _unique_ regression parameters ### P ## Matrix with fixed variance parameters and ones where parameters are free ### P0 ## Index of free variance parameters ### P1 ## Index of free and _unique_ regression parameters ### npar.var ## Number of covariance parameters ##' @export `reindex` <- function(x, sparse=FALSE,standard=TRUE,zeroones=FALSE,deriv=FALSE,mean=TRUE) { ## Extract indices of parameters from model x$parpos <- NULL M <- x$M eta <- latent(x) ## Latent variables/Factors m <- length(eta) obs <- manifest(x) ## Manifest/Observed variables endo <- endogenous(x) exo <- exogenous(x) ##,index=FALSE) allvars <- vars(x) eta.idx <- na.omit(match(eta,allvars)) obs.idx <- na.omit(match(obs,allvars)) exo.idx <- na.omit(match(exo,allvars)) exo.obsidx <- na.omit(match(exo,obs)) endo.obsidx <- na.omit(match(endo,obs)) fix.idx <- !is.na(x$fix) ## Index of fixed parameters covfix.idx <- !is.na(x$covfix) ## Index of fixed covariance parameters constrain.par <- NULL if (length(constrain(x))>0) constrain.par <- names(constrain(x)) M0 <- M; M0[fix.idx] <- 0 ## Matrix of indicators of free regression-parameters (removing fixed parameters) M1 <- M0; ## Matrix of indiciator of free _unique_ regression parameters (removing fixed _and_ duplicate parameters) parname <- unique(x$par[!is.na(x$par)]) for (p in parname) { ii <- which(x$par==p) if (length(ii)>1) M1[ii[-1]] <- 0 if (p %in% constrain.par) M0[ii] <- M1[ii] <- 0 } npar.reg <- sum(M1) ## Number of free regression parameters P <- x$cov; P0 <- P; P0[covfix.idx] <- 0 ## Matrix of indicators of free covariance-parameters (removing fixed parameters) if 
(length(exo.idx)>0) P0[exo.idx,exo.idx] <- 0 ## 6/1-2011 P1 <- P0 ## Matrix of indiciator of free _unique_ variance parameters (removing fixed _and_ duplicate parameters) covparname <- unique(x$covpar[!is.na(x$covpar)]) for (p in covparname) { ii <- which(x$covpar==p) if (length(ii)>1) P1[ii[-1]] <- 0 if (p%in%c(parname,constrain.par)) P0[ii] <- P1[ii] <- 0 } npar.var <- sum(c(diag(P1),P1[lower.tri(P1)])) A <- M A[fix.idx] <- x$fix[fix.idx] ## ... with fixed parameters in plac P[covfix.idx] <- x$covfix[covfix.idx] ## ... with fixed parameters in plac px <- Jy <- J <- diag(nrow=length(vars(x))) if (m>0) { J[eta.idx,eta.idx] <- 0; J <- J[-eta.idx,,drop=FALSE] } ## Selection matrix (selecting observed variables) { ## Selection matrix (selection endogenous variables) if (length(c(eta.idx,exo.idx))>0) { Jy[c(eta.idx,exo.idx),c(eta.idx,exo.idx)] <- 0; Jy <- Jy[-c(eta.idx,exo.idx),,drop=FALSE] } ## Cancelation matrix (cancels rows with exogenous variables) px[exo.idx,exo.idx] <- 0 } ## Creating indicitor of free mean-parameters fixed <- sapply(x$mean, function(y) is.numeric(y) & !is.na(y)) named <- sapply(x$mean, function(y) is.character(y) & !is.na(y)) mparname <- NULL if (length(named)>0) mparname <- unlist(unique(x$mean[named])) v0 <- rep(1,length(x$mean)) ## Vector of indicators of free mean-parameters v0[exo.idx] <- 0 if (length(fixed)>0) v0[fixed] <- 0; v1 <- v0 for (p in mparname) { idx <- which(x$mean==p) if (length(idx)>1) { v1[idx[-1]] <- 0 } if (p%in%c(parname,covparname,constrain.par)) v0[idx] <- v1[idx] <- 0 } ## duplicate parameters ### ### Extra parameters ### efixed <- sapply(x$exfix, function(y) is.numeric(y) & !is.na(y)) enamed <- sapply(x$exfix, function(y) is.character(y) & !is.na(y)) if(length(enamed)>0){ eparname <- unlist(unique(x$exfix[enamed])) } else{ eparname <- NULL } ## Extra parameters e0 <- rep(1,length(x$expar)) ## Indicators of free extra par. 
if (length(efixed)>0) e0[efixed] <- 0 e1 <- e0 for (p in eparname) { idx <- which(x$exfix==p) if (length(idx)>1) { e1[idx[-1]] <- 0 } if (p%in%c(parname,covparname,constrain.par,mparname)) e0[idx] <- e1[idx] <- 0 } ## duplicate parameters ## Return: ## Adjacency-matrix (M) ## Matrix of regression-parameters (0,1) _with_ fixed parameters (A) ## Matrix of variance-parameters (indicators 0,1) (P) ## Manifest selection matrix (J), ## Position of variables matrix (Apos), ## Position of covariance variables matrix (Ppos), ## Position/Indicator matrix of free regression parameters (M0) res <- list(vars=allvars, manifest=obs, exogenous=exo, latent=eta, endogenous=endo, exo.idx=exo.idx, eta.idx=eta.idx, exo.obsidx=exo.obsidx, endo.obsidx=endo.obsidx, obs.idx=obs.idx, endo.idx=setdiff(obs.idx,exo.idx)) if (standard) { res <- c(res, list(M=M, A=A, P=P, P0=P0, P1=P1, M0=M0, M1=M1, v0=v0, v1=v1, e0=e0, e1=e1, npar=(npar.reg+npar.var), npar.reg=npar.reg, npar.var=npar.var, npar.mean=sum(v1), npar.ex=sum(e1), constrain.par=constrain.par)) which.diag <- NULL if (length(P1)>0) which.diag <- which(diag(P1==1)) res <- c(res, list(parname.all=parname, parname=setdiff(parname,constrain.par), which.diag=which.diag, covparname.all=covparname, covparname=setdiff(covparname,constrain.par), meanfixed=fixed, meannamed=named, mparname.all=mparname, mparname=setdiff(mparname,constrain.par), eparname.all=eparname, eparname=setdiff(eparname,constrain.par), J=J, Jy=Jy, px=px, sparse=sparse)) parname.all.reg.idx <- parname.all.reg.tidx <- parname.reg.tidx <- parname.reg.idx <- c() for (p in res$parname.all) { ipos <- which((x$par==p)) tipos <- which(t(x$par==p)) if (p%in%res$parname) { parname.reg.idx <- c(parname.reg.idx, list(ipos)) parname.reg.tidx <- c(parname.reg.tidx, list(tipos)) } parname.all.reg.idx <- c(parname.all.reg.idx, list(ipos)) parname.all.reg.tidx <- c(parname.all.reg.tidx, list(tipos)) }; if (length(parname.reg.idx)>0) { names(parname.reg.idx) <- names(parname.reg.tidx) <- 
res$parname } if (length(parname.all.reg.idx)>0) { names(parname.all.reg.idx) <- names(parname.all.reg.tidx) <- res$parname.all } covparname.all.idx <- covparname.idx <- c() for (p in res$covparname.all) { ipos <- which(x$covpar==p) if (p%in%res$covparname) covparname.idx <- c(covparname.idx, list(ipos)) covparname.all.idx <- c(covparname.all.idx, list(ipos)) }; if (length(covparname.idx)>0) names(covparname.idx) <- res$covparname if (length(covparname.all.idx)>0) names(covparname.all.idx) <- res$covparname.all mparname.all.idx <- mparname.idx <- c() for (p in res$mparname.all) { ipos <- which(x$mean==p) if (p%in%mparname) mparname.idx <- c(mparname.idx, list(ipos)) mparname.all.idx <- c(mparname.all.idx, list(ipos)) }; if (length(mparname.idx)>0) names(mparname.idx) <- res$mparname if (length(mparname.all.idx)>0) names(mparname.all.idx) <- res$mparname.all eparname.all.idx <- eparname.idx <- c() for (p in res$eparname.all) { ipos <- which(x$exfix==p) if (p%in%eparname) eparname.idx <- c(eparname.idx, list(ipos)) eparname.all.idx <- c(eparname.all.idx, list(ipos)) }; if (length(eparname.idx)>0) names(eparname.idx) <- res$eparname if (length(eparname.all.idx)>0) names(eparname.all.idx) <- res$eparname.all res <- c(res, list(mparname.idx=mparname.idx, covparname.idx=covparname.idx, parname.reg.idx=parname.reg.idx, parname.reg.tidx=parname.reg.tidx, mparname.all.idx=mparname.all.idx, eparname.all.idx=eparname.all.idx, covparname.all.idx=covparname.all.idx, parname.all.reg.idx=parname.all.reg.idx, parname.all.reg.tidx=parname.all.reg.tidx )) } else { res <- index(x) } if (zeroones) { if (sparse) { if (!requireNamespace("Matrix",quietly=TRUE)) stop("package Matrix not available") Ik <- Matrix::Diagonal(length(obs)) Im <- Matrix::Diagonal(ncol(A)) Kkk <- NULL J <- as(J, "sparseMatrix") Jy <- as(Jy, "sparseMatrix") px <- as(px, "sparseMatrix") } else { Ik <- diag(nrow=length(obs)) Im <- diag(nrow=ncol(A)) } Kkk <- NULL res[c("Ik","Im","Kkk")] <- NULL res <- c(res, 
list(Ik=Ik, Im=Im, Kkk=Kkk)) } if (deriv && length(P)>0) { if (res$npar.mean>0 & mean) D <- deriv.lvm(x,meanpar=rep(1,res$npar.mean),zeroones=TRUE) else D <- deriv.lvm(x,meanpar=NULL,zeroones=TRUE) res[c("dA","dP","dv")] <- NULL res <- c(res, list(dA=D$dA, dP=D$dP, dv=D$dv)) } if (length(P)>0) res <- c(res,mat.lvm(x,res)) return(res) } lava/R/Objective.R0000644000176200001440000002201513520655354013443 0ustar liggesusers###{{{ gaussian gaussian_method.lvm <- "nlminb2" `gaussian_objective.lvm` <- function(x,p,data,S,mu,n,...) { mp <- modelVar(x,p=p,data=data,...) C <- mp$C ## Model specific covariance matrix xi <- mp$xi ## Model specific mean-vector if (!lava.options()$allow.negative.variance && any(diag(mp$P)<0)) return(NaN) iC <- Inverse(C,det=TRUE, symmetric = TRUE) detC <- attributes(iC)$det if (n<2) { z <- as.numeric(data-xi) val <- log(detC) + tcrossprod(z,crossprod(z,iC))[1] return(0.5*val) } if (!is.null(mu)){ W <- suppressMessages(crossprod(rbind(mu-xi))) T <- S+W } else { T <- S } res <- n/2*log(detC) + n/2*tr(T%*%iC) ## Objective function (Full Information ML) ## if (any(attr(iC,"lambda")<1e-16)) res <- res-1e2 return(res) } `gaussian_hessian.lvm` <- function(x,p,n,...) { dots <- list(...); dots$weights <- NULL do.call("information", c(list(x=x,p=p,n=n),dots)) } gaussian_gradient.lvm <- function(x,p,data,S,mu,n,...) { dots <- list(...); dots$weights <- NULL if (n>2) data <- NULL val <- -gaussian_score.lvm(x,p=p,S=S,mu=mu,n=n,data=data,reindex=FALSE,...) if (!is.null(nrow(val))) { val <- colSums(val) } val } gaussian_score.lvm <- function(x, data, p, S, n, mu=NULL, weights=NULL, debug=FALSE, reindex=FALSE, mean=TRUE, constrain=TRUE, indiv=FALSE,...) 
{ if (!is.null(data)) { if ((nrow(data)<2 | !is.null(weights))| indiv) { mp <- modelVar(x,p,data=data[1,]) iC <- Inverse(mp$C,det=FALSE, symmetric = TRUE) MeanPar <- attributes(mp)$meanpar D <- with(attributes(mp), deriv.lvm(x, meanpar=MeanPar, p=pars, mom=mp, mu=NULL)) ##, all=length(constrain(x))>0)) myvars <- (index(x)$manifest) if (NCOL(data)!=length(myvars)) { data <- subset(data,select=myvars) } score <- matrix(ncol=length(p),nrow=NROW(data)) score0 <- -1/2*as.vector(iC)%*%D$dS if (!is.null(weights)) { W0 <- diag(nrow=length(myvars)) widx <- match(colnames(weights),myvars) } for (i in seq_len(NROW(data))) { z <- as.numeric(data[i,]) u <- z-as.numeric(mp$xi) if (!is.null(weights)) { W <- W0; diag(W)[widx] <- as.numeric(weights[i,]) score[i,] <- as.numeric(crossprod(u,iC%*%W)%*%D$dxi + -1/2*(as.vector((iC - iC %*% crossprod(rbind(u)) %*% iC)%*%W)) %*% D$dS ) } else { score[i,] <- as.numeric(score0 + crossprod(u,iC)%*%D$dxi + 1/2*as.vector(iC%*%crossprod(rbind(u))%*%iC)%*%D$dS) } }; colnames(score) <- names(p) return(score) } } ### Here the emperical mean and variance of the population are sufficient statistics: if (missing(S)) { data0 <- na.omit(data[,manifest(x),drop=FALSE]) n <- NROW(data0) S <- cov(data0)*(n-1)/n mu <- colMeans(data0) } mp <- modelVar(x,p) C <- mp$C xi <- mp$xi iC <- Inverse(C,det=FALSE, symmetric = TRUE) Debug("Sufficient stats.",debug) if (!is.null(mu) & !is.null(xi)) { W <- crossprod(rbind(mu-xi)) T <- S+W } else { T <- S } D <- deriv.lvm(x, meanpar=attributes(mp)$meanpar, mom=mp, p=p, mu=mu, mean=mean) vec.iC <- as.vector(iC) if (lava.options()$devel) { Grad <- numeric(length(p)) imean <- with(index(x)$parBelongsTo,mean) Grad[-imean] <- n/2*crossprod(D$dS[,-imean], as.vector(iC%*%T%*%iC)-vec.iC) } else { Grad <- n/2*crossprod(D$dS, as.vector(iC%*%T%*%iC)-vec.iC) } if (!is.null(mu) & !is.null(xi)) { if (!(lava.options()$devel)) { Grad <- Grad - (n/2*crossprod(D$dT,vec.iC)) } else { Grad[with(index(x)$parBelongsTo,c(mean,reg))] <- 
Grad[with(index(x)$parBelongsTo,c(mean,reg))] - (n/2*crossprod(D$dT,vec.iC)) } } res <- as.numeric(Grad) return(rbind(res)) } ###}}} gaussian ###{{{ gaussian variants ## Maximum Likelihood with numerical gradient + hessian gaussian0_objective.lvm <- gaussian_objective.lvm gaussian1_objective.lvm <- gaussian_objective.lvm gaussian1_gradient.lvm <- function(...) gaussian_gradient.lvm(...) gaussian1_hessian.lvm <- function(x,p,...) { myg2 <- function(p1) gaussian_gradient.lvm(x,p=p1,...) myg3 <- function(p1) numDeriv::jacobian(myg2,p1) return(myg3(p)) ## myg <- function(p1) gaussian_objective.lvm(x,p=p1,...) ## numDeriv::hessian(myg,p) } ## BHHH gaussian2_method.lvm <- "NR" gaussian2_objective.lvm <- gaussian_objective.lvm gaussian2_gradient.lvm <- gaussian_gradient.lvm gaussian2_hessian.lvm <- function(x,p,n,data,...) { S <- -score(x,p=p,n=n,data=data,indiv=TRUE,...) I <- t(S)%*%S attributes(I)$grad <- colSums(S) return(I) } ###}}} ###{{{ Weighted weighted_method.lvm <- "NR" weighted_gradient.lvm <- function(x,p,data,weights,indiv=FALSE,...) 
{
    ## Weighted score (negative gradient contributions) for an lvm.
    ## Returns the n x p matrix of per-observation (negative) scores when
    ## indiv=TRUE, otherwise the column sums.
    myvars <- index(x)$manifest
    if (NCOL(data)!=length(myvars)) data <- subset(data,select=myvars)
    score <- matrix(ncol=length(p),nrow=NROW(data))
    myy <- index(x)$endogenous
    myx <- index(x)$exogenous
    mynx <- setdiff(myvars,myx)
    W0 <- diag(nrow=length(myy))
    widx <- match(colnames(weights),myy)
    pp <- modelPar(x,p)
    mp <- moments(x,p=p,conditional=TRUE,data=data[1,])
    iC <- Inverse(mp$C,det=FALSE, symmetric = TRUE)
    ## Design matrix: model-implied values for non-exogenous variables,
    ## observed values for exogenous variables
    v <- matrix(0,ncol=length(vars(x)),nrow=NROW(data))
    colnames(v) <- vars(x)
    for (i in mynx) v[,i] <- mp$v[i]
    for (i in myx) v[,i] <- data[,i]
    xi <- t(mp$G%*%t(v))
    u <- as.matrix(data)[,myy]-xi   ## residuals
    D <- deriv.lvm(x, meanpar=pp$meanpar, p=pp$p, mom=mp, mu=NULL)
    if (NROW(data)==1) {
        ## BUG FIX: this branch previously indexed 'weights' and 'score'
        ## with the stale loop variable 'i' (a character left over from the
        ## loops above) instead of row 1.
        W <- W0; diag(W)[widx] <- as.numeric(weights[1,])
        score[1,] <- as.numeric(crossprod(u,iC%*%W)%*%D$dxi +
                     -1/2*(as.vector((iC - iC %*% crossprod(rbind(u)) %*% iC)%*%W)) %*% D$dS)
        return(-score)
    }
    Gdv <- mp$G%*%D$dv
    for (i in seq_len(NROW(data))) {
        W <- W0; diag(W)[widx] <- as.numeric(weights[i,])
        dxi <- (t(as.numeric(v[i,]))%x%diag(nrow=length(myy)))%*%D$dG + Gdv
        score[i,] <- -0.5*as.vector(iC%*%W)%*%D$dS +
            as.numeric(crossprod(u[i,],iC%*%W)%*%dxi +
                       1/2*as.vector(iC%*%crossprod(rbind(u[i,]))%*%iC%*%W)%*%D$dS)
    }
    if (indiv) return(-score)
    colSums(-score)
}

## Outer-product-of-scores approximation of the hessian; the gradient is
## attached as an attribute.
weighted_hessian.lvm <- function(...) {
    S <- weighted_gradient.lvm(...,indiv=TRUE)
    res <- crossprod(S)
    attributes(res)$grad <- colSums(-S)
    res
}

weighted0_method.lvm <- "estfun"
## Gradient based directly on the (unweighted) gaussian score
weighted0_gradient.lvm <- function(...) {
    val <- -gaussian_score.lvm(...)
    colSums(val)
}
weighted0_hessian.lvm <- NULL
weighted2_method.lvm <- "estfun"
## Slow reference implementation recomputing the moments per observation;
## body continues on the next line of this dump.
weighted2_gradient.lvm <- function(x,p,data,weights,indiv=FALSE,...)
{ myvars <- index(x)$manifest if (NCOL(data)!=length(myvars)) data <- subset(data,select=myvars) score <- matrix(ncol=length(p),nrow=NROW(data)) myy <- index(x)$endogenous myx <- index(x)$exogenous mynx <- setdiff(myvars,myx) W0 <- diag(nrow=length(myy)) widx <- match(colnames(weights),myy) pp <- modelPar(x,p) for (i in seq_len(NROW(data))) { z <- as.matrix(data[i,myy]) mp <- moments(x,p=p,conditional=TRUE,data=data[i,]) u <- as.numeric(z-mp$xi[,1]) iC <- Inverse(mp$C,det=FALSE, symmetric = TRUE) D <- deriv.lvm(x, meanpar=pp$meanpar, p=pp$p, mom=mp, mu=NULL) W <- W0; diag(W)[widx] <- as.numeric(weights[i,]) score[i,] <- -0.5*as.vector(iC%*%W)%*%D$dS + as.numeric(crossprod(u,iC%*%W)%*%D$dxi + 1/2*as.vector(iC%*%crossprod(rbind(u))%*%iC%*%W)%*%D$dS) } if (indiv) return(-score) colSums(-score) } weighted2_hessian.lvm <- NULL ###}}} Weighted ###{{{ Simple `Simple_hessian.lvm` <- function(p,...) { matrix(NA, ncol=length(p), nrow=length(p)) } Simple_gradient.lvm <- function(x,p,...) { naiveGrad(function(pp) Simple_objective.lvm(x,pp,...), p) } `Simple_objective.lvm` <- function(x, p=p, S=S, n=n, ...) { m. <- moments(x,p) C <- m.$C npar <- m.$npar detC <- det(C) iC <- Inverse(C, symmetric = TRUE) if (detC<0 | inherits(iC, "try-error")) return(.Machine$double.xmax) res <- n/2*(log(detC) + tr(S%*%iC) - log(det(S)) - npar) res } ###}}} ObjectiveSimple lava/R/model.R0000644000176200001440000000245213520655354012634 0ustar liggesusers##' Extract model ##' ##' Extract or replace model object ##' ##' ##' @aliases Model Model<- ##' @usage ##' ##' Model(x, ...) ##' ##' Model(x, ...) <- value ##' ##' @param x Fitted model ##' @param value New model object (e.g. \code{lvm} or \code{multigroup}) ##' @param \dots Additional arguments to be passed to the low level functions ##' @return Returns a model object (e.g. \code{lvm} or \code{multigroup}) ##' @author Klaus K. 
##' Holst
##' @seealso \code{\link{Graph}}
##' @keywords models
##' @examples
##'
##' m <- lvm(y~x)
##' e <- estimate(m, sim(m,100))
##' Model(e)
##'
##' @export
`Model` <- function(x, ...) UseMethod("Model")

##' @export
`Model.default` <- function(x, ...) x

##' @export
`Model.lvm` <- function(x, ...) x

##' @export
`Model.lvmfit` <- function(x, ...) x$model

##' @export
`Model.multigroup` <- function(x, ...) x$lvm

##' @export
`Model.multigroupfit` <- function(x, ...) x$model

##' @export
"Model<-" <- function(x, ..., value) UseMethod("Model<-")

##' @export
"Model<-.lvm" <- function(x, ..., value) {
    ## An 'lvm' object *is* the model, so it is replaced wholesale
    x <- value
    x
}

##' @export
"Model<-.lvmfit" <- function(x, ..., value) {
    x$model <- value
    x
}

##' @export
"Model<-.multigroup" <- function(x, ..., value) {
    x$lvm <- value
    x
}

##' @export
"Model<-.multigroupfit" <- function(x, ..., value) {
    x$model <- value
    x
}

## lava/R/ordreg.R  (tar member header preserved from the archive dump)

## Map unconstrained parameters to an increasing threshold vector:
## v[1] = theta[1], v[k] = v[k-1] + exp(theta[k]) for k > 1.
ordreg_threshold <- function(theta) {
    thres <- theta[1L]
    if (length(theta) > 1L) {
        thres <- cumsum(c(thres, exp(theta[-1L])))
    }
    thres
}

## Inverse of 'ordreg_threshold': recover the unconstrained
## parameterization from an increasing threshold vector.
ordreg_ithreshold <- function(v) {
    par <- v[1L]
    if (length(v) > 1L) {
        par <- c(par, log(-rev(diff(rev(v)))))
    }
    par
}

## Jacobian of the threshold transformation with respect to theta
## (K response levels => K-1 thresholds).
ordreg_dthreshold <- function(theta) {
    K <- length(theta) + 1L
    Da <- matrix(0, K, K - 1)
    Da[seq(K - 1), 1L] <- 1L
    if (K > 2) {
        ## column i carries d v[j]/d theta[i] = exp(theta[i]) for j >= i
        for (i in 2:(K - 1)) Da[seq(i, K - 1), i] <- exp(theta[i])
    }
    Da
}

##' Ordinal regression models
##'
##' @title Univariate cumulative link regression models
##' @param formula formula
##' @param data data.frame
##' @param offset offset
##' @param family family (default proportional odds)
##' @param start optional starting values
##' @param fast If TRUE standard errors etc. will not be calculated
##' @param ... Additional arguments to lower level functions
##' @export
##' @author Klaus K.
Holst ##' @examples ##' m <- lvm(y~x) ##' ordinal(m,K=3) <- ~y ##' d <- sim(m,100) ##' e <- ordreg(y~x,d) ordreg <- function(formula,data=parent.frame(),offset,family=stats::binomial("probit"),start,fast=FALSE,...) { y <- ordered(model.frame(update(formula,.~0),data)[,1]) lev <- levels(y) X <- model.matrix(update(formula,.~.+1),data=data)[,-1,drop=FALSE] up <- new.env() assign("h",family$linkinv,envir=up) assign("dh",family$mu.eta,envir=up) assign("y",as.numeric(y),envir=up) assign("X",X,envir=up) assign("K",nlevels(y),envir=up) assign("n",length(y),envir=up) assign("p",NCOL(X),envir=up) assign("threshold", function(theta,K) ordreg_threshold(theta[seq(K-1)]), envir=up) assign("dthreshold",function(theta,K) ordreg_dthreshold(theta[seq(K-1)]), envir=up) ff <- function(theta) -ordreg_logL(theta,up) gg <- function(theta) -ordreg_score(theta,up) if (missing(start)) start <- with(up,c(rep(-1,up$K-1),rep(0,p))) op <- nlminb(start,ff,gg) cc <- op$par; if (fast) return(structure(cc,threshold=up$threshold(cc,up$K))) ##,up$K))) nn <- c(paste(lev[-length(lev)], lev[-1L], sep = "|"), colnames(X)) I <- -ordreg_hessian(cc,up) names(cc) <- nn dimnames(I) <- list(nn,nn) res <- list(vcov=solve(I),coef=cc,call=match.call(),up=up,opt=op) structure(res,class="ordreg") } ##' @export print.ordreg <- function(x,...) { cat("Call:\n"); print(x$call) cat("\nParameter Estimates:\n") print(x$coef) } ##' @export summary.ordreg <- function(object,alpha=0.95,...) { res <- cbind(coef(object),diag(vcov(object))^.5) pp <- 1-(1-alpha)/2 qq <- qnorm(pp) res <- cbind(res,res[,1]-res[,2]*qq,res[,1]+res[,2]*qq,2*(1-pnorm(abs(res[,1])/res[,2]))) colnames(res) <- c("Estimate","Std.Err",paste0(round(c(1-pp,pp)*1000)/10,"%"),"P-value") res <- list(coef=res,logLik=logLik(object),AIC=AIC(object)) class(res) <- "summary.ordreg" return(res) } ##' @export print.summary.ordreg <- function(x,alpha=0.95,...) 
{ cat("AIC: ", x$AIC, "\n\n") print(x$coef) cat("\n") } ##' @export score.ordreg <- function(x,p=coef(x),indiv=FALSE,...) { ordreg_score(p,x$up) if (!indiv) return(colSums(x$up$score)) x$up$score } ##' @export logLik.ordreg <- function(object,p=coef(object),indiv=FALSE,...) { ordreg_logL(p,object$up) res <- log(object$up$pr) if (!indiv) res <- sum(res) structure(res,nall=length(object$up$pr),nobs=object$up$pr,df=length(p),class="logLik") } ##' @export coef.ordreg <- function(object,...) object$coef ##' @export vcov.ordreg <- function(object,...) object$vcov ordreg_logL <- function(theta,env,indiv=FALSE,...) { if (length(theta)!=with(env,p+K-1)) stop("Wrong dimension") env$theta <- theta if (env$p>0) beta <- with(env,theta[seq(p)+K-1]) alpha <- with(env, threshold(theta,K)) env$alpha <- alpha env$beta <- beta if (env$p>0) eta <- env$X%*%beta else eta <- cbind(rep(0,env$n)) env$lp <- kronecker(-eta,rbind(alpha),"+") F <- with(env,h(lp)) Pr <- cbind(F,1)-cbind(0,F) pr <- Pr[with(env,cbind(seq(n),as.numeric(y)))] env$pr <- pr sum(log(pr)) } ordreg_score <- function(theta,env,...) { if (!identical(theta,env$theta)) ordreg_logL(theta,env) Da <- with(env,dthreshold(theta,K)) dF <- with(env, cbind(dh(lp),0)) idx1 <- with(env,which(as.numeric(y)==1)) S1 <- cbind(Da[as.numeric(env$y),,drop=FALSE],-env$X) S1 <- dF[with(env,cbind(seq(n),as.numeric(y)))]*S1 y2 <- env$y-1; y2[idx1] <- env$K S2 <- cbind(Da[y2,,drop=FALSE],-env$X) S2 <- dF[cbind(seq(env$n),y2)]*S2 env$score <- 1/env$pr*(S1-S2) colSums(env$score) } ordreg_hessian <- function(theta,env,...) { numDeriv::jacobian(function(p) ordreg_score(p,env,...),theta,...) } ##' @export predict.ordreg <- function(object,p=coef(object),type=c("prob","cumulative"),...) 
{ env <- object$up env$theta <- p if (env$p>0) beta <- with(env,theta[seq(p)+K-1]) alpha <- with(env, threshold(theta,K)) env$alpha <- alpha env$beta <- beta if (env$p>0) eta <- env$X%*%beta else eta <- cbind(rep(0,env$n)) env$lp <- kronecker(-eta,rbind(alpha),"+") F <- with(env,h(lp)) if (tolower(type)[1]=="cumulative") return(F) Pr <- cbind(F,1)-cbind(0,F) return(Pr) } lava/R/score.R0000644000176200001440000001156713520655354012656 0ustar liggesusers##' @export `score` <- function(x,...) UseMethod("score") ###{{{ score.lvm ##' @export score.lvm <- function(x, data, p, model="gaussian", S, n, mu=NULL, weights=NULL, data2=NULL, debug=FALSE, reindex=FALSE, mean=TRUE, constrain=TRUE, indiv=TRUE,...) { cl <- match.call() lname <- paste0(model,"_score.lvm") if (!exists(lname)) { lname <- paste0(model,"_gradient.lvm") mygrad <- get(lname) scoreFun <- function(...) -mygrad(...) if (is.null(mygrad)) { stop("Missing gradient") } } else { scoreFun <- get(lname) } if (missing(data) || is.null(data)) { cl[[1]] <- scoreFun score <- eval.parent(cl) return(rbind(score)) } if (is.null(index(x)$dA) | reindex) x <- updatelvm(x,zeroones=TRUE,deriv=TRUE) xfix <- colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))] xconstrain <- intersect(unlist(lapply(constrain(x),function(z) attributes(z)$args)),index(x)$manifest) Debug(xfix,debug) if (missing(n)) { n <- nrow(data) } if (length(xfix)>0 | length(xconstrain)>0) { ##### Random slopes! 
x0 <- x if (length(xfix)>0) { Debug("random slopes...",debug) nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(myfix$var)) for (j in seq_along(myfix$col[[i]])) { regfix(x0, from=vars(x0)[myfix$row[[i]][j]],to=vars(x0)[myfix$col[[i]][j]]) <- data[1,myfix$var[[i]]] } index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) } pp <- modelPar(x0,p) ##p0 <- with(pp, c(meanpar,p,p2)) myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { index(x0)$A[cbind(myfix$row[[i]],myfix$col[[i]])] <- data[ii,myfix$var[[i]]] } return(scoreFun(x0,data=data[ii,], p=with(pp,c(meanpar,p,p2)),weights=weights[ii,,drop=FALSE],data2=data2[ii,,drop=FALSE],model=model,debug=debug,indiv=indiv,...)) } score <- t(sapply(seq_len(nrow(data)),myfun)) if (!indiv) { score <- colSums(rbind(score)) } if (length(score)0) myorder[[x$allmis]] <- NULL for (i in seq_along(S)) S0[myorder[[i]],] <- S[[i]] if (length(x$allmis)>0) { S0 <- S0[-x$orderlist[[x$allmis]],] } S0[is.na(S0)] <- 0 colnames(S0) <- names(coef(x)) return(S0) } return(S) } ###}}} score.lvm.missing ###{{{ score.multigroupfit ##' @export score.multigroupfit <- function(x,p=pars(x), weights=Weights(x), estimator=x$estimator, ...) { score(x$model0, p=p, weights=weights, model=estimator,...) } ###}}} score.multigroupfit ###{{{ score.multigroup ##' @export score.multigroup <- function(x,data=x$data,weights=NULL,data2=NULL,p,indiv=combine,combine=FALSE,...) 
{ rm <- procrandomslope(x) pp <- with(rm, modelPar(model,p)$p) parord <- modelPar(rm$model,seq_len(with(rm$model,npar+npar.mean)))$p S <- list() for (i in seq_len(x$ngroup)) { S0 <- rbind(score(x$lvm[[i]],p=pp[[i]],data=data[[i]],weights=weights[[i]],data2=data2[[i]],indiv=indiv,...)) S1 <- matrix(ncol=length(p),nrow=nrow(S0)) S1[,parord[[i]]] <- S0 S <- c(S, list(S1)) } if (combine) { S <- Reduce("rbind",S); S[is.na(S)] <- 0 if (!indiv) S <- colSums(S) return(S) } if (indiv) return(S) res <- matrix(0,nrow=1,ncol=length(p)) for (i in seq_len(x$ngroup)) res[,parord[[i]]] <- res[,parord[[i]]] + S[[i]][,parord[[i]]] return(as.vector(res)) } ###}}} score.multigroup ###{{{ score.lvmfit ##' @export score.lvmfit <- function(x, data=model.frame(x), p=pars(x), model=x$estimator, weights=Weights(x), data2=x$data$data2, ...) { score(x$model0,data=data,p=p,model=model,weights=weights,data2=data2,...) } ###}}} score.lvmfit lava/R/modelsearch.R0000644000176200001440000002536013520655354014025 0ustar liggesusers##' Model searching ##' ##' Performs Wald or score tests ##' ##' ##' @aliases modelsearch ##' @param x \code{lvmfit}-object ##' @param k Number of parameters to test simultaneously. For \code{equivalence} ##' the number of additional associations to be added instead of \code{rel}. ##' @param dir Direction to do model search. "forward" := add ##' associations/arrows to model/graph (score tests), "backward" := remove ##' associations/arrows from model/graph (wald test) ##' @param type If equal to 'correlation' only consider score tests for covariance parameters. If equal to 'regression' go through direct effects only (default 'all' is to do both) ##' @param ... Additional arguments to be passed to the low level functions ##' @return Matrix of test-statistics and p-values ##' @author Klaus K. 
Holst ##' @seealso \code{\link{compare}}, \code{\link{equivalence}} ##' @keywords htest ##' @examples ##' ##' m <- lvm(); ##' regression(m) <- c(y1,y2,y3) ~ eta; latent(m) <- ~eta ##' regression(m) <- eta ~ x ##' m0 <- m; regression(m0) <- y2 ~ x ##' dd <- sim(m0,100)[,manifest(m0)] ##' e <- estimate(m,dd); ##' modelsearch(e,messages=0) ##' modelsearch(e,messages=0,type="cor") ##' @export modelsearch <- function(x,k=1,dir="forward",type='all',...) { if (dir=="forward") { res <- forwardsearch(x,k,type=type,...) return(res) } if (dir=="backstep") { res <- backwardeliminate(x,...) return(res) } res <- backwardsearch(x,k,...) return(res) } backwardeliminate <- function(x, keep=NULL, pthres=0.05, AIC=FALSE, messages=0, missing=FALSE, intercepts=FALSE, maxsteps=Inf, information="E", data, ...) { if (inherits(x,"lvm")) { M <- x } else { M <- Model(x) } if(missing(data)) data <- model.frame(x) dots <- list(...) if (is.null(dots$control$start)) { p0 <- estimate(M,data,quick=TRUE,messages=messages,missing=FALSE,...) dots$control <- c(dots$control, list(start=p0,information="E")) } ff <- function() { ii <- grep("m",names(coef(M))) vv <- variances(M,mean=TRUE) args <- c(list(x=M,data=data,missing=missing,quick=TRUE,messages=messages),dots) cc <- do.call("estimate",args) if (is.numeric(cc)) { I0 <- information(M,p=cc,data=data,type=information)[-c(ii,vv),-c(ii,vv)] cc0 <- cc[-c(ii,vv)] res <- (pnorm(abs(cc0/sqrt(diag(solve(I0)))),lower.tail=FALSE))*2 attributes(res)$coef <- cc } else { coefs <- coef(cc) res <- (pnorm(abs(coefs/sqrt(diag(vcov(cc)))),lower.tail=FALSE))*2 res <- res[-c(ii,vv)] attributes(res)$coef <- coefs } return(res) } done <- FALSE; i <- 0; while (!done & iw)", "Index"); rownames(PM) <- rep("",nrow(PM)) res <- list(res=PM,test=res$test) class(res) <- "modelsearch" res } forwardsearch <- function(x,k=1,messages=lava.options()$messages,type='all',exclude.var=NULL,...) 
{ if (!inherits(x,"lvmfit")) stop("Expected an object of class 'lvmfit'.") p <- pars(x,reorder=TRUE) cur <- Model(x) Y <- endogenous(x) X <- exogenous(x) V <- vars(x) q <- length(Y); qx <- length(X) npar.sat <- q+q*(q-1)/2 + q*qx npar.cur <- index(cur)$npar nfree <- npar.sat-npar.cur if (nfree0) { message("Calculating score test for ",ncol(restrictedcomb), " models:") count <- 0 pb <- txtProgressBar(style=lava.options()$progressbarstyle,width=40) } for (i in seq_len(ncol(restrictedcomb))) { if (messages>0) { count <- count+1 setTxtProgressBar(pb, count/ncol(restrictedcomb)) } varlist <- c() altmodel <- cur ## HA: altmodel, H0: cur for (j in seq_len(k)) { myvar <- restricted[restrictedcomb[j,i],] if (any(wx <- V[myvar]%in%X)) { altmodel <- regression(altmodel,V[myvar][which(!wx)],V[myvar][which(wx)]) } else { if (directional[i]) { covariance(altmodel,pairwise=TRUE) <- V[myvar] } covariance(altmodel,pairwise=TRUE) <- V[myvar] } varlist <- rbind(varlist, V[myvar]) } altmodel$parpos <- NULL altmodel <- updatelvm(altmodel,deriv=TRUE,zeroones=TRUE,mean=TRUE) cc <- coef(altmodel, mean=TRUE,messages=0,symbol=lava.options()$symbol) cc0 <- coef(cur, mean=TRUE,messages=0,symbol=lava.options()$symbol) p1 <- numeric(length(p)+k) ## Need to be sure we place 0 at the correct position for (ic in seq_along(cc)) { idx <- match(cc[ic],cc0) if (!is.na(idx)) p1[ic] <- p[idx] } if (x$estimator=="gaussian" && !inherits(x,"lvm.missing")) { Sc2 <- score(altmodel,p=p1,data=NULL, model=x$estimator,weights=Weights(x),S=S,mu=mu,n=n) } else { Sc2 <- score(altmodel,p=p1,data=model.frame(x), model=x$estimator,weights=Weights(x)) } I <- information(altmodel,p=p1,n=n,data=model.frame(x),weights=Weights(x),estimator=x$estimator) ##[-rmidx,-rmidx] iI <- try(Inverse(I), silent=TRUE) Q <- ifelse (inherits(iI, "try-error"), NA, ## Score test (Sc2)%*%iI%*%t(Sc2) ) Tests <- c(Tests, Q) Vars <- c(Vars, list(varlist)) } if (messages>0) close(pb) ord <- order(Tests); Tests <- cbind(Tests, 
pchisq(Tests,k,lower.tail=FALSE)); colnames(Tests) <- c("Test Statistic", "P-value") PM <- c() for (i in seq_len(nrow(Tests))) { if (!is.na(Tests[i,1])) { vv <- apply(Vars[[i]],1,function(x) paste(x,collapse=lava.options()$symbol[2-directional[i]])) newrow <- c(formatC(Tests[i,1]), formatC(Tests[i,2]), paste(vv,collapse=",")) PM <- rbind(PM, newrow) } } if (is.null(PM)) { message("Saturated model") return(invisible(NULL)) } Tests <- Tests[ord,,drop=FALSE] Vars <- Vars[ord] PM <- PM[ord,,drop=FALSE] colnames(PM) <- c("Score: S", "P(S>s)", "Index"); rownames(PM) <- rep("",nrow(PM)) res <- list(res=PM, test=Tests, var=Vars, directional=directional) class(res) <- "modelsearch" return(res) } ##' @export print.modelsearch <- function(x,tail=nrow(x$res),adj=c("holm","BH"),...) { N <- nrow(x$res) if (!is.null(adj)) { ## adjp <- rev(holm(as.numeric(x$test[,2]))) adjp <- rbind(sapply(adj,function(i) p.adjust(x$test[,2],method=i))) colnames(adjp) <- adj x$res <- cbind(x$res,rbind(formatC(adjp))) } print(x$res[seq(N-tail+1,N),], quote=FALSE, ...) invisible(x) } lava/R/fix.R0000644000176200001440000004143213520655354012323 0ustar liggesusers###{{{ print.fix ##' @export print.fix <- function(x,exo=FALSE,...) { switch(attributes(x)$type, reg = cat("Regression parameters:\n"), cov = cat("Covariance parameters:\n"), mean = cat("Intercept parameters:\n")) M <- linconstrain(x,print=TRUE) invisible(x) } linconstrain <- function(x,print=TRUE,indent=" ",exo=FALSE,...) 
{ idx <- seq_len(attributes(x)$nvar) idx0 <- setdiff(idx,attributes(x)$exo.idx) if (!exo & attributes(x)$type!="reg") idx <- idx0 if (attributes(x)$type=="mean") { if (length(idx)>0){ M <- rbind(unlist(x[idx])) rownames(M) <- "" M[is.na(M)] <- "*" } else { M <- NULL } } else { if (length(x$rel)==0) { M <- NULL } else { M <- x$rel[idx,idx,drop=FALSE] M[M==0] <- NA M[M==1] <- "*" M[which(!is.na(x$labels[idx,idx]))] <- x$labels[idx,idx][which(!is.na(x$labels[idx,idx]))] M[which(!is.na(x$values[idx,idx]))] <- x$values[idx,idx][which(!is.na(x$values[idx,idx]))] if (attributes(x)$type=="reg") M <- t(M[,idx0,drop=FALSE]) } } if (print) { M0 <- M if (NROW(M)>0) rownames(M0) <- paste(indent,rownames(M)) print(M0,quote=FALSE,na.print="",...) } invisible(M) } ###}}} print.fix ###{{{ intfix ##' @export "intfix" <- function(object,...) UseMethod("intfix") ##' @export "intfix<-" <- function(object,...,value) UseMethod("intfix<-") ##' Fix mean parameters in 'lvm'-object ##' ##' Define linear constraints on intercept parameters in a \code{lvm}-object. ##' ##' ##' The \code{intercept} function is used to specify linear constraints on the ##' intercept parameters of a latent variable model. As an example we look at ##' the multivariate regression model ##' ##' \deqn{ E(Y_1|X) = \alpha_1 + \beta_1 X} \deqn{ E(Y_2|X) = \alpha_2 + \beta_2 ##' X} ##' ##' defined by the call ##' ##' \code{m <- lvm(c(y1,y2) ~ x)} ##' ##' To fix \eqn{\alpha_1=\alpha_2} we call ##' ##' \code{intercept(m) <- c(y1,y2) ~ f(mu)} ##' ##' Fixed parameters can be reset by fixing them to \code{NA}. For instance to ##' free the parameter restriction of \eqn{Y_1} and at the same time fixing ##' \eqn{\alpha_2=2}, we call ##' ##' \code{intercept(m, ~y1+y2) <- list(NA,2)} ##' ##' Calling \code{intercept} with no additional arguments will return the ##' current intercept restrictions of the \code{lvm}-object. 
##' ##' @aliases intercept intercept<- intercept.lvm intercept<-.lvm intfix intfix ##' intfix<- intfix.lvm intfix<-.lvm ##' @param object \code{lvm}-object ##' @param vars character vector of variable names ##' @param value Vector (or list) of parameter values or labels (numeric or ##' character) or a formula defining the linear constraints (see also the ##' \code{regression} or \code{covariance} methods). ##' @param \dots Additional arguments ##' @usage ##' \method{intercept}{lvm}(object, vars, ...) <- value ##' @return ##' ##' A \code{lvm}-object ##' @note ##' ##' Variables will be added to the model if not already present. ##' @author Klaus K. Holst ##' @seealso \code{\link{covariance<-}}, \code{\link{regression<-}}, ##' \code{\link{constrain<-}}, \code{\link{parameter<-}}, ##' \code{\link{latent<-}}, \code{\link{cancel<-}}, \code{\link{kill<-}} ##' @keywords models regression ##' @export ##' @examples ##' ##' ##' ## A multivariate model ##' m <- lvm(c(y1,y2) ~ f(x1,beta)+x2) ##' regression(m) <- y3 ~ f(x1,beta) ##' intercept(m) <- y1 ~ f(mu) ##' intercept(m, ~y2+y3) <- list(2,"mu") ##' intercept(m) ## Examine intercepts of model (NA translates to free/unique paramete##r) ##' ##' "intercept" <- function(object,...) UseMethod("intercept") ##' @export ##' @export intercept.lvm <- intfix.lvm <- function(object,value,...) { if (!missing(value)) { intercept(object,...) 
<- value return(object) } res <- object$mean; attr(res,"type") <- "mean" attr(res,"exo.idx") <- index(object)$exo.idx attr(res,"nvar") <- length(res) class(res) <- "fix" return(res) } ##' @export "intercept<-" <- function(object,...,value) UseMethod("intercept<-") ##' @export ##' @export "intercept<-.lvm" <- "intfix<-.lvm" <- function(object, vars,...,value) { if (!missing(vars) && inherits(value,"formula")) value <- all.vars(value) if (inherits(value,"formula")) { lhs <- getoutcome(value) yy <- decomp.specials(lhs) if ((inherits(value[[3]],"logical") && is.na(value[[3]]))) { intfix(object,yy) <- NA return(object) } tt <- terms(value) xf <- attributes(terms(tt))$term.labels res <- lapply(xf,decomp.specials)[[1]] myvalue <- char2num(as.list(res)) myvalue <- lapply(myvalue, function(x) ifelse(x=="NA",NA,x)) intfix(object,yy) <- myvalue object$parpos <- NULL return(object) } if (inherits(vars,"formula")) { vars <- all.vars(vars) } object$mean[vars] <- value newindex <- reindex(object) object$parpos <- NULL index(object)[names(newindex)] <- newindex return(object) } ###}}} intfix ###{{{ covfix ##' @export "covfix" <- function(object,...) UseMethod("covfix") ##' @export covfix.lvm <- function(object,...) { res <- list(rel=object$cov, labels=object$covpar, values=object$covfix); attr(res,"type") <- "cov" attr(res,"exo.idx") <- index(object)$exo.idx attr(res,"nvar") <- NROW(res$rel) class(res) <- "fix" return(res) } ##' @export "covfix<-" <- function(object,...,value) UseMethod("covfix<-") ##' @export "covfix<-.lvm" <- function(object, var1, var2=var1, pairwise=FALSE, exo=FALSE, ..., value) { if (inherits(var1,"formula")) { var1 <- all.vars(var1) } if (inherits(var2,"formula")) { var2 <- all.vars(var2) } object <- addvar(object,c(var1,var2),reindex=FALSE,...) 
allvars <- c(var1,var2) xorg <- exogenous(object) exoset <- setdiff(xorg,allvars) if (!exo & length(exoset)1) { cond <- all.vars(xx[[2]]) } } if (inherits(cond,"formula")) { cond <- all.vars(cond) } nod <- vars(object) x <- intersect(x,nod) cond <- intersect(cond,nod) V <- c(x,cond) ## Ancenstral graph keep <- c(V,ancestors(object,V)) del <- setdiff(nod,keep) if (length(del)>0) object <- rmvar(object,del) ## moralized graph man <- object for (v in V) { pa <- parents(object,v) if (length(pa)>1) man$M[pa,pa] <- 1 } man.sel <- rmvar(man,cond) ii <- match(x,vars(man.sel)) A <- with(man.sel, (t(M)+M)>0) dsep <- c() for (i in ii) { conn <- DFS(A,i) i0 <- setdiff(ii,i) dsep <- c(dsep,!any(i0%in%conn)) } res <- all(dsep) attr(man.sel,"dsep") <- res if (return.graph) return(man.sel) return(res) } lava/R/blockdiag.R0000644000176200001440000000146513520655354013456 0ustar liggesusers##' Combine matrices to block diagonal structure ##' @title Combine matrices to block diagonal structure ##' @param x Matrix ##' @param \dots Additional matrices ##' @param pad Vyalue outside block-diagonal ##' @author Klaus K. Holst ##' @export ##' @examples ##' A <- diag(3)+1 ##' blockdiag(A,A,A,pad=NA) blockdiag <- function(x,...,pad=0) { if (is.list(x)) xx <- x else xx <- list(x,...) rows <- unlist(lapply(xx,nrow)) crows <- c(0,cumsum(rows)) cols <- unlist(lapply(xx,ncol)) ccols <- c(0,cumsum(cols)) res <- matrix(pad,nrow=sum(rows),ncol=sum(cols)) for (i in seq_len(length(xx))) { idx1 <- seq_len(rows[i])+crows[i]; idx2 <- seq_len(cols[i])+ccols[i] res[idx1,idx2] <- xx[[i]] } colnames(res) <- unlist(lapply(xx,colnames)); rownames(res) <- unlist(lapply(xx,rownames)) return(res) } lava/R/Grep.R0000644000176200001440000000236613520655354012435 0ustar liggesusers##' Finds elements in vector or column-names in data.frame/matrix ##' ##' Pattern matching in a vector or column names of a data.frame or matrix. ##' @param x vector, matrix or data.frame. 
##' @param pattern regular expression to search for
##' @param subset If TRUE returns subset of data.frame/matrix otherwise just the matching column names
##' @param ignore.case Default ignore case
##' @param ... Additional arguments to 'grep'
##' @return A data.frame with 2 columns with the indices in the first and the
##' matching names in the second.
##' @author Klaus K. Holst
##' @seealso \code{\link{grep}}, and \code{\link{agrep}} for approximate string
##' matching,
##' @keywords misc utilities
##' @examples
##' data(iris)
##' head(Grep(iris,"(len)|(sp)"))
##' @export
`Grep` <- function(x, pattern, subset=TRUE, ignore.case = TRUE,...) {
    ## Candidate names to match against: column names for a
    ## data.frame/matrix, the elements themselves for a plain vector.
    if (is.data.frame(x)) {
        nn <- names(x)
    } else if (is.matrix(x)) {
        ## BUG FIX: was 'colnames(nn)', referencing the not-yet-defined
        ## variable 'nn' itself, which made every matrix input fail with
        ## "object 'nn' not found". Matching is intended against the
        ## matrix's column names.
        nn <- colnames(x)
    } else {
        nn <- x
    }
    ii <- grep(pattern, nn, ignore.case = ignore.case, ...)
    if (subset) {
        ## Return the matching columns (or elements) of 'x' directly
        if (is.matrix(x) || is.data.frame(x)) {
            return(x[, ii, drop = FALSE])
        }
        return(x[ii])
    }
    ## Otherwise report the match indices alongside the matched names
    res <- data.frame(index = ii, name = nn[ii])
    res
}

## lava/R/summary.R  (tar member header preserved from the archive dump)

###{{{ summary.lvm

##' @export
## NOTE(review): the body of summary.lvm is truncated at this slice
## boundary of the archive dump and continues on the following line.
`summary.lvm` <- function(object,...) {
    k <- length(vars(object))
    ## cat("Latent Variable Model \n\twith: ", k, " variables.\n", sep="");
    print(object,print.transform=FALSE,...)
} ## printmany(object$cov, printmany(object$covpar, object$covfix, name1="Labels:", name2="Fixed:", print=FALSE), name1="covariance:") cat("\n") } ###}}} summary.lvm ###{{{ summary.lvmfit ##' @export `summary.lvmfit` <- function(object,std="xy", type=9, labels=2, ...) { cc <- CoefMat(object,labels=labels,std=std,type=type,...) mycoef <- coef(object,type=9) nlincon <- attributes(mycoef)$nlincon nonexo <- setdiff(vars(object),index(Model(object))$exogenous) attributes(mycoef) <- attributes(mycoef)[1:2] mygof <- object$opt$summary.message if (is.null(mygof)) { mygof <- gof } if (class(object)[1]=="lvm.missing") { nn <- unlist(lapply(object$multigroup$data, nrow)) nc <- nn[object$cc] if (length(nc)==0) nc <- 0 ngroup <- object$multigroup$ngroup res <- list(object=object, coef=mycoef, coefmat=cc, nlincon=nlincon, gof=mygof(object), n=sum(nn), nc=nc, ngroup=ngroup, varmat=modelVar(object)$P[nonexo,nonexo], latent=latent(object), opt=object$opt, vcov=vcov(object), estimator=object$estimator, rsq=rsq(object)) } else { n <- nrow(model.frame(object)) if (is.null(n)) n <- model.frame(object)$n res <- list(coef=mycoef, coefmat=cc, nlincon=nlincon, gof=mygof(object), n=n, nc=n, latent=latent(object), opt=object$opt, vcov=vcov(object), estimator=object$estimator, rsq=rsq(object))##, varmat=modelVar(object)$P[nonexo,nonexo]) } class(res) <- "summary.lvmfit" res } ##' @export print.summary.lvmfit <- function(x,varmat=TRUE,...) { if (!is.null(x$control$method)) { l2D <- sum(x$opt$grad^2) rnkV <- qr(x$vcov)$rank if (l2D>1e-2) warning("Possible problems with convergence!") cat("||score||^2=",l2D,"\n",sep="") np <- nrow(x$vcov) if (rnkV1e-2) warning("Possible problems with convergence!") cat("||score||^2=",l2D,"\n") cat("Latent variables:", x$latent, "\n") print(x$object,...) 
##print(x$coefmat,quote=FALSE,right=TRUE) printline() if (!is.null(attributes(x$coefmat)$nlincon)) { cat("Non-linear constraints:\n") print(attributes(x$coefmat)$nlincon) printline() } cat("Estimator:",x$estimator,"\n") printline() if (!is.null(x$gof)) { print(x$gof) printline() } invisible(x) } ###}}} summary.multigroupfit ###{{{ summary.multigroup ##' @export summary.multigroup <- function(object,...) { for (m in object$lvm) print(m,...) print(object) invisible(object) } ###}}} lava/R/plot.R0000644000176200001440000003363413520655354012520 0ustar liggesusers###{{{ plot.lvm ##' Plot path diagram ##' ##' Plot the path diagram of a SEM ##' ##' ##' @aliases plot.lvmfit ##' @param x Model object ##' @param diag Logical argument indicating whether to visualize ##' variance parameters (i.e. diagonal of variance matrix) ##' @param cor Logical argument indicating whether to visualize ##' correlation parameters ##' @param labels Logical argument indiciating whether to add labels ##' to plot (Unnamed parameters will be labeled p1,p2,...) ##' @param intercept Logical argument indiciating whether to add ##' intercept labels ##' @param addcolor Logical argument indiciating whether to add colors ##' to plot (overrides \code{nodecolor} calls) ##' @param plain if TRUE strip plot of colors and boxes ##' @param cex Fontsize of node labels ##' @param fontsize1 Fontsize of edge labels ##' @param noplot if TRUE then return \code{graphNEL} object only ##' @param graph Graph attributes (Rgraphviz) ##' @param attrs Attributes (Rgraphviz) ##' @param unexpr if TRUE remove expressions from labels ##' @param addstyle Logical argument indicating whether additional ##' style should automatically be added to the plot (e.g. 
dashed ##' lines to double-headed arrows) ##' @param plot.engine default 'Rgraphviz' if available, otherwise ##' visNetwork,igraph ##' @param init Reinitialize graph (for internal use) ##' @param layout Graph layout (see Rgraphviz or igraph manual) ##' @param edgecolor if TRUE plot style with colored edges ##' @param graph.proc Function that post-process the graph object ##' (default: subscripts are automatically added to labels of the ##' nodes) ##' @param ... Additional arguments to be passed to the low level ##' functions ##' @author Klaus K. Holst ##' @keywords hplot regression ##' @examples ##' ##' if (interactive()) { ##' m <- lvm(c(y1,y2) ~ eta) ##' regression(m) <- eta ~ z+x2 ##' regression(m) <- c(eta,z) ~ x1 ##' latent(m) <- ~eta ##' labels(m) <- c(y1=expression(y[scriptscriptstyle(1)]), ##' y2=expression(y[scriptscriptstyle(2)]), ##' x1=expression(x[scriptscriptstyle(1)]), ##' x2=expression(x[scriptscriptstyle(2)]), ##' eta=expression(eta)) ##' edgelabels(m, eta ~ z+x1+x2, cex=2, lwd=3, ##' col=c("orange","lightblue","lightblue")) <- expression(rho,phi,psi) ##' nodecolor(m, vars(m), border="white", labcol="darkblue") <- NA ##' nodecolor(m, ~y1+y2+z, labcol=c("white","white","black")) <- NA ##' plot(m,cex=1.5) ##' ##' d <- sim(m,100) ##' e <- estimate(m,d) ##' plot(e) ##' ##' m <- lvm(c(y1,y2) ~ eta) ##' regression(m) <- eta ~ z+x2 ##' regression(m) <- c(eta,z) ~ x1 ##' latent(m) <- ~eta ##' plot(lava:::beautify(m,edgecol=FALSE)) ##' } ##' @export ##' @method plot lvm `plot.lvm` <- function(x,diag=FALSE,cor=TRUE,labels=FALSE,intercept=FALSE,addcolor=TRUE,plain=FALSE,cex,fontsize1=10,noplot=FALSE,graph=list(rankdir="BT"), attrs=list(graph=graph), unexpr=FALSE, addstyle=TRUE,plot.engine=lava.options()$plot.engine,init=TRUE, layout=lava.options()$layout, edgecolor=lava.options()$edgecolor, graph.proc=lava.options()$graph.proc, ...) 
{ if (is.null(vars(x))) { message("Nothing to plot: model has no variables.") return(NULL) } index(x) <- reindex(x) myhooks <- gethook("plot.post.hooks") for (f in myhooks) { x <- do.call(f, list(x=x,...)) } plot.engine <- tolower(plot.engine) if (plot.engine=="rgraphviz" && (!(requireNamespace("graph",quietly=TRUE)) || !(requireNamespace("Rgraphviz",quietly=TRUE)))) { plot.engine <- "visnetwork" } if (plot.engine=="visnetwork" && (!(requireNamespace("visNetwork",quietly=TRUE)))) { plot.engine <- "igraph" } if (plot.engine=="igraph") { if (!requireNamespace("igraph",quietly=TRUE)) { message("package 'Rgraphviz','igraph' or 'visNetwork' not available") return(NULL) } L <- igraph::layout.sugiyama(g <- igraph.lvm(x,...))$layout if (noplot) return(graph::updateGraph(g)) dots <- list(...) if (is.character(layout)) plot(g,layout=L,...) else plot(g,layout=layout,...) return(invisible(g)) } if (plot.engine=="visnetwork") { g <- vis.lvm(x,labels=labels,...) return(g) } if (init) { if (!is.null(graph.proc)) { x <- do.call(graph.proc, list(x,edgecol=edgecolor,...)) } g <- finalize(x,diag=diag,cor=cor,addcolor=addcolor,intercept=intercept,plain=plain,cex=cex,fontsize1=fontsize1,unexpr=unexpr,addstyle=addstyle) } else { g <- Graph(x) } if (labels) { AP <- matrices(x,paste0("p",seq_len(index(x)$npar))) mylab <- AP$P; mylab[AP$A!="0"] <- AP$A[AP$A!="0"] mylab[!is.na(x$par)] <- x$par[!is.na(x$par)] mylab[!is.na(x$covpar)] <- x$covpar[!is.na(x$covpar)] g <- edgelabels(g, lab=mylab) } if (lava.options()$debug) { plot(g) } else { ## graphRenderInfo(g)$recipEdges <- "distinct" .savedOpt <- options(warn=-1) ## Temporarily disable warnings as renderGraph comes with a stupid warning when labels are given as "expression" dots <- list(...) 
dots$attrs <- attrs dots$x <- g dots$recipEdges <- "distinct" if (attributes(g)$feedback) dots$recipEdges <- c("combine") if (is.null(dots$layoutType)) dots$layoutType <- layout[1] if (all(index(x)$A==0)) dots$layoutType <- "circo" g <- do.call(getFromNamespace("layoutGraph","Rgraphviz"), dots) ## Temporary work around: graph::nodeRenderInfo(g)$fill <- graph::nodeRenderInfo(dots$x)$fill graph::nodeRenderInfo(g)$col <- graph::nodeRenderInfo(dots$x)$col graph::edgeRenderInfo(g)$col <- graph::edgeRenderInfo(dots$x)$col if (noplot) return(g) res <- tryCatch(Rgraphviz::renderGraph(g),error=function(e) NULL) ## Redo nodes to avoid edges overlapping node borders ##par(new=TRUE) ##res <- tryCatch(Rgraphviz::renderGraph(g,drawEdges=NULL,new=FALSE),error=function(e) NULL) ## options(.savedOpt) } myhooks <- gethook("plot.hooks") for (f in myhooks) { do.call(f, list(x=x,...)) } invisible(g) } ###}}} plot.lvm ###{{{ vis.lvm vis.lvm <- function(m,randomSeed=1,width="100%",height="700px",labels=FALSE,cor=TRUE,...) 
{ if (!requireNamespace("visNetwork",quietly=TRUE)) stop("'visNetwork' required") types <- rep("endogenous",length(vars(m))) types[index(m)$eta.idx] <- "latent" types[index(m)$exo.idx] <- "exogenous" col <- lava.options()$node.color colors <- rep(col[2],length(types)) colors[index(m)$eta.idx] <- col[3] colors[index(m)$exo.idx] <- col[1] trf <- transform(m) if (length(trf)>0) { colors[which(index(m)$vars%in%names(trf))] <- col[4] } shapes <- rep("box",length(types)) shapes[index(m)$eta.idx] <- "circle" nodes <- data.frame(id=seq_along(types), label=vars(m), color=colors, shape=shapes, shadow=TRUE, size=rep(1.0,length(types)), group=types) edges <- cbind(edgeList(m))#,shadow=TRUE) AP <- matrices(m,paste0("p",seq_len(index(m)$npar))) if (labels) { mylab <- AP$A; mylab[!is.na(m$par)] <- m$par[!is.na(m$par)] lab <- c() for (i in seq(nrow(edges))) { lab <- c(lab,t(mylab)[edges[i,1],edges[i,2]]) } edges <- cbind(edges,label=lab) } if (length(edges)>0) edges <- cbind(edges,dashes=FALSE,arrows="from") if (cor) { mylab <- AP$P mylab[!is.na(m$covpar)] <- m$covpar[!is.na(m$covpar)] coredges <- data.frame(from=numeric(),to=numeric(),label=character()) for (i in seq_len(nrow(mylab)-1)) { for (j in seq(i+1,nrow(mylab))) { if (mylab[i,j]!="0") { coredges <- rbind(coredges, data.frame(from=i,to=j,label=mylab[i,j])) } } } if (nrow(coredges)>0) { if (!labels) coredges <- coredges[,1:2,drop=FALSE] coredges <- cbind(coredges,dashes=TRUE,arrows="false") edges <- rbind(edges,coredges) } } if (length(edges)>0) edges$physics <- TRUE v <- visNetwork::visNetwork(nodes,edges,width=width,height=height,...) v <- visNetwork::visEdges(v, arrows=list(from=list(enabled=TRUE, scaleFactor = 0.5)), scaling = list(min = 2, max = 2)) v <- visNetwork::visLayout(v,randomSeed=randomSeed) v } ###}}} vis.lvm ###{{{ plot.lvmfit ##' @export `plot.lvmfit` <- function(x,diag=TRUE,cor=TRUE,type,noplot=FALSE,fontsize1=5,f,graph.proc=lava.options()$graph.proc,...) 
{ if (!missing(f)) { return(plot.estimate(x,f=f,...)) } .savedOpt <- options(warn=-1) ## Temporarily disable warnings as renderGraph comes with a warning when labels are given as "expression" if (!requireNamespace("graph",quietly=TRUE)) { plot(Model(x),...) return(invisible(x)) } g <- Graph(x) newgraph <- FALSE if (is.null(g)) { newgraph <- TRUE if (!is.null(graph.proc)) { Model(x) <- beautify(Model(x),edgecol=FALSE,...) } Graph(x) <- finalize(Model(x), diag=TRUE, cor=FALSE, fontsize1=fontsize1, ...) } if(noplot) return(Graph(x)) if (newgraph) { if (missing(type)) type <- "est" x <- edgelabels(x, type=type, diag=diag, cor=cor, fontsize1=fontsize1, ...) } else { if (!missing(type)) { x <- edgelabels(x, type=type, diag=diag, cor=cor, fontsize1=fontsize1, ...) } } g <- Graph(x) var <- rownames(covariance(Model(x))$rel) if (!cor) { delta <- 1 for (r in seq_len(nrow(covariance(Model(x))$rel)-delta) ) { for (s in seq(r+delta,ncol(covariance(Model(x))$rel)) ) { if (covariance(Model(x))$rel[r,s]==1) { g <- graph::removeEdge(var[r],var[s], g) g <- graph::removeEdge(var[s],var[r], g) } } } } if (!diag) { for (r in seq_len(nrow(covariance(Model(x))$rel)) ) { if (graph::isAdjacent(g,var[r],var[r])) g <- graph::removeEdge(var[r],var[r],g) } } m <- Model(x); Graph(m) <- g g <- plot(m, diag=diag, cor=cor, fontsize1=fontsize1, init=FALSE, ...) options(.savedOpt) invisible(g) } ###}}} plot.lvmfit ###{{{ plot.multigroup ##' @export plot.multigroup <- function(x,diag=TRUE,labels=TRUE,...) { k <- x$ngroup for (i in seq_len(k)) plot(x$lvm[[i]],diag=diag,labels=labels, ...) } ##' @export plot.multigroupfit <- function(x,...) { plot(Model(x),...) } ###}}} ###{{{ igraph.lvm ##' @export igraph.lvm <- function(x,layout=igraph::layout.kamada.kawai,...) 
{ requireNamespace("igraph",quietly=TRUE) oC <- covariance(x)$rel for (i in seq_len(nrow(oC)-1)) for (j in seq(i+1,nrow(oC))) { if (oC[i,j]!=0) { x <- regression(x,vars(x)[i],vars(x)[j]) x <- regression(x,vars(x)[j],vars(x)[i]) } } g <- igraph::graph.adjacency(x$M,mode="directed") igraph::V(g)$color <- "lightblue" igraph::V(g)$label <- vars(x) igraph::V(g)$shape <- "rectangle" for (i in match(latent(x),igraph::V(g)$name)) { igraph::V(g)$shape[i] <- "circle" igraph::V(g)$color[i] <- "green" } endo <- index(x)$endogenous for (i in match(endo,igraph::V(g)$name)) { igraph::V(g)$color[i] <- "orange" } igraph::E(g)$label <- as.list(rep("",length(igraph::E(g)))) oE <- edgelabels(x) for (i in seq_along(igraph::E(g))) { st <- as.character(oE[i]) if (length(st)>0) igraph::E(g)$label[[i]] <- st } g$layout <- layout(g) return(g) } ###}}} igraph.lvm beautify <- function(x,col=lava.options()$node.color,border=rep("black",3),labcol=rep("darkblue",3),edgecol=TRUE,...) { if (is.null(x$noderender$fill)) notcolored <- vars(x) else notcolored <- vars(x)[is.na(x$noderender$fill)] x0 <- intersect(notcolored,exogenous(x)) if (length(x0)>0) nodecolor(x, x0, border=border[1], labcol=labcol[1]) <- col[1] x0 <- intersect(notcolored,endogenous(x)) if (length(x0)>0) nodecolor(x, x0, border=border[1], labcol=labcol[1]) <- col[2] x0 <- intersect(notcolored,latent(x)) if (length(x0)>0) nodecolor(x, x0, border=border[1], labcol=labcol[1]) <- col[3] trimmed <- gsub("[[:digit:]]*$","",vars(x)) keep <- num <- c() for (i in seq_len(length(vars(x)))) { lb <- labels(x)[vars(x)[i]] if (is.null(try(eval(lb),silent=TRUE))) { keep <- c(keep,i) num <- c(num,gsub(trimmed[i],"",vars(x)[i])) } } if (length(keep)>0) { trimmed <- trimmed[keep] trim <- gsub(" ",",",trimmed) lab <- paste0('"',vars(x)[keep],'"',"=",paste0("expression(",trim,"[scriptscriptstyle(",num,")])"),collapse=",") labels(x) <- eval(parse(text=paste("c(",lab,")"))) } if (!edgecol) return(x) iex <- index(x)$exo.idx ien <- index(x)$endo.idx ila 
<- index(x)$eta.idx for (i in iex) { for (j in which(x$M[i,]==1)) { elab <- edgelabels(x,to=vars(x)[j],from=rev(vars(x)[i])) elab2 <- try(eval(elab),silent=TRUE) if (is.null(elab2)) elab2 <- "" edgelabels(x, to=vars(x)[j], from=rev(vars(x)[i]), cex=2, lwd=3,col=col[1]) <- elab2 } } for (i in ien) { for (j in which(x$M[i,]==1)) { elab <- edgelabels(x,to=vars(x)[j],from=rev(vars(x)[i])) elab2 <- try(eval(elab),silent=TRUE) if (is.null(elab2)) elab2 <- "" edgelabels(x, to=vars(x)[j], from=rev(vars(x)[i]), cex=2, lwd=3,col=col[2]) <- elab2 } } for (i in ila) { for (j in which(x$M[i,]==1)) { elab <- edgelabels(x,to=vars(x)[j],from=rev(vars(x)[i])) elab2 <- try(eval(elab),silent=TRUE) if (is.null(elab2)) elab2 <- "" if (is.null(try(eval(elab),silent=TRUE))) elab <- "" edgelabels(x, to=vars(x)[j], from=rev(vars(x)[i]), cex=2, lwd=3,col=col[3]) <- elab2 } } x } lava/R/missingMLE.R0000644000176200001440000002374013520655354013546 0ustar liggesusers###{{{ missingModel missingModel <- function(model,data,var=endogenous(model),fix=FALSE,type=2,keep=NULL,weights=NULL,data2=NULL,cluster=NULL,...) 
{ if (!inherits(model,"lvm")) stop("Needs a lvm-object") if (type==3) { var <- manifest(model) } data.mis <- is.na(data[,var,drop=FALSE]) colnames(data.mis) <- var patterns <- unique(data.mis,MARGIN=1) mis.type <- apply(data.mis,1, function(x) which(apply(patterns,1,function(y) identical(x,y)))) pattern.allmis <- which(apply(patterns,1,all)) ## Remove entry with all missing models <- datasets <- weights <- data2 <- clusters <- c() mymodel <- baptize(model) pattern.compl <- 0 count <- 0 A <- index(model)$A topendo <- endogenous(model,top=TRUE) exo <- exogenous(model) exclude <- c() warned <- FALSE for (i in setdiff(seq_len(nrow(patterns)),pattern.allmis)) { exoremove <- c() count <- count+1 mypattern <- patterns[i,] m0 <- mymodel; if (any(mypattern)) { latent(m0) <- colnames(data.mis)[mypattern] if (type>1) { mytop <- intersect(topendo,colnames(data.mis)[mypattern]) if (!is.null(mytop)) { rmvar(m0) <- mytop for (xx in exo) { ## If exogenous variable only have effect on missing variables, ## then remove it from the model if (all(c(rownames(A)[A[xx,]==1])%in%mytop) && !(xx%in%m0$par) ##&& !(xx%in%names(index(m0))$parval) ) { exoremove <- c(exoremove,xx) rmvar(m0) <- xx } } } } } else pattern.compl <- count ## d0 <- data[mis.type==i,manifest(m0),drop=FALSE]; d0 <- data[which(mis.type==i),c(manifest(m0),keep),drop=FALSE]; if (!is.list(weights)) { w0.var <- intersect(manifest(m0),colnames(weights)) w0 <- weights[which(mis.type==i),w0.var,drop=FALSE]; } if (!is.list(data2)) { w02.var <- intersect(manifest(m0),colnames(data2)) w02 <- data2[which(mis.type==i),w02.var,drop=FALSE]; } clust0 <- cluster[which(mis.type==i)] ex0 <- exogenous(m0) <- setdiff(exo,exoremove) xmis <- which(apply(d0[,ex0,drop=FALSE],1,function(x) any(is.na(x)))) if (length(xmis)>0) { misx <- ex0[apply(d0[xmis,ex0,drop=FALSE],2,function(x) any(is.na(x)))] if (!warned) warning("Missing exogenous variables: ", paste(misx,collapse=","), ". 
Removing rows...") warned <- TRUE d0 <- d0[-xmis,,drop=FALSE] w0 <- w0[-xmis,,drop=FALSE] clust0 <- clust0[-xmis] w02 <- w02[-xmis,,drop=FALSE] } if (length(misx <- intersect(ex0,latent(m0)))>0) { warning("Missing exogenous variables:", paste(misx,collapse=","), "! Remove manually!.") } ## else { if( sum(unlist(index(m0)[c("npar","npar.mean")]))>0 ) { models <- c(models, list(m0)) datasets <- c(datasets, list(d0)) weights <- c(weights, list(w0)) if (!is.list(data2)) data2 <- c(data2, list(w02)) clusters <- c(clusters, list(clust0)) } else { exclude <- c(exclude,count) } } } rmset <- c() for (i in seq_len(length(datasets))) { if (nrow(datasets[[i]])==0) rmset <- c(rmset,i) } if (length(rmset)>0) { models[[rmset]] <- NULL datasets[[rmset]] <- NULL weights[[rmset]] <- NULL data2[[rmset]] <- NULL clusters[[rmset]] <- NULL patterns <- patterns[-rmset,,drop=FALSE] } Patterns <- patterns if (length(exclude)>0) Patterns <- Patterns[-exclude,] pattern.allcomp<- which(apply(Patterns,1,function(x) all(!x))) ## Complete cases res <- list(models=models, datasets=datasets, weights=weights, data2=data2, clusters=clusters, patterns=Patterns, pattern.compl=pattern.compl, pattern.allmis=pattern.allmis, pattern.allcomp=pattern.allcomp, mis.type=mis.type) return(res) } ###}}} ###{{{ estimate.MAR.lvm ##' @export estimate.MAR <- function(x,data,which=endogenous(x),fix,type=2,startcc=FALSE,control=list(),messages=lava.options()$messages,weights,data2,cluster,onlymodel=FALSE,estimator="gaussian",hessian=TRUE,keep=NULL,...) 
{ cl <- match.call() Debug("estimate.MAR") redvar <- intersect(intersect(parlabels(x),latent(x)),colnames(data)) if (length(redvar)>0 & (messages>0)) warning(paste("Remove latent variable colnames from dataset",redvar)) xfix <- setdiff(colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))],latent(x)) if (missing(fix)) fix <- ifelse(length(xfix)>0,FALSE,TRUE) S <- diag(nrow=length(manifest(x))); mu <- rep(0,nrow(S)); K <- length(exogenous(x)) vnames <- index(x)$manifest names(mu) <- rownames(S) <- colnames(S) <- vnames if (K>0) { xx <- subset(Model(x),exogenous(x)) exogenous(xx) <- NULL covfix(xx, vars(xx)) <- NA xx <- covariance(xx,exogenous(x),exogenous(x)) datax <- data[,exogenous(x),drop=FALSE] exo.idx <- match(exogenous(x),manifest(x)) mu0 <- colMeans(datax,na.rm=TRUE) cov0 <- cov(datax,use="pairwise.complete.obs")*(nrow(datax)-1)/nrow(datax) cov0upper <- cov0[upper.tri(cov0,diag=TRUE)] exogenous(xx) <- NULL coefpos <- matrices(xx,seq_len(K*(K-1)/2+K))$P ii <- coefpos[upper.tri(coefpos,diag=TRUE)] start <- c(mu0, cov0upper[order(ii)]) S[exo.idx,exo.idx] <- cov0 mu[exo.idx] <- mu0 ## message("\n") } x0 <- x x <- fixsome(x, measurement.fix=fix, exo.fix=TRUE, S=S, mu=mu, n=1) if (messages>1) message("Identifying missing patterns...") val <- missingModel(x,data,var=which,type=type,keep=c(keep,xfix),weights=weights,data2=data2,cluster=cluster,...) if (messages>1) message("\n") if (nrow(val$patterns)==1) { res <- estimate(x, data=data, fix=fix, weights=weights, data2=data2, estimator=estimator, messages=messages, control=control, ...) return(res) } if (startcc & is.null(control$start)) { if (messages>1) message("Obtaining starting value...") start0 <- rep(1,sum(unlist(index(x)[c("npar","npar.mean")]))) mystart <- tryCatch( (estimate(x,data=na.omit(data),messages=0, weights=weights,data2=data2,estimator=estimator,quick=TRUE,... 
)), error=function(e) rep(1,sum(unlist(index(x)[c("npar","npar.mean")]))) ) control$start <- mystart if (messages>1) message("\n") } if (is.null(control$meanstructure)) control$meanstructure <- TRUE mg0 <- with(val, suppressWarnings(multigroup(models,datasets,fix=FALSE,exo.fix=FALSE,missing=FALSE))) if (!is.null(names(control$start))) { parorder1 <- attributes(parpos(mg0,p=names(control$start)))$name paridx <- match(parorder1,names(control$start)) ## newpos <- paridx[which(!is.na(paridx))] start0 <- control$start start0[which(!is.na(paridx))] <- control$start[na.omit(paridx)] names(start0)[which(!is.na(paridx))] <- names(control$start[na.omit(paridx)]) control$start <- start0 } if (onlymodel) return(list(mg=mg0,val=val,weights=val$weights,data2=val$data2,cluster=val$clusters)) if (all(unlist(lapply(val$weights,is.null)))) val$weights <- NULL if (all(unlist(lapply(val$data2,is.null)))) val$data2 <- NULL if (all(unlist(lapply(val$clusters,is.null)))) val$clusters <- NULL e.mis <- estimate(mg0,control=control,messages=messages, weights=val$weights,data2=val$data2, cluster=val$clusters,estimator=estimator,...) cc <- coef(e.mis,type=1) mynames <- c() if (e.mis$model$npar.mean>0) mynames <- c(mynames,paste0("m",seq_len(e.mis$model$npar.mean))) if (e.mis$model$npar>0) mynames <- c(mynames,paste0("p",seq_len(e.mis$model$npar))) rownames(cc) <- mynames mycc <- val$pattern.allcomp ## Position of complete-case model nmis <- with(val, as.numeric(table(mis.type)[pattern.allmis])) ## Number of completely missing observations if (length(nmis)>0 & length(mycc)>0) ## Any individuals with all missing? 
if (val$pattern.allmis0) { nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) for (i in seq_along(xfix)) regfix(x, from=vars(x)[rowpos[[i]]],to=vars(x)[colpos[[i]]]) <- rep(colMeans(data[,xfix[i],drop=FALSE],na.rm=TRUE),length(rowpos[[i]])) x <- updatelvm(x,zeroones=TRUE,deriv=TRUE) } ord <- c() ordlist <- list() for (i in seq_len(nrow(val$patterns))) { ordlist <- c(ordlist, list(which(val$mis.type==i))) ord <- c(ord, ordlist[[i]]) } res <- with(val, list(coef=cc, patterns=patterns, table=table(mis.type), mis.type=mis.type, order=ord, orderlist=ordlist, nmis=nmis, allmis=pattern.allmis, cc=mycc, ncc=as.numeric(table(mis.type)[pattern.allcomp]), multigroup=e.mis$model, estimate=e.mis, model=x, model0=x0, vcov=e.mis$vcov, opt=e.mis$opt, control=control, data=list(model.frame=data), estimator=estimator, call=cl )) class(res) <- c("lvm.missing","lvmfit") if (inherits(e.mis,"lvmfit.randomslope")) class(res) <- c(class(res),"lvmfit.randomslope") if (hessian & is.null(cluster)) { if (messages>1) message("Calculating asymptotic variance...\n") res$vcov <- solve(information(res$estimate,type="hessian")) cc[] <- coef(e.mis,type=1,vcov=res$vcov) res$coef <- cc } return(res) } ###}}} estimate.MAR.lvm lava/R/frobnorm.R0000644000176200001440000000007013520655354013352 0ustar liggesusersfrobnorm <- function(x,y=0,...) { sum((x-y)^2)^.5 } lava/R/parpos.R0000644000176200001440000000412513520655354013037 0ustar liggesusers ##' Generic method for finding indeces of model parameters ##' ##' @title Generic method for finding indeces of model parameters ##' @param x Model object ##' @param \dots Additional arguments ##' @author Klaus K. Holst ##' @export `parpos` <- function(x,...) UseMethod("parpos") ##' @export parpos.default <- function(x,p,...) 
{ if (is.numeric(p)) return(p) na.omit(match(coef(x),p)) } ##' @export parpos.multigroup <- function(x,p,mean=TRUE,...) { if (missing(p)) { p <- unique(unlist(lapply(x$lvm, function(z) setdiff(parlabels(z),names(constrain(z))) ))) } if (!is.character(p)) p <- names(p) p0 <- rep(NA,with(x,npar+npar.mean)); names(p0) <- c(x$mean,x$par) for (i in seq_along(x$lvm)) { cur <- parpos(x$lvm[[i]],p=p) if (length(cur)>0) { p0[c(x$meanpos[[i]],x$parpos[[i]])[cur]] <- names(cur) M <- na.omit(match(names(cur),p)) if (length(M)>0) p <- p[-M] } if (length(p)==0) break; } p1 <- which(!is.na(match(x$name,p))) p0[p1] <- x$name[p1] return(structure(which(!is.na(p0)),name=p0)) ## return(p0) } ##' @export parpos.multigroupfit <- function(x,...) parpos.multigroup(x$model0,...) ##' @export parpos.lvm <- function(x,p,mean=TRUE,...) { if (!missing(p)) { if (!is.character(p)) p <- names(p) cc1 <- coef(Model(x),mean=mean,fix=FALSE) cc2 <- coef(Model(x),mean=mean,fix=FALSE,labels=TRUE) idx1 <- na.omit(match(p,cc1)) idx11 <- na.omit(match(p,cc2)) res <- (union(idx1,idx11)); if (length(res)!=length(p)) { names(res) <- cc1[res] } else { names(res) <- p } ## res <- idx1; res[!is.na(idx11)] <- idx11[!is.na(idx11)] ## names(res) <- p ord <- order(res) res <- sort(res) attributes(res)$ord <- ord return(res) } if (mean) nn <- with(index(x),matrices2(x,seq_len(npar+npar.mean+npar.ex))) ## Position of parameters else nn <- with(index(x),matrices(x,seq_len(npar),NULL,seq_len(npar.ex)+npar)) nn$A[index(x)$M0!=1] <- 0 nn$P[index(x)$P0!=1] <- 0 nn$v[index(x)$v0!=1] <- 0 nn$e[index(x)$e0!=1] <- 0 nn } ##' @export parpos.lvmfit <- parpos.lvm lava/R/graph2lvm.R0000644000176200001440000000060313520655354013432 0ustar liggesusers##' @export `graph2lvm` <- function(g, debug=FALSE, messages=0) { res <- lvm(graph::nodes(g), debug=debug, messages=messages) M <- t(as(g, Class="matrix")) for (i in seq_len(nrow(M))) { if (any(M[,i]==1)) { res <- regression(res, rownames(M)[M[,i]==1], rownames(M)[i], messages=messages) 
} } res } lava/R/path.R0000644000176200001440000002007613520655354012472 0ustar liggesusers##' Extract all possible paths from one variable to another connected component ##' in a latent variable model. In an estimated model the effect size is ##' decomposed into direct, indirect and total effects including approximate ##' standard errors. ##' ##' @title Extract pathways in model graph ##' @export ##' @aliases path effects path.lvm effects.lvmfit ##' totaleffects ##' @seealso \code{children}, \code{parents} ##' @return If \code{object} is of class \code{lvmfit} a list with the following ##' elements is returned \item{idx}{ A list where each element defines a ##' possible pathway via a integer vector indicating the index of the visited ##' nodes. } \item{V }{ A List of covariance matrices for each path. } ##' \item{coef }{A list of parameters estimates for each path} \item{path }{A ##' list where each element defines a possible pathway via a character vector ##' naming the visited nodes in order. } \item{edges }{Description of 'comp2'} ##' ##' If \code{object} is of class \code{lvm} only the \code{path} element will be ##' returned. ##' ##' The \code{effects} method returns an object of class \code{effects}. ##' @note For a \code{lvmfit}-object the parameters estimates and their ##' corresponding covariance matrix are also returned. The ##' \code{effects}-function additionally calculates the total and indirect ##' effects with approximate standard errors ##' @author Klaus K. Holst ##' @keywords methods models graphs ##' @examples ##' ##' m <- lvm(c(y1,y2,y3)~eta) ##' regression(m) <- y2~x1 ##' latent(m) <- ~eta ##' regression(m) <- eta~x1+x2 ##' d <- sim(m,500) ##' e <- estimate(m,d) ##' ##' path(Model(e),y2~x1) ##' parents(Model(e), ~y2) ##' children(Model(e), ~x2) ##' children(Model(e), ~x2+eta) ##' effects(e,y2~x1) ##' ## All simple paths (undirected) ##' path(m,y1~x1,all=TRUE) ##' ##' @usage ##' \method{path}{lvm} (object, to = NULL, from, all=FALSE, ...) 
##' \method{effects}{lvmfit} (object, to, from, ...) ##' @param object Model object (\code{lvm}) ##' @param to Outcome variable (string). Alternatively a formula specifying ##' response and predictor in which case the argument \code{from} is ignored. ##' @param from Response variable (string), not necessarily directly affected by ##' \code{to}. ##' @param all If TRUE all simple paths (in undirected graph) is returned ##' on/off. ##' @param \dots Additional arguments to be passed to the low level functions ##' @export path <- function(object,...) UseMethod("path") ##' @export path.lvmfit <- function(object,to=NULL,from,...) { mypath <- pathM(Model(object)$M,to,from,...) cc <- coef(object,type=9,labels=FALSE) ## All parameters (fixed and variable) cc0 <- coef(object,type=2) ## Estimated parameters i1 <- na.omit(match(rownames(cc),rownames(cc0))) idx.cc0 <- which(rownames(cc)%in%rownames(cc0)); ## Position of estimated parameters among all parameters S <- matrix(0,nrow(cc),nrow(cc)); rownames(S) <- colnames(S) <- rownames(cc) V <- object$vcov S[idx.cc0,idx.cc0] <- V[i1,i1] ## "Covariance matrix" of all parameters idx <- list() coefs <- list() V <- list() for (i in seq_along(mypath)) { xx <- mypath[[i]] ii <- c() for (j in seq_len(length(xx)-1)) { st <- paste0(xx[j+1], lava.options()$symbol[1], xx[j]) ii <- c(ii, match(st,rownames(cc))) } idx <- c(idx, list(ii)) V <- c(V, list(S[ii,ii])) coefs <- c(coefs, list(cc[ii])) } edges <- list() for (i in seq_along(mypath)) { p0 <- mypath[[i]] ee <- c() for (i in seq_len(length(p0)-1)) { ee <- c(ee, paste(p0[i],p0[i+1],sep="~")) } edges <- c(edges, list(ee)) } res <- list(idx=idx,V=V,coef=coefs, path=mypath, edges=edges) return(res) } ##' @export path.lvm <- function(object,to=NULL,from,all=FALSE,...) { pathM(object$M,to=to,from=from,all=all,...) } ##' @export path.graphNEL <- function(object,to,from,...) 
{ if (inherits(to,"formula")) { fvar <- extractvar(to) if (length(fvar$x)==1 & length(fvar$y)==1) return(path(object,to=fvar$y,from=fvar$x)) res <- list() for (y in fvar$y) { for (x in fvar$x) { cat("x=",x, " y=",y, "\n") res <- c(res, list(path(object,to=y,from=x))) } } return(res) } ff <- function(g,from=1,to=NULL,res=list()) { M <- graph::edgeMatrix(g) i1 <- which(M[1,]==from) for (i in i1) { e <- M[,i]; newto <- e[2]; if (is.null(to) || M[2,i]==to) { res <- c(res, list(M[,i])) } newpath <- ff(g,from=newto,to=to,list()) if (length(newpath)>0) for (j in seq_along(newpath)) { if (is.null(to) || (tail(newpath[[j]],1)==to)) res <- c(res, list(c(M[,i],newpath[[j]][-1]))) } } return(res) } idxfrom <- ifelse(is.numeric(from),from,which(from==graph::nodes(object))) ##M <- as(object,"matrix") ##reachable <- acc(M,graph::nodes(object)[idxfrom]) reachable <- graph::acc(object,graph::nodes(object)[idxfrom])[[1]] if (is.null(to)) { idxto <- reachable } else { idxto <- ifelse(is.numeric(to),to,which(to==graph::nodes(object))) } if (!(graph::nodes(object)[idxto] %in% names(reachable))) ## return(structure(list(),to=to[1],from=from[1])) return(NULL) ## stop("No directional relationship between variables") mypaths <- ff(object,idxfrom,idxto) res <- list() for (i in seq_along(mypaths)) { res <- c(res, list(graph::nodes(object)[mypaths[[i]]])) } return(res) } pathM <- function(M,to,from,all=FALSE,...) 
{ nn <- colnames(M) if (inherits(to,"formula")) { fvar <- extractvar(to) if (length(fvar$x)==1 & length(fvar$y)==1) return(pathM(M,to=fvar$y,from=fvar$x,all=all)) res <- list() for (y in fvar$y) { for (x in fvar$x) { cat("x=",x, " y=",y, "\n") res <- c(res, list(pathM(M,to=y,from=x,all=all))) } } return(res) } if (all) { ## Get all simple paths res <- simplePaths(to,from,from,M,list()) return(res) } ff <- function(g,from=1,to=NULL,res=list()) { i1 <- which(M[from,]==1) for (i in i1) { ## e <- M[,i]; newto <- e[2]; if (is.null(to) || i==to) { res <- c(res, list(c(from,i))) } newpath <- ff(g,from=i,to=to,list()) if (length(newpath)>0) for (j in seq_along(newpath)) { if (is.null(to) || (tail(newpath[[j]],1)==to)) res <- c(res, list(c(c(from,i),newpath[[j]][-1]))) } } return(res) } idxfrom <- ifelse(is.numeric(from),from,which(from==nn)) reachable <- acc(M,nn[idxfrom]) if (is.null(to)) { idxto <- reachable } else { idxto <- ifelse(is.numeric(to),to,which(to==nn)) } if (!(nn[idxto] %in% reachable)) return(NULL) ## stop("No directional relationship between variables") mypaths <- ff(M,idxfrom,idxto) res <- list() for (i in seq_along(mypaths)) { res <- c(res, list(nn[mypaths[[i]]])) } return(res) } ## Find all simple paths (no cycles) in an undirected graph simplePaths <- function(target,currentpath,visited,adjmat,allpaths) { lastnode <- currentpath[length(currentpath)] A <- (adjmat+t(adjmat))>0 if (lastnode==target) { allpaths <- c(allpaths,list(currentpath)) } else { for (neighbour in rownames(adjmat)[which(A[,lastnode])]) { if (!(neighbour%in%visited)) { currentpath <- c(currentpath,neighbour) visited <- c(visited,neighbour) allpaths <- simplePaths(target,currentpath,visited,adjmat,allpaths) visited <- setdiff(visited,neighbour) currentpath <- currentpath[-length(currentpath)] } } } return(allpaths) } lava/R/Inverse.R0000644000176200001440000000220513520655354013143 0ustar liggesusers##' @export Inverse <- 
function(X,tol=lava.options()$itol,det=TRUE,names=!chol,chol=FALSE,symmetric=FALSE) { n <- NROW(X) if (n==1L) { res <- 1/X if (det) attributes(res)$det <- X if (chol) attributes(res)$chol <- X return(res) } if (chol) { L <- chol(X) res <- chol2inv(L) if (det) attributes(res)$det <- prod(diag(L)^2) if (chol) attributes(res)$chol <- X } else { if(symmetric){ decomp <- eigen(X, symmetric = TRUE) D <- decomp$values U <- decomp$vectors V <- decomp$vectors }else{ X.svd <- svd(X) U <- X.svd$u V <- X.svd$v D <- X.svd$d } id0 <- numeric(n) idx <- which(abs(D)>tol) id0[idx] <- 1/D[idx] res <- V%*%diag(id0,nrow=length(id0))%*%t(U) if (det) attributes(res)$det <- prod(D[D>tol]) attributes(res)$pseudo <- (length(idx)0) { ## rhs of the form F(x+y) invlink <- strsplit(yx[[xidx]],"\\(.*\\)")[[1]][1] if (invlink%in%c("f","v","I","") || grepl("\\+",invlink)) { ## Reserved for setting linear constraints invlink <- NULL } else { yx[[xidx]] <- gsub(paste0(invlink,"\\(|\\)$"),"",yx[[xidx]]) } } } ## Handling constraints with negative coefficients ## while not tampering with formulas like y~f(x,-2) st <- yx[[xidx]] st <- gsub("\\-","\\+\\-",gsub("\\+\\-","\\-",st)) ## Convert - to +- (to allow for splitting on '+') ##gsub("[^,]\\-","\\+\\-",st) ## Convert back any - not starting with ',' st <- gsub(",\\+",",",st) ## Remove + inside 'f' and 'v' constraints st <- gsub("^\\+","",st) ## Remove leading plus yx[[xidx]] <- st ## Match '+' but not when preceeded by ( ... ) X <- strsplit(yx[[xidx]],"\\+(?![^\\(]*\\))", perl=TRUE)[[1]] ##regex <- "(?!(\\(*))[\\(\\)]" regex <- "[\\(\\)]" ## Keep squares brackets and |(...) 
statements ## Extract variables from expressions like ## f(x,b) -> x,b and 2*x -> 2,cx ## but avoid to tamper with transformation expressions: ## a~(x*b) res <- lapply(X,decomp.specials,regex,pattern2="\\*",pattern.ignore="~",reverse=TRUE,perl=TRUE) ##OLD: ##res <- lapply(X,decomp.specials,pattern2="[*]",reverse=TRUE) xx <- unlist(lapply(res, function(x) x[1])) xxf <- lapply(as.list(xx),function(x) decomp.specials(x,NULL,pattern2="\\[|~",perl=TRUE)) xs <- unlist(lapply(xxf,function(x) x[1])) ## Alter intercepts? intpos <- which(vapply(xs,function(x) grepl("^[\\+\\-]*[\\.|0-9]+$",x), 0)==1) ## Match '(int)' intpos0 <- which(vapply(X,function(x) grepl("^\\([\\+\\-]*[\\.|0-9]+\\)$",x),0)==1) yy <- ys <- NULL if (length(lhs)>0) { yy <- decomp.specials(lhs) yyf <- lapply(yy,function(y) decomp.specials(y,NULL,pattern2="[",fixed=TRUE,perl=FALSE)) ys <- unlist(lapply(yyf,function(x) x[1])) } notexo <- c() if (!is.null(object)) { if (length(lhs)>0) { object <- addvar(object,ys,reindex=FALSE,...) notexo <- ys ## Add link-transformation if (!is.null(invlink)) { if (invlink=="") { object <- transform(object,ys,NULL,post=FALSE) covariance(object,ys) <- NA } else { ff <- function(x) {}; body(ff) <- parse(text=paste0(invlink,"(x)")) object <- transform(object,ys,ff,post=FALSE) covariance(object,ys) <- 0 } } } if (length(intpos>0)) { xs[intpos[1]] <- gsub("\\+","",xs[intpos[1]]) if (xs[intpos[1]]==1 && (!length(intpos0)>0) ) { xs[intpos[1]] <- NA } intercept(object,ys) <- char2num(xs[intpos[1]]) xs <- xs[-intpos] res[intpos] <- NULL } object <- addvar(object,xs,reindex=FALSE ,...) 
exolist <- c() for (i in seq_len(length(xs))) { ## Extract transformation statements: var~(expr) xf0 <- strsplit(xx[[i]],"~")[[1]] if (length(xf0)>1) { myexpr <- xf0[2] ftr <- toformula(y="",x=paste0("-1+I(",myexpr,")")) xtr <- all.vars(ftr) xf0 <- xf0[1] transform(object, y=xf0, x=xtr) <- function(x) { structure(model.matrix(ftr,as.data.frame(x)),dimnames=list(NULL,xf0)) } } xf <- unlist(strsplit(xf0,"[\\[\\]]",perl=TRUE)) if (length(xf)>1) { xpar <- strsplit(xf[2],":")[[1]] if (length(xpar)>1) { val <- ifelse(xpar[2]=="NA",NA,xpar[2]) valn <- char2num(val) covariance(object,xs[i]) <- ifelse(is.na(valn),val,valn) } val <- ifelse(xpar[1]=="NA",NA,xpar[1]) valn <- char2num(val) if (is.na(val) || val!=".") { intercept(object,xs[i]) <- ifelse(is.na(valn),val,valn) notexo <- c(notexo,xs[i]) } } else { exolist <- c(exolist,xs[i]) } } for (i in seq_len(length(ys))) { y <- ys[i] yf <- unlist(strsplit(yy[i],"[\\[\\]]",perl=TRUE)) if (length(yf)>1) { ypar <- strsplit(yf[2],":")[[1]] if (length(ypar)>1) { val <- ifelse(ypar[2]=="NA",NA,ypar[2]) valn <- char2num(val) covariance(object,y) <- ifelse(is.na(valn),val,valn) } val <- ifelse(ypar[1]=="NA",NA,ypar[1]) valn <- char2num(val) if (is.na(val) || val!=".") { intercept(object,y) <- ifelse(is.na(valn),val,valn) } } } curvar <- index(object)$var if (exo) { oldexo <- exogenous(object) newexo <- setdiff(exolist,c(notexo,curvar,ys)) exogenous(object) <- union(newexo,setdiff(oldexo,notexo)) } } return(list(object=object, yx=yx, X=X, ys=ys, xx=xx, xs=xs, yy=yy, ys=ys, res=res, notexo=notexo, intpos=intpos, invlink=invlink, lhs=lhs, iscovar=iscovar)) } lava/R/covariance.R0000644000176200001440000002436013520655354013650 0ustar liggesusers ##' Add covariance structure to Latent Variable Model ##' ##' Define covariances between residual terms in a \code{lvm}-object. ##' ##' The \code{covariance} function is used to specify correlation structure ##' between residual terms of a latent variable model, using a formula syntax. 
##' ##' For instance, a multivariate model with three response variables, ##' ##' \deqn{Y_1 = \mu_1 + \epsilon_1} ##' ##' \deqn{Y_2 = \mu_2 + \epsilon_2} ##' ##' \deqn{Y_3 = \mu_3 + \epsilon_3} ##' ##' can be specified as ##' ##' \code{m <- lvm(~y1+y2+y3)} ##' ##' Pr. default the two variables are assumed to be independent. To add a ##' covariance parameter \eqn{r = cov(\epsilon_1,\epsilon_2)}, we execute the ##' following code ##' ##' \code{covariance(m) <- y1 ~ f(y2,r)} ##' ##' The special function \code{f} and its second argument could be omitted thus ##' assigning an unique parameter the covariance between \code{y1} and ##' \code{y2}. ##' ##' Similarily the marginal variance of the two response variables can be fixed ##' to be identical (\eqn{var(Y_i)=v}) via ##' ##' \code{covariance(m) <- c(y1,y2,y3) ~ f(v)} ##' ##' To specify a completely unstructured covariance structure, we can call ##' ##' \code{covariance(m) <- ~y1+y2+y3} ##' ##' All the parameter values of the linear constraints can be given as the right ##' handside expression of the assigment function \code{covariance<-} if the ##' first (and possibly second) argument is defined as well. E.g: ##' ##' \code{covariance(m,y1~y1+y2) <- list("a1","b1")} ##' ##' \code{covariance(m,~y2+y3) <- list("a2",2)} ##' ##' Defines ##' ##' \deqn{var(\epsilon_1) = a1} ##' ##' \deqn{var(\epsilon_2) = a2} ##' ##' \deqn{var(\epsilon_3) = 2} ##' ##' \deqn{cov(\epsilon_1,\epsilon_2) = b1} ##' ##' Parameter constraints can be cleared by fixing the relevant parameters to ##' \code{NA} (see also the \code{regression} method). ##' ##' The function \code{covariance} (called without additional arguments) can be ##' used to inspect the covariance constraints of a \code{lvm}-object. 
##' # ##' ##' @aliases covariance covariance<- covariance.lvm covariance<-.lvm ##' covfix<- covfix covfix<-.lvm covfix.lvm ##' variance variance<- variance.lvm variance<-.lvm ##' @param object \code{lvm}-object ##' @param var1 Vector of variables names (or formula) ##' @param var2 Vector of variables names (or formula) defining pairwise ##' covariance between \code{var1} and \code{var2}) ##' @param constrain Define non-linear parameter constraints to ensure positive definite structure ##' @param pairwise If TRUE and \code{var2} is omitted then pairwise correlation is added between all variables in \code{var1} ##' @param \dots Additional arguments to be passed to the low level functions ##' @param value List of parameter values or (if \code{var1} is unspecified) ##' @usage ##' \method{covariance}{lvm}(object, var1=NULL, var2=NULL, constrain=FALSE, pairwise=FALSE,...) <- value ##' @return A \code{lvm}-object ##' @author Klaus K. Holst ##' @seealso \code{\link{regression<-}}, \code{\link{intercept<-}}, ##' \code{\link{constrain<-}} \code{\link{parameter<-}}, \code{\link{latent<-}}, ##' \code{\link{cancel<-}}, \code{\link{kill<-}} ##' @keywords models regression ##' @export ##' @examples ##' ##' m <- lvm() ##' ### Define covariance between residuals terms of y1 and y2 ##' covariance(m) <- y1~y2 ##' covariance(m) <- c(y1,y2)~f(v) ## Same marginal variance ##' covariance(m) ## Examine covariance structure ##' ##' `covariance` <- function(object,...) UseMethod("covariance") ##' @export "variance<-" <- function(object,...,value) UseMethod("covariance<-") ##' @export `variance` <- function(object,...) UseMethod("variance") ##' @export "variance.lvm" <- function(object,...) covariance(object,...) ##' @export "variance.formula" <- function(object,...) covariance(lvm(),object,...) ##' @export "covariance.formula" <- function(object,...) covariance(lvm(),object,...) ##' @export "variance<-.lvm" <- function(object,...,value) { covariance(object,...) 
<- value return(object) } ##' @export "covariance<-" <- function(object,...,value) UseMethod("covariance<-") ##' @export "covariance<-.lvm" <- function(object, var1=NULL, var2=NULL, constrain=FALSE, pairwise=FALSE, ..., value) { if (!is.null(var1)) { if (inherits(var1,"formula")) { lhs <- getoutcome(var1) xf <- attributes(terms(var1))$term.labels xx <- unlist(lapply(xf, function(x) x[1])) if (length(lhs)==0) { covfix(object,var1,var2,pairwise=pairwise,...) <- value object$parpos <- NULL return(object) } else { yy <- decomp.specials(lhs) } } else { yy <- var1; xx <- var2 } covfix(object,var1=yy,var2=xx,pairwise=pairwise,...) <- value object$parpos <- NULL return(object) } if (is.list(value)) { for (v in value) { covariance(object,pairwise=pairwise,constrain=constrain,...) <- v } return(object) } if (inherits(value,"formula")) { lhs <- getoutcome(value) if (length(lhs)==0) { return(covariance(object,all.vars(value),constrain=constrain,pairwise=pairwise,...)) } yy <- decomp.specials(lhs) tt <- terms(value, specials=c("f","v")) xf <- attributes(terms(tt))$term.labels res <- lapply(xf,decomp.specials) nx <- length(xf) if (nx==1) { if(is.null(attr(tt,"specials")$f) | length(res[[1]])<2) { if(is.null(attr(tt,"specials")$v) & is.null(attr(tt,"specials")$f)) { for (i in yy) for (j in res[[1]]) object <- covariance(object, c(i,j), pairwise=TRUE, constrain=constrain, ...) } else { covfix(object,var1=yy,var2=NULL) <- res[[1]] } } else { covfix(object,var1=yy,var2=res[[1]][1]) <- res[[1]][2] } object$parpos <- NULL return(object) } xx <- unlist(lapply(res, function(z) z[1])) for (y in yy) for (i in seq_along(xx)) { if (length(res[[i]])>1) { covfix(object, var1=y, var2=res[[i]][1]) <- res[[i]][2] } else if ((i+1)%in%attr(tt,"specials")$f | (i+1)%in%attr(tt,"specials")$v) { covfix(object, var1=y, var2=NULL) <- res[[i]] } else { object <- covariance(object,c(y,xx[i]),pairwise=TRUE,...) } } object$parpos <- NULL return(object) } else covariance(object,value,pairwise=pairwise,...) 
} ##' @export `covariance.lvm` <- function(object,var1=NULL,var2=NULL,exo=FALSE,pairwise=FALSE,constrain=FALSE,value,...) { if (!missing(value)) { covariance(object,var1=var1,var2,exo=exo,pariwise=pairwise,constrain=constrain,...) <- value return(object) } if (!is.null(var1)) { if (inherits(var1,"formula")) { covariance(object,constrain=constrain, pairwise=pairwise,exo=exo,...) <- var1 return(object) } allvars <- var1 if (!missing(var2)) { if (inherits(var2,"formula")) var2 <- all.vars(var2) allvars <- c(allvars,var2) } if (constrain) { if (length(allvars)!=2) stop("Constraints only implemented for pairs") return(covarianceconst(object,allvars[1],allvars[2],...)) } object <- addvar(object, allvars, messages=0, reindex=FALSE) xorg <- exogenous(object) exoset <- setdiff(xorg,allvars) if (!exo & length(exoset)0))) } for (i in seq_len(nm)) { x0 <- models[[i]] data0 <- datasets[[i]] if (length(exogenous(x0)>0)) { catx <- categorical2dummy(x0,data0) models[[i]] <- catx$x; datasets[[i]] <- catx$data } if (!lava.options()$exogenous) exogenous(models[[i]]) <- NULL } models.orig <- NULL ###################### ### MLE with MAR mechanism ###################### if (missing) { reservedpars <- c() mynpar <- c() for (i in seq_len(nm)) { ## Fix some parameters (predictors,latent variables,...) 
d0 <- datasets[[i]][1,,drop=FALSE]; d0[,] <- 1 if (fix) models[[i]] <- fixsome(models[[i]], exo.fix=exo.fix, measurement.fix=fix, data=d0) ## Find named/labelled parameters rpar <- unique(parlabels(models[[i]])) reservedpars <- c(reservedpars, rpar) mynpar <- c(mynpar, with(index(models[[1]]), npar+npar.mean+npar.ex)) }; reservedpars <- unique(reservedpars) nonamepar <- sum(mynpar) ## Find unique parameter-names for all parameters newpars <- c() i <- 0 pos <- 1 while(pos<=nonamepar) { i <- i+1 newname <- paste0("par",i) if (!(newname%in%reservedpars)) { newpars <- c(newpars,newname) pos <- pos+1 } } pos <- 0 models0 <- list() datasets0 <- list() complidx <- c() nmodels <- 0 modelclass <- c() nmis <- c() for (i in seq_len(nm)) { myvars <- unlist(intersect(colnames(datasets[[i]]),c(vars(models[[i]]),xfix[[i]],keep))) mydata <- datasets[[i]][,myvars] if (any(is.na(mydata))) { if (i>1) pos <- pos+mynpar[i-1] models[[i]] <- baptize(models[[i]],newpars[pos+seq_len(mynpar[i])] ,overwrite=FALSE) val <- missingModel(models[[i]],mydata,fix=FALSE,keep=keep,...) nmodels <- c(nmodels,length(val$models)) complidx <- c(complidx,val$pattern.allcomp+nmodels[i]+1) nmis0 <- rowSums(val$patterns); allmis <- which(nmis0==ncol(val$patterns)) if (length(allmis)>0) nmis0 <- nmis0[-allmis] nmis <- c(nmis,nmis0) datasets0 <- c(datasets0, val$datasets) models0 <- c(models0, val$models) modelclass <- c(modelclass,rep(i,length(val$models))) } else { datasets0 <- c(datasets0, list(mydata)) models0 <- c(models0, list(models[[i]])) modelclass <- c(modelclass,i) nmis <- c(nmis,0) } } models.orig <- models suppressWarnings( val <- multigroup(models0,datasets0,fix=FALSE,missing=FALSE,exo.fix=TRUE,...) 
) val$models.orig <- models.orig; val$missing <- TRUE val$complete <- complidx-1 val$mnames <- mynames attributes(val)$modelclass <- modelclass attributes(val)$nmis <- nmis return(val) } ###################### ### Usual analysis: ###################### warned <- FALSE for (i in seq_len(nm)) { if (inherits(datasets[[i]],c("data.frame","matrix"))) { myvars <- intersect(colnames(datasets[[i]]),c(vars(models[[i]]),xfix[[i]],keep)) if (any(is.na(datasets[[i]][,myvars]))) { if (!warned) warning(paste0("Missing data encountered. Going for complete-case analysis")) warned <- TRUE datasets[[i]] <- na.omit(datasets[[i]][,myvars,drop=FALSE]) } } } exo <- exogenous(models) means <- lvms <- As <- Ps <- ps <- exs <- datas <- samplestat <- list() for (i in seq_len(nm)) { if (!is.null(exogenous(models[[i]]))) { if (any(is.na(exogenous(models[[i]])))) { exogenous(models[[i]]) <- exo } } mydata <- datasets[[i]] mymodel <- fixsome(models[[i]], data=mydata, measurement.fix=fix, exo.fix=exo.fix) mymodel <- updatelvm(mymodel,zeroones=TRUE,deriv=TRUE) P <- index(mymodel)$P1; P[P==0] <- NA P[!is.na(P) & !is.na(mymodel$covpar)] <- mymodel$covpar[!is.na(P) & !is.na(mymodel$covpar)] A <- index(mymodel)$M1; A[A==0] <- NA A[!is.na(A) & !is.na(mymodel$par)] <- mymodel$par[!is.na(A) & !is.na(mymodel$par)] mu <- unlist(mymodel$mean)[which(index(mymodel)$v1==1)] #ex <- names(mymodel$expar)[which(index(mymodel)$e1==1)] ex <- mymodel$exfix if (length(ex)>0) { if (any(is.na(ex))) ex[is.na(ex)] <- mymodel$expar[is.na(ex)] ex <- ex[which(index(mymodel)$e1==1)] } p <- pars(mymodel, A, P, e=ex) p[p=="1"] <- NA means <- c(means, list(mu)) lvms <- c(lvms, list(mymodel)) datas <- c(datas, list(mydata)) samplestat <- c(samplestat, list(procdata.lvm(models[[i]],data=mydata))) As <- c(As, list(A)) Ps <- c(Ps, list(P)) ps <- c(ps, list(p)) exs <- c(exs, list(ex)) }; ###### pp <- unlist(ps) parname <- unique(pp[!is.na(pp)]) pidx <- is.na(char2num(parname)) parname <- unique(unlist(pp[!is.na(pp)])); nfree <- 
sum(is.na(pp)) + length(parname) if (nfree>0) { pp0 <- lapply(ps, is.na) usedname <- cbind(parname, rep(NA,length(parname))) counter <- 1 pres <- pres0 <- pp0 for (i in seq_len(length(pp0))) { if (length(pp0[[i]]>0)) for (j in seq_len(length(pp0[[i]]))) { pidx <- match(ps[[i]][j],parname) if (pp0[[i]][j]) { pres[[i]][j] <- paste0("p",counter) pres0[[i]][j] <- counter counter <- counter+1 } else if (!is.na(pidx)) { if (!is.na(usedname[pidx,2])) { pres[[i]][j] <- usedname[pidx,2] pres0[[i]][j] <- char2num(substr(pres[[i]][j],2,nchar(pres[[i]][j]))) } else { val <- paste0("p",counter) pres[[i]][j] <- val pres0[[i]][j] <- counter usedname[pidx,2] <- val counter <- counter+1 } } else { pres[[i]][j] <- NA } } } mypar <- paste0("p",seq_len(nfree)) myparPos <- pres0 myparpos <- pres myparlist <- lapply(pres, function(x) x[!is.na(x)]) } else { myparPos <- NULL mypar <- NULL myparpos <- NULL myparlist <- NULL } ### Mean parameter mm <- unlist(means) meanparname <- unique(mm[!is.na(mm)]) midx <- is.na(char2num(meanparname)); meanparname <- meanparname[midx] any.mean <- sum(is.na(mm)) + length(meanparname) nfree.mean <- sum(is.na(mm)) + length(setdiff(meanparname,parname)) ## mean.fixed <- na.omit(match(parname,mm)) mean.omit <- lapply(means,function(x) na.omit(match(parname,x))) if (any.mean>0) { mm0 <- lapply(means, is.na) usedname <- cbind(meanparname, rep(NA,length(meanparname))) counter <- 1 res0 <- res <- mm0 for (i in seq_len(length(mm0))) { if (length(mm0[[i]])>0) for (j in seq_len(length(mm0[[i]]))) { midx <- match(means[[i]][j],meanparname) if (mm0[[i]][j]) { res[[i]][j] <- paste0("m",counter) res0[[i]][j] <- counter counter <- counter+1 } else if (!is.na(midx)) { pidx <- match(meanparname[midx],pp) if (!is.na(pidx)) { res[[i]][j] <- unlist(myparlist)[pidx] res0[[i]][j] <- char2num(substr(res[[i]][j],2,nchar(res[[i]][j]))) + nfree.mean } else { if (!is.na(usedname[midx,2])) { res[[i]][j] <- usedname[midx,2] res0[[i]][j] <- 
char2num(substr(res[[i]][j],2,nchar(res[[i]][j]))) } else { val <- paste0("m",counter) res[[i]][j] <- val res0[[i]][j] <- counter usedname[midx,2] <- val counter <- counter+1 } } } else { res[[i]][j] <- NA } } } mymeanPos <- res0 mymeanpos <- res mymeanlist <- lapply(res, function(x) x[!is.na(x)]) mymean <- unique(unlist(mymeanlist)) } else { mymeanPos <- NULL mymean <- NULL mymeanpos <- NULL mymeanlist <- NULL } ### Extra parameters m0 <- p0 <- c() coefs <- coefsm <- mm0 <- mm <- pp0 <- pp <- c() for (i in seq_len(length(myparPos))) { mi <- mymeanPos[[i]] pi <- myparPos[[i]] p1 <- setdiff(pi,p0) p0 <- c(p0,p1) ## pp0 <- c(pp0,list(match(p1,pi)+nfree.mean)) pp0 <- c(pp0,list(match(p1,pi))) if (length(mean.omit[[i]])>0) mi <- mi[-mean.omit[[i]]] m1 <- setdiff(mi,m0) m0 <- c(m0,m1) mm0 <- c(mm0,list(match(m1,mi))) pp <- c(pp,list(c(m1,p1+nfree.mean))) if (length(p1)>0) coefs <- c(coefs,paste(coef(lvms[[i]],fix=FALSE,mean=FALSE)[pp0[[i]]],i,sep="@")) #coefs <- c(coefs,paste(i,coef(lvms[[i]],fix=FALSE,mean=FALSE)[pp0[[i]]],sep="@")) if (length(m1)>0) { coefsm0 <- paste(coef(lvms[[i]],fix=FALSE,mean=TRUE)[mm0[[i]]],i,sep="@") #coefsm0 <- paste(i,coef(lvms[[i]],fix=FALSE,mean=TRUE)[mm0[[i]]],sep="@") coefsm <- c(coefsm,coefsm0) } } coefs <- c(coefsm,coefs) res <- list(npar=nfree, npar.mean=nfree.mean, ngroup=length(lvms), names=mynames, lvm=lvms, data=datas, samplestat=samplestat, A=As, P=Ps, expar=exs, meanpar=names(mu), name=coefs, coef=pp, coef.idx=pp0, par=mypar, parlist=myparlist, parpos=myparpos, mean=mymean, meanlist=mymeanlist, meanpos=mymeanpos, parposN=myparPos, meanposN=mymeanPos, models.orig=models.orig, missing=missing ) class(res) <- "multigroup" checkmultigroup(res) return(res) } ###}}} ###{{{ checkmultigroup checkmultigroup <- function(x) { ## Check validity: for (i in seq_len(x$ngroup)) { if (nrow(x$data[[i]])<2) { warning("With only one observation in the group, all parameters should be inherited from another a group!") } } } ###}}} checkmultigroup 
lava/R/zib.R0000644000176200001440000003437313520655354012327 0ustar liggesusers##' Dose response calculation for binomial regression models ##' ##' @title Dose response calculation for binomial regression models ##' @param model Model object or vector of parameter estimates ##' @param intercept Index of intercept parameters ##' @param slope Index of intercept parameters ##' @param prob Index of mixture parameters (only relevant for ##' \code{zibreg} models) ##' @param x Optional weights ##' length(x)=length(intercept)+length(slope)+length(prob) ##' @param level Probability at which level to calculate dose ##' @param ci.level Level of confidence limits ##' @param vcov Optional estimate of variance matrix of parameter ##' estimates ##' @param family Optional distributional family argument ##' @param EB Optional ratio of treatment effect and adverse effects ##' used to find optimal dose (regret-function argument) ##' @author Klaus K. Holst ##' @export PD <- function(model,intercept=1,slope=2,prob=NULL,x,level=0.5, ci.level=0.95,vcov,family, EB=NULL) { if (is.vector(model)) { beta <- model if (missing(vcov)) stop("vcov argument needed") if (missing(family)) stop("family argument needed") } else beta <- coef(model) if (missing(vcov)) vcov <- stats::vcov(model) if (missing(family)) family <- stats::family(model) N <- length(intercept)+length(slope)+length(prob) if (length(intercept)0) res <- c(res, constrain(m)) } return(res) } return(Model(x)$constrain) } if (is.numeric(x)) { b <- x } else { b <- pars(x) } if (missing(vcov)) { S <- stats::vcov(x) } else { S <- vcov } if (!missing(idx)) { b <- b[idx]; S <- S[idx,idx,drop=FALSE] } fb <- fun(b) pl <- 1-(1-level)/2 D <- rbind(numDeriv::grad(fun,b)) se <- (D%*%S%*%t(D))^0.5 res <- c(fb,se,fb+c(-1,1)*qnorm(pl)*c(se)) pstr <- paste0(format(c(round(1000-1000*pl),round(pl*1000))/10),"%") names(res) <- c("Estimate","Std.Err",pstr) res } ##' @export "constrain<-.multigroupfit" <- "constrain<-.multigroup" <- 
function(x,par,k=1,...,value) { constrain(Model(x)$lvm[[k]],par=par,...) <- value return(x) } ##' @export "constrain<-.default" <- function(x,par,args,...,value) { if (inherits(par,"formula")) { lhs <- getoutcome(par) xf <- attributes(terms(par))$term.labels par <- lhs if (par%in%vars(x)) { if (is.na(x$mean[[par]])) { intercept(x,par) <- par } else { par <- x$mean[[par]] } } args <- xf } if (is.null(value) || suppressWarnings(is.na(value))) { if (!is.null(par)) { Model(x)$constrain[[par]] <- NULL Model(x)$constrainY[[par]] <- NULL } else { Model(x)$constrain[[args]] <- NULL } return(x) } for (i in args) { if (!(i%in%c(parlabels(Model(x)),vars(Model(x)), names(constrain(x))))) { if (lava.options()$messages>1) message("\tAdding parameter '", i,"'\n",sep="") parameter(x,messages=0) <- i } } if (par%in%vars(x)) { if (!"..."%in%names(formals(value))) { formals(value) <- c(formals(value),alist(...=)) } Model(x)$constrainY[[par]] <- list(fun=value,args=args) } else { ## Wrap around do.call, since functions are not really ## parsed as call-by-value in R, and hence setting ## attributes to e.g. value=cos, will be overwritten ## if value=cos is used again later with new args. Model(x)$constrain[[par]] <- function(x) do.call(value,list(x)) attributes(Model(x)$constrain[[par]])$args <- args index(Model(x)) <- reindex(Model(x)) } return(x) } ##' @export constraints <- function(object,data=model.frame(object),vcov=object$vcov,level=0.95, p=pars.default(object),k,idx,...) 
{ if (class(object)[1]=="multigroupfit") { if (!missing(k)) { if (class(data)[1]=="list") data <- data[[k]] parpos <- modelPar(object, seq_len(length(p)))$p[[k]] if (nrow(data)>1 & !missing(idx)) { res <- t(apply(data,1,function(x) constraints(Model(object)$lvm[[k]],data=x,p=p[parpos],vcov=vcov[parpos,parpos],level=level)[idx,])) return(res) } return(constraints(Model(object)$lvm[[k]],data=data,p=p[parpos],vcov=vcov[parpos,parpos],level=level)) } return(attributes(CoefMat.multigroupfit(object,data=data,vcov=vcov,...))$nlincon) } if (NROW(data)>1 & !missing(idx)) { res <- t(apply(data,1,function(x) constraints(object,data=x,p=p,vcov=vcov,level=level)[idx,],...)) return(res) } if (length(index(object)$constrain.par)<1) return(NULL) parpos <- Model(object)$parpos if (is.null(parpos)) { parpos <- with(index(object),matrices2(Model(object),seq_len(npar+npar.mean+npar.ex))) parpos$A[index(object)$M0==0] <- 0 parpos$P[index(object)$P0==0] <- 0 parpos$v[index(object)$v1==0] <- 0 parpos$e[index(object)$e1==0] <- 0 } myidx <- unlist(lapply(parpos$parval, function(x) { if (!is.null(attributes(x)$reg.idx)) { return(parpos$A[attributes(x)$reg.idx[1]]) } else if (!is.null(attributes(x)$cov.idx)) { return(parpos$P[attributes(x)$cov.idx[1]]) } else if (!is.null(attributes(x)$m.idx)) { return(parpos$v[attributes(x)$m.idx[1]]) } else if (!is.null(attributes(x)$e.idx)) return(parpos$e[attributes(x)$e.idx[1]]) else NA })) names(myidx) <- names(parpos$parval) mynames <- c() N <- length(index(object)$constrain.par) if (N>0) res <- c() count <- 0 mydata <- rbind(numeric(length(manifest(object)))) colnames(mydata) <- manifest(object) data <- rbind(data) iname <- intersect(colnames(mydata),colnames(data)) mydata[1,iname] <- unlist(data[1,iname]) for (pp in index(object)$constrain.par) { count <- count+1 myc <- constrain(Model(object))[[pp]] mycoef <- numeric(6) val.idx <- myidx[attributes(myc)$args] val.idx0 <- na.omit(val.idx) M <- modelVar(Model(object),p=p,data=as.data.frame(mydata)) 
vals <- with(M,c(parval,constrainpar))[attributes(myc)$args] fval <- try(myc(unlist(vals)),silent=TRUE) fmat <- inherits(fval,"try-error") if (fmat) { fval <- myc(rbind(unlist(vals))) } mycoef[1] <- fval myc0 <- function(theta) { theta0 <- unlist(vals); theta0[!is.na(val.idx)] <- theta if (fmat) { res <- myc(rbind(theta0)) } else { res <- myc(theta0) } return(res) } vals0 <- unlist(vals)[!is.na(val.idx)] if (length(vals0)==0) mycoef[2] <- NA else { if (!is.null(attributes(fval)$grad)) { if (fmat) { Gr <- cbind(attributes(fval)$grad(rbind(unlist(vals0)))) } else { Gr <- cbind(attributes(fval)$grad(unlist(vals0))) } } else { if (fmat) { Gr <- cbind(as.numeric(numDeriv::jacobian(myc0, unlist(vals0)))) } else { Gr <- cbind(as.numeric(numDeriv::jacobian(myc0, rbind(unlist(vals0))))) } } V <- vcov[val.idx0,val.idx0] mycoef[2] <- (t(Gr)%*%V%*%Gr)^0.5 } ## if (second) { ## if (!is.null(attributes(fval)$hessian)) { ## H <- attributes(fval)$hessian(unlist(vals)) ## } else { ## H <- hessian(myc, unlist(vals)) ## } ## HV <- H%*%vcov[val.idx,val.idx] ## mycoef[1] <- mycoef[1] + 0.5*sum(diag(HV)) ## mycoef[2] <- mycoef[2] + 0.5*sum(diag(HV%*%HV)) ## } mycoef[3] <- mycoef[1]/mycoef[2] mycoef[4] <- 2*(pnorm(abs(mycoef[3]),lower.tail=FALSE)) mycoef[5:6] <- mycoef[1] + c(1,-1)*qnorm((1-level)/2)*mycoef[2] res <- rbind(res,mycoef) mynames <- c(mynames,pp) if (!is.null(attributes(fval)$inv)){ res2 <- attributes(fval)$inv(mycoef[c(1,5,6)]) res <- rbind(res, c(res2[1],NA,NA,NA,res2[2],res2[3])) mynames <- c(mynames,paste0("inv(",pp,")")) } } rownames(res) <- mynames colnames(res) <- c("Estimate","Std. Error", "Z value", "Pr(>|z|)", "2.5%", "97.5%") return(res) } lava/R/graph.R0000644000176200001440000000204513520655354012633 0ustar liggesusers##' Extract graph ##' ##' Extract or replace graph object ##' ##' ##' @aliases Graph Graph<- ##' @usage ##' ##' Graph(x, ...) ##' ##' Graph(x, ...) 
<- value ##' ##' @param x Model object ##' @param value New \code{graphNEL} object ##' @param \dots Additional arguments to be passed to the low level functions ##' @author Klaus K. Holst ##' @seealso \code{\link{Model}} ##' @keywords graphs models ##' @export ##' @examples ##' ##' m <- lvm(y~x) ##' Graph(m) ##' ##' @export `Graph` <- function(x,...) UseMethod("Graph") ##' @export `Graph.lvm` <- function(x,add=FALSE,...) { if ((is.null(x$graph) || length(x$graph)==0) & add) { m <- Model(x) return(plot(m,noplot=TRUE)) } else return(x$graph) } ##' @export `Graph.lvmfit` <- function(x,...) Graph.lvm(x,...) ##' @export "Graph<-" <- function(x,...,value) UseMethod("Graph<-") ##' @export "Graph<-.lvmfit" <- function(x,...,value) { x$graph <- value; return(x) } ##' @export "Graph<-.lvm" <- function(x,...,value) { x$graph <- value; return(x) } lava/R/fixsome.R0000644000176200001440000000771513520655354013215 0ustar liggesusers ##' @export fixsome <- function(x, exo.fix=TRUE, measurement.fix=TRUE, S, mu, n, data, x0=FALSE, na.method="complete.obs", param=lava.options()$param,...) { if (is.null(param)) { param <- "none" } else { paramval <- c("hybrid","relative","none","absolute") param <- agrep(param,paramval,max.distance=0,value=TRUE) } if (is.character(measurement.fix)) { param <- measurement.fix measurement.fix <- TRUE } var.missing <- c() if (!missing(data) | !missing(S)) { if (!missing(data)) { dd <- procdata.lvm(x,data=data,na.method=na.method) } else { dd <- procdata.lvm(x, list(S=S,mu=mu,n=n)) } S <- dd$S; mu <- dd$mu; n <- dd$n var.missing <- setdiff(index(x)$manifest,colnames(S)) } else { S <- NULL; mu <- NULL } if (measurement.fix & param!="none") { if (length(var.missing)>0) {## Convert to latent: new.lat <- setdiff(var.missing,latent(x)) if (length(new.lat)>0) x <- latent(x, new.lat) } etas <- latent(x) ys <- endogenous(x) M <- x$M for (e in etas) { ## Makes sure that at least one arrow from latent variable is fixed (identification) ys. 
<- names(which(M[e,ys]==1)) if (length(ys.)>0) { if (tolower(param)=="absolute") { if (is.na(intercept(x)[[e]])) intercept(x,e) <- 0 if (is.na(x$covfix[e,e]) & is.na(x$covpar[e,e])) covariance(x,e) <- 1 } else { if (param=="hybrid") { if (is.na(intercept(x)[[e]])) intercept(x,e) <- 0 if (all(is.na(x$fix[e, ]==1)) & is.na(x$covpar[e,e]) & is.na(x$covfix[e,e])) regfix(x,from=e,to=ys.[1]) <- 1 } else { ## relative if (all(is.na(x$fix[e, ]==1)) & is.na(x$covpar[e,e]) & is.na(x$covfix[e,e])) regfix(x,from=e,to=ys.[1]) <- 1 if (!any(unlist(lapply(intercept(x)[ys.],is.numeric))) & is.na(intercept(x)[[e]])) { if (tryCatch(any(idx <- !is.na(x$fix[e,ys.])),error=function(x) FALSE)) { intercept(x, ys.[which(idx)[1]]) <- 0 } else { intercept(x,ys.[1]) <- 0 } } } } } } } if (is.null(S)) x0 <- TRUE if (exo.fix) { if (x0) { S0 <- diag(nrow=length(index(x)$manifest)) mu0 <- rep(0,nrow(S0)) } else { S0 <- S S0[is.na(S0)] <- 0 mu0 <- mu e0 <- eigen(S0) thres <- lava.options()$itol^(1/2) if (any(e0$values0) { for (i in seq_along(exo.idx)) for (j in seq_along(exo.idx)) { i. <- exo_all.idx[i]; j. <- exo_all.idx[j] myval <- S0[exo.idx[i],exo.idx[j]]; if (i.==j. & myval==0) { ##warning("Overparametrized model. Problem with '"%++%index(x)$vars[j.]%++%"'") myval <- 1 } else if (is.na(myval) || is.nan(myval)) myval <- 0 x$covfix[i.,j.] <- x$covfix[j.,i.] 
<- myval } x$mean[exo_all.idx] <- mu0[exo.idx] } } index(x) <- reindex(x) return(x) } lava/R/functional.R0000644000176200001440000000254413520655354013700 0ustar liggesusers##' @export "functional<-" <- function(x,...,value) UseMethod("functional<-") ##' @export "functional<-.lvm" <- function(x,to,from,...,value) { if (inherits(to,"formula")) { yy <- decomp.specials(getoutcome(to)) myvars <- all.vars(to) xx <- setdiff(myvars,yy) if (length(yy)*length(xx)>length(value) & length(value)!=1) stop("Wrong number of values") count <- 0 for (y in yy) { count <- count+1 for (i in seq_along(xx)) { suppressWarnings(x <- regression(x,to=y,from=xx[i],messages=0)) count <- count+1 if (length(value)==1) { functional(x, to=y, from=xx[i],...) <- value } else functional(x, to=y, from=xx[i],...) <- value[[count]] } } return(x) } if (missing(from) | missing(to)) return(x) edges <- paste(from,to,sep="~") x$attributes$functional[[edges]] <- value return(x) } ##' @export "functional" <- function(x,...) UseMethod("functional") ##' @export functional.lvm <- function(x,to,from,value,...) { if (!missing(value)) { functional(x,to,from,...) <- value return(x) } if (missing(from)) return(x$attributes$functional) edges <- paste(from,to,sep="~") x$attributes$functional[edges] } lava/R/complik.R0000644000176200001440000001071213520655354013170 0ustar liggesusers##' Composite Likelihood for probit latent variable models ##' ##' Estimate parameters in a probit latent variable model via a composite ##' likelihood decomposition. ##' @param x \code{lvm}-object ##' @param data data.frame ##' @param k Size of composite groups ##' @param type Determines number of groups. With \code{type="nearest"} (default) ##' only neighboring items will be grouped, e.g. for \code{k=2} ##' (y1,y2),(y2,y3),... With \code{type="all"} all combinations of size \code{k} ##' are included ##' @param pairlist A list of indices specifying the composite groups. 
Optional ##' argument which overrides \code{k} and \code{type} but gives complete ##' flexibility in the specification of the composite likelihood ##' @param messages Control amount of messages printed ##' @param \dots Additional arguments parsed on to lower-level functions ##' @param estimator Model (pseudo-likelihood) to use for the pairs/groups ##' @return An object of class \code{clprobit} inheriting methods from \code{lvm} ##' @author Klaus K. Holst ##' @seealso estimate ##' @keywords models regression ##' @export ##' @examples ##' m <- lvm(c(y1,y2,y3)~b*x+1*u[0],latent=~u) ##' ordinal(m,K=2) <- ~y1+y2+y3 ##' d <- sim(m,50,seed=1) ##' e1 <- complik(m,d,control=list(trace=1),type="all") complik <- function(x,data,k=2,type=c("nearest","all"),pairlist,messages=0,estimator="normal", ...) { y <- setdiff(endogenous(x),latent(x)) binsurv <- rep(FALSE,length(y)) for (i in 1:length(y)) { z <- try(data[,y[i]],silent=TRUE) ## binsurv[i] <- is.Surv(z) | (is.factor(z) && length(levels(z))==2) if (!inherits(z,"try-error")) binsurv[i] <- inherits(z,"Surv") | (is.factor(z)) } ord <- ordinal(x) binsurv <- unique(c(y[binsurv],ord)) if (!missing(pairlist)) { binsurvpos <- which(colnames(data)%in%endogenous(x)) } else { binsurvpos <- which(colnames(data)%in%binsurv) } if (missing(pairlist)) { if (type[1]=="all") { mypar <- combn(length(binsurv),k) ## all pairs (or multiplets), k=2: k*(k-1)/2 } else { mypar <- sapply(0:(length(binsurv)-k), function(x) x+1:k) } } else { mypar <- pairlist } if (is.matrix(mypar)) { mypar0 <- mypar; mypar <- c() for (i in seq(ncol(mypar0))) mypar <- c(mypar, list(mypar0[,i])) } nblocks <- length(mypar) mydata0 <- data[,,drop=FALSE] mydata <- as.data.frame(matrix(NA, nblocks*nrow(data), ncol=ncol(data))) names(mydata) <- names(mydata0) for (i in 1:ncol(mydata)) { if (is.factor(data[,i])) { mydata[,i] <- factor(mydata[,i],levels=levels(mydata0[,i])) } if (survival::is.Surv(data[,i])) { S <- data[,i] for (j in 2:nblocks) S <- rbind(S,data[,i]) S[,1] <- 
NA mydata[,i] <- S } } for (ii in 1:nblocks) { data0 <- data; for (i in binsurvpos[-mypar[[ii]]]) { if (survival::is.Surv(data[,i])) { S <- data0[,i]; S[,1] <- NA data0[,i] <- S } else { data0[,i] <- NA if (is.factor(data[,i])) data0[,i] <- factor(data0[,i],levels=levels(data[,i])) } } mydata[(1:nrow(data))+(ii-1)*nrow(data),] <- data0 } suppressWarnings(e0 <- estimate(x,data=mydata,estimator=estimator,missing=TRUE,messages=messages, ...)) S <- score(e0,indiv=TRUE) nd <- nrow(data) block1 <- which((1:nd)%in%(rownames(S))) blocks <- sapply(1:nblocks, function(x) 1:length(block1)+length(block1)*(x-1)) if (nblocks==1) { Siid <- S } else { Siid <- matrix(0,nrow=length(block1),ncol=ncol(S)) for (j in 1:ncol(blocks)) { Siid <- Siid+S[blocks[,j],] } } iI <- solve(information(e0,type="hessian")) J <- t(Siid)%*%(Siid) e0$iidscore <- Siid e0$blocks <- blocks e0$vcov <- iI%*%J%*%iI ## thetahat-theta0 :=(asymp) I^-1*S => var(thetahat) = iI*var(S)*iI cc <- e0$coef; cc[,2] <- sqrt(diag(e0$vcov)) cc[,3] <- cc[,1]/cc[,2]; cc[,4] <- 2*(1-pnorm(abs(cc[,3]))) e0$coef <- cc e0$bread <- iI class(e0) <- c("estimate.complik",class(e0)) return(e0) } score.estimate.complik <- function(x,indiv=FALSE,...) { if (!indiv) return(colSums(x$iidscore)) x$iidscore } iid.estimate.complik <- function(x,...) { iid.default(x,bread=x$bread,...) } lava/R/information.R0000644000176200001440000001771313520655354014067 0ustar liggesusers##' @export `information` <- function(x,...) UseMethod("information") ###{{{ information.lvm ##' @export information.lvm <- function(x,p,n,type=ifelse(model=="gaussian", c("E","hessian","varS","outer","sandwich","robust","num"),"outer"), data,weights=NULL, data2=NULL, model="gaussian", method=lava.options()$Dmethod, inverse=FALSE, pinv=TRUE, score=TRUE,...) 
{ if (missing(n)) n <- NROW(data) if (type[1]%in%c("sandwich","robust")) { cl <- match.call() cl$inverse <- !inverse cl$type <- "outer" A <- eval.parent(cl) cl$inverse <- !(cl$inverse) cl$type <- ifelse(type[1]=="sandwich","E","hessian") B <- eval.parent(cl) return(B%*%A%*%B) } if (type[1]%in%c("num","hessian","obs","observed") | (type[1]%in%c("E","hessian") & model!="gaussian")) { myf <- function(p0) score(x, p=p0, model=model,data=data, weights=weights,data2=data2,indiv=FALSE,n=n) ##...) I <- -numDeriv::jacobian(myf,p,method=method) res <- (I+t(I))/2 # Symmetric result if (inverse) { if (pinv) iI <- Inverse(res) else iI <- solve(res) return(iI) } return(res) } if (type[1]=="varS" | type[1]=="outer") { S <- score(x,p=p,data=na.omit(data),model=model,weights=weights,data2=data2,indiv=TRUE,...) res <- t(S)%*%S if (inverse) { if (pinv) iI <- Inverse(res) else iI <- solve(res) return(iI) } attributes(res)$grad <- colSums(S) return(res) } if (n>1) { xfix <- colnames(data)[(colnames(data)%in%parlabels(x,exo=TRUE))] xconstrain <- intersect(unlist(lapply(constrain(x),function(z) attributes(z)$args)),manifest(x)) if (length(xfix)>0 | length(xconstrain)>0) { ##### Random slopes! 
x0 <- x if (length(xfix)>0) { nrow <- length(vars(x)) xpos <- lapply(xfix,function(y) which(regfix(x)$labels==y)) colpos <- lapply(xpos, function(y) ceiling(y/nrow)) rowpos <- lapply(xpos, function(y) (y-1)%%nrow+1) myfix <- list(var=xfix, col=colpos, row=rowpos) for (i in seq_along(myfix$var)) for (j in seq_along(myfix$col[[i]])) regfix(x0, from=vars(x0)[myfix$row[[i]]][j],to=vars(x0)[myfix$col[[i]]][j]) <- data[1,myfix$var[[i]]] index(x0) <- reindex(x0,zeroones=TRUE,deriv=TRUE) } pp <- modelPar(x0,p) p0 <- with(pp, c(meanpar,p,p2)) myfun <- function(ii) { if (length(xfix)>0) for (i in seq_along(myfix$var)) { for (j in seq_along(myfix$col[[i]])) { index(x0)$A[cbind(myfix$row[[i]],myfix$col[[i]])] <- data[ii,myfix$var[[i]]] } } ww <- NULL if (!is.null(weights)) ww <- weights[ii,] return(information(x0,p=p,n=1,type=type,weights=ww,data=data[ii,])) } L <- lapply(seq_len(nrow(data)),function(y) myfun(y)) val <- apply(array(unlist(L),dim=c(length(p0),length(p0),nrow(data))),c(1,2),sum) if (inverse) { if (pinv) iI <- Inverse(val) else iI <- solve(val) return(iI) } return(val) } } if (!is.null(weights) && is.matrix(weights)) { L <- lapply(seq_len(nrow(weights)),function(y) information(x,p=p,n=1,type=type,weights=weights[y,])) val <- apply(array(unlist(L),dim=c(length(p),length(p),nrow(weights))),c(1,2),sum) if (inverse) { if (pinv) iI <- Inverse(val) else iI <- solve(val) return(iI) } return(val) } mp <- moments(x,p,data=data) pp <- modelPar(x,p) D <- deriv.lvm(x, meanpar=pp$meanpar, mom=mp, p=p)##, all=length(constrain(x))>0) C <- mp$C iC <- Inverse(C,det=FALSE, symmetric = TRUE) if (is.null(weights)) { ## W <- diag(ncol(iC)) } else { if (length(weights)0)) { if (inverse) { if (pinv) iI <- Inverse(information_Sigma) else iI <- solve(information_Sigma) return(iI) } return(information_Sigma) } ii <- index(x) if (is.null(weights)) { information_mu <- n*t(dxi) %*% (iC) %*% (dxi) } else { information_mu <- n*t(dxi) %*% (iC%*%W) %*% (dxi) } if (!(lava.options()$devel)) { 
information <- information_Sigma+information_mu } else { mparidx <- with(ii$parBelongsTo,c(mean,reg)) information <- information_Sigma information[mparidx,mparidx] <- information[mparidx,mparidx] + information_mu } if (inverse) { if (pinv) iI <- Inverse(information, symmetric = TRUE) else iI <- solve(information) return(iI) } return(information) } ###}}} information.lvm ###{{{ information.lvmfit ##' @export information.lvmfit <- function(x,p=pars(x),n=x$data$n,data=model.frame(x),model=x$estimator,weights=Weights(x), data2=x$data$data2, ...) { I <- information(x$model0,p=p,n=n,data=data,model=model, weights=weights,data2=data2,...) if (ncol(I)0) return(names(rsidx)[rsidx]) else return(NULL) } if (inherits(covar,"formula")) { covar <- all.vars(covar) } if (clear) { ## x <- addattr(x,attr="shape",var=var,val="rectangle") x$attributes$randomslope[covar] <- FALSE } else { if (!is.null(random) & !is.null(response)) { if (inherits(random,"formula")) { random <- all.vars(random) } if (inherits(response,"formula")) { response <- all.vars(response) } if (length(covar)!=length(response)) stop("Vectors should be of the same length!") if (!(random%in%latent(x))) { addvar(x) <- random latent(x) <- random } if (missing(param) || !is.null(param)) { if (!missing(postfix)) newlatent <- paste0(random,postfix) else newlatent <- paste(random,covar,sep=".") covariance(x,random) <- 1 for (i in seq_along(covar)) { if (missing(param)) { x <- regression(x,to=newlatent[i],from=random) } else { if (inherits(param,"formula")) { param <- all.vars(param) } if (length(param)!=length(newlatent)) param <- rep(param,length(newlatent)) regfix(x,to=newlatent[i], from=random) <- param[i] } regfix(x,to=response[i],from=newlatent[i]) <- covar[i] latent(x) <- newlatent[i] covariance(x,newlatent[i]) <- 0 } } else { for (i in seq_along(covar)) { regfix(x,to=response[i],from=random) <- covar[i] } } } else { x$attributes$randomslope[covar] <- TRUE } } index(x) <- reindex(x) return(x) } ##' @export 
`randomslope.lvmfit` <- function(x,...) { randomslope(Model(x),...) } lava/R/plotConf.R0000644000176200001440000003043313520655354013320 0ustar liggesusers##' Plot regression line (with interactions) and partial residuals. ##' ##' @title Plot regression lines ##' @param model Model object (e.g. \code{lm}) ##' @param var1 predictor (Continuous or factor) ##' @param var2 Factor that interacts with \code{var1} ##' @param data data.frame to use for prediction (model.frame is used as default) ##' @param ci.lty Line type for confidence limits ##' @param ci Boolean indicating wether to draw pointwise 95\% confidence limits ##' @param level Level of confidence limits (default 95\%) ##' @param pch Point type for partial residuals ##' @param lty Line type for estimated regression lines ##' @param lwd Line width for regression lines ##' @param npoints Number of points used to plot curves ##' @param xlim Range of x axis ##' @param col Color (for each level in \code{var2}) ##' @param colpt Color of partial residual points ##' @param alpha Alpha level ##' @param cex Point size ##' @param delta For categorical \code{var1} ##' @param centermark For categorical \code{var1} ##' @param jitter For categorical \code{var1} ##' @param cidiff For categorical \code{var1} ##' @param mean For categorical \code{var1} ##' @param legend Boolean (add legend) ##' @param trans Transform estimates (e.g. exponential) ##' @param partres Boolean indicating whether to plot partial residuals ##' @param partse . 
##' @param labels Optional labels of \code{var2} ##' @param vcov Optional variance estimates ##' @param predictfun Optional predict-function used to calculate confidence limits and predictions ##' @param plot If FALSE return only predictions and confidence bands ##' @param new If FALSE add to current plot ##' @param \dots additional arguments to lower level functions ##' @return list with following members: ##' \item{x}{Variable on the x-axis (\code{var1})} ##' \item{y}{Variable on the y-axis (partial residuals)} ##' \item{predict}{Matrix with confidence limits and predicted values} ##' @author Klaus K. Holst ##' @seealso \code{termplot} ##' @aliases plotConf ##' @export ##' @examples ##' n <- 100 ##' x0 <- rnorm(n) ##' x1 <- seq(-3,3, length.out=n) ##' x2 <- factor(rep(c(1,2),each=n/2), labels=c("A","B")) ##' y <- 5 + 2*x0 + 0.5*x1 + -1*(x2=="B")*x1 + 0.5*(x2=="B") + rnorm(n, sd=0.25) ##' dd <- data.frame(y=y, x1=x1, x2=x2) ##' lm0 <- lm(y ~ x0 + x1*x2, dd) ##' plotConf(lm0, var1="x1", var2="x2") ##' abline(a=5,b=0.5,col="red") ##' abline(a=5.5,b=-0.5,col="red") ##' ### points(5+0.5*x1 -1*(x2=="B")*x1 + 0.5*(x2=="B") ~ x1, cex=2) ##' ##' data(iris) ##' l <- lm(Sepal.Length ~ Sepal.Width*Species,iris) ##' plotConf(l,var2="Species") ##' plotConf(l,var1="Sepal.Width",var2="Species") ##' ##' \dontrun{ ##' ## lme4 model ##' dd$Id <- rbinom(n, size = 3, prob = 0.3) ##' lmer0 <- lme4::lmer(y ~ x0 + x1*x2 + (1|Id), dd) ##' plotConf(lmer0, var1="x1", var2="x2") ##' } ##' @keywords hplot regression plotConf <- function(model, var1=NULL, var2=NULL, data=NULL, ci.lty=0, ci=TRUE, level=0.95, pch=16, lty=1, lwd=2, npoints=100, xlim, col=NULL, colpt, alpha=0.5, cex=1, delta=0.07, centermark=0.03, jitter=0.2, cidiff=FALSE, mean=TRUE, legend=ifelse(is.null(var1),FALSE,"topright"), trans=function(x) {x}, partres=inherits(model,"lm"), partse=FALSE, labels, vcov, predictfun, plot=TRUE, new=TRUE, ...) { if (inherits(model,"formula")) model <- lm(model,data=data,...) 
if (inherits(model,"lmerMod")) { intercept <- lme4::fixef(model)["(Intercept)"] } else { intercept <- coef(model)["(Intercept)"] } if (is.na(intercept)) intercept <- 0 if (is.null(data)) { curdata <- get_all_vars(model,data=model.frame(model)) } else { curdata <- get_all_vars(formula(model), data=data) } if (inherits(model,"lmerMod")) { curdata0 <- model.frame(model, data = data, fixed.only = FALSE) } else { curdata0 <- model.frame(model,data) ## Checking for factors } if (is.null(var1) && is.null(var2)) { var1 <- colnames(curdata)[2] var10 <- colnames(curdata0)[2] } else var10 <- var1 responseorig <- colnames(curdata)[1] if (inherits(curdata0[,var10],c("character","factor"))) { curdata <- curdata0 var2 <- var10; var1 <- NULL } dots <- list(...) response <- all.vars(formula(model))[1] cname <- colnames(curdata)[-1] if (!is.factor(curdata[,var2]) & !is.null(var2)) { curdata[,var2] <- as.factor(curdata[,var2]) colnames(curdata)[1] <- response model <- update(model,as.formula(paste(response,"~.")),data=curdata) } thelevels <- levels(curdata[,var2]) if (missing(labels)) labels <- thelevels k <- ifelse(is.null(var2),1,length(thelevels)) if (is.null(col)) { col <- c("black","darkblue","darkred","goldenrod","mediumpurple", "seagreen","aquamarine3","violetred1","salmon1", "lightgoldenrod1","darkorange2","firebrick1","violetred1", "gold") } if (missing(xlim)) { if (!is.null(var1)) xlim <- range(curdata[,var1]) else xlim <- c(0,length(thelevels))+0.5 } dots$xlim <- xlim if (is.null(var1) & !is.null(var2)) { ##npoints <- 1 x <- unique(curdata[,var2]) npoints <- 1#length(x) } else { x <- seq(xlim[1], xlim[2], length.out=npoints) } xx <- c() newdata <- data.frame(id=seq(npoints)) partdata <- curdata var1.idx <- 0 ii <- 1 for (nn in cname) { ii <- ii+1 v <- curdata[,nn] if (!is.null(var1) && nn==var1) { var1.idx <- ii newdata <- cbind(newdata, rep(x, k)) partdata[,nn] <- 0 } else { if (is.factor(v)) { if (nn%in%var2) { newdata <- cbind(newdata, factor(rep(levels(v), 
each=npoints),levels=thelevels)) partdata[,nn] <- factor(rep(levels(v)[1], nrow(partdata)),levels=levels(v)) } else { newdata <- cbind(newdata, factor(rep(levels(v)[1], k*npoints), levels=levels(v))) } } else { if (is.logical(v)) newdata <- cbind(newdata,FALSE) else newdata <- cbind(newdata,0) } } }; colnames(newdata) <- c("_id", cname) partdata[,response] <- newdata[,response] <- 0 atr <- c("terms") attributes(newdata)[atr] <- attributes(curdata)[atr] attributes(partdata)[atr] <- attributes(partdata)[atr] Y <- model.frame(model)[,1] if(inherits(Y,"Surv")) Y <- Y[,1] XX <- model.matrix(formula(terms(model)),data=newdata) if (inherits(model,"lmerMod")) { bb <- lme4::fixef(model) } else { bb <- coef(model) } if (!missing(vcov)) SS <- vcov else { if (inherits(model,"geeglm")) { SS <- (summary(model)$cov.unscaled) } else { SS <- as.matrix(stats::vcov(model)) } } bidx <- which(apply(XX,2,function(x) !all(x==0))) notbidx <- setdiff(seq(length(bb)),bidx) bb0 <- bb; bb0[notbidx] <- 0 myse <- apply(XX[,bidx,drop=FALSE],1,function(x) rbind(x)%*%SS[bidx,bidx,drop=FALSE]%*%cbind(x))^.5 ci.all <- list(fit=XX%*%bb0,se.fit=myse) z <- qnorm(1-(1-level)/2) ci.all$fit <- cbind(ci.all$fit,ci.all$fit-z*ci.all$se.fit,ci.all$fit+z*ci.all$se.fit) if (!missing(predictfun)) { R <- Y-predict(model, newdata=partdata) ci.all <- predict(model, newdata=newdata, se.fit=TRUE, interval = "confidence", level=level,...) 
} else { XX0 <- model.matrix(formula(terms(model)),data=partdata) R <- Y-XX0%*%bb } if (inherits(model,"lmerMod")) { uz <- as.matrix(unlist(lme4::ranef(model))%*%do.call(rbind,lme4::getME(model,"Ztlist")))[1,] R <- R-uz } pr <- trans(intercept + R) if (is.na(intercept)) { intercept <- 0 if (!is.null(var2)) { intercept <- coef(model)[paste0(var2,thelevels)][as.numeric(curdata[,var2])] } } if (is.null(dots$ylim)) { if (partres) { if (cidiff) dots$ylim <- range(pr) else dots$ylim <- range(trans(c(ci.all$fit)),pr) } else dots$ylim <- trans(range(ci.all$fit)) } if (is.null(dots$ylab)) dots$ylab <- responseorig if (is.null(var1)) { dots$axes=FALSE if (is.null(dots$xlab)) dots$xlab <- "" } else { if (is.null(dots$xlab)) dots$xlab <- var1 } if (!plot) return(list(x=x, y=pr, predict=ci.all, predict.newdata=newdata)) plot.list <- c(x=0,y=0,type="n",dots) if (new) { do.call(graphics::plot, plot.list) if (is.null(var1)) { box() axis(2) axis(1,at=seq(length(thelevels)),labels) } } col.trans <- Col(col,alpha) Wrap <- function(k,n) { (seq_len(k)-1)%%n +1 } col.i <- Wrap(k,length(col)); col.k <- col[col.i]; lty.k <- lty[Wrap(k,length(lty))] pch.k <- pch[Wrap(k,length(pch))] if (!is.null(var1)) { for (i in seq_len(k)) { ci0 <- trans(ci.all$fit[(npoints*(i-1)+1):(i*npoints),]) y <- ci0[,1]; yu <- ci0[,3]; yl <- ci0[,2] lines(y ~ x, col=col.k[i], lwd=lwd, lty=lty.k[i]) if (ci) { lines(yl ~ x, lwd=1, col=col.k[i], lty=ci.lty) lines(yu ~ x, lwd=1, col=col.k[i], lty=ci.lty) xx <- c(x, rev(x)) yy <- c(yl, rev(yu)) polygon(xx,yy, col=col.trans[col.i[i]], lty=0) } } } ii <- as.numeric(curdata[,var2]) if (is.null(var1)) { xx <- curdata[,var2] x <- jitter(as.numeric(xx),jitter) if (missing(colpt)) colpt <- Col(col[1],alpha) if (partres>0) points(pr ~ x,pch=pch[1], col=colpt[1], cex=cex, ...) 
mycoef <- bb[paste0(var2,thelevels)][-1] if (inherits(model,c("lm","glm"))) myconf <- confint(model)[paste0(var2,thelevels)[-1],,drop=FALSE] else { myconf <- matrix(mycoef,ncol=2,nrow=length(mycoef)) myconf <- myconf + qnorm(0.975)*cbind((diag(as.matrix(SS))[-1])^0.5)%x%cbind(-1,1) } for (pos in seq(k)) { if (cidiff) { if (pos>1) { ci0 <- trans(intercept+myconf[pos-1,]) yl <- ci0[1]; yu <- ci0[2]; y <- trans(intercept+mycoef[pos-1]) } else { yu <- yl <- NULL; y <- trans(intercept) } } else if (partse) { y0 <- pr[xx==levels(xx)[pos]] ci0 <- confint(lm(y0~1)) yl <- ci0[1]; yu <- ci0[2]; y <- trans(mean(y0)) } else { ci0 <- rbind(trans(ci.all$fit[(npoints*(pos-1)+1):(pos*npoints),])) y <- ci0[,1]; yu <- ci0[,3]; yl <- ci0[,2] } if (!mean) y <- NULL confband(pos,yl,yu,delta=delta,center=y,centermark=centermark,col=col[1],lwd=lwd[1],lty=lty[1],cex=cex) } } else { if (partres) { xx <- curdata[,var1] if (!missing(colpt)) { points(pr ~ xx, col=colpt, cex=cex, pch=pch[1],...) } else { if (!is.null(var2)) points(pr ~ xx, col=col.k[ii], pch=pch.k[ii], cex=cex, ...) else points(pr ~ xx, col=col[1], pch=pch[1], cex=cex,...) 
} } } if (k>1 && legend!=FALSE) { if (length(lty)>1) legend(legend, legend=thelevels, col=col.k, pch=pch.k, bg="white", lty=lty.k,cex=cex) else legend(legend, legend=thelevels, col=col.k, pch=pch.k, bg="white",cex=cex) } invisible(list(x=xx, y=pr, predict=ci.all, predict.newdata=newdata)) } lava/R/estimate.formula.R0000644000176200001440000000557613520655354015025 0ustar liggesusers##' @export estimate.formula <- function(x, data, weights, family=stats::gaussian, ..., model="glm", lvm=FALSE) { cl <- match.call() if (lvm) { cl[[1]] <- quote(estimate0) return(eval(cl,envir=parent.frame())) } if (missing(data)) { data <- environment(x) cl$data <- quote(data) } mf <- match.call(expand.dots = FALSE) m <- match(c("x", "data", "weights", "subset", "etastart", "mustart", "offset"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- quote(stats::model.frame) mf$na.action <- na.pass0 names(mf)[which(names(mf)=="x")] <- "formula" mf <- eval(mf, parent.frame()) idx <- attr(na.omit(mf),"na.action") weights <- as.vector(model.weights(mf)) if (length(idx)>0) { if (is.null(weights)) weights <- rep(1,nrow(mf)) if (length(idx)>0) weights[idx] <- 0 cl$weights <- quote(weights) cl$na.action <- quote(na.pass0) } cl$data <- quote(data) argsModelObj <- names(formals(model)) dots <- list(...) idx <- which(names(dots) %ni% argsModelObj) rmarg <- c("model","raw","lvm") if (length(idx)>0) rmarg <- c(names(dots)[idx],rmarg) cl[rmarg] <- NULL names(cl)[names(cl)=="x"] <- "formula" cl[[1]] <- as.name(model) fit <- eval(cl) if (length(idx)==0) return(fit) optarg <- NULL if (length(idx)>0) { optarg <- dots[idx] } do.call(estimate, c(list(fit),optarg)) } estimate0 <- function(x,data=parent.frame(),pred.norm=c(),unstruct=FALSE,messages=0,id=NULL,distribution=NULL,estimator="gaussian",...) 
{ formulaId <- union(Specials(x,c("cluster")),Specials(x,c("id"))) formulaSt <- paste0("~.-cluster(",formulaId,")-id(",formulaId,")") if (!is.null(formulaId)) { id <- formulaId x <- update(x,as.formula(formulaSt)) } if (!is.null(id)) x <- update(x,as.formula(paste(".~.+",id))) mf <- model.frame(x,data) yvar <- names(mf)[1] y <- mf[,yvar] opt <- options(na.action="na.pass") mm <- model.matrix(x,data) options(opt) covars <- colnames(mm) covars <- unlist(lapply(covars, function(x) gsub("[^a-zA-Z0-9._]","",x))) colnames(mm) <- covars if (attr(terms(x),"intercept")==1) { covars <- covars[-1] it <- c() } else { it <- "0" } if (!is.null(id)) covars <- setdiff(covars,id) model <- lvm(toformula(yvar,c(it,covars)),messages=0) if (!is.null(distribution)) { lava::distribution(model,yvar) <- distribution estimator <- "glm" } mydata <- na.omit(as.data.frame(cbind(data.frame(y),mm))); names(mydata)[1] <- yvar exogenous(model) <- setdiff(covars,pred.norm) if (unstruct) { model <- covariance(model,pred.norm,pairwise=TRUE) } estimate(model,mydata,messages=messages,id=id,estimator=estimator,...) } lava/R/makemissing.R0000644000176200001440000000247713520655354014052 0ustar liggesusers##' Generates missing entries in data.frame/matrix ##' ##' @title Create random missing data ##' @param data data.frame ##' @param p Fraction of missing data in each column ##' @param cols Which columns (name or index) to alter ##' @param rowwise Should missing occur row-wise (either none or all selected columns are missing) ##' @param nafun (Optional) function to be applied on data.frame before return (e.g. \code{na.omit} to return complete-cases only) ##' @param seed Random seed ##' @return data.frame ##' @author Klaus K. 
Holst ##' @keywords utilities ##' @export makemissing <- function(data,p=0.2,cols=seq_len(ncol(data)),rowwise=FALSE,nafun=function(x) x, seed=NULL) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } p <- rep(p,length.out=length(cols)) if (!rowwise) for (i in seq_along(cols)) { data[rbinom(nrow(data),1,p[i])==1,cols[i]] <- NA } else data[which(rbinom(nrow(data),1,p)==1),cols] <- NA return(nafun(data)) } lava/R/partialcor.R0000644000176200001440000000330513520655354013672 0ustar liggesusers##' Calculate partial correlations ##' ##' Calculate partial correlation coefficients and confidence limits via Fishers ##' z-transform ##' ##' ##' @param formula formula speciying the covariates and optionally the outcomes ##' to calculate partial correlation for ##' @param data data.frame ##' @param level Level of confidence limits ##' @param ... Additional arguments to lower level functions ##' @return A coefficient matrix ##' @author Klaus K. Holst ##' @keywords models regression ##' @examples ##' ##' m <- lvm(c(y1,y2,y3)~x1+x2) ##' covariance(m) <- c(y1,y2,y3)~y1+y2+y3 ##' d <- sim(m,500) ##' partialcor(~x1+x2,d) ##' ##' @export partialcor <- function(formula,data,level=0.95,...) 
{ y <- getoutcome(formula) if (length(y)==0) { preds <- all.vars(formula) yy <- setdiff(names(data),preds) } else { yy <- decomp.specials(y) preds <- attr(y,"x") } if (length(yy)<2) return(NULL) res <- c() for (i in seq_len(length(yy)-1)) for (j in seq(i+1,length(yy))) { f <- as.formula(paste("cbind(",yy[i],",",yy[j],")~", paste(preds,collapse="+"))) res <- rbind(res, partialcorpair(f,data,level=level)) rownames(res)[nrow(res)] <- paste(yy[i],yy[j],sep="~") } return(res) } partialcorpair <- function(formula,data,level=0.95,...) { l <- lm(formula,data) k <- ncol(model.matrix(l)) n <- nrow(model.matrix(l)) r <- residuals(l) rho <- cor(r)[1,2] zrho <- atanh(rho) var.z <- 1/(n-k-3) ci.z <- zrho + c(-1,1)*qnorm(1-(1-level)/2)*sqrt(var.z) ci.rho <- tanh(ci.z) z <- 1/sqrt(var.z)*zrho p.z <- 2*(pnorm(-abs(z))) # p-value using z-transform for H_0: rho=0. return(c(cor=rho,z=z,pval=p.z,lowerCI=ci.rho[1],upperCI=ci.rho[2])) } lava/R/ksmooth.R0000644000176200001440000001234613520655354013223 0ustar liggesusers##' Plot/estimate surface ##' ##' @export ##' @aliases ksmooth2 surface ##' @param x formula or data ##' @param data data.frame ##' @param h bandwidth ##' @param xlab X label ##' @param ylab Y label ##' @param zlab Z label ##' @param gridsize grid size of kernel smoother ##' @param ... 
Additional arguments to graphics routine (persp3d or persp) ##' @examples ##' ksmooth2(rmvn0(1e4,sigma=diag(2)*.5+.5),c(-3.5,3.5),h=1, ##' rgl=FALSE,theta=30) ##' ##' if (interactive()) { ##' ksmooth2(rmvn0(1e4,sigma=diag(2)*.5+.5),c(-3.5,3.5),h=1) ##' ksmooth2(function(x,y) x^2+y^2, c(-20,20)) ##' ksmooth2(function(x,y) x^2+y^2, xlim=c(-5,5), ylim=c(0,10)) ##' ##' f <- function(x,y) 1-sqrt(x^2+y^2) ##' surface(f,xlim=c(-1,1),alpha=0.9,aspect=c(1,1,0.75)) ##' surface(f,xlim=c(-1,1),clut=heat.colors(128)) ##' ##play3d(spin3d(axis=c(0,0,1), rpm=8), duration=5) ##' } ##' ##' if (interactive()) { ##' surface(function(x) dmvn0(x,sigma=diag(2)),c(-3,3),lit=FALSE,smooth=FALSE,box=FALSE,alpha=0.8) ##' surface(function(x) dmvn0(x,sigma=diag(2)),c(-3,3),box=FALSE,specular="black")##' ##' } ##' ##' if (!inherits(try(find.package("fields"),silent=TRUE),"try-error")) { ##' f <- function(x,y) 1-sqrt(x^2+y^2) ##' ksmooth2(f,c(-1,1),rgl=FALSE,image=fields::image.plot) ##' } ksmooth2 <- function(x,data,h=NULL,xlab=NULL,ylab=NULL,zlab="",gridsize=rep(51L,2),...) { if (is.function(x)) { args <- c(list(f=x,h=h,xlab=xlab,ylab=ylab,zlab=zlab),list(...)) if (is.null(args$xlim) && !missing(data)) { if (is.list(data)) { args$xlim <- data[[1]] args$ylim <- data[[2]] } else args$xlim <- data } return(do.call(surface,args)) } if (inherits(x,"formula")) { x <- model.frame(x,data) } if (length(gridsize)==1) gridsize <- rep(gridsize,2) if (is.null(h)) h <- apply(as.matrix(x),2,sd)*nrow(x)^(-1/5) est <- KernSmooth::bkde2D(x, bandwidth=h, gridsize=gridsize) if (is.null(xlab)) xlab <- names(x)[1] if (is.null(ylab)) ylab <- names(x)[2] surface(est$fhat, x=est$x1, y=est$x2, est$fhat, xlab=xlab, ylab=ylab, zlab=zlab, ...) return(invisible(est)) } ##' @export surface <- function(f,xlim=c(0,1),ylim=xlim,n=rep(100,2),col,clut="gold",clut.center,x,y,rgl=TRUE,expand=0.5,nlevels=10,col.contour="black",contour=TRUE,persp=TRUE,image="image",...) 
{ if (missing(x)) { if (length(n)==1) n <- rep(n,2) x <- seq(xlim[1],xlim[2],length.out=n[1]) y <- seq(ylim[1],ylim[2],length.out=n[2]) } if (is.function(f)) { xy <- as.matrix(expand.grid(x,y)) if (inherits(try(f(c(x[1],y[1])),silent=TRUE),"try-error")) { f <- matrix(f(xy[,1],xy[,2]),nrow=length(x),ncol=length(y)) } else { val <- f(xy) if (length(val)0) estimator <- nestimator[1] ## Binary outcomes -> censored regression if (is.null(dim(data))) return(NULL) if (is.null(estimator) || estimator%in%c(nestimator2,nestimator)) { for (i in setdiff(lava::endogenous(x),bin)) { if (is.character(data[,i]) | is.factor(data[,i])) { # Transform binary 'factor' y <- as.factor(data[,i]) data[,i] <- as.numeric(y)-1 if (hasTobit && nlevels(y)==2 && !is.null(estimator) && estimator%in%c("gaussian","tobit")) { lava.tobit::binary(x) <- i } else { estimator <- nestimator[1] ordinal(x,K=nlevels(y)) <- i } } } ord <- ordinal(x) if (length(ord)>0 && !is.null(estimator) && estimator%in%nestimator2) { if (hasTobit) { lava.tobit::binary(x) <- ord } else { estimator <- nestimator[1] } } if (hasTobit) bin <- intersect(lava.tobit::binary(x),vars(x)) if (length(bin)>0 && (is.null(estimator) || estimator%in%"normal")) { estimator <- nestimator[1] ordinal(x,K=2) <- bin } if (length(bin)>0 && estimator%in%nestimator2) { estimator <- nestimator2[1] if (is.null(weights)) { W <- data[,bin,drop=FALSE]; W[W==0] <- -1; colnames(W) <- bin weights <- lava::lava.options()$threshold*W } else { ## if (!all(binary(x)%in%colnames(data))) ## W <- data[,binary(x),drop=FALSE]; W[W==0] <- -1; colnames(W) <- binary(x) ## attributes(W)$data2 <- weights ## weights <- W ## weights[,binary(x)] <- W } for (b in bin) { data[!is.na(data[,b]),b] <- 0 } ## data[,binary(x)] <- 0 if (!is.null(data2)) { estimator <- "tobitw" } } } ## Transform 'Surv' objects data2 <- mynames <- NULL if (is.null(estimator) || estimator%in%nestimator[1] || (!hasTobit && estimator%in%nestimator2)) { for (i in 
setdiff(lava::endogenous(x),c(bin,ord))) { if (survival::is.Surv(data[,i])) { S <- data[,i] y1 <- S[,1] if (attributes(S)$type=="left") { y2 <- y1 y1[S[,2]==0] <- -Inf } if (attributes(S)$type=="right") { y2 <- y1 y2[S[,2]==0] <- Inf } if (attributes(S)$type=="interval2") { y2 <- S[,2] } if (attributes(S)$type=="interval") { y2 <- S[,2] y2[S[,3]==1L] <- y1[S[,3]==1L] } if (!(attributes(S)$type%in%c("left","right","interval2","interval"))) stop("Surv type not supported.") mynames <- c(mynames,i) y2 <- cbind(y2) colnames(y2) <- i data2 <- cbind(data2,y2) data[,i] <- y1 estimator <- "normal" } } } W <- NULL if (length(estimator)>0 && estimator%in%nestimator2 && hasTobit) { for (i in setdiff(lava::endogenous(x),bin)) { if (survival::is.Surv(data[,i])) { estimator <- nestimator2[1] S <- data[,i] y <- S[,1] if (attributes(S)$type=="left") w <- S[,2]-1 if (attributes(S)$type=="right") w <- 1-S[,2] if (attributes(S)$type=="interval2") { w <- S[,3]; w[w==2] <- (-1) } mynames <- c(mynames,i) W <- cbind(W,w) data[,i] <- y } } if (length(W)>0) { colnames(W) <- mynames if (!is.null(weights)) { wW <- intersect(colnames(weights),colnames(W)) if (length(wW)>0) weights[,wW] <- W[,wW] Wo <- setdiff(colnames(W),wW) if (length(Wo)>0) weights <- cbind(weights,W[,Wo,drop=FALSE]) } else { weights <- W; } } } return(c(list(x=x,data=data,weights=weights,data2=data2,estimator=estimator),dots)) } ##' Define variables as ordinal ##' ##' Define variables as ordinal in latent variable model object ##' @export ##' @aliases ordinal ordinal<- ##' @param x Object ##' @param ... additional arguments to lower level functions ##' @param value variable (formula or character vector) ##' @examples ##' if (requireNamespace("mets")) { ##' m <- lvm(y + z ~ x + 1*u[0], latent=~u) ##' ordinal(m, K=3) <- ~y+z ##' d <- sim(m, 100, seed=1) ##' e <- estimate(m, d) ##' } ##' "ordinal<-" <- function(x,...,value) UseMethod("ordinal<-") ##' @export "ordinal<-.lvm" <- function(x,...,value) { ordinal(x, value, ...) 
} ##' @export "ordinal" <- function(x,...) UseMethod("ordinal") ##' @export print.ordinal.lvm <- function(x,...) { cat(rep("_",28),"\n",sep="") for (i in x) { val <- attr(x,"fix")[[i]] if (length(val)==0) cat(paste(i,"binary",sep=":"),"\n") else print(unlist(attr(x,"fix")[[i]]),quote=FALSE) cat(rep("_",28),"\n",sep="") } } ##' @export `ordinal.lvm` <- function(x,var=NULL,K=2, constrain, breaks=NULL, p, liability=TRUE, labels, exo=FALSE, ...) { if (inherits(var,"formula")) { var <- all.vars(var) } if (is.null(var)) { ordidx <- unlist(x$attributes$ordinal) KK <- unlist(x$attributes$nordinal) idx <- x$attributes$ordinalparname fix <- lapply(idx,function(z) x$exfix[z]) liability <- x$attributes$liability labels <- x$attributes$labels if (length(ordidx)>0) { val <- names(ordidx) return(structure(val,K=KK,idx=idx,fix=fix,liability=liability,labels=labels,class="ordinal.lvm")) } else return(NULL) } if (K[1]==0L || is.null(K[1]) || (is.logical(K) & !K[1])) { idx <- na.omit(match(var,names(x$attributes$ordinal))) if (length(idx)>0) { pp <- unlist(x$attributes$ordinalparname[idx]) if (!is.null(pp)) parameter(x,remove=TRUE) <- pp if (!is.null(x$attributes$ordinalparname)) x$attributes$ordinalparname <- x$attributes$ordinalparname[-idx] x$attributes$ordinal <- x$attributes$ordinal[-idx] ##x$attributes$labels[var] <- NULL x$attributes$type <- x$attributes$type[-idx] x$attributes$constrainY <- x$attributes$constrainY[setdiff(names(x$attributes$constrainY),var)] x$attributes$liability <- x$attributes$liability[-idx] x$attributes$nordinal <- x$attributes$nordinal[-idx] x$attributes$normal <- x$attributes$normal[-idx] exo <- intersect(var,exogenous(x,latent=TRUE)) if (length(exo)>0) { intercept(x,var) <- NA covariance(x,var) <- NA exogenous(x) <- union(exogenous(x),exo) } } return(x) } if (!missing(p)) breaks <- qnorm(cumsum(p)) if (!is.null(breaks)) { breaks <- ordreg_ithreshold(breaks) K <- length(breaks)+1 } if (!missing(labels)) K <- length(labels) if (length(var)>length(K)) K 
<- rep(K[1],length(var)) if (length(var)==1 && !missing(constrain)) constrain <- list(constrain) if (length(var)>1) { if (!missing(labels) && !is.list(labels)) labels <- rep(list(labels),length(var)) if (!missing(breaks) && !is.list(breaks)) breaks <- rep(list(breaks),length(var)) if (!missing(constrain) && !is.list(constrain)) constrain <- rep(list(constrain),length(var)) } addvar(x) <- var for (i in seq_len(length(var))) { if (K[i]>2 || (K[i]==2 && !liability)) { parname <- paste0(var[i],":",paste(seq(K[i]-1)-1,seq(K[i]-1),sep="|")) newpar <- if (is.null(breaks)) { rep(-1,K[i]-1) } else if (is.list(breaks)) breaks[[i]] else breaks if (length(newpar)2,"categorical","binary") if (K[i]>2) intfix(x,var[i],NULL) <- 0 if (!liability) { mytr <- function(y,p,idx,...) { breaks <- c(-Inf,ordreg_threshold(p[idx]),Inf) as.numeric(cut(y,breaks=breaks))-1 } myalist <- substitute(alist(y=,p=,idx=pp), list(pp=x$attributes$ordinalparname[[var[i]]])) formals(mytr) <- eval(myalist) transform(x,var[i],post=FALSE) <- mytr } } x$attributes$liability[var] <- liability x$attributes$ordinal[var] <- TRUE if (!missing(labels)) { if (length(var)==1) labels <- list(labels) x$attributes$labels[var] <- labels } x$attributes$nordinal[var] <- K x$attributes$normal[var] <- FALSE covfix(x,var,NULL,exo=exo) <- 1 if (is.null(index(x))) index(x) <- reindex(x) return(x) } lava/R/coef.R0000644000176200001440000006764613520655354012470 0ustar liggesusers###{{{ coef.lvm ##' @export `coef.lvm` <- function(object, mean=TRUE, fix=TRUE, symbol=lava.options()$symbol, messages=lava.options()$messages, p, data, vcov, type=9, labels=lava.options()$coef.names, ...) 
{ if (fix) object <- fixsome(object,measurement.fix=FALSE) if (!missing(p)) { coefs <- matrix(NA,nrow=length(p),ncol=4); coefs[,1] <- p rownames(coefs) <- c(coef(object,mean=TRUE,fix=FALSE)[c(seq_len(index(object)$npar.mean))], {if (index(object)$npar>0) paste0("p",seq_len(index(object)$npar)) }, {if (index(object)$npar.ex>0) paste0("e",seq_len(index(object)$npar.ex))} ) if (missing(vcov)) { if (!missing(data) && !is.null(data)) { I <- information(object,p=p,data=data,type="E") myvcov <- solve(I) } else { myvcov <- matrix(NA,length(p),length(p)) } object$vcov <- myvcov } else object$vcov <- vcov coefs[,2] <- sqrt(diag(object$vcov)) coefs[,3] <- coefs[,1]/coefs[,2] coefs[,4] <- 2*(pnorm(abs(coefs[,3]),lower.tail=FALSE)) colnames(coefs) <- c("Estimate","Std. Error", "Z value", "Pr(>|z|)") object$coefficients <- coefs; return(coef.lvmfit(object,type=type,labels=labels,symbol=symbol,...)) } ## Free regression/covariance parameters AP <- matrices(object, paste0("p",seq_len(index(object)$npar))) A <- AP$A; A[index(object)$M1==0] <- "0" ## Only free parameters P <- AP$P; P[index(object)$P1==0] <- "0"; P[upper.tri(P)] <- "0" nn <- vars(object) res <- c() resname <- c() ii <- which(t(A)!="0",arr.ind=TRUE) rname <- paste(nn[ii[,1]],nn[ii[,2]],sep=symbol[1]) if (labels) { rname2 <- t(regfix(Model(object))$labels)[ii] rname[which(!is.na(rname2))] <- rname2[which(!is.na(rname2))] } res <- rname resname <- c(resname,t(A)[ii]) ii <- which(P!="0",arr.ind=TRUE) if (length(symbol)<2) rname <- paste(nn[ii[,2]],nn[ii[,1]],sep=lava.options()$symbol[2]) else rname <- paste(nn[ii[,2]],nn[ii[,1]],sep=symbol[2]) if (labels) { rname2 <- (covfix(Model(object))$labels)[ii] rname[which(!is.na(rname2))] <- rname2[which(!is.na(rname2))] } res <- c(res,rname) resname <- c(resname,P[ii]) names(res) <- resname resnum <- sapply(resname, function(s) char2num(substr(s,2,nchar(s)))) res <- res[order(resnum)] if (mean) { nmean <- sum(index(object)$v1==1) if (nmean>0) { if (!labels) res <- 
c(vars(object)[index(object)$v1==1], res) else { mres <- c() for (i in seq_len(length(index(object)$v1))) { val <- index(object)$v1[i] if (val==1) { if (!is.na(intfix(Model(object))[[i]])) { mres <- c(mres, intfix(Model(object))[[i]]) } else mres <- c(mres, vars(object)[i]) } } res <- c(mres,res) } names(res)[seq_len(nmean)] <- paste0("m",seq_len(nmean)) } } if (!is.null(object$expar) && sum(index(object)$e1==1)>0) { n2 <- names(object$expar)[index(object)$e1==1] if (labels) { count <- 0 for (i in seq_len(length(index(object)$e1))) { if (index(object)$e1[i]==1) { val <- object$exfix[[i]] count <- count+1 if(!is.na(val)) n2[count] <- val } } } names(n2) <- paste0("e",seq_len(length(n2))) res <- c(res,n2) } if (messages>1) { cat(paste(res, collapse="\n")); cat("\n") } if (!is.null(object$order)) res <- res[object$order] res } ###}}} ###{{{ coef.lvmfit ##' @export `coef.lvmfit` <- function(object, type=ifelse(missing(type),-1,2), symbol=lava.options()$symbol, data, std=NULL, labels=lava.options()$coef.names, ##labels=TRUE, vcov, vcov.type, reliability=FALSE, second=FALSE, ...) { res <- (pars.default(object,...)) if (type<0 && !is.null(names(res))) return(res) if (is.null(object$control$meanstructure)) meanstructure <- TRUE else meanstructure <- object$control$meanstructure npar <- index(object)$npar; npar.mean <- index(object)$npar.mean*meanstructure npar.ex <- index(object)$npar.ex para <- parameter(Model(object)) para.idx <- which(vars(object)%in%para) if (inherits(object,"lvm.missing")) { if (length(object$cc)==0) {## No complete cases coefs <- coef(object$estimate) c1 <- coef(Model(object),mean=TRUE,fix=FALSE) c1. 
<- coef(Model(object),mean=FALSE,fix=FALSE) nn <- gsub("^[0-9]*@","",names(coefs)) myorder <- match(c1,nn) myorder.reg <- order(na.omit(match(nn,c1.))) myorder.extra <- c() } else { myorder <- na.omit(modelPar(object$multigroup,seq_len(npar+npar.mean))$p[[object$cc]]) myorder.reg <- na.omit(modelPar(object$multigroup,seq_len(npar))$p[[object$cc]]) myorder.extra <- seq_len(index(object)$npar.ex)+length(myorder) myorder <- c(myorder,myorder.extra) } } else { myorder <- seq_len(npar+npar.mean) myorder.reg <- seq_len(npar) myorder.extra <- seq_len(index(object)$npar.ex)+length(myorder) myorder <- c(myorder,myorder.extra) } if (type<0) { names(res)[seq_len(length(myorder))] <- coef(Model(object),fix=FALSE, mean=meanstructure, symbol=symbol)[order(myorder)] return(res) } latent.var <- latent(object) Type <- Var <- From <- c() Astd <- Pstd <- vstd <- NULL if (!is.null(std)) { stdCoef <- stdcoef(object) { switch(tolower(std), latent = {Astd=stdCoef$Astar; Pstd=stdCoef$Pstar; vstd=stdCoef$vstar}, y = {Astd=stdCoef$AstarY; Pstd=stdCoef$PstarY; vstd=stdCoef$vstarY}, xy = {Astd=stdCoef$AstarXY; Pstd=stdCoef$PstarXY; vstd=stdCoef$vstarXY}, yx = {Astd=stdCoef$AstarXY; Pstd=stdCoef$PstarXY; vstd=stdCoef$vstarXY} ) } } myparnames <- paste0("p",seq_len(npar+npar.ex))[myorder.reg] p <- matrices(Model(object), myparnames) A <- p$A P <- p$P mycoef <- object$coef if (!missing(vcov.type) | !missing(vcov)) { if (!missing(vcov)) { mycoef[,2] <- sqrt(diag(vcov))[myorder] } else { if (!missing(data)) myvcov <- information(object,type=vcov.type,data=data,inverse=TRUE) else myvcov <- information(object,type=vcov.type,inverse=TRUE) mycoef[,2] <- sqrt(diag(myvcov))[myorder] } mycoef[,3] <- mycoef[,1]/mycoef[,2] mycoef[,4] <- 2*(pnorm(abs(mycoef[,3]),lower.tail=FALSE)) } coefs <- mycoef[myorder,,drop=FALSE] nn <- colnames(A) free <- A!="0" free[index(object)$M1!=1] <- FALSE nlincon <- matrix(Model(object)$par%in%names(constrain(Model(object))),nrow(A)) if (missing(data)) { data <- 
matrix(0,ncol=length(index(Model(object))$manifest)); colnames(data) <- index(Model(object))$manifest } nlincon.estimates.full<- constraints(object,second=second,data=data) nlincon.estimates <- nlincon.estimates.full[,-(5:6),drop=FALSE] matched <- c() res <- c() for (i in seq_len(ncol(A))) for (j in seq_len(nrow(A))) { val <- A[j,i] if (val!="0") { matching <- match(val,rownames(coefs)) matched <- c(matched,matching) if (!is.na(matching)) { if (free[j,i]) newrow <- matrix(coefs[matching,],nrow=1) else { newrow <- matrix(c(coefs[matching,1],NA,NA,NA), nrow=1) } } else { Debug(list("(i,j)", i, ",", j)) if (nlincon[j,i]) { newrow <- matrix(nlincon.estimates[Model(object)$par[j,i],],nrow=1) } else { newrow <- matrix(c(Model(object)$fix[j,i], NA, NA, NA), nrow=1) } } if (!is.null(std)) { newrow <- cbind(newrow,Astd[j,i]) } if (labels & !is.na(regfix(Model(object))$labels[j,i])) { rownames(newrow) <- regfix(Model(object))$labels[j,i] if (labels>1) { newst <- paste0(nn[i],symbol[1],nn[j]) if (rownames(newrow)!=newst) rownames(newrow) <- paste(rownames(newrow),newst,sep=":") } } else { rownames(newrow) <- paste0(nn[i],symbol[1],nn[j]) } if (free[j,i] | type>2) { res <- rbind(res, newrow) Type <- c(Type,"regression") Var <- c(Var, nn[i]) From <- c(From, nn[j]) } } } free.var <- P!="0" free.var[index(object)$P1!=1] <- FALSE nlincon.var <- matrix(Model(object)$covpar%in%names(constrain(Model(object))),nrow(P)) if (type>0) ## Variance estimates: for (i in seq_len(ncol(p$P))) for (j in seq(i,nrow(p$P))) { val <- p$P[j,i] if (!(i%in%para.idx)) if (val!="0" & !any(vars(object)[c(i,j)]%in%index(Model(object))$exogenous)) if (type>1 | !all(vars(object)[c(i,j)]%in%index(Model(object))$manifest)) { matching <- match(val,rownames(coefs)) matched <- c(matched,matching) if (!is.na(matching)) { if (free.var[j,i]) newrow <- matrix(coefs[matching,],nrow=1) else newrow <- matrix(c(coefs[matching,1],NA,NA,NA), nrow=1) ## We don't want to report p-values of tests on the boundary of the 
parameter space if (i==j) newrow[,4] <- NA } else { Debug(list("(i,j)", i, ",", j)) if (nlincon.var[j,i]) { newrow <- matrix(nlincon.estimates[Model(object)$covpar[j,i],],nrow=1) } else { newrow <- matrix(c(Model(object)$covfix[j,i], NA, NA, NA), nrow=1) } } if (!missing(std)) { newrow <- cbind(newrow,Pstd[i,j]) } if (length(symbol)<2) { if (nn[i]!=nn[j]) { part2 <- paste(nn[i],nn[j],sep=lava.options()$symbol[2]) } else part2 <- nn[i] } else { part2 <- paste0(nn[i],symbol[2],nn[j]) } if (labels & !is.na(covfix(Model(object))$labels[j,i])) { rownames(newrow) <- covfix(Model(object))$labels[j,i] if (labels>1) { if (rownames(newrow)!=part2) rownames(newrow) <- paste(rownames(newrow),part2,sep=":") } } else { rownames(newrow) <- part2 } if ((free.var[j,i]) | type>2) { res <- rbind(res, newrow) Type <- c(Type,"variance") Var <- c(Var, nn[i]) From <- c(From, nn[j]) } } } ## Mean parameter: nlincon.mean <- lapply(Model(object)$mean, function(x) x%in%names(constrain(Model(object))) ) if (type>0 & npar.mean>0) { midx <- seq_len(npar.mean) rownames(coefs)[midx] <- paste0("m",myorder[midx]) munames <- rownames(coefs)[seq_len(npar.mean)] meanpar <- matrices(Model(object), myparnames, munames)$v for (i in seq_len(length(meanpar))) { if (!index(Model(object))$vars[i]%in%index(Model(object))$exogenous) { val <- meanpar[i] matching <- match(val,rownames(coefs)) if (!is.na(matching)) { if (index(object)$v1[i]==1) ## if free-parameter newrow <- matrix(coefs[matching,],nrow=1) else newrow <- matrix(c(coefs[matching,1],NA,NA,NA), nrow=1) } else { if (nlincon.mean[[i]]) { newrow <- matrix(nlincon.estimates[Model(object)$mean[[i]],],nrow=1) } else { newrow <- matrix(c(as.numeric(meanpar[i]), NA, NA, NA), nrow=1) } } if (!missing(std)) { newrow <- cbind(newrow,vstd[i]) } if (labels & !(is.na(intfix(Model(object))[[i]]) | is.numeric(intfix(Model(object))[[i]]))) { rownames(newrow) <- intfix(Model(object))[[i]] if (labels>1) { if (rownames(newrow)!=index(Model(object))$vars[i]) 
rownames(newrow) <- paste(rownames(newrow),index(Model(object))$vars[i],sep=":") } } else { rownames(newrow) <- index(Model(object))$vars[i] } if ((index(object)$v1[i]==1) | type>2) { res <- rbind(res, newrow) Type <- c(Type,ifelse(!(i%in%para.idx),"intercept","parameter")) Var <- c(Var, index(Model(object))$vars[i]) From <- c(From, NA) } } } } if (type>0 && length(myorder.extra>0)) { cc <- coefs[myorder.extra,,drop=FALSE] rownames(cc) <- rownames(index(object)$epar)[which(index(object)$e1==1)] cc <- cbind(cc,rep(NA,ncol(res)-ncol(cc))) res <- rbind(res,cc) Type <- c(Type,rep("extra",length(myorder.extra))) Var <- c(Var,rep(NA,length(myorder.extra))) From <- c(From,rep(NA,length(myorder.extra))) } mycolnames <- colnames(coefs) if (!is.null(std)) mycolnames <- c(mycolnames, paste("std",std,sep=".")) colnames(res) <- mycolnames attributes(res)$type <- Type attributes(res)$var <- Var attributes(res)$from <- From attributes(res)$latent <- latent.var attributes(res)$nlincon <- nlincon.estimates.full return(res) } ###}}} coef.lvmfit ###{{{ coef.multigroup ##' @export coef.multigroup <- function(object,...) { return(object$parpos) } ###}}} coef.multigroup ###{{{ coef.multigroupfit ##' @export coef.multigroupfit <- function(object, type=0,vcov, ext=FALSE, labels=lava.options()$coef.names, symbol=lava.options()$symbol, covsymb=NULL,groups=NULL,...) { if (type==0) { res <- pars(object); if (is.null(names(res))) names(res) <- object$model$name return(res) } if (type==1) { theta <- pars(object) if (missing(vcov)) theta.sd <- sqrt(diag(object$vcov)) else theta.sd <- sqrt(diag(vcov)) res <- cbind(theta,theta.sd,(Z <- theta/theta.sd),2*(pnorm(abs(Z),lower.tail=FALSE))) if (is.null(rownames(res))) rownames(res) <- object$model$name colnames(res) <- c("Estimate","Std. Error", "Z value", "Pr(>|z|)") return(res) } cc <- coef(object, type=1, symbol=symbol, ...) 
model <- Model(object) parpos <- modelPar(model, seq_len(nrow(cc)))$p npar.mean <- object$model$npar.mean npar <- object$model$npar mynames <- c() if (npar.mean>0) { mynames <- unlist(object$model$meanlist) mynames <- names(mynames)[!duplicated(mynames)] } if (npar>0) { mynames <- c(mynames,object$model$par) } res <- list() misrow <- list() parpos2 <- list() if (is.null(groups)) groups <- seq(model$ngroup) if (length(groups)==0) groups <- seq(model$ngroup) for (i in groups) { orignames <- coef(object$model0$lvm[[i]],fix=FALSE,mean=object$meanstructure, messages=0, symbol=lava.options()$symbol) if (ext) { newnames. <- coef(Model(model)[[i]],fix=FALSE, mean=object$meanstructure, messages=0, labels=labels, symbol=symbol) newnames <- coef(Model(model)[[i]],fix=FALSE, mean=object$meanstructure, messages=0, labels=labels,symbol=lava.options()$symbol) newcoef <- matrix(NA,ncol=4,nrow=length(newnames)) rownames(newcoef) <- newnames. idx <- match(orignames,newnames) newcoef[idx,] <- cc[parpos[[i]],,drop=FALSE] newparpos <- rep(NA,length(newnames)) newparpos[idx] <- parpos[[i]] parpos2 <- c(parpos2, list(newparpos)) misrow <- c(misrow, list(setdiff(seq_len(length(newnames)),idx))) } else { newcoef <- cc[parpos[[i]],,drop=FALSE] rownames(newcoef) <- orignames } colnames(newcoef) <- colnames(cc) ## Position of variance parameters: varpos <- variances(Model(model)[[i]],mean=FALSE) ## Number of parameters resp mean-parameters p <- nrow(newcoef); p0 <- length(coef(Model(model)[[i]],fix=FALSE, mean=FALSE, messages=0)) newcoef[(p-p0) + varpos,4] <- NA res <- c(res, list(newcoef)) } if (ext) { for (i in seq(length(groups))) { if (length(misrow[[i]])>0) { nn <- rownames(res[[i]])[misrow[[i]]] for (j in setdiff(seq_len(length(groups)),i)) { nn2 <- rownames(res[[j]]) matching <- na.omit(match(nn,nn2)) matching <- setdiff(matching,misrow[[j]]) if (length(matching)>0) { idxj <- match(nn2[matching],nn2) idxi <- match(nn2[matching],rownames(res[[i]])) res[[i]][nn2[matching],] <- 
res[[j]][nn2[matching],] parpos2[[i]][idxi] <- parpos2[[j]][idxj] nn <- setdiff(nn,nn2[matching]) } if (length(nn)<1) break; } } } attributes(res)$parpos <- parpos2 } return(res) } ###}}} ###{{{ CoefMat ##' @export CoefMat.multigroupfit <- function(x,type=9, labels=lava.options()$coef.names, symbol=lava.options()$symbol[1], data=NULL,groups=seq(Model(x)$ngroup),...) { cc <- coef(x,type=type,ext=TRUE,symbol=symbol,data=data,groups=groups) parpos <- attributes(cc)$parpos res <- c() nlincon.estimates <- c() nlincon.names <- c() count <- k <- 0 for (i in groups) { k <- k+1 m0 <- Model(Model(x))[[i]] mycoef <- cc[[k]] npar <- index(m0)$npar npar.mean <- index(m0)$npar.mean if (npar>0) rownames(mycoef)[(seq(npar))+npar.mean] <- paste0("p",seq(npar)) m0$coefficients <- mycoef m0$opt$estimate <- mycoef[,1] Vcov <- vcov(x)[parpos[[k]],parpos[[k]],drop=FALSE]; colnames(Vcov) <- rownames(Vcov) <- rownames(mycoef) m0$vcov <- Vcov cc0 <- coef.lvmfit(m0,type=type,labels=labels,symbol=symbol) attributes(cc0)$dispname <- x$opt$dispname res <- c(res, list(CoefMat(cc0))) newnlin <- attributes(cc0)$nlincon if (length(newnlin)>0) if (count==0) { count <- count+1 nlincon.estimates <- newnlin nlincon.names <- rownames(newnlin) } else { for (j in seq_len(NROW(newnlin))) { if (!(rownames(newnlin)[j]%in%nlincon.names)) { nlincon.estimates <- rbind(nlincon.estimates,newnlin[j,,drop=FALSE]) nlincon.names <- c(nlincon.names,rownames(newnlin)[j]) } } } } rownames(nlincon.estimates) <- nlincon.names attributes(res)$nlincon <- nlincon.estimates return(res) } ##' @export CoefMat <- function(x, digits = max(3, getOption("digits") - 2), type=9, symbol=lava.options()$symbol[1],...) { cc <- x if (!is.matrix(x)) { cc <- coef(x,type=type,symbol=symbol,...) 
} res <- c() mycoef <- format(round(cc,max(1,digits)),digits=digits) mycoef[,4] <- formatC(cc[,4],digits=digits-1,format="g", preserve.width="common",flag="") mycoef[is.na(cc)] <- "" mycoef[cc[,4]<1e-12,4] <- " <1e-12" M <- ncol(cc) Nreg <- sum(attributes(cc)$type=="regression") Nvar <- sum(attributes(cc)$type=="variance") Nint <- sum(attributes(cc)$type=="intercept") latent.var <- attributes(cc)$latent if (Nreg>0) { reg.idx <- which(attributes(cc)$type=="regression") latent.from <- which(attributes(cc)$from[reg.idx]%in%latent.var) latent.from <- latent.from[which(is.na(match(attributes(cc)$var[latent.from],latent.var)))] reg.idx <- setdiff(reg.idx,latent.from) Nmeas <- length(latent.from) if (Nmeas>0) { first.entry <- c() for (i in latent.var) { pos <- match(i,attributes(cc)$from[latent.from]) if (!is.na(pos)) first.entry <- c(first.entry, pos) } res <- rbind(res, c("Measurements:",rep("",M))) count <- 0 Delta <- FALSE for (i in latent.var) { count <- count+1 Delta <- !Delta Myidx <- which(attributes(cc)$from==i & attributes(cc)$type=="regression" & !(attributes(cc)$var%in%latent.var)) prefix <- ifelse(Delta," "," ") for (j in Myidx) { newrow <- mycoef[j,] newname <- rownames(cc)[j] res <- rbind(res,c(paste(prefix,newname),newrow)) } } } if ((Nreg-Nmeas)>0) { responses <- unique(attributes(cc)$var[reg.idx]) first.entry <- c() for (i in responses) { pos <- match(i,attributes(cc)$var[reg.idx]) first.entry <- c(first.entry, pos) } res <- rbind(res, c("Regressions:",rep("",M))) count <- 0 Delta <- FALSE for (i in reg.idx) { count <- count+1 newrow <- mycoef[i,] newname <- rownames(cc)[i] if (count%in%first.entry) Delta <- !Delta prefix <- ifelse(Delta," "," ") res <- rbind(res,c(paste(prefix,newname),newrow)) } } } if (Nint>0) { int.idx <- which(attributes(cc)$type=="intercept") res <- rbind(res, c("Intercepts:",rep("",M))) for (i in int.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] res <- rbind(res,c(paste(" ",newname),newrow)) } } par.idx <- 
which(attributes(cc)$type=="parameter") parres <- rbind(c("Additional Parameters:",rep("",M))) for (i in par.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] parres <- rbind(parres,c(paste(" ",newname),newrow)) } extra.idx <- which(attributes(cc)$type=="extra") for (i in extra.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] parres <- rbind(parres,c(paste(" ",newname),newrow)) } if (nrow(parres)>1) res <- rbind(res,parres) if (Nvar>0) { var.idx <- which(attributes(cc)$type=="variance") vname <- "Residual Variances:" if (!is.list(x)) { if (!is.null(attributes(x)$dispname)) vname <- attributes(x)$dispname } else if (!is.null(x$opt$dispname)) vname <- x$opt$dispname res <- rbind(res, c(vname,rep("",M))) for (i in var.idx) { newrow <- mycoef[i,] newname <- rownames(cc)[i] res <- rbind(res,c(paste(" ",newname),newrow)) } } res0 <- res[,-1] rownames(res0) <- format(res[,1],justify="left") res0 } ###}}} CoefMat ###{{{ standardized coefficients stdcoef <- function(x,p=coef(x),...) { M0 <- moments(x,p=p,...) A <- t(M0$A) P <- M0$P v <- M0$v C <- M0$Cfull N <- diag(sqrt(diag(C)),ncol=nrow(C)); colnames(N) <- rownames(N) <- vars(x) iN <- N; diag(iN)[diag(N)>0] <- 1/diag(iN)[diag(N)>0] diag(iN)[diag(N)==0] <- NA Nn <- N; Nn[] <- 0; diag(Nn) <- 1 Nn[latent(x),latent(x)] <- N[latent(x),latent(x)] iNn <- Nn; diag(iNn) <- 1/diag(Nn) Ny <- Nn; Ny[endogenous(x),endogenous(x)] <- N[endogenous(x),endogenous(x)] iNy <- Ny; diag(iNy) <- 1/diag(Ny) ## Standardized w.r.t. latent,y and x: AstarXY <- t(iN%*%A%*%N) PstarXY <- iN%*%P%*%iN if (!is.null(v)) vstarXY <- iN%*%v else vstarXY <- NULL pstdXY <- pars(Model(x),A=AstarXY,P=PstarXY,v=vstarXY) ## Standardized w.r.t. latent, y: AstarY <- t(iNy%*%A%*%Ny) PstarY <- iNy%*%P%*%iNy if (!is.null(v)) vstarY <- iNy%*%v else vstarY <- NULL ## Standardized w.r.t. 
latent only: Astar <- t(iNn%*%A%*%Nn) Pstar <- iNn%*%P%*%iNn if (!is.null(v)) vstar <- iNn%*%v else vstar <- NULL pstd <- pars(Model(x),A=Astar,Pstar,v=vstar) k <- length(p)-length(pstd) res <- list(par=cbind(p,c(pstd,rep(NA,k)),c(pstdXY,rep(NA,k))), AstarXY=AstarXY, PstarXY=PstarXY, vstarXY=vstarXY, AstarY=AstarY, PstarY=PstarY, vstarY=vstarY, Astar=Astar, Pstar=Pstar, vstar=vstar) return(res) } ###}}} standardized coefficients lava/R/kappa.R0000644000176200001440000000216013520655354012624 0ustar liggesusers################################################## ## Cohen's kappa ################################################## ##' @export kappa.multinomial <- function(z,all=FALSE,...) { pp <- length(coef(z)) if ((length(z$levels)!=2) || !(identical(z$levels[[1]],z$levels[[2]]))) stop("Expected square table and same factor levels in rows and columns") k <- length(z$levels[[1]]) zeros <- rbind(rep(0,pp)) A0 <- zeros; A0[diag(z$position)] <- 1 A <- matrix(0,ncol=pp,nrow=2*k) for (i in seq(k)) A[i,z$position[i,]] <- 1 for (i in seq(k)) A[i+k,z$position[,i]] <- 1 b <- estimate(z,function(p) as.vector(rbind(A0,A)%*%p),iid=TRUE) b2 <- estimate(b,function(p) c(p[1],sum(p[seq(k)+1]*p[seq(k)+k+1])),iid=TRUE) if (!all) { return(estimate(b2,function(p) list(kappa=(p[1]-p[2])/(1-p[2])),iid=TRUE,...)) } estimate(b2,function(p) list(kappa=(p[1]-p[2])/(1-p[2]),agree=p[1], independence=p[2]),iid=TRUE,...) } ##' @export kappa.table <- function(z,...) { kappa(multinomial(Expand(z)),...) } ##' @export kappa.data.frame <- function(z,...) { kappa(multinomial(z),...) 
} lava/R/nodecolor.R0000644000176200001440000000246413520655354013523 0ustar liggesusers##' @export `nodecolor<-` <- function(object,var,...,value) UseMethod("nodecolor<-") ##' @export `nodecolor<-.lvm` <- function(object, var=vars(object), border, labcol, shape, lwd, ..., value) { if (length(var)>0 & length(value)>0) { if (inherits(var,"formula")) var <- all.vars(var) object$noderender$fill[var] <- value if (!missing(border)) object$noderender$col[var] <- border if (!missing(shape)) object$noderender$shape[var] <- shape if (!missing(labcol)) object$noderender$textCol[var] <- labcol if (!missing(lwd)) object$noderender$lwd[var] <- lwd } return(object) } ##' @export `nodecolor<-.default` <- function(object, var=vars(object), border, labcol, shape, lwd, ..., value) { if (length(var)>0 & length(value)>0) { if (inherits(var,"formula")) var <- all.vars(var) object <- addattr(object,attr="fill",var=var,val=value) if (!missing(border)) object <- addattr(object,attr="col",var=var,val=border) if (!missing(shape)) object <- addattr(object,attr="shape",var=var,val=shape) if (!missing(labcol)) object <- addattr(object,attr="textCol",var=var,val=labcol) if (!missing(lwd)) object <- addattr(object,attr="lwd",var=var,val=lwd) } return(object) } lava/R/model.frame.R0000644000176200001440000000103513520655354013721 0ustar liggesusers##' @export model.frame.lvmfit <- function(formula, all=FALSE,...) { mydata <- formula$data$model.frame if (!is.data.frame(mydata) & !is.matrix(mydata)) return(mydata) if (all) return(mydata) ## xfix <- colnames(mydata)[(colnames(mydata)%in%parlabels(formula$model0,exo=TRUE))] xfix <- colnames(mydata)[(colnames(mydata)%in%parlabels(formula$model0))] return( mydata[,c(manifest(formula),xfix),drop=FALSE] ) } ##' @export model.frame.multigroupfit <- function(formula,...) { mydata <- formula$model$data return(mydata) } lava/R/moments.R0000644000176200001440000000375013520655354013220 0ustar liggesusersMoments <- function(x,p,data,conditional=TRUE,...) 
{ } ##' @export `moments` <- function(x,...) UseMethod("moments") ##' @export moments.lvmfit <- function(x, p=pars(x),...) moments(Model(x),p=p,...) ##' @export moments.lvm.missing <- function(x, p=pars(x), ...) { idx <- match(coef(Model(x)),names(coef(x))) moments.lvmfit(x,p=p[idx],...) } ##' @export moments.lvm <- function(x, p, debug=FALSE, conditional=FALSE, data=NULL, latent=FALSE, ...) { ### p: model-parameters as obtained from e.g. 'startvalues'. ### (vector of regression parameters and variance parameters) ### meanpar: mean-parameters (optional) ii <- index(x) pp <- modelPar(x,p) AP <- with(pp, matrices(x,p,meanpar=meanpar,epars=p2,data=data,...)) P <- AP$P v <- AP$v if (!is.null(v)) { names(v) <- ii$vars } J <- ii$J if (conditional) { J <- ii$Jy if (latent) { J <- diag(nrow=length(ii$vars))[sort(c(ii$endo.idx,ii$eta.idx)),,drop=FALSE] } px <- ii$px exo <- exogenous(x) ## if (missing(row)) { v <- rbind(v) %x% cbind(rep(1,nrow(data))) if (length(ii$exo.idx)>0) { v[,ii$exo.idx] <- as.matrix(data[,exo]) } ## } else { ## if (!is.null(v)) ## v[exo] <- as.numeric(data[row,exo]) ## } P <- px%*% tcrossprod(P, px) } Im <- diag(nrow=nrow(AP$A)) if (ii$sparse) { IAi <- with(AP, as(Inverse(Im-t(A)),"sparseMatrix")) ##IAi <- as(solve(Matrix::Diagonal(nrow(A))-t(A)),"sparseMatrix") G <- as(J%*%IAi,"sparseMatrix") } else { IAi <- Inverse(Im-t(AP$A)) G <- J%*%IAi } xi <- NULL if (!is.null(v)) { xi <- v%*%t(G) ## Model-specific mean vector } Cfull <- as.matrix(IAi %*% tcrossprod(P,IAi)) C <- as.matrix(J %*% tcrossprod(Cfull,J)) return(list(Cfull=Cfull, C=C, v=v, e=AP$e, xi=xi, A=AP$A, P=P, IAi=IAi, J=J, G=G, npar=ii$npar, npar.reg=ii$npar.reg, npar.mean=ii$npar.mean, npar.ex=ii$npar.ex, parval=AP$parval, constrain.idx=AP$constrain.idx, constrainpar=AP$constrainpar)) } lava/R/wkm.R0000644000176200001440000000556413520655354012341 0ustar liggesusers## kmeans++ ## Assign centre of first cluster randomly from observations. 
## 1) Calculate min distances of observations to current clusters, D ## 2) Sample new cluster centre from distribution p(x) = D^2(x)/sum(D^2) ## Repeat 1-2 until all clusters are assigned kmpp <- function(y,k=2) { Dist <- function(y1,y2) sum(y1-y2)^2 n <- NROW(y) ii <- numeric(k) u <- runif(k) ii[1] <- sample(n,1) D <- matrix(0,n,k-1) for (i in seq_len(k-1)+1) { D[,i-1] <- apply(y,1,function(x) Dist(x,y[ii[i-1],])) D2 <- apply(D[,seq(i-1),drop=FALSE],1,min) pdist <- cumsum(D2/sum(D2)) ii[i] <- mets::fast.approx(pdist,u[i]) } return(ii) } ##' Weighted K-means ##' ##' Weighted K-means via Lloyd's algorithm ##' @param x Data (or formula) ##' @param mu Initial centers (or number centers chosen randomly among x) ##' @param data optional data frmae ##' @param weights Optional weights ##' @param iter.max Max number of iterations ##' @param n.start Number of restarts ##' @param init method to create initial centres (default kmeans++) ##' @param ... Additional arguments to lower level functions ##' @export ##' @author Klaus K. Holst ##' wkm <- function(x, mu, data, weights=rep(1,NROW(x)), iter.max=20, n.start=5, init="kmpp", ...) 
{ ## Lloyd's algorithm if (inherits(x, "formula")) x <- stats::model.matrix(x,data=data) x <- cbind(x) random.start <- TRUE if (is.list(mu)) { random.start <- FALSE n.start=1 K <- length(mu) } else { K <- mu } sswmin <- Inf mus <- ssws <- NULL cl0 <- rep(1,NROW(x)) for (k in seq(n.start)) { if (random.start) { if (!exists(init)) { ## Random select centres idx <- sample(NROW(x),K) } else { idx <- do.call(init, list(x, K)) } mu <- lapply(idx, function(i) cbind(x)[i,,drop=TRUE]) } mus <- c(mus, list(mu)) for (i in seq(iter.max)) { d <- Reduce(cbind,lapply(mu, function(m) weights*colSums((t(x)-m)*(t(x)-m)))) cl <- apply(d,1,which.min) for (j in seq_along(mu)) { idx <- which(cl==j) if (length(idx)) { mu[[j]] <- colSums(cbind(apply(x[idx,,drop=FALSE],2, function(x) x*weights[idx])))/sum(weights[idx]) } } if (sum(cl0-cl)==0L) break; # No change in assigment } ssw <- sum(d[cbind(seq(NROW(d)),cl)]) ssws <- c(ssws,ssw) if (ssw < sswmin) { sswmin <- ssw clmin <- cl mumin <- mu } } mu <- structure(mu,class="by",dim=K,dimnames=list(class=seq(K))) withinclusterss <- as.vector(by(d[cbind(seq(NROW(d)),cl)],cl,sum)) return(list(cluster=cl, center=mu, ssw=withinclusterss)) } lava/R/merge.R0000644000176200001440000001310313520655354012626 0ustar liggesusers##' @export `%++%.lvm` <- function(x,y) merge(x,y) ##' @export "+.lvm" <- function(x,...) { merge(x,...) } ## ##' @export ## "+.lm" <- function(x,...) { ## merge(x,...) ## } ##' @export merge.lvm <- function(x,y,...) { objects <- list(x,y,...) if (length(objects)<2) return(x) m <- objects[[1]] for (i in seq(2,length(objects))) { m2 <- objects[[i]] if (length(latent(m2))>0) latent(m) <- latent(m2) if (length(m2$constrain)>0) m$constrain <- c(m$constrain,m2$constrain) M <- (index(m2)$A) P <- (index(m2)$P) nn <- vars(m2) for (j in seq_len(nrow(M))) { if (any(idx <- M[j,]!=0)) { val <- as.list(rep(NA,sum(idx==TRUE))) if (any(idx. <- !is.na(m2$par[j,idx]))) val[idx.] <- m2$par[j,idx][idx.] if (any(idx. 
<- !is.na(m2$fix[j,idx]))) val[idx.] <- m2$fix[j,idx][idx.] regression(m,to=nn[idx],from=nn[j],messages=0) <- val } P0 <- P[j,]; P0[seq_len(j-1)] <- 0 idx <- P[j,]!=0 | m2$covfix[j,]==0 idx[is.na(idx)] <- FALSE if (any(idx)) { val <- as.list(rep(NA,sum(idx==TRUE))) if (any(idx. <- !is.na(m2$covpar[j,idx]))) val[idx.] <- m2$covpar[j,idx][idx.] if (any(idx. <- !is.na(m2$covfix[j,idx]))) val[idx.] <- m2$covfix[j,idx][idx.] covariance(m,nn[idx],nn[j],messages=0) <- val } } intercept(m,nn) <- intercept(m2) m2x <- exogenous(m2) if (length(m2x)>0) exogenous(m) <- c(exogenous(m),m2x) } index(m) <- reindex(m) return(m) } ##' @export "+.estimate" <- function(x,...) { merge(x,...) } ##' @export merge.estimate <- function(x,y,...,id,paired=FALSE,labels=NULL,keep=NULL,subset=NULL) { objects <- list(x,y, ...) if (length(nai <- names(objects)=="NA")>0) names(objects)[which(nai)] <- "" if (!missing(subset)) { coefs <- unlist(lapply(objects, function(x) coef(x)[subset])) } else { coefs <- unlist(lapply(objects,coef)) } if (!is.null(labels)) { names(coefs) <- labels } else { names(coefs) <- make.unique(names(coefs)) } if (!missing(id) && is.null(id)) { ## Independence between datasets in x,y,... nn <- unlist(lapply(objects,function(x) nrow(x$iid))) cnn <- c(0,cumsum(nn)) id <- list() for (i in seq_along(nn)) id <- c(id,list(seq(nn[i])+cnn[i])) } if (missing(id)) { if (paired) { ## One-to-one dependence between observations in x,y,... 
id <- rep(list(seq(nrow(x$iid))),length(objects)) } else { id <- lapply(objects,function(x) x$id) } } else { nn <- unlist(lapply(objects,function(x) NROW(iid(x)))) if (length(id)==1 && is.logical(id)) { if (id) { if (any(nn[1]!=nn)) stop("Expected objects of the same size: ", paste(nn,collapse=",")) id0 <- seq(nn[1]); id <- c() for (i in seq(length(nn))) id <- c(id,list(id0)) } else { id <- c() N <- cumsum(c(0,nn)) for (i in seq(length(nn))) id <- c(id,list(seq(nn[i])+N[i])) } } if (length(id)!=length(objects)) stop("Same number of id-elements as model objects expected") idlen <- unlist(lapply(id,length)) if (!identical(idlen,nn)) stop("Wrong lengths of 'id': ", paste(idlen,collapse=","), "; ", paste(nn,collapse=",")) } ##if (any(unlist(lapply(id,is.null)))) stop("Id needed for each model object") ##iid <- Reduce("cbind",lapply(objects,iid)) ids <- iidall <- c(); count <- 0 for (z in objects) { count <- count+1 clidx <- NULL id0 <- id[[count]] iidz <- iid(z) if (is.null(id0)) { id0 <- rownames(iidz) if (is.null(id0)) stop("Need id for object number ", count) } if (!missing(subset)) iidz <- iidz[,subset,drop=FALSE] if (!lava.options()$cluster.index) { iid0 <- matrix(unlist(by(iidz,id0,colSums)),byrow=TRUE,ncol=ncol(iidz)) ids <- c(ids, list(sort(unique(id0)))) } else { if (!requireNamespace("mets",quietly=TRUE)) stop("'mets' package required") clidx <- mets::cluster.index(id0,mat=iidz,return.all=TRUE) iid0 <- clidx$X ids <- c(ids, list(id0[as.vector(clidx$firstclustid)+1])) } iidall <- c(iidall, list(iid0)) } id <- unique(unlist(ids)) iid0 <- matrix(0,nrow=length(id),ncol=length(coefs)) model.index <- c() colpos <- 0 for (i in seq(length(objects))) { relpos <- seq_along(coef(objects[[i]])) if (!missing(subset)) relpos <- seq_along(subset) iid0[match(ids[[i]],id),relpos+colpos] <- iidall[[i]] model.index <- c(model.index,list(relpos+colpos)) colpos <- colpos+tail(relpos,1) } rownames(iid0) <- id res <- estimate.default(NULL, coef=coefs, stack=FALSE, data=NULL, 
iid=iid0, id=id, keep=keep) res$model.index <- model.index return(res) } ##' @export merge.lm <- function(x,y,...) { args <- c(list(x,y),list(...)) nn <- names(formals(merge.estimate)[-seq(3)]) idx <- na.omit(match(nn,names(args))) models <- args; models[idx] <- NULL mm <- lapply(args,function(x) tryCatch(estimate(x),error=function(e) NULL)) names(mm)[1:2] <- c("x","y") ii <- which(unlist(lapply(mm,is.null))) if (length(ii)>0) mm[ii] <- NULL do.call(merge,c(mm,args[idx])) } ##' @export merge.glm <- merge.lm ##' @export merge.lvmfit <- merge.lm ##' @export merge.multinomial <- function(x,...) { merge.estimate(x,...) } lava/R/vec.R0000644000176200001440000000135213520655354012307 0ustar liggesusers##' vec operator ##' ##' Convert array into vector ##' @title vec operator ##' @param x Array ##' @param matrix If TRUE a row vector (matrix) is returned ##' @param sep Seperator ##' @param ... Additional arguments ##' @author Klaus Holst ##' @export vec <- function(x,matrix=FALSE,sep=".",...) { if (is.vector(x) && !is.list(x)) { res <- x } else if (is.list(x)) { res <- stats::setNames(unlist(x),names(x)) } else { if (is.matrix(x) && is.null(rownames(x))) { nn <- colnames(x) } else { nn <- apply(expand.grid(dimnames(x)),1,function(x) paste(x,collapse=sep)) } res <- as.vector(x); names(res) <- nn } if (matrix) return(cbind(res)) return(res) } lava/R/compare.R0000644000176200001440000001577513520655354013176 0ustar liggesusers##' Performs Likelihood-ratio, Wald and score tests ##' @title Statistical tests ##' @aliases compare ##' @export ##' @param object \code{lvmfit}-object ##' @param \dots Additional arguments to low-level functions ##' @return Matrix of test-statistics and p-values ##' @author Klaus K. 
Holst ##' @seealso \code{\link{modelsearch}}, \code{\link{equivalence}} ##' @keywords htest ##' @examples ##' m <- lvm(); ##' regression(m) <- c(y1,y2,y3) ~ eta; latent(m) <- ~eta ##' regression(m) <- eta ~ x ##' m2 <- regression(m, c(y3,eta) ~ x) ##' set.seed(1) ##' d <- sim(m,1000) ##' e <- estimate(m,d) ##' e2 <- estimate(m2,d) ##' ##' compare(e) ##' ##' compare(e,e2) ## LRT, H0: y3<-x=0 ##' compare(e,scoretest=y3~x) ## Score-test, H0: y3~x=0 ##' compare(e2,par=c("y3~x")) ## Wald-test, H0: y3~x=0 ##' ##' B <- diag(2); colnames(B) <- c("y2~eta","y3~eta") ##' compare(e2,contrast=B,null=c(1,1)) ##' ##' B <- rep(0,length(coef(e2))); B[1:3] <- 1 ##' compare(e2,contrast=B) ##' ##' compare(e,scoretest=list(y3~x,y2~x)) compare <- function(object,...) UseMethod("compare") ##' @export compare.default <- function(object,...,par,contrast,null,scoretest,Sigma,level=.95,df=NULL) { if (!missing(par) || (!missing(contrast) && is.character(contrast))) { if (!missing(contrast) && is.character(contrast)) par <- contrast contrast <- rep(0,length(coef(object))) myidx <- parpos(Model(object),p=par) contrast[myidx] <- 1 contrast <- diag(contrast,nrow=length(contrast))[which(contrast!=0),,drop=FALSE] if (!missing(null) && length(null)>1) null <- null[attributes(myidx)$ord] } ### Wald test if (!missing(contrast)) { B <- contrast p <- coef(object) pname <- names(p) B <- rbind(B); colnames(B) <- if (is.vector(contrast)) names(contrast) else colnames(contrast) if (missing(Sigma)) { Sigma <- vcov(object) } if (ncol(B)0, id=d$id, average=TRUE) ##' ##' ## More examples with clusters: ##' m <- lvm(c(y1,y2,y3)~u+x) ##' d <- sim(m,10) ##' l1 <- glm(y1~x,data=d) ##' l2 <- glm(y2~x,data=d) ##' l3 <- glm(y3~x,data=d) ##' ##' ## Some random id-numbers ##' id1 <- c(1,1,4,1,3,1,2,3,4,5) ##' id2 <- c(1,2,3,4,5,6,7,8,1,1) ##' id3 <- seq(10) ##' ##' ## Un-stacked and stacked i.i.d. decomposition ##' iid(estimate(l1,id=id1,stack=FALSE)) ##' iid(estimate(l1,id=id1)) ##' ##' ## Combined i.i.d. 
decomposition ##' e1 <- estimate(l1,id=id1) ##' e2 <- estimate(l2,id=id2) ##' e3 <- estimate(l3,id=id3) ##' (a2 <- merge(e1,e2,e3)) ##' ##' ## If all models were estimated on the same data we could use the ##' ## syntax: ##' ## Reduce(merge,estimate(list(l1,l2,l3))) ##' ##' ## Same: ##' iid(a1 <- merge(l1,l2,l3,id=list(id1,id2,id3))) ##' ##' iid(merge(l1,l2,l3,id=TRUE)) # one-to-one (same clusters) ##' iid(merge(l1,l2,l3,id=FALSE)) # independence ##' ##' ##' ## Monte Carlo approach, simple trend test example ##' ##' m <- categorical(lvm(),~x,K=5) ##' regression(m,additive=TRUE) <- y~x ##' d <- simulate(m,100,seed=1,'y~x'=0.1) ##' l <- lm(y~-1+factor(x),data=d) ##' ##' f <- function(x) coef(lm(x~seq_along(x)))[2] ##' null <- rep(mean(coef(l)),length(coef(l))) ## just need to make sure we simulate under H0: slope=0 ##' estimate(l,f,R=1e2,null.sim=null) ##' ##' estimate(l,f) ##' @aliases estimate estimate.default estimate.estimate merge.estimate ##' @method estimate default ##' @export estimate.default <- function(x=NULL,f=NULL,...,data,id, iddata,stack=TRUE,average=FALSE,subset, score.deriv,level=0.95,iid=robust, type=c("robust","df","mbn"), keep,use, regex=FALSE, contrast,null,vcov,coef, robust=TRUE,df=NULL, print=NULL,labels,label.width, only.coef=FALSE,back.transform=NULL, folds=0, cluster, R=0, null.sim) { cl <- match.call(expand.dots=TRUE) if (!missing(use)) { p0 <- c("f","contrast","only.coef","subset","average","keep","labels") cl0 <- cl cl0[c("use",p0)] <- NULL cl0$keep <- use cl$x <- eval(cl0,parent.frame()) cl[c("vcov","use")] <- NULL return(eval(cl,parent.frame())) } expr <- suppressWarnings(inherits(try(f,silent=TRUE),"try-error")) if (!missing(coef)) { pp <- coef } else { pp <- suppressWarnings(try(stats::coef(x),"try-error")) if (inherits(x,"survreg") && length(pp)0) && robust) && (missing(vcov) || is.null(vcov) || (is.logical(vcov) && vcov[1]==FALSE && !is.na(vcov[1])))) { ## If user supplied vcov, then don't estimate IC if (missing(score.deriv)) { if 
(!is.logical(iid)) { iidtheta <- iid iid <- TRUE } else { suppressWarnings(iidtheta <- iid(x,folds=folds)) } } else { suppressWarnings(iidtheta <- iid(x,score.deriv=score.deriv,folds=folds)) } } else { if (missing(vcov) || (is.logical(vcov) && !is.na(vcov)[1])) suppressWarnings(vcov <- stats::vcov(x)) iidtheta <- NULL } if (!missing(subset)) { e <- substitute(subset) expr <- suppressWarnings(inherits(try(subset,silent=TRUE),"try-error")) if (expr) subset <- eval(e,envir=data) ##subset <- eval(e, data, parent.frame()) if (is.character(subset)) subset <- data[,subset] if (is.numeric(subset)) subset <- subset>0 } idstack <- NULL ## Preserve id from 'estimate' object if (missing(id) && inherits(x,"estimate") && !is.null(x$id)) id <- x$id if (!missing(id) && iid) { if (is.null(iidtheta)) stop("'iid' method needed") nprev <- nrow(iidtheta) if (inherits(id,"formula")) { id <- interaction(get_all_vars(id,data)) } ## e <- substitute(id) ## expr <- suppressWarnings(inherits(try(id,silent=TRUE),"try-error")) ## if (expr) id <- eval(e,envir=data) ##if (!is.null(data)) id <- eval(e, data) if (is.logical(id) && length(id)==1) { id <- if(is.null(iidtheta)) seq(nrow(data)) else seq(nprev) stack <- FALSE } if (is.character(id) && length(id)==1) id <- data[,id,drop=TRUE] if (!is.null(iidtheta)) { if (length(id)!=nprev) { if (!is.null(x$na.action) && (length(id)==length(x$na.action)+nprev)) { warning("Applying na.action") id <- id[-x$na.action] } else stop("Dimensions of i.i.d decomposition and 'id' does not agree") } } else { if (length(id)!=nrow(data)) { if (!is.null(x$na.action) && (length(id)==length(x$na.action)+nrow(data))) { warning("Applying na.action") id <- id[-x$na.action] } else stop("Dimensions of i.i.d decomposition and 'id' does not agree") } } if (stack) { N <- nrow(iidtheta) clidx <- NULL atr <- attributes(iidtheta) atr$dimnames <- NULL atr$dim <- NULL if (!lava.options()$cluster.index) { iidtheta <- 
matrix(unlist(by(iidtheta,id,colSums)),byrow=TRUE,ncol=ncol(iidtheta)) attributes(iidtheta)[names(atr)] <- atr idstack <- sort(unique(id)) } else { clidx <- mets::cluster.index(id,mat=iidtheta,return.all=TRUE) iidtheta <- clidx$X attributes(iidtheta)[names(atr)] <- atr idstack <- id[as.vector(clidx$firstclustid)+1] } if (is.null(attributes(iidtheta)$N)) { attributes(iidtheta)$N <- N } } else idstack <- id } else { if (!is.null(data)) idstack <- rownames(data) } if (!is.null(iidtheta) && (length(idstack)==nrow(iidtheta))) rownames(iidtheta) <- idstack if (!robust) { if (inherits(x,"lm") && family(x)$family=="gaussian" && is.null(df)) df <- x$df.residual if (missing(vcov)) suppressWarnings(vcov <- stats::vcov(x)) } if (!is.null(iidtheta) && robust && (missing(vcov) || is.null(vcov))) { ## if (is.null(f)) V <- crossprod(iidtheta) ### Small-sample corrections for clustered data K <- NROW(iidtheta) N <- attributes(iidtheta)$N if (is.null(N)) N <- K p <- NCOL(iidtheta) adj0 <- K/(K-p) ## Mancl & DeRouen, 2001 adj1 <- K/(K-1) ## Mancl & DeRouen, 2001 adj2 <- (N-1)/(N-p)*(K/(K-1)) ## Morel,Bokossa & Neerchal, 2003 if (tolower(type[1])=="mbn" && !is.null(attributes(iidtheta)$bread)) { V0 <- V iI0 <- attributes(iidtheta)$bread I0 <- Inverse(iI0) ##I1 <- crossprod(iidtheta%*%I0) delta <- min(0.5,p/(K-p)) phi <- max(1,tr(I0%*%V0)*adj2/p) V <- adj2*V0 + delta*phi*iI0 } if (tolower(type[1])=="df") { V <- adj0*V } if (tolower(type[1])=="df1") { V <- adj1*V } if (tolower(type[1])=="df2") { V <- adj2*V } } else { if (!missing(vcov)) { if (length(vcov)==1 && is.na(vcov)) vcov <- matrix(NA,length(pp),length(pp)) V <- vcov } else { suppressWarnings(V <- stats::vcov(x)) } } ## Simulate p-value if (R>0) { if (is.null(f)) stop("Supply function 'f'") if (missing(null.sim)) null.sim <- rep(0,length(pp)) est <- f(pp) if (is.list(est)) { nn <- names(est) est <- unlist(est) names(est) <- nn } if (missing(labels)) { labels <- colnames(rbind(est)) } res <- 
simnull(R,f,mu=null.sim,sigma=V,labels=labels) return(structure(res, class=c("estimate.sim","sim"), coef=pp, vcov=V, f=f, estimate=est)) } if (!is.null(f)) { form <- names(formals(f)) dots <- ("..."%in%names(form)) form0 <- setdiff(form,"...") parname <- "p" if (!is.null(form)) parname <- form[1] # unless .Primitive if (length(form0)==1 && !(form0%in%c("object","data"))) { ##names(formals(f))[1] <- "p" parname <- form0 } if (!is.null(iidtheta)) { arglist <- c(list(object=x,data=data,p=vec(pp)),list(...)) names(arglist)[3] <- parname } else { arglist <- c(list(object=x,p=vec(pp)),list(...)) names(arglist)[2] <- parname } if (!dots) { arglist <- arglist[intersect(form0,names(arglist))] } newf <- NULL if (length(form)==0) { arglist <- list(vec(pp)) ##newf <- function(p,...) do.call("f",list(p,...)) newf <- function(...) do.call("f",list(...)) val <- do.call("f",arglist) } else { val <- do.call("f",arglist) if (is.list(val)) { nn <- names(val) val <- do.call("cbind",val) ##newf <- function(p,...) do.call("cbind",f(p,...)) newf <- function(...) do.call("cbind",f(...)) } } k <- NCOL(val) N <- NROW(val) D <- attributes(val)$grad if (is.null(D)) { D <- numDeriv::jacobian(function(p,...) 
{ if (length(form)==0) arglist[[1]] <- p else arglist[[parname]] <- p if (is.null(newf)) return(do.call("f",arglist)) return(do.call("newf",arglist)) }, pp) } if (is.null(iidtheta)) { pp <- structure(as.vector(val),names=names(val)) V <- D%*%V%*%t(D) } else { if (!average || (N1) { ## More than one parameter (and depends on data) if (!missing(subset)) { ## Conditional estimate val <- apply(val,2,function(x) x*subset) } D0 <- matrix(nrow=k,ncol=length(pp)) for (i in seq_len(k)) { D1 <- D[seq(N)+(i-1)*N,,drop=FALSE] if (!missing(subset)) ## Conditional estimate D1 <- apply(D1,2,function(x) x*subset) D0[i,] <- colMeans(D1) } D <- D0 iid2 <- iidtheta%*%t(D) } else { ## Single parameter if (!missing(subset)) { ## Conditional estimate val <- val*subset D <- apply(rbind(D),2,function(x) x*subset) } D <- colMeans(rbind(D)) iid2 <- iidtheta%*%D } pp <- vec(colMeans(cbind(val))) iid1 <- (cbind(val)-rbind(pp)%x%cbind(rep(1,N)))/N if (!missing(id)) { if (!lava.options()$cluster.index) iid1 <- matrix(unlist(by(iid1,id,colSums)),byrow=TRUE,ncol=ncol(iid1)) else { iid1 <- mets::cluster.index(id,mat=iid1,return.all=FALSE) } } if (!missing(subset)) { ## Conditional estimate phat <- mean(subset) iid3 <- cbind(-1/phat^2 * (subset-phat)/N) ## check if (!missing(id)) { if (!lava.options()$cluster.index) { iid3 <- matrix(unlist(by(iid3,id,colSums)),byrow=TRUE,ncol=ncol(iid3)) } else { iid3 <- mets::cluster.index(id,mat=iid3,return.all=FALSE) } } iidtheta <- (iid1+iid2)/phat + rbind(pp)%x%iid3 pp <- pp/phat V <- crossprod(iidtheta) } else { if (nrow(iid1)!=nrow(iid2)) { message("Assuming independence between model iid decomposition and new data frame") V <- crossprod(iid1) + crossprod(iid2) } else { iidtheta <- iid1+iid2 V <- crossprod(iidtheta) } } } } } if (is.null(V)) { res <- cbind(pp,NA,NA,NA,NA) } else { if (length(pp)==1) res <- rbind(c(pp,diag(V)^0.5)) else res <- cbind(pp,diag(V)^0.5) beta0 <- res[,1] if (!missing(null) && missing(contrast)) beta0 <- beta0-null if (!is.null(df)) 
{ za <- qt(1-alpha/2,df=df) pval <- 2*pt(abs(res[,1]/res[,2]),df=df,lower.tail=FALSE) } else { za <- qnorm(1-alpha/2) pval <- 2*pnorm(abs(res[,1]/res[,2]),lower.tail=FALSE) } res <- cbind(res,res[,1]-za*res[,2],res[,1]+za*res[,2],pval) } colnames(res) <- c("Estimate","Std.Err",alpha.str,"P-value") if (nrow(res)>0) if (!is.null(nn)) { rownames(res) <- nn } else { nn <- attributes(res)$varnames if (!is.null(nn)) rownames(res) <- nn if (is.null(rownames(res))) rownames(res) <- paste0("p",seq(nrow(res))) } if (NROW(res)==0L) { coefs <- NULL } else { coefs <- res[,1,drop=TRUE]; names(coefs) <- rownames(res) } res <- structure(list(coef=coefs,coefmat=res,vcov=V, iid=NULL, print=print, id=idstack),class="estimate") if (iid) ## && is.null(back.transform)) res$iid <- iidtheta if (length(coefs)==0L) return(res) if (!missing(contrast) | !missing(null)) { p <- length(res$coef) if (missing(contrast)) contrast <- diag(nrow=p) if (missing(null)) null <- 0 if (is.vector(contrast) || is.list(contrast)) { contrast <- contr(contrast, names(res$coef), ...) 
## if (length(contrast)==p) contrast <- rbind(contrast) ## else { ## cont <- contrast ## contrast <- diag(nrow=p)[cont,,drop=FALSE] ## } } cc <- compare(res,contrast=contrast,null=null,vcov=V,level=level,df=df) res <- structure(c(res, list(compare=cc)),class="estimate") if (!is.null(df)) { pval <- with(cc,pt(abs(estimate[,1]-null)/estimate[,2],df=df,lower.tail=FALSE)*2) } else { pval <- with(cc,pnorm(abs(estimate[,1]-null)/estimate[,2],lower.tail=FALSE)*2) } res$coefmat <- with(cc, cbind(estimate,pval)) colnames(res$coefmat)[5] <- "P-value" rownames(res$coefmat) <- cc$cnames if (!is.null(res$iid)) { res$iid <- res$iid%*%t(contrast) colnames(res$iid) <- cc$cnames } res$compare$estimate <- NULL res$coef <- res$compare$coef res$vcov <- res$compare$vcov names(res$coef) <- gsub("(^\\[)|(\\]$)","",rownames(res$coefmat)) } if (!is.null(back.transform)) { res$coefmat[,c(1,3,4)] <- do.call(back.transform,list(res$coefmat[,c(1,3,4)])) res$coefmat[,2] <- NA } if (!missing(keep) && !is.null(keep)) { if (is.character(keep)) { if (regex) { nn <- rownames(res$coefmat) keep <- unlist(lapply(keep, function(x) grep(x,nn, perl=TRUE))) } else { keep <- match(keep,rownames(res$coefmat)) } } res$coef <- res$coef[keep] res$coefmat <- res$coefmat[keep,,drop=FALSE] if (!is.null(res$iid)) res$iid <- res$iid[,keep,drop=FALSE] res$vcov <- res$vcov[keep,keep,drop=FALSE] } if (!missing(labels)) { names(res$coef) <- labels if (!is.null(res$iid)) colnames(res$iid) <- labels colnames(res$vcov) <- rownames(res$vcov) <- labels rownames(res$coefmat) <- labels } if (!missing(label.width)) { rownames(res$coefmat) <- make.unique(unlist(lapply(rownames(res$coefmat), function(x) toString(x,width=label.width)))) } if (only.coef) return(res$coefmat) res$call <- cl res$back.transform <- back.transform res$n <- nrow(data) res$ncluster <- nrow(res$iid) return(res) } simnull <- function(R,f,mu,sigma,labels=NULL) { X <- rmvn0(R,mu=mu,sigma=sigma) est <- f(mu) res <- apply(X,1,f) if (is.list(est)) { nn <- 
names(est) est <- unlist(est) names(est) <- nn res <- matrix(unlist(res),byrow=TRUE,ncol=length(est)) } else { res <- t(rbind(res)) } if (is.null(labels)) { labels <- colnames(rbind(est)) if (is.null(labels)) labels <- paste0("p",seq_along(est)) } colnames(res) <- labels return(res) } ##' @export estimate.estimate.sim <- function(x,f,R=0,labels,...) { atr <- attributes(x) if (R>0) { if (missing(f)) { val <- simnull(R,f=atr[["f"]],mu=atr[["coef"]],sigma=atr[["vcov"]]) res <- rbind(x,val) for (a in setdiff(names(atr),c("dim","dimnames"))) attr(res,a) <- atr[[a]] } else { res <- simnull(R,f=f,mu=atr[["coef"]],sigma=atr[["vcov"]]) for (a in setdiff(names(atr),c("dim","dimnames","f"))) attr(res,a) <- atr[[a]] attr(f,"f") <- f est <- unlist(f(atr[["coef"]])) if (missing(labels)) labels <- colnames(rbind(est)) attr(res,"estimate") <- est } if (!missing(labels)) colnames(res) <- labels return(res) } if (missing(f)) { if (!missing(labels)) colnames(res) <- labels return(x) } est <- f(atr[["coef"]]) res <- apply(x,1,f) if (is.list(est)) { res <- matrix(unlist(res),byrow=TRUE,ncol=length(est)) } else { res <- t(rbind(res)) } if (missing(labels)) { labels <- colnames(rbind(est)) if (is.null(labels)) labels <- paste0("p",seq_along(est)) } colnames(res) <- labels for (a in setdiff(names(atr),c("dim","dimnames","f","estimate"))) attr(res,a) <- atr[[a]] attr(f,"f") <- f attr(res,"estimate") <- unlist(est) return(res) } ##' @export print.estimate.sim <- function(x,level=.95,...) { quantiles <- c((1-level)/2,1-(1-level)/2) est <- attr(x,"estimate") mysummary <- function(x,INDEX,...) { x <- as.vector(x) res <- c(mean(x,na.rm=TRUE), sd(x,na.rm=TRUE), quantile(x,quantiles,na.rm=TRUE), est[INDEX], mean(abs(x)>abs(est[INDEX]),na.rm=TRUE)) names(res) <- c("Mean","SD",paste0(quantiles*100,"%"), "Estimate","P-value") res } env <- new.env() assign("est",attr(x,"estimate"),env) environment(mysummary) <- env print(summary(x,fun=mysummary,...)) } estimate.glm <- function(x,...) 
{ estimate.default(x,...) } ##' @export print.estimate <- function(x, type=0L, digits=4L, width=25L, std.error=TRUE, p.value=TRUE, sep="_______",sep.which, sep.labels=NULL,indent=" ", unique.names=TRUE, na.print="", ...) { if (!is.null(x$print)) { x$print(x,digits=digits,width=width,...) return(invisible(x)) } if (type>0 && !is.null(x$call)) { cat("Call: "); print(x$call) printline(50) } if (type>0) { if (!is.null(x[["n"]]) && !is.null(x[["k"]])) { cat("n = ",x[["n"]],", clusters = ",x[["k"]],"\n\n",sep="") } else { if (!is.null(x[["n"]])) { cat("n = ",x[["n"]],"\n\n",sep="") } if (!is.null(x[["k"]])) { cat("n = ",x[["k"]],"\n\n",sep="") } } } cc <- x$coefmat if (!is.null(rownames(cc)) && unique.names) rownames(cc) <- make.unique(unlist(lapply(rownames(cc), function(x) toString(x,width=width)))) if (!std.error) cc <- cc[,-2,drop=FALSE] if (!p.value) cc[,-ncol(cc),drop=FALSE] sep.pos <- c() if (missing(sep.which) && !is.null(x$model.index)) { sep.which <- unlist(lapply(x$model.index,function(x) tail(x,1)))[-length(x$model.index)] } if (missing(sep.which)) sep.which <- NULL if (!is.null(sep.which)) { sep0 <- 0%in%sep.which if (sep0) sep.which <- setdiff(sep.which,0) cc0 <- c() sep.which <- c(0,sep.which,nrow(cc)) N <- length(sep.which)-1 for (i in seq(N)) { if ((sep.which[i]+1)<=nrow(cc)) cc0 <- rbind(cc0, cc[seq(sep.which[i]+1,sep.which[i+1]),,drop=FALSE]) if (i0) rownames(cc)[sep.pos] <- rep(paste0(rep("_",max(nchar(rownames(cc)))),collapse=""),length(sep.pos)) } print(cc,digits=digits,na.print=na.print,...) 
if (!is.null(x$compare)) { cat("\n",x$compare$method[3],"\n") cat(paste(" ",x$compare$method[-(1:3)],collapse="\n"),"\n") if (length(x$compare$method)>4) { out <- character() out <- with(x$compare, c(out, paste(names(statistic), "=", format(round(statistic, 4))))) out <- with(x$compare, c(out, paste(names(parameter), "=", format(round(parameter,3))))) fp <- with(x$compare, format.pval(p.value, digits = digits)) out <- c(out, paste("p-value", if (substr(fp, 1L, 1L) == "<") fp else paste("=", fp))) cat(" ",strwrap(paste(out, collapse = ", ")), sep = "\n") } } } ##' @export vcov.estimate <- function(object,list=FALSE,...) { res <- object$vcov nn <- names(coef(object,...)) if (list && !is.null(object$model.index)) { return(lapply(object$model.index, function(x) object$vcov[x,x])) } dimnames(res) <- list(nn,nn) res } ##' @export coef.estimate <- function(object,mat=FALSE,list=FALSE,messages=lava.options()$messages,...) { if (mat) return(object$coefmat) if (messages>0 && !is.null(object$back.transform)) message("Note: estimates on original scale (before 'back.transform')") if (list && !is.null(object$model.index)) { return(lapply(object$model.index, function(x) object$coef[x])) } object$coef } ##' @export summary.estimate <- function(object,...) { p <- coef(object,messages=0) test <- estimate(coef=p,vcov=vcov(object,messages=0), contrast=as.list(seq_along(p)),...) object$compare <- test$compare object <- object[c("coef","coefmat","vcov","call","ncluster","model.index","compare")] class(object) <- "summary.estimate" object } ##' @export coef.summary.estimate <- function(object,...) { object$coefmat } ##' @export print.summary.estimate <- function(x,...) { print.estimate(x,type=2L,...) } ##' @export iid.estimate <- function(x,...) { if (is.null(x$iid)) return(NULL) dimn <- dimnames(x$iid) if (!is.null(dimn)) { dimn[[2]] <- names(coef(x)) } else { dimn <- list(NULL,names(coef(x))) } structure(x$iid,dimnames=dimn) } ##' @export model.frame.estimate <- function(formula,...) 
{ NULL } lava/R/profile.R0000644000176200001440000000361713520655354013200 0ustar liggesusers##' @export profile.lvmfit <- function(fitted,idx,tau,...) { mm <- parfix(Model(fitted),idx,tau) index(mm) <- reindex(mm,zeroones=TRUE,deriv=TRUE) fixed <- attributes(mm)$fixed plogl <- function(tau0) { for (i in fixed$v) { mm$mean[[i]] <- tau0 } for (i in seq_len(nrow(fixed$A))) { index(mm)$A[fixed$A[i,1],fixed$A[i,2]] <- mm$fix[fixed$A[i,1],fixed$A[i,2]] <- tau0 } for (i in seq_len(nrow(fixed$P))) { index(mm)$P[fixed$P[i,1],fixed$P[i,2]] <- mm$covfix[fixed$P[i,1],fixed$P[i,2]] <- tau0 } for (i in length(fixed$e)) { index(mm)$exfix[i] <- tau0 } dots <- list(...) if (!is.null(dots$control)) control <- dots$control else control <- list() control$start <- coef(fitted) dots$control <- control dots$index <- FALSE dots$fix <- FALSE dots$messages <- 0 dots$quick <- TRUE dots$data <- model.frame(fitted) dots$x <- mm ee <- do.call("estimate",dots) return(logLik(mm,p=ee,data=dots$data)) } val <- sapply(tau,plogl) attributes(val) <- NULL val } profci.lvmfit <- function(x,parm,level=0.95,interval=NULL,curve=FALSE,n=20,lower=TRUE,upper=TRUE,...) { ll <- logLik(x)-qchisq(level,1)/2 pp <- function(tau) (profile.lvmfit(x,parm,tau) - ll) tau0 <- coef(x)[parm] tau0.sd <- x$vcov[parm,parm]^0.5 if (is.null(interval)) { interval <- tau0 + 3*c(-1,1)*tau0.sd if (parm%in%(variances(x)+index(x)$npar.mean)) interval[1] <- max(1e-5,interval[1]) } if (curve) { xx <- seq(interval[1],interval[2],length.out=n) val <- sapply(xx,pp) res <- cbind(par=xx,val=val) return(res) } low <- up <- NA if (lower) low <- uniroot(pp,interval=c(interval[1],tau0))$root if (upper) up <- uniroot(pp,interval=c(tau0,interval[2]))$root ## res <- rbind(lower$root,upper$root); rownames(res) <- coef() return(c(low,up)) } lava/R/lisrel.R0000644000176200001440000000303213520655354013021 0ustar liggesusers lisrel <- function(model,p,X=NULL,muX=NULL,varX=NULL,...) 
{ mom <- moments(model,p) A <- t(index(model)$M) eta.idx <- match(latent(model),vars(model)) exo.idx <- match(exogenous(model),vars(model)) y <- setdiff(manifest(model), exogenous(model)) y.idx <- match(y, vars(model)) ## Jy <- Jx <- Jeta <- I <- diag(length(vars(model))) ## if (length(eta.idx)>0) ## J[eta.idx,eta.idx] <- 0; J <- J[-eta.idx,] ## Jeta[obs.idx,obs.idx] <- 0; Jeta <- J[-obs.idx,] A <- t(mom$A) Lambda <- A[y.idx,eta.idx,drop=FALSE] K <- A[y.idx,exo.idx,drop=FALSE] B <- A[eta.idx,eta.idx,drop=FALSE] I <- diag(nrow=nrow(B)) Gamma <- A[eta.idx,exo.idx,drop=FALSE] V <- mom$P Psi <- V[eta.idx,eta.idx] ## Residual variance Theta <- V[y.idx,y.idx] ## - IBi <- if (ncol(I)>0) solve(I-B) else I LIBi <- Lambda%*%IBi Phi <- LIBi%*%Gamma + K Veta.x <- IBi%*%Psi%*%IBi ## Variance of eta given x COVetay.x <- Veta.x%*%t(Lambda) ## Covariance of eta,y given x ## Vy.x <- Lambda%*%COVetay.x + Theta ## Omega Vy.x <- LIBi%*%Psi%*%t(LIBi) + Theta if (!is.null(X)) { Ey.x <- t(apply(as.matrix(X)%*% t(LIBi%*%Gamma + K),1,function(x) x + mom$v[y.idx])) } else Ey.x <- NULL CV <- COVetay.x%*%Vy.x ## Sigma <- Vy.x + Phi%*%varX%*%t(Phi) return(list(mu=mom$v, Lambda=Lambda, K=K, B=B, I=I, Gamma=Gamma, Psi=Psi, Theta=Theta, IBi=IBi, LIBi=LIBi, Phi=Phi, Vy.x=Vy.x, Veta.x=Veta.x, COVetay.x=COVetay.x, CV=CV, Ey.x=Ey.x)) } lava/R/fplot.R0000644000176200001440000000376713520655354012672 0ustar liggesusers##' Faster plot via RGL ##' @title fplot ##' @export ##' @examples ##' if (interactive()) { ##' data(iris) ##' fplot(Sepal.Length ~ Petal.Length+Species, data=iris, size=2, type="s") ##' } ##' @param x X variable ##' @param y Y variable ##' @param z Z variable (optional) ##' @param xlab x-axis label ##' @param ylab y-axis label ##' @param ... 
additional arggument to lower-level plot functions ##' @param z.col color (use argument alpha to set transparency) ##' @param data data.frame ##' @param add if TRUE use current active device ##' @param aspect aspect ratio ##' @param zoom zoom level fplot <- function(x,y,z=NULL,xlab,ylab,...,z.col=topo.colors(64), data=parent.frame(),add=FALSE,aspect=c(1,1),zoom=0.8) { if (!requireNamespace("rgl",quietly=TRUE)) stop("Requires 'rgl'") if (inherits(x,"formula")) { y <- getoutcome(x) x <- attributes(y)$x if (length(x)>1) { z <- as.numeric(with(data, get(x[2]))) } if (length(x)==0) { x <- seq(nrow(data)) if (missing(xlab)) xlab <- "Index" } else { if (missing(xlab)) xlab <- x[1] x <- with(data, get(x[1])) } if (missing(ylab)) ylab <- y y <- with(data, get(y)) } else { if (missing(y)) { y <- x if (missing(ylab)) ylab <- deparse(substitute(x)) x <- seq(nrow(data)) if (missing(xlab)) xlab <- "Index" } else { if (missing(xlab)) xlab <- deparse(substitute(x)) if (missing(ylab)) ylab <- deparse(substitute(y)) } } rgl::.check3d() if (!is.null(z)) { ncol <- length(z.col); glut <- approxfun(seq(min(z),max(z),length.out=ncol),seq(ncol)) rgl::plot3d(x,y,0,col=z.col[round(glut(z))],xlab=xlab,ylab=ylab,add=add,...) } else { rgl::plot3d(x,y,0,xlab=xlab,ylab=ylab,add=add,...) 
} rgl::view3d(0,0,fov=0,zoom=zoom) rgl::aspect3d(c(aspect,1)) } lava/R/sim.default.R0000644000176200001440000004247413520655354013757 0ustar liggesusers##' Wrapper function for mclapply ##' ##' @export ##' @param x function or 'sim' object ##' @param R Number of replications or data.frame with parameters ##' @param f Optional function (i.e., if x is a matrix) ##' @param colnames Optional column names ##' @param messages Messages ##' @param mc.cores Number of cores to use ##' @param cl (optional) cluster to use for parallelization ##' @param blocksize Split computations in blocks ##' @param type type=0 is an alias for messages=1,mc.cores=1,blocksize=R ##' @param seed (optional) Seed (needed with cl=TRUE) ##' @param args (optional) list of named arguments passed to (mc)mapply ##' @param iter If TRUE the iteration number is passed as first argument to (mc)mapply ##' @param ... Additional arguments to (mc)mapply ##' @aliases sim.default as.sim ##' @seealso summary.sim plot.sim print.sim ##' @examples ##' m <- lvm(y~x+e) ##' distribution(m,~y) <- 0 ##' distribution(m,~x) <- uniform.lvm(a=-1.1,b=1.1) ##' transform(m,e~x) <- function(x) (1*x^4)*rnorm(length(x),sd=1) ##' ##' onerun <- function(iter=NULL,...,n=2e3,b0=1,idx=2) { ##' d <- sim(m,n,p=c("y~x"=b0)) ##' l <- lm(y~x,d) ##' res <- c(coef(summary(l))[idx,1:2], ##' confint(l)[idx,], ##' estimate(l,only.coef=TRUE)[idx,2:4]) ##' names(res) <- c("Estimate","Model.se","Model.lo","Model.hi", ##' "Sandwich.se","Sandwich.lo","Sandwich.hi") ##' res ##' } ##' val <- sim(onerun,R=10,b0=1,messages=0,mc.cores=1) ##' val ##' ##' val <- sim(val,R=40,b0=1,mc.cores=1) ## append results ##' summary(val,estimate=c(1,1),confint=c(3,4,6,7),true=c(1,1)) ##' ##' summary(val,estimate=c(1,1),se=c(2,5),names=c("Model","Sandwich")) ##' summary(val,estimate=c(1,1),se=c(2,5),true=c(1,1),names=c("Model","Sandwich"),confint=TRUE) ##' ##' if (interactive()) { ##' plot(val,estimate=1,c(2,5),true=1,names=c("Model","Sandwich"),polygon=FALSE) ##' 
plot(val,estimate=c(1,1),se=c(2,5),main=NULL, ##' true=c(1,1),names=c("Model","Sandwich"), ##' line.lwd=1,col=c("gray20","gray60"), ##' rug=FALSE) ##' plot(val,estimate=c(1,1),se=c(2,5),true=c(1,1), ##' names=c("Model","Sandwich")) ##' } ##' ##' f <- function(a=1,b=1) { ##' rep(a*b,5) ##' } ##' R <- Expand(a=1:3,b=1:3) ##' sim(f,R,type=0) ##' sim(function(a,b) f(a,b), 3, args=c(a=5,b=5),type=0) ##' sim(function(iter=1,a=5,b=5) iter*f(a,b), type=0, iter=TRUE, R=5) sim.default <- function(x=NULL,R=100,f=NULL,colnames=NULL, messages=lava.options()$messages, mc.cores,blocksize=2L*mc.cores, cl,type=1L,seed=NULL,args=list(),iter=FALSE,...) { stm <- proc.time() oldtm <- rep(0,5) if (missing(mc.cores) || .Platform$OS.type=="windows") { if (.Platform$OS.type=="windows") { ## Disable parallel processing on windows mc.cores <- 1L } else { mc.cores <- getOption("mc.cores",parallel::detectCores()) } } if (type==0L) { mc.cores <- 1L if (inherits(R,c("matrix","data.frame")) || length(R)>1) { blocksize <- NROW(R) } else { blocksize <- R } messages <- 0 } if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } if (mc.cores>1L || !missing(cl)) requireNamespace("parallel",quietly=TRUE) newcl <- FALSE if (!missing(cl) && is.logical(cl)) { if (.Platform$OS.type=="windows" || TRUE) { ## Don't fork processes on windows cl <- NULL mc.cores <- 1 } else { if (cl) { cl <- parallel::makeForkCluster(mc.cores) if (!is.null(seed)) parallel::clusterSetRNGStream(cl,seed) newcl <- TRUE } } } olddata <- NULL dots <- list(...) 
mycall <- match.call(expand.dots=FALSE) if (inherits(x,c("data.frame","matrix"))) olddata <- x if (inherits(x,"sim")) { oldtm <- attr(x,"time") oldcall <- attr(x,"call") x <- attr(x,"f") if (!is.null(f)) x <- f ex <- oldcall[["..."]] for (nn in setdiff(names(ex),names(dots))) { dots[[nn]] <- ex[[nn]] val <- list(ex[[nn]]); names(val) <- nn mycall[["..."]] <- c(mycall[["..."]],list(val)) } } else { if (!is.null(f)) x <- f if (!is.function(x)) stop("Expected a function or 'sim' object.") } if (is.null(x)) stop("Must give new function argument 'f'.") res <- val <- NULL on.exit({ if (messages>0) close(pb) if (newcl) parallel::stopCluster(cl) if (is.null(colnames) && !is.null(val)) { if (is.matrix(val[[1]])) { colnames <- base::colnames(val[[1]]) } else { colnames <- names(val[[1]]) } } base::colnames(res) <- colnames if (!is.null(olddata)) res <- rbind(olddata,res) attr(res,"call") <- mycall attr(res,"f") <- x class(res) <- c("sim","matrix") if (idx.done1) { parval_provided <- TRUE parval <- as.data.frame(R) if (is.vector(R)) names(parval) <- NULL else if (inherits(R,c("matrix","data.frame"))) names(parval) <- colnames(R) R <- NROW(parval) } else { parval <- as.data.frame(1:R) names(parval) <- NULL } nfolds <- max(1,round(R/blocksize)) idx <- split(1:R,sort((1:R)%%nfolds)) idx.done <- 0 count <- 0 if (messages>0) pb <- txtProgressBar(style=lava.options()$progressbarstyle,width=40) robx <- function(iter__,...) 
tryCatch(x(...),error=function(e) NA) if (iter) formals(robx)[[1]] <- NULL for (ii in idx) { count <- count+1 if (!missing(cl) && !is.null(cl)) { pp <- c(as.list(parval[ii,,drop=FALSE]),dots,list(cl=cl,fun=robx,SIMPLIFY=FALSE),args) } else { pp <- c(as.list(parval[ii,,drop=FALSE]),dots,list(mc.cores=mc.cores,FUN=robx,SIMPLIFY=FALSE),args) } ##if (!iter & !parval_provided) pp[[1]] <- NULL if (mc.cores>1) { if (!missing(cl) && !is.null(cl)) { val <- do.call(parallel::clusterMap,pp) } else { val <- do.call(parallel::mcmapply,pp) } } else { pp$mc.cores <- NULL val <- do.call(mapply,pp) } if (messages>0) setTxtProgressBar(pb, count/length(idx)) if (is.null(res)) { ##res <- array(NA,dim=c(R,dim(val[[1]])),dimnames=c(list(NULL),dimnames(val[[1]]),NULL)) res <- matrix(NA,ncol=length(val[[1]]),nrow=R) } res[ii,] <- Reduce(rbind,val) ##rr <- abind::abind(val,along=length(dim(res))) ##res[ii,] <- abind(val,along=length(dim(res))) idx.done <- max(ii) } } ##' @export "[.sim" <- function (x, i, j, drop = FALSE) { atr <- attributes(x) if (!is.null(dim(x))) { class(x) <- "matrix" } else { class(x) <- class(x)[-1] } x <- NextMethod("[",drop=drop) atr.keep <- c("call","time") if (missing(j)) atr.keep <- c(atr.keep,"f") attributes(x)[atr.keep] <- atr[atr.keep] if (!drop) class(x) <- c("sim",class(x)) x } ##' @export "as.sim" <- function (object, name, ...) { if (is.vector(object)) { object <- (structure(cbind(object), class=c("sim", "matrix"))) if (!missing(name)) colnames(object) <- name return(object) } structure(object, class=c("sim", class(object))) } Time <- function(sec,print=FALSE,...) { h <- sec%/%3600 m0 <- (sec%%3600) m <- m0%/%60 s <- m0%%60 res <- c(h=h,m=m,s=s) if (print) { if (h>0) cat(h,"h ",sep="") if (m>0) cat(m,"m ",sep="") cat(s,"s",sep="") return(invisible(res)) } return(res) } Print <- function(x,n=5,digits=max(3,getOption("digits")-3),...) 
{ mat <- !is.null(dim(x)) if (!mat) { x <- cbind(x) colnames(x) <- "" } if (is.null(rownames(x))) { rownames(x) <- seq(nrow(x)) } sep <- rbind("---"=rep('',ncol(x))) if (n<1) { print(x,quote=FALSE,digits=digits,...) } else { ## hd <- base::as.matrix(base::format(utils::head(x,n),digits=digits,...)) ## tl <- base::as.matrix(base::format(utils::tail(x,n),digits=digits,...)) ## print(rbind(hd,sep,tl),quote=FALSE,...) if (NROW(x)<=(2*n)) { hd <- base::format(utils::head(x,2*n),digits=digits,...) print(hd, quote=FALSE,...) } else { hd <- base::format(utils::head(x,n),digits=digits,...) tl <- base::format(utils::tail(x,n),digits=digits,...) print(rbind(base::as.matrix(hd),sep,base::as.matrix(tl)), quote=FALSE,...) } } invisible(x) } ##' @export print.sim <- function(x,...) { s <- summary(x,minimal=TRUE,...) attr(x,"f") <- attr(x,"call") <- NULL if (!is.null(dim(x))) { class(x) <- "matrix" } Print(x,...) cat("\n") print(s,extra=FALSE,...) return(invisible(x)) } ##' @export print.summary.sim <- function(x,group=list(c("^mean$","^sd$","^se$","^se/sd$","^coverage"), c("^min$","^[0-9.]+%$","^max$"), c("^na$","^missing$"), c("^true$","^bias$","^rmse$")), lower.case=TRUE, na.print="", digits = max(3, getOption("digits") - 2), quote=FALSE, time=TRUE, extra=TRUE, ...) { if (extra) { cat(attr(x,"n")," replications",sep="") if (time && !is.null(attr(x,"time"))) { cat("\t\t\t\t\tTime: ") Time(attr(x,"time")["elapsed"],print=TRUE) } cat("\n\n") } nn <- rownames(x) if (lower.case) nn <- tolower(nn) gg <- lapply(group, function(x) unlist(lapply(x,function(v) grep(v,nn)))) gg <- c(gg,list(setdiff(seq_along(nn),unlist(gg)))) x0 <- c() ng <- length(gg) for (i in seq(ng)) { x0 <- rbind(x0, x[gg[[i]],,drop=FALSE], { if(i0) NA}) } print(structure(x0,class="matrix")[,,drop=FALSE],digits=digits,quote=quote,na.print=na.print,...) 
if (extra) cat("\n") invisible(x) } ##' Summary method for 'sim' objects ##' ##' Summary method for 'sim' objects ##' @export ##' @export summary.sim ##' @param object sim object ##' @param estimate (optional) columns with estimates ##' @param se (optional) columns with standard error estimates ##' @param confint (optional) list of pairs of columns with confidence limits ##' @param true (optional) vector of true parameter values ##' @param fun (optional) summary function ##' @param names (optional) names of ##' @param unique.names if TRUE, unique.names will be applied to column names ##' @param minimal if TRUE, minimal summary will be returned ##' @param level confidence level ##' @param quantiles quantiles ##' @param ... additional levels to lower-level functions summary.sim <- function(object,estimate=NULL,se=NULL, confint=!is.null(se)&&!is.null(true),true=NULL, fun,names=NULL,unique.names=TRUE,minimal=FALSE, level=0.95,quantiles=c(0,.025,0.5,.975,1),...) { if (is.list(estimate)) { est <- estimate if (is.null(names)) names <- base::names(est) estimate <- c() nse <- is.null(se) ntrue <- is.null(true) elen <- unlist(lapply(est,length)) est <- lapply(est, function(e) c(e, rep(NA,max(elen)-length(e)))) for (e in est) { estimate <- c(estimate,e[1]) if (length(e)>1 && nse) se <- c(se,e[2]) if (length(e)>2 && ntrue) true <- c(true,e[3]) } cl <- match.call() cl[c("estimate","se","true","names")] <- list(estimate,se,true,names) } if (minimal) { fun <- function(x,se,confint,...) { res <- c(Mean=mean(x,na.rm=TRUE), SD=sd(x,na.rm=TRUE)) if (!missing(se) && !is.null(se)) { res <- c(res, c(SE=mean(se,na.rm=TRUE))) res <- c(res, c("SE/SD"=res[["SE"]]/res[["SD"]])) } return(res) } } mfun <- function(x,...) 
{ res <- c(mean(x,na.rm=TRUE), sd(x,na.rm=TRUE), if (length(quantiles)>0) quantile(x,quantiles,na.rm=TRUE), mean(is.na(x))) if (length(quantiles)>0) { nq <- paste0(quantiles*100,"%") idx <- which(quantiles==1) if (length(idx)>0) nq[idx] <- "Max" idx <- which(quantiles==0) if (length(idx)>0) nq[idx] <- "Min" } names(res) <- c("Mean","SD", if (length(quantiles)>0) nq, "Missing") res } tm <- attr(object,"time") N <- max(length(estimate),length(se),length(true)) if (!is.null(estimate)) estimate <- rep(estimate,length.out=N) if (!is.null(se)) se <- rep(se,length.out=N) if (!is.null(true)) { if (is.null(estimate)) N <- ncol(object) true <- rep(true,length.out=N) } if (!is.null(estimate) && is.character(estimate)) { estimate <- match(estimate,colnames(object)) } if (!missing(fun)) { if (!is.null(estimate)) m.est <- object[,estimate,drop=FALSE] else m.est <- object m.se <- NULL if (!is.null(se)) m.se <- object[,se,drop=FALSE] m.ci <- NULL if (!is.null(confint)) m.ci <- object[,confint,drop=FALSE] res <- lapply(seq(ncol(m.est)), function(i,...) fun(m.est[,i,drop=TRUE],se=m.se[,i,drop=TRUE],confint=m.ci[,1:2+(i-1)*2],...,INDEX=i),...) 
res <- matrix(unlist(res),nrow=length(res[[1]]),byrow=FALSE) if (is.null(dim(res))) { res <- rbind(res) } if (is.null(rownames(res))) { rownames(res) <- names(fun(0,m.se,m.ci,INDEX=1,...)) if (is.null(rownames(res))) rownames(res) <- rep("",nrow(res)) } if (is.null(colnames(res))) { colnames(res) <- colnames(m.est) } return(structure(res, n=NROW(object), time=tm, class=c("summary.sim","matrix"))) } if (!is.null(estimate)) { est <- apply(object[,estimate,drop=FALSE],2,mfun) } else { est <- apply(object,2,mfun) } if (!is.null(true)) { if (length(true)!=ncol(est)) { ##stop("'true' should be of same length as 'estimate'.") true <- rep(true,length.out=ncol(estimate)) } est <- rbind(est, rbind(True=true),rbind(Bias=est["Mean",]-true), rbind(RMSE=((est["Mean",]-true)^2+(est["SD",])^2)^.5) ) } if (!is.null(se)) { if (is.character(se)) { se <- match(se,colnames(object)) } if (length(se)!=ncol(est)) stop("'se' should be of same length as 'estimate'.") est <- rbind(est, SE=apply(object[,se,drop=FALSE],2, function(x) val <- c(mean(x,na.rm=TRUE)))) est <- rbind(est,"SE/SD"=est["SE",]/est["SD",]) } if (!is.null(confint) && (length(confint)>1 || confint)) { if (is.character(confint)) { confint <- match(confint,colnames(object)) } if (length(confint)==1 && confint) { if (is.null(se)) stop("Supply confidence limits or SE") confint <- c() pos <- ncol(object) for (i in seq_along(estimate)) { z <- 1-(1-level)/2 CI <- cbind(object[,estimate[i]]-qnorm(z)*object[,se[i]], object[,estimate[i]]+qnorm(z)*object[,se[i]]) colnames(CI) <- NULL object <- cbind(object,CI) confint <- c(confint,pos+1:2) pos <- pos+2 } } if (length(confint)!=2*length(estimate)) stop("'confint' should be of length 2*length(estimate).") Coverage <- c() for (i in seq_along(estimate)) { Coverage <- c(Coverage, mean((object[,confint[2*(i-1)+1]]true[i]),na.rm=TRUE)) } est <- rbind(est,Coverage=Coverage) } if (!is.null(names)) { if (length(names)0) for (j in seq_len(length(pos))) { meang[[ pos[j] ]][i] <- pp$meanpar[j] } } 
## Weighted average wp <- unlist(lapply(pg, function(y) { ppos <- !is.na(y) myweight <- W[ppos]/sum(W[ppos]) sum(y[ppos]*myweight) })) wmean <- unlist(lapply(meang, function(y) { ppos <- !is.na(y) myweight <- W[ppos]/sum(W[ppos]) sum(y[ppos]*myweight) })) res <- c(wmean,wp) res[!is.finite(res) | is.nan(res) | is.na(res) | is.complex(res)] <- .5 return(as.numeric(res)) } ###}}} ###{{{ startmean startmean <- function(x,p,mu) { if (is.null(mu)) return(p) meanpar <- numeric(index(x)$npar.mean) mymeans <- vars(x)[index(x)$v1==1] midx <- na.omit(match(names(mu),mymeans)) meanpar[midx] <- mu[midx] AP <- matrices(x,p,meanpar) nu <- numeric(length(vars(x))) nu[vars(x)%in%manifest(x)] <- mu meanstart <- ((diag(nrow=nrow(AP$A))-t(AP$A))%*%nu)[index(x)$v1==1] names(meanstart) <- vars(x)[index(x)$v1==1] return( c(meanstart, p) ) } ###}}} ###{{{ startvalues3 `startvalues3` <- function(x, S, debug=FALSE, tol=1e-6,...) { S <- reorderdata.lvm(x,S) if (nrow(S)!=length(manifest(x))) stop("Number of observed variables in data and models does not agree.") A <- t(index(x)$M) ## Adjacency matrix m <- nrow(A) ## Number of variables A0 <- t(index(x)$M0) ## Adjacency matrix (without fixed parameters) obs.idx <- index(x)$obs.idx; ##obs.idx <- as.vector(J%*%(seq_len(m))); latent.idx <- setdiff(seq_len(m), obs.idx) lat <- colnames(A)[latent.idx] exo.idx <- index(x)$exo.idx ## match(exogenous(x),vars(x)) exo.idxObs <- index(x)$exo.obsidx ##match(exogenous(x),manifest(x)) AP0 <- moments(x, rep(0,index(x)$npar)) newP <- t(AP0$P) newA <- t(AP0$A) fixed <- t(x$fix) for (i in latent.idx) { fix.idx <- colnames(fixed)[which(!is.na(t(fixed[,i])))[1]] lambda0 <- newA[fix.idx,i] rel.idx <- which(A0[,i]==1) rel.all <- which(A[,i]==1) rel.pos <- colnames(A)[rel.all] ## Estimation of lambda (latent -> endogenous) for (j in rel.idx) { lambda <- lambda0*S[fix.idx, j]/S[fix.idx,fix.idx] newA[j,i] <- lambda } lambdas <- newA[rel.pos,i] ## Estimation of beta (covariate -> latent) exo2latent <- 
which(A0[i,exo.idx]==1) exo.pos <- colnames(S)[exo.idxObs[exo2latent]] varX.eta <- S[exo.pos, exo.pos] InvvarX.eta <- Inverse(varX.eta,tol=1e-3) rel.pos <- setdiff(rel.pos,lat) covXY <- S[exo.pos, rel.pos,drop=FALSE] beta <- 0 for (j in seq_len(length(rel.pos))) beta <- beta + 1/lambdas[j]*InvvarX.eta %*% covXY[,j] beta <- beta/length(rel.pos) for (k in seq_len(length(exo.pos))) { if (A0[i,exo.pos[k]]==1) { newA[i,exo.pos[k]] <- beta[k] } } beta.eta <- matrix(newA[i,exo.pos], ncol=1) ## Estimation of zeta^2 (variance of latent variable) betavar <- matrix(beta.eta,nrow=1)%*%varX.eta%*%beta.eta zetas <- c() for (r1 in seq_len(length(rel.pos)-1)) for (r2 in seq(r1+1,length(rel.pos))) { zetas <- c(zetas, S[rel.pos[r1], rel.pos[r2]]/ (lambdas[r1]*lambdas[r2]) - betavar) } zeta <- mean(zetas) newP[i,i] <- zeta for (j in rel.all) { pos <- colnames(newA)[j] vary <- S[pos,pos] - newA[pos,i]^2*(zeta+betavar) newP[pos,pos] <- ifelse(vary<0.25,0.25,vary) } } Debug(list("start=",start), debug) start <- pars(x, A=t(newA), P=newP) return(start) } ###}}} startvalues3 ###{{{ startvalues2 ## Estimate sub-models (measurement models) ##' @export `startvalues2` <- function(x, S, mu=NULL, debug=FALSE, messages=0,...) 
{ if (messages>0) cat("Obtaining start values...\n") S <- reorderdata.lvm(x,S) ss <- startvalues(x,S) Debug(list("ss=",ss),debug); g <- measurement(x,messages=0) keep <- c() if (length(g)>1) { for (i in seq_len(length(g))) { if (length(endogenous(g[[i]]))>2) keep <- c(keep,i) } g <- g[keep] } if (length(g)<2) return(startmean(x,ss,mu=mu)) ## if (messages>0) cat("Fitting marginal measurement models...\n") op <- options(warn=-1) e <- lapply(g, function(y) { estimate(y, data=list(S=S[manifest(y),manifest(y),drop=FALSE], mu=mu[manifest(y)], n=100), control=list(meanstructure=FALSE, starterfun="startvalues", estimator="Simple", method="nlminb1"), optcontrol=list(), debug=FALSE, messages=0) }) for (l in e) { ## a <- coef(l$estimate)[,1] a <- coef(l) for (i in seq_len(length(a))) { pos <- match(names(a)[i],names(ss)) if (!is.na(pos)) ss[pos] <- a[i] } } options(op) startmean(x,ss,mu=mu) } ###}}} startvalues2 ###{{{ startvalues0 ##' @export startvalues1 <- function(x,S,mu=NULL,tol=1e-6,delta=1e-6,...) { p0 <- startvalues(x,S,mu,...) p0[index(x)$npar.mean+variances(x)] <- 0.1 p0[index(x)$npar.mean+offdiags(x)] <- 0 p0 } startvalues00 <- function(x,S,mu=NULL,tol=1e-6,delta=1e-6,...) { p0 <- startvalues(x,S,mu,...) 
p0 <- numeric(length(p0)) P0 <- x$cov*1 ##P0[!is.na(x$covfix)] <- ##P0 <- x$covfix; P0[is.na(P0)] <- 0 ##diag(P0)[index(x)$endo.idx] <- diag(S)[index(x)$endo.obsidx]/2 ##lu <- min(diag(P0)[index(x)$endo.idx])/2 ## diag(P0)[] <- 0.1 ## diag(P0)[index(x)$endo.idx] <- 1 diag(P0)[index(x)$eta.idx] <- 0.1 ##mean(diag(S)[index(x)$endo.idx])/2 ee <- eigen(P0) tol <- 1e-6 ii <- ee$values ii[ee$values0) ## pp[seq(length(meanstart))] <- meanstart ## } names(pp) <- coef(x, messages=0, fixed=FALSE, mean=TRUE)[seq_len(length(pp))] pp[!is.finite(pp) | is.nan(pp) | is.na(pp)] <- 0.01 return(pp) } ###}}} startvalues0 ###{{{ startvalues ## McDonald & Hartmann, 1992 ##' @export startvalues <- function(x, S, mu=NULL, debug=FALSE, messages=lava.options()$messages, tol=1e-6, delta=1e-6,...) { ## As proposed by McDonald & Hartmann, 1992. ## Implementation based on John Fox's implementation in the 'sem' R-package S <- reorderdata.lvm(x,S) if (nrow(S)!=length(manifest(x))) stop("Number of observed variables in data and models does not agree.") J <- index(x)$J ## Manifest selection P0 <- index(x)$P0 ## covariance 'adjacency' A <- t(index(x)$M) ## Adjacency matrix n <- nrow(S) ## Number of manifest variables m <- nrow(A) ## Number of variables A0 <- t(index(x)$M0) ## Adjacency matrix (without fixed parameters) obs.idx <- as.vector(J%*%(seq_len(m))); latent.idx <- setdiff(seq_len(m), obs.idx) s <- sqrt(diag(S)) suppressWarnings(R <- (cov2cor(S))) ## S/outer(s,s) C <- P0 Debug(list("obs.idx", obs.idx), debug) C[obs.idx,obs.idx] <- R ## Estimates of covariance between latent and manifest variables Debug((C), debug) for (i in latent.idx) { inRelation <- A[obs.idx,i]==1 for (j in seq_len(length(obs.idx))) { Debug((j), debug) C[obs.idx[j],i] <- C[i,obs.idx[j]] <- if (any(inRelation)) { numerator <- sum(R[j, which(inRelation)]) denominator <- sqrt(sum(R[which(inRelation), which(inRelation)])) numerator/denominator ## as proposed by McDonald & Hartmann } else { runif(1, .3, .5) ## No arrows => 
small random covariance } } } ## Estimates of covariance between latent variables for (i in latent.idx) { for (j in latent.idx) { C[i,j] <- C[j,i] <- if (i==j) { 1 } else { inRelation.i <- A[obs.idx, i]==1 inRelation.j <- A[obs.idx, j]==1 if ((any(inRelation.i)) | (any(inRelation.j))) { numerator <- sum(R[which(inRelation.i), which(inRelation.j)]) denominator <- sqrt( sum(R[which(inRelation.i), which(inRelation.i)]) * sum(R[which(inRelation.j), which(inRelation.j)])) numerator/(denominator+0.01) ## Avoid division by zero } else { runif(1, .3, .5) } } } } if (debug) { print("C="); print(C); } Ahat <- matrix(0,m,m) C[is.nan(C)] <- 0 for (j in seq_len(m)) { ## OLS-estimates relation <- A[j,]==1 if (!any(relation)) next Ahat[j, relation] <- tryCatch(Inverse(C[relation,relation] + diag(nrow=sum(relation))*delta,tol=1e-3) %*% C[relation,j], error=function(...) 0) } Ahat[obs.idx,] <- Ahat[obs.idx,]*matrix(s, n, m) Ahat[,obs.idx] <- Ahat[,obs.idx]/matrix(s, m, n, byrow=TRUE) Chat <- C Chat[obs.idx,] <- Chat[obs.idx,]*matrix(s,n,m) ## Chat[,obs.idx] <- Chat[,obs.idx]*matrix(s,m,n,byrow=TRUE) ## Phat <- (diag(m)-Ahat)%*%Chat%*%t(diag(m)-Ahat) ##diag(Phat) <- abs(diag(Phat)) ## Guarantee PD-matrix: Phat[is.nan(Phat) | is.na(Phat)] <- 0 diag(Phat)[diag(Phat)==0] <- 1 eig <- eigen(Phat) L <- abs(eig$values); L[L<1e-3] <- 1e-3 Phat <- eig$vectors%*%diag(L,ncol=ncol(eig$vectors))%*%t(eig$vectors) Debug(list("start=",start), debug) start <- pars(x, A=t(Ahat*A0), P=(Phat*P0)) names(start) <- coef(x, messages=0, fixed=FALSE, mean=FALSE)[seq_len(length(start))] res <- startmean(x,start,mu) res[!is.finite(res) | is.nan(res) | is.na(res)] <- 1 res } ###}}} startvalues lava/R/pdfconvert.R0000644000176200001440000000274513520655354013713 0ustar liggesusers##' Convert PDF file to print quality png (default 300 dpi) ##' ##' Access to ghostscript program 'gs' is needed ##' @title Convert pdf to raster format ##' @param files Vector of (pdf-)filenames to process ##' @param dpi DPI ##' @param 
resolution Resolution of raster image file ##' @param gs Optional ghostscript command ##' @param gsopt Optional ghostscript arguments ##' @param resize Optional resize arguments (mogrify) ##' @param format Raster format (e.g. png, jpg, tif, ...) ##' @param \dots Additional arguments ##' @seealso \code{dev.copy2pdf}, \code{printdev} ##' @export ##' @author Klaus K. Holst ##' @keywords iplot pdfconvert <- function(files, dpi=300, resolution=1024, gs, gsopt, resize, format="png", ...) { if (missing(gsopt)) gsopt <- "-dSAFTER -dBATCH -dNOPAUSE -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4" if (missing(gs)) { gs <- names(which(Sys.which(c("gs", "gswin32c", "gswin64c")) != "")) } cmd1 <- paste0(gs," -r",dpi," -dBackgroundColor='16#ffffff'") if (missing(resize)) { resize <- paste0("mogrify -resize ", resolution) } for (f in files) { f0 <- strsplit(f,".pdf")[1] f.out <- paste(f0,format,sep=".") f.pdf <- paste(f0,"pdf",sep=".") mycmd1 <- paste0(cmd1, " ", gsopt, " -sOutputFile=", f.out, " > /dev/null ", f.pdf) mycmd2 <- paste0(resize, " ", f.out) cat(f.pdf) system(mycmd1) cat(" -> ") system(mycmd2) cat(f.out, "\n") } } lava/R/bootstrap.R0000644000176200001440000001675113520655354013560 0ustar liggesusers##' Generic method for calculating bootstrap statistics ##' ##' @title Generic bootstrap method ##' @param x Model object ##' @param \dots Additional arguments ##' @seealso \code{bootstrap.lvm} \code{bootstrap.lvmfit} ##' @author Klaus K. Holst ##' @export bootstrap <- function(x,...) UseMethod("bootstrap") ##' Calculate bootstrap estimates of a lvm object ##' ##' Draws non-parametric bootstrap samples ##' ##' @param x \code{lvm}-object. 
##' @param R Number of bootstrap samples ##' @param fun Optional function of the (bootstrapped) model-fit defining the ##' statistic of interest ##' @param data The data to resample from ##' @param control Options to the optimization routine ##' @param p Parameter vector of the null model for the parametric bootstrap ##' @param parametric If TRUE a parametric bootstrap is calculated. If FALSE a ##' non-parametric (row-sampling) bootstrap is computed. ##' @param bollenstine Bollen-Stine transformation (non-parametric bootstrap) for bootstrap hypothesis testing. ##' @param constraints Logical indicating whether non-linear parameter ##' constraints should be included in the bootstrap procedure ##' @param estimator String definining estimator, e.g. 'gaussian' (see ##' \code{estimator}) ##' @param weights Optional weights matrix used by \code{estimator} ##' @param sd Logical indicating whether standard error estimates should be ##' included in the bootstrap procedure ##' @param messages Control amount of messages printed ##' @param parallel If TRUE parallel backend will be used ##' @param mc.cores Number of threads (if NULL foreach::foreach will be used, otherwise parallel::mclapply) ##' @param \dots Additional arguments, e.g. choice of estimator. ##' @aliases bootstrap.lvmfit ##' @usage ##' ##' \method{bootstrap}{lvm}(x,R=100,data,fun=NULL,control=list(), ##' p, parametric=FALSE, bollenstine=FALSE, ##' constraints=TRUE,sd=FALSE,messages=lava.options()$messages, ##' parallel=lava.options()$parallel, ##' mc.cores=NULL, ##' ...) ##' ##' \method{bootstrap}{lvmfit}(x,R=100,data=model.frame(x), ##' control=list(start=coef(x)), ##' p=coef(x), parametric=FALSE, bollenstine=FALSE, ##' estimator=x$estimator,weights=Weights(x),...) ##' ##' @return A \code{bootstrap.lvm} object. ##' @author Klaus K. 
Holst ##' @seealso \code{\link{confint.lvmfit}} ##' @keywords models regression ##' @examples ##' m <- lvm(y~x) ##' d <- sim(m,100) ##' e <- estimate(lvm(y~x), data=d) ##' \donttest{ ## Reduce Ex.Timings ##' B <- bootstrap(e,R=50,parallel=FALSE) ##' B ##' } ##' @export bootstrap.lvm <- function(x,R=100,data,fun=NULL,control=list(), p, parametric=FALSE, bollenstine=FALSE, constraints=TRUE,sd=FALSE,messages=lava.options()$messages, parallel=lava.options()$parallel, mc.cores=NULL, ...) { coefs <- sds <- c() on.exit(list(coef=coefs[-1,], sd=sds[-1,], coef0=coefs[1,], sd0=sds[1,], model=x)) pb <- NULL if (messages>0) pb <- txtProgressBar(style=lava.options()$progressbarstyle,width=40) pmis <- missing(p) bootfun <- function(i) { if (i==0) { d0 <- data } else { if (!parametric | pmis) { d0 <- data[sample(seq_len(nrow(data)),replace=TRUE),] } else { d0 <- sim(x,p=p,n=nrow(data)) } } suppressWarnings(e0 <- estimate(x,data=d0,control=control,messages=messages,index=FALSE,...)) if ((messages>0) && getTxtProgressBar(pb)<(i/R)) { setTxtProgressBar(pb, i/R) } if (!is.null(fun)) { coefs <- fun(e0) newsd <- NULL } else { coefs <- coef(e0) newsd <- c() if (sd) { newsd <- e0$coef[,2] } if (constraints & length(constrain(x))>0) { cc <- constraints(e0,...) coefs <- c(coefs,cc[,1]) names(coefs)[seq(length(coefs)-length(cc[,1])+1,length(coefs))] <- rownames(cc) if (sd) { newsd <- c(newsd,cc[,2]) } } } return(list(coefs=coefs,sds=newsd)) } if (bollenstine) { e0 <- estimate(x,data=data,control=control,messages=0,index=FALSE,...) 
mm <- modelVar(e0) mu <- mm$xi Y <- t(t(data[,manifest(e0)])-as.vector(mu)) Sigma <- mm$C S <- (ncol(Y)-1)/ncol(Y)*var(Y) sSigma <- with(eigen(Sigma),vectors%*%diag(sqrt(values),ncol=ncol(vectors))%*%t(vectors)) isS <- with(eigen(S),vectors%*%diag(1/sqrt(values),ncol=ncol(vectors))%*%t(vectors)) data <- as.matrix(Y)%*%(isS%*%sSigma) colnames(data) <- manifest(e0) } i <- 0 if (parallel) { if (is.null(mc.cores) && requireNamespace("foreach",quietly=TRUE)) { res <- foreach::"%dopar%"(foreach::foreach (i=0:R),bootfun(i)) } else { if (is.null(mc.cores)) mc.cores <- 1 res <- parallel::mclapply(0:R,bootfun,mc.cores=mc.cores) } } else { res <- lapply(0:R,bootfun) } if (messages>0) { setTxtProgressBar(pb, 1) close(pb) } coefs <- matrix(unlist(lapply(res, function(x) x$coefs)),nrow=R+1,byrow=TRUE) nn <- names(res[[1]]$coefs) if (!is.null(nn)) colnames(coefs) <- nn sds <- NULL if (sd) sds <- matrix(unlist(lapply(res, function(x) x$sds)),nrow=R+1,byrow=TRUE) if (!is.null(fun)) { rownames(coefs) <- c() res <- list(coef=coefs[-1,,drop=FALSE],coef0=coefs[1,],model=x) } else { colnames(coefs) <- names(res[[1]]$coefs) rownames(coefs) <- c(); if (sd) colnames(sds) <- colnames(coefs) res <- list(coef=coefs[-1,,drop=FALSE], sd=sds[-1,,drop=FALSE], coef0=coefs[1,], sd0=sds[1,], model=x, bollenstine=bollenstine) } class(res) <- "bootstrap.lvm" return(res) } ##' @export bootstrap.lvmfit <- function(x,R=100,data=model.frame(x), control=list(start=coef(x)), p=coef(x), parametric=FALSE, bollenstine=FALSE, estimator=x$estimator,weights=Weights(x),...) bootstrap.lvm(Model(x),R=R,data=data,control=control,estimator=estimator,weights=weights,parametric=parametric,bollenstine=bollenstine,p=p,...) ##' @export "print.bootstrap.lvm" <- function(x,idx,level=0.95,...) 
{ cat("Non-parametric bootstrap statistics (R=",nrow(x$coef),"):\n\n",sep="") uplow <-(c(0,1) + c(1,-1)*(1-level)/2) nn <- paste(uplow*100,"%") c1 <- t(apply(x$coef,2,function(x) c(mean(x), sd(x), quantile(x,uplow)))) c1 <- cbind(x$coef0,c1[,1]-x$coef0,c1[,-1,drop=FALSE]) colnames(c1) <- c("Estimate","Bias","Std.Err",nn) if (missing(idx)) { print(format(c1,...),quote=FALSE) } else { print(format(c1[idx,,drop=FALSE],...),quote=FALSE) } if (length(x$sd)>0) { c2 <- t(apply(x$sd,2,function(x) c(mean(x), sd(x), quantile(x,c(0.025,0.975))))) c2 <- cbind(c2[,1],c2[,1]-x$sd0,c2[,-1]) colnames(c2) <- c("Estimate","Bias","Std.Err","2.5%","97.5%") cat("\nStandard errors:\n") if (missing(idx)) { print(format(c2,...),quote=FALSE) } else { print(format(c2[idx,,drop=FALSE],...),quote=FALSE) } } cat("\n") invisible(x) } lava/R/plot.estimate.R0000644000176200001440000000327213520655354014325 0ustar liggesusers##' Plot method for 'estimate' objects ##' ##' Plot method for 'estimate' objects ##' @export ##' @param x estimate object ##' @param f function of parameter coefficients and data parsed on to 'estimate'. ##' If omitted a forest-plot will be produced. ##' @param idx Index of parameters (default all) ##' @param intercept include intercept in forest-plot ##' @param data data.frame ##' @param confint Add confidence limits ##' @param type plot type ('l') ##' @param xlab x-axis label ##' @param ylab y-axis label ##' @param col color ##' @param add add plot to current device ##' @param ... additional arguments to lower-level functions plot.estimate <- function(x,f,idx,intercept=FALSE,data,confint=TRUE,type="l",xlab="x",ylab="f(x)",col=1,add=FALSE,...) 
{ if (!missing(f) && !is.null(f)) { data <- as.list(data) env <- new.env() for (y in names(data)) { assign(y,data[[y]],env) } environment(f) <- env pp <- estimate(x,f,..., vcov=vcov(x),iid=FALSE)$coefmat if (!add) suppressWarnings(plot(data[[1]],pp[,1],xlab=xlab,ylab=ylab,type=type,...)) else lines(data[[1]],pp[,1],xlab=xlab,ylab=ylab,type=type,col=col,...) if (confint) confband(data[[1]],pp[,3],pp[,4],polygon=TRUE,col=Col(col),lty=0) return(invisible(pp)) } if (!is.null(x$coefmat)) { pp <- x$coefmat[,c(1,3,4),drop=FALSE] } else { pp <- cbind(coef(x),confint(x)) } if (!missing(idx)) pp <- pp[idx,,drop=FALSE] if (!intercept) { idx <- match("(Intercept)",rownames(pp)) if (length(idx)>0 && !any(is.na(idx))) pp <- pp[-idx,,drop=FALSE] } forestplot(pp[rev(seq(nrow(pp))),,drop=FALSE],...) } lava/R/commutation.R0000644000176200001440000000111013520655354014061 0ustar liggesusers##' Finds the unique commutation matrix K: ##' \eqn{K vec(A) = vec(A^t)} ##' ##' @title Finds the unique commutation matrix ##' @param m rows ##' @param n columns ##' @author Klaus K. 
Holst ##' @export commutation <- function(m, n=m) { if (inherits(m,"matrix")) { n <- ncol(m) m <- nrow(m) } H <- function(i,j) { ## mxn-matrix with 1 at (i,j) Hij <- matrix(0, nrow=m, ncol=n) Hij[i,j] <- 1 Hij } K <- matrix(0,m*n,m*n) for (i in seq_len(m)) for (j in seq_len(n)) K <- K + H(i,j)%x%t(H(i,j)) K } lava/R/describecoef.R0000644000176200001440000000204713520655354014151 0ustar liggesusers##' @export describecoef <- function(x,par,from,to,mean=TRUE) { p <- coef(x, mean=mean) if (!missing(from)) { st1 <- paste0(to,lava.options()$symbol[1],from) st2 <- paste0(to,lava.options()$symbol[2],from) st3 <- paste0(from,lava.options()$symbol[2],to) pos <- na.omit(match(unique(c(st1,st2,st3)),p)) attributes(pos) <- NULL return(pos) } res <- strsplit(p,lava.options()$symbol[2]) var.idx <- which(unlist(lapply(res,length))>1) ## Variance parameters rest.idx <- setdiff(seq_along(p),var.idx) res[rest.idx] <- strsplit(p[rest.idx],lava.options()$symbol[1]) mean.idx <- which(unlist(lapply(res,length))==1) ## Mean parameters reg.idx <- setdiff(rest.idx,mean.idx) names(res)[mean.idx] <- paste0("m",seq_along(mean.idx)) for (i in var.idx) attr(res[[i]],"type") <- "cov" for (i in mean.idx) attr(res[[i]],"type") <- "mean" for (i in reg.idx) attr(res[[i]],"type") <- "reg" if (missing(par)) return(res) return(res[par]) } lava/R/Missing.R0000644000176200001440000000537313520655354013152 0ustar liggesusers##' Missing value generator ##' ##' This function adds a binary variable to a given \code{lvm} model ##' and also a variable which is equal to the original variable where ##' the binary variable is equal to zero ##' ##' @title Missing value generator ##' @param object \code{lvm}-object. ##' @param formula The right hand side specifies the name of a latent ##' variable which is not always observed. The left hand side ##' specifies the name of a new variable which is equal to the latent ##' variable but has missing values. 
If given as a string then this ##' is used as the name of the latent (full-data) name, and the ##' observed data name is 'missing.data' ##' @param Rformula Missing data mechanism with left hand side ##' specifying the name of the observed data indicator (may also just ##' be given as a character instead of a formula) ##' @param missing.name Name of observed data variable (only used if ##' 'formula' was given as a character specifying the name of the ##' full-data variable) ##' @param suffix If missing.name is missing, then the name of the ##' oberved data variable will be the name of the full-data variable + ##' the suffix ##' @param ... Passed to binomial.lvm. ##' @return lvm object ##' @aliases Missing, Missing<- ##' @examples ##' library(lava) ##' set.seed(17) ##' m <- lvm(y0~x01+x02+x03) ##' m <- Missing(m,formula=x1~x01,Rformula=R1~0.3*x02+-0.7*x01,p=0.4) ##' sim(m,10) ##' ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r") ##' ## same as ##' ## m <- Missing(m,y~1,r~1) ##' sim(m,10) ##' ##' ## same as ##' m <- lvm(y~1) ##' Missing(m,"y") <- r~x ##' sim(m,10) ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r",suffix=".") ##' ## same as ##' ## m <- Missing(m,"y","r",missing.name="y.") ##' ## same as ##' ## m <- Missing(m,y.~y,"r") ##' sim(m,10) ##' ##' @export ##' @author Thomas A. Gerds Missing <- function(object,formula,Rformula,missing.name,suffix="0",...){ if (is.character(Rformula)) { indicatorname <- Rformula Rformula <- toformula(Rformula,1) } else { indicatorname <- all.vars(Rformula)[1] } if (length(all.vars(formula))==1) formula <- all.vars(formula) if (is.character(formula)) { if (missing(missing.name)) missing.name <- paste0(formula,suffix) formula <- toformula(missing.name,formula) } newf <- update(formula,paste(".~.+",indicatorname)) if (is.null(distribution(object,indicatorname)[[1]]) || length(list(...))>0) { distribution(object,indicatorname) <- binomial.lvm(...) 
} transform(object,newf) <- function(u){ out <- u[,1] out[u[,2]==0] <- NA out } regression(object) <- Rformula object } ##' @export "Missing<-" <- function(object,formula,...,value) { Missing(object,formula,value,...) } lava/R/devcoords.R0000644000176200001440000000253313520655354013524 0ustar liggesusers##' Returns device-coordinates and plot-region ##' ##' @title Returns device-coordinates and plot-region ##' @return A \code{list} with elements ##' \item{dev.x1}{Device: Left x-coordinate} ##' \item{dev.x2}{Device: Right x-coordinate} ##' \item{dev.y1}{Device Bottom y-coordinate} ##' \item{dev.y2}{Device Top y-coordinate} ##' \item{fig.x1}{Plot: Left x-coordinate} ##' \item{fig.x2}{Plot: Right x-coordinate} ##' \item{fig.y1}{Plot: Bottom y-coordinate} ##' \item{fig.y2}{Plot: Top y-coordinate} ##' @author Klaus K. Holst ##' @export ##' @keywords hplot `devcoords` <- function() { cc <- par("usr") ## extremes of coordinates of plotting region (x1,x2,y1,y2) plotinch <- par("pin") ## Plot dimensions (width,height) in inches margininch <- par("mai") ## Margin sizes in inches (bottom, left, top ,right) plotlenX <- cc[2]-cc[1] unitinchX <- plotlenX/plotinch[1] plotlenY <- cc[4]-cc[3] unitinchY <- plotlenY/plotinch[2] deviceXleft <- cc[1]-unitinchX*margininch[2] deviceXright <- cc[2]+unitinchX*margininch[4] deviceYtop <- cc[4]+unitinchY*margininch[3] deviceYbottom <- cc[3]-unitinchY*margininch[1] return(list(dev.x1=deviceXleft, dev.x2=deviceXright, dev.y1=deviceYbottom, dev.y2=deviceYtop, fig.x1=cc[1], fig.x2=cc[2], fig.y1=cc[3], fig.y2=cc[4])) } lava/MD50000644000176200001440000003630013520662622011513 0ustar liggesusers98312a65ffee41ed114694a8292e1e82 *DESCRIPTION 355d8fe71dd00fbd1d58b9f6a8369506 *INDEX 5e55763817c9da3b564293fa8075e643 *NAMESPACE e4b08b5a1519cfc9b8a39ab69d895037 *NEWS 9b2b3e46eeca1420e8aaaddab38a8c37 *R/By.R f9d661fb382bd48b75ef324d09a07cc9 *R/Col.R 9a51c793e1a8d12fa5af4ea0229b5017 *R/Expand.R 4b845c3061290c24f878b0a1436751aa *R/Grep.R 
5ebb030e43f8fe6e69f7809dab8af236 *R/Inverse.R 89669eb70adbdca87251eeb515d85b67 *R/Missing.R 291e3a4ff6aaad4901edc8e23c1d8340 *R/NA2x.R 15d8bd15b799d10f6683a72454ffda70 *R/Objective.R 19d8bd81197b7e110bb5ab2c50533cd9 *R/addattr.R a49fe6dd862d504ca51b23b68a057586 *R/addhook.R 37cebfff08b3b61d160041c876450061 *R/addvar.R eb2978babdb24b036659249b79c24949 *R/assoc.R 7cee8bab2bd28560b4e2beb28349f32f *R/backdoor.R 72d0dd1d3c242af7f8b55c0079a375bb *R/baptize.R 9ebdb4e380ccbe24818f7097bebb35b4 *R/binomial.rrw.R c9c640ae87a1af46daf4a4fe454757b9 *R/blockdiag.R 7555abfabbd91786c53ca58f48c7329c *R/bootstrap.R 3c58f3f735e1fd45f1053a49f24e5794 *R/cancel.R f69a41842da6a838a2db5e6957a14b04 *R/categorical.R 4601abc235f465ce825a59659be5d272 *R/children.R 7beae3992487f31b3239e4baaab02713 *R/cluster.hook.R 161784bc6be5ffd0ab46d1b982654266 *R/coef.R 2cef12541c8b745f7c646cc4f1e1ca8d *R/combine.R 3ba19073c83d454508c62d3e5544e596 *R/commutation.R 1140a2c70ed2d7a528cf2f0d60c8724d *R/compare.R f066fcc36f7875747d23d761e542d69d *R/complik.R f2e7e1b57d177158660d7f7c0760ebc8 *R/confband.R 0ddb35002100d89369dbe45ee262d01f *R/confint.R 57cf6680804189d13f86910664a4d897 *R/confpred.R 0c2ecd2af4a7deb76aa75d4eb0fcf2eb *R/constrain.R 58b433ace6f2831f55b881d67b8938c1 *R/contr.R bd1f96f5ea3cda1d26d7038aa4952aab *R/correlation.R 0201e9b2ae74db60f73a2f80add55646 *R/covariance.R 73f6b448c2306dad9a3b63d58f61f207 *R/csplit.R c4590e0308dfdbf1d73020148704c953 *R/curly.R 7c18b9b7de234939085913d0449361b9 *R/cv.R 695452fcd6fedee31d75a5c462acf5bd *R/deriv.R 8e80a916289fda805bbadd7970418593 *R/describecoef.R 88e5630b0b6dc23354bee40481f42dcc *R/devcoords.R ce26b0705a3324ad453a183ad088508b *R/diagtest.R 4488b08fde8b69289c5c484b4c96fb75 *R/distribution.R d0241ddae5286772627fe28e493a40e9 *R/dsep.R c353acde15d428af574b52e7e64987b8 *R/effects.R ddb2c2b3000c091de53b27af98f10956 *R/endogenous.R 5cb0c44cd55c7aecdb7cfe5365c805fd *R/equivalence.R 2c588635fd0a937be6a4f30d1ac49dbb *R/estimate.default.R 
722ecfc5aaae6223e495743e3b95db27 *R/estimate.formula.R 89c9b4ecf6c4c8a43819bd1cc99e5388 *R/estimate.list.R 458c0bc1ee5b6abafba397eba337c6a7 *R/estimate.lvm.R 709efdd391608805df247448d7e5ee7c *R/estimate.multigroup.R b442b1ba45c6d2677a71e04926c930fb *R/eventTime.R 2e2ce990d5fa7628f1853e577c94314c *R/exogenous.R 64e53188f9c7f8c41a1ff6465878dc96 *R/finalize.R 486b3562f73f39dfd3d9fb11802edab4 *R/fix.R b74eb958ff9d7869fc66bcf30392bc87 *R/fixsome.R 487d903d7b432bdc272d1f237115d925 *R/formula.R 8ee72c492a7cb557ef4d2036d5946b7d *R/fplot.R ff3abc34a7b831ef0c49bbf225b30d6b *R/frobnorm.R d59990f070e808f5bb3c9dc9d6af51d4 *R/functional.R a13d82635679bc892ac8b5791bae97cb *R/gkgamma.R 2d75d07c32eba94eb860f33a50175301 *R/glmest.R 6999881e599828388718df8a477946e8 *R/gof.R 26d5b81ada18cdcd770fde2c6651357d *R/graph.R 72705ea7346ad6cf0e3b0f2506fedb28 *R/graph2lvm.R ee3fc89201b2a99be92ff3fb1388cb92 *R/heavytail.R 6920114214ff72aa80cee1c723bc5313 *R/iid.R 0cd3ff8cff0b22c06829b3c67bf4b042 *R/img.R 0ebf084c7c07c65191709af9971e8ef8 *R/index.sem.R 6a0e1f1e717ee92c52fe06e7ad0d6f36 *R/information.R fa85806548f749dcf42bffc7f3d97ac4 *R/interactive.R 5c9c660a2be51b67f3ce12b3577ab6d5 *R/iv.R 3e085acb5bd959d3d592bf5e1de555a7 *R/kappa.R ab004cd0f2419720fa9bd755624f33a7 *R/kill.R 6595e85f1805a34772d43ea54ad6945e *R/ksmooth.R d47e3bc62bb2bbff2f2e0e56d882173a *R/labels.R 81e55bdc3a4f9bfa0b35a1938dc923ea *R/latent.R 4f66aa034902e02dc7cd8647fc515cf6 *R/lava-package.R 63ca86e7757e0dfd9159e4834b924bde *R/lisrel.R 7cfb7d83bce74003e89042f538f82895 *R/lmers.R e031aba54e107e318b2bdeedaf9e6264 *R/logLik.R 965f9148202dc588c6f333e0fb032d41 *R/logo.R 06b2dd114107e53f1c12a197f735d608 *R/lvm.R 95b4ebaed9264698b855ded3d0d87aaf *R/makemissing.R ebf64fdd61d648ffc648cee0b2ad9b03 *R/manifest.R f0f8a1ad7fc1ca97c5af77a5a275ab8f *R/matrices.R 4b05ec8fc43db25e09eafb3b9c9d3d19 *R/measurement.R 2e59175f3d95e08b511a1bc2e8ab8a9a *R/measurement.error.R 1f47c08029cbe95e6dd0dc58088f775e *R/merge.R b62f6547b3a6b9b3f84d6f0167533bb9 
*R/missingMLE.R 6f8b40c9372af9c7160459cdd5d7619d *R/mixture.R 90ee09af235159a546e7d1dc11f058c4 *R/model.R 9fcafa0394ba81e4982aff0d2e65bc32 *R/model.frame.R c64df4d8651e129a99ac52e90f84739b *R/modelPar.R ca097422372ea58ae2f0b640a1cfe2a8 *R/modelVar.R 5d4ea3ef7e8cc71cca9620e6cc44dd76 *R/modelsearch.R 279c0bd9878c99331ccde2385efd29ca *R/moments.R 60eea9c6318e3f52c79df0dab0c6b0c1 *R/multigroup.R 39c0c1c5eab5f43b39bb802aa776b6bb *R/multinomial.R 562e50b5d2916e9716c4c6bc15dac812 *R/multipleinput.R 1f9bfa57aa1a9c3e875d73f20ad8104e *R/multipletesting.R 1d1ab5d45476434b7c0e2418110d8749 *R/mvnmix.R 5f3f0350fae550e3208ceeb7034ce0d5 *R/napass0.R c245590d634326ec5fc4a33922b8f62c *R/nodecolor.R 40d1a084456a87ddf29156ed99b9157e *R/nonlinear.R 8de13eaca8c619d7769e85f4dd27b235 *R/normal.R ad94a4174b4975e2353998b2c3ee84f9 *R/onload.R 70fa043e0ea0129bfdf77a4979ff5512 *R/operators.R 84574d75e4239188c7b1cdaa9cc2bf3d *R/optims.R 5a58794558c58d4c78ee123813e96277 *R/ordinal.R d7f7cdaa2f36b69aa8eedcfdd0ca3177 *R/ordreg.R ede5ff6ffb5bb2a1ec45c97e44a22de3 *R/parameter.R 73e333257401da27c72e002ce37f7051 *R/parlabels.R 58b93d0108be5bae602a7ff9fa7ec148 *R/parpos.R fb6925e5e1b024c8e486734416a9e1f2 *R/pars.R 099a9737f4a0bff97a41d5f9fca9d5ce *R/parsedesign.R aa8756bff3dbeaee91ab62b914271571 *R/partialcor.R 5e4b89fa9d945c3cf7fad384483677af *R/path.R e01bed23b281a83775d0b758b9d206cd *R/pcor.R bacb0c975e2a4b2037099f40a827c11a *R/pdfconvert.R 6a878713c0a9b8bccf459e9fb3b523cc *R/plot.R a2753b721432b8819276c795e6192aaf *R/plot.estimate.R eda451ae2e0f3a3ccb5de12f523aeae8 *R/plot.sim.R 6f96f4b84e8f98b32882b7410a4204b9 *R/plotConf.R 9a96bfe58fff6fec7ae133faf601350c *R/predict.R e8d3ad8e5b814a9c653fa568b12fe836 *R/predict.mixture.R 21b51a066882e74a5e65030b75796b11 *R/print.R 9381c9d87c8c919c4a656c0fe0fc6ac6 *R/procformula.R 3aedc59366eac10f170ea92de230da39 *R/profile.R a3b8f843649749ee06f63f1642769b57 *R/randomslope.R 0699c30dda0612ad504479cfd0081ce5 *R/regression.R a496539084a932cb8d4d2ed1543e2d59 
*R/residuals.R 7c91f0e7318aaab377b5b1edc52be857 *R/revdiag.R 9bb8ecf385a051b16ee1a0a27e97f14e *R/rotation.R 1d7249f7d62b8b0a49dafcee7409f130 *R/scheffe.R aa7edd22264a294652172167e0f5087b *R/score.R 60f707d9fe306bb27717ddb92e95d714 *R/score.survreg.R 70e8e69d56165d347d3e1d1351aa6f82 *R/sim.default.R eee8a891c2becff2d00efb3312f6db99 *R/sim.lvm.R 2c9043a67901f7a8764aaccb1e67242d *R/spaghetti.R 644bbc6225442c3965cc08299a8274c4 *R/stack.R e59c04cb5df9a111073417aba8696e06 *R/startvalues.R c7ca1427cdecb0bdf6460fc1cef3ce69 *R/subgraph.R fcf92cafd2d4cb9d995837adf629d85b *R/subset.R e6bd6f93c59e17dd02b1bbc7b170bbc9 *R/summary.R dda5d57fbe303014f555098e538e9e29 *R/timedep.R deed4d83d746a5bfb5632a5153794025 *R/toformula.R 8612e2ee4584c59436ab771ca65026f7 *R/tr.R adfc5d9e0b7597d861a9678469dbae4b *R/transform.R e12fa84df7c4bffbfe3a26c4e1544280 *R/trim.R eb818de1cc52e955b390225bb2851fce *R/twostage.R 7b68c52f43ea65db0e8c6f925a5352f2 *R/utils.R 9f1708fb07673983ad7e6be149eeaac4 *R/variances.R 6532387d6ab9a50903aaaf60373a6681 *R/vars.R 3e3ec5d34770afa878c6e7c65f2f5e10 *R/vcov.R ee72f62d9ec5637e8f1b7362b24d4776 *R/vec.R 0e214ee70a78faf4678bb99216dedef2 *R/wait.R c6d12d6aeec59e858ee0b790131a0b6a *R/weights.R bd53bf3bf1a49488aaa28d43e4181f87 *R/wkm.R ef670a9b5a126aa296c23165736511be *R/wrapvec.R 55cb639b23c7383c96e56b7a0943279d *R/zcolorbar.R 86fd0e4c7fd1bb9bddfb47d7ca7e5be0 *R/zgetmplus.R 3a4ef8511d7e0e35a86c8a1d5c8653f8 *R/zgetsas.R d18d3711717119051a6c77f4610908b5 *R/zib.R c78cb9b37fbe5fe509f69274802fd345 *data/bmd.rda a2e8dd799e12b072efc7a249d90e737c *data/bmidata.rda 6fbd622666b498d794848d8c754877c5 *data/brisa.rda e47572d45f7df118cbdb4c6537abb588 *data/calcium.rda 106d52f7bcb49eb320b29536f3e5547b *data/hubble.rda b8e4ff54548ad3912de430c76856833c *data/hubble2.rda 9d5c14c6825247ef4d0ef4acfe355016 *data/indoorenv.rda 07fcbe39aa400e2d274be1055991369a *data/missingdata.rda 5a108bfe4a3c79da59ebd23eed0d92b8 *data/nldata.rda 91f0d5b7d6d148448b7e1331823348df *data/nsem.rda 
159334b6a1d017ece388745c2b8d7192 *data/semdata.rda 35ed5b17c78c0e48bc527ed3ac8e5376 *data/serotonin.rda 64d6fbe9ac20be55131bf79f1be53658 *data/serotonin2.rda b240e9ba831f2c46024ad095d71d5150 *data/twindata.rda 6d29355c2d2381668b48f533289a9a56 *demo/00Index 49cbb725daeee9df4bcd3f3218d15af7 *demo/estimation.R cdefb6eb610f302036b2de243a99a517 *demo/inference.R 25f8dfc7d3b67315792b61f64f6778f0 *demo/lava.R a08f2482ed1aa2c382c8e518ecc4a779 *demo/model.R 891ffcc55aa9fc46d121187705269df5 *demo/simulation.R c5820407bd929a32983142f1a21e4fc1 *inst/CITATION 6e005bd715141b2e0e3b9e00466900cf *inst/doc/reference.pdf e5565bd9058e67350fee135e726a1203 *inst/gof1.png 353943bdc7be1e7b960a39a6f3151f3b *inst/lava1.png 142883ae5125f895240b57b6de582158 *inst/me1.png fc920f32cbf64f061c136182e3884cdb *inst/mediation1.png dd5c946311f4359b326bd6ab9d012d17 *inst/mediation2.png 7b6ffb90e95d2678700214aa78fa5f1d *man/By.Rd f8e3420d7ddf9e5b98ee8b36eb5be760 *man/Col.Rd b1af3c10bb1131414d2a2cb7bb09eb01 *man/Combine.Rd 61ae311aaca910331d64972e0f036c09 *man/Expand.Rd a6a7d1968ee9d64d3e5b993af09c04da *man/Graph.Rd 185e2bc937f72143c0cf2520e5d6c3a9 *man/Grep.Rd 57b370a192f5d4cef3ffc8db9fe8b862 *man/Missing.Rd acb160f41f5f0764e55e4031c49f3d89 *man/Model.Rd 543a3c5c8aed9bb51493b3cf1b739064 *man/NA2x.Rd 03bbb7f3bc48aa5478ec0ba688cfd021 *man/NR.Rd e3e3435c6fd3ec4aafef404473982c43 *man/PD.Rd 43b2d9cc18d196d229a196b7ae262aae *man/Range.lvm.Rd 59623c19b395b35aefffc1c10edf3057 *man/addvar.Rd 1430e3bcc89c125ad372745707156d7b *man/backdoor.Rd e7e95df43364b92a679509086c8ff05e *man/baptize.Rd b3d5092d49fe344ecfdddc2dc0159820 *man/binomial.rd.Rd 16047192af5bae954f82a50a1cd3876f *man/blockdiag.Rd 8cd1a006a4b05401f14820110650a601 *man/bmd.Rd 30af5e935e4e346a0a9ecba2ce9bf8bd *man/bmidata.Rd b399672fcaf643917b85171b48e3f544 *man/bootstrap.Rd eabd4d9e26c438d9774e3dce1d681f8b *man/bootstrap.lvm.Rd cbcdc5d58164c1d28d5faf01316c9d11 *man/brisa.Rd f2f728c0587313c8db0c1870b3229983 *man/calcium.Rd 
cdbe152faa7797b3292a3885e0f7ad6b *man/cancel.Rd 5be6986bee085c84684ae0f2c78a5564 *man/children.Rd 0637793104d239e30fccf961befeec89 *man/click.Rd 898bb2629d8fe24fae185b8304775de6 *man/closed.testing.Rd c2baddc8f5089c4294e27d953ed638ca *man/colorbar.Rd 723c960e50860f937f9f92dfd39a8d88 *man/commutation.Rd fc3e122d0cb13f821992aaabc69f16b3 *man/compare.Rd 75605d3fe7d14ee3a5bb37439aea94b1 *man/complik.Rd 57915e5f1a2d5b39f7a40f7c764e265b *man/confband.Rd 2d5666a6dc7cbe51c6cbea25d303d4d6 *man/confint.lvmfit.Rd c3484c5a491a90ef44674fb535a3038f *man/confpred.Rd 6cdcb59dd2b167246f615bb921f7c30e *man/constrain-set.Rd 79c29dc5b1c5773253d96d9e1b1e7460 *man/contr.Rd fa7cd815048af0283d7547d700c594ef *man/correlation.Rd 9bd9fd58066941179a2504e690b1becf *man/covariance.Rd 62dfecf6e4bed96cc9948bf8ce4db249 *man/csplit.Rd 624b34e57266db19c5a6742c2c1936c2 *man/curly.Rd 3312826657798ff6f59467021746207c *man/cv.Rd a040fa84bce00149e50204487f8261c7 *man/devcoords.Rd f93ab6183689f51995ababc766ca8491 *man/diagtest.Rd 123b7eb66e6c708638fdfed4e9d34877 *man/dsep.lvm.Rd 4b0bf5db7c0be50882c9c5fb9414b58b *man/equivalence.Rd 2f55a23b4a0665f1536a246368239409 *man/estimate.default.Rd 0a7d15f1c6a22a704357d1ec7809c704 *man/estimate.lvm.Rd 563e34cc4f17143c32f6eb9541b78bb8 *man/eventTime.Rd f85872da97bef1d3de77770ecf1e2f92 *man/fplot.Rd 78a3104a3649ba232919f3afc1941244 *man/getMplus.Rd 67f938720672c7fed9fac04ec2d7d994 *man/getSAS.Rd 154fc5f7728778ae532621052ee6bdfa *man/gof.Rd d68f6594a617404eb95003b853375b1b *man/hubble.Rd 075d9aa117a3501d1c60508d8fff039d *man/hubble2.Rd cb471dffc2cb20755317b9dcaf3b6e0c *man/iid.Rd f67f2101cbe369f34fa6b878d49a1f10 *man/images.Rd 76e0ca4ac5656534f5b33df1146ca978 *man/indoorenv.Rd 10ca8d812f2ade6d60c38a640e4205d2 *man/intercept.Rd 86906000553c1d876b173f05184cfdd5 *man/internal.Rd b16e24b9b4062574a07c7d51398e4789 *man/ksmooth2.Rd f02a7f008d8d76bd8725513679292e78 *man/labels-set.Rd 368cbcf37fa99cee77995e66d6aaafda *man/lava-package.Rd 25b001072239aab8ce12ae6f52b6ce33 
*man/lava.options.Rd 39a9e86540ce34595ddae4c383481417 *man/lvm.Rd 27ab6ab711e8c82a0c1dd1a29a82bffa *man/makemissing.Rd ee9ae343f59ad6620ced7422f6f53d73 *man/measurement.error.Rd 556da354f27b0b1f4c96a27ef7fa80bc *man/missingdata.Rd d8e062b6bbdfa7de5d9c5ece24f7572a *man/mixture.Rd e6bc3582b7f3998d833ed258c3a14d68 *man/modelsearch.Rd 6d49d0357ba6da87116a599faf940434 *man/multinomial.Rd bdd49183ebc7dcbbc45542737ac66bba *man/mvnmix.Rd 24ad677894b70b2d252999291ae5246e *man/nldata.Rd cebf30fbfdd01962a81f5403c02849bd *man/nsem.Rd 875e20f9314df1bf0ca8d539d7dfb62b *man/op_concat.Rd 77c27cfed5e8d232f25194a63bd90b7e *man/op_match.Rd 6c6fa51e8661c673fdb434ec26c12d1f *man/ordinal-set.Rd 3b1ef7ffe3bb0f0f2c8723f6bfa6fa6d *man/ordreg.Rd 5201fc27fa2eef2593220b6b0f9c3351 *man/parpos.Rd 7a3b3a67afc2755df0b141c32328bb4a *man/partialcor.Rd dfaae47a37cc3c5a4009ba082f584277 *man/path.Rd 8701da0b3ed59679e48e101d01401f84 *man/pcor.Rd 808be5a530f5cc1486d5354ef88a8046 *man/pdfconvert.Rd 223b2c205a3d2a095b27c357e2bba001 *man/plot.estimate.Rd 143fa9da6a979b5c2f3c5191161d15ab *man/plot.lvm.Rd 855f4caaabb1d3736ace312c03b6c1a1 *man/plot.sim.Rd 51272dfc8f981822af7dc494c0b715e7 *man/plotConf.Rd 8bdbde4a1f919278add5c7ed51c2030b *man/predict.lvm.Rd f560ad2da032c7b4a4f264102c0928a1 *man/predictlvm.Rd 7e16cbfccddc2c13d8dbb9ff6803644e *man/regression-set.Rd 31781fbf5da9b66994663f6b0d594364 *man/revdiag.Rd 2aaaad9f2e1e799cd7b416e2dba2f43f *man/rmvar.Rd dd02aa60ac8de54f079759259312c746 *man/rotate2.Rd b37ad4a8de5eb670fe00d7a082533980 *man/scheffe.Rd 0b6322ea6b679a4e01f15d7287dff581 *man/semdata.Rd 70c657aac2d9b3f38e338bf00ae6c8dd *man/serotonin.Rd 29ed5f5ff7966b18d4d2786922816155 *man/serotonin2.Rd 87002b954ee9ea757db9b135b596928e *man/sim.Rd 31af54ebedf31e95169f6b5e18dd1b0f *man/sim.default.Rd 0621058cf58d86a561b58ffa03199d2f *man/spaghetti.Rd 005d220605f2297156939f24d7fd3ab3 *man/stack.estimate.Rd 8aa9020192187714eeb76ebc86433991 *man/subset.lvm.Rd 8dfd0a81805137c796b3b11376880a2f *man/summary.sim.Rd 
74d9828f0d6b1eee3c681c8a7343f96f *man/timedep.Rd 22b6ec2f7df041c76e3df1dab09a5813 *man/toformula.Rd b6152bb0398362f381d70121e67ea59c *man/tr.Rd 95cae60f23cb8c5d6a7946eb535f9485 *man/trim.Rd 7f7648b1439f40e220ead810e924c79c *man/twindata.Rd 34a3918d7cec0f56fcc7f09993ed9cf1 *man/twostage.Rd a01802573b017c6e2044ab4536d135f5 *man/twostage.lvmfit.Rd 8bf9babc19475fb597d04a480379b0ad *man/twostageCV.Rd 5c001d81644ae00d46c03e55ae9e2a96 *man/vars.Rd 9c72a8e15757f169f1e0464fb284e778 *man/vec.Rd 881bcac8b54feb60fadb12db62d310b7 *man/wait.Rd 523b0c39f210fddc5c13fb7ade41ffaa *man/wkm.Rd 9a7e9cdea94ccf9c802cf86a5b5a5b71 *man/wrapvec.Rd 6bb11d642f624d521be6f36d1bdf7fab *man/zibreg.Rd daa9175db1f86ba2dc6b10b16523377f *tests/test-all.R 018c81107999367b520f0eb513db7f68 *tests/testthat/test-constrain.R 2af635ca762b6b2fe30b5f981d29808e *tests/testthat/test-estimate_default.R c3b68a222a25da8b305ed527c464fc9d *tests/testthat/test-graph.R 98d24e2dc238c1aea2f518066d1a9801 *tests/testthat/test-inference.R 84235322cc7ed3d374ac3ad72f7107b4 *tests/testthat/test-misc.R 6b89129caf17088dfb8d46886fbd60b2 *tests/testthat/test-model.R efbb4958f837151a69c08a56030bd02c *tests/testthat/test-multigroup.R 7ee542e680302c4ad55fc29a01f63b93 *tests/testthat/test-plot.R 2f1318a2b02490193f35be848ce380a6 *tests/testthat/test-sim.R lava/INDEX0000644000176200001440000001667713520655354012020 0ustar liggesusers* Estimation and simulation of latent variable models ** Model building addvar Add variable to (model) object adjMat Extract adjancey matrix from model/graph ancestors Extract ancestors of nodes baptize Label elements of object cancel Generic cancel method categorical Define categorical variables (predictors) children Extract children or parent elements of object constrain<- Add non-linear constraints to latent variable model covariance Add covariance structure to Latent Variable Model descendants Extract descendants of nodes describecoef Show parameter names edgeList Extract edge list from model/graph 
edgelabels Define labels on edges of graph eventTime Add an observed event time outcome to a latent variable model. fixsome Constrain parameters in measurement models (identifiability) functional Add non-linear associations (for simulation only) intercept Fix mean parameters in 'lvm'-object rmvar Remove variables from (model) object. labels<- Define labels of graph lvm Initialize new latent variable model makemissing Create random missing data measurement Extract measurement models merge Merge model objects (lvm, estimate, ...) multigroup Define multiple group object nodecolor Set node colours ordinal Define variables as ordinal parameter Define additional parameters of the model parpos Generic method for finding indeces of model parameters parents Extract parents of nodes path Extract pathways in model graph plot.lvm Plot path diagram Range.lvm Define range constraints of parameters regression<- Add regression association to latent variable model subset.lvm Extract subset of latent variable model timedep Time-dependent parameters transform Create non-linear parameter constraints vars Extract variable names from latent variable model ** Model inference backdoor Check backdoor criterion bootstrap Generic bootstrap method bootstrap.lvm Calculate bootstrap estimates of a lvm object closed.testing Closed testing procedure confint.lvmfit Calculate confidence limits for parameters contr Create contrast matrices compare Statistical tests Performs Likelihood-ratio, Wald and score tests complik Composite likelihood inference confpred Conformal prediction limits correlation Generic method for extracting correlation coefficients of model object cv Cross-validation function dsep Check d-separation criterion in graph equivalence Identify candidates of equivalent models effects Mediation; calculate indirect, direct and total effects estimate.default Aggregation of parameters and data estimate.lvm Estimation of parameters in a Latent Variable Model (lvm) gof Extract model 
summaries and GOF statistics for model object gkgamma Kruskal-Gamma for contigency tables Graph Extract graph iid Extract i.i.d. decomposition (influence function) from model object IV Instrumental variables estimator (2SLS) kappa Cohens kappa lava.options Set global options for 'lava' measurement.error Two-stage estimator for (non-linear) measurement error models Model Extract model modelsearch Model searching moments Estimate model-specific mean and variance multinomial Estimate probabilities in contingency table nonlinear Define non-linear associations (see 'twostage') ordreg Ordinal regression models partialcor Calculate partial correlations p.correct Multiple testing adjustment pcor Polychoric correlations plot.estimate Forest-plot or regression line plot predict.lvm Prediction in structural equation models profile Profile likelihood residuals Extract residuals riskcomp Calculate association measure (see also 'OR','logor','Diff','Ratio') scheffe Simulatenous confidence bands (lm) score Extract score function of model fit stack.estimate Stack estimating equations sim Simulate model sim.default Wrapper function for mclapply startvalues Starting values twostage Two-stage estimator (non-linear SEM) twostageCV Cross-validation for non-linear SEM mixture models zibreg Regression model for binomial data with unknown group of unaffected ** Utilities %++% Concatenation operator %ni% Matching operator (x not in y) oposed to the '%in%'-operator (x in y) By Apply a Function to a Data Frame Split by Factors click Identify points on plot Col Generate a transparent RGB color Combine Report estimates across different models commutation Finds the unique commutation matrix csplit Define random folds of data curly Add curly brackets to plot dsort Sort data.frame Expand Create a Data Frame from All Combinations of Factors fplot Faster plots of large data via rgl getSAS Read SAS output (ODS) getMplus Read Mplus output Inverse Generalized inverse ksmooth2 Estimate and visualize 
bivariate density PD Dose response calculation for binomial regression models blockdiag Combine matrices to block diagonal structure colsel Select colour(s) interactively colorbar Add colorbar to plot confband Add Confidence limits bar to plot devcoords Returns device-coordinates and plot-region diagtest Calculate diagnostic tests for 2x2 table images Organize several image calls (for visualizing categorical data) org Convert object to ascii suitable for org-mode parsedesign Create contrast matrix from expression plotConf Plot regression lines pdfconvert Convert pdf to raster format procformula Process formula revdiag Create/extract 'reverse'-diagonal matrix surface Visualize function surface offdiag Extract or set off-diagonal elements of matrix density.sim Plot sim object spaghetti Plot longitudinal data toformula Converts strings to formula tr Trace operator trim Trim tring of (leading/trailing/all) white spaces vec vec operator wrapvec Wrap vector ** Distributions Missing Add missing mechanism to model normal.lvm lognormal.lvm poisson.lvm threshold.lvm binomial.lvm Gamma.lvm loggamma.lvm chisq.lvm student.lvm uniform.lvm weibull.lvm sequence.lvm ones.lvm beta.lvm GM2.lvm GM3.lvm coxWeibull.lvm coxExponential.lvm aalenExponential.lvm coxGompertz.vlm heavytail.lvm ** Datasets bmd Longitudinal Bone Mineral Density Data (Wide format) bmidata Data brisa Simulated data calcium Longitudinal Bone Mineral Density Data hubble Hubble data hubble2 Hubble data indoorenv Data nldata Example data (nonlinear model) missingdata Data nsem Example SEM data (nonlinear) semdata Example SEM data serotonin Serotonin data serotonin2 Data twindata Twin menarche data lava/inst/0000755000176200001440000000000013520655354012162 5ustar liggesuserslava/inst/mediation2.png0000644000176200001440000007505213520655354014734 0ustar liggesusersPNG  IHDR) IDATx{|%n6͍] fAP(EB+m-QbZ>E*@6 KBB}d7&3s9;_|39|9D"PDW@@h R@T 4JP) *@@h R@T 4JP) *@@h R@T 4JP) ЂxG =z4/BZV@GP)2*0r ]`ܹ @Z*''gɒ%3Aw^QEPْ 
6,ZJz/bnnWHmdnW`bܹsΝZjUnnʕ+ihhuuu^ZKRU cT׮EĀ5jw(Z~#GnݺP(4qDaoSNzV`07nLD"gΜ233-:DO$Ι3g[n]`l9rݻ#gMYe˖ 8077>uꔒ>#fy~m(o~3xիWSXv38-$ %%%FqڴikBhqЍW_EM67߼B7n[B/~ Cvv6;,׿+ZvSOt:҈-ZkdÆ 5kּcǎUx@͘17@uvNNOO4hsN YnB讻ڸqcAA1c2jF.hqoD=z4TWW#nfz/]wEp tIIɉ!?Oɔ>-[اm81!rBq秥EɆB  ^/ynBUUUE"Çs ht\(B%ΝD"ǎ #\,0A 0O>y뭷B_1G9 9r$B|SN=CӧO!##!hBC`M233zѣGO8Q]]}a"ʁ!++KF򟤺vj9Oxzi$ ?ÿ~aĜB/]^曟~iR& F.DI;HnݺׇBH$lٲ;cϞ=GZO<_ڳgOo\vǏϜ9b)S8p,ʟ3QF!V.ZMo~b|wǵY`[w x~iNN߿|oرcdh& F."p@Q<'Nlٲ%ѵTA ,?~|+"g:>x`zz:{ڵk޽I'`'Yu2d9+(jjj] )=l3P) *@@5jpҢD4  \Z$ … wAnrϟ駟{4#GRVxQntQA^dIyyYbEqq]̙HS\\l2gφajI gʡ@o۶ H vYSS#***Ν;SOtww߿~%%%J\P.,ٳ7l؀裏*++/\H_Џݻaҥ9 )AShNJ+֬Ys=bM_ioo(R //VTU%Q HԸh**F2OD+VZJ8pTTT$V>{fϞB .l2tȥ ~yyydZ h>ދ/nqiF illû3=h!d-@aŊϟ///x%#)U~О,q_D s1V將LyZט9B .\~ɓOcM>]Z]'@sq6CX>=#T!*(1-+*+U˞cxO6/zՋ|Ş0@\I&!JJJn.\cr t>CaÆ2hm2ك:x𠜬,YB:ŨQ^~evQ;qc1"2t2c$^QAgرl`0PrĞ u$˱6=iӦg?͛%gm6>Ta=ɼ".+,Ф)&t\C:h OжJ!P}=y|U~1捦!hEӭ1ݟ{Nѐ]?8uR fdJ2`Ū$S`xE *4gnjCܹsHpjmQeƴ_z*VX5ht[r)8z555<̡CNHJ! hȬ^OIm1Gcf{>}:9ɓT,ڽ-(%#M&j/#|j}8!Cng+[!옾3ےC<9Qo~QE Ixz@gp֭Zn+)ڣ#G!ʼnBEqs|WIwH @{}}:C[Eu +رQu{n.[h\19! LɆ#ıdɒ;v,X`ڴiRO,T?.ެ$d.X^ #8#t>Ix/%7 @ݻ:*-_%^1sDv01 zqvlڍLWWxz&dzZ{3o޼zr׬YH@6ZW>lnzA<'dH %XTR`UOWMT3sΕ$wfST!}Txk׮?>oΝ4 '| N<4[`„ 333;;;}رӦM H Ŵh~rx490'b1:%//ONٲe8߲Z.lCN$iݔ*p<18 \u֭+W|7[o5==3ٱPVQQz!K+!F|]jK#=sLnҥK?Sz:($F) ? DBljc1¥# TilUV͘1gӦM+..޵k`֭v{=Qk W\,mÃ~W_}U*z^AZ۸[WݘH8f>4WzW F{޹sgMM?qF3k֬2zCя444=k   B---T|2|a0fΜyСɓ'׿޲eܹsGkڏ?wq\odfO|"y_}Uzzn'h4k֬ylٲo߾g}h4~'uuus)++8p%s3v]]]A@pcccoo/+=zp8 򲲲۪`08rȜ_xWUU9l͖ vWwvv qƅB!P 8pBd̙ ZgϞ&L4hg^QQ`^YY>ǏgφtVt:}>^7LC%zqM&SFFFAAD"^ K.>}fѨhSAAAuuuff&y_m(V"3ϟWޢ$ q&mrk̂M ϒ=Ϟ={Æ P(x{-Ќ9&{V-(( " \.b6MF"M]]]˖-[|^onn ._pZZZ|x8ZǏ h4`ŋB"pV,X =z4 9bɔ~Yv뮻f9 %K3ҥK&)=== 2d2l.ϗv5lݺȑ#~d߾}Vjwy'yHd߾}ϟh4+,,5jTOO?qY,P( _|qr|>DnXt:-ZN'A`vttt\uuuo~k|MNeddHh zzzF#:A'3e[1E}K)+ H@ogNZ + e˖Yf1~*))YlٳgϜ9Ci`=z4#G9r$89p8Ka *gI2e O8OYdYfسgϾ}>5!  IDATZBLKK=ztIIIkkdx<999ӦM{srr;{lrcǎ1B;d! 
E=\̫ !TQQaXz#<0g}:7䤥,kݺuGꫯ)_y啎H<<6 233*ɗzHHQ0YVVV4>ͧ?F7GQd^B*/1Ǝ]T䣄=/]cn(Dqq>"^߼ysKKٳJ?,7<~7ғtb|(d R}JAN, ՙm4uFF[4pJ aCCG$;.~_OOO0 71Εt!-nù3&Ģ^4*g$ĺ5pٛ^G'?Yl `=eL^Ν;71ہQO(wrlyEzG#D!Ω)y A&fxl=1<.>QTV}1P=G]hDqS Dka*ά$|عg{i$[^}^{<g%s6 F,.xF9ލUhbۙ$_ρYf*u9äbaӍ|3hٜv˩Y2gmii'UVVRH}嗏`0<7p?r!dٜN'B \.Wvvvggg$X,cƌq:FF=77`0L0F ==]rݙ(6G?Xڔm=yY0~>О9abŞ-O |% 3@o{H@g?G3FYo3|B֭'O6mZOc;?~_c=vu!4MYYAھ}OVVVΛ7oРA?N~ֶUUUٳ̙3SNE5 !7x߹svl;233??~ʔ)oldDDТEɵ }N1 "} YԩS7`0 FJSM*!d6f3Btjva7!C$~ԩS>fyܸqyyyΝtA---'O$\{z]]]-0aBUUUff)Sȅ#NB… D H6~xɓ%%%1*!j9+p82339Eiiiii)89*Du[ޟMy$56,6[ciHV#@ӿʮz;󟢺<-+PF҉Ԍ}8 ?%滜^|)r˜S! gm{O̶g ns}dxуVoپKZ{~%r LO4BwT?Or{ap}m[J\_r~6M&f=s/z\F1] HsrO+uq]5ƧI%qb&)ρwE:!X-CH3Zb9Q)%f/<ι$RҲUg#ˡfVFĠMs c-8 gABx H(G+3:XZdƯ~b;+*zD/ {H&E!sMVOM-Lѷ.h:(',JCʫ3s3_J!Eí2Uy~s/9 9LBivOc^G5#K u ]CڏsyU ˯eO1Hl2Q chIYbDJ!~PYp!D/] t{N`I{b5)Ρ^6ž5)BRSSH ?؜I0C-߱17DH2 OIB RUhj!2)и Hr(}[ji Ɵ|OӱӉ#,Q=H/ʱ`dBQȆR91QemO MCx#13 WH&gvPNH0W9]Lݑh?߅ $ż*Ӏ$|cvҩqh9pRJ3YX ޛư#r^ H_N[3 #Jǜ֤ٲE9;?hWۢ6VQ_!%5>qc{-}h}P*4rEWXiR 4<<1_9#:$[Y~q2"Q[u¨$j_bs7W(asK2朮@G*3Ϡ!˃W\f3hu GwY aeVSEߧMl$sSe$i!w{ =qF KlCj?kq1~x2(НR>`\G5Z~#ur\N'-;%+@ Qjzƍg֬Yeee{ٲeSLzgGN8Q^^.%]>qMZqFs$TIU`ɭ߅8s3bFcE( )[(I7}TV׆CBŋ{=RsssO<貲?~dѢEp8>5o}g cj%.-b(8Gw);nΝw}Fp8ї^[/;XK n sEO y?׊J2am 嬊P$-[̘1cŊP(4lذDWr<_9(ٓ`*=H^)AϞ={Æ ߜѣG_z%C?^`2ʠ{6m(Aq?əșF&OF>(ǚ5kBNB fqxc)bz=hcĉ/3gɍxf4yyǣuPuĂ5X9Of1K88Ү_HkNS*R?wN/Q BJx-BIWg9%95ZxVWX,O.L.gϞB.O rss}뮽ZEk%(NB&cUBL Jz&/bŊ]v͙3b[Gpj% IteJHF#ImqK*]DHpuFFaP 3FߣtWOѝ9qezG,хITOY7nx ЀPG}c͚5 JPi@B@@h  ؀l  |0s б@8B*@@hACf]vedd`,7 ͳjbP<~0~YYYA`̓ Sҏ߿?I{V.ϔg 7tyH8'M@ w𪪪1c y8p`ʔ)3lnn)))guuȑ#1YYYX>??nXD|{L!Ԟ#'r-x|_uyΛ7o7|3 ?W^yo>`]]<_z{H3ĠT 4JP) *ErD!fСC gvvvIIIvv6<XZZ| 6 [!C`̳pĈZc/\==c0홈D"B*@@h R@T 4JP) *@@myfƍg֬Yeees۹sgMMM8&ܷo׭['O|a,"wW_}+OPEEŹs4 <,Yr̙7xpUrڵ+W3س<B/޿?fϞiӦ555sر3gάu۶mpUԩSgΜ6mŋqB ˍ$qO@T 4JP) *@Kº?8ѵh$"ѵG̘14ѵG3pEbϪݻw={nJtEb{Ha~ɒ%8Şـ+@{V@#&Mt}%16{C&" ] )$= W 1SYY] PD"PݯppX7%|>F4Id`0 k4^$@o۶-33kW1AHdVۚ5kVXT] ` IDATpg9{?ᮻZ|#nЌ3v5gΜUV7:6 j] ~=G#'''+++ѵH>ɤI>۷o---3gٳ_\re[[y\m(9 9yyyBRzO>Mx nF+WZbEyy1c, B`0 
xr(Cp8֬YCĚ5k>L?^s͚5!2%7;={UH'=#^z%! b$CZgD_w/P؏F_vn?d$nժU ,((~g ̜0a;y >{ܸq/>|СC8@_/X`~!C g3Et{3gΜkĉիW{<9dȐo… Fq̘13PL&2~AWWWO0y͚5C YYYUUUTjPeڴiUUUcƌyG)nD"/K/D>u!% N4o!k&iڵԉjpb%MyA]< {~kw?~b뭷H%-uŊ...>t́tfnݻ瞉'ܦ̺uzɓMdɒ3gμÆ 9}K?7?0{&g ~zĈV!dXw˖-[vh\z5\XxjAzÖ-["H>ѨO~^g&Xl߾)A;;vL2e~^?s cQ{ 5ky?OR=e$iiiXrKs2x#Ht ԩSyyy0N,YX"++K͚5&##nӏ23Ji{FSSl b۶m *:zfD(;4hЕ[P؟%/Gmmm^!p )|oiه6!jLty^tw3(-h4lsPuMt-@A/rlvk@nnnnnjkkpFA>=!9ʽv\.F;OKKkkkc&.C_ůnI1h7nxjr4QQP.\cRBŋ{=@>|ٲe#˗/'R`75y5p8ڶ!ёG5Ȍg' ~gqz!Z377։{m*T((( e[fff8VV(皎KMo =-!bZ5Vmii0`Q.h((аN494gFj/iU?M8<\yyy=pBnO?4uA)0H2Gj+̀@+.lxCM&ByR\> [M&SlfjtdԼ[ &) Ӳ;;;Z<*W/IǑpAFFFFL-MrSbI=A-5B"s U u BO~ŒEssh% v l6{fl@ESl @>_;ÃSHL&H3++ae4rV*TZIZ6 ⟿!r}),KNnsWL%+[F#: x uvPvc'J/O 5s%Ҷ4C,===#233N4x$H7>D'FyyY ĉ~(@rJ^Ό.uL)@xΚ;KVn!uŒ!HlP$}f볊ujWI 4f@Ey(BiPDja&jXEUIFqh.& _O@1p:PxzV˳7 `/ 455\QGvˀ1'9ʿkP$''N}$9Z)Æ hV 6XIsssz寥[[E#//=Ҍ˾e4pdLCɋהm A|>bCAX%J2#=y/b/"Fmmmaam{:jBss3}ڨ&b, j*n;MbbahXH . Ng)AB~؂R h<;wf麾 e*UނV~Nw%j5b/*^(TPHXж@Fe:}@inna`@_hSh 444+ h{j%?!d2BHB> EdBX v1׳$#Kgg'!bؐ3=!{sq@\;!E4rss[h6,a*AK@ANu=s\6 +-`:s麾 e Ox#!}JBq),BЕМj`!qt=_m%8"@SGl-FjS+i 1\rsFNDdN#ikk.B! $&t(̽NiZ`\a@Np8,DxЖyHBA FD؇4wqDSEgEo(Ν;kjj%K9s76O{X"++K͚5ӈ#^~edS`j{jRlP+dj"B"!D(>#4mZ9B5`7Gj@/]T"&͗tDt2fP3 ===VeDG\Z BD3{/' upfn gP h@ pyH<O$A,;u!пo/]ݕb@39T,F1u>߂{5!mLH*4mX9sl߾=~޴ˢsUMVuGCD MjADEZb^ңxo??W^]TTO jNбlAφF@"gH"v&h4J;' 04G۷G}衇n>lȑ7|s+jBFhCV,`74Bl6KiGbZ~x^#W_-eAJr.(uy\ɤ |-(] sw1v Njmυp? dhNg~hQE Ik8!D.\PRROƽbɁDX"'bX/wd2IP F P8Xgi?yφLKKf4Vk{EB+Dp@xW֯_geggoڴ)uJ]I˟WLlLB탌xk䙝M_T& -@oڴ?֭[wkſNjv_Y 4s(`Y=J$QΉ }9Zl6rdu!I__~?y"[O,`ϒur'eeeuPi,UJ 8:ݻvӿm;"% ofiӰ.^Y:_Y,T+i{†,ye{{{Q,UJ ~p׿/~:vP!h4.  
B~%# ox<`/Ǘ-ZhBn{}'Ox/JgQ\*(Rz&;"D%p܀_Z捲2%"ZڹkY:dGh4XF;U+@޽捲q: ~Æ Atwwtr,@ BZs"yyy=x<3yCo喣GrM=GD)233;CyQ{ $z> Knݺ3fA0DokDoŒ=>ӽ==(/U`Ϣ< #O$Z4b8CY%7!il4hӏjqYWW'p}>_Տ&YzWeЌs#7Ӊkc Zk?Y_uFR$!k$EjvtߙI%ph]VF)ӧgggo޼y:"!vNtu@3<9J{j$ۋ©0@/\pӧO_~}N\.eքq#?0alRY8"h$^CmpeZ8rM4 !TRR+la#o`  ,I=ŢwU2377ތ]>;0fN8d2ٳ!txWG(1BH.EO ] %{iDѤYYdddx[bPp̤ٴi~~x񯓚.,Cg;|;nY`bxkqEΟ=ot{{{W^}EEEϟ8qbj*Bx(hϟ2Kcv^O*, c4564};~xEE鬨8z / jtEx؅a\sӃ 2?{I1EA¡sp΀&hi陽Q@ܹs˖-F2Fںu;U3UM79+=& ol6{] es't:Y$B`0\jΝ555?ƍ=ϬY .\H?RWWw)T94HZvkyT2<Q 9!=\% ADWd^D]AɜX6{lrP(xC}~+P% Gu`F)ޜ qދFa-nϩzχ*ji,'@K*OA9rDdVX1hР-[̚5 {q%-4C g=$p tȨл2ɲZ;;.(*#7J,%tRyjQmOm8]ݽ?r"2\LM[7HtB(Pjr~MQ`e}}=F eg~? ~H#BV"簩ߜQؐ;]UJL?|:fN555?uUͣ<A%rِs9@¼B|p\.&ۻ83A7)RDɲ%ۊxI1Y!C<@l/|FCV &}" dd1lc^ɲ&ꪮ{Ù(RɮK7Ah:w9ΩyэoGEPz DNQQ{#(Q}0>u6:==}gnܸ+c=4n K 7AfͤtpYX]]b{l6|@Hunz,I+Lim݀ \A?DEn\qǂ(?ɘdyXm[,h{T*K/u>sܹ8U՜{\kg4Zw,V A?^!Rn8*P `! ~ ]-I#!\%I2VWW?]dlE4؜R'@PU5I W+?*BQXn'hGz<&K% lh|&ku=Х]~/hZY (3W8u_O'(O 1] _*8$q(]:> ͨT*)u]V_HRVIgB }:jAi}.geF\b 躞\1 +N7"<#8d2A,7'9 Y}vQ AF,ᙐs=׃RIr?`DQKD,IDAT&aPR&Iu݀}ZZ9Z 3PnPҹj|ߧ|ާаxƵA9Kdh5yLV } AoHQۑ:-rQ}j'Z"#6f]HRE`ܞn>`CX}5! )}h~:7>՚ \8ڃh.u]2p"I2-]anrb1 zCf|;tCuӜBBAD ˲$ qd@;ƧD`"X[ui9 A^DA?YЪm;!q#˭Scp AO.B&m?K.H)~"d jABUcP AU>rG <~8IBkDЋcDgsN{BH3:"v`5-VӸ&bٌ:m1Y;9:Ao9Ϋp-Oސ+ w@ԦBFo/ zɷ|"FGGnxh%U-bUd2f7@ \*K[eĐ6 cq+=A `oZ+=J7²eYa]㻌^mܐ3V_65K|2ݯ}6f^@O ¶#?9HN-yUW)TZNB!< ʂ> l^ErXJ!5KQTn ^*L$Q}>ԪsQG"):-144D|!Hޞ/zuX2/y#:NS"SJf^!A߃g?~|||uL###g̅J z 63rǝܼyѐJ}0ub2ըjV,F5Ni A>AmU KaY㎪>}c3c*ccc; 9P6Zmv91DzN c AUF B-8ٔ7_.!AQS-e]&娣O L#yok$HVF0ˆl#}pzoLњ;ba"G:H7jzhg %9iZ5gِq_n/..Y)6>m $E;U痖" ㋂Bz%p_#˅AҭFCE(ODK}/HDy晼TT"㋀z{r*:8#{酅QiMasGD"C;AFv]׍gjuu5Ed|:6>\5ﵖoWU5kuZ;i'h뺆aDR(,슣iM.|)(BHu49LeYzy({\.s~l*_|U@J FGV[BNvEEQ[ݯф;y#N FȲHfm}Q;;A/rkZwv$)WzQ8h_x0ec5~/ENV z+GyYy[ay3_L&\q?z$Ru,;(i;֕7YYY:=L4'p7Xֺdwj搣!|x0|;b[wE|V˶|&M $Avj. 
j4ghcazihLanުQƪ!h46(g%f1/5W7/uz+aS"RU59z%7WAglԱ-j*$˲Q$#Y\B:u=7]r[JDӱX qH!;XϲZw-KnL2;@W(Gr\,˾OLLD>.i'%Rc*k׮Z;4>$jYՀ;x8mr3Qѓ|ZF(WAeYc)+W~ _dⵯqVis!s=j'غlێՕ0J5[$Iٳgjj*&woA QDz\yANNNF Q3',&]vV1#O2(}!488l6"3(JO AkfhlYeh"CBYUO̱x ;u\;!YEQvZT c}Ju,=rkp\;mv崞>QgL2|(jϞ=w܉p~NЕҒyac!8cL|?İn#qlq/QYoǡrժ}sNc1vi%hl_8cn`g'/gz(;d2k7_'*B(x L^ߤgDz!5Eٿ[y>h'/gb3r֊ Y)Gz[l"B~T3.A?Q)2u?n fpHM}?M~vř3Lxtka~w\ׅPlVxo}<j?i(BUGڬ+Gˍ\.l nQGu>_E_gA`&>+&rA٨q 5A U\EeP1foQb~] EHA'Vy6R]ԜD$O5LaٕL& Dv(B몿|qv.+Bj 0`>s挦iǏ|uRUUwEH ʢ5?eBj+Z*J!~i&qm#A2,äc9$)|D M#a ['ɓ'_{ǝyOg>^xaMQ7P1)Z|}A$);s`˹^tm_g!U;VB&d̖iWj.A4-O m\v؞Um$_maAHcܮT7FH [ðmi٠>gL<ϟ={###9rϜ={ɆT*Jmjq?I4EQpgY,a˲LݻTUFqEQa|ZM&Iyo%2^]Iif2EQ2AxR{nATUeY֭[e ~"Ҿ},"B4 L/JTJEeYvg```~~^EEQ,EQx+e493Mu݁۶UU$u]clPWji!f:C[՗dG\psl,u=˲7}qEٶ-80(E%r/nq~+W˺&HGEQ,N7 [NZ-LNNE㖖LEQVWW%IT*djkz.IR $ITX,r\.8I˲iZ2Tp˲XR)/qfSE*4Mw[uEQ4N!$/4MWZ:1 ss޲Av;AI ?{-yիWEQl£4 <_oߊF?=}[]Nwqkj}+|R?DDoq܍!IҲ,$qxDp1ׂGBAx~7?$I]GGGsLPŋEQg}͛KT*.\ؿ{c=0 >l8%᥸iZ^^uD"aYNxd2:i$I:mƃ ض8z]l6KDV#I2JU*]D.3 eYUUt| J%x}˲ݻWWW4,0 $Ex [bYyAj:Mxrx<6}?cvR=zJcf;ol˗ggg\nTϸw^Mӆao|\ţ^C8 N4cYq0(n:v n$x0жmekpn 9$IzW*ApGe<N$IZ Y%j5J9d2OID"0p;Ӽ\.K.u;? MOOr]ɓ~aA3Ԙy|}W\b|{{7X&B(ߺukeS]lgV:Len0[{ȑ#QDzSNJ%My?{/uP 5j>>???@@@AAABBBCCCDDDEEEFFFGGGHHHIIIJJJKKKLLLMMMNNNOOOPPPQQQRRRSSSTTTUUUVVVWWWXXXYYYZZZ[[[\\\^^^___```aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrssstttuuuvvvwwwxxxyyyzzz{{{|||}}}~~~q;IDATxݍ՝,Zj[X$$b@DȃK5Yb4c$bW?;5Ͽbx86p<2 XdqȤ/g3 `bQfo.Ӷ [ݒ?o*u+)|O?ѳ65ؗA܃?󅃗ӿ9gL=yٴrwn97Pa: =|9CUOޚ?iMm`}yG{=#M,i~xxҫ (. ֯7a]}^t˧Gj[ܖ~# g:$i}zhtMt$\6xR \ok~WZu?|nTC.m rJ+ֻR3_,}E;=5w`F}e_n8pDqnmxfeE i`m;fw5d݂.k=G7 {wk⭼7gU kmZ?l:+/ͧ=o=.&wSu@֚WocިY pxݦ7rpм O&I>pt98/wn7MaY{\ BT G`-#?p.^({ ׄz\?zzj^+Xsa(N=ﶷ=|Wy`? CauO0+la~}#X=u7~Gh VNYzjkAjǿa~jNNb}urv?'R=o4Z'fX%C_7Z- 'VLV˧vܷ1'nv g. 
I^NhoS Žڜ= BFck:]2QD wN09{7?9ie 1ެ6č_1Q&pwx>pYܻ|[pyRLU| |_e V*^<^-V] `aڧ~mN}+8J?Op%Xԩq$Ww-EW&>iXBokw{,*zjk}\puU`Q%'U`Q,u͟T޷@N RՏ(K:V%+"վB7o޳t,zW>޳9,9_ːkZ -q-&w+Knqww+[N i5=[B\cWP+*w.gWM|*{NמW,V ?_7~{O'\ۑz!BbR{X:,$Z^nks_GRT(v;$w`}m,87!XXȷ/g`X|E6/a ,.\Xߗ+گ r@ۖ~}~h%oRe ,V}N!μwe,;-W| }=ЛUL( V;x|j=//`ZaJxyl#p̗乮juݯ}&r~AO5G'(vzH>A/Ƚlڋ\mpf+ |qEUȹ~iGL=1a^a{N>)eV]gUi;Ty_M/6I40)x* I/_~ց'.Wgx.#kI231 'f$29aLJ $ݖg58`lJ&0I+p\IR"76_b \_%k4|K 0I7~WILm,m+F&gISoq71X\K0EP"|}% |% +@L}$`ʮb_)Tr)-\bI_a:PBb;K 0e߸Svg4LUoL5C0 T)%SxK7cq3H2=0W_YJp1& ++^ qD|L.>2Lmg8 ŢWt=Lp$E^4x|0a s oɽ4܋s\\,L47q0[O|P}Fbo`)h8}`·96jzY xk&om߽ m`·y:20_Sx,">+&|g{VΙN6)5M'R͡,7| U̩SSꃹU\k #[xZj0廂8[XO=';oc6\5emM|UPerSwQ`1#!b+k\}$]5!fM=fQ͏osW"|+J|8?ԧqh+%>E Ln_ ૻ(+/rS꣸\t~m8]|g$A.6OLsA..ofks*xTn(|}Tq~m&k _c5}gpË&| 6 #8l1V},_ _.-/g'4S8j*U5]xY?gick9gL=yٴrHpSwGp9 |s5cE{^0@GRxҫ (.':4܋nQmKp1<~|ä?šYOp yh`m$K 7 VH⎀HQ^=+KVi :»8"+<%g-|x2FUx+y >~e0|#+ fhqٿ KfzD^k𶦦۷o_[[;==w޲8@ puuHKK)6.;`SSSRRR444dq"SSӂ ww/_fff,́yƚ_]]I(r\TZyǏwڕbFFFEEEEX,;;;$ewFP9V^^.H-tЁɽ{,N@ID"}:g/--ptt$rv,?n``@_|==={{Ī*"d6@geeE0ݻG(\X{ۣG HBaRRQǎIgrGuuu]](޽{WVVP ֽ{w)b)JNN|*o2k-lmmuttݻ' Igh=T_D"ѳglllHy;ZZZ޽{tVdffx2 EQ|>)%%tCPoF:Gi׮]޽>} YZCr8& ...YYYWD"Ȑ)kl6I(>zw@^({K=LLLD|RW@ xENHi;ݽ{tPȰaqnݻW[[K: mۢАt)vppxAEE,良֭[7)}...酅VNvj۷'D\n>}^xC: ;(c0|;`َ555X< LٳN:",Ύ%&&bqtS___ZZ$!!A Ǐ+ҪajjjmmP__O: E)[唕vjmgooXYYI: 2UD"IOOڈ;55tPvJ􊶜XleeE:"(11‚tP^2Qڂݻ233tP^R9Z[X={RѣGPEfff0@ HLL477755%"r7BUUť\9iiiݻw'w@QÇ{ )lkNSSt`kk(Hg#333tB:<رYBBBSS,sӧO\nǎI?iii<tP4 8ihh(**277'D.8::&%%F+yE:x...O<),,$UNII ;///''tPV9^ªp8555PT&[c]An߾t’f8iB\ իUGp<{ XQA3Ix40q 4AMP9@T4AMP9@UNG=ea3*Nàņбڛj*Bv?k=zz7rBTzb1EQc݄Ž$*xf'y5\د?5(^0<¶_h۩ރ:|Ͽ]Iw+EQ[W2Dtr-q5}zlM| EQ_3Ϛfm  EQWß\o~~+ȭ~[wRsf$|968 r*JRn/{P5zZ~ hknoҞŢl{7K9mWskx`xDzbnIe* *Y,I?\YYhKYnkbÇ^_,k(Q,ӓZ9>9ngcau Jʞ<,J- ;t˙SGZ[ᢑOzO>x?'5֋ 8>E'6=:˩n*o2֞o]?ćM1ڛۑxR%`:Zg9] t tQ9&(⨰%bɅ#ͭ5uB*ϗ\vNt8a.{]NMX__ԩӪgt d}V<{Jhh( EEE24h&ᾑMÅP=& *hr& *hr& *hBN666ߏb1q+{Xd)СÉ{T(H$@ 455xsRyƎٳ_ڵkǎ^^^eee`KHHX`c:wL:{ŭY&44Аth=TRt֭[CCCIg/Z椳@+rÇO ͛X[["}"믿ݼy3,X>]hѢӧO+dPp'C(kמ9s7{ҏbM6M( k 
ѣ'NxdϞ=n:|0%"##?N=z4,,Sr([q8Y6i$mmm//'O꒎h6nXPPWN7h#GQFÅ5e˖{*m߼vy[YYA(D秧~Y"==}ԩ,@QX$D"ї_~٣G͛v:{;wQ>>>...k׮%q,,,Μ9w]zt9WVV6nܸEyyyPFFFΝmll1b8J rXNN΄ n:`Ywԩ(/TJII3g޵kWYC_h,J #޽"O4YX> -[viMK5o;-H-ZY >rHXX'Emٲ%--~SQ rɮ]ST߿?***88]v( TXvmaa/frt?~<88ĉ۷'@)ંH$}*6Okiiy{{萎P9L׼?@:1b 333#@ 555?gϞO3Y0WmmɓǍG:ʚ2eʾ}IgPX0TEEܹs7 7o^||<, &***;v5k<==IgQ.eee˖-2d, ?00(]]ݳgWUU;tEYL8q޽z"Ey׏?gʔ)(T͜9ȑ#666(;@0}>},\tŁa?~cǎEQD"YdXc obm߾]GGgbtE!3gLMMIg-]vڴiBt k]t?Oxx,^G9s̱cIgcN:믿a'c;{}BCC555IgWx.cǎ:u k |>̘1%TxbHH,ܮ]7V5w֭C-(55uΜ9S-ʡۖ-[ۇ{ʯI&۷gϞGիWG5KKci?|4H$~P(ܾ};^&LLLΜ9?FEE 7p/dѢE<[(m4Dg϶[d , }'N1bČ3Hg`:Tl )S}WS0*GǏ=uTY@bW_}kd/?~/fΜK: ЄbaiAH_EE7|3fYnK.ׯcc#,{9RVXXO?8q/;xˑϟܹo߾I&Lhv:,, /;x #5n8pnEQFRWW=zɓ'H`Tt<~x̙𚧧wHH8a$$$̚5+$$}oqqqٽ{ϳgHg "sssr?gҸ˗]>SL{$;;- #/\pϟ755uQ9,MZz-,,uo۶ rss'L/888.\qѨpT}ׯomr """:e&V,x<# v9˅Y yuُj?~ɒ%<ѣ!!!!!!<˗/m֯_ѨUblhTh|ӐQRoРh rJi9ir[wϬOx|J[K>}vhJJJ7nhr5HLGϬOHSglڴiذaR ^9[l <IJ+Pձ'w-#'A M;Km4R$&DǼ]vHҥsՉ'v$i*xVvKAq .XXX, ƍU5D@V 쾗u/ϟ?߹sg-rf_Ctl7rWƯ\|9 2iҤ3`KhL]un4ߥVt-hSVVd7qԗ&l6׈lx-qbPEUUU9::3״y1$=Dl|kPSPP`ooTCK$TDPA-==z|2TH z|U#55UES(NTTTX[[O٠ۭ/`? ?x` sVk E)5SSS fxTYCD"`AݻPo. ԩ/yUu Ot^߆ۮ orux|à_4Z)qVTIIIkkh$SPP0xL%l~~Xws6CQH(iUUՇ? 
Kr?˱,k$m1宮SLLL u xvn\*}CQԃk}ʡB+իW_+)X,ZgAn6l[Gsn%D>ϤI"bNݻW*Gh Eٳg=htņwniiI:mڙkZb):ՙ,sbKOlmmr4{9=z=]ּ=#O-,((T唙9i5ɿ aǜ|G: Br(?mBi<ł/_QZ~xpYM]UsΡo!r(o<]X-h$Hp`iUL0w\_@l4VX̡ ^"ퟹ*ͧ}\9oۺ#>>>4iժUavIېјQlvTWW zPثW/I*?cܹ2E%bꉢ\HIA.?~|Μ9Srk4JkaǷ뻸hhhp8MMM׭[B(-<==u:]h&0ׅxazܿ*((lߡ`b3]#َf}9{g͚m66[q.\Śhnn1mt a_T dq_WfY^ŋ7N&&&s|' 9P ӋջN_reݺuzzzvIZJ1g9oڰaî],EFug[f上eR `ӦM۷op׭/ihlj'U_(ٴi_7o۷رc SN-..޻w/+,**jݺu)))=sӴuhƌiUYlҵkWvյk&''vi묡mвX13џUY)WWM6cf1bEQ.\;wD"9zTk/^޽nP[3qy|WeEHP+ Xnnnwpp M:x۷-zRFMF.K"4*7槱Ν//++9rdLLLvt޽V ԩ$rޔÜz|>300ի.鰠bbb_d.kggmll[#G/Z/XҥK&M?R`iӦq6NGbѣG嗙3gΚ5 KڀDnnnjjj]tщv?|xrrN f)++9r̙3cbbd}+##c͚5bx֭2=(-r\reedd4dȐÇ]t9qĒ%KfϞl2Cd EEEÆ (%K8qttt6lؾ}B!=%`==׿mI[W__A۩Ar}V.dɒߺu=>>`Pbں [l޼AAAdÛ39I;vxСwΞ=GׯҢ?`0`.Fm ЧOhOO>lΝXY ob,MFy󦺺{XX8 gP9 eeeennN:;Ι3ܹs1bDbb"D 7p/:t`ddD:{5,x+x<^@@Pt0D"!b탬CCCiӦ-[t"`4TCE4۷oTT8 H'B0X,fԊb|}}ccc]]]/^H:0 he#G׸\EΟ?9|҉Y|rܹ3==}|>L^t,?u588gڵ yr<ry ++9rϷmP9 CQ͞2eիW322<==o߾M:`(y>pC7R+',`(弩Cw6VN3Qiֻw77ƣP vaFu \]]/\@:A4\SYk£rJ*GG/_nhhaCCCҡ@&0`(]>}?ǣJ4|Qigll~Iq@ʔn@ 幰ʕ+))) 'P9yMCCcڵGPPnn.D X>PJ^9:tYf9;;\P9ӧÆ [G4Cr+W7/1 ZvxK.=vXpp1c={F: 0f9cjj_wmݺuڤCG`(Tοsrr4h+ I'À`(T.]D:|4Cr>]dIDDD\\ÇI'½ݼysffիHg(,lmmO81e @ i57nXYY 8ȑ#t" 4CrڂfO2%666++?$( Xk߾}ޠ!Ha@0HKޠ .>}5kjkkI'R^Dt 1`[[[Cb҉*paMX,ĉccc =<<]F:`(T/]4,,Æ KKK#HQPBȔΝ;Җ/_nllqFҡ4C^ w~Y__߱cnٲt"`(rhyU0q4:gΜ1)&`&wwX)Qffuuu7l`llL:B,ڞ4(((11qܹCr>STTmT@VCGG H:| GAW^]__e˖N:hX>&;vQQQ 2dΝ;B!DLQFݸqO?+ r rZYYY```JJƍ ;F:{9;;{xxs.TCr@ff+RSS-{v$]"HAQQ=Y 3fHYӼK}u$^l *hr& *hr&&H$,t `?3;'{3q'YO0UKrH$q8)A>͙B=RlCn1v5h-`"Xf'/iݧIu1U^1 D?i7q:*ͳ_UfdU&B;vJ'fH rqZ D$˴(J5%50x]:vszk;b&B?e:N?ճcWmk6Q9LʁH$/kY GoHjTr-_fϴ՚(6gO^铎r6)Px n o~Iir DPH&BBB0**P9P9L$ (i&ŘA0.BB0**P9 rxeE:D"%zj55pcccq*0aҵkfggo޼Ύt`", qݧON:&""""DFJBB{E:A0.磝 LYjjjmvڵp/0ˡG󣝃E*|x61 P92|rSN鑎DP9L$ qaM^|nݺܟG(i&,Gv}5k 4t%{9Lʑ"X6dWo,paMZbbbV^N:Ø`"TNeddY?3 4rڢt[lٳ'87i&BN}};.^z!Co&B崔D"9v옻k7̄1 D}U455Iǁ˜`"TGzɪU$ɯjaaA:|4r>,000%%% t(D!8|A0*}"""\]]ccc}}}Iǁ`"T?ݽ{wŊxE\Ø`"TΛ?zz"ZaL0^Dݬ|˖-w OHǁ½&*5966}P98~iyyիWF@0Ƚ{/_޽{˗/kkkRF߰aCaaaPP8  f۶m׮]۸q+8 C 
<*66}P9@FDD[mm͛7LEznIIIk"""tuuIr>/^X~}aa={Ir{쉌\vI2p/dK,9r]GG'::}P9 Cu9s7GDzz|ɓ'HF@lذ!33300ޞt`TD"9}sA]]󟨨kO:0*qr^0ڛx">*q\9у.//+pa q\.owފ+"""hhT0rW\diiI:1T0Z]]ݻ/_n:l m{9ÄYX,n~SGGի rxƮ^zA0 k͏v 8MMMWG;'Jr k`Ν#Gtuux"d8tVNttGCCCllȑ#9)(-\X`@@C大/[ɓ>` Y)++[n]ffmz!Gv6^^^CthƒH$3Ƚ:Zcc#ŒbL>7""bƍ&L7o *@ = IyϞ$ٳʕ+H'兟t'nݘx;yAAAxk'{9fo P9@T4AMP9@T4AM#T7N.mF|T#W]oK kxZ`$C0*dzCEB|v7' @p/qw$'/6@PtiB0˽kg~Mb}OIg2TC]_cH*)ϋooa }84d6Wńkx?2z}b(w-)( eF{ZAQT򟯒|6A(r{FwI-r& *hr& *hr& *hm=jN HG K" \B:{9{wlS r&4AMP9@T4AMP9@RŸj`IENDB`lava/inst/doc/0000755000176200001440000000000013520655354012727 5ustar liggesuserslava/inst/doc/reference.pdf0000644000176200001440000014241613520655365015372 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 2748 /Filter /FlateDecode /N 47 /First 357 >> stream xZmO~|ۮv<ؾZU BW` qj;,_ljvUq{9s^fa.SLt r=0,dAppLhPȤexaJL>}J:h2%]Qi+fy* |O2&QP(}}\`Nք* Ţ4²04 |( XrEA@6"<$ % VWLy,iC}eKф"DET?b7/?1{(w;d/"Ig٫#B'pt]׎q~}Y/ث tYNMώ}8K JjbV0d?[޾U.j2+Y3)W6'I>9} x P7j~%Ql𼠵@ݓK f<.3?`:S.߽Ϭ4{ @qU{=Z$c3 <=z7lN_,O,/ (;KBAA9hYE|qSiL9dR euߪUci~YHS"qGNmhLh`WNo*?[YtLccUVJ;d3ƨɇSmv$!ڗ"~d_QMj޶Tx}tsΓn:#:k$"a)՞JE!12@Azퟆс bВ81F0!Ä6\4w{]-%fi7ADA*, 0f N hXOY Z@)95|J6 Mc)50]|Cc>'O|/~ů>[~}< g'3o)A<Βt3| OuHiȬ΂7f,fl2 M: S@@[]m#vuz'ns@.VnU}D 4g"]VeNG/NgMgncB"fVHDmoCQuR˜"$)$#"d{\ -K%*1cXDSG3]d mKYDi熧4C~vʹW0>|EVn,$j5hf(ad$5,E>llgn'gm~ JQ(*l35_WN|4W\?kC@Έ)s; X;ۚ+=|`ᑕP,5fhWf&'dTN=;|ka4m224Q׊Eo'M;e-9ʎTǍ疗,."2c,ԙ iCvuF OHRaqJ|7w JD+ӮAА7Ya m ɎQe_Yn%mTUNOڛYIyFc [ЭU[=%m:LhIr|wHsmچ4$Mu%t`Az?ֶ[{ !r)i yb.ʙ vےm5nO mm+юlOq&|{GXҜםx{_>NѽZ03Wn>2нfόP3vON`f`-{7tPTn>ܤӺ\g_Y@e;R5X78B#3+E|ټn !\ⶭ-[CøûKmWϒߩ6Y3VlưdX4U_ݚ8U3ڙ [ƔT_fx & L^gۄ$^ہ~I#&/-e~xSF> stream 2019-08-01T23:34:45+02:00 2019-08-01T23:34:45+02:00 TeX Untitled endstream endobj 50 0 obj << /Filter /FlateDecode /Length 9211 >> stream x=vFvZ$͠ψ0 gd#k&'ǚE R=nvLy1ߓ̽p hH#'9^ U?e);7O>yaήO(tV]? 
]ޜ}gLBj~I5 ;Ҥ,gdin˛'?$Ź0:YoeɛwX*q'B+\ |Y4, ~4c0>i[0A~>}Ve\|F&Tr–*=<n1|ƃkd>/ npQ'O^%->PRa~f@BAaKG"['Iy}P[')jW Swg Q߮9|>HTd=g7ձzCJ?dԧD~b]AޒliwH._ody[ yٝ4;k 3`܄ `c:NAC~ s8[h&Ov75ج1EjKwg8$~mUEepGJW&BlHiC=}C# =`/91iyKN@~T"S(&\*0/Dm~)f)ItQI2|/{]M 17 8a#a)h3SO`qI$a*Ukxɓ3ʦB0 r6~ihHeś A11v)ސ.uȲ.5 b ;v=wX?mdh)N⩙tmN'5hs EE9HBX^%hdHq6·S?ZLۄ0tN*9CtlpxJ{,V@ApI/ dEҜmceAi`6w%.Jtho"eZ;w}&=<_8V:N!6$S+蜥,3 6{v?YX \rkꡞ_ZMXjS8s"0@"4"Q+wB6)JHSO@gH/MroW|瓷&9FHCqH7v22̠g4Oq'iuN2xXӣZy \+T_?fٱbTl~<p\Sx cn}cW%S_d`r^bXo*ٸCšeHaM2j9q -}<~?W>=`Z67,5i/l980 ,Cr`* ,{q.00WP Z1J:,r>!֞;A22hș}R HL1G9GVpx:wXVی)%fVt\ER6f.FdtBWA.h|/@L=p-\ =ޡ"B6)ɕ"e=:#&Q55ЧS yc@1%zPV00ɦR(l:6wq똠.XUbX ?zXoy$ \'-AQ߶Vl?Yn,nfkk/ۄz A5sDHL4\*Up #ZV/l*$-+5ےXsB5r_V3# TcFwK e-yygau! aZ:Ѡ0{Jt@UjjU,/,U>-5lwRLR`Zbߵɹ`5y&P-oh0f쮪^Ɋ˓u΃ |[LVumƖf!3dkÿA|~Z.40RmZ'f?Lo\,S%s`Ry]`2DЙ%3\ѡȦ~,[w`,d* z#P-+\Pu)6܂d~$#^8v֖6\2SSK XDA 7ڗRjD0¯GeՌEb1hޘRv:sF?qL ÛiP`9`~-M"ܚ?Xy=nuq7)NIy4u*GhӘD5q^Zͻ^`x'wΔIsP}?zO4 HE܄U QnZq&_8ڍ*vާ ]@hCրP=Ȍ94SqƢ( c0ߍmQg-ۊ: h*f4᳸CW(T^^ rg| )βO? @3s@ϣVj|5_5j"mdFoX@Y_;z mD~ PcE&h8TmE^0`ӥ㨹6֭ٵY2OqvrDqV\ s5rh6}lv bNioO JRp$-hj{z3Nۤʦ 3XW G2.A`>cZ< c4ŲMzwi#tAAL&cCyڅ!#lUo{Tu,7>8PޒJ?-j<(6{',o;]%]k.=[\h*隗 d*U{iͶ?rYu>l/O|*infR J-Y¥4؄vޛ,,R۷EΑo=FraQ#`ٝ&5sSgE@7QݳZp//~Ŕ۶J XdJv"Q+;&Mdt/r7oݲe5(zՒu֋-. 
عґ#`kB_P `cޔ7-<~gwwюSL'oB xnit,?y<`*s.fqBw6Z]9IzfC69VJ6md "#ﬗjLkoh*F qb(>֒'nc6 boZOwިHw怸JMG ;5}R۟6 D3vwoИشBլWjnlܲ˼:ɶ(`L=7ScGK9t޻/sDLG3Ѧe6wPE}]2 mHZғ֤߂L!IYUw<bP"hlk7 薀/VrB.BbyqW=vWȼQJe<(O$MzBhk>YBs騏BPDPn5, ?<2( "SQjq˼O*aU&&,R-% IW9!qe>oamwsO3Y`ozfo%yz^cd&sExu2,S,4f[ ۗfćz[YM--֐H_[zPMˀt?%8ɛ`z *Pa#yb 2ֽ!Pg4?$.v32|q׀0+R=І*dz;:(Ze\0*l&dlyO26 JdIT^ʕQ[qA2lUn[*bcRwC#S&1j=tva0%BT0V`wfeߡm#{ e9l9zrubCUߵ߰wv;sg f+N pR);\)lvj-ycʈܦ2%Tr-SYz ]V!O]/ ,_f*Ew0"yQBa]1AAWI]P*ư4 'E;&/T<- 3=u3®Сr| 4,9nP.`.D33D _~~xGug.C CnS UoW5cB27;K3ٵ&tӭYy4a}4qWɪZoc7Ͽ<>x;q]z]WEo 8bEՈI{e/hGv!tx=D?;P6[Ɂ0gAnrw.zq:zҨ!2u1aR ^Ʌ:;fT%־lT+]42A"hS:E& h0dU)ܜӃOA$/x=#+_Qт0 Dop>2h~js~PUft#,-)R5Geœ҃r~ۑKܽxF?{f޼<ܓ$D@τ+d*0_iN񘚦Q ^Kfn 's)l#䱜}˜fMn+{.F>4"2to*m3Sem'm@q)߸>T"eo}˽bѾ:_He*ݵυ܀:6d^Nra"ޕmMbi0JN+pIGl->k:K}{F}qqrinV}a}v]wL.I֗KRWtΦ3j)Jq<ߟ0\ZkM]uտ> stream xcd`ab`dddw 141M~H3a!cڏגּ N >~Q7aaYcb9K[ *23J45 --u ,sS2|K2RsKԒJ +}rbt;M̒ ԢT[ s~nAiIjo~JjQsYbC8/w , u~L1mG`aSt)e+Vtfb-[=S~ o~3|ƺl{^ro=ף|8AK'ju̫Y8qM == z޽{myU._>9a~Y{;7vs,^ tGawmayZy%eIU1 ;<G4e^zDWN͔V[tRi͇?6s)ty8_SOg]9(n9.|Σ<> stream xXy\Sg־ru!hR " .Md',"@_vŽK*Vkhm3Uթm߹K7Ug~&޼99yN,(a(ȨP9+"M aH˘,|bOگ(eD1fi K LLgpedTRLhpHrFLՖ-NDMef+|i }RI -Q젚A[3իrYI<wQ閮S="%JVHT;t[:E4` `,?(ŮKfaG80 \e9GEaˤ`{lcK-XzUؒu~%N7vYpҌRSո.X)c.0Y,D,aPsq}yc4v֔\,"&KdeDˢnT<6c)`eVvH'.]b00X8D)rB51 [\uFmK| 6Md0J(LvE-Bq *=fX`-9i%_fմN0Dbǁ9 U$cࡗоgVh3LP},!M}aeElhэg(r֠:ZdG/%p_ic}5޸Sf%Nq&X*y8 |6f<>3 ͳ^]o-9 /}L#^"2PEr V)oEjb߄hXqqs'Tpbv`-|q>FjG0(R0%WWe6+2+3P=//**G ȣaQi[H.%C69ucqeĸG/_-h4Я3Ӵ1d1fd2,4' L ԡmR}}<ѽK@wCb]Q'{h\]/:n)2zϲ-dR+.ocr ]RSHV_x?c v?Kui'6i{ߘݤ秀x#n>/蹟p}]WZU9HE7̘ҷĥjfU?o34U/wjo^ U ɶw$4% ď;od"9aW0bSEDp5sjm㣤%NV5#رdOf&$vakQQ'mmlWD Zͩ '+uW7Qi<\N^G5_/Nh?#^h]}ۮ}ׯmjm;|PYW>1ؗcr+Ғe~+rd߿~ c;_f ՏWTxS-LЙQ©6DE+]N=#5{)ANsBrC2RrCD[fHMzun,` TxT̢Zs\ubejbRWo~]$؆J# B',C.wtFXus^>m 8CN,a&I0*, PN;8`{^]{kzڲwALD3xe,|D?haӺ{?,['K_@  )`mLԀU- 4DZVyxb^KE8[ XU@>!&٢'+D`lym3DsSMr].qqyYRyaĞ:aXz%~C,1#GPAn]m"LH,m.);04zBAJ,("5Wg5Sێ^D_={ܵd1k q637@A|)x0e%< 7IWUU!ba30xƉǺ]au*L<;ovxgч>f(OlΞ,y§Xl;6rVKЧ,o,Lᩐ,5x᪈w1gR2WyF8;{tl.,SNjN v*Me=$98S"K<1 
/Mo1d% vyxB]peat:=ߴ fw|cg Hs l*~/OF ^BO#̤w /R}5KBJ^~&9b?Iɥi#).KSHu5&wʳn&A֓WD_lyTwޠixL"GC =[k [^[_wo"ΞNM!~Z|`^ `S9RXl9\G* ~{#ǂ~Q;\--EKop0 ^)\ =#kf\Em +DhÉ-Q'4kI:h+E-v0o-O*Ng̱17ž{4YUM&2%FBD'l?1qu7[1vq} b^7f k"F/3 Z Z^ Ll0NDϸ@ x ? %aX룪>L1QB}&Ce/kOOʪi<>*5 ޼]c痿 9 -,E ݹ.t5{Xj3**ε4c3e@sIh ZmWTBem `SRs`1po0~\MjN,'&ey8mrhI᫄X@㱃UD8J1o,ɴEEb6̜Wg782y} i4 aUPZxmcDp* ܱJn֒]L+9t1cWP\˫DQ@xlSd 8 Z- Z4C<E/ )\Bmx2  p"SLSHٰP(<E{ "P&ٰf@aCP-7!#J2:W:Xh-,10{|¢Q FQG(@jWendstream endobj 53 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2221 >> stream x}TyTSW~!&B g\Fq*W(n0O `"H1"(H0. /Neδc_?;|."x<(VIUTux4y|}mW`81pf:tU|ǻqX|_l0h D=Fʛ:KU$&QF3!SL˝SCrMdl^Ѧj2=+bfTL'ID:V N(t 'ZFhDe^+)2h}Y1 5Ubb -FuL-jhzzzquZE9ZX bx{ZQsʴ,^˲ F&Sd *cFDzL^7$i=σ =A%B`LV%,\H*8$("XB3Y8""fsBb1'"h\m??~) :dG> B3|k7ů)'\WC Gt7+Mr!U? CvvJ2քk@:(p]BV82^ 1 0WR"Zru=V=N .qn_T ">lKJ?9!'iڥX LNS:k09!v2j] C1l%$ӎ)-(P_@C@K2}m 4/Rcy&.1 Dэܼ'Aw|4%/ծ])ݷ qfB-ƛ1Ĵ̀w㷫B0Ph?v| tTRQQzvx(`Ӻ:k Y PvӚ=t"wfx>lC[b L֯o81\S )Ե~ +7>=qNljuBP^ʚ ]W o-haPSh˧ BI+xLt{eE'#pj=CD/P4ΗPWk;7j'Tp*| 3qƌQq|8bpS67BJhS,2T@LJ,?wmBRɴN_~@mnkk8/QgEb':xd W[Gᡪϣx,"ŖwˋEp7h>ݯu$kAvUl6Lƍdi}Tв梡rl;`,{pœqB!'cW/2_8΃mx\w1i2#WʗX<g۝Co1$sذ7Ti)+ BeƾcRx}/6/X r4'Yl N^ _+OH8]@ \pB-]N ͅcĉ+c#€pvůJ@qM ?9phm8Ԑ. 2Lb浮 _ߊ{ًi)o_w;!|! 
:f]e(''fE yhrs#}MG>HEK9hMud:B?e5pORZrUژK^'nqzIpu&R+O ZۢʆkB걫ӃC+@ő-:ضo)ZWSR6D+гtɇĽK6sw"MpWzbf[#߳5i&ᰮEdqsKQ;F~Hnp |{/=PYz^SIeliWLG@\(IbǴ|WRII@UQSahy; .[ws߫'}Ϟ؄Tendstream endobj 54 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 425 >> stream xcd`ab`ddds H3a!cޏגּ N >~Q7aaaYCN{, ̌yME% ɚ : F Eɉy %%@NBp~rfjIMFII~yy^bn^~QByfIBPjqjQYj[~^_bnصz`9?$H7?%(1e͏|?6 ==_tuUqݿfCVSН{?w>Y˺W)쮖/&]+ Vtt'uug͞z]7enj,oL)޵tĦruukqX'=q)z| ~8O0wD3\WX*y8\e`fendstream endobj 55 0 obj << /Filter /FlateDecode /Length 187 >> stream x]1 EwN HM%]2^o MUuxHoa> stream xk`wԚN=i C+2x{E{Y&o WzPtM '!x"/E 9z&FÇz-\x2ݸ?Cskex:G?^p/յ6#KeRT.rX!7tjkAM֦zJ4̖FY,]m3f]QU M) 4&wCt2 F֛:%'g '^5ue&usچiQct(C$#d=.w_ Cx{ӷ'}V-OcB Z ;>c)GECE?S; * &b"j"%Rb7<6x١c=3a#1HVp2N'RV+eg76"~yendstream endobj 57 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5379 >> stream xX XS׶>1$jTmjEE[: Se& B02! FQjZS[ֱj]'׷־{}gãzP<:2*a"86Oe qo_5>3Djxkؚ؏ K-[ 9&DF%BCrc&M44F>=0_!_VD*"UQ*?"p|zx|eQ|"FXK#WC*\%U@ZS#UCrSjy[&1r?e(GdJ*TCΏ!sbd4<4@Q{XV?WP\O/4?\,Q(!juQԱ##U^;/\Iӕ3"]>;W7?ABkb"hQp%=—F,9jv{p!Bj(zrޥSè%AyRQK2j95A\(j%5IfQ1j.5G9RʕrܨT/ՏX-ޠzR7%&)+j-1/Y Jz,TYVIO/3+fOߞ]o}RazOHgd#]'ZGY?`lNP^oX6 f\/ #oG(Z 2,3;0ۆf0 8vp>{h<ק,deFdgJ*>5z ^RԦeJ3J: BvOo1B2rNgw!.ŏ<*]{&<- 4Zsђ3GztdK #lQS_7UdJ\4fh ~Ӟٰpxxdөc&f5w2Pmaԍ n;t-aiDΰJ`kRx5e'e+';jj.d}!_J1qLO vsq<O6a{0&L' \$ㄥX4ruJEiI&[q&vna{G}+G}}9燎GX&Ot}+aKRpF]!A  O$p({Uvr8&nj'炙PJn.QMNiaֶ=-4ll,m,rf,Z @ZL @%F)7ˑ!ejb`0'|2y3Weɗ"f /d`eÆ{ (}&288&_bw31[:•ptХCG )owbl ň[6wrS*%4%HE }EHxxGCcxs>zCw8bHzn؍k*4g ˫dyk (s7Y7d]1Y(Z "j,^*h 3*AN@91 !\3.>7^PƁmU%3&%$FNj [x: #=4xe維kRز608"}L]!PFA_F%w2(99eWs0GtCNvG!0h]2ő3;쬏~om; zu"o)4cY}bG֞R< ld7Javbq$7Ϟq˳zt4x1hG#)wp[*nbVnZaHӡmFC̺LU_)Zdn&9F3?]1F/8⡣y7U dфx)~F.8n[D`_;΁NcLXEfjש>V.Cﮫ24φ\űRBUE "xfQ|E\n7ީT7JJYg#vjbXld>-eflwWϟ]muB7IܱNIZUNA~_Œڹ[̺m/oxyiTצgBt{ h`l6†]>rnSfŃZe!=~la+fwcV;jV*9Ml^?V貒3$ɋp,`tB A`Á **Roʶ%6> )x~LJ0Tqcs'ԝʴ |h/^|pGd %ݔiSb%tNHHGF "n.L>G&xrgKo腷81s}N(!H{U>_uÞn8|yM9MI7gtR%jE_Q6#V(]zuy{3 !Z"rRLny'Yg6R-ͦ/I֑Lx·Vq-ęB&{ }//(nor1o2v_X{0AJJU'.L7buǂVhT(%WN*lRqⰍGՂ#ⲱ 5%Fk-LS]W ~B؆D+N9&- O+ U 
阏g^ON^(tW|<HO&2NH.L5"L+ j"aXe3#mYW㞻.?XcJC-Esh(MQ`_--PDnşо+ou".x(\U]qO-p(xU{رR hE5rٖv:Hy8ʣċ_(0o[LG{jJBZZFG|׺ĸ 70*1K PR ,tI26oʑU sY+nM6%VȮl>P t7$ x{֑3CHj#a0OzZV"*Ǣ-mkViAOm˩Om fMp%fkY kkOQ[|)^eb#kpjϦN83xV~q c/Hʻ6Y6ҭ_{Nع= D>حI9Pq*:8Fqk)K+f[aId&C$q=tx7H?8ܣ(~c,dN͒=SŽץoQa, ]1U)7l; Ye}7؁=upV*iP 8Zcּb=a&nMC4r'jܒ1..غ7I0U寵#91lŏ·.BxbK0t&.ۅ%3l!:dd.iKk֩[ K 7%߻)~ݗg==cƠzݗutD,|뉩|]nQqzΚ55%wP^ }ա -e > 'A[b96lcBw*?+ؼީd|oy\}Xb_h%~qDMN/I l7#(f$j kz?7,"H/jٻb-Z^L+/kS7s GFLZ"ٯ>}^V45 q0U߅wmحn7LԯH҃b'fL![\ iBu!^|`JАW?hisr̊E[jn W0;$ O%GCx6iO~s=*ml>V{2@e k>HRhkm.1.P j"웧1~qIb2 qпHM|~ Pib{석Q`NT d&+7|.lőϸ$>V Fo4qYhӚi#'se˼rυpEl$T@k4GRÃŬ_ OnH@2°?wo#{ ;̉X+m.\|tu5D$h4(H1`F?5s|pAoOmbjh#v#>.idнd _Y./Wݣ&0*\A7xCv'oGw3 ^>wFl35(,-X\mc"Y)X5X޾DmK s--q4uamB;S}p*iκ PZ׵ )$ OXf3 IFzsQ@ηe ]'D[~w tK, s(< G䒓H]%7=RzAQ endstream endobj 58 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4963 >> stream xXyTSֿ1psEjujUQUPk:*0@d0AAhZN:Ծiwn{z A|ceAκ{ox㑾۶-~{{ǽ8A\cWBmRMN/Ny0tIJ _Il4",\5Cׂ7\2kozĈ <\$LJh$ߗteYɀ %8γ{ \U$$ ꬊ g=‡HJ;8lV?;t 9}=@ :n$DrϾnu95^4:-Gn ]L"0`cuL4y$zta!?"1 \ MoXgo\<ʟ= w 6r;x2 $tY<'1h<3w@f{vCpP] < Y"yp>CHCۏ]HKf5 =@*e[mzLrςVO܎HxyO-pk l?k" 9ݓYCڃێfXnsk8A0sҲZ!ty/ cTlƭ4 ,>U09չb@4](Hڂ2t9PqNS!~VRb]2p7?yPetQ܎X4Cf^`tCP|=uf@yz_3n`iR5[(6 SVo"-, {3z*zG~?"qOKm=*gyyp9 lhW-$fRlL7H"k #.]-0a"@styЃ IW lwI3]Ff_$̜ c4륟s:B{6 j?N0̼FNn5G7;Āc$ϦϜvam&Rdi&PE r 7< 2u9kȼƔ>9ա@r);yɔ O8B^@wEu&yV4VpЦGVW,LȬyg,PS ;;n)Lޱuut (j+uPncR4Ƨ_j-d ѽy'4hR(Pͮ I O Ѩ%  )-kPJC>ik} e3g/X! 
,҅{FΡ2*에Ί} E3 \9!J5̟4Z ciXF>ub/V"'ns6kQ`N0J0sF3NZ teB J,&OX5);90%0l)FO^Uvm0$ޅz@((-(+5y=GPסѣ SA^ܲ$eD<4Cs ^/TlBR/nycvf\$$e"53=tGK\{O}Mf[j@NlP6TnAt8|Q4, G "Z_J@%)I Ve[ _T Tڢ˅R#3$Ŭʟ11߽G5g% ^Z '[Z||xxJ/e:GƆ~zf Ւ?_< 6&@j{YNc _(,TC*.]9{?tŕ%V-t tAu#*F=_Sk&Pīلwjd򗔶q<5E; paoĈQT58ydY/SՔZ (CSϗdAs]}10/u;Z(.?-]K%>n ]nO7vr["YmzLuu6iˎؽqlFȸP3}|ELUZ,q"%H&ޅ، ޻ϫ =X}p޹٣ x?hOCk{ t:|98߷~W_|hLrWA5wR߮\]雤48c"`o7Fg0|)Q6+?,U 3wڔܠz- B>upy_fa\>[e,&$km5Zi;P'Ho>\W\TecJܻ3o­!7=j tl:ytF8=_hHx1iD*-0?;fr!&<|ɓw!8Dhn2paNtyu\:ı}mMCfJ0ٷQjbI^O ӫ=ZhnM|hcāʲܧ=A(&I鋐~:- NcI*xt̎n@Et dC~F2B_#{ e R ϙ8/{Z…pIGOˁ{-AS$im@$ COA1HG_˳6m# Ͽo98aW[ [a ̍Oդ*Hzwy%30k̋[8`xy -k*=;$SV 桾/a$_b}}\Fn-7+r2J-g%>[l1e_[ͼa&^Oώkv%jV-*:]TcQ'WF,||1˻g0G$(cRS\/9t 9ԕchV,G忛`jfњN x|ΟAפ$2',]^IfFJR8J0& AQvyz> _KmFPofy t *zW6; u@(`I)֨Z4@0~猾"[g ̼o-}ʃcD gQ@ : KK궎~>k7/ QӝT=&Rݟއ=B.A{_` jӯ>W-O3zW63i !t*A)m:fb3l%nuWV+ (ZmI11 ?AŃv@Sě H=sA/Wҫ~zΕiB.jKg R6el  >rFABr*;/9k<' u^zlwDR|.w[ćˬ,חW1:1='7SŠ/0#;5$xEefYU2T]]Ѡ*I/|[.ne][heB"e,ͦz@wY QHfଔUz嫋0CU4JLLc{ʳX؁֡>=K :m(zfAiP^]f?WaK畣ݧV*@UQ+D{dTJ n*u b U 'G"ui6%hۢ_p4yOReZ-Lil:,Jz`&cII׬]QTN5)S24U<3=]tz6)Pm.yԒ<]&LQ)*+gۦmKN:gHחj ?j@g\dWvjz;ۚRt7<&}ViZ Fwzfc ֟)^~u ,~=gO+6 N-><iȐ8""d9QX א.PB|"AH\n+aVdUF^b6ڃ?}  + 2++j1v?C?6p֯hgpe2l+䰥ceo \UF"2qqJ4Z\Gendstream endobj 59 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1128 >> stream xEmLSWB{)WM!?|'qd cD2RmRC^ J!Z0-eꔁ 2q3&N3?8|nw4/INΓ9T|EtB^wfo+ i%5NIcbC iԜ Ҩ6>>=z'S㥹d9hzbhڶ1^]#YZd1ݔ!X).^!%X%efբR,fFL_e˖\,͑e]V"n1;s^%68֬-nvfI,*{+sNXJQ|KJT"Nu)+K帜8GU-ƦxSQaG2*?u {;PWh|B? 
y/En8/_6 WM:2FԎ#=^w&oЌyu /ImK=|Q)<s}hw3o,&-Y~Uұh4pvp#n|qA^hZ#K^w7BHA3%TjO>m^ LĕL>B8:EK&.0&Ġsɠ(9(H?}z^!gM1 ^CT}ۡf, $^z } k9wRw P|@= / cVL2J>8T7' d 5~epdLk띪T+M=8a)BJ^GBb\NuA}p^OLQendstream endobj 60 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2055 >> stream x{PTϺQBFYhc("VT+ Xv+ WR\j&Nt8MftΙ9}3g~{8/-=Vsse\`lr`2lqeXzo/' qK9HDQ,éљ$U*iiq %XUZa˖Mkׯ"Y!*"D\!M SRX*TUK(ju bfDZ5E)U"/K".I%%+ f];NTT(T.)K+ٟS" *)HYGUX[ttsaX6 װnl [ja88稹۹eݜ'iK“dm&:\6A){0{hbSc>%= bxJ!PFhOJF| ݠfH x=2xj ̓KᴭYBq%axěP R&W V\2͎^2xoT2)eU_Ϭ><\oBzGs OPj'~5%/Pja'4tyjK Ur&aKX}UhW $r;]Tm$U th{`Kl:::&MB Qh2]Hgǥ҉|]{({f!dV|/uv޷`§R(5ʬX'Vrgʒ<9?y1ozjdTYXM6UN m4? F9'Vz%H{}=mGseE!bp2glN ߾z<ꁐ꫗WXdo% foA@2?}:[;!E/h/'zުwk8f7;$fp涵t3أgȻ8ҋ5Cjn &]I/ɒ5u<?_olthxz-h58VL*Tee ȫeԲ᮷m~֗ɚ?jfvǏ@Xc1)Hy̥ ϼ.wF?=lZF5cGJ*IHr'hfYճD7YXoQ橡5|`3Y 05/Ř_ԃj .ğD?Es~LVԕs- 7'2AEy^Tc=N,X3~~a|lendstream endobj 61 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 459 >> stream xcd`ab`dddu 21L~H3a!cO]VY~'YP٨vu0wa$=K{*DfFʢdMCKKs#KԢ<Ē 'G!8?93RA&J_\/1X/(NSG<$C!(8,5E-?D/17UR=ᜟ[PZZZdh r|?wo1ߗ>d'\9Oong/˞]"vfa }}/vkAF=;ÓM3|Ǻil5 -Vݵپ|^U+%Wܲw߬W~K54Ӎ:mɊ֭[ _Z8zn՝#ÝlYN^;q&{r\,y8WO7endstream endobj 62 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 415 >> stream xcd`ab`ddds4H3a!C,,Q7laaaYK{, ̌yME% ɚ : F Eɉy %%@NBp~rfjIMFII~yy^bn^~QByfIBPjqjQYj[~^_bnصz`9?$H7?%(1eƏ.|7]g'%yyEsk^rS,7jÑ"U~K7͜7qJ7-?E>H}^wg]d]n[]wGC~X5U:"?igX%WiӾ,`0}n9.> stream x%O`n} Hz@B0&7' (Mdκt ʈ,@Uԁ]$  cbƻ1Ƽ]Osx硐ˁ(bC)a!7j ]qk?,X{[;A'62z 9)*9\MRhL:#]Bo |!SHxL11@(K1YӓfDڛLE/wu ٸni11YN\MHb !D!t9!aϷֽ`ٶ?OZNR"*삕ٕoBQtpLd 0ERb;3;aejwiO! 
$=Roꈧܝ53-#a .,i/> F!CtzjaʤY 3߾17IoIFTGPbWLB xәmckcfi*nͮ׃q[\uIJ{:endstream endobj 64 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 343 >> stream x=O@j$!!: EVCd :4ڒda`q4F+IW8 y~m/@yx]wQ+>$>9%]lvEW^:]c7a^F뾗7UkIб*ZúB@ES0~^:mYVJ[)TIdiq k\FAљc4{ԛm 2& ABAqBQ3v3YrEX‡| ^}g:!0UXsz7݉pXY=J-YڱPu|౧EgYqES5endstream endobj 65 0 obj << /Filter /FlateDecode /Length 9352 >> stream x=rǑw &x8wU,˖dݠ}Ai@=!Ex5p]|WVVW V#=Cz~}/BMv ~Oh9ݮ~O\=zv/"VʻAjel֟=kzA 9bΣh0ZhcqB0~{xTKB |6A7؋\!XVZ61!vpЪ4˳:Ćm듛˛z͑ET듫'g #$xӳ<#t!RGpoƪ{BoV©AٽgŸK#`p(5{ {*no[W(\HF!{YJZO9|fs@D>Bts:DY=dp5UqvCas f?x:@̢&jH aicTY?x=4Qcp QGT vH-B@'\@`DT1("Dxa7 Ch  s;0"!CHilY`P!!&aIDD2 ـx7ר0#HA1zM} b8%*Je4Bl/"iY3@@ 3Bqc!;w#;@ H5J)8Qr61D2fjVh"C8^AXYOdOD6͡0V9fȞCEaz@#W$v+d :WӠp'Y&;@T"dl6#ΚG oe L\V`bd\=m|jX=o(NX=z)JqZS+6V]b7HZxYM"W袘Y|j.68EGEWb:q+(Y+K+EsKOE`(-FL2]&c 7LɨeL/3i2̬ll'Luɚg,~)dٝG[ L'k䢲&Mnvu>?>}CZwcXâ2P.{.{_/+y1Rt7r A72 h#[9e4ݯ0_`|O%w6|lHr!h J%@щB =/@< +A{ &A4Zh&6C"BceK$v/`[uմ˪wPvfL2S JV{"EE1/V@ 0d  e N|%%Z䒎$D ]i%<KdlR8=!D6Q3=VR4LR:h"D!$ ёFv`X ڔf,K+ ЈDO(L(p "*}1%&:>0DHZGuFAXmQCbgl8$oIe9Z 䞭VU2S@TɫN%p9"D%R&6M &oPor H["ߨoB2I 4Q`[ZoKߖ[fQmi[m!,|_v>'%t=HL:(S<^s<Lڸzb:T#P3Bc,F8> XF/o&YN>LʝD'?n(\zv U&GL(jwj܂xҰ}&`/qLWҭޙC;v8Hrq9ƝM[lP03V}~wDmQo0GHܙ3^"$1XlCXje8plJ1J،&"έ/uh,m$FcHZ܀L/8 Lw4`EXbq sqAH(o>ϒ@GH5P!:Ѹ ޲M!$O '0u՗Й3y+D6z$-{2yD m04}N=aY_ m ::̠PȆx̔a]Ją@9N XC+8DOFrDC[>I 'ws"$:**c!&|(k~1tv d-dz/H~Mto*LUZl[(E\ 5A")O/DA]N(Vg vdz69 BL2Qɧ @0UK*oF*}% \eƨ!ѳ0< ADLgX<mc/ 41@1-,QbcZ ̥O\ c4#UD1Diah4x}s, 9_GRE`aM1*+X&$ ς (:}xbNCqrf$~5)$:hk qc *8Hjpc<惇q*S OQPNpEg`pϰ=,Mnblbс/` ^h;F1oqC!AsUH]IEȑH1Wƈۚ&)9JPR$S* ahqz,vE fLD"'q|zaP1‚Hb$PDJrc;ItQL?6IhIDZ!F3x<*blDjlmn>uNP036PsאmUM$8xrL:U'EF-ɏp)cb`yq-,!:Hkg^ p"x:v,>ak1dz8xx}z^KxVہu'kvyP]Ip}թ,:Q,}Fbbb2#\QƷjy eg-o2E9si^),4\dig9Q5╞VmN^[k5 iXE;#7<~zv.Wd[Ր*3 [D8\F%a,\q,H+]'۔"B|+009\gSrNE( b.Jw0LI0U+6n;[/S}07 Σ uHw}aDKR.}ӕ,*|R6ܨ#8"u"%TҵJk[oLnqs/ȣw5/;'e E!; /$٣ tEG:iH_N@Cfy'{mc9N{}lcKc=Wũ!>/DS!T֢ޡ*>E۰\{#Y %Q1x }?XyCHMWn _.MO/7sq kǻ8ʈ&Xy8c&\]yABT@ } T;LbOګu<]u|b7`7 BUǃU n8TxσfxMU~>Nߑ+E諭Yxcḅ7/2~Ay#9Gֹ1D  
^yS7ooٙCvcbVSFtٴyT 02 Kx3o0s.AlaM8xa$ptY0,HVMrb܄q.߄!"r"ް#RNJD7UĝVK)-@:7r'D`E4☐ɲܱp-=]46qEv[/8@q D@ E +am6Aƴ#Apa>w#qiU1]cL6bXR8{E*P*8gnݺ T5"פ[aA]n46#}GSpudE!W\x)7/wmY*&V9fZ(+RhBRT*w * #BeI;2Zs[O}pt;Ӈorv#~'^R~ov}v}s=+.|qf}HH11;6_|L$$޴p<63-%f}}zW7^{!N1;^?z6l]o4gq]̱e _}tpx+}+bc}zY{笝{n%eCwE956EyZ\4]x.[Ŋ1-H\w](tg72t(3}$JtCRRtO|.I(`p3ٴj-Ҹ#2pAmW j f-?8K25|wa3h;+s#4jtН% hQ~>] Y.rL<(x4^We­*Lziԅ_^'5r1jYN(%=1J'{7vE7% A<=xl7R|EoE4}ڮ-" ="u\W/x>Fgկb^8h|wUd-0.-d-9p9R NX 'nO;wi hZ,;{ѝÖjWZp*bDμiVkXr<$n%hm-w~voa>>YK:N3zXPTUYky)gZjm!ems_)M|Y&J7e]mۅwECBPrNF|*wOnȰ6%KޒϾA\J}3@y=uoDK[)mŨFA>:8.ˮ=ԏ&Qq#r2<]Lqڑ>{y9Sx6c#o%}?[ ̵ji - i`p;p?(4JyB?_,RCыrAsz#'+iNMG{ve?DZ c+F|KXvW&8CEe-{_6W+alX|kN'if;Fvۡeg5A]Rˡ2]>mCWŋUYe`>{×6ʖxP8zFn=wLc~g?{땜MolXe-t(ЋzѫBw/CZ|1{EM[FWM lqߧ߇?wLW""_ݑ)x ]bF "h+ٞx0*6,m>wX)&-ҵ ,9 Ѹ.n/ BOAa}'; .-%dxSx/;kk8iykd>bO˦Ig, A`psg_!`u 'I#W q:%"_شN #^hfrEI[bۋW'\6^W/*?qS!hS;>RQ7H%qendstream endobj 66 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1365 >> stream xiPSW#WE.IRDȦ0C!lGjQiZNYPEp`ZEFJJLڊι;rܹw H\STS^ay!Z WFYlrEp@RB@yMX-S.QKV˥j/ҍ9eyR>)]).^ʬ(eWܻ/SouAO$D †%D#Xžp$gB|B.qKz*3DN,^ <--kWIGE'z2tAUڣ@+wK3:rC@P~;/71&FG9s1 U44`) CfjS5{YHq7B2jI&_d|4'}eSf6IbR2+q=5CNh3vG=RjaC$\r0ؿUw}p3a!%(K%*> ƶx2?V6`sSX›F;5xIy4 rUSj&TXwH#µj<g]*I5$UL.aMa6hV.pԼYw& z%7@Ayf.Jܛ >,kX8ҊaH qfJ)Wfx>m^5y1",{T?Bc$ 2Љ)}j=sCe GrleiG韄1zD榮Z8F7~Ҷn-v;hפ_gfEwƎs&HxKn9@;z5])9Дݣ |R?N6jG7K`=U cdg}Es#ow\kXƭA.^X NosܮΪP;cR?4ĺ 53EOkOXYMX-!_.endstream endobj 67 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 377 >> stream xcd`ab`dddsuH3a!׮nVY~'Y߭sdo2WՂ+ 0027;Teg(h$k*ZZ(X*8e&')&d&99 ə% 6%%VzzEv: % AũEe) ny% ~ `I܂Ғ"Ԣ\@%  ] L,?:~}WsNc3Kt S%Äp5w Xf+[nWW?-xx55tuwI+ɬ9lnNys\*;9-Rùsύ< l endstream endobj 68 0 obj << /Type /XRef /Length 93 /Filter /FlateDecode /DecodeParms << /Columns 4 /Predictor 12 >> /W [ 1 2 1 ] /Info 3 0 R /Root 2 0 R /Size 69 /ID [<472a087e30297dfb535afb114dbfbdda>] >> stream xcb&F~ cnKL @  !l$8ف102XZ@B,$H)II DlAJփ endstream endobj startxref 50082 %%EOF 
lava/inst/gof1.png0000644000176200001440000013675513520655354013545 0ustar liggesusersPNG  IHDR) IDATx{UOuUWߧ{fzzn$!+]4* ɲEYVE5$">dQYDPEA@Q@Pard.g~ws~Ꙟ_###EG>ڼysP@)iu[SBYE|{Cxw +8kvر_4ނA o~!tVKP( uqPVo&B;0 ü #Gvw|88 ب*??/[;NN.ʪ"L"Z[[|>kyxG___{{{KK s.kYdeJ_~0 666^zǎw_0 Bx>W^y !LQC0J4B(4BPj* RPP( 4B(T) FMP(5 h BQ@S(JBBPj* RPP( 4B(T) FMP(5 h BQ@S(JBBPj* RPP( 4B(T) FMP(5 (}-(( q8۷ow:Kq&{/~q.@(+G>344?O/p89x hg|fGAY>;ڶm]uN!=I~-?[WkKݻWU%:hJ$;oYLTHHP(5 )1G۶m]eeS(KOv,j>;HARզ\MY5>;XI RPTTgQ@S*} PET)hJ Qe#4ơ`KFɤQ%20a~W_z%\/eIhJ,5SoIW˼\rO~["]]]_#:EǷmа}v_>3g+o6m B6mz?JAYի!-#G|x?я–7xkx//;+/駟d2~?S^x!B3Θ'?Dzzzt: ;<#p6?z_`{D";<~կ4M]|w}wSSӕW^9==3<_r_z###ӟ*^x^{g%Gd2}{ofNLLlڴ !iӦ)ݻ?я뮻O*sϺu.R~6 ۹PՁ K-eWgϞ RX[8000qW_:Sٿ+^o}}߾~~9zokͭꫯ8þcBH4<EQt򗿼k׮/}K`2۷矿뮻gL;馛]j@BJ`׮]رcBO4І=a{&ya3<}nϞ=8,P[[[__ ]|ͭ7|B//<:0??j#G}Ֆ *'=YW?}׃_fggo+N/I_|]wr .۪ݻw~7BɧGe:s`m۶g}VӴ{ .=w?d2?駟;I}4m˖-mmm?߈}*MTPdB̝wwGk}gqF>?s?׭[wСn[U .k׮:ttә!IZZZ6n>ݻwر~c=aw}7(uos=w- nڴG)J-%׮Y~x=ʊ0*E__$I|;{ eC 뮻__VGGYItuر㼥i A-h$HJ䤚G)P^Л*o=:H'xbP+P^P H$BhrrrUt: Pn HY4W\qrV]=.yEQxGEV B(SheQ__r qNs)x7?B~8W0W^y%eǎƼmbMYLLLd2`ll#u]7>>k~s^BHEӈfTL>ln/~qiBs9u h*eY\b߾k׮O}S~[V__`#[lo {gVvۍ7ިiZ__8l7ݹs;@SV-9á(J2O%ٿE]{)8prm߾_nVx}?S駟&?Bw?~|Ϟ={̌(\pAkkΝ;SBhhh+8묳< 7pP~dYx<^wR댌ܹg?BH^{-l? /H??[Q暇zMOOy?H0I[nQq_~A9)X2!R 199Ӄy睦Y:@SN"^ڵk T}'ws>Su]G~~o@9o6< / wBw[l1-tR{wO* ֹ;99y%"6mD y睊twwo޼yڵBw[nCV Bw귾-bZ*z8rWUؓ<[l׾m۶x<~= ^z饣GNP\<+jZh&♞z---Rh쬪MMM 0 fqa:~Q΂,tMY,[FeFUꌇBY @SV34yqMYHV6W '[;*ДIP.a߿PNEaYVQٹáPhJ"JכU(ژ r?R.T3(J4E|> mp IdYgʊ)ˆ((zl9r̋dhh? mVyy1 ]yeY.WV}Spź\bA &NקR)R5M3W:$qWYGf(< tr8+^j&ڵkrP AQQ[ָ\`0q၁\.G:;:::D_tDP^"}XӴ%pݙL& WP9 F$FQ0eP(â҈&@S.CCC >$IdPap_f.[f irϷd2|>oOe(蓚\.p81æ"|>됋jJPfz!i֋}}}u766VQ(ZkYl盚samPE,x<`)$?~pXRhJr 8NӺq8??؏X,f,RFZ >/& [hYEQW BAUUZ P,YYcXww>jp8DJv ;i>ooo'}$5R 5|6]}r8A-%I[Y\@D MRih*~ǣ(i,V| !ax6eUU8񀯣^$ ,*'(RD\4ae!ܹUUÇCqYM[P__O~$L|rd2K"P%6BBzX1.BkvvWt: >id\.GzӃ#7{Ǒӌڦ88AX]sss,Zy"xr\"Us8. 
,#TO:BlܸnBKyf$x|XY'&&BLG bZ322"I"o|ohh8twqM7裏VgXbk<7n8nbb"L`sndp:!$~ ^46 yփd[@ R;:: o͚5<2(J]]DU>\1*cnn.k[(f)V].W$Y#?~?o a:;;)"˲1=R)'dSO~gUB7FnMӦ?,xh҂JѨ(:_(\WWWԎ[(򆆆r;0S@KT7FŒW> >Yq${wx7ݻ^ oꜽȲdt6!/א4MkmmucccXdYJf円dYzLLLyc&d2WL8+ nZIT(p$ ȲFuե=O]]]65R35d*տ~.m1G>˼%$)pD`'] 4mff~:nڲeKu^M Aa-LZkSVzYA;gٞ_z;_u8E-iCCC]RK=-f^D1 cg8׫*YĎtb%I z ,hi<['+l@1"tz<|>+RA`eYvńi~~>JQ|.*i755E"_p[hdtI.5Nj7 ?^ZT`0hSM8+@}С<,\.K`bmmmtqhhȦW4kAL 4Ms:`0rҒ$QʐK% cxOoo/~-IԂRe$x{ݳgԔ(k֬پ}mfôBv:uuue}Eh.ҁiZ,3EqxxZ%I ByСbB(ɐҟNY՞E)긼hVʑ#G<+ Ī)T,ϋ*Q%ꪫzM6!}٫gЕ['I7 IDATEAA[[['''B,'Iw<'] 'z=~,”9>>[*X(B,r LپTI_}էz /755mٲeʐ/5?44nPbȲ\,bMy<B>U>[n'?i&a~_q9J%2==0 5Gz~%IbF 8p8Q汱1ARGyvvvܳiPeY(?²ƍaxPA0xɧTU{g>311a۷oǫsvt:eP?/p4z='Y5q$An)`Z>exÜiUF{{s:J1p'k@ka 820===*^>efbbBeZ?77gDż='\Pʥv(K5k>P# ^/QXb1<<ٙf?\/L+f}ATJ-P(L$IrvrdY(HA {^(>蹹9;77WOQbu>q U&BP2|ss3*tq_`wffdOYjWJjy2& :0 Hd:>pD'] 0h5q#MMM(Jَd2#«PqI4dfggeY. 4V^ V,˚ Lfe=5[Á\|8be9JmHf8At 쭶&ˑ[p/˲PGI<ŋ} aL,e9 rZ`1=p\_)P.MX/ gEe2]y nXFTUeYbI,b@+L8_duJA49a-Ii}Z t5EWϛ6BbUUx8Fi6;;K6[Ju.K/L&mBo%2FB"vu(>OsT@|8/5|>Z] E`mL&Bni"B+ޣ\.K<VjpGkurx<-yUWet,;<<ʲf'ۋ(U"tMTqYgqaVt F ܌} h4v֑8t%MiщNW-iZ}}=^-V\犢<p8XVV Y}uۼyŻd9T*tJT(\[Gobr\`ʄ,rrk755500xoڴ0!"8/J,5ǀeٚaP*UBŷz뷿˰>Ry\F\#~i0= BnݺBkBBJٳ^Rݻw/x0 `V~*Ks#r9qU-_MӲ,A?@df+!t!Ii1Z⬎'*;vO~RS&NTUūylrqg@ I!ϫp8LP"FFFAcF磇d(~ьgggMd2Еq,BYq8\At+FRTZǫ u*{^ ]e׹ /  w$IEӬe rZZZxfNpEtdy<ߟJ]  2i@@*U*:]ZDQѣ%EPzTI$i^E0^zB\nPXoya(0SSSp in: ,:557$Q ,֢m pQEQ ,q<e;0{u"|LtZ*0 q䷨!r 0*2??,CP*r2o߾;lݺ[nء qFa,:h́a4mpp0˲܌ ---cccD~ԉ'zh 4 [ww\)fhRB-( F(Pd``Nf e O3vgyiK/T2 gFR(5MYHh$p0,FBA' 2;ϋ׬YiSrYQOuz?Dv\>ϸ`u_K Byf tmT套^*P.EX@өݞd@()C % phhπO_t[PO>Rm`kkkoo} hb1 k;11"jKPM{3.If:<TiBT;`O10 |>\qtll-iZ]]]8ƒ0L(E44Ԩ =f؂eYQϨMЃIy3fY􌏏Wȑk븚L%LB` sCĆ |aItNs>F8*v@ p8zzz"!P?'_a8ZsOMMtb4A맧<2!~._Zaǡ,*'---A;#Hcybfyh7^z)0JRO,˰BIeYd200I$0Tat:.4HY|P cF333Nǜ`*Rc\dqdrh4M#!V)V|2$J-L' ?KŽɚvjqC+ e,5TK ΒaZ:z!#gY ׋b̐׹P(KX٬͚XY%ϫ(,Pw7ǭ:ӊ+P*r`ŁQAӹqcʅB!Ht,V_XM@ƴѣG=H8̡Cb`,˞~飣&O _UUuadL(911QȠ *8=@@(UU ÚЉ,>8occccccoo/NfggKٱۄ: /]`:>lw)27ژ_1+)^g߃lyaRe7zZ(د(4,XsYDAܹf1?Ru .`#t:mٞx<.IGg_UՉY; D9Lꮧڂ 
?~1aJfgXE1H /?vd2;M<5+a?\L!,hY ?*?!MkkR8Ʀ&;P t7 Dyuuu. *( PXM ì]^`4bd7b!P i~xx8ɘaiH9+tFId?3NpgI!(zS#5؅ ˰x$I6lw_w"z{{ 3|Ztp8/_B>xO?c_X,f݅S0HB BK) *%(3ܙ:=] f|7JY[Upt@:bUUx:xІ}xP't*n7q %,C&O*;˲~pN)t[pT |S#E vUߧ(aF2iLZ*Zg``.hXaֲp8 Gэ  !ǎ0G˜,%:taTS3ɐ ج% _Lc @SPdpq~[=*+ UOnԕƵIX0L s|q t`0hlWHd D3蘚•eEdta$`<ᇉfQ#-, Xӊq҆-3R㸎E=b]Ղ l:)O$t8B(^V'yakӼP14JRUr).`biQ|>_]].]nh!DK ڹ p9Hm"p#C|>?55f 7*ݪt^8үn:bW-y`m5G~ȷ ?nƁed:_]>)փt *+](((EiZ"Эt 4ACkQY™dvvvvNLL?~< ];IDQ=p t.H˽bԨB'΂ D:gpT8,d["yKS`z0>X(tpr9I.+'9T#ӱDqnƑ$t:NXm왙\QN؛$) rdl@u B8Nv_@ۍbH((T444@p<6ȯ ;F3 L[RAzHdV\0kv\"j04["t)H#ڔ@3}NEQ ;ƈp̃:xbX 힝YZ(IJ,4Ʉ$& 3AQakF(q KerϚ,I_U|kkq# iF˪gM86 ? A ܞ~c$c7 `#dЉEBxƪ===zjiaz Bop'hjj"2?NZ&two IDAT6!`d":Ç+b|FLHQi><vrR`j]s>jЬ`0IDU#GXoUUBap{5wAn7l6 {B<Y4SNADI$@"Sy1tn[qE8t,@?& FN&xpqRS9$ J/hccExvX1p2Mlxd {?r=X,Ъo񌏏[_LR.˲1&tuu,<5MkhhW$<-v\n;LH*h9ȪL :#XQdY2u1uǩ`X]LQ4vNJڇ t `$LU*/[Ou.iu5 ,RA2 DCCCSS,r!]ŲBp־>PH&oMNN*B&A,%"L檪Dezzr6leOt@9VFF Ǎ$IE˭*ܜn C4go]瀜ZF%V1TϨ~nO?m~\ ze}ȟFe114Qwf24z{Y[XY X `Y <"G" vC*]hll?}:p.&x< ĺ95LQI`WW~ (GW_U[z)25mBeL pAKd]n4Xg=ziyQL@!n ^v+ۙuEIRǎ}wx07s gM4 3~+Jy<lvj,WRCeY}~3Avmccc E%w\J,,$4;;kTKK 腳mPȸbcV\.glA@>j\DBg|l6 1v3 B8M244&d|$Aw]. Y].U13]@[`^%h6BEMbt?HQq$0[4Bk痮K}dYnmQM bT?b 5Tv[c+)c/An&K'W 6n8<1j\ k$J,ǎiA1 jfWf}MKT#s@/7 x<ɲǭWx u0 88Zyt^*]&\;J40ovhk]Q!]PUovn]$9dY,Lv!IH$ɉj ,F'. O$*DTGBLj/,>X ,K^(Syz@YK!jvj@ MӠdͦ+}xolle PT"yrYNX. Odq5`zzZp5@B8>~8}E!S"xCƞ7)ϓ9$aGgff /Ɂׯyc!Xpd㥞XjQ^ъwYYSv t G]]m``@W4a@/͛7766FѲU@hJ6r-~nl[Q=yf_rnCCCAN {bb⭷ނ [PTT3 2XLA'փ@CYnS,tlzzڨ,˒VQH Z]C[(ນ>D/}DI&*ᕂ t E)n:FM H$IF)-4=N[HBy "O'[\p84i\_~@q0 iׇw}Ey^f,\bډ@jaNLIŇ ˲ 0HcaPp8ƥ0L8koo/u0n|>PMZI:^1"Zx6;mf=[K:|ﯯ/laa{[l߾3BHAy{+IVSUnTaTU-= W:ta>Fy:4 h1eJB'"IpbG* 4k֬/~k]uC&bV@/bPk6l؀_C܁k׮5mZ ۉF٬u9tJw4mll {N )_~Yg[o!x;wbr:. 
BigjGXؤ-N0 MOOuM#Bw6V,vuL}p;6@ ~O,NIJlSS(t&ccvWȆn4sP:.xeɒ nHb WQXjS<E,7{B*CT p4HIze05:cu&9UU-sJ"I6dYiqک$?`*FQYsw>chD j65-nƐG„>::0 ʲ bNaS7Nuю; Y,F>rjnccc*CXX  a]]L-/("D8V[GX !y/Q_QeU[8pj622K/A B8Q/˲:҆X,.IA ˲SdBimmm P 2uV*o2C}jEQt=jN'hL&GGGa}bkV8˜\$Dos}P(:<@Puiae12NXEH$j$]PgՃc/dTdo'D"d,&ݣ(#f4X|@|>r@?Xw`J7s ^ͭyTUxM799 xIBS 0$Y2S-:a>LnʲL _,ItX?\r߃-N]քz%I}D噙LU[P..u&J&_,'IqЀMr7033CJ  BvΐĭЉ.}tQϰؘuuu09D"AZY Bi,4n?{{ 5c í}>0IRt|7|rC0 5FY BzZ`ӵJdhMiZlqX?|>S/)Neq.]I #`bc̜h4C, uBq;jA uN$D"7xc~A\8i4L`ri %/Dyy;utL amy<WV"M|>O60;{&y9r"R,nCQzc5ykᯰ|!iȑ#GI&---gu_m۶UP$-,L@M )+?-P|,DQ^QU9ck}2 B{|SQz8ySG-<ŷ9NL +`M˲Ӄs֮]z<;t.^l<|) -nTۨMР#$$+v.W[ ~-랄=b浦i6ဦ*BlI ^;??O|b֭k֬y~jjjrrqD"r?xqX1jEWo]|PTsẺ:&}}}j---+渖&n?`  ֈ`: RQ]8Xd"96c Adq%HC;+(~{ǸdYk"rg*./u#\..0ˉ|>o굫0;Q_qovm~~sss_Z_0 MVMȈ#-J-)*sde#H0 UQ AyE[hoo0LLL@UhrH,œCCCxsݠ;KPd2. $L0>>N*(^X+0a`XHeQj0H1 _췘M؞0W2 ?]0Bs7pi(E*ILݻxS*o:lA5MK%;¨N׿d2aÆX,qsA8#7Ꚛ lCCC*"CAe( ġjL&^/v+Y*p&88߂3`!Z[ş%*Vj5?seKNlŏa-Gsoo/(W_ݲe>Scl>#i%WWt:ÇrRzo:@;'cS( `O t 6y|>޽{/"t"#N pv;::zr$ Bpa!x;/QBv"yjkkCMLLtttA~r%,p#R(8Pɺץ5#"477ŰfƇX,FtM_n-[LW"P΢5,y'V򿚦ُT,vFNzF'p(\p`{xN4y8&訂^|{sG#FxCD"* 9TAX,Dz{{'9|Xՙx^״ N+aÁSWx!k9[/%rmR0BJ$"(5A͛7߿_QFkatQ#]*B\p>G(o\\;.|+333 ô<>d  3@N  gxv1yv)ՎHpUO;;;!ƸTٷo0d0qb'QUqw%T0G$EZ$*Bv:/)oۜs9===oF&O,*ܐMFĥa; q dV} TUD":Cd8xDb ˲aŠ>q13HӽpNTU&&&`r}t\1ׯPkӕra') L ;At( mPTUmhhйb,g,YB!9\KG$ѼMo. ! 
|\QUD"AyZl%?4UU-ٖ.E駟F^XmS,fggUU  EMq8---6T?O7oLPVDx7JPNB"M?lY1TŬ~Sܜ,I.e O jverxu/,{fgvVJ$+c#]FØ`(#c'qA(p(S)rN8 IGG_2ZieIcx>=εk{|O|ǙT?q3ǝ͆._˲$|;#G8glllttT~}3EQ|MtŹsp/($I˥t]w/j43L D'/ htϞ=`Y,1CAi2jjB ɤ#Bi}`Cv=qdbqqdpZ:+ uh u=QbTuꕶ_Q#H4+v2y[/~ S"S ?~Ν;|g>s1xw?3R>ËvmG WWWIWKӆMMM5{`q 5Ղ5B`0H`1lGEMHc *!AӴh4m۶ ]pFRj 7^]^'Um)؏ ND"8 S-lCX13UdN^%|3d}M2\YYPD_2_"v^mC E=zcZwItѣG*f~P{+J;v|߀SSS?]w#,Ν;ͥ!%5bX}E '\vhj Pa) Cb4M4*fii ($ف(<ŊŢ,<_#UxɃ{Z>c,H@$ƊIraf(Dcr;}#"ҙ`pHi3vhx֔oCm /Cd:Nz ]dGV؆]<'e IDATnNqD:35$y~߾}8q[lKaE|>ǹ-r`018]Å񅅅L&ST@spQcRA&iHswRAB߿D֞[n$(]b ?FY#5\C0ԵN8a[9pSOMMMyOB."U`#1=&`@/[l*~9Oc.U˼o'>qw8p{kIw ͚Tch0$GxT*ț9ڮB(rr.MQ<={!{G}&w1Aš.b [~w xg1v?Z5dA~ͳH$/v(mU:H.-Ig(F$;wNuCS0 B0+'ޔӦR,..[+Kb.>ڹsٳgt$o}ԉ(Ym4RV'ȍ? >ij(0P<+w]ׁȶj6)|ڽ 0N-q~zgz\M-!y~2ꪫwxn&O?/w@o$O]ׅ >w)zlI.AJINpmmXSA:Ϊ_VqF^±cN>-I 7P54ME. \{JwSN9;M =v?7O:ղ|z]s5 跾ZSj|>R\ 4in .4sӧ90*ʕW^ ;p)|>J+:";G^z6MӨ ?>#j3:Vp}0QѨ(|[8&pYijj %Z IRVvfACB+&I|f'|֭ `gT:xxPBYwYOfSZX^^ndVVVfffŢ#Ajo~7tS$А?C[\{7\.;B>oK(JC z~~~qqȯd2xMӜ/v4 rzcǎ u=LjTa[?^THIrܰ\.*epKiR ۵PM4M#;j]v.;-oy Y(ЁꫯF%^}>94wǰ^Ä<~Fg!՝;wj =$DȲ<00pСUן|Igj)*mJUijӧO;Rh4z…$iVTTUMֵ(ZBW_q 0_Ŏql4͚ ѐfG峇Bԧ>7^KZ)o u[$'e:KJ5 éъR$2)Jd !w :@}8>(RUn喍'řh4z 74ZO߶(wX(f;v,//9>>NNőH$r\.V^8ݠ#/-تORP;D9 RFq&v -jJZV).O^`}0 %!mj  T*|; iODO* VǑpLn#-VVVUq4h6ld5m)xBcq:|`¼T*aS,hAAzS@/"wOlH2LJ>3.\<~,;uU1MZF!j5lX[/b10~Z}Si?Ϯ61a0s5% `/`&M-e&`нk*?>/c *QWWWjvlo|c'T @.'IqRyHm#Jp8L]iSUERU211Nsa`dw]ǎC;S_y`ڿKvq$}|W}QYOL*-w#ເ a8+}:0BPP?ۢV8y{1^VSUZ,H$*wth A{G['?o}[q\.˿ O=zthh6nHR$G?y(mUr"( ?16gȺJ' ppx</G)h<M[;s뭷~ _Z=&(d2IZߟɯ$ PI }뭷.,,ݙ<2$rR.Ckݸrଌ\ C>hr 7|>=g9;}47urkok-hŢX<<+N &p8JA'9;OSkP(]_Gƃ/p0yP(J EQ ~8WaJX+a˭p,ݻrzB*.dwT*m!>|CaRw}wtl/__.4ۓTokdf2D"PEQRҤ\.nd6e0 P {0fq*뮻nP(ʟxEK5#$I< rʕJ\SCzezcԩSW^y[Ç-oPwu'Q 9);q/o۶ EQz m9ժ{ "A| ۢ|>_*B!=T3$aePo@ ۮM$C(E[1W_}RX|~V4]JQ{?ٳgcC$|dzK"qgϞ}gykf˖-o{nÇ{Tw-(0;NkKU:aTxqTUݠ͆΋"4F|>}p?m79y~hhcI^|Ẹ IR2ۿ("p.w' T4֢B@e ZQ^:'lٲŻ)W-ti8@A.%ԃt㸧~ZuJ>T*NNO[n.nH25=pYP.UU_kE2 £RT(`C^0;}0<`pˡP]ӰqG|@*'QeXi ХU$b-hc@fw KkSP(̩S.M:A\<->G?r&Y__^FvoE2"\{.ߵNM<0ۇ IDATs @Qs0&8r766:u*0+9 à J%p 
d2{h-*Sonbyyzz]&AXj]qxkkko0NRy* Yx7ܧ똮؇<6lقK)t9rr9M9Ñ0ѳgbAG@oO8uv*`1kkk胮jdm-<:Kzʄ* &!mxttG|%L&ӂBc(ںu+m q###( zݖPtQq Ϟ=.Cf:s(7{O àhV$٪ZYԬ$Ir☜L$\X,R\;ɕNzarxAPZ@_v911Ӗ,v$\|Yr#\W0DW4%NG8.j! s#4Lڃ(vT.}>z#?N-|>$, =tWPW_mV=}&Yp+WW*fffnJIRX( d#|0k0J2 FNEzTh4JYr"0m焐G=ABm~BKx7@G$Qhmx'*l(c,{@޸~ I<CZZ=tP8VUݸ%w69) ~nKKK藀lUg7@^o脵5k(@@UU%-TUmVu(J.˂`k}(ܗl.bII!sy~ddr9}4IpQiⷜ O, dYpqp5Rq$v4>88辈nT*J%HÁi pɐdb͉WVVNBUQsvvT*Yy\.NgΜ`bbBuJwFhN1DJ{I4M lCnW ^eA ViP˩) VOT5D+xK&V n,/`YpZjؐ"JaxP }m]qϷs`0nB… TʋGV,GvuJ ðV蠴4u.40 Zq|:vS8ix^W ( pv[$Iְ5h/=qh͆7%IFjtY,GlMq\XTJ߆RAݶm~x^`Pv T*PerBF+"XoI/Y rLJL"۹sab#},p릦˵B![<ͅad%B3LV!=\< @mBC={<ؗ<A@'En x)mݺ'N 2p8Fcrǩݾw:,)Х _m۶AF( k$jummmrrҋbiN,b @S*P4e :io6ٻB(6͑MIT.r]C0jb8qD;ÑC4X@ߟdb>WUwBxzl:ժmqB L~^2UUjtF{YLA^l6[T(#}> =ӸwG(tWUR|CRyM: F }kB-[vmU)O;" BYck<3 #σD$ <'5k1i@L;޶ضm5o0&KpʏDҒuTNY!cQ9k`zT@hVV)z$Kczx#xߏj$&''rAטh U4yvMұӂTWׁbe>Et v7}Z鬩~Pjk~.sd-艉 rE9cI#}-.QfH;Vtf8WGS6 F-]_u`BUʎ8a p0] CNW(dtT_]]]]]%C8^{-M&EQe(H p *E'Eg.E]}>P|aX(XYYiXI,iF}$8OfS!r˅7`0h5"rm&[#_]vJz00֑---,",(kp Ӄg˖-P*4?ikq.u]Š[x<[ |brr(,MÝM, (FQ/uۧO&'" @8r(UUk/ 86Z̏[cbM.B6S֢$IHb&4ع7 sgu}jj JE%,kkk'P(xYJ>B'rg"uV 5d:p1ڞ,õ/4 Ambb";4Mqه|"71>$ZXu 9D"Q12ҸZ ` ~A066h۶mC L02 ÈD" Hf+aqLL&I>ٟ9$5[R/4̹\5xpjZ~S///d*) BA2łFR KWB!n(lB2E )ʰ>vj^Aڎl/}  %IR\1*M٬ I^,i&.a d:Sa*)iYGYI8rp;_y!23 )SA5xAs_ ˁ(6;tApkAiG -y77Lŵ@`04G"pm2lDQa -[&9dY^XXpRzCOB"3rp8 v9 jdҽAp-//[e!6 /|!=F]$I='JPWtrm+T&G"X,I,d7)IEE qT_ۣm,*',b<$7|~S`ӊbH-UUWS8|J~ꫯޒs0 u(AV%gFZm*s7l厕Q2ڰ+++PiadYdHp@pԲ]ׁ1NA%;i\F+Iqg%"t ݪ:e2 wfppZSSSMgRҏ7]Mi֍S*pfxxd\@zlz J)݂bΝ1vے(T(cA1>>OLW>' ˽-^&bqllz 6lE+-E6Q}>_^S휎yupul6C ^Ӟ* \#%RIfI.;1w7w?Y}8 FnFn;aӄ:1'Q8;Zk-`}}ݶi!^UUטHQw}2dY?TU]^HQo y'6wq,zJL5 ctt|&H5 !טA4NUղl0$ (k y\ PqjZ)S0`0XVѨ,_R 9= f?R)0nNL=dLsH.hb@rbg^Mt]%_(Sxf$RfT zև5\gΜZ;pWV%IET*pa+\!z0(\.ŃL^9K&V{/V-rL]!DS&Z-@ЪR)P^>\VPVC+ibiU(J.怷mw%|߰~ٳ̙3.sΑz(tpr!ehj5]"8qL4NQ =N yUUG{Ɓ:V.Z+(˶=xw,--W;v={"r@E3pN+z'e1 0 hkr[w q۶mgľ}ࣳgʲnɒ$ng5:994lvi!EF^f?  
b>e˶0HhǙ9::|VVVAWxuX 8i /a Fy߹s'T`+r4%} t`&"[~C] FQU0A"L11W<u%L wX,P IZM9)uMIER,[!'O2¾#h7߿粜C[^?֨RM;lrJEUUkY^a(a4ϟ?eEQd2$5~[__&"`022 ixbQ wpp\.CY!onn$8TKOlPoWnY0UﳾNa['(A7Ysn*n}>(u.liZ:nsjjvAr`b\ @;KKKD"\^T/ ,r< @co7_{A@`0H:^:q%I~a8y!{׮]tz~~믿 V{o/k:rǕJ%DxCKKKPZrt}0~wKe+H$^9j$b1ɮV@4bPi9s&H$B.cFh^eYt=#2+++d2[ d@d˖-y0Yui[q&;mDQY\.#yfNf[!IRTF:L 4.lٲM(0nRd]vf&(~ꥒP >D axs(ȭ/y0, QΤЩSpH$?T. 3A" :KU0nu(nZ… M-SMŅ wv`\.uUUᢠ%#gv~Z(eF\n!+I))ݰT*aY#B8vIpڨ~RI-2MڥLVϟo*C`$W@JۈPv^^^nH\4#0 bEQܹsؚVl6QzD߁@*Ryl ˚aG4][[s)﹓gP>'4M[\\TU]5YdĻ]P*rՂ׋` yd/]#L&NCz\Êaҙiw@~4-R <ǣxX4)"yJ.h4 < A#.aN4(1G[ubw倮mL?=PCzm`?:02#+qa8:.7Ǒॄ2ѦN7F߾}z>22e~qc<ӊĩ-a^ £'w1͎ B(7=`%,//#LOO^eY^YY<9+Rl4v Lfj,gϞfR)[?K./$ì#Bwʣ$I6\$Q'=}) %#I/-rd]׭0m 2d\j>/u* mېX BVT*Ez<0 LƖة  h6 2޽{'&&8$4r)`%` Qё p=(B{@4݀BrxHddddaa<AH$Z yy)ۏ?NFɌ`6yѝq714QCy6ZГ;ÕkkkX>p`% AK^`ԔKzT|p tR0 C_]]m4R9sKdfֈFQP!h]KЏ j:5¦\όp;F9Y1Nj~%^:m#'D"133(XZR=;;q)d*z2^Z '،iPAȐbVT*EpzEH0BUզ5S,wNozK~1K0SK}bV)d\Ν;O>@~!!ۦJsR,#u;}B%UUZb5kD"0OxA O\PT2EІa@2/G))/_Uf#6Ck)WZ^ KGqh|>fr0l*w߾}aO@juLP($ah%!l%4FM@{oC* si6#A\( .ٸFeY4ӧO#~L$b`Ƶ@ (($ ei$TmƁt_FGGa`KKKk/eYV4 ݽ~@ ƸR><W|}>_<ᵵ55%I$fPiFdA$9WR)J\ڔ@-\NG*L᰽X蘛8== y/F4޵88Z A Mtй\X-DG|>! xlkP(Dc)O75yh_`֑eyhhȥKtv1a"#Cev! ϗJܛ$U{9A7 BJPaP^o 0E V؝BAUU/"0{bdHSn?{F]m]Y1??Ol?ԅSB  9!sjT*^IY>oii c;ز9X$IHsσ 77a ? rf;LXtc9B8]iTXSCyX 27>>NiZ $AXA 4M "ǭGJgggZe|`>А;]NLLDv5Hk(Hi0fh+(DWPW.it:gY*0QJ:44Ԗ8L{:Nu(z QnDQnFTjkn?j-}~10]TD" qU#$|Flٲ*fVk${eP[Bթ+=00Չxoxan ]"{+_JP{Htwmzy諊6@ dw4A-h7M2tI@IPaE^o. Iǝ:u*HK/,,W6 x٢KOO_=J&''g?{vw[HTVCښ$IV^a5V@4tSm܁^ h.G";w Uݻry[orVjI 9^>mKrM`Ne>Lӟ… t/} u c֭R5L ~r I,wAy*O&ko nm`Nѱm|Rf;{5 Êx/gGЅBa@gΜ|8-{6ڜNjHbbI8n QuX%Ŵ$)ϣNY^7UC l=Pϲ8z.7xWz}?}饗nO9{!r<g=JuT8399ib>W@|t[kS!-P ''6<ϛ靠UU B!t_|ECh0|衇:ԝ_`Нے@@uQxûH$P4%~O|;_*ҫA20A_XZZj\;АumOOu}'2 \.70Baf ]rsMUKu}ii)t 6~Y!jZ[dA3t77,,,`jvvRAرcugOv \],˹\MloZZFPZvQrEQ2Lr 4Cvh)|R ÑH؞sa>&c)0fؔP%PPȶ ? 
XæĹs瘁pɃY <ϯz00t6pVT*E+ A3l&,d* A3+atвf;-p8v'ug0 #MwfgpGS9cM7%2/5/%0f`Ь'z?4CGDb N=#h A_^prn2t Gfw#A_fdq~gOEiq00tGfn /`4#F,M O 6:"~ #h$ӎ2cS·KSqI^f 2:7X1&bo0 ^|wt m#h6`/ekcF 6DVzIy>Jq6-A3'Nqoݺg`(A_Ȥ]I{ ̰ Ie(3lv4;>#hnW@̿\D _~`0ثQ0 UUZaIw5 tۇDb#|x~^<߷onPS07?N^?zx^?ףp+O~ףpK/#O}Ssq0000)A3000)A3000)A3000)A3000).|>_^Fm0<4ގ-(hףpD?4J†xj5z !row4å]_0000\`Ч`Ч`Ч`Ч`Ч`Ч`ЧDG?с~z84}ѫ*L>|_pl XGAc}}ȑ#Tn[__plП O]Ǐб]SO?^7ph4K/Uտ뿾{="_W>|pV?of?u}{,Ey' (=؁z=>{w t??އDw{4{n޽of?u}Κi$a3d Z_"(hȌ!-$rhlAD*wĞ|{yӾ-Kᄎ=m-H$eee't:ׯ id2E"|ss`t<[.S'yϮz^coރnnn4MӴ#&)HEox2麞 N SDMglP(x"D3hDd2k|E0466~||dt.~YGO+t.www_^^677ۍNY %H$  ñΎ4:N!>uVRJ>co;<<(//鹺2:n \`ap8u a$\.7'+[P(h(h(h(h(h(h(h(h(h(h288$d=誑<ŁV]̈́ t_/ D9Q'xg!:GS{ >Âcq E'@礣 @S , @Wd ÕdЫ0Z*2 %`SH0&8gv0%Dp5y^0Ef]uX}YI aA#ckGKY᏷{ Y б-̱ieU*'xl-  nvoۏods T`+rt@sH90m?,ec`4p*¸]p.誁ha>p8nЍ1b˗I@|vΝ}2LA ?gϞرcSL!B-L8?$MMMͳg!?F`ccG:⛘< ֭ǏIAqI&N!pXЈ.fΜ~)2L]]]A E=KJJHAC:0`A#>}CH@˗$kkkA шstt4>cE{xxhbM+fGEE`#,hD/S ]xqРA222 4vݺutbg۶ms%Dx^zS;y!C455I,hD;ڵ֭-uuu{?> Bh6l`٤ n?^IIt‚FtԦMGEER]]}3g"lXЈ.\f͚_ ++K:aA#RVV^x HAYd `A#rss{QFF ?9sݻWBeEǿ3B4.W )+((hСƤhSNÆ ;p yyyQQQ .$,hDw-:uTAA Gsٵk",hDw[nwaSS^zB4BлwoMo޼ ?H! !͛7ٳ'33tuuuAAAY`A# ##6}jYoʔ)fffߌΝ`Ah… UUUnnnP4B3~x Ah*++k֭P4BٱcGXXسgHOOσɑBX}GZZȑ#fͪ"^||||||~WA( /Xo۷;;;B-X5a̘1\AMΜ9qFA( -XnݤpzWsGO;vHHH&Dl:u / 6ItKBBȑ#FҲ$GܔL2ôzPw4B?#++{ԩE|tRSSuΝ;B]X5C]]ѣnnnEEEډ'z{{%?P:v|x^^,dڵŋ\!~?~-[H:p85k={b#&!>޽˗7oN:L<BBB !SWW?Gϟ___O:`yn„ W&E`A#$,k߾}Ç/(( GP޽;iҤ={3t1 //޽{;;;\rYHHŋcbbTUUIgOx`ڵkG555_>{,`A#$p'Oڵ}qq18ckk;rM61Lq4BBn:''k׮»sM2%88ɉt𘛛FGG{{{ gϞvZ.]Hǡ,hJIIiC})8z捭#GIǡ Ł^^^֋-҅ 6mtݻB/@H]t FIٵ;gϞ}8lgc{uĔ}I,kڵ'DDD$$$pɒ%FfѢEF3f=х)%%eܹ'Ob0ebb"wG~2Cz %^oN:qǏ?>11&_{gCl3Ñ>>>Mh q{!֭kkk_|TWW_׀v{l} %45I1񸔥; ><))DFF?> ۙ8ŁX?#++l͚5ҍuҥK-^ڙѺ2‚Yl-~e992___XHG Gȸ a*A |`yB:pwhEO .=N:]rK. y& [8aCUloڴܡAʰaFvlg8rWЃPԃ,*S/5p#nd!L⛐UB? 
@E4!-]xtH0jcchxm2iG3zQ^dօ3qzHt]̈́ DpPW\+rvAA"!/",d@Ƕh| ׻`؛rXpt8Q?ۙÁ ٦m,)7U[<_αL^9٭v F孽tߑVTA/:r0 n=T @S , @Wd ɕdЫ4 LHlѾ}^](;Ǻʨ]*Ωa7p֜ؔG`2U%o01cZV N;l`LN ۤjv0?xZ5RR ;Y!@_#e#{<BRf'p8Uƚs$%3?'wO=eCL_ʝpuM:RZPЍԴ~~ѢEOe='HI•\oa!t.wBǶ0ǦOeU*"~`9p,`Q^M,.]:~D'udU%M+@MUdF= 9]]nLe,v&PS0?_Q;7rQ={/ +(<|')WЛ Q+rNRQ]۹Y$HH025-) ᩽cߌ'8k\qiCO-vBW ]5-ҝPX lޖA>F??n9Z5­Y^ڙVmI zTOaC<0ύZp.>_2h,ҝPl w3aZ^@Ɲ;w{1%[<^?"=zOaןM0~ Z(fP] WR*j#q}ZR߀*p*a<4qG / `AM/{9MB(ë4j?tf⅒?wzeqE2n=#'sPk.r'lV0sdu Ku; 櫌5HNLAp8p۞CTV:t͆ >g+޿ϗQ+r' +Bu-KF]Wa T0L:NxY ÷I{Hk~8j`0k4(̬UͰj/PRZږ[.D|ad ntUA{d;M M%xA< )oGKA9,T~ );p'57\gR tyujlQv $]+MVϗuT߼*Hzu9#>hqA_AIHIrADwP3ŋŧ}G/ =<7*ΩG%:8E("a;5pitD$;p'˒%K~xuOkWO+bwDFFgi~O DK.aS6 Nw9=bb3")w $j&Nxă{j*;l~5☘lgjَN:]zӁK0Z[^ٜ巢>XܺRGGQ`A#$2  IDATرc/_gמ7ezX+[(j7k5;猇vrV35]I̸v{*2㗿m M}IIIׯ_ݽwkYYSKiI)(K6ޫP% jJuа,.nx &f- e sss]֚:thɼa0'OkMֻs΋/x~{XXQpttyt~GgΜquuy^zOw iee켼O>ܹsժUC РCD$%%eQ`0BBBH!i޼yL&͛7͛7:>TRRrwwY0o|ǏQXXx=uuK">lmmGw/_7TUUUp8ܧO (- >4͎ѣY```NNS;k7n044dX˗/_w.^b2w!GL(++ٔ)S>}҂PPP?x𠬬 z!?\.\ҥKnn}<<<_\|ƍz:A!))YWW'Sryy9HLTVVw߿e|"644^QQϤS$--k󇯯7|[wUН;w~6N}xн{w)Att4=z4߷liiUXX-#~TA/UZԩND^aa-[o.رC@G'޽{ZZ6N}o߾%66=}ݻw dѰa?~%600ƩO[[;ʬˇDӶmlnn.Q:♠ `XojэVqq1H=y$..nh/^yB$P|Unݞ={&SmWJBWYY9wÇfn0̉';vL!MMM}*hhσ ͛7ϯ]vСC- `Ay1CzKZ/**JZZZOljc: 'rt $b^~k.ͫ/>>>{Y,hIII)) Ae_&ӧ7"L۷}E?'>}<|PCPVI@d...&&&DF7o^PPlA/11QCPVǎq/...77wƌ/==qFD-hKKK>R͗B-R\\rrI? >L g۷%6cIL)E =zlqn)j֚5klmmK:(((tڕR իڷoO:7ơ7t͜93$$t 7Anݺ%Q{S zҥKés[iVV^*$=3411:lΝ;Hg3gH@B(h2dH||/~n9ɓ'\FA><66VQFMtRFFuN=2fE(]s>͟-//oΩ|;*FA3fwԤiӦ-q)U;vL^^~ԩ4U"f0 sp:~zС-[•ߺuCH 4Lg3oJJJ=*%%E: WL)D+={fdd mD0`۷I@$544oڴIhѣG_pt Z^AСC/]$`ذaqqqS /^loo?p@AZ,&Ԃ:x0G6m|ׯ 2N8QRR2sLAZLZZZ]]tjAkkkgff sP*0`ݻwI@_g#Go!ԂYf߿_ȃgggG5W鬤dG&GW\!]Ѓ zQee% 'ɓ'oݺU. 
m۶%%%8Oa4L0nke1L|D!̝;Ғtٳ'N#@A9rn###I@BI: 4ƍSVRR0`?4A#F|2H]vM64"4,Zhl6Dt1==t$X/^XzcǘL&,gH!S***C 9}4Iqqq⭤=""BYYt~244ħA`;w'@ r |I&mڴcǎY۶m߾}K:+heeQFRRRgϞ 8ה)SD~nn+h7o޾}hu̙3ixZcǎ"3***H#qF[n޽#SxxxQQъ+H)))ZHQɂfdd!L4\1Jݸqbeo+4 ϔ;GG˗/p]lnݺǏKJJ"@,t :"\`dd4tАAd5*::tZ^^^GUPP EjjjdeeI# K,9y$}1c}~ &"p_|BDܲe̙3ihV6muK:QUUĉ7mԥKY\n(hӧeH+V[>gImm ,X`eeE:|YIIt :JAܹsKKKi5nv SL>|8,SYY8PA޽{:#AϏn7:3sO0ti*EfXG7oIg8eee''Çxb###///A]P@KKkCí>>>۷ouDŒ3>}xbA4E}n۶tݶm? ۶m #6mڐ@ST,h6lؒ%KžGQVVO;v|rΝQ__B666ңGVSS#Gf- ڵkCBBh۷oH)A70`@@@OHg6m̙3gժU8p EEE:::S  n:h$A}ƍw Yb̘1ᤃBCCڵkIgj ,h055=pիr[?~ӧɓ'wڕ>hcG4())EFF;t>cXg~ ,bjر;,M40 __Nn 4ixEIϟGN: uikkNA_]ЍJ: ueƍNNN8>}d'''Y('A$ ڵʚ6mZuu58Կ L2&<QFmܸƆt+..#hĤ@RRrӦMvvv_&FmccC:8xッIgzzzSЗt#77+WOf͒۶m 211!E4{NSSt _ҥKG[nMKKۻw/ *&&ٳ۷'Edl 'O5jTNN8!!!gYDѣGcbbzcL&t Zς`7zzz^ׯ 1vtlذ͛.\PPP Eîۂndjjohh8hРxq8;K gϮ8)??_WWt Z1mڴ>NZɓݻwBiUUUƍ֭ۆ p:Ŀiii>|}̘1|Dhh(vO8::zxx̞=tQUPPM] 7JKKmmm322Hi&ŋ+Vp8qףFZrQHgax8z4Ȭ^:88xžd۷OQQݽtxԩS#""G:h#hhWЍ:ut%+++Qeɒ% rpp ,9sfϟر#,"SM K]\\ Hݴi~w1Nx"Nv Wɢ#55޻w_ѵlll?n``@:5442H/pb"q `ii7a„ӧ{{{-,,׏NHH E***t/wXYY]reԩ"Zӿŋׯ_D:>|Μ98p4`AիjtQSS)((III&L6l,͛7XaA7[M\M7.ݻwÇtA9}ttݺu#E b`AUllW^ 4)00̙3/^l۶-,)77CStka .|-Dܲ3gNDD,|W77'Nʒ#plⰠ5|7o2ۤq}=6mc0666O>p@jj=<<^Jl۶mttmff&8MwرcwH: 4E0auڵN:9;;ٱX,ҡ'33s̙Ǐ9s&R?|0gΜ?L2e„ |ˡ'aAp>|xԩ[XX=_~RRRd#>>=zXlY3Ο?m6NP' o7#5k>>I#Ro8g ̙3k,\e E@EE͛7ccc ߯_.]=z积m̴UR24'VѣG Almmϝ;KRyuBB½{ӕcii٧OEEE {k<0Cט5gÂaɉ/))СU5o9[q-?31ئuudVEn){^-5x--=׮]i~NjGGǫWFCCCZZZcYhkkԴ5"z $%%[V6gPSP%zktĤRŏ۷o93j->}b99ݻ7z~~~5ΙTBNӽwB9f;#:*TW6[qL.</{Q+9sחtMUUU)))/^hhh044411166ܹs훼uP=Mnwvnm_׬r~+O^1pv/UooYDlܸlᤃ JMNNܼ9ׯ?~f:wldddddԹsg?^iT%@BϹwjZR~rQ;‰+|u`AرcǎW^|ƍ(**zg`'7d S^x32h`^Cmfto8:TUUy TnSղoD~{#RSS","F? 
@'4}EWk  Vqj1/FK:,h$ j6L֌f (==,h1&KETr43 =te5j ,hFi˛ydϰ?2`n5+''K)X?M8O?+Λ&|]ld s=[,bӳJ!{)x…Beee05~fݺugv׵@MqEG{|, h ‚F?lޞ-z*K^IkZ򒺄p]v]/) 5ͭGGqt:O<_yW7Xw^uuu"wޝt \5Dÿ'|ڰữXYY dԜ߽{t L:xJNNn|,,hDi۷o?qǏI7RRRFb=GYJbb"ߠ>,hDuZZZ۷owuumhh E|ܾ}ښt ,h$,--@:xD}XH4,X 33ٳ*%##C:j4۷o&D%$$ hDBxx{UU, O@ ,h$J:wpB///ADۃC:j41cƌݻw/ dʒDφ \@:H;E4=-z,$q $MMiӦד"bݻ'E4UcƌYl JBBBNNt,h$f͚URRE:ȸ{n~H@‚Fm޽{} !>>~СS naA#&##sȑ3f"p тDe9,VRRk@,h$Fѭ[;wBi7n4hXjխ[n߾M:u h (--1bDTTTvHg"KKDA:A#񡪪Z[[K: `;,h$VLMM/^L:\~}Ȑ!SFfԩUUU'O$ZL:j,h$N:Up87otЁt2XH IKK9rϤPBZZZnH@-S׮]w4" !Cc֭wM|(‚FlŊ>v $֖khhZLt`wEWWt2ݻK(|tak\bTZZt ,hDw>vؓ'OH8`']XC">>%%%ǏoQNKKkƍSNmhh o<رcG)PkaA#փ Z~= |C:j-,h={vA-*0Ç*w߿?)))Ԑ*111F"4BӵkW___///AZÇS >F;mڴ !Gݻwh˖-gΜIJJ"gϞutt$>+#Ľw988\pA߿|| &mvǎuʫW 4BM۷ʕ+IiӧO q-,,}9rӧuttHg۷oBB$O)V!n۷oԩ|޽{}v?XŋINddĉI@PL:ѣoP=}tX[[[A:?,hZLZZرcs)++#;B:,hxn:www6M0FyyyIIIΝ f@lnnqFƎK0(,hxzR"##I 'N";qA;;;ccc!.q4BVSS#>+..nδ0 2o߾4!c3\FNЏϝ$2.;<%$09`ȩ, ZT.SCЍ4jf$4H0*!uL&% tiQ tm톗$ԅY3"pSRomp@) JhP@R"4 old`]ze/{fmۮ=T7=vs6"ЀV"(zxEƆbyB6ٞ1 cltRIGpo `?t%/^,W֤p%g-+QUώov(H"Ѐ"kde|tA4AfYa|0>#Ѐ"w/7E;;]ov)ED-{c¡ǛZ3 _IPthT;&V)m @*\yŶ6n6 #i⪐a^i[Ks%Ofdn3:4r6⛄w?55=P'hP@R"4(E@)~ }9x\^: >RWcvmKo@) JhP@R"4(E@) JhP@R"4(E@) J}IENDB`