==> VGAM/NAMESPACE <==

# These functions are
# Copyright (C) 1998-2019 T.W. Yee, University of Auckland.
# All rights reserved.

useDynLib(VGAM, .registration = TRUE)

export(gatnbinomial.mix)
export(moments.nbin.gait, moments.pois.gait)
export(gatpoisson.mix)
export(dgaitnbinom.mix, pgaitnbinom.mix, rgaitnbinom.mix)
export(dgaitpois.mix, pgaitpois.mix, rgaitpois.mix)
export(gatnbinomial.mlm, EIM.GATNB.speciald, GATNB.deriv012)
export(gait.errorcheck)
export(is.Numeric2)
export(dgaitnbinom.mlm)
export(pgaitnbinom.mlm)
export(qgaitnbinom.mlm)
export(rgaitnbinom.mlm)
export(gatpoisson.mlm)
export(dgaitpois.mlm)
export(pgaitpois.mlm)
export(qgaitpois.mlm)
export(rgaitpois.mlm)
export(dgaitbinom.mlm)
export(pgaitbinom.mlm)
export(qgaitbinom.mlm)
export(rgaitbinom.mlm)
export(dzipfmb, pzipfmb, qzipfmb, rzipfmb)
exportMethods(rootogram4)
export(rootogram4vglm)
importFrom("stats", "xtabs", "na.omit")
importFrom("grDevices", "n2mfrow")
export("rootogram0", "rootogram0.default")
importFrom("graphics", "abline", "axis", "box", "hist", "lines",
           "par", "plot", "points", "polygon", "rect")
S3method("rootogram0", "default")
S3method("plot", "rootogram0")
export(add1.vglm)
export(drop1.vglm)
export(fitmodel.VGAM.expression)
export(assign2assign, findterms, subsetassign)
importFrom("stats", "add1", "drop1", "factor.scope", "update.default")
exportMethods(step4)
export(step4vglm)
importFrom("stats", "add.scope", "drop.scope", "extractAIC")
S3method(add1, vglm, add1.vglm)
S3method(drop1, vglm, drop1.vglm)
S3method(extractAIC, vglm, extractAIC.vglm)
export(extractAIC.vglm)
exportMethods(dfterms)
export(dftermsvglm)
export(gabinomial.mlm)
export(wz.merge)
export(getarg)
export(rainbow.sky)
export(loglink, logneglink, logofflink, negidentitylink, logitlink)
export(logloglink, clogloglink, reciprocallink, negloglink, probitlink)
export(negreciprocallink, rhobitlink, fisherzlink, multilogitlink)
export(foldsqrtlink, extlogitlink, logclink, cauchitlink)
export(gordlink, pordlink, nbordlink, nbord2link)
export(zeta.specials)
export(stieltjes)
export(loglogloglink)
importFrom("graphics", "legend")
export(hdeffsev, seglines)
export(gipoisson.mlm)
export(gtbinomial)
export(bell, bellff, dbell, rbell)
exportMethods(calibrate)
export(fnumat2R)
export(dtrinorm, rtrinorm, trinormal)
export(car.relatives)
export(attr.assign.x.vglm)
importFrom("stats", "stat.anova")
export(ordsup, ordsup.vglm)
export(R2latvar)
importFrom("stats", anova)
export(mux5)
export(wald.stat, wald.stat.vlm)
export(score.stat, score.stat.vlm)
export(lrt.stat, lrt.stat.vlm)
export(mills.ratio, mills.ratio2)
export(which.etas)
export(which.xij)
exportMethods(TIC)
export(TIC)
export(TICvlm)
export(retain.col, d3theta.deta3)
export(ghn100, ghw100)
export(hdeff, hdeff.vglm)
export(calibrate.rrvglm.control, calibrate.rrvglm)
importFrom("utils", "tail")
importFrom("stats", ".nknots.smspl")
export(sm.os)
export(label.cols.y)
export(prob.munb.size.VGAM)
export(negbinomial.initialize.yj)
export(mroot2)
export(psint)
export(psintpvgam)
export(startstoppvgam)
export(summarypvgam, show.summary.pvgam)
S3method(df.residual, pvgam, df.residual_pvgam)
export(df.residual_pvgam)
exportMethods(endf)
export(endfpvgam)
export(vcov.pvgam)
S3method(vcov, pvgam, vcovpvgam)
export(show.pvgam)
importFrom("graphics", "polygon")
export(model.matrixpvgam)
S3method(model.matrix, pvgam, model.matrixpvgam)
importFrom("stats", "ppoints")
export(doazeta, poazeta, qoazeta, roazeta, oazeta)
export(doapospois, poapospois, qoapospois, roapospois, oapospoisson)
export(doalog, poalog, qoalog, roalog, oalog)
export(ddiffzeta, pdiffzeta, qdiffzeta, rdiffzeta, diffzeta)
export(dotzeta, potzeta, qotzeta, rotzeta, otzeta)
export(dotpospois, potpospois, qotpospois, rotpospois, otpospoisson)
export(dotlog, potlog, qotlog, rotlog, otlog)
export(doilog, poilog, qoilog, roilog, oilog)
export(doizipf, poizipf, qoizipf, roizipf, oizipf)
export(gharmonic, gharmonic2)
export(pzeta, qzeta, rzeta)
export(qzipf)
export(doizeta, poizeta, qoizeta, roizeta, oizeta)
export(bisection.basic)
export(Zeta.aux, deflat.limit.oizeta)
export(topple, dtopple, ptopple, qtopple, rtopple)
export(oiposbinomial, doiposbinom, poiposbinom, qoiposbinom, roiposbinom)
export(doipospois, poipospois, qoipospois, roipospois, oipospoisson)
export(deflat.limit.oipospois)
export(zoabetaR)
export(sm.ps, get.X.VLM.aug, psv2magic)
export(checkwz)
export(process.constraints)
export(mux22, mux111)
importFrom("splines", "splineDesign")
export(AR1EIM)
export(AR1.gammas)
importFrom("stats", "cov")
export(as.char.expression)
export(predictvglmS4VGAM)
export(EIM.posNB.speciald, EIM.NB.speciald, EIM.posNB.specialp, EIM.NB.specialp)
export(.min.criterion.VGAM)
export(pzoibetabinom, pzoibetabinom.ab, rzoibetabinom, rzoibetabinom.ab,
       dzoibetabinom, dzoibetabinom.ab, Init.mu)
export(log1mexp)
export(dzoabeta, pzoabeta, qzoabeta, rzoabeta)
export(logitoffsetlink)
export(showvglmS4VGAM)
export(showvgamS4VGAM)
export(subsetarray3)
export(tapplymat1)
export(findFirstMethod)
export(summaryvglmS4VGAM)
export(showsummaryvglmS4VGAM)
S3method(vcov, vlm, vcovvlm)
S3method(coef, vlm, coefvlm)
S3method(df.residual, vlm, df.residual_vlm)
S3method(model.matrix, vlm, model.matrixvlm)
S3method(formula, vlm, formulavlm)
export(vcov.vlm, coef.vlm, formula.vlm, model.matrix.vlm)
export(has.interceptvlm)
exportMethods(has.intercept)
export(term.namesvlm)
exportMethods(term.names)
export(responseNamevlm)
exportMethods(responseName)
importFrom("grDevices", "chull")
importFrom("graphics", "abline", "arrows", "axis", "lines", "matlines",
           "matplot", "matpoints", "mtext", "par", "points", "rug",
           "segments", "text")
importFrom("methods", "as", "is", "new", "slot", "slot<-", "slotNames",
           "callNextMethod", "existsMethod", "signature", "show")
importFrom("stats", ".getXlevels", "as.formula", "contrasts<-", "dbeta",
           "dbinom", "delete.response", "deriv3", "dgamma", "dgeom",
           "dnbinom", "dt", "dweibull", "getCall", "integrate",
           "is.empty.model", "lm.fit", "median", "model.offset",
           "model.response", "model.weights", "na.fail", "napredict",
           "optim", "pbeta", "pbinom", "pgamma", "pgeom", "pnbinom",
           "polym", "printCoefmat", "plogis", "qlogis", "pweibull",
           "qbeta", "qbinom", "qchisq", "qf", "qgamma", "qgeom",
           "qnbinom", "qt", "quantile", "qweibull", "rbeta", "rbinom",
           "rgamma", "rgeom", "rlnorm", "rlogis", "rnbinom", "runif",
           "rweibull", "sd", "spline", "terms.formula", "time",
           "uniroot", "update.formula", "var", "weighted.mean")
importFrom("utils", "flush.console", "getS3method", "head")
importFrom("stats4", profile)  # For S4, not S3
export(profilevglm)            # For S4, not S3
importFrom("stats", "approx")
export(vplot.profile)
export(vpairs.profile)
importFrom("grDevices", "dev.flush", "dev.hold")
importFrom("graphics", "frame")
importFrom("stats4", confint)  # For S4, not S3
export(confintvglm)            # For S4, not S3
export(confintrrvglm)          # For S4, not S3
export(confintvgam)            # For S4, not S3
exportMethods(confint)         # For S4, not S3
export(dgenpois)
export(AR1)
export(dAR1)
export(param.names)
export(is.buggy.vlm)
exportMethods(is.buggy)
importFrom("splines", splineDesign, bs, ns)
export(nparam, nparam.vlm, nparam.vgam, nparam.rrvglm, nparam.qrrvglm,
       nparam.rrvgam)
export(linkfun, linkfun.vglm)
export(sm.bs, sm.ns, sm.scale.default, sm.poly, sm.scale)
exportMethods(coefficients, coef)
importFrom("stats", coefficients, coef)
export(case.names, coef, coefficients, df.residual, fitted,
       fitted.values, formula, residuals, variable.names, weights)
export(expected.betabin.ab, grid.search, grid.search2, grid.search3,
       grid.search4)
exportMethods(QR.Q, QR.R)
export(QR.Q, QR.R)
export(Select, subsetcol)
export(simulate.vlm)
importFrom("stats", simulate)
export(familyname.vlm)
export(familyname.vglmff)
exportMethods(familyname)
export(logLik.qrrvglm)
importFrom("stats4", BIC)
exportMethods(BIC)
export(BICvlm)
export(check.omit.constant)
export(I.col)
export(dbiclaytoncop, rbiclaytoncop, biclaytoncop)
export(bistudentt, dbistudentt)
export(dbinormcop, pbinormcop, rbinormcop, binormalcop)
export(kendall.tau)
export(expint, expexpint, expint.E1)
export(pgamma.deriv, pgamma.deriv.unscaled, truncweibull)
export(binom2.rho.ss)
export(arwz2wz)
export(link2list)
export(multilogit)
export(perks, dperks, pperks, qperks, rperks)
export(gumbelII, dgumbelII, pgumbelII, qgumbelII, rgumbelII)
export(makeham, dmakeham, pmakeham, qmakeham, rmakeham)
export(gompertz, dgompertz, pgompertz, qgompertz, rgompertz)
export(lindley, dlind, plind, rlind)
export(w.wz.merge, w.y.check, vweighted.mean.default)
export(is.parallel.matrix, is.parallel.vglm, is.zero.matrix, is.zero.vglm)
exportMethods(is.parallel, is.zero)
export(nvar_vlm)
importFrom("stats4", nobs)
exportMethods(nobs)
importFrom("stats4", AIC, coef, summary, plot, logLik, vcov)
exportMethods(AIC, AICc, coef, summary, plot, logLik, vcov)
export(npred, npred.vlm)
exportMethods(npred)
export(hatvalues, hatvaluesvlm)
exportMethods(hatvalues)
importFrom("stats", hatvalues)
importFrom("stats", dfbeta)  # Added 20140509
export(dfbeta, dfbetavlm)
exportMethods(dfbeta)
export(hatplot, hatplot.vlm)
exportMethods(hatplot)
export(VGAMenv)
export(lrtest, lrtest_vglm)
export(update_default, update_formula)
export(nvar, nvar.vlm, nvar.vgam, nvar.rrvglm, nvar.qrrvglm, nvar.rrvgam,
       nvar.rcim)
export(nobs.vlm)
export(plota21)
export(Confint.rrnb, Confint.nb1)
export(vcovrrvglm)
export(posbernoulli.b, posbernoulli.t, posbernoulli.tb, aux.posbernoulli.t)
export(N.hat.posbernoulli)
export(dposbern, rposbern)
export(is.empty.list)
export(Build.terms.vlm, interleave.VGAM, interleave.cmat, procVec, eijfun,
       ResSS.vgam, valt.control, trivial.constraints,
       vcontrol.expression, vplot, vplot.default, vplot.factor,
       vplot.list, vplot.matrix, vplot.numeric, vvplot.factor)
export(m2a, a2m, vforsub, vbacksub, vchol)
export(case.namesvlm, variable.namesvlm)
export(expgeometric, dexpgeom, pexpgeom, qexpgeom, rexpgeom,
       genrayleigh, dgenray, pgenray, qgenray, rgenray,
       exppoisson, dexppois, pexppois, qexppois, rexppois,
       explogff, dexplog, pexplog, qexplog, rexplog)
export(Rcim, plotrcim0, rcim, summaryrcim)
export(moffset)
export(plotqvar, qvplot, Qvar, qvar)
export(depvar, depvar.vlm)
export(put.caption)
export(cm.VGAM, cm.nointercept.VGAM, cm.zero.VGAM,
       Deviance.categorical.data.vgam, lm2qrrvlm.model.matrix, vlabel,
       dimm)
export(is.smart, smart.mode.is, wrapup.smart, setup.smart, sm.min1, sm.min2)
export(smart.expression, get.smart, get.smart.prediction, put.smart)
export(dbinorm, pbinorm, rbinorm, binormal)
export(pnorm2, dnorm2)
export(iam, fill, fill1, fill2, fill3, biamhcop, dbiamhcop, pbiamhcop,
       rbiamhcop, bigamma.mckay, freund61, frechet, dfrechet, pfrechet,
       qfrechet, rfrechet, bifrankcop, dbifrankcop, pbifrankcop,
       rbifrankcop, biplackettcop, dbiplackcop, pbiplackcop, rbiplackcop,
       benini1, dbenini, pbenini, qbenini, rbenini, maxwell, dmaxwell,
       pmaxwell, qmaxwell, rmaxwell, bifgmexp, bifgmcop, dbifgmcop,
       pbifgmcop, rbifgmcop, bigumbelIexp, erf, erfc, lerch, lambertW,
       log1pexp, truncpareto, dtruncpareto, qtruncpareto, rtruncpareto,
       ptruncpareto, paretoff, dpareto, qpareto, rpareto, ppareto,
       paretoIV, dparetoIV, qparetoIV, rparetoIV, pparetoIV, paretoIII,
       dparetoIII, qparetoIII, rparetoIII, pparetoIII, paretoII,
       dparetoII, qparetoII, rparetoII, pparetoII, dparetoI, qparetoI,
       rparetoI, pparetoI, cens.gumbel, gumbelff, gumbel, dgumbel,
       pgumbel, qgumbel, rgumbel, foldnormal, dfoldnorm, pfoldnorm,
       qfoldnorm, rfoldnorm, cennormal, cens.normal, double.cens.normal,
       rec.normal, rec.normal.control, rec.exp1, rec.exp1.control,
       cens.rayleigh, rayleigh, drayleigh, prayleigh, qrayleigh,
       rrayleigh, drice, price, qrice, rrice, riceff, marcumQ, dskellam,
       rskellam, skellam, inv.gaussianff, dinv.gaussian, pinv.gaussian,
       rinv.gaussian, waldff, expexpff1, expexpff)
export(AICvlm, AICvgam, AICrrvglm, AICqrrvglm,  # AICvglm,
       anova.vgam, anova.vglm, bisa, dbisa, pbisa, qbisa, rbisa,
       betabinomialff, betabinomial, double.expbinomial, dbetabinom,
       pbetabinom, rbetabinom, dbetabinom.ab, pbetabinom.ab,
       rbetabinom.ab, biplot.qrrvglm, dbort, rbort, borel.tanner,
       care.exp, care.exp2, cauchy, cauchy1, concoef.rrvgam,
       concoef.Coef.rrvgam, concoef.Coef.qrrvglm, concoef.qrrvglm, cdf,
       cdf.lms.bcg, cdf.lms.bcn, cdf.lms.yjn, cdf.vglm, Coef.rrvgam,
       Coefficients, coefqrrvglm, coefvlm, coefvgam, coefvsmooth.spline,
       coefvsmooth.spline.fit, constraints, constraints.vlm, deplot,
       deplot.default, deplot.lms.bcg, deplot.lms.bcn, deplot.lms.yjn,
       deplot.lms.yjn2, deplot.vglm, deviance.vlm, deviance.qrrvglm,
       df.residual_vlm, dirmultinomial, dirmul.old, dtheta.deta,
       d2theta.deta2)
S3method(anova, vgam)
S3method(anova, vglm)
S3method(as.character, SurvS4)
S3method(biplot, qrrvglm)
S3method(biplot, rrvglm)
S3method(deviance, qrrvglm)
S3method(deviance, vlm)
S3method(logLik, qrrvglm)
S3method(logLik, vlm)
S3method(model.matrix, qrrvglm, model.matrixqrrvglm)
S3method(nobs, vlm)
S3method(persp, rrvgam)
S3method(plot, rrvgam)
S3method(plot, vgam)
S3method(predict, rrvgam)
S3method(predict, rrvglm)
S3method(predict, vgam)
S3method(predict, vlm)
S3method(simulate, vlm)
S3method(sm.scale, default)
S3method(summary, grc)
S3method(summary, qrrvglm)
S3method(summary, rrvgam)
S3method(summary, rrvglm)
S3method(terms, vlm)
export(cloglog, cauchit, extlogit, explink, fisherz, logc, loge, logneg,
       logit, logoff, negreciprocal, probit, reciprocal, rhobit,
       golf, polf, nbolf, nbolf2, Cut)
export(ordpoisson)
export(poisson.points, dpois.points)
export(erlang, dfelix, felix, fittedvlm, fittedvsmooth.spline, foldsqrt,
       formulavlm, formulaNA.VGAM, garma, gaussianff, hypersecant,
       hypersecant01, hyperg, inv.binomial, InverseBrat,
       inverse.gaussianff, is.Numeric, mccullagh89, leipnik, dlevy,
       plevy, qlevy, rlevy, levy, lms.bcg.control, lms.bcn.control,
       lmscreg.control, lms.yjn.control, lms.bcg, lms.bcn, lms.yjn,
       lms.yjn2, dlms.bcn, qlms.bcn, lqnorm, dbilogis, pbilogis,
       rbilogis, bilogistic, logistic1, logistic, logLik.vlm,
       latvar.rrvgam, latvar.Coef.qrrvglm, latvar.rrvglm, latvar.qrrvglm,
       lvplot.rrvgam, Rank, Rank.rrvglm, Rank.qrrvglm, Rank.rrvgam,
       Max.Coef.qrrvglm, Max.qrrvglm, is.bell.vlm, is.bell.rrvglm,
       is.bell.qrrvglm, is.bell.rrvgam, is.bell, model.matrixqrrvglm,
       model.matrixvlm, model.framevlm, nakagami, dnaka, pnaka, qnaka,
       rnaka, namesof, nlminbcontrol, negloge, Opt.Coef.qrrvglm,
       Opt.qrrvglm, persp.rrvgam)
export(micmen)
export(plot.rrvgam, plotpreplotvgam, plotvglm, plotvlm,
       plotvsmooth.spline, powerlink, predict.rrvgam, predictrrvgam,
       predictors, predictors.vglm, predictqrrvglm, predict.rrvglm,
       predict.vgam, predictvglm, predict.vlm, predictvsmooth.spline,
       predictvsmooth.spline.fit, show.Coef.rrvgam, show.Coef.qrrvglm,
       show.Coef.rrvglm, show.rrvglm, show.summary.rrvgam,
       show.summary.qrrvglm, show.summary.rrvglm, show.summary.vgam,
       show.summary.vglm, show.summary.vlm, show.vanova, show.vgam,
       show.vglm, show.vlm, show.vglmff, show.vsmooth.spline,
       process.binomial2.data.VGAM, process.categorical.data.VGAM,
       negzero.expression.VGAM, qtplot, qtplot.default, qtplot.gumbel,
       qtplot.gumbelff, qtplot.lms.bcg, qtplot.lms.bcn, qtplot.lms.yjn,
       qtplot.lms.yjn2, qtplot.vextremes, qtplot.vglm, explot.lms.bcn,
       rlplot, rlplot.gevff, rlplot.gev, rlplot.vextremes, rlplot.vglm,
       rrar.control)
export(SurvS4, is.SurvS4, as.character.SurvS4, show.SurvS4,
       simple.exponential, better.exponential, simple.poisson,
       seq2binomial, size.binomial, sm.scale1, sm.scale2, summary.rrvgam,
       summary.grc, summary.qrrvglm, summary.rrvglm, summaryvgam,
       summaryvglm, summaryvlm, s.vam, terms.vlm, termsvlm,
       Tol.Coef.qrrvglm, Tol.qrrvglm, triangle, dtriangle, ptriangle,
       qtriangle, rtriangle, valid.vknotl2, vcovvlm, vglm.fit, vgam.fit,
       vglm.garma.control, vglm.multinomial.control,
       vglm.multinomial.deviance.control, vglm.VGAMcategorical.control,
       vlm, vlm.control, vnonlinear.control, wweights, yeo.johnson,
       dzipf, pzipf, rzipf, zipf, zeta, zetaff, dzeta)
export(lm2vlm.model.matrix)
export(vlm2lm.model.matrix)
importFrom("stats", model.matrix)
importFrom("stats", model.frame)
importFrom("stats", terms)
importFrom("stats", resid)
importFrom("stats", residuals)
importFrom("stats", fitted)
importFrom("stats", predict)
importFrom("stats", df.residual)
importFrom("stats", deviance)
importFrom("stats", fitted.values)
importFrom("stats", effects)
importFrom("stats", weights)
importFrom("stats", formula)
importFrom("stats", case.names)
importFrom("stats", variable.names)
importFrom("stats", dchisq, pchisq, pf, dexp, rexp, dpois, ppois, qpois,
           rpois, dnorm, pnorm, qnorm, rnorm)
importFrom("graphics", persp)
export(ddagum, rdagum, qdagum, pdagum, dagum)
export(dfisk, pfisk, qfisk, rfisk, fisk)
export(dlomax, plomax, qlomax, rlomax, lomax)
export(dinv.lomax, pinv.lomax, qinv.lomax, rinv.lomax, inv.lomax)
export(dparalogistic, pparalogistic, qparalogistic, rparalogistic,
       paralogistic)
export(dinv.paralogistic, pinv.paralogistic, qinv.paralogistic,
       rinv.paralogistic, inv.paralogistic)
export(dsinmad, psinmad, qsinmad, rsinmad, sinmad)
export(lognormal)
export(dpolono, ppolono, rpolono)
export(dgpd, pgpd, qgpd, rgpd, gpd)
export(dgev, pgev, qgev, rgev, gev, gevff)
export(dlaplace, plaplace, qlaplace, rlaplace, laplace)
export(dalap, palap, qalap, ralap, alaplace1.control, alaplace2.control,
       alaplace3.control, alaplace1, alaplace2, alaplace3)
export(dloglap, ploglap, qloglap, rloglap)
export(loglaplace1.control, loglaplace1)
export(dlogitlap, plogitlap, qlogitlap, rlogitlap,
       logitlaplace1.control, logitlaplace1)
export(dprobitlap, pprobitlap, qprobitlap, rprobitlap)
export(dclogloglap, pclogloglap, qclogloglap, rclogloglap)
export(dcard, pcard, qcard, rcard, cardioid)
export(fff, fff.control, mbesselI0, vonmises)
export(AA.Aa.aa, AB.Ab.aB.ab, ABO, acat, betaR, betaff, dbetageom,
       pbetageom, rbetageom, betageometric, dbetanorm, pbetanorm,
       qbetanorm, rbetanorm,  # betanorm,
       betaprime, betaII, zipebcom, binom2.or, dbinom2.or, rbinom2.or,
       binom2.rho, dbinom2.rho, rbinom2.rho, binom2.Rho, binomialff,
       biplot.rrvglm, brat, bratt, Brat, calibrate.qrrvglm.control,
       calibrate.qrrvglm, cao.control, cao, cdf.lmscreg, cgo, chisq,
       clo, concoef, Coef, Coef.qrrvglm, Coef.rrvglm, Coef.vlm,
       predictqrrvglm, cratio, cumulative, propodds, prplot,
       prplot.control)
export(deplot.lmscreg, dirichlet, exponential, A1A2A3)
export(lgamma1, lgamma3)
export(gammahyperbola, gengamma.stacy, gamma1, gamma2, gammaR, gammaff)
export(dlgamma, plgamma, qlgamma, rlgamma)
export(dgengamma.stacy, pgengamma.stacy, qgengamma.stacy, rgengamma.stacy)
export(dbenf, pbenf, qbenf, rbenf, genbetaII.Loglikfun4, genbetaII,
       dgenbetaII, genpoisson, geometric, truncgeometric, dlino, plino,
       qlino, rlino, lino, grc, dhzeta, phzeta, qhzeta, rhzeta, hzeta,
       negidentity, identitylink, dprentice74, prentice74, amlnormal,
       amlbinomial, amlexponential, amlpoisson, Wr1, Wr2, dkumar, pkumar,
       qkumar, rkumar, kumar, dyules, pyules, qyules, ryules, yulesimon,
       logff, dlog, plog, qlog, rlog, logF, dlogF, loglinb2, loglinb3,
       loglog, lvplot.qrrvglm, lvplot.rrvglm, Max, MNSs, dmultinomial,
       multinomial, margeffS4VGAM, cratio.derivs, margeff)
export(huber2, huber1, dhuber, edhuber, phuber, qhuber, rhuber)
export(slash, dslash, pslash, rslash)
export(deunif, peunif, qeunif, reunif, denorm, penorm, qenorm, renorm,
       sc.studentt2, dsc.t2, psc.t2, qsc.t2, rsc.t2, deexp, peexp,
       qeexp, reexp)
export(meplot, meplot.default, meplot.vlm, guplot, guplot.default,
       guplot.vlm, posNBD.Loglikfun2, NBD.Loglikfun2, negbinomial,
       negbinomial.size, polya, polyaR, uninormal, SURff, normal.vcm,
       nbcanlink, tobit, dtobit, ptobit, qtobit, rtobit, Opt,
       perspqrrvglm, plotdeplot.lmscreg, plotqrrvglm,
       plotqtplot.lmscreg, plotvgam.control, plotvgam, plot.vgam,
       cens.poisson, poissonff, dposbinom, pposbinom, qposbinom,
       rposbinom, posbinomial, dposgeom, pposgeom, qposgeom,
       rposgeom,  # posgeometric,
       dposnegbin, pposnegbin, qposnegbin, rposnegbin, posnegbinomial,
       dposnorm, pposnorm, qposnorm, rposnorm, posnormal, dpospois,
       ppospois, qpospois, rpospois, pospoisson, qtplot.lmscreg, rdiric,
       rigff, rrar, rrvglm.control, rrvglm.optim.control)
export(eta2theta, theta2eta, rrvglm, simplex, dsimplex, rsimplex, sratio,
       s, studentt, studentt2, studentt3, Kayfun.studentt, Tol,
       trplot.qrrvglm, trplot, rcqo, cqo, qrrvglm.control, vgam.control,
       vgam, vglm.control, vglm, vsmooth.spline, weibull.mean, weibullR,
       yip88, dzabinom, pzabinom, qzabinom, rzabinom, zabinomial,
       zabinomialff, dzageom, pzageom, qzageom, rzageom, zageometric,
       zageometricff, dzanegbin, pzanegbin, qzanegbin, rzanegbin,
       zanegbinomial, zanegbinomialff, dzapois, pzapois, qzapois,
       rzapois, zapoisson, zapoissonff, dzibinom, pzibinom, qzibinom,
       rzibinom, zibinomial, zibinomialff, dzigeom, pzigeom, qzigeom,
       rzigeom, zigeometric, zigeometricff, dzinegbin, pzinegbin,
       qzinegbin, rzinegbin, zinegbinomial, zinegbinomialff, dzipois,
       pzipois, qzipois, rzipois, zipoisson, zipoissonff, mix2exp,
       mix2normal, mix2poisson, mix2exp.control, mix2normal.control,
       mix2poisson.control, skewnormal, dskewnorm, rskewnorm, tikuv,
       dtikuv, ptikuv, qtikuv, rtikuv)
exportClasses(vglmff, vlm, vglm, vgam, rrvglm, qrrvglm, grc, rcim,
              vlmsmall, rrvgam, summary.vgam, summary.vglm, summary.vlm,
              summary.qrrvglm, summary.rrvgam, summary.rrvglm,
              Coef.rrvglm, Coef.qrrvglm, Coef.rrvgam, vcov.qrrvglm,
              vsmooth.spline.fit, vsmooth.spline)
exportClasses(SurvS4)
exportMethods(Coef, coefficients, constraints, effects, predict, fitted,
              fitted.values, resid, residuals, show, terms, model.frame,
              model.matrix, summary, coef, AIC, AICc, plot, logLik,
              vcov, deviance, cdf, df.residual, lv, latvar, Max, Opt,
              Tol, biplot, deplot, lvplot, qtplot, rlplot, meplot,
              trplot, vplot, formula, case.names, variable.names,
              weights, persp)

==> VGAM/demo/distributions.R <==

# Demo for the maximum likelihood estimation of parameters from
# some selected distributions
# At the moment this is copied from some .Rd file

## Negative binomial distribution
## Data from Bliss and Fisher (1953).

appletree <- data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1))
fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree,
            weights = w, crit = "coef", half.step = FALSE)
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
deviance(fit)  # NB2 only; needs 'crit = "coef"' & 'deviance = TRUE' above

## Beta distribution

set.seed(123)
bdata <- data.frame(y = rbeta(nn <- 1000, shape1 = exp(0),
                              shape2 = exp(1)))
fit1 <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
Coef(fit1)  # Useful for intercept-only models

# General A and B, and with a covariate
bdata <- transform(bdata, x2 = runif(nn))
bdata <- transform(bdata, mu   = logit(0.5 - x2, inverse = TRUE),
                          prec =   exp(3.0 + x2))  # prec == phi
bdata <- transform(bdata, shape2 = prec * (1 - mu),
                          shape1 = mu * prec)
bdata <- transform(bdata, y = rbeta(nn, shape1 = shape1,
                                    shape2 = shape2))
bdata <- transform(bdata, Y = 5 + 8 * y)  # From 5 to 13, not 0 to 1
fit2 <- vglm(Y ~ x2, data = bdata, trace = TRUE,
             betaff(A = 5, B = 13, lmu = extlogit(min = 5, max = 13)))
coef(fit2, matrix = TRUE)

==> VGAM/demo/zipoisson.R <==

# Demo for Zero-Inflated Poisson

set.seed(111)
zdata <- data.frame(x2 = runif(nn <- 1000))
zdata <- transform(zdata,
                   pstr01  = logit(-0.5 + 1*x2, inverse = TRUE),
                   pstr02  = logit( 0.5 - 1*x2, inverse = TRUE),
                   Ps01    = logit(-0.5       , inverse = TRUE),
                   Ps02    = logit( 0.5       , inverse = TRUE),
                   lambda1 =  loge(-0.5 + 2*x2, inverse = TRUE),
                   lambda2 =  loge( 0.5 + 2*x2, inverse = TRUE))
zdata <- transform(zdata,
                   y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01),
                   y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02))

with(zdata, table(y1))  # Eyeball the data
with(zdata, table(y2))
with(zdata, stem(y2))

fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
coef(fit1, matrix = TRUE)  # These should agree with the above values
coef(fit2, matrix = TRUE)  # These should agree with the above values

head(fit1@misc$pobs0)  # The estimate of P(Y=0)

coef(fit1)
coef(fit1, matrix = TRUE)
Coef(fit1)

==> VGAM/demo/vgam.R <==

# Demo for vgam

data(hunua, package = "VGAM")
fit.h <- vgam(agaaus ~ s(altitude), binomialff, data = hunua)
plot(fit.h, se = TRUE, lcol = "blue", scol = "orange",
     llwd = 2, slwd = 2, las = 1)

nn <- nrow(hunua)
ooo <- with(hunua, order(altitude))
with(hunua, plot(altitude[ooo], fitted(fit.h)[ooo], type = "l",
                 ylim = 0:1, lwd = 2, col = "blue", las = 1))
points(agaaus + (runif(nn) - 0.5) / 30 ~ altitude, hunua, col = "orange")
col = "orange") VGAM/demo/cqo.R0000755000176200001440000000720113565414530012646 0ustar liggesusers# Demo for constrained quadratic ordination (CQO; aka # canonical Gaussian ordination) data(hspider, package = "VGAM") hspider[, 1:6] <- scale(hspider[, 1:6]) # standardize environmental vars ## Rank-1 model (unequal tolerances, deviance = 1176.0) set.seed(123) p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, quasipoissonff, data = hspider, Bestof = 10, Crow1positive = FALSE, eq.tolerances = FALSE, I.tolerances = FALSE) par(mfrow = c(3, 3)) lvplot(p1, lcol = 1:12, llwd = 2, llty = 1:12, y = TRUE, pch = 1:12, pcol = 1:12, las = 1, main = "Hunting spider data") print(cancoef(p1), digits = 3) print(Coef(p1), digits = 3) # trajectory plot trplot(p1, which = 1:3, log = "xy", type = "b", lty = 1, col = c("blue", "orange", "green"), lwd = 2, label = TRUE) -> ii legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "), lwd = 2, lty = 1, col = c("blue", "orange", "green")) abline(a = 0, b = 1, lty = "dashed") ## Rank-2 model (equal tolerances, deviance = 856.5) set.seed(111) r2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, quasipoissonff, data = hspider, Rank = 2, Bestof = 10, I.tolerances = TRUE, eq.tolerances = TRUE, Crow1positive = c(FALSE, FALSE)) print(ccoef(r2), digits = 3) print(Coef(r2), digits = 3) clr <- (1:(10+1))[-7] # Omit yellow colour adj <- c(-0.1, -0.1, -0.1, 1.1, 1.1, 1.1, -0.1, -0.1, -0.1, 1.1) # With C arrows lvplot(r2, label = TRUE, xlim = c(-2.8, 5.0), ellipse = FALSE, C = TRUE, Cadj = c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj = adj, las = 1, chull = TRUE, pch = "+", pcol = clr, sites = TRUE) # With circular contours lvplot(r2, label = TRUE, xlim = c(-2.8, 5.0), ellipse = TRUE, C = FALSE, Cadj = c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj = adj, las = 1, chull = TRUE, pch = "+", pcol = clr, sites = TRUE) # With neither C arrows or circular contours lvplot(r2, label = TRUE, xlim = c(-2.8, 5.0), ellipse = FALSE, C = FALSE, Cadj = c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj = adj, las = 1, chull = TRUE, pch = "+", pcol = clr, sites = TRUE) # Perspective plot persp(r2, xlim = c(-5, 5), ylim = c(-3, 6), theta = 50, phi = 20) ## Gaussian logit regression ## Not recommended actually because the number of sites is far too low. ## Deviance = 154.6, equal tolerances. 
ybin <- with(hspider, 0 + (cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) > 0)) # Matrix of 0s and 1s colnames(ybin) <- paste0(colnames(ybin), ".01") hspider <- data.frame(hspider, ybin) set.seed(1312) b1 <- cqo(ybin[, -c(1, 5)] ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, quasibinomialff(mv = TRUE), Bestof = 4, I.tolerances = TRUE, data = hspider, eq.tolerances = TRUE, Crow1positive = FALSE) lvplot(b1, type = "predictors", llwd = 2, las = 1, ylab = "logit mu", ylim = c(-20, 11), lcol = 1:10) c1 <- Coef(b1) cts <- c("Trocterr", "Pardmont", "Alopfabr", "Arctlute") text(c1@Optimum[1, cts], logit(c1@Maximum[cts])+1.0, cts) round(t(Coef(b1, I.tolerances = FALSE)@C), dig = 3) # On the probability scale lvplot(b1, type = "fitted", llwd = 2, las = 1, llty = 1, ylab = "Probability of presence", ylim = c(0, 1), lcol = 1:10) VGAM/demo/lmsqreg.R0000755000176200001440000000170613565414530013542 0ustar liggesusers# Demo for lmsqreg # At the moment this is copied from lms.bcn.Rd data(bmi.nz, package = "VGAM") fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = bmi.nz, trace = TRUE) head(predict(fit), 3) head(fitted(fit), 3) head(bmi.nz, 3) # Person 1 is near the lower quartile of BMI amongst people his age head(cdf(fit), 3) # Quantile plot par(bty = "l", mar = c(5, 4, 4, 3) + 0.1, xpd = TRUE, mfrow = c(1, 2)) qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles", xlim = c(15, 90), las = 1, ylab = "BMI", lwd = 2, lcol = 4) # Density plot ygrid <- seq(15, 43, len = 100) # BMI ranges par(lwd = 2) aa <- deplot(fit, x0 = 20, y = ygrid, main = "Density functions at Age = 20, 42 and 55", xlab = "BMI") aa aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, lty = 2, col = "orange") aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, lty = 4, col = 4, Attach = TRUE) aa@post$deplot # Contains density function values VGAM/demo/00Index0000755000176200001440000000041413565414530013072 0ustar liggesusersbinom2.or Bivariate logistic model cqo Constrained auadratic ordination distributions Maximum likelihood estimation of some distributions lmsqreg LMS quantile regression vgam Vector generalized additive models zipoisson Zero inflated Poisson VGAM/demo/binom2.or.R0000755000176200001440000000207013565414530013670 0ustar liggesusers# Demo for binom2.or data(hunua, package = "VGAM") Hunua <- hunua Hunua <- transform(Hunua, y00 = (1-agaaus) * (1-kniexc), y01 = (1-agaaus) * kniexc, y10 = agaaus * (1-kniexc), y11 = agaaus * kniexc) fit <- vgam(cbind(y00, y01, y10, y11) ~ s(altitude, df = c(4, 4, 2.5)), binom2.or(zero = NULL), data = Hunua) par(mfrow = c(2, 3)) plot(fit, se = TRUE, scol = "darkgreen", lcol = "blue") summary(fit) # Plot the marginal functions together mycols <- c("blue", "orange") plot(fit, which.cf = 1:2, lcol = mycols, scol = mycols, overlay = TRUE, se = TRUE, llwd = 2, slwd = 2) legend(x = 100, y = -4, leg = c("Agathis australis", "Knightia excelsa"), col = mycols, lty = 1) # Plot the odds ratio ooo <- order(fit@x[, 2]) plot(fit@x[ooo, 2], exp(predict(fit)[ooo, "log(oratio)"]), log = "y", xlab = "Altitude (m)", ylab = "Odds ratio (log scale)", col = "blue", type = "b", las = 1) abline(h = 1, lty = 2) # Denotes independence between species VGAM/LICENCE.note0000755000176200001440000000421113565414530012744 0ustar liggesusersSoftware and datasets to support 'Vector Generalized Linear and Additive Models: With an Implementation in R', first edition, by T. W. Yee. Springer, 2015. 
This file is intended to clarify ownership and copyright: where
possible individual files also carry brief copyright notices. This
file was adapted from the file of the same name from the MASS bundle.

Copyrights
==========

Some slightly-modified FORTRAN subroutines from
http://pages.cs.wisc.edu/~deboor/pgs/
are used for the B-spline computations.

Some modified LINPACK subroutines appear in the files
./src/vlinpack?.f

Portions of the smoothing code called by vsmooth.spline() are based on
an adaptation of F. O'Sullivan's BART code.

Regarding file ./src/lerchphi.c, this program is copyright by
Sergej V. Aksenov (http://www.geocities.com/saksenov) and
Ulrich D. Jentschura (jentschura@physik.tu-dresden.de), 2002.
Version 1.00 (May 1, 2002)

R function pgamma.deriv() operates by a wrapper function to a Fortran
subroutine written by R. J. Moore. The subroutine was modified to run
using double precision. The original code came from
http://lib.stat.cmu.edu/apstat/187.

R functions expint(x), expexpint(x), expint.E1(x) operate by wrapper
functions to code downloaded from http://www.netlib.org/specfun/ei

My understanding is that the dataset files VGAM/data/* and
VGAMdata/data/* are not copyright.

All other files are copyright (C) 1998-2015 T. W. Yee.

Licence
=======

This is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 or 3 of the License (at your
option).

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

Files share/licenses/GPL-2 and share/licenses/GPL-3 in the R (source
or binary) distribution are copies of versions 2 and 3 of the 'GNU
General Public License'. These can also be viewed at
https://www.r-project.org/Licenses/

t.yee@auckland.ac.nz

==> VGAM/ChangeLog <==

2015-10-26  Thomas Yee  <t.yee@auckland.ac.nz>

        * R/links.q (all link functions): big changes when deriv >= 1,
        wrt the 'inverse' argument. For example, what was formerly
        obtained by logit(p, deriv = 1, inverse = TRUE) is now obtained
        by logit(p, deriv = 1, inverse = FALSE). Models fitted under
        <= VGAM 0.9-9 and saved might not work under >= VGAM 1.0-0.

2015-10-26  Thomas Yee  <t.yee@auckland.ac.nz>

        * R/family.normal.R (tobit): tobit()@weight implements Fisher
        scoring entirely.
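A minimal sketch of the 2015-10-26 link-function change above; hedged:
it assumes VGAM >= 1.0-0, where logit() is also available under its
newer name logitlink().

    p <- 0.3
    # What VGAM <= 0.9-9 returned via logit(p, deriv = 1, inverse = TRUE)
    # is, from VGAM 1.0-0 onwards, returned by:
    logit(p, deriv = 1, inverse = FALSE)
    # ... and conversely, the old 'inverse = FALSE' value is now:
    logit(p, deriv = 1, inverse = TRUE)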
==> VGAM/data/ <==

Binary data files (compressed .rda / .txt.gz / .txt.bz2 / .txt.xz);
their contents are not recoverable as text and are omitted here. The
directory contains:

marital.nz.rda, crashbc.rda, hormone.txt.bz2, car.all.rda,
grain.us.txt.bz2, machinists.txt.gz, lakeO.rda, ducklings.rda,
ruge.rda, enzyme.txt.gz, backPain.txt.gz, venice90.rda, backPain.rda,
cfibrosis.rda, chinese.nz.txt.gz, ucberk.txt.gz, crashf.rda,
bmi.nz.txt.xz, corbet.rda, Huggins89.t1.rda, lirat.txt.gz, hspider.rda,
pneumo.rda, leukemia.rda, alcoff.rda, crashi.rda, venice.rda,
alclevels.rda, melbmaxtemp.rda, Huggins89table1.rda, prinia.rda,
gew.txt.gz, crashp.rda, flourbeetle.rda, wine.rda, auuc.rda,
olym12.txt.gz, chest.nz.txt.bz2, finney44.rda
">L?eNeh7UFU-1m~mO%^LfG:^u;_;T=t_'*#C\:_8s,0pcPƇH@ŵ^0fhAȚZ d"Ȟ_ 9$S+ r)0̜ X΢r=Ѽ(?OI,JI,IK+0c, VGAM/data/wine.rda0000644000176200001440000000041713565414647013370 0ustar liggesusers}QAn0)HʑHN\`$$Fը>TkldwvlnwE%@(”@ Cc3 㣹&v5C4fS%dU=T5|`u`%zEcCtiCwVDdKv{ꖖsg7uVEgΓqn_Ֆ5w5W99UUO?JqiҗnB}a"E0#$xhL%3V%`'s` :VGAM/data/auuc.rda0000644000176200001440000000036613565414647013366 0ustar liggesusers r0b```b`fcd`b2Y# 'XZ, d1H̡8z@u? sV@N U]=/3KuPZM]:Լ SGּb CI s~nnjQr*, JlL׼t('75%393/΢r=d[X`"Hlt''$ r$$0/VGAM/data/olym12.txt.gz0000644000176200001440000000163213565414647014241 0ustar liggesusers]v8zz߱@ә4}zB26jđ0gsU鯛miZ7ݻ^5;w*%z&:p=e|ӤPro,S^S6,V9ѣ66(m*UA2a *33U~" !X*DBi*҂B͈z[)兪F(om ,&4ใȪ{ +Dp^+xt*EZЮo||XʼR!F^ @yB׃:PF5e 1B)=Y4֨asMY∼&sǫc=/j->ޚńQʱΊ$$E6ظ, PG6`@k{<'APF̕(*Z;F1 }8Csp($V7eB{ NiAUS[墇Pdا+gBA*ƩXڡsV݇P Z9ZfO^qRftkk;G{K+NEXX7GC8adb;|3 vۅnfc?p5`b!i=(,rsnN]HctH 5gSJ:j\ ft=vQI'6Y!BgE 6ؤR(ǐ8]pVGAM/data/chest.nz.txt.bz20000644000176200001440000000074413565414646014731 0ustar liggesusersBZh91AY&SYx.&ـ@"@p8TTɦSQ4ɠJ( S&&ABSɪmJ64wV,]@s&"MLAL[uw.tحwFN1fK%dqI8o^ 4.zӭ gHUrX%oY(?"lϬw[˩a{' H %ᔕI+g7ЁiA6đg6]f5V6$D2<إ>R# g7x'2fI1a/GcA tn!rִ$m"|e 9_sFTlВ9ޱ I Y~2 {` HʳK+/0WV6.p \%VGAM/data/finney44.rda0000644000176200001440000000032213565414647014061 0ustar liggesusers r0b```b`fcd`b2Y# 'HK41a``A  S/-Pz&̲_ a !ւ ϻ"U;iCk^bnj1!T 9?/aH,IHMr9K`&qLanerNb1J WJbI^ZP?vVGAM/man/0000755000176200001440000000000013565414530011565 5ustar liggesusersVGAM/man/oilog.Rd0000644000176200001440000000450513565414527013177 0ustar liggesusers\name{oilog} \alias{oilog} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-inflated Logarithmic Distribution Family Function } \description{ Fits a 1-inflated logarithmic distribution. } \usage{ oilog(lpstr1 = "logitlink", lshape = "logitlink", type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"), ishape = NULL, gpstr1 = ppoints(8), gshape = ppoints(8), zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr1, lshape}{ Link functions. For \code{lpstr1}: the same idea as \code{\link{zipoisson}} except it applies to a structural 1. } \item{gpstr1, gshape, ishape}{ For initial values. See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 1-inflated logarithmic distribution is a mixture distribution of the logarithmic distribution with some probability of obtaining a (structural) 1. Thus there are two sources for obtaining the value 1. This distribution is written here in a way that retains a similar notation to the one-inflated positive-Poisson, i.e., the probability \eqn{P[Y=1]} involves another parameter \eqn{\phi}{phi}. See \code{\link{oipospoisson}}. This family function can handle multiple responses. } %\section{Warning }{ % Under- or over-flow may occur if the data is ill-conditioned. % Lots of data is needed to estimate the parameters accurately. % Usually, probably the \code{shape} parameter is best modelled as % intercept-only. %} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } %\references{ %} \author{ Thomas W. Yee } %\note{ %} \seealso{ \code{\link{Oilog}}, \code{\link{logff}}, \code{\link{Oizeta}}. 
==> VGAM/man/V1.Rd <==

\name{V1}
\alias{V1}
\docType{data}
\title{ V1 Flying-Bombs Hits in London }
\description{
  A small count data set.
  During WWII V1 flying-bombs were fired from sites in France
  (Pas-de-Calais) and Dutch coasts towards London.
  The number of hits per square grid around London was recorded.
}
\usage{
data(V1)
}
\format{
  A data frame with the following variables.
  \describe{
    \item{hits}{
      Values between 0 and 4, and 7.
      Actually, the 7 is really imputed from the paper
      (it was recorded as "5 and over").
    }
    \item{ofreq}{
      Observed frequency, i.e., the number of grids
      with that many hits.
    }
  }
}
\details{
  The data concerns 576 square grids each of 0.25 square kms about
  south London. The area was selected comprising 144 square kms over
  which the basic probability function of the distribution was very
  nearly constant. V1s, which were one type of flying-bomb, were a
  ``Vergeltungswaffe'' or vengeance weapon fired during the summer of
  1944 at London. The V1s were informally called Buzz Bombs or
  Doodlebugs, and they were pulse-jet-powered with a warhead of 850 kg
  of explosives. Over 9500 were launched at London, and many were shot
  down by artillery and the RAF. Over the period considered the total
  number of bombs within the area was 537.

  It was asserted that the bombs tended to be grouped in clusters.
  However, a basic Poisson analysis shows this is not the case. Their
  guidance system being rather primitive, the data is consistent with
  a Poisson distribution (random).
}
\source{
  Clarke, R. D. (1946).
  An application of the Poisson distribution.
  \emph{Journal of the Institute of Actuaries}, \bold{72}(3), 481.
}
\references{
  Feller, W. (1970).
  \emph{An Introduction to Probability Theory and Its Applications},
  Vol. 1, Third Edition. John Wiley and Sons: New York, USA.
}
\seealso{
  \code{\link[VGAM]{poissonff}}.
}
\examples{
V1
mean(with(V1, rep(hits, times = ofreq)))
var(with(V1, rep(hits, times = ofreq)))
sum(with(V1, rep(hits, times = ofreq)))
\dontrun{ barplot(with(V1, ofreq),
                  names.arg = as.character(with(V1, hits)),
                  main = "London V1 buzz bomb hits",
                  col = "lightblue", las = 1,
                  ylab = "Frequency", xlab = "Hits")
}
}
\keyword{datasets}
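A minimal sketch of the Poisson check described in V1.Rd above,
assuming the usual vglm()/poissonff interface with the observed
frequencies supplied as prior weights.

    vfit <- vglm(hits ~ 1, poissonff, weights = ofreq, data = V1)
    (lambda.hat <- Coef(vfit))  # Estimated mean no. of hits per grid
    # Observed versus Poisson-expected frequencies over the 576 grids:
    cbind(obs  = V1$ofreq,
          expt = round(576 * dpois(V1$hits, lambda.hat), 1))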
Assigning a value will override the argument \code{imethod}. } \item{imethod}{ An integer with value \code{1} or \code{2} which specifies the initialization method. If failure to converge occurs try the other value, or else specify a value for \code{iapar}. } \item{nsimEIM}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The cumulative distribution function is \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = y_1 y_2 / ( 1 - \alpha (1 - y_1) (1 - y_2) ) }{% P(Y1 < = y1, Y2 < = y2) = y1 * y2 / ( 1 - alpha * (1 - y1) * (1 - y2) ) } for \eqn{-1 < \alpha < 1}{-1 < alpha < 1}. The support of the function is the unit square. The marginal distributions are the standard uniform distributions. When \eqn{\alpha = 0}{alpha = 0} the random variables are independent. This is an Archimedean copula. % A variant of Newton-Raphson is used, which only seems to work for an % intercept model. % It is a very good idea to set \code{trace = TRUE}. % This \pkg{VGAM} family function is prone to numerical difficulties. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ %Hutchinson, T. P. and Lai, C. D. (1990) %\emph{Continuous Bivariate Distributions, Emphasising Applications}, %Adelaide, South Australia: Rumsby Scientific Publishing. Balakrishnan, N. and Lai, C.-D. (2009) \emph{Continuous Bivariate Distributions}, 2nd ed. New York: Springer. } \author{ T. W. Yee and C. S. Chee } \note{ The response must be a two-column matrix. Currently, the fitted value is a matrix with two columns and values equal to 0.5. This is because each marginal distribution corresponds to a standard uniform distribution. } \seealso{ \code{\link{rbiamhcop}}, \code{\link{bifgmcop}}, \code{\link{bigumbelIexp}}, \code{\link{rbilogis}}, \code{\link{simulate.vlm}}. } \examples{ ymat <- rbiamhcop(1000, apar = rhobitlink(2, inverse = TRUE)) fit <- vglm(ymat ~ 1, biamhcop, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) } \keyword{models} \keyword{regression} VGAM/man/auxposbernoulli.t.Rd0000644000176200001440000000510413565414527015557 0ustar liggesusers\name{aux.posbernoulli.t} \alias{aux.posbernoulli.t} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Auxiliary Function for the Positive Bernoulli Family Function with Time Effects } \description{ Returns behavioural effects indicator variables from a capture history matrix. } \usage{ aux.posbernoulli.t(y, check.y = FALSE, rename = TRUE, name = "bei") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{y}{ Capture history matrix. Rows are animals, columns are sampling occasions, and values should be 0s and 1s only. } \item{check.y}{ Logical, if \code{TRUE} then some basic checking is performed. } \item{rename, name}{ If \code{rename = TRUE} then the behavioural effects indicator are named using the value of \code{name} as the prefix. If \code{FALSE} then use the same column names as \code{y}. } } \details{ This function can help fit certain capture--recapture models (commonly known as \eqn{M_{tb}} or \eqn{M_{tbh}} (no prefix \eqn{h} means it is an intercept-only model) in the literature). See \code{\link{posbernoulli.t}} for details. } \value{ A list with the following components. \describe{ \item{cap.hist1}{ A matrix the same dimension as \code{y}. In any particular row there are 0s up to the first capture. Then there are 1s thereafter. 
} \item{cap1}{ A vector specifying which time occasion the animal was first captured. } \item{y0i}{ Number of noncaptures before the first capture. } \item{yr0i}{ Number of noncaptures after the first capture. } \item{yr1i}{ Number of recaptures after the first capture. } } } % \author{ Thomas W. Yee. } %\note{ % Models \eqn{M_{tbh}}{M_tbh} can be fitted using the % \code{xij} argument (see \code{\link{vglm.control}}) % to input the behavioural effect indicator variables. % Rather than manually setting these up, they may be more conveniently % obtained by \code{\link{aux.posbernoulli.t}}. See % the example below. % % %} %\section{Warning }{ % % See \code{\link{posbernoulli.tb}}. % % %} \seealso{ \code{\link{posbernoulli.t}}, \code{\link{deermice}}. } \examples{ # Fit a M_tbh model to the deermice data: (pdata <- aux.posbernoulli.t(with(deermice, cbind(y1, y2, y3, y4, y5, y6)))) deermice <- data.frame(deermice, bei = 0, # Add this pdata$cap.hist1) # Incorporate these head(deermice) # Augmented with behavioural effect indicator variables tail(deermice) } \keyword{models} \keyword{regression} VGAM/man/AICvlm.Rd0000644000176200001440000001060313565414527013175 0ustar liggesusers\name{AICvlm} \alias{AICvlm} %\alias{AICvglm} \alias{AICvgam} \alias{AICrrvglm} \alias{AICqrrvglm} \alias{AICrrvgam} \alias{AICc,vglm-method} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Akaike's Information Criterion } \description{ Calculates the Akaike information criterion for a fitted model object for which a log-likelihood value has been obtained. } \usage{ AICvlm(object, \dots, corrected = FALSE, k = 2) AICvgam(object, \dots, k = 2) AICrrvglm(object, \dots, k = 2) AICqrrvglm(object, \dots, k = 2) AICrrvgam(object, \dots, k = 2) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Some \pkg{VGAM} object, for example, having class \code{\link{vglm-class}}. } \item{\dots}{ Other possible arguments fed into \code{logLik} in order to compute the log-likelihood. } \item{corrected}{ Logical, perform the finite sample correction? } \item{k}{ Numeric, the penalty per parameter to be used; the default is the classical AIC. } } \details{ The following formula is used for VGLMs: \eqn{-2 \mbox{log-likelihood} + k n_{par}}{-2*log-likelihood + k*npar}, where \eqn{n_{par}}{npar} represents the number of parameters in the fitted model, and \eqn{k = 2} for the usual AIC. One could assign \eqn{k = \log(n)} (\eqn{n} the number of observations) for the so-called BIC or SBC (Schwarz's Bayesian criterion). This is the function \code{AICvlm()}. This code relies on the log-likelihood being defined, and computed, for the object. When comparing fitted objects, the smaller the AIC, the better the fit. The log-likelihood and hence the AIC is only defined up to an additive constant. Any estimated scale parameter (in GLM parlance) is used as one parameter. For VGAMs and CAO the nonlinear effective degrees of freedom for each smoothed component is used. This formula is heuristic. These are the functions \code{AICvgam()} and \code{AICcao()}. The finite sample correction is usually recommended when the sample size is small or when the number of parameters is large. When the sample size is large their difference tends to be negligible. The correction is described in Hurvich and Tsai (1989), and is based on a (univariate) linear model with normally distributed errors. } \value{ Returns a numeric value with the corresponding AIC (or BIC, or \dots, depending on \code{k}). } \author{T. W. Yee. 
} \note{ AIC has not been defined for QRR-VGLMs, yet. Using AIC to compare \code{\link{posbinomial}} models with, e.g., \code{\link{posbernoulli.tb}} models, requires \code{posbinomial(omit.constant = TRUE)}. See \code{\link{posbinomial}} for an example. A warning is given if it suspects a wrong \code{omit.constant} value was used. Where defined, \code{AICc(...)} is the same as \code{AIC(..., corrected = TRUE)}. } \references{ Hurvich, C. M. and Tsai, C.-L. (1989) Regression and time series model selection in small samples, \emph{Biometrika}, \bold{76}, 297--307. % Sakamoto, Y., Ishiguro, M., and Kitagawa G. (1986). % \emph{Akaike Information Criterion Statistics}. % D. Reidel Publishing Company. } \section{Warning }{ This code has not been double-checked. The general applicability of \code{AIC} for the VGLM/VGAM classes has not been developed fully. In particular, \code{AIC} should not be run on some \pkg{VGAM} family functions because of violation of certain regularity conditions, etc. } \seealso{ VGLMs are described in \code{\link{vglm-class}}; VGAMs are described in \code{\link{vgam-class}}; RR-VGLMs are described in \code{\link{rrvglm-class}}; \code{\link[stats]{AIC}}, \code{\link{BICvlm}}, \code{\link{TICvlm}}, \code{\link{drop1.vglm}}, \code{\link{extractAIC.vglm}}. % 20190410 % One day % \code{\link{AICc,vglm-method}} for \code{AICc()} applied to % \code{"vglm"} objects. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit1 <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)) coef(fit1, matrix = TRUE) AIC(fit1) AICc(fit1) # Quick way AIC(fit1, corrected = TRUE) # Slow way (fit2 <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = FALSE, reverse = TRUE), data = pneumo)) coef(fit2, matrix = TRUE) AIC(fit2) AICc(fit2) AIC(fit2, corrected = TRUE) } \keyword{models} \keyword{regression} VGAM/man/otlog.Rd0000644000176200001440000000306313565414527013210 0ustar liggesusers\name{otlog} \alias{otlog} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-truncated Logarithmic Distribution } \description{ Estimating the (single) parameter of the 1-truncated logarithmic distribution. } \usage{ otlog(lshape = "logitlink", gshape = ppoints(8), zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape, gshape, zero}{ Same as \code{\link{logff}}. } } \details{ The 1-truncated logarithmic distribution is a logarithmic distribution but with the probability of a one being zero. The other probabilities are scaled to add to unity. Some more details can be found at \code{\link{logff}}. Multiple responses are permitted. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } %\references{ %} \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link{Otlog}}, \code{\link{logff}}, \code{\link{oalog}}, \code{\link{oilog}}, \code{\link{simulate.vlm}}. 
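% The rescaling described in the details section can be checked
% directly. A minimal sketch (not run; the shape value is arbitrary):
% x <- 2:9; shape <- 0.5
% max(abs(dotlog(x, shape) -
%         dlog(x, shape) / (1 - dlog(1, shape))))  # Should be ~0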
}
\examples{
odata <- data.frame(y1 = rotlog(n = 1000, shape = logitlink(1/3, inverse = TRUE)))
ofit <- vglm(y1 ~ 1, otlog, data = odata, trace = TRUE, crit = "c")
coef(ofit, matrix = TRUE)
Coef(ofit)
\dontrun{with(odata,
    hist(y1, probability = TRUE, breaks = seq(0.5, max(y1) + 0.5, by = 1),
         border = "blue"))
x <- seq(1, with(odata, max(y1)), by = 1)
with(odata, lines(x, dotlog(x, Coef(ofit)[1]),
                  col = "orange", type = "h", lwd = 2)) }
}
\keyword{models}
\keyword{regression}
VGAM/man/posnegbinUC.Rd0000644000176200001440000000776713565414527014313 0ustar liggesusers\name{Posnegbin}
\alias{Posnegbin}
\alias{dposnegbin}
\alias{pposnegbin}
\alias{qposnegbin}
\alias{rposnegbin}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Positive-Negative Binomial Distribution }
\description{
  Density, distribution function, quantile function and random
  generation for the positive-negative binomial distribution.
}
\usage{
dposnegbin(x, size, prob = NULL, munb = NULL, log = FALSE)
pposnegbin(q, size, prob = NULL, munb = NULL,
           lower.tail = TRUE, log.p = FALSE)
qposnegbin(p, size, prob = NULL, munb = NULL)
rposnegbin(n, size, prob = NULL, munb = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q}{vector of quantiles.}
  \item{p}{vector of probabilities.}
  \item{n}{number of observations.
  Fed into \code{\link[stats]{runif}}.
  }
  \item{size, prob, munb, log}{
  Same arguments as that of an ordinary negative binomial distribution
  (see \code{\link[stats:NegBinomial]{dnbinom}}).
  Some arguments have been renamed slightly.
% This is called \eqn{\theta}{theta} in the \code{\link[MASS]{rnegbin}}
% function in the \code{MASS} library.
  Short vectors are recycled.
  The parameter \code{1/size} is known as a dispersion parameter;
  as \code{size} approaches infinity, the negative binomial distribution
  approaches a Poisson distribution.
  Note that \code{prob} must lie in \eqn{(0,1)}, otherwise a \code{NaN}
  is returned.
  }
  \item{log.p, lower.tail}{
  Same arguments as that of an ordinary negative binomial distribution
  (see \code{\link[stats:NegBinomial]{pnbinom}}).
  }
}
\details{
  The positive-negative binomial distribution is a negative binomial
  distribution but with the probability of a zero being zero.
  The other probabilities are scaled to add to unity.
  The mean therefore is
  \deqn{\mu / (1-p(0))}{%
        munb / (1-p(0))}
  where \eqn{\mu}{munb} is the mean of an ordinary negative binomial
  distribution.
% 20120405; no longer true to a superior method:
% The arguments of
% \code{rposnegbin()}
% are fed into
% \code{\link[stats:NegBinomial]{rnbinom}} until \eqn{n} positive values
% are obtained.
}
\value{
  \code{dposnegbin} gives the density,
  \code{pposnegbin} gives the distribution function,
  \code{qposnegbin} gives the quantile function, and
  \code{rposnegbin} generates \eqn{n} random deviates.
}
\references{
Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer, D. B. (1996)
Modelling the abundances of rare species: statistical models
for counts with extra zeros.
\emph{Ecological Modelling},
\bold{88},
297--308.
}
\author{ T. W. Yee }
%\note{
% 20120405; no longer true to a superior method:
% The running time
% of \code{rposnegbin()}
% is slow when \code{munb} is very close to zero.
%
%}
\seealso{
  \code{\link{gatnbinomial.mlm}},
  \code{\link{Gaitnbinom.mlm}},
  \code{\link{posnegbinomial}},
  \code{\link{zanegbinomial}},
  \code{\link{zinegbinomial}},
  \code{\link[stats:NegBinomial]{rnbinom}}.
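% The rescaling in the details section gives a direct numerical check.
% A minimal sketch (not run; the munb and size values are arbitrary):
% x <- 1:10; munb <- 5; size <- 4
% ans <- dnbinom(x, mu = munb, size = size) /
%        pnbinom(0, mu = munb, size = size, lower.tail = FALSE)
% max(abs(dposnegbin(x, munb = munb, size = size) - ans))  # Should be ~0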
% \code{\link[MASS]{rnegbin}}, } \examples{ munb <- 5; size <- 4; n <- 1000 table(y <- rposnegbin(n, munb = munb, size = size)) mean(y) # sample mean munb / (1 - (size / (size + munb))^size) # population mean munb / pnbinom(0, mu = munb, size = size, lower.tail = FALSE) # same as before x <- (-1):17 (ii <- dposnegbin(x, munb = munb, size = size)) max(abs(cumsum(ii) - pposnegbin(x, munb = munb, size = size))) # Should be 0 \dontrun{ x <- 0:10 barplot(rbind(dposnegbin(x, munb = munb, size = size), dnbinom(x, mu = munb, size = size)), beside = TRUE, col = c("blue","green"), main = paste("dposnegbin(munb = ", munb, ", size = ", size, ") (blue) vs", " dnbinom(mu = ", munb, ", size = ", size, ") (green)", sep = ""), names.arg = as.character(x)) } # Another test for pposnegbin() nn <- 5000 mytab <- cumsum(table(rposnegbin(nn, munb = munb, size = size))) / nn myans <- pposnegbin(sort(as.numeric(names(mytab))), munb = munb, size = size) max(abs(mytab - myans)) # Should be 0 } \keyword{distribution} VGAM/man/margeff.Rd0000644000176200001440000001246713565414527013503 0ustar liggesusers\name{margeff} \alias{margeff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Marginal Effects for Several Categorical Response Models } \description{ Marginal effects for the multinomial logit model and cumulative logit/probit/... models and continuation ratio models and stopping ratio models and adjacent categories models: the derivative of the fitted probabilities with respect to each explanatory variable. } \usage{ margeff(object, subset = NULL, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \code{\link{vglm}} object, with one of the following family functions: \code{\link{multinomial}}, \code{\link{cumulative}}, \code{\link{cratio}}, \code{\link{sratio}} or \code{\link{acat}}. } \item{subset}{ Numerical or logical vector, denoting the required observation(s). Recycling is used if possible. The default means all observations. } \item{\dots}{ further arguments passed into the other methods functions. % e.g., \code{subset}. } } \details{ Computes the derivative of the fitted probabilities of the categorical response model with respect to each explanatory variable. Formerly one big function, this function now uses S4 dispatch to break up the computations. % 20151215 The function \code{margeff()} is \emph{not} generic. However, it calls the function \code{margeffS4VGAM()} which \emph{is}. This is based on the class of the \code{VGAMff} argument, and it uses the S4 function \code{\link[methods]{setMethod}} to correctly dispatch to the required methods function. The inheritance is given by the \code{vfamily} slot of the \pkg{VGAM} family function. } \value{ A \eqn{p} by \eqn{M+1} by \eqn{n} array, where \eqn{p} is the number of explanatory variables and the (hopefully) nominal response has \eqn{M+1} levels, and there are \eqn{n} observations. In general, if \code{is.numeric(subset)} and \code{length(subset) == 1} then a \eqn{p} by \eqn{M+1} matrix is returned. } % \references{ ~put references to the literature/web site here ~ } \author{ T. W. Yee, with some help and motivation from Stasha Rmandic. } \section{Warning }{ Care is needed in interpretation, e.g., the change is not universally accurate for a unit change in each explanatory variable because eventually the `new' probabilities may become negative or greater than unity. Also, the `new' probabilities will not sum to one. 
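% One consequence worth checking numerically: because the fitted
% probabilities sum to unity at every observation, their derivatives
% with respect to any one covariate should sum to (approximately) zero.
% A minimal sketch (not run), assuming fit and ii as in the examples below:
% sum(margeff(fit, subset = ii)["let", ])  # Should be ~0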
  This function is not applicable to models with data-dependent terms
  or, more generally, any terms that generate more than one column of
  the LM model matrix, such as \code{\link{bs}} and \code{\link{poly}}.
  For such models try numerical methods such as finite differences.
  The \code{formula} in \code{object} should consist of simple terms
  of the form \code{ ~ x2 + x3 + x4}, etc.
  Some numerical problems may occur if the fitted values are
  close to 0 or 1 for the
  \code{\link{cratio}} and \code{\link{sratio}} models.
  Models with offsets may result in an incorrect answer.
}
\note{
  For \code{\link{multinomial}}
  this function should handle any value of \code{refLevel} and also
  any constraint matrices.
  However, it does not currently handle the \code{xij} or \code{form2}
  arguments, nor \code{\link{vgam}} objects.
% 20151211; this is now false, so can delete this:
% For \code{\link{multinomial}},
% if \code{subset} is numeric then the function uses a \code{for} loop over
% the observations (slow).
% The default computations use vectorization; this uses more memory than a
% \code{for} loop but is faster.
  Some other limitations are imposed, e.g.,
  for \code{\link{acat}} models
  only a \code{\link{loglink}} link is allowed.
}
\seealso{
  \code{\link{multinomial}},
  \code{\link{cumulative}},
  \code{\link{propodds}},
  \code{\link{acat}},
  \code{\link{cratio}},
  \code{\link{sratio}},
  \code{\link{vglm}}.
}
\examples{
# Not a good example for multinomial() because the response is ordinal!!
ii <- 3; hh <- 1/100
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
fit <- vglm(cbind(normal, mild, severe) ~ let,
            cumulative(reverse = TRUE, parallel = TRUE),
            data = pneumo)
fitted(fit)[ii, ]
mynewdata <- with(pneumo, data.frame(let = let[ii] + hh))
(newp <- predict(fit, newdata = mynewdata, type = "response"))
# Compare the difference. Should be the same as hh --> 0.
round(digits = 3, (newp-fitted(fit)[ii, ])/hh)  # Finite-difference approxn
round(digits = 3, margeff(fit, subset = ii)["let",])
# Other examples
round(digits = 3, margeff(fit))
round(digits = 3, margeff(fit, subset = 2)["let",])
round(digits = 3, margeff(fit, subset = c(FALSE, TRUE))["let",,])  # recycling
round(digits = 3, margeff(fit, subset = c(2, 4, 6, 8))["let",,])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
\keyword{regression}
% set \code{i=1:n}.
% hh * margeff(fit, i=ii)["let",]
% cumulative(reverse=TRUE, parallel=TRUE),
% cumulative(reverse=FALSE, parallel=TRUE),
% cumulative(reverse=TRUE, parallel=FALSE),
% cumulative(reverse=FALSE, parallel=FALSE),
VGAM/man/betanormUC.Rd0000644000176200001440000000542513565414527014127 0ustar liggesusers\name{Betanorm}
\alias{Betanorm}
\alias{dbetanorm}
\alias{pbetanorm}
\alias{qbetanorm}
\alias{rbetanorm}
\title{The Beta-Normal Distribution}
\description{
  Density, distribution function, quantile function and random
  generation for the univariate beta-normal distribution.
}
\usage{
dbetanorm(x, shape1, shape2, mean = 0, sd = 1, log = FALSE)
pbetanorm(q, shape1, shape2, mean = 0, sd = 1,
          lower.tail = TRUE, log.p = FALSE)
qbetanorm(p, shape1, shape2, mean = 0, sd = 1,
          lower.tail = TRUE, log.p = FALSE)
rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
}
\arguments{
  \item{x, q}{vector of quantiles.}
  \item{p}{vector of probabilities.}
  \item{n}{number of observations.
  Same as \code{\link[stats]{runif}}.
  }
  \item{shape1, shape2}{
  the two (positive) shape parameters of the standard beta distribution.
  They are called \code{a} and \code{b} respectively in
  \code{\link[base:Special]{beta}}.
  }
  \item{mean, sd}{
  the mean and standard deviation of the univariate
  normal distribution
  (\code{\link[stats:Normal]{Normal}}).
  }
  \item{log, log.p}{
  Logical.
  If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
  }
  \item{lower.tail}{
  Logical.
  If \code{FALSE} then the upper tail is returned, i.e.,
  one minus the usual answer.
  }
}
\value{
  \code{dbetanorm} gives the density,
  \code{pbetanorm} gives the distribution function,
  \code{qbetanorm} gives the quantile function, and
  \code{rbetanorm} generates random deviates.
}
\references{
  pp.146--152 of
  Gupta, A. K. and Nadarajah, S. (2004)
  \emph{Handbook of Beta Distribution and Its Applications},
  New York: Marcel Dekker.
}
\author{ T. W. Yee }
\details{
  The function \code{betauninormal}, the \pkg{VGAM} family function
  for estimating the parameters,
  has not yet been written.
% for the formula of the probability density function and other details.
}
%\note{
%}
%\seealso{
% zz code{link{betauninormal}}.
%}
\examples{
\dontrun{
shape1 <- 0.1; shape2 <- 4; m <- 1
x <- seq(-10, 2, len = 501)
plot(x, dbetanorm(x, shape1, shape2, m = m), type = "l", ylim = 0:1, las = 1,
     ylab = paste("betanorm(",shape1,", ",shape2,", m=",m, ", sd=1)", sep = ""),
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Gray lines are the 10,20,...,90 percentiles", col = "blue")
lines(x, pbetanorm(x, shape1, shape2, m = m), col = "orange")
abline(h = 0, col = "black")
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qbetanorm(probs, shape1, shape2, m = m)
lines(Q, dbetanorm(Q, shape1, shape2, m = m), col = "gray50",
      lty = 2, type = "h")
lines(Q, pbetanorm(Q, shape1, shape2, m = m), col = "gray50",
      lty = 2, type = "h")
abline(h = probs, col = "gray50", lty = 2)
pbetanorm(Q, shape1, shape2, m = m) - probs  # Should be all 0
}
}
\keyword{distribution}
VGAM/man/diffzetaUC.Rd0000644000176200001440000000371113565414527014110 0ustar liggesusers\name{Diffzeta}
\alias{Diffzeta}
\alias{ddiffzeta}
\alias{pdiffzeta}
\alias{qdiffzeta}
\alias{rdiffzeta}
\title{ Differenced Zeta Distribution }
\description{
  Density, distribution function,
  quantile function,
  and random generation
  for the differenced zeta distribution.
}
\usage{
ddiffzeta(x, shape, start = 1, log = FALSE)
pdiffzeta(q, shape, start = 1, lower.tail = TRUE)
qdiffzeta(p, shape, start = 1)
rdiffzeta(n, shape, start = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q, p, n}{
  Same as in \code{\link[stats]{runif}}.
  }
  \item{shape, start}{
  Details at \code{\link{diffzeta}}.
% For \code{rdiffzeta()} this parameter must be of length 1.
  }
  \item{log, lower.tail}{
  Same as in \code{\link[stats]{runif}}.
  }
}
\details{
  This distribution appears to work well on the frequency
  distribution of English words in written texts.
  Some more details are given in \code{\link{diffzeta}}.
}
\value{
  \code{ddiffzeta} gives the density,
  \code{pdiffzeta} gives the distribution function,
  \code{qdiffzeta} gives the quantile function, and
  \code{rdiffzeta} generates random deviates.
}
%\references{
%}
\author{ T. W. Yee }
\note{
  Given some response data, the \pkg{VGAM} family function
  \code{\link{diffzeta}} estimates the parameter \code{shape}.
  Function \code{pdiffzeta()} suffers from the problems that
  \code{\link{plog}} sometimes has, i.e., when \code{p}
  is very close to 1.
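% A generic sanity check for any discrete distribution: the density
% should equal first differences of the CDF. A minimal sketch (not run;
% the shape and start values are arbitrary):
% x <- 2:10; shape <- 0.8
% max(abs(ddiffzeta(x, shape, start = 2) -
%         (pdiffzeta(x, shape, start = 2) -
%          pdiffzeta(x - 1, shape, start = 2))))  # Should be ~0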
} \seealso{ \code{\link{diffzeta}}, \code{\link{zetaff}}, \code{\link{zipf}}, \code{\link{Oizeta}}. } \examples{ ddiffzeta(1:20, 0.5, start = 2) rdiffzeta(20, 0.5) \dontrun{ shape <- 0.8; x <- 1:10 plot(x, ddiffzeta(x, shape = shape), type = "h", ylim = 0:1, sub = "shape=0.8", las = 1, col = "blue", ylab = "Probability", main = "Differenced zeta distribution: blue=PMF; orange=CDF") lines(x + 0.1, pdiffzeta(x, shape = shape), col = "orange", lty = 3, type = "h") } } \keyword{distribution} VGAM/man/concoef.Rd0000644000176200001440000000576113565414527013507 0ustar liggesusers\name{concoef} \alias{concoef} %\alias{ccoef} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Extract Model Constrained/Canonical Coefficients } \description{ \code{concoef} is a generic function which extracts the constrained (canonical) coefficients from objects returned by certain modelling functions. } \usage{ concoef(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the extraction of canonical coefficients is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. } } \details{ For constrained quadratic and ordination models, \emph{canonical coefficients} are the elements of the \bold{C} matrix used to form the latent variables. They are highly interpretable in ecology, and are looked at as weights or loadings. They are also applicable for reduced-rank VGLMs. } \value{ The value returned depends specifically on the methods function invoked. } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W. Yee } %\note{ %} \section{Warning }{ \code{\link{concoef}} replaces \code{ccoef}; the latter is deprecated. % \code{\link{concoef}} and \code{\link{ccoef}} are identical, % but the latter will be deprecated soon. For QO models, there is a direct inverse relationship between the scaling of the latent variables (site scores) and the tolerances. One normalization is for the latent variables to have unit variance. Another normalization is for all the species' tolerances to be unit (provided \code{eq.tolerances} is \code{TRUE}). These two normalizations cannot simultaneously hold in general. For rank \eqn{R} models with \eqn{R>1} it becomes more complicated because the latent variables are also uncorrelated. An important argument when fitting quadratic ordination models is whether \code{eq.tolerances} is \code{TRUE} or \code{FALSE}. See Yee (2004) for details. } \seealso{ \code{\link{concoef-method}}, \code{concoef.qrrvglm}, \code{concoef.cao}, \code{\link[stats]{coef}}. 
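% The canonical coefficients are the weights forming the latent
% variables, so lv = x2 %*% C roughly reconstructs latvar(). A sketch
% (not run), assuming the model p1 from the example below; the match
% may only hold up to centering of the latent variables:
% C <- concoef(p1)
% lv1 <- as.matrix(hspider[, 1:6]) %*% C  # Rank-1 latent variable
% max(abs(scale(lv1, scale = FALSE) -
%         scale(latvar(p1), scale = FALSE)))  # Should be ~0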
} \examples{ \dontrun{ set.seed(111) # This leads to the global solution hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, Crow1positive = FALSE) concoef(p1) } } \keyword{models} \keyword{regression} % family = quasipoissonff, data = hspider, Crow1positive = FALSE VGAM/man/propodds.Rd0000644000176200001440000000576713565414527013733 0ustar liggesusers\name{propodds} \alias{propodds} \title{ Proportional Odds Model for Ordinal Regression } \description{ Fits the proportional odds model to a (preferably ordered) factor response. } \usage{ propodds(reverse = TRUE, whitespace = FALSE) } \arguments{ \item{reverse, whitespace}{ Logical. Fed into arguments of the same name in \code{\link{cumulative}}. } } \details{ The \emph{proportional odds model} is a special case from the class of \emph{cumulative link models}. It involves a logit link applied to cumulative probabilities and a strong \emph{parallelism} assumption. A parallelism assumption means there is less chance of numerical problems because the fitted probabilities will remain between 0 and 1; however the \emph{parallelism} assumption ought to be checked, e.g., via a likelihood ratio test. This \pkg{VGAM} family function is merely a shortcut for \code{cumulative(reverse = reverse, link = "logit", parallel = TRUE)}. Please see \code{\link{cumulative}} for more details on this model. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Agresti, A. (2010) \emph{Analysis of Ordinal Categorical Data}, 2nd ed. Hoboken, NJ, USA: Wiley. Yee, T. W. (2010) The \pkg{VGAM} package for categorical data analysis. \emph{Journal of Statistical Software}, \bold{32}, 1--34. \url{http://www.jstatsoft.org/v32/i10/}. Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \section{Warning }{ No check is made to verify that the response is ordinal if the response is a matrix; see \code{\link[base:factor]{ordered}}. } \seealso{ \code{\link{cumulative}}, \code{\link{R2latvar}}. 
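% Because propodds() is only a shortcut, the fit below should be
% identical to one using cumulative() directly. A minimal sketch
% (not run), assuming let has been added to pneumo as in the
% examples below:
% fitA <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
% fitB <- vglm(cbind(normal, mild, severe) ~ let,
%              cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)
% max(abs(coef(fitA) - coef(fitB)))  # Should be 0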
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
depvar(fit)  # Sample proportions
weights(fit, type = "prior")  # Number of observations
coef(fit, matrix = TRUE)
constraints(fit)  # Constraint matrices
summary(fit)

# Check that the model is linear in let ----------------------
fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
             propodds, data = pneumo)
\dontrun{ plot(fit2, se = TRUE, lcol = 2, scol = 2) }

# Check the proportional odds assumption with a LRT ----------
(fit3 <- vglm(cbind(normal, mild, severe) ~ let,
              cumulative(parallel = FALSE, reverse = TRUE), data = pneumo))
pchisq(deviance(fit) - deviance(fit3),
       df = df.residual(fit) - df.residual(fit3), lower.tail = FALSE)
lrtest(fit3, fit)  # Easier
}
\keyword{models}
\keyword{regression}
% pneumo$let <- log(pneumo$exposure.time)
VGAM/man/rdiric.Rd0000644000176200001440000000406613565414527013344 0ustar liggesusers\name{rdiric}
\alias{rdiric}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The Dirichlet distribution }
\description{
  Generates Dirichlet random variates.
}
\usage{
rdiric(n, shape, dimension = NULL, is.matrix.shape = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{n}{ number of observations.
  Note it has two meanings, see \code{is.matrix.shape} below.
  }
  \item{shape}{
  the shape parameters. These must be positive.
  If \code{dimension} is specified, values
  are recycled if necessary to length \code{dimension}.
  }
  \item{dimension}{
  the dimension of the distribution.
  If \code{dimension} is not numeric then it is taken to be
  \code{length(shape)}
  (or \code{ncol(shape)} if \code{is.matrix.shape == TRUE}).
  }
  \item{is.matrix.shape}{
  Logical.
  If \code{TRUE} then \code{shape} must be a matrix, and then
  \code{n} is no longer the number of rows of the answer but the
  answer has \code{n * nrow(shape)} rows.
  If \code{FALSE} (the default) then \code{shape} is a vector and each
  of the \code{n} rows of the answer have \code{shape} as
  its shape parameters.
  }
}
\details{
  This function is based on a relationship between the gamma and
  Dirichlet distributions. Random gamma variates are generated, and
  then Dirichlet random variates are formed from these.
}
\value{
  A \code{n} by \code{dimension} matrix of Dirichlet random variates.
  Each element is positive, and each row will sum to unity.
  If \code{shape} has names then these will become the column names
  of the answer.
}
\references{
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed.
New York: Springer-Verlag.
}
\author{ Thomas W. Yee }
\seealso{
  \code{\link{dirichlet}} is a \pkg{VGAM} family function for
  fitting a Dirichlet distribution to data.
}
\examples{
ddata <- data.frame(rdiric(n = 1000, shape = c(y1 = 3, y2 = 1, y3 = 4)))
fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet, data = ddata, trace = TRUE)
Coef(fit)
coef(fit, matrix = TRUE)
}
\keyword{distribution}
VGAM/man/prplot.Rd0000644000176200001440000000465413565414527013413 0ustar liggesusers\name{prplot}
\alias{prplot}
\alias{prplot.control}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Probability Plots for Categorical Data Analysis }
\description{
  Plots the fitted probabilities for some very simplified special
  cases of categorical data analysis models.
}
\usage{
prplot(object, control = prplot.control(...), ...)
prplot.control(xlab = NULL, ylab = "Probability", main = NULL, xlim = NULL, ylim = NULL, lty = par()$lty, col = par()$col, rcol = par()$col, lwd = par()$lwd, rlwd = par()$lwd, las = par()$las, rug.arg = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Currently only an \code{\link{cumulative}} object. This includes a \code{\link{propodds}} object since that \pkg{VGAM} family function is a special case of \code{\link{cumulative}}. } \item{control}{ List containing some basic graphical parameters. } \item{xlab, ylab, main, xlim, ylim, lty }{ See \code{\link[graphics]{par}} and \code{...} below. } \item{col, rcol, lwd, rlwd, las, rug.arg}{ See \code{\link[graphics]{par}} and \code{...} below. Arguments starting with \code{r} refer to the rug. Argument \code{rug.arg} is logical: add a rug for the distinct values of the explanatory variable? } \item{\dots}{ Arguments such as \code{xlab} which are fed into \code{prplot.control()}. Only a small selection of graphical arguments from \code{\link[graphics]{par}} are offered. } } \details{ For models involving one term in the RHS of the formula this function plots the fitted probabilities against the single explanatory variable. } \value{ The object is returned invisibly with the \code{preplot} slot assigned. This is obtained by a call to \code{plotvgam()}. } %\references{ %% ~put references to the literature/web site here ~ %} %\author{ %T. W. Yee %} \note{ This function is rudimentary. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{cumulative}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo) M <- npred(fit) # Or fit@misc$M \dontrun{ prplot(fit) prplot(fit, lty = 1:M, col = (1:M)+2, rug = TRUE, las = 1, ylim = c(0, 1), rlwd = 2) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{graphs} \keyword{models} \keyword{regression} VGAM/man/oizeta.Rd0000644000176200001440000000454513565414527013365 0ustar liggesusers\name{oizeta} \alias{oizeta} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-inflated Zeta Distribution Family Function } \description{ Fits a 1-inflated zeta distribution. } \usage{ oizeta(lpstr1 = "logitlink", lshape = "loglink", type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"), ishape = NULL, gpstr1 = ppoints(8), gshape = exp((-3:3) / 4), zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr1, lshape}{ For \code{lpstr1}: the same idea as \code{\link{zipoisson}} except it applies to a structural 1. } \item{gpstr1, gshape, ishape}{ For initial values. See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 1-inflated zeta distribution is a mixture distribution of the zeta distribution with some probability of obtaining a (structural) 1. Thus there are two sources for obtaining the value 1. This distribution is written here in a way that retains a similar notation to the zero-inflated Poisson, i.e., the probability \eqn{P[Y=1]} involves another parameter \eqn{\phi}{phi}. See \code{\link{zipoisson}}. This family function can handle multiple responses. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned. Lots of data is needed to estimate the parameters accurately. 
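% The 1-inflated mixture stated in the details section can be checked
% numerically. A minimal sketch (not run; the shape and pstr1 values
% are arbitrary):
% x <- 1:8; shape <- exp(-0.5); pstr1 <- 0.3
% mix <- (x == 1) * pstr1 + (1 - pstr1) * dzeta(x, shape)
% max(abs(doizeta(x, shape, pstr1 = pstr1) - mix))  # Should be ~0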
  Usually the shape parameter is best modelled as intercept-only.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  \code{\link{rrvglm}} and \code{\link{vgam}}.
}
%\references{
%}
\author{ Thomas W. Yee }
%\note{
%}
\seealso{
  \code{\link{Oizeta}},
  \code{\link{zetaff}},
  \code{\link{oazeta}},
  \code{\link{otzeta}},
  \code{\link{diffzeta}},
  \code{\link{zeta}},
  \code{\link{Oizipf}}.
}
\examples{
\dontrun{ odata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
odata <- transform(odata, pstr1 = logitlink(-1 + x2, inverse = TRUE),
                          shape = exp(-0.5))
odata <- transform(odata, y1 = roizeta(nn, shape, pstr1 = pstr1))
with(odata, table(y1))
fit1 <- vglm(y1 ~ x2, oizeta(zero = "shape"), data = odata, trace = TRUE)
coef(fit1, matrix = TRUE)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/smartpred.Rd0000644000176200001440000001742513565414527014074 0ustar liggesusers\name{smartpred}
\alias{smartpred}
\alias{sm.bs}
\alias{sm.ns}
\alias{sm.scale}
\alias{sm.scale.default}
\alias{sm.poly}
\title{ Smart Prediction }
\description{
  Data-dependent parameters in formula terms can cause problems
  when predicting.
  The \pkg{smartpred} package saves
  data-dependent parameters on the object so that the bug is fixed.
  The \code{\link[stats]{lm}} and \code{\link[stats]{glm}} functions have
  been fixed properly. Note that the \pkg{VGAM} package by T. W. Yee
  automatically comes with smart prediction.
}
\usage{
sm.bs(x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
      Boundary.knots = range(x))
sm.ns(x, df = NULL, knots = NULL, intercept = FALSE,
      Boundary.knots = range(x))
sm.poly(x, ..., degree = 1, coefs = NULL, raw = FALSE)
sm.scale(x, center = TRUE, scale = TRUE)
}
%\usage{
%lm()
%glm()
%ns()
%bs()
%poly()
%scale()
%vglm()
%rrvglm()
%vgam()
%cao()
%cqo()
%uqo()
%}
\arguments{
  \item{x}{
  The \code{x} argument is actually common to them all.
  }
  \item{df, knots, intercept, Boundary.knots}{
  See \code{\link[splines]{bs}} and/or \code{\link[splines]{ns}}.
  }
  \item{degree, \dots, coefs, raw}{
  See \code{\link[stats]{poly}}.
  }
  \item{center, scale}{
  See \code{\link[base]{scale}}.
  }
}
\value{
  The usual value returned by
  \code{\link[splines]{bs}},
  \code{\link[splines]{ns}},
  \code{\link[stats]{poly}} and
  \code{\link[base]{scale}}.
  When used with functions such as \code{\link[VGAM]{vglm}}
  the data-dependent parameters are saved on one slot component called
  \code{smart.prediction}.
}
\section{Side Effects}{
  The variables
  \code{.max.smart},
  \code{.smart.prediction} and
  \code{.smart.prediction.counter}
  are created while the model is being fitted.
  They are created in a new environment called \code{smartpredenv}.
  These variables are deleted after the model has been fitted.
  However,
  if there is an error in the model fitting function or the fitting
  model is killed (e.g., by typing control-C) then these variables will
  be left in \code{smartpredenv}.
  At the beginning of model fitting,
  these variables are deleted if present in \code{smartpredenv}.
% In S-PLUS they are created in frame 1.
  During prediction, the variables
  \code{.smart.prediction} and
  \code{.smart.prediction.counter}
  are reconstructed and read by the smart functions when the model
  frame is re-evaluated.
  After prediction, these variables are deleted.
  If the modelling function is used with argument \code{smart = FALSE}
  (e.g., \code{vglm(..., smart = FALSE)}) then smart prediction will not
  be used, and the results should match the original \R functions.
} \details{ \R version 1.6.0 introduced a partial fix for the prediction problem because it does not work all the time, e.g., for terms such as \code{I(poly(x, 3))}, \code{poly(c(scale(x)), 3)}, \code{bs(scale(x), 3)}, \code{scale(scale(x))}. See the examples below. Smart prediction, however, will always work. % albeit, not so elegantly. The basic idea is that the functions in the formula are now smart, and the modelling functions make use of these smart functions. Smart prediction works in two ways: using \code{\link{smart.expression}}, or using a combination of \code{\link{put.smart}} and \code{\link{get.smart}}. } \author{T. W. Yee and T. J. Hastie} %\note{ % In S-PLUS you will need to load in the \pkg{smartpred} library with % the argument \code{first = T}, e.g., % \code{library(smartpred, lib = "./mys8libs", first = T)}. % Here, \code{mys8libs} is the name of a directory of installed packages. % To install the smartpred package in Linux/Unix, type something like % \code{Splus8 INSTALL -l ./mys8libs ./smartpred_0.8-2.tar.gz}. %} %\note{ % In \R and % prior to the \pkg{VGAM} package using name spaces, the location of the % variables was the workspace. The present use of \code{smartpredenv} % is superior, and is somewhat similar to the S-PLUS implementation in % that the user is more oblivious to its existence. % %} \seealso{ \code{\link{get.smart.prediction}}, \code{\link{get.smart}}, \code{\link{put.smart}}, \code{\link{smart.expression}}, \code{\link{smart.mode.is}}, \code{\link{setup.smart}}, \code{\link{wrapup.smart}}. For \code{\link[VGAM]{vgam}} in \pkg{VGAM}, \code{\link[VGAM]{sm.ps}} is important. Commonly used data-dependent functions include \code{\link[base]{scale}}, \code{\link[stats]{poly}}, \code{\link[splines]{bs}}, \code{\link[splines]{ns}}. In \R, the functions \code{\link[splines]{bs}} and \code{\link[splines]{ns}} are in the \pkg{splines} package, and this library is automatically loaded in because it contains compiled code that \code{\link[splines]{bs}} and \code{\link[splines]{ns}} call. % The website \url{http://www.stat.auckland.ac.nz/~yee} % contains more information such as how to write a % smart function, and other technical details. The functions \code{\link[VGAM]{vglm}}, \code{\link[VGAM]{vgam}}, \code{\link[VGAM]{rrvglm}} and \code{\link[VGAM]{cqo}} in T. W. Yee's \pkg{VGAM} package are examples of modelling functions that employ smart prediction. } \section{WARNING }{ % In S-PLUS, % if the \code{"bigdata"} library is loaded then it is % \code{detach()}'ed. This is done because % \code{scale} cannot be made smart if \code{"bigdata"} is loaded % (it is loaded by default in the Windows version of % Splus 8.0, but not in Linux/Unix). % The function \code{\link[base]{search}} tells what is % currently attached. % In \R and S-PLUS The functions \code{\link[splines]{bs}}, \code{\link[splines]{ns}}, \code{\link[stats]{poly}} and \code{\link[base]{scale}} are now left alone (from 2014-05 onwards) and no longer smart. They work via safe prediction. The smart versions of these functions have been renamed and they begin with \code{"sm."}. The functions \code{\link[splines]{predict.bs}} and \code{predict.ns} are not smart. That is because they operate on objects that contain attributes only and do not have list components or slots. The function \code{\link[stats:poly]{predict.poly}} is not smart. 
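% In practice the fix is mechanical: replace bs(), ns(), poly() and
% scale() in a formula by their sm.* counterparts. A minimal sketch
% (not run; ldata as created in the examples below):
% fit <- vglm(y ~ sm.bs(x2, df = 3), uninormal, data = ldata)
% predict(fit, newdata = data.frame(x2 = 0.5))  # Uses the saved parameters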
} \examples{ # Create some data first n <- 20 set.seed(86) # For reproducibility of the random numbers ldata <- data.frame(x2 = sort(runif(n)), y = sort(runif(n))) library("splines") # To get ns() in R # This will work for R 1.6.0 and later fit <- lm(y ~ ns(x2, df = 5), data = ldata) \dontrun{ plot(y ~ x2, data = ldata) lines(fitted(fit) ~ x2, data = ldata) new.ldata <- data.frame(x2 = seq(0, 1, len = n)) points(predict(fit, new.ldata) ~ x2, new.ldata, type = "b", col = 2, err = -1) } # The following fails for R 1.6.x and later. It can be # made to work with smart prediction provided # ns is changed to sm.ns and scale is changed to sm.scale: fit1 <- lm(y ~ ns(scale(x2), df = 5), data = ldata) \dontrun{ plot(y ~ x2, data = ldata, main = "Safe prediction fails") lines(fitted(fit1) ~ x2, data = ldata) points(predict(fit1, new.ldata) ~ x2, new.ldata, type = "b", col = 2, err = -1) } # Fit the above using smart prediction \dontrun{ library("VGAM") # The following requires the VGAM package to be loaded fit2 <- vglm(y ~ sm.ns(sm.scale(x2), df = 5), uninormal, data = ldata) fit2@smart.prediction plot(y ~ x2, data = ldata, main = "Smart prediction") lines(fitted(fit2) ~ x2, data = ldata) points(predict(fit2, new.ldata, type = "response") ~ x2, data = new.ldata, type = "b", col = 2, err = -1) } } %\keyword{smart} \keyword{models} \keyword{regression} \keyword{programming} %lm(..., smart = TRUE) %glm(..., smart = TRUE) %ns() %bs() %poly() %scale() %vglm(..., smart = TRUE) %rrvglm(..., smart = TRUE) %vgam(..., smart = TRUE) %cao(..., smart = TRUE) %cqo(..., smart = TRUE) %uqo(..., smart = TRUE) %library(smartpred, lib = "./mys8libs", first = T) VGAM/man/nparamvglm.Rd0000644000176200001440000000424313565414527014231 0ustar liggesusers\name{nparam.vlm} \alias{nparam.vlm} \alias{nparam} %\alias{nparam.vglm} \alias{nparam.vgam} \alias{nparam.rrvglm} \alias{nparam.qrrvglm} \alias{nparam.rrvgam} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Number of Parameters } \description{ Returns the number of parameters in a fitted model object. } \usage{ nparam(object, \dots) nparam.vlm(object, dpar = TRUE, \dots) nparam.vgam(object, dpar = TRUE, linear.only = FALSE, \dots) nparam.rrvglm(object, dpar = TRUE, \dots) nparam.qrrvglm(object, dpar = TRUE, \dots) nparam.rrvgam(object, dpar = TRUE, \dots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Some \pkg{VGAM} object, for example, having class \code{\link{vglmff-class}}. } \item{\dots}{ Other possible arguments fed into the function. } \item{dpar}{ Logical, include any (estimated) dispersion parameters as a parameter? } \item{linear.only}{ Logical, include only the number of linear (parametric) parameters? } } \details{ The code was copied from the \code{AIC()} methods functions. } \value{ Returns a numeric value with the corresponding number of parameters. For \code{\link{vgam}} objects, this may be real rather than integer, because the nonlinear degrees of freedom is real-valued. } \author{T. W. Yee. } %\note{ % This code has not been checked fully. % % %} %\references{ % Sakamoto, Y., Ishiguro, M., and Kitagawa G. (1986). % \emph{Akaike Information Criterion Statistics}. % D. Reidel Publishing Company. %} \section{Warning }{ This code has not been double-checked. } \seealso{ VGLMs are described in \code{\link{vglm-class}}; VGAMs are described in \code{\link{vgam-class}}; RR-VGLMs are described in \code{\link{rrvglm-class}}; \code{\link{AICvlm}}. 
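% The tie-in with AICvlm() can be verified directly, since for VGLMs
% AIC = -2 * logLik + 2 * nparam. A minimal sketch (not run), assuming
% fit1 from the examples below:
% AIC(fit1) - (-2 * logLik(fit1) + 2 * nparam(fit1))  # Should be 0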
} \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)) coef(fit1) coef(fit1, matrix = TRUE) nparam(fit1) (fit2 <- vglm(hits ~ 1, poissonff, weights = ofreq, data = V1)) coef(fit2) coef(fit2, matrix = TRUE) nparam(fit2) nparam(fit2, dpar = FALSE) } \keyword{models} \keyword{regression} VGAM/man/nbordlink.Rd0000644000176200001440000000730413565414527014050 0ustar liggesusers\name{nbordlink} %\name{nbolf} \alias{nbordlink} % \alias{nbolf} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Negative Binomial-Ordinal Link Function } \description{ Computes the negative binomial-ordinal transformation, including its inverse and the first two derivatives. } \usage{ nbordlink(theta, cutpoint = NULL, k = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{cutpoint, k}{ Here, \code{k} is the \eqn{k} parameter associated with the negative binomial distribution; see \code{\link{negbinomial}}. The cutpoints should be non-negative integers. If \code{nbordlink()} is used as the link function in \code{\link{cumulative}} then one should choose \code{reverse = TRUE, parallel = TRUE}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The negative binomial-ordinal link function (NBOLF) can be applied to a parameter lying in the unit interval. Its purpose is to link cumulative probabilities associated with an ordinal response coming from an underlying negative binomial distribution. See \code{\link{Links}} for general information about \pkg{VGAM} link functions. } \value{ See Yee (2018) for details. } \references{ Yee, T. W. (2018) \emph{Ordinal ordination with normalizing link functions for count data}, (in preparation). } \author{ Thomas W. Yee } \note{ Numerical values of \code{theta} too close to 0 or 1 or out of range result in large positive or negative values, or maybe 0 depending on the arguments. Although measures have been taken to handle cases where \code{theta} is too close to 1 or 0, numerical instabilities may still arise. In terms of the threshold approach with cumulative probabilities for an ordinal response this link function corresponds to the negative binomial distribution (see \code{\link{negbinomial}}) that has been recorded as an ordinal response using known cutpoints. } \section{Warning }{ Prediction may not work on \code{\link{vglm}} or \code{\link{vgam}} etc. objects if this link function is used. } \seealso{ \code{\link{Links}}, \code{\link{negbinomial}}, \code{\link{pordlink}}, \code{\link{gordlink}}, \code{nbord2link}, \code{\link{cumulative}}, \code{\link{CommonVGAMffArguments}}. } \examples{ \dontrun{ nbordlink("p", cutpoint = 2, k = 1, short = FALSE) nbordlink("p", cutpoint = 2, k = 1, tag = TRUE) p <- seq(0.02, 0.98, by = 0.01) y <- nbordlink(p,cutpoint = 2, k = 1) y. 
<- nbordlink(p,cutpoint = 2, k = 1, deriv = 1) max(abs(nbordlink(y,cutpoint = 2, k = 1, inv = TRUE) - p)) # Should be 0 #\ dontrun{ par(mfrow = c(2, 1), las = 1) #plot(p, y, type = "l", col = "blue", main = "nbordlink()") #abline(h = 0, v = 0.5, col = "red", lty = "dashed") # #plot(p, y., type = "l", col = "blue", # main = "(Reciprocal of) first NBOLF derivative") } # Another example nn <- 1000 x2 <- sort(runif(nn)) x3 <- runif(nn) mymu <- exp( 3 + 1 * x2 - 2 * x3) k <- 4 y1 <- rnbinom(nn, mu = mymu, size = k) cutpoints <- c(-Inf, 10, 20, Inf) cuty <- Cut(y1, breaks = cutpoints) #\ dontrun{ plot(x2, x3, col = cuty, pch = as.character(cuty)) } table(cuty) / sum(table(cuty)) fit <- vglm(cuty ~ x2 + x3, trace = TRUE, cumulative(reverse = TRUE, multiple.responses = TRUE, parallel = TRUE, link = nbordlink(cutpoint = cutpoints[2:3], k = k))) head(depvar(fit)) head(fitted(fit)) head(predict(fit)) coef(fit) coef(fit, matrix = TRUE) constraints(fit) fit@misc } } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/put.smart.Rd0000644000176200001440000000241513565414527014021 0ustar liggesusers\name{put.smart} \alias{put.smart} \title{ Adds a List to the End of the List ``.smart.prediction'' } \description{ Adds a list to the end of the list \code{.smart.prediction} in \code{smartpredenv}. } \usage{ put.smart(smart) } \arguments{ \item{smart}{ a list containing parameters needed later for smart prediction. } } \value{ Nothing is returned. } \section{Side Effects}{ The variable \code{.smart.prediction.counter} in \code{smartpredenv} is incremented beforehand, and \code{.smart.prediction[[.smart.prediction.counter]]} is assigned the list \code{smart}. If the list \code{.smart.prediction} in \code{smartpredenv} is not long enough to hold \code{smart}, then it is made larger, and the variable \code{.max.smart} in \code{smartpredenv} is adjusted accordingly. } \details{ \code{put.smart} is used in \code{"write"} mode within a smart function. It saves parameters at the time of model fitting, which are later used for prediction. The function \code{put.smart} is the opposite of \code{\link{get.smart}}, and both deal with the same contents. } \seealso{ \code{\link{get.smart}}. } \examples{ print(sm.min1) } %\keyword{smart} \keyword{models} \keyword{regression} \keyword{programming} % Converted by Sd2Rd version 1.10. VGAM/man/bigamma.mckay.Rd0000644000176200001440000000755013565414527014571 0ustar liggesusers\name{bigamma.mckay} \alias{bigamma.mckay} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bivariate Gamma: McKay's Distribution } \description{ Estimate the three parameters of McKay's bivariate gamma distribution by maximum likelihood estimation. } \usage{ bigamma.mckay(lscale = "loglink", lshape1 = "loglink", lshape2 = "loglink", iscale = NULL, ishape1 = NULL, ishape2 = NULL, imethod = 1, zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lscale, lshape1, lshape2}{ Link functions applied to the (positive) parameters \eqn{a}, \eqn{p} and \eqn{q} respectively. See \code{\link{Links}} for more choices. } \item{iscale, ishape1, ishape2}{ Optional initial values for \eqn{a}, \eqn{p} and \eqn{q} respectively. The default is to compute them internally. } \item{imethod, zero}{ See \code{\link{CommonVGAMffArguments}}. 
}
}
\details{
  One of the earliest forms of the bivariate gamma distribution has
  a joint probability density function given by
  \deqn{f(y_1,y_2;a,p,q) = (1/a)^{p+q} y_1^{p-1} (y_2-y_1)^{q-1}
        \exp(-y_2 / a) / [\Gamma(p) \Gamma(q)]}{%
        f(y1,y2;a,p,q) = (1/a)^(p+q) y1^(p-1) (y2-y1)^(q-1)
        exp(-y2/a) / [gamma(p) gamma(q)] }
  for \eqn{a > 0}, \eqn{p > 0}, \eqn{q > 0} and
  \eqn{0 < y_1 < y_2}{0 < y1 < y2}.
aa
names(aa@post$deplot)
aa@post$deplot$newdata
head(aa@post$deplot$y)
head(aa@post$deplot$density)
}
}
\keyword{dplot}
\keyword{models}
\keyword{regression}
VGAM/man/cdf.lmscreg.Rd0000644000176200001440000000407513565414527014257 0ustar liggesusers\name{cdf.lmscreg}
\alias{cdf.lmscreg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Cumulative Distribution Function for LMS Quantile Regression }
\description{
  Computes the cumulative distribution function (CDF) for
  observations, based on a LMS quantile regression.
}
\usage{
cdf.lmscreg(object, newdata = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  A \pkg{VGAM} quantile regression model, i.e.,
  an object produced by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}} with a family function beginning with
  \code{"lms."}.
  }
  \item{newdata}{
  Data frame where the predictions are to be made.
  If missing, the original data is used.
  }
  \item{\dots}{
  Parameters which are passed into functions such as
  \code{cdf.lms.yjn}.
  }
}
\details{
  The CDFs returned here are values lying in [0,1] giving the relative
  probabilities associated with the quantiles \code{newdata}.
  For example, a value near 0.75 means it is close to the upper quartile
  of the distribution.
}
\value{
  A vector of CDF values lying in [0,1].
}
\references{
Yee, T. W. (2004)
Quantile regression via vector generalized additive models.
\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.
}
\author{ Thomas W. Yee }
\note{
  The data are treated like quantiles, and the percentiles
  are returned. The opposite is performed by
  \code{\link{qtplot.lmscreg}}.
  The CDF values of the model have been placed in
  \code{@post$cdf} when the model was fitted.
}
\seealso{
  \code{\link{deplot.lmscreg}},
  \code{\link{qtplot.lmscreg}},
  \code{\link{lms.bcn}},
  \code{\link{lms.bcg}},
  \code{\link{lms.yjn}}.
}
\examples{
fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = bmi.nz)
head(fit@post$cdf)
head(cdf(fit))  # Same
head(depvar(fit))
head(fitted(fit))
cdf(fit, data.frame(age = c(31.5, 39), BMI = c(28.4, 24)))
}
\keyword{models}
\keyword{regression}
VGAM/man/yeo.johnson.Rd0000644000176200001440000000470013565414527014334 0ustar liggesusers\name{yeo.johnson}
\alias{yeo.johnson}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Yeo-Johnson Transformation}
\description{
  Computes the Yeo-Johnson transformation, which is a
  normalizing transformation.
}
\usage{
yeo.johnson(y, lambda, derivative = 0,
            epsilon = sqrt(.Machine$double.eps), inverse = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{y}{Numeric, a vector or matrix. }
  \item{lambda}{Numeric. It is recycled to the same length as
   \code{y} if necessary. }
  \item{derivative}{Non-negative integer. The default is
   the ordinary function evaluation, otherwise the derivative
   with respect to \code{lambda}.}
  \item{epsilon}{
  Numeric and positive value.
The tolerance given to values of \code{lambda} when comparing it to 0 or 2. } \item{inverse}{ Logical. Return the inverse transformation? } } \details{ The Yeo-Johnson transformation can be thought of as an extension of the Box-Cox transformation. It handles both positive and negative values, whereas the Box-Cox transformation only handles positive values. Both can be used to transform the data so as to improve normality. They can be used to perform LMS quantile regression. } \value{ The Yeo-Johnson transformation or its inverse, or its derivatives with respect to \code{lambda}, of \code{y}. } \references{ Yeo, I.-K. and Johnson, R. A. (2000) A new family of power transformations to improve normality or symmetry. \emph{Biometrika}, \bold{87}, 954--959. Yee, T. W. (2004) Quantile regression via vector generalized additive models. \emph{Statistics in Medicine}, \bold{23}, 2295--2315. } \author{ Thomas W. Yee } \note{ If \code{inverse = TRUE} then the argument \code{derivative = 0} is required. } \seealso{ \code{\link{lms.yjn}}, \code{\link[MASS]{boxcox}}. } \examples{ y <- seq(-4, 4, len = (nn <- 200)) ltry <- c(0, 0.5, 1, 1.5, 2) # Try these values of lambda lltry <- length(ltry) psi <- matrix(as.numeric(NA), nn, lltry) for (ii in 1:lltry) psi[, ii] <- yeo.johnson(y, lambda = ltry[ii]) \dontrun{ matplot(y, psi, type = "l", ylim = c(-4, 4), lwd = 2, lty = 1:lltry, ylab = "Yeo-Johnson transformation", col = 1:lltry, las = 1, main = "Yeo-Johnson transformation with some values of lambda") abline(v = 0, h = 0) legend(x = 1, y = -0.5, lty = 1:lltry, legend = as.character(ltry), lwd = 2, col = 1:lltry) } } \keyword{models} \keyword{regression} VGAM/man/oiposbinomUC.Rd0000644000176200001440000000765713565414527014507 0ustar liggesusers\name{Oiposbinom} \alias{Oiposbinom} \alias{doiposbinom} \alias{poiposbinom} \alias{qoiposbinom} \alias{roiposbinom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Inflated Positive Binomial Distribution } \description{ Density, distribution function, quantile function and random generation for the one-inflated positive binomial distribution with parameter \code{pstr1}. } \usage{ doiposbinom(x, size, prob, pstr1 = 0, log = FALSE) poiposbinom(q, size, prob, pstr1 = 0) qoiposbinom(p, size, prob, pstr1 = 0) roiposbinom(n, size, prob, pstr1 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, p, q, n}{Same as \code{\link{Posbinom}}. } \item{size, prob}{Same as \code{\link{Posbinom}}. } \item{pstr1}{ Probability of a structural one (i.e., ignoring the positive binomial distribution), called \eqn{\phi}{phi}. The default value of \eqn{\phi = 0}{phi = 0} corresponds to the response having a positive binomial distribution. However, \code{pstr1} can also be negative, in which case it ceases its interpretation as a probability, and this is known as \emph{one-deflation}. } \item{log}{ Logical. Return the logarithm of the answer? } } \details{ The probability function of \eqn{Y} is 1 with probability \eqn{\phi}{phi}, and \eqn{PosBinomial(size, prob)}{PosBinomial(size, prob)} with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=1) =\phi + (1-\phi) P(W=1)}{% P(Y=1) = phi + (1-phi) * P(W=1)} where \eqn{W} is distributed as a positive \eqn{binomial(size, prob)}{binomial(size, prob)} random variable. } \value{ \code{doiposbinom} gives the density, \code{poiposbinom} gives the distribution function, \code{qoiposbinom} gives the quantile function, and \code{roiposbinom} generates random deviates. } %\references{ } \author{ T. 
W. Yee }
\note{
  The argument \code{pstr1} is recycled to the required length,
  and usually has values which lie in the interval \eqn{[0,1]}.

%
%
%

  These functions actually allow for the \emph{one-deflated positive
  binomial} distribution.  Here, \code{pstr1} is also permitted to
  lie in the interval \eqn{[-A, 0]} for some positive quantity
  \eqn{A}.  The resulting probability of a unit value is \emph{less
  than} the nominal positive binomial value, and the use of
  \code{pstr1} to stand for the probability of a structural 1 loses
  its meaning.

%
%
%

  If \code{pstr1} equals \eqn{-A} then this corresponds to the 0- and
  1-truncated binomial distribution.
}
\seealso{
  \code{\link{posbinomial}},
  \code{\link[stats:binomial]{dbinom}},
  \code{\link{binomialff}}.
% \code{\link{oiposbinomial}},
}
\examples{
size <- 10; prob <- 0.2; pstr1 <- 0.4; x <- (-1):size
(ii <- doiposbinom(x, size, prob, pstr1 = pstr1))
table(roiposbinom(100, size, prob, pstr1 = pstr1))
round(doiposbinom(x, size, prob, pstr1 = pstr1) * 100)  # Should be similar

\dontrun{ x <- 0:size
par(mfrow = c(2, 1))  # One-Inflated Positive Binomial
barplot(rbind(doiposbinom(x, size, prob, pstr1 = pstr1),
              dposbinom(x, size, prob)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("OIPB(", size, ",", prob, ", pstr1 = ", pstr1,
                     ") (blue) vs", " PosBinomial(", size, ",", prob,
                     ") (orange)", sep = ""),
        names.arg = as.character(x))

# One-deflated Positive Binomial
deflat.limit <- -dposbinom(1, size, prob) / (1 - dposbinom(1, size, prob))
deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
newpstr1 <- round(deflat.limit, 3) + 0.001  # A little from the boundary
barplot(rbind(doiposbinom(x, size, prob, pstr1 = newpstr1),
              dposbinom(x, size, prob)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("ODPB(", size, ",", prob, ", pstr1 = ", newpstr1,
                     ") (blue) vs", " PosBinomial(", size, ",", prob,
                     ") (orange)", sep = ""),
        names.arg = as.character(x)) }
}
\keyword{distribution}
VGAM/man/rootogram4vglm.Rd0000644000176200001440000001170413565414527015050 0ustar liggesusers\name{rootogram4}
\alias{rootogram4}
\alias{rootogram4vglm}
\title{ Rootograms (S4 generic) for Assessing Goodness of Fit of
        Probability Models }
\description{
  A graphical technique for comparing the observed and fitted counts
  from a probability model, on a square root scale.
}
\usage{
rootogram4(object, \dots)
rootogram4vglm(object, newdata = NULL, breaks = NULL, max = NULL,
               xlab = NULL, main = NULL, width = NULL, \dots)
}
\arguments{
  \item{object}{
  an object of class \code{"vglm"}.
  This includes \code{"vgam"} because \code{"vlm"} handles both VGLM
  and VGAM objects.

% It is strongly recommended that this be the full model
% because a backward direction is taken first.

  }
  \item{newdata}{
  Data upon which to base the calculations.
  The default is the one used to fit the model.
  }
  \item{breaks}{numeric. Breaks for the histogram intervals.}
  \item{max}{maximum count displayed.}
  \item{xlab, main}{graphical parameters.}
  \item{width}{numeric. Widths of the histogram bars.}

% \item{pkg}{which package to call \code{rootogram()}.
% The first is the default. }

  \item{\dots}{
  any additional arguments to \code{rootogram.default} and
  \code{plot.rootogram} in \pkg{countreg}.

% \code{\link[countreg]{rootogram.default}}
% and \code{\link[countreg]{plot.rootogram}}.
% and its associated functions.

  }
}
\value{
  See \code{rootogram} in \pkg{countreg};
  an object of class \code{"rootogram0"} inheriting from
  \code{"data.frame"} with about 8 variables.
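  As a minimal sketch of inspecting the returned object (not run;
  the column names follow \pkg{countreg} conventions and may vary
  across versions, so treat this as indicative only):

% fit <- vglm(Pardlugu ~ CoveHerb, poissonff, data = hspider)
% r4 <- rootogram4(fit, max = 15)  # Draws the plot; returns the object
% class(r4)                # "rootogram0", inheriting from "data.frame"
% head(as.data.frame(r4))  # Observed and expected counts, etc.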
% \code{\link[countreg]{rootogram}}; } \details{ Rootograms are a useful graphical technique for comparing the observed counts with the expected counts given a probability model. % on \code{\link[countreg]{rootogram}} This S4 implementation is based very heavily on \code{rootogram} coming from \pkg{countreg}. This package is primarily written by A. Zeileis and C. Kleiber. That package is currently on R-Forge but not CRAN, and it is based on S3. Since \pkg{VGAM} is written using S4, it was necessary to define an S4 generic function called \code{rootogram4()} which dispatches appropriately for S4 objects. % The second package is \pkg{vcd} is on CRAN and is written % by David Meyer [aut, cre], % Achim Zeileis ORCID iD [aut], % Kurt Hornik [aut], % Florian Gerber [ctb], % Michael Friendly [ctb]. Currently, only a selected number of \pkg{VGAM} family functions are implemented. Over time, hopefully more and more will be completed. } \note{ The function names used coming from \pkg{countreg} have been renamed slightly to avoid conflict. % Ditto for \pkg{vcd}. } \section{Warning}{ This function is rudimentary and based totally on the implementation in \pkg{countreg}. % and \pkg{vcd}. } \seealso{ \code{\link{vglm}}, \code{\link{vgam}}, \code{\link[stats]{glm}}, \code{\link{zipoisson}}, \code{\link{zapoisson}}, \code{rootogram} in \pkg{countreg}. % \code{rootogram} in \pkg{vcd}. % \code{\link[countreg]{rootogram}}. } \references{ Friendly, M. and Meyer, D. (2016) \emph{Discrete Data Analysis with R: Visualization and Modeling Techniques for Categorical and Count Data}, Boca Raton, FL, USA: Chapman & Hall/CRC Press. Kleiber, C. and Zeileis, A. (2016) \dQuote{Visualizing Count Data Regressions Using Rootograms.} \emph{The American Statistician}, \bold{70}(3), 296--303. \doi{10.1080/00031305.2016.1173590}. Tukey, J. W. (1977) \emph{Exploratory Data Analysis}, Reading, MA, USA: Addison-Wesley. } %\references{ % Hastie, T. J. and Pregibon, D. (1992) % \emph{Generalized linear models.} % Chapter 6 of \emph{Statistical Models in S} % eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole. % Venables, W. N. and Ripley, B. D. (2002) % \emph{Modern Applied Statistics with S.} % New York: Springer (4th ed). %} \author{ Package \pkg{countreg} is primarily written by A. Zeileis and C. Kleiber. Function \code{rootogram4()} is based very heavily on \pkg{countreg}. % and \pkg{vcd}. % Package \pkg{vcd} is written % by David Meyer [aut, cre], % Achim Zeileis ORCID iD [aut], % Kurt Hornik [aut], % Florian Gerber [ctb], % Michael Friendly [ctb]. } \examples{ \dontrun{ data("hspider", package = "VGAM") # Count responses hs.p <- vglm(Pardlugu ~ CoveHerb, poissonff, data = hspider) hs.nb <- vglm(Pardlugu ~ CoveHerb, negbinomial, data = hspider) hs.zip <- vglm(Pardlugu ~ CoveHerb, zipoisson, data = hspider) hs.zap <- vglm(Pardlugu ~ CoveHerb, zapoisson, data = hspider) opar <- par(mfrow = c(2, 2)) # Plot the rootograms rootogram4(hs.p, max = 15, main = "poissonff") rootogram4(hs.nb, max = 15, main = "negbinomial") rootogram4(hs.zip, max = 15, main = "zipoisson") rootogram4(hs.zap, max = 15, main = "zapoisson") par(opar) } } \keyword{models} %\donttest{} %\dontshow{utils::example("lm", echo = FALSE)} % ( pkg = c("countreg", "vcd"), \dots) VGAM/man/rhobitlink.Rd0000644000176200001440000000563513565414527014240 0ustar liggesusers\name{rhobitlink} \alias{rhobitlink} % \alias{rhobit} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Rhobit Link Function }
\description{
  Computes the rhobit link transformation, including its inverse and
  the first two derivatives.
}
\usage{
rhobitlink(theta, bminvalue = NULL, bmaxvalue = NULL,
           inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{theta}{
  Numeric or character.
  See below for further details.
  }
  \item{bminvalue, bmaxvalue}{
  Optional boundary values, e.g., values of \code{theta} which are
  less than or equal to -1 can be replaced by \code{bminvalue} before
  computing the link function value.
  And values of \code{theta} which are greater than or equal to 1 can
  be replaced by \code{bmaxvalue} before computing the link function
  value.
  See \code{\link{Links}}.
  }
  \item{inverse, deriv, short, tag}{
  Details at \code{\link{Links}}.
  }
}
\details{
  The \code{rhobitlink} link function is commonly used for parameters
  that lie between \eqn{-1} and \eqn{1}.
  Numerical values of \code{theta} close to \eqn{-1} or \eqn{1} or
  out of range result in \code{Inf}, \code{-Inf}, \code{NA} or
  \code{NaN}.
}
\value{
  For \code{deriv = 0}, the rhobit of \code{theta}, i.e.,
  \code{log((1 + theta)/(1 - theta))} when \code{inverse = FALSE},
  and if \code{inverse = TRUE} then
  \code{(exp(theta) - 1)/(exp(theta) + 1)}.

  For \code{deriv = 1}, the function returns
  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of
  \code{theta} if \code{inverse = FALSE},
  else if \code{inverse = TRUE} then it returns the reciprocal.
}
%\references{
%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.
%
%
%}
\author{ Thomas W. Yee }
\note{
  Numerical instability may occur when \code{theta} is close to
  \eqn{-1} or \eqn{1}.
  One way of overcoming this is to use \code{bminvalue}, etc.

  The correlation parameter of a standard bivariate normal
  distribution lies between \eqn{-1} and \eqn{1}, therefore this
  function can be used for modelling this parameter as a function of
  explanatory variables.

  The link function \code{rhobitlink} is very similar to
  \code{\link{fisherzlink}}, being just twice the value of
  \code{\link{fisherzlink}}.
}
\seealso{
  \code{\link{Links}},
  \code{\link{binom2.rho}},
  \code{\link{fisherzlink}}.
}
\examples{
theta <- seq(-0.99, 0.99, by = 0.01)
y <- rhobitlink(theta)
\dontrun{
plot(theta, y, type = "l", las = 1, ylab = "",
     main = "rhobitlink(theta)")
abline(v = 0, h = 0, lty = 2)
}

x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01))
rhobitlink(x)  # Has NAs
rhobitlink(x, bminvalue = -1 + .Machine$double.eps,
              bmaxvalue =  1 - .Machine$double.eps)  # Has no NAs
}
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/fill.Rd0000644000176200001440000002352413565414527013016 0ustar liggesusers\name{fill}
\alias{fill}
\alias{fill1}
%- \alias{fill2}
%- \alias{fill3}
%- \alias{fill4}
%- \alias{fill5}
%- \alias{fill6}
%- \alias{fill7}
%- \alias{fill8}
%- \alias{fill9}
%- \alias{fill10}
%- \alias{fill11}
%- \alias{fill12}
%- \alias{fill13}
%- \alias{fill14}
%- \alias{fill15}
%- \alias{fill16}
%- \alias{fill17}
%- \alias{fill18}
%- \alias{fill19}
%- \alias{fill20}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Creates a Matrix of Appropriate Dimension }
\description{
  A support function for the argument \code{xij}, it generates a
  matrix of an appropriate dimension.
}
\usage{
fill(x, values = 0, ncolx = ncol(x))
}
%- maybe also 'usage' for other objects documented here.
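% A quick sketch of the core behaviour (see the Value section below):
% fill(x, values, ncolx) is essentially
%     matrix(values, nrow = nrow(as.matrix(x)), ncol = ncolx)
% so, e.g., fill(runif(5), values = 9, ncolx = 2) is a 5 x 2 matrix
% of 9s.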
\arguments{ \item{x}{ A vector or matrix which is used to determine the dimension of the answer, in particular, the number of rows. After converting \code{x} to a matrix if necessary, the answer is a matrix of \code{values} values, of dimension \code{nrow(x)} by \code{ncolx}. } \item{values}{ Numeric. The answer contains these values, which are recycled \emph{columnwise} if necessary, i.e., as \code{matrix(values, ..., byrow=TRUE)}. } \item{ncolx}{ The number of columns of the returned matrix. The default is the number of columns of \code{x}. } } \details{ The \code{xij} argument for \code{\link{vglm}} allows the user to input variables specific to each linear/additive predictor. For example, consider the bivariate logit model where the first/second linear/additive predictor is the logistic regression of the first/second binary response respectively. The third linear/additive predictor is \code{log(OR) = eta3}, where \code{OR} is the odds ratio. If one has ocular pressure as a covariate in this model then \code{xij} is required to handle the ocular pressure for each eye, since these will be different in general. [This contrasts with a variable such as \code{age}, the age of the person, which has a common value for both eyes.] In order to input these data into \code{\link{vglm}} one often finds that functions \code{fill}, \code{fill1}, etc. are useful. All terms in the \code{xij} and \code{formula} arguments in \code{\link{vglm}} must appear in the \code{form2} argument too. } \value{ \code{matrix(values, nrow=nrow(x), ncol=ncolx)}, i.e., a matrix consisting of values \code{values}, with the number of rows matching \code{x}, and the default number of columns is the number of columns of \code{x}. } %\references{ % More information can be found at % \url{http://www.stat.auckland.ac.nz/~yee}. % % %} % \section{Warning }{ % Care is needed in such cases. % See the examples below. % %} \author{ T. W. Yee } \note{ The effect of the \code{xij} argument is after other arguments such as \code{exchangeable} and \code{zero}. Hence \code{xij} does not affect constraint matrices. Additionally, there are currently 3 other identical \code{fill} functions, called \code{fill1}, \code{fill2} and \code{fill3}; if you need more then assign \code{fill4 = fill5 = fill1} etc. The reason for this is that if more than one \code{fill} function is needed then they must be unique. For example, if \eqn{M=4} then \code{xij = op ~ lop + rop + fill(mop) + fill(mop)} would reduce to \code{xij = op ~ lop + rop + fill(mop)}, whereas \code{xij = op ~ lop + rop + fill1(mop) + fill2(mop)} would retain all \eqn{M} terms, which is needed. % The constraint matrices, as returned by \code{constraints}, do not % have a different meaning when \code{xij} is used. In Examples 1 to 3 below, the \code{xij} argument illustrates covariates that are specific to a linear predictor. Here, \code{lop}/\code{rop} are the ocular pressures of the left/right eye in an artificial dataset, and \code{mop} is their mean. Variables \code{leye} and \code{reye} might be the presence/absence of a particular disease on the LHS/RHS eye respectively. % % Examples 1 and 2 are deliberately misspecified. % The output from, e.g., \code{coef(fit, matrix=TRUE)}, looks wrong but % is correct because the coefficients are multiplied by the zeros % produced from \code{fill}. In Example 3, the \code{xij} argument illustrates fitting the (exchangeable) model where there is a common smooth function of the ocular pressure. 
One should use regression splines since \code{\link{s}} in \code{\link{vgam}} does not handle the \code{xij} argument. However, regression splines such as \code{\link[splines]{bs}} and \code{\link[splines]{ns}} need to have the same basis functions here for both functions, and Example 3 illustrates a trick involving a function \code{BS} to obtain this, e.g., same knots. Although regression splines create more than a single column per term in the model matrix, \code{fill(BS(lop,rop))} creates the required (same) number of columns. } \seealso{ \code{\link{vglm.control}}, \code{\link{vglm}}, \code{\link{multinomial}}, \code{\link{Select}}. } \examples{ fill(runif(5)) fill(runif(5), ncol = 3) fill(runif(5), val = 1, ncol = 3) # Generate eyes data for the examples below. Eyes are independent (OR=1). nn <- 1000 # Number of people eyesdata <- data.frame(lop = round(runif(nn), 2), rop = round(runif(nn), 2), age = round(rnorm(nn, 40, 10))) eyesdata <- transform(eyesdata, mop = (lop + rop) / 2, # Mean ocular pressure op = (lop + rop) / 2, # Value unimportant unless plotting # op = lop, # Choose this if plotting eta1 = 0 - 2*lop + 0.04*age, # Linear predictor for left eye eta2 = 0 - 2*rop + 0.04*age) # Linear predictor for right eye eyesdata <- transform(eyesdata, leye = rbinom(nn, size = 1, prob = logitlink(eta1, inverse = TRUE)), reye = rbinom(nn, size = 1, prob = logitlink(eta2, inverse = TRUE))) # Example 1. All effects are linear. fit1 <- vglm(cbind(leye,reye) ~ op + age, family = binom2.or(exchangeable = TRUE, zero = 3), data = eyesdata, trace = TRUE, xij = list(op ~ lop + rop + fill(lop)), form2 = ~ op + lop + rop + fill(lop) + age) head(model.matrix(fit1, type = "lm")) # LM model matrix head(model.matrix(fit1, type = "vlm")) # Big VLM model matrix coef(fit1) coef(fit1, matrix = TRUE) # Unchanged with 'xij' constraints(fit1) max(abs(predict(fit1)-predict(fit1, new = eyesdata))) # Predicts correctly summary(fit1) \dontrun{ plotvgam(fit1, se = TRUE) # Wrong, e.g., because it plots against op, not lop. # So set op = lop in the above for a correct plot. } # Example 2. This model uses regression splines on ocular pressure. # It uses a trick to ensure common basis functions. BS <- function(x, ...) sm.bs(c(x,...), df = 3)[1:length(x), , drop = FALSE] # trick fit2 <- vglm(cbind(leye,reye) ~ BS(lop,rop) + age, family = binom2.or(exchangeable = TRUE, zero = 3), data = eyesdata, trace = TRUE, xij = list(BS(lop,rop) ~ BS(lop,rop) + BS(rop,lop) + fill(BS(lop,rop))), form2 = ~ BS(lop,rop) + BS(rop,lop) + fill(BS(lop,rop)) + lop + rop + age) head(model.matrix(fit2, type = "lm")) # LM model matrix head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix coef(fit2) coef(fit2, matrix = TRUE) summary(fit2) fit2@smart.prediction max(abs(predict(fit2) - predict(fit2, new = eyesdata))) # Predicts correctly predict(fit2, new = head(eyesdata)) # Note the 'scalar' OR, i.e., zero=3 max(abs(head(predict(fit2)) - predict(fit2, new = head(eyesdata)))) # Should be 0 \dontrun{ plotvgam(fit2, se = TRUE, xlab = "lop") # Correct } # Example 3. Capture-recapture model with ephemeral and enduring # memory effects. Similar to Yang and Chao (2005), Biometrics. 
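# (Sketch of the idea: Lag1 carries the ephemeral, previous-occasion
#  capture effect; the fill() terms below pad the xij formula so each
#  linear predictor receives the correct lagged capture history.
#  See posbernoulli.tb for the enduring/behavioural effect.)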
deermice <- transform(deermice, Lag1 = y1) M.tbh.lag1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight + Lag1, posbernoulli.tb(parallel.t = FALSE ~ 0, parallel.b = FALSE ~ 0, drop.b = FALSE ~ 1), xij = list(Lag1 ~ fill(y1) + fill(y2) + fill(y3) + fill(y4) + fill(y5) + fill(y6) + y1 + y2 + y3 + y4 + y5), form2 = ~ sex + weight + Lag1 + fill(y1) + fill(y2) + fill(y3) + fill(y4) + fill(y5) + fill(y6) + y1 + y2 + y3 + y4 + y5 + y6, data = deermice, trace = TRUE) coef(M.tbh.lag1) } \keyword{models} \keyword{regression} %This function is unrelated to the \code{zero} argument found in many %\pkg{VGAM} family functions. [zz implies I should call it %\code{fill1(x, value=0, ncolx=ncol(x))} and create .Rd file for %\code{zero} argument.] %eyesdata$leye <- ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0) %eyesdata$reye <- ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0) % \deqn{logit P(Y_k=1) = f_k(x_{ijk}) }{% % logit P(Y_k=1) = f_k(x_{ijk}) } % for \code{k=1,2}. % fill1(lop, ncol=ncol(BS(lop,rop,mop))), data=eyesdata) % Models using the \code{xij} argument may or may not predict correctly, % and inference obtained using \code{summary} may be incorrect. % 20191104; put this here, as it does not use fill() and this .Rd expensive: %# Example 2. Model OR as a linear function of mop. %fit2 <- vglm(cbind(leye, reye) ~ op + age, data = eyesdata, trace = TRUE, % binom2.or(exchangeable = TRUE, zero = NULL), % xij = list(op ~ lop + rop + mop), % form2 = ~ op + lop + rop + mop + age) %head(model.matrix(fit2, type = "lm")) # LM model matrix %head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix %coef(fit2) %coef(fit2, matrix = TRUE) # Unchanged with 'xij' %max(abs(predict(fit2) - predict(fit2, new = eyesdata))) # Predicts correctly %summary(fit2) %\ dontrun { %plotvgam(fit2, se = TRUE) # Wrong because it plots against op, not lop. % } VGAM/man/sratio.Rd0000644000176200001440000001014613565414527013365 0ustar liggesusers\name{sratio} \alias{sratio} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Ordinal Regression with Stopping Ratios } \description{ Fits a stopping ratio logit/probit/cloglog/cauchit/... regression model to an ordered (preferably) factor response. } \usage{ sratio(link = "logitlink", parallel = FALSE, reverse = FALSE, zero = NULL, whitespace = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to the \eqn{M} stopping ratio probabilities. See \code{\link{Links}} for more choices. } \item{parallel}{ A logical, or formula specifying which terms have equal/unequal coefficients. } \item{reverse}{ Logical. By default, the stopping ratios used are \eqn{\eta_j = logit(P[Y=j|Y \geq j])}{eta_j = logit(P[Y=j|Y>=j])} for \eqn{j=1,\dots,M}. If \code{reverse} is \code{TRUE}, then \eqn{\eta_j = logit(P[Y=j+1|Y \leq j+1])}{eta_j = logit(P[Y=j+1|Y<=j+1])} will be used. } \item{zero}{ Can be an integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The values must be from the set \{1,2,\ldots,\eqn{M}\}. The default value means none are modelled as intercept-only terms. } \item{whitespace}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ In this help file the response \eqn{Y} is assumed to be a factor with ordered values \eqn{1,2,\dots,M+1}, so that \eqn{M} is the number of linear/additive predictors \eqn{\eta_j}{eta_j}. There are a number of definitions for the \emph{continuation ratio} in the literature. 
To make life easier, in the \pkg{VGAM} package, we use \emph{continuation} ratios (see \code{\link{cratio}}) and \emph{stopping} ratios. Continuation ratios deal with quantities such as \code{logitlink(P[Y>j|Y>=j])}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Agresti, A. (2013) \emph{Categorical Data Analysis}, 3rd ed. Hoboken, NJ, USA: Wiley. Simonoff, J. S. (2003) \emph{Analyzing Categorical Data}, New York, USA: Springer-Verlag. McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. Yee, T. W. (2010) The \pkg{VGAM} package for categorical data analysis. \emph{Journal of Statistical Software}, \bold{32}, 1--34. \url{http://www.jstatsoft.org/v32/i10/}. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ The response should be either a matrix of counts (with row sums that are all positive), or a factor. In both cases, the \code{y} slot returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix of counts. For a nominal (unordered) factor response, the multinomial logit model (\code{\link{multinomial}}) is more appropriate. Here is an example of the usage of the \code{parallel} argument. If there are covariates \code{x1}, \code{x2} and \code{x3}, then \code{parallel = TRUE ~ x1 + x2 -1} and \code{parallel = FALSE ~ x3} are equivalent. This would constrain the regression coefficients for \code{x1} and \code{x2} to be equal; those of the intercepts and \code{x3} would be different. } \section{Warning }{ No check is made to verify that the response is ordinal if the response is a matrix; see \code{\link[base:factor]{ordered}}. } \seealso{ \code{\link{cratio}}, \code{\link{acat}}, \code{\link{cumulative}}, \code{\link{multinomial}}, \code{\link{margeff}}, \code{\link{pneumo}}, \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link{cauchitlink}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit <- vglm(cbind(normal, mild, severe) ~ let, sratio(parallel = TRUE), data = pneumo)) coef(fit, matrix = TRUE) constraints(fit) predict(fit) predict(fit, untransform = TRUE) } \keyword{models} \keyword{regression} VGAM/man/weibull.mean.Rd0000644000176200001440000000660413565414527014452 0ustar liggesusers\name{weibull.mean} \alias{weibull.mean} %\alias{weibullff} %\alias{weibull.lsh} %\alias{weibull3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Weibull Distribution Family Function, Parameterized by the Mean } \description{ Maximum likelihood estimation of the 2-parameter Weibull distribution. The mean is one of the parameters. No observations should be censored. } \usage{ weibull.mean(lmean = "loglink", lshape = "loglink", imean = NULL, ishape = NULL, probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmean, lshape}{ Parameter link functions applied to the (positive) mean parameter (called \eqn{mu} below) and (positive) shape parameter (called \eqn{a} below). See \code{\link{Links}} for more choices. } \item{imean, ishape}{ Optional initial values for the mean and shape parameters. } \item{imethod, zero, probs.y}{ Details at \code{\link{CommonVGAMffArguments}}. 
  }
}
\details{
  See \code{\link{weibullR}} for most of the details for this family
  function too.
  The mean of \eqn{Y} is \eqn{b \, \Gamma(1+ 1/a)}{b * gamma(1+ 1/a)}
  (returned as the fitted values), where \eqn{b} is the scale
  parameter as in \code{\link{weibullR}}, and this mean is the first
  parameter (a \code{\link{loglink}} link is the default because it
  is positive).
  The other parameter is the positive shape parameter \eqn{a}, also
  having a default \code{\link{loglink}} link.

  This \pkg{VGAM} family function currently does not handle censored
  data.
  Fisher scoring is used to estimate the two parameters.
  Although the expected information matrices used here are valid in
  all regions of the parameter space, the regularity conditions for
  maximum likelihood estimation are satisfied only if \eqn{a>2}
  (according to Kleiber and Kotz (2003)).
  If this is violated then a warning message is issued.
  One can enforce \eqn{a>2} by choosing
  \code{lshape = logofflink(offset = -2)}.
  Common values of the shape parameter lie between 0.5 and 3.5.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as
  \code{\link{vglm}}, and \code{\link{vgam}}.
}
\author{ T. W. Yee }
\note{
  See \code{\link{weibullR}} for more details.
  This \pkg{VGAM} family function handles multiple responses.
}
%\section{Warning}{
% This function is under development to handle other censoring situations.
% The version of this function which will handle censored data will be
%}
\seealso{
  \code{\link{weibullR}},
  \code{\link[stats:Weibull]{dweibull}},
  \code{\link{truncweibull}},
  \code{\link{gev}},
  \code{\link{lognormal}},
  \code{\link{expexpff}},
  \code{\link{maxwell}},
  \code{\link{rayleigh}},
  \code{\link{gumbelII}}.
}
\examples{
wdata <- data.frame(x2 = runif(nn <- 1000))  # Complete data
wdata <- transform(wdata, mu = exp(-1 + 1 * x2),
                   x3 = rnorm(nn),
                   shape1 = exp(1),
                   shape2 = exp(2))
wdata <- transform(wdata,
  y1 = rweibull(nn, shape = shape1, scale = mu / gamma(1 + 1/shape1)),
  y2 = rweibull(nn, shape = shape2, scale = mu / gamma(1 + 1/shape2)))
fit <- vglm(cbind(y1, y2) ~ x2 + x3, weibull.mean, data = wdata,
            trace = TRUE)
coef(fit, matrix = TRUE)
sqrt(diag(vcov(fit)))  # SEs
summary(fit, presid = FALSE)
}
\keyword{models}
\keyword{regression}
VGAM/man/biplackettcop.Rd0000644000176200001440000000603113565414527014706 0ustar liggesusers\name{biplackettcop}
\alias{biplackettcop}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Plackett's Bivariate Copula Family Function }
\description{
  Estimate the association parameter of Plackett's bivariate
  distribution (copula) by maximum likelihood estimation.
}
\usage{
biplackettcop(link = "loglink", ioratio = NULL, imethod = 1,
              nsimEIM = 200)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{link}{
  Link function applied to the (positive) odds ratio
  \eqn{\psi}{psi}.
  See \code{\link{Links}} for more choices and information.
  }
  \item{ioratio}{
  Numeric.
  Optional initial value for \eqn{\psi}{psi}.
  If a convergence failure occurs, try assigning a different value.
  }
  \item{imethod, nsimEIM}{
  See \code{\link{CommonVGAMffArguments}}.
  }
}
\details{
  The defining equation is
  \deqn{\psi = H \times (1-y_1-y_2+H) / ((y_1-H) \times (y_2-H))}{%
        psi = H*(1-y1-y2+H) / ((y1-H)*(y2-H))}
  where
  \eqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = H_{\psi}(y_1,y_2)}{P(Y1 <= y1,
       Y2 <= y2) = H(y1,y2)}
  is the cumulative distribution function.
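  As an informal numerical check of this defining property (a sketch
  using \code{\link{rbiplackcop}}; the Plackett copula has the same
  odds ratio at any dichotomization point):

% set.seed(123)
% ymat <- rbiplackcop(n = 20000, oratio = exp(1))
% tab <- table(ymat[, 1] <= 0.5, ymat[, 2] <= 0.5)
% (tab[1, 1] * tab[2, 2]) / (tab[1, 2] * tab[2, 1])  # Approx exp(1)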
  The density function is \eqn{h_{\psi}(y_1,y_2) =}{h(y1,y2) =}
  \deqn{\psi [1 + (\psi-1)(y_1 + y_2 - 2 y_1 y_2) ] / \left(
        [1 + (\psi-1)(y_1 + y_2) ]^2 -
        4 \psi (\psi-1) y_1 y_2 \right)^{3/2}}{%
        psi*[1 + (psi-1)*(y1 + y2 - 2*y1*y2) ] / ( [1 +
        (psi-1)*(y1 + y2)]^2 - 4*psi*(psi-1)*y1*y2)^(3/2)}
  for \eqn{\psi > 0}{psi > 0}.
  Some writers call \eqn{\psi}{psi} the \emph{cross product ratio}
  but it is called the \emph{odds ratio} here.
  The support of the function is the unit square.
  The marginal distributions here are the standard uniform although
  it is commonly generalized to other distributions.

  If \eqn{\psi = 1}{psi = 1} then
  \eqn{H_{\psi}(y_1,y_2) = y_1 y_2}{H(y1,y2) = y1*y2},
  i.e., the density is unity and the two random variables are
  independent.
  As the odds ratio tends to infinity one has \eqn{y_1=y_2}{y1=y2}.
  As the odds ratio tends to 0 one has \eqn{y_2=1-y_1}{y2=1-y1}.

  Fisher scoring is implemented using \code{\link{rbiplackcop}}.
  Convergence is often quite slow.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as
  \code{\link{vglm}} and \code{\link{vgam}}.
}
\references{
  Plackett, R. L. (1965)
  A class of bivariate distributions.
  \emph{Journal of the American Statistical Association},
  \bold{60}, 516--522.
}
\author{ T. W. Yee }
\note{
  The response must be a two-column matrix.
  Currently, the fitted value is a 2-column matrix with 0.5 values
  because the marginal distributions correspond to a standard
  uniform distribution.
}
\seealso{
  \code{\link{rbiplackcop}},
  \code{\link{bifrankcop}}.
}
\examples{
\dontrun{
ymat <- rbiplackcop(n = 2000, oratio = exp(2))
plot(ymat, col = "blue")
fit <- vglm(ymat ~ 1, fam = biplackettcop, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
head(fitted(fit))
summary(fit)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/cqo.Rd0000644000176200001440000006125313565414527012653 0ustar liggesusers\name{cqo}
\alias{cqo}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Fitting Constrained Quadratic Ordination (CQO)}
\description{
  A \emph{constrained quadratic ordination} (CQO; formerly called
  \emph{canonical Gaussian ordination} or CGO) model is fitted using
  the \emph{quadratic reduced-rank vector generalized linear model}
  (QRR-VGLM) framework.
}
\usage{
cqo(formula, family = stop("argument 'family' needs to be assigned"),
    data = list(), weights = NULL, subset = NULL, na.action = na.fail,
    etastart = NULL, mustart = NULL, coefstart = NULL,
    control = qrrvglm.control(...), offset = NULL,
    method = "cqo.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
    contrasts = NULL, constraints = NULL, extra = NULL,
    smart = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{formula}{
  a symbolic description of the model to be fit.
  The RHS of the formula is applied to each linear predictor.
  Different variables in each linear predictor can be chosen by
  specifying constraint matrices.
  }
  \item{family}{
  a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
  describing what statistical model is to be fitted.
  This is called a ``\pkg{VGAM} family function''.
  See \code{\link{CommonVGAMffArguments}} for general information
  about many types of arguments found in this type of function.
  Currently the following families are supported:
  \code{\link{poissonff}},
  \code{\link{binomialff}}
      (\code{\link{logitlink}} and \code{\link{clogloglink}} links
      available),
  \code{\link{negbinomial}},
  \code{\link{gamma2}}.
Sometimes special arguments are required for \code{cqo()}, e.g., \code{binomialff(multiple.responses = TRUE)}. % \code{\link{gaussianff}}. % Also, \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}} % may or may not work. % \code{negbinomial(deviance = TRUE)}, % \code{gamma2(deviance = TRUE)}. } \item{data}{ an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which \code{cqo} is called. } \item{weights}{ an optional vector or matrix of (prior) weights to be used in the fitting process. Currently, this argument should not be used. } \item{subset}{ an optional logical vector specifying a subset of observations to be used in the fitting process. } \item{na.action}{ a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link[base]{options}}, and is \code{na.fail} if that is unset. The ``factory-fresh'' default is \code{na.omit}. } \item{etastart}{ starting values for the linear predictors. It is a \eqn{M}-column matrix. If \eqn{M = 1} then it may be a vector. Currently, this argument probably should not be used. } \item{mustart}{ starting values for the fitted values. It can be a vector or a matrix. Some family functions do not make use of this argument. Currently, this argument probably should not be used. } \item{coefstart}{ starting values for the coefficient vector. Currently, this argument probably should not be used. } \item{control}{ a list of parameters for controlling the fitting process. See \code{\link{qrrvglm.control}} for details. } \item{offset}{ This argument must not be used. % especially when \code{I.tolerances = TRUE}. % a vector or \eqn{M}-column matrix of offset values. % These are \emph{a priori} known and are % added to the linear predictors during fitting. } \item{method}{ the method to be used in fitting the model. The default (and presently only) method \code{cqo.fit} uses \emph{iteratively reweighted least squares} (IRLS). } \item{model}{ a logical value indicating whether the \emph{model frame} should be assigned in the \code{model} slot. } \item{x.arg, y.arg}{ logical values indicating whether the model matrix and response matrix used in the fitting process should be assigned in the \code{x} and \code{y} slots. Note the model matrix is the LM model matrix. % ; to get the VGLM % model matrix type \code{model.matrix(vglmfit)} where % \code{vglmfit} is a \code{vglm} object. } \item{contrasts}{ an optional list. See the \code{contrasts.arg} of \code{model.matrix.default}. } \item{constraints}{ an optional list of constraint matrices. The components of the list must be named with the term it corresponds to (and it must match in character format). Each constraint matrix must have \eqn{M} rows, and be of full-column rank. By default, constraint matrices are the \eqn{M} by \eqn{M} identity matrix unless arguments in the family function itself override these values. If \code{constraints} is used it must contain \emph{all} the terms; an incomplete list is not accepted. Constraint matrices for \eqn{x_2}{x_2} variables are taken as the identity matrix. } \item{extra}{ an optional list with any extra information that might be needed by the family function. } % \item{qr.arg}{ logical value indicating whether % the slot \code{qr}, which returns the QR decomposition of the % VLM model matrix, is returned on the object. 
% }

  \item{smart}{
  logical value indicating whether smart prediction
  (\code{\link{smartpred}}) will be used.
  }
  \item{\dots}{
  further arguments passed into \code{\link{qrrvglm.control}}.
  }
}
\details{
  QRR-VGLMs or \emph{constrained quadratic ordination} (CQO) models
  are estimated here by maximum likelihood estimation.
  Optimal linear combinations of the environmental variables are
  computed, called \emph{latent variables} (these appear as
  \code{latvar} for \eqn{R=1} else \code{latvar1}, \code{latvar2},
  etc. in the output).
  Here, \eqn{R} is the \emph{rank} or the number of ordination axes.
  Each species' response is then a regression of these latent
  variables using quadratic polynomials on a transformed scale
  (e.g., log for Poisson counts, logit for presence/absence
  responses).
  The solution is obtained iteratively in order to maximize the
  log-likelihood function, or equivalently, minimize the deviance.

  The central formula (for Poisson and binomial species data) is
  given by
  \deqn{\eta = B_1^T x_1 + A \nu +
               \sum_{m=1}^M (\nu^T D_m \nu) e_m}{%
        eta = B_1^T x_1 + A nu + sum_{m=1}^M (nu^T D_m nu) e_m}
  where \eqn{x_1}{x_1} is a vector (usually just a 1 for an
  intercept),
  \eqn{x_2}{x_2} is a vector of environmental variables,
  \eqn{\nu=C^T x_2}{nu=C^T x_2} is an \eqn{R}-vector of latent
  variables,
  \eqn{e_m} is a vector of 0s but with a 1 in the \eqn{m}th position.
  The \eqn{\eta}{eta} are a vector of linear/additive predictors,
  e.g., the \eqn{m}th element is
  \eqn{\eta_m = \log(E[Y_m])}{eta_m = log(E[Y_m])} for the
  \eqn{m}th species.
  The matrices \eqn{B_1}, \eqn{A}, \eqn{C} and \eqn{D_m} are
  estimated from the data, i.e., contain the regression coefficients.
  The tolerance matrices satisfy
  \eqn{T_s = -\frac12 D_s^{-1}}{T_s = -0.5 * D_s^(-1)}.
  Many important CQO details are directly related to arguments in
  \code{\link{qrrvglm.control}}, e.g., the argument \code{noRRR}
  specifies which variables comprise \eqn{x_1}{x_1}.

  Theoretically, the four most popular \pkg{VGAM} family functions
  to be used with \code{cqo} correspond to the Poisson, binomial,
  normal, and negative binomial distributions.
  The latter is a 2-parameter model.
  All of these are implemented, as well as the 2-parameter gamma.

% The Poisson is or should be catered for by
% \code{\link{quasipoissonff}} and \code{\link{poissonff}}, and the
% binomial by \code{\link{quasibinomialff}} and \code{\link{binomialff}}.
% Those beginning with \code{"quasi"} have dispersion parameters that
% are estimated for each species.

%the negative binomial by \code{\link{negbinomial}}, and the normal by
%\code{gaussianff}.

%For overdispersed Poisson data, using \code{\link{quasipoissonff}} is
%strongly recommended over \code{\link{negbinomial}}; the latter is
%\emph{very} sensitive to departures from the model assumptions.

  For initial values, the function \code{.Init.Poisson.QO} should
  work reasonably well if the data is Poisson with species having
  equal tolerances.
  It can be quite good on binary data too.
  Otherwise the \code{Cinit} argument in \code{\link{qrrvglm.control}}
  can be used.

%(and negative binomial)

  It is possible to relax the quadratic form to an additive model.
  The result is a data-driven approach rather than a model-driven
  approach, so that CQO is extended to \emph{constrained additive
  ordination} (CAO) when \eqn{R=1}.
  See \code{\link{cao}} for more details.

  In this documentation,
  \eqn{M} is the number of linear predictors,
  \eqn{S} is the number of responses (species).
  Then \eqn{M=S} for Poisson and binomial species data, and
  \eqn{M=2S} for negative binomial and gamma distributed species
  data.

  Incidentally, \emph{unconstrained quadratic ordination} (UQO) may
  be performed by, e.g., fitting a Goodman's RC association model;
  see \code{\link{uqo}} and the Yee and Hadi (2014) referenced there.
  For UQO, the response is the usual site-by-species matrix and
  there are no environmental variables; the site scores are free
  parameters.
  UQO can be performed under the assumption that all species have
  the same tolerance matrices.
}
\value{
  An object of class \code{"qrrvglm"}.

% Note that the slot \code{misc} has a list component called
% \code{deviance.Bestof} which gives the history of deviances over all
% the iterations.

}
\references{
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.

ter Braak, C. J. F. and Prentice, I. C. (1988)
A theory of gradient analysis.
\emph{Advances in Ecological Research},
\bold{18}, 271--317.

%Yee, T. W. (2005)
%On constrained and unconstrained
%quadratic ordination.
%\emph{Manuscript in preparation}.

Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
}
\author{
  Thomas W. Yee.
  Thanks to Alvin Sou for converting a lot of the original FORTRAN
  code into C.
}
\note{
  The input requires care, preparation and thought---\emph{a lot
  more} than other ordination methods.
  Here is a partial \bold{checklist}.
  \describe{
  \item{(1)}{
  The number of species should be kept reasonably low, e.g., 12 max.
  Feeding in 100+ species wholesale is a recipe for failure.
  Choose a few species carefully.
  Using 10 well-chosen species is better than 100+ species thrown in
  willy-nilly.
  }
  \item{(2)}{
  Each species should be screened individually first, e.g., for
  presence/absence is the species totally absent or totally present
  at all sites?
  For presence/absence data \code{sort(colMeans(data))} can help
  avoid such species.
  }
  \item{(3)}{
  The number of explanatory variables should be kept low, e.g.,
  7 max.
  }
  \item{(4)}{
  Each explanatory variable should be screened individually first,
  e.g., is it heavily skewed or are there outliers?
  They should be plotted and then transformed where needed.
  They should not be too highly correlated with each other.
  }
  \item{(5)}{
  Each explanatory variable should be scaled, e.g., to mean 0 and
  unit variance.
  This is especially needed for \code{I.tolerance = TRUE}.
  }
  \item{(6)}{
  Keep the rank low.
  Only if the data is very good should a rank-2 model be attempted.
  Usually a rank-1 model is all that is practically possible even
  after a lot of work.
  The rank-1 model should always be attempted first.
  Then one might be clever and try using this for initial values for
  a rank-2 model.
  }
  \item{(7)}{
  If the number of sites is large then choose a random sample of
  them.
  For example, choose a maximum of 500 sites.
  This will reduce the memory and time expense of the computations.
  }
  \item{(8)}{
  Try \code{I.tolerance = TRUE} or \code{eq.tolerance = FALSE} if the
  inputted data set is large, so as to reduce the computational
  expense.
  That's because the default,
  \code{I.tolerance = FALSE} and \code{eq.tolerance = TRUE},
  is very memory hungry.
  }
  }

  By default, a rank-1 equal-tolerances QRR-VGLM model is fitted (see
  \code{\link{qrrvglm.control}} for the default control parameters).
  If \code{Rank > 1} then the latent variables are always transformed
  so that they are uncorrelated.
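  As a small sketch linking a fitted model to the central formula in
  the Details section (slot names as used by
  \code{\link{Coef.qrrvglm}}; treat this as indicative only):

% cp <- Coef(p1et)  # p1et: the rank-1 equal-tolerances fit, Example 2
% cp@A              # The matrix A of the central formula
% cp@C              # The constrained (canonical) coefficients C
% cp@Tolerance      # Tolerance matrices, T_s = -0.5 * solve(D_s)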
By default, the argument \code{trace} is \code{TRUE} meaning a running log is printed out while the computations are taking place. This is because the algorithm is computationally expensive, therefore users might think that their computers have frozen if \code{trace = FALSE}! The argument \code{Bestof} in \code{\link{qrrvglm.control}} controls the number of models fitted (each uses different starting values) to the data. This argument is important because convergence may be to a \emph{local} solution rather than the \emph{global} solution. Using more starting values increases the chances of finding the global solution. Always plot an ordination diagram (use the generic function \code{\link{lvplot}}) and see if it looks sensible. Local solutions arise because the optimization problem is highly nonlinear, and this is particularly true for CAO. %Convergence of QRR-VGLMs can be difficult, especially for binary %data. If this is so, then setting \code{I.tolerances = TRUE} or %\code{eq.tolerances = TRUE} may help, especially when the number of sites, %\eqn{n}, is small. %If the negative binomial family function \code{\link{negbinomial}} is %used for \code{cqo} then set \code{negbinomial(deviance = TRUE)} %is necessary. This means to minimize the deviance, which the fast %algorithm can handle. Many of the arguments applicable to \code{cqo} are common to \code{\link{vglm}} and \code{\link{rrvglm.control}}. The most important arguments are \code{Rank}, \code{noRRR}, \code{Bestof}, \code{I.tolerances}, \code{eq.tolerances}, \code{isd.latvar}, and \code{MUXfactor}. When fitting a 2-parameter model such as the negative binomial or gamma, it pays to have \code{eq.tolerances = TRUE} and \code{I.tolerances = FALSE}. This is because numerical problems can occur when fitting the model far away from the global solution when \code{I.tolerances = TRUE}. Setting the two arguments as described will slow down the computation considerably, however it is numerically more stable. In Example 1 below, an unequal-tolerances rank-1 QRR-VGLM is fitted to the hunting spiders dataset, and Example 2 is the equal-tolerances version. The latter is less likely to have convergence problems compared to the unequal-tolerances model. In Example 3 below, an equal-tolerances rank-2 QRR-VGLM is fitted to the hunting spiders dataset. The numerical difficulties encountered in fitting the rank-2 model suggests a rank-1 model is probably preferable. In Example 4 below, constrained binary quadratic ordination (in old nomenclature, constrained Gaussian logit ordination) is fitted to some simulated data coming from a species packing model. With multivariate binary responses, one must use \code{multiple.responses = TRUE} to indicate that the response (matrix) is multivariate. Otherwise, it is interpreted as a single binary response variable. In Example 5 below, the deviance residuals are plotted for each species. This is useful as a diagnostic plot. This is done by (re)regressing each species separately against the latent variable. Sometime in the future, this function might handle input of the form \code{cqo(x, y)}, where \code{x} and \code{y} are matrices containing the environmental and species data respectively. } \section{Warning }{ Local solutions are not uncommon when fitting CQO models. To increase the chances of obtaining the global solution, increase the value of the argument \code{Bestof} in \code{\link{qrrvglm.control}}. 
For reproducibility of the results, it pays to set a different random number seed before calling \code{cqo} (the function \code{\link[base:Random]{set.seed}} does this). The function \code{cqo} chooses initial values for \bold{C} using \code{.Init.Poisson.QO()} if \code{Use.Init.Poisson.QO = TRUE}, else random numbers. Unless \code{I.tolerances = TRUE} or \code{eq.tolerances = FALSE}, CQO is computationally expensive with memory and time. It pays to keep the rank down to 1 or 2. If \code{eq.tolerances = TRUE} and \code{I.tolerances = FALSE} then the cost grows quickly with the number of species and sites (in terms of memory requirements and time). The data needs to conform quite closely to the statistical model, and the environmental range of the data should be wide in order for the quadratics to fit the data well (bell-shaped response surfaces). If not, RR-VGLMs will be more appropriate because the response is linear on the transformed scale (e.g., log or logit) and the ordination is called \emph{constrained linear ordination} or CLO. Like many regression models, CQO is sensitive to outliers (in the environmental and species data), sparse data, high leverage points, multicollinearity etc. For these reasons, it is necessary to examine the data carefully for these features and take corrective action (e.g., omitting certain species, sites, environmental variables from the analysis, transforming certain environmental variables, etc.). Any optimum lying outside the convex hull of the site scores should not be trusted. Fitting a CAO is recommended first, then upon transformations etc., possibly a CQO can be fitted. For binary data, it is necessary to have `enough' data. In general, the number of sites \eqn{n} ought to be much larger than the number of species \emph{S}, e.g., at least 100 sites for two species. Compared to count (Poisson) data, numerical problems occur more frequently with presence/absence (binary) data. For example, if \code{Rank = 1} and if the response data for each species is a string of all absences, then all presences, then all absences (when enumerated along the latent variable) then infinite parameter estimates will occur. In general, setting \code{I.tolerances = TRUE} may help. This function was formerly called \code{cgo}. It has been renamed to reinforce a new nomenclature described in Yee (2006). } \seealso{ \code{\link{qrrvglm.control}}, \code{\link{Coef.qrrvglm}}, \code{\link{predictqrrvglm}}, \code{\link{calibrate.qrrvglm}}, \code{\link{model.matrixqrrvglm}}, \code{\link{vcovqrrvglm}}, \code{\link{rcqo}}, \code{\link{cao}}, \code{\link{uqo}}, \code{\link{rrvglm}}, \code{\link{poissonff}}, \code{\link{binomialff}}, \code{\link{negbinomial}}, \code{\link{gamma2}}, \code{\link{lvplot.qrrvglm}}, \code{\link{perspqrrvglm}}, \code{\link{trplot.qrrvglm}}, \code{\link{vglm}}, \code{\link[base:Random]{set.seed}}, \code{\link{hspider}}, \code{\link[VGAMdata]{trapO}}. % \code{\link{rrvglm.control}}, % \code{\link{vcovqrrvglm}}, %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. 
} \examples{ \dontrun{ # Example 1; Fit an unequal tolerances model to the hunting spiders data hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental variables set.seed(1234) # For reproducibility of the results p1ut <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, fam = poissonff, data = hspider, Crow1positive = FALSE, eq.tol = FALSE) sort(deviance(p1ut, history = TRUE)) # A history of all the iterations if (deviance(p1ut) > 1177) warning("suboptimal fit obtained") S <- ncol(depvar(p1ut)) # Number of species clr <- (1:(S+1))[-7] # Omits yellow lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr, las = 1) # Ordination diagram legend("topright", leg = colnames(depvar(p1ut)), col = clr, pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2) (cp <- Coef(p1ut)) (a <- latvar(cp)[cp@latvar.order]) # Ordered site scores along the gradient # Names of the ordered sites along the gradient: rownames(latvar(cp))[cp@latvar.order] (aa <- Opt(cp)[, cp@Optimum.order]) # Ordered optimums along the gradient aa <- aa[!is.na(aa)] # Delete the species that is not unimodal names(aa) # Names of the ordered optimums along the gradient trplot(p1ut, which.species = 1:3, log = "xy", type = "b", lty = 1, lwd = 2, col = c("blue","red","green"), label = TRUE) -> ii # Trajectory plot legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "), lwd = 2, lty = 1, col = c("blue", "red", "green")) abline(a = 0, b = 1, lty = "dashed") S <- ncol(depvar(p1ut)) # Number of species clr <- (1:(S+1))[-7] # Omits yellow persp(p1ut, col = clr, label = TRUE, las = 1) # Perspective plot # Example 2; Fit an equal tolerances model. Less numerically fraught. set.seed(1234) p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, Crow1positive = FALSE) sort(deviance(p1et, history = TRUE)) # A history of all the iterations if (deviance(p1et) > 1586) warning("suboptimal fit obtained") S <- ncol(depvar(p1et)) # Number of species clr <- (1:(S+1))[-7] # Omits yellow persp(p1et, col = clr, label = TRUE, las = 1) # Example 3: A rank-2 equal tolerances CQO model with Poisson data # This example is numerically fraught... need I.toler = TRUE too. 
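# (I.toler = TRUE constrains every species' tolerance matrix to be the
#  rank-R identity matrix, which helps stabilize rank-2 fits; see
#  qrrvglm.control.)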
set.seed(555) p2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, Crow1positive = FALSE, I.toler = TRUE, Rank = 2, Bestof = 3, isd.latvar = c(2.1, 0.9)) sort(deviance(p2, history = TRUE)) # A history of all the iterations if (deviance(p2) > 1127) warning("suboptimal fit obtained") lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4), C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey", pcol = "blue", pch = "+", chull = TRUE, ccol = "grey") # Example 4: species packing model with presence/absence data set.seed(2345) n <- 200; p <- 5; S <- 5 mydata <- rcqo(n, p, S, fam = "binomial", hi.abundance = 4, eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE) myform <- attr(mydata, "formula") set.seed(1234) b1et <- cqo(myform, binomialff(multiple.responses = TRUE, link = "clogloglink"), data = mydata) sort(deviance(b1et, history = TRUE)) # A history of all the iterations lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1) Coef(b1et) # Compare the fitted model with the 'truth' cbind(truth = attr(mydata, "concoefficients"), fitted = concoef(b1et)) # Example 5: Plot the deviance residuals for diagnostic purposes set.seed(1234) p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, eq.tol = TRUE, trace = FALSE) sort(deviance(p1et, history = TRUE)) # A history of all the iterations if (deviance(p1et) > 1586) warning("suboptimal fit obtained") S <- ncol(depvar(p1et)) par(mfrow = c(3, 4)) for (ii in 1:S) { tempdata <- data.frame(latvar1 = c(latvar(p1et)), sppCounts = depvar(p1et)[, ii]) tempdata <- transform(tempdata, myOffset = -0.5 * latvar1^2) # For species ii, refit the model to get the deviance residuals fit1 <- vglm(sppCounts ~ offset(myOffset) + latvar1, poissonff, data = tempdata, trace = FALSE) # For checking: this should be 0 # print("max(abs(c(Coef(p1et)@B1[1,ii],Coef(p1et)@A[ii,1])-coef(fit1)))") # print( max(abs(c(Coef(p1et)@B1[1,ii],Coef(p1et)@A[ii,1])-coef(fit1))) ) # Plot the deviance residuals devresid <- resid(fit1, type = "deviance") predvalues <- predict(fit1) + fit1@offset ooo <- with(tempdata, order(latvar1)) plot(predvalues + devresid ~ latvar1, data = tempdata, col = "red", xlab = "latvar1", ylab = "", main = colnames(depvar(p1et))[ii]) with(tempdata, lines(latvar1[ooo], predvalues[ooo], col = "blue")) } } } \keyword{models} \keyword{regression} %legend("topright", x=1, y=135, leg = colnames(depvar(p1ut)), col = clr, % pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2) VGAM/man/chinese.nz.Rd0000644000176200001440000000567013565414527014136 0ustar liggesusers\name{chinese.nz} \alias{chinese.nz} \docType{data} \title{ Chinese Population in New Zealand 1867--2001 Data} \description{ The Chinese population in New Zealand from 1867 to 2001, along with the whole of the New Zealand population. } \usage{data(chinese.nz)} \format{ A data frame with 27 observations on the following 4 variables. \describe{ \item{\code{year}}{Year. } \item{\code{male}}{Number of Chinese males. } \item{\code{female}}{Number of Chinese females. } \item{\code{nz}}{Total number in the New Zealand population. 
} } }
\details{
  Historically, there was a large exodus of Chinese from the Guangdong
  region starting in the mid-1800s to the gold fields of the
  South Island of New Zealand, California, and southern Australia, etc.
  Discrimination then meant that only men were allowed entry,
  to hinder permanent settlement.
  In the case of New Zealand, the government relaxed its immigration
  laws after WWII to allow wives of Chinese already in NZ to join them
  because China had been among the Allied powers.
  A gradual relaxation of the immigration laws and an influx during the
  1980s meant that the Chinese population became increasingly
  demographically normal over time.

  The NZ totals for the years 1867 and 1871 exclude the Maori population.
  Three modifications have been made to the female column to make the
  data internally consistent with the original table.

% The second value of 4583 looks erroneous, as seen by the plot below.

}
%\source{
%}
\references{
  Page 6 of
  \emph{Aliens At My Table: Asians as New Zealanders See Them}
  by M. Ip and N. Murphy (2005).
  Penguin Books.
  Auckland, New Zealand.
}
\examples{
\dontrun{
par(mfrow = c(1, 2))
plot(female / (male + female) ~ year, chinese.nz, type = "b",
     ylab = "Proportion", col = "blue", las = 1,
     cex = 0.015 * sqrt(male + female),
#    cex = 0.10 * sqrt((male + female)^1.5 / sqrt(female) / sqrt(male)),
     main = "Proportion of NZ Chinese that are female")
abline(h = 0.5, lty = "dashed", col = "gray")

fit1.cnz <- vglm(cbind(female, male) ~ year, binomialff,
                 data = chinese.nz)
fit2.cnz <- vglm(cbind(female, male) ~ sm.poly(year, 2), binomialff,
                 data = chinese.nz)
fit4.cnz <- vglm(cbind(female, male) ~ sm.bs(year, 5), binomialff,
                 data = chinese.nz)

lines(fitted(fit1.cnz) ~ year, chinese.nz, col = "purple", lty = 1)
lines(fitted(fit2.cnz) ~ year, chinese.nz, col = "green", lty = 2)
lines(fitted(fit4.cnz) ~ year, chinese.nz, col = "orange", lwd = 2, lty = 1)
legend("bottomright", col = c("purple", "green", "orange"),
       lty = c(1, 2, 1), leg = c("linear", "quadratic", "B-spline"))

plot(100*(male+female)/nz ~ year, chinese.nz, type = "b",
     ylab = "Percent", ylim = c(0, max(100*(male+female)/nz)),
     col = "blue", las = 1,
     main = "Percent of NZers that are Chinese")
abline(h = 0, lty = "dashed", col = "gray")
} }
\keyword{datasets}

% Albany, Auckland, New Zealand.
VGAM/man/hyperg.Rd0000644000176200001440000001031613565414527013361 0ustar liggesusers\name{hyperg}
%\alias{hyperg}
\alias{hyperg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Hypergeometric Family Function }
\description{
  Family function for a hypergeometric distribution where either the
  number of white balls or the total number of white and black balls
  is unknown.
}
\usage{
hyperg(N = NULL, D = NULL, lprob = "logitlink", iprob = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{N}{
    Total number of white and black balls in the urn.
    Must be a vector with positive values, and is recycled, if
    necessary, to the same length as the response.
    One of \code{N} and \code{D} must be specified.
  }
  \item{D}{
    Number of white balls in the urn.
    Must be a vector with positive values, and is recycled, if
    necessary, to the same length as the response.
    One of \code{N} and \code{D} must be specified.
  }
  \item{lprob}{
    Link function for the probabilities.
    See \code{\link{Links}} for more choices.
  }
  \item{iprob}{
    Optional initial value for the probabilities.
    The default is to choose initial values internally.
} } \details{ Consider the scenario from \code{\link[stats]{dhyper}} where there are \eqn{N=m+n} balls in an urn, where \eqn{m} are white and \eqn{n} are black. A simple random sample (i.e., \emph{without} replacement) of \eqn{k} balls is taken. The response here is the sample \emph{proportion} of white balls. In this document, \code{N} is \eqn{N=m+n}, \code{D} is \eqn{m} (for the number of ``defectives'', in quality control terminology, or equivalently, the number of marked individuals). The parameter to be estimated is the population proportion of white balls, viz. \eqn{prob = m/(m+n)}. Depending on which one of \code{N} and \code{D} is inputted, the estimate of the other parameter can be obtained from the equation \eqn{prob = m/(m+n)}, or equivalently, \code{prob = D/N}. However, the log-factorials are computed using \code{\link[base]{lgamma}} and both \eqn{m} and \eqn{n} are not restricted to being integer. Thus if an integer \eqn{N} is to be estimated, it will be necessary to evaluate the likelihood function at integer values about the estimate, i.e., at \code{trunc(Nhat)} and \code{ceiling(Nhat)} where \code{Nhat} is the (real) estimate of \eqn{N}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{vgam}}, \code{\link{rrvglm}}, \code{\link{cqo}}, and \code{\link{cao}}. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ Thomas W. Yee } \note{ The response can be of one of three formats: a factor (first level taken as success), a vector of proportions of success, or a 2-column matrix (first column = successes) of counts. The argument \code{weights} in the modelling function can also be specified. In particular, for a general vector of proportions, you will need to specify \code{weights} because the number of trials is needed. } \seealso{ \code{\link[stats]{dhyper}}, \code{\link{binomialff}}. } \section{Warning }{ No checking is done to ensure that certain values are within range, e.g., \eqn{k \leq N}{k <= N}. } \examples{ nn <- 100 m <- 5 # Number of white balls in the population k <- rep(4, len = nn) # Sample sizes n <- 4 # Number of black balls in the population y <- rhyper(nn = nn, m = m, n = n, k = k) yprop <- y / k # Sample proportions # N is unknown, D is known. Both models are equivalent: fit <- vglm(cbind(y,k-y) ~ 1, hyperg(D = m), trace = TRUE, crit = "c") fit <- vglm(yprop ~ 1, hyperg(D = m), weight = k, trace = TRUE, crit = "c") # N is known, D is unknown. Both models are equivalent: fit <- vglm(cbind(y, k-y) ~ 1, hyperg(N = m+n), trace = TRUE, crit = "l") fit <- vglm(yprop ~ 1, hyperg(N = m+n), weight = k, trace = TRUE, crit = "l") coef(fit, matrix = TRUE) Coef(fit) # Should be equal to the true population proportion unique(m / (m+n)) # The true population proportion fit@extra head(fitted(fit)) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/lrt.stat.Rd0000644000176200001440000000752413565414527013645 0ustar liggesusers\name{lrt.stat} \alias{lrt.stat} \alias{lrt.stat.vlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Likelihood Ratio Test Statistics Evaluated at the Null Values } \description{ Generic function that computes likelihood ratio test (LRT) statistics evaluated at the null values (consequently they do not suffer from the Hauck-Donner effect). } \usage{ lrt.stat(object, ...) 
lrt.stat.vlm(object, values0 = 0, subset = NULL, omit1s = TRUE, all.out = FALSE, trace = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object, values0, subset}{ Same as in \code{\link{wald.stat.vlm}}. } \item{omit1s, all.out, trace}{ Same as in \code{\link{wald.stat.vlm}}. } \item{\dots}{ Ignored for now. } } \details{ When \code{summary()} is applied to a \code{\link{vglm}} object a 4-column Wald table is produced. The corresponding p-values are generally viewed as inferior to those from a likelihood ratio test (LRT). For example, the Hauck and Donner (1977) effect (HDE) produces p-values that are biased upwards (see \code{\link{hdeff}}). Other reasons are that the Wald test is often less accurate (especially in small samples) and is not invariant to parameterization. By default, this function returns p-values based on the LRT by deleting one column at a time from the big VLM matrix and then starting up IRLS to convergence (hopefully). Twice the difference between the log-likelihoods (or equivalently, the difference in the deviances if they are defined) is asymptotically chi-squared with 1 degree of freedom. One might expect the p-values from this function therefore to be more accurate and not suffer from the HDE. Thus this function is a recommended alternative (if it works) to \code{\link{summaryvglm}} for testing for the significance of a regression coefficient. } \value{ By default, a vector of signed square root of the LRT statistics; these are asymptotically standard normal under the null hypotheses. If \code{all.out = TRUE} then a list is returned with the following components: \code{lrt.stat} the signed LRT statistics, \code{pvalues} the 2-sided p-values, \code{Lrt.stat2} the usual LRT statistic, \code{values0} the null values. % and some other are detailed in \code{\link{wald.stat.vlm}} % By default, a vector of (2-sided test) p-values. % If the model is intercept-only then a \code{NULL} is returned % by default. % If \code{lrt.stat = TRUE} then a 2-column matrix is returned % comprising of p-values and LRT statistics. } %\references{ %} \author{ T. W. Yee. } \section{Warning }{ See \code{\link{wald.stat.vlm}}. } %\note{ % Only models with a full-likelihood are handled, % so that quasi-type models such as \code{\link{quasipoissonff}} % should not be fed in. %% One day this function might allow for terms, %% such as arising from \code{\link[stats]{poly}} %% and \code{\link[splines]{bs}}. %% i.e., some of the columns are grouped together, %} \seealso{ \code{\link{score.stat}}, \code{\link{wald.stat}}, \code{\link{summaryvglm}}, \code{\link{anova.vglm}}, \code{\link{vglm}}, \code{\link{lrtest}}, \code{\link{confintvglm}}, \code{\link[stats]{pchisq}}, \code{\link{profilevglm}}, \code{\link{hdeff}}. % \code{\link[stats]{profile}}, % \code{\link[MASS]{profile.glm}}, % \code{\link[MASS]{plot.profile}}. % \code{\link{multinomial}}, % \code{\link{cumulative}}, } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo) cbind(coef(summary(fit)), "signed LRT stat" = lrt.stat(fit, omit1s = FALSE)) summary(fit, lrt0 = TRUE) # Easy way to get it } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} \keyword{regression} VGAM/man/Max.Rd0000644000176200001440000000432713565414527012615 0ustar liggesusers\name{Max} \alias{Max} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Maximums } \description{ Generic function for the \emph{maximums} (maxima) of a model. } \usage{ Max(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the computation or extraction of a maximum (or maximums) is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. Sometimes they are fed into the methods function for \code{\link{Coef}}. } } \details{ Different models can define a maximum in different ways. Many models have no such notion or definition. Maximums occur in quadratic and additive ordination, e.g., CQO or CAO. For these models the maximum is the fitted value at the optimum. For quadratic ordination models there is a formula for the optimum but for additive ordination models the optimum must be searched for numerically. If it occurs on the boundary, then the optimum is undefined. For a valid optimum, the fitted value at the optimum is the maximum. % e.g., CQO or UQO or CAO. } \value{ The value returned depends specifically on the methods function invoked. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W. Yee } %\note{ %} %\section{Warning }{ %} \seealso{ \code{Max.qrrvglm}, \code{\link{Tol}}, \code{\link{Opt}}. } \examples{ \dontrun{ set.seed(111) # This leads to the global solution hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, Bestof = 2, data = hspider, Crow1positive = FALSE) Max(p1) index <- 1:ncol(depvar(p1)) persp(p1, col = index, las = 1, llwd = 2) abline(h = Max(p1), lty = 2, col = index) } } \keyword{models} \keyword{regression} VGAM/man/logofflink.Rd0000644000176200001440000000431313565414527014215 0ustar liggesusers\name{logofflink} \alias{logofflink} % \alias{logoff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Log Link Function with an Offset } \description{ Computes the log transformation with an offset, including its inverse and the first two derivatives. } \usage{ logofflink(theta, offset = 0, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{offset}{ Offset value. See \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The log-offset link function is very commonly used for parameters that are greater than a certain value. In particular, it is defined by \code{log(theta + offset)} where \code{offset} is the offset value. For example, if \code{offset = 0.5} then the value of \code{theta} is restricted to be greater than \eqn{-0.5}. Numerical values of \code{theta} close to \code{-offset} or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. } \value{ For \code{deriv = 0}, the log of \code{theta+offset}, i.e., \code{log(theta+offset)} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{exp(theta)-offset}. 
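For example, with an offset of 0.5,
\code{logofflink(theta = 1, offset = 0.5)} should equal
\code{log(1.5)}, and
\code{logofflink(log(1.5), offset = 0.5, inverse = TRUE)}
should return the value 1, since \code{exp(log(1.5)) - 0.5} is 1.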
For \code{deriv = 1}, the function returns
\emph{d} \code{eta} / \emph{d} \code{theta} as a function of
\code{theta} if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.

Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
}
\references{
  McCullagh, P. and Nelder, J. A. (1989)
  \emph{Generalized Linear Models}, 2nd ed.
  London: Chapman & Hall.
}
\author{ Thomas W. Yee }
\note{
  The default means this function is identical to \code{\link{loglink}}.

  Numerical instability may occur when \code{theta} is close to
  \code{-offset}.
}
\seealso{
  \code{\link{Links}},
  \code{\link{loglink}}.
}
\examples{
\dontrun{
logofflink(seq(-0.2, 0.5, by = 0.1))
logofflink(seq(-0.2, 0.5, by = 0.1), offset = 0.5)
log(seq(-0.2, 0.5, by = 0.1) + 0.5)
} }
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/model.framevlm.Rd0000644000176200001440000000440713565414527014777 0ustar liggesusers\name{model.framevlm}
\alias{model.framevlm}
\title{Construct the Model Frame of a VLM Object}
\usage{
model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
}
\arguments{
  \item{object}{a model object from the \pkg{VGAM} \R package
    that inherits from a \emph{vector linear model} (VLM),
    e.g., a model of class \code{"vglm"}.}
  \item{\dots}{further arguments such as \code{data},
    \code{na.action}, \code{subset}.
    See \code{\link[stats]{model.frame}} for more information on these.
  }
  \item{setupsmart, wrapupsmart}{
    Logical.
    Arguments to determine whether to use smart prediction.
  }
}
\description{
  This function returns a \code{\link{data.frame}} with the variables
  used by the model.
  It is applied to an object which inherits from class \code{"vlm"}
  (e.g., a fitted model of class \code{"vglm"}).
}
\details{Since \code{object} is
  an object which inherits from class \code{"vlm"} (e.g.,
  a fitted model of class \code{"vglm"}),
  the method will either return the saved model frame
  used when fitting the model (if any, selected by argument
  \code{model = TRUE}) or pass the call used when fitting on to
  the default method.

  This code implements \emph{smart prediction}
  (see \code{\link{smartpred}}).
}
\value{
  A \code{\link{data.frame}} containing the variables used in
  the \code{object} plus those specified in \code{\dots}.
}
\seealso{
  \code{\link[stats]{model.frame}},
  \code{\link{model.matrixvlm}},
  \code{\link{predictvglm}},
  \code{\link{smartpred}}.
}
\references{
  Chambers, J. M. (1992)
  \emph{Data for models.}
  Chapter 3 of \emph{Statistical Models in S}
  eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.
}
\examples{
# Illustrates smart prediction
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ poly(c(scale(let)), 2),
            multinomial, data = pneumo, trace = TRUE, x = FALSE)
class(fit)

check1 <- head(model.frame(fit))
check1
check2 <- model.frame(fit, data = head(pneumo))
check2
all.equal(unlist(check1), unlist(check2))  # Should be TRUE

q0 <- head(predict(fit))
q1 <- head(predict(fit, newdata = pneumo))
q2 <- predict(fit, newdata = head(pneumo))
all.equal(q0, q1)  # Should be TRUE
all.equal(q1, q2)  # Should be TRUE
}
\keyword{models}
VGAM/man/AR1UC.Rd0000644000176200001440000000476013565414527012704 0ustar liggesusers\name{dAR1}
\alias{dAR1}
%\alias{pAR1}
%\alias{qAR1}
%\alias{rAR1}
\title{The AR-1 Autoregressive Process}
\description{
  Density for the AR-1 model.
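  Under stationarity, a standard AR(1) result is that each observation
  has a normal marginal distribution with mean
  \eqn{\mu^*/(1-\rho)}{mu^*/(1-rho)} and variance
  \eqn{\sigma^2/(1-\rho^2)}{sigma^2/(1-rho^2)};
  the exact likelihood uses this distribution for the first observation.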
}
\usage{
dAR1(x, drift = 0, var.error = 1, ARcoef1 = 0.0,
     type.likelihood = c("exact", "conditional"), log = FALSE)
}
\arguments{
  \item{x}{vector of quantiles.}
  \item{drift}{
    The scaled mean (also known as the \emph{drift} parameter),
    \eqn{\mu^*}{mu^*}.
    Note that the mean is \eqn{\mu^* /(1-\rho)}{mu^* / (1-rho)}.
    The default corresponds to observations that have mean 0.
  }
  \item{log}{
    Logical.
    If \code{TRUE} then the logarithm of the density is returned.
  }
  \item{type.likelihood, var.error, ARcoef1}{
    See \code{\link{AR1}}.
    The argument \code{ARcoef1} is \eqn{\rho}{rho}.
    The argument \code{var.error} is the variance of the i.i.d. random
    noise, i.e., \eqn{\sigma^2}{sigma^2}.
    If \code{type.likelihood = "conditional"} then the first element or
    row of the result is currently assigned \code{NA}---this is because
    the density of the first observation is effectively ignored.
  }
}
\value{
  \code{dAR1} gives the density.

% \code{pAR1} gives the distribution function, and
% \code{qAR1} gives the quantile function, and
% \code{rAR1} generates random deviates.

}
\author{ T. W. Yee and Victor Miranda }
\details{
  Most of the background to this function is given in \code{\link{AR1}}.
  All the arguments are converted into matrices, and then all their
  dimensions are obtained. They are then coerced into the same size:
  the number of rows is the maximum of all the single rows, and ditto
  for the number of columns.
}
%\note{
%}
\seealso{
  \code{\link{AR1}}.
}
\examples{
nn <- 100; set.seed(1)
tdata <- data.frame(index = 1:nn,
                    TS1 = arima.sim(nn, model = list(ar = -0.50),
                                    sd = exp(1)))
fit1 <- vglm(TS1 ~ 1, AR1, data = tdata, trace = TRUE)
rhobitlink(-0.5)
coef(fit1, matrix = TRUE)
(Cfit1 <- Coef(fit1))
summary(fit1)  # SEs are useful to know
logLik(fit1)
sum(dAR1(depvar(fit1), drift = Cfit1[1], var.error = (Cfit1[2])^2,
         ARcoef1 = Cfit1[3], log = TRUE))

fit2 <- vglm(TS1 ~ 1, AR1(type.likelihood = "cond"), data = tdata,
             trace = TRUE)
(Cfit2 <- Coef(fit2))  # Okay for intercept-only models
logLik(fit2)
head(keep <- dAR1(depvar(fit2), drift = Cfit2[1],
                  var.error = (Cfit2[2])^2, ARcoef1 = Cfit2[3],
                  type.likelihood = "cond", log = TRUE))
sum(keep[-1])
}
\keyword{distribution}
VGAM/man/binormal.Rd0000644000176200001440000000656313565414527013673 0ustar liggesusers\name{binormal}
\alias{binormal}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bivariate Normal Distribution Family Function }
\description{
  Maximum likelihood estimation of the five parameters of a bivariate
  normal distribution.
}
\usage{
binormal(lmean1 = "identitylink", lmean2 = "identitylink",
         lsd1 = "loglink", lsd2 = "loglink", lrho = "rhobitlink",
         imean1 = NULL, imean2 = NULL, isd1 = NULL, isd2 = NULL,
         irho = NULL, imethod = 1, eq.mean = FALSE, eq.sd = FALSE,
         zero = c("sd", "rho"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lmean1, lmean2, lsd1, lsd2, lrho}{
    Link functions applied to the means, standard deviations and
    \code{rho} parameters.
    See \code{\link{Links}} for more choices.
    Being positive quantities, a log link is the default for the
    standard deviations.
  }
  \item{imean1, imean2, isd1, isd2, irho, imethod, zero}{
    See \code{\link{CommonVGAMffArguments}} for more information.
  }
  \item{eq.mean, eq.sd}{
    Logical or formula.
    Constrains the means or the standard deviations to be equal.

% 20150530; FALSE now; they work separately:
% Only one of these arguments may be assigned a value.
} }
\details{
  For the bivariate normal distribution, this fits a linear model (LM)
  to the means, and by default, the other parameters are intercept-only.
  The response should be a two-column matrix.
  The correlation parameter is \code{rho}, which lies between
  \eqn{-1} and \eqn{1}
  (thus the \code{\link{rhobitlink}} link is a reasonable choice).
  The fitted means are returned as the fitted values, in the form of
  a two-column matrix.
  Fisher scoring is implemented.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as
  \code{\link{vglm}}, and \code{\link{vgam}}.
}
\section{Warning}{
  This function may be renamed to \code{normal2()} or something like
  that at a later date.
}
%\references{
%
%}
\author{ T. W. Yee }
\note{
  If both equal means and equal standard deviations are desired then use
  something like
  \code{constraints = list("(Intercept)" = matrix(c(1,1,0,0,0, 0,0,1,1,0, 0,0,0,0,1), 5, 3))}
  and maybe \code{zero = NULL} etc.
}
\seealso{
  \code{\link{uninormal}},
  \code{\link{trinormal}},
  \code{\link{pbinorm}},
  \code{\link{bistudentt}}.

% \code{\link{gaussianff}},
% \code{\link{pnorm2}},

}
\examples{
set.seed(123); nn <- 1000
bdata <- data.frame(x2 = runif(nn), x3 = runif(nn))
bdata <- transform(bdata,
                   y1 = rnorm(nn, 1 + 2 * x2),
                   y2 = rnorm(nn, 3 + 4 * x2))
fit1 <- vglm(cbind(y1, y2) ~ x2,
             binormal(eq.sd = TRUE), data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
constraints(fit1)
summary(fit1)

# Estimated P(Y1 <= y1, Y2 <= y2) under the fitted model
var1  <- loglink(2 * predict(fit1)[, "loglink(sd1)"], inverse = TRUE)
var2  <- loglink(2 * predict(fit1)[, "loglink(sd2)"], inverse = TRUE)
cov12 <- rhobitlink(predict(fit1)[, "rhobitlink(rho)"], inverse = TRUE)
head(with(bdata, pbinorm(y1, y2,
                         mean1 = predict(fit1)[, "mean1"],
                         mean2 = predict(fit1)[, "mean2"],
                         var1 = var1, var2 = var2, cov12 = cov12)))
}
\keyword{models}
\keyword{regression}
VGAM/man/logclink.Rd0000644000176200001440000000372613565414527013672 0ustar liggesusers\name{logclink}
\alias{logclink}
% \alias{logc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Complementary-log Link Function }
\description{
  Computes the complementary-log transformation, including its inverse
  and the first two derivatives.
}
\usage{
logclink(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
         short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{theta}{
    Numeric or character.
    See below for further details.
  }
  \item{bvalue}{
    See \code{\link{Links}}.
  }
  \item{inverse, deriv, short, tag}{
    Details at \code{\link{Links}}.
  }
}
\details{
  The complementary-log link function is suitable for parameters that
  are less than unity.
  Numerical values of \code{theta} close to 1 or out of range result in
  \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
}
\value{
  For \code{deriv = 0}, the complementary log of \code{theta}, i.e.,
  \code{log(1 - theta)} when \code{inverse = FALSE},
  and if \code{inverse = TRUE} then \code{1 - exp(theta)}.

  For \code{deriv = 1}, the function returns
  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of
  \code{theta} if \code{inverse = FALSE},
  else if \code{inverse = TRUE} then it returns the reciprocal.

  Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
}
\references{
  McCullagh, P. and Nelder, J. A. (1989)
  \emph{Generalized Linear Models}, 2nd ed.
  London: Chapman & Hall.
}
\author{ Thomas W. Yee }
\note{
  Numerical instability may occur when \code{theta} is close to 1.
One way of overcoming this is to use \code{bvalue}. } \seealso{ \code{\link{Links}}, \code{\link{loglink}}, \code{\link{clogloglink}}, \code{\link{logloglink}}, \code{\link{logofflink}}. } \examples{ \dontrun{ logclink(seq(-0.2, 1.1, by = 0.1)) # Has NAs } logclink(seq(-0.2,1.1,by=0.1),bvalue=1-.Machine$double.eps) # Has no NAs } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/probitlink.Rd0000644000176200001440000000542413565414527014244 0ustar liggesusers\name{probitlink} \alias{probitlink} %\alias{probit} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Probit Link Function } \description{ Computes the probit transformation, including its inverse and the first two derivatives. } \usage{ probitlink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bvalue}{ See \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The probit link function is commonly used for parameters that lie in the unit interval. It is the inverse CDF of the standard normal distribution. Numerical values of \code{theta} close to 0 or 1 or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. } \value{ For \code{deriv = 0}, the probit of \code{theta}, i.e., \code{qnorm(theta)} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{pnorm(theta)}. For \code{deriv = 1}, then the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ Numerical instability may occur when \code{theta} is close to 1 or 0. One way of overcoming this is to use \code{bvalue}. In terms of the threshold approach with cumulative probabilities for an ordinal response this link function corresponds to the univariate normal distribution (see \code{\link{uninormal}}). } \seealso{ \code{\link{Links}}, \code{\link{logitlink}}, \code{\link{clogloglink}}, \code{\link{cauchitlink}}, \code{\link[stats]{Normal}}. } \examples{ p <- seq(0.01, 0.99, by = 0.01) probitlink(p) max(abs(probitlink(probitlink(p), inverse = TRUE) - p)) # Should be 0 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01)) probitlink(p) # Has NAs probitlink(p, bvalue = .Machine$double.eps) # Has no NAs \dontrun{p <- seq(0.01, 0.99, by = 0.01); par(lwd = (mylwd <- 2)) plot(p, logitlink(p), type = "l", col = "limegreen", ylab = "transformation", las = 1, main = "Some probability link functions") lines(p, probitlink(p), col = "purple") lines(p, clogloglink(p), col = "chocolate") lines(p, cauchitlink(p), col = "tan") abline(v = 0.5, h = 0, lty = "dashed") legend(0.1, 4, c("logitlink", "probitlink", "clogloglink", "cauchitlink"), col = c("limegreen", "purple", "chocolate", "tan"), lwd = mylwd) par(lwd = 1) } } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/vsmooth.spline.Rd0000644000176200001440000001552713565414527015064 0ustar liggesusers\name{vsmooth.spline} \alias{vsmooth.spline} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Vector Cubic Smoothing Spline } \description{ Fits a vector cubic smoothing spline. 
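  It is the smoother underlying the \code{\link[VGAM]{s}} terms fitted
  by \code{\link{vgam}}.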
}
\usage{
vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
               i.constraint = diag(M), x.constraint = diag(M),
               constraints = list("(Intercepts)" = i.constraint,
                                  x = x.constraint),
               all.knots = FALSE, var.arg = FALSE, scale.w = TRUE,
               nk = NULL, control.spar = list())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{
    A vector, matrix or a list.
    If a list, the \code{x} component is used.
    If a matrix, the first column is used.
    \code{x} may also be a complex vector, in which case
    the real part is used, and the imaginary part is used for the
    response.
    In this help file, \code{n} is the number of unique values of
    \code{x}.
  }
  \item{y}{
    A vector, matrix or a list.
    If a list, the \code{y} component is used.
    If a matrix, all but the first column are used.
    In this help file, \code{M} is the number of columns of \code{y}
    if there are no constraints on the functions.
  }
  \item{w}{
    The weight matrices or the number of observations.
    If weight matrices are supplied, then \code{w} must be an
    \code{n}-row matrix with the elements in matrix-band form
    (see \code{iam}).
    If a vector, then these are the number of observations.
    By default, \code{w} is the \code{M} by \code{M} identity matrix
    for each observation, which in matrix-band form is
    \code{matrix(1, n, M)}.
  }
  \item{df}{
    Numerical vector containing the degrees of freedom for each
    component function (smooth).
    If necessary, the vector is recycled to have length equal to the
    number of component functions to be estimated (\code{M} if there
    are no constraints), which equals the number of columns of the
    \code{x}-constraint matrix.
    A value of 2 means a linear fit, and each element of \code{df}
    should lie between 2 and \code{n}.
    The larger the values of \code{df} the more wiggly the smooths.
  }
  \item{spar}{
    Numerical vector containing the non-negative smoothing parameters
    for each component function (smooth).
    If necessary, the vector is recycled to have length equal to the
    number of component functions to be estimated (\code{M} if there
    are no constraints), which equals the number of columns of the
    \code{x}-constraint matrix.
    A value of zero means the smooth goes through the data and hence
    is wiggly.
    A value of \code{Inf} may be assigned, meaning the smooth will be
    linear.
    By default, the \code{NULL} value of \code{spar} means \code{df}
    is used to determine the smoothing parameters.
  }
  \item{all.knots}{
    Logical. If \code{TRUE} then each distinct value of \code{x} will
    be a knot. By default, only a subset of the unique values of
    \code{x} are used; typically, the number of knots is
    \code{O(n^0.25)} for \code{n} large,
    but if \code{n <= 40} then all the unique values of \code{x}
    are used.
  }
  \item{i.constraint}{
    An \code{M}-row constraint matrix for the intercepts.
    It must be of full column rank.
    By default, the constraint matrix for the intercepts is the
    \code{M} by \code{M} identity matrix, meaning no constraints.
  }
  \item{x.constraint}{
    An \code{M}-row constraint matrix for \code{x}.
    It must be of full column rank.
    By default, the constraint matrix for \code{x} is the
    \code{M} by \code{M} identity matrix, meaning no constraints.
  }
  \item{constraints}{
    An alternative to specifying \code{i.constraint} and
    \code{x.constraint}, this is a list with two components
    corresponding to the intercept and \code{x} respectively.
    They must both be an \code{M}-row constraint matrix with full
    column rank.
  }
  \item{var.arg}{
    Logical: return the pointwise variances of the fit?
    Currently, this corresponds only to the nonlinear part of the fit,
    and may be wrong.
  }
  \item{scale.w}{
    Logical.
By default, the weights \code{w} are scaled so that the diagonal elements have mean 1. } \item{nk}{ Number of knots. If used, this argument overrides \code{all.knots}, and must lie between 6 and \code{n}+2 inclusive. } \item{control.spar}{ See \code{\link[stats]{smooth.spline}}. } } \details{ The algorithm implemented is detailed in Yee (2000). It involves decomposing the component functions into a linear and nonlinear part, and using B-splines. The cost of the computation is \code{O(n M^3)}. The argument \code{spar} contains \emph{scaled} smoothing parameters. } \value{ An object of class \code{"vsmooth.spline"} (see \code{vsmooth.spline-class}). } \references{ Yee, T. W. (2000) Vector Splines and Other Vector Smoothers. Pages 529--534. In: Bethlehem, J. G. and van der Heijde, P. G. M. \emph{Proceedings in Computational Statistics COMPSTAT 2000}. Heidelberg: Physica-Verlag. } \author{ Thomas W. Yee } \note{ This function is quite similar to \code{\link[stats]{smooth.spline}} but offers less functionality. For example, cross validation is not implemented here. For \code{M = 1}, the results will be generally different, mainly due to the different way the knots are selected. The vector cubic smoothing spline which \code{s()} represents is computationally demanding for large \eqn{M}. The cost is approximately \eqn{O(n M^3)} where \eqn{n} is the number of unique abscissae. Yet to be done: return the \emph{unscaled} smoothing parameters. } %~Make other sections like WARNING with \section{WARNING }{....} ~ \section{WARNING}{ See \code{\link{vgam}} for information about an important bug. } \seealso{ \code{vsmooth.spline-class}, \code{plot.vsmooth.spline}, \code{predict.vsmooth.spline}, \code{iam}, \code{\link{sm.os}}, \code{\link[VGAM]{s}}, \code{\link[stats]{smooth.spline}}. } \examples{ nn <- 20; x <- 2 + 5*(nn:1)/nn x[2:4] <- x[5:7] # Allow duplication y1 <- sin(x) + rnorm(nn, sd = 0.13) y2 <- cos(x) + rnorm(nn, sd = 0.13) y3 <- 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints y <- cbind(y1, y2, y3) ww <- cbind(rep(3, nn), 4, (1:nn)/nn) (fit <- vsmooth.spline(x, y, w = ww, df = 5)) \dontrun{ plot(fit) # The 1st and 3rd functions do not differ by a constant } mat <- matrix(c(1,0,1, 0,1,0), 3, 2) (fit2 <- vsmooth.spline(x, y, w = ww, df = 5, i.constr = mat, x.constr = mat)) # The 1st and 3rd functions do differ by a constant: mycols <- c("orange", "blue", "orange") \dontrun{ plot(fit2, lcol = mycols, pcol = mycols, las = 1) } p <- predict(fit, x = model.matrix(fit, type = "lm"), deriv = 0) max(abs(depvar(fit) - with(p, y))) # Should be 0; and fit@y is not good par(mfrow = c(3, 1)) ux <- seq(1, 8, len = 100) for (dd in 1:3) { pp <- predict(fit, x = ux, deriv = dd) \dontrun{with(pp, matplot(x, y, type = "l", main = paste("deriv =", dd), lwd = 2, ylab = "", cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)) } } } \keyword{regression} \keyword{smooth} VGAM/man/seq2binomial.Rd0000644000176200001440000000757613565414527014466 0ustar liggesusers\name{seq2binomial} \alias{seq2binomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Two-stage Sequential Binomial Distribution Family Function } \description{ Estimation of the probabilities of a two-stage binomial distribution. } \usage{ seq2binomial(lprob1 = "logitlink", lprob2 = "logitlink", iprob1 = NULL, iprob2 = NULL, parallel = FALSE, zero = NULL) } %- maybe also 'usage' for other objects documented here. 
% apply.parint = TRUE, \arguments{ \item{lprob1, lprob2}{ Parameter link functions applied to the two probabilities, called \eqn{p} and \eqn{q} below. See \code{\link{Links}} for more choices. } \item{iprob1, iprob2}{ Optional initial value for the first and second probabilities respectively. A \code{NULL} means a value is obtained in the \code{initialize} slot. } \item{parallel, zero}{ Details at \code{\link{Links}}. If \code{parallel = TRUE} then the constraint also applies to the intercept. } } \details{ This \pkg{VGAM} family function fits the model described by Crowder and Sweeting (1989) which is described as follows. Each of \eqn{m} spores has a probability \eqn{p} of germinating. Of the \eqn{y_1}{y1} spores that germinate, each has a probability \eqn{q} of bending in a particular direction. Let \eqn{y_2}{y2} be the number that bend in the specified direction. The probability model for this data is \eqn{P(y_1,y_2) =}{P(y1,y2) =} \deqn{ {m \choose y_1} p^{y_1} (1-p)^{m-y_1} {y_1 \choose y_2} q^{y_2} (1-q)^{y_1-y_2}}{% {choose(m,y1)} p^{y1} (1-p)^{m-y1} {choose(y1,y2)} q^{y2} (1-q)^{y1-y2}} for \eqn{0 < p < 1}, \eqn{0 < q < 1}, \eqn{y_1=1,\ldots,m}{y1=1,\ldots,m} and \eqn{y_2=1,\ldots,y_1}{y2=1,\ldots,y1}. Here, \eqn{p} is \code{prob1}, \eqn{q} is \code{prob2}. Although the Authors refer to this as the \emph{bivariate binomial} model, I have named it the \emph{(two-stage) sequential binomial} model. Fisher scoring is used. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Crowder, M. and Sweeting, T. (1989). Bayesian inference for a bivariate binomial distribution. \emph{Biometrika}, \bold{76}, 599--603. } \author{ Thomas W. Yee } \note{ The response must be a two-column matrix of sample proportions corresponding to \eqn{y_1}{y1} and \eqn{y_2}{y2}. The \eqn{m} values should be inputted with the \code{weights} argument of \code{\link{vglm}} and \code{\link{vgam}}. The fitted value is a two-column matrix of estimated probabilities \eqn{p} and \eqn{q}. A common form of error is when there are no trials for \eqn{y_1}{y1}, e.g., if \code{mvector} below has some values which are zero. } \seealso{ \code{\link{binomialff}}, \code{\link{cfibrosis}}. 
}
\examples{
sdata <- data.frame(mvector = round(rnorm(nn <- 100, m = 10, sd = 2)),
                    x2 = runif(nn))
sdata <- transform(sdata, prob1 = logitlink(+2 - x2, inverse = TRUE),
                          prob2 = logitlink(-2 + x2, inverse = TRUE))
sdata <- transform(sdata, successes1 = rbinom(nn, size = mvector,
                                              prob = prob1))
sdata <- transform(sdata, successes2 = rbinom(nn, size = successes1,
                                              prob = prob2))
sdata <- transform(sdata, y1 = successes1 / mvector)
sdata <- transform(sdata, y2 = successes2 / successes1)
fit <- vglm(cbind(y1, y2) ~ x2, seq2binomial, weight = mvector,
            data = sdata, trace = TRUE)
coef(fit)
coef(fit, matrix = TRUE)
head(fitted(fit))
head(depvar(fit))
head(weights(fit, type = "prior"))  # Same as with(sdata, mvector)
# Number of first successes:
head(depvar(fit)[, 1] * c(weights(fit, type = "prior")))
# Number of second successes:
head(depvar(fit)[, 2] * c(weights(fit, type = "prior")) *
     depvar(fit)[, 1])
}
\keyword{models}
\keyword{regression}
VGAM/man/bistudenttUC.Rd0000644000176200001440000000474313565414527014507 0ustar liggesusers\name{Bistudentt}
\alias{Bistudentt}
\alias{dbistudentt}
%\alias{rbistudentt}
\title{Bivariate Student-t Distribution Density Function}
\description{
  Density for the bivariate Student-t distribution.

% cumulative distribution function
% quantile function
% and
% random generation

}
\usage{
dbistudentt(x1, x2, df, rho = 0, log = FALSE)
}
\arguments{
  \item{x1, x2}{vector of quantiles.}
  \item{df, rho}{
    vector of degrees of freedom and correlation parameter.
    For \code{df}, a value \code{Inf} is currently not working.

% standard deviations and correlation parameter.

  }
% \item{n}{number of observations.
%   Same as \code{\link[stats]{rt}}.
% }
  \item{log}{
    Logical.
    If \code{log = TRUE} then the logarithm of the density is returned.
  }
% \item{rho}{
%   See \code{\link{bistudenttal}}.
% }
}
\value{
  \code{dbistudentt} gives the density.

% \code{pnorm2} gives the cumulative distribution function,
% \code{qnorm2} gives the quantile function, and
% \code{rbistudentt} generates random deviates (\eqn{n} by 2 matrix).

}
% \author{ T. W. Yee }
\details{

% The default arguments correspond to the standard bivariate Student-t
% distribution with correlation parameter \eqn{\rho = 0}{rho = 0}.
% That is, two independent standard Student-t distibutions.
% Let \code{sd1} be \code{sqrt(var1)} and
% written \eqn{\sigma_1}{sigma_1}, etc.
% Then the general formula for the correlation coefficient is
% \eqn{\rho = cov / (\sigma_1 \sigma_2)}{rho = cov / (sigma_1 * sigma_2)}
% where \eqn{cov} is argument \code{cov12}.
% Thus if arguments \code{var1} and \code{var2} are left alone then
% \code{cov12} can be inputted with \eqn{\rho}{rho}.

  One can think of this function as an extension of
  \code{\link[stats]{dt}} to two dimensions.
  See \code{\link{bistudentt}} for more information.
}
\references{
  Schepsmeier, U. and Stober, J. (2013)
  Derivatives and Fisher information of bivariate copulas.
  \emph{Statistical Papers}.
}
%\section{Warning}{
%
%
%}
%\note{
% For \code{rbistudentt()},
% if the \eqn{i}th variance-covariance matrix is not
% positive-definite then the \eqn{i}th row is all \code{NA}s.
%}
\seealso{
  \code{\link{bistudentt}},
  \code{\link[stats]{dt}}.
} \examples{ \dontrun{ N <- 101; x <- seq(-4, 4, len = N); Rho <- 0.7; mydf <- 10 ox <- expand.grid(x, x) zedd <- dbistudentt(ox[, 1], ox[, 2], df = mydf, rho = Rho, log = TRUE) contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5) } } \keyword{distribution} VGAM/man/s.Rd0000644000176200001440000001252113565414527012325 0ustar liggesusers\name{s} \alias{s} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Defining Smooths in VGAM Formulas } \description{ \code{s} is used in the definition of (vector) smooth terms within \code{vgam} formulas. This corresponds to 1st-generation VGAMs that use backfitting for their estimation. The effective degrees of freedom is prespecified. } \usage{ s(x, df = 4, spar = 0, ...) } %- maybe also `usage' for other objects documented here. \arguments{ \item{x}{ covariate (abscissae) to be smoothed. Note that \code{x} must be a \emph{single} variable and not a function of a variable. For example, \code{s(x)} is fine but \code{s(log(x))} will fail. In this case, let \code{logx <- log(x)} (in the data frame), say, and then use \code{s(logx)}. At this stage bivariate smoothers (\code{x} would be a two-column matrix) are not implemented. } \item{df}{ numerical vector of length \eqn{r}. Effective degrees of freedom: must lie between 1 (linear fit) and \eqn{n} (interpolation). Thus one could say that \code{df-1} is the \emph{effective nonlinear degrees of freedom} (ENDF) of the smooth. Recycling of values will be used if \code{df} is not of length \eqn{r}. If \code{spar} is positive then this argument is ignored. Thus \code{s()} means that the effective degrees of freedom is prespecified. If it is known that the component function(s) are more wiggly than usual then try increasing the value of this argument. } \item{spar}{ numerical vector of length \eqn{r}. Positive smoothing parameters (after scaling) . Larger values mean more smoothing so that the solution approaches a linear fit for that component function. A zero value means that \code{df} is used. Recycling of values will be used if \code{spar} is not of length \eqn{r}. } \item{\dots}{ Ignored for now. } } \details{ In this help file \eqn{M} is the number of additive predictors and \eqn{r} is the number of component functions to be estimated (so that \eqn{r} is an element from the set \{1,2,\ldots,\eqn{M}\}). Also, if \eqn{n} is the number of \emph{distinct} abscissae, then \code{s} will fail if \eqn{n < 7}. \code{s}, which is symbolic and does not perform any smoothing itself, only handles a single covariate. Note that \code{s} works in \code{\link{vgam}} only. It has no effect in \code{\link{vglm}} (actually, it is similar to the identity function \code{\link[base:AsIs]{I}} so that \code{s(x2)} is the same as \code{x2} in the LM model matrix). It differs from the \code{s()} of the \pkg{gam} package and the \code{\link[mgcv]{s}} of the \pkg{mgcv} package; they should not be mixed together. Also, terms involving \code{s} should be simple additive terms, and not involving interactions and nesting etc. For example, \code{myfactor:s(x2)} is not a good idea. % It also differs from the S-PLUS \code{s} which allows % \code{spar} to be negative; \pkg{VGAM} does not allow this. } \value{ A vector with attributes that are (only) used by \code{vgam}. } \references{ Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. } \author{ Thomas W. 
Yee } \note{ The vector cubic smoothing spline which \code{s()} represents is computationally demanding for large \eqn{M}. The cost is approximately \eqn{O(n M^3)} where \eqn{n} is the number of unique abscissae. Currently a bug relating to the use of \code{s()} is that only constraint matrices whose columns are orthogonal are handled correctly. If any \code{s()} term has a constraint matrix that does not satisfy this condition then a warning is issued. See \code{\link{is.buggy}} for more information. A more modern alternative to using \code{s} with \code{\link{vgam}} is to use \code{\link{sm.os}} or \code{\link{sm.ps}}. This does not require backfitting and allows automatic smoothing parameter selection. However, this alternative should only be used when the sample size is reasonably large (\eqn{> 500}, say). These are called Generation-2 VGAMs. Another alternative to using \code{s} with \code{\link{vgam}} is \code{\link[splines]{bs}} and/or \code{\link[splines]{ns}} with \code{\link{vglm}}. The latter implements half-stepping, which is helpful if convergence is difficult. } % ~Make other sections like WARNING with \section{WARNING }{....} ~ \seealso{ \code{\link{vgam}}, \code{\link{is.buggy}}, \code{\link{sm.os}}, \code{\link{sm.ps}}, \code{\link{vsmooth.spline}}. } \examples{ # Nonparametric logistic regression fit1 <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua) \dontrun{ plot(fit1, se = TRUE) } # Bivariate logistic model with artificial data nn <- 300 bdata <- data.frame(x1 = runif(nn), x2 = runif(nn)) bdata <- transform(bdata, y1 = rbinom(nn, size = 1, prob = logitlink(sin(2 * x2), inverse = TRUE)), y2 = rbinom(nn, size = 1, prob = logitlink(sin(2 * x2), inverse = TRUE))) fit2 <- vgam(cbind(y1, y2) ~ x1 + s(x2, 3), trace = TRUE, binom2.or(exchangeable = TRUE), data = bdata) coef(fit2, matrix = TRUE) # Hard to interpret \dontrun{ plot(fit2, se = TRUE, which.term = 2, scol = "blue") } } \keyword{models} \keyword{regression} \keyword{smooth} % binom2.or(exchangeable = TRUE ~ s(x2, 3)) VGAM/man/sinmadUC.Rd0000644000176200001440000000414713565414527013573 0ustar liggesusers\name{Sinmad} \alias{Sinmad} \alias{dsinmad} \alias{psinmad} \alias{qsinmad} \alias{rsinmad} \title{The Singh-Maddala Distribution} \description{ Density, distribution function, quantile function and random generation for the Singh-Maddala distribution with shape parameters \code{a} and \code{q}, and scale parameter \code{scale}. } \usage{ dsinmad(x, scale = 1, shape1.a, shape3.q, log = FALSE) psinmad(q, scale = 1, shape1.a, shape3.q, lower.tail = TRUE, log.p = FALSE) qsinmad(p, scale = 1, shape1.a, shape3.q, lower.tail = TRUE, log.p = FALSE) rsinmad(n, scale = 1, shape1.a, shape3.q) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1.a, shape3.q}{shape parameters.} \item{scale}{scale parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dsinmad} gives the density, \code{psinmad} gives the distribution function, \code{qsinmad} gives the quantile function, and \code{rsinmad} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. 
Yee and Kai Huang } \details{ See \code{\link{sinmad}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The Singh-Maddala distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{sinmad}}, \code{\link{genbetaII}}. } \examples{ sdata <- data.frame(y = rsinmad(n = 3000, scale = exp(2), shape1 = exp(1), shape3 = exp(1))) fit <- vglm(y ~ 1, sinmad(lss = FALSE, ishape1.a = 2.1), data = sdata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) } \keyword{distribution} VGAM/man/Huggins89.t1.Rd0000644000176200001440000001210213565414527014166 0ustar liggesusers\name{Huggins89.t1} \alias{Huggins89.t1} \alias{Huggins89table1} \docType{data} \title{ Table 1 of Huggins (1989) } \description{ Simulated capture data set for the linear logistic model depending on an occasion covariate and an individual covariate for 10 trapping occasions and 20 individuals. %% ~~ A concise (1-5 lines) description of the dataset. ~~ } \usage{ data(Huggins89table1) data(Huggins89.t1) } \format{ The format is a data frame. %chr "Huggins89.t1" } \details{ Table 1 of Huggins (1989) gives this toy data set. Note that variables \code{t1},\ldots,\code{t10} are occasion-specific variables. They correspond to the response variables \code{y1},\ldots,\code{y10} which have values 1 for capture and 0 for not captured. Both \code{Huggins89table1} and \code{Huggins89.t1} are identical. The latter used variables beginning with \code{z}, not \code{t}, and may be withdrawn very soon. %% ~~ If necessary, more details than the __description__ above ~~ } %\source{ %% ~~ reference to a publication or URL from which the data were obtained ~~ %} \references{ Huggins, R. M. (1989) On the statistical analysis of capture experiments. \emph{Biometrika}, \bold{76}, 133--140. %% ~~ possibly secondary sources and usages ~~ } \examples{ Huggins89table1 <- transform(Huggins89table1, x3.tij = t01, T02 = t02, T03 = t03, T04 = t04, T05 = t05, T06 = t06, T07 = t07, T08 = t08, T09 = t09, T10 = t10) small.table1 <- subset(Huggins89table1, y01 + y02 + y03 + y04 + y05 + y06 + y07 + y08 + y09 + y10 > 0) # fit.tbh is the bottom equation on p.133. # It is a M_tbh model. fit.tbh <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2 + x3.tij, xij = list(x3.tij ~ t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10 + T02 + T03 + T04 + T05 + T06 + T07 + T08 + T09 + T10 - 1), posbernoulli.tb(parallel.t = TRUE ~ x2 + x3.tij), data = small.table1, trace = TRUE, form2 = ~ x2 + x3.tij + t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10 + T02 + T03 + T04 + T05 + T06 + T07 + T08 + T09 + T10) # These results differ a bit from Huggins (1989), probably because # two animals had to be removed here (they were never caught): coef(fit.tbh) # First element is the behavioural effect sqrt(diag(vcov(fit.tbh))) # SEs constraints(fit.tbh, matrix = TRUE) summary(fit.tbh, presid = FALSE) fit.tbh@extra$N.hat # Estimate of the population site N; cf. 20.86 fit.tbh@extra$SE.N.hat # Its standard error; cf. 
# 1.87 or 4.51

fit.th <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
               posbernoulli.t, data = small.table1, trace = TRUE)
coef(fit.th)
constraints(fit.th)
coef(fit.th, matrix = TRUE)  # M_th model
summary(fit.th, presid = FALSE)
fit.th@extra$N.hat     # Estimate of the population size N
fit.th@extra$SE.N.hat  # Its standard error

fit.bh <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
               posbernoulli.b(I2 = FALSE), data = small.table1,
               trace = TRUE)
coef(fit.bh)
constraints(fit.bh)
coef(fit.bh, matrix = TRUE)  # M_bh model
summary(fit.bh, presid = FALSE)
fit.bh@extra$N.hat
fit.bh@extra$SE.N.hat

fit.h <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
              posbernoulli.b, data = small.table1, trace = TRUE)
coef(fit.h, matrix = TRUE)  # M_h model (version 1)
coef(fit.h)
summary(fit.h, presid = FALSE)
fit.h@extra$N.hat
fit.h@extra$SE.N.hat

Fit.h <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
              posbernoulli.t(parallel.t = TRUE ~ x2),
              data = small.table1, trace = TRUE)
coef(Fit.h)
coef(Fit.h, matrix = TRUE)  # M_h model (version 2)
summary(Fit.h, presid = FALSE)
Fit.h@extra$N.hat
Fit.h@extra$SE.N.hat
}
\keyword{datasets}

%\dontrun{
%}
% data(Huggins89table1)
%## maybe str(Huggins89table1) ; plot(Huggins89table1) ...
% coef(fit1, matrix = TRUE)  # M_t model
% Huggins89.t1 <- transform(Huggins89.t1, xx2 = c(matrix(x2, 2, 10, byrow = TRUE)))

%This code below is equivalent to the above fit.tbh (same name).
%But this version uses manual construction of the constraint matrices:
%tau <- 10
%Hlist <-list("(Intercept)" = cbind(bhvr.effect = c(rep(0, len = tau),
%                                                   rep(1, len = tau-1)),
%                                   overall.intercept = 1),
%             x2   = cbind(rep(1, len = 2*tau-1)),
%             Zedd = cbind(rep(1, len = 2*tau-1)))
%fit.tbh <-
%  vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2 + Zedd,
%       xij = list(Zedd ~ z01 + z02 + z03 + z04 + z05 + z06 + z07 + z08 + z09 + z10 +
%                         Z02 + Z03 + Z04 + Z05 + Z06 + Z07 + Z08 + Z09 + Z10 - 1),
%       posbernoulli.tb, data = small.t1, trace = TRUE,
%       constraints = Hlist,
%       form2 = ~ x2 + Zedd +
%                 z01 + z02 + z03 + z04 + z05 + z06 + z07 + z08 + z09 + z10 +
%                 Z02 + Z03 + Z04 + Z05 + Z06 + Z07 + Z08 + Z09 + Z10)
VGAM/man/prats.Rd0000644000176200001440000000375013565414527013220 0ustar liggesusers\name{prats}
\alias{prats}
\docType{data}
\title{ Pregnant Rats Toxicological Experiment Data }
\description{
  Data from a small toxicological experiment.
  The subjects are fetuses from two randomized groups of pregnant rats,
  and they were given a placebo or chemical treatment.
  The number with birth defects was recorded, as well as each litter
  size.
}
\usage{ data(prats) }
\format{
  A data frame with the following variables.
  \describe{
    \item{treatment}{
      A \code{0} means control;
      a \code{1} means the chemical treatment.
    }
    \item{alive, litter.size}{
      The number of fetuses alive at 21 days, out of
      the number of fetuses alive at 4 days (the litter size).
    }
  }
}
\details{
  The data concern a toxicological experiment where the subjects are
  fetuses from two randomized groups of 16 pregnant rats each, and
  they were given a placebo or chemical treatment.
  The number with birth defects and the litter size were recorded.
  Half the rats were fed a control diet during pregnancy and
  lactation, and the diet of the other half was treated with a
  chemical.
  For each litter the number of pups alive at 4 days and the number of
  pups that survived the 21-day lactation period were recorded.
}
\source{
  Weil, C. S.
(1970)
  Selection of the valid number of sampling units and a consideration
  of their combination in toxicological studies involving reproduction,
  teratogenesis or carcinogenesis.
  \emph{Food and Cosmetics Toxicology}, \bold{8}(2), 177--182.

%Food and Cosmetics Toxicology
%Fd. Cosmet. Toxicol.

}
\references{
  Williams, D. A. (1975)
  The Analysis of Binary Responses From Toxicological Experiments
  Involving Reproduction and Teratogenicity.
  \emph{Biometrics}, \bold{31}(4), 949--952.
}
\seealso{
  \code{\link[VGAM]{betabinomial}},
  \code{\link[VGAM]{betabinomialff}}.
}
\examples{
prats
colSums(subset(prats, treatment == 0))
colSums(subset(prats, treatment == 1))
summary(prats)
}
\keyword{datasets}

%
%
VGAM/man/AR1EIM.Rd0000644000176200001440000002335513565414527013010 0ustar liggesusers\name{AR1EIM}
\alias{AR1EIM}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Computation of the Exact EIM of an Order-1 Autoregressive Process }
\description{Computation of the exact Expected Information Matrix of
  the Autoregressive process of order-\eqn{1} (AR(\eqn{1}))
  with Gaussian white noise and stationary
  random components.
}
\usage{
AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
       WNsd = NULL, ARcoeff1 = NULL, eps.porat = 1e-2)
}
\arguments{
  \item{x}{
    A vector of quantiles. The Gaussian time series for which the EIMs
    are computed.
    If multiple time series are being analyzed, then \code{x} must be
    a matrix where each column holds one response.
    That is, the number of columns (denoted as \eqn{NOS}) must match
    the number of responses.
  }
  \item{var.arg}{
    Logical. Same as with \code{\link[VGAM:AR1]{AR1}}.
  }
  \item{p.drift}{
    A numeric vector with the \emph{scaled mean(s)} (commonly referred
    to as \emph{drift}) of the AR process(es) in turn.
    Its length matches the number of responses.
  }
  \item{WNsd, ARcoeff1}{
    Matrices.
    The standard deviation of the white noise, and the correlation
    (coefficient) of the AR(\eqn{1}) model, for \bold{each} observation.
    That is, the dimension for each matrix is
    \eqn{N \times NOS}{N x NOS}, where \eqn{N} is the number of
    observations and \eqn{NOS} is the number of responses.
    Else, these arguments are recycled.
  }
  \item{eps.porat}{
    A very small positive number to test whether the standard
    deviation (\code{WNsd}) is close enough to its value estimated in
    this function.
    See below for further details.
  }
}
\details{
  This function implements the algorithm of Porat and Friedlander
  (1986) to \emph{recursively} compute the exact expected information
  matrix (EIM) of Gaussian time series with stationary random
  components.

  By default, when the VGLM/VGAM family function
  \code{\link[VGAM:AR1]{AR1}} is used to fit an AR(\eqn{1}) model via
  \code{\link[VGAM:vglm]{vglm}}, Fisher scoring is executed using the
  \bold{approximate} EIM for the AR process. However, this model can
  also be fitted using the \bold{exact} EIMs computed by \code{AR1EIM}.

  Given \eqn{N} consecutive data points,
  \eqn{ {y_{0}, y_{1}, \ldots, y_{N - 1} } }{ {y[0], y[1], \ldots, y[N - 1]} }
  with probability density \eqn{f(\boldsymbol{y})}{f(y)},
  the Porat and Friedlander algorithm calculates the EIMs
  \eqn{ [J_{n-1}(\boldsymbol{\theta})] }{J(n-1)[\theta]},
  for all \eqn{1 \leq n \leq N}{1 \le n \le N}.
  This is done based on the Levinson-Durbin algorithm for computing
  the orthogonal polynomials of a Toeplitz matrix.
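  For instance, assuming the installed version of
  \code{\link[VGAM:AR1]{AR1}} offers an argument such as
  \code{type.EIM} for selecting the exact EIMs, a call like
  \code{vglm(y ~ 1, AR1(type.EIM = "exact"), data = adata)}
  (where \code{y} and \code{adata} are placeholders for the user's
  response and data frame) would make Fisher scoring use the
  recursions described here; alternatively, \code{AR1EIM} can be
  called directly, as in the examples below.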
In particular, for the AR(\eqn{1}) model, the vector of parameters to be estimated under the VGAM/VGLM approach is \deqn{ \boldsymbol{\eta} = (\mu^{*}, \log(\sigma^2), rhobit(\rho)),}{ \eta = ( mu^*, log(sigma^2), rhobit(rho)), } where \eqn{\sigma^2}{sigma^2} is the variance of the white noise and \eqn{\mu^{*}}{mu^*} is the drift parameter (see \code{\link[VGAM:AR1]{AR1}} for further details on this). %Compared to \code{\link[stats]{arima}}, this family function differs %in the following ways. %1. %2. %3. %The following quote from \code{\link[stats]{arima}} reveals a weakness: %"jsdjfksf". %This is a well-known weakness in \code{\link[stats]{arima}}, however, %some simulations suggest that the VGAM se is more accurate. Consequently, for each observation \eqn{n = 1, \ldots, N}, the EIM, \eqn{J_{n}(\boldsymbol{\theta})}{Jn[\theta]}, has dimension \eqn{3 \times 3}{3 x 3}, where the diagonal elements are: %Notice, however, that the Porat and Friedlander algorithm considers %\eqn{ { y_t } }{ {y[t]}} as a zero-mean process. %Then, for each \eqn{n = 1, \ldots, N}, %\eqn{ [J_{n}(\boldsymbol{\theta})] }{Jn[\theta]} is a %\eqn{2 \times 2}{2 x 2} matrix, with elements \deqn{ J_{[n, 1, 1]} = E[ -\partial^2 \log f(\boldsymbol{y}) / \partial ( \mu^{*} )^2 ], }{ J[n, 1, 1] = E[ -\delta^2 log f(y) / \delta (mu^*)^2 ], } \deqn{ J_{[n, 2, 2]} = E[ -\partial^2 \log f(\boldsymbol{y}) / \partial (\sigma^2)^2 ], }{ J[n, 2, 2] = E[ - \delta^2 log f(y) / \delta (\sigma^2)^2 ],} and \deqn{ J_{[n, 3, 3]} = E[ -\partial^2 \log f(\boldsymbol{y}) / \partial ( \rho )^2 ]. }{ J[n, 3, 3] = E[ -\delta^2 log f(y) / \delta (rho)^2]. } As for the off-diagonal elements, one has the usual entries, i.e., \deqn{ J_{[n, 1, 2]} = J_{[n, 2, 1]} = E[ -\partial^2 \log f(\boldsymbol{y}) / \partial \sigma^2 \partial \rho], }{ J[n, 1, 2] = J[n, 2, 1] = E[ -\delta^2 log f(y) / \delta \sigma^2 \delta rho ],} etc. If \code{var.arg = FALSE}, then \eqn{\sigma} instead of \eqn{\sigma^2} is estimated. Therefore, \eqn{J_{[n, 2, 2]}}{J[n, 2, 2]}, \eqn{J_{[n, 1, 2]}}{J[n, 1, 2]}, etc., are correspondingly replaced. Once these expected values are internally computed, they are returned in an array of dimension \eqn{N \times 1 \times 6}{N x 1 x 6}, of the form \deqn{J[, 1, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]}, J_{[ , 3, 3]}, J_{[ , 1, 2]}, J_{[, 2, 3]}, J_{[ , 1, 3]} ]. }{ J[, 1, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3], J[ , 1, 2], J[ , 2, 3], J[ , 1, 3] ]. } \code{AR1EIM} handles multiple time series, say \eqn{NOS}. If this happens, then it accordingly returns an array of dimension \eqn{N \times NOS \times 6 }{N x NOS x 6}. Here, \eqn{J[, k, ]}, for \eqn{k = 1, \ldots, NOS}, is a matrix of dimension \eqn{N \times 6}{N x 6}, which stores the EIMs for the \eqn{k^{th}}{k}th response, as above, i.e., \deqn{J[, k, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]}, J_{[ , 3, 3]}, \ldots ], }{ J[, k, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3], \ldots ], } the \emph{bandwidth} form, as required by \code{\link[VGAM:AR1]{AR1}}. } \value{ An array of dimension \eqn{N \times NOS \times 6}{N x NOS x 6}, as above. This array stores the EIMs calculated from the joint density as a function of \deqn{\boldsymbol{\theta} = (\mu^*, \sigma^2, \rho). }{ \theta = (mu^*, sigma^2, rho). } Nevertheless, note that, under the VGAM/VGLM approach, the EIMs must be correspondingly calculated in terms of the linear predictors, \eqn{\boldsymbol{\eta}}{\eta}. } \note{ For simplicity, one can assume that the time series analyzed has a zero mean.
Consequently, when the family function \code{\link[VGAM:AR1]{AR1}} calls \code{AR1EIM} to compute the EIMs, the argument \code{p.drift} is internally set to the zero vector, whereas \code{x} is \emph{centered} by subtracting its mean value. } \section{Asymptotic behaviour of the algorithm}{ For large enough \eqn{n}, the EIMs, \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)}, become approximately linear in \eqn{n}. That is, for some \eqn{n_0}{n0}, \deqn{ J_n(\boldsymbol{\theta}) \equiv J_{n_0}(\boldsymbol{\theta}) + (n - n_0) \bar{J}(\boldsymbol{\theta}),~~~~~~(*) }{ Jn(\theta) -> Jn0(\theta) + (n - n0) * Jbar(\theta), (*) } where \eqn{ \bar{J}(\boldsymbol{\theta}) }{ Jbar(\theta)} is a constant matrix. This relationship is used internally if a proper value of \eqn{n_0}{n0} is determined. Different ways can be adopted to find \eqn{n_0}{n0}. In \code{AR1EIM}, this is done by checking the difference between the internally estimated variances and the entered ones at \code{WNsd}. If this difference is less than \code{eps.porat} at some iteration, say at iteration \eqn{n_0}{n0}, then \code{AR1EIM} takes \eqn{ \bar{J}(\boldsymbol{\theta})}{Jbar(\theta)} as the last computed increment of \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)}, and extrapolates \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)}, for all \eqn{k \geq n_0 }{k \ge n0} using \eqn{(*)}. Else, the algorithm will complete the iterations for \eqn{1 \leq n \leq N}{1 \le n \le N}. Finally, note that the amount of computation decreases considerably if the asymptotic relationship \eqn{(*)} is used to compute \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)}, \eqn{k \geq n_0 }{k \ge n0}. Normally, the number of operations involved in this algorithm is proportional to \eqn{N^2}. See Porat and Friedlander (1986) for full details on the asymptotic behaviour of the algorithm. } \section{Warning}{ Arguments \code{WNsd} and \code{ARcoeff1} are matrices of dimension \eqn{N \times NOS}{N x NOS}. Else, these arguments are accordingly recycled. } \references{ Porat, B. and Friedlander, B. (1986) Computation of the Exact Information Matrix of Gaussian Time Series with Stationary Random Components. \emph{IEEE Transactions on Acoustics, Speech, and Signal Processing}, \bold{34}(1), 118--130. } \author{ V. Miranda and T. W. Yee. } \seealso{ \code{\link[VGAM:AR1]{AR1}}. } \examples{ set.seed(1) nn <- 500 ARcoeff1 <- c(0.3, 0.25) # Will be recycled. WNsd <- c(exp(1), exp(1.5)) # Will be recycled. p.drift <- c(0, 0) # Zero-mean Gaussian time series. ### Generate two (zero-mean) AR(1) processes ### ts1 <- p.drift[1]/(1 - ARcoeff1[1]) + arima.sim(model = list(ar = ARcoeff1[1]), n = nn, sd = WNsd[1]) ts2 <- p.drift[2]/(1 - ARcoeff1[2]) + arima.sim(model = list(ar = ARcoeff1[2]), n = nn, sd = WNsd[2]) ARdata <- matrix(cbind(ts1, ts2), ncol = 2) ### Compute the exact EIMs: TWO responses. ### ExactEIM <- AR1EIM(x = ARdata, var.arg = FALSE, p.drift = p.drift, WNsd = WNsd, ARcoeff1 = ARcoeff1) ### For response 1: head(ExactEIM[, 1 ,]) # NOTICE THAT THIS IS A (nn x 6) MATRIX! ### For response 2: head(ExactEIM[, 2 ,]) # NOTICE THAT THIS IS A (nn x 6) MATRIX! } VGAM/man/hormone.Rd0000644000176200001440000001012213565414527013525 0ustar liggesusers\name{hormone} \alias{hormone} \docType{data} \title{ Hormone Assay Data } \description{ A hormone assay data set from Carroll and Ruppert (1988). %% ~~ A concise (1-5 lines) description of the dataset. ~~ } \usage{data(hormone)} \format{ A data frame with 85 observations on the following 2 variables.
\describe{ \item{\code{X}}{a numeric vector, suitable as the x-axis in a scatter plot. The reference method. } \item{\code{Y}}{a numeric vector, suitable as the y-axis in a scatter plot. The test method. } } } \details{ %% ~~ If necessary, more details than the __description__ above ~~ The data is given in Table 2.4 of Carroll and Ruppert (1988), and was downloaded from \code{http://www.stat.tamu.edu/~carroll} prior to 2019. The book describes the data as follows. The data are the results of two assay methods for hormone data; the scale of the data as presented is not particularly meaningful, and the original source of the data refused permission to divulge further information. As in a similar example of Leurgans (1980), the old or reference method is being used to predict the new or test method. The overall goal is to see whether we can reproduce the test-method measurements with the reference-method measurements. Thus calibration might be of interest for the data. % from \url{http://www.stat.tamu.edu/~carroll}. } %\source{ % Originally, %} \references{ Carroll, R. J. and Ruppert, D. (1988) \emph{Transformation and Weighting in Regression}. New York, USA: Chapman & Hall. Leurgans, S. (1980) Evaluating laboratory measurement techniques. \emph{Biostatistics Casebook}. Eds.: Miller, R. G. Jr., and Efron, B. and Brown, B. W. Jr., and Moses, L. New York, USA: Wiley. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. } \seealso{ \code{\link[VGAM]{uninormal}}, \code{\link[VGAM]{rrvglm}}. } \examples{ \dontrun{ data(hormone) summary(hormone) modelI <- rrvglm(Y ~ 1 + X, data = hormone, trace = TRUE, uninormal(zero = NULL, lsd = "identitylink", imethod = 2)) # Alternative way to fit modelI modelI.other <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE, uninormal(zero = NULL, lsd = "identitylink")) # Inferior to modelI modelII <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE, family = uninormal(zero = NULL)) logLik(modelI) logLik(modelII) # Less than logLik(modelI) # Reproduce the top 3 equations on p.65 of Carroll and Ruppert (1988). # They are called Equations (1)--(3) here. # Equation (1) hormone <- transform(hormone, rX = 1 / X) clist <- list("(Intercept)" = diag(2), X = diag(2), rX = rbind(0, 1)) fit1 <- vglm(Y ~ 1 + X + rX, family = uninormal(zero = NULL), constraints = clist, data = hormone, trace = TRUE) coef(fit1, matrix = TRUE) summary(fit1) # Actually, the intercepts do not seem significant plot(Y ~ X, hormone, col = "blue") lines(fitted(fit1) ~ X, hormone, col = "orange") # Equation (2) fit2 <- rrvglm(Y ~ 1 + X, uninormal(zero = NULL), hormone, trace = TRUE) coef(fit2, matrix = TRUE) plot(Y ~ X, hormone, col = "blue") lines(fitted(fit2) ~ X, hormone, col = "red") # Add +- 2 SEs lines(fitted(fit2) + 2 * exp(predict(fit2)[, "loglink(sd)"]) ~ X, hormone, col = "orange") lines(fitted(fit2) - 2 * exp(predict(fit2)[, "loglink(sd)"]) ~ X, hormone, col = "orange") # Equation (3) # Does not fit well because the loglink link for the mean is not good. fit3 <- rrvglm(Y ~ 1 + X, maxit = 300, data = hormone, trace = TRUE, uninormal(lmean = "loglink", zero = NULL)) coef(fit3, matrix = TRUE) plot(Y ~ X, hormone, col = "blue") # Does not look okay.
lines(exp(predict(fit3)[, 1]) ~ X, hormone, col = "red") # Add +- 2 SEs lines(fitted(fit3) + 2 * exp(predict(fit3)[, "loglink(sd)"]) ~ X, hormone, col = "orange") lines(fitted(fit3) - 2 * exp(predict(fit3)[, "loglink(sd)"]) ~ X, hormone, col = "orange") } } \keyword{datasets} % from \url{http://www.stat.tamu.edu/~carroll/data/hormone_data.txt}. VGAM/man/residualsvglm.Rd0000644000176200001440000001255613565414527014744 0ustar liggesusers\name{residualsvglm} %\alias{resid} %\alias{residuals} \alias{residualsvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{Residuals for a VGLM fit} \description{ Residuals for a vector generalized linear model (VGLM) object. } \usage{ residualsvglm(object, type = c("working", "pearson", "response", "deviance", "ldot", "stdres"), matrix.arg = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Object of class \code{"vglm"}, i.e., a \code{\link{vglm}} fit. } \item{type}{ The value of this argument can be abbreviated. The type of residuals to be returned. The default is the first one: working residuals corresponding to the IRLS algorithm. These should be defined for all models. They are sometimes added to VGAM plots of estimated component functions (see \code{\link{plotvgam}}). Pearson residuals for GLMs, when squared and summed over the data set, total to the Pearson chi-squared statistic. For VGLMs, Pearson residuals involve the working weight matrices and the score vectors. Under certain limiting conditions, Pearson residuals have zero means and the identity matrix as the variance-covariance matrix. Response residuals are simply the difference between the observed values and the fitted values. Both have to be of the same dimension, hence not all families have response residuals defined. Deviance residuals are only defined for models with a deviance function. They apply mainly to GLMs. This function returns a \code{NULL} for those models whose deviance is undefined. The choice \code{"ldot"} should not be used currently. Standardized residuals are currently only defined for 2 types of models: (i) GLMs (\code{\link{poissonff}}, \code{\link{binomialff}}); (ii) those fitted to a two-way table of counts, e.g., \code{\link{cumulative}}, \code{\link{acat}}, \code{\link{multinomial}}, \code{\link{sratio}}, \code{\link{cratio}}. For (ii), they are defined in Section 2.4.5 of Agresti (2018) and are also the output from the \code{"stdres"} component of \code{\link[stats]{chisq.test}}. For the test of independence they are a useful type of residual. Their formula is \code{(observed - expected) / sqrt(V)}, where \code{V} is the residual cell variance (also see Agresti, 2007, section 2.4.5). When an independence null hypothesis is true, each standardized residual (corresponding to a cell in the table) has a large-sample standard normal distribution. Currently this function merely extracts the table of counts from \code{object} and then computes the standardized residuals like \code{\link[stats]{chisq.test}}. %standardized residuals, % \code{(observed - expected) / sqrt(V)}, where \code{V} is % the residual cell variance (Agresti, 2007, section 2.4.5 % for the case where \code{x} is a matrix, \code{n * p * (1 - % p)} otherwise). } \item{matrix.arg}{ Logical, which applies when the pre-processed answer is a vector or a 1-column matrix. If \code{TRUE} then the value returned will be a matrix, else a vector. % Note that it is always a matrix if \eqn{M>1}. } % \item{\dots}{Arguments passed into \code{predictvlm}.
% } } \details{ This function returns various kinds of residuals, depending on the specific type of model fitted. Section 3.7 of Yee (2015) gives some details on several types of residuals defined for the VGLM class. Standardized residuals for GLMs are described in Section 4.5.6 of Agresti (2013) as the raw (response) residuals divided by their standard error. They involve the generalized hat matrix evaluated at the final IRLS iteration. When applied to the LM, standardized residuals for GLMs simplify to \code{\link[stats]{rstandard}}. For GLMs they are basically the Pearson residual divided by the square root of 1 minus the leverage. % This applies to two way tables: %Furthermore, the standardized %residual squared, when df=1, %coincides exactly with the Pearson \eqn{X^2} statistic. } \value{ If that residual type is undefined or inappropriate then \code{NULL} is returned, otherwise a matrix or vector of residuals is returned. } \references{ Agresti, A. (2007) \emph{An Introduction to Categorical Data Analysis, 2nd ed.}, New York: John Wiley & Sons. Page 38. Agresti, A. (2013) \emph{Categorical Data Analysis, 3rd ed.}, New York: John Wiley & Sons. Agresti, A. (2018) \emph{An Introduction to Categorical Data Analysis, 3rd ed.}, New York: John Wiley & Sons. } %\author{ Thomas W. Yee } %\note{ % Setting \code{se.fit = TRUE} and \code{type = "response"} % will generate an error. %} \section{Warning }{ This function may change in the future, especially the residual types whose definitions may change. } \seealso{ \code{\link[stats]{resid}}, \code{\link{vglm}}, \code{\link[stats]{chisq.test}}, \code{\link{hatvalues}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo) resid(fit) # Same as having type = "working" (the default) resid(fit, type = "response") resid(fit, type = "pearson") resid(fit, type = "stdres") # Test for independence } \keyword{models} \keyword{regression} % untransform = FALSE, extra = object@extra, VGAM/man/oizipf.Rd0000644000176200001440000000461213565414527013365 0ustar liggesusers\name{oizipf} \alias{oizipf} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-inflated Zipf Distribution Family Function } \description{ Fits a 1-inflated Zipf distribution. } \usage{ oizipf(N = NULL, lpstr1 = "logitlink", lshape = "loglink", type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"), ishape = NULL, gpstr1 = ppoints(8), gshape = exp((-3:3) / 4), zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{N}{ Same as \code{\link{zipf}}. } \item{lpstr1, lshape}{ For \code{lpstr1}: the same idea as \code{\link{zipoisson}} except it applies to a structural 1. } \item{gpstr1, gshape, ishape}{ For initial values. See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 1-inflated Zipf distribution is a mixture distribution of the Zipf distribution with some probability of obtaining a (structural) 1. Thus there are two sources for obtaining the value 1. This distribution is written here in a way that retains a similar notation to the zero-inflated Poisson, i.e., the probability \eqn{P[Y=1]} involves another parameter \eqn{\phi}{phi}. See \code{\link{zipoisson}}. This family function can handle multiple responses. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned.
A lot of data is needed to estimate the parameters accurately. Usually, the shape parameter is best modelled as intercept-only. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } %\references{ %} \author{ Thomas W. Yee } %\note{ %} \seealso{ \code{\link{Oizipf}}, \code{\link{zipf}}, \code{\link{Oizeta}}. } \examples{ \dontrun{ odata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data odata <- transform(odata, pstr1 = logitlink(-1 + x2, inverse = TRUE), myN = 10, shape = exp(-0.5)) odata <- transform(odata, y1 = roizipf(nn, N = myN, s = shape, pstr1 = pstr1)) with(odata, table(y1)) fit1 <- vglm(y1 ~ x2, oizipf(zero = "shape"), data = odata, trace = TRUE) coef(fit1, matrix = TRUE) } } \keyword{models} \keyword{regression} VGAM/man/poisson.pointsUC.Rd0000644000176200001440000000355413565414527015326 0ustar liggesusers\name{PoissonPoints} \alias{PoissonPoints} \alias{dpois.points} %\alias{ppois.points} %\alias{qpois.points} \alias{rpois.points} \title{Poisson Points Distribution} \description{ Density for the PoissonPoints distribution. % distribution function, quantile function % and random generation } \usage{ dpois.points(x, lambda, ostatistic, dimension = 2, log = FALSE) } %ppois.points(q, lambda, ostatistic, dimension = 2, log = FALSE) %qpois.points(p, lambda, ostatistic, dimension = 2, log = FALSE) %rpois.points(n, lambda, ostatistic, dimension = 2, log = FALSE) \arguments{ \item{x}{vector of quantiles.} \item{lambda}{ the mean density of points. } \item{ostatistic}{ positive values, usually integers. } \item{dimension}{ Either 2 or 3. } % \item{p}{vector of probabilities.} % \item{n}{number of observations. % Same as \code{\link[stats:Uniform]{runif}}. % } \item{log}{ Logical; if TRUE, the logarithm is returned. } } \value{ \code{dpois.points} gives the density. % and % \code{ppois.points} gives the distribution function, % \code{qpois.points} gives the quantile function, and % \code{rpois.points} generates random deviates. } %\author{ T. W. Yee } \details{ See \code{\link{poisson.points}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } %\section{Warning }{ %} \seealso{ \code{\link{poisson.points}}, \code{\link[stats:Poisson]{dpois}}, \code{\link{Maxwell}}. } \examples{ \dontrun{ lambda <- 1; xvec <- seq(0, 2, length = 400) plot(xvec, dpois.points(xvec, lambda, ostat = 1, dimension = 2), type = "l", las = 1, col = "blue", sub = "First order statistic", main = paste("PDF of PoissonPoints distribution with lambda = ", lambda, " and on the plane", sep = "")) } } \keyword{distribution} VGAM/man/gammahyperbola.Rd0000644000176200001440000000533113565414527015054 0ustar liggesusers\name{gammahyperbola} \alias{gammahyperbola} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gamma Hyperbola Bivariate Distribution } \description{ Estimate the parameter of a gamma hyperbola bivariate distribution by maximum likelihood estimation. } \usage{ gammahyperbola(ltheta = "loglink", itheta = NULL, expected = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{ltheta}{ Link function applied to the (positive) parameter \eqn{\theta}{theta}. See \code{\link{Links}} for more choices. } \item{itheta}{ Initial value for the parameter. The default is to estimate it internally. } \item{expected}{ Logical.
\code{FALSE} means the Newton-Raphson algorithm (using the observed information matrix) is used; otherwise the Fisher scoring algorithm (using the expected information matrix) is used. } } \details{ The joint probability density function is given by \deqn{f(y_1,y_2) = \exp( -e^{-\theta} y_1 / \theta - \theta y_2 )}{% f(y1,y2) = exp( -exp(-theta) * y1 / theta - theta * y2) } for \eqn{\theta > 0}{theta > 0}, \eqn{y_1 > 0}{y1 > 0}, \eqn{y_2 > 1}{y2 > 1}. The random variables \eqn{Y_1}{Y1} and \eqn{Y_2}{Y2} are independent. The marginal distribution of \eqn{Y_1}{Y1} is an exponential distribution with rate parameter \eqn{\exp(-\theta)/\theta}{exp(-theta)/theta}. The marginal distribution of \eqn{Y_2}{Y2} is an exponential distribution that has been shifted to the right by 1 and with rate parameter \eqn{\theta}{theta}. The fitted values are stored in a two-column matrix with the marginal means, which are \eqn{\theta \exp(\theta)}{theta * exp(theta)} and \eqn{1 + 1/\theta}{1 + 1/theta}. The default algorithm is Newton-Raphson because Fisher scoring tends to be much slower for this distribution. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Reid, N. (2003) Asymptotics and the theory of inference. \emph{Annals of Statistics}, \bold{31}, 1695--1731. } \author{ T. W. Yee } \note{ The response must be a two-column matrix. } \seealso{ \code{\link{exponential}}. } \examples{ gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, theta = exp(-2 + x2)) gdata <- transform(gdata, y1 = rexp(nn, rate = exp(-theta)/theta), y2 = rexp(nn, rate = theta) + 1) fit <- vglm(cbind(y1, y2) ~ x2, gammahyperbola(expected = TRUE), data = gdata) coef(fit, matrix = TRUE) Coef(fit) head(fitted(fit)) summary(fit) } \keyword{models} \keyword{regression} % fit <- vglm(cbind(y1, y2) ~ x2, gammahyperbola, data = gdata, trace = TRUE, crit = "coef") VGAM/man/auuc.Rd0000644000176200001440000000200013565414527013011 0ustar liggesusers\name{auuc} \alias{auuc} \docType{data} \title{ Auckland University Undergraduate Counts Data} \description{ Undergraduate student enrolments at the University of Auckland in 1990. } \usage{data(auuc)} \format{ A data frame with 4 observations on the following 5 variables. \describe{ \item{Commerce}{a numeric vector of counts.} \item{Arts}{a numeric vector of counts.} \item{SciEng}{a numeric vector of counts.} \item{Law}{a numeric vector of counts.} \item{Medicine}{a numeric vector of counts.} } } \details{ Each student is cross-classified by their college (Science and Engineering have been combined) and the socio-economic status (SES) of their fathers (1 = highest, down to 4 = lowest). } \source{ Dr Tony Morrison. } \references{ Wild, C. J. and Seber, G. A. F. (2000) \emph{Chance Encounters: A First Course in Data Analysis and Inference}, New York: Wiley. } \examples{ auuc \dontrun{ round(fitted(grc(auuc))) round(fitted(grc(auuc, Rank = 2))) } } \keyword{datasets} VGAM/man/vglm-class.Rd0000644000176200001440000001654313565414527014133 0ustar liggesusers\name{vglm-class} \docType{class} \alias{vglm-class} \title{Class ``vglm'' } \description{ Vector generalized linear models. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{vglm(...)}. % ~~ describe objects here ~~ } \section{Slots}{ In the following, \eqn{M} is the number of linear predictors.
\describe{ \item{\code{extra}:}{Object of class \code{"list"}; the \code{extra} argument on entry to \code{vglm}. This contains any extra information that might be needed by the family function. } \item{\code{family}:}{Object of class \code{"vglmff"}. The family function. } \item{\code{iter}:}{Object of class \code{"numeric"}. The number of IRLS iterations used. } \item{\code{predictors}:}{Object of class \code{"matrix"} with \eqn{M} columns which holds the \eqn{M} linear predictors. } \item{\code{assign}:}{Object of class \code{"list"}, from class \code{ "vlm"}. This named list gives information matching the columns and the (LM) model matrix terms. } \item{\code{call}:}{Object of class \code{"call"}, from class \code{ "vlm"}. The matched call. } \item{\code{coefficients}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. A named vector of coefficients. } \item{\code{constraints}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A named list of constraint matrices used in the fitting. } \item{\code{contrasts}:}{Object of class \code{"list"}, from class \code{ "vlm"}. The contrasts used (if any). } \item{\code{control}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A list of parameters for controlling the fitting process. See \code{\link{vglm.control}} for details. } \item{\code{criterion}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A list containing the convergence criterion evaluated at the final IRLS iteration. } \item{\code{df.residual}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. The residual degrees of freedom. } \item{\code{df.total}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. The total degrees of freedom. } \item{\code{dispersion}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. The scaling parameter. } \item{\code{effects}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. The effects. } \item{\code{fitted.values}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. The fitted values. %This may be missing or consist entirely %of \code{NA}s, e.g., the Cauchy model. } \item{\code{misc}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A named list to hold miscellaneous parameters. } \item{\code{model}:}{Object of class \code{"data.frame"}, from class \code{ "vlm"}. The model frame. } \item{\code{na.action}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A list holding information about missing values. } \item{\code{offset}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. If non-zero, a \eqn{M}-column matrix of offsets. } \item{\code{post}:}{Object of class \code{"list"}, from class \code{ "vlm"} where post-analysis results may be put. } \item{\code{preplot}:}{Object of class \code{"list"}, from class \code{ "vlm"} used by \code{\link{plotvgam}}; the plotting parameters may be put here. } \item{\code{prior.weights}:}{Object of class \code{"matrix"}, from class \code{ "vlm"} holding the initially supplied weights. } \item{\code{qr}:}{Object of class \code{"list"}, from class \code{ "vlm"}. QR decomposition at the final iteration. } \item{\code{R}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. The \bold{R} matrix in the QR decomposition used in the fitting. } \item{\code{rank}:}{Object of class \code{"integer"}, from class \code{ "vlm"}. Numerical rank of the fitted model. } \item{\code{residuals}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. The \emph{working} residuals at the final IRLS iteration.
} \item{\code{ResSS}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. Residual sum of squares at the final IRLS iteration with the adjusted dependent vectors and weight matrices. } \item{\code{smart.prediction}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A list of data-dependent parameters (if any) that are used by smart prediction. } \item{\code{terms}:}{Object of class \code{"list"}, from class \code{ "vlm"}. The \code{\link[stats]{terms}} object used. } \item{\code{weights}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. The weight matrices at the final IRLS iteration. This is in matrix-band form. } \item{\code{x}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. The model matrix (LM, not VGLM). } \item{\code{xlevels}:}{Object of class \code{"list"}, from class \code{ "vlm"}. The levels of the factors, if any, used in fitting. } \item{\code{y}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. The response, in matrix form. } \item{\code{Xm2}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. See \code{\link{vglm-class}}. } \item{\code{Ym2}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. See \code{\link{vglm-class}}. } \item{\code{callXm2}:}{ Object of class \code{"call"}, from class \code{ "vlm"}. The matched call for argument \code{form2}. } } } \section{Extends}{ Class \code{"vlm"}, directly. } \section{Methods}{ \describe{ \item{cdf}{\code{signature(object = "vglm")}: cumulative distribution function. Applicable to, e.g., quantile regression and extreme value data models.} \item{deplot}{\code{signature(object = "vglm")}: Applicable to, e.g., quantile regression.} \item{deviance}{\code{signature(object = "vglm")}: deviance of the model (where applicable). } \item{plot}{\code{signature(x = "vglm")}: diagnostic plots. } \item{predict}{\code{signature(object = "vglm")}: extract the linear predictors or predict the linear predictors at a new data frame.} \item{print}{\code{signature(x = "vglm")}: short summary of the object. } \item{qtplot}{\code{signature(object = "vglm")}: quantile plot (only applicable to some models). } \item{resid}{\code{signature(object = "vglm")}: residuals. There are various types of these. } \item{residuals}{\code{signature(object = "vglm")}: residuals. Shorthand for \code{resid}. } \item{rlplot}{\code{signature(object = "vglm")}: return level plot. Useful for extreme value data models.} \item{summary}{\code{signature(object = "vglm")}: a more detailed summary of the object. } } } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. %\url{http://www.stat.auckland.ac.nz/~yee} } \author{ Thomas W. Yee } %\note{ ~~further notes~~ } %~Make other sections like WARNING with \section{WARNING }{....} ~ \seealso{ \code{\link{vglm}}, \code{\link{vglmff-class}}, \code{\link{vgam-class}}. } \examples{ # Multinomial logit model pneumo <- transform(pneumo, let = log(exposure.time)) vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo) } \keyword{classes} VGAM/man/bellff.Rd0000644000176200001440000000374513565414527013325 0ustar liggesusers\name{bellff} \alias{bellff} %- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bell Distribution Family Function } \description{ Estimating the shape parameter of the Bell distribution by maximum likelihood estimation. } \usage{ bellff(lshape = "loglink", zero = NULL, gshape = expm1(1.6 * ppoints(7))) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape, zero, gshape}{ More information is at \code{\link{CommonVGAMffArguments}}. } } \details{ The Bell distribution has a probability density function that can be written \deqn{f(y;s) = \frac{s^y \exp(1 - e^s) B_y}{y!} }{% f(y;s) = s^y * exp(1 - exp(s)) * B_y / y!} for \eqn{y=0(1)\infty}{y=0(1)Inf} and shape parameter \eqn{s > 0}{s > 0}, where \eqn{B_y}{B_y} denotes the \eqn{y}th Bell number. The density function of the 2-parameter gamma distribution can be written \deqn{f(y;\mu,a) = \frac{\exp(-a y / \mu) \, y^{a-1} (a/\mu)^{a}}{\Gamma(a)} }{% f(y;mu,shape) = exp(-shape * y / mu) * y^(shape-1) * (shape/mu)^shape / gamma(shape)} for \eqn{\mu > 0}{mu > 0}, \eqn{a > 0}{shape > 0} and \eqn{y > 0}. Here, \eqn{\Gamma(\cdot)}{gamma()} is the gamma function, as in \code{\link[base:Special]{gamma}}. The mean of \emph{Y} is \eqn{\mu=\mu}{mu=mu} (returned as the fitted values) with variance \eqn{\sigma^2 = \mu^2 / a}{sigma^2 = mu^2 / shape}. If \eqn{0 < a < 1}{0 < shape < 1} then the density has a pole at the origin and decreases monotonically as \eqn{y} increases. If \eqn{a > 1}{shape > 1} then the density is zero at the origin and is unimodal with mode at \eqn{y = \mu - \mu / a}{y = mu - mu / shape}; this can be achieved with \code{lshape="logloglink"}. By default, the two linear/additive predictors are \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and \eqn{\eta_2=\log(a)}{eta2=log(shape)}. This family function implements Fisher scoring and the working weight matrices are diagonal. This \pkg{VGAM} family function handles \emph{multivariate} responses, so that a matrix can be used as the response. The number of columns is the number of species, say, and \code{zero=-2} means that \emph{all} species have a shape parameter equalling a (different) intercept only. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ The parameterization of this \pkg{VGAM} family function is the 2-parameter gamma distribution described in the monograph McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ T. W. Yee } \note{ The response must be strictly positive. A moment estimator for the shape parameter may be implemented in the future. If \code{mu} and \code{shape} are vectors, then \code{rgamma(n = n, shape = shape, scale = mu/shape)} will generate random gamma variates of this parameterization, etc.; see \code{\link[stats]{GammaDist}}. % For \code{\link{cqo}} and \code{\link{cao}}, taking the logarithm % of the response means (approximately) a \code{\link{gaussianff}} family % may be used on the transformed data. } \seealso{ \code{\link{gamma1}} for the 1-parameter gamma distribution, \code{\link{gammaR}} for another parameterization of the 2-parameter gamma distribution that is directly matched with \code{\link[stats]{rgamma}}, \code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution, \code{\link{expexpff}}, \code{\link[stats]{GammaDist}}, \code{\link{gordlink}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}, \code{\link{negloglink}}.
} \examples{ # Essentially a 1-parameter gamma gdata <- data.frame(y = rgamma(n = 100, shape = exp(1))) fit1 <- vglm(y ~ 1, gamma1, data = gdata) fit2 <- vglm(y ~ 1, gamma2, data = gdata, trace = TRUE, crit = "coef") coef(fit2, matrix = TRUE) c(Coef(fit2), colMeans(gdata)) # Essentially a 2-parameter gamma gdata <- data.frame(y = rgamma(n = 500, rate = exp(-1), shape = exp(2))) fit2 <- vglm(y ~ 1, gamma2, data = gdata, trace = TRUE, crit = "coef") coef(fit2, matrix = TRUE) c(Coef(fit2), colMeans(gdata)) summary(fit2) } \keyword{models} \keyword{regression} VGAM/man/is.zero.Rd0000644000176200001440000000265013565414527013456 0ustar liggesusers\name{is.zero} \alias{is.zero} \alias{is.zero.matrix} \alias{is.zero.vglm} \title{Zero Constraint Matrices} \description{ Returns a logical vector from a test of whether an object such as a matrix or VGLM object corresponds to a 'zero' assumption. } \usage{ is.zero.matrix(object, \dots) is.zero.vglm(object, \dots) } \arguments{ \item{object}{ an object such as a coefficient matrix of a \code{\link{vglm}} object, or a \code{\link{vglm}} object. } \item{\dots}{ additional optional arguments. Currently unused. } } \details{ These functions test the effect of the \code{zero} argument on a \code{\link{vglm}} object or the coefficient matrix of a \code{\link{vglm}} object. The latter is obtained by \code{coef(vglmObject, matrix = TRUE)}. } \value{ A vector of logicals, testing whether each linear/additive predictor has the \code{zero} argument applied to it. It is \code{TRUE} if that linear/additive predictor is intercept-only, i.e., all other regression coefficients are set to zero. No checking is done for the intercept term at all, i.e., that it was estimated in the first place. } \seealso{ \code{\link{constraints}}, \code{\link{vglm}}. } \examples{ coalminers <- transform(coalminers, Age = (age - 42) / 5) fit <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero = NULL), coalminers) is.zero(fit) is.zero(coef(fit, matrix = TRUE)) } \keyword{models} \keyword{regression} VGAM/man/machinists.Rd0000644000176200001440000000311113565414527014220 0ustar liggesusers\name{machinists} \alias{machinists} \docType{data} \title{ Machinists Accidents } \description{ A small count data set involving 414 machinists in a three-month study of accidents, from around the end of WWI. } \usage{ data(machinists) } \format{ A data frame with the following variables. \describe{ \item{accidents}{ The number of accidents } \item{ofreq}{ Observed frequency, i.e., the number of machinists with that many accidents } } } \details{ The data was collected over a period of three months. There were 414 machinists in total. Data were also collected over six months, but they are not given here. } \source{ Incidence of Industrial Accidents. Report No. 4 (Industrial Fatigue Research Board), Stationery Office, London, 1919. } \references{ Greenwood, M. and Yule, G. U. (1920). An Inquiry into the Nature of Frequency Distributions Representative of Multiple Happenings with Particular Reference to the Occurrence of Multiple Attacks of Disease or of Repeated Accidents. \emph{Journal of the Royal Statistical Society}, \bold{83}, 255--279. } \seealso{ \code{\link[VGAM]{negbinomial}}, \code{\link[VGAM]{poissonff}}.
} \examples{ machinists mean(with(machinists, rep(accidents, times = ofreq))) var(with(machinists, rep(accidents, times = ofreq))) \dontrun{ barplot(with(machinists, ofreq), names.arg = as.character(with(machinists, accidents)), main = "Machinists accidents", col = "lightblue", las = 1, ylab = "Frequency", xlab = "accidents") } } \keyword{datasets} % % VGAM/man/otlogUC.Rd0000644000176200001440000000432513565414527013442 0ustar liggesusers\name{Otlog} \alias{Otlog} \alias{dotlog} \alias{potlog} \alias{qotlog} \alias{rotlog} \title{ One-truncated Logarithmic Distribution } \description{ Density, distribution function, quantile function, and random generation for the one-truncated logarithmic distribution. } \usage{ dotlog(x, shape, log = FALSE) potlog(q, shape, log.p = FALSE) qotlog(p, shape) rotlog(n, shape) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{ Vector of quantiles. For the density, it should be a vector with integer values \eqn{> 1} in order for the probabilities to be positive. } \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{shape}{ The parameter value \eqn{c} described in \code{\link{logff}}. Here it is called \code{shape} because \eqn{0 < shape < 1}{0 < shape < 1}. } \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required.} \item{shape1.a}{shape parameter.} \item{scale}{scale parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dfisk} gives the density, \code{pfisk} gives the distribution function, \code{qfisk} gives the quantile function, and \code{rfisk} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{fisk}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The Fisk distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{fisk}}, \code{\link{genbetaII}}. } \examples{ fdata <- data.frame(y = rfisk(n = 1000, shape = exp(1), scale = exp(2))) fit <- vglm(y ~ 1, fisk(lss = FALSE), data = fdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) } \keyword{distribution} VGAM/man/vglm.control.Rd0000644000176200001440000002660613565414527014510 0ustar liggesusers\name{vglm.control} \alias{vglm.control} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Control Function for vglm() } \description{ Algorithmic constants and parameters for running \code{vglm} are set using this function. } \usage{ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE, criterion = names(.min.criterion.VGAM), epsilon = 1e-07, half.stepsizing = TRUE, maxit = 30, noWarning = FALSE, stepsize = 1, save.weights = FALSE, trace = FALSE, wzepsilon = .Machine$double.eps^0.75, xij = NULL, ...) } %- maybe also `usage' for other objects documented here. \arguments{ \item{checkwz}{ logical indicating whether the diagonal elements of the working weight matrices should be checked to see whether they are sufficiently positive, i.e., greater than \code{wzepsilon}. If not, any values less than \code{wzepsilon} are replaced with this value. } \item{Check.rank}{ logical indicating whether the rank of the VLM matrix should be checked.
If this is not of full column rank then the results are not to be trusted. The default is to give an error message if the VLM matrix is not of full column rank. } \item{Check.cm.rank}{ logical indicating whether the rank of each constraint matrix should be checked. If this is not of full column rank then an error will occur. Under no circumstances should any constraint matrix have a rank less than the number of columns. } \item{criterion}{ character variable describing what criterion is to be used to test for convergence. The possibilities are listed in \code{.min.criterion.VGAM}, but most family functions only implement a few of these. } \item{epsilon}{ positive convergence tolerance epsilon. Roughly speaking, the Newton-Raphson/Fisher-scoring iterations are assumed to have converged when two successive \code{criterion} values are within \code{epsilon} of each other. } \item{half.stepsizing}{ logical indicating if half-stepsizing is allowed. For example, in maximizing a log-likelihood, if the next iteration has a log-likelihood that is less than the current value of the log-likelihood, then a half step will be taken. If the log-likelihood is still less than at the current position, a quarter-step will be taken etc. Eventually a step will be taken so that an improvement is made to the convergence criterion. \code{half.stepsizing} is ignored if \code{criterion == "coefficients"}. } \item{maxit}{ maximum number of (usually Fisher-scoring) iterations allowed. Sometimes Newton-Raphson is used. } \item{noWarning}{ logical indicating whether to suppress a warning if convergence is not obtained within \code{maxit} iterations. This is ignored if \code{maxit = 1} is set. } \item{stepsize}{ usual step size to be taken between each Newton-Raphson/Fisher-scoring iteration. It should be a value between 0 and 1, where a value of unity corresponds to an ordinary step. A value of 0.5 means half-steps are taken. Setting a value near zero will cause convergence to be generally slow but may help increase the chances of successful convergence for some family functions. } \item{save.weights}{ logical indicating whether the \code{weights} slot of a \code{"vglm"} object will be saved on the object. If not, it will be reconstructed when needed, e.g., \code{summary}. Some family functions have \code{save.weights = TRUE} and others have \code{save.weights = FALSE} in their control functions. } \item{trace}{ logical indicating if output should be produced for each iteration. Setting \code{trace = TRUE} is recommended in general because \pkg{VGAM} fits a very broad variety of models and distributions, and for some of them, convergence is intrinsically more difficult. Monitoring convergence can help check that the solution is reasonable or that a problem has occurred. It may suggest better initial values are needed, that invalid assumptions have been made, or that the model is inappropriate for the data, etc. } \item{wzepsilon}{ small positive number used to test whether the diagonals of the working weight matrices are sufficiently positive. } \item{xij}{ A formula or a list of formulas. Each formula has an RHS giving \eqn{M} terms making up a covariate-dependent term (whose name is the response). That is, it creates a variable that takes on different values for each linear/additive predictor, e.g., the ocular pressure of each eye. The \eqn{M} terms must be unique; use \code{\link{fill1}}, \code{fill2}, \code{fill3}, etc. if necessary.
Each formula should have a response which is taken as the name of that variable, and the \eqn{M} terms are enumerated in sequential order. Each of the \eqn{M} terms multiplies each successive row of the constraint matrix. When \code{xij} is used, the use of \code{form2} is also required to give \emph{every} term used by the model. The function \code{\link{Select}} can be used to select variables beginning with the same character string. } % \item{jix}{ % A formula or a list of formulas specifying % which explanatory variables are to be plotted for each \code{xij} term. % For example, in the code below, % the term \code{BS(dumm)} could be plotted against either % \code{dum1} or \code{dum2}, therefore % either \code{jix=dum1} or \code{jix=dum2} are ok. % This argument is made use of by \code{plotvgam()}. % Each formula has a RHS giving \eqn{r_k} unique terms, % one for each column of the constraint matrix. % Each formula should have a response that matches the % \code{formula} argument. % The argument \code{jix} is a reversal of \code{xij} to emphasize % the same framework for handling terms involving covariates that have % different values for each linear/additive predictor. % % } \item{\dots}{ other parameters that may be picked up from control functions that are specific to the \pkg{VGAM} family function. } } \details{ Most of the control parameters are used within \code{vglm.fit} and you will have to look at that to understand the full details. Setting \code{save.weights = FALSE} is useful for some models because the \code{weights} slot of the object is the largest and so less memory is used to store the object. However, for some \pkg{VGAM} family functions, it is necessary to set \code{save.weights = TRUE} because the \code{weights} slot cannot be reconstructed later. } \value{ A list with components matching the input names. A little error checking is done, but not much. The list is assigned to the \code{control} slot of \code{vglm} objects. } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{ Thomas W. Yee} \note{ Reiterating from above, setting \code{trace = TRUE} is recommended in general. In Example 2 below there are two covariates that have linear/additive predictor specific values. These are handled using the \code{xij} argument. } \section{Warning}{ For some applications the default convergence criterion should be tightened. Setting something like \code{criterion = "coef", epsilon = 1e-09} is one way to achieve this, and also add \code{trace = TRUE} to monitor the convergence. Setting \code{maxit} to some higher number is usually not needed, and needing to do so suggests something is wrong, e.g., an ill-conditioned model, over-fitting or under-fitting. } \seealso{ \code{\link{vglm}}, \code{\link{fill1}}. The author's homepage has further documentation about the \code{xij} argument; see also \code{\link{Select}}. } \examples{ # Example 1. pneumo <- transform(pneumo, let = log(exposure.time)) vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo, crit = "coef", step = 0.5, trace = TRUE, epsil = 1e-8, maxit = 40) # Example 2. The use of the xij argument (simple case).
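# The following comments sketch what this example illustrates (they restate
# the xij documentation above; the model itself follows unchanged).
# With a 4-column response, dirichlet() has M = 4 linear predictors.
# The xij list below gives the jth linear predictor its own version of the
# covariates Z and X (namely zj and xj), one term per row of the constraint
# matrix, and form2 must mention every term used anywhere in the model so
# that the big VLM model matrix can be assembled.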
ymat <- rdiric(n <- 1000, shape = rep(exp(2), len = 4)) mydat <- data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n), x4 = runif(n), z1 = runif(n), z2 = runif(n), z3 = runif(n), z4 = runif(n)) mydat <- transform(mydat, X = x1, Z = z1) mydat <- round(mydat, digits = 2) fit2 <- vglm(ymat ~ X + Z, dirichlet(parallel = TRUE), data = mydat, trace = TRUE, xij = list(Z ~ z1 + z2 + z3 + z4, X ~ x1 + x2 + x3 + x4), form2 = ~ Z + z1 + z2 + z3 + z4 + X + x1 + x2 + x3 + x4) head(model.matrix(fit2, type = "lm")) # LM model matrix head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix coef(fit2) coef(fit2, matrix = TRUE) max(abs(predict(fit2)-predict(fit2, new = mydat))) # Predicts correctly summary(fit2) \dontrun{ # plotvgam(fit2, se = TRUE, xlab = "x1", which.term = 1) # Bug! # plotvgam(fit2, se = TRUE, xlab = "z1", which.term = 2) # Bug! plotvgam(fit2, xlab = "x1") # Correct plotvgam(fit2, xlab = "z1") # Correct } # Example 3. The use of the xij argument (complex case). set.seed(123) coalminers <- transform(coalminers, Age = (age - 42) / 5, dum1 = round(runif(nrow(coalminers)), digits = 2), dum2 = round(runif(nrow(coalminers)), digits = 2), dum3 = round(runif(nrow(coalminers)), digits = 2), dumm = round(runif(nrow(coalminers)), digits = 2)) BS <- function(x, ..., df = 3) sm.bs(c(x,...), df = df)[1:length(x),,drop = FALSE] NS <- function(x, ..., df = 3) sm.ns(c(x,...), df = df)[1:length(x),,drop = FALSE] # Equivalently... BS <- function(x, ..., df = 3) head(sm.bs(c(x,...), df = df), length(x), drop = FALSE) NS <- function(x, ..., df = 3) head(sm.ns(c(x,...), df = df), length(x), drop = FALSE) fit3 <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1, dum2), fam = binom2.or(exchangeable = TRUE, zero = 3), xij = list(NS(dum1, dum2) ~ NS(dum1, dum2) + NS(dum2, dum1) + fill(NS( dum1))), form2 = ~ NS(dum1, dum2) + NS(dum2, dum1) + fill(NS(dum1)) + dum1 + dum2 + dum3 + Age + age + dumm, data = coalminers, trace = TRUE) head(model.matrix(fit3, type = "lm")) # LM model matrix head(model.matrix(fit3, type = "vlm")) # Big VLM model matrix coef(fit3) coef(fit3, matrix = TRUE) \dontrun{ plotvgam(fit3, se = TRUE, lcol = "red", scol = "blue", xlab = "dum1") } } \keyword{models} \keyword{regression} % zz 20090506 put elsewhere: % % %# Example 4. The use of the xij argument (complex case). %# Here is one method to handle the xij argument with a term that %# produces more than one column in the model matrix. %# The constraint matrix for 'op' has one column. %POLY3 <- function(x, ...) { % # A cubic; ensures that the basis functions are the same. % poly(c(x,...), 3)[1:length(x),] %} % %\dontrun{ %fit4 <- vglm(cbind(leye,reye) ~ POLY3(op), trace=TRUE, % fam = binom2.or(exchangeable=TRUE, zero=3), data=eyesdata, % xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) + % fill(POLY3(lop,rop))), % form2 = ~ POLY3(op) + POLY3(lop,rop) + POLY3(rop,lop) + % fill(POLY3(lop,rop))) %coef(fit4) %coef(fit4, matrix=TRUE) %head(predict(fit4)) %} VGAM/man/tikuv.Rd0000644000176200001440000000761113565414527013231 0ustar liggesusers\name{tikuv} \alias{tikuv} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Short-tailed Symmetric Distribution Family Function } \description{ Fits the short-tailed symmetric distribution of Tiku and Vaughan (1999). } \usage{ tikuv(d, lmean = "identitylink", lsigma = "loglink", isigma = NULL, zero = "sigma") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{d}{ The \eqn{d} parameter. It must be a single numeric value less than 2. 
Then \eqn{h = 2-d>0} is another parameter. } \item{lmean, lsigma}{ Link functions for the mean and standard deviation parameters of the usual univariate normal distribution (see \bold{Details} below). They are \eqn{\mu}{mu} and \eqn{\sigma}{sigma} respectively. See \code{\link{Links}} for more choices. } % \item{emean, esigma}{ % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. %emean = list(), esigma = list(), % % } \item{isigma}{ Optional initial value for \eqn{\sigma}{sigma}. A \code{NULL} means a value is computed internally. } \item{zero}{ A vector specifying which linear/additive predictors are modelled as intercept-only. The values can be from the set \{1,2\}, corresponding respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}. If \code{zero = NULL} then all linear/additive predictors are modelled as a linear combination of the explanatory variables. For many data sets having \code{zero = 2} is a good idea. See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The short-tailed symmetric distribution of Tiku and Vaughan (1999) has a probability density function that can be written \deqn{f(y) = \frac{K}{\sqrt{2\pi} \sigma} \left[ 1 + \frac{1}{2h} \left( \frac{y-\mu}{\sigma} \right)^2 \right]^2 \exp\left( -\frac12 (y-\mu)^2 / \sigma^2 \right) }{% f(y) = (K/(sqrt(2*pi)*sigma)) * [1 + (1/(2*h)) * ((y-mu)/sigma)^2]^2 * exp( -0.5 * (y-mu)^2/ sigma^2) } where \eqn{h=2-d>0}, \eqn{K} is a function of \eqn{h}, \eqn{-\infty < y < \infty}{-Inf < y < Inf}, \eqn{\sigma > 0}{sigma > 0}. The mean of \eqn{Y} is \eqn{E(Y) = \mu}{E(Y) = mu} and this is returned as the fitted values. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Akkaya, A. D. and Tiku, M. L. (2008) Short-tailed distributions and inliers. \emph{Test}, \bold{17}, 282--296. Tiku, M. L. and Vaughan, D. C. (1999) A family of short-tailed symmetric distributions. \emph{Technical report, McMaster University, Canada}. } \author{ Thomas W. Yee } \note{ The density function is the product of a univariate normal density and a polynomial in the response \eqn{y}. The distribution is bimodal if \eqn{d>0}, otherwise it is unimodal. A normal distribution arises as the limit as \eqn{d} approaches \eqn{-\infty}{-Inf}, i.e., as \eqn{h} approaches \eqn{\infty}{Inf}. Fisher scoring is implemented. After fitting, the value of \code{d} is stored in \code{@misc} with component name \code{d}. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned, e.g., when \eqn{d} is very close to 2 or approaches \code{-Inf}. } \seealso{ \code{\link{dtikuv}}, \code{\link{uninormal}}. } \examples{ m <- 1.0; sigma <- exp(0.5) tdata <- data.frame(y = rtikuv(n = 1000, d = 1, m = m, s = sigma)) tdata <- transform(tdata, sy = sort(y)) fit <- vglm(y ~ 1, tikuv(d = 1), data = tdata, trace = TRUE) coef(fit, matrix = TRUE) (Cfit <- Coef(fit)) with(tdata, mean(y)) \dontrun{ with(tdata, hist(y, prob = TRUE)) lines(dtikuv(sy, d = 1, m = Cfit[1], s = Cfit[2]) ~ sy, data = tdata, col = "orange") } } \keyword{models} \keyword{regression} VGAM/man/clo.Rd0000644000176200001440000000264613565414527012647 0ustar liggesusers\name{clo} \alias{clo} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Redirects the User to rrvglm() } \description{ Redirects the user to the function \code{\link{rrvglm}}. } \usage{ clo(...)
} %- maybe also 'usage' for other objects documented here. \arguments{ \item{\dots}{ Ignored. } } \details{ CLO stands for \emph{constrained linear ordination}, and is fitted with a statistical class of models called \emph{reduced-rank vector generalized linear models} (RR-VGLMs). It allows for generalized reduced-rank regression in that response types such as Poisson counts and presence/absence data can be handled. Currently in the \pkg{VGAM} package, \code{\link{rrvglm}} is used to fit RR-VGLMs. However, the author's opinion is that linear responses to a latent variable (composite environmental gradient) are not as common as unimodal responses, and therefore \code{\link{cqo}} is often more appropriate. The new CLO/CQO/CAO nomenclature is described in Yee (2006). } \value{ Nothing is returned; an error message is issued. } \references{ Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{Thomas W. Yee} \seealso{ \code{\link{rrvglm}}, \code{\link{cqo}}. } \examples{ \dontrun{ clo() } } \keyword{models} \keyword{regression} VGAM/man/nakagamiUC.Rd0000644000176200001440000000515213565414527014065 0ustar liggesusers\name{Nakagami} \alias{Nakagami} \alias{dnaka} \alias{pnaka} \alias{qnaka} \alias{rnaka} \title{Nakagami Distribution } \description{ Density, cumulative distribution function, quantile function and random generation for the Nakagami distribution. } \usage{ dnaka(x, scale = 1, shape, log = FALSE) pnaka(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qnaka(p, scale = 1, shape, ...) rnaka(n, scale = 1, shape, Smallno = 1.0e-6) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. % Must be a positive integer of length 1. } \item{scale, shape}{ arguments for the parameters of the distribution. See \code{\link{nakagami}} for more details. For \code{rnaka}, arguments \code{shape} and \code{scale} must be of length 1. } \item{Smallno}{ Numeric, a small value used by the rejection method for determining the upper limit of the distribution. That is, \code{pnaka(U) > 1-Smallno} where \code{U} is the upper limit. } \item{\ldots}{ Arguments that can be passed into \code{\link[stats]{uniroot}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dnaka} gives the density, \code{pnaka} gives the cumulative distribution function, \code{qnaka} gives the quantile function, and \code{rnaka} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{nakagami}} for more details. } %\note{ % %} \seealso{ \code{\link{nakagami}}.
} \examples{ \dontrun{ x <- seq(0, 3.2, len = 200) plot(x, dgamma(x, shape = 1), type = "n", col = "black", ylab = "", ylim = c(0,1.5), main = "dnaka(x, shape = shape)") lines(x, dnaka(x, shape = 1), col = "orange") lines(x, dnaka(x, shape = 2), col = "blue") lines(x, dnaka(x, shape = 3), col = "green") legend(2, 1.0, col = c("orange","blue","green"), lty = rep(1, len = 3), legend = paste("shape =", c(1, 2, 3))) plot(x, pnorm(x), type = "n", col = "black", ylab = "", ylim = 0:1, main = "pnaka(x, shape = shape)") lines(x, pnaka(x, shape = 1), col = "orange") lines(x, pnaka(x, shape = 2), col = "blue") lines(x, pnaka(x, shape = 3), col = "green") legend(2, 0.6, col = c("orange","blue","green"), lty = rep(1, len = 3), legend = paste("shape =", c(1, 2, 3))) } probs <- seq(0.1, 0.9, by = 0.1) pnaka(qnaka(p = probs, shape = 2), shape = 2) - probs # Should be all 0 } \keyword{distribution} VGAM/man/zeta.Rd0000644000176200001440000001073013565414527013026 0ustar liggesusers\name{zeta} \alias{zeta} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Riemann's Zeta Function } \description{ Computes Riemann's zeta function and its first two derivatives. It can also compute the Hurwitz zeta function. } \usage{ zeta(x, deriv = 0, shift = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A complex-valued vector/matrix; if complex, its real parts must be \eqn{\geq 1}{>= 1}. Otherwise, \code{x} may be any real value. It is called \eqn{s} below. If \code{deriv} is 1 or 2 then \code{x} must be real and positive. } \item{deriv}{ An integer equalling 0 or 1 or 2, which is the order of the derivative. The default means it is computed ordinarily. } \item{shift}{ Positive and numeric, called \eqn{A} below. Allows for the Hurwitz zeta to be returned. The default corresponds to the Riemann formula. } } \details{ The (Riemann) formula for real \eqn{s} is \deqn{\sum_{n=1}^{\infty} 1 / n^s.}{% sum_{n=1}^Inf 1 / n^s.} While the usual definition involves an infinite series that converges when the real part of the argument is \eqn{> 1}, more efficient methods have been devised to compute the value. In particular, this function uses Euler-Maclaurin summation. Theoretically, the zeta function can be computed over the whole complex plane because of analytic continuation. The (Riemann) formula used here for analytic continuation is \deqn{\zeta(s) = 2^s \pi^{s-1} \sin(\pi s/2) \Gamma(1-s) \zeta(1-s).}{% zeta(s) = 2^s * pi^(s-1) * sin(pi*s/2) * gamma(1-s) * zeta(1-s).} This is actually one of several formulas, but this one was discovered by Riemann himself and is called the \emph{functional equation}. The Hurwitz zeta function for real \eqn{s > 0} is \deqn{\sum_{n=0}^{\infty} 1 / (A + n)^s.}{% sum_{n=0}^Inf 1 / (A + n)^s.} where \eqn{0 < A} is known here as the \code{shift}. Since \eqn{A=1} by default, this function returns Riemann's zeta function unless \code{shift} is assigned some other value. Currently derivatives are unavailable for the Hurwitz zeta function. } \section{Warning}{ This function has not been fully tested, especially the derivatives. In particular, analytic continuation does not work here for complex \code{x} with \code{Re(x)<1} because currently the \code{\link[base:Special]{gamma}} function does not handle complex arguments. } \value{ The default is a vector/matrix of computed values of Riemann's zeta function. If \code{shift} contains values not equal to 1, then this is Hurwitz's zeta function. % The derivative is attached as an attribute zz. } \references{ Riemann, B.
(1859) Ueber die Anzahl der Primzahlen unter einer gegebenen Grosse. \emph{Monatsberichte der Berliner Akademie, November 1859}. Edwards, H. M. (1974) \emph{Riemann's Zeta Function}. Academic Press: New York. Markman, B. (1965) The Riemann zeta function. \emph{BIT}, \bold{5}, 138--141. Abramowitz, M. and Stegun, I. A. (1972) \emph{Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables}, New York: Dover Publications Inc. } \author{ T. W. Yee, with the help of Garry J. Tee. } \note{ Estimation of the parameter of the zeta distribution can be achieved with \code{\link{zetaff}}. } \seealso{ \code{\link{zetaff}}, \code{\link{oazeta}}, \code{\link{oizeta}}, \code{\link{otzeta}}, \code{\link{lerch}}, \code{\link[base:Special]{gamma}}. } \examples{ zeta(2:10) \dontrun{ curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange", las = 1, main = expression({zeta}(x))) curve(zeta, 1.2, 12, add = TRUE, col = "orange") abline(v = 0, h = c(0, 1), lty = "dashed", col = "gray") curve(zeta, -14, -0.4, col = "orange", main = expression({zeta}(x))) abline(v = 0, h = 0, lty = "dashed", col = "gray") # Close up plot x <- seq(0.04, 0.8, len = 100) # Plot of the first derivative plot(x, zeta(x, deriv = 1), type = "l", las = 1, col = "blue", xlim = c(0.04, 3), ylim = c(-6, 0), main = "zeta'(x)") x <- seq(1.2, 3, len = 100) lines(x, zeta(x, deriv = 1), col = "blue") abline(v = 0, h = 0, lty = "dashed", col = "gray") } zeta(2) - pi^2 / 6 # Should be 0 zeta(4) - pi^4 / 90 # Should be 0 zeta(6) - pi^6 / 945 # Should be 0 zeta(8) - pi^8 / 9450 # Should be 0 zeta(0, deriv = 1) + 0.5 * log(2*pi) # Should be 0 } \keyword{math} % curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange") % curve(zeta, 1.2, 12, add = TRUE, col = "orange") % abline(v = 0, h = c(0,1), lty = "dashed") VGAM/man/felix.Rd0000644000176200001440000000323213565414527013171 0ustar liggesusers\name{felix} \alias{felix} %- Also NEED an '\alias' for EACH other topic documented here. \title{Felix Distribution Family Function} \description{ Estimates the parameter of a Felix distribution by maximum likelihood estimation. } \usage{ felix(lrate = extlogitlink(min = 0, max = 0.5), imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lrate}{ Link function for the parameter, called \eqn{a} below; see \code{\link{Links}} for more choices and for general information. } \item{imethod}{ See \code{\link{CommonVGAMffArguments}}. Valid values are 1, 2, 3 or 4. } } \details{ The Felix distribution is an important basic Lagrangian distribution. The density function is \deqn{f(y;a) = \frac{ 1 }{((y-1)/2)!} y^{(y-3)/2} a^{(y-1)/2} \exp(-ay) }{% f(y;a) = (1 / ((y-1)/2)!) * y^((y-3)/2) * a^((y-1)/2) * exp(-ay)} where \eqn{y=1,3,5,\ldots} and \eqn{0 < a < 0.5}. The mean is \eqn{1/(1-2a)} (returned as the fitted values). Fisher scoring is implemented. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Consul, P. C. and Famoye, F. (2006) \emph{Lagrangian Probability Distributions}, Boston, USA: Birkhauser. } \author{ T. W. Yee } %\note{ % %} \seealso{ \code{\link{dfelix}}, \code{\link{borel.tanner}}. } \examples{ fdata <- data.frame(y = 2 * rpois(n = 200, 1) + 1) # Not real data! 
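# A small optional check (a sketch, not in the original example): the Felix
# pmf has support on the odd integers 1, 3, 5, ..., and dfelix() is assumed
# to take a 'rate' argument, as documented at dfelix.
sum(dfelix(seq(1, 199, by = 2), rate = 0.25))  # Should be close to 1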
fit <- vglm(y ~ 1, felix, data = fdata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/foldnormal.Rd0000644000176200001440000001131213565414527014215 0ustar liggesusers\name{foldnormal} \alias{foldnormal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Folded Normal Distribution Family Function } \description{ Fits a (generalized) folded (univariate) normal distribution. } \usage{ foldnormal(lmean = "identitylink", lsd = "loglink", imean = NULL, isd = NULL, a1 = 1, a2 = 1, nsimEIM = 500, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmean, lsd}{ Link functions for the mean and standard deviation parameters of the usual univariate normal distribution. They are \eqn{\mu}{mu} and \eqn{\sigma}{sigma} respectively. See \code{\link{Links}} for more choices. } % \item{emean, esd}{ % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. % emean=list(), esd=list(), % } \item{imean, isd}{ Optional initial values for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}. A \code{NULL} means a value is computed internally. See \code{\link{CommonVGAMffArguments}}. } \item{a1, a2}{ Positive weights, called \eqn{a_1}{a1} and \eqn{a_2}{a2} below. Each must be of length 1. } \item{nsimEIM, imethod, zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ If a random variable has an ordinary univariate normal distribution then the absolute value of that random variable has an ordinary \emph{folded normal distribution}. That is, the sign has not been recorded; only the magnitude has been measured. More generally, suppose \eqn{X} is normal with mean \code{mean} and standard deviation \code{sd}. Let \eqn{Y=\max(a_1 X, -a_2 X)}{Y=max(a1*X, -a2*X)} where \eqn{a_1}{a1} and \eqn{a_2}{a2} are positive weights. This means that \eqn{Y = a_1 X}{Y = a1*X} for \eqn{X > 0}, and \eqn{Y = a_2 X}{Y = a2*X} for \eqn{X < 0}. Then \eqn{Y} is said to have a \emph{generalized folded normal distribution}. The ordinary folded normal distribution corresponds to the special case \eqn{a_1 = a_2 = 1}{a1 = a2 = 1}. The probability density function of the ordinary folded normal distribution can be written \code{dnorm(y, mean, sd) + dnorm(y, -mean, sd)} for \eqn{y \ge 0}. By default, \code{mean} and \code{log(sd)} are the linear/additive predictors. Having \code{mean=0} and \code{sd=1} results in the \emph{half-normal} distribution. The mean of an ordinary folded normal distribution is \deqn{E(Y) = \sigma \sqrt{2/\pi} \exp(-\mu^2/(2\sigma^2)) + \mu [1-2\Phi(-\mu/\sigma)] }{% E(Y) = sigma*sqrt(2/pi)*exp(-mu^2/(2*sigma^2)) + mu*[1-2*Phi(-mu/sigma)] } and these are returned as the fitted values. Here, \eqn{\Phi()}{Phi} is the cumulative distribution function of a standard normal (\code{\link[stats:Normal]{pnorm}}). } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Lin, P. C. (2005) Application of the generalized folded-normal distribution to the process capability measures. \emph{International Journal of Advanced Manufacturing Technology}, \bold{26}, 825--830. Johnson, N. L. (1962) The folded normal distribution: accuracy of estimation by maximum likelihood. \emph{Technometrics}, \bold{4}, 249--256. } \author{ Thomas W. 
Yee } \note{ The response variable for this family function is the same as \code{\link{uninormal}} except positive values are required. Reasonably good initial values are needed. Fisher scoring using simulation is implemented. See \code{\link{CommonVGAMffArguments}} for general information about many of these arguments. Yet to do: implement the results of Johnson (1962) which give expressions for the EIM, albeit under a different parameterization. Also, one element of the EIM appears to require numerical integration. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned. It is recommended that several different initial values be used to help avoid local solutions. } \seealso{ \code{\link{rfoldnorm}}, \code{\link{uninormal}}, \code{\link[stats:Normal]{dnorm}}, \code{\link{skewnormal}}. } \examples{ \dontrun{ m <- 2; SD <- exp(1) fdata <- data.frame(y = rfoldnorm(n <- 1000, m = m, sd = SD)) hist(with(fdata, y), prob = TRUE, main = paste("foldnormal(m = ", m, ", sd = ", round(SD, 2), ")")) fit <- vglm(y ~ 1, foldnormal, data = fdata, trace = TRUE) coef(fit, matrix = TRUE) (Cfit <- Coef(fit)) # Add the fit to the histogram: mygrid <- with(fdata, seq(min(y), max(y), len = 200)) lines(mygrid, dfoldnorm(mygrid, Cfit[1], Cfit[2]), col = "orange") } } \keyword{models} \keyword{regression} VGAM/man/nakagami.Rd0000644000176200001440000000711013565414527013631 0ustar liggesusers\name{nakagami} \alias{nakagami} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Nakagami Regression Family Function } \description{ Estimation of the two parameters of the Nakagami distribution by maximum likelihood estimation. } \usage{ nakagami(lscale = "loglink", lshape = "loglink", iscale = 1, ishape = NULL, nowarning = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{nowarning}{ Logical. Suppress a warning? } \item{lscale, lshape}{ Parameter link functions applied to the \emph{scale} and \emph{shape} parameters. Log links ensure they are positive. See \code{\link{Links}} for more choices and information. } \item{iscale, ishape}{ Optional initial values for the scale and shape parameters. For \code{ishape}, a \code{NULL} value means it is obtained in the \code{initialize} slot based on the value of \code{iscale}. For \code{iscale}, assigning a \code{NULL} means a value is obtained in the \code{initialize} slot; however, setting another numerical value is recommended if convergence fails or is too slow. } } \details{ The Nakagami distribution, which is useful for modelling wireless systems such as radio links, can be written \deqn{f(y) = 2 (shape/scale)^{shape} y^{2 \times shape-1} \exp(-shape \times y^2/scale) / \Gamma(shape)}{% 2 * (shape/scale)^shape * y^(2*shape-1) * exp(-shape*y^2/scale) / gamma(shape)} for \eqn{y > 0}, \eqn{shape > 0}, \eqn{scale > 0}. The mean of \eqn{Y} is \eqn{\sqrt{scale/shape} \times \Gamma(shape+0.5) / \Gamma(shape)}{sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)} and these are returned as the fitted values. By default, the linear/additive predictors are \eqn{\eta_1=\log(scale)}{eta1=log(scale)} and \eqn{\eta_2=\log(shape)}{eta2=log(shape)}. Fisher scoring is implemented. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Nakagami, M.
(1960) The \emph{m}-distribution: a general formula of intensity distribution of rapid fading, pp. 3--36 in: \emph{Statistical Methods in Radio Wave Propagation}. W. C. Hoffman, Ed., New York: Pergamon. } \author{ T. W. Yee } \note{ The Nakagami distribution is also known as the Nakagami-\emph{m} distribution, where \eqn{m=shape} here. Special cases: \eqn{m=0.5} is a one-sided Gaussian distribution and \eqn{m=1} is a Rayleigh distribution. The second moment is \eqn{E(Y^2)=scale}{E(Y^2) = scale}. If \eqn{Y} has a Nakagami distribution with parameters \emph{shape} and \emph{scale} then \eqn{Y^2} has a gamma distribution with shape parameter \emph{shape} and scale parameter \emph{scale/shape}. } \seealso{ \code{\link{rnaka}}, \code{\link{gamma2}}, \code{\link{rayleigh}}. } \examples{ nn <- 1000; shape <- exp(0); Scale <- exp(1) ndata <- data.frame(y1 = sqrt(rgamma(nn, shape = shape, scale = Scale/shape))) nfit <- vglm(y1 ~ 1, nakagami, data = ndata, trace = TRUE, crit = "coef") ndata <- transform(ndata, y2 = rnaka(nn, scale = Scale, shape = shape)) nfit <- vglm(y2 ~ 1, nakagami(iscale = 3), data = ndata, trace = TRUE) head(fitted(nfit)) with(ndata, mean(y2)) coef(nfit, matrix = TRUE) (Cfit <- Coef(nfit)) \dontrun{ sy <- with(ndata, sort(y2)) hist(with(ndata, y2), prob = TRUE, main = "", xlab = "y", ylim = c(0, 0.6), col = "lightblue") lines(dnaka(sy, scale = Cfit["scale"], shape = Cfit["shape"]) ~ sy, data = ndata, col = "orange") } } \keyword{models} \keyword{regression} VGAM/man/qtplot.gumbel.Rd0000644000176200001440000000760613565414527014664 0ustar liggesusers\name{qtplot.gumbel} \alias{qtplot.gumbel} \alias{qtplot.gumbelff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Quantile Plot for Gumbel Regression } \description{ Plots quantiles associated with a Gumbel model. } \usage{ qtplot.gumbel(object, show.plot = TRUE, y.arg = TRUE, spline.fit = FALSE, label = TRUE, R = object@misc$R, percentiles = object@misc$percentiles, add.arg = FALSE, mpv = object@misc$mpv, xlab = NULL, ylab = "", main = "", pch = par()$pch, pcol.arg = par()$col, llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd, tcol.arg = par()$col, tadj = 1, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \pkg{VGAM} extremes model of the Gumbel type, produced by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}, and with a family function that is either \code{\link{gumbel}} or \code{\link{gumbelff}}. } \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will be done. } \item{y.arg}{ Logical. Add the raw data on to the plot? } \item{spline.fit}{ Logical. Use a spline fit through the fitted percentiles? This can be useful if there are large gaps between some values along the covariate. } \item{label}{ Logical. Label the percentiles? } \item{R}{ See \code{\link{gumbel}}. } \item{percentiles}{ See \code{\link{gumbel}}. } \item{add.arg}{ Logical. Add the plot to an existing plot? } \item{mpv}{ See \code{\link{gumbel}}. } \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. } \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. } \item{main}{ Title of the plot. See \code{\link[graphics]{title}}. } \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. } \item{pcol.arg}{ Color of the points. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{llty.arg}{ Line type. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{lcol.arg}{ Color of the lines.
See the \code{col} argument of \code{\link[graphics]{par}}. } \item{llwd.arg}{ Line width. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{tcol.arg}{ Color of the text (if \code{label} is \code{TRUE}). See the \code{col} argument of \code{\link[graphics]{par}}. } \item{tadj}{ Text justification. See the \code{adj} argument of \code{\link[graphics]{par}}. } \item{\dots}{ Arguments passed into the \code{plot} function when setting up the entire plot. Useful arguments here include \code{sub} and \code{las}. } } \details{ There should be a single covariate such as time. The quantiles specified by \code{percentiles} are plotted. } \value{ The \code{object}, with a list called \code{qtplot} added to its \code{post} slot. (If \code{show.plot = FALSE} then just the list is returned.) The list contains components \item{fitted.values}{ The percentiles of the response, possibly including the MPV. } \item{percentiles}{ The percentiles (a small vector of values between 0 and 100). } } %\references{ ~put references to the literature/web site here ~ } \author{ Thomas W. Yee } \note{ Unlike \code{\link{gumbel}}, one cannot have \code{percentiles = NULL}. } \seealso{ \code{\link{gumbel}}. } \examples{ ymat <- as.matrix(venice[, paste("r", 1:10, sep = "")]) fit1 <- vgam(ymat ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE), data = venice, trace = TRUE, na.action = na.pass) head(fitted(fit1)) \dontrun{ par(mfrow = c(1, 1), bty = "l", xpd = TRUE, las = 1) qtplot(fit1, mpv = TRUE, lcol = c(1, 2, 5), tcol = c(1, 2, 5), lwd = 2, pcol = "blue", tadj = 0.4, ylab = "Sea level (cm)") qtplot(fit1, perc = 97, mpv = FALSE, lcol = 3, tcol = 3, lwd = 2, tadj = 0.4, add = TRUE) -> saved head(saved@post$qtplot$fitted) } } \keyword{graphs} \keyword{models} \keyword{regression} VGAM/man/zipebcom.Rd0000644000176200001440000002070013565414527013671 0ustar liggesusers\name{zipebcom} \alias{zipebcom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Exchangeable Bivariate cloglog Odds-ratio Model From a Zero-inflated Poisson Distribution } \description{ Fits an exchangeable bivariate odds-ratio model to two binary responses with a complementary log-log link. The data are assumed to come from a zero-inflated Poisson distribution that has been converted to presence/absence. } \usage{ zipebcom(lmu12 = "clogloglink", lphi12 = "logitlink", loratio = "loglink", imu12 = NULL, iphi12 = NULL, ioratio = NULL, zero = c("phi12", "oratio"), tol = 0.001, addRidge = 0.001) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu12, imu12}{ Link function, extra argument and optional initial values for the first (and second) marginal probabilities. Argument \code{lmu12} should be left alone. Argument \code{imu12} may be of length 2 (one element for each response). } \item{lphi12}{ Link function applied to the \eqn{\phi}{phi} parameter of the zero-inflated Poisson distribution (see \code{\link{zipoisson}}). See \code{\link{Links}} for more choices. } \item{loratio}{ Link function applied to the odds ratio. See \code{\link{Links}} for more choices. } \item{iphi12, ioratio}{ Optional initial values for \eqn{\phi}{phi} and the odds ratio. See \code{\link{CommonVGAMffArguments}} for more details. In general, good initial values (especially for \code{iphi12}) are often required; therefore use these arguments if convergence failure occurs. If specified, the value of \code{iphi12} cannot be more than the sample proportions of zeros in either response. } % \item{ephi12, eoratio}{ % List.
Extra argument for each of the links. % emu12 = list(), ephi12 = list(), eoratio = list(), % See \code{earg} in \code{\link{Links}} for general information. % } \item{zero}{ Which linear/additive predictor is modelled as an intercept only? A \code{NULL} means none. The default has both \eqn{\phi}{phi} and the odds ratio as not being modelled as a function of the explanatory variables (apart from an intercept). } \item{tol}{ Tolerance for testing independence. Should be some small positive numerical value. } \item{addRidge}{ Some small positive numerical value. The first two diagonal elements of the working weight matrices are multiplied by \code{1+addRidge} to make them diagonally dominant and therefore positive-definite. } } \details{ This \pkg{VGAM} family function fits an exchangeable bivariate odds ratio model (\code{\link{binom2.or}}) with a \code{\link{clogloglink}} link. The data are assumed to come from a zero-inflated Poisson (ZIP) distribution that has been converted to presence/absence. Explicitly, the default model is \deqn{cloglog[P(Y_j=1)/(1-\phi)] = \eta_1,\ \ \ j=1,2}{% cloglog[P(Y_j=1)/(1-phi)] = eta_1,\ \ \ j=1,2} for the (exchangeable) marginals, and \deqn{logit[\phi] = \eta_2,}{% logit[phi] = eta_2,} for the mixing parameter, and \deqn{\log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = \eta_3,}{% log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = eta_3,} specifies the dependency between the two responses. Here, the responses equal 1 for a success and 0 for a failure, and the odds ratio is often written \eqn{\psi=p_{00}p_{11}/(p_{10}p_{01})}{psi=p00 p11 / (p10 p01)}. We have \eqn{p_{10} = p_{01}}{p10 = p01} because of the exchangeability. The second linear/additive predictor models the \eqn{\phi}{phi} parameter (see \code{\link{zipoisson}}). The third linear/additive predictor is the same as \code{\link{binom2.or}}, viz., the log odds ratio. Suppose a data set \code{dataset1} comes from a Poisson distribution that has been converted to presence/absence, and that both marginal probabilities are the same (exchangeable). Then \code{binom2.or("clogloglink", exch=TRUE)} is appropriate. Now suppose a data set \code{dataset2} comes from a \emph{zero-inflated} Poisson distribution. The first linear/additive predictor of \code{zipebcom()} applied to \code{dataset2} is the same as that of \code{binom2.or("clogloglink", exch=TRUE)} applied to \code{dataset1}. That is, the \eqn{\phi}{phi} has been taken care of by \code{zipebcom()} so that it is just like the simpler \code{\link{binom2.or}}. Note that, for \eqn{\eta_1}{eta_1}, \code{mu12 = prob12 / (1-phi12)} where \code{prob12} is the probability of a 1 under the ZIP model. Here, \code{mu12} corresponds to \code{mu1} and \code{mu2} in the \code{\link{binom2.or}}-Poisson model. If \eqn{\phi=0}{phi=0} then \code{zipebcom()} should be equivalent to \code{binom2.or("clogloglink", exch=TRUE)}. Full details are given in Yee and Dirnbock (2009). The leading \eqn{2 \times 2}{2 x 2} submatrix of the expected information matrix (EIM) is of rank 1, not 2! This is due to the fact that the parameters corresponding to the first two linear/additive predictors are unidentifiable. The quick fix for this problem is to use the \code{addRidge} adjustment. The model is fitted by maximum likelihood estimation since the full likelihood is specified. Fisher scoring is implemented.
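As a small numerical illustration of the ridge adjustment (this sketch is not part of the original description), the default \code{addRidge = 0.001} multiplies the first two diagonal elements of each working weight matrix by \eqn{1.001}; this mild inflation is enough to make those matrices diagonally dominant, and hence positive-definite, despite the rank deficiency of the EIM noted above.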
The default models \eqn{\eta_2}{eta2} and \eqn{\eta_3}{eta3} as single parameters only, but this can be circumvented by setting \code{zero=NULL} in order to model the \eqn{\phi}{phi} and odds ratio as a function of all the explanatory variables. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. When fitted, the \code{fitted.values} slot of the object contains the four joint probabilities, labelled as \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively. These estimated probabilities should be extracted with the \code{fitted} generic function. } \section{Warning }{ The fact that the EIM is not of full rank may mean the model is naturally ill-conditioned. It is not clear whether there are any negative consequences with respect to the theory. For now it is certainly safer to fit \code{\link{binom2.or}} to bivariate binary responses. } \references{ Yee, T. W. and Dirnbock, T. (2009) Models for analysing species' presence/absence data at two time points. \emph{Journal of Theoretical Biology}, \bold{259}(4), 684--694. } %\author{ T. W. Yee } \note{ The \code{"12"} in the argument names reminds the user of the exchangeability assumption. The name of this \pkg{VGAM} family function stands for \emph{zero-inflated Poisson exchangeable bivariate complementary log-log odds-ratio model} or ZIP-EBCOM. See \code{\link{binom2.or}} for details that are pertinent to this \pkg{VGAM} family function too. Even better initial values are usually needed here. The \code{xij} (see \code{\link{vglm.control}}) argument enables environmental variables with different values at the two time points to be entered into an exchangeable \code{\link{binom2.or}} model. See the author's webpage for sample code. } \seealso{ \code{\link{binom2.or}}, \code{\link{zipoisson}}, \code{\link{clogloglink}}, \code{\link{CommonVGAMffArguments}}. } \examples{ zdata <- data.frame(x2 = seq(0, 1, len = (nsites <- 2000))) zdata <- transform(zdata, eta1 = -3 + 5 * x2, phi1 = logitlink(-1, inverse = TRUE), oratio = exp(2)) zdata <- transform(zdata, mu12 = clogloglink(eta1, inverse = TRUE) * (1-phi1)) tmat <- with(zdata, rbinom2.or(nsites, mu1 = mu12, oratio = oratio, exch = TRUE)) zdata <- transform(zdata, ybin1 = tmat[, 1], ybin2 = tmat[, 2]) with(zdata, table(ybin1, ybin2)) / nsites # For interest only \dontrun{ # Various plots of the data, for interest only par(mfrow = c(2, 2)) plot(jitter(ybin1) ~ x2, data = zdata, col = "blue") plot(jitter(ybin2) ~ jitter(ybin1), data = zdata, col = "blue") plot(mu12 ~ x2, data = zdata, col = "blue", type = "l", ylim = 0:1, ylab = "Probability", main = "Marginal probability and phi") with(zdata, abline(h = phi1[1], col = "red", lty = "dashed")) tmat2 <- with(zdata, dbinom2.or(mu1 = mu12, oratio = oratio, exch = TRUE)) with(zdata, matplot(x2, tmat2, col = 1:4, type = "l", ylim = 0:1, ylab = "Probability", main = "Joint probabilities")) } # Now fit the model to the data. fit <- vglm(cbind(ybin1, ybin2) ~ x2, zipebcom, data = zdata, trace = TRUE) coef(fit, matrix = TRUE) summary(fit) vcov(fit) } \keyword{models} \keyword{regression} VGAM/man/cardUC.Rd0000644000176200001440000000517213565414527013230 0ustar liggesusers\name{Card} \alias{Card} \alias{dcard} \alias{pcard} \alias{qcard} \alias{rcard} %- Also NEED an '\alias' for EACH other topic documented here.
\title{ Cardioid Distribution } \description{ Density, distribution function, quantile function and random generation for the cardioid distribution. } \usage{ dcard(x, mu, rho, log = FALSE) pcard(q, mu, rho, lower.tail = TRUE, log.p = FALSE) qcard(p, mu, rho, tolerance = 1e-07, maxits = 500, lower.tail = TRUE, log.p = FALSE) rcard(n, mu, rho, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{mu, rho}{ See \code{\link{cardioid}} for more information. } \item{tolerance, maxits, ...}{ The first two are control parameters for the algorithm used to solve for the roots of a nonlinear system of equations; \code{tolerance} controls the accuracy and \code{maxits} is the maximum number of iterations. \code{rcard} calls \code{qcard} so the \code{...} can be used to vary the two arguments. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \details{ See \code{\link{cardioid}}, the \pkg{VGAM} family function for estimating the two parameters by maximum likelihood estimation, for the formula of the probability density function and other details. } \value{ \code{dcard} gives the density, \code{pcard} gives the distribution function, \code{qcard} gives the quantile function, and \code{rcard} generates random deviates. } %\references{ } \author{ Thomas W. Yee and Kai Huang } \note{ Convergence problems might occur with \code{rcard}. } \seealso{ \code{\link{cardioid}}. } \examples{ \dontrun{ mu <- 4; rho <- 0.4; x <- seq(0, 2*pi, len = 501) plot(x, dcard(x, mu, rho), type = "l", las = 1, ylim = c(0, 1), col = "blue", ylab = paste("[dp]card(mu=", mu, ", rho=", rho, ")"), main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles") lines(x, pcard(x, mu, rho), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qcard(probs, mu, rho) lines(Q, dcard(Q, mu, rho), col = "purple", lty = 3, type = "h") lines(Q, pcard(Q, mu, rho), col = "purple", lty = 3, type = "h") abline(h = c(0,probs, 1), v = c(0, 2*pi), col = "purple", lty = 3) max(abs(pcard(Q, mu, rho) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/biplackettcopUC.Rd0000644000176200001440000000336413565414527015144 0ustar liggesusers\name{Biplackett} \alias{Biplackett} \alias{dbiplackcop} \alias{pbiplackcop} \alias{rbiplackcop} \title{Plackett's Bivariate Copula } \description{ Density, distribution function, and random generation for the (one parameter) bivariate Plackett copula. %distribution. } \usage{ dbiplackcop(x1, x2, oratio, log = FALSE) pbiplackcop(q1, q2, oratio) rbiplackcop(n, oratio) } \arguments{ \item{x1, x2, q1, q2}{vector of quantiles.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{oratio}{the positive odds ratio \eqn{\psi}{psi}.} \item{log}{ Logical. If \code{TRUE} then the logarithm is returned. } } \value{ \code{dbiplackcop} gives the density, \code{pbiplackcop} gives the distribution function, and \code{rbiplackcop} generates random deviates (a two-column matrix). } \references{ Mardia, K. V. (1967) Some contributions to contingency-type distributions. \emph{Biometrika}, \bold{54}, 235--249. } \author{ T. W.
Yee } \details{ See \code{\link{biplackettcop}}, the \pkg{VGAM} family function for estimating the parameter by maximum likelihood estimation, for the formula of the cumulative distribution function and other details. } %\note{ %} \seealso{ \code{\link{biplackettcop}}, \code{\link{bifrankcop}}. } \examples{ \dontrun{ N <- 101; oratio <- exp(1) x <- seq(0.0, 1.0, len = N) ox <- expand.grid(x, x) zedd <- dbiplackcop(ox[, 1], ox[, 2], oratio = oratio) contour(x, x, matrix(zedd, N, N), col = "blue") zedd <- pbiplackcop(ox[, 1], ox[, 2], oratio = oratio) contour(x, x, matrix(zedd, N, N), col = "blue") plot(rr <- rbiplackcop(n = 3000, oratio = oratio)) par(mfrow = c(1, 2)) hist(rr[, 1]) # Should be uniform hist(rr[, 2]) # Should be uniform } } \keyword{distribution} VGAM/man/yulesimon.Rd0000644000176200001440000000521313565414527014107 0ustar liggesusers\name{yulesimon} \alias{yulesimon} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Yule-Simon Family Function } \description{ Estimating the shape parameter of the Yule-Simon distribution. } \usage{ yulesimon(lshape = "loglink", ishape = NULL, nsimEIM = 200, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape}{ Link function for the shape parameter, called \eqn{\rho}{rho} below. See \code{\link{Links}} for more choices and for general information. } \item{ishape}{ Optional initial value for the (positive) parameter. See \code{\link{CommonVGAMffArguments}} for more information. The default is to obtain an initial value internally. Use this argument if the default fails. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The probability function is \deqn{f(y;\rho) = \rho \, B(y,\rho+1),}{% f(y;rho) = rho * beta(y, rho+1),} where the parameter \eqn{\rho>0}{rho>0}, \eqn{B} is the \code{\link[base]{beta}} function, and \eqn{y=1,2,\ldots}{y=1,2,...}. The function \code{\link{dyules}} computes this probability function. The mean of \eqn{Y}, which is returned as the fitted values, is \eqn{\rho/(\rho-1)}{rho/(rho-1)} provided \eqn{\rho > 1}{rho > 1}. The variance of \eqn{Y} is \eqn{\rho^2/((\rho-1)^2 (\rho-2))}{rho^2/((rho-1)^2 (rho-2))} provided \eqn{\rho > 2}{rho > 2}. The distribution was named after Udny Yule and Herbert A. Simon. Simon originally called it the Yule distribution. This family function can handle multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Simon, H. A. (1955) On a class of skew distribution functions. \emph{Biometrika}, \bold{42}, 425--440. } \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link{ryules}}, \code{\link{simulate.vlm}}.
} \examples{ ydata <- data.frame(x2 = runif(nn <- 1000)) ydata <- transform(ydata, y = ryules(nn, shape = exp(1.5 - x2))) with(ydata, table(y)) fit <- vglm(y ~ x2, yulesimon, data = ydata, trace = TRUE) coef(fit, matrix = TRUE) summary(fit) } \keyword{models} \keyword{regression} %# Generate some yulesimon random variates %set.seed(123) %nn = 400 %x = 1:20 %alpha = 1.1 # The parameter %probs = dyulesimon(x, alpha) %\dontrun{ %plot(x, probs, type="h", log="y")} %cs = cumsum(probs) %tab = table(cut(runif(nn), brea = c(0,cs,1))) %index = (1:length(tab))[tab>0] %y = rep(index, times=tab[index]) VGAM/man/frechet.Rd0000644000176200001440000001205413565414527013504 0ustar liggesusers\name{frechet} \alias{frechet} %\alias{frechet2} %\alias{frechet3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Frechet Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter Frechet distribution. % and 3-parameter } \usage{ frechet(location = 0, lscale = "loglink", lshape = logofflink(offset = -2), iscale = NULL, ishape = NULL, nsimEIM = 250, zero = NULL) } %frechet3(anchor = NULL, ldifference = "loglink", lscale = "loglink", % lshape = "logloglink", % ilocation = NULL, iscale = NULL, ishape = NULL, % zero = NULL, effpos = .Machine$double.eps^0.75) %- maybe also 'usage' for other objects documented here. \arguments{ \item{location}{ Numeric. Location parameter. It is called \eqn{a} below. } \item{lscale, lshape}{ Link functions for the parameters; see \code{\link{Links}} for more choices. } \item{iscale, ishape, zero, nsimEIM}{ See \code{\link{CommonVGAMffArguments}} for information. } % \item{edifference}{ % % Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % } % \item{anchor}{ % An ``anchor'' point for estimating the location parameter. This must % be a value no greater than \code{min(y)} where \code{y} is the response. % The location parameter is \eqn{A - D} where % \eqn{A} is the anchor, % \eqn{D} is the ``difference'' (default is to make this positive). % The default value of \code{anchor} means \code{min(y)} is chosen. % % } % \item{ldifference}{ % Parameter link function for the difference \eqn{D} between the anchor % point and the location parameter estimate. % The default keeps this difference positive so that numerical % problems are less likely to occur. % } % \item{ilocation}{ % Optional initial value for the location parameter. % A good choice can speed up the convergence rate markedly. % A \code{NULL} means it is chosen internally. % } } \details{ The (3-parameter) Frechet distribution has a density function that can be written \deqn{f(y) = \frac{sb}{(y-a)^2} [b/(y-a)]^{s-1} \, \exp[-(b/(y-a))^s] }{% f(y) = ((s*b) / (y-a)^2) * exp[-(b/(y-a))^s] * [b/(y-a)]^(s-1)} for \eqn{y > a} and scale parameter \eqn{b > 0}. The positive shape parameter is \eqn{s}. The cumulative distribution function is \deqn{F(y) = \exp[-(b/(y-a))^s]. }{% F(y) = exp[-(b/(y-a))^s].} The mean of \eqn{Y} is \eqn{a + b \Gamma(1-1/s)}{a + b*gamma(1-1/s)} for \eqn{s > 1} (these are returned as the fitted values). The variance of \eqn{Y} is \eqn{b^2 [ \Gamma(1-2/s) - \Gamma^2(1-1/s)]}{b^2 * [gamma(1 - 2/s) - gamma(1 - 1/s)^2]} for \eqn{s > 2}. Family \code{frechet} has \eqn{a} known, and \eqn{\log(b)}{log(b)} and \eqn{\log(s - 2)}{log(s - 2)} are the default linear/additive predictors. The working weights are estimated by simulated Fisher scoring. % Note that the \code{\link{logloglink}} link ensures \eqn{s > 1}. 
% whereas \code{frechet3} estimates it. % Estimating \eqn{a} well requires a lot of data and % a good choice of \code{ilocation} will help speed up convergence. % For \code{frechet3} the default linear/additive predictors are % \eqn{\log(D)}{log(D)}, % It would be great if the first linear/additive predictor was a direct % function of the location parameter, but this can run the risk that % the estimate is out of range (i.e., greater than \code{min(y)}). } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005) \emph{Extreme Value and Related Models with Applications in Engineering and Science}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \section{Warning}{ % Convergence for \code{frechet3} can be very slow, especially if the % initial value for the location parameter is poor. Setting something % like \code{maxit = 200, trace = TRUE} is a good idea. Family function \code{frechet} may fail for low values of the shape parameter, e.g., near 2 or lower. } %\note{ % Family function \code{frechet3} uses % the BFGS quasi-Newton update formula for the % working weight matrices. Consequently the estimated variance-covariance % matrix may be inaccurate or simply wrong! The standard errors must be % therefore treated with caution; these are computed in functions such % as \code{vcov()} and \code{summary()}. % If \code{fit} is a \code{frechet3} fit then \code{fit@extra$location} % is the final estimate of the location parameter, and % \code{fit@extra$LHSanchor} is the anchor point. %} \seealso{ \code{\link{rfrechet}}, \code{\link{gev}}. } \examples{ \dontrun{ set.seed(123) fdata <- data.frame(y1 = rfrechet(nn <- 1000, shape = 2 + exp(1))) with(fdata, hist(y1)) fit2 <- vglm(y1 ~ 1, frechet, data = fdata, trace = TRUE) coef(fit2, matrix = TRUE) Coef(fit2) head(fitted(fit2)) with(fdata, mean(y1)) head(weights(fit2, type = "working")) vcov(fit2) } } \keyword{models} \keyword{regression} VGAM/man/Opt.Rd0000644000176200001440000000514713565414527012633 0ustar liggesusers\name{Opt} \alias{Opt} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Optimums } \description{ Generic function for the \emph{optimums} (or optima) of a model. } \usage{ Opt(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the computation or extraction of an optimum (or optimums) is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. Sometimes they are fed into the methods function for \code{\link{Coef}}. } } \details{ Different models can define an optimum in different ways. Many models have no such notion or definition. Optimums occur in quadratic and additive ordination, e.g., CQO or CAO. For these models the optimum is the value of the latent variable where the maximum occurs, i.e., where the fitted value achieves its highest value. For quadratic ordination models there is a formula for the optimum but for additive ordination models the optimum must be searched for numerically. If it occurs on the boundary, then the optimum is undefined. At an optimum, the fitted value of the response is called the \emph{maximum}. } \value{ The value returned depends specifically on the methods function invoked. } \references{ Yee, T. W.
(2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W. Yee } \note{ In ordination, the optimum of a species is sometimes called the \emph{species score}. } %\section{Warning }{ %} \seealso{ \code{Opt.qrrvglm}, \code{\link{Max}}, \code{\link{Tol}}. } \examples{ set.seed(111) # This leads to the global solution hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, Crow1positive = FALSE) Opt(p1) \dontrun{ clr <- (1:(ncol(depvar(p1))+1))[-7] # Omits yellow persp(p1, col = clr, las = 1, main = "Vertical lines at the optimums") abline(v = Opt(p1), lty = 2, col = clr) } } \keyword{models} \keyword{regression} % index <- 1:ncol(depvar(p1)) % persp(p1, col = index, las = 1, main = "Vertical lines at the optimums") % # abline(v = Opt(p1), lty = 2, col = index) % rug(Opt(p1), col = clr, side = 3) VGAM/man/weibullR.Rd0000644000176200001440000001575213565414527013651 0ustar liggesusers\name{weibullR} \alias{weibullR} %\alias{weibullff} %\alias{weibull.lsh} %\alias{weibull3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Weibull Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter Weibull distribution. No observations should be censored. } \usage{ weibullR(lscale = "loglink", lshape = "loglink", iscale = NULL, ishape = NULL, lss = TRUE, nrfs = 1, probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape, lscale}{ Parameter link functions applied to the (positive) shape parameter (called \eqn{a} below) and (positive) scale parameter (called \eqn{b} below). See \code{\link{Links}} for more choices. } \item{ishape, iscale}{ Optional initial values for the shape and scale parameters. } \item{nrfs}{ Currently this argument is ignored. Numeric, of length one, with value in \eqn{[0,1]}. Weighting factor between Newton-Raphson and Fisher scoring. The value 0 means pure Newton-Raphson, while 1 means pure Fisher scoring. The default value uses a mixture of the two algorithms, while retaining positive-definite working weights. } \item{imethod}{ Initialization method used if there are censored observations. Currently only the values 1 and 2 are allowed. } \item{zero, probs.y, lss}{ Details at \code{\link{CommonVGAMffArguments}}. } } \details{ The Weibull density for a response \eqn{Y} is \deqn{f(y;a,b) = a y^{a-1} \exp[-(y/b)^a] / (b^a)}{% f(y;a,b) = a y^(a-1) * exp(-(y/b)^a) / [b^a]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}. The cumulative distribution function is \deqn{F(y;a,b) = 1 - \exp[-(y/b)^a].}{% F(y;a,b) = 1 - exp(-(y/b)^a).} The mean of \eqn{Y} is \eqn{b \, \Gamma(1+ 1/a)}{b * gamma(1+ 1/a)} (returned as the fitted values), and the mode is at \eqn{b\,(1-1/a)^{1/a}}{b * (1- 1/a)^(1/a)} when \eqn{a>1}. The density is unbounded for \eqn{a<1}. The \eqn{k}th moment about the origin is \eqn{E(Y^k) = b^k \, \Gamma(1+ k/a)}{E(Y^k) = b^k * gamma(1+ k/a)}. The hazard function is \eqn{a t^{a-1} / b^a}{a * t^(a-1) / b^a}. This \pkg{VGAM} family function currently does not handle censored data.
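As a quick numerical illustration of the mean formula above (this sketch is not part of the original text), a shape of \eqn{a=2} gives \eqn{E(Y) = b \, \Gamma(1.5) = b \sqrt{\pi} / 2 \approx 0.886 \, b}{E(Y) = b * gamma(1.5) = b * sqrt(pi) / 2, or about 0.886 * b}.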
Fisher scoring is used to estimate the two parameters. Although the expected information matrices used here are valid in all regions of the parameter space, the regularity conditions for maximum likelihood estimation are satisfied only if \eqn{a>2} (according to Kleiber and Kotz (2003)). If this is violated then a warning message is issued. One can enforce \eqn{a>2} by choosing \code{lshape = logofflink(offset = -2)}. Common values of the shape parameter lie between 0.5 and 3.5. As summarized in Harper et al. (2011), there are four cases to consider for inference. If \eqn{a \le 1} then the MLEs are not consistent (and the smallest observation becomes a hyperefficient solution for the location parameter in the 3-parameter case). If \eqn{1 < a < 2} then MLEs exist but are not asymptotically normal. If \eqn{a = 2} then the MLEs exist and are normal and asymptotically efficient but with a slower convergence rate than when \eqn{a > 2}. If \eqn{a > 2} then MLEs have classical asymptotic properties. The 3-parameter (location is the third parameter) Weibull can be estimated by maximizing a profile log-likelihood (see, e.g., Harper et al. (2011) and Lawless (2003)); otherwise try \code{\link{gev}}, which has a better parameterization. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994) \emph{Continuous Univariate Distributions}, 2nd edition, Volume 1, New York: Wiley. Lawless, J. F. (2003) \emph{Statistical Models and Methods for Lifetime Data}, 2nd ed. Hoboken, NJ, USA: John Wiley & Sons. Rinne, H. (2009) \emph{The Weibull Distribution: A Handbook}. Boca Raton, FL, USA: CRC Press. Gupta, R. D. and Kundu, D. (2006) On the comparison of Fisher information of the Weibull and GE distributions, \emph{Journal of Statistical Planning and Inference}, \bold{136}, 3130--3144. Harper, W. V. and Eschenbach, T. G. and James, T. R. (2011) Concerns about Maximum Likelihood Estimation for the Three-Parameter Weibull Distribution: Case Study of Statistical Software, \emph{The American Statistician}, \bold{65(1)}, 44--54. Smith, R. L. (1985) Maximum likelihood estimation in a class of nonregular cases. \emph{Biometrika}, \bold{72}, 67--90. Smith, R. L. and Naylor, J. C. (1987) A comparison of maximum likelihood and Bayesian estimators for the three-parameter Weibull distribution. \emph{Applied Statistics}, \bold{36}, 358--369. } \author{ T. W. Yee } \note{ Successful convergence depends on having reasonably good initial values. If the initial values chosen by this function are not good, make use of the two initial value arguments. This \pkg{VGAM} family function handles multiple responses. The Weibull distribution is often an alternative to the lognormal distribution. The inverse Weibull distribution, which is that of \eqn{1/Y} where \eqn{Y} has a Weibull(\eqn{a,b}) distribution, is known as the log-Gompertz distribution. There are problems implementing the three-parameter Weibull distribution. These are because the classical regularity conditions for the asymptotic properties of the MLEs are not satisfied because the support of the distribution depends on one of the parameters. Other related distributions are the Maxwell and Rayleigh distributions.
} \section{Warning}{ This function is under development to handle other censoring situations. The version of this function which will handle censored data will be called \code{cenweibull()}. It is currently being written and will use \code{\link{SurvS4}} as input. It should be released in later versions of \pkg{VGAM}. If the shape parameter is less than two then misleading inference may result, e.g., in the \code{summary} and \code{vcov} of the object. } \seealso{ \code{\link{weibull.mean}}, \code{\link[stats:Weibull]{dweibull}}, \code{\link{truncweibull}}, \code{\link{gev}}, \code{\link{lognormal}}, \code{\link{expexpff}}, \code{\link{maxwell}}, \code{\link{rayleigh}}, \code{\link{gumbelII}}. } \examples{ wdata <- data.frame(x2 = runif(nn <- 1000)) # Complete data wdata <- transform(wdata, y1 = rweibull(nn, shape = exp(1), scale = exp(-2 + x2)), y2 = rweibull(nn, shape = exp(2), scale = exp( 1 - x2))) fit <- vglm(cbind(y1, y2) ~ x2, weibullR, data = wdata, trace = TRUE) coef(fit, matrix = TRUE) vcov(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/iam.Rd0000644000176200001440000000677613565414527012644 0ustar liggesusers\name{iam} \alias{iam} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Index from Array to Matrix } \description{ Maps the elements of an array containing symmetric positive-definite matrices to a matrix with sufficient columns to hold them (called matrix-band format). } \usage{ iam(j, k, M, both = FALSE, diag = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{j}{ An integer from the set \{\code{1:M}\} giving the row number of an element. } \item{k}{ An integer from the set \{\code{1:M}\} giving the column number of an element. } \item{M}{ The number of linear/additive predictors. This is the dimension of each positive-definite symmetric matrix. } \item{both}{ Logical. Return both the row and column indices? See below for more details. } \item{diag}{ Logical. Return the indices for the diagonal elements? If \code{FALSE} then only the strictly upper triangular part of the matrix elements are used. } } \details{ Suppose we have \eqn{n} symmetric positive-definite square matrices, each \eqn{M} by \eqn{M}, and these are stored in an \code{array} of dimension \code{c(n,M,M)}. Then these can be more compactly represented by a \code{matrix} of dimension \code{c(n,K)} where \code{K} is an integer between \code{M} and \code{M*(M+1)/2} inclusive. The mapping between these two representations is given by this function. It firstly enumerates by the diagonal elements, followed by the band immediately above the diagonal, then the band above that one, etc. The last element is \code{(1,M)}. This function performs the mapping from elements \code{(j,k)} of symmetric positive-definite square matrices to the columns of another matrix representing such. This is called the \emph{matrix-band} format and is used by the \pkg{VGAM} package. } \value{ This function has a dual purpose depending on the value of \code{both}. If \code{both=FALSE} then the column number corresponding to the \code{j}-\code{k} element of the matrix is returned. If \code{both = TRUE} then \code{j} and \code{k} are ignored and a list with the following components is returned. \item{row.index}{ The row indices of the upper triangular part of the matrix (This may or may not include the diagonal elements, depending on the argument \code{diag}).
} \item{col.index}{ The column indices of the upper triangular part of the matrix (This may or may not include the diagonal elements, depending on the argument \code{diag}). } } %\references{ % The website \url{http://www.stat.auckland.ac.nz/~yee} contains % some additional information. % % %} \author{ T. W. Yee } \note{ This function is used in the \code{weight} slot of many \pkg{VGAM} family functions (see \code{\link{vglmff-class}}), especially those whose \eqn{M} is determined by the data, e.g., \code{\link{dirichlet}}, \code{\link{multinomial}}. } \seealso{ \code{\link{vglmff-class}}. %\code{ima}. } \examples{ iam(1, 2, M = 3) # The 4th column represents element (1,2) of a 3x3 matrix iam(NULL, NULL, M = 3, both = TRUE) # Return the row and column indices dirichlet()@weight M <- 4 temp1 <- iam(NA, NA, M = M, both = TRUE) mat1 <- matrix(NA, M, M) mat1[cbind(temp1$row, temp1$col)] <- 1:length(temp1$row) mat1 # More commonly used temp2 <- iam(NA, NA, M = M, both = TRUE, diag = FALSE) mat2 <- matrix(NA, M, M) mat2[cbind(temp2$row, temp2$col)] <- 1:length(temp2$row) mat2 # Rarely used } \keyword{manip} \keyword{programming} VGAM/man/zipfmbUC.Rd0000644000176200001440000000515513565414527013607 0ustar liggesusers\name{Zipfmb} \alias{Zipfmb} \alias{dzipfmb} \alias{pzipfmb} \alias{qzipfmb} \alias{rzipfmb} \title{The Zipf-Mandelbrot Distribution} \description{ Density, distribution function, quantile function and random generation for the Zipf-Mandelbrot distribution. } \usage{ dzipfmb(x, shape, start = 1, log = FALSE) pzipfmb(q, shape, start = 1, lower.tail = TRUE, log.p = FALSE) qzipfmb(p, shape, start = 1) rzipfmb(n, shape, start = 1) } \arguments{ \item{x}{vector of (non-negative integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of random values to return.} \item{shape}{vector of positive shape parameters.} \item{start}{integer, the minimum value of the support of the distribution.} \item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).} \item{lower.tail}{logical; if TRUE (default), probabilities are P[X <= x], otherwise, P[X > x].} } \details{ The probability mass function of the Zipf-Mandelbrot distribution is given by \deqn{\Pr(Y=y;s) = \frac{s \; \Gamma(y_{min})}{\Gamma(y_{min}-s)} \cdot \frac{\Gamma(y-s)}{\Gamma(y+1)}}{P(Y=y;s) = (s * Gamma(ymin) / Gamma(ymin-s)) * (Gamma(y-s) / Gamma(y+1))} where \eqn{0 \leq s < 1}{0 <= s < 1} and \eqn{y_{min}}{ymin} is the minimum value of the support, given by the argument \code{start} (1 by default). } \value{ \code{dzipfmb} gives the density, \code{pzipfmb} gives the distribution function, \code{qzipfmb} gives the quantile function, and \code{rzipfmb} generates random deviates. } \references{ Mandelbrot, B. (1961). On the theory of word frequencies and on related Markovian models of discourse. In R. Jakobson, \emph{Structure of Language and its Mathematical Aspects}, pp. 190--219, Providence, RI, USA. American Mathematical Society. Moreno-Sanchez, I. and Font-Clos, F. and Corral, A. (2016). Large-Scale Analysis of Zipf's Law in English Texts. \emph{PLoS ONE}, \bold{11}(1), 1--19. } \author{M. Chou, with edits by T. W. Yee.} %\note{ % The \pkg{VGAMzm} family function \code{\link{zipfmbrot}} estimates the % shape parameter \eqn{b}. %} \seealso{ \code{\link{Zipf}}. % \code{\link{zipfmbrot}}.
} \examples{ aa <- 1:10 (pp <- pzipfmb(aa, shape = 0.5, start = 1)) cumsum(dzipfmb(aa, shape = 0.5, start = 1)) # Should be same qzipfmb(pp, shape = 0.5, start = 1) - aa # Should be all 0s rzipfmb(30, shape = 0.5) \dontrun{ x <- 1:10 plot(x, dzipfmb(x, shape = 0.5), type = "h", ylim = 0:1, sub = "shape=0.5", las = 1, col = "blue", ylab = "Probability", main = "Zipf-Mandelbrot distribution: blue=PMF; orange=CDF") lines(x + 0.1, pzipfmb(x, shape = 0.5), col = "orange", lty = 3, type = "h") } } \keyword{distribution} VGAM/man/sinmad.Rd0000644000176200001440000001027513565414527013342 0ustar liggesusers\name{sinmad} \alias{sinmad} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Singh-Maddala Distribution Family Function } \description{ Maximum likelihood estimation of the 3-parameter Singh-Maddala distribution. } \usage{ sinmad(lscale = "loglink", lshape1.a = "loglink", lshape3.q = "loglink", iscale = NULL, ishape1.a = NULL, ishape3.q = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), gshape1.a = exp(-5:5), gshape3.q = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. % zero = ifelse(lss, -(2:3), -c(1, 3)) \arguments{ \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information. } \item{lshape1.a, lscale, lshape3.q}{ Parameter link functions applied to the (positive) parameters \eqn{a}, \code{scale}, and \eqn{q}. See \code{\link{Links}} for more choices. } \item{iscale, ishape1.a, ishape3.q, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. For \code{imethod = 2} a good initial value for \code{ishape3.q} is needed to obtain good estimates for the other parameters. } \item{gscale, gshape1.a, gshape3.q}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 3-parameter Singh-Maddala distribution is the 4-parameter generalized beta II distribution with shape parameter \eqn{p=1}. It is known under various other names, such as the Burr XII (or just the Burr distribution), Pareto IV, beta-P, and generalized log-logistic distribution. More details can be found in Kleiber and Kotz (2003). Some distributions which are special cases of the 3-parameter Singh-Maddala are the Lomax (\eqn{a=1}), Fisk (\eqn{q=1}), and paralogistic (\eqn{a=q}). The Singh-Maddala distribution has density \deqn{f(y) = aq y^{a-1} / [b^a \{1 + (y/b)^a\}^{1+q}]}{% f(y) = aq y^(a-1) / [b^a (1 + (y/b)^a)^(1+q)]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and the others are shape parameters. The cumulative distribution function is \deqn{F(y) = 1 - [1 + (y/b)^a]^{-q}.}{% F(y) = 1 - [1 + (y/b)^a]^(-q).} The mean is \deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(q - 1/a) / \Gamma(q)}{% E(Y) = b gamma(1 + 1/a) gamma(q - 1/a) / gamma(q)} provided \eqn{-a < 1 < aq}; these are returned as the fitted values. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}.
}
\seealso{
    \code{\link{Sinmad}},
    \code{\link{genbetaII}},
    \code{\link{betaII}},
    \code{\link{dagum}},
    \code{\link{fisk}},
    \code{\link{inv.lomax}},
    \code{\link{lomax}},
    \code{\link{paralogistic}},
    \code{\link{inv.paralogistic}},
    \code{\link{simulate.vlm}}.
}
\examples{
sdata <- data.frame(y = rsinmad(n = 1000, shape1 = exp(1),
                                scale = exp(2), shape3 = exp(0)))
fit <- vglm(y ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE)
fit <- vglm(y ~ 1, sinmad(lss = FALSE, ishape1.a = exp(1)),
            data = sdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)

# Harder problem (has the shape3.q parameter going to infinity)
set.seed(3)
sdata <- data.frame(y1 = rbeta(1000, 6, 6))
# hist(with(sdata, y1))
if (FALSE) {
  # These struggle
  fit1 <- vglm(y1 ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE)
  fit1 <- vglm(y1 ~ 1, sinmad(lss = FALSE), data = sdata,
               trace = TRUE, crit = "coef")
  Coef(fit1)
}
# Try this remedy:
fit2 <- vglm(y1 ~ 1, data = sdata, trace = TRUE, stepsize = 0.05,
             maxit = 99, sinmad(lss = FALSE, ishape3.q = 3,
                                lshape3.q = "logloglink"))
coef(fit2, matrix = TRUE)
Coef(fit2)
}
\keyword{models}
\keyword{regression}
VGAM/man/betabinomial.Rd0000644000176200001440000001755613565414527014516 0ustar liggesusers\name{betabinomial}
\alias{betabinomial}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Beta-binomial Distribution Family Function }
\description{
  Fits a beta-binomial distribution by maximum likelihood
  estimation.  The two parameters here are the mean and
  correlation coefficient.

}
\usage{
betabinomial(lmu = "logitlink", lrho = "logitlink", irho = NULL,
             imethod = 1, ishrinkage = 0.95, nsimEIM = NULL,
             zero = "rho")
}
%- maybe also 'usage' for other objects documented here.
% ishrinkage = 0.95, nsimEIM = NULL, zero = 2
\arguments{
  \item{lmu, lrho}{
  Link functions applied to the two parameters.
  See \code{\link{Links}} for more choices.
  The defaults ensure the parameters remain in \eqn{(0,1)};
  however, see the warning below.

  }
  \item{irho}{
  Optional initial value for the correlation parameter.
  If given, it must be in \eqn{(0,1)}, and is recycled to the
  necessary length.
  Assign this argument a value if a convergence failure occurs.
  Having \code{irho = NULL} means an initial value is obtained
  internally, though this can give unsatisfactory results.

  }
  \item{imethod}{
  An integer with value \code{1} or \code{2} or \ldots, which
  specifies the initialization method for \eqn{\mu}{mu}.
  If failure to converge occurs try another value
  and/or else specify a value for \code{irho}.

  }
  \item{zero}{
  Specifies which linear/additive predictor is to be modelled
  as an intercept only.
  If assigned, the single value can be either \code{1} or \code{2}.
  The default is to have a single correlation parameter.
  To model both parameters as functions of the covariates assign
  \code{zero = NULL}.
  See \code{\link{CommonVGAMffArguments}} for more information.

  }
  \item{ishrinkage, nsimEIM}{
  See \code{\link{CommonVGAMffArguments}} for more information.
  The argument \code{ishrinkage} is used only if \code{imethod = 2}.
  Using the argument \code{nsimEIM} may offer large advantages for
  large values of \eqn{N} and/or large data sets.

  }
}
\details{
  There are several parameterizations of the beta-binomial
  distribution.
  This family function directly models the mean (i.e., the
  probability of success) and the correlation parameter.
The model can be written \eqn{T|P=p \sim Binomial(N,p)}{T|P=p ~ Binomial(N,p)} where \eqn{P} has a beta distribution with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}. Here, \eqn{N} is the number of trials (e.g., litter size), \eqn{T=NY} is the number of successes, and \eqn{p} is the probability of a success (e.g., a malformation). That is, \eqn{Y} is the \emph{proportion} of successes. Like \code{\link{binomialff}}, the fitted values are the estimated probability of success (i.e., \eqn{E[Y]} and not \eqn{E[T]}) and the prior weights \eqn{N} are attached separately on the object in a slot. The probability function is \deqn{P(T=t) = {N \choose t} \frac{Be(\alpha+t, \beta+N-t)} {Be(\alpha, \beta)}}{% P(T=t) = choose(N,t) Be(alpha+t, beta+N-t) / Be(alpha, beta)} where \eqn{t=0,1,\ldots,N}, and \eqn{Be} is the \code{\link[base:Special]{beta}} function with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}. Recall \eqn{Y = T/N} is the real response being modelled. The default model is \eqn{\eta_1 = logit(\mu)}{eta1 =logit(mu)} and \eqn{\eta_2 = logit(\rho)}{eta2 = logit(rho)} because both parameters lie between 0 and 1. The mean (of \eqn{Y}) is \eqn{p = \mu = \alpha / (\alpha + \beta)}{p = mu = alpha / (alpha + beta)} and the variance (of \eqn{Y}) is \eqn{\mu(1-\mu)(1+(N-1)\rho)/N}{mu(1-mu)(1+(N-1)rho)/N}. Here, the correlation \eqn{\rho}{rho} is given by \eqn{1/(1 + \alpha + \beta)}{1/(1 + alpha + beta)} and is the correlation between the \eqn{N} individuals within a litter. A \emph{litter effect} is typically reflected by a positive value of \eqn{\rho}{rho}. It is known as the \emph{over-dispersion parameter}. This family function uses Fisher scoring. Elements of the second-order expected derivatives with respect to \eqn{\alpha}{alpha} and \eqn{\beta}{beta} are computed numerically, which may fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta}, \eqn{N} or else take a long time. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}. Suppose \code{fit} is a fitted beta-binomial model. Then \code{fit@y} contains the sample proportions \eqn{y}, \code{fitted(fit)} returns estimates of \eqn{E(Y)}, and \code{weights(fit, type="prior")} returns the number of trials \eqn{N}. } \references{ Moore, D. F. and Tsiatis, A. (1991) Robust estimation of the variance in moment methods for extra-binomial and extra-Poisson variation. \emph{Biometrics}, \bold{47}, 383--401. Prentice, R. L. (1986) Binary regression using an extended beta-binomial distribution, with discussion of correlation induced by covariate measurement errors. \emph{Journal of the American Statistical Association}, \bold{81}, 321--327. } \author{ T. W. Yee } \note{ This function processes the input in the same way as \code{\link{binomialff}}. But it does not handle the case \eqn{N=1} very well because there are two parameters to estimate, not one, for each row of the input. Cases where \eqn{N=1} can be omitted via the \code{subset} argument of \code{\link{vglm}}. The \emph{extended} beta-binomial distribution of Prentice (1986) is currently not implemented in the \pkg{VGAM} package as it has range-restrictions for the correlation parameter that are currently too difficult to handle in this package. However, try \code{lrho = "rhobitlink"}. } \section{Warning }{ If the estimated rho parameter is close to 0 then it pays to try \code{lrho = "rhobitlink"}. One day this may become the default link function. 
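% A sketch (not run) of swapping the link when rho is near 0;
% bdata is as in Example 1 below:
% fit.r <- vglm(cbind(y, N-y) ~ 1, betabinomial(lrho = "rhobitlink"),
%               data = bdata, trace = TRUE)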
This family function is prone to numerical difficulties due to the expected information matrices not being positive-definite or ill-conditioned over some regions of the parameter space. If problems occur try setting \code{irho} to some numerical value, \code{nsimEIM = 100}, say, or else use \code{etastart} argument of \code{\link{vglm}}, etc. } \seealso{ \code{\link{betabinomialff}}, \code{\link{Betabinom}}, \code{\link{binomialff}}, \code{\link{betaff}}, \code{\link{dirmultinomial}}, \code{\link{lirat}}, \code{\link{simulate.vlm}}. } \examples{ # Example 1 bdata <- data.frame(N = 10, mu = 0.5, rho = 0.8) bdata <- transform(bdata, y = rbetabinom(100, size = N, prob = mu, rho = rho)) fit <- vglm(cbind(y, N-y) ~ 1, betabinomial, data = bdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) head(cbind(depvar(fit), weights(fit, type = "prior"))) # Example 2 fit <- vglm(cbind(R, N-R) ~ 1, betabinomial, lirat, trace = TRUE, subset = N > 1) coef(fit, matrix = TRUE) Coef(fit) t(fitted(fit)) t(depvar(fit)) t(weights(fit, type = "prior")) # Example 3, which is more complicated lirat <- transform(lirat, fgrp = factor(grp)) summary(lirat) # Only 5 litters in group 3 fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial(zero = 2), data = lirat, trace = TRUE, subset = N > 1) coef(fit2, matrix = TRUE) \dontrun{ with(lirat, plot(hb[N > 1], fit2@misc$rho, xlab = "Hemoglobin", ylab = "Estimated rho", pch = as.character(grp[N > 1]), col = grp[N > 1])) } \dontrun{ # cf. Figure 3 of Moore and Tsiatis (1991) with(lirat, plot(hb, R / N, pch = as.character(grp), col = grp, xlab = "Hemoglobin level", ylab = "Proportion Dead", main = "Fitted values (lines)", las = 1)) smalldf <- with(lirat, lirat[N > 1, ]) for (gp in 1:4) { xx <- with(smalldf, hb[grp == gp]) yy <- with(smalldf, fitted(fit2)[grp == gp]) ooo <- order(xx) lines(xx[ooo], yy[ooo], col = gp) } } } \keyword{models} \keyword{regression} VGAM/man/smart.expression.Rd0000644000176200001440000000142613565414527015411 0ustar liggesusers\name{smart.expression} \alias{smart.expression} \title{ S Expression for Smart Functions } \description{ \code{smart.expression} is an S expression for a smart function to call itself. It is best if you go through it line by line, but most users will not need to know anything about it. It requires the primary argument of the smart function to be called \code{"x"}. The list component \code{match.call} must be assigned the value of \code{match.call()} in the smart function; this is so that the smart function can call itself later. } \seealso{ \code{\link[base]{match.call}}. } \examples{ print(sm.min2) } %\keyword{smartpred} \keyword{models} \keyword{regression} \keyword{programming} % Converted by Sd2Rd version 1.10. % Edited manually 17/2/03, 9/7/03 VGAM/man/predictvglm.Rd0000644000176200001440000001202513565414527014402 0ustar liggesusers\name{predictvglm} \alias{predictvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{Predict Method for a VGLM fit} \description{ Predicted values based on a vector generalized linear model (VGLM) object. } \usage{ predictvglm(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, deriv = 0, dispersion = NULL, untransform = FALSE, type.fitted = NULL, percentiles = NULL, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Object of class inheriting from \code{"vlm"}, e.g., \code{\link{vglm}}. } \item{newdata}{ An optional data frame in which to look for variables with which to predict. 
If omitted, the fitted linear predictors are used. } \item{type}{ The value of this argument can be abbreviated. The type of prediction required. The default is the first one, meaning on the scale of the linear predictors. This should be a \eqn{n \times M}{n x M} matrix. The alternative \code{"response"} is on the scale of the response variable, and depending on the family function, this may or may not be the mean. Often this is the fitted value, e.g., \code{fitted(vglmObject)} (see \code{\link{fittedvlm}}). Note that the response is output from the \code{@linkinv} slot, where the \code{eta} argument is the \eqn{n \times M}{n x M} matrix of linear predictors. The \code{"terms"} option returns a matrix giving the fitted values of each term in the model formula on the linear predictor scale. The terms have been centered. } \item{se.fit}{ logical: return standard errors? } \item{deriv}{ Non-negative integer. Currently this must be zero. Later, this may be implemented for general values. } \item{dispersion}{ Dispersion parameter. This may be inputted at this stage, but the default is to use the dispersion parameter of the fitted model. } % \item{extra}{ % A list containing extra information. % This argument should be ignored. % } \item{type.fitted}{ Some \pkg{VGAM} family functions have an argument by the same name. If so, then one can obtain fitted values by setting \code{type = "response"} and choosing a value of \code{type.fitted} from what's available. If \code{type.fitted = "quantiles"} is available then the \code{percentiles} argument can be used to specify what quantile values are requested. } \item{percentiles}{ Used only if \code{type.fitted = "quantiles"} is available and is selected. } \item{untransform}{ Logical. Reverses any parameter link function. This argument only works if \code{type = "link", se.fit = FALSE, deriv = 0}. Setting \code{untransform = TRUE} does not work for all \pkg{VGAM} family functions; only ones where there is a one-to-one correspondence between a simple link function and a simple parameter might work. } \item{\dots}{Arguments passed into \code{predictvlm}. } } \details{ Obtains predictions and optionally estimates standard errors of those predictions from a fitted \code{\link{vglm}} object. This code implements \emph{smart prediction} (see \code{\link{smartpred}}). } \value{ If \code{se.fit = FALSE}, a vector or matrix of predictions. If \code{se.fit = TRUE}, a list with components \item{fitted.values}{Predictions} \item{se.fit}{Estimated standard errors} \item{df}{Degrees of freedom} \item{sigma}{The square root of the dispersion parameter} } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{ Thomas W. Yee } \note{ Setting \code{se.fit = TRUE} and \code{type = "response"} will generate an error. The arguments \code{type.fitted} and \code{percentiles} are provided in this function to give more convenience than modifying the \code{extra} slot directly. } \section{Warning }{ This function may change in the future. } \seealso{ \code{\link[stats]{predict}}, \code{\link{vglm}}, \code{predictvlm}, \code{\link{smartpred}}, \code{\link{calibrate}}. 
} \examples{ # Illustrates smart prediction pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ poly(c(scale(let)), 2), propodds, data = pneumo, trace = TRUE, x.arg = FALSE) class(fit) (q0 <- head(predict(fit))) (q1 <- predict(fit, newdata = head(pneumo))) (q2 <- predict(fit, newdata = head(pneumo))) all.equal(q0, q1) # Should be TRUE all.equal(q1, q2) # Should be TRUE head(predict(fit)) head(predict(fit, untransform = TRUE)) p0 <- head(predict(fit, type = "response")) p1 <- head(predict(fit, type = "response", newdata = pneumo)) p2 <- head(predict(fit, type = "response", newdata = pneumo)) p3 <- head(fitted(fit)) all.equal(p0, p1) # Should be TRUE all.equal(p1, p2) # Should be TRUE all.equal(p2, p3) # Should be TRUE predict(fit, type = "terms", se = TRUE) } \keyword{models} \keyword{regression} % untransform = FALSE, extra = object@extra, VGAM/man/dagum.Rd0000644000176200001440000000771613565414527013172 0ustar liggesusers\name{dagum} \alias{dagum} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Dagum Distribution Family Function } \description{ Maximum likelihood estimation of the 3-parameter Dagum distribution. } \usage{ dagum(lscale = "loglink", lshape1.a = "loglink", lshape2.p = "loglink", iscale = NULL, ishape1.a = NULL, ishape2.p = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), gshape1.a = seq(0.75, 4, by = 0.25), gshape2.p = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. % zero = ifelse(lss, -(2:3), -c(1, 3)) \arguments{ \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information. } \item{lshape1.a, lscale, lshape2.p}{ Parameter link functions applied to the (positive) parameters \code{a}, \code{scale}, and \code{p}. See \code{\link{Links}} for more choices. } \item{iscale, ishape1.a, ishape2.p, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. For \code{imethod = 2} a good initial value for \code{ishape2.p} is needed to obtain a good estimate for the other parameter. } \item{gscale, gshape1.a, gshape2.p}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 3-parameter Dagum distribution is the 4-parameter generalized beta II distribution with shape parameter \eqn{q=1}. It is known under various other names, such as the Burr III, inverse Burr, beta-K, and 3-parameter kappa distribution. It can be considered a generalized log-logistic distribution. Some distributions which are special cases of the 3-parameter Dagum are the inverse Lomax (\eqn{a=1}), Fisk (\eqn{p=1}), and the inverse paralogistic (\eqn{a=p}). More details can be found in Kleiber and Kotz (2003). The Dagum distribution has a cumulative distribution function \deqn{F(y) = [1 + (y/b)^{-a}]^{-p}}{% F(y) = [1 + (y/b)^(-a)]^(-p)} which leads to a probability density function \deqn{f(y) = ap y^{ap-1} / [b^{ap} \{1 + (y/b)^a\}^{p+1}]}{% f(y) = ap y^(ap-1) / [b^(ap) (1 + (y/b)^a)^(p+1)]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and the others are shape parameters. The mean is \deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(1 - 1/a) / \Gamma(p)}{% E(Y) = b gamma(p + 1/a) gamma(1 - 1/a) / gamma(p)} provided \eqn{-ap < 1 < a}; these are returned as the fitted values. This family function handles multiple responses. 
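% A quick simulation check (not run) of the mean formula above,
% using the same parameter values as the example below:
% a <- exp(1); b <- exp(2); p <- exp(1)
% mean(rdagum(1e5, scale = b, shape1 = a, shape2 = p))
% b * gamma(p + 1/a) * gamma(1 - 1/a) / gamma(p)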
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
      Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
\note{
  See the notes in \code{\link{genbetaII}}.

From Kleiber and Kotz (2003), the MLE is rather sensitive to
isolated observations located sufficiently far from the majority
of the data.
Reliable estimation of the scale parameter requires \eqn{n>7000},
while estimates for \eqn{a} and \eqn{p} can be considered unbiased
for \eqn{n>2000} or 3000.

}
\seealso{
    \code{\link{Dagum}},
    \code{\link{genbetaII}},
    \code{\link{betaII}},
    \code{\link{sinmad}},
    \code{\link{fisk}},
    \code{\link{inv.lomax}},
    \code{\link{lomax}},
    \code{\link{paralogistic}},
    \code{\link{inv.paralogistic}},
    \code{\link{simulate.vlm}}.
}
\examples{
ddata <- data.frame(y = rdagum(n = 3000, scale = exp(2),
                               shape1 = exp(1), shape2 = exp(1)))
fit <- vglm(y ~ 1, dagum(lss = FALSE), data = ddata, trace = TRUE)
fit <- vglm(y ~ 1, dagum(lss = FALSE, ishape1.a = exp(1)),
            data = ddata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
\keyword{models}
\keyword{regression}
VGAM/man/binormalcop.Rd0000644000176200001440000000751613565414527014400 0ustar liggesusers\name{binormalcop}
\alias{binormalcop}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Gaussian Copula (Bivariate) Family Function }
\description{
  Estimate the correlation parameter of the (bivariate)
  Gaussian copula distribution by maximum likelihood estimation.

}
\usage{
binormalcop(lrho = "rhobitlink", irho = NULL, imethod = 1,
            parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
% apply.parint = TRUE,
\arguments{
  \item{lrho, irho, imethod}{
  Details at \code{\link{CommonVGAMffArguments}}.
  See \code{\link{Links}} for more link function choices.

  }
  \item{parallel, zero}{
  Details at \code{\link{CommonVGAMffArguments}}.
  If \code{parallel = TRUE} then the constraint is applied to the
  intercept too.

  }
}
\details{
  The cumulative distribution function is
  \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = \Phi_2
        ( \Phi^{-1}(y_1), \Phi^{-1}(y_2); \rho ) }{%
        P(Y1 <= y1, Y2 <= y2) =
        Phi_2(Phi^(-1)(y1), Phi^(-1)(y2); rho)}
  for \eqn{-1 < \rho < 1}{-1 < rho < 1},
  where \eqn{\Phi_2}{Phi_2} is the cumulative distribution function
  of a standard bivariate normal (see \code{\link{pbinorm}}),
  and \eqn{\Phi}{Phi} is the cumulative distribution function
  of a standard univariate normal (see \code{\link[stats]{pnorm}}).

  The support of the function is the interior of the unit square;
  however, values of 0 and/or 1 are not allowed.
  The marginal distributions are the standard uniform distributions.
  When \eqn{\rho = 0}{rho = 0} the random variables are independent.

  This \pkg{VGAM} family function can handle multiple responses,
  for example, a six-column matrix where the first 2 columns are the
  first of the three responses, the next 2 columns are the second
  response, etc.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.

}
\references{
Schepsmeier, U. and Stober, J. (2013)
Derivatives and Fisher information of bivariate copulas.
\emph{Statistical Papers}.

}
\author{ T. W. Yee }
\note{
  The response matrix must have a multiple of two columns.
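% A sketch (not run): when rho = 0 the copula CDF above factorises
% into the product of its uniform margins; a check via pbinorm():
% y1 <- 0.3; y2 <- 0.7
% c(pbinorm(qnorm(y1), qnorm(y2), cov12 = 0), y1 * y2)  # Should agree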
Currently, the fitted value is a matrix with the same number of columns and values equal to 0.5. This is because each marginal distribution corresponds to a standard uniform distribution. This \pkg{VGAM} family function is fragile; each response must be in the interior of the unit square. Setting \code{crit = "coef"} is sometimes a good idea because inaccuracies in \code{\link{pbinorm}} might mean unnecessary half-stepping will occur near the solution. } \seealso{ \code{\link{rbinormcop}}, \code{\link[stats]{pnorm}}, \code{\link{kendall.tau}}. } \examples{ nn <- 1000 ymat <- rbinormcop(n = nn, rho = rhobitlink(-0.9, inverse = TRUE)) bdata <- data.frame(y1 = ymat[, 1], y2 = ymat[, 2], y3 = ymat[, 1], y4 = ymat[, 2], x2 = runif(nn)) summary(bdata) \dontrun{ plot(ymat, col = "blue") } fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1, # 2 responses, e.g., (y1,y2) is the first fam = binormalcop, crit = "coef", # Sometimes a good idea data = bdata, trace = TRUE) coef(fit1, matrix = TRUE) Coef(fit1) head(fitted(fit1)) summary(fit1) # Another example; rho is a linear function of x2 bdata <- transform(bdata, rho = -0.5 + x2) ymat <- rbinormcop(n = nn, rho = with(bdata, rho)) bdata <- transform(bdata, y5 = ymat[, 1], y6 = ymat[, 2]) fit2 <- vgam(cbind(y5, y6) ~ s(x2), data = bdata, binormalcop(lrho = "identitylink"), trace = TRUE) \dontrun{ plot(fit2, lcol = "blue", scol = "orange", se = TRUE, las = 1) } } \keyword{models} \keyword{regression} % for real \eqn{\rho}{rho} in (-1,1). VGAM/man/dirmultinomial.Rd0000644000176200001440000001566013565414527015123 0ustar liggesusers\name{dirmultinomial} \alias{dirmultinomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{Fitting a Dirichlet-Multinomial Distribution } \description{ Fits a Dirichlet-multinomial distribution to a matrix response. } \usage{ dirmultinomial(lphi = "logitlink", iphi = 0.10, parallel = FALSE, zero = "M") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lphi}{ Link function applied to the \eqn{\phi}{phi} parameter, which lies in the open unit interval \eqn{(0,1)}. See \code{\link{Links}} for more choices. } \item{iphi}{ Numeric. Initial value for \eqn{\phi}{phi}. Must be in the open unit interval \eqn{(0,1)}. If a failure to converge occurs then try assigning this argument a different value. } \item{parallel}{ A logical (formula not allowed here) indicating whether the probabilities \eqn{\pi_1,\ldots,\pi_{M-1}}{pi_1,\ldots,pi_{M-1}} are to be equal via equal coefficients. Note \eqn{\pi_M}{pi_M} will generally be different from the other probabilities. Setting \code{parallel = TRUE} will only work if you also set \code{zero = NULL} because of interference between these arguments (with respect to the intercept term). } \item{zero}{ An integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The values must be from the set \eqn{\{1,2,\ldots,M\}}. If the character \code{"M"} then this means the numerical value \eqn{M}, which corresponds to linear/additive predictor associated with \eqn{\phi}{phi}. Setting \code{zero = NULL} means none of the values from the set \eqn{\{1,2,\ldots,M\}}. } } \details{ The Dirichlet-multinomial distribution arises from a multinomial distribution where the probability parameters are not constant but are generated from a multivariate distribution called the Dirichlet distribution. 
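% A sketch (not run) of this two-stage mechanism, assuming VGAM's
% rdiric() Dirichlet sampler and a fixed row total of 20:
% probs <- rdiric(n = 5, shape = c(1, 2, 3, 4))
% t(apply(probs, 1, function(p) rmultinom(1, size = 20, prob = p)))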
The Dirichlet-multinomial distribution has probability function \deqn{P(Y_1=y_1,\ldots,Y_M=y_M) = {N_{*} \choose {y_1,\ldots,y_M}} \frac{ \prod_{j=1}^{M} \prod_{r=1}^{y_{j}} (\pi_j (1-\phi) + (r-1)\phi)}{ \prod_{r=1}^{N_{*}} (1-\phi + (r-1)\phi)}}{% P(Y_1=y_1,\ldots,Y_M=y_M) = C_{y_1,\ldots,y_M}^{N_{*}} prod_{j=1}^{M} prod_{r=1}^{y_{j}} (pi_j (1-phi) + (r-1)phi) / prod_{r=1}^{N_{*}} (1-phi + (r-1)phi)} where \eqn{\phi}{phi} is the \emph{over-dispersion} parameter and \eqn{N_{*} = y_1+\cdots+y_M}{N_* = y_1+\cdots+y_M}. Here, \eqn{a \choose b}{C_b^a} means ``\eqn{a} choose \eqn{b}'' and refers to combinations (see \code{\link[base]{choose}}). The above formula applies to each row of the matrix response. In this \pkg{VGAM} family function the first \eqn{M-1} linear/additive predictors correspond to the first \eqn{M-1} probabilities via \deqn{\eta_j = \log(P[Y=j]/ P[Y=M]) = \log(\pi_j/\pi_M)}{% eta_j = log(P[Y=j]/ P[Y=M]) = log(pi_j/pi_M)} where \eqn{\eta_j}{eta_j} is the \eqn{j}th linear/additive predictor (\eqn{\eta_M=0}{eta_M=0} by definition for \eqn{P[Y=M]} but not for \eqn{\phi}{phi}) and \eqn{j=1,\ldots,M-1}. The \eqn{M}th linear/additive predictor corresponds to \code{lphi} applied to \eqn{\phi}{phi}. Note that \eqn{E(Y_j) = N_* \pi_j}{E(Y_j) = N_* pi_j} but the probabilities (returned as the fitted values) \eqn{\pi_j}{pi_j} are bundled together as a \eqn{M}-column matrix. The quantities \eqn{N_*} are returned as the prior weights. The beta-binomial distribution is a special case of the Dirichlet-multinomial distribution when \eqn{M=2}; see \code{\link{betabinomial}}. It is easy to show that the first shape parameter of the beta distribution is \eqn{shape1=\pi(1/\phi-1)}{shape1=pi*(1/phi-1)} and the second shape parameter is \eqn{shape2=(1-\pi)(1/\phi-1)}{shape2=(1-pi)*(1/phi-1)}. Also, \eqn{\phi=1/(1+shape1+shape2)}{phi=1/(1+shape1+shape2)}, which is known as the \emph{intra-cluster correlation} coefficient. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. If the model is an intercept-only model then \code{@misc} (which is a list) has a component called \code{shape} which is a vector with the \eqn{M} values \eqn{\pi_j(1/\phi-1)}{pi_j * (1/phi-1)}. % zz not sure: These are the shape parameters of the underlying % Dirichlet distribution. } \references{ Paul, S. R., Balasooriya, U. and Banerjee, T. (2005) Fisher information matrix of the Dirichlet-multinomial distribution. \emph{Biometrical Journal}, \bold{47}, 230--236. Tvedebrink, T. (2010) Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetics. \emph{Theoretical Population Biology}, \bold{78}, 200--210. Yu, P. and Shaw, C. A. (2014). An Efficient Algorithm for Accurate Computation of the Dirichlet-Multinomial Log-Likelihood Function. \emph{Bioinformatics}, \bold{30}, 1547--54. % url {doi:10.1093/bioinformatics/btu079}. % number = {11}, % first published online February 11, 2014 } \author{ Thomas W. Yee } \section{Warning }{ This \pkg{VGAM} family function is prone to numerical problems, especially when there are covariates. } \note{ The response can be a matrix of non-negative integers, or else a matrix of sample proportions and the total number of counts in each row specified using the \code{weights} argument. This dual input option is similar to \code{\link{multinomial}}. 
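% A small numerical illustration (not run) of the M = 2 link to
% betabinomial() given above, with illustrative prob = 0.4, phi = 0.2:
% prob <- 0.4; phi <- 0.2
% (shape1 <- prob * (1/phi - 1))        # 1.6
% (shape2 <- (1 - prob) * (1/phi - 1))  # 2.4
% 1 / (1 + shape1 + shape2)             # Recovers phi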
To fit a `parallel' model with the \eqn{\phi}{phi} parameter
being an intercept-only you will need to use the
\code{constraints} argument.

Currently, Fisher scoring is implemented. To compute the expected
information matrix a \code{for} loop is used; this may be very slow
when the counts are large.
Additionally, convergence may be slower than usual due to round-off
error when computing the expected information matrices.

}
\seealso{
  \code{\link{dirmul.old}},
  \code{\link{betabinomial}},
  \code{\link{betabinomialff}},
  \code{\link{dirichlet}},
  \code{\link{multinomial}}.

}
\examples{
nn <- 5; M <- 4; set.seed(1)
ydata <- data.frame(round(matrix(runif(nn * M, max = 100),
                                 nn, M)))  # Integer counts
colnames(ydata) <- paste("y", 1:M, sep = "")

fit <- vglm(cbind(y1, y2, y3, y4) ~ 1, dirmultinomial,
            data = ydata, trace = TRUE)
head(fitted(fit))
depvar(fit)  # Sample proportions
weights(fit, type = "prior", matrix = FALSE)  # Total counts per row

\dontrun{ ydata <- transform(ydata, x2 = runif(nn))
fit <- vglm(cbind(y1, y2, y3, y4) ~ x2, dirmultinomial,
            data = ydata, trace = TRUE)
Coef(fit)
coef(fit, matrix = TRUE)
(sfit <- summary(fit))
vcov(sfit)
}
}
\keyword{models}
\keyword{regression}

% zz \eqn{\alpha_j = P[Y=j] \times (1/\phi - 1)}{alpha_j = P[Y=j] *
% (1/phi - 1)} are the shape parameters,
% for \eqn{j=1,\ldots,M}.

% Currently, initial values can be improved upon.

% \dontrun{ # This does not work:
VGAM/man/topple.Rd0000644000176200001440000000405413565414527013370 0ustar liggesusers\name{topple}
\alias{topple}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Topp-Leone Distribution Family Function }
\description{
  Estimating the parameter of the Topp-Leone distribution by
  maximum likelihood estimation.

}
\usage{
topple(lshape = "logitlink", zero = NULL, gshape = ppoints(8),
       parallel = FALSE,
       type.fitted = c("mean", "percentiles", "Qlink"),
       percentiles = 50)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lshape, gshape}{
  Details at \code{\link{CommonVGAMffArguments}}.

  }
  \item{zero, parallel}{
  Details at \code{\link{CommonVGAMffArguments}}.

  }
  \item{type.fitted, percentiles}{
  See \code{\link{CommonVGAMffArguments}} for information.
  Using \code{"Qlink"} is for quantile-links in \pkg{VGAMextra}.

  }
}
\details{
  The Topp-Leone distribution has a probability density function
  that can be written
  \deqn{f(y;s) = 2 s (1 - y) [y (2-y)]^{s-1}}{%
        f(y;s) = 2 * s * (1 - y) * (y * (2-y))^(s-1)}
  for \eqn{0 < y < 1}{0<y<1} and \eqn{0 < s < 1}{0<s<1}.

\eqn{\eta_j = logit(P[Y>j|Y \geq j])}{eta_j = logit(P[Y>j|Y>=j])}
for \eqn{j=1,\dots,M}.
If \code{reverse} is \code{TRUE}, then
\eqn{\eta_j = logit(P[Y<j+1|Y \leq j+1])}{eta_j =
logit(P[Y<j+1|Y<=j+1])}.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  \code{\link{rrvglm}} and \code{\link{vgam}}.

}
\references{
Agresti, A. (2013)
\emph{Categorical Data Analysis},
3rd ed. Hoboken, NJ, USA: Wiley.

McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.

Yee, T. W. (2010)
The \pkg{VGAM} package for categorical data analysis.
\emph{Journal of Statistical Software},
\bold{32}, 1--34.
\url{http://www.jstatsoft.org/v32/i10/}.

}
\author{ Thomas W. Yee }
\note{
  The response should be either a matrix of counts (with row sums that
  are all positive), or a factor. In both cases, the \code{y} slot
  returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
  of counts.

  For a nominal (unordered) factor response, the multinomial
  logit model (\code{\link{multinomial}}) is more appropriate.
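% A sketch (not run): continuation ratios computed by hand from one
% row of counts over three ordered categories:
% tab <- c(10, 20, 30)
% logitlink(sum(tab[2:3]) / sum(tab))  # eta_1 = logit(P[Y>1|Y>=1])
% logitlink(tab[3] / sum(tab[2:3]))    # eta_2 = logit(P[Y>2|Y>=2])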
Here is an example of the usage of the \code{parallel} argument. If there are covariates \code{x1}, \code{x2} and \code{x3}, then \code{parallel = TRUE ~ x1 + x2 -1} and \code{parallel = FALSE ~ x3} are equivalent. This would constrain the regression coefficients for \code{x1} and \code{x2} to be equal; those of the intercepts and \code{x3} would be different. } \section{Warning }{ No check is made to verify that the response is ordinal if the response is a matrix; see \code{\link[base:factor]{ordered}}. } \seealso{ \code{\link{sratio}}, \code{\link{acat}}, \code{\link{cumulative}}, \code{\link{multinomial}}, \code{\link{margeff}}, \code{\link{pneumo}}, \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link{cauchitlink}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit <- vglm(cbind(normal, mild, severe) ~ let, cratio(parallel = TRUE), data = pneumo)) coef(fit, matrix = TRUE) constraints(fit) predict(fit) predict(fit, untransform = TRUE) margeff(fit) } \keyword{models} \keyword{regression} %Simonoff, J. S. (2003) %\emph{Analyzing Categorical Data}, %New York: Springer-Verlag. VGAM/man/inv.binomial.Rd0000644000176200001440000000663713565414527014463 0ustar liggesusers\name{inv.binomial} \alias{inv.binomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{Inverse Binomial Distribution Family Function} \description{ Estimates the two parameters of an inverse binomial distribution by maximum likelihood estimation. } \usage{ inv.binomial(lrho = extlogitlink(min = 0.5, max = 1), llambda = "loglink", irho = NULL, ilambda = NULL, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lrho, llambda}{ Link function for the \eqn{\rho}{rho} and \eqn{\lambda}{lambda} parameters. See \code{\link{Links}} for more choices. } \item{irho, ilambda}{ Numeric. Optional initial values for \eqn{\rho}{rho} and \eqn{\lambda}{lambda}. } \item{zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The inverse binomial distribution of Yanagimoto (1989) has density function \deqn{f(y;\rho,\lambda) = \frac{ \lambda \,\Gamma(2y+\lambda) }{\Gamma(y+1) \, \Gamma(y+\lambda+1) } \{ \rho(1-\rho) \}^y \rho^{\lambda}}{% f(y;rho,lambda) = (lambda * Gamma(2y+lambda)) * [rho*(1-rho)]^y * rho^lambda / (Gamma(y+1) * Gamma(y+lambda+1))} where \eqn{y=0,1,2,\ldots}{y=0,1,2,...} and \eqn{\frac12 < \rho < 1}{0.5 < rho < 1}, and \eqn{\lambda > 0}{lambda > 0}. The first two moments exist for \eqn{\rho>\frac12}{rho>0.5}; then the mean is \eqn{\lambda (1-\rho) /(2 \rho-1)}{lambda*(1-rho)/(2*rho-1)} (returned as the fitted values) and the variance is \eqn{\lambda \rho (1-\rho) /(2 \rho-1)^3}{lambda*rho*(1-rho)/(2*rho-1)^3}. The inverse binomial distribution is a special case of the generalized negative binomial distribution of Jain and Consul (1971). It holds that \eqn{Var(Y) > E(Y)} so that the inverse binomial distribution is overdispersed compared with the Poisson distribution. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Yanagimoto, T. (1989) The inverse binomial distribution as a statistical model. \emph{Communications in Statistics: Theory and Methods}, \bold{18}, 3625--3633. Jain, G. C. and Consul, P. C. (1971) A generalized negative binomial distribution. \emph{SIAM Journal on Applied Mathematics}, \bold{21}, 501--513. Jorgensen, B. 
(1997) \emph{The Theory of Dispersion Models}. London: Chapman & Hall } \author{ T. W. Yee } \note{ This \pkg{VGAM} family function only works reasonably well with intercept-only models. Good initial values are needed; if convergence failure occurs use \code{irho} and/or \code{ilambda}. Some elements of the working weight matrices use the expected information matrix while other elements use the observed information matrix. Yet to do: using the mean and the reciprocal of \eqn{\lambda}{lambda} results in an EIM that is diagonal. } \seealso{ \code{\link{negbinomial}}, \code{\link{poissonff}}. } \examples{ idata <- data.frame(y = rnbinom(n <- 1000, mu = exp(3), size = exp(1))) fit <- vglm(y ~ 1, inv.binomial, data = idata, trace = TRUE) with(idata, c(mean(y), head(fitted(fit), 1))) summary(fit) coef(fit, matrix = TRUE) Coef(fit) sum(weights(fit)) # Sum of the prior weights sum(weights(fit, type = "work")) # Sum of the working weights } \keyword{models} \keyword{regression} %fit <- vglm(y ~ 1, inv.binomial(ilambda = 1), trace = TRUE, crit = "c", checkwz = FALSE) VGAM/man/score.stat.Rd0000644000176200001440000000545013565414527014153 0ustar liggesusers\name{score.stat} \alias{score.stat} \alias{score.stat.vlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Rao's Score Test Statistics Evaluated at the Null Values } \description{ Generic function that computes Rao's score test statistics evaluated at the null values (consequently they do not suffer from the Hauck-Donner effect). } \usage{ score.stat(object, ...) score.stat.vlm(object, values0 = 0, subset = NULL, omit1s = TRUE, all.out = FALSE, iterate = TRUE, trace = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object, values0, subset}{ Same as in \code{\link{wald.stat.vlm}}. } \item{omit1s, all.out, iterate}{ Same as in \code{\link{wald.stat.vlm}}. } \item{trace}{ Same as in \code{\link{wald.stat.vlm}}. } \item{\dots}{ Ignored for now. } } \details{ The (Rao) \emph{score test} (also known as the \emph{Lagrange multiplier test} in econometrics) is a third general method for hypothesis testing under a likelihood-based framework (the others are the likelihood ratio test and Wald test; see \code{\link{lrt.stat}} and \code{\link{wald.stat}}). Asymptotically, the three tests are equivalent. The Wald test is not invariant to parameterization, and the usual Wald test statistics computed at the estimates make it vulnerable to the Hauck-Donner effect (HDE; see \code{\link{hdeff}}). This function is similar to \code{\link{wald.stat}} in that one coefficient is set to 0 (by default) and the \emph{other} coefficients are iterated by IRLS to get their MLE subject to this constraint. } \value{ By default the signed square root of the Rao score statistics are returned. If \code{all.out = TRUE} then a list is returned with the following components: \code{score.stat} the score statistic, \code{SE0} the standard error of that coefficient, \code{values0} the null values. Approximately, the default score statistics output are standard normal random variates if each null hypothesis is true. } %\references{ % %} \author{ Thomas W. Yee } %\note{ %} \section{Warning }{ See \code{\link{wald.stat.vlm}}. } \seealso{ \code{\link{wald.stat}}, \code{\link{lrt.stat}}, \code{\link{summaryvglm}}, \code{\link[stats]{summary.glm}}, \code{\link{anova.vglm}}, \code{\link{vglm}}, \code{\link{hdeff}}. 
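% A sketch (not run) comparing the three kinds of test statistics
% side by side, with pfit as in the example below:
% cbind(wald  = wald.stat(pfit),
%       score = score.stat(pfit),
%       lrt   = lrt.stat(pfit))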
% \code{\link{anova.vglm}}, } \examples{ set.seed(1) pneumo <- transform(pneumo, let = log(exposure.time), x3 = rnorm(nrow(pneumo))) (pfit <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, data = pneumo)) score.stat(pfit) # No HDE here; should be similar to the next line: coef(summary(pfit))[, "z value"] # Wald statistics computed at the MLE summary(pfit, score0 = TRUE) } \keyword{models} \keyword{regression} VGAM/man/betageomUC.Rd0000644000176200001440000000404313565414527014076 0ustar liggesusers\name{Betageom} \alias{Betageom} \alias{dbetageom} \alias{pbetageom} %\alias{qbetageom} \alias{rbetageom} \title{The Beta-Geometric Distribution} \description{ Density, distribution function, and random generation for the beta-geometric distribution. } \usage{ dbetageom(x, shape1, shape2, log = FALSE) pbetageom(q, shape1, shape2, log.p = FALSE) rbetageom(n, shape1, shape2) } \arguments{ \item{x, q}{vector of quantiles. } % \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats]{runif}}. } \item{shape1, shape2}{ the two (positive) shape parameters of the standard beta distribution. They are called \code{a} and \code{b} in \code{\link[base:Special]{beta}} respectively. } \item{log, log.p}{ Logical. If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}. } } \value{ \code{dbetageom} gives the density, \code{pbetageom} gives the distribution function, and \code{rbetageom} generates random deviates. % \code{qbetageom} gives the quantile function, and } \author{ T. W. Yee } \details{ The beta-geometric distribution is a geometric distribution whose probability of success is not a constant but it is generated from a beta distribution with parameters \code{shape1} and \code{shape2}. Note that the mean of this beta distribution is \code{shape1/(shape1+shape2)}, which therefore is the mean of the probability of success. % See zz code{link{betageomzz}}, the \pkg{VGAM} family function % for estimating the parameters, % for the formula of the probability density function and other details. } \note{ \code{pbetageom} can be particularly slow. } \seealso{ \code{\link{geometric}}, \code{\link{betaff}}, \code{\link[stats:Beta]{Beta}}. } \examples{ \dontrun{ shape1 <- 1; shape2 <- 2; y <- 0:30 proby <- dbetageom(y, shape1, shape2, log = FALSE) plot(y, proby, type = "h", col = "blue", ylab = "P[Y=y]", main = paste( "Y ~ Beta-geometric(shape1=", shape1,", shape2=", shape2, ")", sep = "")) sum(proby) } } \keyword{distribution} VGAM/man/kumar.Rd0000644000176200001440000000575613565414527013216 0ustar liggesusers\name{kumar} \alias{kumar} %- Also NEED an '\alias' for EACH other topic documented here. \title{Kumaraswamy Regression Family Function} \description{ Estimates the two parameters of the Kumaraswamy distribution by maximum likelihood estimation. } \usage{ kumar(lshape1 = "loglink", lshape2 = "loglink", ishape1 = NULL, ishape2 = NULL, gshape1 = exp(2*ppoints(5) - 1), tol12 = 1.0e-4, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape1, lshape2}{ Link function for the two positive shape parameters, respectively, called \eqn{a} and \eqn{b} below. See \code{\link{Links}} for more choices. } % \item{eshape1, eshape2}{ % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. % eshape1 = list(), eshape2 = list(), % } \item{ishape1, ishape2}{ Numeric. Optional initial values for the two positive shape parameters. } \item{tol12}{ Numeric and positive. 
Tolerance for testing whether the second shape parameter is either 1 or 2. If so then the working weights need to handle these singularities. } \item{gshape1}{ Values for a grid search for the first shape parameter. See \code{\link{CommonVGAMffArguments}} for more information. % Lower and upper limits for a grid search for the first shape parameter. } \item{zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The Kumaraswamy distribution has density function \deqn{f(y;a = shape1,b = shape2) = a b y^{a-1} (1-y^{a})^{b-1}}{% a*b*y^(a-1)*(1-y^a)^(b-1)} where \eqn{0 < y < 1} and the two shape parameters, \eqn{a} and \eqn{b}, are positive. The mean is \eqn{b \times Beta(1+1/a,b)}{b * Beta(1+1/a,b)} (returned as the fitted values) and the variance is \eqn{b \times Beta(1+2/a,b) - (b \times Beta(1+1/a,b))^2}{b * Beta(1+2/a,b) - (b * Beta(1+1/a,b))^2}. Applications of the Kumaraswamy distribution include the storage volume of a water reservoir. Fisher scoring is implemented. Handles multiple responses (matrix input). } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Kumaraswamy, P. (1980). A generalized probability density function for double-bounded random processes. \emph{Journal of Hydrology}, \bold{46}, 79--88. Jones, M. C. (2009). Kumaraswamy's distribution: A beta-type distribution with some tractability advantages. \emph{Statistical Methodology}, \bold{6}, 70--81. } \author{ T. W. Yee } %\note{ % %} \seealso{ \code{\link{dkumar}}, \code{\link{betaff}}, \code{\link{simulate.vlm}}. } \examples{ shape1 <- exp(1); shape2 <- exp(2) kdata <- data.frame(y = rkumar(n = 1000, shape1, shape2)) fit <- vglm(y ~ 1, kumar, data = kdata, trace = TRUE) c(with(kdata, mean(y)), head(fitted(fit), 1)) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/chest.nz.Rd0000644000176200001440000000254513565414527013624 0ustar liggesusers\name{chest.nz} \alias{chest.nz} \docType{data} \title{ Chest Pain in NZ Adults Data} \description{ Presence/absence of chest pain in 10186 New Zealand adults. } \usage{data(chest.nz)} \format{ A data frame with 73 rows and the following 5 variables. \describe{ \item{age}{a numeric vector; age (years).} \item{nolnor}{a numeric vector of counts; no pain on LHS or RHS.} \item{nolr}{a numeric vector of counts; no pain on LHS but pain on RHS.} \item{lnor}{a numeric vector of counts; no pain on RHS but pain on LHS.} \item{lr}{a numeric vector of counts; pain on LHS and RHS of chest.} } } \details{ Each adult was asked their age and whether they experienced any pain or discomfort in their chest over the last six months. If yes, they indicated whether it was on their LHS and/or RHS of their chest. } \source{ MacMahon, S., Norton, R., Jackson, R., Mackie, M. J., Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995) Fletcher Challenge-University of Auckland Heart & Health Study: design and baseline findings. \emph{New Zealand Medical Journal}, \bold{108}, 499--502. } \examples{ \dontrun{ fit <- vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4, 3)), binom2.or(exchan = TRUE, zero = NULL), data = chest.nz) coef(fit, matrix = TRUE) } \dontrun{ plot(fit, which.cf = 2, se = TRUE) } } \keyword{datasets} VGAM/man/betabinomialff.Rd0000644000176200001440000002042713565414527015031 0ustar liggesusers\name{betabinomialff} \alias{betabinomialff} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Beta-binomial Distribution Family Function }
\description{
  Fits a beta-binomial distribution by maximum likelihood estimation.
  The two parameters here are the shape parameters of the underlying
  beta distribution.

}
\usage{
betabinomialff(lshape1 = "loglink", lshape2 = "loglink", ishape1 = 1,
               ishape2 = NULL, imethod = 1, ishrinkage = 0.95,
               nsimEIM = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lshape1, lshape2}{
  Link functions for the two (positive) shape parameters
  of the beta distribution.
  See \code{\link{Links}} for more choices.

  }
  \item{ishape1, ishape2}{
  Initial values for the shape parameters.
  The first must be positive, and is recycled to the necessary
  length.  The second is optional.
  If a failure to converge occurs, try assigning a different value
  to \code{ishape1} and/or using \code{ishape2}.

  }
  \item{zero}{
  Can be an integer specifying which linear/additive predictor
  is to be modelled as an intercept only.
  If assigned, the single value should be either \code{1} or \code{2}.
  The default is to model both shape parameters as functions of the
  covariates.
  If a failure to converge occurs, try \code{zero = 2}.
  See \code{\link{CommonVGAMffArguments}} for more information.

  }
  \item{ishrinkage, nsimEIM, imethod}{
  See \code{\link{CommonVGAMffArguments}} for more information.
  The argument \code{ishrinkage} is used only if \code{imethod = 2}.
  Using the argument \code{nsimEIM} may offer large advantages for
  large values of \eqn{N} and/or large data sets.

  }
}
\details{
  There are several parameterizations of the beta-binomial
  distribution.
  This family function directly models the two shape parameters of
  the associated beta distribution rather than the probability of
  success (however, see \bold{Note} below).
  The model can be written
  \eqn{T|P=p \sim Binomial(N,p)}{T|P=p ~ Binomial(N,p)}
  where \eqn{P} has a beta distribution with shape parameters
  \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
  Here,
  \eqn{N} is the number of trials (e.g., litter size),
  \eqn{T=NY} is the number of successes, and
  \eqn{p} is the probability of a success (e.g., a malformation).
  That is, \eqn{Y} is the \emph{proportion} of successes.
  Like \code{\link{binomialff}}, the fitted values are the
  estimated probability
  of success (i.e., \eqn{E[Y]} and not \eqn{E[T]})
  and the prior weights \eqn{N} are attached separately on the
  object in a slot.

  The probability function is
  \deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
                 {B(\alpha, \beta)}}{%
        P(T=t) = choose(N,t) B(alpha+t, beta+N-t) / B(alpha, beta)}
  where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the beta function
  with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
  Recall \eqn{Y = T/N} is the real response being modelled.

  The default model is
  \eqn{\eta_1 = \log(\alpha)}{eta1 = log(alpha)}
  and \eqn{\eta_2 = \log(\beta)}{eta2 = log(beta)} because both
  parameters are positive.
  The mean (of \eqn{Y}) is
  \eqn{p = \mu = \alpha / (\alpha + \beta)}{p = mu = alpha /
  (alpha + beta)}
  and the variance (of \eqn{Y}) is
  \eqn{\mu(1-\mu)(1+(N-1)\rho)/N}{mu(1-mu)(1+(N-1)rho)/N}.
  Here, the correlation \eqn{\rho}{rho} is given by
  \eqn{1/(1 + \alpha + \beta)}{1/(1 + alpha + beta)}
  and is the correlation between the \eqn{N} individuals
  within a litter.
  A \emph{litter effect} is typically reflected by a positive value
  of \eqn{\rho}{rho}.  It is known as the
  \emph{over-dispersion parameter}.

  This family function uses Fisher scoring.
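% A small numerical illustration (not run) of the mean and
% correlation formulas above, using shape1 = exp(1) and
% shape2 = exp(2) as in Example 1 below:
% s1 <- exp(1); s2 <- exp(2)
% s1 / (s1 + s2)     # mu, about 0.269
% 1 / (1 + s1 + s2)  # rho, about 0.090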
The two diagonal elements of the second-order expected
  derivatives with respect to \eqn{\alpha}{alpha} and
  \eqn{\beta}{beta} are computed numerically, which may
  fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
  \eqn{N} or else take a long time.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}.

  Suppose \code{fit} is a fitted beta-binomial model. Then
  \code{fit@y} (better: \code{depvar(fit)}) contains the sample
  proportions \eqn{y},
  \code{fitted(fit)} returns estimates of \eqn{E(Y)}, and
  \code{weights(fit, type = "prior")} returns the number
  of trials \eqn{N}.

}
\references{
  Moore, D. F. and Tsiatis, A. (1991)
  Robust estimation of the variance in moment methods for
  extra-binomial and extra-Poisson variation.
  \emph{Biometrics},
  \bold{47}, 383--401.

  Prentice, R. L. (1986)
  Binary regression using an extended beta-binomial distribution,
  with discussion of correlation induced by
  covariate measurement errors.
  \emph{Journal of the American Statistical Association},
  \bold{81}, 321--327.

}
\author{ T. W. Yee }
\note{
  This function processes the input in the same way
  as \code{\link{binomialff}}. But it does not handle
  the case \eqn{N=1} very well because there are two
  parameters to estimate, not one, for each row of the input.
  Cases where \eqn{N=1} can be omitted via the
  \code{subset} argument of \code{\link{vglm}}.

  Although the two linear/additive predictors given above are
  in terms of \eqn{\alpha}{alpha} and \eqn{\beta}{beta}, basic
  algebra shows that the default amounts to fitting a logit link
  to the probability of success; subtracting the second
  linear/additive predictor from the first gives that logistic
  regression linear/additive predictor.
  That is, \eqn{logit(p) = \eta_1 - \eta_2}{logit(p) = eta1 - eta2}.
  This is illustrated in one of the examples below.

  The \emph{extended} beta-binomial distribution
  of Prentice (1986) is currently not implemented in the
  \pkg{VGAM} package as it has range-restrictions for the
  correlation parameter that are currently too difficult to
  handle in this package.

}
\section{Warning }{
  This family function is prone to numerical difficulties due to
  the expected information matrices not being positive-definite
  or ill-conditioned over some regions of the parameter space.
  If problems occur try setting \code{ishape1} to be some other
  positive value, using \code{ishape2} and/or setting
  \code{zero = 2}.

  This family function may be renamed in the future.
  See the warnings in \code{\link{betabinomial}}.

}
\seealso{
  \code{\link{betabinomial}},
  \code{\link{Betabinom}},
  \code{\link{binomialff}},
  \code{\link{betaff}},
  \code{\link{dirmultinomial}},
  \code{\link{lirat}},
  \code{\link{simulate.vlm}}.
}
\examples{
# Example 1
N <- 10; s1 <- exp(1); s2 <- exp(2)
y <- rbetabinom.ab(n = 100, size = N, shape1 = s1, shape2 = s2)
fit <- vglm(cbind(y, N-y) ~ 1, betabinomialff, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fit@misc$rho)  # The correlation parameter
head(cbind(depvar(fit), weights(fit, type = "prior")))

# Example 2
fit <- vglm(cbind(R, N-R) ~ 1, betabinomialff, data = lirat,
            trace = TRUE, subset = N > 1)
coef(fit, matrix = TRUE)
Coef(fit)
fit@misc$rho  # The correlation parameter
t(fitted(fit))
t(depvar(fit))
t(weights(fit, type = "prior"))
# "loglink" links for the 2 shape parameters imply a logistic
# regression for the mean:
all.equal(c(fitted(fit)),
          as.vector(logitlink(predict(fit)[, 1] -
                              predict(fit)[, 2], inverse = TRUE)))

# Example 3, which is more complicated
lirat <- transform(lirat, fgrp = factor(grp))
summary(lirat)  # Only 5 litters in group 3
fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomialff(zero = 2),
             data = lirat, trace = TRUE, subset = N > 1)
coef(fit2, matrix = TRUE)
coef(fit2, matrix = TRUE)[, 1] -
coef(fit2, matrix = TRUE)[, 2]  # logitlink(p)
\dontrun{ with(lirat, plot(hb[N > 1], fit2@misc$rho,
   xlab = "Hemoglobin", ylab = "Estimated rho",
   pch = as.character(grp[N > 1]), col = grp[N > 1])) }
\dontrun{  # cf. Figure 3 of Moore and Tsiatis (1991)
with(lirat, plot(hb, R / N, pch = as.character(grp), col = grp,
   las = 1, xlab = "Hemoglobin level", ylab = "Proportion Dead",
   main = "Fitted values (lines)"))
smalldf <- with(lirat, lirat[N > 1, ])
for (gp in 1:4) {
  xx <- with(smalldf, hb[grp == gp])
  yy <- with(smalldf, fitted(fit2)[grp == gp])
  ooo <- order(xx)
  lines(xx[ooo], yy[ooo], col = gp)
} }
}
\keyword{models}
\keyword{regression}
VGAM/man/genpoisson.Rd0000644000176200001440000001142413565414527014250 0ustar liggesusers\name{genpoisson}
\alias{genpoisson}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generalized Poisson distribution }
\description{
  Estimation of the two-parameter generalized Poisson distribution.

}
\usage{
genpoisson(llambda = "rhobitlink", ltheta = "loglink",
           ilambda = NULL, itheta = NULL,
           use.approx = TRUE, imethod = 1, ishrinkage = 0.95,
           zero = "lambda")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{llambda, ltheta}{
  Parameter link functions for \eqn{\lambda} and \eqn{\theta}.
  See \code{\link{Links}} for more choices.
  The \eqn{\lambda} parameter lies at least within the interval
  \eqn{[-1,1]}; see below for more details,
  hence the default \code{\link{rhobitlink}} link.
  The \eqn{\theta} parameter is positive, therefore the default is
  the log link.

  }
  \item{ilambda, itheta}{
  Optional initial values for \eqn{\lambda} and \eqn{\theta}.
  The default is to choose values internally.

  }
  \item{use.approx}{
  Logical. If \code{TRUE} then an approximation to the expected
  information matrix is used, otherwise Newton-Raphson is used.

  }
  \item{imethod}{
  An integer with value \code{1} or \code{2} or \code{3} which
  specifies the initialization method for the parameters.
  If failure to converge occurs try another value
  and/or else specify a value for \code{ilambda} and/or \code{itheta}.

  }
  \item{ishrinkage, zero}{
    See \code{\link{CommonVGAMffArguments}} for information.

  }
% \item{zero}{
% An integer vector, containing the value 1 or 2.
% If so, \eqn{\lambda} or \eqn{\theta} respectively
% are modelled as an intercept only.
% If set to \code{NULL} then both linear/additive predictors are modelled
% as functions of the explanatory variables.
% }
}
\details{
  The generalized Poisson distribution has density
  \deqn{f(y) = \theta(\theta+\lambda y)^{y-1} \exp(-\theta-\lambda y) / y!}{
        f(y) = theta*(theta+lambda*y)^(y-1) * exp(-theta-lambda*y) / y!}
  for \eqn{\theta > 0} and \eqn{y = 0,1,2,\ldots}.
  Now \eqn{\max(-1,-\theta/m) \leq \lambda \leq 1}{
           max(-1,-theta/m) <= lambda <= 1}
  where \eqn{m (\geq 4)}{m (>= 4)} is the greatest positive
  integer satisfying \eqn{\theta + m\lambda > 0}
  when \eqn{\lambda < 0}
  [and then \eqn{P(Y=y) = 0} for \eqn{y > m}].
  Note the complicated support for this distribution means,
  for some data sets,
  the default link for \code{llambda} will not always work, and
  some tinkering may be required to get it running.

  As Consul and Famoye (2006) state on p.165, the lower limits
  on \eqn{\lambda} and \eqn{m \geq 4}{m >= 4} are imposed
  to ensure that there are at least 5 classes with nonzero
  probability when \eqn{\lambda} is negative.

  An ordinary Poisson distribution corresponds
  to \eqn{\lambda = 0}{lambda = 0}.
  The mean (returned as the fitted values) is
  \eqn{E(Y) = \theta / (1 - \lambda)}
  and the variance is \eqn{\theta / (1 - \lambda)^3}.

  For more information see Consul and Famoye (2006) for a summary and
  Consul (1989) for full details.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.

}
\references{
Consul, P. C. and Famoye, F. (2006)
\emph{Lagrangian Probability Distributions},
Boston, USA: Birkhauser.

Jorgensen, B. (1997)
\emph{The Theory of Dispersion Models}.
London: Chapman & Hall.

Consul, P. C. (1989)
\emph{Generalized Poisson Distributions: Properties and Applications}.
New York, USA: Marcel Dekker.

}
\section{Warning }{
  Monitor convergence!
  This family function is fragile.
  Don't get confused because \code{theta}
  (and not \code{lambda}) here really matches more closely with
  \code{lambda} of \code{\link[stats:Poisson]{dpois}}.

}
\author{ T. W. Yee }
\note{
  This family function handles multiple responses.
  This distribution is potentially useful for dispersion modelling.
  Convergence problems may occur when \code{lambda} is very close
  to 0 or 1.
  If a failure occurs then you might want to try something like
  \code{llambda = extlogitlink(min = -0.9, max = 1)}
  to handle the LHS complicated constraint,
  and if that doesn't work, try
  \code{llambda = extlogitlink(min = -0.8, max = 1)}, etc.

}
\seealso{
  \code{\link{poissonff}},
  \code{\link[stats:Poisson]{dpois}},
  \code{\link{dgenpois}},
  \code{\link{rhobitlink}},
  \code{\link{extlogitlink}}.

}
\examples{
gdata <- data.frame(x2 = runif(nn <- 200))
gdata <- transform(gdata, y1 = rpois(nn, exp(2 - x2)))  # Poisson data
fit <- vglm(y1 ~ x2, genpoisson, data = gdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
}
\keyword{models}
\keyword{regression}

% yettodo: see csda 2009, 53(9): 3478--3489.

%{% f(y) = theta*(theta+lambda*y)^(y-1) exp(-theta-lambda*y) / y!}
VGAM/man/expint3.Rd0000644000176200001440000000516713565414527013465 0ustar liggesusers\name{expint}
\alias{expint}
\alias{expexpint}
\alias{expint.E1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The Exponential Integral and Variants }
\description{
  Computes the exponential integral \eqn{Ei(x)} for real values,
  as well as \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)} and
  \eqn{E_1(x)} and their derivatives (up to the 3rd derivative).
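% A sketch (not run): cross-checking E_1 against numerical
% quadrature of its defining integral (see Details below):
% x <- 1.5
% c(expint.E1(x), integrate(function(t) exp(-t) / t, x, Inf)$value)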
} \usage{ expint(x, deriv = 0) expexpint(x, deriv = 0) expint.E1(x, deriv = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ Numeric. Ideally a vector of positive reals. } \item{deriv}{Integer. Either 0, 1, 2 or 3. } } \details{ The exponential integral \eqn{Ei(x)} function is the integral of \eqn{\exp(t) / t}{exp(t) / t} from 0 to \eqn{x}, for positive real \eqn{x}. The function \eqn{E_1(x)} is the integral of \eqn{\exp(-t) / t}{exp(-t) / t} from \eqn{x} to infinity, for positive real \eqn{x}. } \value{ Function \code{expint(x, deriv = n)} returns the \eqn{n}th derivative of \eqn{Ei(x)} (up to the 3rd), function \code{expexpint(x, deriv = n)} returns the \eqn{n}th derivative of \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)} (up to the 3rd), function \code{expint.E1(x, deriv = n)} returns the \eqn{n}th derivative of \eqn{E_1(x)} (up to the 3rd). } \references{ \url{http://www.netlib.org/specfun/ei}. } \author{ T. W. Yee has simply written a small wrapper function to call the NETLIB FORTRAN code. Xiangjie Xue modified the functions to calculate derivatives. Higher derivatives can actually be calculated---please let me know if you need it. } \section{Warning }{ These functions have not been tested thoroughly. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link[base:log]{log}}, \code{\link[base:log]{exp}}. There is also a package called \pkg{expint}. } \examples{ \dontrun{ par(mfrow = c(2, 2)) curve(expint, 0.01, 2, xlim = c(0, 2), ylim = c(-3, 5), las = 1, col = "orange") abline(v = (-3):5, h = (-4):5, lwd = 2, lty = "dotted", col = "gray") abline(h = 0, v = 0, lty = "dashed", col = "blue") curve(expexpint, 0.01, 2, xlim = c(0, 2), ylim = c(-3, 2), las = 1, col = "orange") abline(v = (-3):2, h = (-4):5, lwd = 2, lty = "dotted", col = "gray") abline(h = 0, v = 0, lty = "dashed", col = "blue") curve(expint.E1, 0.01, 2, xlim = c(0, 2), ylim = c(0, 5), las = 1, col = "orange") abline(v = (-3):2, h = (-4):5, lwd = 2, lty = "dotted", col = "gray") abline(h = 0, v = 0, lty = "dashed", col = "blue") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{math} VGAM/man/coefvlm.Rd0000644000176200001440000000442213565414527013517 0ustar liggesusers\name{coefvlm} \alias{coefvlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Extract Model Coefficients } \description{ Extracts the estimated coefficients from VLM objects such as VGLMs. } \usage{ coefvlm(object, matrix.out = FALSE, label = TRUE, colon = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the extraction of coefficients is meaningful. This will usually be a \code{\link{vglm}} object. } \item{matrix.out}{ Logical. If \code{TRUE} then a matrix is returned. The explanatory variables are the rows. The linear/additive predictors are the columns. The constraint matrices are used to compute this matrix. } \item{label}{ Logical. If \code{FALSE} then the \code{names} of the vector of coefficients are set to \code{NULL}. } \item{colon}{ Logical. Explanatory variables which appear in more than one linear/additive predictor are labelled with a colon, e.g., \code{age:1}, \code{age:2}. However, if it only appears in one linear/additive predictor then the \code{:1} is omitted by default. Then setting \code{colon = TRUE} will add the \code{:1}. 
} } \details{ This function works in a similar way to applying \code{coef()} to a \code{\link[stats]{lm}} or \code{\link[stats]{glm}} object. However, for VGLMs, there are more options available. } \value{ A vector usually. A matrix if \code{matrix.out = TRUE}. } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{ Thomas W. Yee } %\note{ %} %\section{Warning }{ %} \seealso{ \code{\link{vglm}}, \code{\link{coefvgam}}, \code{\link[stats]{coef}}. % \code{\link{coef-method}}, } \examples{ zdata <- data.frame(x2 = runif(nn <- 200)) zdata <- transform(zdata, pstr0 = logitlink(-0.5 + 1*x2, inverse = TRUE), lambda = loglink( 0.5 + 2*x2, inverse = TRUE)) zdata <- transform(zdata, y2 = rzipois(nn, lambda, pstr0 = pstr0)) fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), data = zdata, trace = TRUE) coef(fit2, matrix = TRUE) # Always a good idea coef(fit2) coef(fit2, colon = TRUE) } \keyword{models} \keyword{regression} VGAM/man/corbet.Rd0000644000176200001440000000254513565414527013346 0ustar liggesusers\name{corbet} \alias{corbet} \docType{data} \title{ Corbet's Butterfly Data %% ~~ data name/kind ... ~~ } \description{ About 3300 individual butterflies were caught in Malaya by the naturalist Corbet. They were classified into about 500 species. %% ~~ A concise (1-5 lines) description of the dataset. ~~ } \usage{data(corbet)} \format{ A data frame with 24 observations on the following 2 variables. \describe{ \item{\code{species}}{Number of species. } \item{\code{ofreq}}{Observed frequency of individual butterflies of that species. } } } %%\format{ %% The format is: %% chr "corbet" %%} \details{ In the early 1940s Corbet spent two years trapping butterflies in Malaya. Of interest was the total number of species. Some species were so rare (e.g., 118 species had only one specimen) that it was thought likely that there were many unknown species. %% ~~ If necessary, more details than the __description__ above } %%\source{ %% ~~ reference to a publication or URL from which the data were obtained ~~ %%} \references{ Fisher, R. A., Corbet, A. S. and Williams, C. B. (1943) The Relation Between the Number of Species and the Number of Individuals in a Random Sample of an Animal Population. \emph{Journal of Animal Ecology}, \bold{12}, 42--58. } \examples{ summary(corbet) } \keyword{datasets} VGAM/man/latvar.Rd0000644000176200001440000000433713565414527013362 0ustar liggesusers\name{latvar} \alias{lv} \alias{latvar} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Latent Variables } \description{ Generic function for the \emph{latent variables} of a model. } \usage{ latvar(object, ...) lv(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the extraction of latent variables is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. Sometimes they are fed into the methods function for \code{\link{Coef}}. } } \details{ Latent variables occur in reduced-rank regression models, as well as in quadratic and additive ordination models. For the latter two, latent variable values are often called \emph{site scores} by ecologists. Latent variables are linear combinations of the explanatory variables. } \value{ The value returned depends specifically on the methods function invoked. } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models.
\emph{Statistical Modelling}, \bold{3}, 15--41. Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W. Yee } \section{Warning}{ \code{\link{latvar}} and \code{\link{lv}} are identical, but the latter will be deprecated soon. Latent variables are not really applicable to \code{\link{vglm}}/\code{\link{vgam}} models. } \seealso{ \code{latvar.qrrvglm}, \code{latvar.rrvglm}, \code{latvar.cao}, \code{\link{lvplot}}. } \examples{ \dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars set.seed(123) p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, Rank = 1, df1.nl = c(Zoraspin = 2.5, 3), Bestof = 3, Crow1positive = TRUE) var(latvar(p1)) # Scaled to unit variance c(latvar(p1)) # Estimated site scores } } \keyword{models} \keyword{regression} VGAM/man/logff.Rd0000644000176200001440000000643713565414527013165 0ustar liggesusers\name{logff} \alias{logff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Logarithmic Distribution } \description{ Estimating the (single) parameter of the logarithmic distribution. } \usage{ logff(lshape = "logitlink", gshape = ppoints(8), zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape}{ Parameter link function for the parameter \eqn{c}, which lies between 0 and 1. See \code{\link{Links}} for more choices and information. Soon \code{logfflink()} will hopefully be available for event-rate data. } \item{gshape, zero}{ Details at \code{\link{CommonVGAMffArguments}}. } } \details{ The logarithmic distribution is a generalized power series distribution that is based specifically on the logarithmic series (scaled to a probability function). Its probability function is \eqn{f(y) = a c^y / y}{f(y) = a * c^y / y}, for \eqn{y=1,2,3,\ldots}{y=1,2,3,...}, where \eqn{0 < c < 1} (called \code{shape}), and \eqn{a = -1 / \log(1-c)}{a = -1 / log(1-c)}. The mean is \eqn{a c/(1-c)}{a*c/(1-c)} (returned as the fitted values) and variance is \eqn{a c (1-ac) /(1-c)^2}{a*c*(1-a*c)/(1-c)^2}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Chapter 7 of Johnson N. L., Kemp, A. W. and Kotz S. (2005) \emph{Univariate Discrete Distributions}, 3rd edition, Hoboken, New Jersey: Wiley. Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ The function \code{\link[base:Log]{log}} computes the natural logarithm. In the \pkg{VGAM} package, the \code{\link{loglink}} link function corresponds to this. Multiple responses are permitted. The logarithmic distribution is sometimes confused with the log-series distribution. The latter was used by Fisher et al. for species abundance data and has two parameters. } \seealso{ \code{\link{rlog}}, \code{\link{oalog}}, \code{\link{oilog}}, \code{\link{otlog}}, \code{\link[base:Log]{log}}, \code{\link{loglink}}, \code{\link{logofflink}}, \code{\link{explogff}}, \code{\link{simulate.vlm}}.
} \examples{ ldata <- data.frame(y = rlog(n = 1000, shape = logitlink(0.2, inverse = TRUE))) fit <- vglm(y ~ 1, logff, data = ldata, trace = TRUE, crit = "c") coef(fit, matrix = TRUE) Coef(fit) \dontrun{with(ldata, hist(y, prob = TRUE, breaks = seq(0.5, max(y) + 0.5, by = 1), border = "blue")) x <- seq(1, with(ldata, max(y)), by = 1) with(ldata, lines(x, dlog(x, Coef(fit)[1]), col = "orange", type = "h", lwd = 2)) } # Example: Corbet (1943) butterfly Malaya data corbet <- data.frame(nindiv = 1:24, ofreq = c(118, 74, 44, 24, 29, 22, 20, 19, 20, 15, 12, 14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3)) fit <- vglm(nindiv ~ 1, logff, data = corbet, weights = ofreq) coef(fit, matrix = TRUE) shapehat <- Coef(fit)["shape"] pdf2 <- dlog(x = with(corbet, nindiv), shape = shapehat) print(with(corbet, cbind(nindiv, ofreq, fitted = pdf2 * sum(ofreq))), digits = 1) } \keyword{models} \keyword{regression} VGAM/man/linkfun.Rd0000644000176200001440000000231513565414527013531 0ustar liggesusers\name{linkfun} \alias{linkfun} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Link Functions } \description{ Generic function for returning the link functions of a fitted object. } \usage{ linkfun(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object which has parameter link functions. } \item{\dots}{ Other arguments fed into the specific methods function of the model. } } \details{ Fitted models in the \pkg{VGAM} package have parameter link functions. This generic function returns these. } \value{ The value returned depends specifically on the methods function invoked. } %\references{ %} \author{ Thomas W. Yee } %\note{ %} \seealso{ \code{\link{linkfun.vglm}}, \code{\link{multilogitlink}}, \code{\link{vglm}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo) coef(fit1, matrix = TRUE) linkfun(fit1) linkfun(fit1, earg = TRUE) fit2 <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo) coef(fit2, matrix = TRUE) linkfun(fit2) linkfun(fit2, earg = TRUE) } \keyword{models} \keyword{regression} VGAM/man/gengamma.Rd0000644000176200001440000001360513565414527013643 0ustar liggesusers\name{gengamma.stacy} \alias{gengamma.stacy} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generalized Gamma distribution family function } \description{ Estimation of the 3-parameter generalized gamma distribution proposed by Stacy (1962). } \usage{ gengamma.stacy(lscale = "loglink", ld = "loglink", lk = "loglink", iscale = NULL, id = NULL, ik = NULL, imethod = 1, gscale.mux = exp((-4:4)/2), gshape1.d = exp((-5:5)/2), gshape2.k = exp((-5:5)/2), probs.y = 0.3, zero = c("d", "k")) } %- maybe also 'usage' for other objects documented here. % yettodo: 20171221; use \cite{ye:chen:2017} to get very good init values. \arguments{ \item{lscale, ld, lk}{ Parameter link functions applied to each of the positive parameters \eqn{b}, \eqn{d} and \eqn{k}, respectively. See \code{\link{Links}} for more choices. } \item{iscale, id, ik}{ Initial value for \eqn{b}, \eqn{d} and \eqn{k}, respectively. The defaults mean an initial value is determined internally for each. } \item{gscale.mux, gshape1.d, gshape2.k}{ See \code{\link{CommonVGAMffArguments}} for information. Replaced by \code{iscale}, \code{id} etc. if given. } \item{imethod, probs.y, zero}{ See \code{\link{CommonVGAMffArguments}} for information.
% An integer-valued vector specifying which % linear/additive predictors are modelled as intercepts only. % The values must be from the set \{1,2,3\}. % The default value means none are modelled as intercept-only terms. } } \details{ The probability density function can be written \deqn{f(y;b,d,k) = d b^{-d k} y^{d k-1} \exp[-(y/b)^d] / \Gamma(k)}{% f(y;b,d,k) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k)} for scale parameter \eqn{b > 0}, and Weibull-type shape parameter \eqn{d > 0}, gamma-type shape parameter \eqn{k > 0}, and \eqn{y > 0}. The mean of \eqn{Y} is \eqn{b \times \Gamma(k+1/d) / \Gamma(k)}{b*gamma(k+1/d)/gamma(k)} (returned as the fitted values), which equals \eqn{bk}{b*k} if \eqn{d=1}. There are many special cases, as given in Table 1 of Stacy and Mihram (1965). In the following, the parameters are in the order \eqn{b,d,k}. The special cases are: Exponential \eqn{f(y;b,1,1)}, Gamma \eqn{f(y;b,1,k)}, Weibull \eqn{f(y;b,d,1)}, Chi Squared \eqn{f(y;2,1,a/2)} with \eqn{a} degrees of freedom, Chi \eqn{f(y;\sqrt{2},2,a/2)}{f(y;sqrt(2),2,a/2)} with \eqn{a} degrees of freedom, Half-normal \eqn{f(y;\sqrt{2},2,1/2)}{f(y;sqrt(2),2,1/2)}, Circular normal \eqn{f(y;\sqrt{2},2,1)}{f(y;sqrt(2),2,1)}, Spherical normal \eqn{f(y;\sqrt{2},2,3/2)}{f(y;sqrt(2),2,3/2)}, Rayleigh \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}. Also the log-normal distribution corresponds to when \code{k = Inf}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Stacy, E. W. (1962) A generalization of the gamma distribution. \emph{Annals of Mathematical Statistics}, \bold{33}(3), 1187--1192. Stacy, E. W. and Mihram, G. A. (1965) Parameter estimation for a generalized gamma distribution. \emph{Technometrics}, \bold{7}, 349--358. Prentice, R. L. (1974) A log gamma model and its maximum likelihood estimation. \emph{Biometrika}, \bold{61}, 539--544. } \section{Warning }{ Several authors have considered maximum likelihood estimation for the generalized gamma distribution and have found that the Newton-Raphson algorithm does not work very well and that the existence of solutions to the log-likelihood equations is sometimes in doubt. Although Fisher scoring is used here, it is likely that the same problems will be encountered. It appears that large samples are required; for example, the estimator of \eqn{k} became asymptotically normal only with 400 or more observations. It is not uncommon for maximum likelihood estimates to fail to converge even with two or three hundred observations. With covariates, even more observations are needed to increase the chances of convergence. Using covariates is not advised unless the sample size is at least a few thousand, and even if so, modelling 1 or 2 parameters as intercept-only is a very good idea (e.g., \code{zero = 2:3}). Monitoring convergence is also a very good idea (e.g., set \code{trace = TRUE}). Half-stepping is not uncommon, and if this occurs, then the results should be viewed with more suspicion. } \author{ T. W. Yee } \note{ The notation used here differs from Stacy (1962) and Prentice (1974). Poor initial values may result in failure to converge, so if there are covariates and there are convergence problems, try using or checking the \code{zero} argument (e.g., \code{zero = 2:3}) or the \code{ik} argument or the \code{imethod} argument, etc.
} \seealso{ \code{\link{rgengamma.stacy}}, \code{\link{gamma1}}, \code{\link{gamma2}}, \code{\link{prentice74}}, \code{\link{simulate.vlm}}, \code{\link{chisq}}, \code{\link{lognormal}}, \code{\link{rayleigh}}, \code{\link{weibullR}}. } \examples{ k <- exp(-1); Scale <- exp(1); dd <- exp(0.5); set.seed(1) gdata <- data.frame(y = rgamma(2000, shape = k, scale = Scale)) gfit <- vglm(y ~ 1, gengamma.stacy, data = gdata, trace = TRUE) coef(gfit, matrix = TRUE) } \keyword{models} \keyword{regression} %# Another example %gdata <- data.frame(x2 = runif(nn <- 5000)) %gdata <- transform(gdata, Scale = exp(1), % d = exp( 0 + 1.2* x2), % k = exp(-1 + 2 * x2)) %gdata <- transform(gdata, y = rgengamma.stacy(nn, scale = Scale, d = d, k = k)) %fit <- vglm(y ~ x2, gengamma.stacy(zero = 1, iscale = 6), data = gdata, trace = TRUE) %fit <- vglm(y ~ x2, gengamma.stacy(zero = 1), data = gdata, trace = TRUE, maxit = 50) %coef(fit, matrix = TRUE) VGAM/man/persp.qrrvglm.Rd0000644000176200001440000001707613565414527014717 0ustar liggesusers\name{perspqrrvglm} \alias{perspqrrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Perspective plot for QRR-VGLMs } \description{ Produces a perspective plot for a CQO model (QRR-VGLM). It is only applicable for rank-1 or rank-2 models with argument \code{noRRR = ~ 1}. } \usage{ perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE, xlim = NULL, ylim = NULL, zlim = NULL, gridlength = if (Rank == 1) 301 else c(51,51), which.species = NULL, xlab = if (Rank == 1) "Latent Variable" else "Latent Variable 1", ylab = if (Rank == 1) "Expected Value" else "Latent Variable 2", zlab = "Expected value", labelSpecies = FALSE, stretch = 1.05, main = "", ticktype = "detailed", col = if (Rank == 1) par()$col else "white", llty = par()$lty, llwd = par()$lwd, add1 = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ Object of class \code{"qrrvglm"}, i.e., a constrained quadratic ordination (CQO) object. } \item{varI.latvar}{ Logical that is fed into \code{\link{Coef.qrrvglm}}. } \item{refResponse}{ Integer or character that is fed into \code{\link{Coef.qrrvglm}}. } \item{show.plot}{ Logical. Plot it? } \item{xlim, ylim}{ Limits of the x- and y-axis. Both are numeric of length 2. See \code{\link[graphics]{par}}. } \item{zlim}{ Limits of the z-axis. Numeric of length 2. Ignored if rank is 1. See \code{\link[graphics]{par}}. } \item{gridlength}{ Numeric. The fitted values are evaluated on a grid, and this argument regulates the fineness of the grid. If \code{Rank = 2} then the argument is recycled to length 2, and the two numbers are the number of grid points on the x- and y-axes respectively. } \item{which.species}{ Numeric or character vector. Indicates which species are to be plotted. The default is to plot all of them. If numeric, it should contain values in the set \{1,2,\ldots,\eqn{S}\} where \eqn{S} is the number of species. } \item{xlab, ylab}{ Character caption for the x-axis and y-axis. By default, a suitable caption is found. See the \code{xlab} argument in \code{\link[graphics]{plot}} or \code{\link[graphics]{title}}. } \item{zlab}{Character caption for the z-axis. Used only if \code{Rank = 2}. By default, a suitable caption is found. See the \code{xlab} argument in \code{\link[graphics]{plot}} or \code{\link[graphics]{title}}. } \item{labelSpecies}{Logical. Whether the species should be labelled with their names. Used for \code{Rank = 1} only. 
The position of the label is just above the species' maximum. } \item{stretch}{ Numeric. A value slightly more than 1; this argument adjusts the height of the y-axis. Used for \code{Rank = 1} only. } \item{main}{ Character, giving the title of the plot. See the \code{main} argument in \code{\link[graphics]{plot}} or \code{\link[graphics]{title}}. } \item{ticktype}{ Tick type. Used only if \code{Rank = 2}. See \code{\link[graphics]{persp}} for more information. } \item{col}{ Color. See \code{\link[graphics]{persp}} for more information. } \item{llty}{ Line type. Rank-1 models only. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{llwd}{ Line width. Rank-1 models only. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{add1}{ Logical. Add to an existing plot? Used only for rank-1 models. } \item{\dots}{ Arguments passed into \code{\link[graphics]{persp}}. Useful arguments here include \code{theta} and \code{phi}, which control the position of the eye. } } \details{ For a rank-1 model, a perspective plot is similar to \code{\link{lvplot.qrrvglm}} but plots the curves along a fine grid and there is no rugplot to show the site scores. For a rank-2 model, a perspective plot has the first latent variable as the x-axis, the second latent variable as the y-axis, and the expected value (fitted value) as the z-axis. The result of a CQO is that each species has a response surface with elliptical contours. This function will, at each grid point, work out the maximum fitted value over all the species. The resulting response surface is plotted. Thus rare species will be obscured and abundant species will dominate the plot. To view rare species, use the \code{which.species} argument to select a subset of the species. A perspective plot will be performed if \code{noRRR = ~ 1}, and \code{Rank = 1} or \code{2}. Also, all the tolerance matrices of those species to be plotted must be positive-definite. } \value{ For a rank-2 model, a list with the following components. \item{fitted}{ A \eqn{(G_1 \times G_2)}{(G1*G2)} by \eqn{M} matrix of fitted values on the grid. Here, \eqn{G_1}{G1} and \eqn{G_2}{G2} are the two values of \code{gridlength}. } \item{latvar1grid, latvar2grid}{ The grid points for the x-axis and y-axis. } \item{max.fitted}{ A \eqn{G_1}{G1} by \eqn{G_2}{G2} matrix of the maximum of the fitted values over all species. These are the values that are plotted on the z-axis. } For a rank-1 model, the components \code{latvar2grid} and \code{max.fitted} are \code{NULL}. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. } \author{ Thomas W. Yee } \note{ Yee (2004) does not refer to perspective plots. Instead, contour plots via \code{\link{lvplot.qrrvglm}} are used. For rank-1 models, a similar function to this one is \code{\link{lvplot.qrrvglm}}. It plots the fitted values at the actual site score values rather than on a fine grid here. That approach has the advantage that the user sees the curves as a direct result of a model fitted to data, whereas here it is easy to think that the smooth bell-shaped curves are the truth, because the data are at more of a distance. } \seealso{ \code{\link[graphics]{persp}}, \code{\link{cqo}}, \code{\link{Coef.qrrvglm}}, \code{\link{lvplot.qrrvglm}}, \code{\link[graphics]{par}}, \code{\link[graphics]{title}}.
} \examples{\dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Good idea when I.tolerances = TRUE set.seed(111) r1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, trace = FALSE, I.tolerances = TRUE) set.seed(111) # r2 below is an ill-conditioned model r2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, isd.lv = c(2.4, 1.0), Muxfactor = 3.0, trace = FALSE, poissonff, data = hspider, Rank = 2, eq.tolerances = TRUE) sort(deviance(r1, history = TRUE)) # A history of all the fits sort(deviance(r2, history = TRUE)) # A history of all the fits if (deviance(r2) > 857) stop("suboptimal fit obtained") persp(r1, xlim = c(-6, 5), col = 1:4, label = TRUE) # Involves all species persp(r2, xlim = c(-6, 5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220)) # Omit the two dominant species to see what is behind them persp(r2, xlim = c(-6, 5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220), which = (1:10)[-c(8, 10)]) # Use zlim to retain the original z-scale } } \keyword{models} \keyword{regression} \keyword{graphs} VGAM/man/zageomUC.Rd0000644000176200001440000000436113565414527013600 0ustar liggesusers\name{Zageom} \alias{Zageom} \alias{dzageom} \alias{pzageom} \alias{qzageom} \alias{rzageom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Altered Geometric Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-altered geometric distribution with parameter \code{pobs0}. } \usage{ dzageom(x, prob, pobs0 = 0, log = FALSE) pzageom(q, prob, pobs0 = 0) qzageom(p, prob, pobs0 = 0) rzageom(n, prob, pobs0 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{prob, log}{ Parameters from the ordinary geometric distribution (see \code{\link[stats:Geometric]{dgeom}}). } \item{pobs0}{ Probability of (an observed) zero, called \eqn{pobs0}. The default value of \code{pobs0 = 0} corresponds to the response having a positive geometric distribution. } } \details{ The probability function of \eqn{Y} is 0 with probability \code{pobs0}, else a positive geometric(prob) distribution. } \value{ \code{dzageom} gives the density and \code{pzageom} gives the distribution function, \code{qzageom} gives the quantile function, and \code{rzageom} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pobs0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. } \seealso{ \code{\link{zageometric}}, \code{\link{zigeometric}}, \code{\link{rposgeom}}. 
} \examples{ prob <- 0.35; pobs0 <- 0.05; x <- (-1):7 dzageom(x, prob = prob, pobs0 = pobs0) table(rzageom(100, prob = prob, pobs0 = pobs0)) \dontrun{ x <- 0:10 barplot(rbind(dzageom(x, prob = prob, pobs0 = pobs0), dgeom(x, prob = prob)), beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1, ylab = "Probability", names.arg = as.character(x), main = paste("ZAG(prob = ", prob, ", pobs0 = ", pobs0, ") [blue] vs", " Geometric(prob = ", prob, ") [orange] densities", sep = "")) } } \keyword{distribution} VGAM/man/gumbelUC.Rd0000644000176200001440000000751313565414527013573 0ustar liggesusers\name{gumbelUC} \alias{dgumbel} \alias{pgumbel} \alias{qgumbel} \alias{rgumbel} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Gumbel Distribution } \description{ Density, distribution function, quantile function and random generation for the Gumbel distribution with location parameter \code{location} and scale parameter \code{scale}. } \usage{ dgumbel(x, location = 0, scale = 1, log = FALSE) pgumbel(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE) qgumbel(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE) rgumbel(n, location = 0, scale = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{location}{the location parameter \eqn{\mu}{mu}. This is not the mean of the Gumbel distribution (see \bold{Details} below). } \item{scale}{the scale parameter \eqn{\sigma}{sigma}. This is not the standard deviation of the Gumbel distribution (see \bold{Details} below). } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Uniform]{punif}} or \code{\link[stats:Uniform]{qunif}}. } } \details{ The Gumbel distribution is a special case of the \emph{generalized extreme value} (GEV) distribution where the shape parameter \eqn{\xi}{xi} = 0. The latter has 3 parameters, so the Gumbel distribution has two. The Gumbel distribution function is \deqn{G(y) = \exp \left( - \exp \left[ - \frac{y-\mu}{\sigma} \right] \right) }{% G(y) = exp( -exp[ - (y-mu)/sigma ] ) } where \eqn{-\infty < y < \infty}{-Inf < y < Inf} and \eqn{\sigma > 0}{sigma > 0}. Its mean is \deqn{\mu - \sigma * \gamma}{% mu - sigma * gamma} and its variance is \deqn{\sigma^2 * \pi^2 / 6}{% sigma^2 * pi^2 / 6} where \eqn{\gamma}{gamma} is Euler's constant (which can be obtained as \code{-digamma(1)}). See \code{\link{gumbel}}, the \pkg{VGAM} family function for estimating the two parameters by maximum likelihood estimation, for formulae and other details. Apart from \code{n}, all the above arguments may be vectors and are recycled to the appropriate length if necessary. } \value{ \code{dgumbel} gives the density, \code{pgumbel} gives the distribution function, \code{qgumbel} gives the quantile function, and \code{rgumbel} generates random deviates. } \references{ Coles, S. (2001) \emph{An Introduction to Statistical Modeling of Extreme Values}. London: Springer-Verlag. } \author{ T. W. Yee } \note{ The \pkg{VGAM} family function \code{\link{gumbel}} can estimate the parameters of a Gumbel distribution using maximum likelihood estimation. } \seealso{ \code{\link{gumbel}}, \code{\link{gumbelff}}, \code{\link{gev}}, \code{\link{dgompertz}}.
} \examples{ mu <- 1; sigma <- 2; y <- rgumbel(n = 100, loc = mu, scale = sigma) c(mean(y), mu - sigma * digamma(1)) # Sample and population means c(var(y), sigma^2 * pi^2 / 6) # Sample and population variances \dontrun{ x <- seq(-2.5, 3.5, by = 0.01) loc <- 0; sigma <- 1 plot(x, dgumbel(x, loc, sigma), type = "l", col = "blue", ylim = c(0, 1), main = "Blue is density, red is cumulative distribution function", sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1) abline(h = 0, col = "blue", lty = 2) lines(qgumbel(seq(0.05, 0.95, by = 0.05), loc, sigma), dgumbel(qgumbel(seq(0.05, 0.95, by = 0.05), loc, sigma), loc, sigma), col = "purple", lty = 3, type = "h") lines(x, pgumbel(x, loc, sigma), type = "l", col = "red") abline(h = 0, lty = 2) } } \keyword{distribution} VGAM/man/beniniUC.Rd0000644000176200001440000000443513565414527013564 0ustar liggesusers\name{Benini} \alias{Benini} \alias{dbenini} \alias{pbenini} \alias{qbenini} \alias{rbenini} \title{The Benini Distribution} \description{ Density, distribution function, quantile function and random generation for the Benini distribution with parameter \code{shape}. } \usage{ dbenini(x, y0, shape, log = FALSE) pbenini(q, y0, shape, lower.tail = TRUE, log.p = FALSE) qbenini(p, y0, shape, lower.tail = TRUE, log.p = FALSE) rbenini(n, y0, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats]{runif}}. } \item{y0}{the scale parameter \eqn{y_0}{y0}. } \item{shape}{the positive shape parameter \eqn{b}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dbenini} gives the density, \code{pbenini} gives the distribution function, \code{qbenini} gives the quantile function, and \code{rbenini} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{benini1}}, the \pkg{VGAM} family function for estimating the shape parameter \eqn{b} by maximum likelihood estimation, for the formula of the probability density function and other details. } %\note{ % %} \seealso{ \code{\link{benini1}}. } \examples{ \dontrun{ y0 <- 1; shape <- exp(1) xx <- seq(0.0, 4, len = 101) plot(xx, dbenini(xx, y0 = y0, shape = shape), type = "l", col = "blue", main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1, las = 1, ylab = "", xlab = "x") abline(h = 0, col = "blue", lty = 2) lines(xx, pbenini(xx, y0 = y0, shape = shape), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qbenini(probs, y0 = y0, shape = shape) lines(Q, dbenini(Q, y0 = y0, shape = shape), col = "purple", lty = 3, type = "h") pbenini(Q, y0 = y0, shape = shape) - probs # Should be all zero } } \keyword{distribution} VGAM/man/otpospoisUC.Rd0000644000176200001440000000344013565414527014352 0ustar liggesusers\name{Otpospois} \alias{Otpospois} \alias{dotpospois} \alias{potpospois} \alias{qotpospois} \alias{rotpospois} \title{ One-truncated Positive-Poisson Distribution } \description{ Density, distribution function, quantile function, and random generation for the one-truncated positive-Poisson distribution.
} \usage{ dotpospois(x, lambda, log = FALSE) potpospois(q, lambda, log.p = FALSE) qotpospois(p, lambda) rotpospois(n, lambda) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, p, n}{ Same as \code{\link{Pospois}}. } \item{lambda, log, log.p}{ Same as \code{\link{Pospois}}. } } \details{ The one-truncated positive-Poisson is a Poisson distribution but with the probabilities of a zero and a one both being 0. That is, its support is 2, 3, \ldots. The other probabilities are scaled to add to unity. Some more details are given in \code{\link{pospoisson}}. } \value{ \code{dotpospois} gives the density, \code{potpospois} gives the distribution function, \code{qotpospois} gives the quantile function, and \code{rotpospois} generates random deviates. } %\references{ %} \author{ T. W. Yee } \note{ Given some response data, the \pkg{VGAM} family function \code{\link{otpospoisson}} estimates the parameter \code{lambda}. } \seealso{ \code{\link{otpospoisson}}, \code{\link{Pospois}}, \code{\link{Oipospois}}. } \examples{ dotpospois(1:20, 0.5) rotpospois(20, 0.5) \dontrun{ lambda <- 4; x <- 1:10 plot(x, dotpospois(x, lambda = lambda), type = "h", ylim = 0:1, sub = "lambda=4", las = 1, col = "blue", ylab = "Probability", main = "1-truncated positive-Poisson distribution: blue=PMF; orange=CDF") lines(x + 0.1, potpospois(x, lambda = lambda), col = "orange", lty = 3, type = "h") } } \keyword{distribution} VGAM/man/marital.nz.Rd0000644000176200001440000000234313565414527014143 0ustar liggesusers\name{marital.nz} \alias{marital.nz} \docType{data} \title{ New Zealand Marital Data } \description{ Some marital data mainly from a large NZ company collected in the early 1990s. } \usage{data(marital.nz)} \format{ A data frame with 6053 observations on the following 3 variables. \describe{ \item{\code{age}}{a numeric vector, age in years} \item{\code{ethnicity}}{a factor with levels \code{European}, \code{Maori}, \code{Other}, \code{Polynesian}. Only Europeans are included in the data set. } \item{\code{mstatus}}{a factor with levels \code{Divorced/Separated}, \code{Married/Partnered}, \code{Single}, \code{Widowed}. } } } \details{ This is a subset of a data set collected from a self-administered questionnaire administered in a large New Zealand workforce observational study conducted during 1992--3. The data were augmented by a second study consisting of retirees. The data can be considered a reasonable representation of the white male New Zealand population in the early 1990s. } \source{ Clinical Trials Research Unit, University of Auckland, New Zealand. } \references{ See \code{\link{bmi.nz}} and \code{\link{chest.nz}}. } \examples{ summary(marital.nz) } \keyword{datasets} VGAM/man/posgeomUC.Rd0000644000176200001440000000523513565414527013770 0ustar liggesusers\name{Posgeom} \alias{Posgeom} \alias{dposgeom} \alias{pposgeom} \alias{qposgeom} \alias{rposgeom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive-Geometric Distribution } \description{ Density, distribution function, quantile function and random generation for the positive-geometric distribution. } \usage{ dposgeom(x, prob, log = FALSE) pposgeom(q, prob) qposgeom(p, prob) rposgeom(n, prob) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Fed into \code{\link[stats]{runif}}. } \item{prob}{ vector of probabilities of success (of an ordinary geometric distribution).
Short vectors are recycled. } \item{log}{ logical. } } \details{ The positive-geometric distribution is a geometric distribution but with the probability of a zero being zero. The other probabilities are scaled to add to unity. The mean therefore is \eqn{1/prob}{1/prob}. As \eqn{prob}{prob} decreases, the positive-geometric and geometric distributions become more similar. Like similar functions for the geometric distribution, a zero value of \code{prob} is not permitted here. } \value{ \code{dposgeom} gives the density, \code{pposgeom} gives the distribution function, \code{qposgeom} gives the quantile function, and \code{rposgeom} generates random deviates. } %\references{ %None. %} \author{ T. W. Yee } %\note{ % 20120405; no longer true to a superior method: % For \code{rposgeom()}, the arguments of the function are fed % into \code{\link[stats:Geometric]{rgeom}} until \eqn{n} positive % values are obtained. This may take a long time if \code{prob} % has values close to 1. % The family function \code{posgeometric} needs not be written. % If it were, then it would estimate % \eqn{prob}{prob} by maximum likelihood estimation. %} \seealso{ \code{\link{zageometric}}, \code{\link{zigeometric}}, \code{\link[stats:Geometric]{rgeom}}. % \code{posgeometric}, } \examples{ prob <- 0.75; y <- rposgeom(n = 1000, prob) table(y) mean(y) # Sample mean 1 / prob # Population mean (ii <- dposgeom(0:7, prob)) cumsum(ii) - pposgeom(0:7, prob) # Should be 0s table(rposgeom(100, prob)) table(qposgeom(runif(1000), prob)) round(dposgeom(1:10, prob) * 1000) # Should be similar \dontrun{ x <- 0:5 barplot(rbind(dposgeom(x, prob), dgeom(x, prob)), beside = TRUE, col = c("blue", "orange"), main = paste("Positive geometric(", prob, ") (blue) vs", " geometric(", prob, ") (orange)", sep = ""), names.arg = as.character(x), las = 1, lwd = 2) } } \keyword{distribution} VGAM/man/benini.Rd0000644000176200001440000000566413565414527013341 0ustar liggesusers\name{benini1} \alias{benini1} %- Also NEED an '\alias' for EACH other topic documented here. \title{Benini Distribution Family Function } \description{ Estimating the 1-parameter Benini distribution by maximum likelihood estimation. } \usage{ benini1(y0 = stop("argument 'y0' must be specified"), lshape = "loglink", ishape = NULL, imethod = 1, zero = NULL, parallel = FALSE, type.fitted = c("percentiles", "Qlink"), percentiles = 50) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{y0}{ Positive scale parameter. } \item{lshape}{ Parameter link function and extra argument of the parameter \eqn{b}, which is the shape parameter. See \code{\link{Links}} for more choices. A log link is the default because \eqn{b} is positive. } \item{ishape}{ Optional initial value for the shape parameter. The default is to compute the value internally. } \item{imethod, zero, parallel}{ Details at \code{\link{CommonVGAMffArguments}}. } \item{type.fitted, percentiles}{ See \code{\link{CommonVGAMffArguments}} for information. Using \code{"Qlink"} is for quantile-links in \pkg{VGAMextra}. } } \details{ The Benini distribution has a probability density function that can be written \deqn{f(y) = 2 s \exp(-s[(\log(y/y_0))^2]) \log(y/y_0) / y }{% f(y) = 2*s*exp(-s * [(log(y/y0))^2]) * log(y/y0) / y} for \eqn{0 < y_0 < y}{0 < y0 < y}, and shape \eqn{s > 0}. The cumulative distribution function for \eqn{Y} is \deqn{F(y) = 1 - \exp(-s[(\log(y/y_0))^2]).}{% F(y) = 1 - exp(-s * [(log(y / y0))^2]). } Here, Newton-Raphson and Fisher scoring coincide. 
The median of \eqn{Y} is now returned as the fitted values, by default. This \pkg{VGAM} family function can handle multiple responses, which are inputted as a matrix. On fitting, the \code{extra} slot has a component called \code{y0} which contains the value of the \code{y0} argument. } %\section{Warning}{ % % % The median of \eqn{Y}, which are returned as the fitted values, % may be incorrect. % % %} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. % Section 7.1, pp.235--8 } \author{ T. W. Yee } \note{ Yet to do: the 2-parameter Benini distribution estimates another shape parameter \eqn{a}{a} too. Hence, the code may change in the future. } \seealso{ \code{\link{Benini}}. } \examples{ y0 <- 1; nn <- 3000 bdata <- data.frame(y = rbenini(nn, y0 = y0, shape = exp(2))) fit <- vglm(y ~ 1, benini1(y0 = y0), data = bdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) fit@extra$y0 c(head(fitted(fit), 1), with(bdata, median(y))) # Should be equal } \keyword{models} \keyword{regression} VGAM/man/binomialff.Rd0000644000176200001440000002322213565414527014171 0ustar liggesusers\name{binomialff} %\alias{binomial} \alias{binomialff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Binomial Family Function } \description{ Family function for fitting generalized linear models to binomial responses % , where the dispersion parameter may be known or unknown. %dispersion = 1, % onedpar = !multiple.responses, parallel = FALSE, } \usage{ binomialff(link = "logitlink", multiple.responses = FALSE, parallel = FALSE, zero = NULL, bred = FALSE, earg.link = FALSE) } %- maybe also 'usage' for other objects documented here. % apply.parint = FALSE, \arguments{ \item{link}{ Link function; see \code{\link{Links}} and \code{\link{CommonVGAMffArguments}} for more information. } % \item{dispersion}{ % Dispersion parameter. By default, maximum likelihood is used to % estimate the model because it is known. However, the user can specify % \code{dispersion = 0} to have it estimated, or else specify a known % positive value (or values if \code{multiple.responses} is \code{TRUE}). % } \item{multiple.responses}{ Multivariate response? If \code{TRUE}, then the response is interpreted as \eqn{M} independent binary responses, where \eqn{M} is the number of columns of the response matrix. In this case, the response matrix should have \eqn{M} columns consisting of counts (successes), and the \code{weights} argument should have \eqn{M} columns consisting of the number of trials (successes plus failures). % zero/one values only. If \code{FALSE} and the response is a (2-column) matrix, then the number of successes is given in the first column, and the second column is the number of failures. } % \item{onedpar}{ % One dispersion parameter? If \code{multiple.responses}, % then a separate dispersion % parameter will be computed for each response (column), by default. % Setting \code{onedpar = TRUE} will pool them so that there is only one % dispersion parameter to be estimated. % } \item{parallel}{ A logical or formula. Used only if \code{multiple.responses} is \code{TRUE}.
This argument allows for the parallelism assumption whereby the regression coefficients for a variable are constrained to be equal over the \eqn{M} linear/additive predictors. If \code{parallel = TRUE} then the constraint is not applied to the intercepts. } \item{zero}{ An integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The values must be from the set \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the matrix response. See \code{\link{CommonVGAMffArguments}} for more information. } \item{earg.link}{ Details at \code{\link{CommonVGAMffArguments}}. } \item{bred}{ Details at \code{\link{CommonVGAMffArguments}}. Setting \code{bred = TRUE} should work for multiple responses (\code{multiple.responses = TRUE}) and all \pkg{VGAM} link functions; it has been tested for \code{\link{logitlink}} only (and it gives similar results to \pkg{brglm} but not identical), and further testing is required. One result from fitting bias reduced binary regression is that finite regression coefficients occur when the data are separable (see example below). Currently \code{\link{hdeff.vglm}} does not work when \code{bred = TRUE}. } } \details{ This function is largely to mimic \code{\link[stats:Binomial]{binomial}}; however, there are some differences. % If the dispersion parameter is unknown, then the resulting estimate is % not fully a maximum likelihood estimate (see pp.124--8 of McCullagh % and Nelder, 1989). % A dispersion parameter that is less/greater than unity corresponds to % under-/over-dispersion relative to the binomial model. % Over-dispersion is more common in practice. % Setting \code{multiple.responses = TRUE} is necessary % when fitting a Quadratic RR-VGLM % (see \code{\link{cqo}}) because the response is a matrix of \eqn{M} % columns (e.g., one column per species). Then there will be \eqn{M} % dispersion parameters (one per column of the response matrix). When used with \code{\link{cqo}} and \code{\link{cao}}, it may be preferable to use the \code{\link{clogloglink}} link. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{vgam}}, \code{\link{rrvglm}}, \code{\link{cqo}}, and \code{\link{cao}}. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. Altman, M. and Gill, J. and McDonald, M. P. (2004) \emph{Numerical Issues in Statistical Computing for the Social Scientist}, Hoboken, NJ, USA: Wiley-Interscience. Ridout, M. S. (1990) Non-convergence of Fisher's method of scoring---a simple example. \emph{GLIM Newsletter}, 20(6). } \author{ Thomas W. Yee } \note{ If \code{multiple.responses} is \code{FALSE} (default) then the response can be of one of two formats: a factor (first level taken as failure), or a 2-column matrix (first column = successes) of counts. The argument \code{weights} in the modelling function can also be specified as any vector of positive values. In general, 1 means success and 0 means failure (to check, see the \code{y} slot of the fitted object). Note that a general vector of proportions of success is no longer accepted. The notation \eqn{M} is used to denote the number of linear/additive predictors. If \code{multiple.responses} is \code{TRUE}, then the matrix response can only be of one format: a matrix of 1's and 0's (1 = success).
% 20180219; commented this out: % The call \code{binomialff(dispersion = 0, ...)} is equivalent to % \code{quasibinomialff(...)}. The latter was written so that R users % of \code{quasibinomial()} would only need to add a ``\code{ff}'' % to the end of the family function name. % Regardless of whether the dispersion parameter is to be estimated or % not, its value can be seen from the output from the \code{summary()} % of the object. % With the introduction of name spaces for the \pkg{VGAM} package, % \code{"ff"} can be dropped for this family function. Fisher scoring is used. This can sometimes fail to converge by oscillating between successive iterations (Ridout, 1990). See the example below. } \seealso{ \code{\link{hdeff.vglm}}, \code{\link{Links}}, \code{\link{rrvglm}}, \code{\link{cqo}}, \code{\link{cao}}, \code{\link{betabinomial}}, \code{\link{gtbinomial}}, \code{\link{posbinomial}}, \code{\link{zibinomial}}, \code{\link{double.expbinomial}}, \code{\link{seq2binomial}}, \code{\link{amlbinomial}}, \code{\link{simplex}}, \code{\link[stats:Binomial]{binomial}}, \code{\link{simulate.vlm}}, \pkg{safeBinaryRegression}, \code{\link{residualsvglm}}. % \code{\link{quasibinomialff}}, % \code{\link{matched.binomial}}, } \section{Warning }{ % With a multivariate response, assigning a known dispersion parameter % for \emph{each} response is not handled well yet. % Currently, only a single known dispersion parameter is handled well. See the above note regarding \code{bred}. The maximum likelihood estimate will not exist if the data are \emph{completely separable} or \emph{quasi-completely separable}. See Chapter 10 of Altman et al. (2004) for more details, and \pkg{safeBinaryRegression} and \code{\link{hdeff.vglm}}. Yet to do: add a \code{sepcheck = TRUE}, say, argument to further detect this problem and give an appropriate warning. } \examples{ shunua <- hunua[sort.list(with(hunua, altitude)), ] # Sort by altitude fit <- vglm(agaaus ~ poly(altitude, 2), binomialff(link = clogloglink), data = shunua) \dontrun{ plot(agaaus ~ jitter(altitude), shunua, ylab = "Pr(Agaaus = 1)", main = "Presence/absence of Agathis australis", col = 4, las = 1) with(shunua, lines(altitude, fitted(fit), col = "orange", lwd = 2)) } # Fit two species simultaneously fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude), binomialff(multiple.responses = TRUE), data = shunua) \dontrun{ with(shunua, matplot(altitude, fitted(fit2), type = "l", main = "Two species response curves", las = 1)) } # Shows that Fisher scoring can sometimes fail. See Ridout (1990). ridout <- data.frame(v = c(1000, 100, 10), r = c(4, 3, 3), n = rep(5, 3)) (ridout <- transform(ridout, logv = log(v))) # The iterations oscillate between two local solutions: glm.fail <- glm(r / n ~ offset(logv) + 1, weight = n, binomial(link = 'cloglog'), ridout, trace = TRUE) coef(glm.fail) # vglm()'s half-stepping ensures the MLE of -5.4007 is obtained: vglm.ok <- vglm(cbind(r, n-r) ~ offset(logv) + 1, binomialff(link = clogloglink), ridout, trace = TRUE) coef(vglm.ok) # Separable data set.seed(123) threshold <- 0 bdata <- data.frame(x2 = sort(rnorm(nn <- 100))) bdata <- transform(bdata, y1 = ifelse(x2 < threshold, 0, 1)) fit <- vglm(y1 ~ x2, binomialff(bred = TRUE), data = bdata, criter = "coef", trace = TRUE) coef(fit, matrix = TRUE) # Finite!!
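# A hedged extra check (not from the original example; it uses only the
# 'fit' object above): because bred = TRUE gives finite coefficients for
# these separable data, the fitted probabilities stay strictly inside
# (0, 1), whereas the ordinary MLE would push them towards 0 and 1.
range(fitted(fit))  # Strictly within (0, 1)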
summary(fit) \dontrun{ plot(depvar(fit) ~ x2, data = bdata, col = "blue", las = 1) lines(fitted(fit) ~ x2, data = bdata, col = "orange") abline(v = threshold, col = "gray", lty = "dashed") } } \keyword{models} \keyword{regression} % a vector of proportions of success, % In particular, for a general vector of proportions, % you will need to specify \code{weights} because the number of trials % is needed. % To input general positive values into the \code{weights} argument of % \code{\link{vglm}}/\code{\link{vgam}} one needs to input a 2-column % response. %quasibinomialff() %quasibinomialff(link = "probitlink") VGAM/man/expexpff1.Rd0000644000176200001440000000667413565414527013775 0ustar liggesusers\name{expexpff1} \alias{expexpff1} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Exponentiated Exponential Distribution } \description{ Estimates the two parameters of the exponentiated exponential distribution by maximizing a profile (concentrated) likelihood. } \usage{ expexpff1(lrate = "loglink", irate = NULL, ishape = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lrate}{ Parameter link function for the (positive) \eqn{\lambda}{rate} parameter. See \code{\link{Links}} for more choices. } \item{irate}{ Initial value for the \eqn{\lambda}{rate} parameter. By default, an initial value is chosen internally using \code{ishape}. } \item{ishape}{ Initial value for the \eqn{\alpha}{shape} parameter. If convergence fails try setting a different value for this argument. } } \details{ See \code{\link{expexpff}} for details about the exponentiated exponential distribution. This family function uses a different algorithm for fitting the model. Given \eqn{\lambda}{rate}, the MLE of \eqn{\alpha}{shape} can easily be obtained in terms of \eqn{\lambda}{rate}. This family function maximizes a profile (concentrated) likelihood with respect to \eqn{\lambda}{rate}. Newton-Raphson is used here, in contrast to the Fisher scoring used by \code{\link{expexpff}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Gupta, R. D. and Kundu, D. (2001) Exponentiated exponential family: an alternative to gamma and Weibull distributions, \emph{Biometrical Journal}, \bold{43}, 117--130. } \author{ T. W. Yee } \note{ This family function works only for intercept-only models, i.e., \code{y ~ 1} where \code{y} is the response. The estimate of \eqn{\alpha}{shape} is attached to the \code{misc} slot of the object, which is a list and contains the component \code{shape}. As Newton-Raphson is used, the working weights are sometimes negative, and some adjustment is made to these to make them positive. As with \code{\link{expexpff}}, good initial values are needed. Convergence may be slow. } \section{Warning }{The standard errors produced by a \code{summary} of the model may be wrong. } \seealso{ \code{\link{expexpff}}, \code{\link{CommonVGAMffArguments}}.
} \examples{ # Ball bearings data (number of million revolutions before failure) edata <- data.frame(bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60, 48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64, 68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92, 128.04, 173.40)) fit <- vglm(bbearings ~ 1, expexpff1(ishape = 4), trace = TRUE, maxit = 250, checkwz = FALSE, data = edata) coef(fit, matrix = TRUE) Coef(fit) # Authors get c(0.0314, 5.2589) with log-lik -112.9763 logLik(fit) fit@misc$shape # Estimate of shape # Failure times of the airconditioning system of an airplane eedata <- data.frame(acplane = c(23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95)) fit <- vglm(acplane ~ 1, expexpff1(ishape = 0.8), trace = TRUE, maxit = 50, checkwz = FALSE, data = eedata) coef(fit, matrix = TRUE) Coef(fit) # Authors get c(0.0145, 0.8130) with log-lik -152.264 logLik(fit) fit@misc$shape # Estimate of shape } \keyword{models} \keyword{regression} VGAM/man/amlpoisson.Rd0000644000176200001440000001163213565414527014251 0ustar liggesusers\name{amlpoisson} \alias{amlpoisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Poisson Regression by Asymmetric Maximum Likelihood Estimation } \description{ Poisson quantile regression estimated by maximizing an asymmetric likelihood function. } \usage{ amlpoisson(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4, link = "loglink") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{w.aml}{ Numeric, a vector of positive constants controlling the percentiles. The larger the value the larger the fitted percentile value (the proportion of points below the ``w-regression plane''). The default value of unity results in the ordinary maximum likelihood (MLE) solution. } \item{parallel}{ If \code{w.aml} has more than one value then this argument allows the quantile curves to differ by the same amount as a function of the covariates. Setting this to be \code{TRUE} should force the quantile curves to not cross (although they may not cross anyway). See \code{\link{CommonVGAMffArguments}} for more information. } \item{imethod}{ Integer, either 1 or 2 or 3. Initialization method. Choose another value if convergence fails. } \item{digw }{ Passed into \code{\link[base]{Round}} as the \code{digits} argument for the \code{w.aml} values; used cosmetically for labelling. } \item{link}{ See \code{\link{poissonff}}. } } \details{ This method was proposed by Efron (1992) and full details can be obtained there. % Equation numbers below refer to that article. The model is essentially a Poisson regression model (see \code{\link{poissonff}}) but the usual deviance is replaced by an asymmetric squared error loss function; it is multiplied by \eqn{w.aml} for positive residuals. The solution is the set of regression coefficients that minimize the sum of these deviance-type values over the data set, weighted by the \code{weights} argument (so that it can contain frequencies). Newton-Raphson estimation is used here. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Efron, B. (1991) Regression percentiles using asymmetric squared error loss. \emph{Statistica Sinica}, \bold{1}, 93--125. Efron, B. (1992) Poisson overdispersion estimates based on the method of asymmetric maximum likelihood. 
\emph{Journal of the American Statistical Association},
\bold{87}, 98--107.
Koenker, R. and Bassett, G. (1978)
Regression quantiles.
\emph{Econometrica},
\bold{46}, 33--50.
Newey, W. K. and Powell, J. L. (1987)
Asymmetric least squares estimation and testing.
\emph{Econometrica},
\bold{55}, 819--847.
}
\author{ Thomas W. Yee }
\note{
On fitting, the \code{extra} slot has list components
\code{"w.aml"} and \code{"percentile"}.
The latter is the percent of observations below the
``w-regression plane'', which are the fitted values.
Also, the individual deviance values corresponding to each element
of the argument \code{w.aml} are stored in the \code{extra} slot.
For \code{amlpoisson} objects, method functions for the generic
functions \code{qtplot} and \code{cdf} have not been written yet.
About the jargon, Newey and Powell (1987) used the name
\emph{expectiles} for regression surfaces obtained by asymmetric
least squares.
This was deliberate so as to distinguish them from the original
\emph{regression quantiles} of Koenker and Bassett (1978).
Efron (1991) and Efron (1992) use the general name
\emph{regression percentile} to apply to all forms of asymmetric
fitting.
Although the asymmetric maximum likelihood method very nearly gives
regression percentiles in the strictest sense for the normal and
Poisson cases, the phrase \emph{quantile regression} is used loosely
in this \pkg{VGAM} documentation.
In this documentation the word \emph{quantile} can often be
interchangeably replaced by \emph{expectile}
(things are informal here).
}
\section{Warning }{
If \code{w.aml} has more than one value then the value returned by
\code{deviance} is the sum of all the (weighted) deviances taken
over all the \code{w.aml} values.
See Equation (1.6) of Efron (1992).
}
\seealso{
\code{\link{amlnormal}},
\code{\link{amlbinomial}},
\code{\link{alaplace1}}.
}
\examples{
set.seed(1234)
mydat <- data.frame(x = sort(runif(nn <- 200)))
mydat <- transform(mydat, y = rpois(nn, exp(0 - sin(8*x))))
(fit <- vgam(y ~ s(x), fam = amlpoisson(w.aml = c(0.02, 0.2, 1, 5, 50)),
             mydat, trace = TRUE))
fit@extra
\dontrun{
# Quantile plot
with(mydat, plot(x, jitter(y), col = "blue", las = 1,
     main = paste(paste(round(fit@extra$percentile, digits = 1),
                  collapse = ", "), "percentile-expectile curves")))
with(mydat, matlines(x, fitted(fit), lwd = 2)) }
}
\keyword{models}
\keyword{regression}
VGAM/man/lirat.Rd0000644000176200001440000000451513565414527013202 0ustar liggesusers\name{lirat}
\alias{lirat}
\docType{data}
\title{ Low-iron Rat Teratology Data }
\description{
Low-iron rat teratology data.
}
\usage{data(lirat)}
\format{
A data frame with 58 observations on the following 4 variables.
\describe{
\item{\code{N}}{Litter size.}
\item{\code{R}}{Number of dead fetuses.}
\item{\code{hb}}{Hemoglobin level.}
\item{\code{grp}}{Group number.
Group 1 is the untreated (low-iron) group,
group 2 received injections on day 7 or day 10 only,
group 3 received injections on days 0 and 7, and
group 4 received injections weekly.}
}
}
\details{
The following description comes from Moore and Tsiatis (1991).
The data come from the experimental setup of Shepard et al. (1980),
which is typical of studies of the effects of chemical agents or
dietary regimens on fetal development in laboratory rats.
Female rats were put on iron-deficient diets and divided into
4 groups.
One group of controls was given weekly injections of iron supplement
to bring their iron intake to normal levels, while another group was
given only placebo injections.
Two other groups were given fewer iron-supplement injections than
the controls.
The rats were made pregnant, sacrificed 3 weeks later, and the total
number of fetuses and the number of dead fetuses in each litter were
counted.
For each litter the number of dead fetuses may be considered to be
Binomial(\eqn{N,p}) where \eqn{N} is the litter size and \eqn{p}
is the probability of a fetus dying.
The parameter \eqn{p} is expected to vary from litter to litter,
therefore the total variance of the proportions will be greater than
that predicted by a binomial model, even when the covariates for
hemoglobin level and experimental group are accounted for.
}
\source{
Moore, D. F. and Tsiatis, A. (1991)
Robust Estimation of the Variance in Moment Methods for
Extra-binomial and Extra-Poisson Variation.
\emph{Biometrics},
\bold{47}, 383--401.
}
\references{
Shepard, T. H., Mackler, B. and Finch, C. A. (1980)
Reproductive studies in the iron-deficient rat.
\emph{Teratology}, \bold{22}, 329--334.
}
\examples{
\dontrun{
# cf. Figure 3 of Moore and Tsiatis (1991)
plot(R / N ~ hb, data = lirat, pch = as.character(grp), col = grp,
     las = 1, xlab = "Hemoglobin level", ylab = "Proportion Dead") }
}
\keyword{datasets}
VGAM/man/kendall.tau.Rd0000644000176200001440000000556113565414527014273 0ustar liggesusers\name{kendall.tau}
\alias{kendall.tau}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Kendall's Tau Statistic }
\description{
Computes Kendall's Tau, which is a rank-based correlation measure,
between two vectors.
}
\usage{
kendall.tau(x, y, exact = FALSE, max.n = 3000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, y}{
Numeric vectors.
Must be of equal length.
Ideally their values are continuous and not too discrete.
Let \code{length(x)} be \eqn{N}, say.
}
\item{exact}{
Logical.
If \code{TRUE} then the exact value is computed.
}
\item{max.n}{
Numeric.
If \code{exact = FALSE} and \code{length(x)} is more than
\code{max.n} then a random sample of \code{max.n} pairs is chosen.
}
}
\details{
Kendall's tau is a measure of dependency in a bivariate
distribution.
Loosely, two random variables are \emph{concordant} if large values
of one random variable are associated with large values of the
other random variable.
Similarly, two random variables are \emph{discordant} if large
values of one random variable are associated with small values of
the other random variable.
More formally, if \code{(x[i] - x[j])*(y[i] - y[j]) > 0} then that
comparison is concordant \eqn{(i \neq j)}.
If \code{(x[i] - x[j])*(y[i] - y[j]) < 0} then that comparison is
discordant \eqn{(i \neq j)}.
Out of \code{choose(N, 2)} comparisons,
let \eqn{c} and \eqn{d} be the number of concordant and discordant
pairs.
Then Kendall's tau can be estimated by \eqn{(c-d)/(c+d)}.
If there are ties then half the ties are deemed concordant and
half discordant so that \eqn{(c-d)/(c+d+t)} is used,
where \eqn{t} is the number of tied pairs.
}
\value{
Kendall's tau, which lies between \eqn{-1} and \eqn{1}.
}
%\references{
%}
%\author{
% T. W. Yee.
%}
%\note{
%This function has not been tested thoroughly.
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\section{Warning}{
If \code{length(x)} is large then the cost is \eqn{O(N^2)}, which is
expensive!
Under these circumstances
it is not advisable to set \code{exact = TRUE} or \code{max.n} to a
very large number.
}
\seealso{
\code{\link{binormalcop}},
\code{\link[stats]{cor}}.
}
\examples{
N <- 5000
true.rho <- -0.8
ymat <- rbinorm(N, cov12 = true.rho)  # Bivariate normal, aka N_2
x <- ymat[, 1]
y <- ymat[, 2]
\dontrun{plot(x, y, col = "blue")}
kendall.tau(x, y)  # A random sample is taken here
kendall.tau(x, y)  # A random sample is taken here
kendall.tau(x, y, exact = TRUE)  # Costly if length(x) is large
kendall.tau(x, y, max.n = N)     # Same as exact = TRUE
(rhohat <- sin(kendall.tau(x, y) * pi / 2))  # This formula holds for N_2 actually
true.rho  # rhohat should be near this value
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{math}
VGAM/man/clogloglink.Rd0000644000176200001440000001055213565414527014371 0ustar liggesusers\name{clogloglink}
\alias{clogloglink}
%\alias{cloglog}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Complementary Log-log Link Function }
\description{
Computes the complementary log-log transformation, including its
inverse and the first two derivatives.
}
\usage{
clogloglink(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
            short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta}{
Numeric or character.
See below for further details.
}
\item{bvalue}{
See \code{\link{Links}} for general information about links.
}
\item{inverse, deriv, short, tag}{
Details at \code{\link{Links}}.
}
}
\details{
The complementary log-log link function is commonly used for
parameters that lie in the unit interval.
But unlike \code{\link{logitlink}}, \code{\link{probitlink}} and
\code{\link{cauchitlink}}, this link is not symmetric.
It is the inverse CDF of the extreme value (or Gumbel or
log-Weibull) distribution.
Numerical values of \code{theta} close to 0 or 1 or out of range
result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
}
\value{
For \code{deriv = 0}, the complementary log-log of \code{theta},
i.e., \code{log(-log(1 - theta))} when \code{inverse = FALSE},
and if \code{inverse = TRUE} then \code{1-exp(-exp(theta))}.
For \code{deriv = 1}, then the function returns
\emph{d} \code{eta} / \emph{d} \code{theta} as a function of
\code{theta} if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
}
\author{ Thomas W. Yee }
\note{
Numerical instability may occur when \code{theta} is close to
1 or 0.
One way of overcoming this is to use \code{bvalue}.
Changing 1s to 0s and 0s to 1s in the response means that
effectively a loglog link is fitted.
That is, transform \eqn{y} by \eqn{1-y}.
That's why only one of \code{\link{clogloglink}} and
\code{logloglink} is written.
With constrained ordination
(e.g., \code{\link{cqo}} and \code{\link{cao}})
used with \code{\link{binomialff}}, a complementary log-log link
function is preferred over the default \code{\link{logitlink}},
for a good reason.
See the example below.
In terms of the threshold approach with cumulative probabilities
for an ordinal response this link function corresponds to the
extreme value distribution.
}
\seealso{
\code{\link{Links}},
\code{\link{logitoffsetlink}},
\code{\link{logitlink}},
\code{\link{probitlink}},
\code{\link{cauchitlink}},
\code{\link{pgumbel}}.
}
\examples{
p <- seq(0.01, 0.99, by = 0.01)
clogloglink(p)
max(abs(clogloglink(clogloglink(p), inverse = TRUE) - p))  # Should be 0
p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
clogloglink(p)  # Has NAs
clogloglink(p, bvalue = .Machine$double.eps)  # Has no NAs
\dontrun{
p <- seq(0.01, 0.99, by = 0.01)
plot(p, logitlink(p), type = "l", col = "limegreen", lwd = 2, las = 1,
     main = "Some probability link functions", ylab = "transformation")
lines(p, probitlink(p), col = "purple", lwd = 2)
lines(p, clogloglink(p), col = "chocolate", lwd = 2)
lines(p, cauchitlink(p), col = "tan", lwd = 2)
abline(v = 0.5, h = 0, lty = "dashed")
legend(0.1, 4, c("logitlink", "probitlink", "clogloglink", "cauchitlink"),
       col = c("limegreen", "purple", "chocolate", "tan"), lwd = 2)
}
\dontrun{
# This example shows that clogloglink is preferred over logitlink
n <- 500; p <- 5; S <- 3; Rank <- 1  # Species packing model:
mydata <- rcqo(n, p, S, eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE,
               family = "binomial", hi.abundance = 5, seed = 123,
               Rank = Rank)
fitc <- cqo(attr(mydata, "formula"), I.tol = TRUE, data = mydata,
            fam = binomialff(multiple.responses = TRUE, link = "cloglog"),
            Rank = Rank)
fitl <- cqo(attr(mydata, "formula"), I.tol = TRUE, data = mydata,
            fam = binomialff(multiple.responses = TRUE, link = "logitlink"),
            Rank = Rank)
# Compare the fitted models (cols 1 and 3) with the truth (col 2)
cbind(concoef(fitc), attr(mydata, "concoefficients"), concoef(fitl))
}
}
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/rayleigh.Rd0000644000176200001440000001111613565414527013666 0ustar liggesusers\name{rayleigh}
\alias{rayleigh}
\alias{cens.rayleigh}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Rayleigh Regression Family Function }
\description{
Estimating the parameter of the Rayleigh distribution by maximum
likelihood estimation.
Right-censoring is allowed.
}
\usage{
rayleigh(lscale = "loglink", nrfs = 1/3 + 0.01, oim.mean = TRUE,
         zero = NULL, parallel = FALSE,
         type.fitted = c("mean", "percentiles", "Qlink"),
         percentiles = 50)
cens.rayleigh(lscale = "loglink", oim = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lscale}{
Parameter link function applied to the scale parameter \eqn{b}.
See \code{\link{Links}} for more choices.
A log link is the default because \eqn{b} is positive.
}
\item{nrfs}{
Numeric, of length one, with value in \eqn{[0,1]}.
Weighting factor between Newton-Raphson and Fisher scoring.
The value 0 means pure Newton-Raphson, while 1 means pure Fisher
scoring.
The default value uses a mixture of the two algorithms, while
retaining positive-definite working weights.
}
\item{oim.mean}{
Logical, used only for intercept-only models.
\code{TRUE} means the mean of the OIM elements is used as the
working weights.
If \code{TRUE} then this argument has top priority for working out
the working weights.
\code{FALSE} means use another algorithm.
}
\item{oim}{
Logical.
For censored data only,
\code{TRUE} means the Newton-Raphson algorithm, and
\code{FALSE} means Fisher scoring.
}
\item{zero, parallel}{
Details at \code{\link{CommonVGAMffArguments}}.
}
\item{type.fitted, percentiles}{
See \code{\link{CommonVGAMffArguments}} for information.
Using \code{"Qlink"} is for quantile-links in \pkg{VGAMextra}.
}
}
\details{
The Rayleigh distribution, which is used in physics, has a
probability density function that can be written
\deqn{f(y) = y \exp(-0.5 (y/b)^2)/b^2}{%
      f(y) = y*exp(-0.5*(y/b)^2)/b^2}
for \eqn{y > 0} and \eqn{b > 0}.
The mean of \eqn{Y} is \eqn{b \sqrt{\pi / 2}}{b * sqrt(pi / 2)} (returned as the fitted values) and its variance is \eqn{b^2 (4-\pi)/2}{b^2 (4-pi)/2}. The \pkg{VGAM} family function \code{cens.rayleigh} handles right-censored data (the true value is greater than the observed value). To indicate which type of censoring, input \code{extra = list(rightcensored = vec2)} where \code{vec2} is a logical vector the same length as the response. If the component of this list is missing then the logical values are taken to be \code{FALSE}. The fitted object has this component stored in the \code{extra} slot. The \pkg{VGAM} family function \code{rayleigh} handles multiple responses. } \section{Warning}{ The theory behind the argument \code{oim} is not fully complete. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ The \code{\link{poisson.points}} family function is more general so that if \code{ostatistic = 1} and \code{dimension = 2} then it coincides with \code{\link{rayleigh}}. Other related distributions are the Maxwell and Weibull distributions. % http://www.math.uah.edu/stat/special/MultiNormal.html % The distribution of R is known as the Rayleigh distribution, %named for William Strutt, Lord Rayleigh. It is a member of the %family of Weibull distributions, named in turn for Wallodi Weibull. } \seealso{ \code{\link{Rayleigh}}, \code{\link{genrayleigh}}, \code{\link{riceff}}, \code{\link{maxwell}}, \code{\link{weibullR}}, \code{\link{poisson.points}}, \code{\link{simulate.vlm}}. } \examples{ nn <- 1000; Scale <- exp(2) rdata <- data.frame(ystar = rrayleigh(nn, scale = Scale)) fit <- vglm(ystar ~ 1, rayleigh, data = rdata, trace = TRUE) head(fitted(fit)) with(rdata, mean(ystar)) coef(fit, matrix = TRUE) Coef(fit) # Censored data rdata <- transform(rdata, U = runif(nn, 5, 15)) rdata <- transform(rdata, y = pmin(U, ystar)) \dontrun{ par(mfrow = c(1, 2)) hist(with(rdata, ystar)); hist(with(rdata, y)) } extra <- with(rdata, list(rightcensored = ystar > U)) fit <- vglm(y ~ 1, cens.rayleigh, data = rdata, trace = TRUE, extra = extra, crit = "coef") table(fit@extra$rightcen) coef(fit, matrix = TRUE) head(fitted(fit)) } \keyword{models} \keyword{regression} VGAM/man/posbernoulli.t.Rd0000644000176200001440000002535013565414527015046 0ustar liggesusers\name{posbernoulli.t} %\alias{posbernoulli} \alias{posbernoulli.t} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive Bernoulli Family Function with Time Effects } \description{ Fits a GLM/GAM-like model to multiple Bernoulli responses where each row in the capture history matrix response has at least one success (capture). Sampling occasion effects are accommodated. % Behavioural effects are accommodated via the \code{xij} argument % of \code{\link{vglm.control}}. } \usage{ posbernoulli.t(link = "logitlink", parallel.t = FALSE ~ 1, iprob = NULL, p.small = 1e-4, no.warning = FALSE, type.fitted = c("probs", "onempall0")) } %- maybe also 'usage' for other objects documented here. %apply.parint = FALSE, \arguments{ \item{link, iprob, parallel.t}{ See \code{\link{CommonVGAMffArguments}} for information. By default, the parallelism assumption does not apply to the intercept. 
Setting \code{parallel.t = FALSE ~ -1},
or equivalently \code{parallel.t = FALSE ~ 0},
results in the \eqn{M_0}/\eqn{M_h} model.
}
\item{p.small, no.warning}{
A small probability value used to give a warning for the
Horvitz--Thompson estimator.
Any estimated probability value less than \code{p.small} will result
in a warning; however, setting \code{no.warning = TRUE} will
suppress this warning if it occurs.
This is because the Horvitz--Thompson estimator is the sum of the
reciprocal of such probabilities, therefore any probability that is
too close to 0 will result in an unstable estimate.
}
\item{type.fitted}{
See \code{\link{CommonVGAMffArguments}} for information.
The default is to return a matrix of probabilities.
If \code{"onempall0"} is chosen then the probability that each
animal is captured at least once in the course of the study is
returned.
The abbreviation stands for one minus the probability of all 0s,
and the quantity appears in the denominator of the usual formula.
% 20190503
}
}
\details{
These models
(commonly known as \eqn{M_t} or \eqn{M_{th}}
(no prefix \eqn{h} means it is an intercept-only model)
in the capture--recapture literature)
operate on a capture history matrix response of 0s and 1s
(\eqn{n \times \tau}{n x tau}).
Each column is a sampling occasion where animals are potentially
captured (e.g., a field trip), and each row is an individual animal.
Capture is a 1, else a 0.
No removal of animals from the population is made (closed
population), e.g., no immigration or emigration.
Each row of the response matrix has at least one capture.
Once an animal is captured for the first time,
it is marked/tagged so that its future capture history can be
recorded.
Then it is released immediately back into the population to remix.
It is released immediately after each recapture too.
It is assumed that the animals are independent and that, for a given
animal, each sampling occasion is independent.
It is also assumed that animals do not lose their marks/tags, and
that all marks/tags are correctly recorded.
The number of linear/additive predictors is equal to the number of
sampling occasions, i.e., \eqn{M = \tau}, say.
The default link functions are
\eqn{(logit \,p_{1},\ldots,logit \,p_{\tau})^T}{(logit p_(1),\ldots,logit p_(tau))^T}
where each \eqn{p_{j}} denotes the probability of capture at
time point \eqn{j}.
The fitted value returned is a matrix of probabilities of the same
dimension as the response matrix.
% Thus \eqn{M = \tau}{M = tau}.
A conditional likelihood is maximized here using Fisher scoring.
Each sampling occasion has a separate probability that is modelled
here.
The probabilities can be constrained to be equal by setting
\code{parallel.t = FALSE ~ 0};
then the results are effectively the same as
\code{\link{posbinomial}} except the binomial constants are not
included in the log-likelihood.
If \code{parallel.t = TRUE ~ 0} then each column should have at
least one 1 and at least one 0.
It is well-known that some species of animals are affected by
capture, e.g., trap-shy or trap-happy.
This \pkg{VGAM} family function does \emph{not} allow any
behavioural effect to be modelled
(\code{\link{posbernoulli.b}} and \code{\link{posbernoulli.tb}} do)
because the denominator of the likelihood function must be free of
behavioural effects.
% via covariates that are specific to each sampling occasion,
% e.g., through the \code{xij} argument.
% Ignoring capture history effects would mean
% \code{\link{posbinomial}} could be used by aggregating over
% the sampling occasions.
% If there are no covariates that are specific to each occasion
% then the response matrix can be summed over the columns and
% \code{\link{posbinomial}} could be used by aggregating over
% the sampling occasions.
}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.
Upon fitting, the \code{extra} slot has a (list) component called
\code{N.hat}
which is a point estimate of the population size \eqn{N}
(it is the Horvitz--Thompson (1952) estimator).
There is also a component called \code{SE.N.hat}
containing its standard error.
}
\references{
Huggins, R. M. (1991)
Some practical aspects of a conditional likelihood approach to
capture experiments.
\emph{Biometrics},
\bold{47}, 725--732.
Huggins, R. M. and Hwang, W.-H. (2011)
A review of the use of conditional likelihood in capture--recapture
experiments.
\emph{International Statistical Review},
\bold{79}, 385--400.
Otis, D. L., Burnham, K. P., White, G. C. and Anderson, D. R. (1978)
Statistical inference from capture data on closed animal
populations.
\emph{Wildlife Monographs},
\bold{62}, 3--135.
Yee, T. W., Stoklosa, J. and Huggins, R. M. (2015)
The \pkg{VGAM} package for capture--recapture data using the
conditional likelihood.
\emph{Journal of Statistical Software},
\bold{65}, 1--33.
\url{http://www.jstatsoft.org/v65/i05/}.
% \bold{65}(5), 1--33.
}
\author{ Thomas W. Yee. }
\note{
% Models \eqn{M_{tbh}}{M_tbh} can be fitted using the
% \code{xij} argument (see \code{\link{vglm.control}})
% to input the behavioural effect indicator
% variables. Rather than manually setting these
% up, they may be more conveniently obtained by
% \code{\link{aux.posbernoulli.t}}.
% See the example below.
The \code{weights} argument of \code{\link{vglm}} need not be
assigned, and the default is just a matrix of ones.
Fewer numerical problems are likely to occur
for \code{parallel.t = TRUE}.
Data-wise, each sampling occasion may need at least one success
(capture) and one failure.
Less stringent conditions in the data are needed when
\code{parallel.t = TRUE}.
Ditto when parallelism is applied to the intercept too.
% for \code{apply.parint = TRUE}.
The response matrix is returned unchanged;
i.e., not converted into proportions like \code{\link{posbinomial}}.
If the response matrix has column names then these are used in the
labelling, else \code{prob1}, \code{prob2}, etc. are used.
Using \code{AIC()} or \code{BIC()} to compare
\code{\link{posbernoulli.t}},
\code{\link{posbernoulli.b}},
\code{\link{posbernoulli.tb}}
models with a
\code{\link{posbinomial}}
model requires \code{posbinomial(omit.constant = TRUE)}
because one needs to remove the normalizing constant from the
log-likelihood function.
See \code{\link{posbinomial}} for an example.
% If not all of the \eqn{2^{\tau}-1}{2^(tau) - 1} combinations of
% the response matrix are not present then it pays to add
% such rows to the response matrix and assign a small but
% positive prior weight.
% For example, if \eqn{\tau=2}{tau=2} then there should be
% (0,1) rows,
% (1,0) rows and
% (1,1) rows present in the response matrix.
}
%\section{Warning }{
%
% See \code{\link{posbernoulli.tb}}.
% % %} \seealso{ \code{\link{posbernoulli.b}}, \code{\link{posbernoulli.tb}}, \code{\link{Select}}, \code{\link{deermice}}, \code{\link{Huggins89table1}}, \code{\link{Huggins89.t1}}, \code{\link{dposbern}}, \code{\link{rposbern}}, \code{\link{posbinomial}}, \code{\link{AICvlm}}, \code{\link{BICvlm}}, \code{\link{prinia}}. % \code{\link{aux.posbernoulli.t}}, % \code{\link{vglm.control}} for \code{xij}, % \code{\link{huggins91}}. } \examples{ M.t <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1, posbernoulli.t, data = deermice, trace = TRUE) coef(M.t, matrix = TRUE) constraints(M.t, matrix = TRUE) summary(M.t, presid = FALSE) M.h.1 <- vglm(Select(deermice, "y") ~ sex + weight, trace = TRUE, posbernoulli.t(parallel.t = FALSE ~ -1), data = deermice) coef(M.h.1, matrix = TRUE) constraints(M.h.1) summary(M.h.1, presid = FALSE) head(depvar(M.h.1)) # Response capture history matrix dim(depvar(M.h.1)) M.th.2 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE, posbernoulli.t(parallel.t = FALSE), data = deermice) lrtest(M.h.1, M.th.2) # Test the parallelism assumption wrt sex and weight coef(M.th.2) coef(M.th.2, matrix = TRUE) constraints(M.th.2) summary(M.th.2, presid = FALSE) head(model.matrix(M.th.2, type = "vlm"), 21) M.th.2@extra$N.hat # Estimate of the population size; should be about N M.th.2@extra$SE.N.hat # SE of the estimate of the population size # An approximate 95 percent confidence interval: round(M.th.2@extra$N.hat + c(-1, 1) * 1.96 * M.th.2@extra$SE.N.hat, 1) # Fit a M_h model, effectively the parallel M_t model, using posbinomial() deermice <- transform(deermice, ysum = y1 + y2 + y3 + y4 + y5 + y6, tau = 6) M.h.3 <- vglm(cbind(ysum, tau - ysum) ~ sex + weight, posbinomial(omit.constant = TRUE), data = deermice, trace = TRUE) max(abs(coef(M.h.1) - coef(M.h.3))) # Should be zero logLik(M.h.3) - logLik(M.h.1) # Difference is due to the binomial constants } \keyword{models} \keyword{regression} %# Fit a M_tbh model: %pdata <- aux.posbernoulli.t(with(deermice, cbind(y1, y2, y3, y4, y5, y6))) # Convenient %deermice <- data.frame(deermice, bei = 0, pdata$cap.hist1) # Put all into 1 dataframe %head(deermice) # Augmented with behavioural effect indicator variables %M.tbh.1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight + age + bei, % posbernoulli.t(parallel.t = TRUE ~ sex + weight + age + bei - 1), % data = deermice, trace = TRUE, % xij = list(bei ~ bei1 + bei2 + bei3 + bei4 + bei5 + bei6 - 1), % form2 = ~ bei1 + bei2 + bei3 + bei4 + bei5 + bei6 + % sex + weight + age + bei) %coef(M.tbh.1, matrix = TRUE) %head(deermice, 3) %head(model.matrix(M.tbh.1, type = "vlm"), 20) %summary(M.tbh.1, presid = FALSE) %head(depvar(M.tbh.1)) # Response capture history matrix %dim(depvar(M.tbh.1)) VGAM/man/deermice.Rd0000644000176200001440000000417213565414527013643 0ustar liggesusers\name{deermice} %\alias{Perom} \alias{deermice} \docType{data} \title{ Captures of Peromyscus maniculatus (Also Known as Deer Mice). %% ~~ data name/kind ... ~~ } \description{ Captures of \emph{Peromyscus maniculatus} collected at East Stuart Gulch, Colorado, USA. %% ~~ A concise (1-5 lines) description of the dataset. ~~ } % data(Perom) \usage{ data(deermice) } \format{ The format is a data frame. } \details{ \emph{Peromyscus maniculatus} is a rodent native to North America. The deer mouse is small in size, only about 8 to 10 cm long, not counting the length of the tail. 
Originally,
the columns of this data frame represent the sex
(\code{m} or \code{f}),
the ages (\code{y}: young, \code{sa}: semi-adult, \code{a}: adult),
the weights in grams,
and the capture histories of 38 individuals over 6 trapping
occasions (1: captured, 0: not captured).
The data set was collected by V. Reid and distributed with the
\pkg{CAPTURE} program of Otis et al. (1978).
\code{deermice} has 38 deermice whereas \code{Perom} had 36 deermice
(\code{Perom} has been withdrawn).
In \code{deermice} the two semi-adults have been classified as
adults.
The \code{sex} variable has 1 for female and 0 for male.
%% ~~ If necessary, more details than the __description__ above ~~
}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
\references{
Huggins, R. M. (1991)
Some practical aspects of a conditional likelihood approach to
capture experiments.
\emph{Biometrics}, \bold{47}, 725--732.
Otis, D. L. et al. (1978)
Statistical inference from capture data on closed animal
populations.
\emph{Wildlife Monographs}, \bold{62}, 3--135.
%% ~~ possibly secondary sources and usages ~~
}
\seealso{
\code{\link[VGAM:posbernoulli.b]{posbernoulli.b}},
\code{\link[VGAM:posbernoulli.t]{posbernoulli.t}},
\code{\link{fill1}}.
}
\examples{
head(deermice)
\dontrun{
fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + age,
             posbernoulli.t(parallel.t = TRUE), data = deermice,
             trace = TRUE)
coef(fit1)
coef(fit1, matrix = TRUE)
}
}
\keyword{datasets}
VGAM/man/UtilitiesVGAM.Rd0000644000176200001440000000716213565414527014516 0ustar liggesusers\name{UtilitiesVGAM}
\alias{UtilitiesVGAM}
\alias{param.names}
\alias{dimm}
\alias{interleave.VGAM}
\title{Utility Functions for the VGAM Package }
\description{
A set of common utility functions used by \pkg{VGAM} family
functions.
}
\usage{
param.names(string, S = 1, skip1 = FALSE, sep = "")
dimm(M, hbw = M)
interleave.VGAM(.M, M1, inverse = FALSE)
}
\arguments{
\item{string}{
Character.
Name of the parameter.
}
\item{M, .M}{
Numeric.
The total number of linear/additive predictors, called \eqn{M}.
By total, it is meant summed over the number of responses.
Often, \eqn{M} is the total number of parameters to be estimated
(but this is not the same as the number of regression coefficients,
unless the RHS of the formula is intercept-only).
The use of \code{.M} is unfortunate, but it is a compromise solution
to what is presented in Yee (2015).
Ideally, \code{.M} should be just \code{M}.
}
\item{M1}{
Numeric.
The number of linear/additive predictors for one response, called
\eqn{M_1}.
This argument used to be called \code{M}, but is now renamed
properly.
}
\item{inverse}{
Logical.
Useful for the inverse function of \code{interleave.VGAM()}.
}
\item{S}{
Numeric.
The number of responses.
}
\item{skip1, sep}{
The former is logical;
should one skip (or omit) \code{"1"} when \code{S = 1}?
The latter is the same argument as \code{\link[base]{paste}}.
}
\item{hbw}{
Numeric.
The half-bandwidth, which measures the number of bands emanating
from the central diagonal band.
}
}
\value{
For \code{param.names()}, this function returns the parameter names
for \eqn{S} responses,
i.e., \code{string} is returned unchanged if \eqn{S=1},
else \code{paste(string, 1:S, sep = "")}.
For \code{dimm()}, this function returns the number of elements
to be stored for each of the working weight matrices.
They are represented as columns in the matrix \code{wz} in
e.g., \code{vglm.fit()}.
See the \emph{matrix-band} format described in
Section 18.3.5 of Yee (2015).
For \code{interleave.VGAM()}, this function returns a reordering of
the linear/additive predictors depending on the number of responses.
The arguments presented in Table 18.5 may not be valid in your
version of Yee (2015).
}
%\section{Warning }{
% The \code{zero} argument is supplied for convenience but conflicts
%}
\details{
See Yee (2015) for some details about some of these functions.
}
\references{
Yee, T. W. (2015)
Vector Generalized Linear and Additive Models:
With an Implementation in R.
New York, USA: \emph{Springer}.
}
\seealso{
\code{\link{CommonVGAMffArguments}},
\code{\link{VGAM-package}}.
}
\author{T. W. Yee.
Victor Miranda added the \code{inverse} argument to
\code{interleave.VGAM()}.
}
%\note{
% See \code{\link{Links}} regarding a major change in
%
%}
\examples{
param.names("shape", 1)  # "shape"
param.names("shape", 3)  # c("shape1", "shape2", "shape3")
dimm(3, hbw = 1)  # Diagonal matrix; the 3 elements need storage.
dimm(3)  # A general 3 x 3 symmetric matrix has 6 unique elements.
dimm(3, hbw = 2)  # Tridiagonal matrix; the 3-3 element is 0 and unneeded.
M1 <- 2; ncoly <- 3; M <- ncoly * M1
mynames1 <- param.names("location", ncoly)
mynames2 <- param.names("scale", ncoly)
(parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)])
# The following is/was in Yee (2015) and has a poor/deceptive style:
(parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)])
parameters.names[interleave.VGAM(M, M1 = M1, inverse = TRUE)]
}
\keyword{distribution}
\keyword{regression}
\keyword{programming}
\keyword{models}
VGAM/man/AR1.Rd0000644000176200001440000002350713565414527012454 0ustar liggesusers\name{AR1}
\alias{AR1}
% \alias{AR1.control}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Autoregressive Process with Order-1 Family Function }
\description{
Maximum likelihood estimation of the three-parameter AR-1 model.
}
\usage{
AR1(ldrift = "identitylink", lsd = "loglink", lvar = "loglink",
    lrho = "rhobitlink", idrift = NULL, isd = NULL, ivar = NULL,
    irho = NULL, imethod = 1, ishrinkage = 0.95,
    type.likelihood = c("exact", "conditional"),
    type.EIM = c("exact", "approximate"),
    var.arg = FALSE, nodrift = FALSE, print.EIM = FALSE,
    zero = c(if (var.arg) "var" else "sd", "rho"))
}
% zero = c(-2, -3)
% AR1.control(epsilon = 1e-6, maxit = 30, stepsize = 1, ...)
% deviance.arg = FALSE,
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ldrift, lsd, lvar, lrho}{
Link functions applied to the scaled mean, standard deviation or
variance, and correlation parameters.
The parameter \code{drift} is known as the \emph{drift}, and it is
a scaled mean.
See \code{\link{Links}} for more choices.
}
\item{idrift, isd, ivar, irho}{
Optional initial values for the parameters.
If failure to converge occurs then try different values and monitor
convergence by using \code{trace = TRUE}.
For an \eqn{S}-column response, these arguments can be of length
\eqn{S}, and they are recycled by the columns first.
A value \code{NULL} means an initial value for each response is
computed internally.
}
\item{ishrinkage, imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
The default for \code{zero} assumes there is a drift parameter to be
estimated (the default for that argument), so if a drift parameter
is suppressed and there are covariates, then \code{zero} will need
to be assigned the value 1 or 2 or \code{NULL}.
}
\item{var.arg}{
Same meaning as in \code{\link{uninormal}}.
}
\item{nodrift}{
Logical, for determining whether to estimate the drift parameter.
The default is to estimate it.
If \code{TRUE}, the drift parameter is set to 0 and not estimated.
}
\item{type.EIM}{
What type of expected information matrix (EIM) is used in
Fisher scoring.
By default, this family function calls
\code{\link[VGAM:AR1EIM]{AR1EIM}},
which recursively computes the exact EIM for the AR process with
Gaussian white noise.
See Porat and Friedlander (1986) for further details on the
exact EIM.
If \code{type.EIM = "approximate"} then an approximate expression
for the EIM of autoregressive processes is used;
this approach holds when the number of observations is large enough.
Succinct details about the approximate EIM are given in
Porat and Friedlander (1987).
}
\item{print.EIM}{
Logical.
If \code{TRUE}, then the first few EIMs are printed.
Here, the result shown is the sum of each EIM.
}
\item{type.likelihood}{
What type of likelihood function is maximized.
The first choice (default) is the sum of the marginal likelihood
and the conditional likelihood.
Choosing the conditional likelihood means that the first observation
is effectively ignored
(this is handled internally by setting the value of the first prior
weight to be some small positive number, e.g., \code{1.0e-6}).
See the note below.
}
% \item{epsilon, maxit, stepsize,...}{
% Same as \code{\link[VGAM:vglm.control]{vglm.control}}.
%
%
% }
}
\details{
The AR-1 model implemented here has
\deqn{Y_1 \sim N(\mu, \sigma^2 / (1-\rho^2)), }{%
Y(1) ~ N(mu, sigma^2 / (1-rho^2)), }
and
\deqn{Y_i = \mu^* + \rho Y_{i-1} + e_i, }{%
Y(i) = mu^* + rho * Y(i-1) + e(i) }
where the \eqn{e_i}{e(i)} are i.i.d.
Normal(0, sd = \eqn{\sigma}{sigma}) random variates.
Here are a few notes:
(1). A test for weak stationarity might be to verify whether
\eqn{1/\rho}{1/rho} lies outside the unit circle.
(2). The mean of all the \eqn{Y_i}{Y(i)} is
\eqn{\mu^* /(1-\rho)}{mu^* / (1-rho)} and
these are returned as the fitted values.
(3). The correlation of all the \eqn{Y_i}{Y(i)} with
\eqn{Y_{i-1}}{Y(i-1)} is \eqn{\rho}{rho}.
(4). The default link function ensures that
\eqn{-1 < \rho < 1}{-1 < rho < 1}.
% (1). ... whether \eqn{\mu^*}{mu^*} is intercept-only.
}
\section{Warning}{
Monitoring convergence is urged, i.e., set \code{trace = TRUE}.
Moreover, if the exact EIMs are used, set \code{print.EIM = TRUE}
to compare the computed exact to the approximate EIM.
Under the VGLM/VGAM approach, parameters can be modelled in terms
of covariates.
Particularly, if the standard deviation of the white noise is
modelled in this way, then \code{type.EIM = "exact"} may lead to
unstable results.
The reason is that white noise is a stationary process, and
consequently, its variance must remain constant.
Using covariates to model this parameter therefore contradicts the
assumption of stationary random components underlying the exact
EIMs proposed by Porat and Friedlander (1987).
To prevent convergence issues in such cases, this family function
internally verifies whether the variance of the white noise remains
constant at each Fisher scoring iteration.
If this assumption is violated and \code{type.EIM = "exact"} is set,
then \code{AR1} automatically shifts to
\code{type.EIM = "approximate"}.
Also, a warning is accordingly displayed.
%Thus, if modelling the standard deviation of the white noise
%is required, the use of \code{type.EIM = "approximate"} is
%highly recommended.
%Yet to do: add an argument that allows the scaled mean parameter
%to be deleted, i.e, a 2-parameter model is fitted.
%Yet to do: \code{ARff(p.lag = 1)} should hopefully be written soon.
}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.
}
\references{
Porat, B. and Friedlander, B. (1987)
The Exact Cramer-Rao Bound for Gaussian Autoregressive Processes.
\emph{IEEE Transactions on Aerospace and Electronic Systems},
\bold{AES-23(4)}, 537--542.
}
\author{ Victor Miranda (exact method) and
Thomas W. Yee (approximate method).}
\note{
%For \code{type.likelihood = "conditional"},
%the prior weight for the first observation is set to
%some small positive number, which has the effect of deleting
%that observation.
%However, \eqn{n} is still the original \eqn{n} so that
%statistics such as the residual degrees of freedom are
%unchanged (uncorrected possibly).
Multiple responses are handled.
The mean is returned as the fitted values.
% Argument \code{zero} can be either a numeric or a character
% vector. It must specify the position(s) or name(s) of the
% parameters to be modeled as intercept-only. If names are used,
% notice that parameter names in this family function are
% \deqn{c("drift", "var" or "sd", "rho").}
%Practical experience has shown that half-stepping is a very
%good idea. The default options use step sizes that are
%about one third the usual step size. Consequently,
%\code{maxit} is increased to about 100, by default.
}
\seealso{
\code{\link{AR1EIM}},
\code{\link{vglm.control}},
\code{\link{dAR1}},
\code{\link[stats]{arima.sim}}.
}
\examples{
### Example 1: using arima.sim() to generate a 0-mean stationary time series.
nn <- 500
tsdata <- data.frame(x2 = runif(nn))
ar.coef.1 <- rhobitlink(-1.55, inverse = TRUE)  # Approx -0.65
ar.coef.2 <- rhobitlink( 1.0, inverse = TRUE)   # Approx  0.50
set.seed(1)
tsdata <- transform(tsdata,
                    index = 1:nn,
                    TS1 = arima.sim(nn, model = list(ar = ar.coef.1),
                                    sd = exp(1.5)),
                    TS2 = arima.sim(nn, model = list(ar = ar.coef.2),
                                    sd = exp(1.0 + 1.5 * x2)))
### An autoregressive intercept--only model.   ###
### Using the exact EIM, and "nodrift = TRUE"  ###
fit1a <- vglm(TS1 ~ 1, data = tsdata, trace = TRUE,
              AR1(var.arg = FALSE, nodrift = TRUE,
                  type.EIM = "exact", print.EIM = FALSE),
              crit = "coefficients")
Coef(fit1a)
summary(fit1a)
\dontrun{
### Two responses. Here, the white noise standard deviation of TS2  ###
### is modelled in terms of 'x2'. Also, 'type.EIM = exact'.         ###
fit1b <- vglm(cbind(TS1, TS2) ~ x2,
              AR1(zero = NULL, nodrift = TRUE, var.arg = FALSE,
                  type.EIM = "exact"),
              constraints = list("(Intercept)" = diag(4),
                                 "x2" = rbind(0, 0, 1, 0)),
              data = tsdata, trace = TRUE, crit = "coefficients")
coef(fit1b, matrix = TRUE)
summary(fit1b)
### Example 2: another stationary time series
nn <- 500
my.rho <- rhobitlink(1.0, inverse = TRUE)
my.mu  <- 1.0
my.sd  <- exp(1)
tsdata <- data.frame(index = 1:nn, TS3 = runif(nn))
set.seed(2)
for (ii in 2:nn)
  tsdata$TS3[ii] <- my.mu/(1 - my.rho) +
                    my.rho * tsdata$TS3[ii-1] + rnorm(1, sd = my.sd)
tsdata <- tsdata[-(1:ceiling(nn/5)), ]  # Remove the burn-in data:
### Fitting an AR(1). The exact EIMs are used.
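# Note (from the 'type.likelihood' description above): the default
# "exact" maximizes the sum of the marginal and conditional likelihoods,
# whereas "conditional" effectively ignores the first observation.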
fit2a <- vglm(TS3 ~ 1,
              AR1(type.likelihood = "exact",  # "conditional",
                  type.EIM = "exact"),
              data = tsdata, trace = TRUE, crit = "coefficients")
Coef(fit2a)
summary(fit2a)        # SEs are useful to know
Coef(fit2a)["rho"]    # Estimate of rho, for intercept-only models
my.rho                # The 'truth' (rho)
Coef(fit2a)["drift"]  # Estimate of drift, for intercept-only models
my.mu /(1 - my.rho)   # The 'truth' (drift)
}
}
\keyword{models}
\keyword{regression}
%fit1a <- vglm(cbind(TS1, TS2) ~ x2, AR1(zero = c(1:4, 6)),
%              data = tsdata, trace = TRUE)
%head(weights(fit2a, type = "prior"))    # First one is effectively deleted
%head(weights(fit2a, type = "working"))  # Ditto
VGAM/man/betageometric.Rd0000644000176200001440000001016113565414527014673 0ustar liggesusers\name{betageometric}
\alias{betageometric}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Beta-geometric Distribution Family Function }
\description{
Maximum likelihood estimation for the beta-geometric distribution.
}
\usage{
betageometric(lprob = "logitlink", lshape = "loglink",
              iprob = NULL, ishape = 0.1,
              moreSummation = c(2, 100), tolerance = 1.0e-10,
              zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lprob, lshape}{
Parameter link functions applied to the parameters \eqn{p}{prob}
and \eqn{\phi}{phi}
(called \code{prob} and \code{shape} below).
The former lies in the unit interval and the latter is positive.
See \code{\link{Links}} for more choices.
}
\item{iprob, ishape}{
Numeric.
Initial values for the two parameters.
A \code{NULL} means a value is computed internally.
}
\item{moreSummation}{
Integer, of length 2.
When computing the expected information matrix a series summation
from 0 to \code{moreSummation[1]*max(y)+moreSummation[2]} is made,
in which the upper limit is an approximation to infinity.
Here, \code{y} is the response.
}
\item{tolerance}{
Positive numeric.
When all terms are less than this, the series is deemed to have
converged.
}
\item{zero}{
An integer-valued vector specifying which linear/additive predictors
are modelled as intercepts only.
If used, the value must be from the set \{1,2\}.
}
}
\details{
A random variable \eqn{Y} has a 2-parameter beta-geometric
distribution if
\eqn{P(Y=y) = p (1-p)^y}{P(Y=y) = prob * (1-prob)^y}
for \eqn{y=0,1,2,\ldots}{y=0,1,2,...}
where \eqn{p}{prob} is generated from a standard beta distribution
with shape parameters \code{shape1} and \code{shape2}.
The parameterization here is to focus on the parameters
\eqn{p}{prob} and
\eqn{\phi = 1/(shape1+shape2)}{phi = 1/(shape1+shape2)},
where \eqn{\phi}{phi} is \code{shape}.
The default link functions for these ensure that the appropriate
range of the parameters is maintained.
The mean of \eqn{Y} is
\eqn{E(Y) = shape2 / (shape1-1) = (1-p) / (p-\phi)}{E(Y) = shape2 / (shape1-1) = (1-prob) / (prob-phi)}
if \code{shape1 > 1}, and if so, then this is returned as
the fitted values.
The geometric distribution is a special case of the beta-geometric
distribution with \eqn{\phi=0}{phi=0}
(see \code{\link{geometric}}).
However, fitting data from a geometric distribution may result in
numerical problems because the estimate of
\eqn{\log(\phi)}{log(phi)} will 'converge' to \code{-Inf}.
}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.
}
\references{
Paul, S. R. (2005)
Testing goodness of fit of the geometric distribution:
an application to human fecundability data.
\emph{Journal of Modern Applied Statistical Methods},
\bold{4}, 425--433.
}
\author{ T. W. Yee }
\note{
The first iteration may be very slow;
if practical, it is best for the \code{weights} argument of
\code{\link{vglm}} etc. to be used rather than inputting a very
long vector as the response,
i.e., \code{vglm(y ~ 1, ..., weights = wts)}
is to be preferred over \code{vglm(rep(y, wts) ~ 1, ...)}.
If convergence problems occur, try inputting some values of
argument \code{ishape}.
If an intercept-only model is fitted then the \code{misc} slot of
the fitted object has list components \code{shape1} and
\code{shape2}.
}
\seealso{
\code{\link{geometric}},
\code{\link{betaff}},
\code{\link{rbetageom}}.
}
\examples{
bdata <- data.frame(y = 0:11,
                    wts = c(227,123,72,42,21,31,11,14,6,4,7,28))
fitb <- vglm(y ~ 1, betageometric, data = bdata, weight = wts, trace = TRUE)
fitg <- vglm(y ~ 1, geometric, data = bdata, weight = wts, trace = TRUE)
coef(fitb, matrix = TRUE)
Coef(fitb)
sqrt(diag(vcov(fitb, untransform = TRUE)))
fitb@misc$shape1
fitb@misc$shape2
# Very strong evidence of a beta-geometric:
pchisq(2 * (logLik(fitb) - logLik(fitg)), df = 1, lower.tail = FALSE)
}
\keyword{models}
\keyword{regression}
VGAM/man/binom2.orUC.Rd0000644000176200001440000000742613565414527014130 0ustar liggesusers\name{Binom2.or}
\alias{Binom2.or}
\alias{dbinom2.or}
\alias{rbinom2.or}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bivariate Binary Regression with an Odds Ratio }
\description{
Density and random generation for a bivariate binary regression
model using an odds ratio as the measure of dependency.
}
\usage{
rbinom2.or(n, mu1,
           mu2 = if (exchangeable) mu1 else
                 stop("argument 'mu2' not specified"),
           oratio = 1, exchangeable = FALSE, tol = 0.001,
           twoCols = TRUE,
           colnames = if (twoCols) c("y1","y2") else
                      c("00", "01", "10", "11"),
           ErrorCheck = TRUE)
dbinom2.or(mu1,
           mu2 = if (exchangeable) mu1 else
                 stop("'mu2' not specified"),
           oratio = 1, exchangeable = FALSE, tol = 0.001,
           colnames = c("00", "01", "10", "11"),
           ErrorCheck = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
number of observations.
Same as in \code{\link[stats]{runif}}.
The arguments \code{mu1}, \code{mu2}, \code{oratio} are recycled to
this value.
}
\item{mu1, mu2}{
The marginal probabilities.
Only \code{mu1} is needed if \code{exchangeable = TRUE}.
Values should be between 0 and 1.
}
\item{oratio}{
Odds ratio.
Must be numeric and positive.
The default value of unity means the responses are statistically
independent.
}
\item{exchangeable}{
Logical.
If \code{TRUE}, the two marginal probabilities are constrained to
be equal.
}
\item{twoCols}{
Logical.
If \code{TRUE}, then an \eqn{n} \eqn{\times}{*} \eqn{2} matrix of
1s and 0s is returned.
If \code{FALSE}, then an \eqn{n} \eqn{\times}{*} \eqn{4} matrix of
1s and 0s is returned.
}
\item{colnames}{
The \code{dimnames} argument of \code{\link[base]{matrix}} is
assigned \code{list(NULL, colnames)}.
}
\item{tol}{
Tolerance for testing independence.
Should be some small positive numerical value.
}
\item{ErrorCheck}{
Logical.
Do some error checking of the input parameters?
}
}
\details{
The function \code{rbinom2.or} generates data coming from a
bivariate binary response model.
The data might be fitted with the \pkg{VGAM} family function
\code{\link{binom2.or}}.
The function \code{dbinom2.or} does not really compute the density
(because that does not make sense here) but rather returns the
four joint probabilities.
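As a quick numerical check,
\code{rowSums(dbinom2.or(mu1 = 0.5, mu2 = 0.5, oratio = exp(2)))}
should return 1 because each row of the returned matrix of joint
probabilities adds up to unity;
the argument values here are arbitrary illustrative choices.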
} \value{ The function \code{rbinom2.or} returns either a 2 or 4 column matrix of 1s and 0s, depending on the argument \code{twoCols}. The function \code{dbinom2.or} returns a 4 column matrix of joint probabilities; each row adds up to unity. } \author{ T. W. Yee } \seealso{ \code{\link{binom2.or}}. } \examples{ nn <- 1000 # Example 1 ymat <- rbinom2.or(nn, mu1 = logitlink(1, inv = TRUE), oratio = exp(2), exch = TRUE) (mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2"))) (myor <- mytab["0","0"] * mytab["1","1"] / (mytab["1","0"] * mytab["0","1"])) fit <- vglm(ymat ~ 1, binom2.or(exch = TRUE)) coef(fit, matrix = TRUE) bdata <- data.frame(x2 = sort(runif(nn))) # Example 2 bdata <- transform(bdata, mu1 = logitlink(-2 + 4 * x2, inverse = TRUE), mu2 = logitlink(-1 + 3 * x2, inverse = TRUE)) dmat <- with(bdata, dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = exp(2))) ymat <- with(bdata, rbinom2.or(n = nn, mu1 = mu1, mu2 = mu2, oratio = exp(2))) fit2 <- vglm(ymat ~ x2, binom2.or, data = bdata) coef(fit2, matrix = TRUE) \dontrun{ matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4, type = "l", main = "Joint probabilities", ylim = 0:1, ylab = "Probabilities", xlab = "x2", las = 1) legend("top", lty = 1:4, col = 1:4, legend = c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)", "3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)")) } } \keyword{distribution} VGAM/man/zanegbinomial.Rd0000644000176200001440000002346713565414527014715 0ustar liggesusers\name{zanegbinomial} \alias{zanegbinomial} \alias{zanegbinomialff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Altered Negative Binomial Distribution } \description{ Fits a zero-altered negative binomial distribution based on a conditional model involving a binomial distribution and a positive-negative binomial distribution. } \usage{ zanegbinomial(zero = "size", type.fitted = c("mean", "munb", "pobs0"), mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, lpobs0 = "logitlink", lmunb = "loglink", lsize = "loglink", imethod = 1, ipobs0 = NULL, imunb = NULL, iprobs.y = NULL, gprobs.y = (0:9)/10, isize = NULL, gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) zanegbinomialff(lmunb = "loglink", lsize = "loglink", lonempobs0 = "logitlink", type.fitted = c("mean", "munb", "pobs0", "onempobs0"), isize = NULL, ionempobs0 = NULL, zero = c("size", "onempobs0"), mds.min = 1e-3, iprobs.y = NULL, gprobs.y = (0:9)/10, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imethod = 1, imunb = NULL, nsimEIM = 500) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpobs0}{ Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here. See \code{\link{Links}} for more choices. } \item{lmunb}{ Link function applied to the \code{munb} parameter, which is the mean \eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution. See \code{\link{Links}} for more choices. } \item{lsize}{ Parameter link function applied to the reciprocal of the dispersion parameter, called \code{k}. That is, as \code{k} increases, the variance of the response decreases. See \code{\link{Links}} for more choices. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for information. } \item{lonempobs0, ionempobs0}{ Corresponding argument for the other parameterization. See details below. } % \item{epobs0, emunb, esize}{ % List. Extra argument for the respective links. 
% See \code{earg} in \code{\link{Links}} for general information.
% epobs0 = list(), emunb = list(), esize = list(),
% }
\item{ipobs0, imunb, isize}{
Optional initial values for \eqn{p_0}{pobs0} and \code{munb}
and \code{k}.
If given then it is okay to give one value
for each response/species by inputting a vector whose length
is the number of columns of the response matrix.
}
\item{zero}{
% Integer valued vector, may be assigned, e.g., \eqn{-3} or \eqn{3} if
% the probability of an observed value is to be modelled with the
% covariates.
Specifies which of the three linear predictors are modelled as
intercept-only.
% By default, the \code{k} and \eqn{p_0}{pobs0}
% parameters for each response are modelled as
% single unknown numbers that are estimated.
All parameters can be modelled as a function of the explanatory
variables by setting \code{zero = NULL} (not recommended).
A negative value means that the value is recycled, e.g.,
setting \eqn{-3} means all \code{k} are intercept-only
for \code{zanegbinomial}.
See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{nsimEIM, imethod}{
See \code{\link{CommonVGAMffArguments}}.
}
% \item{ishrinkage}{
% See \code{\link{negbinomial}}
% and \code{\link{CommonVGAMffArguments}}.
% }
\item{iprobs.y, gsize.mux, gprobs.y}{
See \code{\link{negbinomial}}.
% and \code{\link{CommonVGAMffArguments}}.
}
\item{cutoff.prob, eps.trig}{
See \code{\link{negbinomial}}.
% and \code{\link{CommonVGAMffArguments}}.
}
\item{mds.min, max.support, max.chunk.MB}{
See \code{\link{negbinomial}}.
% and \code{\link{CommonVGAMffArguments}}.
}
}
\details{
The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0},
or \eqn{Y} has a positive-negative binomial distribution with
probability \eqn{1-p_0}{1-pobs0}.
Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1},
which is modelled as a function of the covariates.
The zero-altered negative binomial distribution differs from the
zero-inflated negative binomial distribution in that the former
has zeros coming from one source, whereas the latter has zeros
coming from the negative binomial distribution too.
The zero-inflated negative binomial distribution is implemented
as \code{\link{zinegbinomial}} in the \pkg{VGAM} package.
Some people call the zero-altered negative binomial a
\emph{hurdle} model.
For one response/species, by default, the three linear/additive
predictors
for \code{zanegbinomial()}
are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(pobs0), log(munb), log(k))^T}.
This vector is recycled for multiple species.
The \pkg{VGAM} family function \code{zanegbinomialff()} has a few
changes compared to \code{zanegbinomial()}.
These are:
(i) the order of the linear/additive predictors is switched so the
negative binomial mean comes first;
(ii) argument \code{onempobs0} is now 1 minus the probability of an
observed 0,
i.e., the probability of the positive negative binomial
distribution,
i.e., \code{onempobs0} is \code{1-pobs0};
(iii) argument \code{zero} has a new default so that \code{pobs0}
is intercept-only by default.
Now \code{zanegbinomialff()} is generally recommended over
\code{zanegbinomial()}.
Both functions implement Fisher scoring and can handle multiple
responses.
}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.
The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, returns the mean \eqn{\mu}{mu} (default) which is given by \deqn{\mu = (1-p_0) \mu_{nb} / [1 - (k/(k+\mu_{nb}))^k].}{% mu = (1-pobs0) * munb / [1 - (k/(k+munb))^k].} If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned. } \references{ Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer, D. B. (1996) Modelling the abundances of rare species: statistical models for counts with extra zeros. \emph{Ecological Modelling}, \bold{88}, 297--308. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. } \section{Warning }{ This family function is fragile; it inherits the same difficulties as \code{\link{posnegbinomial}}. Convergence for this \pkg{VGAM} family function seems to depend quite strongly on providing good initial values. This \pkg{VGAM} family function is computationally expensive and usually runs slowly; setting \code{trace = TRUE} is useful for monitoring convergence. Inference obtained from \code{summary.vglm} and \code{summary.vgam} may or may not be correct. In particular, the p-values, standard errors and degrees of freedom may need adjustment. Use simulation on artificial data to check that these are reasonable. } \author{ T. W. Yee } \note{ Note this family function allows \eqn{p_0}{pobs0} to be modelled as functions of the covariates provided \code{zero} is set correctly. It is a conditional model, not a mixture model. Simulated Fisher scoring is the algorithm. This family function effectively combines \code{\link{posnegbinomial}} and \code{\link{binomialff}} into one family function. This family function can handle multiple responses, e.g., more than one species. } \seealso{ \code{\link{dzanegbin}}, \code{\link{posnegbinomial}}, \code{\link{gatnbinomial.mlm}}, \code{\link{Gaitnbinom.mlm}}, \code{\link{negbinomial}}, \code{\link{binomialff}}, \code{\link{rposnegbin}}, \code{\link{zinegbinomial}}, \code{\link{zipoisson}}, \code{\link[stats:NegBinomial]{dnbinom}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. 
} \examples{ \dontrun{ zdata <- data.frame(x2 = runif(nn <- 2000)) zdata <- transform(zdata, pobs0 = logitlink(-1 + 2*x2, inverse = TRUE)) zdata <- transform(zdata, y1 = rzanegbin(nn, munb = exp(0+2*x2), size = exp(1), pobs0 = pobs0), y2 = rzanegbin(nn, munb = exp(1+2*x2), size = exp(1), pobs0 = pobs0)) with(zdata, table(y1)) with(zdata, table(y2)) fit <- vglm(cbind(y1, y2) ~ x2, zanegbinomial, data = zdata, trace = TRUE) coef(fit, matrix = TRUE) head(fitted(fit)) head(predict(fit)) } } \keyword{models} \keyword{regression} % lpobs0 = "logitlink", lmunb = "loglink", lsize = "loglink", % type.fitted = c("mean", "pobs0"), % ipobs0 = NULL, isize = NULL, zero = "size", % probs.y = 0.75, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % gsize = exp((-4):4), % imethod = 1, nsimEIM = 250, ishrinkage = 0.95) %zanegbinomial( %zero = "size", type.fitted = c("mean", "pobs0"), % nsimEIM = 250, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % lpobs0 = "logitlink", lmunb = "loglink", lsize = "loglink", % imethod = 1, ipobs0 = NULL, probs.y = 0.75, % ishrinkage = 0.95, isize = NULL, gsize = exp((-4):4)) %zanegbinomialff(lmunb = "loglink", lsize = "loglink", lonempobs0 = "logitlink", % type.fitted = c("mean", "pobs0", "onempobs0"), isize = NULL, % ionempobs0 = NULL, zero = c("size", "onempobs0"), % probs.y = 0.75, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % gsize = exp((-4):4), % imethod = 1, nsimEIM = 250, ishrinkage = 0.95) VGAM/man/cens.normal.Rd0000644000176200001440000000672113565414527014307 0ustar liggesusers\name{cens.normal} \alias{cens.normal} % 20131111: just for \pkg{cg}: % 20140609: just for \pkg{cg}: \alias{cennormal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Censored Normal Distribution } \description{ Maximum likelihood estimation for the normal distribution with left and right censoring. } \usage{ cens.normal(lmu = "identitylink", lsd = "loglink", imethod = 1, zero = "sd") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu, lsd}{ Parameter link functions applied to the mean and standard deviation parameters. See \code{\link{Links}} for more choices. The standard deviation is a positive quantity, therefore a log link is the default. } \item{imethod}{ Initialization method. Either 1 or 2, this specifies two methods for obtaining initial values for the parameters. } \item{zero}{ A vector, e.g., containing the value 1 or 2; if so, the mean or standard deviation respectively are modelled as an intercept only. Setting \code{zero = NULL} means both linear/additive predictors are modelled as functions of the explanatory variables. See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ This function is like \code{\link{uninormal}} but handles observations that are left-censored (so that the true value would be less than the observed value) else right-censored (so that the true value would be greater than the observed value). To indicate which type of censoring, input \code{extra = list(leftcensored = vec1, rightcensored = vec2)} where \code{vec1} and \code{vec2} are logical vectors the same length as the response. If the two components of this list are missing then the logical values are taken to be \code{FALSE}. The fitted object has these two components stored in the \code{extra} slot. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. 
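
The censoring statuses are supplied via the \code{extra}
argument of \code{\link{vglm}}; a minimal sketch with made-up
data follows (see also the example below):
\preformatted{
set.seed(1)
cdat <- data.frame(x2 = runif(100))
cdat <- transform(cdat, y = pmax(1.5, rnorm(100, 2 + x2)))  # Left-censored at 1.5
Extra <- list(leftcensored  = with(cdat, y <= 1.5),
              rightcensored = rep(FALSE, 100))
fit <- vglm(y ~ x2, cens.normal, data = cdat, extra = Extra)
coef(fit, matrix = TRUE)
}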
}
%\references{
%}
\author{ T. W. Yee }
\note{
This function, which is an alternative to \code{\link{tobit}},
cannot handle a matrix response
and uses different working weights.
If there are no censored observations then
\code{\link{uninormal}} is recommended instead.

% Function \code{\link{cens.normal1}} will be deprecated soon.
% It is exactly the same as \code{\link{cens.normal}}.

}
\seealso{
\code{\link{tobit}},
\code{\link{uninormal}},
\code{\link{double.cens.normal}}.
}
\examples{
\dontrun{
cdata <- data.frame(x2 = runif(nn <- 1000))  # ystar are true values
cdata <- transform(cdata, ystar = rnorm(nn, m = 100 + 15 * x2, sd = exp(3)))
with(cdata, hist(ystar))
cdata <- transform(cdata, L = runif(nn,  80,  90),  # Lower censoring points
                          U = runif(nn, 130, 140))  # Upper censoring points
cdata <- transform(cdata, y = pmax(L, ystar))  # Left  censored
cdata <- transform(cdata, y = pmin(U, y))      # Right censored
with(cdata, hist(y))
Extra <- list(leftcensored = with(cdata, ystar < L),
              rightcensored = with(cdata, ystar > U))
fit1 <- vglm(y ~ x2, cens.normal, data = cdata, crit = "c", extra = Extra)
fit2 <- vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
             data = cdata, crit = "c", trace = TRUE)
coef(fit1, matrix = TRUE)
max(abs(coef(fit1, matrix = TRUE) -
        coef(fit2, matrix = TRUE)))  # Should be 0
names(fit1@extra)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/olym.Rd0000644000176200001440000000446713565414527013055 0ustar liggesusers\name{olympics}
\alias{olym08}
\alias{olym12}
\docType{data}
\title{ 2008 and 2012 Summer Olympic Final Medal Count Data}
\description{
Final medal count, by country, for the Summer
2008 and 2012 Olympic Games.
}
\usage{
data(olym08)
data(olym12)
}
\format{
A data frame with 87 or 85 observations on the following 6 variables.
\describe{
\item{\code{rank}}{a numeric vector, overall ranking of the countries. }
\item{\code{country}}{a factor. }
\item{\code{gold}}{a numeric vector, number of gold medals. }
\item{\code{silver}}{a numeric vector, number of silver medals. }
\item{\code{bronze}}{a numeric vector, number of bronze medals. }
\item{\code{totalmedal}}{a numeric vector, total number of medals. }
% \item{\code{country}}{a factor. character vector. }
}
}
\details{
The events were held during
(i) August 8--24, 2008, in Beijing; and
(ii) July 27--August 12, 2012, in London.

% This is a simple two-way contingency table of counts.

}
% \source{
% url{http://www.associatedcontent.com/article/979484/2008_summer_olympic_medal_count_total.html},
% url{http://www.london2012.com/medals/medal-count/}.
% }
\references{
The official English website was/is \code{http://en.beijing2008.cn}
and \code{http://www.london2012.com}.
Help from Viet Hoang Quoc is gratefully acknowledged.
}
\seealso{
\code{\link[VGAM]{grc}}.
}
\examples{
summary(olym08)
summary(olym12)
## maybe str(olym08) ; plot(olym08) ...
\dontrun{ par(mfrow = c(1, 2))
myylim <- c(0, 55)
with(head(olym08, n = 8),
barplot(rbind(gold, silver, bronze),
        col = c("gold", "grey", "brown"),  # No "silver" or "bronze"!
        # "gold", "grey71", "chocolate4",
        names.arg = country, cex.names = 0.5, ylim = myylim,
        beside = TRUE, main = "2008 Summer Olympic Final Medal Count",
        ylab = "Medal count", las = 1,
        sub = "Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze"))
with(head(olym12, n = 8),
barplot(rbind(gold, silver, bronze),
        col = c("gold", "grey", "brown"),  # No "silver" or "bronze"!
names.arg = country, cex.names = 0.5, ylim = myylim, beside = TRUE, main = "2012 Summer Olympic Final Medal Count", ylab = "Medal count", las = 1, sub = "Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze")) } } \keyword{datasets} VGAM/man/trplot.Rd0000644000176200001440000000411213565414527013404 0ustar liggesusers\name{trplot} \alias{trplot} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Trajectory Plot } \description{ Generic function for a trajectory plot. } \usage{ trplot(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which a trajectory plot is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. They usually are graphical parameters, and sometimes they are fed into the methods function for \code{\link{Coef}}. } } \details{ Trajectory plots can be defined in different ways for different models. Many models have no such notion or definition. For quadratic and additive ordination models they plot the fitted values of two species against each other (more than two is theoretically possible, but not implemented in this software yet). } \value{ The value returned depends specifically on the methods function invoked. } \references{ Yee, T. W. (2012) On constrained and unconstrained quadratic ordination. \emph{Manuscript in preparation}. } \author{ Thomas W. Yee } %\note{ %} \seealso{ \code{\link{trplot.qrrvglm}}, \code{\link{perspqrrvglm}}, \code{\link{lvplot}}. } \examples{ \dontrun{ set.seed(123) hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars p1cqo <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, Crow1positive = FALSE) nos <- ncol(depvar(p1cqo)) clr <- 1:nos # OR (1:(nos+1))[-7] to omit yellow trplot(p1cqo, which.species = 1:3, log = "xy", col = c("blue", "orange", "green"), lwd = 2, label = TRUE) -> ii legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "), lwd = 2, lty = 1, col = c("blue", "orange", "green")) abline(a = 0, b = 1, lty = "dashed", col = "grey") } } \keyword{models} \keyword{regression} VGAM/man/skewnormal.Rd0000644000176200001440000000661513565414527014254 0ustar liggesusers\name{skewnormal} \alias{skewnormal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Univariate Skew-Normal Distribution Family Function } \description{ Maximum likelihood estimation of the shape parameter of a univariate skew-normal distribution. } \usage{ skewnormal(lshape = "identitylink", ishape = NULL, nsimEIM = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape, ishape, nsimEIM}{ See \code{\link{Links}} and \code{\link{CommonVGAMffArguments}}. } } \details{ The univariate skew-normal distribution has a density function that can be written \deqn{f(y) = 2 \, \phi(y) \, \Phi(\alpha y)}{% f(y) = 2 * phi(y) * Phi(alpha * y)} where \eqn{\alpha}{alpha} is the shape parameter. Here, \eqn{\phi}{phi} is the standard normal density and \eqn{\Phi}{Phi} its cumulative distribution function. When \eqn{\alpha=0}{alpha=0} the result is a standard normal distribution. When \eqn{\alpha=1}{alpha=1} it models the distribution of the maximum of two independent standard normal variates. When the absolute value of the shape parameter increases the skewness of the distribution increases. 
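
The density above is easily checked numerically against
\code{\link{dskewnorm}} (a small sketch; the location and scale
parameters default to 0 and 1):
\preformatted{
alpha <- 2; y <- seq(-3, 3, by = 0.25)
max(abs(dskewnorm(y, shape = alpha) -
        2 * dnorm(y) * pnorm(alpha * y)))  # Should be about 0
}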
The limit as the shape parameter tends to positive infinity results
in the folded normal distribution or half-normal distribution.
When the shape parameter changes its sign, the density
is reflected about \eqn{y=0}.

The mean of the distribution is
\eqn{\mu=\alpha \sqrt{2/(\pi (1+\alpha^2))}}{mu=alpha*sqrt(2/(pi*(1+alpha^2)))}
and these are returned as the fitted values.
The variance of the distribution is
\eqn{1-\mu^2}{1-mu^2}.
The Newton-Raphson algorithm is used unless the \code{nsimEIM}
argument is used.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions
such as \code{\link{vglm}} and \code{\link{vgam}}.

}
\references{
Azzalini, A. (1985)
A class of distributions which includes the normal ones.
\emph{Scandinavian Journal of Statistics},
\bold{12}, 171--178.

Azzalini, A. and Capitanio, A. (1999)
Statistical applications of the multivariate skew-normal distribution.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{61}, 579--602.

}
\author{ Thomas W. Yee }
\note{
It is a good idea to use several different initial values to ensure
that the global solution is obtained.

This family function will be modified (hopefully soon)
to handle a location and scale parameter too.

}
\section{Warning }{
It is well known that the EIM of Azzalini's skew-normal
distribution is singular as the shape parameter tends to zero,
and thus produces inferential problems.

}
\seealso{
\code{\link{skewnorm}},
\code{\link{uninormal}},
\code{\link{foldnormal}}.

}
\examples{
sdata <- data.frame(y1 = rskewnorm(nn <- 1000, shape = 5))
fit1 <- vglm(y1 ~ 1, skewnormal, data = sdata, trace = TRUE)
coef(fit1, matrix = TRUE)
head(fitted(fit1), 1)
with(sdata, mean(y1))
\dontrun{ with(sdata, hist(y1, prob = TRUE))
x <- with(sdata, seq(min(y1), max(y1), len = 200))
with(sdata, lines(x, dskewnorm(x, shape = Coef(fit1)), col = "blue")) }

sdata <- data.frame(x2 = runif(nn))
sdata <- transform(sdata, y2 = rskewnorm(nn, shape = 1 + 2*x2))
fit2 <- vglm(y2 ~ x2, skewnormal, data = sdata, trace = TRUE, crit = "coef")
summary(fit2)
}
\keyword{models}
\keyword{regression}
VGAM/man/zetaUC.Rd0000644000176200001440000000433713565414527013264 0ustar liggesusers\name{Zeta}
\alias{Zeta}
\alias{dzeta}
\alias{pzeta}
\alias{qzeta}
\alias{rzeta}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{The Zeta Distribution }
\description{
Density, distribution function, quantile function and random generation
for the zeta distribution.
}
\usage{
dzeta(x, shape, log = FALSE)
pzeta(q, shape, lower.tail = TRUE)
qzeta(p, shape)
rzeta(n, shape)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q, p, n}{Same as \code{\link[stats]{Poisson}}. }
\item{shape}{ The positive shape parameter \eqn{s}. }
\item{lower.tail, log}{
Same meaning as in \code{\link[stats]{Normal}}.
}
}
\details{
The density function of the zeta distribution is given by
\deqn{y^{-s-1} / \zeta(s+1)}{%
y^(-s-1) / zeta(s+1)}
where \eqn{s>0}, \eqn{y=1,2,\ldots}, and \eqn{\zeta}{zeta} is
Riemann's zeta function.
}
\value{
\code{dzeta} gives the density,
\code{pzeta} gives the distribution function,
\code{qzeta} gives the quantile function, and
\code{rzeta} generates random deviates.
}
\references{
Johnson, N. L., Kotz, S. and Kemp, A. W. (1993)
\emph{Univariate Discrete Distributions},
2nd ed.
New York: Wiley.

% Lindsey, J. K. (2002zz)
% \emph{Applied Statistical Modelling}, 2nd ed.
% London: Chapman & Hall.zz

% Knight, K. (2002zz)
% Theory book.
% London: Chapman & Hall.zz } \author{ T. W. Yee } \note{ \code{qzeta()} runs slower and slower as \code{shape} approaches 0 and \code{p} approaches 1. The \pkg{VGAM} family function \code{\link{zetaff}} estimates the shape parameter \eqn{s}. } %\section{Warning}{ % These functions have not been fully tested. %} \seealso{ \code{\link{zeta}}, \code{\link{zetaff}}, \code{\link{Oazeta}}, \code{\link{Oizeta}}, \code{\link{Otzeta}}. } \examples{ dzeta(1:20, shape = 2) myshape <- 0.5 max(abs(pzeta(1:200, myshape) - cumsum(1/(1:200)^(1+myshape)) / zeta(myshape+1))) # Should be 0 \dontrun{ plot(1:6, dzeta(1:6, 2), type = "h", las = 1, col = "orange", ylab = "Probability", main = "zeta probability function; orange: shape = 2; blue: shape = 1") points(0.10 + 1:6, dzeta(1:6, 1), type = "h", col = "blue") } } \keyword{distribution} VGAM/man/betaR.Rd0000644000176200001440000001135713565414527013126 0ustar liggesusers\name{betaR} \alias{betaR} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Two-parameter Beta Distribution Family Function } \description{ Estimation of the shape parameters of the two-parameter beta distribution. } \usage{ betaR(lshape1 = "loglink", lshape2 = "loglink", i1 = NULL, i2 = NULL, trim = 0.05, A = 0, B = 1, parallel = FALSE, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape1, lshape2, i1, i2}{ Details at \code{\link{CommonVGAMffArguments}}. See \code{\link{Links}} for more choices. } \item{trim}{ An argument which is fed into \code{mean()}; it is the fraction (0 to 0.5) of observations to be trimmed from each end of the response \code{y} before the mean is computed. This is used when computing initial values, and guards against outliers. } \item{A, B}{ Lower and upper limits of the distribution. The defaults correspond to the \emph{standard beta distribution} where the response lies between 0 and 1. } \item{parallel, zero}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The two-parameter beta distribution is given by \eqn{f(y) =} \deqn{(y-A)^{shape1-1} \times (B-y)^{shape2-1} / [Beta(shape1,shape2) \times (B-A)^{shape1+shape2-1}]}{% (y-A)^(shape1-1) * (B-y)^(shape2-1) / [Beta(shape1,shape2) * (B-A)^(shape1+shape2-1)]} for \eqn{A < y < B}, and \eqn{Beta(.,.)} is the beta function (see \code{\link[base:Special]{beta}}). The shape parameters are positive, and here, the limits \eqn{A} and \eqn{B} are known. The mean of \eqn{Y} is \eqn{E(Y) = A + (B-A) \times shape1 / (shape1 + shape2)}{E(Y) = A + (B-A) * shape1 / (shape1 + shape2)}, and these are the fitted values of the object. For the standard beta distribution the variance of \eqn{Y} is \eqn{shape1 \times shape2 / [(1+shape1+shape2) \times (shape1+shape2)^2]}{ shape1 * shape2 / ((1+shape1+shape2) * (shape1+shape2)^2)}. If \eqn{\sigma^2= 1 / (1+shape1+shape2)} then the variance of \eqn{Y} can be written \eqn{\sigma^2 \mu (1-\mu)}{mu*(1-mu)*sigma^2} where \eqn{\mu=shape1 / (shape1 + shape2)}{mu=shape1 / (shape1 + shape2)} is the mean of \eqn{Y}. Another parameterization of the beta distribution involving the mean and a precision parameter is implemented in \code{\link{betaff}}. % 20120525: % Regularity conditions not satisfied; support depends on the parameters: % If \eqn{A} and \eqn{B} are unknown, then the \pkg{VGAM} family function % \code{beta4()} can be used to estimate these too. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). 
The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) Chapter 25 of: \emph{Continuous Univariate Distributions}, 2nd edition, Volume 2, New York: Wiley. Gupta, A. K. and Nadarajah, S. (2004) \emph{Handbook of Beta Distribution and Its Applications}, New York: Marcel Dekker. %Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) %\emph{Statistical Distributions}, %Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. % Documentation accompanying the \pkg{VGAM} package at % \url{https://www.stat.auckland.ac.nz/~yee} % contains further information and examples. } \author{ Thomas W. Yee } \note{ The response must have values in the interval (\eqn{A}, \eqn{B}). \pkg{VGAM} 0.7-4 and prior called this function \code{\link{betaff}}. } \seealso{ \code{\link{betaff}}, % \code{\link{zoibetaR}}, \code{\link[stats:Beta]{Beta}}, \code{\link{genbetaII}}, \code{\link{betaII}}, \code{\link{betabinomialff}}, \code{\link{betageometric}}, \code{\link{betaprime}}, \code{\link{rbetageom}}, \code{\link{rbetanorm}}, \code{\link{kumar}}, \code{\link{simulate.vlm}}. } \examples{ bdata <- data.frame(y = rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1))) fit <- vglm(y ~ 1, betaR(lshape1 = "identitylink", lshape2 = "identitylink"), data = bdata, trace = TRUE, crit = "coef") fit <- vglm(y ~ 1, betaR, data = bdata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) # Useful for intercept-only models bdata <- transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1 fit <- vglm(Y ~ 1, betaR(A = 5, B = 13), data = bdata, trace = TRUE) Coef(fit) c(meanY = with(bdata, mean(Y)), head(fitted(fit),2)) } \keyword{models} \keyword{regression} % 3/1/06; this works well: % fit <- vglm(y~1, beta.abqn(link = logofflink(offset = 1), tr = TRUE, crit = "c") % 3/1/06; this does not work so well: % it <- vglm(y~1, beta.abqn(link = logofflink(offset = 0), tr = TRUE, crit = "c") % Interesting!! VGAM/man/constraints.Rd0000644000176200001440000001273713565414527014443 0ustar liggesusers\name{constraints} \alias{constraints} \alias{constraints.vlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Constraint Matrices } \description{ Extractor function for the \emph{constraint matrices} of objects in the \pkg{VGAM} package. } \usage{ constraints(object, ...) constraints.vlm(object, type = c("lm", "term"), all = TRUE, which, matrix.out = FALSE, colnames.arg = TRUE, rownames.arg = TRUE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Some \pkg{VGAM} object, for example, having class \code{\link{vglmff-class}}. } \item{type}{ Character. Whether LM- or term-type constraints are to be returned. The number of such matrices returned is equal to \code{nvar(object, type = "lm")} and the number of terms, respectively. } \item{all, which}{ If \code{all = FALSE} then \code{which} gives the integer index or a vector of logicals specifying the selection. } \item{matrix.out}{ Logical. If \code{TRUE} then the constraint matrices are \code{\link[base]{cbind}()ed} together. The result is usually more compact because the default is a list of constraint matrices. } \item{colnames.arg, rownames.arg}{ Logical. If \code{TRUE} then column and row names are assigned corresponding to the variables. } \item{\dots}{ Other possible arguments such as \code{type}. 
}
}
\details{
Constraint matrices describe the relationship of
coefficients/component functions of a particular explanatory
variable between
the linear/additive predictors in VGLM/VGAM
models.
For example, they may be all different (constraint matrix is the
identity matrix) or
all the same (constraint matrix has one column and has unit values).

VGLMs and VGAMs have constraint matrices which are \emph{known}.
The class of RR-VGLMs has constraint matrices which are
\emph{unknown} and are to be estimated.

}
\value{
The extractor function
\code{constraints()}
returns a list of constraint
matrices---usually one for each column of the
VLM model matrix, and in that order.
The list is labelled with the variable names.
Each constraint matrix has \eqn{M} rows, where
\eqn{M} is the number of linear/additive predictors,
and its rank is equal to its number of columns.
A model with no constraints at all has an order
\eqn{M} identity matrix as each variable's
constraint matrix.

For \code{\link{vglm}} and \code{\link{vgam}} objects,
feeding \code{type = "term"} constraint matrices
back into the same model should work
and give an identical model.
The default is the \code{"lm"}-type constraint matrices;
this is a list with one constraint matrix per column of
the LM matrix.
See the \code{constraints} argument of \code{\link{vglm}},
and the example below.

}
\author{T. W. Yee }
\note{
In all \pkg{VGAM} family functions \code{zero = NULL} means
none of the linear/additive predictors are modelled as
intercepts-only.
Other arguments found in certain \pkg{VGAM} family functions
which affect constraint
matrices include
\code{parallel} and \code{exchangeable}.

The \code{constraints} argument in \code{\link{vglm}}
and \code{\link{vgam}} allows constraint matrices to
be inputted. If so, then \code{constraints(fit, type = "lm")} can
be fed into the \code{constraints} argument of the same object
to get the same model.

The \code{xij} argument does not affect constraint matrices; rather,
it allows each row of the constraint matrix to be multiplied by a
specified vector.

}
\references{
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.

Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.

%\url{http://www.stat.auckland.ac.nz/~yee} contains additional
%information.

}
\seealso{
\code{\link{is.parallel}},
\code{\link{is.zero}}.
VGLMs are described in \code{\link{vglm-class}};
RR-VGLMs are described in \code{\link{rrvglm-class}}.

Arguments such as \code{zero} and \code{parallel}
found in many \pkg{VGAM}
family functions are a way of creating/modifying constraint
matrices conveniently, e.g., see \code{\link{zero}}.
See \code{\link{CommonVGAMffArguments}} for more information.
} \examples{ # Fit the proportional odds model: pneumo <- transform(pneumo, let = log(exposure.time)) (fit1 <- vglm(cbind(normal, mild, severe) ~ sm.bs(let, 3), cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)) coef(fit1, matrix = TRUE) constraints(fit1) # Parallel assumption results in this constraints(fit1, type = "term") # Same as the default ("vlm"-type) is.parallel(fit1) # An equivalent model to fit1 (needs the type "term" constraints): clist.term <- constraints(fit1, type = "term") # "term"-type constraints # cumulative() has no 'zero' argument to set to NULL (a good idea # when using the 'constraints' argument): (fit2 <- vglm(cbind(normal, mild, severe) ~ sm.bs(let, 3), data = pneumo, cumulative(reverse = TRUE), constraints = clist.term)) abs(max(coef(fit1, matrix = TRUE) - coef(fit2, matrix = TRUE))) # Should be zero # Fit a rank-1 stereotype (RR-multinomial logit) model: fit <- rrvglm(Country ~ Width + Height + HP, multinomial, data = car.all) constraints(fit) # All except the first are the estimated A matrix } \keyword{models} \keyword{regression} VGAM/man/hzeta.Rd0000644000176200001440000000512513565414527013200 0ustar liggesusers\name{hzeta} \alias{hzeta} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Haight's Zeta Family Function } \description{ Estimating the parameter of Haight's zeta distribution } \usage{ hzeta(lshape = "logloglink", ishape = NULL, nsimEIM = 100) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape}{ Parameter link function for the parameter, called \eqn{\alpha}{alpha} below. See \code{\link{Links}} for more choices. Here, a log-log link keeps the parameter greater than one, meaning the mean is finite. } \item{ishape,nsimEIM}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The probability function is \deqn{f(y) = (2y-1)^{(-\alpha)} - (2y+1)^{(-\alpha)},}{% f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),} where the parameter \eqn{\alpha>0}{alpha>0} and \eqn{y=1,2,\ldots}{y=1,2,...}. The function \code{\link{dhzeta}} computes this probability function. The mean of \eqn{Y}, which is returned as fitted values, is \eqn{(1-2^{-\alpha}) \zeta(\alpha)}{(1-2^(-alpha))*zeta(alpha)} provided \eqn{\alpha > 1}{alpha > 1}, where \eqn{\zeta}{zeta} is Riemann's zeta function. The mean is a decreasing function of \eqn{\alpha}{alpha}. The mean is infinite if \eqn{\alpha \leq 1}{alpha <= 1}, and the variance is infinite if \eqn{\alpha \leq 2}{alpha <= 2}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Pages 533--4 of Johnson N. L., Kemp, A. W. and Kotz S. (2005) \emph{Univariate Discrete Distributions}, 3rd edition, Hoboken, New Jersey: Wiley. } \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link{Hzeta}}, \code{\link{zeta}}, \code{\link{zetaff}}, \code{\link{loglog}}, \code{\link{simulate.vlm}}. 
} \examples{ shape <- exp(exp(-0.1)) # The parameter hdata <- data.frame(y = rhzeta(n = 1000, shape)) fit <- vglm(y ~ 1, hzeta, data = hdata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) # Useful for intercept-only models; should be same as shape c(with(hdata, mean(y)), head(fitted(fit), 1)) summary(fit) } \keyword{models} \keyword{regression} %# Generate some hzeta random variates %set.seed(123) %nn <- 400 %x <- 1:20 %shape <- 1.1 # The parameter %probs <- dhzeta(x, shape) %\dontrun{ %plot(x, probs, type="h", log="y")} %cs <- cumsum(probs) %tab <- table(cut(runif(nn), brea = c(0,cs,1))) %index <- (1:length(tab))[tab>0] %y <- rep(index, times=tab[index]) VGAM/man/grc.Rd0000644000176200001440000003651713565414527012651 0ustar liggesusers\name{grc} \alias{grc} \alias{rcim} \alias{uqo} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Row-Column Interaction Models including Goodman's RC Association Model and Unconstrained Quadratic Ordination } \description{ Fits a Goodman's RC association model (GRC) to a matrix of counts, and more generally, row-column interaction models (RCIMs). RCIMs allow for unconstrained quadratic ordination (UQO). } \usage{ grc(y, Rank = 1, Index.corner = 2:(1 + Rank), str0 = 1, summary.arg = FALSE, h.step = 1e-04, ...) rcim(y, family = poissonff, Rank = 0, M1 = NULL, weights = NULL, which.linpred = 1, Index.corner = ifelse(is.null(str0), 0, max(str0)) + 1:Rank, rprefix = "Row.", cprefix = "Col.", iprefix = "X2.", offset = 0, str0 = if (Rank) 1 else NULL, summary.arg = FALSE, h.step = 0.0001, rbaseline = 1, cbaseline = 1, has.intercept = TRUE, M = NULL, rindex = 2:nrow(y), cindex = 2:ncol(y), iindex = 2:nrow(y), ...) } %- maybe also `usage' for other objects documented here. \arguments{ \item{y}{ For \code{grc()}: a matrix of counts. For \code{rcim()}: a general matrix response depending on \code{family}. Output from \code{table()} is acceptable; it is converted into a matrix. Note that \code{y} should be at least 3 by 3 in dimension. } \item{family}{ A \pkg{VGAM} family function. By default, the first linear/additive predictor is fitted using main effects plus an optional rank-\code{Rank} interaction term. Not all family functions are suitable or make sense. All other linear/additive predictors are fitted using an intercept-only, so it has a common value over all rows and columns. For example, \code{\link{zipoissonff}} may be suitable for counts but not \code{\link{zipoisson}} because of the ordering of the linear/additive predictors. If the \pkg{VGAM} family function does not have an \code{infos} slot then \code{M1} needs to be inputted (the number of linear predictors for an ordinary (usually univariate) response, aka \eqn{M}). The \pkg{VGAM} family function also needs to be able to handle multiple responses (currently not all of them can do this). } \item{Rank}{ An integer from the set \{0,\ldots,\code{min(nrow(y), ncol(y))}\}. This is the dimension of the fit in terms of the interaction. For \code{grc()} this argument must be positive. A value of 0 means no interactions (i.e., main effects only); each row and column is represented by an indicator variable. } \item{weights}{ Prior weights. Fed into \code{\link{rrvglm}} or \code{\link{vglm}}. } \item{which.linpred}{ Single integer. Specifies which linear predictor is modelled as the sum of an intercept, row effect, column effect plus an optional interaction term. It should be one value from the set \code{1:M1}. } \item{Index.corner}{ A vector of \code{Rank} integers. 
These are used to store the \code{Rank} by \code{Rank}
identity matrix in the
\code{A} matrix; corner constraints are used.

}
\item{rprefix, cprefix, iprefix}{
Character, for the rows, columns and interactions respectively.
Used for labelling the indicator variables.

}
\item{offset}{
Numeric. Either a matrix of the right dimension, or a single
numeric that is expanded into such a matrix.

}
\item{str0}{
Ignored if \code{Rank = 0}, else
an integer from the set \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
specifying the row that is used as the structural zero.
Passed into \code{\link{rrvglm.control}} if \code{Rank > 0}.
Set \code{str0 = NULL} for none.

}
\item{summary.arg}{
Logical. If \code{TRUE} then a summary is returned;
\code{y} may then be the output (fitted object) of \code{grc()}.

}
\item{h.step}{
A small positive value that is passed into
\code{summary.rrvglm()}.
Only used when \code{summary.arg = TRUE}.

}
\item{\dots}{ Arguments that are passed
into \code{rrvglm.control()}.

}
\item{M1}{
The number of linear predictors of the \pkg{VGAM} \code{family}
function for an ordinary (univariate) response.
Then the number of linear predictors of the \code{rcim()} fit is
usually the number of columns of \code{y} multiplied by \code{M1}.
The default is to evaluate the \code{infos} slot of the
\pkg{VGAM} \code{family} function to try to obtain this value;
see \code{\link{vglmff-class}}.
If this information is not yet supplied by the family function then
the value needs to be inputted manually using this argument.

}
\item{rbaseline, cbaseline}{
Baseline reference levels for the rows and columns.
Currently stored on the object but not used.

}
\item{has.intercept}{
Logical. Include an intercept?

}
\item{M, cindex}{
\eqn{M} is the usual \pkg{VGAM} \eqn{M},
viz. the number of linear/additive predictors in total.
Also, \code{cindex} means column index, and these point to the
columns of \code{y} which are part of the vector of
linear/additive predictor \emph{main effects}.

For \code{family = multinomial} it is necessary to
input these arguments as \code{M = ncol(y)-1} and
\code{cindex = 2:(ncol(y)-1)}.

% except possibly for the 1st one (due to identifiability constraints).

}
\item{rindex, iindex}{
\code{rindex} means row index, and these are similar to
\code{cindex}.
\code{iindex} means interaction index, and these are similar to
\code{cindex}.

}
}
\details{
Goodman's RC association model fits a reduced-rank approximation
to a table of counts.
A Poisson model is assumed.
The log of each cell mean is decomposed as an
intercept plus a row effect plus a column effect plus a
reduced-rank component. The latter can be collectively
written \code{A \%*\% t(C)},
the product of two `thin' matrices.
Indeed, \code{A} and \code{C} have \code{Rank} columns.
By default, the first column and row of the interaction matrix
\code{A \%*\% t(C)} are chosen to be structural zeros,
because \code{str0 = 1}.
This means that the first row of \code{A} is all zeros.

This function uses \code{options()$contrasts} to set up the row and
column indicator variables.
In particular, Equation (4.5) of Yee and Hastie (2003) is used.
These are called \code{Row.} and \code{Col.} (by default) followed
by the row or column number.

The function \code{rcim()} is more general than \code{grc()}.
Its default is a no-interaction model of \code{grc()}, i.e.,
rank-0 and a Poisson distribution. This means that each
row and column has a dummy variable associated with it.
The first row and first column are baseline.
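
The rank-0 Poisson default is therefore equivalent to a
main-effects loglinear model; a small sketch with made-up
counts (the labels are arbitrary) that checks this:
\preformatted{
tab <- matrix(c(12, 7, 9, 5, 8, 14, 10, 6, 11), 3, 3,
              dimnames = list(paste0("Row.", 1:3), paste0("Col.", 1:3)))
fit0 <- rcim(tab)  # Rank = 0 and poissonff are the defaults
long <- data.frame(y = c(tab), row = gl(3, 1, 9), col = gl(3, 3, 9))
fitg <- glm(y ~ row + col, family = poisson, data = long)
max(abs(c(fitted(fit0)) - fitted(fitg)))  # Should be close to 0
}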
The power of \code{rcim()} is that many \pkg{VGAM} family functions
can be assigned to its \code{family} argument.
For example,
\code{\link{uninormal}} fits something in between a 2-way
ANOVA with and without interactions, and
\code{\link{alaplace2}} with \code{Rank = 0} is something like
\code{\link[stats]{medpolish}}.
Others include \code{\link{zipoissonff}} and
\code{\link{negbinomial}}.
Hopefully one day \emph{all} \pkg{VGAM} family functions will
work when assigned to the \code{family} argument, although the
result may not have meaning.

\emph{Unconstrained quadratic ordination} (UQO) can be performed
using \code{rcim()} and \code{grc()}.
This has been called \emph{unconstrained Gaussian ordination}
in the literature; however, the word \emph{Gaussian} has two
meanings, which is confusing, so it is better to use
\emph{quadratic} because a bell-shaped response surface is meant.
UQO is similar to CQO (\code{\link{cqo}}) except there are
no environmental/explanatory variables.
Here, a GLM is fitted to each column (species)
that is a quadratic function of hypothetical latent variables
or gradients.
Thus each row of the response has an associated site score,
and each column of the response has an associated optimum
and tolerance matrix.
UQO can be performed here under the assumption that all species
have the same tolerance matrices.
See Yee and Hadi (2014) for details.
It is not recommended that presence/absence data be inputted
because the information content is so low for each site-species cell.
The example below uses Poisson counts.

}
\value{
An object of class \code{"grc"}, which currently is the same as
an \code{"rrvglm"} object.
Currently,
a rank-0 \code{rcim()} object is of class \code{\link{rcim0-class}},
else of class \code{"rcim"} (this may change in the future).

% Currently,
% a rank-0 \code{rcim()} object is of class \code{\link{vglm-class}},
% but it may become of class \code{"rcim"} one day.

}
\references{
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.

Yee, T. W. and Hadi, A. F. (2014)
Row-column interaction models, with an R implementation.
\emph{Computational Statistics},
\bold{29}, 1427--1445.

Goodman, L. A. (1981)
Association models and canonical correlation in the analysis
of cross-classifications having ordered categories.
\emph{Journal of the American Statistical Association},
\bold{76}, 320--334.

%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information about the setting up of the
%indicator variables.

}
\author{
Thomas W. Yee, with
assistance from Alfian F. Hadi.
}
\note{
These functions set up the indicator variables etc. before calling
\code{\link{rrvglm}}
or
\code{\link{vglm}}.
The \code{...} is passed into \code{\link{rrvglm.control}} or
\code{\link{vglm.control}}.
This means, e.g., \code{Rank = 1} is the default for \code{grc()}.

The data should be labelled with \code{\link[base]{rownames}} and
\code{\link[base]{colnames}}.
Setting \code{trace = TRUE} is recommended to monitor
convergence.
Using \code{criterion = "coefficients"} can result in
slow convergence.

If \code{summary = TRUE} then \code{y} can be a
\code{"grc"} object, in which case a summary can be returned.
That is, \code{grc(y, summary = TRUE)} is
equivalent to \code{summary(grc(y))}.
It is not possible to plot a
\code{grc(y, summary = TRUE)} or
\code{rcim(y, summary = TRUE)} object.
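
To illustrate the last point (a small sketch; \code{auuc} is the
data frame of counts used in the examples below):
\preformatted{
fitA <- grc(auuc, trace = TRUE)  # trace = TRUE monitors convergence
summary(fitA)  # The same as grc(auuc, summary = TRUE)
}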
}
\section{Warning}{
The function \code{rcim()} is experimental at this stage and
may have bugs.
Quite a lot of expertise is needed when fitting such models and
when interpreting the results.
For example,
the constraint matrices apply the reduced-rank regression to the
first linear predictor (see \code{which.linpred}),
while the other linear predictors are intercept-only and have
a common value throughout the entire data set.
This means that, by default,
\code{family =} \code{\link{zipoissonff}} is
appropriate but not
\code{family =} \code{\link{zipoisson}}.
Else set \code{family =} \code{\link{zipoisson}} and
\code{which.linpred = 2}.
To understand what is going on, do examine the constraint
matrices of the fitted object, and reconcile this with
Equations (4.3) to (4.5) of Yee and Hastie (2003).

The functions temporarily create a permanent data frame
called \code{.grc.df} or \code{.rcim.df}, which used
to be needed by \code{summary.rrvglm()}.
Then these data frames are deleted before exiting the function.
If an error occurs then the data frames may be present
in the workspace.

}
\seealso{
\code{\link{rrvglm}},
\code{\link{rrvglm.control}},
\code{\link{rrvglm-class}},
\code{summary.grc},
\code{\link{moffset}},
\code{\link{Rcim}},
\code{\link{Select}},
\code{\link{Qvar}},
\code{\link{plotrcim0}},
\code{\link{cqo}},
\code{\link{multinomial}},
\code{\link{alcoff}},
\code{\link{crashi}},
\code{\link{auuc}},
\code{\link[VGAM:olym08]{olym08}},
\code{\link[VGAM:olym12]{olym12}},
\code{\link{poissonff}},
\code{\link[stats]{medpolish}}.

}
\examples{
# Example 1: Undergraduate enrolments at Auckland University in 1990
fitted(grc1 <- grc(auuc))
summary(grc1)

grc2 <- grc(auuc, Rank = 2, Index.corner = c(2, 5))
fitted(grc2)
summary(grc2)

model3 <- rcim(auuc, Rank = 1, fam = multinomial,
               M = ncol(auuc)-1, cindex = 2:(ncol(auuc)-1), trace = TRUE)
fitted(model3)
summary(model3)

# Median polish but not 100 percent reliable. Maybe call alaplace2()...
\dontrun{
rcim0 <- rcim(auuc, fam = alaplace1(tau = 0.5), trace = FALSE, maxit = 500)
round(fitted(rcim0), digits = 0)
round(100 * (fitted(rcim0) - auuc) / auuc, digits = 0)  # Discrepancy
depvar(rcim0)
round(coef(rcim0, matrix = TRUE), digits = 2)
Coef(rcim0, matrix = TRUE)
# constraints(rcim0)
names(constraints(rcim0))

# Compare with medpolish():
(med.a <- medpolish(auuc))
fv <- med.a$overall + outer(med.a$row, med.a$col, "+")
round(100 * (fitted(rcim0) - fv) / fv)  # Hopefully should be all 0s
}

# Example 2: 2012 Summer Olympic Games in London
\dontrun{ top10 <- head(olym12, 10)
grc1.oly12 <- with(top10, grc(cbind(gold, silver, bronze)))
round(fitted(grc1.oly12))
round(resid(grc1.oly12, type = "response"), digits = 1)  # Resp.
resids summary(grc1.oly12) Coef(grc1.oly12) } # Example 3: UQO; see Yee and Hadi (2014) \dontrun{ n <- 100; p <- 5; S <- 10 pdata <- rcqo(n, p, S, es.opt = FALSE, eq.max = FALSE, eq.tol = TRUE, sd.latvar = 0.75) # Poisson counts true.nu <- attr(pdata, "latvar") # The 'truth'; site scores attr(pdata, "tolerances") # The 'truth'; tolerances Y <- Select(pdata, "y", sort = FALSE) # Y matrix (n x S); the "y" vars uqo.rcim1 <- rcim(Y, Rank = 1, str0 = NULL, # Delta covers entire n x M matrix iindex = 1:nrow(Y), # RRR covers the entire Y has.intercept = FALSE) # Suppress the intercept # Plot 1 par(mfrow = c(2, 2)) plot(attr(pdata, "optimums"), Coef(uqo.rcim1)@A, col = "blue", type = "p", main = "(a) UQO optimums", xlab = "True optimums", ylab = "Estimated (UQO) optimums") mylm <- lm(Coef(uqo.rcim1)@A ~ attr(pdata, "optimums")) abline(coef = coef(mylm), col = "orange", lty = "dashed") # Plot 2 fill.val <- NULL # Choose this for the new parameterization plot(attr(pdata, "latvar"), c(fill.val, concoef(uqo.rcim1)), las = 1, col = "blue", type = "p", main = "(b) UQO site scores", xlab = "True site scores", ylab = "Estimated (UQO) site scores" ) mylm <- lm(c(fill.val, concoef(uqo.rcim1)) ~ attr(pdata, "latvar")) abline(coef = coef(mylm), col = "orange", lty = "dashed") # Plots 3 and 4 myform <- attr(pdata, "formula") p1ut <- cqo(myform, family = poissonff, eq.tol = FALSE, trace = FALSE, data = pdata) c1ut <- cqo(Select(pdata, "y", sort = FALSE) ~ scale(latvar(uqo.rcim1)), family = poissonff, eq.tol = FALSE, trace = FALSE, data = pdata) lvplot(p1ut, lcol = 1:S, y = TRUE, pcol = 1:S, pch = 1:S, pcex = 0.5, main = "(c) CQO fitted to the original data", xlab = "Estimated (CQO) site scores") lvplot(c1ut, lcol = 1:S, y = TRUE, pcol = 1:S, pch = 1:S, pcex = 0.5, main = "(d) CQO fitted to the scaled UQO site scores", xlab = "Estimated (UQO) site scores") } } \keyword{models} \keyword{regression} % plot(grc.oly1) % # Saturated model: % oly2 <- with(top10, grc(cbind(gold,silver,bronze), Rank = 2)) % round(fitted(oly2)) % round(fitted(oly2)) - with(top10, cbind(gold,silver,bronze)) % summary(oly2) # Saturated model % zz 20100927 unsure % Then \code{.grc.df} is deleted before exiting the function. % print(Coef(rcim0, matrix = TRUE), digits = 3) % Prior to 201310: % str0 = if (!Rank) NULL else { % if (M1 == 1) 1 else setdiff(1:(M1 * ncol(y)), % c(1 + (1:ncol(y)) * M1, Index.corner)) % }, % str0 = if (Rank > 0) 1 else NULL, % Index.corner = if (!Rank) NULL else 1 + M1 * (1:Rank), VGAM/man/biclaytoncopUC.Rd0000644000176200001440000000477513565414527015015 0ustar liggesusers\name{Biclaytoncop} \alias{dbiclaytoncop} %\alias{pbiclaytoncop} \alias{rbiclaytoncop} \title{Clayton Copula (Bivariate) Distribution} \description{ Density and random generation for the (one parameter) bivariate Clayton copula distribution. } \usage{ dbiclaytoncop(x1, x2, apar = 0, log = FALSE) rbiclaytoncop(n, apar = 0) } %pbiclaytoncop(q1, q2, rho = 0) \arguments{ \item{x1, x2}{vector of quantiles. The \code{x1} and \code{x2} should both be in the interval \eqn{(0,1)}. } \item{n}{number of observations. Same as \code{\link[stats]{rnorm}}. } \item{apar}{the association parameter. Should be in the interval \eqn{[0, \infty)}{[0, Inf)}. The default corresponds to independence. } \item{log}{ Logical. If \code{TRUE} then the logarithm is returned. % Same as \code{\link[stats]{rnorm}}. } } \value{ \code{dbiclaytoncop} gives the density at point (\code{x1},\code{x2}), \code{rbiclaytoncop} generates random deviates (a two-column matrix). 
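
As a quick sanity check (a small sketch only), the margins of the
generated deviates should be roughly standard uniform, with positive
dependence for \code{apar > 0}:
\preformatted{
set.seed(1)
r <- rbiclaytoncop(n = 2000, apar = 3)
c(mean(r[, 1]), mean(r[, 2]))  # Each should be close to 0.5
cor(r[, 1], r[, 2])  # Positive for apar = 3
}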
% \code{pbiclaytoncop} gives the distribution function, and } \references{ % A Model for Association in Bivariate Survival Data Clayton, D. (1982) A model for association in bivariate survival data. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{44}, 414--422. } \author{ R. Feyter and T. W. Yee } \details{ See \code{\link{biclaytoncop}}, the \pkg{VGAM} family functions for estimating the parameter by maximum likelihood estimation, for the formula of the cumulative distribution function and other details. } \note{ \code{dbiclaytoncop()} does not yet handle \code{x1 = 0} and/or \code{x2 = 0}. %Yettodo: allow \code{x1} and/or \code{x2} to have values 1, %and to allow any values for \code{x1} and/or \code{x2} to be %outside the unit square. } \seealso{ \code{\link{biclaytoncop}}, \code{\link{binormalcop}}, \code{\link{binormal}}. } \examples{ \dontrun{ edge <- 0.01 # A small positive value N <- 101; x <- seq(edge, 1.0 - edge, len = N); Rho <- 0.7 ox <- expand.grid(x, x) zedd <- dbiclaytoncop(ox[, 1], ox[, 2], apar = Rho, log = TRUE) par(mfrow = c(1, 2)) contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5, las = 1) plot(rbiclaytoncop(1000, 2), col = "blue", las = 1) } } \keyword{distribution} %plot(r <- rbiclaytoncop(n = 3000, apar = exp(2)), col = "blue") %par(mfrow = c(1, 2)) %hist(r[, 1]) # Should be uniform %hist(r[, 2]) # Should be uniform VGAM/man/posbernUC.Rd0000644000176200001440000001155713565414527013773 0ustar liggesusers\name{posbernUC} \alias{posbernUC} \alias{dposbern} \alias{rposbern} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive Bernoulli Sequence Model } \description{ Density, and random generation for multiple Bernoulli responses where each row in the response matrix has at least one success. } \usage{ rposbern(n, nTimePts = 5, pvars = length(xcoeff), xcoeff = c(-2, 1, 2), Xmatrix = NULL, cap.effect = 1, is.popn = FALSE, link = "logitlink", earg.link = FALSE) dposbern(x, prob, prob0 = prob, log = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ response vector or matrix. Should only have 0 and 1 values, at least two columns, and each row should have at least one 1. } \item{nTimePts}{Number of sampling occasions. Called \eqn{\tau} in \code{\link{posbernoulli.b}} and \code{\link{posbernoulli.t}}. } \item{n}{number of observations. Usually a single positive integer, else the length of the vector is used. See argument \code{is.popn}. } \item{is.popn}{ Logical. If \code{TRUE} then argument \code{n} is the population size and what is returned may have substantially less rows than \code{n}. That is, if an animal has at least one one in its sequence then it is returned, else that animal is not returned because it never was captured. % Put in other words, only animals captured at least once are % returned in the sample. } \item{Xmatrix}{ Optional \bold{X} matrix. If given, the \bold{X} matrix is not generated internally. } \item{cap.effect}{ Numeric, the capture effect. Added to the linear predictor if captured previously. A positive or negative value corresponds to a trap-happy and trap-shy effect respectively. } % \item{double.ch}{ % Logical. % If \code{TRUE} then the values of \code{ch0}, \code{ch1}, \ldots are % 2 or 0, else 1 or 0. 
% Setting this argument \code{TRUE} means that a model can be fitted
% with half the capture history in both denominator and numerator
% (this is a compromise of the Huggins (1991) model where the full
% capture history only appears in the numerator);
% }
\item{pvars}{
Number of other numeric covariates that make up
the linear predictor.
Labelled \code{x1}, \code{x2}, \ldots,
where the first is an intercept, and the others are
independent standard \code{\link[stats:Uniform]{runif}} random variates.
The first \code{pvars} elements of \code{xcoeff} are used.

}
\item{xcoeff}{
The regression coefficients of the linear predictor.
These correspond to \code{x1}, \code{x2}, \ldots,
and the first is for the intercept.
The length of \code{xcoeff} must be at least \code{pvars}.

}
\item{link, earg.link}{
The former is used to generate the probabilities for capture
at each occasion.
Other details at \code{\link{CommonVGAMffArguments}}.

}
\item{prob, prob0}{
Matrix of probabilities for the numerator and denominators
respectively.
The default does \emph{not} correspond to the \eqn{M_b} model
since the \eqn{M_b} model has a denominator which involves
the capture history.

}
\item{log}{ Logical. Return the logarithm of the answer? }

}
\details{
The form of the conditional likelihood is described in
\code{\link{posbernoulli.b}} and/or
\code{\link{posbernoulli.t}} and/or
\code{\link{posbernoulli.tb}}.
The denominator is equally shared among the elements of
the matrix \code{x}.

}
\value{
\code{rposbern} returns a data frame with some attributes.
The function generates random deviates
(\eqn{\tau} columns labelled \code{y1}, \code{y2}, \ldots)
for the response.
Some indicator columns are also included
(those starting with \code{ch} are for previous capture history).
The default setting corresponds to an \eqn{M_{bh}} model that
has a single trap-happy effect.
Covariates \code{x1}, \code{x2}, \ldots have the same effect on
capture/recapture at every sampling occasion
(see the argument \code{parallel.t} in, e.g.,
\code{\link{posbernoulli.tb}}).

% and these are useful for the \code{xij} argument.

The function \code{dposbern} gives the density.

}
%\references{ }
\author{ Thomas W. Yee. }
\note{
The \code{r}-type function is experimental only and does not
follow the usual conventions of \code{r}-type R functions.
It may change a lot in the future.
The \code{d}-type function is more conventional and is less
likely to change.

}
\seealso{
\code{\link{posbernoulli.tb}},
\code{\link{posbernoulli.b}},
\code{\link{posbernoulli.t}}.
% \code{\link{huggins91}},

}
\examples{
rposbern(n = 10)
attributes(pdata <- rposbern(n = 100))
M.bh <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + x3,
             posbernoulli.b(I2 = FALSE), data = pdata, trace = TRUE)
constraints(M.bh)
summary(M.bh)
}
\keyword{distribution}
\keyword{datagen}
%double.ch = FALSE,
% and those starting with \code{z} are zero.
VGAM/man/exppoisson.Rd0000644000176200001440000000503613565414527014275 0ustar liggesusers\name{exppoisson}
\alias{exppoisson}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Exponential Poisson Distribution Family Function}
\description{
Estimates the two parameters of the exponential Poisson distribution
by maximum likelihood estimation.
}
\usage{
exppoisson(lrate = "loglink", lshape = "loglink",
           irate = 2, ishape = 1.1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lshape, lrate}{
Link functions for the two positive parameters.
See \code{\link{Links}} for more choices.
} \item{ishape, irate}{ Numeric. Initial values for the \code{shape} and \code{rate} parameters. Currently this function is not intelligent enough to obtain better initial values. } \item{zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The exponential Poisson distribution has density function \deqn{f(y; \beta = rate, \lambda = shape) = \frac{\lambda \beta}{1 - e^{-\lambda}} \, e^{-\lambda - \beta y + \lambda \exp{(-\beta y)}}}{% f(y; a = shape, b = rate) = (a*b/(1 - e^(-a))) * e^{-a - b*y + a * e^(-b*y)}} where \eqn{y > 0}, and the parameters shape, \eqn{\lambda}{a}, and rate, \eqn{\beta}{b}, are positive. The distribution implies a population facing discrete hazard rates which are multiples of a base hazard. This \pkg{VGAM} family function requires the \code{hypergeo} package (to use their \code{genhypergeo} function). The median is returned as the fitted value. % This \pkg{VGAM} family function requires the \pkg{hypergeo} package % (to use their \code{\link[hypergeo]{genhypergeo}} function). } \section{Warning }{ This \pkg{VGAM} family function does not work properly! } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Kus, C., (2007). A new lifetime distribution. \emph{Computational Statistics and Data Analysis}, \bold{51}, 4497--4509. } \author{ J. G. Lauder, jamesglauder@gmail.com } \seealso{ \code{\link{dexppois}}, \code{\link{exponential}}, \code{\link{poisson}}. } \examples{ \dontrun{ shape <- exp(1); rate <- exp(2) rdata <- data.frame(y = rexppois(n = 1000, rate = rate, shape = shape)) library("hypergeo") # Required! fit <- vglm(y ~ 1, exppoisson, data = rdata, trace = FALSE, maxit = 1200) c(with(rdata, median(y)), head(fitted(fit), 1)) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } } \keyword{models} \keyword{regression} VGAM/man/oipospoisUC.Rd0000644000176200001440000000715213565414527014343 0ustar liggesusers\name{Oipospois} \alias{Oipospois} \alias{doipospois} \alias{poipospois} \alias{qoipospois} \alias{roipospois} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Inflated Positive Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for the one-inflated positive Poisson distribution with parameter \code{pstr1}. } \usage{ doipospois(x, lambda, pstr1 = 0, log = FALSE) poipospois(q, lambda, pstr1 = 0) qoipospois(p, lambda, pstr1 = 0) roipospois(n, lambda, pstr1 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, p, q, n}{Same as \code{\link{Pospois}}.} \item{lambda}{ Vector of positive means. } \item{pstr1}{ Probability of a structural one (i.e., ignoring the positive Poisson distribution), called \eqn{\phi}{phi}. The default value of \eqn{\phi = 0}{phi = 0} corresponds to the response having a positive Poisson distribution. } \item{log}{ Logical. Return the logarithm of the answer? } } \details{ The probability function of \eqn{Y} is 1 with probability \eqn{\phi}{phi}, and \eqn{PosPoisson(\lambda)}{PosPoisson(lambda)} with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=1) =\phi + (1-\phi) P(W=1)}{% P(Y=1) = phi + (1-phi) * P(W=1)} where \eqn{W} is distributed as a positive \eqn{Poisson(\lambda)}{Poisson(lambda)} random variate. 
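
This mixture representation is easily checked numerically
(a small sketch; \code{\link{dpospois}} gives the
positive-Poisson density):
\preformatted{
lambda <- 2; phi <- 0.3
doipospois(1, lambda, pstr1 = phi)  # P(Y = 1)
phi + (1 - phi) * dpospois(1, lambda)  # Should be identical
max(abs(doipospois(2:10, lambda, pstr1 = phi) -
        (1 - phi) * dpospois(2:10, lambda)))  # Should be 0
}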
}
\value{
\code{doipospois} gives the density,
\code{poipospois} gives the distribution function,
\code{qoipospois} gives the quantile function, and
\code{roipospois} generates random deviates.

}
%\references{ }
\author{ T. W. Yee }
\note{
The argument \code{pstr1} is recycled to the required length,
and usually has values which lie in the interval \eqn{[0,1]}.

These functions actually allow for the
\emph{one-deflated positive Poisson} distribution.
Here, \code{pstr1} is also permitted to lie in the interval
\code{[-lambda / (expm1(lambda) - lambda), 0]}.
The resulting probability of a unit count is \emph{less than}
the nominal positive Poisson value, and the use of \code{pstr1}
to stand for the probability of a structural 1 loses its meaning.
%
%
%
When \code{pstr1} equals
\code{-lambda / (expm1(lambda) - lambda)}
this corresponds to the 0- and 1-truncated Poisson distribution.

}
\seealso{
\code{\link{Pospois}},
\code{\link{oapospoisson}},
\code{\link{oipospoisson}},
\code{\link{otpospoisson}},
\code{\link{pospoisson}},
\code{\link[stats:Poisson]{dpois}},
\code{\link{poissonff}}.

}
\examples{
lambda <- 3; pstr1 <- 0.2; x <- (-1):7
(ii <- doipospois(x, lambda, pstr1 = pstr1))
table(roipospois(100, lambda, pstr1 = pstr1))
round(doipospois(1:10, lambda, pstr1 = pstr1) * 100)  # Should be similar

\dontrun{ x <- 0:10
par(mfrow = c(2, 1))  # One-Inflated Positive Poisson
barplot(rbind(doipospois(x, lambda, pstr1 = pstr1),
                dpospois(x, lambda)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("OIPP(", lambda, ", pstr1 = ", pstr1,
                     ") (blue) vs", " PosPoisson(", lambda,
                     ") (orange)", sep = ""),
        names.arg = as.character(x))

deflat.limit <- -lambda / (expm1(lambda) - lambda)  # 1-deflated Pos Poisson
newpstr1 <- round(deflat.limit, 3) + 0.001  # Inside and near the boundary
barplot(rbind(doipospois(x, lambda, pstr1 = newpstr1),
                dpospois(x, lambda)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("ODPP(", lambda, ", pstr1 = ", newpstr1,
                     ") (blue) vs", " PosPoisson(", lambda,
                     ") (orange)", sep = ""),
        names.arg = as.character(x)) }
}
\keyword{distribution}
VGAM/man/gammaR.Rd0000644000176200001440000001131313565414527013265 0ustar liggesusers\name{gammaR}
\alias{gammaR}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ 2-parameter Gamma Regression Family Function }
\description{
Estimates the 2-parameter gamma distribution
by maximum likelihood estimation.
}
\usage{
gammaR(lrate = "loglink", lshape = "loglink", irate = NULL,
       ishape = NULL, lss = TRUE, zero = "shape")
}
% zero = ifelse(lss, -2, -1)
%- maybe also 'usage' for other objects documented here.
\arguments{
% \item{nowarning}{ Logical. Suppress a warning? }
\item{lrate, lshape}{
Link functions applied to the (positive) \emph{rate} and \emph{shape}
parameters.
See \code{\link{Links}} for more choices.

}
% \item{expected}{
% Logical. Use Fisher scoring? The default is yes, otherwise
% Newton-Raphson is used.
% expected = TRUE,
% }
\item{irate, ishape}{
Optional initial values for \emph{rate} and \emph{shape}.
A \code{NULL} means a value is computed internally.
If a failure to converge occurs, try using these arguments.

}
% \item{zero}{
% An integer specifying which
% linear/additive predictor is to be modelled as an intercept only.
% If assigned, the single value should be either 1 or 2 or \code{NULL}.
% The default is to model \eqn{shape} as an intercept only.
% A value \code{NULL} means neither 1 or 2.
% }
\item{zero, lss}{
Details at \code{\link{CommonVGAMffArguments}}.
} } \details{ The density function is given by \deqn{f(y; rate, shape) = \exp(-rate \times y) \times y^{shape-1} \times rate^{shape} / \Gamma(shape)}{% f(y; rate, shape) = exp(-rate * y) y^(shape-1) rate^(shape) / gamma(shape)} for \eqn{shape > 0}, \eqn{rate > 0} and \eqn{y > 0}. Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma function, as in \code{\link[base:Special]{gamma}}. The mean of \emph{Y} is \eqn{\mu = shape/rate}{mu = shape/rate} (returned as the fitted values) with variance \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 = mu^2 /shape = shape/rate^2}. By default, the two linear/additive predictors are \eqn{\eta_1 = \log(rate)}{eta1 = log(rate)} and \eqn{\eta_2 = \log(shape)}{eta2 = log(shape)}. % 20180403: picked up a bug: % \eqn{\eta_1 = \log(shape)}{eta1 = log(shape)} and % \eqn{\eta_2 = \log(rate)}{eta2 = log(rate)}. % expected = FALSE does not work well. 20140828. % The argument \code{expected} refers to the type of information % matrix. The expected information matrix corresponds to Fisher scoring % and is numerically better here. The observed information matrix % corresponds to the Newton-Raphson algorithm and may be withdrawn % from the family function in the future. If both algorithms work then % the differences in the results are often not huge. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Most standard texts on statistical distributions describe the 2-parameter gamma distribution, e.g., Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ The parameters \eqn{rate} and \eqn{shape} match with the arguments \code{rate} and \code{shape} of \code{\link[stats]{rgamma}}. The order of the arguments agree too. Here, \eqn{scale = 1/rate} is used, so one can use \code{\link{negloglink}}. Multiple responses are handled. If \eqn{rate = 1} use the family function \code{\link{gamma1}} to estimate \eqn{shape}. The reciprocal of a 2-parameter gamma random variate has an \emph{inverse gamma} distribution. One might write a \pkg{VGAM} family function called \code{invgammaR()} to estimate this, but for now, just feed in the reciprocal of the response. % 20180403 } \seealso{ \code{\link{gamma1}} for the 1-parameter gamma distribution, \code{\link{gamma2}} for another parameterization of the 2-parameter gamma distribution, \code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution, \code{\link{expexpff}}, \code{\link{simulate.vlm}}, \code{\link[stats]{rgamma}}, \code{\link{negloglink}}. } \examples{ # Essentially a 1-parameter gamma gdata <- data.frame(y1 = rgamma(n <- 100, shape = exp(1))) fit1 <- vglm(y1 ~ 1, gamma1, data = gdata, trace = TRUE) fit2 <- vglm(y1 ~ 1, gammaR, data = gdata, trace = TRUE, crit = "coef") coef(fit2, matrix = TRUE) Coef(fit2) # Essentially a 2-parameter gamma gdata <- data.frame(y2 = rgamma(n = 500, rate = exp(1), shape = exp(2))) fit2 <- vglm(y2 ~ 1, gammaR, data = gdata, trace = TRUE, crit = "coef") coef(fit2, matrix = TRUE) Coef(fit2) summary(fit2) } \keyword{models} \keyword{regression} VGAM/man/log1mexp.Rd0000644000176200001440000000315613565414527013623 0ustar liggesusers\name{log1mexp} \alias{log1mexp} \alias{log1pexp} \title{ Logarithms with an Unit Offset and Exponential Term } \description{ Computes \code{log(1 + exp(x))} and \code{log(1 - exp(-x))} accurately. 
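For example, naive evaluation breaks down for large arguments (an illustrative sketch; see also the examples below):
\preformatted{
log1pexp(800)      # Accurate: close to 800
log(1 + exp(800))  # Naive; exp(800) overflows to Inf
}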
} \usage{ log1mexp(x) log1pexp(x) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A vector of reals (numeric). Complex numbers not allowed since \code{\link[base]{expm1}} and \code{\link[base]{log1p}} do not handle these. } } \details{ %% ~~ If necessary, more details than the description above ~~ Computes \code{log(1 + exp(x))} and \code{log(1 - exp(-x))} accurately. An adjustment is made when \eqn{x} is away from 0 in value. } \value{ \code{log1mexp(x)} gives the value of \eqn{\log(1-\exp(-x))}{log(1-exp(-x))}. \code{log1pexp(x)} gives the value of \eqn{\log(1+\exp(x))}{log(1+exp(x))}. } \references{ Maechler, Martin (2012). Accurately Computing log(1-exp(-|a|)). Assessed from the \pkg{Rmpfr} package. } \author{ This is a direct translation, by Xiangjie Xue and T. W. Yee, of the function in Martin Maechler's (2012) paper. } \note{ If \code{NA} or \code{NaN} is present in the input, the corresponding output will be \code{NA}. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link[base]{log1p}}, \code{\link[base]{expm1}}, \code{\link[base]{exp}}, \code{\link[base]{log}} } \examples{ x <- c(10, 50, 100, 200, 400, 500, 800, 1000, 1e4, 1e5, 1e20, Inf, NA) log1pexp(x) log(1 + exp(x)) # Naive; suffers from overflow log1mexp(x) log(1 - exp(-x)) y <- -x log1pexp(y) log(1 + exp(y)) # Naive; suffers from inaccuracy } VGAM/man/A1A2A3.Rd0000644000176200001440000000447213565414527012701 0ustar liggesusers\name{A1A2A3} \alias{A1A2A3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The A1A2A3 Blood Group System } \description{ Estimates the three independent parameters of the A1A2A3 blood group system. } \usage{ A1A2A3(link = "logitlink", inbreeding = FALSE, ip1 = NULL, ip2 = NULL, iF = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to \code{p1}, \code{p2} and \code{f}. See \code{\link{Links}} for more choices. } \item{inbreeding}{ Logical. Is there inbreeding? % Logical. Is the HWE assumption to be made? } \item{ip1, ip2, iF}{ Optional initial values for \code{p1}, \code{p2} and \code{f}. } } \details{ The parameters \code{p1} and \code{p2} are probabilities, so that \code{p3=1-p1-p2} is the third probability. The parameter \code{f} is the third independent parameter if \code{inbreeding = TRUE}. If \code{inbreeding = FALSE} then \eqn{f = 0} and Hardy-Weinberg Equilibrium (HWE) is assumed. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Lange, K. (2002) \emph{Mathematical and Statistical Methods for Genetic Analysis}, 2nd ed. New York: Springer-Verlag. } \author{ T. W. Yee } \note{ The input can be a 6-column matrix of counts, with columns corresponding to \code{A1A1}, \code{A1A2}, \code{A1A3}, \code{A2A2}, \code{A2A3}, \code{A3A3} (in order). Alternatively, the input can be a 6-column matrix of proportions (so each row adds to 1) and the \code{weights} argument is used to specify the total number of counts for each row. } \seealso{ \code{\link{AA.Aa.aa}}, \code{\link{AB.Ab.aB.ab}}, \code{\link{ABO}}, \code{\link{MNSs}}.
% \code{\link{AB.Ab.aB.ab2}}, } \examples{ ymat <- cbind(108, 196, 429, 143, 513, 559) fit <- vglm(ymat ~ 1, A1A2A3(link = probitlink), trace = TRUE, crit = "coef") fit <- vglm(ymat ~ 1, A1A2A3(link = logitlink, ip1 = 0.3, ip2 = 0.3, iF = 0.02), trace = TRUE, crit = "coef") Coef(fit) # Estimated p1 and p2 rbind(ymat, sum(ymat) * fitted(fit)) sqrt(diag(vcov(fit))) } \keyword{models} \keyword{regression} % 20190213; made dontrun, for VGAM 1.1-0. Used to be link = probit VGAM/man/triangle.Rd0000644000176200001440000000756313565414527013702 0ustar liggesusers\name{triangle} \alias{triangle} %- Also NEED an '\alias' for EACH other topic documented here. \title{Triangle Distribution Family Function } \description{ Estimating the parameter of the triangle distribution by maximum likelihood estimation. } \usage{ triangle(lower = 0, upper = 1, link = extlogitlink(min = 0, max = 1), itheta = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lower, upper}{lower and upper limits of the distribution. Must be finite. Called \eqn{A} and \eqn{B} respectively below. } \item{link}{ Parameter link function applied to the parameter \eqn{\theta}{theta}, which lies in \eqn{(A,B)}. See \code{\link{Links}} for more choices. The default constrains the estimate to lie in the interval. } \item{itheta}{ Optional initial value for the parameter. The default is to compute the value internally. } } \details{ The triangle distribution has a probability density function that consists of two lines joined at \eqn{\theta}{theta}, which is the location of the mode. The lines intersect the \eqn{y = 0} axis at \eqn{A} and \eqn{B}. Here, Fisher scoring is used. On fitting, the \code{extra} slot has components called \code{lower} and \code{upper} which contains the values of the above arguments (recycled to the right length). The fitted values are the mean of the distribution, which is \eqn{(A + B + \theta)/3}{(A + B + theta)/3}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Kotz, S. and van Dorp, J. R. (2004) Beyond Beta: Other Continuous Families of Distributions with Bounded Support and Applications. Chapter 1. World Scientific: Singapore. Nguyen, H. D. and McLachlan, G. J. (2016) Maximum likelihood estimation of triangular and polygon distributions. \emph{Computational Statistics and Data Analysis}, \bold{102}, 23--36. } \author{ T. W. Yee } \section{Warning}{ The MLE regularity conditions do not hold for this distribution (e.g., the first derivative evaluated at the mode does not exist because it is not continuous) so that misleading inferences may result, e.g., in the \code{summary} and \code{vcov} of the object. Additionally, convergence to the MLE often appears to fail. } \note{ The response must contain values in \eqn{(A, B)}. For most data sets (especially small ones) it is very common for half-stepping to occur. % 20130603 Arguments \code{lower} and \code{upper} and \code{link} must match. For example, setting \code{lower = 0.2} and \code{upper = 4} and \code{link = extlogitlink(min = 0.2, max = 4.1)} will result in an error. Ideally \code{link = extlogitlink(min = lower, max = upper)} ought to work but it does not (yet)! Minimal error checking is done for this deficiency. } \seealso{ \code{\link{Triangle}}, \code{\link{Topple}}, \code{\link{simulate.vlm}}. 
} \examples{ # Example 1 tdata <- data.frame(y = rtriangle(n <- 3000, theta = 3/4)) fit <- vglm(y ~ 1, triangle(link = "identitylink"), data = tdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) head(fit@extra$lower) head(fitted(fit)) with(tdata, mean(y)) # Example 2; Kotz and van Dorp (2004), p.14 rdata <- data.frame(y = c(0.1, 0.25, 0.3, 0.4, 0.45, 0.6, 0.75, 0.8)) fit <- vglm(y ~ 1, triangle(link = "identitylink"), data = rdata, trace = TRUE, crit = "coef", maxit = 1000) Coef(fit) # The MLE is the 3rd order statistic, which is 0.3. fit <- vglm(y ~ 1, triangle(link = "identitylink"), data = rdata, trace = TRUE, crit = "coef", maxit = 1001) Coef(fit) # The MLE is the 3rd order statistic, which is 0.3. } \keyword{models} \keyword{regression} % 20130603: yettodo: fix up so ideally % link = extlogitlink(min = lower, max = upper), itheta = NULL) % works. VGAM/man/zinegbinUC.Rd0000644000176200001440000000654613565414527014132 0ustar liggesusers\name{Zinegbin} \alias{Zinegbin} \alias{dzinegbin} \alias{pzinegbin} \alias{qzinegbin} \alias{rzinegbin} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Negative Binomial Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-inflated negative binomial distribution with parameter \code{pstr0}. } \usage{ dzinegbin(x, size, prob = NULL, munb = NULL, pstr0 = 0, log = FALSE) pzinegbin(q, size, prob = NULL, munb = NULL, pstr0 = 0) qzinegbin(p, size, prob = NULL, munb = NULL, pstr0 = 0) rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{ Same as in \code{\link[stats]{runif}}. } \item{size, prob, munb, log}{ Arguments matching \code{\link[stats:NegBinomial]{dnbinom}}. The argument \code{munb} corresponds to \code{mu} in \code{\link[stats:NegBinomial]{dnbinom}} and has been renamed to emphasize the fact that it is the mean of the negative binomial \emph{component}. } \item{pstr0}{ Probability of structural zero (i.e., ignoring the negative binomial distribution), called \eqn{\phi}{phi}. } } \details{ The probability function of \eqn{Y} is 0 with probability \eqn{\phi}{phi}, and a negative binomial distribution with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{% P(Y=0) = phi + (1-phi) * P(W=0)} where \eqn{W} is distributed as a negative binomial distribution (see \code{\link[stats:NegBinomial]{rnbinom}}.) See \code{\link{negbinomial}}, a \pkg{VGAM} family function, for the formula of the probability density function and other details of the negative binomial distribution. } \value{ \code{dzinegbin} gives the density, \code{pzinegbin} gives the distribution function, \code{qzinegbin} gives the quantile function, and \code{rzinegbin} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pstr0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. These functions actually allow for \emph{zero-deflation}. That is, the resulting probability of a zero count is \emph{less than} the nominal value of the parent distribution. See \code{\link{Zipois}} for more information. } \seealso{ \code{\link{zinegbinomial}}, \code{\link[stats:NegBinomial]{rnbinom}}, \code{\link{rzipois}}. 
} \examples{ munb <- 3; pstr0 <- 0.2; size <- k <- 10; x <- 0:10 (ii <- dzinegbin(x, pstr0 = pstr0, mu = munb, size = k)) max(abs(cumsum(ii) - pzinegbin(x, pstr0 = pstr0, mu = munb, size = k))) # 0 table(rzinegbin(100, pstr0 = pstr0, mu = munb, size = k)) table(qzinegbin(runif(1000), pstr0 = pstr0, mu = munb, size = k)) round(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k) * 1000) # Should be similar \dontrun{barplot(rbind(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k), dnbinom(x, mu = munb, size = k)), las = 1, beside = TRUE, col = c("blue", "green"), ylab = "Probability", main = paste("ZINB(mu = ", munb, ", k = ", k, ", pstr0 = ", pstr0, ") (blue) vs NB(mu = ", munb, ", size = ", k, ") (green)", sep = ""), names.arg = as.character(x)) } } \keyword{distribution} VGAM/man/inv.paralogistic.Rd0000644000176200001440000000624213565414527015342 0ustar liggesusers\name{inv.paralogistic} \alias{inv.paralogistic} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Inverse Paralogistic Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter inverse paralogistic distribution. } \usage{ inv.paralogistic(lscale = "loglink", lshape1.a = "loglink", iscale = NULL, ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information. } \item{lshape1.a, lscale}{ Parameter link functions applied to the (positive) parameters \code{a} and \code{scale}. See \code{\link{Links}} for more choices. } \item{iscale, ishape1.a, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. For \code{imethod = 2} a good initial value for \code{ishape1.a} is needed to obtain a good estimate for the other parameter. } \item{gscale, gshape1.a}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 2-parameter inverse paralogistic distribution is the 4-parameter generalized beta II distribution with shape parameter \eqn{q=1} and \eqn{a=p}. It is the 3-parameter Dagum distribution with \eqn{a=p}. More details can be found in Kleiber and Kotz (2003). The inverse paralogistic distribution has density \deqn{f(y) = a^2 y^{a^2-1} / [b^{a^2} \{1 + (y/b)^a\}^{a+1}]}{% f(y) = a^2 y^(a^2-1) / [b^(a^2) (1 + (y/b)^a)^(a+1)]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and \eqn{a} is the shape parameter. The mean is \deqn{E(Y) = b \, \Gamma(a + 1/a) \, \Gamma(1 - 1/a) / \Gamma(a)}{% E(Y) = b gamma(a + 1/a) gamma(1 - 1/a) / gamma(a)} provided \eqn{a > 1}; these are returned as the fitted values. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}. } \seealso{ \code{\link{Inv.paralogistic}}, \code{\link{genbetaII}}, \code{\link{betaII}}, \code{\link{dagum}}, \code{\link{sinmad}}, \code{\link{fisk}}, \code{\link{inv.lomax}}, \code{\link{lomax}}, \code{\link{paralogistic}}, \code{\link{simulate.vlm}}. 
} \examples{ idata <- data.frame(y = rinv.paralogistic(n = 3000, exp(1), scale = exp(2))) fit <- vglm(y ~ 1, inv.paralogistic(lss = FALSE), data = idata, trace = TRUE) fit <- vglm(y ~ 1, inv.paralogistic(imethod = 2, ishape1.a = 4), data = idata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/is.parallel.Rd0000644000176200001440000000303313565414527014267 0ustar liggesusers\name{is.parallel} \alias{is.parallel} \alias{is.parallel.matrix} \alias{is.parallel.vglm} \title{Parallelism Constraint Matrices} \description{ Returns a logical vector from a test of whether an object such as a matrix or VGLM object corresponds to a parallelism assumption. } \usage{ is.parallel.matrix(object, \dots) is.parallel.vglm(object, type = c("term", "lm"), \dots) } \arguments{ \item{object}{ an object such as a constraint matrix or a \code{\link{vglm}} object. } \item{type}{ passed into \code{\link{constraints}}. } \item{\dots}{ additional optional arguments. Currently unused. } } \details{ These functions may be useful for categorical models such as \code{\link{propodds}}, \code{\link{cumulative}}, \code{\link{acat}}, \code{\link{cratio}}, \code{\link{sratio}}, \code{\link{multinomial}}. } \value{ A vector of logicals, testing whether each constraint matrix is a one-column matrix of ones. Note that parallelism can still be thought of as holding if the constraint matrix has non-zero but constant values; however, this is currently not implemented. No checking is done that the constraint matrices have the same number of rows. } \seealso{ \code{\link{constraints}}, \code{\link{vglm}}. } \examples{ \dontrun{ require("VGAMdata") fit <- vglm(educ ~ sm.bs(age) * sex + ethnicity, cumulative(parallel = TRUE), head(xs.nz, 200)) is.parallel(fit) is.parallel(fit, type = "lm") # For each column of the LM matrix } } \keyword{models} \keyword{regression} VGAM/man/guplot.Rd0000644000176200001440000000517713565414527013402 0ustar liggesusers\name{guplot} \alias{guplot} \alias{guplot.default} \alias{guplot.vlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gumbel Plot } \description{ Produces a Gumbel plot, a diagnostic plot for checking whether the data appears to be from a Gumbel distribution. } \usage{ guplot(object, ...) guplot.default(y, main = "Gumbel Plot", xlab = "Reduced data", ylab = "Observed data", type = "p", ...) guplot.vlm(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{y}{ A numerical vector. \code{NA}s etc. are not allowed.} \item{main}{Character. Overall title for the plot. } \item{xlab}{Character. Title for the x axis. } \item{ylab}{Character. Title for the y axis. } \item{type}{Type of plot. The default means points are plotted. } \item{object}{ An object that inherits class \code{"vlm"}, usually of class \code{\link{vglm-class}} or \code{\link{vgam-class}}. } \item{\dots}{ Graphical argument passed into \code{\link[graphics]{plot}}. See \code{\link[graphics]{par}} for an exhaustive list. The arguments \code{xlim} and \code{ylim} are particularly useful. } } \details{ If \eqn{Y} has a Gumbel distribution then plotting the sorted values \eqn{y_i} versus the \emph{reduced values} \eqn{r_i} should appear linear. The reduced values are given by \deqn{r_i = -\log(-\log(p_i)) }{% r_i = - log(- log(p_i)) } where \eqn{p_i} is the \eqn{i}th plotting position, taken here to be \eqn{(i-0.5)/n}. Here, \eqn{n} is the number of observations.
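As a small sketch of the computation (assuming \code{y} is the numeric data vector):
\preformatted{
n <- length(y)
p <- ((1:n) - 0.5) / n    # Plotting positions
r <- -log(-log(p))        # Reduced values
# plot(r, sort(y)) should then appear roughly linear for Gumbel data
}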
Curvature upwards/downwards may indicate a Frechet/Weibull distribution, respectively. Outliers may also be detected using this plot. The function \code{guplot} is generic, and \code{guplot.default} and \code{guplot.vlm} are some methods functions for Gumbel plots. } \value{ A list is returned invisibly with the following components. \item{x }{The reduced data. } \item{y }{The sorted y data. } } %% zz not sure about the reference \references{ Coles, S. (2001) \emph{An Introduction to Statistical Modeling of Extreme Values}. London: Springer-Verlag. Gumbel, E. J. (1958) \emph{Statistics of Extremes}. New York, USA: Columbia University Press. } \author{ T. W. Yee } \note{ The Gumbel distribution is a special case of the GEV distribution with shape parameter equal to zero. } \seealso{ \code{\link{gumbel}}, \code{\link{gumbelff}}, \code{\link{gev}}, \code{\link{venice}}. } \examples{\dontrun{guplot(rnorm(500), las = 1) -> ii names(ii) guplot(with(venice, r1), col = "blue") # Venice sea levels data }} \keyword{models} \keyword{regression} VGAM/man/logUC.Rd0000644000176200001440000000407413565414527013100 0ustar liggesusers\name{Log} \alias{Log} \alias{dlog} \alias{plog} \alias{qlog} \alias{rlog} \title{ Logarithmic Distribution } \description{ Density, distribution function, quantile function, and random generation for the logarithmic distribution. } \usage{ dlog(x, shape, log = FALSE) plog(q, shape, log.p = FALSE) qlog(p, shape) rlog(n, shape) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, p, n}{ Same interpretation as in \code{\link[stats]{runif}}. } \item{shape}{ The shape parameter value \eqn{c} described in \code{\link{logff}}. % Here it is called \code{shape} because \eqn{0 < c < 1}. } \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{size, prob, log}{ Parameters from the ordinary binomial distribution (see \code{\link[stats:Binomial]{dbinom}}). } \item{pobs0}{ Probability of (an observed) zero, called \eqn{pobs0}. The default value of \code{pobs0 = 0} corresponds to the response having a positive binomial distribution. } } \details{ The probability function of \eqn{Y} is 0 with probability \code{pobs0}, else a positive binomial(size, prob) distribution. } \value{ \code{dzabinom} gives the density, \code{pzabinom} gives the distribution function, \code{qzabinom} gives the quantile function, and \code{rzabinom} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pobs0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. } \seealso{ \code{\link{zibinomial}}, \code{\link{rposbinom}}. % \code{\link{zabinomial}}, } \examples{ size <- 10; prob <- 0.15; pobs0 <- 0.05; x <- (-1):7 dzabinom(x, size = size, prob = prob, pobs0 = pobs0) table(rzabinom(100, size = size, prob = prob, pobs0 = pobs0)) \dontrun{ x <- 0:10 barplot(rbind(dzabinom(x, size = size, prob = prob, pobs0 = pobs0), dbinom(x, size = size, prob = prob)), beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1, ylab = "Probability", names.arg = as.character(x), main = paste("ZAB(size = ", size, ", prob = ", prob, ", pobs0 = ", pobs0, ") [blue] vs", " Binom(size = ", size, ", prob = ", prob, ") [orange] densities", sep = "")) } } \keyword{distribution} VGAM/man/diffzeta.Rd0000644000176200001440000000365513565414527013663 0ustar liggesusers\name{diffzeta} \alias{diffzeta} %- Also NEED an '\alias' for EACH other topic documented here.
\title{ Differenced Zeta Distribution Family Function } \description{ Estimates the parameter of the differenced zeta distribution. } \usage{ diffzeta(start = 1, lshape = "loglink", ishape = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape, ishape}{ Same as \code{\link{zetaff}}. } \item{start}{ Smallest value of the support of the distribution. Must be a positive integer. } } \details{ The PMF is \deqn{P(Y=y) = (a/y)^{s} - (a/(1+y))^{s},\ \ s>0,\ \ y=a,a+1,\ldots,}{% P(Y=y) = (a/y)^(s) - / (a/(1+y))^(s), s>0, y=a,a+1,...,} where \eqn{s} is the positive shape parameter, and \eqn{a} is \code{start}. According to Moreno-Sanchez et al. (2016), this model fits quite well to about 40 percent of all the English books in the Project Gutenberg data base (about 30,000 texts). Multiple responses are handled. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Moreno-Sanchez, I. and Font-Clos, F. and Corral, A. Large-Scale Analysis of Zipf's Law in English Texts, 2016. PLoS ONE, \bold{11}(1), 1--19. } \author{ T. W. Yee } %\note{ % The \code{\link{zeta}} function may be used to compute values % of the zeta function. % % %} \seealso{ \code{\link{Diffzeta}}, \code{\link{zetaff}}, \code{\link{zeta}}, \code{\link{zipf}}, \code{\link{zipf}}. } \examples{ odata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data odata <- transform(odata, shape = loglink(-0.25 + x2, inverse = TRUE)) odata <- transform(odata, y1 = rdiffzeta(nn, shape)) with(odata, table(y1)) ofit <- vglm(y1 ~ x2, diffzeta, data = odata, trace = TRUE, crit = "coef") coef(ofit, matrix = TRUE) } \keyword{models} \keyword{regression} % VGAM/man/hatvalues.Rd0000644000176200001440000002012213565414527014053 0ustar liggesusers% 20120312 % Modified from file src/library/stats/man/influence.measures.Rd \name{hatvalues} %\title{Regression Deletion Diagnostics} \title{Hat Values and Regression Deletion Diagnostics} %\concept{studentized residuals} %\concept{standardized residuals} %\concept{Cook's distances} %\concept{Covariance ratios} \concept{DFBETAs} %\concept{DFFITs} %\alias{influence.measures} %\alias{print.infl} %\alias{summary.infl} %\alias{hat} \alias{hatvalues} %\alias{hatvalues.lm} \alias{hatvaluesvlm} \alias{hatplot} \alias{hatplot.vlm} %\alias{rstandard} %\alias{rstandard.lm} %\alias{rstandard.glm} %\alias{rstudent} %\alias{rstudent.lm} %\alias{rstudent.glm} \alias{dfbeta} \alias{dfbetavlm} %\alias{dfbetas} %\alias{dfbetas.lm} %\alias{dffits} %\alias{covratio} %\alias{cooks.distance} %\alias{cooks.distance.lm} %\alias{cooks.distance.glm} \usage{ hatvalues(model, \dots) hatvaluesvlm(model, type = c("diagonal", "matrix", "centralBlocks"), \dots) hatplot(model, \dots) hatplot.vlm(model, multiplier = c(2, 3), lty = "dashed", xlab = "Observation", ylab = "Hat values", ylim = NULL, \dots) dfbetavlm(model, maxit.new = 1, trace.new = FALSE, smallno = 1.0e-8, ...) } \arguments{ \item{model}{an \R object, typically returned by \code{\link{vglm}}. %or \code{\link{glm}}. } \item{type}{Character. The default is the first choice, which is a \eqn{nM \times nM}{nM x nM} matrix. If \code{type = "matrix"} then the \emph{entire} hat matrix is returned. If \code{type = "centralBlocks"} then \eqn{n} central \eqn{M \times M}{M x M} block matrices, in matrix-band format. } % \item{diag}{Logical. 
If \code{TRUE} then the diagonal elements % of the hat matrix are returned, else the \emph{entire} hat matrix is % returned. % In the latter case, it is a \eqn{nM \times nM}{nM x nM} matrix. % } \item{multiplier}{Numeric, the multiplier. The usual rule-of-thumb is that values greater than two or three times the average leverage (at least for the linear model) should be checked. } \item{lty, xlab, ylab, ylim}{Graphical parameters, see \code{\link[graphics]{par}} etc. The default of \code{ylim} is \code{c(0, max(hatvalues(model)))} which means that if the horizontal dashed lines cannot be seen then there are no particularly influential observations. } \item{maxit.new, trace.new, smallno}{ Having \code{maxit.new = 1} will give a one IRLS step approximation from the ordinary solution (and no warnings!). Else having \code{maxit.new = 10}, say, should usually mean convergence will occur for all observations when they are removed one-at-a-time. Else having \code{maxit.new = 2}, say, should usually mean some lack of convergence will occur when observations are removed one-at-a-time. Setting \code{trace.new = TRUE} will produce some running output at each IRLS iteration and for each individual row of the model matrix. The argument \code{smallno} multiplies each value of the original prior weight (often unity); setting it identically to zero will result in an error, but setting a very small value effectively removes that observation. } % \item{infl}{influence structure as returned by % \code{\link{lm.influence}} or \code{\link{influence}} (the latter % only for the \code{glm} method of \code{rstudent} and % \code{cooks.distance}).} % \item{res}{(possibly weighted) residuals, with proper default.} % \item{sd}{standard deviation to use, see default.} % \item{dispersion}{dispersion (for \code{\link{glm}} objects) to use, % see default.} % \item{hat}{hat values \eqn{H_{ii}}{H[i,i]}, see default.} % \item{type}{type of residuals for \code{glm} method for \code{rstandard.}} % \item{x}{the \eqn{X} or design matrix.} % \item{intercept}{should an intercept column be prepended to \code{x}?} \item{\dots}{further arguments, for example, graphical parameters for \code{hatplot.vlm()}. % passed to or from other methods. } } \description{ When complete, a suite of functions that can be used to compute some of the regression (leave-one-out deletion) diagnostics, for the VGLM class. % This suite of functions can be used to compute some of the % regression (leave-one-out deletion) diagnostics for linear and % generalized linear models discussed in Belsley, Kuh and Welsch % (1980), Cook and Weisberg (1982), etc. } \details{ The invocation \code{hatvalues(vglmObject)} should return a \eqn{n \times M}{n x M} matrix of the diagonal elements of the hat (projection) matrix of a \code{\link{vglm}} object. To do this, the QR decomposition of the object is retrieved or reconstructed, and then straightforward calculations are performed. The invocation \code{hatplot(vglmObject)} should plot the diagonal of the hat matrix for each of the \eqn{M} linear/additive predictors. By default, two horizontal dashed lines are added; hat values higher than these ought to be checked. % The primary high-level function is \code{influence.measures} % which produces a class \code{"infl"} object tabular display % showing the DFBETAS for each model variable, DFFITS, covariance % ratios, Cook's distances and the diagonal elements of the % hat matrix. Cases which are influential with respect to any % of these measures are marked with an asterisk. 
% The functions \code{dfbetas}, \code{dffits}, \code{covratio} % and \code{cooks.distance} provide direct access to the % corresponding diagnostic quantities. Functions \code{rstandard} % and \code{rstudent} give the standardized and Studentized % residuals respectively. (These re-normalize the residuals to % have unit variance, using an overall and leave-one-out measure % of the error variance respectively.) % Values for generalized linear models are approximations, as % described in Williams (1987) (except that Cook's distances % are scaled as \eqn{F} rather than as chi-square values). The % approximations can be poor when some cases have large influence. % The optional \code{infl}, \code{res} and \code{sd} arguments are % there to encourage the use of these direct access functions, % in situations where, e.g., the underlying basic influence % measures (from \code{\link{lm.influence}} or the generic % \code{\link{influence}}) are already available. % Note that cases with \code{weights == 0} are \emph{dropped} from all % these functions, but that if a linear model has been fitted with % \code{na.action = na.exclude}, suitable values are filled in for the % cases excluded during fitting. % The function \code{hat()} exists mainly for S (version 2) % compatibility; we recommend using \code{hatvalues()} instead. } \note{ It is hoped, soon, that the full suite of functions described at \code{\link[stats]{influence.measures}} will be written for VGLMs. This will enable general regression deletion diagnostics to be available for the entire VGLM class. % For \code{hatvalues}, \code{dfbeta}, and \code{dfbetas}, the method % for linear models also works for generalized linear models. } \author{ T. W. Yee. } %\references{ % Belsley, D. A., Kuh, E. and Welsch, R. E. (1980) % \emph{Regression Diagnostics}. % New York: Wiley. % % Cook, R. D. and Weisberg, S. (1982) % \emph{Residuals and Influence in Regression}. % London: Chapman and Hall. % % Williams, D. A. (1987) % Generalized linear model diagnostics using the deviance and single % case deletions. \emph{Applied Statistics} \bold{36}, 181--191. % % Fox, J. (1997) % \emph{Applied Regression, Linear Models, and Related Methods}. Sage. % % Fox, J. (2002) % \emph{An R and S-Plus Companion to Applied Regression}. % Sage Publ.; \url{http://www.socsci.mcmaster.ca/jfox/Books/Companion/}. % % %} \seealso{ \code{\link{vglm}}, \code{\link{cumulative}}, \code{\link[stats]{influence.measures}}. } \examples{ # Proportional odds model, p.179, in McCullagh and Nelder (1989) pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ let, cumulative, data = pneumo) hatvalues(fit) # n x M matrix, with positive values all.equal(sum(hatvalues(fit)), fit@rank) # Should be TRUE \dontrun{ par(mfrow = c(1, 2)) hatplot(fit, ylim = c(0, 1), las = 1, col = "blue") } } \keyword{regression} VGAM/man/yip88.Rd0000644000176200001440000001231313565414527013043 0ustar liggesusers\name{yip88} \alias{yip88} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Poisson Distribution (Yip (1988) algorithm) } \description{ Fits a zero-inflated Poisson distribution based on Yip (1988). } \usage{ yip88(link = "loglink", n.arg = NULL, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function for the usual \eqn{\lambda}{lambda} parameter. See \code{\link{Links}} for more choices. } \item{n.arg}{ The total number of observations in the data set. 
Needed when the response variable has all the zeros deleted from it, so that the number of zeros can be determined. } \item{imethod}{ Details at \code{\link{CommonVGAMffArguments}}. } } \details{ The method implemented here, Yip (1988), maximizes a \emph{conditional} likelihood. Consequently, the methodology used here deletes the zeros from the data set, and is thus related to the positive Poisson distribution (where \eqn{P(Y=0) = 0}). The probability function of \eqn{Y} is 0 with probability \eqn{\phi}{phi}, and Poisson(\eqn{\lambda}{lambda}) with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{% P(Y=0) = phi + (1-phi) * P(W=0)} where \eqn{W} is Poisson(\eqn{\lambda}{lambda}). The mean, \eqn{(1-\phi) \lambda}{(1-phi) * lambda}, can be obtained by the extractor function \code{fitted} applied to the object. This family function treats \eqn{\phi}{phi} as a scalar. If you want to model both \eqn{\phi}{phi} and \eqn{\lambda}{lambda} as a function of covariates, try \code{\link{zipoisson}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Yip, P. (1988) Inference about the mean of a Poisson distribution in the presence of a nuisance parameter. \emph{The Australian Journal of Statistics}, \bold{30}, 299--306. Angers, J-F. and Biswas, A. (2003) A Bayesian analysis of zero-inflated generalized Poisson model. \emph{Computational Statistics & Data Analysis}, \bold{42}, 37--46. } \author{ Thomas W. Yee } \note{ The data may be inputted in two ways. The first is when the response is a vector of positive values, with the argument \code{n} in \code{yip88} specifying the total number of observations. The second is simply include all the data in the response. In this case, the zeros are trimmed off during the computation, and the \code{x} and \code{y} slots of the object, if assigned, will reflect this. The estimate of \eqn{\phi}{phi} is placed in the \code{misc} slot as \code{@misc$pstr0}. However, this estimate is computed only for intercept models, i.e., the formula is of the form \code{y ~ 1}. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned. Yip (1988) only considered \eqn{\phi}{phi} being a scalar and not modelled as a function of covariates. To get around this limitation, try \code{\link{zipoisson}}. Inference obtained from \code{summary.vglm} and \code{summary.vgam} may or may not be correct. In particular, the p-values, standard errors and degrees of freedom may need adjustment. Use simulation on artificial data to check that these are reasonable. } \seealso{ \code{\link{zipoisson}}, \code{\link{Zipois}}, \code{\link{zapoisson}}, \code{\link{pospoisson}}, \code{\link{poissonff}}, \code{\link{dzipois}}. 
} \examples{ phi <- 0.35; lambda <- 2 # Generate some artificial data y <- rzipois(n <- 1000, lambda, phi) table(y) # Two equivalent ways of fitting the same model fit1 <- vglm(y ~ 1, yip88(n = length(y)), subset = y > 0) fit2 <- vglm(y ~ 1, yip88, trace = TRUE, crit = "coef") (true.mean <- (1-phi) * lambda) mean(y) head(fitted(fit1)) fit1@misc$pstr0 # The estimate of phi # Compare the ZIP with the positive Poisson distribution pp <- vglm(y ~ 1, pospoisson, subset = y > 0, crit = "c") coef(pp) Coef(pp) coef(fit1) - coef(pp) # Same head(fitted(fit1) - fitted(pp)) # Different # Another example (Angers and Biswas, 2003) --------------------- abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1)) abdata <- subset(abdata, w > 0) yy <- with(abdata, rep(y, w)) fit3 <- vglm(yy ~ 1, yip88(n = length(yy)), subset = yy > 0) fit3@misc$pstr0 # Estimate of phi (they get 0.5154 with SE 0.0707) coef(fit3, matrix = TRUE) Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520) head(fitted(fit3)) mean(yy) # Compare this with fitted(fit3) } \keyword{models} \keyword{regression} % 20140101; try to put into a data frame but it gives a numerical % problem: %# Another example (Angers and Biswas, 2003) --------------------- %abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1)) %abdata <- subset(abdata, w > 0) %abdata <- subset(abdata, y > 0) %Abdata <- data.frame(yy = with(abdata, rep(y, w))) %fit3 <- vglm(yy ~ 1, yip88(n = nrow(Abdata)), data = Abdata) %fit3@misc$pstr0 # Estimate of phi (they get 0.5154 with SE 0.0707) %coef(fit3, matrix = TRUE) %Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520) %head(fitted(fit3)) %with(Abdata, mean(yy)) # Compare this with fitted(fit3) VGAM/man/tobitUC.Rd0000644000176200001440000000623213565414527013436 0ustar liggesusers\name{Tobit} \alias{Tobit} \alias{dtobit} \alias{ptobit} \alias{qtobit} \alias{rtobit} \title{The Tobit Distribution} \description{ Density, distribution function, quantile function and random generation for the Tobit model. } \usage{ dtobit(x, mean = 0, sd = 1, Lower = 0, Upper = Inf, log = FALSE) ptobit(q, mean = 0, sd = 1, Lower = 0, Upper = Inf, lower.tail = TRUE, log.p = FALSE) qtobit(p, mean = 0, sd = 1, Lower = 0, Upper = Inf, lower.tail = TRUE, log.p = FALSE) rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{Lower, Upper}{vector of lower and upper thresholds. } \item{mean, sd, lower.tail, log, log.p}{ see \code{\link[stats:Normal]{rnorm}}. } } \value{ \code{dtobit} gives the density, \code{ptobit} gives the distribution function, \code{qtobit} gives the quantile function, and \code{rtobit} generates random deviates. } \author{ T. W. Yee } \details{ See \code{\link{tobit}}, the \pkg{VGAM} family function for estimating the parameters, for details. Note that the density at \code{Lower} and \code{Upper} is the area to the left and right of those points. Thus there are two spikes (but less in value); see the example below. Consequently, \code{dtobit(Lower) + dtobit(Upper) + } the area in between equals unity. % 20141223; this is old: % Note that the density at \code{Lower} and \code{Upper} is the % value of \code{\link[stats:Normal]{dnorm}} evaluated there plus % the area to the left/right of that point too. } %\note{ %} \seealso{ \code{\link{tobit}}, \code{\link[stats:Normal]{rnorm}}.
} \examples{ mu <- 0.5; x <- seq(-2, 4, by = 0.01) Lower <- -1; Upper <- 2.0 integrate(dtobit, lower = Lower, upper = Upper, mean = mu, Lower = Lower, Upper = Upper)$value + dtobit(Lower, mean = mu, Lower = Lower, Upper = Upper) + dtobit(Upper, mean = mu, Lower = Lower, Upper = Upper) # Adds to unity \dontrun{ plot(x, ptobit(x, m = mu, Lower = Lower, Upper = Upper), type = "l", ylim = 0:1, las = 1, col = "orange", ylab = paste("ptobit(m = ", mu, ", sd = 1, Lower =", Lower, ", Upper =", Upper, ")"), main = "Orange is cumulative distribution function; blue is density", sub = "Purple lines are the 10,20,...,90 percentiles") abline(h = 0) lines(x, dtobit(x, m = mu, Lower = Lower, Upper = Upper), col = "blue") probs <- seq(0.1, 0.9, by = 0.1) Q <- qtobit(probs, m = mu, Lower = Lower, Upper = Upper) lines(Q, ptobit(Q, m = mu, Lower = Lower, Upper = Upper), col = "purple", lty = "dashed", type = "h") lines(Q, dtobit(Q, m = mu, Lower = Lower, Upper = Upper), col = "darkgreen", lty = "dashed", type = "h") abline(h = probs, col = "purple", lty = "dashed") max(abs(ptobit(Q, m = mu, Lower = Lower, Upper = Upper) - probs)) # Should be 0 endpts <- c(Lower, Upper) # Endpoints have a spike (not quite, actually) lines(endpts, dtobit(endpts, m = mu, Lower = Lower, Upper = Upper), col = "blue", lwd = 3, type = "h") } } \keyword{distribution} VGAM/man/genbetaII.Rd0000644000176200001440000001405013565414527013711 0ustar liggesusers\name{genbetaII} \alias{genbetaII} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generalized Beta Distribution of the Second Kind } \description{ Maximum likelihood estimation of the 4-parameter generalized beta II distribution. } \usage{ genbetaII(lscale = "loglink", lshape1.a = "loglink", lshape2.p = "loglink", lshape3.q = "loglink", iscale = NULL, ishape1.a = NULL, ishape2.p = NULL, ishape3.q = NULL, lss = TRUE, gscale = exp(-5:5), gshape1.a = exp(-5:5), gshape2.p = exp(-5:5), gshape3.q = exp(-5:5), zero = "shape") } %- maybe also 'usage' for other objects documented here. % zero = ifelse(lss, -(2:4), -c(1, 3:4)) \arguments{ \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information. } \item{lshape1.a, lscale, lshape2.p, lshape3.q}{ Parameter link functions applied to the shape parameter \code{a}, scale parameter \code{scale}, shape parameter \code{p}, and shape parameter \code{q}. All four parameters are positive. See \code{\link{Links}} for more choices. } \item{iscale, ishape1.a, ishape2.p, ishape3.q}{ Optional initial values for the parameters. A \code{NULL} means a value is computed internally using the arguments \code{gscale}, \code{gshape1.a}, etc. } \item{gscale, gshape1.a, gshape2.p, gshape3.q}{ See \code{\link{CommonVGAMffArguments}} for information. Replaced by \code{iscale}, \code{ishape1.a} etc. if given. } % \item{gshape1.a, gscale, gshape2.p, gshape3.q}{ % See \code{\link{CommonVGAMffArguments}} for information. % } \item{zero}{ The default is to set all the shape parameters to be intercept-only. See \code{\link{CommonVGAMffArguments}} for information. % An integer-valued vector specifying which % linear/additive predictors are modelled as intercepts only. } } \details{ This distribution is most useful for unifying a substantial number of size distributions. For example, the Singh-Maddala, Dagum, Fisk (log-logistic), Lomax (Pareto type II), inverse Lomax, beta distribution of the second kind distributions are all special cases. Full details can be found in Kleiber and Kotz (2003), and Brazauskas (2002). 
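For instance, the Singh-Maddala distribution arises when \eqn{p=1}, which can be checked numerically with the density functions (a small sketch, assuming the usual argument names of \code{\link{dgenbetaII}} and \code{\link{dsinmad}}):
\preformatted{
x <- c(0.5, 1, 2, 4)
dgenbetaII(x, scale = 2, shape1.a = 3, shape2.p = 1, shape3.q = 2)
dsinmad(x, scale = 2, shape1.a = 3, shape3.q = 2)  # Should match
}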
The argument names given here are used by other families that are special cases of this family. Fisher scoring is used here and for the special cases too. The 4-parameter generalized beta II distribution has density \deqn{f(y) = a y^{ap-1} / [b^{ap} B(p,q) \{1 + (y/b)^a\}^{p+q}]}{% f(y) = a y^(ap-1) / [b^(ap) B(p,q) (1 + (y/b)^a)^(p+q)]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}. Here \eqn{B} is the beta function, and \eqn{b} is the scale parameter \code{scale}, while the others are shape parameters. The mean is \deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(q - 1/a) / (\Gamma(p) \, \Gamma(q))}{% E(Y) = b gamma(p + 1/a) gamma(q - 1/a) / ( gamma(p) gamma(q))} provided \eqn{-ap < 1 < aq}; these are returned as the fitted values. %The distribution is motivated by the incomplete beta function %\eqn{B_y(p,q)} which is the integral from 0 to \eqn{y} of the integrand %\eqn{u^{p-1} (1-u)^{q-1}}{u^(p-1) (1-u)^(q-1)} where \eqn{y>0}. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. Brazauskas, V. (2002) Fisher information matrix for the Feller-Pareto distribution. \emph{Statistics & Probability Letters}, \bold{59}, 159--167. } \author{ T. W. Yee, with help from Victor Miranda. } \note{ The default is to use a grid search with respect to all four parameters; this is quite costly and is time consuming. If the self-starting initial values fail, try experimenting with the initial value arguments. Also, the constraint \eqn{-ap < 1 < aq} may be violated as the iterations progress so it pays to monitor convergence, e.g., set \code{trace = TRUE}. Successful convergence depends on having very good initial values. This is rather difficult for this distribution so that a grid search is conducted by default. One suggestion for increasing the estimation reliability is to set \code{stepsize = 0.5} and \code{maxit = 100}; see \code{\link{vglm.control}}. } \section{Warning}{ This distribution is very flexible and it is not generally recommended to use this family function when the sample size is small---numerical problems easily occur with small samples. Probably several hundred observations at least are needed in order to estimate the parameters with any level of confidence. Neither is the inclusion of covariates recommended at all---not unless there are several thousand observations. The mean is finite only when \eqn{-ap < 1 < aq}, and this can be easily violated by the parameter estimates for small sample sizes. Try fitting some of the special cases of this distribution (e.g., \code{\link{sinmad}}, \code{\link{fisk}}, etc.) first, and then possibly use those models for initial values for this distribution. } \seealso{ \code{\link{dgenbetaII}}, \code{\link{betaff}}, \code{\link{betaII}}, \code{\link{dagum}}, \code{\link{sinmad}}, \code{\link{fisk}}, \code{\link{lomax}}, \code{\link{inv.lomax}}, \code{\link{paralogistic}}, \code{\link{inv.paralogistic}}, \code{\link{lino}}, \code{\link{CommonVGAMffArguments}}, \code{\link{vglm.control}}. } \examples{ \dontrun{ gdata <- data.frame(y = rsinmad(3000, shape1 = exp(1), scale = exp(2), shape3 = exp(1))) # A special case! 
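# Note: these data come from rsinmad(), i.e., the shape2.p = 1 special
# case of genbetaII(); fitting that submodel first can supply initial
# values for the full 4-parameter fit (see the Warning section).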
fit <- vglm(y ~ 1, genbetaII(lss = FALSE), data = gdata, trace = TRUE) fit <- vglm(y ~ 1, data = gdata, trace = TRUE, genbetaII(ishape1.a = 3, iscale = 7, ishape3.q = 2.3)) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } } \keyword{models} \keyword{regression} VGAM/man/simulate.vlm.Rd0000644000176200001440000001664013565414527014511 0ustar liggesusers% 20131230; adapted from simulate.Rd from R 3.0.2 % \newcommand{\CRANpkg}{\href{http://CRAN.R-project.org/package=#1}{\pkg{#1}}} \name{simulate.vlm} \title{Simulate Responses for VGLMs and VGAMs} \description{ Simulate one or more responses from the distribution corresponding to a fitted model object. } \usage{ \method{simulate}{vlm}(object, nsim = 1, seed = NULL, \dots) } \alias{simulate.vlm} \arguments{ \item{object}{an object representing a fitted model. Usually an object of class \code{\link{vglm-class}} or \code{\link{vgam-class}}. } \item{nsim, seed}{ Same as \code{\link[stats]{simulate}}. } % \item{seed}{an object specifying if and how the random number % generator should be initialized (\sQuote{seeded}).\cr % For the "lm" method, either \code{NULL} or an integer that will be % used in a call to \code{set.seed} before simulating the response % vectors. If set, the value is saved as the \code{"seed"} attribute % of the returned value. The default, \code{NULL} will not change the % random generator state, and return \code{\link{.Random.seed}} as the % \code{"seed"} attribute, see \sQuote{Value}. % } \item{\dots}{additional optional arguments.} } \value{ Similar to \code{\link[stats]{simulate}}. Note that many \pkg{VGAM} family functions can handle multiple responses. This can result in a longer data frame with more rows (\code{nsim} multiplied by \code{n} rather than the ordinary \code{n}). In the future an argument may be available so that there is always \code{n} rows no matter how many responses were inputted. % Typically, a list of length \code{nsim} of simulated responses. Where % appropriate the result can be a data frame (which is a special type of % list). % %% a *matrix* seems very natural and is more efficient % %% for large-scale simulation, already for stats:::simulate.lm (in ../R/lm.R ) % For the \code{"lm"} method, the result is a data frame with an % attribute \code{"seed"}. If argument \code{seed} is \code{NULL}, the % attribute is the value of \code{\link{.Random.seed}} before the % simulation was started; otherwise it is the value of the argument with % a \code{"kind"} attribute with value \code{as.list(\link{RNGkind}())}. } \details{ This is a methods function for \code{\link[stats]{simulate}} and hopefully should behave in a very similar manner. Only \pkg{VGAM} family functions with a \code{simslot} slot have been implemented for \code{\link[stats]{simulate}}. 
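A typical call is sketched below (assuming \code{fit} is a fitted \code{\link{vglm}} object whose family function has a \code{simslot} slot):
\preformatted{
sims <- simulate(fit, nsim = 10, seed = 123)
# sims then holds 10 simulated response sets drawn from the fitted model
}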
} \seealso{ Currently the \pkg{VGAM} family functions with a \code{simslot} slot are: \code{\link{alaplace1}}, \code{\link{alaplace2}}, \code{\link{betabinomial}}, \code{\link{betabinomialff}}, \code{\link{betaR}}, \code{\link{betaff}}, \code{\link{biamhcop}}, \code{\link{bifrankcop}}, \code{\link{bilogistic}}, \code{\link{binomialff}}, \code{\link{binormal}}, \code{\link{binormalcop}}, \code{\link{biclaytoncop}}, \code{\link{cauchy}}, \code{\link{cauchy1}}, \code{\link{chisq}}, \code{\link{dirichlet}}, \code{\link{dagum}}, \code{\link{erlang}}, \code{\link{exponential}}, \code{\link{bifgmcop}}, \code{\link{fisk}}, \code{\link{gamma1}}, \code{\link{gamma2}}, \code{\link{gammaR}}, \code{\link{gengamma.stacy}}, \code{\link{geometric}}, \code{\link{gompertz}}, \code{\link{gumbelII}}, \code{\link{hzeta}}, \code{\link{inv.lomax}}, \code{\link{inv.paralogistic}}, \code{\link{kumar}}, \code{\link{lgamma1}}, \code{\link{lgamma3}}, \code{\link{lindley}}, \code{\link{lino}}, \code{\link{logff}}, \code{\link{logistic1}}, \code{\link{logistic}}, \code{\link{lognormal}}, \code{\link{lomax}}, \code{\link{makeham}}, \code{\link{negbinomial}}, \code{\link{negbinomial.size}}, \code{\link{paralogistic}}, \code{\link{perks}}, \code{\link{poissonff}}, \code{\link{posnegbinomial}}, \code{\link{posnormal}}, \code{\link{pospoisson}}, \code{\link{polya}}, \code{\link{polyaR}}, \code{\link{posbinomial}}, \code{\link{rayleigh}}, \code{\link{riceff}}, \code{\link{simplex}}, \code{\link{sinmad}}, \code{\link{slash}}, \code{\link{studentt}}, \code{\link{studentt2}}, \code{\link{studentt3}}, \code{\link{triangle}}, \code{\link{uninormal}}, \code{\link{yulesimon}}, \code{\link{zageometric}}, \code{\link{zageometricff}}, \code{\link{zanegbinomial}}, \code{\link{zanegbinomialff}}, \code{\link{zapoisson}}, \code{\link{zapoissonff}}, \code{\link{zigeometric}}, \code{\link{zigeometricff}}, \code{\link{zinegbinomial}}, \code{\link{zipf}}, \code{\link{zipoisson}}, \code{\link{zipoissonff}}. % \code{\link{logF}}, % \code{\link{tobit}}, See also \code{\link{RNG}} about random number generation in \R, \code{\link{vglm}}, \code{\link{vgam}} for model fitting. } \section{Warning}{ With multiple response and/or multivariate responses, the order of the elements may differ. For some \pkg{VGAM} families, the order is \eqn{n \times N \times F}{n x N x F}, where \eqn{n} is the sample size, \eqn{N} is \code{nsim} and \eqn{F} is \code{ncol(fitted(vglmObject))}. For other \pkg{VGAM} families, the order is \eqn{n \times F \times N}{n x F x N}. An example of each is given below. 
} \examples{ nn <- 10; mysize <- 20; set.seed(123) bdata <- data.frame(x2 = rnorm(nn)) bdata <- transform(bdata, y1 = rbinom(nn, size = mysize, p = logitlink(1+x2, inverse = TRUE)), y2 = rbinom(nn, size = mysize, p = logitlink(1+x2, inverse = TRUE)), f1 = factor(as.numeric(rbinom(nn, size = 1, p = logitlink(1+x2, inverse = TRUE))))) (fit1 <- vglm(cbind(y1, aaa = mysize - y1) ~ x2, # Matrix response (2-colns) binomialff, data = bdata)) (fit2 <- vglm(f1 ~ x2, binomialff, model = TRUE, data = bdata)) # Factor response set.seed(123); simulate(fit1, nsim = 8) set.seed(123); c(simulate(fit2, nsim = 3)) # Use c() when model = TRUE # An n x N x F example set.seed(123); n <- 100 bdata <- data.frame(x2 = runif(n), x3 = runif(n)) bdata <- transform(bdata, y1 = rnorm(n, 1 + 2 * x2), y2 = rnorm(n, 3 + 4 * x2)) fit1 <- vglm(cbind(y1, y2) ~ x2, binormal(eq.sd = TRUE), data = bdata) nsim <- 1000 # Number of simulations for each observation my.sims <- simulate(fit1, nsim = nsim) dim(my.sims) # A data frame aaa <- array(unlist(my.sims), c(n, nsim, ncol(fitted(fit1)))) # n by N by F summary(rowMeans(aaa[, , 1]) - fitted(fit1)[, 1]) # Should be all 0s summary(rowMeans(aaa[, , 2]) - fitted(fit1)[, 2]) # Should be all 0s # An n x F x N example n <- 100; set.seed(111); nsim <- 1000 zdata <- data.frame(x2 = runif(n)) zdata <- transform(zdata, lambda1 = loglink(-0.5 + 2 * x2, inverse = TRUE), lambda2 = loglink( 0.5 + 2 * x2, inverse = TRUE), pstr01 = logitlink( 0, inverse = TRUE), pstr02 = logitlink(-1.0, inverse = TRUE)) zdata <- transform(zdata, y1 = rzipois(n, lambda = lambda1, pstr0 = pstr01), y2 = rzipois(n, lambda = lambda2, pstr0 = pstr02)) zip.fit <- vglm(cbind(y1, y2) ~ x2, zipoissonff, data = zdata, crit = "coef") my.sims <- simulate(zip.fit, nsim = nsim) dim(my.sims) # A data frame aaa <- array(unlist(my.sims), c(n, ncol(fitted(zip.fit)), nsim)) # n by F by N summary(rowMeans(aaa[, 1, ]) - fitted(zip.fit)[, 1]) # Should be all 0s summary(rowMeans(aaa[, 2, ]) - fitted(zip.fit)[, 2]) # Should be all 0s } \keyword{models} \keyword{datagen} VGAM/man/double.cens.normal.Rd0000644000176200001440000000557313565414527015564 0ustar liggesusers\name{double.cens.normal} \alias{double.cens.normal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Univariate Normal Distribution with Double Censoring } \description{ Maximum likelihood estimation of the two parameters of a univariate normal distribution when there is double censoring. } \usage{ double.cens.normal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loglink", imu = NULL, isd = NULL, zero = "sd") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{r1, r2}{ Integers. Number of smallest and largest values censored, respectively. } \item{lmu, lsd}{ Parameter link functions applied to the mean and standard deviation. See \code{\link{Links}} for more choices. } \item{imu, isd, zero}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ This family function uses the Fisher information matrix given in Harter and Moore (1966). The matrix is not diagonal if either \code{r1} or \code{r2} are positive. By default, the mean is the first linear/additive predictor and the log of the standard deviation is the second linear/additive predictor. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Harter, H. L. and Moore, A. H. 
  (1966)
  Iterative maximum-likelihood estimation of the parameters of normal
  populations from singly and doubly censored samples.
  \emph{Biometrika}, \bold{53}, 205--213.

}
\author{ T. W. Yee }
\note{
  This family function only handles a vector or one-column matrix
  response. The \code{weights} argument, if used, is interpreted as
  frequencies; therefore it must be a vector of positive integer values.

  With no censoring at all (the default), it is better (and equivalent)
  to use \code{\link{uninormal}}.

}
\seealso{
  \code{\link{uninormal}},
  \code{\link{cens.normal}},
  \code{\link{tobit}}.
}
\examples{\dontrun{ # Repeat the simulations described in Harter and Moore (1966)
SIMS <- 100  # Number of simulations (change this to 1000)
mu.save <- sd.save <- rep(NA, len = SIMS)
r1 <- 0; r2 <- 4; nn <- 20
for (sim in 1:SIMS) {
  y <- sort(rnorm(nn))
  y <- y[(1+r1):(nn-r2)]  # Delete r1 smallest and r2 largest
  fit <- vglm(y ~ 1, double.cens.normal(r1 = r1, r2 = r2))
  mu.save[sim] <- predict(fit)[1, 1]
  sd.save[sim] <- exp(predict(fit)[1, 2])  # Assumes a log link and ~ 1
}
c(mean(mu.save), mean(sd.save))  # Should be c(0,1)
c(sd(mu.save), sd(sd.save))
}

# Data from Sarhan and Greenberg (1962); MLEs are mu = 9.2606, sd = 1.3754
strontium90 <- data.frame(y = c(8.2, 8.4, 9.1, 9.8, 9.9))
fit <- vglm(y ~ 1, double.cens.normal(r1 = 2, r2 = 3, isd = 6),
            data = strontium90, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{models}
\keyword{regression}
VGAM/man/foldsqrtlink.Rd0000644000176200001440000001102413565414527014574 0ustar liggesusers\name{foldsqrtlink}
\alias{foldsqrtlink}
% \alias{foldsqrt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Folded Square Root Link Function }
\description{
  Computes the folded square root transformation, including its inverse
  and the first two derivatives.
}
\usage{
foldsqrtlink(theta, min = 0, max = 1, mux = sqrt(2),
             inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{theta}{
  Numeric or character. See below for further details.

  }
  \item{min, max, mux}{
  These are called \eqn{L}, \eqn{U} and \eqn{K} below.

  }
  \item{inverse, deriv, short, tag}{
  Details at \code{\link{Links}}.

  }
}
\details{
  The folded square root link function can be applied to parameters
  that lie between \eqn{L} and \eqn{U} inclusive. Numerical values of
  \code{theta} out of range result in \code{NA} or \code{NaN}.

}
\value{
  For \code{foldsqrtlink} with \code{deriv = 0}:
  \eqn{K (\sqrt{\theta-L} - \sqrt{U-\theta})}{K * (sqrt(theta-L) - sqrt(U-theta))}
  or \code{mux * (sqrt(theta-min) - sqrt(max-theta))}
  when \code{inverse = FALSE},
  and if \code{inverse = TRUE} then a more complicated function
  that returns \code{NA} unless \code{theta} is between
  \code{-mux*sqrt(max-min)} and \code{mux*sqrt(max-min)}.

  For \code{deriv = 1}, the function returns
  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of
  \code{theta} if \code{inverse = FALSE}, else if
  \code{inverse = TRUE} then it returns the reciprocal.

}
%\references{
%
%}
\author{ Thomas W. Yee }
\note{
  By default, if \code{theta} is 0 or 1 then the link function value is
  \code{-sqrt(2)} or \code{+sqrt(2)} respectively. These are finite
  values; therefore this link function cannot be used for general
  modelling of probabilities because of numerical problems, e.g., with
  \code{\link{binomialff}}, \code{\link{cumulative}}. See the example
  below.

}
\seealso{
  \code{\link{Links}}.
} \examples{ p <- seq(0.01, 0.99, by = 0.01) foldsqrtlink(p) max(abs(foldsqrtlink(foldsqrtlink(p), inverse = TRUE) - p)) # Should be 0 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01)) foldsqrtlink(p) # Has NAs \dontrun{ p <- seq(0.01, 0.99, by = 0.01) par(mfrow = c(2, 2), lwd = (mylwd <- 2)) y <- seq(-4, 4, length = 100) for (d in 0:1) { matplot(p, cbind(logitlink(p, deriv = d), foldsqrtlink(p, deriv = d)), type = "n", col = "purple", ylab = "transformation", las = 1, main = if (d == 0) "Some probability link functions" else "First derivative") lines(p, logitlink(p, deriv = d), col = "limegreen") lines(p, probitlink(p, deriv = d), col = "purple") lines(p, clogloglink(p, deriv = d), col = "chocolate") lines(p, foldsqrtlink(p, deriv = d), col = "tan") if (d == 0) { abline(v = 0.5, h = 0, lty = "dashed") legend(0, 4.5, c("logitlink", "probitlink", "clogloglink", "foldsqrtlink"), lwd = 2, col = c("limegreen", "purple", "chocolate", "tan")) } else abline(v = 0.5, lty = "dashed") } for (d in 0) { matplot(y, cbind(logitlink(y, deriv = d, inverse = TRUE), foldsqrtlink(y, deriv = d, inverse = TRUE)), type = "n", col = "purple", xlab = "transformation", ylab = "p", lwd = 2, las = 1, main = if (d == 0) "Some inverse probability link functions" else "First derivative") lines(y, logitlink(y, deriv = d, inverse = TRUE), col = "limegreen") lines(y, probitlink(y, deriv = d, inverse = TRUE), col = "purple") lines(y, clogloglink(y, deriv = d, inverse = TRUE), col = "chocolate") lines(y, foldsqrtlink(y, deriv = d, inverse = TRUE), col = "tan") if (d == 0) { abline(h = 0.5, v = 0, lty = "dashed") legend(-4, 1, c("logitlink", "probitlink", "clogloglink", "foldsqrtlink"), lwd = 2, col = c("limegreen", "purple", "chocolate", "tan")) } } par(lwd = 1) } # This is lucky to converge fit.h <- vglm(agaaus ~ sm.bs(altitude), binomialff(link = foldsqrtlink(mux = 5)), data = hunua, trace = TRUE) \dontrun{ plotvgam(fit.h, se = TRUE, lcol = "orange", scol = "orange", main = "Orange is Hunua, Blue is Waitakere") } head(predict(fit.h, hunua, type = "response")) \dontrun{ # The following fails. pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ let, cumulative(link = foldsqrtlink(mux = 10), par = TRUE, rev = TRUE), data = pneumo, trace = TRUE, maxit = 200) } } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/truncweibull.Rd0000644000176200001440000001075113565414527014605 0ustar liggesusers\name{truncweibull} \alias{truncweibull} %\alias{truncweibullff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Truncated Weibull Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter Weibull distribution with lower truncation. No observations should be censored. } \usage{ truncweibull(lower.limit = 1e-5, lAlpha = "loglink", lBetaa = "loglink", iAlpha = NULL, iBetaa = NULL, nrfs = 1, probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = "Betaa") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lower.limit}{ Positive lower truncation limits. Recycled to the same dimension as the response, going across rows first. The default, being close to 0, should mean effectively the same results as \code{\link{weibullR}} if there are no response values that are smaller. } \item{lAlpha, lBetaa}{ Parameter link functions applied to the (positive) parameters \code{Alpha} (called \eqn{\alpha} below) and (positive) \code{Betaa} (called \eqn{\beta} below). See \code{\link{Links}} for more choices. 
}
  \item{iAlpha, iBetaa}{
  See \code{\link{CommonVGAMffArguments}}.

  }
  \item{imethod, nrfs, zero, probs.y}{
  Details at \code{\link{weibullR}} and \code{\link{CommonVGAMffArguments}}.

  }
}
\details{
  MLEs of the two parameters of the Weibull distribution are computed,
  subject to lower truncation.
  That is, all response values are greater than \code{lower.limit},
  element-wise. For a particular observation this is any known positive
  value. This function is currently based directly on Wingo (1989) and
  his parameterization is used (it differs from that of
  \code{\link{weibullR}}).
  In particular, \eqn{\beta = a} and \eqn{\alpha = (1/b)^a}
  where \eqn{a} and \eqn{b} are as in \code{\link{weibullR}} and
  \code{\link[stats:Weibull]{dweibull}}.

% More details about the Weibull density are \code{\link{weibullR}}.

  Upon fitting the \code{extra} slot has a component called
  \code{lower.limit} which is of the same dimension as the response.
  The fitted values are the mean, which is computed using
  \code{\link{pgamma.deriv}} and \code{\link{pgamma.deriv.unscaled}}.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.

}
\references{
  Wingo, D. R. (1989)
  The left-truncated Weibull distribution: theory and computation.
  \emph{Statistical Papers}, \bold{30}(1), 39--48.

}
\author{ T. W. Yee }
\note{
  More improvements need to be made, e.g., initial values are currently
  based on no truncation. This \pkg{VGAM} family function handles
  multiple responses.

}
\section{Warning}{
  This function may be converted to the same parameterization as
  \code{\link{weibullR}} at any time. Yet to do: one element of the EIM
  may be wrong (due to two interpretations of a formula; but it seems
  to work). Convergence is slower than usual and this may imply
  something is wrong; if so, use argument \code{maxit}. In fact, it is
  probably because \code{\link{pgamma.deriv.unscaled}} is inaccurate at
  \code{q = 1} and \code{q = 2}. Also, convergence should be monitored,
  especially if the truncation means that a large proportion of the
  data is lost compared to an ordinary Weibull distribution.

}
\seealso{
  \code{\link{weibullR}},
  \code{\link[stats:Weibull]{dweibull}},
  \code{\link{pgamma.deriv}},
  \code{\link{pgamma.deriv.unscaled}}.

}
\examples{
nn <- 5000; prop.lost <- 0.40   # Proportion lost to truncation
wdata <- data.frame(x2 = runif(nn))  # Complete Weibull data
wdata <- transform(wdata, Betaa = exp(1))  # > 2 is okay (satisfies regularity conds)
wdata <- transform(wdata, Alpha = exp(0.5 - 1 * x2))
wdata <- transform(wdata, Shape = Betaa,
#                  aaa   = Betaa,
#                  bbb   = 1 / Alpha^(1 / Betaa),
                   Scale = 1 / Alpha^(1 / Betaa))
wdata <- transform(wdata, y2 = rweibull(nn, shape = Shape, scale = Scale))
summary(wdata)

lower.limit2 <- with(wdata, quantile(y2, prob = prop.lost))  # Proportion lost
wdata <- subset(wdata, y2 > lower.limit2)  # Smaller due to truncation

fit1 <- vglm(y2 ~ x2, maxit = 100, trace = TRUE,
             truncweibull(lower.limit = lower.limit2), data = wdata)
coef(fit1, matrix = TRUE)
summary(fit1)
vcov(fit1)
head(fit1@extra$lower.limit)
}
\keyword{models}
\keyword{regression}
VGAM/man/meplot.Rd0000644000176200001440000001000613565414527013357 0ustar liggesusers\name{meplot}
\alias{meplot}
\alias{meplot.default}
\alias{meplot.vlm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Mean Excess Plot }
\description{
  Mean excess plot (also known as a mean residual life plot), a
  diagnostic plot for the generalized Pareto distribution (GPD).
}
\usage{
meplot(object, ...)
meplot.default(y, main = "Mean Excess Plot",
    xlab = "Threshold", ylab = "Mean Excess", lty = c(2, 1:2),
    conf = 0.95, col = c("blue", "black", "blue"), type = "l", ...)
meplot.vlm(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{y}{ A numerical vector. \code{NA}s etc. are not allowed.}
  \item{main, xlab, ylab}{Character.
  Overall title for the plot, and titles for the x- and y-axes.
  }
  \item{lty}{Line type.
  The second value is for the mean excess value, the first and third
  values are for the envelope surrounding the confidence interval.
  }
  \item{conf}{Confidence level.
  The default results in approximate 95 percent confidence intervals
  for each mean excess value.
  }
  \item{col}{Colour of the three lines.
  }
  \item{type}{Type of plot.
  The default means lines are joined between the mean excesses and also
  the upper and lower limits of the confidence intervals.
  }
  \item{object}{
  An object that inherits class \code{"vlm"}, usually of class
  \code{\link{vglm-class}} or \code{\link{vgam-class}}.
  }
  \item{\dots}{
  Graphical arguments passed into \code{\link[graphics]{plot}}.
  See \code{\link[graphics]{par}} for an exhaustive list. The arguments
  \code{xlim} and \code{ylim} are particularly useful.
  }
}
\details{
  If \eqn{Y} has a GPD with scale parameter \eqn{\sigma}{sigma} and
  shape parameter \eqn{\xi<1}{xi<1}, and if \eqn{y>0}, then
  \deqn{E(Y-u|Y>u) = \frac{\sigma+\xi u}{1-\xi}.}{%
        E(Y-u|Y>u) = (sigma + xi*u)/(1 - xi).}
  It is a linear function in \eqn{u}, the threshold.
  Note that \eqn{Y-u} is called the \emph{excess} and
  values of \eqn{Y} greater than \eqn{u} are called \emph{exceedances}.
  The empirical version used by these functions uses sample means to
  estimate the left-hand side of the equation.
  Values of \eqn{u} in the plot are the values of \eqn{y} itself.
  If the plot is roughly a straight line then the GPD is a good fit;
  this plot can be used to select an appropriate threshold value. See
  \code{\link{gpd}} for more details. If the plot is flat then the data
  may be exponential, and if it is curved then it may be Weibull or
  gamma. There is often a lot of variance/fluctuation at the RHS of the
  plot due to fewer observations.

  The function \code{meplot} is generic, and
  \code{meplot.default} and \code{meplot.vlm} are method functions for
  mean excess plots.

}
\value{
  A list is returned invisibly with the following components.
  \item{threshold }{The x-axis values. }
  \item{meanExcess }{The y-axis values. Each value is a sample mean
  minus a value \eqn{u}. }
  \item{plusminus }{The amount which is added or subtracted from the
  mean excess to give the confidence interval. The last value is
  \code{NA} because it is based on one observation. }
}
\references{
  Davison, A. C. and Smith, R. L. (1990)
  Models for exceedances over high thresholds (with discussion).
  \emph{Journal of the Royal Statistical Society, Series B,
  Methodological}, \bold{52}, 393--442.

  Coles, S. (2001)
  \emph{An Introduction to Statistical Modeling of Extreme Values}.
  London: Springer-Verlag.

}
\author{ T. W. Yee }
\note{
  The function is designed for speed and not accuracy; therefore huge
  data sets with extremely large values may cause failure (the function
  \code{\link[base]{cumsum}} is used). Ties may not be well handled.

}
\seealso{
  \code{\link{gpd}}.
}
\examples{
\dontrun{meplot(with(venice90, sealevel), las = 1) -> ii
names(ii)
abline(h = ii$meanExcess[1], col = "orange", lty = "dashed")

par(mfrow = c(2, 2))
for (ii in 1:4)
  meplot(rgpd(1000), col = c("orange", "blue", "orange"))
}
}
\keyword{models}
\keyword{regression}
VGAM/man/cens.gumbel.Rd0000644000176200001440000001021713565414527014265 0ustar liggesusers\name{cens.gumbel}
\alias{cens.gumbel}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Censored Gumbel Distribution }
\description{
  Maximum likelihood estimation of the 2-parameter Gumbel distribution
  when there are censored observations. A matrix response is not
  allowed.
}
\usage{
cens.gumbel(llocation = "identitylink", lscale = "loglink",
            iscale = NULL, mean = TRUE, percentiles = NULL,
            zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{llocation, lscale}{
  Character.
  Parameter link functions for the location and (positive)
  \eqn{scale} parameters.
  See \code{\link{Links}} for more choices.

  }
  \item{iscale}{
  Numeric and positive.
  Initial value for \eqn{scale}. Recycled to the appropriate length.
  In general, a larger value is better than a smaller value.
  The default is to choose the value internally.

  }
  \item{mean}{
  Logical. Return the mean? If \code{TRUE} then the mean is returned,
  otherwise percentiles given by the \code{percentiles} argument.

  }
  \item{percentiles}{
  Numeric with values between 0 and 100.
  If \code{mean=FALSE} then the fitted values are percentiles which
  must be specified by this argument.

  }
  \item{zero}{
  An integer-valued vector specifying which linear/additive predictors
  are modelled as intercepts only. The value (possibly values) must
  be from the set \{1,2\} corresponding respectively to \eqn{location}
  and \eqn{scale}. If \code{zero=NULL} then all linear/additive
  predictors are modelled as a linear combination of the explanatory
  variables. The default is to model the scale parameter as an
  intercept only.

  }
}
\details{
  This \pkg{VGAM} family function is like \code{\link{gumbel}} but
  handles observations that are left-censored (so that the true value
  would be less than the observed value) or right-censored (so that
  the true value would be greater than the observed value). To indicate
  which type of censoring, input
  \code{extra = list(leftcensored = vec1, rightcensored = vec2)}
  where \code{vec1} and \code{vec2} are logical vectors the same length
  as the response. If the two components of this list are missing then
  the logical values are taken to be \code{FALSE}. The fitted object
  has these two components stored in the \code{extra} slot.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.

}
\references{
  Coles, S. (2001)
  \emph{An Introduction to Statistical Modeling of Extreme Values}.
  London: Springer-Verlag.

}
\author{ T. W. Yee }
\section{Warning}{
  Numerical problems may occur if the amount of censoring is excessive.

}
\note{
  See \code{\link{gumbel}} for details about the Gumbel distribution.
  The initial values are computed assuming that all observations are
  uncensored, and therefore could be improved upon.

}
\seealso{
  \code{\link{gumbel}},
  \code{\link{gumbelff}},
  \code{\link{rgumbel}},
  \code{\link{guplot}},
  \code{\link{gev}},
  \code{\link{venice}}.
}
\examples{
# Example 1
ystar <- venice[["r1"]]  # Use the first order statistic as the response
nn <- length(ystar)
L <- runif(nn, 100, 104)  # Lower censoring points
U <- runif(nn, 130, 135)  # Upper censoring points
y <- pmax(L, ystar)  # Left  censored
y <- pmin(U, y)      # Right censored
extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
fit <- vglm(y ~ scale(year), data = venice, trace = TRUE, extra = extra,
            fam = cens.gumbel(mean = FALSE, perc = c(5, 25, 50, 75, 95)))
coef(fit, matrix = TRUE)
head(fitted(fit))
fit@extra

# Example 2: simulated data
nn <- 1000
ystar <- rgumbel(nn, loc = 1, scale = exp(0.5))  # The uncensored data
L <- runif(nn, -1, 1)  # Lower censoring points
U <- runif(nn,  2, 5)  # Upper censoring points
y <- pmax(L, ystar)  # Left  censored
y <- pmin(U, y)      # Right censored
\dontrun{par(mfrow = c(1, 2)); hist(ystar); hist(y);}
extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
fit <- vglm(y ~ 1, trace = TRUE, extra = extra, fam = cens.gumbel)
coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
VGAM/man/mccullagh89.Rd0000644000176200001440000000661513565414527014202 0ustar liggesusers\name{mccullagh89}
\alias{mccullagh89}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{McCullagh (1989) Distribution Family Function}
\description{
  Estimates the two parameters of the McCullagh (1989) distribution by
  maximum likelihood estimation.
}
\usage{
mccullagh89(ltheta = "rhobitlink", lnu = logofflink(offset = 0.5),
            itheta = NULL, inu = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{ltheta, lnu}{
  Link functions for the \eqn{\theta}{theta} and \eqn{\nu}{nu}
  parameters. See \code{\link{Links}} for general information.

  }
  \item{itheta, inu}{
  Numeric.
  Optional initial values for \eqn{\theta}{theta} and \eqn{\nu}{nu}.
  The default is to internally compute them.

  }
  \item{zero}{
  See \code{\link{CommonVGAMffArguments}} for information.

  }
}
\details{
  The McCullagh (1989) distribution has density function
 \deqn{f(y;\theta,\nu) = \frac{ \{ 1-y^2 \}^{\nu-\frac12}}
  { (1-2\theta y + \theta^2)^{\nu} \mbox{Beta}(\nu+\frac12, \frac12)}}{%
  f(y;theta,nu) = (1-y^2)^(nu-0.5) /
  [ (1 - 2*theta*y+theta^2)^nu * Beta(nu+0.5, 0.5)]}
  where \eqn{-1 < y < 1} and \eqn{-1 < \theta < 1}{-1 < theta < 1}.
  This distribution is equation (1) in that paper. The parameter
  \eqn{\nu}{nu} satisfies \eqn{\nu > -1/2}{nu > -1/2}, therefore the
  default is to use a log-offset link with offset equal to 0.5, i.e.,
  \eqn{\eta_2=\log(\nu+0.5)}{eta_2=log(nu+0.5)}.
  The mean of \eqn{Y} is
  \eqn{\nu \theta / (1+\nu)}{nu*theta/(1+nu)}
  and these are returned as the fitted values.

  This distribution is related to the Leipnik distribution (see Johnson
  et al. (1995)), is related to ultraspherical functions, and under
  certain conditions, arises as exit distributions for Brownian motion.
  Fisher scoring is implemented here and it uses a diagonal matrix so
  the parameters are globally orthogonal in the Fisher information
  sense. McCullagh (1989) also states that, to some extent,
  \eqn{\theta}{theta} and \eqn{\nu}{nu} have the properties of a
  location parameter and a precision parameter, respectively.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  \code{\link{rrvglm}} and \code{\link{vgam}}.

}
\references{
  McCullagh, P. (1989)
  Some statistical properties of a family of continuous univariate
  distributions.
\emph{Journal of the American Statistical Association}, \bold{84}, 125--129. Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) \emph{Continuous Univariate Distributions}, 2nd edition, Volume 2, New York: Wiley. (pages 612--617). } \author{ T. W. Yee } \note{ Convergence may be slow or fail unless the initial values are reasonably close. If a failure occurs, try assigning the argument \code{inu} and/or \code{itheta}. Figure 1 of McCullagh (1989) gives a broad range of densities for different values of \eqn{\theta}{theta} and \eqn{\nu}{nu}, and this could be consulted for obtaining reasonable initial values if all else fails. } \seealso{ \code{\link{leipnik}}, \code{\link{rhobitlink}}, \code{\link{logofflink}}. } %\section{Warning }{ %} \examples{ mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf fit <- vglm(y ~ 1, mccullagh89, data = mdata, trace = TRUE) head(fitted(fit)) with(mdata, mean(y)) summary(fit) coef(fit, matrix = TRUE) Coef(fit) } \keyword{models} \keyword{regression} VGAM/man/oxtemp.Rd0000644000176200001440000000112313565414527013373 0ustar liggesusers\name{oxtemp} \alias{oxtemp} \docType{data} \title{ Oxford Temperature Data } \description{ Annual maximum temperatures collected at Oxford, UK. } \usage{data(oxtemp)} \format{ A data frame with 80 observations on the following 2 variables. \describe{ \item{maxtemp}{Annual maximum temperatures (in degrees Fahrenheit). } \item{year}{The values 1901 to 1980. } } } \details{ The data were collected from 1901 to 1980. } % zz: \source{ Unknown. } % \references{ % } \examples{ \dontrun{ fit <- vglm(maxtemp ~ 1, gevff, data = oxtemp, trace = TRUE) } } \keyword{datasets} VGAM/man/maxwellUC.Rd0000644000176200001440000000427013565414527013766 0ustar liggesusers\name{Maxwell} \alias{Maxwell} \alias{dmaxwell} \alias{pmaxwell} \alias{qmaxwell} \alias{rmaxwell} \title{The Maxwell Distribution} \description{ Density, distribution function, quantile function and random generation for the Maxwell distribution. } \usage{ dmaxwell(x, rate, log = FALSE) pmaxwell(q, rate, lower.tail = TRUE, log.p = FALSE) qmaxwell(p, rate, lower.tail = TRUE, log.p = FALSE) rmaxwell(n, rate) } \arguments{ \item{x, q, p, n}{ Same as \code{\link[stats:Uniform]{Uniform}}. } \item{rate}{the (rate) parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dmaxwell} gives the density, \code{pmaxwell} gives the distribution function, \code{qmaxwell} gives the quantile function, and \code{rmaxwell} generates random deviates. } \references{ Balakrishnan, N. and Nevzorov, V. B. (2003) \emph{A Primer on Statistical Distributions}. Hoboken, New Jersey: Wiley. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{maxwell}}, the \pkg{VGAM} family function for estimating the (rate) parameter \eqn{a} by maximum likelihood estimation, for the formula of the probability density function. } \note{ The Maxwell distribution is related to the Rayleigh distribution. } \seealso{ \code{\link{maxwell}}, \code{\link{Rayleigh}}, \code{\link{rayleigh}}. 
}
\examples{
\dontrun{ rate <- 3; x <- seq(-0.5, 3, length = 100)
plot(x, dmaxwell(x, rate = rate), type = "l", col = "blue", las = 1,
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "")
abline(h = 0, col = "blue", lty = 2)
lines(x, pmaxwell(x, rate = rate), type = "l", col = "orange")
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qmaxwell(probs, rate = rate)
lines(Q, dmaxwell(Q, rate), col = "purple", lty = 3, type = "h")
lines(Q, pmaxwell(Q, rate), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
max(abs(pmaxwell(Q, rate) - probs))  # Should be zero
}
}
\keyword{distribution}
VGAM/man/AA.Aa.aa.Rd0000644000176200001440000000513313565414527013245 0ustar liggesusers\name{AA.Aa.aa}
\alias{AA.Aa.aa}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The AA-Aa-aa Blood Group System }
\description{
  Estimates the parameter of the AA-Aa-aa blood group system, with or
  without Hardy-Weinberg equilibrium.
}
\usage{
AA.Aa.aa(linkp = "logitlink", linkf = "logitlink", inbreeding = FALSE,
         ipA = NULL, ifp = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{linkp, linkf}{
  Link functions applied to \code{pA} and \code{f}.
  See \code{\link{Links}} for more choices.

  }
  \item{ipA, ifp}{
  Optional initial values for \code{pA} and \code{f}.

  }
  \item{inbreeding}{
  Logical. Is there inbreeding?

%HWE assumption to be made?

  }
  \item{zero}{
  See \code{\link{CommonVGAMffArguments}} for information.

  }
}
\details{
  This one- or two-parameter model involves a probability called
  \code{pA}. The probability of getting a count in the first column of
  the input (an AA) is \code{pA*pA}. When \code{inbreeding = TRUE}, an
  additional parameter \code{f} is used. If \code{inbreeding = FALSE}
  then \eqn{f = 0} and Hardy-Weinberg Equilibrium (HWE) is assumed.
  The EIM is used if \code{inbreeding = FALSE}.

% With Hardy Weinberg equilibrium (HWE),
% Without the HWE assumption, an additional parameter \code{f} is used.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.

}
\references{
  Weir, B. S. (1996)
  \emph{Genetic Data Analysis II: Methods for Discrete Population
  Genetic Data}, Sunderland, MA: Sinauer Associates, Inc.

}
\author{ T. W. Yee }
\note{
  The input can be a 3-column matrix of counts, where the columns are
  AA, Aa and aa (in order). Alternatively, the input can be a 3-column
  matrix of proportions (so each row adds to 1) and the \code{weights}
  argument is used to specify the total number of counts for each row.

}
\section{Warning }{
  Setting \code{inbreeding = FALSE} makes estimation difficult with
  non-intercept-only models. Currently, this code seems to work with
  intercept-only models.

}
\seealso{
  \code{\link{AB.Ab.aB.ab}},
  \code{\link{ABO}},
  \code{\link{A1A2A3}},
  \code{\link{MNSs}}.

% \code{\link{AB.Ab.aB.ab2}},

}
\examples{
y <- cbind(53, 95, 38)
fit1 <- vglm(y ~ 1, AA.Aa.aa, trace = TRUE)
fit2 <- vglm(y ~ 1, AA.Aa.aa(inbreeding = TRUE), trace = TRUE)
rbind(y, sum(y) * fitted(fit1))
Coef(fit1)  # Estimated pA
Coef(fit2)  # Estimated pA and f
summary(fit1)
}
\keyword{models}
\keyword{regression}
VGAM/man/genrayleigh.Rd0000644000176200001440000000466113565414527014367 0ustar liggesusers\name{genrayleigh}
\alias{genrayleigh}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Generalized Rayleigh Distribution Family Function} \description{ Estimates the two parameters of the generalized Rayleigh distribution by maximum likelihood estimation. } \usage{ genrayleigh(lscale = "loglink", lshape = "loglink", iscale = NULL, ishape = NULL, tol12 = 1e-05, nsimEIM = 300, zero = 2) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lscale, lshape}{ Link function for the two positive parameters, scale and shape. See \code{\link{Links}} for more choices. } \item{iscale, ishape}{ Numeric. Optional initial values for the scale and shape parameters. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}}. } \item{tol12}{ Numeric and positive. Tolerance for testing whether the second shape parameter is either 1 or 2. If so then the working weights need to handle these singularities. } } \details{ The generalized Rayleigh distribution has density function \deqn{f(y;b = scale,s = shape) = (2 s y/b^{2}) e^{-(y/b)^{2}} (1 - e^{-(y/b)^{2}})^{s-1}}{% (2*s*y/b^2) * e^(-(y/b)^2) * (1 - e^(-(y/b)^2))^(s-1)} where \eqn{y > 0} and the two parameters, \eqn{b} and \eqn{s}, are positive. The mean cannot be expressed nicely so the median is returned as the fitted values. Applications of the generalized Rayleigh distribution include modeling strength data and general lifetime data. Simulated Fisher scoring is implemented. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Kundu, D., Raqab, M. C. (2005). Generalized Rayleigh distribution: different methods of estimations. \emph{Computational Statistics and Data Analysis}, \bold{49}, 187--200. } \author{ J. G. Lauder and T. W. Yee } \note{ We define \code{scale} as the reciprocal of the scale parameter used by Kundu and Raqab (2005). } \seealso{ \code{\link{dgenray}}, \code{\link{rayleigh}}. } \examples{ Scale <- exp(1); shape <- exp(1) rdata <- data.frame(y = rgenray(n = 1000, scale = Scale, shape = shape)) fit <- vglm(y ~ 1, genrayleigh, data = rdata, trace = TRUE) c(with(rdata, mean(y)), head(fitted(fit), 1)) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/gaitpois.mlmUC.Rd0000644000176200001440000001757413565414527014733 0ustar liggesusers\name{Gaitpois.mlm} \alias{Gaitpois.mlm} \alias{dgaitpois.mlm} \alias{pgaitpois.mlm} \alias{qgaitpois.mlm} \alias{rgaitpois.mlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Altered, -Inflated and -Truncated Poisson Distribution (GAIT--Pois--MLM--MLM) % (multinomial logit model based; GAIT--Pois--MLM--MLM) } \description{ Density, distribution function, quantile function and random generation for the generally-altered, -inflated and -truncated Poisson distribution, based on the multinomial logit model (MLM). This distribution is sometimes abbreviated as GAIT--Pois--MLM--MLM. 
}
\usage{
dgaitpois.mlm(x, lambda, alter = NULL, inflate = NULL, truncate = NULL,
              max.support = Inf, pobs.a = 0, pstr.i = 0,
              byrow.arg = FALSE, log.arg = FALSE)
pgaitpois.mlm(q, lambda, alter = NULL, inflate = NULL, truncate = NULL,
              max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE)
qgaitpois.mlm(p, lambda, alter = NULL, inflate = NULL, truncate = NULL,
              max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE)
rgaitpois.mlm(n, lambda, alter = NULL, inflate = NULL, truncate = NULL,
              max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q, p, n, log.arg}{
  Same meaning as in \code{\link[stats]{rpois}}.

  }
  \item{lambda}{
  Same meaning as in \code{\link[stats]{rpois}}, i.e., for an ordinary
  Poisson distribution. Short vectors are recycled.

  }
  \item{alter, inflate, truncate}{
  Vectors of nonnegative integers; the altered, inflated and truncated
  values. Each argument must have unique values only. The default value
  of \code{NULL} means an empty set. Values in the upper tail of the
  distribution may be truncated by using \code{max.support}.

% Must be sorted and have unique values only.

  }

  \item{pstr.i, byrow.arg}{
  The first argument can be a \code{length(inflate)}-vector of
  probabilities, or else a matrix of dimension
  \code{n x length(inflate)} of probabilities. If a vector then this
  matrix is constructed from the vector using \code{byrow.arg} to
  determine the enumeration of the elements (similar to
  \code{\link[base]{matrix}}). These arguments are not used unless
  \code{inflate} is assigned.

%%% This paragraph only holds if inflation is the sole operator:
% One can think of this matrix as comprising of
% \emph{structural} probabilities.
% Then the matrix augmented with one more column on the RHS so it has
% dimension \code{n x (length(inflate) + 1)}
% and whose \code{\link[base]{rowSums}} is a vector of 1s.
% Finally,
% for \code{\link{rgaitpois.mlm}},
% a multinomial sample is taken and if it belongs to the final
% column then Poisson random variates are drawn.

  }
  \item{pobs.a}{
  This argument is similar to \code{pstr.i} but is used when
  \code{alter} is assigned a vector. The argument \code{byrow.arg} is
  used similarly to construct a matrix of dimension
  \code{n x length(alter)} of probabilities. This argument is not used
  unless \code{alter} is assigned.

  }
  \item{max.support}{
  Numeric; the maximum support value so that any value larger has been
  truncated. This argument is necessary because
  \code{truncate = (max.support + 1):Inf} is impractical, so this is
  the way to truncate the upper tail of the distribution. Note that
  \code{max(truncate) < max.support} must be satisfied, otherwise an
  error message will be issued.

  }
}
\details{
  These functions allow any combination of 3 operator types:
  truncation, alteration and inflation. The precedence of these is
  truncation, then alteration and lastly inflation. This order
  minimizes the potential interference among the operators. Loosely, a
  set of probabilities is set to 0 by truncation and the remaining
  probabilities are scaled up. Then a different set of probabilities is
  set to some values \code{pobs.a} and the remaining probabilities are
  rescaled up. Then another different set of probabilities is inflated
  by an amount \code{pstr.i} so that individual elements in this set
  have two sources. Then the remainder of all the probabilities are
  left unchanged apart from further rescaling.
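  The net effect of the three operators can be checked numerically; the
  following is a minimal sketch whose argument values are chosen
  arbitrarily for illustration (it is not part of the package's own
  examples):
\preformatted{
# Truncate 0:2, alter 5 (with probability 0.3), inflate 8 (by 0.1):
pp <- dgaitpois.mlm(0:100, lambda = 10, truncate = 0:2,
                    alter = 5, pobs.a = 0.3,
                    inflate = 8, pstr.i = 0.1)
sum(pp)   # Should be very close to 1
pp[1:3]   # The truncated values 0:2 have probability 0
}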
Any value of the support of the distribution that is altered, inflated or truncated is called a \emph{special} value. A special value that is altered may mean that its probability increases or decreases relative to the parent distribution. An inflated special value means that its probability has increased, provided alteration has not made it decrease in the first case. % It can be considered a mixture of a multinomial distribution % and an ordinary Poisson distribution. % The mean therefore is % \deqn{\lambda / (1-\exp(-\lambda)).}{% % lambda / (1-exp(-lambda)).} % As \eqn{\lambda}{lambda} increases, the positive-Poisson and Poisson % distributions become more similar. % Unlike similar functions for the Poisson distribution, a zero value % of \code{lambda} is not permitted here. These functions do what \code{dgapois}, \code{dgipois}, \code{dgtpois}, \code{pgapois}, \code{pgipois}, \code{pgtpois}, \code{qgapois}, \code{qgipois}, \code{qgtpois}, \code{rgapois}, \code{rgipois}, \code{rgtpois} collectively did because the arguments \code{alter}, \code{inflate} and \code{truncate} have been included in one function. The old functions may be relegated to \pkg{VGAMdata} in the future. } %\section{Warning }{ % See \code{\link{rgaitpois.mlm}}. % The function can run slowly for certain combinations % of \code{pstr.i} and \code{inflate}, e.g., % \code{rgaitpois.mlm(1e5, 1, inflate = 0:9, pstr.i = (1:10)/100)}. % Failure to obtain random variates will result in some % \code{NA} values instead. % An infinite loop can occur for certain combinations % of \code{lambda} and \code{inflate}, e.g., % \code{rgaitpois.mlm(10, 1, trunc = 0:100)}. % No action is made to avoid this occurring. %} \value{ \code{dgaitpois.mlm} gives the density, \code{pgaitpois.mlm} gives the distribution function, \code{qgaitpois.mlm} gives the quantile function, and \code{rgaitpois.mlm} generates random deviates. The default values of the arguments correspond to ordinary \code{\link[stats:Poisson]{dpois}}, \code{\link[stats:Poisson]{ppois}}, \code{\link[stats:Poisson]{qpois}}, \code{\link[stats:Poisson]{rpois}} respectively. } \references{ Yee, T. W. and Ma, C. C. (2019) Generally-altered, -inflated and -truncated count regression, with application to heaped and seeped data. \emph{In preparation}. } \author{ T. W. Yee. } \note{ These functions comprise the MLM variant and therefore are more nonparametric and unstructured in nature. There is another variant based on parent distribution mixtures---see \code{\link{Gaitpois.mix}} for information. } % \code{\link{gaitpoisson.mlm}}, % \code{\link{gaitpoisson.mix}}, \seealso{ \code{\link{gatpoisson.mlm}}, \code{\link{Gaitpois.mix}}, \code{\link{Gaitnbinom.mlm}}, \code{\link{Pospois}}, \code{\link[stats:Poisson]{Poisson}}. % \code{\link{gaitpoisson.mlm}}, % \code{\link{Gtpois}}, % \code{\link{Gapois.mix}}, % \code{\link{zapoisson}}, % \code{\link{zipoisson}}, } \examples{ avec <- 5:6 # Alter these (special) values ivec <- 9:10 # Inflate these (special) values tvec <- 12:14 # Truncate these (special) values pobs.a <- c(0.1, 0.2) pstr.i <- (1:2) / 10 lambda <- 15; xx <- 0:22 y <- rgaitpois.mlm(1000, lambda, alter = avec, inflate = ivec, truncate = tvec, max.support = 20, pstr.i = pstr.i, pobs.a = pobs.a, byrow = TRUE) table(y) (ii <- dgaitpois.mlm(xx, lambda, alter = avec, inflate = ivec, truncate = tvec, max.support = 20, pstr.i = pstr.i, pobs.a = pobs.a, byrow = TRUE)) } \keyword{distribution} % 20190927; checked identical results to gapoisson() and gtpoisson(). 
VGAM/man/explogUC.Rd0000644000176200001440000000426113565414527013613 0ustar liggesusers\name{explog} \alias{explog} \alias{dexplog} \alias{pexplog} \alias{qexplog} \alias{rexplog} \title{The Exponential Logarithmic Distribution} \description{ Density, distribution function, quantile function and random generation for the exponential logarithmic distribution. } \usage{ dexplog(x, scale = 1, shape, log = FALSE) pexplog(q, scale = 1, shape) qexplog(p, scale = 1, shape) rexplog(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{scale, shape}{ positive scale and shape parameters. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{dexplog} gives the density, \code{pexplog} gives the distribution function, \code{qexplog} gives the quantile function, and \code{rexplog} generates random deviates. } \author{ J. G. Lauder and T. W. Yee } \details{ See \code{\link{explogff}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } \note{ We define \code{scale} as the reciprocal of the scale parameter used by Tahmasabi and Rezaei (2008). } \seealso{ \code{\link{explogff}}, \code{\link{exponential}}. } \examples{ \dontrun{ shape <- 0.5; scale <- 2; nn <- 501 x <- seq(-0.50, 6.0, len = nn) plot(x, dexplog(x, scale, shape), type = "l", las = 1, ylim = c(0, 1.1), ylab = paste("[dp]explog(shape = ", shape, ", scale = ", scale, ")"), col = "blue", cex.main = 0.8, main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles") lines(x, pexplog(x, scale, shape), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qexplog(probs, scale, shape = shape) lines(Q, dexplog(Q, scale, shape = shape), col = "purple", lty = 3, type = "h") lines(Q, pexplog(Q, scale, shape = shape), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) max(abs(pexplog(Q, scale, shape = shape) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/riceff.Rd0000644000176200001440000000555713565414527013334 0ustar liggesusers\name{riceff} \alias{riceff} %- Also NEED an '\alias' for EACH other topic documented here. \title{Rice Distribution Family Function} \description{ Estimates the two parameters of a Rice distribution by maximum likelihood estimation. } \usage{ riceff(lsigma = "loglink", lvee = "loglink", isigma = NULL, ivee = NULL, nsimEIM = 100, zero = NULL, nowarning = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{nowarning}{ Logical. Suppress a warning? Ignored for \pkg{VGAM} 0.9-7 and higher. } \item{lvee, lsigma}{ Link functions for the \eqn{v} and \eqn{\sigma}{sigma} parameters. See \code{\link{Links}} for more choices and for general information. } \item{ivee, isigma}{ Optional initial values for the parameters. If convergence failure occurs (this \pkg{VGAM} family function seems to require good initial values) try using these arguments. See \code{\link{CommonVGAMffArguments}} for more information. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}} for information. 
}
}
\details{
  The Rician distribution has density function
  \deqn{f(y;v,\sigma) =
  \frac{ y }{\sigma^2} \, \exp(-(y^2+v^2) / (2\sigma^2)) \,
  I_0(y v / \sigma^2) }{%
  f(y;v,sigma) =
  (y/sigma^2) * exp(-(y^2+v^2) / (2*sigma^2)) * I_0(y*v/sigma^2)}
  where \eqn{y > 0}, \eqn{v > 0}, \eqn{\sigma > 0} and \eqn{I_0} is the
  modified Bessel function of the first kind with order zero. When
  \eqn{v = 0} the Rice distribution reduces to a Rayleigh distribution.
  The mean is
  \eqn{\sigma \sqrt{\pi/2} \exp(z/2) ((1-z) I_0(-z/2)-z I_1(-z/2))}{sigma*sqrt(pi/2)
  * exp(z/2)*((1-z) * I_0(-z/2)-z*I_1(-z/2))}
  (returned as the fitted values) where
  \eqn{z=-v^2/(2 \sigma^2)}{z=-v^2/(2*sigma^2)}.
  Simulated Fisher scoring is implemented.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.

}
\references{
  Rice, S. O. (1945)
  Mathematical Analysis of Random Noise.
  \emph{Bell System Technical Journal}, \bold{24}, 46--156.

}
\author{ T. W. Yee }
\note{
  Convergence problems may occur for data where \eqn{v=0}; if so, use
  \code{\link{rayleigh}} or possibly an \code{\link{identitylink}}
  link. When \eqn{v} is large (greater than 3, say) then the mean is
  approximately \eqn{v} and the standard deviation is approximately
  \eqn{\sigma}{sigma}.

}
\seealso{
  \code{\link{drice}},
  \code{\link{rayleigh}},
  \code{\link[base:Bessel]{besselI}},
  \code{\link{simulate.vlm}}.

}
\examples{
\dontrun{ sigma <- exp(1); vee <- exp(2)
rdata <- data.frame(y = rrice(n <- 1000, sigma, vee = vee))
fit <- vglm(y ~ 1, riceff, data = rdata, trace = TRUE, crit = "coef")
c(with(rdata, mean(y)), fitted(fit)[1])
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/is.buggy.Rd0000644000176200001440000000620013565414527013607 0ustar liggesusers\name{is.buggy}
\alias{is.buggy}
\alias{is.buggy.vlm}
% 20150326
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Does the Fitted Object Suffer from a Known Bug? }
\description{
  Checks to see if a fitted object suffers from some known bug.
}
\usage{
is.buggy(object, ...)
is.buggy.vlm(object, each.term = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  A fitted \pkg{VGAM} object, e.g., from \code{\link{vgam}}.

  }
  \item{each.term}{
  Logical. If \code{TRUE} then a logical is returned for each term.

  }
  \item{\dots}{
  Unused for now.

  }
}
\details{
  It is known that \code{\link{vgam}} with \code{\link{s}} terms does
  not correctly handle constraint matrices (\code{cmat}, say) when
  \code{crossprod(cmat)} is not diagonal. This function detects whether
  this is so or not. Note that probably all \pkg{VGAM} family functions
  have defaults where all \code{crossprod(cmat)}s are diagonal, and
  therefore do not suffer from this bug. It is more likely to occur if
  the user inputs constraint matrices using the \code{constraints}
  argument (and sets \code{zero = NULL} if necessary).

  Second-generation VGAMs based on \code{\link{sm.ps}} are a modern
  alternative to using \code{\link{s}}. They do not suffer from this
  bug. However, G2-VGAMs require a reasonably large sample size in
  order to work more reliably.

}
\value{
  The default is a single logical (\code{TRUE} if any term is
  \code{TRUE}), otherwise a vector of such with each element
  corresponding to a term.
  If the value is \code{TRUE} then I suggest replacing the VGAM by a
  similar model fitted by \code{\link{vglm}} and using regression
  splines, e.g., \code{\link[splines]{bs}}, \code{\link[splines]{ns}}.

}
%\references{
%}
\author{ T. W. Yee }
\note{
  When the bug is fixed this function may be withdrawn; otherwise it
  will always return \code{FALSE}s!

}
\seealso{
  \code{\link{vgam}},
  \code{\link{vglm}},
  \code{\link[VGAM]{s}},
  \code{\link[VGAM]{sm.ps}},
  \code{\link[splines]{bs}},
  \code{\link[splines]{ns}}.

}
\examples{
fit1 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(3, 4)),
             binomialff(multiple.responses = TRUE), data = hunua)
is.buggy(fit1)  # Okay
is.buggy(fit1, each.term = TRUE)  # No terms are buggy
fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(3, 4)),
             binomialff(multiple.responses = TRUE), data = hunua,
             constraints = list("(Intercept)" = diag(2),
                                "s(altitude, df = c(3, 4))" =
                                  matrix(c(1, 1, 0, 1), 2, 2)))
is.buggy(fit2)  # TRUE
is.buggy(fit2, each.term = TRUE)
constraints(fit2)

# fit2b is an approximate alternative to fit2:
fit2b <- vglm(cbind(agaaus, kniexc) ~ bs(altitude, df = 3) +
                                      bs(altitude, df = 4),
              binomialff(multiple.responses = TRUE), data = hunua,
              constraints = list("(Intercept)" = diag(2),
                                 "bs(altitude, df = 3)" = rbind(1, 1),
                                 "bs(altitude, df = 4)" = rbind(0, 1)))
is.buggy(fit2b)  # Okay
is.buggy(fit2b, each.term = TRUE)
constraints(fit2b)
}
\keyword{models}
\keyword{regression}
VGAM/man/mix2exp.Rd0000644000176200001440000001046013565414527013457 0ustar liggesusers\name{mix2exp}
\alias{mix2exp}
%- Also NEED an '\alias' for EACH other topic documented here.
%- Adapted from mix2poisson.Rd
\title{ Mixture of Two Exponential Distributions }
\description{
  Estimates the three parameters of a mixture of two exponential
  distributions by maximum likelihood estimation.
}
\usage{
mix2exp(lphi = "logitlink", llambda = "loglink", iphi = 0.5,
        il1 = NULL, il2 = NULL, qmu = c(0.8, 0.2), nsimEIM = 100,
        zero = "phi")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lphi, llambda}{
  Link functions for the parameters \eqn{\phi}{phi} and
  \eqn{\lambda}{lambda}. The latter is the rate parameter and note
  that the mean of an ordinary exponential distribution is
  \eqn{1 / \lambda}. See \code{\link{Links}} for more choices.

  }
  \item{iphi, il1, il2}{
  Initial value for \eqn{\phi}{phi}, and optional initial values for
  \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2}. The last two
  have values that must be positive. The default is to compute initial
  values internally using the argument \code{qmu}.

  }
  \item{qmu}{
  Vector with two values giving the probabilities relating to the
  sample quantiles for obtaining initial values for
  \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2}. The two values
  are fed in as the \code{probs} argument into
  \code{\link[stats]{quantile}}.

  }
  \item{nsimEIM, zero}{
  See \code{\link{CommonVGAMffArguments}}.

  }
}
\details{
  The probability density function can be loosely written as
  \deqn{f(y) = \phi\,Exponential(\lambda_1) +
              (1-\phi)\,Exponential(\lambda_2)}{%
        f(y) = phi * Exponential(lambda1) +
              (1-phi) * Exponential(lambda2)}
  where \eqn{\phi}{phi} is the probability an observation belongs to
  the first group, and \eqn{y>0}. The parameter \eqn{\phi}{phi}
  satisfies \eqn{0 < \phi < 1}{0 < phi < 1}. The mean of \eqn{Y} is
  \eqn{\phi / \lambda_1 + (1-\phi) / \lambda_2}{phi/lambda1 +
  (1-phi)/lambda2} and this is returned as the fitted values.
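  For example, a small numeric sketch of this mean formula, with
  parameter values chosen arbitrarily for illustration:
\preformatted{
phi <- 0.3; lambda1 <- 2; lambda2 <- 0.5
phi / lambda1 + (1 - phi) / lambda2  # E(Y) = 0.15 + 1.4 = 1.55
}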
By default, the three linear/additive predictors are \eqn{(logit(\phi), \log(\lambda_1), \log(\lambda_2))^T}{(logit(phi), log(lambda1), log(lambda2))^T}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } % \references{ ~put references to the literature/web site here ~ } \section{Warning }{ This \pkg{VGAM} family function requires care for a successful application. In particular, good initial values are required because of the presence of local solutions. Therefore running this function with several different combinations of arguments such as \code{iphi}, \code{il1}, \code{il2}, \code{qmu} is highly recommended. Graphical methods such as \code{\link[graphics]{hist}} can be used as an aid. This \pkg{VGAM} family function is experimental and should be used with care. } \author{ T. W. Yee } \note{ Fitting this model successfully to data can be difficult due to local solutions, uniqueness problems and ill-conditioned data. It pays to fit the model several times with different initial values and check that the best fit looks reasonable. Plotting the results is recommended. This function works better as \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2} become more different. The default control argument \code{trace = TRUE} is to encourage monitoring convergence. } \seealso{ \code{\link[stats:Exponential]{rexp}}, \code{\link{exponential}}, \code{\link{mix2poisson}}. } \examples{ \dontrun{ lambda1 <- exp(1); lambda2 <- exp(3) (phi <- logitlink(-1, inverse = TRUE)) mdata <- data.frame(y1 = rexp(nn <- 1000, lambda1)) mdata <- transform(mdata, y2 = rexp(nn, lambda2)) mdata <- transform(mdata, Y = ifelse(runif(nn) < phi, y1, y2)) fit <- vglm(Y ~ 1, mix2exp, data = mdata, trace = TRUE) coef(fit, matrix = TRUE) # Compare the results with the truth round(rbind('Estimated' = Coef(fit), 'Truth' = c(phi, lambda1, lambda2)), digits = 2) with(mdata, hist(Y, prob = TRUE, main = "Orange = estimate, blue = truth")) abline(v = 1 / Coef(fit)[c(2, 3)], lty = 2, col = "orange", lwd = 2) abline(v = 1 / c(lambda1, lambda2), lty = 2, col = "blue", lwd = 2) } } \keyword{models} \keyword{regression} VGAM/man/geometric.Rd0000644000176200001440000001045513565414527014045 0ustar liggesusers\name{geometric} \alias{geometric} \alias{truncgeometric} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Geometric (Truncated and Untruncated) Distributions } \description{ Maximum likelihood estimation for the geometric and truncated geometric distributions. } \usage{ geometric(link = "logitlink", expected = TRUE, imethod = 1, iprob = NULL, zero = NULL) truncgeometric(upper.limit = Inf, link = "logitlink", expected = TRUE, imethod = 1, iprob = NULL, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Parameter link function applied to the probability parameter \eqn{p}{prob}, which lies in the unit interval. See \code{\link{Links}} for more choices. } \item{expected}{ Logical. Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson. } \item{iprob, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for details. } \item{upper.limit}{ Numeric. Upper values. As a vector, it is recycled across responses first. The default value means both family functions should give the same result. 
}
}
\details{
  A random variable \eqn{Y} has a 1-parameter geometric distribution if
  \eqn{P(Y=y) = p (1-p)^y}{P(Y=y) = prob * (1-prob)^y} for
  \eqn{y=0,1,2,\ldots}{y=0,1,2,...}. Here, \eqn{p}{prob} is the
  probability of success, and \eqn{Y} is the number of (independent)
  failures until a success occurs. Thus the response \eqn{Y} should be
  a non-negative integer. The mean of \eqn{Y} is
  \eqn{E(Y) = (1-p)/p}{E(Y) = (1-prob)/prob} and its variance is
  \eqn{Var(Y) = (1-p)/p^2}{Var(Y) = (1-prob)/prob^2}. The geometric
  distribution is a special case of the negative binomial distribution
  (see \code{\link{negbinomial}}). The geometric distribution is also a
  special case of the Borel distribution, which is a Lagrangian
  distribution. If \eqn{Y} has a geometric distribution with parameter
  \eqn{p}{prob} then \eqn{Y+1} has a positive-geometric distribution
  with the same parameter. Multiple responses are permitted.

  For \code{truncgeometric()}, the (upper) truncated geometric
  distribution can have response integer values from 0 to
  \code{upper.limit}. It has density
  \code{prob * (1 - prob)^y / [1-(1-prob)^(1+upper.limit)]}.

  For a generalized truncated geometric distribution with integer
  values \eqn{L} to \eqn{U}, say, subtract \eqn{L} from the response
  and feed in \eqn{U-L} as the upper limit.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.

}
\references{
  Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
  \emph{Statistical Distributions},
  Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.

}
\author{ T. W. Yee.
  Help from Viet Hoang Quoc is gratefully acknowledged.

}
%\note{
%
%}
\seealso{
  \code{\link{negbinomial}},
  \code{\link[stats]{Geometric}},
  \code{\link{betageometric}},
  \code{\link{expgeometric}},
  \code{\link{zageometric}},
  \code{\link{zigeometric}},
  \code{\link{rbetageom}},
  \code{\link{simulate.vlm}}.

}
\examples{
gdata <- data.frame(x2 = runif(nn <- 1000) - 0.5)
gdata <- transform(gdata, x3 = runif(nn) - 0.5,
                          x4 = runif(nn) - 0.5)
gdata <- transform(gdata, eta  = -1.0 - 1.0 * x2 + 2.0 * x3)
gdata <- transform(gdata, prob = logitlink(eta, inverse = TRUE))
gdata <- transform(gdata, y1 = rgeom(nn, prob))
with(gdata, table(y1))
fit1 <- vglm(y1 ~ x2 + x3 + x4, geometric, data = gdata, trace = TRUE)
coef(fit1, matrix = TRUE)
summary(fit1)

# Truncated geometric (between 0 and upper.limit)
upper.limit <- 5
tdata <- subset(gdata, y1 <= upper.limit)
nrow(tdata)  # Less than nn
fit2 <- vglm(y1 ~ x2 + x3 + x4, truncgeometric(upper.limit),
             data = tdata, trace = TRUE)
coef(fit2, matrix = TRUE)

# Generalized truncated geometric (between lower.limit and upper.limit)
lower.limit <- 1
upper.limit <- 8
gtdata <- subset(gdata, lower.limit <= y1 & y1 <= upper.limit)
with(gtdata, table(y1))
nrow(gtdata)  # Less than nn
fit3 <- vglm(y1 - lower.limit ~ x2 + x3 + x4,
             truncgeometric(upper.limit - lower.limit),
             data = gtdata, trace = TRUE)
coef(fit3, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
VGAM/man/eexpUC.Rd0000644000176200001440000000750013565414527013255 0ustar liggesusers\name{Expectiles-Exponential}
\alias{Expectiles-Exponential}
\alias{eexp}
\alias{deexp}
\alias{peexp}
\alias{qeexp}
\alias{reexp}
\title{ Expectiles of the Exponential Distribution }
\description{
  Density function, distribution function, expectile function and
  random generation for the distribution associated with the expectiles
  of an exponential distribution.
} \usage{ deexp(x, rate = 1, log = FALSE) peexp(q, rate = 1, lower.tail = TRUE, log.p = FALSE) qeexp(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6, lower.tail = TRUE, log.p = FALSE) reexp(n, rate = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, p, q}{ See \code{\link{deunif}}. } \item{n, rate, log}{ See \code{\link[stats:Exponential]{rexp}}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Exponential]{pexp}} or \code{\link[stats:Exponential]{qexp}}. } \item{Maxit.nr, Tol.nr}{ See \code{\link{deunif}}. } } \details{ General details are given in \code{\link{deunif}} including a note regarding the terminology used. Here, \code{exp} corresponds to the distribution of interest, \eqn{F}, and \code{eexp} corresponds to \eqn{G}. The addition of ``\code{e}'' is for the `other' distribution associated with the parent distribution. Thus \code{deexp} is for \eqn{g}, \code{peexp} is for \eqn{G}, \code{qeexp} is for the inverse of \eqn{G}, \code{reexp} generates random variates from \eqn{g}. For \code{qeexp} the Newton-Raphson algorithm is used to solve for \eqn{y} satisfying \eqn{p = G(y)}. Numerical problems may occur when values of \code{p} are very close to 0 or 1. } \value{ \code{deexp(x)} gives the density function \eqn{g(x)}. \code{peexp(q)} gives the distribution function \eqn{G(q)}. \code{qeexp(p)} gives the expectile function: the value \eqn{y} such that \eqn{G(y)=p}. \code{reexp(n)} gives \eqn{n} random variates from \eqn{G}. } %\references{ % %Jones, M. C. (1994) %Expectiles and M-quantiles are quantiles. %\emph{Statistics and Probability Letters}, %\bold{20}, 149--153. % %} \author{ T. W. Yee and Kai Huang } %\note{ %The ``\code{q}'', as the first character of ``\code{qeunif}'', %may be changed to ``\code{e}'' in the future, %the reason being to emphasize that the expectiles are returned. %Ditto for the argument ``\code{q}'' in \code{peunif}. % %} \seealso{ \code{\link{deunif}}, \code{\link{denorm}}, \code{\link{dexp}}. 
} \examples{ my.p <- 0.25; y <- rexp(nn <- 1000) (myexp <- qeexp(my.p)) sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my.p \dontrun{ par(mfrow = c(2,1)) yy <- seq(-0, 4, len = nn) plot(yy, deexp(yy), col = "blue", ylim = 0:1, xlab = "y", ylab = "g(y)", type = "l", main = "g(y) for Exp(1); dotted green is f(y) = dexp(y)") lines(yy, dexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original' plot(yy, peexp(yy), type = "l", col = "blue", ylim = 0:1, xlab = "y", ylab = "G(y)", main = "G(y) for Exp(1)") abline(v = 1, h = 0.5, col = "red", lty = "dashed") lines(yy, pexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) } } \keyword{distribution} %# Equivalently: %I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp]) %I2 <- mean(y > myexp) * mean(-myexp + y[y > myexp]) %I1 / (I1 + I2) # Should be my.p %# Or: %I1 <- sum( myexp - y[y <= myexp]) %I2 <- sum(-myexp + y[y > myexp]) %# Non-standard exponential %myrate <- 8 %yy <- rexp(nn, rate = myrate) %(myexp <- qeexp(my.p, rate = myrate)) %sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my.p %peexp(-Inf, rate = myrate) # Should be 0 %peexp( Inf, rate = myrate) # Should be 1 %peexp(mean(yy), rate = myrate) # Should be 0.5 %abs(qeexp(0.5, rate = myrate) - mean(yy)) # Should be 0 %abs(peexp(myexp, rate = myrate) - my.p) # Should be 0 %integrate(f = deexp, lower = -1, upper = Inf, rate = myrate) # Should be 1 VGAM/man/loglapUC.Rd0000644000176200001440000000676013565414527013601 0ustar liggesusers\name{loglapUC} \alias{dloglap} \alias{ploglap} \alias{qloglap} \alias{rloglap} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Log-Laplace Distribution } \description{ Density, distribution function, quantile function and random generation for the 3-parameter log-Laplace distribution with location parameter \code{location.ald}, scale parameter \code{scale.ald} (on the log scale), and asymmetry parameter \code{kappa}. } \usage{ dloglap(x, location.ald = 0, scale.ald = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)), log = FALSE) ploglap(q, location.ald = 0, scale.ald = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)), lower.tail = TRUE, log.p = FALSE) qloglap(p, location.ald = 0, scale.ald = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)), lower.tail = TRUE, log.p = FALSE) rloglap(n, location.ald = 0, scale.ald = 1, tau = 0.5, kappa = sqrt(tau/(1-tau))) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{ number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{location.ald, scale.ald}{ the location parameter \eqn{\xi}{xi} and the (positive) scale parameter \eqn{\sigma}{sigma}, on the log scale. } \item{tau}{ the quantile parameter \eqn{\tau}{tau}. Must consist of values in \eqn{(0,1)}. This argument is used to specify \code{kappa} and is ignored if \code{kappa} is assigned. } \item{kappa}{ the asymmetry parameter \eqn{\kappa}{kappa}. Must consist of positive values. } \item{log}{ if \code{TRUE}, probabilities \code{p} are given as \code{log(p)}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \details{ A positive random variable \eqn{Y} is said to have a log-Laplace distribution if \eqn{\log(Y)} has an asymmetric Laplace distribution (ALD). There are many variants of ALDs and the one used here is described in \code{\link{alaplace3}}. 
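  This relationship can be checked numerically; the following is a
  small sketch with arbitrarily chosen parameter values, and it assumes
  \code{\link{palap}} for the ALD distribution function:
\preformatted{
set.seed(1)
y <- rloglap(5, location.ald = 0.5, scale.ald = 1, kappa = 2)
# P(Y <= y) should match P(log(Y) <= log(y)) under the ALD:
all.equal(ploglap(y, location.ald = 0.5, scale.ald = 1, kappa = 2),
          palap(log(y), location = 0.5, scale = 1, kappa = 2))
}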
} \value{
  \code{dloglap} gives the density,
  \code{ploglap} gives the distribution function,
  \code{qloglap} gives the quantile function, and
  \code{rloglap} generates random deviates.
}
\references{
Kozubowski, T. J. and Podgorski, K. (2003)
Log-Laplace distributions.
\emph{International Mathematical Journal},
\bold{3}, 467--495.
}
\author{ T. W. Yee and Kai Huang }
%\note{
%  The \pkg{VGAM} family function \code{\link{loglaplace3}}
%  estimates the three parameters by maximum likelihood estimation.
%}
\seealso{
  \code{\link{dalap}},
  \code{\link{alaplace3}},
  \code{\link{loglaplace1}}.
% \code{\link{loglaplace3}}.
}
\examples{
loc <- 0; sigma <- exp(0.5); kappa <- 1
x <- seq(-0.2, 5, by = 0.01)
\dontrun{
plot(x, dloglap(x, loc, sigma, kappa = kappa), type = "l", col = "blue",
     main = "Blue is density, red is cumulative distribution function",
     ylim = c(0,1), sub = "Purple are 5,10,...,95 percentiles",
     las = 1, ylab = "")
abline(h = 0, col = "blue", lty = 2)
lines(qloglap(seq(0.05,0.95,by = 0.05), loc, sigma, kappa = kappa),
      dloglap(qloglap(seq(0.05,0.95,by = 0.05), loc, sigma, kappa = kappa),
              loc, sigma, kappa = kappa),
      col = "purple", lty = 3, type = "h")
lines(x, ploglap(x, loc, sigma, kappa = kappa), type = "l", col = "red")
abline(h = 0, lty = 2)
}
ploglap(qloglap(seq(0.05,0.95,by = 0.05), loc, sigma, kappa = kappa),
        loc, sigma, kappa = kappa)
}
\keyword{distribution}
VGAM/man/logLikvlm.Rd0000644000176200001440000000547513565414527014033 0ustar liggesusers\name{logLik.vlm}
\alias{logLik.vlm}
%\alias{AICvglm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Extract Log-likelihood for VGLMs/VGAMs/etc. }
\description{
  Calculates the log-likelihood value or the
  element-by-element contributions of the log-likelihood.
}
\usage{
\method{logLik}{vlm}(object, summation = TRUE, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  Some \pkg{VGAM} object, for example, having
  class \code{\link{vglmff-class}}.
  }
  \item{summation}{
  Logical, apply \code{\link[base]{sum}}?
  If \code{FALSE} then an \eqn{n}-vector or
  \eqn{n}-row matrix (with the number of responses as
  the number of columns) is returned.
  Each element is the contribution to the log-likelihood.
  }
  \item{\dots}{
  Currently unused.
  In the future:
  other possible arguments fed into
  \code{logLik} in order to compute the log-likelihood.
  }
}
\details{
  By default, this function returns the log-likelihood of the object.
  Thus this code relies on the log-likelihood being defined,
  and computed, for the object.
}
\value{
  Returns the log-likelihood of the object.
  If \code{summation = FALSE} then an \eqn{n}-vector or
  \eqn{n}-row matrix (with the number of responses as
  the number of columns) is returned.
  Each element is the contribution to the log-likelihood.
  The prior weights are assimilated within the answer.
}
\author{T. W. Yee. }
\note{
  Not all \pkg{VGAM} family functions currently have the
  \code{summation} argument implemented.
}
%\references{
%
%}
\section{Warning }{
  Not all \pkg{VGAM} family functions have had the
  \code{summation} checked.
}
\seealso{
  VGLMs are described in \code{\link{vglm-class}};
  VGAMs are described in \code{\link{vgam-class}};
  RR-VGLMs are described in \code{\link{rrvglm-class}};
  \code{\link[stats]{AIC}};
  \code{\link{anova.vglm}}.
} \examples{ zdata <- data.frame(x2 = runif(nn <- 50)) zdata <- transform(zdata, Ps01 = logitlink(-0.5 , inverse = TRUE), Ps02 = logitlink( 0.5 , inverse = TRUE), lambda1 = loglink(-0.5 + 2*x2, inverse = TRUE), lambda2 = loglink( 0.5 + 2*x2, inverse = TRUE)) zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01), y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02)) with(zdata, table(y1)) # Eyeball the data with(zdata, table(y2)) fit2 <- vglm(cbind(y1, y2) ~ x2, zipoisson(zero = NULL), data = zdata) logLik(fit2) # Summed over the two responses sum(logLik(fit2, sum = FALSE)) # For checking purposes (ll.matrix <- logLik(fit2, sum = FALSE)) # nn x 2 matrix colSums(ll.matrix) # log-likelihood for each response } \keyword{models} \keyword{regression} % logLik.vlm(object, summation = TRUE, \dots) VGAM/man/genpoisUC.Rd0000644000176200001440000000355213565414527013763 0ustar liggesusers\name{dgenpois} \alias{dgenpois} %\alias{pgenpois} %\alias{qgenpois} %\alias{rgenpois} \title{The Generalized Poisson Distribution} \description{ Density for the Generalized Poisson Distribution. } \usage{ dgenpois(x, lambda = 0, theta, log = FALSE) } \arguments{ \item{x,}{vector of quantiles.} \item{lambda, theta}{ See \code{\link{genpoisson}}. The default value of \code{lambda} corresponds to an ordinary Poisson distribution. } \item{log}{ Logical. If \code{TRUE} then the logarithm of the density is returned. } } \value{ \code{dgenpois} gives the density. The value \code{NaN} is returned for elements not satisfying the parameter restrictions, e.g., if \eqn{\lambda > 1}{lambda > 1}. % \code{pgenpois} gives the distribution function, and % \code{qgenpois} gives the quantile function, and % \code{rgenpois} generates random deviates. } \author{ T. W. Yee } \details{ Most of the background to this function is given in \code{\link{genpoisson}}. Some warnings relevant to this distribution are given there, especially relating to the complicated range of the parameter \code{lambda} about or near \eqn{-1}. Note that numerical round off errors etc. can occur; see below for an example. } %\note{ %} \seealso{ \code{\link{genpoisson}}, \code{\link[stats:Poisson]{dpois}}. } \examples{ sum(dgenpois(0:1000, lambda = -0.5, theta = 2)) # Not perfect... \dontrun{ lambda <- -0.2; theta <- 2; y <- 0:10 proby <- dgenpois(y, lambda = lambda, theta = theta, log = FALSE) plot(y, proby, type = "h", col = "blue", lwd = 2, ylab = "P[Y=y]", main = paste("Y ~ Generalized Poisson(lambda=", lambda, ", theta=", theta, ")", sep = ""), las = 1, sub = "Orange is the Poisson probability function") sum(proby) lines(y + 0.1, dpois(y, theta), type = "h", lwd = 2, col = "orange") } } \keyword{distribution} VGAM/man/lvplot.rrvglm.Rd0000644000176200001440000001641713565414527014723 0ustar liggesusers\name{lvplot.rrvglm} \alias{lvplot.rrvglm} \alias{biplot.rrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Latent Variable Plot for RR-VGLMs } \description{ Produces an \emph{ordination diagram} (also known as a \emph{biplot} or \emph{latent variable plot}) for \emph{reduced-rank vector generalized linear models} (RR-VGLMs). For rank-2 models only, the x- and y-axis are the first and second canonical axes respectively. 
} \usage{
lvplot.rrvglm(object,
              A = TRUE, C = TRUE, scores = FALSE, show.plot = TRUE,
              groups = rep(1, n), gapC = sqrt(sum(par()$cxy^2)),
              scaleA = 1,
              xlab = "Latent Variable 1", ylab = "Latent Variable 2",
              Alabels = if (length(object@misc$predictors.names))
              object@misc$predictors.names else param.names("LP", M),
              Aadj = par()$adj, Acex = par()$cex, Acol = par()$col,
              Apch = NULL,
              Clabels = rownames(Cmat), Cadj = par()$adj,
              Ccex = par()$cex, Ccol = par()$col, Clty = par()$lty,
              Clwd = par()$lwd,
              chull.arg = FALSE, ccex = par()$cex, ccol = par()$col,
              clty = par()$lty, clwd = par()$lwd,
              spch = NULL, scex = par()$cex, scol = par()$col,
              slabels = rownames(x2mat), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{ Object of class \code{"rrvglm"}. }
  \item{A}{ Logical. Allow the plotting of \bold{A}? }
  \item{C}{ Logical. Allow the plotting of \bold{C}?
    If \code{TRUE} then \bold{C} is represented by arrows emanating
    from the origin. }
  \item{scores}{ Logical. Allow the plotting of the \eqn{n} scores?
    The scores are the values of the latent variables for each
    observation. }
  \item{show.plot}{ Logical. Plot it?
    If \code{FALSE}, no plot is produced and the matrix of scores
    (\eqn{n} latent variable values) is returned.
    If \code{TRUE}, the rank of \code{object} need not be 2. }
  \item{groups}{ A vector whose distinct values indicate
    which group the observation belongs to. By default, all the
    observations belong to a single group. Useful for the multinomial
    logit model (see \code{\link{multinomial}}). }
  \item{gapC}{ The gap between the end of the arrow and the text
    labelling of \bold{C}, in latent variable units. }
  \item{scaleA}{ Numerical value that is multiplied by \bold{A},
    so that \bold{C} is divided by this value. }
  \item{xlab}{ Caption for the x-axis. See
    \code{\link[graphics]{par}}. }
  \item{ylab}{ Caption for the y-axis. See
    \code{\link[graphics]{par}}. }
  \item{Alabels}{ Character vector to label \bold{A}.
    Must be of length \eqn{M}. }
  \item{Aadj}{ Justification of text strings for labelling \bold{A}.
    See the \code{adj} argument of \code{\link[graphics]{par}}. }
  \item{Acex}{ Numeric. Character expansion of the labelling of
    \bold{A}.
    See the \code{cex} argument of \code{\link[graphics]{par}}. }
  \item{Acol}{ Color of the labelling of \bold{A}.
    See the \code{col} argument of \code{\link[graphics]{par}}. }
  \item{Apch}{ Either an integer specifying a symbol or a single
    character to be used as the default in plotting points.
    See \code{\link[graphics]{par}}.
    The \code{pch} argument can be of length \eqn{M},
    the number of species. }
  \item{Clabels}{ Character vector to label \bold{C}.
    Must be of length \eqn{p2}. }
  \item{Cadj}{ Justification of text strings for labelling \bold{C}.
    See the \code{adj} argument of \code{\link[graphics]{par}}. }
  \item{Ccex}{ Numeric. Character expansion of the labelling of
    \bold{C}.
    See the \code{cex} argument of \code{\link[graphics]{par}}. }
  \item{Ccol}{ Line color of the arrows representing \bold{C}.
    See the \code{col} argument of \code{\link[graphics]{par}}. }
  \item{Clty}{ Line type of the arrows representing \bold{C}.
    See the \code{lty} argument of \code{\link[graphics]{par}}. }
  \item{Clwd}{ Line width of the arrows representing \bold{C}.
    See the \code{lwd} argument of \code{\link[graphics]{par}}. }
  \item{chull.arg}{ Logical. Plot the convex hull of the scores?
    This is done for each group (see the \code{groups} argument). }
  \item{ccex}{ Numeric.
Character expansion of the labelling of the convex hull.
See the \code{cex} argument of \code{\link[graphics]{par}}. } \item{ccol}{ Line color of the convex hull. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{clty}{ Line type of the convex hull. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{clwd}{ Line width of the convex hull. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{spch}{ Either an integer specifying a symbol or a single character to be used as the default in plotting points. See \code{\link[graphics]{par}}. The \code{spch} argument can be of length \eqn{M}, the number of species. } \item{scex}{ Numeric. Character expansion of the labelling of the scores. See the \code{cex} argument of \code{\link[graphics]{par}}. } \item{scol}{ Line color of the arrows representing \bold{C}. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{slabels}{ Character vector to label the scores. Must be of length \eqn{n}. } \item{\dots}{ Arguments passed into the \code{plot} function when setting up the entire plot. Useful arguments here include \code{xlim} and \code{ylim}. } } \details{ For RR-VGLMs, a \emph{biplot} and a \emph{latent variable} plot coincide. In general, many of the arguments starting with ``A'' refer to \bold{A} (of length \eqn{M}), ``C'' to \bold{C} (of length \eqn{p2}), ``c'' to the convex hull (of length \code{length(unique(groups))}), and ``s'' to scores (of length \eqn{n}). As the result is a biplot, its interpretation is based on the inner product. } \value{ The matrix of scores (\eqn{n} latent variable values) is returned regardless of whether a plot was produced or not. } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{ Thomas W. Yee } \note{ % Further work to be done: This function could be hooked up % to the normalization code of \code{\link{rrvglm}} to allow uncorrelated % latent variables etc. The functions \code{\link{lvplot.rrvglm}} and \code{\link{biplot.rrvglm}} are equivalent. In the example below the predictor variables are centered, which is a good idea. } \seealso{ \code{\link{lvplot}}, \code{\link[graphics]{par}}, \code{\link{rrvglm}}, \code{\link{Coef.rrvglm}}, \code{\link{rrvglm.control}}. 
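%
% A commented sketch of the inner-product interpretation mentioned in
% Details; it assumes a rank-2 fitted \code{"rrvglm"} object called
% \code{fit} and uses the slots of \code{\link{Coef.rrvglm}}:
% Ahat <- Coef(fit)@A    # M x 2
% Chat <- Coef(fit)@C    # p2 x 2
% Ahat %*% t(Chat)       # Reduced-rank regression coefficients of x2
%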
} \examples{
nn <- nrow(pneumo)  # x1, x2 and x3 are some unrelated covariates
pneumo <- transform(pneumo, slet = scale(log(exposure.time)),
                    x1 = rnorm(nn), x2 = rnorm(nn), x3 = rnorm(nn))
fit <- rrvglm(cbind(normal, mild, severe) ~ slet + x1 + x2 + x3,
              multinomial, data = pneumo, Rank = 2,
              Corner = FALSE, Uncorrel = TRUE)
\dontrun{
lvplot(fit, chull = TRUE, scores = TRUE, clty = 2, ccol = "blue",
       scol = "red", Ccol = "darkgreen", Clwd = 2, Ccex = 2,
       main = "Biplot of some fictional data")
}
}
\keyword{models}
\keyword{regression}
\keyword{graphs}
% pneumo$slet = scale(log(pneumo$exposure.time))
VGAM/man/summarypvgam.Rd0000644000176200001440000000507213565414527014616 0ustar liggesusers% 20160804; Adapted from summary.vgam.Rd
\name{summarypvgam}
\alias{summarypvgam}
\alias{show.summary.pvgam}
\title{Summarizing Penalized Vector Generalized Additive Model Fits}
\usage{
summarypvgam(object, dispersion = NULL, digits = options()$digits - 2,
             presid = TRUE)
\method{show}{summary.pvgam}(x, quote = TRUE, prefix = "",
           digits = options()$digits - 2,
           signif.stars = getOption("show.signif.stars"))
}
\arguments{
  \item{object}{an object of class \code{"pvgam"},
    which is the result of a call to \code{\link{vgam}} with at least
    one \code{\link{sm.os}} or \code{\link{sm.ps}} term.
  }
  \item{x}{an object of class \code{"summary.pvgam"},
    which is the result of a call to \code{summarypvgam()}.
  }
  \item{dispersion, digits, presid}{
    See \code{\link{summaryvglm}}.
  }
  \item{quote, prefix, signif.stars}{
    See \code{\link{summaryvglm}}.
  }
}
\description{
  These functions are all \code{\link{methods}} for class
  \code{"pvgam"} or \code{summary.pvgam} objects.
}
\details{
  This methods function reports a summary more similar to
  \code{\link[mgcv]{summary.gam}} from \pkg{mgcv} than to
  \code{summary.gam()} from \pkg{gam}.
  It applies to G2-VGAMs using \code{\link{sm.os}} and O-splines, else
  \code{\link{sm.ps}} and P-splines.
  In particular, the hypothesis test for whether each
  \code{\link{sm.os}} or \code{\link{sm.ps}} term can be deleted
  follows \code{\link[mgcv]{summary.gam}} quite closely.
  The p-values from this type of test tend to be biased downwards (too
  small) and correspond to \code{p.type = 5}.
  It is hoped that improved p-values will be implemented in the near
  future, somewhat like the default of \code{\link[mgcv]{summary.gam}}.
  This methods function was adapted from
  \code{\link[mgcv]{summary.gam}}.
}
\value{
  \code{summarypvgam} returns an object of class
  \code{"summary.pvgam"};
  see \code{\link{summary.pvgam-class}}.
}
\section{Warning }{
  See \code{\link{sm.os}}.
}
\seealso{
  \code{\link{vgam}},
  \code{\link{summaryvgam}},
  \code{\link{summary.pvgam-class}},
  \code{\link{sm.os}},
  \code{\link{sm.ps}},
  \code{\link[stats]{summary.glm}},
  \code{\link[stats]{summary.lm}},
  \code{\link[mgcv]{summary.gam}} from \pkg{mgcv},  % A core R package
  \code{\link{summaryvgam}} for G1-VGAMs.
% \code{\link[gam]{summary.gam}}.  % May not be installed.
}
\examples{
hfit2 <- vgam(agaaus ~ sm.os(altitude), binomialff, data = hunua)
coef(hfit2, matrix = TRUE)
summary(hfit2)
}
\keyword{models}
\keyword{regression}
% summary(hfit2)@post$s.table  # For sm.ps() terms.
VGAM/man/rigff.Rd0000644000176200001440000000256213565414527013164 0ustar liggesusers\name{rigff}
\alias{rigff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Reciprocal Inverse Gaussian Distribution }
\description{
  Estimation of the parameters of a reciprocal inverse Gaussian
  distribution.
} \usage{ rigff(lmu = "identitylink", llambda = "loglink", imu = NULL, ilambda = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu, llambda}{ Link functions for \code{mu} and \code{lambda}. See \code{\link{Links}} for more choices. } \item{imu, ilambda}{ Initial values for \code{mu} and \code{lambda}. A \code{NULL} means a value is computed internally. } } \details{ See Jorgensen (1997) for details. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Jorgensen, B. (1997) \emph{The Theory of Dispersion Models}. London: Chapman & Hall } \author{ T. W. Yee } \note{ This distribution is potentially useful for dispersion modelling. } \seealso{ \code{\link{simplex}}. } \examples{ rdata <- data.frame(y = rchisq(n = 100, df = 14)) # Not 'proper' data!! fit <- vglm(y ~ 1, rigff, data = rdata, trace = TRUE) fit <- vglm(y ~ 1, rigff, data = rdata, trace = TRUE, eps = 1e-9, crit = "coef") summary(fit) } \keyword{models} \keyword{regression} VGAM/man/summaryvglm.Rd0000644000176200001440000002176113565414527014454 0ustar liggesusers% Adapted from file src/library/stats/man/summary.glm.Rd % Part of the R package, http://www.R-project.org % Copyright 1995-2013 R Core Team % Distributed under GPL 2 or later \name{summaryvglm} \alias{summaryvglm} \alias{show.summary.vglm} \title{Summarizing Vector Generalized Linear Model Fits} \usage{ summaryvglm(object, correlation = FALSE, dispersion = NULL, digits = NULL, presid = TRUE, HDEtest = TRUE, hde.NA = TRUE, threshold.hde = 0.001, signif.stars = getOption("show.signif.stars"), nopredictors = FALSE, lrt0.arg = FALSE, score0.arg = FALSE, wald0.arg = FALSE, values0 = 0, subset = NULL, omit1s = TRUE, ...) \method{show}{summary.vglm}(x, digits = max(3L, getOption("digits") - 3L), quote = TRUE, prefix = "", presid = TRUE, HDEtest = TRUE, hde.NA = TRUE, threshold.hde = 0.001, signif.stars = NULL, nopredictors = NULL, top.half.only = FALSE, ...) } \arguments{ \item{object}{an object of class \code{"vglm"}, usually, a result of a call to \code{\link{vglm}}.} \item{x}{an object of class \code{"summary.vglm"}, usually, a result of a call to \code{summaryvglm()}.} \item{dispersion}{ used mainly for GLMs. See \code{\link[stats]{summary.glm}}. } \item{correlation}{logical; if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed.} \item{digits}{the number of significant digits to use when printing. } % \item{symbolic.cor}{logical; if \code{TRUE}, print the correlations in % a symbolic form (see \code{\link{symnum}}) rather than as numbers.} \item{signif.stars}{logical; if \code{TRUE}, \sQuote{significance stars} are printed for each coefficient. } % \item{\dots}{further arguments passed to or from other methods.} \item{presid}{Pearson residuals; print out some summary statistics of these? } \item{HDEtest}{logical; if \code{TRUE} (the default) then a test for the HDE is performed, else all arguments related to the HDE are ignored. } \item{hde.NA}{logical; if a test for the Hauck-Donner effect is done (for each coefficient) and it is affirmative should that Wald test p-value be replaced by an \code{NA}? The default is to do so. Setting \code{hde.NA = FALSE} will print the p-value even though it will be biased upwards. Also see argument \code{threshold.hde}. } \item{threshold.hde}{numeric; used if \code{hde.NA = TRUE} and is present for some coefficients. 
Only p-values greater than this argument will be replaced by an
\code{NA}, the reason being that small p-values will already be
statistically significant.
Hence setting \code{threshold.hde = 0} will print out an \code{NA} if
the HDE is present.
}
\item{quote}{ Fed into \code{print()}. }
\item{nopredictors}{ logical; if \code{TRUE} the names of the linear
predictors are not printed out. The default is that they are. }
\item{lrt0.arg, score0.arg, wald0.arg}{
Logical.
If \code{lrt0.arg = TRUE} then the other arguments are passed into
\code{\link{lrt.stat.vlm}} and the equivalent of the so-called Wald
table is outputted.
Similarly, if \code{score0.arg = TRUE} then the other arguments are
passed into \code{\link{score.stat.vlm}} and the equivalent of the
so-called Wald table is outputted.
Similarly, if \code{wald0.arg = TRUE} then the other arguments are
passed into \code{\link{wald.stat.vlm}} and the Wald table
corresponding to that is outputted.
See details below.
Setting any of these will result in further IRLS iterations being
performed, and therefore may be computationally expensive.
}
\item{values0, subset, omit1s}{
These arguments are used if any of the \code{lrt0.arg},
\code{score0.arg}, \code{wald0.arg} arguments are used.
They are passed into the appropriate function,
such as \code{\link{wald.stat.vlm}}.
}
\item{top.half.only}{ logical; if \code{TRUE} then only print out the
top half of the usual output. Used for P-VGAMs. }
\item{prefix}{ Not used. }
\item{\ldots}{ Not used. }
}
\description{
  These functions are all \code{\link{methods}} for class \code{vglm}
  or \code{summary.vglm} objects.
}
\details{
  Originally, \code{summaryvglm()} was written to be very similar to
  \code{\link[stats]{summary.glm}}; however, quite a few more options
  are now available.
  By default, \code{show.summary.vglm()} tries to be smart about
  formatting the coefficients, standard errors, etc. and additionally
  gives \sQuote{significance stars} if \code{signif.stars} is
  \code{TRUE}.
  The \code{coefficients} component of the result gives the estimated
  coefficients and their estimated standard errors, together with their
  ratio.
  This third column is labelled \code{z value} regardless of whether
  the dispersion is estimated or known (or fixed by the family).
  A fourth column gives the two-tailed p-value corresponding to the z
  ratio based on a Normal reference distribution.
% (It is possible that the dispersion is
% not known and there are no residual degrees of freedom from which to
% estimate it. In that case the estimate is \code{NaN}.)
%
%
% In general, the t distribution is not used, but the normal
% distribution is.
% Aliased coefficients are omitted in the returned object but restored
% by the \code{print} method.
  Correlations are printed to two decimal places (or symbolically): to
  see the actual correlations print \code{summary(object)@correlation}
  directly.
% The dispersion of a GLM is not used in the fitting process, but it is
% needed to find standard errors.
% If \code{dispersion} is not supplied or \code{NULL},
% the dispersion is taken as \code{1} for the \code{binomial} and
% \code{Poisson} families, and otherwise estimated by the residual
% Chisquared statistic (calculated from cases with non-zero weights)
% divided by the residual degrees of freedom.
% \code{summary} can be used with Gaussian \code{glm} fits to handle the
% case of a linear regression with known error variance, something not
% handled by \code{\link{summary.lm}}.
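%
% A commented illustration of inspecting the correlations just
% mentioned (here \code{fit} denotes any fitted \code{\link{vglm}}
% object):
% ss <- summaryvglm(fit, correlation = TRUE)
% ss@correlation    # The actual correlation matrix, unrounded
%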
The Hauck-Donner effect (HDE) is tested for almost all models; see \code{\link{hdeff.vglm}} for details. Arguments \code{hde.NA} and \code{threshold.hde} here are meant to give some control of the output if this aberration of the Wald statistic occurs (so that the p-value is biased upwards). If the HDE is present then using \code{\link{lrt.stat.vlm}} to get a more accurate p-value is a good alternative as p-values based on the likelihood ratio test (LRT) tend to be more accurate than Wald tests and do not suffer from the HDE. Alternatively, if the HDE is present then using \code{wald0.arg = TRUE} will compute Wald statistics that are HDE-free; see \code{\link{wald.stat}}. The arguments \code{lrt0.arg} and \code{score0.arg} enable the so-called Wald table to be replaced by the equivalent LRT and Rao score test table; see \code{\link{lrt.stat.vlm}}, \code{\link{score.stat}}. Further IRLS iterations are performed for both of these, hence the computational cost might be significant. % 20180201 It is possible for programmers to write a methods function to print out extra quantities when \code{summary(vglmObject)} is called. The generic function is \code{summaryvglmS4VGAM()}, and one can use the S4 function \code{\link[methods]{setMethod}} to compute the quantities needed. Also needed is the generic function is \code{showsummaryvglmS4VGAM()} to actually print the quantities out. % 20151215 } \value{ \code{summaryvglm} returns an object of class \code{"summary.vglm"}; see \code{\link{summary.vglm-class}}. } \author{ T. W. Yee. } \section{Warning }{ Currently the SE column is deleted when \code{lrt0 = TRUE} because SEs are not so meaningful with the LRT. In the future an SE column may be inserted (with \code{NA} values) so that it has 4-column output like the other tests. In the meantime, the columns of this matrix should be accessed by name and not number. } \seealso{ \code{\link{vglm}}, \code{\link{confintvglm}}, \code{\link{vcovvlm}}, \code{\link[stats]{summary.glm}}, \code{\link[stats]{summary.lm}}, \code{\link[base]{summary}}, \code{\link{hdeff.vglm}}, \code{\link{lrt.stat.vlm}}, \code{\link{score.stat}}, \code{\link{wald.stat}}. } \examples{ ## For examples see example(glm) pneumo <- transform(pneumo, let = log(exposure.time)) (afit <- vglm(cbind(normal, mild, severe) ~ let, acat, data = pneumo)) coef(afit, matrix = TRUE) summary(afit) # Might suffer from the Hauck-Donner effect coef(summary(afit)) summary(afit, lrt0 = TRUE, score0 = TRUE, wald0 = TRUE) } \keyword{models} \keyword{regression} % yettodo: add argument \code{score0.arg = FALSE} % yettodo: add argument \code{lrt0.arg = FALSE} %\method{summary}{vglm}(object, correlation = FALSE, % dispersion = NULL, digits = NULL, % presid = TRUE, % signif.stars = getOption("show.signif.stars")) VGAM/man/gengammaUC.Rd0000644000176200001440000000542313565414527014072 0ustar liggesusers\name{gengammaUC} \alias{gengammaUC} \alias{dgengamma.stacy} \alias{pgengamma.stacy} \alias{qgengamma.stacy} \alias{rgengamma.stacy} \title{The Generalized Gamma Distribution } \description{ Density, distribution function, quantile function and random generation for the generalized gamma distribution with scale parameter \code{scale}, and parameters \code{d} and \code{k}. 
} \usage{
dgengamma.stacy(x, scale = 1, d, k, log = FALSE)
pgengamma.stacy(q, scale = 1, d, k,
                lower.tail = TRUE, log.p = FALSE)
qgengamma.stacy(p, scale = 1, d, k,
                lower.tail = TRUE, log.p = FALSE)
rgengamma.stacy(n, scale = 1, d, k)
}
\arguments{
  \item{x, q}{vector of quantiles.}
  \item{p}{vector of probabilities.}
  \item{n}{number of observations.
    Same as in \code{\link[stats]{runif}}. }
  \item{scale}{the (positive) scale parameter \eqn{b}.}
  \item{d, k}{the (positive) parameters \eqn{d} and \eqn{k}.
    Both can be thought of as shape parameters, where \eqn{d} is of
    the Weibull-type and \eqn{k} is of the gamma-type. }
  \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the
    density is returned. }
  \item{lower.tail, log.p}{
    Same meaning as in \code{\link[stats:Normal]{pnorm}} or
    \code{\link[stats:Normal]{qnorm}}. }
}
\value{
  \code{dgengamma.stacy} gives the density,
  \code{pgengamma.stacy} gives the distribution function,
  \code{qgengamma.stacy} gives the quantile function, and
  \code{rgengamma.stacy} generates random deviates.
}
\references{
  Stacy, E. W. and Mihram, G. A. (1965)
  Parameter estimation for a generalized gamma distribution.
  \emph{Technometrics}, \bold{7}, 349--358.
}
\author{ T. W. Yee and Kai Huang }
\details{
  See \code{\link{gengamma.stacy}}, the \pkg{VGAM} family function for
  estimating the generalized gamma distribution by maximum likelihood
  estimation, for formulae and other details.
  Apart from \code{n}, all the above arguments may be vectors and are
  recycled to the appropriate length if necessary.
}
% \note{
% }
\seealso{
  \code{\link{gengamma.stacy}}.
}
\examples{
\dontrun{
x <- seq(0, 14, by = 0.01); d <- 1.5; Scale <- 2; k <- 6
plot(x, dgengamma.stacy(x, Scale, d = d, k = k), type = "l",
     col = "blue", ylim = 0:1,
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
abline(h = 0, col = "blue", lty = 2)
lines(qgengamma.stacy(seq(0.05, 0.95, by = 0.05), Scale, d = d, k = k),
      dgengamma.stacy(qgengamma.stacy(seq(0.05, 0.95, by = 0.05),
                                      Scale, d = d, k = k),
                      Scale, d = d, k = k),
      col = "purple", lty = 3, type = "h")
lines(x, pgengamma.stacy(x, Scale, d = d, k = k), col = "orange")
abline(h = 0, lty = 2)
}
}
\keyword{distribution}
VGAM/man/oapospoisson.Rd0000644000176200001440000000666113565414527014623 0ustar liggesusers\name{oapospoisson}
\alias{oapospoisson}
%\alias{oapospoisff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ One-Altered Positive-Poisson Distribution }
\description{
  Fits a one-altered positive-Poisson distribution based on a
  conditional model involving a Bernoulli distribution and a
  1-truncated positive-Poisson distribution.
}
\usage{
oapospoisson(lpobs1 = "logitlink", llambda = "loglink",
             type.fitted = c("mean", "lambda", "pobs1", "onempobs1"),
             ipobs1 = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lpobs1}{
    Link function for the parameter \eqn{p_1}{pobs1} or
    \eqn{\phi}{phi}, called \code{pobs1} or \code{phi} here.
    See \code{\link{Links}} for more choices.
  }
  \item{llambda}{
    See \code{\link{pospoisson}} for details.
  }
  \item{type.fitted}{
    See \code{\link{CommonVGAMffArguments}} and
    \code{\link{fittedvlm}} for information.
  }
  \item{ipobs1, zero}{
    See \code{\link{CommonVGAMffArguments}} for information.
  }
}
\details{
  The response \eqn{Y} is one with probability \eqn{p_1}{pobs1},
  or \eqn{Y} has a 1-truncated positive-Poisson distribution with
  probability \eqn{1-p_1}{1-pobs1}.
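%
% The conditional construction described above can be mimicked
% directly; a commented sketch (arbitrary parameter values) using
% \code{\link{rotpospois}} for the 1-truncated part:
% nn <- 10000; pobs1 <- 0.3; lambda <- 2
% ones <- rbinom(nn, size = 1, prob = pobs1)
% y <- ifelse(ones == 1, 1, rotpospois(nn, lambda))
% mean(y == 1)    # Should be close to pobs1
%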
Thus \eqn{0 < p_1 < 1}{0 < pobs1 < 1}, which is modelled as a function of the covariates. The one-altered positive-Poisson distribution differs from the one-inflated positive-Poisson distribution in that the former has ones coming from one source, whereas the latter has ones coming from the positive-Poisson distribution too. The one-inflated positive-Poisson distribution is implemented in the \pkg{VGAM} package. Some people call the one-altered positive-Poisson a \emph{hurdle} model. The input can be a matrix (multiple responses). By default, the two linear/additive predictors of \code{oapospoisson} are \eqn{(logit(\phi), log(\lambda))^T}{(logit(phi), log(lambda))^T}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, returns the mean \eqn{\mu}{mu} (default) which is given by \deqn{\mu = \phi + (1-\phi) A}{% mu = phi + (1- phi) A} where \eqn{A} is the mean of the one-truncated positive-Poisson distribution. If \code{type.fitted = "pobs1"} then \eqn{p_1}{pobs1} is returned. } %\references{ % % %} %\section{Warning }{ %} \author{ T. W. Yee } \note{ This family function effectively combines \code{\link{binomialff}} and \code{\link{otpospoisson}} into one family function. } \seealso{ \code{\link{Oapospois}}, \code{\link{pospoisson}}, \code{\link{oipospoisson}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. } \examples{ odata <- data.frame(x2 = runif(nn <- 1000)) odata <- transform(odata, pobs1 = logitlink(-1 + 2*x2, inverse = TRUE), lambda = loglink( 1 + 1*x2, inverse = TRUE)) odata <- transform(odata, y1 = roapospois(nn, lambda = lambda, pobs1 = pobs1), y2 = roapospois(nn, lambda = lambda, pobs1 = pobs1)) with(odata, table(y1)) ofit <- vglm(cbind(y1, y2) ~ x2, oapospoisson, data = odata, trace = TRUE) coef(ofit, matrix = TRUE) head(fitted(ofit)) head(predict(ofit)) summary(ofit) } \keyword{models} \keyword{regression} VGAM/man/paralogistic.Rd0000644000176200001440000000610713565414527014547 0ustar liggesusers\name{paralogistic} \alias{paralogistic} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Paralogistic Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter paralogistic distribution. } \usage{ paralogistic(lscale = "loglink", lshape1.a = "loglink", iscale = NULL, ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. % zero = ifelse(lss, -2, -1) \arguments{ \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information. } \item{lshape1.a, lscale}{ Parameter link functions applied to the (positive) parameters \eqn{a} and \code{scale}. See \code{\link{Links}} for more choices. } \item{iscale, ishape1.a, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. For \code{imethod = 2} a good initial value for \code{ishape1.a} is needed to obtain good estimates for the other parameter. } \item{gscale, gshape1.a}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 2-parameter paralogistic distribution is the 4-parameter generalized beta II distribution with shape parameter \eqn{p=1} and \eqn{a=q}. 
It is the 3-parameter Singh-Maddala distribution with \eqn{a=q}. More details can be found in Kleiber and Kotz (2003). The 2-parameter paralogistic has density \deqn{f(y) = a^2 y^{a-1} / [b^a \{1 + (y/b)^a\}^{1+a}]}{% f(y) = a^2 y^(a-1) / [b^a (1 + (y/b)^a)^(1+a)]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and \eqn{a} is the shape parameter. The mean is \deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(a - 1/a) / \Gamma(a)}{% E(Y) = b gamma(1 + 1/a) gamma(a - 1/a) / gamma(a)} provided \eqn{a > 1}; these are returned as the fitted values. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}. } \seealso{ \code{\link{Paralogistic}}, \code{\link{sinmad}}, \code{\link{genbetaII}}, \code{\link{betaII}}, \code{\link{dagum}}, \code{\link{fisk}}, \code{\link{inv.lomax}}, \code{\link{lomax}}, \code{\link{inv.paralogistic}}. } \examples{ pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), scale = exp(1))) fit <- vglm(y ~ 1, paralogistic(lss = FALSE), data = pdata, trace = TRUE) fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.3, iscale = 5), data = pdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/betaff.Rd0000644000176200001440000001176613565414527013324 0ustar liggesusers\name{betaff} \alias{betaff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Two-parameter Beta Distribution Family Function } \description{ Estimation of the mean and precision parameters of the beta distribution. } \usage{ betaff(A = 0, B = 1, lmu = "logitlink", lphi = "loglink", imu = NULL, iphi = NULL, gprobs.y = ppoints(8), gphi = exp(-3:5)/4, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{A, B}{ Lower and upper limits of the distribution. The defaults correspond to the \emph{standard beta distribution} where the response lies between 0 and 1. } \item{lmu, lphi}{ Link function for the mean and precision parameters. The values \eqn{A} and \eqn{B} are extracted from the \code{min} and \code{max} arguments of \code{\link{extlogitlink}}. Consequently, only \code{\link{extlogitlink}} is allowed. % See below for more details. % See \code{\link{Links}} for more choices. } \item{imu, iphi}{ Optional initial value for the mean and precision parameters respectively. A \code{NULL} value means a value is obtained in the \code{initialize} slot. } \item{gprobs.y, gphi, zero}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The two-parameter beta distribution can be written \eqn{f(y) =} \deqn{(y-A)^{\mu_1 \phi-1} \times (B-y)^{(1-\mu_1) \phi-1} / [beta(\mu_1 \phi,(1-\mu_1) \phi) \times (B-A)^{\phi-1}]}{% (y-A)^(mu1*phi-1) * (B-y)^((1-mu1)*phi-1) / [beta(mu1*phi,(1-mu1)*phi) * (B-A)^(phi-1)]} for \eqn{A < y < B}, and \eqn{beta(.,.)} is the beta function (see \code{\link[base:Special]{beta}}). The parameter \eqn{\mu_1}{mu1} satisfies \eqn{\mu_1 = (\mu - A) / (B-A)}{mu1 = (mu - A) / (B-A)} where \eqn{\mu}{mu} is the mean of \eqn{Y}. 
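%
% A commented numerical check (arbitrary values) of the mean
% relationship stated in the next sentence:
% A <- 5; B <- 13; mu1 <- 0.3; phi <- 4
% yy <- A + (B - A) * rbeta(1e5, shape1 = mu1 * phi,
%                           shape2 = (1 - mu1) * phi)
% mean(yy)    # Should be close to A + (B - A) * mu1 = 7.4
%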
That is, \eqn{\mu_1}{mu1} is the mean of a standard beta distribution:
\eqn{E(Y) = A + (B-A) \times \mu_1}{E(Y) = A + (B-A)*mu1},
and these are the fitted values of the object.
Also, \eqn{\phi}{phi} is positive and \eqn{A < \mu < B}{A < mu < B}.
Here, the limits \eqn{A} and \eqn{B} are \emph{known}.

Another parameterization of the beta distribution involving the raw
shape parameters is implemented in \code{\link{betaR}}.

For general \eqn{A} and \eqn{B}, the variance of \eqn{Y} is
\eqn{(B-A)^2 \times \mu_1 \times (1-\mu_1) / (1+\phi)}{(B-A)^2 * mu1 * (1-mu1) / (1+phi)}.
Then \eqn{\phi}{phi} can be interpreted as a \emph{precision} parameter
in the sense that, for fixed \eqn{\mu}{mu}, the larger the value of
\eqn{\phi}{phi}, the smaller the variance of \eqn{Y}.
Also,
\eqn{\mu_1 = shape1/(shape1+shape2)}{mu1=shape1/(shape1+shape2)} and
\eqn{\phi = shape1+shape2}{phi = shape1+shape2}.
Fisher scoring is implemented.
% If \eqn{A} and \eqn{B} are unknown then the \pkg{VGAM} family function
% \code{beta4()} can be used to estimate these too.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
  Ferrari, S. L. P. and Francisco C.-N. (2004)
  Beta regression for modelling rates and proportions.
  \emph{Journal of Applied Statistics}, \bold{31}, 799--815.
% Documentation accompanying the \pkg{VGAM} package at
% \url{https://www.stat.auckland.ac.nz/~yee}
% contains further information and examples.
}
\author{ Thomas W. Yee }
\note{
  The response must have values in the interval (\eqn{A},
  \eqn{B}).
  The user currently needs to manually choose \code{lmu} to match the
  input of arguments \code{A} and \code{B}, e.g., with
  \code{\link{extlogitlink}}; see the example below.
}
\seealso{
  \code{\link{betaR}},
% \code{\link{zoibetaR}},
  \code{\link[stats:Beta]{Beta}},
  \code{\link{dzoabeta}},
  \code{\link{genbetaII}},
  \code{\link{betaII}},
  \code{\link{betabinomialff}},
  \code{\link{betageometric}},
  \code{\link{betaprime}},
  \code{\link{rbetageom}},
  \code{\link{rbetanorm}},
  \code{\link{kumar}},
  \code{\link{extlogitlink}},
  \code{\link{simulate.vlm}}.
}
\examples{
bdata <- data.frame(y = rbeta(nn <- 1000, shape1 = exp(0),
                              shape2 = exp(1)))
fit1 <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
Coef(fit1)  # Useful for intercept-only models

# General A and B, and with a covariate
bdata <- transform(bdata, x2 = runif(nn))
bdata <- transform(bdata, mu   = logitlink(0.5 - x2, inverse = TRUE),
                          prec =   exp(3.0 + x2))  # prec == phi
bdata <- transform(bdata, shape2 = prec * (1 - mu),
                          shape1 = mu * prec)
bdata <- transform(bdata, y = rbeta(nn, shape1 = shape1,
                                    shape2 = shape2))
bdata <- transform(bdata, Y = 5 + 8 * y)  # From 5 to 13, not 0 to 1
fit <- vglm(Y ~ x2, data = bdata, trace = TRUE,
            betaff(A = 5, B = 13,
                   lmu = extlogitlink(min = 5, max = 13)))
coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
% imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
VGAM/man/gordlink.Rd0000644000176200001440000001073413565414527013700 0ustar liggesusers\name{gordlink}
%\name{golf}
\alias{gordlink}
% \alias{golf}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Gamma-Ordinal Link Function }
\description{
  Computes the gamma-ordinal transformation, including its inverse and
  the first two derivatives.
} \usage{ gordlink(theta, lambda = 1, cutpoint = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{lambda, cutpoint}{ The former is the shape parameter in \code{\link{gamma2}}. \code{cutpoint} is optional; if \code{NULL} then \code{cutpoint} is ignored from the GOLF definition. If given, the cutpoints should be non-negative integers. If \code{gordlink()} is used as the link function in \code{\link{cumulative}} then, if the cutpoints are known, then one should choose \code{reverse = TRUE, parallel = FALSE ~ -1}. If the cutpoints are unknown, then choose \code{reverse = TRUE, parallel = TRUE}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The gamma-ordinal link function (GOLF) can be applied to a parameter lying in the unit interval. Its purpose is to link cumulative probabilities associated with an ordinal response coming from an underlying 2-parameter gamma distribution. See \code{\link{Links}} for general information about \pkg{VGAM} link functions. } \value{ See Yee (2019) for details. } \references{ Yee, T. W. (2019) \emph{Ordinal ordination with normalizing link functions for count data}, (in preparation). } \author{ Thomas W. Yee } \note{ Numerical values of \code{theta} too close to 0 or 1 or out of range result in large positive or negative values, or maybe 0 depending on the arguments. Although measures have been taken to handle cases where \code{theta} is too close to 1 or 0, numerical instabilities may still arise. In terms of the threshold approach with cumulative probabilities for an ordinal response this link function corresponds to the gamma distribution (see \code{\link{gamma2}}) that has been recorded as an ordinal response using known cutpoints. } \section{Warning }{ Prediction may not work on \code{\link{vglm}} or \code{\link{vgam}} etc. objects if this link function is used. } \seealso{ \code{\link{Links}}, \code{\link{gamma2}}, \code{\link{pordlink}}, \code{\link{nbordlink}}, \code{\link{cumulative}}. } \examples{ \dontrun{ gordlink("p", lambda = 1, short = FALSE) gordlink("p", lambda = 1, tag = TRUE) p <- seq(0.02, 0.98, len = 201) y <- gordlink(p, lambda = 1) y. 
<- gordlink(p, lambda = 1, deriv = 1, inverse = TRUE)
max(abs(gordlink(y, lambda = 1, inverse = TRUE) - p))  # Should be 0

#\ dontrun{par(mfrow = c(2, 1), las = 1)
#plot(p, y, type = "l", col = "blue", main = "gordlink()")
#abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
#plot(p, y., type = "l", col = "blue",
#     main = "(Reciprocal of) first GOLF derivative")
#}

# Another example
gdata <- data.frame(x2 = sort(runif(nn <- 1000)))
gdata <- transform(gdata, x3 = runif(nn))
gdata <- transform(gdata, mymu = exp( 3 + 1 * x2 - 2 * x3))
lambda <- 4
gdata <- transform(gdata,
                   y1 = rgamma(nn, shape = lambda, scale = mymu / lambda))
cutpoints <- c(-Inf, 10, 20, Inf)
gdata <- transform(gdata, cuty = Cut(y1, breaks = cutpoints))
#\ dontrun{ par(mfrow = c(1, 1), las = 1)
#with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
with(gdata, table(cuty) / sum(table(cuty)))
fit <- vglm(cuty ~ x2 + x3, cumulative(multiple.responses = TRUE,
            reverse = TRUE, parallel = FALSE ~ -1,
            link = gordlink(cutpoint = cutpoints[2:3], lambda = lambda)),
            data = gdata, trace = TRUE)
head(depvar(fit))
head(fitted(fit))
head(predict(fit))
coef(fit)
coef(fit, matrix = TRUE)
constraints(fit)
fit@misc
}
}
\keyword{math}
\keyword{models}
\keyword{regression}

% # Another example
% nn <- 1000
% x2 <- sort(runif(nn))
% x3 <- runif(nn)
% shape <- exp(0.0)
% mymu <- exp( 3 + 1 * x2 - 2 * x3)
% y1 <- rnbinom(nn, mu=mymu, size=shape)
% cuty <- Cut(y1)
% fit <- vglm(cuty ~ x2 + x3, fam = cumulative(link = "gordlink", rev = TRUE,
%             multiple.responses = TRUE, parallel = TRUE, earg = list(lambda=shape)))
% coef(fit)
% fit <- vglm(cuty ~ x2 + x3, fam = cumulative(link = "probitlink", rev = TRUE,
%             multiple.responses = TRUE, parallel = TRUE))
% coef(fit, matrix = TRUE)
% coef(fit)
VGAM/man/explogff.Rd0000644000176200001440000000462513565414527013677 0ustar liggesusers\name{explogff}
\alias{explogff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Exponential Logarithmic Distribution Family Function}
\description{
  Estimates the two parameters of the exponential logarithmic
  distribution by maximum likelihood estimation.
}
\usage{
explogff(lscale = "loglink", lshape = "logitlink",
         iscale = NULL, ishape = NULL,
         tol12 = 1e-05, zero = 1, nsimEIM = 400)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lscale, lshape}{
    See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{tol12}{ Numeric.
    Tolerance for testing whether a parameter has value 1 or 2. }
  \item{iscale, ishape, zero, nsimEIM}{
    See \code{\link{CommonVGAMffArguments}}.
  }
}
\details{
  The exponential logarithmic distribution has density function
  \deqn{f(y; c, s) =
        (1/(-\log s)) (((1/c) (1 - s) e^{-y/c}) /
        (1 - (1 - s) e^{-y/c}))}{%
        (1/(-log(s))) * (((1/c) * (1 - s) * e^(-y/c)) /
        (1 - (1 - s) * e^(-y/c)))}
  where \eqn{y > 0}, scale parameter \eqn{c > 0}, and shape parameter
  \eqn{s \in (0, 1)}{0 < s < 1}.
  The mean,
  \eqn{(-polylog(2, 1 - s) c) / \log(s)}{%
       (-polylog(2, 1 - s) * c) / log(s)},
  is \emph{not} returned as the fitted values.
  Note the median is
  \eqn{c \log(1 + \sqrt{s})}{c * log(1 + sqrt(s))}
  and it is \emph{currently} returned as the fitted values.
  Simulated Fisher scoring is implemented.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\references{
  Tahmasabi, R., Sadegh, R. (2008).
  A two-parameter lifetime distribution with decreasing failure rate.
\emph{Computational Statistics and Data Analysis},
\bold{52}, 3889--3901.
}
\author{ J. G. Lauder and T. W. Yee }
\note{
  We define \code{scale} as the reciprocal of the rate parameter
  used by Tahmasabi and Sadegh (2008).

  Yet to do: find a \code{polylog()} function.
}
\seealso{
  \code{\link{dexplog}},
  \code{\link{exponential}}.
}
\examples{
\dontrun{
Scale <- exp(2); shape <- logitlink(-1, inverse = TRUE)
edata <- data.frame(y = rexplog(n = 2000, scale = Scale, shape = shape))
fit <- vglm(y ~ 1, explogff, data = edata, trace = TRUE)
c(with(edata, median(y)), head(fitted(fit), 1))
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/prentice74.Rd0000644000176200001440000000741213565414527014052 0ustar liggesusers\name{prentice74}
\alias{prentice74}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Prentice (1974) Log-gamma Distribution }
\description{
  Estimation of a 3-parameter log-gamma distribution described by
  Prentice (1974).
}
\usage{
prentice74(llocation = "identitylink", lscale = "loglink",
           lshape = "identitylink", ilocation = NULL, iscale = NULL,
           ishape = NULL, imethod = 1,
           glocation.mux = exp((-4:4)/2), gscale.mux = exp((-4:4)/2),
           gshape = qt(ppoints(6), df = 1), probs.y = 0.3,
           zero = c("scale", "shape"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{llocation, lscale, lshape}{
    Parameter link function applied to the
    location parameter \eqn{a},
    positive scale parameter \eqn{b}
    and the shape parameter \eqn{q}, respectively.
    See \code{\link{Links}} for more choices.
  }
  \item{ilocation, iscale}{
    Initial value for \eqn{a} and \eqn{b}, respectively.
    The defaults mean an initial value is determined internally for
    each.
  }
  \item{ishape}{
    Initial value for \eqn{q}.
    If failure to converge occurs, try some other value.
    The default means an initial value is determined internally.
  }
  \item{imethod, zero}{
    See \code{\link{CommonVGAMffArguments}} for information.
%   Can be an integer-valued vector specifying which
%   linear/additive predictors are modelled as intercepts-only.
%   Then the values must be from the set \{1,2,3\}.
  }
  \item{glocation.mux, gscale.mux, gshape, probs.y}{
    See \code{\link{CommonVGAMffArguments}} for information.
  }
}
\details{
  The probability density function is given by
  \deqn{f(y;a,b,q) = |q|\,\exp(w/q^2 - e^w) / (b \, \Gamma(1/q^2)),}{%
        f(y;a,b,q) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)),}
  for shape parameter \eqn{q \ne 0}{q != 0},
  positive scale parameter \eqn{b > 0},
  location parameter \eqn{a},
  and all real \eqn{y}.
  Here, \eqn{w = (y-a)q/b+\psi(1/q^2)}{w = (y-a)*q/b+psi(1/q^2)}
  where \eqn{\psi}{psi} is the digamma function,
  \code{\link[base:Special]{digamma}}.
  The mean of \eqn{Y} is \eqn{a} (returned as the fitted values).
  This is a different parameterization compared to
  \code{\link{lgamma3}}.

  Special cases:
  \eqn{q = 0} is the normal distribution with standard deviation
  \eqn{b},
  \eqn{q = -1} is the extreme value distribution for maxima,
  \eqn{q = 1} is the extreme value distribution for minima (Weibull).
  If \eqn{q > 0} then the distribution is left skewed,
  whereas if \eqn{q < 0} then it is right skewed.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
  Prentice, R. L. (1974)
  A log gamma model and its maximum likelihood estimation.
  \emph{Biometrika}, \bold{61}, 539--544.
%On Maximisation of the Likelihood for the Generalised Gamma Distribution.
%Angela Noufaily & M.C. Jones, %29-Oct-2009, %\url{http://stats-www.open.ac.uk/TechnicalReports/} } \section{Warning }{ The special case \eqn{q = 0} is not handled, therefore estimates of \eqn{q} too close to zero may cause numerical problems. } \author{ T. W. Yee } \note{ The notation used here differs from Prentice (1974): \eqn{\alpha = a}{alpha = a}, \eqn{\sigma = b}{sigma = b}. Fisher scoring is used. } \seealso{ \code{\link{lgamma3}}, \code{\link[base:Special]{lgamma}}, \code{\link{gengamma.stacy}}. } \examples{ pdata <- data.frame(x2 = runif(nn <- 1000)) pdata <- transform(pdata, loc = -1 + 2*x2, Scale = exp(1)) pdata <- transform(pdata, y = rlgamma(nn, loc = loc, scale = Scale, shape = 1)) fit <- vglm(y ~ x2, prentice74(zero = 2:3), data = pdata, trace = TRUE) coef(fit, matrix = TRUE) # Note the coefficients for location } \keyword{models} \keyword{regression} VGAM/man/fff.Rd0000644000176200001440000000625213565414527012630 0ustar liggesusers\name{fff} \alias{fff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ F Distribution Family Function } \description{ Maximum likelihood estimation of the (2-parameter) F distribution. } \usage{ fff(link = "loglink", idf1 = NULL, idf2 = NULL, nsimEIM = 100, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Parameter link function for both parameters. See \code{\link{Links}} for more choices. The default keeps the parameters positive. } \item{idf1, idf2}{ Numeric and positive. Initial value for the parameters. The default is to choose each value internally. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}} for more information. } \item{imethod}{ Initialization method. Either the value 1 or 2. If both fail try setting values for \code{idf1} and \code{idf2}. } % \item{zero}{ % An integer-valued vector specifying which % linear/additive predictors are modelled as intercepts only. % The value must be from the set \{1,2\}, corresponding % respectively to \eqn{df1} and \eqn{df2}. % By default all linear/additive predictors are modelled as % a linear combination of the explanatory variables. % % % } } \details{ The F distribution is named after Fisher and has a density function that has two parameters, called \code{df1} and \code{df2} here. This function treats these degrees of freedom as \emph{positive reals} rather than integers. The mean of the distribution is \eqn{df2/(df2-2)} provided \eqn{df2>2}, and its variance is \eqn{2 df2^2 (df1+df2-2)/(df1 (df2-2)^2 (df2-4))}{2*df2^2*(df1+df2-2)/ (df1*(df2-2)^2*(df2-4))} provided \eqn{df2>4}. The estimated mean is returned as the fitted values. Although the F distribution can be defined to accommodate a non-centrality parameter \code{ncp}, it is assumed zero here. Actually it shouldn't be too difficult to handle any known \code{ncp}; something to do in the short future. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \section{Warning}{ Numerical problems will occur when the estimates of the parameters are too low or too high. } %\note{ % This family function uses the BFGS quasi-Newton update formula for the % working weight matrices. 
Consequently the estimated variance-covariance % matrix may be inaccurate or simply wrong! The standard errors must be % therefore treated with caution; these are computed in functions such % as \code{vcov()} and \code{summary()}. % %} \seealso{ \code{\link[stats:Fdist]{FDist}}. } \examples{ \dontrun{ fdata <- data.frame(x2 = runif(nn <- 2000)) fdata <- transform(fdata, df1 = exp(2+0.5*x2), df2 = exp(2-0.5*x2)) fdata <- transform(fdata, y = rf(nn, df1, df2)) fit <- vglm(y ~ x2, fff, data = fdata, trace = TRUE) coef(fit, matrix = TRUE) } } \keyword{models} \keyword{regression} VGAM/man/trinormal.Rd0000644000176200001440000001004413565414527014070 0ustar liggesusers\name{trinormal} \alias{trinormal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Trivariate Normal Distribution Family Function } \description{ Maximum likelihood estimation of the nine parameters of a trivariate normal distribution. } \usage{ trinormal(zero = c("sd", "rho"), eq.mean = FALSE, eq.sd = FALSE, eq.cor = FALSE, lmean1 = "identitylink", lmean2 = "identitylink", lmean3 = "identitylink", lsd1 = "loglink", lsd2 = "loglink", lsd3 = "loglink", lrho12 = "rhobitlink", lrho23 = "rhobitlink", lrho13 = "rhobitlink", imean1 = NULL, imean2 = NULL, imean3 = NULL, isd1 = NULL, isd2 = NULL, isd3 = NULL, irho12 = NULL, irho23 = NULL, irho13 = NULL, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmean1, lmean2, lmean3, lsd1, lsd2, lsd3}{ Link functions applied to the means and standard deviations. See \code{\link{Links}} for more choices. Being positive quantities, a log link is the default for the standard deviations. } \item{lrho12, lrho23, lrho13}{ Link functions applied to the correlation parameters. See \code{\link{Links}} for more choices. By default the correlation parameters are allowed to have a value between -1 and 1, but that may be problematic when \code{eq.cor = TRUE} because they should have a value between -0.5 and 1. % (zz see below). } \item{imean1, imean2, imean3, isd1, isd2, isd3}{ See \code{\link{CommonVGAMffArguments}} for more information. } \item{irho12, irho23, irho13, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for more information. } \item{eq.mean, eq.sd, eq.cor}{ Logical. Constrain the means or the standard deviations or correlation parameters to be equal? % 20150530; FALSE now; they work separately: % Only one of these arguments may be assigned a value. } } \details{ For the trivariate normal distribution, this fits a linear model (LM) to the means, and by default, the other parameters are intercept-only. The response should be a three-column matrix. The three correlation parameters are prefixed by \code{rho}, and the default gives them values between \eqn{-1} and \eqn{1} however, this may be problematic when the correlation parameters are constrained to be equal, etc.. The fitted means are returned as the fitted values, which is in the form of a three-column matrix. Fisher scoring is implemented. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \section{Warning}{ The default parameterization does not make the estimated variance-covariance matrix positive-definite. 
In order for the variance-covariance matrix to be positive-definite the quantity \code{1 - rho12^2 - rho13^2 - rho23^2 + 2 * rho12 * rho13 * rho23} must be positive, and if \code{eq.cor = TRUE} then this means that the \code{rho}s must be between -0.5 and 1. } %\references{ % %} \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link{uninormal}}, \code{\link{binormal}}, \code{\link{rtrinorm}}. % \code{\link{gaussianff}}, % \code{\link{pnorm2}}, } \examples{ set.seed(123); nn <- 1000 tdata <- data.frame(x2 = runif(nn), x3 = runif(nn)) tdata <- transform(tdata, y1 = rnorm(nn, 1 + 2 * x2), y2 = rnorm(nn, 3 + 4 * x2), y3 = rnorm(nn, 4 + 5 * x2)) fit1 <- vglm(cbind(y1, y2, y3) ~ x2, data = tdata, trinormal(eq.sd = TRUE, eq.cor = TRUE), trace = TRUE) coef(fit1, matrix = TRUE) constraints(fit1) summary(fit1) \dontrun{ # Try this when eq.sd = TRUE, eq.cor = TRUE: fit2 <- vglm(cbind(y1, y2, y3) ~ x2, data = tdata, stepsize = 0.25, trinormal(eq.sd = TRUE, eq.cor = TRUE, lrho12 = extlogitlink(min = -0.5), lrho23 = extlogitlink(min = -0.5), lrho13 = extlogitlink(min = -0.5)), trace = TRUE) coef(fit2, matrix = TRUE) } } \keyword{models} \keyword{regression} VGAM/man/qrrvglm.control.Rd0000644000176200001440000005223613565414527015243 0ustar liggesusers\name{qrrvglm.control} \alias{qrrvglm.control} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Control Function for QRR-VGLMs (CQO) } \description{ Algorithmic constants and parameters for a constrained quadratic ordination (CQO), by fitting a \emph{quadratic reduced-rank vector generalized linear model} (QRR-VGLM), are set using this function. It is the control function for \code{\link{cqo}}. } \usage{ qrrvglm.control(Rank = 1, Bestof = if (length(Cinit)) 1 else 10, checkwz = TRUE, Cinit = NULL, Crow1positive = TRUE, epsilon = 1.0e-06, EqualTolerances = NULL, eq.tolerances = TRUE, Etamat.colmax = 10, FastAlgorithm = TRUE, GradientFunction = TRUE, Hstep = 0.001, isd.latvar = rep_len(c(2, 1, rep_len(0.5, Rank)), Rank), iKvector = 0.1, iShape = 0.1, ITolerances = NULL, I.tolerances = FALSE, maxitl = 40, imethod = 1, Maxit.optim = 250, MUXfactor = rep_len(7, Rank), noRRR = ~ 1, Norrr = NA, optim.maxit = 20, Parscale = if (I.tolerances) 0.001 else 1.0, sd.Cinit = 0.02, SmallNo = 5.0e-13, trace = TRUE, Use.Init.Poisson.QO = TRUE, wzepsilon = .Machine$double.eps^0.75, ...) } %- maybe also `usage' for other objects documented here. \arguments{ In the following, \eqn{R} is the \code{Rank}, \eqn{M} is the number of linear predictors, and \eqn{S} is the number of responses (species). Thus \eqn{M=S} for binomial and Poisson responses, and \eqn{M=2S} for the negative binomial and 2-parameter gamma distributions. \item{Rank}{ The numerical rank \eqn{R} of the model, i.e., the number of ordination axes. Must be an element from the set \{1,2,\ldots,min(\eqn{M},\eqn{p_2}{p2})\} where the vector of explanatory variables \eqn{x} is partitioned into (\eqn{x_1},\eqn{x_2}), which is of dimension \eqn{p_1+p_2}{p1+p2}. The variables making up \eqn{x_1} are given by the terms in the \code{noRRR} argument, and the rest of the terms comprise \eqn{x_2}. } \item{Bestof}{ Integer. The best of \code{Bestof} models fitted is returned. This argument helps guard against local solutions by (hopefully) finding the global solution from many fits. The argument has value 1 if an initial value for \eqn{C} is inputted using \code{Cinit}. 
} \item{checkwz}{ logical indicating whether the diagonal elements of the working weight matrices should be checked to see whether they are sufficiently positive, i.e., greater than \code{wzepsilon}. If not, any values less than \code{wzepsilon} are replaced with this value. } \item{Cinit}{ Optional initial \eqn{C} matrix, which must be a \eqn{p_2}{p2} by \eqn{R} matrix. The default is to apply \code{.Init.Poisson.QO()} to obtain initial values. } \item{Crow1positive}{ Logical vector of length \code{Rank} (recycled if necessary): are the elements of the first row of \eqn{C} positive? For example, if \code{Rank} is 4, then specifying \code{Crow1positive = c(FALSE, TRUE)} will force \eqn{C[1,1]} and \eqn{C[1,3]} to be negative, and \eqn{C[1,2]} and \eqn{C[1,4]} to be positive. This argument allows for a reflection in the ordination axes because the coefficients of the latent variables are unique up to a sign. } \item{epsilon}{ Positive numeric. Used to test for convergence for GLMs fitted in C. Larger values mean a loosening of the convergence criterion. If an error code of 3 is reported, try increasing this value. } \item{eq.tolerances}{ Logical indicating whether each (quadratic) predictor will have equal tolerances. Having \code{eq.tolerances = TRUE} can help avoid numerical problems, especially with binary data. Note that the estimated (common) tolerance matrix may or may not be positive-definite. If it is, then it can be scaled to the \eqn{R} by \eqn{R} identity matrix, i.e., made equivalent to \code{I.tolerances = TRUE}. Setting \code{I.tolerances = TRUE} will \emph{force} a common \eqn{R} by \eqn{R} identity matrix as the tolerance matrix to the data even if it is not appropriate. In general, setting \code{I.tolerances = TRUE} is preferred over \code{eq.tolerances = TRUE} because, if it works, it is much faster and uses less memory. However, \code{I.tolerances = TRUE} requires the environmental variables to be scaled appropriately. See \bold{Details} for more details. } \item{EqualTolerances}{ Defunct argument. Use \code{eq.tolerances} instead. } % \item{Eta.range}{ Numerical vector of length 2 or \code{NULL}. % Gives the lower and upper bounds on the values that can be taken % by the quadratic predictor (i.e., on the eta-scale). % Since \code{FastAlgorithm = TRUE}, this argument should be ignored. % } \item{Etamat.colmax}{ Positive integer, no smaller than \code{Rank}. Controls the amount of memory used by \code{.Init.Poisson.QO()}. It is the maximum number of columns allowed for the pseudo-response and its weights. In general, the larger the value, the better the initial value. Used only if \code{Use.Init.Poisson.QO = TRUE}. } \item{FastAlgorithm}{ Logical. Whether a new fast algorithm is to be used. The fast algorithm results in a large speed increase compared to Yee (2004). Some details of the fast algorithm are found in Appendix A of Yee (2006). Setting \code{FastAlgorithm = FALSE} will give an error. } \item{GradientFunction}{ Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr} is used or not, i.e., to compute gradient values. Used only if \code{FastAlgorithm} is \code{TRUE}. The default value is usually faster on most problems. } \item{Hstep}{ Positive value. Used as the step size in the finite difference approximation to the derivatives by \code{\link[stats]{optim}}. % Used only if \code{FastAlgorithm} is \code{TRUE}. } \item{isd.latvar}{ Initial standard deviations for the latent variables (site scores). Numeric, positive and of length \eqn{R} (recycled if necessary).
This argument is used only if \code{I.tolerances = TRUE}. Used by \code{.Init.Poisson.QO()} to obtain initial values for the constrained coefficients \eqn{C} adjusted to a reasonable value. It adjusts the spread of the site scores relative to a common species tolerance of 1 for each ordination axis. A value between 0.5 and 10 is recommended; a value such as 10 means that the range of the environmental space is very large relative to the niche width of the species. The successive values should decrease because the first ordination axis should have the most spread of site scores, followed by the second ordination axis, etc. } \item{iKvector, iShape}{ Numeric, recycled to length \eqn{S} if necessary. Initial values used for estimating the positive \eqn{k} and \eqn{\lambda}{lambda} parameters of the negative binomial and 2-parameter gamma distributions, respectively. For further information see \code{\link{negbinomial}} and \code{\link{gamma2}}. These arguments override the \code{ik} and \code{ishape} arguments in \code{\link{negbinomial}} and \code{\link{gamma2}}. } \item{I.tolerances}{ Logical. If \code{TRUE} then the (common) tolerance matrix is the \eqn{R} by \eqn{R} identity matrix by definition. Note that having \code{I.tolerances = TRUE} implies \code{eq.tolerances = TRUE}, but not vice versa. Internally, the quadratic terms will be treated as offsets (in GLM jargon) and so the models can potentially be fitted very efficiently. \emph{However, it is a very good idea to center and scale all numerical variables in the \eqn{x_2} vector}. See \bold{Details} for more details. The success of \code{I.tolerances = TRUE} often depends on suitable values for \code{isd.latvar} and/or \code{MUXfactor}. } \item{ITolerances}{ Defunct argument. Use \code{I.tolerances} instead. } \item{maxitl}{ Maximum number of times the optimizer is called or restarted. Most users should ignore this argument. } \item{imethod}{ Method of initialization. A positive integer 1 or 2 or 3 etc. depending on the \pkg{VGAM} family function. Currently it is used for \code{\link{negbinomial}} and \code{\link{gamma2}} only, and used within the C code. } \item{Maxit.optim}{ Positive integer. Number of iterations given to the function \code{\link[stats]{optim}} at each of the \code{optim.maxit} iterations. } \item{MUXfactor}{ Multiplication factor for detecting large offset values. Numeric, positive and of length \eqn{R} (recycled if necessary). This argument is used only if \code{I.tolerances = TRUE}. Offsets are \eqn{-0.5} multiplied by the sum of the squares of all \eqn{R} latent variable values. If the latent variable values are too large then this will result in numerical problems. By too large, it is meant that the standard deviations of the latent variable values are greater than \code{MUXfactor[r] * isd.latvar[r]} for \code{r=1:Rank} (this is why centering and scaling all the numerical predictor variables in \eqn{x_2} is recommended). A value of about 3 or 4 is recommended. If failure to converge occurs, try a slightly lower value. } \item{optim.maxit}{ Positive integer. Number of times \code{\link[stats]{optim}} is invoked. At iteration \code{i}, the \code{i}th value of \code{Maxit.optim} is fed into \code{\link[stats]{optim}}. } \item{noRRR}{ Formula giving terms that are \emph{not} to be included in the reduced-rank regression (or formation of the latent variables), i.e., those belonging to \eqn{x_1}. Those variables which do not make up the latent variable (reduced-rank regression) correspond to the \eqn{B_1}{B_1} matrix.
The default is to omit the intercept term from the latent variables. } \item{Norrr}{ Defunct. Please use \code{noRRR}. Use of \code{Norrr} will become an error soon. } \item{Parscale}{ Numerical and positive-valued vector of length \eqn{C} (recycled if necessary). Passed into \code{optim(..., control = list(parscale = Parscale))}; the elements of \eqn{C} become \eqn{C} / \code{Parscale}. Setting \code{I.tolerances = TRUE} results in line searches that are very large; therefore, \eqn{C} has to be scaled accordingly to avoid large step sizes. See \bold{Details} for more information. It's probably best to leave this argument alone. } \item{sd.Cinit}{ Standard deviation of the initial values for the elements of \eqn{C}. These are normally distributed with mean zero. This argument is used only if \code{Use.Init.Poisson.QO = FALSE} and \eqn{C} is not inputted using \code{Cinit}. } \item{trace}{ Logical indicating if output should be produced for each iteration. The default is \code{TRUE} because the calculations are numerically intensive, meaning it may take a long time, so that the user might think the computer has locked up if \code{trace = FALSE}. } % \item{Kinit}{ Initial values for the index parameters \code{k} in the % negative binomial distribution (one per species). % In general, a smaller number is preferred over a larger number. % The vector is recycled to the number of responses (species). % The argument is currently not used. % } % \item{Dzero}{ Integer vector specifying which squared terms % are to be zeroed. These linear predictors will correspond to % a RR-VGLM. % The values must be elements from the set \{1,2,\ldots,\eqn{M}\}. % Used only if \code{Quadratic = TRUE} and \code{FastAlgorithm = FALSE}. % } \item{SmallNo}{ Positive numeric between \code{.Machine$double.eps} and \code{0.0001}. Used to avoid under- or over-flow in the IRLS algorithm. Used only if \code{FastAlgorithm} is \code{TRUE}. } \item{Use.Init.Poisson.QO}{ Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO()} is used to obtain initial values for the canonical coefficients \eqn{C}. If \code{FALSE} then random numbers are used instead. } \item{wzepsilon}{ Small positive number used to test whether the diagonals of the working weight matrices are sufficiently positive. } \item{\dots}{ Ignored at present. } } \details{ Recall that the central formula for CQO is \deqn{\eta = B_1^T x_1 + A \nu + \sum_{m=1}^M (\nu^T D_m \nu) e_m}{% eta = B_1^T x_1 + A nu + sum_{m=1}^M (nu^T D_m nu) e_m} where \eqn{x_1}{x_1} is a vector (usually just a 1 for an intercept), \eqn{x_2}{x_2} is a vector of environmental variables, \eqn{\nu=C^T x_2}{nu=C^T x_2} is an \eqn{R}-vector of latent variables, and \eqn{e_m} is a vector of 0s but with a 1 in the \eqn{m}th position. QRR-VGLMs are an extension of RR-VGLMs and allow for maximum likelihood solutions to constrained quadratic ordination (CQO) models. % For the fitting of QRR-VGLMs, the default is that the \eqn{C} matrix % (containing the \emph{canonical} or \emph{constrained coefficients} % corresponding to \eqn{x_2}) % is constrained by forcing the latent variables to have sample % variance-covariance matrix equalling \code{diag(Rank)}, i.e., % unit variance and uncorrelated. The tolerance matrices are, in % general, diagonal under such a constraint. Having \code{I.tolerances = TRUE} means all the tolerance matrices are the order-\eqn{R} identity matrix, i.e., it \emph{forces} bell-shaped curves/surfaces on all species.
This results in a more difficult optimization problem (especially for 2-parameter models such as the negative binomial and gamma) because of overflow errors, and it appears there are more local solutions. To avoid the overflow errors, scaling \eqn{C} by the factor \code{Parscale} can help enormously. Even better, scaling \eqn{C} by specifying \code{isd.latvar} is more understandable to humans. If failure to converge occurs, try adjusting \code{Parscale}, or better, setting \code{eq.tolerances = TRUE} (and hope that the estimated tolerance matrix is positive-definite). To fit an equal-tolerances model, it is best to first try setting \code{I.tolerances = TRUE} and varying \code{isd.latvar} and/or \code{MUXfactor} if it fails to converge. If it still fails to converge after many attempts, try setting \code{eq.tolerances = TRUE}; however, this will usually be a lot slower because it requires a lot more memory. With an \eqn{R > 1} model, the latent variables are always uncorrelated, i.e., the variance-covariance matrix of the site scores is a diagonal matrix. If \code{eq.tolerances = TRUE} is used and the common estimated tolerance matrix is positive-definite then that model is effectively the same as the \code{I.tolerances = TRUE} model (the two are transformations of each other). In general, \code{I.tolerances = TRUE} is numerically more unstable and presents a more difficult problem to optimize; the arguments \code{isd.latvar} and/or \code{MUXfactor} often must be assigned some good value(s) (possibly found by trial and error) in order for convergence to occur. Setting \code{I.tolerances = TRUE} \emph{forces} a bell-shaped curve or surface onto all the species data; therefore, this option should be used with deliberation. If unsuitable, the resulting fit may be very misleading. Usually it is a good idea for the user to set \code{eq.tolerances = FALSE} to see which species appear to have a bell-shaped curve or surface. Improvements to the fit can often be achieved using transformations, e.g., nitrogen concentration to log nitrogen concentration. Fitting a CAO model (see \code{\link{cao}}) first is a good idea for pre-examining the data and checking whether it is appropriate to fit a CQO model. %Suppose \code{FastAlgorithm = FALSE}. In theory (if %\code{Eta.range = NULL}), for QRR-VGLMs, the predictors have the values of %a quadratic form. However, when \code{Eta.range} is assigned a numerical %vector of length 2 (giving the endpoints of an interval), then those %values lying outside the interval are assigned the closest boundary %value. The \code{Eta.range} argument is provided to help avoid %numerical problems associated with the inner minimization problem. A %consequence of this is that the fitted values are bounded, e.g., between %\code{1/(1+exp(-Eta.range[1]))} and \code{1/(1+exp(-Eta.range[2]))} for %binary data (logitlink), and greater than \code{exp(Eta.range[1])} for %Poisson data (log link). It is suggested that, for binary responses, %\code{c(-16, 16)} be used, and for Poisson responses, \code{c(-16, Inf)} %be used. The value \code{NULL} corresponds to \code{c(-Inf, Inf)}. } \value{ A list with components matching the input names. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W.
Yee } \note{ When \code{I.tolerances = TRUE} it is a good idea to apply \code{\link[base]{scale}} to all the numerical variables that make up the latent variable, i.e., those of \eqn{x_2}{x_2}. This is to make them have mean 0, and hence avoid large offset values which cause numerical problems. This function has many arguments that are common with \code{\link{rrvglm.control}} and \code{\link{vglm.control}}. It is usually a good idea to try fitting a model with \code{I.tolerances = TRUE} first, and if convergence is unsuccessful, then try \code{eq.tolerances = TRUE} and \code{I.tolerances = FALSE}. Ordination diagrams with \code{eq.tolerances = TRUE} have a natural interpretation, but with \code{eq.tolerances = FALSE} they are more complicated and require, e.g., contours to be overlaid on the ordination diagram (see \code{\link{lvplot.qrrvglm}}). % and/or use the \code{Eta.range} argument. In the example below, an equal-tolerances CQO model is fitted to the hunting spiders data. Because \code{I.tolerances = TRUE}, it is a good idea to center all the \eqn{x_2} variables first. Upon fitting the model, the actual standard deviations of the site scores are computed. Ideally, the \code{isd.latvar} argument should have had this value for the best chances of getting good initial values. For comparison, the model is refitted with that value and it should run faster and more reliably. } \section{Warning }{ The default value of \code{Bestof} is a bare minimum for many datasets; therefore, it will be necessary to increase its value to increase the chances of obtaining the global solution. %Suppose \code{FastAlgorithm = FALSE}. %The fitted values of QRR-VGLMs can be restricted to lie between two values %in order to help make the computation numerically stable. For some data %sets, it may be necessary to use \code{Eta.range} to obtain convergence; %however, the fitted values etc. will no longer be accurate, especially at %small and/or large values. Convergence is slower when \code{Eta.range} %is used to restrict values. } \seealso{ \code{\link{cqo}}, \code{\link{rcqo}}, \code{\link{Coef.qrrvglm}}, \code{\link{Coef.qrrvglm-class}}, \code{\link[stats]{optim}}, \code{\link{binomialff}}, \code{\link{poissonff}}, \code{\link{negbinomial}}, \code{\link{gamma2}}. % \code{\link{gaussianff}}.
% \code{\link{rrvglm}}, % \code{\link{rrvglm.control}}, % \code{\link{rrvglm.optim.control}}, } \examples{ \dontrun{ # Poisson CQO with equal tolerances set.seed(111) # This leads to the global solution hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when I.tolerances = TRUE p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, eq.tolerances = TRUE) sort(deviance(p1, history = TRUE)) # A history of all the iterations (isd.latvar <- apply(latvar(p1), 2, sd)) # Should be approx isd.latvar # Refit the model with better initial values set.seed(111) # This leads to the global solution p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, I.tolerances = TRUE, poissonff, data = hspider, isd.latvar = isd.latvar) # Note the use of isd.latvar here sort(deviance(p1, history = TRUE)) # A history of all the iterations } } \keyword{models} \keyword{regression} %\dontrun{ %# 20120221; withdrawn for a while coz it creates a lot of error messages. %# Negative binomial CQO; smallest deviance is about 275.389 %set.seed(1234) # This leads to a reasonable (but not the global) solution? %nb1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, % Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ % WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, % I.tol = FALSE, eq.tol = TRUE, # A good idea for negbinomial % fam = negbinomial, data = hspider) %sort(deviance(nb1, history = TRUE)) # A history of all the iterations %summary(nb1) %} %\dontrun{ lvplot(nb1, lcol = 1:12, y = TRUE, pcol = 1:12) } VGAM/man/leipnik.Rd0000644000176200001440000000703313565414527013520 0ustar liggesusers\name{leipnik} \alias{leipnik} %- Also NEED an '\alias' for EACH other topic documented here. \title{Leipnik Regression Family Function} \description{ Estimates the two parameters of a (transformed) Leipnik distribution by maximum likelihood estimation. } \usage{ leipnik(lmu = "logitlink", llambda = logofflink(offset = 1), imu = NULL, ilambda = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu, llambda}{ Link functions for the \eqn{\mu}{mu} and \eqn{\lambda}{lambda} parameters. See \code{\link{Links}} for more choices. } \item{imu, ilambda}{ Numeric. Optional initial values for \eqn{\mu}{mu} and \eqn{\lambda}{lambda}. } } \details{ The (transformed) Leipnik distribution has density function \deqn{f(y;\mu,\lambda) = \frac{ \{ y(1-y) \}^{-\frac12}}{ \mbox{Beta}( \frac{\lambda+1}{2}, \frac12 )} \left[ 1 + \frac{(y-\mu)^2 }{y(1-y)} \right]^{ -\frac{\lambda}{2}}}{% f(y;mu,lambda) = (y(1-y))^(-1/2) * (1 + (y-mu)^2 / (y*(1-y)))^(-lambda/2) / Beta((lambda+1)/2, 1/2)} where \eqn{0 < y < 1} and \eqn{\lambda > -1}{lambda > -1}. The mean is \eqn{\mu}{mu} (returned as the fitted values) and the variance is \eqn{1/\lambda}{1/lambda}. Jorgensen (1997) calls the above the \bold{transformed} Leipnik distribution, and if \eqn{y = (x+1)/2} and \eqn{\mu = (\theta+1)/2}{mu = (theta+1)/2}, then the distribution of \eqn{X} as a function of \eqn{x} and \eqn{\theta}{theta} is known as the (untransformed) Leipnik distribution. Here, both \eqn{x} and \eqn{\theta}{theta} are in \eqn{(-1, 1)}.
} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Jorgensen, B. (1997) \emph{The Theory of Dispersion Models}. London: Chapman & Hall Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) \emph{Continuous Univariate Distributions}, 2nd edition, Volume 2, New York: Wiley. (pages 612--617). } \author{ T. W. Yee } \note{ Convergence may be slow or fail. Until better initial value estimates are forthcoming try assigning the argument \code{ilambda} some numerical value if it fails to converge. Currently, Newton-Raphson is implemented, not Fisher scoring. Currently, this family function probably only really works for intercept-only models, i.e., \code{y ~ 1} in the formula. } %\section{Warning }{ % If \code{llambda="identitylink"} then it is possible that the % \code{lambda} estimate becomes less than \eqn{-1}, i.e., out of % bounds. One way to stop this is to choose \code{llambda = "loglink"}, % however, \code{lambda} is then constrained to be positive. %} \seealso{ \code{\link{mccullagh89}}. } \examples{ ldata <- data.frame(y = rnorm(n = 2000, mean = 0.5, sd = 0.1)) # Not proper data fit <- vglm(y ~ 1, leipnik(ilambda = 1), data = ldata, trace = TRUE) head(fitted(fit)) with(ldata, mean(y)) summary(fit) coef(fit, matrix = TRUE) Coef(fit) sum(weights(fit)) # Sum of the prior weights sum(weights(fit, type = "work")) # Sum of the working weights } \keyword{models} \keyword{regression} %fit <- vglm(y ~ 1, leipnik(ilambda = 1), tr = TRUE, cri = "c", checkwz = FALSE) % leipnik(lmu = "logitlink", llambda = "loglink", imu = NULL, ilambda = NULL) %fit <- vglm(y ~ 1, leipnik(ilambda = 1, llambda = logofflink(offset = 1)), % data = ldata, trace = TRUE, crit = "coef") % fit <- vglm(y ~ 1, leipnik(ilambda = 1), data = ldata, trace = TRUE, checkwz = FALSE) VGAM/man/cens.poisson.Rd0000644000176200001440000001252213565414527014505 0ustar liggesusers\name{cens.poisson} %\alias{cens.poisson} \alias{cens.poisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Censored Poisson Family Function } \description{ Family function for a censored Poisson response. } \usage{ cens.poisson(link = "loglink", imu = NULL, biglambda = 10, smallno = 1e-10) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to the mean; see \code{\link{Links}} for more choices. } \item{imu}{ Optional initial value; see \code{\link{CommonVGAMffArguments}} for more information. } \item{biglambda, smallno}{ Used to help robustify the code when \code{lambda} is very large and the \code{\link{ppois}} value is so close to 0 that the first derivative is computed to be a \code{NA} or \code{NaN}. When this occurs \code{\link{mills.ratio}} is called. } } \details{ Often a table of Poisson counts has an entry \emph{J+} meaning \eqn{\ge J}{>= J}. This family function is similar to \code{\link{poissonff}} but handles such censored data. The input requires \code{\link{SurvS4}}. Only a univariate response is allowed. The Newton-Raphson algorithm is used. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ See \pkg{survival} for background. } \author{ Thomas W. Yee } \note{ The function \code{\link{poissonff}} should be used when there are no censored observations. 
Also, \code{NA}s are not permitted with \code{\link{SurvS4}}, nor is \code{type = "counting"}. } \section{Warning }{ As the response is discrete, care is required with \code{\link{Surv}}, especially with \code{"interval"} censored data because of the \code{(start, end]} format. See the examples below. The examples have \code{y < L} as left censored and \code{y >= U} (formatted as \code{U+}) as right censored observations; therefore, \code{L <= y < U} is for uncensored and/or interval censored observations. Consequently the input must be tweaked to conform to the \code{(start, end]} format. A bit of attention has been directed to try to robustify the code when \code{lambda} is very large; however, this currently works for left and right censored data only, not interval censored data. Sometimes the fix involves an approximation; hence it is a good idea to set \code{trace = TRUE}. } \seealso{ \code{\link{SurvS4}}, \code{\link{poissonff}}, \code{\link{Links}}, \code{\link{mills.ratio}}. } \examples{ # Example 1: right censored data set.seed(123); U <- 20 cdata <- data.frame(y = rpois(N <- 100, exp(3))) cdata <- transform(cdata, cy = pmin(U, y), rcensored = (y >= U)) cdata <- transform(cdata, status = ifelse(rcensored, 0, 1)) with(cdata, table(cy)) with(cdata, table(rcensored)) with(cdata, table(print(SurvS4(cy, status)))) # Check; U+ means >= U fit <- vglm(SurvS4(cy, status) ~ 1, cens.poisson, data = cdata, trace = TRUE) coef(fit, matrix = TRUE) table(print(depvar(fit))) # Another check; U+ means >= U # Example 2: left censored data L <- 15 cdata <- transform(cdata, cY = pmax(L, y), lcensored = y < L) # Note y < L, not cY == L or y <= L cdata <- transform(cdata, status = ifelse(lcensored, 0, 1)) with(cdata, table(cY)) with(cdata, table(lcensored)) with(cdata, table(print(SurvS4(cY, status, type = "left")))) # Check fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cens.poisson, data = cdata, trace = TRUE) coef(fit, matrix = TRUE) # Example 3: interval censored data cdata <- transform(cdata, Lvec = rep(L, len = N), Uvec = rep(U, len = N)) cdata <- transform(cdata, icensored = Lvec <= y & y < Uvec) # Not lcensored or rcensored with(cdata, table(icensored)) cdata <- transform(cdata, status = rep(3, N)) # 3 == interval censored cdata <- transform(cdata, status = ifelse(rcensored, 0, status)) # 0 means right censored cdata <- transform(cdata, status = ifelse(lcensored, 2, status)) # 2 means left censored # Have to adjust Lvec and Uvec because of the (start, end] format: cdata$Lvec[with(cdata,icensored)] <- cdata$Lvec[with(cdata,icensored)]-1 cdata$Uvec[with(cdata,icensored)] <- cdata$Uvec[with(cdata,icensored)]-1 # Unchanged: cdata$Lvec[with(cdata, lcensored)] <- cdata$Lvec[with(cdata, lcensored)] cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)] with(cdata, # Check table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1, cens.poisson, data = cdata, trace = TRUE) coef(fit, matrix = TRUE) table(print(depvar(fit))) # Another check # Example 4: Add in some uncensored observations index <- (1:N)[with(cdata, icensored)] index <- head(index, 4) cdata$status[index] <- 1 # actual or uncensored value cdata$Lvec[index] <- cdata$y[index] with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) # Check fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1, cens.poisson, data = cdata, trace = TRUE, crit = "c") coef(fit, matrix = TRUE) table(print(depvar(fit))) # Another check }
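% A hedged follow-up sketch (not run; assumes the fit from Example 1
% is the most recent one): the estimated Poisson mean could be
% compared with the mean of the uncensored y:
% c(exp(coef(fit)), with(cdata, mean(y)))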
\keyword{models} \keyword{regression} VGAM/man/posbinomUC.Rd0000644000176200001440000001012113565414527014133 0ustar liggesusers\name{Posbinom} \alias{Posbinom} \alias{dposbinom} \alias{pposbinom} \alias{qposbinom} \alias{rposbinom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive-Binomial Distribution } \description{ Density, distribution function, quantile function and random generation for the positive-binomial distribution. } \usage{ dposbinom(x, size, prob, log = FALSE) pposbinom(q, size, prob) qposbinom(p, size, prob) rposbinom(n, size, prob) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Fed into \code{\link[stats]{runif}}. } \item{size}{number of trials. It is the \eqn{N} symbol in the formula given in \code{\link{posbinomial}} and should be positive. } \item{prob}{probability of success on each trial. Should be in \eqn{(0,1)}. } % 20120407: % \item{log.p, lower.tail}{ % Arguments that are passed on to % \code{\link[stats:Binomial]{pbinom}} etc. % % } \item{log}{ See \code{\link[stats:Binomial]{dbinom}}. } } \details{ The positive-binomial distribution is a binomial distribution but with the probability of a zero being zero. The other probabilities are scaled to add to unity. The mean therefore is \deqn{\mu / (1-(1-\mu)^N)}{% mu / (1-(1-mu)^N)} where \eqn{\mu}{mu} is the argument \code{prob} above. As \eqn{\mu}{mu} increases, the positive-binomial and binomial distributions become more similar. Unlike similar functions for the binomial distribution, a zero value of \code{prob} is not permitted here. } \value{ \code{dposbinom} gives the density, \code{pposbinom} gives the distribution function, \code{qposbinom} gives the quantile function, and \code{rposbinom} generates random deviates. } %\references{ %None. %} \author{ T. W. Yee. } \note{ For \code{dposbinom()}, if arguments \code{size} or \code{prob} equal 0 then a \code{NaN} is returned. % 20120405; no longer true to a superior method: % For \code{rposbinom()}, the arguments of the function are fed into % \code{\link[stats:Binomial]{rbinom}} until \eqn{n} positive values % are obtained. This may take a long time if \code{prob} has values % close to 0. The family function \code{\link{posbinomial}} estimates the parameters by maximum likelihood estimation. } \seealso{ \code{\link{posbinomial}}, \code{\link{dposbern}}, \code{\link{Gaitbinom.mlm}}, \code{\link{zabinomial}}, \code{\link{zibinomial}}, \code{\link[stats:Binomial]{Binomial}}. 
} \examples{ prob <- 0.2; size <- 10 table(y <- rposbinom(n = 1000, size, prob)) mean(y) # Sample mean size * prob / (1 - (1 - prob)^size) # Population mean (ii <- dposbinom(0:size, size, prob)) cumsum(ii) - pposbinom(0:size, size, prob) # Should be 0s table(rposbinom(100, size, prob)) table(qposbinom(runif(1000), size, prob)) round(dposbinom(1:10, size, prob) * 1000) # Should be similar \dontrun{ barplot(rbind(dposbinom(x = 0:size, size, prob), dbinom(x = 0:size, size, prob)), beside = TRUE, col = c("blue", "green"), main = paste("Positive-binomial(", size, ",", prob, ") (blue) vs", " Binomial(", size, ",", prob, ") (green)", sep = ""), names.arg = as.character(0:size), las = 1) } # Simulated data example nn <- 1000; sizeval1 <- 10; sizeval2 <- 20 pdata <- data.frame(x2 = seq(0, 1, length = nn)) pdata <- transform(pdata, prob1 = logitlink(-2 + 2 * x2, inverse = TRUE), prob2 = logitlink(-1 + 1 * x2, inverse = TRUE), sizev1 = rep(sizeval1, len = nn), sizev2 = rep(sizeval2, len = nn)) pdata <- transform(pdata, y1 = rposbinom(nn, size = sizev1, prob = prob1), y2 = rposbinom(nn, size = sizev2, prob = prob2)) with(pdata, table(y1)) with(pdata, table(y2)) # Multiple responses fit2 <- vglm(cbind(y1, y2) ~ x2, posbinomial(multiple.responses = TRUE), trace = TRUE, data = pdata, weight = cbind(sizev1, sizev2)) coef(fit2, matrix = TRUE) } \keyword{distribution} VGAM/man/AB.Ab.aB.ab.Rd0000644000176200001440000000331013565414527013564 0ustar liggesusers\name{AB.Ab.aB.ab} \alias{AB.Ab.aB.ab} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The AB-Ab-aB-ab Blood Group System } \description{ Estimates the parameter of the AB-Ab-aB-ab blood group system. } \usage{ AB.Ab.aB.ab(link = "logitlink", init.p = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to \code{p}. See \code{\link{Links}} for more choices. } \item{init.p}{ Optional initial value for \code{p}. } } \details{ This one parameter model involves a probability called \code{p}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Lange, K. (2002) \emph{Mathematical and Statistical Methods for Genetic Analysis}, 2nd ed. New York: Springer-Verlag. } \author{ T. W. Yee } \note{ The input can be a 4-column matrix of counts, where the columns are AB, Ab, aB and ab (in order). Alternatively, the input can be a 4-column matrix of proportions (so each row adds to 1) and the \code{weights} argument is used to specify the total number of counts for each row. } \seealso{ \code{\link{AA.Aa.aa}}, \code{\link{ABO}}, \code{\link{A1A2A3}}, \code{\link{MNSs}}. % \code{\link{AB.Ab.aB.ab2}}, } \examples{ ymat <- cbind(AB=1997, Ab=906, aB=904, ab=32) # Data from Fisher (1925) fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identitylink"), trace = TRUE) fit <- vglm(ymat ~ 1, AB.Ab.aB.ab, trace = TRUE) rbind(ymat, sum(ymat)*fitted(fit)) Coef(fit) # Estimated p p <- sqrt(4*(fitted(fit)[, 4])) p*p summary(fit) } \keyword{models} \keyword{regression} VGAM/man/pospoisson.Rd0000644000176200001440000000565113565414527014305 0ustar liggesusers\name{pospoisson} \alias{pospoisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive Poisson Distribution Family Function } \description{ Fits a positive Poisson distribution. 
} \usage{ pospoisson(link = "loglink", type.fitted = c("mean", "lambda", "prob0"), expected = TRUE, ilambda = NULL, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function for the usual mean (lambda) parameter of an ordinary Poisson distribution. See \code{\link{Links}} for more choices. } \item{expected}{ Logical. Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson. } \item{ilambda, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for details. } } \details{ The positive Poisson distribution is the ordinary Poisson distribution but with the probability of zero being zero. Thus the other probabilities are scaled up (i.e., divided by \eqn{1-P[Y=0]}). The mean, \eqn{\lambda / (1 - \exp(-\lambda))}{lambda/(1-exp(-lambda))}, can be obtained by the extractor function \code{fitted} applied to the object. A related distribution is the zero-inflated Poisson, in which the probability \eqn{P[Y=0]} involves another parameter \eqn{\phi}{phi}. See \code{\link{zipoisson}}. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Coleman, J. S. and James, J. (1961) The equilibrium size distribution of freely-forming groups. \emph{Sociometry}, \bold{24}, 36--45. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ This family function can handle multiple responses. Yet to be done: a \code{quasi.pospoisson} which estimates a dispersion parameter. } \seealso{ \code{\link{Pospois}}, \code{\link{gatpoisson.mlm}}, \code{\link{posnegbinomial}}, \code{\link{poissonff}}, \code{\link{otpospoisson}}, \code{\link{zapoisson}}, \code{\link{zipoisson}}, \code{\link{simulate.vlm}}. } \examples{ # Data from Coleman and James (1961) cjdata <- data.frame(y = 1:6, freq = c(1486, 694, 195, 37, 10, 1)) fit <- vglm(y ~ 1, pospoisson, data = cjdata, weights = freq) Coef(fit) summary(fit) fitted(fit) pdata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data pdata <- transform(pdata, lambda = exp(1 - 2 * x2)) pdata <- transform(pdata, y1 = rpospois(nn, lambda)) with(pdata, table(y1)) fit <- vglm(y1 ~ x2, pospoisson, data = pdata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) } \keyword{models} \keyword{regression} VGAM/man/lerch.Rd0000644000176200001440000000742213565414527013164 0ustar liggesusers\name{lerch} \alias{lerch} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Lerch Phi Function } \description{ Computes the Lerch Phi function. } \usage{ lerch(x, s, v, tolerance = 1.0e-10, iter = 100) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, s, v}{ Numeric. This function recycles values of \code{x}, \code{s}, and \code{v} if necessary. } \item{tolerance}{ Numeric. Accuracy required; must be positive and less than 0.01. } \item{iter}{ Maximum number of iterations allowed to obtain convergence. If \code{iter} is too small then a result of \code{NA} may occur; if so, try increasing its value. } } \details{ Also known as the Lerch transcendent, it can be defined by an integral involving analytical continuation.
An alternative definition is the series \deqn{\Phi(x,s,v) = \sum_{n=0}^{\infty} \frac{x^n}{(n+v)^s}}{% Phi(x,s,v) = sum_{n=0}^{infty} x^n / (n+v)^s} which converges for \eqn{|x|<1} as well as for \eqn{|x|=1} with \eqn{s>1}. The series is undefined for integers \eqn{v <= 0}. Actually, \eqn{x} may be complex but this function only works for real \eqn{x}. The algorithm used is based on the relation \deqn{\Phi(x,s,v) = x^m \Phi(x,s,v+m) + \sum_{n=0}^{m-1} \frac{x^n}{(n+v)^s} .}{% Phi(x,s,v) = x^m Phi(x,s,v+m) + sum_{n=0}^{m-1} x^n / (n+v)^s . } See the URL below for more information. This function is a wrapper function for the C code described below. } \value{ Returns the value of the function evaluated at the values of \code{x}, \code{s}, \code{v}. If the above ranges of \eqn{x} and \eqn{v} are not satisfied, or some numeric problems occur, then this function will return an \code{NA} for those values. (The C code returns 6 possible return codes, but this is not passed back up to the R level.) } \references{ Originally the code was found at \code{http://aksenov.freeshell.org/lerchphi/source/lerchphi.c}. Bateman, H. (1953) \emph{Higher Transcendental Functions}. Volume 1. McGraw-Hill, NY, USA. } \author{ S. V. Aksenov and U. D. Jentschura wrote the C code (called Version 1.00). The R wrapper function was written by T. Yee. } \note{ There are a number of special cases, e.g., the Riemann zeta-function is \eqn{\zeta(s) = \Phi(x=1,s,v=1)}{zeta(s) = Phi(x=1,s,v=1)}. Another example is the Hurwitz zeta function \eqn{\zeta(s, v) = \Phi(x=1,s,v=v)}{zeta(s) = Phi(x=1,s,v=v)}. The special case of \eqn{s=1} corresponds to the hypergeometric 2F1, and this is implemented in the \pkg{gsl} package. The Lerch Phi function should not be confused with the Lerch zeta function though they are quite similar. } \section{Warning }{ This function has not been thoroughly tested and contains limitations, for example, the zeta function cannot be computed with this function even though \eqn{\zeta(s) = \Phi(x=1,s,v=1)}{zeta(s) = Phi(x=1,s,v=1)}. Several numerical problems can arise, such as lack of convergence, overflow and underflow, especially near singularities. If any problems occur then an \code{NA} will be returned. For example, if \eqn{|x|=1} and \eqn{s>1} then convergence may be so slow that changing \code{tolerance} and/or \code{iter} may be needed to get an answer (that is treated cautiously). % the C code returns an error % instead of attempting it even with series acceleration. } \seealso{ \code{\link{zeta}}. } \examples{ \dontrun{ s <- 2; v <- 1; x <- seq(-1.1, 1.1, length = 201) plot(x, lerch(x, s = s, v = v), type = "l", col = "blue", las = 1, main = paste0("lerch(x, s = ", s,", v = ", v, ")")) abline(v = 0, h = 1, lty = "dashed", col = "gray") } } \keyword{math} %s <- runif(100, 0, 1.5) %max(abs(zeta(s) - lerch(x = 1, s = s, v = 1))) # This fails); should be 0 VGAM/man/zapoisUC.Rd0000644000176200001440000000442613565414527013625 0ustar liggesusers\name{Zapois} \alias{Zapois} \alias{dzapois} \alias{pzapois} \alias{qzapois} \alias{rzapois} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Altered Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-altered Poisson distribution with parameter \code{pobs0}. } \usage{ dzapois(x, lambda, pobs0 = 0, log = FALSE) pzapois(q, lambda, pobs0 = 0) qzapois(p, lambda, pobs0 = 0) rzapois(n, lambda, pobs0 = 0) } %- maybe also 'usage' for other objects documented here. 
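% In sketch form, the probability function described in Details below
% satisfies, for x = 0, 1, 2, ... (a hedged identity; dpospois() is
% the positive-Poisson density from this package):
% dzapois(x, lambda, pobs0) == ifelse(x == 0, pobs0,
%                                     (1 - pobs0) * dpospois(x, lambda))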
\arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{lambda}{ Vector of positive means. } \item{pobs0}{ Probability of zero, called \eqn{pobs0}. The default value of \code{pobs0 = 0} corresponds to the response having a positive Poisson distribution. } \item{log}{ Logical. Return the logarithm of the answer? } } \details{ The probability function of \eqn{Y} is 0 with probability \code{pobs0}, else a positive \eqn{Poisson(\lambda)}{Poisson(lambda)}. } \value{ \code{dzapois} gives the density, \code{pzapois} gives the distribution function, \code{qzapois} gives the quantile function, and \code{rzapois} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pobs0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. } \seealso{ \code{\link{zapoisson}}, \code{\link{Gaitpois.mlm}}, \code{\link{dzipois}}. } \examples{ lambda <- 3; pobs0 <- 0.2; x <- (-1):7 (ii <- dzapois(x, lambda, pobs0)) max(abs(cumsum(ii) - pzapois(x, lambda, pobs0))) # Should be 0 table(rzapois(100, lambda, pobs0)) table(qzapois(runif(100), lambda, pobs0)) round(dzapois(0:10, lambda, pobs0) * 100) # Should be similar \dontrun{ x <- 0:10 barplot(rbind(dzapois(x, lambda, pobs0), dpois(x, lambda)), beside = TRUE, col = c("blue", "green"), las = 1, main = paste("ZAP(", lambda, ", pobs0 = ", pobs0, ") [blue] vs", " Poisson(", lambda, ") [green] densities", sep = ""), names.arg = as.character(x), ylab = "Probability") } } \keyword{distribution} VGAM/man/vgam.control.Rd0000644000176200001440000001565113565414527014503 0ustar liggesusers\name{vgam.control} \alias{vgam.control} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Control Function for vgam() } \description{ Algorithmic constants and parameters for running \code{\link{vgam}} are set using this function. } \usage{ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30, checkwz=TRUE, Check.rank = TRUE, Check.cm.rank = TRUE, criterion = names(.min.criterion.VGAM), epsilon = 1e-07, maxit = 30, Maxit.outer = 10, noWarning = FALSE, na.action = na.fail, nk = NULL, save.weights = FALSE, se.fit = TRUE, trace = FALSE, wzepsilon = .Machine$double.eps^0.75, xij = NULL, gamma.arg = 1, ...) } %- maybe also `usage' for other objects documented here. \arguments{ % zz na.action differs from vglm \item{all.knots}{ logical indicating if all distinct points of the smoothing variables are to be used as knots. By default, \code{all.knots=TRUE} for \eqn{n \leq 40}{n <= 40}, and for \eqn{n > 40}, the number of knots is approximately \eqn{40 + (n-40)^{0.25}}{40 + (n-40)^0.25}. This increases very slowly with \eqn{n} so that the number of knots is approximately between 50 and 60 for large \eqn{n}. } \item{bf.epsilon}{ tolerance used by the modified vector backfitting algorithm for testing convergence. Must be a positive number. } \item{bf.maxit}{ maximum number of iterations allowed in the modified vector backfitting algorithm. Must be a positive integer. } \item{checkwz}{ logical indicating whether the diagonal elements of the working weight matrices should be checked whether they are sufficiently positive, i.e., greater than \code{wzepsilon}. If not, any values less than \code{wzepsilon} are replaced with this value. } \item{Check.rank, Check.cm.rank}{ See \code{\link{vglm.control}}. 
} \item{criterion}{ character variable describing what criterion is to be used to test for convergence. The possibilities are listed in \code{.min.criterion.VGAM}, but most family functions only implement a few of these. } \item{epsilon}{ positive convergence tolerance epsilon. Roughly speaking, the Newton-Raphson/Fisher-scoring/local-scoring iterations are assumed to have converged when two successive \code{criterion} values are within \code{epsilon} of each other. } \item{maxit}{ maximum number of Newton-Raphson/Fisher-scoring/local-scoring iterations allowed. } \item{Maxit.outer}{ maximum number of outer iterations allowed when there are \code{\link{sm.os}} or \code{\link{sm.ps}} terms. See \code{\link{vgam}} for a little information about the default \emph{outer iteration}. Note that one can use \emph{performance iteration} by setting \code{Maxit.outer = 1}; then the smoothing parameters will be automatically chosen at each IRLS iteration (some specific programming allows this). % Was Maxit.outer = 20 } \item{na.action}{ how to handle missing values. Unlike the SPLUS \code{gam} function, \code{\link{vgam}} cannot handle \code{NA}s when smoothing. } \item{nk}{ vector of length \eqn{d} containing positive integers. where \eqn{d} be the number of \code{\link{s}} terms in the formula. Recycling is used if necessary. The \eqn{i}th value is the number of B-spline coefficients to be estimated for each component function of the \eqn{i}th \code{s()} term. \code{nk} differs from the number of knots by some constant. If specified, \code{nk} overrides the automatic knot selection procedure. } \item{save.weights}{ logical indicating whether the \code{weights} slot of a \code{"vglm"} object will be saved on the object. If not, it will be reconstructed when needed, e.g., \code{summary}. } \item{se.fit}{ logical indicating whether approximate pointwise standard errors are to be saved on the object. If \code{TRUE}, then these can be plotted with \code{plot(..., se = TRUE)}. } \item{trace}{ logical indicating if output should be produced for each iteration. } \item{wzepsilon}{ Small positive number used to test whether the diagonals of the working weight matrices are sufficiently positive. } % \item{xij}{ % formula giving terms making up a covariate-dependent term. % % } \item{noWarning}{ Same as \code{\link{vglm.control}}. } \item{xij}{ Same as \code{\link{vglm.control}}. } \item{gamma.arg}{ Numeric; same as \code{gamma} in \code{\link[mgcv]{magic}}. Inflation factor for optimizing the UBRE/GCV criterion. If given, a suggested value is 1.4 to help avoid overfitting, based on the work of Gu and co-workers (values between 1.2 and 1.4 appeared reasonable, based on simulations). A warning may be given if the value is deemed out-of-range. } \item{\dots}{ other parameters that may be picked up from control functions that are specific to the \pkg{VGAM} family function. % zz see later. } } \details{ Most of the control parameters are used within \code{vgam.fit} and you will have to look at that to understand the full details. Many of the control parameters are used in a similar manner by \code{vglm.fit} (\code{\link{vglm}}) because the algorithm (IRLS) is very similar. Setting \code{save.weights=FALSE} is useful for some models because the \code{weights} slot of the object is often the largest and so less memory is used to store the object. However, for some \pkg{VGAM} family function, it is necessary to set \code{save.weights=TRUE} because the \code{weights} slot cannot be reconstructed later. 
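% A hedged sketch (cf. the example below, which passes eps and maxit
% directly): most control parameters can be supplied in the vgam()
% call itself, e.g.,
% fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2), multinomial,
%             data = pneumo, bf.epsilon = 1e-8, maxit = 50)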
} \value{ A list with components matching the input names. A little error checking is done, but not much. The list is assigned to the \code{control} slot of \code{\link{vgam}} objects. } \references{ Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. % \url{http://www.stat.auckland.ac.nz/~yee} % For gamma=1.4: % Kim, Y.-J. and Gu, C. 2004, % Smoothing spline Gaussian regression: % more scalable computation via efficient approximation. %\emph{Journal of the Royal Statistical Society, Series B, Methodological}, %\bold{66}, 337--356. %\bold{66}(2), 337--356. } \author{ Thomas W. Yee} \note{ \code{\link{vgam}} does not implement half-stepsizing, therefore parametric models should be fitted with \code{\link{vglm}}. Also, \code{\link{vgam}} is slower than \code{\link{vglm}} too. } \section{Warning}{ See \code{\link{vglm.control}}. } \seealso{ \code{\link{vgam}}, \code{\link{vglm.control}}, \code{\link{vsmooth.spline}}, \code{\link{vglm}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) vgam(cbind(normal, mild, severe) ~ s(let, df = 2), multinomial, data = pneumo, trace = TRUE, eps = 1e-4, maxit = 10) } \keyword{models} \keyword{regression} \keyword{smooth} % xij = NULL, VGAM/man/plotvglm.Rd0000644000176200001440000000453513565414527013735 0ustar liggesusers\name{plotvglm} \alias{plotvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Plots for VGLMs } \description{ Currently this function plots the Pearson residuals versus the linear predictors (\eqn{M} plots) and plots the Pearson residuals versus the hat values (\eqn{M} plots). } \usage{ plotvglm(x, which = "(All)", ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ An object of class \code{"vglm"} (see \code{\link{vglm-class}}) or inherits from that class. % Same as \code{\link{plotvgam}}. } \item{which}{ If a subset of the plots is required, specify a subset of the numbers \code{1:(2*M)}. The default is to plot them all. } \item{\dots}{ Arguments fed into the primitive \code{\link[graphics]{plot}} functions. } } \details{ This function is under development. Currently it plots the Pearson residuals against the predicted values (on the transformed scale) and the hat values. There are \eqn{2M} plots in total, therefore users should call \code{\link[graphics]{par}} to assign, e.g., the \code{mfrow} argument. Note: Section 3.7 of Yee (2015) describes the Pearson residuals and hat values for VGLMs. } \value{ Returns the object invisibly. % Same as \code{\link{plotvgam}}. } %\references{ %} \author{ T. W. Yee } %\note{ % \code{plotvglm()} is quite buggy at the moment. % \code{plotvglm()} works in a similar % manner to S-PLUS's \code{plot.gam()}, however, there is no % options for interactive construction of the plots yet. %} \seealso{ \code{\link{plotvgam}}, \code{\link{plotvgam.control}}, \code{\link{vglm}}. 
} \examples{ \dontrun{ ndata <- data.frame(x2 = runif(nn <- 200)) ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1))) fit1 <- vglm(y1 ~ x2, negbinomial, data = ndata, trace = TRUE) coef(fit1, matrix = TRUE) par(mfrow = c(2, 2)) plot(fit1) # Manually produce the four plots plot(fit1, which = 1, col = "blue", las = 1, main = "main1") abline(h = 0, lty = "dashed", col = "gray50") plot(fit1, which = 2, col = "blue", las = 1, main = "main2") abline(h = 0, lty = "dashed", col = "gray50") plot(fit1, which = 3, col = "blue", las = 1, main = "main3") plot(fit1, which = 4, col = "blue", las = 1, main = "main4") } } \keyword{models} \keyword{regression} \keyword{smooth} \keyword{graphs} VGAM/man/gtbinomial.Rd0000644000176200001440000000722713565414527014217 0ustar liggesusers\name{gtbinomial} \alias{gtbinomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Truncated Binomial Distribution Family Function } \description{ Fits a generally-truncated binomial distribution. } \usage{ gtbinomial(truncate = 0, zero = NULL, link = "logitlink", type.fitted = c("mean", "prob", "prob.t"), multiple.responses = FALSE, parallel = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{truncate}{ Vector of truncated values, i.e., integers between 0 and \code{size} inclusive. Must have unique values only. May be a \code{NULL}, which stands for empty set (same as \code{\link{binomialff}}). The default is the 0-truncated or positive-binomial. % Must be sorted and have unique values only. } \item{link, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{multiple.responses, parallel}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for information. The choice \code{"prob.t"} means the probability of having a truncated value, given the estimate of \code{prob} from an ordinary binomial distribution. The mean can be be obtained by the extractor function \code{fitted} applied to the object (the default). } } \details{ The generally-truncated binomial distribution is the ordinary binomial distribution with the probability of certain values (\code{truncate} argument) being zero. Thus the other probabilities are scaled up. The positive-binomial (0-truncated) distribution is a special case and is the default. Note that the argument \code{truncate} is integer-valued even though the response is converted into a sample proportion. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned. The response is checked to see that no values equal any values of the \code{truncate} vector. It is easy to misuse this function; the \code{truncate} vector should ideally not be very long and have values that can be justified by the application on hand. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } %\references{ %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. %} \author{ Thomas W. Yee and Chenchen Ma} \note{ This family function can handle multiple responses. % Yet to be done: a \code{quasi.posbinomial} which estimates a dispersion % parameter. } \seealso{ \code{\link{Gaitbinom.mlm}}, \code{\link{rposbinom}}, \code{\link{posbinomial}}, \code{\link{binomialff}}, \code{\link{simulate.vlm}}. 
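% A hedged sketch (assuming fit1 from the examples below): the fitted
% probability of a truncated value may be extracted via
% head(fitted(fit1, type.fitted = "prob.t"))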
} \examples{ nnn <- 1000; tvec <- c(2, 3, 6); size <- 10 pdata <- data.frame(x2 = runif(nnn), x3 = runif(nnn)) pdata <- transform(pdata, y1 = rgaitbinom.mlm(nnn, prob = logitlink(-1, inverse = TRUE), truncate = tvec, size = size), y2 = rgaitbinom.mlm(nnn, prob = logitlink(-1 + x2 + x3, inverse = TRUE), truncate = tvec, size = size)) with(pdata, table(y1)) with(pdata, table(y2)) head(pdata) fit1 <- vglm(cbind(y1, size - y1) ~ 1, gtbinomial(truncate = tvec), trace = TRUE, data = pdata) coef(fit1, matrix = TRUE) summary(fit1) fit2 <- vglm(cbind(y2, size - y2) ~ x2 + x3, crit = "coef", gtbinomial(truncate = tvec), trace = TRUE, data = pdata) coef(fit2, matrix = TRUE) summary(fit2) } \keyword{models} \keyword{regression} VGAM/man/BICvlm.Rd0000644000176200001440000000705613565414527013206 0ustar liggesusers\name{BICvlm} \alias{BICvlm} %\alias{BICvglm} \alias{BICvgam} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bayesian Information Criterion } \description{ Calculates the Bayesian information criterion (BIC) for a fitted model object for which a log-likelihood value has been obtained. } \usage{ BICvlm(object, \dots, k = log(nobs(object))) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object, \dots}{ Same as \code{\link{AICvlm}}. } \item{k}{ Numeric, the penalty per parameter to be used; the default is \code{log(n)}, where \code{n} is the number of observations. } } \details{ The so-called BIC or SBC (Schwarz's Bayesian criterion) can be computed by calling \code{\link{AICvlm}} with a different \code{k} argument. See \code{\link{AICvlm}} for information and caveats. } \value{ Returns a numeric value with the corresponding BIC, or \dots, depending on \code{k}. } \author{T. W. Yee. } \note{ BIC, AIC and other ICs can have many additive constants added to them. The important thing is the differences, since the minimum value corresponds to the best model. % Preliminary testing shows absolute differences % with some \pkg{VGAM} family functions such as % \code{\link{gaussianff}}, % however, they should agree with non-normal families. BIC has not been defined for QRR-VGLMs yet. } %\references{ % Sakamoto, Y., Ishiguro, M., and Kitagawa G. (1986). % \emph{Akaike Information Criterion Statistics}. % D. Reidel Publishing Company. %} \section{Warning }{ Like \code{\link{AICvlm}}, this code has not been double-checked. The general applicability of \code{BIC} for the VGLM/VGAM classes has not been developed fully. In particular, \code{BIC} should not be run on some \pkg{VGAM} family functions because of violation of certain regularity conditions, etc. Many \pkg{VGAM} family functions such as \code{\link{cumulative}} can have the number of observations absorbed into the prior weights argument (e.g., \code{weights} in \code{\link{vglm}}), either before or after fitting. Almost all \pkg{VGAM} family functions can have the number of observations defined by the \code{weights} argument, e.g., as an observed frequency. \code{BIC} simply uses the number of rows of the model matrix, say, as defining \code{n}; hence the user must be very careful of this possible error. Use at your own risk!! } \seealso{ \code{\link{AICvlm}}, VGLMs are described in \code{\link{vglm-class}}; VGAMs are described in \code{\link{vgam-class}}; RR-VGLMs are described in \code{\link{rrvglm-class}}; \code{\link[stats]{BIC}}, \code{\link[stats]{AIC}}.
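% In sketch form, BICvlm(fit) should effectively equal
% AICvlm(fit, k = log(nobs(fit))), since the two criteria differ only
% in the penalty k.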
} \examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit1 <- vglm(cbind(normal, mild, severe) ~ let,
              cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
coef(fit1, matrix = TRUE)
BIC(fit1)
(fit2 <- vglm(cbind(normal, mild, severe) ~ let,
              cumulative(parallel = FALSE, reverse = TRUE), data = pneumo))
coef(fit2, matrix = TRUE)
BIC(fit2)
}
\keyword{models}
\keyword{regression}

%# These do not agree in absolute terms:
%gdata <- data.frame(x2 = sort(runif(n <- 40)))
%gdata <- transform(gdata, y1 = 1 + 2*x2 + rnorm(n, sd = 0.1))
%fit.v <- vglm(y1 ~ x2, gaussianff, data = gdata)
%fit.g <-  glm(y1 ~ x2, gaussian  , data = gdata)
%fit.l <-   lm(y1 ~ x2, data = gdata)
%c(BIC(fit.l), BIC(fit.g), BIC(fit.v))
%c(AIC(fit.l), AIC(fit.g), AIC(fit.v))
%c(AIC(fit.l) - AIC(fit.v),
%  AIC(fit.g) - AIC(fit.v))
%c(logLik(fit.l), logLik(fit.g), logLik(fit.v))
VGAM/man/backPain.Rd0000644000176200001440000000327013565414527013574 0ustar liggesusers\name{backPain}
\alias{backPain}
\alias{backPain2}
\docType{data}
\title{ Data on Back Pain Prognosis, from Anderson (1984) }
\description{
  Data from a study of patients suffering from back pain. Prognostic
  variables were recorded at presentation and progress was categorised
  three weeks after treatment.
}
\usage{data(backPain)}
\format{
  A data frame with 101 observations on the following 4 variables.
  \describe{
    \item{x2}{length of previous attack.}
    \item{x3}{pain change.}
    \item{x4}{lordosis.}
    \item{pain}{an ordered factor describing the progress of each
      patient with levels \code{worse} < \code{same} <
      \code{slight.improvement} < \code{moderate.improvement} <
      \code{marked.improvement} < \code{complete.relief}. }
  }
}
\source{
  \code{http://ideas.repec.org/c/boc/bocode/s419001.html}
% \url{http://ideas.repec.org/c/boc/bocode/s419001.html}

  The data set and this help file were copied from \pkg{gnm} so that
  a vignette in \pkg{VGAM} could be run; the analysis is described in
  Yee (2010).

  The data frame \code{backPain2} is a modification of \code{backPain}
  where the variables have been renamed
  (\code{x1} becomes \code{x2},
  \code{x2} becomes \code{x3},
  \code{x3} becomes \code{x4})
  and converted into factors.
}
\references{
  Anderson, J. A. (1984)
  Regression and Ordered Categorical Variables.
  \emph{J. R. Statist. Soc. B}, \bold{46(1)}, 1--30.

  Yee, T. W. (2010)
  The \pkg{VGAM} package for categorical data analysis.
  \emph{Journal of Statistical Software}, \bold{32}, 1--34.
  \url{http://www.jstatsoft.org/v32/i10/}.
}
\examples{
summary(backPain)
summary(backPain2)
}
\keyword{datasets}
% set.seed(1)
% data(backPain)
VGAM/man/lambertW.Rd0000644000176200001440000000472513565414527013647 0ustar liggesusers\name{lambertW}
\alias{lambertW}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The Lambert W Function }
\description{
  Computes the Lambert \emph{W} function for real values.
}
\usage{
lambertW(x, tolerance = 1e-10, maxit = 50)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{ A vector of reals. }
  \item{tolerance}{ Accuracy desired. }
  \item{maxit}{ Maximum number of iterations of third-order Halley's
  method. }
}
\details{
  The Lambert \eqn{W} function is the root of the equation
  \eqn{W(z) \exp(W(z)) = z}{W(z) * exp(W(z)) = z}
  for complex \eqn{z}.
  If \eqn{z} is real and \eqn{-1/e < z < 0} then it has two possible
  real values, and currently only the upper branch
  (often called \eqn{W_0})
  is computed so that a value that is \eqn{\geq -1}{>= -1} is returned.
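% A quick numerical sketch of the defining identity and the NA rule
% stated above (it assumes only the lambertW() function documented here):
% w <- lambertW(2)
% w * exp(w)     # Should return 2
% lambertW(-1)   # -1 < -1/e, so NA is returned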
% It is multi-valued if \eqn{z} is real and % \eqn{0 > z > -1/e}{0 > z > -1/e}. % For real \eqn{-1/e < z < 0}{-1/e < z < 0} it has two % possible real values, and currently only the upper branch % is computed. % Prior to 20180511: % It is multi-valued if \eqn{z} is real and \eqn{z < -1/e}. % For real \eqn{-1/e \leq z < 0}{-1/e <= z < 0} it has two } \value{ This function returns the principal branch of the \eqn{W} function for \emph{real} \eqn{z}. It returns \eqn{W(z) \geq -1}{W(z) >= -1}, and \code{NA} for \eqn{z < -1/e}. } \references{ Corless, R. M. and Gonnet, G. H. and Hare, D. E. G. and Jeffrey, D. J. and Knuth, D. E. (1996) On the Lambert \eqn{W} function. \emph{Advances in Computational Mathematics}, \bold{5}(4), 329--359. } \author{ T. W. Yee } \note{ If convergence does not occur then increase the value of \code{maxit} and/or \code{tolerance}. Yet to do: add an argument \code{lbranch = TRUE} to return the lower branch (often called \eqn{W_{-1}}) for real \eqn{-1/e \leq z < 0}{-1/e <= z < 0}; this would give \eqn{W(z) \leq -1}{W(z) <= -1}. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link[base:log]{log}}, \code{\link[base:log]{exp}}, \code{\link{bell}}. There is also a package called \pkg{LambertW}. } \examples{ \dontrun{ curve(lambertW, -exp(-1), 3, xlim = c(-1, 3), ylim = c(-2, 1), las = 1, col = "orange", n = 1001) abline(v = -exp(-1), h = -1, lwd = 2, lty = "dotted", col = "gray") abline(h = 0, v = 0, lty = "dashed", col = "blue") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{math} VGAM/man/gaitnbinom.mlmUC.Rd0000644000176200001440000001300313565414527015222 0ustar liggesusers\name{Gaitnbinom.mlm} \alias{Gaitnbinom.mlm} \alias{dgaitnbinom.mlm} \alias{pgaitnbinom.mlm} \alias{qgaitnbinom.mlm} \alias{rgaitnbinom.mlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Altered, -Inflated and -Truncated Negative Binomial Distribution (GAIT--NB--MLM--MLM) % (multinomial logit model based; GAIT--NB--MLM--MLM) } \description{ Density, distribution function, quantile function and random generation for the generally-altered, -inflated and -truncated negative binomial distribution, based on the multinomial logit model (MLM). This distribution is sometimes abbreviated as GAIT--NB--MLM--MLM. } \usage{ dgaitnbinom.mlm(x, size, prob = NULL, munb = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE, log.arg = FALSE) pgaitnbinom.mlm(q, size, prob = NULL, munb = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE) qgaitnbinom.mlm(p, size, prob = NULL, munb = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE) rgaitnbinom.mlm(n, size, prob = NULL, munb = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, p, n, log.arg}{ Same meaning as in \code{\link[stats:NegBinomial]{NegBinomial}}. } \item{size, prob, munb}{ Same meaning as in \code{\link[stats:NegBinomial]{NegBinomial}}, i.e., for an ordinary negative binomial distribution. Argument \code{mu} has been renamed to \code{munb} to emphasize that it refers to the negative binomial mean. Only one of \code{prob} and \code{munb} should be specified. Short vectors are recycled. 
} \item{alter, inflate, truncate}{
  Same as in \code{\link{Gaitpois.mlm}}.
% Must be sorted and have unique values only.
  }
  \item{pstr.i, byrow.arg}{
  Same as in \code{\link{Gaitpois.mlm}}.
  }
  \item{pobs.a, max.support}{
  Same as in \code{\link{Gaitpois.mlm}}.
  }
}
\details{
  Largely the same as \code{\link{Gaitpois.mlm}} except that a negative
  binomial distribution replaces the Poisson distribution.
  Special cases of these functions include
  \code{\link{dposnegbin}}, \code{\link{dzanegbin}}, \code{\link{dzinegbin}},
  \code{\link{pposnegbin}}, \code{\link{pzanegbin}}, \code{\link{pzinegbin}},
  \code{\link{qposnegbin}}, \code{\link{qzanegbin}}, \code{\link{qzinegbin}},
  \code{\link{rposnegbin}}, \code{\link{rzanegbin}}, \code{\link{rzinegbin}}
  corresponding to the special value 0.
  However, now the arguments \code{alter}, \code{inflate} and
  \code{truncate} have been supplied in one function.
}
%\section{Warning }{
% See \code{\link{rgaitnbinom.mlm}}.
% The function can run slowly for certain combinations
% of \code{pstr.i} and \code{inflate}, e.g.,
% \code{rgaitnbinom.mlm(1e5, 1, inflate = 0:9, pstr.i = (1:10)/100)}.
% Failure to obtain random variates will result in some
% \code{NA} values instead.
% An infinite loop can occur for certain combinations
% of \code{lambda} and \code{inflate}, e.g.,
% \code{rgaitnbinom.mlm(10, 1, trunc = 0:100)}.
% No action is made to avoid this occurring.
%}
\value{
  \code{dgaitnbinom.mlm} gives the density (PMF),
  \code{pgaitnbinom.mlm} gives the distribution function,
  \code{qgaitnbinom.mlm} gives the quantile function, and
  \code{rgaitnbinom.mlm} generates random deviates.
  The default values of the arguments correspond to ordinary
  \code{\link[stats:NegBinomial]{dnbinom}},
  \code{\link[stats:NegBinomial]{pnbinom}},
  \code{\link[stats:NegBinomial]{qnbinom}},
  \code{\link[stats:NegBinomial]{rnbinom}}
  respectively.
}
%\references{
%None.
%}
\author{ T. W. Yee. }
%\note{
% 20120405; no longer true to a superior method:
% For \code{rposnegbin}, the arguments of the function are fed
% into \code{\link[stats:NegBinomial]{rnegbin}} until \eqn{n} positive
% values are obtained. This may take a long time if \code{lambda}
% has values close to 0.
% The family function \code{\link{gaitnbinom.mlm}} estimates
% the two parameters by maximum likelihood estimation.
%}
% \code{\link{gaitnegbinomial.mix}},
\seealso{
  \code{\link{gatnbinomial.mlm}},
  \code{\link{Gaitpois.mlm}},
  \code{\link[stats:NegBinomial]{NegBinomial}}.
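% A one-line sketch of the reduction stated in the \value section above:
% with no special values at all, the GAIT PMF should agree with the
% parent negative binomial PMF (assuming only functions documented here):
% all.equal(dgaitnbinom.mlm(0:5, size = 3, munb = 15),
%           dnbinom(0:5, size = 3, mu = 15))  # Should be TRUE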
% \code{\link{Posnegbin}},
% \code{\link{gaitnbinomial.mix}},
% \code{\link{Gtnbinom}},
% \code{\link{Ganbinom.mix}},
% \code{\link{Ganbinom.mlm}},
% \code{\link{zanegbinson}},
% \code{\link{zinegbinson}},
}
\examples{
avec <- c(5, 10)   # Alter these (special) values
ivec <- c(15, 20)  # Inflate these (special) values
tvec <- 0          # Truncate this (special) value
pobs.a <- c(0.1, 0.2)
pstr.i <- c(0.1, 0.1); size <- 3; munb <- 15; x <- 0:22
y <- rgaitnbinom.mlm(1000, size, munb = munb, alter = avec,
                     inflate = ivec, truncate = tvec, max.support = 20,
                     pstr.i = pstr.i, pobs.a = pobs.a, byrow = TRUE)
table(y)
(pmf <- dgaitnbinom.mlm(x, size, munb = munb, alter = avec,
                        inflate = ivec, truncate = tvec, max.support = 20,
                        pstr.i = pstr.i, pobs.a = pobs.a, byrow = TRUE))
\dontrun{ plot(x, pmf, type = "h", col = "blue", las = 1,
     main = "Heaped data that is also upper-truncated") }
}
\keyword{distribution}
VGAM/man/get.smart.Rd0000644000176200001440000000257113565414527013773 0ustar liggesusers\name{get.smart}
\alias{get.smart}
\title{ Retrieve One Component of ``.smart.prediction'' }
\description{
Retrieve one component of the list \code{.smart.prediction} from
\code{smartpredenv}.
}
\usage{
get.smart()
}
\value{
Returns one list component of \code{.smart.prediction} from
\code{smartpredenv}, in fact,
\code{.smart.prediction[[.smart.prediction.counter]]}.
The whole procedure mimics a first-in first-out data structure, better
known as a \emph{queue}.
}
\section{Side Effects}{
The variable \code{.smart.prediction.counter} in \code{smartpredenv}
is incremented beforehand, and then written back to
\code{smartpredenv}.
}
\details{
\code{get.smart} is used in \code{"read"} mode within a smart function:
it retrieves parameters saved at the time of fitting, and is used for
prediction.
\code{get.smart} is only used in smart functions such as
\code{\link[VGAM]{sm.poly}};
\code{get.smart.prediction} is only used in modelling functions
such as \code{\link[stats]{lm}} and \code{\link[stats]{glm}}.

The function \code{\link{get.smart}} gets only a part of
\code{.smart.prediction} whereas \code{\link{get.smart.prediction}}
gets the entire \code{.smart.prediction}.
}
\seealso{
\code{\link{get.smart.prediction}}.
}
\examples{
print(sm.min1)
}
%\keyword{smart}
\keyword{models}
\keyword{regression}
\keyword{programming}
% Converted by Sd2Rd version 1.10.
VGAM/man/vcovvlm.Rd0000644000176200001440000000677513565414527013565 0ustar liggesusers\name{vcovvlm}
%\name{vcov}
\alias{vcov}
\alias{vcovvlm}
\alias{vcovrrvglm}
\alias{vcovqrrvglm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Calculate Variance-Covariance Matrix for a Fitted
        VLM or RR-VGLM or QRR-VGLM Object }
\description{
  Returns the variance-covariance matrix of the
  parameters of a fitted \code{\link[VGAM]{vlm-class}} object or
  a fitted \code{\link[VGAM]{rrvglm-class}} object.
}
\usage{
vcov(object, \dots)
vcovvlm(object, dispersion = NULL, untransform = FALSE, complete = TRUE)
vcovqrrvglm(object, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{ A fitted model object, having class
  \code{\link[VGAM]{vlm-class}} or \code{\link[VGAM]{rrvglm-class}} or
  \code{\link[VGAM]{qrrvglm-class}} or a superclass of such.
  The former includes a \code{\link{vglm}} object.
  }
  \item{dispersion}{
  Numerical. A value may be specified, else it is estimated for
  quasi-GLMs (e.g., method of moments).
  For almost all other types of VGLMs it is usually unity.
  The value is multiplied by the raw variance-covariance matrix.
} \item{untransform}{
  Logical. For intercept-only models with trivial constraints;
  if set \code{TRUE} then the parameter link function is inverted
  to give the answer for the untransformed/raw parameter.
  }
  \item{complete}{An argument that is currently ignored.
  Added only so that \code{\link[car]{linearHypothesis}}
  can be called.
  }
  \item{\dots}{Same as \code{\link[stats]{vcov}}.
  }
}
\details{
  This methods function is based on the QR decomposition of the
  (large) VLM model matrix and working weight matrices.
  Currently \code{\link{vcovvlm}} operates on the fundamental
  \code{\link[VGAM]{vlm-class}} objects because pretty well all
  modelling functions in \pkg{VGAM} inherit from this.
  Currently \code{\link{vcovrrvglm}} is not entirely reliable because
  the elements of the \bold{A}--\bold{C} part of the matrix sometimes
  cannot be computed very accurately, so that the entire matrix is
  not positive-definite.

  For \code{"qrrvglm"} objects, \code{\link{vcovqrrvglm}} is currently
  working with \code{Rank = 1} objects or when
  \code{I.tolerances = TRUE}.
  Then the answer is conditional given \bold{C}.
  The code is based on \code{\link{model.matrixqrrvglm}}
  so that the \code{dimnames} are the same.
}
\value{
  Same as \code{\link[stats]{vcov}}.
}
%\references{
%}
\author{ Thomas W. Yee }
\note{
  For some models inflated standard errors can occur, such as
  parameter estimates near the boundary of the parameter space.
  Detection for this is available for some models using
  \code{\link{hdeff.vglm}}, which tests for a Hauck-Donner effect
  (HDE) for each regression coefficient.
  If the HDE is present, using \code{\link{lrt.stat.vlm}} should
  return more accurate p-values.
}
%\section{Warning }{
%}
\seealso{
  \code{\link{confintvglm}},
  \code{\link{summaryvglm}},
  \code{\link[stats]{vcov}},
  \code{\link{hdeff.vglm}},
  \code{\link{lrt.stat.vlm}},
  \code{\link{model.matrixqrrvglm}}.
}
\examples{
ndata <- data.frame(x2 = runif(nn <- 300))
ndata <- transform(ndata,
                   y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
                   y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
fit2 <- rrvglm(y1 ~ x2, negbinomial(zero = NULL), data = ndata)
coef(fit1, matrix = TRUE)
vcov(fit1)
vcov(fit2)
}
\keyword{models}
\keyword{regression}
VGAM/man/hdeffsev.Rd0000644000176200001440000001077313565414527013664 0ustar liggesusers\name{hdeffsev}
\alias{hdeffsev}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Hauck-Donner Effects: Severity Measures }
\description{
  Computes the severity of the Hauck-Donner effect
  for each regression coefficient of a VGLM regression.
}
\usage{
hdeffsev(x, y, dy, ddy, allofit = FALSE, tol0 = 0.1,
         severity.table = c("None", "Faint", "Weak", "Moderate",
                            "Strong", "Extreme", "Undetermined"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, y}{
  Numeric vectors;
  \code{x} are the estimates, and \code{y} are the Wald statistics.
  }
  \item{dy, ddy}{
  Numeric vectors;
  the first and second derivatives of the Wald statistics.
  They can be computed by \code{\link{hdeff}}.
  }
  \item{allofit}{ Logical. If \code{TRUE} then other quantities are
  returned in a list.
  The default is a vector with elements selected from the argument
  \code{severity.table}.
  }
  \item{severity.table}{ Character vector with 7 values.
  The last value is used for initialization.
  Usually users should not assign anything to arguments
  \code{severity.table} or \code{tol0}.
  }
  \item{tol0}{ Numeric.
Any estimate whose absolute value is less than \code{tol0} is assigned
  the first value of the argument \code{severity.table}, i.e., none.
  This is to handle a singularity at the origin:
  the estimates might be extremely close to 0.
  }
}
\details{
  This function is rough-and-ready.
  It is possible to use the first two derivatives obtained from
  \code{\link{hdeff}} to categorize the severity of the
  Hauck-Donner effect (HDE).
  It is effectively assumed that, starting at the origin and going
  right, the curve is made up of a convex segment followed by a
  concave segment and then another convex segment.
  Midway in the concave segment the derivative is 0, and beyond that
  the HDE is really manifest because the derivative is negative.

  For \code{"none"} the estimate lies on the convex part of the curve
  near the origin, hence there is no HDE at all.

  For \code{"faint"} and \code{"weak"} the estimate lies on the
  concave part of the curve but the Wald statistic is still increasing
  as the estimate moves away from 0, hence it is only a mild HDE.

  For \code{"moderate"}, \code{"strong"} and \code{"extreme"} the Wald
  statistic is decreasing as the estimate moves away from 0, hence it
  really does exhibit the HDE.
  It is recommended that \code{\link{lrt.stat}} be used to compute LRT
  p-values, as they do not suffer from the HDE.
}
\value{
  By default this function returns a labelled vector with elements
  selected from \code{severity.table}.
  If \code{allofit = TRUE} then Yee (2018) gives details about the
  other list components:
  a quantity called \code{zeta} is the normal line projected onto the
  x-axis, and its first derivative gives additional information about
  the position of the estimate along the curve.
}
%\references{
%Yee, T. W. (2018)
%On the Hauck-Donner effect in Wald tests:
%Detection, and parameter space characterization
%(\emph{under review}).
%}
\author{ Thomas W. Yee. }
%\section{Warning }{
%}
\note{
  This function is likely to change in the near future because it is
  experimental and far from complete.
  Improvements are intended.

  See \code{\link{hdeff}};
  Yee (2018) gives details on VGLM HDE detection, severity measures,
  two tipping points (1/4 and 3/5), parameter space partitioning into
  several regions, and a bound for the HDE for 1-parameter binary
  regression, etc.
}
\seealso{
  \code{\link{seglines}},
  \code{\link{hdeff}}.
}
\examples{
deg <- 4  # myfun is a function that approximates the HDE
myfun <- function(x, deriv = 0) switch(as.character(deriv),
  '0' = x^deg * exp(-x),
  '1' = (deg * x^(deg-1) - x^deg) * exp(-x),
  '2' = (deg*(deg-1)*x^(deg-2) - 2*deg*x^(deg-1) + x^deg)*exp(-x))
xgrid <- seq(0, 10, length = 101)
ansm <- hdeffsev(xgrid, myfun(xgrid), myfun(xgrid, deriv = 1),
                 myfun(xgrid, deriv = 2), allofit = TRUE)
digg <- 4
cbind(severity = ansm$sev,
      fun      = round(myfun(xgrid), digg),
      deriv1   = round(myfun(xgrid, deriv = 1), digg),
      deriv2   = round(myfun(xgrid, deriv = 2), digg),
      zderiv1  = round(1 + (myfun(xgrid, deriv = 1))^2 +
                       myfun(xgrid, deriv = 2) * myfun(xgrid), digg))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
\keyword{regression}
VGAM/man/binom2.or.Rd0000644000176200001440000002144013565414527013670 0ustar liggesusers\name{binom2.or}
\alias{binom2.or}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bivariate Binary Regression with an Odds Ratio (Family Function) }
\description{
  Fits a Palmgren (bivariate odds-ratio model, or bivariate logistic
  regression) model to two binary responses.
Actually, a bivariate logistic/probit/cloglog/cauchit model can be fitted. The odds ratio is used as a measure of dependency. } \usage{ binom2.or(lmu = "logitlink", lmu1 = lmu, lmu2 = lmu, loratio = "loglink", imu1 = NULL, imu2 = NULL, ioratio = NULL, zero = "oratio", exchangeable = FALSE, tol = 0.001, more.robust = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu}{ Link function applied to the two marginal probabilities. See \code{\link{Links}} for more choices. See the note below. } \item{lmu1, lmu2}{ Link function applied to the first and second of the two marginal probabilities. } \item{loratio}{ Link function applied to the odds ratio. See \code{\link{Links}} for more choices. } \item{imu1, imu2, ioratio}{ Optional initial values for the marginal probabilities and odds ratio. See \code{\link{CommonVGAMffArguments}} for more details. In general good initial values are often required so use these arguments if convergence failure occurs. } \item{zero}{ Which linear/additive predictor is modelled as an intercept only? The default is for the odds ratio. A \code{NULL} means none. See \code{\link{CommonVGAMffArguments}} for more details. } \item{exchangeable}{ Logical. If \code{TRUE}, the two marginal probabilities are constrained to be equal. } \item{tol}{ Tolerance for testing independence. Should be some small positive numerical value. } \item{more.robust}{ Logical. If \code{TRUE} then some measures are taken to compute the derivatives and working weights more robustly, i.e., in an attempt to avoid numerical problems. Currently this feature is not debugged if set \code{TRUE}. } } \details{ Also known informally as the \emph{Palmgren model}, the bivariate logistic model is a full-likelihood based model defined as two logistic regressions plus \code{log(oratio) = eta3} where \code{eta3} is the third linear/additive predictor relating the odds ratio to explanatory variables. Explicitly, the default model is \deqn{logit[P(Y_j=1)] = \eta_j,\ \ \ j=1,2}{% logit[P(Y_j=1)] = eta_j,\ \ \ j=1,2} for the marginals, and \deqn{\log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = \eta_3,}{% log[P(Y_{00}=1) P(Y_{11}=1) / (P(Y_{01}=1) P(Y_{10}=1))] = eta_3,} specifies the dependency between the two responses. Here, the responses equal 1 for a success and a 0 for a failure, and the odds ratio is often written \eqn{\psi=p_{00}p_{11}/(p_{10}p_{01})}{psi=p00 p11 / (p10 p01)}. The model is fitted by maximum likelihood estimation since the full likelihood is specified. The two binary responses are independent if and only if the odds ratio is unity, or equivalently, the log odds ratio is 0. Fisher scoring is implemented. The default models \eqn{\eta_3}{eta3} as a single parameter only, i.e., an intercept-only model, but this can be circumvented by setting \code{zero = NULL} in order to model the odds ratio as a function of all the explanatory variables. The function \code{binom2.or()} can handle other probability link functions such as \code{\link{probitlink}}, \code{\link{clogloglink}} and \code{\link{cauchitlink}} links as well, so is quite general. In fact, the two marginal probabilities can each have a different link function. A similar model is the \emph{bivariate probit model} (\code{\link{binom2.rho}}), which is based on a standard bivariate normal distribution, but the bivariate probit model is less interpretable and flexible. The \code{exchangeable} argument should be used when the error structure is exchangeable, e.g., with eyes or ears data. 
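% A small numerical sketch of the dependency measure defined above;
% the joint probabilities below are made-up illustrative values, not
% taken from any data set:
% p00 <- 0.4; p01 <- 0.1; p10 <- 0.1; p11 <- 0.4
% (oratio <- (p00 * p11) / (p10 * p01))  # 16: strong positive dependence
% log(oratio)  # This is eta3; 0 would correspond to independence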
} \value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.

  When fitted, the \code{fitted.values} slot of the object contains the
  four joint probabilities, labelled as
  \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively.
  These estimated probabilities should be extracted with the
  \code{fitted} generic function.
}
\references{
  McCullagh, P. and Nelder, J. A. (1989)
  \emph{Generalized Linear Models}, 2nd ed.
  London: Chapman & Hall.

  le Cessie, S. and van Houwelingen, J. C. (1994)
  Logistic regression for correlated binary data.
  \emph{Applied Statistics}, \bold{43}, 95--108.

  Palmgren, J. (1989)
  \emph{Regression Models for Bivariate Binary Responses}.
  Technical Report no. 101, Department of Biostatistics,
  University of Washington, Seattle.

  Yee, T. W. and Dirnbock, T. (2009)
  Models for analysing species' presence/absence data
  at two time points.
  \emph{Journal of Theoretical Biology}, \bold{259}(4), 684--694.

% Documentation accompanying the \pkg{VGAM} package at
% \url{https://www.stat.auckland.ac.nz/~yee}
% contains further information and examples.
}
\author{ Thomas W. Yee }
\note{
  At present we call families such as \code{\link{binom2.or}}
  \emph{bivariate odds-ratio models}.
  The response should be either a 4-column matrix of counts
  (whose columns correspond to
  \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1) respectively),
  or a two-column matrix where each column has two distinct values,
  or a factor with four levels.
  The function \code{\link{rbinom2.or}} may be used to generate such
  data. Successful convergence requires at least one case of each of
  the four possible outcomes.

  By default, a constant odds ratio is fitted because \code{zero = 3}.
  Set \code{zero = NULL} if you want the odds ratio to be modelled as a
  function of the explanatory variables; however, numerical problems
  are more likely to occur.

  The argument \code{lmu}, which is actually redundant, is used for
  convenience and for upward compatibility: specifying \code{lmu} only
  means the link function will be applied to \code{lmu1} and
  \code{lmu2}.
  Users who want a different link function for each of the two marginal
  probabilities should use the \code{lmu1} and \code{lmu2} arguments,
  and the argument \code{lmu} is then ignored.
  It doesn't make sense to specify \code{exchangeable = TRUE} and have
  different link functions for the two marginal probabilities.

  Regarding Yee and Dirnbock (2009),
  the \code{xij} (see \code{\link{vglm.control}}) argument enables
  environmental variables with different values at the two time points
  to be entered into an exchangeable \code{\link{binom2.or}} model.
  See the author's webpage for sample code.
}
\seealso{
  \code{\link{rbinom2.or}},
  \code{\link{binom2.rho}},
  \code{\link{loglinb2}},
  \code{\link{zipebcom}},
  \code{\link{coalminers}},
  \code{\link{binomialff}},
  \code{\link{logitlink}},
  \code{\link{probitlink}},
  \code{\link{clogloglink}},
  \code{\link{cauchitlink}}.
} \examples{ # Fit the model in Table 6.7 in McCullagh and Nelder (1989) coalminers <- transform(coalminers, Age = (age - 42) / 5) fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ Age, binom2.or(zero = NULL), data = coalminers) fitted(fit) summary(fit) coef(fit, matrix = TRUE) c(weights(fit, type = "prior")) * fitted(fit) # Table 6.8 \dontrun{ with(coalminers, matplot(Age, fitted(fit), type = "l", las = 1, xlab = "(age - 42) / 5", lwd = 2)) with(coalminers, matpoints(Age, depvar(fit), col=1:4)) legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2, legend = c("1 = (Breathlessness=0, Wheeze=0)", "2 = (Breathlessness=0, Wheeze=1)", "3 = (Breathlessness=1, Wheeze=0)", "4 = (Breathlessness=1, Wheeze=1)")) } # Another model: pet ownership \dontrun{ data(xs.nz, package = "VGAMdata") # More homogeneous: petdata <- subset(xs.nz, ethnicity == "European" & age < 70 & sex == "M") petdata <- na.omit(petdata[, c("cat", "dog", "age")]) summary(petdata) with(petdata, table(cat, dog)) # Can compute the odds ratio fit <- vgam(cbind((1-cat) * (1-dog), (1-cat) * dog, cat * (1-dog), cat * dog) ~ s(age, df = 5), binom2.or(zero = 3), data = petdata, trace = TRUE) colSums(depvar(fit)) coef(fit, matrix = TRUE) } \dontrun{ # Plot the estimated probabilities ooo <- order(with(petdata, age)) matplot(with(petdata, age)[ooo], fitted(fit)[ooo, ], type = "l", xlab = "Age", ylab = "Probability", main = "Pet ownership", ylim = c(0, max(fitted(fit))), las = 1, lwd = 1.5) legend("topleft", col=1:4, lty = 1:4, leg = c("no cat or dog ", "dog only", "cat only", "cat and dog"), lwd = 1.5) } } \keyword{models} \keyword{regression} VGAM/man/maxwell.Rd0000644000176200001440000000457313565414527013544 0ustar liggesusers\name{maxwell} \alias{maxwell} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Maxwell Regression Family Function } \description{ Estimating the parameter of the Maxwell distribution by maximum likelihood estimation. } \usage{ maxwell(link = "loglink", zero = NULL, parallel = FALSE, type.fitted = c("mean", "percentiles", "Qlink"), percentiles = 50) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Parameter link function applied to \eqn{a}, which is called the parameter \code{rate}. See \code{\link{Links}} for more choices and information; a log link is the default because the parameter is positive. More information is at \code{\link{CommonVGAMffArguments}}. } \item{zero, parallel}{ See \code{\link{CommonVGAMffArguments}}. } \item{type.fitted, percentiles}{ See \code{\link{CommonVGAMffArguments}} for information. Using \code{"Qlink"} is for quantile-links in \pkg{VGAMextra}. } } \details{ The Maxwell distribution, which is used in the area of thermodynamics, has a probability density function that can be written \deqn{f(y;a) = \sqrt{2/\pi} a^{3/2} y^2 \exp(-0.5 a y^2)}{% f(y;a) = sqrt(2/pi) * a^(3/2) * y^2 * exp(-0.5*a*y^2)} for \eqn{y>0} and \eqn{a>0}. The mean of \eqn{Y} is \eqn{\sqrt{8 / (a \pi)}}{sqrt(8 / (a * pi))} (returned as the fitted values), and its variance is \eqn{(3\pi - 8)/(\pi a)}{(3*pi - 8)/(pi*a)}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ von Seggern, D. H. (1993) \emph{CRC Standard Curves and Surfaces}, Boca Raton, FL, USA: CRC Press. } \author{ T. W. Yee } \note{ Fisher-scoring and Newton-Raphson are the same here. A related distribution is the Rayleigh distribution. 
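% A sketch checking the mean formula stated in the details above
% (it assumes only rmaxwell(), documented at \code{\link{Maxwell}}):
% a <- exp(2)
% c(empirical = mean(rmaxwell(1e5, rate = a)),
%   theory    = sqrt(8 / (a * pi)))  # These should be roughly equal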
This \pkg{VGAM} family function handles multiple responses, and can be
  mimicked by \code{poisson.points(ostatistic = 1.5, dimension = 2)}.
}
\seealso{
  \code{\link{Maxwell}},
  \code{\link{rayleigh}},
  \code{\link{poisson.points}}.
}
\examples{
mdata <- data.frame(y = rmaxwell(1000, rate = exp(2)))
fit <- vglm(y ~ 1, maxwell, data = mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{models}
\keyword{regression}
VGAM/man/zetaff.Rd0000644000176200001440000000600113565414527013336 0ustar liggesusers\name{zetaff}
\alias{zetaff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Zeta Distribution Family Function }
\description{
  Estimates the parameter of the zeta distribution.
}
\usage{
zetaff(lshape = "loglink", ishape = NULL, gshape = exp(-3:4)/4,
       zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lshape, ishape, zero}{
  These arguments apply to the (positive) parameter \eqn{p}.
  See \code{\link{Links}} for more choices.
  Choosing \code{\link{loglog}} constrains \eqn{p>1}, but may fail if
  the maximum likelihood estimate is less than one.
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
  \item{gshape}{
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
}
\details{
  In this long-tailed distribution the response must be a positive
  integer.
  The probability function for a response \eqn{Y} is
  \deqn{P(Y=y) = 1/[y^{p+1} \zeta(p+1)],\ \ \ p>0,\ \ \ y=1,2,...}{%
        P(Y=y) = 1/(y^(p+1) zeta(p+1)), p>0, y=1,2,...}
  where \eqn{\zeta}{zeta} is Riemann's zeta function.
  The parameter \eqn{p} is positive, therefore a log link is the
  default.
  The mean of \eqn{Y} is
  \eqn{\mu = \zeta(p) / \zeta(p+1)}{mu = zeta(p)/zeta(p+1)}
  (provided \eqn{p>1}) and these are the fitted values.
  The variance of \eqn{Y} is
  \eqn{\zeta(p-1) / \zeta(p+1) - \mu^2}{zeta(p-1) / zeta(p+1) - mu^2}
  provided \eqn{p>2}.

  It appears that good initial values are needed for successful
  convergence. If convergence is not obtained, try several values
  ranging from values near 0 to values about 10 or more.

  Multiple responses are handled.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
%Lindsey, J. K. (1995)
%\emph{Modelling Frequency and Count Data}.
%Oxford: Clarendon Press.
\references{
  pp.527-- of Chapter 11 of
  Johnson N. L., Kemp, A. W. and Kotz S. (2005)
  \emph{Univariate Discrete Distributions},
  3rd edition,
  Hoboken, New Jersey: Wiley.

  Knight, K. (2000)
  \emph{Mathematical Statistics}.
  Boca Raton, FL, USA: Chapman & Hall/CRC Press.
}
\author{ T. W. Yee }
\note{
  The \code{\link{zeta}} function may be used to compute values of the
  zeta function.
}
\seealso{
  \code{\link{zeta}},
  \code{\link{oazeta}},
  \code{\link{oizeta}},
  \code{\link{otzeta}},
  \code{\link{diffzeta}},
  \code{\link{dzeta}},
  \code{\link{hzeta}},
  \code{\link{zipf}}.
}
\examples{
zdata <- data.frame(y = 1:5, w = c(63, 14, 5, 1, 2))  # Knight, p.304
fit <- vglm(y ~ 1, zetaff, data = zdata, trace = TRUE, weight = w,
            crit = "c")
(phat <- Coef(fit))  # 1.682557
with(zdata, cbind(round(dzeta(y, phat) * sum(w), 1), w))
with(zdata, weighted.mean(y, w))
fitted(fit, matrix = FALSE)
predict(fit)
# The following should be zero at the MLE:
with(zdata, mean(log(rep(y, w))) +
            zeta(1+phat, deriv = 1) / zeta(1+phat))
}
\keyword{models}
\keyword{regression}

% Also known as the Joos model or discrete Pareto distribution.
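% A sketch verifying the PMF and mean formulas in the details above,
% numerically (it assumes only dzeta() and zeta() as documented here):
% phat <- 1.7
% sum(dzeta(1:1e5, phat))  # Should be close to 1
% c(zeta(phat) / zeta(phat + 1),           # E(Y), valid since phat > 1
%   sum((1:1e5) * dzeta(1:1e5, phat)))     # Truncated-sum approximation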
VGAM/man/CommonVGAMffArguments.Rd0000644000176200001440000005772213565414527016174 0ustar liggesusers\name{CommonVGAMffArguments}
\alias{CommonVGAMffArguments}
\alias{TypicalVGAMfamilyFunction}
\title{Common VGAM Family Function Arguments }
\description{
  Here is a description of some common and typical arguments found in
  many \pkg{VGAM} family functions, e.g.,
  \code{lsigma}, \code{isigma}, \code{gsigma}, \code{nsimEIM},
  \code{parallel} and \code{zero}.
}
\usage{
TypicalVGAMfamilyFunction(lsigma = "loglink", isigma = NULL,
                          link.list = list("(Default)" = "identitylink",
                                           x2 = "loglink",
                                           x3 = "logofflink",
                                           x4 = "multilogitlink",
                                           x5 = "multilogitlink"),
                          earg.list = list("(Default)" = list(),
                                           x2 = list(),
                                           x3 = list(offset = -1),
                                           x4 = list(),
                                           x5 = list()),
                          gsigma = exp(-5:5),
                          parallel = TRUE,
                          ishrinkage = 0.95,
                          nointercept = NULL, imethod = 1,
                          type.fitted = c("mean", "quantiles", "Qlink",
                                          "pobs0", "pstr0", "onempstr0"),
                          percentiles = c(25, 50, 75),
                          probs.x = c(0.15, 0.85),
                          probs.y = c(0.25, 0.50, 0.75),
                          multiple.responses = FALSE, earg.link = FALSE,
                          whitespace = FALSE, bred = FALSE, lss = TRUE,
                          oim = FALSE, nsimEIM = 100, byrow.arg = FALSE,
                          zero = NULL)
}
\arguments{
% apply.parint = FALSE,
  \item{lsigma}{
  Character.
  Link function applied to a parameter and not necessarily a mean.
  See \code{\link{Links}} for a selection of choices.
  If there is only one parameter then this argument is often called
  \code{link}.
  }
% \item{esigma}{
% List.
% Extra argument allowing for additional information, specific to the
% link function.
% See \code{\link{Links}} for more information.
% If there is only one parameter then this argument is often called
% \code{earg}.
% }
  \item{link.list, earg.list}{
  Some \pkg{VGAM} family functions
  (such as \code{\link{normal.vcm}})
  implement models with potentially lots of parameter link functions.
  These two arguments allow many such links and extra arguments
  to be inputted more easily.
  One has something like
  \code{link.list = list("(Default)" = "identitylink", x2 = "loglink",
  x3 = "logofflink")}
  and
  \code{earg.list = list("(Default)" = list(), x2 = list(),
  x3 = list(offset = -1))}.
  Then any unnamed terms will have the default link with its
  corresponding extra argument.
  Note: the \code{\link{multilogitlink}} link is also possible, and
  if so, at least two instances of it are necessary.
  Then the last term is the baseline/reference group.
  }
  \item{isigma}{
  Optional initial values can often be inputted using an argument
  beginning with \code{"i"}.
  For example, \code{"isigma"} and \code{"ilocation"}, or just
  \code{"init"} if there is one parameter.
  A value of \code{NULL} means a value is computed internally, i.e., a
  \emph{self-starting} \pkg{VGAM} family function.
  If a failure to converge occurs make use of these types of arguments.
  }
  \item{gsigma}{
  Grid-search initial values can be inputted using an argument
  beginning with \code{"g"},
  e.g., \code{"gsigma"}, \code{"gshape"} and \code{"gscale"}.
  If argument \code{isigma} is inputted then that has precedence over
  \code{gsigma}, etc.
% The actual search values will be \code{unique(sort(c(gshape)))}, etc.
  If the grid search is 2-dimensional then it is advisable not to make
  the vectors too long as a nested \code{for} loop may be used.
  Ditto for 3-dimensions etc.
  Sometimes a \code{".mux"} is added as a suffix, e.g.,
  \code{gshape.mux}; this means that the grid is created relatively
  and not absolutely, e.g., its values are multiplied by some single
  initial estimate of the parameter in order to create the grid on an
  absolute scale.
Some family functions have an argument called \code{gprobs.y}.
  This is fed into the \code{probs} argument of
  \code{\link[stats:quantile]{quantile}} when \code{imethod = 1}
  in order to obtain some values of central tendency of the response,
  i.e., some spread of values in the middle, so as to obtain an
  initial value for the mean.
  Some family functions have an argument called \code{iprobs.y},
  and if so, then these values can overwrite \code{gprobs.y}.

% Then the actual search values will be
% \code{unique(sort(c(gshape, 1/gshape)))}, etc.
  }
  \item{parallel}{
  A logical, or a simple formula specifying which terms have
  equal/unequal coefficients.
  The formula must be simple, i.e., additive with simple main effects
  terms. Interactions and nesting etc. are not handled.
  To handle complex formulas use the \code{constraints} argument
  (of \code{\link{vglm}} etc.);
  however, there is a lot more setting up involved and things will not
  be as convenient.

  Here are some examples.
  1. \code{parallel = TRUE ~ x2 + x5} means the parallelism assumption
  is only applied to \eqn{X_2}, \eqn{X_5} and the intercept.
  2. \code{parallel = TRUE ~ -1} and \code{parallel = TRUE ~ 0}
  mean the parallelism assumption is applied to \emph{no} variables at
  all. Similarly,
  \code{parallel = FALSE ~ -1} and \code{parallel = FALSE ~ 0}
  mean the parallelism assumption is applied to \emph{all} the
  variables including the intercept.
  3. \code{parallel = FALSE ~ x2 - 1} and
  \code{parallel = FALSE ~ x2 + 0}
  apply the parallelism constraint to all terms
  (including the intercept) except for \eqn{X_2}.

  This argument is common in \pkg{VGAM} family functions for
  categorical responses, e.g., \code{\link{cumulative}},
  \code{\link{acat}}, \code{\link{cratio}}, \code{\link{sratio}}.
  For the proportional odds model (\code{\link{cumulative}}), having
  parallel constraints applied to each explanatory variable (except for
  the intercepts) means the fitted probabilities do not become negative
  or greater than 1. However, this parallelism or proportional-odds
  assumption ought to be checked.
  }
% \item{apply.parint}{
% \emph{This variable will be deprecated shortly}.
% Logical.
% It refers to whether the parallelism constraint is
% applied to the intercept too.
% By default, in some models it does, in other models it does not.
% Used only if \code{parallel = TRUE} (fully or partially with
% respect to all the explanatory variables).
% }
  \item{nsimEIM}{
  Some \pkg{VGAM} family functions use simulation to obtain an
  approximate expected information matrix (EIM).
  For those that do, the \code{nsimEIM} argument specifies the number
  of random variates used per observation; the mean of \code{nsimEIM}
  random variates is taken.
  Thus \code{nsimEIM} controls the accuracy and a larger value may be
  necessary if the EIMs are not positive-definite.
  For intercept-only models (\code{y ~ 1}) the value of \code{nsimEIM}
  can be smaller (since the common value used is also then taken as the
  mean over the observations), especially if the number of observations
  is large.

  Some \pkg{VGAM} family functions provide two algorithms for
  estimating the EIM.
  If applicable, set \code{nsimEIM = NULL} to choose the other
  algorithm.
  }
  \item{imethod}{
  An integer with value \code{1} or \code{2} or \code{3} or ... which
  specifies the initialization method for some parameters or a specific
  parameter.
  If failure to converge occurs, try the next higher value, and
  continue until success.
  For example, \code{imethod = 1} might be the method of moments, and
  \code{imethod = 2} might be another method.
If no value of \code{imethod} works then it will be necessary to use
  arguments such as \code{isigma}.
  For many \pkg{VGAM} family functions it is advisable to try this
  argument with all possible values to safeguard against problems such
  as converging to a local solution.
  \pkg{VGAM} family functions with this argument usually correspond to
  a model or distribution that is relatively hard to fit successfully,
  therefore care is needed to ensure the global solution is obtained.
  So using all possible values that this argument supplies is a good
  idea.
  }
  \item{type.fitted}{
  Character.
  Type of fitted value returned by the \code{fitted()} methods
  function. The first choice is always the default.
  The available choices depend on what kind of family function it is.
  Using the first few letters of the chosen value is okay.
  See \code{\link{fittedvlm}} for more details.

  The choice \code{"Qlink"} refers to quantile-links, which were
  introduced in December 2018 in \pkg{VGAMextra} 0.0-2 for several
  1-parameter distributions.
  Here, either the \code{\link{loglink}} or \code{\link{logitlink}} or
  \code{\link{identitylink}} of the quantile is the link function (and
  the choice is dependent on the support of the distribution), and
  link functions end in \code{"Qlink"}.
  A limited amount of support is provided for such links, e.g.,
  \code{fitted(fit)} are the fitted quantiles, which is the same as
  \code{predict(fit, type = "response")}.
  However, \code{fitted(fit, percentiles = 77)} will not work.
  }
  \item{percentiles}{
  Numeric vector, with values between 0 and 100
  (although it is not recommended that exactly 0 or 100 be inputted).
  Used only if \code{type.fitted = "quantiles"} or
  \code{type.fitted = "percentiles"}, then this argument specifies the
  values of these quantiles.
  The argument name tries to reinforce that the values lie between 0
  and 100.
  See \code{\link{fittedvlm}} for more details.
  }
  \item{probs.x, probs.y}{
  Numeric, with values in (0, 1).
  The probabilities that define quantiles with respect to some vector,
  usually an \code{x} or \code{y} of some sort.
  This is used to create two subsets of data corresponding to `low' and
  `high' values of x or y.
  Each value is separately fed into the \code{probs} argument of
  \code{\link[stats:quantile]{quantile}}.
  If the data set size is small then it may be necessary to
  increase/decrease slightly the first/second values respectively.
  }
  \item{lss}{
  Logical.
  This stands for the ordering: location, scale and shape.
  Should the ordering of the parameters be in this order?
  Almost all \pkg{VGAM} family functions have this order by default,
  but in order to match the arguments of existing R functions, one
  might need to set \code{lss = FALSE}.
  For example, the arguments of \code{\link{weibullR}} are scale and
  shape, whereas those of \code{\link[stats]{rweibull}} are shape and
  scale.
  As a temporary measure
  (from \pkg{VGAM} 0.9-7 onwards but prior to version 1.0-0),
  some family functions such as \code{\link{sinmad}} have an \code{lss}
  argument without a default. For these, setting \code{lss = FALSE}
  will work.
  Later, \code{lss = TRUE} will be the default.
  Be careful with the \code{dpqr}-type functions, e.g.,
  \code{\link{rsinmad}}.
  }
  \item{whitespace}{
  Logical.
  Should white spaces (\code{" "}) be used in the labelling of the
  linear/additive predictors?
  Setting \code{TRUE} usually results in more readability but it
  occupies more columns of the output.
  }
  \item{oim}{
  Logical.
  Should the observed information matrices (OIMs) be used for the
  working weights?
In general, setting \code{oim = TRUE} means the Newton-Raphson algorithm, and \code{oim = FALSE} means Fisher-scoring. The latter uses the EIM, and is usually recommended. If \code{oim = TRUE} then \code{nsimEIM} is ignored. } \item{zero}{ Either an integer vector, or a vector of character strings. If an integer, then it specifies which linear/additive predictor is modelled as \emph{intercept-only}. That is, the regression coefficients are set to zero for all covariates except for the intercept. If \code{zero} is specified then it may be a vector with values from the set \eqn{\{1,2,\ldots,M\}}. The value \code{zero = NULL} means model \emph{all} linear/additive predictors as functions of the explanatory variables. Here, \eqn{M} is the number of linear/additive predictors. Technically, if \code{zero} contains the value \eqn{j} then the \eqn{j}th row of every constraint matrix (except for the intercept) consists of all 0 values. Some \pkg{VGAM} family functions allow the \code{zero} argument to accept negative values; if so then its absolute value is recycled over each (usual) response. For example, \code{zero = -2} for the two-parameter negative binomial distribution would mean, for each response, the second linear/additive predictor is modelled as intercepts-only. That is, for all the \eqn{k} parameters in \code{\link{negbinomial}} (this \pkg{VGAM} family function can handle a matrix of responses). Suppose \code{zero = zerovec} where \code{zerovec} is a vector of negative values. If \eqn{G} is the usual \eqn{M} value for a univariate response then the actual values for argument \code{zero} are all values in \code{c(abs(zerovec), G + abs(zerovec), 2*G + abs(zerovec), ... )} lying in the integer range \eqn{1} to \eqn{M}. For example, setting \code{zero = -c(2, 3)} for a matrix response of 4 columns with \code{\link{zinegbinomial}} (which usually has \eqn{G = M = 3} for a univariate response) would be equivalent to \code{zero = c(2, 3, 5, 6, 8, 9, 11, 12)}. This example has \eqn{M = 12}. Note that if \code{zerovec} contains negative values then their absolute values should be elements from the set \code{1:G}. Note: \code{zero} may have positive and negative values, for example, setting \code{zero = c(-2, 3)} in the above example would be equivalent to \code{zero = c(2, 3, 5, 8, 11)}. The argument \code{zero} also accepts a character vector (for \pkg{VGAM} 1.0-1 onwards). Each value is fed into \code{\link[base]{grep}} with \code{fixed = TRUE}, meaning that wildcards \code{"*"} are not useful. See the example below---all the variants work; those with \code{LOCAT} issue a warning that that value is unmatched. Importantly, the parameter names are \code{c("location1", "scale1", "location2", "scale2")} because there are 2 responses. Yee (2015) described \code{zero} for only numerical input. Allowing character input is particularly important when the number of parameters cannot be determined without having the actual data first. For example, with time series data, an ARMA(\eqn{p},\eqn{q}) process might have parameters \eqn{\theta_1,\ldots,\theta_p} which should be intercept-only by default. Then specifying a numerical default value for \code{zero} would be too difficult (there are the drift and scale parameters too). However, it is possible with the character representation: \code{zero = "theta"} would achieve this. In the future, most \pkg{VGAM} family functions might be converted to the character representation---the advantage being that it is more readable. 
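% A sketch of the recycling rule for negative 'zero' values described
% above, computed directly (G = 3 parameters per response, 4 responses,
% zerovec = c(2, 3), as in the zinegbinomial illustration):
% G <- 3; zerovec <- c(2, 3); n.responses <- 4
% sort(c(outer(abs(zerovec), G * (0:(n.responses - 1)), "+")))
% # 2 3 5 6 8 9 11 12, agreeing with the expansion stated above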
When programming a \pkg{VGAM} family function that allows character
  input, the variable \code{predictors.names} must be assigned
  correctly.

%Note that \code{zero} accepts wildcards (cf. the Linux operating system):
%\code{"location*"} means that \emph{all} location parameters
%are intercept-only.

% When programming a \pkg{VGAM} family function that allows character
% input, the variables \code{parameters.names}
% and \code{Q1}
  }
  \item{ishrinkage}{
  Shrinkage factor \eqn{s} used for obtaining initial values.
  Numeric, between 0 and 1.
  In general, the formula used is something like
  \eqn{s \mu + (1-s) y}{s*mu + (1-s)*y}
  where \eqn{\mu}{mu} is a measure of central tendency such as a
  weighted mean or median, and \eqn{y} is the response vector.
  For example, the initial values are slight perturbations of the mean
  towards the actual data.
  For many types of models this method seems to work well and is often
  reasonably robust to outliers in the response.
  Often this argument is only used if the argument \code{imethod} is
  assigned a certain value.
  }
  \item{nointercept}{
  An integer-valued vector specifying which linear/additive predictors
  have no intercepts.
  Any values must be from the set \{1,2,\ldots,\eqn{M}\}.
  A value of \code{NULL} means no such constraints.
  }
  \item{multiple.responses}{
  Logical.
  Some \pkg{VGAM} family functions allow a multivariate or vector
  response. If so, then usually the response is a matrix with columns
  corresponding to the individual response variables. They are all
  fitted simultaneously.
  Arguments such as \code{parallel} may then be useful to allow for
  relationships between the regressions of each response variable.
  If \code{multiple.responses = TRUE} then sometimes the response is
  interpreted differently, e.g., \code{\link{posbinomial}} chooses the
  first column of a matrix response as success and combines the other
  columns as failure, but when \code{multiple.responses = TRUE} then
  each column of the response matrix is the number of successes and the
  \code{weights} argument is of the same dimension as the response and
  contains the number of trials.
  }
  \item{earg.link}{
  This argument should generally be ignored.
% Sometimes the link argument can receive \code{earg}-type input,
% such as \code{\link{quasibinomial}} calling \code{\link{binomial}}.
  }
  \item{byrow.arg}{
  Logical.
  Some \pkg{VGAM} family functions that handle multiple responses have
  arguments that allow input to be fed in which affects all the
  responses, e.g., \code{imu} for initializing a \code{mu} parameter.
  In such cases it is sometimes more convenient to input one value per
  response by setting \code{byrow.arg = TRUE}; then values are recycled
  in order to form a matrix of the appropriate dimension.
  This argument matches \code{byrow} in \code{\link[base]{matrix}}; in
  fact it is fed into such using \code{matrix(..., byrow = byrow.arg)}.
  This argument has no effect when there is one response.
  }
  \item{bred}{
  Logical.
  Some \pkg{VGAM} family functions will allow bias-reduction based on
  the work by Kosmidis and Firth.
  Sometimes half-stepping is a good idea; set \code{stepsize = 0.5} and
  monitor convergence by setting \code{trace = TRUE}.
  }
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\section{Warning }{
  The \code{zero} argument is supplied for convenience but conflicts
  can arise with other arguments, e.g., the \code{constraints}
  argument of \code{\link{vglm}} and \code{\link{vgam}}.
See Example 5 below for an example.
  If not sure, use, e.g., \code{constraints(fit)} and
  \code{coef(fit, matrix = TRUE)} to check the result of a fit
  \code{fit}.

  The arguments \code{zero} and \code{nointercept} can be inputted with
  values that fail. For example,
  \code{multinomial(zero = 2, nointercept = 1:3)} means the second
  linear/additive predictor is identically zero, which will cause a
  failure.

  Be careful about the use of other potentially contradictory
  constraints, e.g.,
  \code{multinomial(zero = 2, parallel = TRUE ~ x3)}.
  If in doubt, apply \code{constraints()} to the fitted object to
  check.

  \pkg{VGAM} family functions with the \code{nsimEIM} argument may
  have inaccurate working weight matrices.
  If so, then the standard errors of the regression coefficients may
  be inaccurate.
  Thus output from \code{summary(fit)}, \code{vcov(fit)}, etc. may be
  misleading.

  Changes relating to the \code{lss} argument have very important
  consequences and users must beware.
  Good programming style is to rely on the argument names and not on
  the order.
}
\details{
  Full details will be given in documentation yet to be written, at a
  later date!
}
\references{
  Yee, T. W. (2015)
  \emph{Vector Generalized Linear and Additive Models:
  With an Implementation in R}.
  New York, USA: Springer.

  Kosmidis, I. and Firth, D. (2009)
  Bias reduction in exponential family nonlinear models.
  \emph{Biometrika}, \bold{96}(4), 793--804.

%Kosmidis, I. and Firth, D. (2010)
%A generic algorithm for reducing bias in parametric estimation.
%\emph{Electronic Journal of Statistics},
%\bold{4}, 1097--1112.

  Miranda-Soberanis, V. F. and Yee, T. W. (2018)
  New link functions for distribution-specific quantile regression
  based on vector generalized linear and additive models.
  Manuscript in preparation.
}
\seealso{
  \code{\link{Links}},
  \code{\link{vglmff-class}},
  \code{\link{UtilitiesVGAM}},
  \code{\link{normal.vcm}},
  \code{\link{multilogitlink}},
  \pkg{VGAMextra}.
}
\author{T. W. Yee}
\note{
  See \code{\link{Links}} regarding a major change in link functions,
  for version 0.9-0 and higher (released during the 2nd half of 2012).
}
\examples{
# Example 1
cumulative()
cumulative(link = "probitlink", reverse = TRUE, parallel = TRUE)

# Example 2
wdata <- data.frame(x2 = runif(nn <- 1000))
wdata <- transform(wdata,
                   y = rweibull(nn, shape = 2 + exp(1 + x2),
                                scale = exp(-0.5)))
fit <- vglm(y ~ x2, weibullR(lshape = logofflink(offset = -2),
                             zero = 2), data = wdata)
coef(fit, mat = TRUE)

# Example 3; multivariate (multiple) response
\dontrun{
ndata <- data.frame(x = runif(nn <- 500))
ndata <- transform(ndata,
           y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)),  # k is size
           y2 = rnbinom(nn, mu = exp(2-x), size = exp(0)))
fit <- vglm(cbind(y1, y2) ~ x, negbinomial(zero = -2), data = ndata)
coef(fit, matrix = TRUE)
}

# Example 4
\dontrun{
# fit1 and fit2 are equivalent
fit1 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
             cumulative(parallel = FALSE ~ 1 + x3 + x5), data = cdata)
fit2 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
             cumulative(parallel = TRUE ~ x2 + x4), data = cdata)
}

# Example 5
udata <- data.frame(x2 = rnorm(nn <- 200))
udata <- transform(udata,
           y1 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1 + 0.2*x2)),
           y2 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1)))
args(uninormal)
fit1 <- vglm(y1 ~ x2, uninormal, data = udata)            # This is okay
fit2 <- vglm(y2 ~ x2, uninormal(zero = 2), data = udata)  # This is okay

# This creates potential conflict
clist <- list("(Intercept)" = diag(2), "x2" = diag(2))
fit3 <- vglm(y2 ~ x2, uninormal(zero = 2), data = udata,
             constraints = clist)  # Conflict!
coef(fit3, matrix = TRUE) # Shows that clist[["x2"]] was overwritten, constraints(fit3) # i.e., 'zero' seems to override the 'constraints' arg # Example 6 ('whitespace' argument) pneumo <- transform(pneumo, let = log(exposure.time)) fit1 <- vglm(cbind(normal, mild, severe) ~ let, sratio(whitespace = FALSE, parallel = TRUE), data = pneumo) fit2 <- vglm(cbind(normal, mild, severe) ~ let, sratio(whitespace = TRUE, parallel = TRUE), data = pneumo) head(predict(fit1), 2) # No white spaces head(predict(fit2), 2) # Uses white spaces # Example 7 ('zero' argument with character input) set.seed(123); n <- 1000 ldata <- data.frame(x2 = runif(n)) ldata <- transform(ldata, y1 = rlogis(n, loc = 5*x2, scale = exp(2))) ldata <- transform(ldata, y2 = rlogis(n, loc = 5*x2, scale = exp(1*x2))) ldata <- transform(ldata, w1 = runif(n)) ldata <- transform(ldata, w2 = runif(n)) fit7 <- vglm(cbind(y1, y2) ~ x2, # logistic(zero = "location1"), # location1 is intercept-only # logistic(zero = "location2"), # logistic(zero = "location*"), # Not okay... all is unmatched # logistic(zero = "scale1"), # logistic(zero = "scale2"), # logistic(zero = "scale"), # Both scale parameters are matched logistic(zero = c("location", "scale2")), # All but scale1 # logistic(zero = c("LOCAT", "scale2")), # Only scale2 is matched # logistic(zero = c("LOCAT")), # Nothing is matched # trace = TRUE, # weights = cbind(w1, w2), weights = w1, data = ldata) coef(fit7, matrix = TRUE) } \keyword{models} VGAM/man/lomaxUC.Rd0000644000176200001440000000535513565414527013442 0ustar liggesusers\name{Lomax} \alias{Lomax} \alias{dlomax} \alias{plomax} \alias{qlomax} \alias{rlomax} \title{The Lomax Distribution} \description{ Density, distribution function, quantile function and random generation for the Lomax distribution with scale parameter \code{scale} and shape parameter \code{q}. } \usage{ dlomax(x, scale = 1, shape3.q, log = FALSE) plomax(q, scale = 1, shape3.q, lower.tail = TRUE, log.p = FALSE) qlomax(p, scale = 1, shape3.q, lower.tail = TRUE, log.p = FALSE) rlomax(n, scale = 1, shape3.q) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{scale}{scale parameter.} \item{shape3.q}{shape parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dlomax} gives the density, \code{plomax} gives the distribution function, \code{qlomax} gives the quantile function, and \code{rlomax} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{lomax}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The Lomax distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{lomax}}, \code{\link{genbetaII}}. 
}
\examples{
probs <- seq(0.1, 0.9, by = 0.1)
max(abs(plomax(qlomax(p = probs, shape3.q = 1),
               shape3.q = 1) - probs))  # Should be 0

\dontrun{ par(mfrow = c(1, 2))
x <- seq(-0.01, 5, len = 401)
plot(x, dexp(x), type = "l", col = "black", ylab = "", ylim = c(0, 3),
     main = "Black is standard exponential, others are dlomax(x, shape3.q)")
lines(x, dlomax(x, shape3.q = 1), col = "orange")
lines(x, dlomax(x, shape3.q = 2), col = "blue")
lines(x, dlomax(x, shape3.q = 5), col = "green")
legend("topright", col = c("orange","blue","green"), lty = rep(1, 3),
       legend = paste("shape3.q =", c(1, 2, 5)))

plot(x, pexp(x), type = "l", col = "black", ylab = "", las = 1,
     main = "Black is standard exponential, others are plomax(x, shape3.q)")
lines(x, plomax(x, shape3.q = 1), col = "orange")
lines(x, plomax(x, shape3.q = 2), col = "blue")
lines(x, plomax(x, shape3.q = 5), col = "green")
legend("bottomright", col = c("orange","blue","green"), lty = rep(1, 3),
       legend = paste("shape3.q =", c(1, 2, 5)))
}
}
\keyword{distribution}
VGAM/man/zibinomial.Rd0000644000176200001440000001553613565414527014231 0ustar liggesusers\name{zibinomial}
\alias{zibinomial}
\alias{zibinomialff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Zero-Inflated Binomial Distribution Family Function }
\description{
  Fits a zero-inflated binomial distribution by maximum likelihood
  estimation.
}
\usage{
zibinomial(lpstr0 = "logitlink", lprob = "logitlink",
           type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
           ipstr0 = NULL, zero = NULL, multiple.responses = FALSE,
           imethod = 1)
zibinomialff(lprob = "logitlink", lonempstr0 = "logitlink",
             type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
             ionempstr0 = NULL, zero = "onempstr0",
             multiple.responses = FALSE, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lpstr0, lprob}{
  Link functions for the parameter \eqn{\phi}{phi}
  and the usual binomial probability \eqn{\mu}{prob} parameter.
  See \code{\link{Links}} for more choices.
  For the zero-\emph{deflated} model see below.

  }

% \item{epstr0, eprob}{
%         epstr0 = list(), eprob = list(),
% List. Extra argument for the respective links.
% See \code{earg} in \code{\link{Links}} for general information.
% }

  \item{type.fitted}{
  See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}}.

  }

  \item{ipstr0}{
  Optional initial values for \eqn{\phi}{phi}, whose values must lie
  between 0 and 1.
  The default is to compute an initial value internally.
  If a vector then recycling is used.

  }
  \item{lonempstr0, ionempstr0}{
  Corresponding arguments for the other parameterization.
  See details below.

  }

% \item{zero}{
% An integer specifying which linear/additive predictor is modelled
% as intercepts only.  If given, the value must be either 1 or 2,
% and the default is the first.  Setting \code{zero = NULL} enables both
% \eqn{\phi}{phi} and \eqn{\mu}{prob} to be modelled as a function of
% the explanatory variables.
% See \code{\link{CommonVGAMffArguments}} for more information.
% }

  \item{multiple.responses}{
  Logical. Currently it must be \code{FALSE}, meaning the function
  does not handle multiple responses.
  This is to remain compatible with the same argument in
  \code{\link{binomialff}}.

  }
  \item{zero, imethod}{
  See \code{\link{CommonVGAMffArguments}} for information.
  Argument \code{zero} changed its default value for version 0.9-2.
  }
}
\details{
  These functions are based on
  \deqn{P(Y=0) = \phi + (1-\phi) (1-\mu)^N,}{%
        P(Y=0) = phi + (1-phi) * (1-prob)^N,}
  for \eqn{y=0}, and
  \deqn{P(Y=y) = (1-\phi) {N \choose Ny} \mu^{Ny} (1-\mu)^{N(1-y)},}{%
        P(Y=y) = (1-phi) * choose(N,Ny) * prob^(N*y) * (1-prob)^(N*(1-y)),}
  for \eqn{y=1/N,2/N,\ldots,1}.
  That is, the response is a sample proportion out of \eqn{N} trials,
  and the argument \code{size} in \code{\link{rzibinom}} is
  \eqn{N} here.
  The parameter \eqn{\phi}{phi} is the probability of a structural zero,
  and it satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
  The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) \mu}{E(Y) = (1-phi) * prob}
  and these are returned as the fitted values by default.
  By default, the two linear/additive predictors
  for \code{zibinomial()}
  are \eqn{(logit(\phi), logit(\mu))^T}{(logit(phi), logit(prob))^T}.

  The \pkg{VGAM} family function \code{zibinomialff()} has a few
  changes compared to \code{zibinomial()}.
  These are:
  (i)   the order of the linear/additive predictors is switched so the
        binomial probability comes first;
  (ii)  argument \code{onempstr0} is now 1 minus the probability of a
        structural zero, i.e., the probability of the parent (binomial)
        component; that is, \code{onempstr0} is \code{1-pstr0};
  (iii) argument \code{zero} has a new default so that \code{onempstr0}
        is intercept-only by default.
  Now \code{zibinomialff()} is generally recommended over
  \code{zibinomial()}.
  Both functions implement Fisher scoring.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.

}
\references{
  Welsh, A. H., Lindenmayer, D. B. and Donnelly, C. F. (2013)
  Fitting and interpreting occupancy models.
  \emph{PLOS One}, \bold{8}, 1--21.

}

\author{ T. W. Yee }
\note{
  The response variable must have one of the formats described by
  \code{\link{binomialff}}, e.g., a factor or a two-column matrix or a
  vector of sample proportions with the \code{weights} argument
  specifying the values of \eqn{N}.

  To work well, one needs large values of \eqn{N} and
  \eqn{\mu>0}{prob>0}, i.e., the larger \eqn{N} and \eqn{\mu}{prob}
  are, the better.
  If \eqn{N = 1} then the model is unidentifiable since the number of
  parameters is excessive.

  Setting \code{stepsize = 0.5}, say, may aid convergence.

% 20130316; commenting out this:
% For intercept-models and constant \eqn{N} over the \eqn{n} observations,
% the \code{misc} slot has a component called \code{pobs0} which is the
% estimate of the probability of an observed 0, i.e., \eqn{P(Y=0)}.
% This family function currently cannot handle a multivariate
% response (only \code{multiple.responses = FALSE} can be handled).

% 20130316; adding this:
  Estimated probabilities of a structural zero and an observed zero
  are returned, as in \code{\link{zipoisson}}.

  The zero-\emph{deflated} binomial distribution might be fitted by
  setting \code{lpstr0 = identitylink}, albeit not entirely reliably.
  See \code{\link{zipoisson}} for information that can be applied here.
  Else try the zero-altered binomial distribution
  (see \code{\link{zabinomial}}).

}
\section{Warning }{
  Numerical problems can occur.
  Half-stepping is not uncommon.
  If failure to converge occurs, make use of the argument \code{ipstr0}
  or \code{ionempstr0}, or \code{imethod}.

}

\seealso{
  \code{\link{rzibinom}},
  \code{\link{binomialff}},
  \code{\link{posbinomial}},
  \code{\link{gtbinomial}},
  \code{\link[stats:Binomial]{Binomial}}.
}
\examples{
size <- 10  # Number of trials; N in the notation above
nn <- 200
zdata <- data.frame(pstr0 = logitlink( 0, inverse = TRUE),  # 0.50
                    mubin = logitlink(-1, inverse = TRUE),  # Mean of usual binomial
                    sv    = rep(size, length = nn))
zdata <- transform(zdata,
                   y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
with(zdata, table(y))
fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE)
fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE,
            stepsize = 0.5)

coef(fit, matrix = TRUE)
Coef(fit)  # Useful for intercept-only models
head(fitted(fit, type = "pobs0"))  # Estimate of P(Y = 0)
head(fitted(fit))
with(zdata, mean(y))  # Compare this with fitted(fit)
summary(fit)
}
\keyword{models}
\keyword{regression}

% fit@misc$pobs0  # Estimate of P(Y = 0)
VGAM/man/grain.us.Rd0000644000176200001440000000202413565414527013606 0ustar liggesusers\name{grain.us}
\alias{grain.us}
\docType{data}
\title{Grain Prices Data in USA }
\description{
  A 4-column matrix.
}
\usage{data(grain.us)}
\format{
  The columns are:
  \describe{
    \item{wheat.flour}{numeric}
    \item{corn}{numeric}
    \item{wheat}{numeric}
    \item{rye}{numeric}
  }
}
\details{
  Monthly averages of grain prices in the United States for wheat
  flour, corn, wheat, and rye for the period January 1961 through
  October 1972.
  The units are US dollars per 100 pound sack for wheat flour, and
  per bushel for corn, wheat and rye.
}
\source{
  Ahn and Reinsel (1988).
}
\references{
  Ahn, S. K. and Reinsel, G. C. (1988)
  Nested reduced-rank autoregressive models for multiple time series.
  \emph{Journal of the American Statistical Association},
  \bold{83}, 849--856.
}
\examples{
\dontrun{
cgrain <- scale(grain.us, scale = FALSE)  # Center the time series only
fit <- vglm(cgrain ~ 1, rrar(Rank = c(4, 1)),
            epsilon = 1e-3, stepsize = 0.5, trace = TRUE, maxit = 50)
summary(fit)
}
}
\keyword{datasets}
VGAM/man/step4vglm.Rd0000644000176200001440000001647413565414527014023 0ustar liggesusers% File src/library/stats/man/step.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2014 R Core Team
% Distributed under GPL 2 or later

\name{step4}
\alias{step4}
\alias{step4vglm}
\title{ Choose a model by AIC in a Stepwise Algorithm }
\description{
  Select a formula-based model by AIC.
}
\usage{
step4(object, \dots)
step4vglm(object, scope, direction = c("both", "backward", "forward"),
          trace = 1, keep = NULL, steps = 1000, k = 2, \dots)
}
%# constraints = NULL,
\arguments{
  \item{object}{
  an object of class \code{"vglm"}.
  This is used as the initial model in the stepwise search.

% It is strongly recommended that this be the full model
% because a backward direction is taken first.

  }
  \item{scope}{
  See \code{\link[stats]{step}}.

% defines the range of models examined in the stepwise search.
% This should be either a single formula, or a list containing
% components \code{upper} and \code{lower}, both formulae.  See the
% details for how to specify the formulae and how they are used.

  }
  \item{direction}{
  See \code{\link[stats]{step}}.

% the mode of stepwise search, can be one of \code{"both"},
% \code{"backward"}, or \code{"forward"},
% with a default being the first value.
% If the \code{scope} argument is missing the default for
% \code{direction} is also \code{"backward"}.
% Values can be abbreviated.

  }
% \item{trace}{
% if positive, information is printed during the running
% of \code{step}.
% Larger values may give more detailed information.
% }
  \item{trace, keep}{
  See \code{\link[stats]{step}}.
% a filter function whose input is a fitted model object and the % associated \code{AIC} statistic, and whose output is arbitrary. % Typically \code{keep} will select a subset of the components of % the object and return them. The default is not to keep anything. } \item{steps, k}{ See \code{\link[stats]{step}}. % the maximum number of steps to be considered. The default is 1000 % (essentially as many as required). It is typically used to stop the % process early. } % \item{k}{ % See \code{\link[stats]{step}}. % the multiple of the number of degrees of freedom used for the % penalty. Only \code{k = 2} gives the genuine AIC: \code{k = log(n)} % is sometimes referred to as BIC or SBC. % } \item{\dots}{ any additional arguments to \code{\link{extractAIC.vglm}}, \code{\link{drop1.vglm}} and \code{\link{add1.vglm}}. } } \value{ The results are placed in the \code{post} slot of the stepwise-selected model that is returned. There are up to two additional components. There is an \code{"anova"} component corresponding to the steps taken in the search, as well as a \code{"keep"} component if the \code{keep=} argument was supplied in the call. % the stepwise-selected model is returned, with up to two additional % components. There is an \code{"anova"} component corresponding to the % steps taken in the search, as well as a \code{"keep"} component if the % \code{keep=} argument was supplied in the call. The % \code{"Resid. Dev"} column of the analysis of deviance table refers to % a constant minus twice the maximized log likelihood: it will be a % deviance only in cases where a saturated model is well-defined (thus % excluding \code{lm}, \code{aov} and \code{survreg} fits, for example). } \details{ This function is a direct adaptation of \code{\link[stats]{step}} for \code{\link{vglm-class}} objects. Since \code{\link[stats]{step}} is not generic, the name \code{step4()} was adopted and it \emph{is} generic, as well as being S4 rather than S3. It is the intent that this function should work as similar as possible to \code{\link[stats]{step}}. Internally, the methods function for \code{\link{vglm-class}} objects calls \code{\link{add1.vglm}} and \code{\link{drop1.vglm}} repeatedly. % ; it will work for any method for which they work, and that % is determined by having a valid method for \code{\link{extractAIC}}. % When the additive constant can be chosen so that AIC is equal to % Mallows' \eqn{C_p}{Cp}, this is done and the tables are labelled % appropriately. % The set of models searched is determined by the \code{scope} argument. % The right-hand-side of its \code{lower} component is always included % in the model, and right-hand-side of the model is included in the % \code{upper} component. If \code{scope} is a single formula, it % specifies the \code{upper} component, and the \code{lower} model is % empty. If \code{scope} is missing, the initial model is used as the % \code{upper} model. % Models specified by \code{scope} can be templates to update % \code{object} as used by \code{\link{update.formula}}. So using % \code{.} in a \code{scope} formula means \sQuote{what is % already there}, with \code{.^2} indicating all interactions of % existing terms. % There is a potential problem in using \code{\link{glm}} fits with a % variable \code{scale}, as in that case the deviance is not simply % related to the maximized log-likelihood. 
The \code{"glm"} method for % function \code{\link{extractAIC}} makes the % appropriate adjustment for a \code{gaussian} family, but may need to be % amended for other cases. (The \code{binomial} and \code{poisson} % families have fixed \code{scale} by default and do not correspond % to a particular maximum-likelihood problem for variable \code{scale}.) } %\note{ % This is a minimal implementation. Use \code{\link[MASS]{stepAIC}} % in package \CRANpkg{MASS} for a wider range of object classes. %} \section{Warning}{ In general, the same warnings in \code{\link[stats]{drop1.glm}} and \code{\link{drop1.vglm}} apply here. % The model fitting must apply the models to the same dataset. This % may be a problem if there are missing values and \R's default of % \code{na.action = na.omit} is used. We suggest you remove the % missing values first. % Calls to the function \code{\link{nobs}} are used to check that the % number of observations involved in the fitting process remains % unchanged. } \seealso{ \code{\link{add1.vglm}}, \code{\link{drop1.vglm}}, \code{\link{vglm}}, \code{\link[stats]{add1.glm}}, \code{\link[stats]{drop1.glm}}, \code{\link{backPain2}}, \code{\link[stats]{step}}, \code{\link[stats]{update}}. % \code{\link[MASS]{stepAIC}} in \CRANpkg{MASS}, } %\references{ % Hastie, T. J. and Pregibon, D. (1992) % \emph{Generalized linear models.} % Chapter 6 of \emph{Statistical Models in S} % eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole. % Venables, W. N. and Ripley, B. D. (2002) % \emph{Modern Applied Statistics with S.} % New York: Springer (4th ed). %} %\author{ % B. D. Ripley: \code{step} is a slightly simplified version of % \code{\link[MASS]{stepAIC}} in package \CRANpkg{MASS} % (Venables & Ripley, 2002 and earlier editions). % The idea of a \code{step} function follows that described in % Hastie & Pregibon (1992); but the implementation % in \R is more general. %} \examples{ data("backPain2", package = "VGAM") summary(backPain2) fit1 <- vglm(pain ~ x2 + x3 + x4 + x2:x3 + x2:x4 + x3:x4, propodds, data = backPain2) spom1 <- step4(fit1) summary(spom1) spom1@post$anova } \keyword{models} %\donttest{} %\dontshow{utils::example("lm", echo = FALSE)} VGAM/man/zipoisUC.Rd0000644000176200001440000000716613565414527013641 0ustar liggesusers\name{Zipois} \alias{Zipois} \alias{dzipois} \alias{pzipois} \alias{qzipois} \alias{rzipois} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-inflated and zero-deflated Poisson distribution with parameter \code{pstr0}. } \usage{ dzipois(x, lambda, pstr0 = 0, log = FALSE) pzipois(q, lambda, pstr0 = 0) qzipois(p, lambda, pstr0 = 0) rzipois(n, lambda, pstr0 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles. } \item{p}{vector of probabilities. } \item{n}{number of observations. Must be a single positive integer. } \item{lambda}{ Vector of positive means. } \item{pstr0}{ Probability of a structural zero (i.e., ignoring the Poisson distribution), called \eqn{\phi}{phi}. The default value of \eqn{\phi = 0}{phi = 0} corresponds to the response having an ordinary Poisson distribution. This argument may be negative to allow for 0-deflation, hence its interpretation as a probability ceases. } \item{log}{ Logical. Return the logarithm of the answer? 
  }
}
\details{
  The probability function of \eqn{Y} is 0 with probability
  \eqn{\phi}{phi}, and \eqn{Poisson(\lambda)}{Poisson(lambda)} with
  probability \eqn{1-\phi}{1-phi}. Thus
  \deqn{P(Y=0) = \phi + (1-\phi) P(W=0)}{%
        P(Y=0) = phi + (1-phi) * P(W=0)}
  where \eqn{W} is distributed \eqn{Poisson(\lambda)}{Poisson(lambda)}.
}
\value{
  \code{dzipois} gives the density,
  \code{pzipois} gives the distribution function,
  \code{qzipois} gives the quantile function, and
  \code{rzipois} generates random deviates.
}
%\references{ }
\author{ T. W. Yee }
\note{
  The argument \code{pstr0} is recycled to the required length, and
  must have values which lie in the interval \eqn{[0,1]}.

  These functions actually allow for the \emph{zero-deflated Poisson}
  distribution. Here, \code{pstr0} is also permitted to lie in the
  interval \code{[-1/expm1(lambda), 0]}. The resulting probability of
  a zero count is \emph{less than} the nominal Poisson value, and the
  use of \code{pstr0} to stand for the probability of a structural
  zero loses its meaning.
  When \code{pstr0} equals \code{-1/expm1(lambda)} this corresponds
  to the positive-Poisson distribution
  (e.g., see \code{\link{dpospois}}).

}
\seealso{
  \code{\link{zipoisson}},
  \code{\link{Gaitpois.mlm}},
  \code{\link[stats:Poisson]{dpois}},
  \code{\link{rzinegbin}}.
}
\examples{
lambda <- 3; pstr0 <- 0.2; x <- (-1):7
(ii <- dzipois(x, lambda, pstr0 = pstr0))
max(abs(cumsum(ii) - pzipois(x, lambda, pstr0 = pstr0)))  # Should be 0
table(rzipois(100, lambda, pstr0 = pstr0))
table(qzipois(runif(100), lambda, pstr0))
round(dzipois(0:10, lambda, pstr0 = pstr0) * 100)  # Should be similar
                                                   # to the tables above

\dontrun{ x <- 0:10
par(mfrow = c(2, 1))  # Zero-inflated Poisson
barplot(rbind(dzipois(x, lambda, pstr0 = pstr0), dpois(x, lambda)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("ZIP(", lambda, ", pstr0 = ", pstr0, ") (blue) vs",
                     " Poisson(", lambda, ") (orange)", sep = ""),
        names.arg = as.character(x))

deflat.limit <- -1 / expm1(lambda)  # Zero-deflated Poisson
newpstr0 <- round(deflat.limit / 1.5, 3)
barplot(rbind(dzipois(x, lambda, pstr0 = newpstr0), dpois(x, lambda)),
        beside = TRUE, col = c("blue","orange"),
        main = paste("ZDP(", lambda, ", pstr0 = ", newpstr0, ") (blue) vs",
                     " Poisson(", lambda, ") (orange)", sep = ""),
        names.arg = as.character(x))
}
}
\keyword{distribution}
VGAM/man/enzyme.Rd0000644000176200001440000000170313565414527013372 0ustar liggesusers\name{enzyme}
\alias{enzyme}
\docType{data}
\title{ Enzyme Data}
\description{
  Enzyme velocity and substrate concentration.
}
\usage{data(enzyme)}
\format{
  A data frame with 12 observations on the following 2 variables.
  \describe{
    \item{conc}{a numeric explanatory vector; substrate concentration}
    \item{velocity}{a numeric response vector; enzyme velocity}
  }
}
\details{
  Sorry, more details need to be included later.
}
\source{
  Sorry, more details need to be included later.
}
\references{
  Watts, D. G. (1981)
  An introduction to nonlinear least squares.
  In: L. Endrenyi (Ed.),
  \emph{Kinetic Data Analysis: Design and Analysis of Enzyme and
  Pharmacokinetic Experiments}, pp.1--24.
  New York: Plenum Press.
}
\seealso{
  \code{\link[VGAM]{micmen}}.
}
\examples{
\dontrun{
fit <- vglm(velocity ~ 1, micmen, data = enzyme, trace = TRUE,
            form2 = ~ conc - 1, crit = "coef")
summary(fit)
}
}
\keyword{datasets}
VGAM/man/fisk.Rd0000644000176200001440000000667613565414527013031 0ustar liggesusers\name{fisk}
\alias{fisk}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Fisk Distribution family function } \description{ Maximum likelihood estimation of the 2-parameter Fisk distribution. } \usage{ fisk(lscale = "loglink", lshape1.a = "loglink", iscale = NULL, ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information. } \item{lshape1.a, lscale}{ Parameter link functions applied to the (positive) parameters \eqn{a} and \code{scale}. See \code{\link{Links}} for more choices. } \item{iscale, ishape1.a, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. For \code{imethod = 2} a good initial value for \code{iscale} is needed to obtain a good estimate for the other parameter. } \item{gscale, gshape1.a}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 2-parameter Fisk (aka log-logistic) distribution is the 4-parameter generalized beta II distribution with shape parameter \eqn{q=p=1}. It is also the 3-parameter Singh-Maddala distribution with shape parameter \eqn{q=1}, as well as the Dagum distribution with \eqn{p=1}. More details can be found in Kleiber and Kotz (2003). The Fisk distribution has density \deqn{f(y) = a y^{a-1} / [b^a \{1 + (y/b)^a\}^2]}{% f(y) = a y^(a-1) / [b^a (1 + (y/b)^a)^2]} for \eqn{a > 0}, \eqn{b > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and \eqn{a} is a shape parameter. The cumulative distribution function is \deqn{F(y) = 1 - [1 + (y/b)^a]^{-1} = [1 + (y/b)^{-a}]^{-1}.}{% F(y) = 1 - [1 + (y/b)^a]^(-1) = [1 + (y/b)^(-a)]^(-1).} The mean is \deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(1 - 1/a)}{% E(Y) = b gamma(1 + 1/a) gamma(1 - 1/a)} provided \eqn{a > 1}; these are returned as the fitted values. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. % The following paper simplifies the EIM: %Reath, J. and Dong, J. and Wang, M. (2018) %Improved parameter estimation of the log-logistic distribution %with applications. %\emph{Computational Statistics}, \bold{33}: 339--356. %\ref{reat:dong:wang:2018} } \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}. } \seealso{ \code{\link{Fisk}}, \code{\link{genbetaII}}, \code{\link{betaII}}, \code{\link{dagum}}, \code{\link{sinmad}}, \code{\link{inv.lomax}}, \code{\link{lomax}}, \code{\link{paralogistic}}, \code{\link{inv.paralogistic}}, \code{\link{simulate.vlm}}. } \examples{ fdata <- data.frame(y = rfisk(n = 200, shape = exp(1), scale = exp(2))) fit <- vglm(y ~ 1, fisk(lss = FALSE), data = fdata, trace = TRUE) fit <- vglm(y ~ 1, fisk(ishape1.a = exp(2)), data = fdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/zoabetaR.Rd0000644000176200001440000000534313565414527013636 0ustar liggesusers\name{zoabetaR} \alias{zoabetaR} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Zero- and One-Inflated Beta Distribution Family Function }
\description{
  Estimation of the shape parameters of the two-parameter beta
  distribution plus the probabilities of a 0 and/or a 1.
}
\usage{
zoabetaR(lshape1 = "loglink", lshape2 = "loglink", lpobs0 = "logitlink",
         lpobs1 = "logitlink", ishape1 = NULL, ishape2 = NULL, trim = 0.05,
         type.fitted = c("mean", "pobs0", "pobs1", "beta.mean"),
         parallel.shape = FALSE, parallel.pobs = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lshape1, lshape2, lpobs0, lpobs1}{
  Details at \code{\link{CommonVGAMffArguments}}.
  See \code{\link{Links}} for more choices.

  }
  \item{ishape1, ishape2}{
  Details at \code{\link{CommonVGAMffArguments}}.

  }
  \item{trim, zero}{
  Same as \code{\link{betaR}}.

  }
  \item{parallel.shape, parallel.pobs}{
  See \code{\link{CommonVGAMffArguments}} for more information.

  }
  \item{type.fitted}{
  The choice \code{"beta.mean"} means to return the mean of the beta
  distribution; the 0s and 1s are ignored.
  See \code{\link{CommonVGAMffArguments}} for more information.

  }
}
\details{
  The standard 2-parameter beta distribution has support on (0,1);
  however, many datasets have 0 and/or 1 values too.
  This family function handles 0s and 1s (at least one of them must
  be present) in the data set by modelling the probability of a 0 by
  a logistic regression (default link is the logit), and similarly
  for the probability of a 1.
  The remaining proportion, \code{1-pobs0-pobs1}, of the data comes
  from a standard beta distribution.
  This family function therefore extends \code{\link{betaR}}.
  One has \eqn{M=3} or \eqn{M=4} per response.
  Multiple responses are allowed.

}
\value{
  Similar to \code{\link{betaR}}.
}
%\references{
%}
\author{ Thomas W. Yee and Xiangjie Xue. }
%\note{
%}
\seealso{
  \code{\link{Zoabeta}},
  \code{\link{betaR}},
  \code{\link{betaff}},
  \code{\link[stats:Beta]{Beta}},
  \code{\link{zipoisson}}.
}
\examples{
nn <- 1000; set.seed(1)
bdata <- data.frame(x2 = runif(nn))
bdata <- transform(bdata,
                   pobs0 = logitlink(-2 + x2, inverse = TRUE),
                   pobs1 = logitlink(-2 + x2, inverse = TRUE))
bdata <- transform(bdata,
                   y1 = rzoabeta(nn, shape1 = exp(1 + x2),
                                 shape2 = exp(2 - x2),
                                 pobs0 = pobs0, pobs1 = pobs1))
summary(bdata)
fit1 <- vglm(y1 ~ x2, zoabetaR(parallel.pobs = TRUE),
             data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
summary(fit1)
}
\keyword{regression}

% y1 = rbeta(nn, shape1 = exp(1 + x2), shape2 = exp(2 - x2))
%rrr <- runif(nn)
%bdata$y1[rrr < bdata$p0] <- 0
%bdata$y1[rrr > 1 - bdata$p1] <- 1
VGAM/man/poissonff.Rd0000644000176200001440000001426313565414527014076 0ustar liggesusers\name{poissonff}
%\alias{poisson}
\alias{poissonff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Poisson Family Function }
\description{
  Family function for a generalized linear model fitted to Poisson
  responses.

% The dispersion parameters may be known or unknown.
% link = "loglink", dispersion = 1, onedpar = FALSE, imu = NULL,

}
\usage{
poissonff(link = "loglink", imu = NULL, imethod = 1,
          parallel = FALSE, zero = NULL, bred = FALSE, earg.link = FALSE,
          type.fitted = c("mean", "quantiles"),
          percentiles = c(25, 50, 75))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{link}{
  Link function applied to the mean or means.
  See \code{\link{Links}} for more choices and information.

  }
% \item{dispersion}{
% Dispersion parameter. By default, maximum
% likelihood is used to estimate the model because it is known.
% However, the user can specify % \code{dispersion = 0} to have it estimated, or % else specify a known positive value (or values if the response % is a matrix---one value per column). % } % \item{onedpar}{ % One dispersion parameter? If the response is a matrix, % then a separate % dispersion parameter will be computed for each response (column), % by default. % Setting \code{onedpar=TRUE} will pool them so that there is only % one dispersion parameter to be estimated. % } \item{parallel}{ A logical or formula. Used only if the response is a matrix. } \item{imu, imethod}{ See \code{\link{CommonVGAMffArguments}} for more information. } \item{zero}{ Can be an integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The values must be from the set \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the matrix response. See \code{\link{CommonVGAMffArguments}} for more information. } \item{bred, earg.link}{ Details at \code{\link{CommonVGAMffArguments}}. Setting \code{bred = TRUE} should work for multiple responses and all \pkg{VGAM} link functions; it has been tested for \code{\link{loglink}}, \code{\link{identity}} but further testing is required. } \item{type.fitted, percentiles}{ Details at \code{\link{CommonVGAMffArguments}}. } } \details{ \eqn{M} defined above is the number of linear/additive predictors. With overdispersed data try \code{\link{negbinomial}}. % If the dispersion parameter is unknown, then the resulting estimate % is not fully a maximum likelihood estimate. % A dispersion parameter that is less/greater than unity corresponds to % under-/over-dispersion relative to the Poisson model. Over-dispersion % is more common in practice. % When fitting a Quadratic RR-VGLM (see \code{\link{cqo}}), the % response is a matrix of \eqn{M}, say, columns (e.g., one column % per species). Then there will be \eqn{M} dispersion parameters % (one per column of the response matrix) if \code{dispersion = 0} and % \code{onedpar = FALSE}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{vgam}}, \code{\link{rrvglm}}, \code{\link{cqo}}, and \code{\link{cao}}. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ This function will handle a matrix response automatically. % The call \code{poissonff(dispersion=0, ...)} is equivalent to % \code{quasipoissonff(...)}. The latter was written so that R users % of \code{quasipoisson()} would only need to add a ``\code{ff}'' % to the end of the family function name. Regardless of whether the dispersion parameter is to be estimated or not, its value can be seen from the output from the \code{summary()} of the object. % With the introduction of name spaces for the \pkg{VGAM} package, % \code{"ff"} can be dropped for this family function. } \section{Warning }{ With multiple responses, assigning a known dispersion parameter for \emph{each} response is not handled well yet. Currently, only a single known dispersion parameter is handled well. 
}
\seealso{
  \code{\link{Links}},
  \code{\link{hdeff.vglm}},
  \code{\link{negbinomial}},
  \code{\link{genpoisson}},
  \code{\link{gatpoisson.mlm}},
  \code{\link{zipoisson}},
  \code{\link{pospoisson}},
  \code{\link{oipospoisson}},
  \code{\link{otpospoisson}},
  \code{\link{skellam}},
  \code{\link{mix2poisson}},
  \code{\link{cens.poisson}},
  \code{\link{ordpoisson}},
  \code{\link{amlpoisson}},
  \code{\link{inv.binomial}},
  \code{\link{simulate.vlm}},
  \code{\link{loglink}},
  \code{\link{polf}},
  \code{\link{rrvglm}},
  \code{\link{cqo}},
  \code{\link{cao}},
  \code{\link{binomialff}},
  \code{\link[stats]{poisson}},
  \code{\link[stats]{Poisson}},
  \code{\link{poisson.points}},
  \code{\link{ruge}},
  \code{\link{V1}},
  \code{\link{residualsvglm}}.

% \code{\link{quasipoissonff}},
% \code{\link{quasibinomialff}},

}
\examples{
poissonff()

set.seed(123)
pdata <- data.frame(x2 = rnorm(nn <- 100))
pdata <- transform(pdata, y1 = rpois(nn, exp(1 + x2)),
                          y2 = rpois(nn, exp(1 + x2)))
(fit1 <- vglm(cbind(y1, y2) ~ x2, poissonff, data = pdata))
(fit2 <- vglm(y1 ~ x2, poissonff(bred = TRUE), data = pdata))
coef(fit1, matrix = TRUE)
coef(fit2, matrix = TRUE)

nn <- 200
cdata <- data.frame(x2 = rnorm(nn), x3 = rnorm(nn), x4 = rnorm(nn))
cdata <- transform(cdata, lv1 = 0 + x3 - 2*x4)
cdata <- transform(cdata, lambda1 = exp(3 - 0.5 *  (lv1-0)^2),
                          lambda2 = exp(2 - 0.5 *  (lv1-1)^2),
                          lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2))
cdata <- transform(cdata, y1 = rpois(nn, lambda1),
                          y2 = rpois(nn, lambda2),
                          y3 = rpois(nn, lambda3))
\dontrun{ lvplot(p1, y = TRUE, lcol = 2:4, pch = 2:4, pcol = 2:4, rug = FALSE) }
}
\keyword{models}
\keyword{regression}

%# vvv p1 <- cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, data = cdata,
%# vvv           eq.tol = FALSE, I.tol = FALSE)
%# vvv summary(p1)  # # Three dispersion parameters are all unity
VGAM/man/oizipfUC.Rd0000644000176200001440000000701413565414527013614 0ustar liggesusers\name{Oizipf}
\alias{Oizipf}
\alias{doizipf}
\alias{poizipf}
\alias{qoizipf}
\alias{roizipf}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ One-Inflated Zipf Distribution }
\description{
  Density, distribution function, quantile function and random
  generation for the one-inflated Zipf distribution with parameter
  \code{pstr1}.
}
\usage{
doizipf(x, N, shape, pstr1 = 0, log = FALSE)
poizipf(q, N, shape, pstr1 = 0)
qoizipf(p, N, shape, pstr1 = 0)
roizipf(n, N, shape, pstr1 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q, p, n}{Same as \code{\link[stats]{Uniform}}.}
  \item{N, shape}{ See \code{\link{Zipf}}. }
  \item{pstr1}{
  Probability of a structural one
  (i.e., ignoring the Zipf distribution),
  called \eqn{\phi}{phi}.
  The default value of \eqn{\phi = 0}{phi = 0} corresponds
  to the response having an ordinary Zipf distribution.

  }
  \item{log}{Same as \code{\link[stats]{Uniform}}.}
}
\details{
  The probability function of \eqn{Y} is 1 with probability
  \eqn{\phi}{phi}, and \eqn{Zipf(N, s)} with probability
  \eqn{1-\phi}{1-phi}. Thus
  \deqn{P(Y=1) = \phi + (1-\phi) P(W=1)}{%
        P(Y=1) = phi + (1-phi) * P(W=1)}
  where \eqn{W} is distributed as a \eqn{Zipf(N, s)} random variable.
  The \pkg{VGAM} family function \code{\link{oizipf}} estimates the
  two parameters of this model by Fisher scoring.
}
\value{
  \code{doizipf} gives the density,
  \code{poizipf} gives the distribution function,
  \code{qoizipf} gives the quantile function, and
  \code{roizipf} generates random deviates.
}
%\references{ }
\author{ T. W.
Yee }
\note{
  The argument \code{pstr1} is recycled to the required length,
  and usually has values which lie in the interval \eqn{[0,1]}.

  These functions actually allow for the \emph{one-deflated Zipf}
  distribution. Here, \code{pstr1} is also permitted to lie in the
  interval
  \code{[-dzipf(1, N, s) / (1 - dzipf(1, N, s)), 0]}.
  The resulting probability of a unit count is \emph{less than}
  the nominal Zipf value, and the use of \code{pstr1} to stand for
  the probability of a structural 1 loses its meaning.
%
%
%
  When \code{pstr1} equals
  \code{-dzipf(1, N, s) / (1 - dzipf(1, N, s))}
  this corresponds to the 1-truncated Zipf distribution.

}
\seealso{
  \code{\link{oizeta}},
  \code{\link{Zipf}},
  \code{\link{zipf}},
  \code{\link{Oizeta}}.
}
\examples{
N <- 10; shape <- 1.5; pstr1 <- 0.3; x <- (-1):N
(ii <- doizipf(x, N, shape, pstr1 = pstr1))

\dontrun{ x <- 0:10
par(mfrow = c(2, 1))  # One-Inflated Zipf
barplot(rbind(doizipf(x, N, shape, pstr1 = pstr1), dzipf(x, N, shape)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("OIZipf(", N, ", ", shape, ", pstr1 = ", pstr1,
                     ") (blue) vs", " Zipf(", N, ", ", shape,
                     ") (orange)", sep = ""),
        names.arg = as.character(x))

deflat.limit <- -dzipf(1, N, shape) / (1 - dzipf(1, N, shape))
newpstr1 <- round(deflat.limit, 3) + 0.001  # Inside but near the boundary
barplot(rbind(doizipf(x, N, shape, pstr1 = newpstr1), dzipf(x, N, shape)),
        beside = TRUE, col = c("blue", "orange"),
        main = paste("ODZipf(", N, ", ", shape, ", pstr1 = ", newpstr1,
                     ") (blue) vs", " Zipf(", N, ", ", shape,
                     ") (orange)", sep = ""),
        names.arg = as.character(x))
}
}
\keyword{distribution}

%qoizipf(p, shape, pstr1 = 0)
%roizipf(n, shape, pstr1 = 0)
% table(roizipf(100, shape, pstr1 = pstr1))
% round(doizipf(1:10, shape, pstr1 = pstr1) * 100)  # Should be similar
VGAM/man/explink.Rd0000644000176200001440000000443113565414527013536 0ustar liggesusers\name{explink}
\alias{explink}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Exponential Link Function }
\description{
  Computes the exponential transformation, including its inverse and
  the first two derivatives.
}
\usage{
explink(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
        short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{theta}{
  Numeric or character.
  See below for further details.

  }
% \item{earg}{
% Optional list.
% See \code{\link{Links}} for general information about \code{earg}.
% }
  \item{bvalue}{
  See \code{\link{clogloglink}}.

  }
  \item{inverse, deriv, short, tag}{
  Details at \code{\link{Links}}.

  }
}
\details{
  The exponential link function is potentially suitable for parameters
  that are positive.
  Numerical values of \code{theta} close to negative or positive
  infinity may result in
  \code{0}, \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
}
\value{
  For \code{explink} with \code{deriv = 0},
  the exponential of \code{theta}, i.e., \code{exp(theta)} when
  \code{inverse = FALSE}.
  And if \code{inverse = TRUE} then \code{log(theta)};
  if \code{theta} is not positive then it will return \code{NaN}.

  For \code{deriv = 1}, then the function returns
  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of
  \code{theta} if \code{inverse = FALSE},
  else if \code{inverse = TRUE} then it returns the reciprocal.

  Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
}
%\references{
% McCullagh, P. and Nelder, J. A. (1989)
% \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
%
%}
\author{ Thomas W.
Yee } \note{ This function has particular use for computing quasi-variances when used with \code{\link{rcim}} and \code{\link{uninormal}}. Numerical instability may occur when \code{theta} is close to negative or positive infinity. One way of overcoming this (one day) is to use \code{bvalue}. } \seealso{ \code{\link{Links}}, \code{\link{loglink}}, \code{\link{rcim}}, \code{\link{Qvar}}, \code{\link{uninormal}}. } \examples{ theta <- rnorm(30) explink(theta) max(abs(explink(explink(theta), inverse = TRUE) - theta)) # Should be 0 } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/multilogitlink.Rd0000644000176200001440000000660413565414527015137 0ustar liggesusers\name{multilogitlink} \alias{multilogitlink} % \alias{multilogit} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Multi-logit Link Function } \description{ Computes the multilogit transformation, including its inverse and the first two derivatives. } \usage{ multilogitlink(theta, refLevel = "(Last)", M = NULL, whitespace = FALSE, bvalue = NULL, inverse = FALSE, deriv = 0, all.derivs = FALSE, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{refLevel, M, whitespace}{ See \code{\link{multinomial}}. } \item{bvalue}{ See \code{\link{Links}}. } \item{all.derivs}{ Logical. This is currently experimental only. % If \code{TRUE} then more partial derivatives are returned; % these is needed by, e.g., % \code{\link{hdeff.vglm}} for \code{\link{multinomial}} fits. % This argument might work for only some combinations of % the arguments, e.g., it should work at least for % \code{inverse = TRUE} and \code{deriv = 1}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The \code{multilogitlink()} link function is a generalization of the \code{\link{logitlink}} link to \eqn{M} levels/classes. It forms the basis of the \code{\link{multinomial}} logit model. It is sometimes called the \emph{multi-logit} link or the \emph{multinomial logit} link. When its inverse function is computed it returns values which are positive and add to unity. } \value{ For \code{multilogitlink} with \code{deriv = 0}, the multilogit of \code{theta}, i.e., \code{log(theta[, j]/theta[, M+1])} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{exp(theta[, j])/(1+rowSums(exp(theta)))}. For \code{deriv = 1}, then the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. Here, all logarithms are natural logarithms, i.e., to base \emph{e}. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ Numerical instability may occur when \code{theta} is close to 1 or 0 (for \code{multilogitlink}). One way of overcoming this is to use, e.g., \code{bvalue}. Currently \code{care.exp()} is used to avoid \code{NA}s being returned if the probability is too close to 1. } \seealso{ \code{\link{Links}}, \code{\link{multinomial}}, \code{\link{logitlink}}, \code{\link{normal.vcm}}, \code{\link{CommonVGAMffArguments}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, trace = TRUE, data = pneumo) # For illustration only! 
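# A quick sanity check: the inverse transform returns probabilities,
# so each row should sum to 1 (for any choice of refLevel).
summary(rowSums(multilogitlink(predict(fit), inverse = TRUE)))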
fitted(fit)
predict(fit)

multilogitlink(fitted(fit))
multilogitlink(fitted(fit)) - predict(fit)  # Should be all 0s

multilogitlink(predict(fit), inverse = TRUE)  # rowSums() add to unity
multilogitlink(predict(fit), inverse = TRUE,
               refLevel = 1)  # For illustration only
multilogitlink(predict(fit), inverse = TRUE) - fitted(fit)  # Should be all 0s

multilogitlink(fitted(fit), deriv = 1)
multilogitlink(fitted(fit), deriv = 2)
}
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/perks.Rd0000644000176200001440000000733013565414527013211 0ustar liggesusers\name{perks}
\alias{perks}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Perks Distribution Family Function }
\description{
  Maximum likelihood estimation of the 2-parameter Perks distribution.
}
\usage{
perks(lscale = "loglink", lshape = "loglink",
      iscale = NULL, ishape = NULL,
      gscale = exp(-5:5), gshape = exp(-5:5),
      nsimEIM = 500, oim.mean = FALSE, zero = NULL, nowarning = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{nowarning}{ Logical. Suppress a warning?
  Ignored for \pkg{VGAM} 0.9-7 and higher.

  }
  \item{lscale, lshape}{
  Parameter link functions applied to the shape parameter \code{shape}
  and the scale parameter \code{scale}.
  All parameters are treated as positive here.
  See \code{\link{Links}} for more choices.

  }
% \item{eshape, escale}{
% List. Extra argument for each of the links.
% See \code{earg} in \code{\link{Links}} for general information.
% }
  \item{iscale, ishape}{
  Optional initial values.
  A \code{NULL} means a value is computed internally.

  }
  \item{gscale, gshape}{
  See \code{\link{CommonVGAMffArguments}}.

  }
  \item{nsimEIM, zero}{
  See \code{\link{CommonVGAMffArguments}}.

  }
  \item{oim.mean}{
  To be currently ignored.

  }
}
\details{
  The Perks distribution has cumulative distribution function
  \deqn{F(y; \alpha, \beta) =
  1 -
  \left\{
  \frac{1 + \alpha}{1 + \alpha e^{\beta y}}
  \right\}^{1 / \beta}
  }{%
  F(y; alpha, beta) =
  1 - ((1 + alpha)/(1 + alpha * e^(beta * y)))^(1 / beta)
  }
  which leads to a probability density function
  \deqn{f(y; \alpha, \beta) =
  \left[ 1 + \alpha \right]^{1 / \beta}
  \alpha  e^{\beta y} / (1 + \alpha e^{\beta y})^{1 + 1 / \beta}
  }{%
  f(y; alpha, beta) =
  (1 + alpha)^(1 / beta) * alpha * exp(beta * y) /
  (1 + alpha * exp(beta * y))^(1 + 1 / beta)
  }
  for \eqn{\alpha > 0}{alpha > 0},
  \eqn{\beta > 0}{beta > 0},
  \eqn{y > 0}.
  Here, \eqn{\beta}{beta} is called the scale parameter \code{scale},
  and \eqn{\alpha}{alpha} is called a shape parameter.
  The moments for this distribution do not appear to be available
  in closed form.

  Simulated Fisher scoring is used and multiple responses are handled.

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
  Perks, W. (1932)
  On some experiments in the graduation of mortality statistics.
  \emph{Journal of the Institute of Actuaries},
  \bold{63}, 12--40.

  Richards, S. J. (2012)
  A handbook of parametric survival models for actuarial use.
  \emph{Scandinavian Actuarial Journal}, 1--25.

}
\author{ T. W. Yee }
\section{Warning }{
  A lot of care is needed because this is a rather difficult
  distribution for parameter estimation.
  If the self-starting initial values fail then try experimenting
  with the initial value arguments, especially \code{iscale}.
  Successful convergence depends on having very good initial values.
  Also, monitor convergence by setting \code{trace = TRUE}.
} \seealso{ \code{\link{dperks}}, \code{\link{simulate.vlm}}. } \examples{ \dontrun{ set.seed(123) pdata <- data.frame(x2 = runif(nn <- 1000)) # x2 unused pdata <- transform(pdata, eta1 = -1, ceta1 = 1) pdata <- transform(pdata, shape1 = exp(eta1), scale1 = exp(ceta1)) pdata <- transform(pdata, y1 = rperks(nn, shape = shape1, scale = scale1)) fit1 <- vglm(y1 ~ 1, perks, data = pdata, trace = TRUE) coef(fit1, matrix = TRUE) summary(fit1) } } \keyword{models} \keyword{regression} %# fit1 <- vglm(y1 ~ 1, perks, data = pdata, trace = TRUE) %# fit2 <- vglm(y1 ~ 1, perks(imeth = 2), data = pdata, trace = TRUE) % Argument \code{probs.y} is used only when \code{imethod = 2}. VGAM/man/pordlink.Rd0000644000176200001440000001002313565414527013700 0ustar liggesusers\name{pordlink} %\name{polf} \alias{pordlink} % \alias{polf} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Poisson-Ordinal Link Function } \description{ Computes the Poisson-ordinal transformation, including its inverse and the first two derivatives. } \usage{ pordlink(theta, cutpoint = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{cutpoint}{ The cutpoints should be non-negative integers. If \code{pordlink()} is used as the link function in \code{\link{cumulative}} then one should choose \code{reverse = TRUE, parallel = TRUE}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The Poisson-ordinal link function (POLF) can be applied to a parameter lying in the unit interval. Its purpose is to link cumulative probabilities associated with an ordinal response coming from an underlying Poisson distribution. If the cutpoint is zero then a complementary log-log link is used. See \code{\link{Links}} for general information about \pkg{VGAM} link functions. } \value{ See Yee (2012) for details. } \references{ Yee, T. W. (2012) \emph{Ordinal ordination with normalizing link functions for count data}, (in preparation). } \author{ Thomas W. Yee } \note{ Numerical values of \code{theta} too close to 0 or 1 or out of range result in large positive or negative values, or maybe 0 depending on the arguments. Although measures have been taken to handle cases where \code{theta} is too close to 1 or 0, numerical instabilities may still arise. In terms of the threshold approach with cumulative probabilities for an ordinal response this link function corresponds to the Poisson distribution (see \code{\link{poissonff}}) that has been recorded as an ordinal response using known cutpoints. } \section{Warning }{ Prediction may not work on \code{\link{vglm}} or \code{\link{vgam}} etc. objects if this link function is used. } \seealso{ \code{\link{Links}}, \code{\link{ordpoisson}}, \code{\link{poissonff}}, \code{\link{nbordlink}}, \code{\link{gordlink}}, \code{\link{cumulative}}. } \examples{ \dontrun{ pordlink("p", cutpoint = 2, short = FALSE) pordlink("p", cutpoint = 2, tag = TRUE) p <- seq(0.01, 0.99, by = 0.01) y <- pordlink(p, cutpoint = 2) y. 
<- pordlink(p, cutpoint = 2, deriv = 1) max(abs(pordlink(y, cutpoint = 2, inv = TRUE) - p)) # Should be 0 #\ dontrun{ par(mfrow = c(2, 1), las = 1) #plot(p, y, type = "l", col = "blue", main = "pordlink()") #abline(h = 0, v = 0.5, col = "orange", lty = "dashed") # #plot(p, y., type = "l", col = "blue", # main = "(Reciprocal of) first POLF derivative") #} # Rutherford and Geiger data ruge <- data.frame(yy = rep(0:14, times = c(57,203,383,525,532,408,273,139,45,27,10,4,0,1,1))) with(ruge, length(yy)) # 2608 1/8-minute intervals cutpoint <- 5 ruge <- transform(ruge, yy01 = ifelse(yy <= cutpoint, 0, 1)) fit <- vglm(yy01 ~ 1, binomialff(link=pordlink(cutpoint=cutpoint)), ruge) coef(fit, matrix = TRUE) exp(coef(fit)) # Another example pdata <- data.frame(x2 = sort(runif(nn <- 1000))) pdata <- transform(pdata, x3 = runif(nn)) pdata <- transform(pdata, mymu = exp( 3 + 1 * x2 - 2 * x3)) pdata <- transform(pdata, y1 = rpois(nn, lambda = mymu)) cutpoints <- c(-Inf, 10, 20, Inf) pdata <- transform(pdata, cuty = Cut(y1, breaks = cutpoints)) #\ dontrun{ with(pdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) } with(pdata, table(cuty) / sum(table(cuty))) fit <- vglm(cuty ~ x2 + x3, data = pdata, trace = TRUE, cumulative(reverse = TRUE, parallel = TRUE, link = pordlink(cutpoint = cutpoints[2:3]), multiple.responses = TRUE)) head(depvar(fit)) head(fitted(fit)) head(predict(fit)) coef(fit) coef(fit, matrix = TRUE) constraints(fit) fit@misc$earg } } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/amlexponential.Rd0000644000176200001440000001210413565414527015100 0ustar liggesusers\name{amlexponential} \alias{amlexponential} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Exponential Regression by Asymmetric Maximum Likelihood Estimation } \description{ Exponential expectile regression estimated by maximizing an asymmetric likelihood function. } \usage{ amlexponential(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4, link = "loglink") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{w.aml}{ Numeric, a vector of positive constants controlling the expectiles. The larger the value the larger the fitted expectile value (the proportion of points below the ``w-regression plane''). The default value of unity results in the ordinary maximum likelihood (MLE) solution. } \item{parallel}{ If \code{w.aml} has more than one value then this argument allows the quantile curves to differ by the same amount as a function of the covariates. Setting this to be \code{TRUE} should force the quantile curves to not cross (although they may not cross anyway). See \code{\link{CommonVGAMffArguments}} for more information. } \item{imethod}{ Integer, either 1 or 2 or 3. Initialization method. Choose another value if convergence fails. } \item{digw }{ Passed into \code{\link[base]{Round}} as the \code{digits} argument for the \code{w.aml} values; used cosmetically for labelling. } \item{link}{ See \code{\link{exponential}} and the warning below. } } \details{ The general methodology behind this \pkg{VGAM} family function is given in Efron (1992) and full details can be obtained there. % Equation numbers below refer to that article. This model is essentially an exponential regression model (see \code{\link{exponential}}) but the usual deviance is replaced by an asymmetric squared error loss function; it is multiplied by \eqn{w.aml} for positive residuals. 
The solution is the set of regression coefficients that minimize the sum of these deviance-type values over the data set, weighted by the \code{weights} argument (so that it can contain frequencies). Newton-Raphson estimation is used here. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Efron, B. (1992) Poisson overdispersion estimates based on the method of asymmetric maximum likelihood. \emph{Journal of the American Statistical Association}, \bold{87}, 98--107. } \author{ Thomas W. Yee } \note{ On fitting, the \code{extra} slot has list components \code{"w.aml"} and \code{"percentile"}. The latter is the percent of observations below the ``w-regression plane'', which is the fitted values. Also, the individual deviance values corresponding to each element of the argument \code{w.aml} is stored in the \code{extra} slot. For \code{amlexponential} objects, methods functions for the generic functions \code{qtplot} and \code{cdf} have not been written yet. See \code{\link{amlpoisson}} about comments on the jargon, e.g., \emph{expectiles} etc. In this documentation the word \emph{quantile} can often be interchangeably replaced by \emph{expectile} (things are informal here). } \section{Warning }{ Note that the \code{link} argument of \code{\link{exponential}} and \code{\link{amlexponential}} are currently different: one is the rate parameter and the other is the mean (expectile) parameter. If \code{w.aml} has more than one value then the value returned by \code{deviance} is the sum of all the (weighted) deviances taken over all the \code{w.aml} values. See Equation (1.6) of Efron (1992). } \seealso{ \code{\link{exponential}}, \code{\link{amlbinomial}}, \code{\link{amlpoisson}}, \code{\link{amlnormal}}, \code{\link{alaplace1}}, \code{\link{lms.bcg}}, \code{\link{deexp}}. } \examples{ nn <- 2000 mydat <- data.frame(x = seq(0, 1, length = nn)) mydat <- transform(mydat, mu = loglink(-0 + 1.5*x + 0.2*x^2, inverse = TRUE)) mydat <- transform(mydat, mu = loglink(0 - sin(8*x), inverse = TRUE)) mydat <- transform(mydat, y = rexp(nn, rate = 1/mu)) (fit <- vgam(y ~ s(x,df = 5), amlexponential(w = c(0.001, 0.1, 0.5, 5, 60)), mydat, trace = TRUE)) fit@extra \dontrun{ # These plots are against the sqrt scale (to increase clarity) par(mfrow = c(1,2)) # Quantile plot with(mydat, plot(x, sqrt(y), col = "blue", las = 1, main = paste(paste(round(fit@extra$percentile, digits = 1), collapse = ", "), "percentile-expectile curves"))) with(mydat, matlines(x, sqrt(fitted(fit)), lwd = 2, col = "blue", lty = 1)) # Compare the fitted expectiles with the quantiles with(mydat, plot(x, sqrt(y), col = "blue", las = 1, main = paste(paste(round(fit@extra$percentile, digits = 1), collapse = ", "), "percentile curves are orange"))) with(mydat, matlines(x, sqrt(fitted(fit)), lwd = 2, col = "blue", lty = 1)) for (ii in fit@extra$percentile) with(mydat, matlines(x, sqrt(qexp(p = ii/100, rate = 1/mu)), col = "orange")) } } \keyword{models} \keyword{regression} VGAM/man/ruge.Rd0000644000176200001440000000213513565414527013025 0ustar liggesusers\name{ruge} \alias{ruge} \docType{data} \title{Rutherford-Geiger Polonium Data} \description{ Decay counts of polonium recorded by Rutherford and Geiger (1910). 
} \usage{data(ruge)} \format{ This data frame contains the following columns: \describe{ \item{counts}{a numeric vector, counts or frequencies} \item{number}{a numeric vector, the number of decays} } } \details{ These are the radioactive decay counts of polonium recorded by Rutherford and Geiger (1910) representing the number of scintillations in 2608 1/8 minute intervals. For example, there were 57 frequencies of zero counts. The counts can be thought of as being approximately Poisson distributed. } \source{ Rutherford, E. and Geiger, H. (1910) The Probability Variations in the Distribution of alpha Particles, \emph{Philosophical Magazine}, \bold{20}, 698--704. } %\references{ %} \examples{ lambdahat <- with(ruge, weighted.mean(number, w = counts)) (N <- with(ruge, sum(counts))) with(ruge, cbind(number, counts, fitted = round(N * dpois(number, lam = lambdahat)))) } \keyword{datasets} VGAM/man/negbinomial.size.Rd0000644000176200001440000000700413565414527015320 0ustar liggesusers\name{negbinomial.size} \alias{negbinomial.size} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Negative Binomial Distribution Family Function With Known Size} \description{ Maximum likelihood estimation of the mean parameter of a negative binomial distribution with known size parameter. } \usage{ negbinomial.size(size = Inf, lmu = "loglink", imu = NULL, iprobs.y = 0.35, imethod = 1, ishrinkage = 0.95, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{size}{ Numeric, positive. Same as argument \code{size} of \code{\link[stats:NegBinomial]{rnbinom}}. If the response is a matrix then this is recycled to a matrix of the same dimension, by row (\code{\link[base]{matrix}} with \code{byrow = TRUE}). } \item{lmu, imu}{ Same as \code{\link{negbinomial}}. } \item{iprobs.y, imethod}{ Same as \code{\link{negbinomial}}. } \item{zero, ishrinkage}{ Same as \code{\link{negbinomial}}. } } \details{ This \pkg{VGAM} family function estimates only the mean parameter of the negative binomial distribution. See \code{\link{negbinomial}} for general information. Setting \code{size = 1} gives what might be called the NB-G (geometric model; see Hilbe (2011)). The default, \code{size = Inf}, corresponds to the Poisson distribution. } %\section{Warning}{ % %} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Hilbe, J. M. (2011) \emph{Negative Binomial Regression}, 2nd Edition. Cambridge: Cambridge University Press. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. } \author{ Thomas W. Yee } \note{ If \code{lmu = "nbcanlink"} in \code{negbinomial.size()} then the \code{size} argument here should be assigned and these values are recycled. % is placed inside the \code{earg} % argument of \code{nbcanlink()} as a matrix with conformable size. } \seealso{ \code{\link{negbinomial}}, \code{\link{nbcanlink}} (NB-C model), \code{\link{poissonff}}, \code{\link[stats:NegBinomial]{rnbinom}}, \code{\link{simulate.vlm}}. % \code{\link[MASS]{rnegbin}}. 
% \code{\link{quasipoissonff}}, } \examples{ # Simulated data with various multiple responses size1 <- exp(1); size2 <- exp(2); size3 <- exp(0); size4 <- Inf ndata <- data.frame(x2 = runif(nn <- 1000)) ndata <- transform(ndata, eta1 = -1 - 2 * x2, # eta1 must be negative size1 = size1) ndata <- transform(ndata, mu1 = nbcanlink(eta1, size = size1, inv = TRUE)) ndata <- transform(ndata, y1 = rnbinom(nn, mu = mu1, size = size1), # NB-C y2 = rnbinom(nn, mu = exp(2 - x2), size = size2), y3 = rnbinom(nn, mu = exp(3 + x2), size = size3), # NB-G y4 = rpois(nn, lambda = exp(1 + x2))) # Also known as NB-C with size known (Hilbe, 2011) fit1 <- vglm(y1 ~ x2, negbinomial.size(size = size1, lmu = "nbcanlink"), data = ndata, trace = TRUE) coef(fit1, matrix = TRUE) head(fit1@misc$size) # size saved here fit2 <- vglm(cbind(y2, y3, y4) ~ x2, data = ndata, trace = TRUE, negbinomial.size(size = c(size2, size3, size4))) coef(fit2, matrix = TRUE) head(fit2@misc$size) # size saved here } \keyword{models} \keyword{regression} VGAM/man/melbmaxtemp.Rd0000644000176200001440000000260713565414527014402 0ustar liggesusers\name{melbmaxtemp} \alias{melbmaxtemp} \docType{data} \title{ Melbourne Daily Maximum Temperatures} \description{ Melbourne daily maximum temperatures in degrees Celsius over the ten-year period 1981--1990. } \usage{ data(melbmaxtemp) } \format{ A vector with 3650 observations. } \details{ This is a time series data set from Melbourne, Australia. It is commonly used as an example of a difficult quantile regression problem because the conditional distribution of today's maximum temperature, given yesterday's, is bimodal: a hot day is likely to be followed by either an equally hot day or one much cooler. However, an independence assumption is typically made. } \references{ Hyndman, R. J., Bashtannyk, D. M. and Grunwald, G. K. (1996). Estimating and visualizing conditional densities. \emph{J. Comput. Graph. Statist.}, \bold{5}(4), 315--336. } \seealso{ \code{\link[VGAM]{lms.bcn}}. } \examples{ summary(melbmaxtemp) \dontrun{ par(mfrow = c(1, 1), mar = c(5, 4, 0.2, 0.1) + 0.1, las = 1) melb <- data.frame(today = melbmaxtemp[-1], yesterday = melbmaxtemp[-length(melbmaxtemp)]) plot(today ~ yesterday, data = melb, xlab = "Yesterday's Max Temperature", ylab = "Today's Max Temperature", cex = 1.4, type = "n") points(today ~ yesterday, data = melb, pch = 0, cex = 0.50, col = "blue") abline(a = 0, b = 1, lty = 3) } } \keyword{datasets} VGAM/man/cardioid.Rd0000644000176200001440000000556113565414527013647 0ustar liggesusers\name{cardioid} \alias{cardioid} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Cardioid Distribution Family Function } \description{ Estimates the two parameters of the cardioid distribution by maximum likelihood estimation. } \usage{ cardioid(lmu = extlogitlink(min = 0, max = 2*pi), lrho = extlogitlink(min = -0.5, max = 0.5), imu = NULL, irho = 0.3, nsimEIM = 100, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu, lrho}{ Parameter link functions applied to the \eqn{\mu}{mu} and \eqn{\rho}{rho} parameters, respectively. See \code{\link{Links}} for more choices. } \item{imu, irho}{ Initial values. A \code{NULL} means an initial value is chosen internally. See \code{\link{CommonVGAMffArguments}} for more information. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}} for more information.
} } \details{ The two-parameter cardioid distribution has a density that can be written as \deqn{f(y;\mu,\rho) = \frac{1}{2\pi} \left(1 + 2\, \rho \cos(y - \mu) \right) }{% f(y;mu,rho) = (1 + 2*rho*cos(y-mu)) / (2*pi)} where \eqn{0 < y < 2\pi}{0 < y < 2*pi}, \eqn{0 < \mu < 2\pi}{0 < mu < 2*pi}, and \eqn{-0.5 < \rho < 0.5}{-0.5 < rho < 0.5} is the concentration parameter. The default link functions enforce the range constraints of the parameters. For positive \eqn{\rho} the distribution is unimodal and symmetric about \eqn{\mu}{mu}. The mean of \eqn{Y} (returned as the fitted values) is \eqn{\pi + (\rho/\pi) ((2 \pi-\mu) \sin(2 \pi-\mu) + \cos(2 \pi-\mu) - \mu \sin(\mu) - \cos(\mu))}{ pi + (rho/pi) ((2*pi-mu)*sin(2*pi-mu) + cos(2*pi-mu) - mu*sin(mu) - cos(mu))}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Jammalamadaka, S. R. and SenGupta, A. (2001) \emph{Topics in Circular Statistics}, Singapore: World Scientific. } \author{ T. W. Yee } \note{ Fisher scoring using simulation is used. } \section{Warning }{ Numerically, this distribution can be difficult to fit because the log-likelihood can have multiple local maxima. The user is therefore encouraged to try different starting values, i.e., make use of \code{imu} and \code{irho}. } \seealso{ \code{\link{rcard}}, \code{\link{extlogitlink}}, \code{\link{vonmises}}. \pkg{CircStats} and \pkg{circular} currently have a lot more R functions for circular data than the \pkg{VGAM} package. } \examples{ \dontrun{ cdata <- data.frame(y = rcard(n = 1000, mu = 4, rho = 0.45)) fit <- vglm(y ~ 1, cardioid, data = cdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) c(with(cdata, mean(y)), head(fitted(fit), 1)) summary(fit) } } \keyword{models} \keyword{regression} VGAM/man/zanegbinUC.Rd0000644000176200001440000000517613565414527014120 0ustar liggesusers\name{Zanegbin} \alias{Zanegbin} \alias{dzanegbin} \alias{pzanegbin} \alias{qzanegbin} \alias{rzanegbin} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Altered Negative Binomial Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-altered negative binomial distribution with parameter \code{pobs0}. } \usage{ dzanegbin(x, size, prob = NULL, munb = NULL, pobs0 = 0, log = FALSE) pzanegbin(q, size, prob = NULL, munb = NULL, pobs0 = 0) qzanegbin(p, size, prob = NULL, munb = NULL, pobs0 = 0) rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{size, prob, munb, log}{ Parameters from the ordinary negative binomial distribution (see \code{\link[stats:NegBinomial]{dnbinom}}). Some arguments have been renamed slightly. } \item{pobs0}{ Probability of zero, called \eqn{pobs0}. The default value of \code{pobs0 = 0} corresponds to the response having a positive negative binomial distribution. } } \details{ The random variable \eqn{Y} equals 0 with probability \code{pobs0}; otherwise it follows a positive negative binomial(\eqn{\mu_{nb}}{munb}, size) distribution.
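This hurdle decomposition is easy to verify numerically. A minimal sketch (assuming \pkg{VGAM} is loaded): the pmf at zero is \code{pobs0}, and elsewhere it is \code{1 - pobs0} times a zero-truncated negative binomial pmf.

library(VGAM)
munb <- 3; size <- 4; pobs0 <- 0.3; y <- 1:8
# Zero-truncated NB pmf, rescaled by 1 - pobs0:
manual <- (1 - pobs0) * dnbinom(y, mu = munb, size = size) /
          (1 - dnbinom(0, mu = munb, size = size))
max(abs(manual - dzanegbin(y, size = size, munb = munb, pobs0 = pobs0)))  # ~0
dzanegbin(0, size = size, munb = munb, pobs0 = pobs0)  # equals pobs0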
} \value{ \code{dzanegbin} gives the density and \code{pzanegbin} gives the distribution function, \code{qzanegbin} gives the quantile function, and \code{rzanegbin} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pobs0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. } \seealso{ \code{\link{gatnbinomial.mlm}}, \code{\link{Gaitnbinom.mlm}}, \code{\link{zanegbinomial}}, \code{\link{rposnegbin}}. } \examples{ munb <- 3; size <- 4; pobs0 <- 0.3; x <- (-1):7 dzanegbin(x, munb = munb, size = size, pobs0 = pobs0) table(rzanegbin(100, munb = munb, size = size, pobs0 = pobs0)) \dontrun{ x <- 0:10 barplot(rbind(dzanegbin(x, munb = munb, size = size, pobs0 = pobs0), dnbinom(x, mu = munb, size = size)), beside = TRUE, col = c("blue", "green"), cex.main = 0.7, las = 1, ylab = "Probability", names.arg = as.character(x), main = paste("ZANB(munb = ", munb, ", size = ", size,", pobs0 = ", pobs0, ") [blue] vs", " NB(mu = ", munb, ", size = ", size, ") [green] densities", sep = "")) } } \keyword{distribution} VGAM/man/calibrate-methods.Rd0000644000176200001440000000131613565414527015452 0ustar liggesusers\name{calibrate-methods} \docType{methods} \alias{calibrate,rrvglm-method} \alias{calibrate,qrrvglm-method} \alias{calibrate,rrvgam-method} \alias{calibrate,Coef.qrrvglm-method} \title{ Calibration for Constrained Regression Models } \description{ \code{calibrate} is a generic function applied to RR-VGLMs, QRR-VGLMs and RR-VGAMs, etc. } %\usage{ % \S4method{calibrate}{cao,Coef.cao}(object, ...) %} \section{Methods}{ \describe{ \item{object}{ The object from which the calibration is performed. } } } %\note{ % See \code{\link{lvplot}} which is very much related to biplots. % %} \keyword{methods} \keyword{classes} %\keyword{ ~~ other possible keyword(s)} \keyword{models} \keyword{regression} VGAM/man/riceUC.Rd0000644000176200001440000000533113565414527013236 0ustar liggesusers\name{Rice} \alias{Rice} \alias{drice} \alias{price} \alias{qrice} \alias{rrice} \title{The Rice Distribution} \description{ Density, distribution function, quantile function and random generation for the Rician distribution. } \usage{ drice(x, sigma, vee, log = FALSE) price(q, sigma, vee, lower.tail = TRUE, log.p = FALSE, ...) qrice(p, sigma, vee, lower.tail = TRUE, log.p = FALSE, ...) rrice(n, sigma, vee) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{vee, sigma}{ See \code{\link{riceff}}. } \item{\dots}{ Other arguments such as \code{lower.tail}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{drice} gives the density, \code{price} gives the distribution function, \code{qrice} gives the quantile function, and \code{rrice} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{riceff}}, the \pkg{VGAM} family function for estimating the two parameters, for the formula of the probability density function and other details. Formulas for \code{price()} and \code{qrice()} are based on the Marcum-Q function. } %\section{Warning }{ % %} \seealso{ \code{\link{riceff}}. 
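The claim above that \code{price()} is based on the Marcum-Q function can at least be sanity-checked against direct numerical integration of \code{drice()}; a minimal sketch (assuming \pkg{VGAM} is loaded):

library(VGAM)
# The distribution function should equal the integral of the density:
integrate(function(y) drice(y, sigma = 1, vee = 1), lower = 0, upper = 3)$value
price(3, sigma = 1, vee = 1)  # should agree to integration tolerance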
} \examples{ \dontrun{ x <- seq(0.01, 7, len = 201) plot(x, drice(x, vee = 0, sigma = 1), type = "n", las = 1, ylab = "", main = "Density of Rice distribution for various values of v") sigma <- 1; vee <- c(0, 0.5, 1, 2, 4) for (ii in 1:length(vee)) lines(x, drice(x, vee = vee[ii], sigma), col = ii) legend(x = 5, y = 0.6, legend = as.character(vee), col = 1:length(vee), lty = 1) x <- seq(0, 4, by = 0.01); vee <- 1; sigma <- 1 probs <- seq(0.05, 0.95, by = 0.05) plot(x, drice(x, vee = vee, sigma = sigma), type = "l", col = "blue", main = "Blue is density, orange is cumulative distribution function", ylim = c(0, 1), sub = "Purple are 5, 10, ..., 95 percentiles", las = 1, ylab = "", cex.main = 0.9) abline(h = 0:1, col = "black", lty = 2) Q <- qrice(probs, sigma, vee = vee) lines(x, price(x, sigma, vee = vee), type = "l", col = "orange") lines(Q, drice(Q, sigma, vee = vee), col = "purple", lty = 3, type = "h") lines(Q, price(Q, sigma, vee = vee), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) max(abs(price(Q, sigma, vee = vee) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/deplot.lmscreg.Rd0000644000176200001440000000533413565414527015011 0ustar liggesusers\name{deplot.lmscreg} \alias{deplot.lmscreg} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Density Plot for LMS Quantile Regression } \description{ Plots a probability density function associated with an LMS quantile regression. } \usage{ deplot.lmscreg(object, newdata = NULL, x0, y.arg, show.plot = TRUE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \pkg{VGAM} quantile regression model, i.e., an object produced by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}} with a family function beginning with \code{"lms."}, e.g., \code{\link{lms.yjn}}. } \item{newdata}{ Optional data frame containing secondary variables such as sex. It should have a maximum of one row. The default is to use the original data. } \item{x0}{ Numeric. The value of the primary variable at which to make the `slice'. } \item{y.arg}{ Numerical vector. The values of the response variable at which to evaluate the density. This should be a grid that is fine enough to ensure the plotted curves are smooth. } \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will be done. } \item{\dots}{ Graphical parameters that are passed into \code{\link{plotdeplot.lmscreg}}. } } \details{ This function calls, e.g., \code{deplot.lms.yjn} in order to compute the density function. } \value{ The original \code{object} but with a list placed in the slot \code{post}, called \code{@post$deplot}. The list has components \item{newdata }{ The argument \code{newdata} above, or a one-row data frame constructed out of the \code{x0} argument. } \item{y}{ The argument \code{y.arg} above. } \item{density}{ Vector of the density function values evaluated at \code{y.arg}. } } \references{ Yee, T. W. (2004) Quantile regression via vector generalized additive models. \emph{Statistics in Medicine}, \bold{23}, 2295--2315. } \author{ Thomas W. Yee } \note{ \code{\link{plotdeplot.lmscreg}} actually does the plotting. } \seealso{ \code{\link{plotdeplot.lmscreg}}, \code{\link{qtplot.lmscreg}}, \code{\link{lms.bcn}}, \code{\link{lms.bcg}}, \code{\link{lms.yjn}}.
} \examples{\dontrun{ fit <- vgam(BMI ~ s(age, df = c(4, 2)), fam = lms.bcn(zero = 1), data = bmi.nz) ygrid <- seq(15, 43, by = 0.25) deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "green", llwd = 2, main = "BMI distribution at ages 20 (green), 40 (blue), 60 (red)") deplot(fit, x0 = 40, y = ygrid, add = TRUE, col = "blue", llwd = 2) deplot(fit, x0 = 60, y = ygrid, add = TRUE, col = "red", llwd = 2) -> a names(a@post$deplot) a@post$deplot$newdata head(a@post$deplot$y) head(a@post$deplot$density) } } \keyword{graphs} \keyword{models} \keyword{regression} VGAM/man/qvar.Rd0000644000176200001440000000455113565414527013040 0ustar liggesusers\name{qvar} \alias{qvar} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Quasi-variances Extraction Function } \description{ Takes a \code{\link{rcim}} fit of the appropriate format and returns either the quasi-variances or quasi-standard errors. } \usage{ qvar(object, se = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \code{\link{rcim}} object that has family function \code{\link{uninormal}} with the \code{\link{explink}} link. See below for an example. } \item{se}{ Logical. If \code{FALSE} then the quasi-variances are returned, else the square root of them, called quasi-standard errors. } \item{\ldots}{ Currently unused. } } \details{ This function is ad hoc: it is simply equivalent to computing the quasi-variances by \code{diag(predict(fit1)[, c(TRUE, FALSE)]) / 2}. It is provided for convenience only. Serious users of quasi-variances ought to understand why and how this function works. } \value{ A vector of quasi-variances or quasi-standard errors. } %\references{ % %} \author{ T. W. Yee. } %\note{ % This is an adaptation of \code{qvcalc()} in \pkg{qvcalc}. % % %} \seealso{ \code{\link{rcim}}, \code{\link{uninormal}}, \code{\link{explink}}, \code{\link{Qvar}}, \code{\link[MASS]{ships}}. } \examples{ data("ships", package = "MASS") Shipmodel <- vglm(incidents ~ type + year + period, poissonff, offset = log(service), data = ships, subset = (service > 0)) # Easiest form of input fit1 <- rcim(Qvar(Shipmodel, "type"), uninormal("explink"), maxit = 99) qvar(fit1) # Quasi-variances qvar(fit1, se = TRUE) # Quasi-standard errors # Manually compute them: (quasiVar <- exp(diag(fitted(fit1))) / 2) # Version 1 (quasiVar <- diag(predict(fit1)[, c(TRUE, FALSE)]) / 2) # Version 2 (quasiSE <- sqrt(quasiVar)) \dontrun{ qvplot(fit1, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} \keyword{regression} % \code{\link[qvcalc:qvcalc]{qvcalc}} in \pkg{qvcalc} VGAM/man/zapoisson.Rd0000644000176200001440000001735213565414527014117 0ustar liggesusers\name{zapoisson} \alias{zapoisson} \alias{zapoissonff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Altered Poisson Distribution } \description{ Fits a zero-altered Poisson distribution based on a conditional model involving a Bernoulli distribution and a positive-Poisson distribution.
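The conditional (hurdle) structure just described means the pmf is \code{pobs0} at zero and \code{1 - pobs0} times a positive-Poisson pmf elsewhere; a minimal numerical sketch (assuming \pkg{VGAM} is loaded, using its companion function \code{dzapois()}):

library(VGAM)
lambda <- 2; pobs0 <- 0.4; y <- 0:6
manual <- ifelse(y == 0, pobs0,
                 (1 - pobs0) * dpois(y, lambda) / (1 - dpois(0, lambda)))
max(abs(manual - dzapois(y, lambda = lambda, pobs0 = pobs0)))  # ~0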
} \usage{ zapoisson(lpobs0 = "logitlink", llambda = "loglink", type.fitted = c("mean", "lambda", "pobs0", "onempobs0"), imethod = 1, ipobs0 = NULL, ilambda = NULL, ishrinkage = 0.95, probs.y = 0.35, zero = NULL) zapoissonff(llambda = "loglink", lonempobs0 = "logitlink", type.fitted = c("mean", "lambda", "pobs0", "onempobs0"), imethod = 1, ilambda = NULL, ionempobs0 = NULL, ishrinkage = 0.95, probs.y = 0.35, zero = "onempobs0") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpobs0}{ Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here. See \code{\link{Links}} for more choices. } \item{llambda}{ Link function for the usual \eqn{\lambda}{lambda} parameter. See \code{\link{Links}} for more choices. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for information. } \item{lonempobs0}{ Corresponding argument for the other parameterization. See details below. } % \item{epobs0, elambda}{ % epobs0 = list(), elambda = list(), % Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{imethod, ipobs0, ionempobs0, ilambda, ishrinkage}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y, zero}{ See \code{\link{CommonVGAMffArguments}} for information. % Integer valued vector, usually assigned \eqn{-1} or \eqn{1} if used % at all. Specifies which of the two linear/additive predictors are % modelled as an intercept only. % By default, both linear/additive predictors are modelled using % the explanatory variables. % If \code{zero = 1} then the \eqn{p_0}{pobs0} parameter % (after \code{lpobs0} is applied) is modelled as a single unknown % number that is estimated. It is modelled as a function of the % explanatory variables by \code{zero = NULL}. A negative value % means that the value is recycled, so setting \eqn{-1} means % all \eqn{p_0}{pobs0} are intercept-only (for multiple responses). } } \details{ The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0}, else \eqn{Y} has a positive-Poisson(\eqn{\lambda)}{lambda)} distribution with probability \eqn{1-p_0}{1-pobs0}. Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1}, which is modelled as a function of the covariates. The zero-altered Poisson distribution differs from the zero-inflated Poisson distribution in that the former has zeros coming from one source, whereas the latter has zeros coming from the Poisson distribution too. Some people call the zero-altered Poisson a \emph{hurdle} model. For one response/species, by default, the two linear/additive predictors for \code{zapoisson()} are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(pobs0), log(lambda))^T}. The \pkg{VGAM} family function \code{zapoissonff()} has a few changes compared to \code{zapoisson()}. These are: (i) the order of the linear/additive predictors is switched so the Poisson mean comes first; (ii) argument \code{onempobs0} is now 1 minus the probability of an observed 0, i.e., the probability of the positive Poisson distribution, i.e., \code{onempobs0} is \code{1-pobs0}; (iii) argument \code{zero} has a new default so that the \code{onempobs0} is intercept-only by default. Now \code{zapoissonff()} is generally recommended over \code{zapoisson()}. Both functions implement Fisher scoring and can handle multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. 
The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, returns the mean \eqn{\mu}{mu} (default) which is given by \deqn{\mu = (1-p_0) \lambda / [1 - \exp(-\lambda)].}{% mu = (1-pobs0) * lambda / [1 - exp(-lambda)].} If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned. } \references{ Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer, D. B. (1996) Modelling the abundances of rare species: statistical models for counts with extra zeros. \emph{Ecological Modelling}, \bold{88}, 297--308. Angers, J-F. and Biswas, A. (2003) A Bayesian analysis of zero-inflated generalized Poisson model. \emph{Computational Statistics & Data Analysis}, \bold{42}, 37--46. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } %20111123; this has been fixed up with proper FS using EIM. %\section{Warning }{ % Inference obtained from \code{summary.vglm} % and \code{summary.vgam} may or may not be correct. % In particular, the p-values, standard errors and degrees of % freedom may need adjustment. Use simulation on artificial % data to check that these are reasonable. % % %} \author{ T. W. Yee } \note{ There are subtle differences between this family function and \code{\link{zipoisson}} and \code{\link{yip88}}. In particular, \code{\link{zipoisson}} is a \emph{mixture} model whereas \code{zapoisson()} and \code{\link{yip88}} are \emph{conditional} models. Note this family function allows \eqn{p_0}{pobs0} to be modelled as functions of the covariates. % It can be thought of an extension % of \code{\link{yip88}}, which is also a conditional model but its % \eqn{\phi}{phi} parameter is a scalar only. This family function effectively combines \code{\link{pospoisson}} and \code{\link{binomialff}} into one family function. This family function can handle multiple responses, e.g., more than one species. } \seealso{ \code{\link{rzapois}}, \code{\link{zipoisson}}, \code{\link{gatpoisson.mlm}}, \code{\link{pospoisson}}, \code{\link{posnegbinomial}}, \code{\link{binomialff}}, \code{\link{rpospois}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. 
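The fitted-value (mean) formula above is easily checked by simulation; a minimal sketch (assuming \pkg{VGAM} is loaded):

library(VGAM)
lambda <- 2; pobs0 <- 0.4
(mu <- (1 - pobs0) * lambda / (1 - exp(-lambda)))  # theoretical mean
set.seed(1)
mean(rzapois(1e5, lambda, pobs0 = pobs0))  # should be close to mu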
} \examples{ zdata <- data.frame(x2 = runif(nn <- 1000)) zdata <- transform(zdata, pobs0 = logitlink( -1 + 1*x2, inverse = TRUE), lambda = loglink(-0.5 + 2*x2, inverse = TRUE)) zdata <- transform(zdata, y = rzapois(nn, lambda, pobs0 = pobs0)) with(zdata, table(y)) fit <- vglm(y ~ x2, zapoisson, data = zdata, trace = TRUE, crit = "coef") head(fitted(fit)) head(predict(fit)) head(predict(fit, untransform = TRUE)) coef(fit, matrix = TRUE) summary(fit) # Another example ------------------------------ # Data from Angers and Biswas (2003) abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1)) abdata <- subset(abdata, w > 0) Abdata <- data.frame(yy = with(abdata, rep(y, w))) fit3 <- vglm(yy ~ 1, zapoisson, data = Abdata, trace = TRUE, crit = "coef") coef(fit3, matrix = TRUE) Coef(fit3) # Estimate lambda (they get 0.6997 with SE 0.1520) head(fitted(fit3), 1) with(Abdata, mean(yy)) # Compare this with fitted(fit3) } \keyword{models} \keyword{regression} %zapoisson(lpobs0 = "logitlink", llambda = "loglink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL) %zapoissonff(llambda = "loglink", lonempobs0 = "logitlink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0") VGAM/man/qtplot.lmscreg.Rd0000644000176200001440000000460313565414527015043 0ustar liggesusers\name{qtplot.lmscreg} \alias{qtplot.lmscreg} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Quantile Plot for LMS Quantile Regression } \description{ Plots quantiles associated with an LMS quantile regression. } \usage{ qtplot.lmscreg(object, newdata = NULL, percentiles = object@misc$percentiles, show.plot = TRUE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \pkg{VGAM} quantile regression model, i.e., an object produced by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}} with a family function beginning with \code{"lms."}, e.g., \code{\link{lms.yjn}}. } \item{newdata}{ Optional data frame for computing the quantiles. If missing, the original data is used. } \item{percentiles}{ Numerical vector with values between 0 and 100 that specify the percentiles (quantiles). The default is the set of percentiles used when the model was fitted. } \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will be done. } \item{\dots}{ Graphical parameters that are passed into \code{\link{plotqtplot.lmscreg}}. } } \details{ The `primary' variable is defined as the main covariate upon which the regression or smoothing is performed. For example, in medical studies, it is often the age. In \pkg{VGAM}, it is possible to handle more than one covariate, however, the primary variable must be the first term after the intercept. } \value{ A list with the following components. \item{fitted.values }{A vector of fitted percentile values. } \item{percentiles }{The percentiles used. } } \references{ Yee, T. W. (2004) Quantile regression via vector generalized additive models. \emph{Statistics in Medicine}, \bold{23}, 2295--2315. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ \code{\link{plotqtplot.lmscreg}} does the actual plotting. } \seealso{ \code{\link{plotqtplot.lmscreg}}, \code{\link{deplot.lmscreg}}, \code{\link{lms.bcn}}, \code{\link{lms.bcg}}, \code{\link{lms.yjn}}.
} \examples{\dontrun{ fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = bmi.nz) qtplot(fit) qtplot(fit, perc = c(25, 50, 75, 95), lcol = "blue", tcol = "blue", llwd = 2) } } \keyword{graphs} \keyword{models} \keyword{regression} VGAM/man/rec.exp1.Rd0000644000176200001440000000375613565414527013522 0ustar liggesusers\name{rec.exp1} \alias{rec.exp1} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Upper Record Values from a 1-parameter Exponential Distribution } \description{ Maximum likelihood estimation of the rate parameter of a 1-parameter exponential distribution when the observations are upper record values. } \usage{ rec.exp1(lrate = "loglink", irate = NULL, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lrate}{ Link function applied to the rate parameter. See \code{\link{Links}} for more choices. } \item{irate}{ Numeric. Optional initial value for the rate. The default value \code{NULL} means it is computed internally, with the help of \code{imethod}. } \item{imethod}{ Integer, either 1 or 2 or 3. Initialization method; three algorithms are implemented. Choose another value if convergence fails, or use \code{irate}. } } \details{ The response must be a vector or one-column matrix with strictly increasing values. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Arnold, B. C., Balakrishnan, N. and Nagaraja, H. N. (1998) \emph{Records}, New York: John Wiley & Sons. } \author{ T. W. Yee } \note{ By default, this family function has the intercept-only MLE as the initial value, therefore convergence may only take one iteration. Fisher scoring is used. } \seealso{ \code{\link{exponential}}. } \examples{ rawy <- rexp(n <- 10000, rate = exp(1)) y <- unique(cummax(rawy)) # Keep only the records length(y) / y[length(y)] # MLE of rate fit <- vglm(y ~ 1, rec.exp1, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) } \keyword{models} \keyword{regression} %# Keep only the records %delete = c(FALSE, rep(TRUE, len = n-1)) %for (i in 2:length(rawy)) % if (rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE %(y = rawy[!delete]) VGAM/man/zageometric.Rd0000644000176200001440000001271613565414527014402 0ustar liggesusers\name{zageometric} \alias{zageometric} \alias{zageometricff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Altered Geometric Distribution } \description{ Fits a zero-altered geometric distribution based on a conditional model involving a Bernoulli distribution and a positive-geometric distribution. } \usage{ zageometric(lpobs0 = "logitlink", lprob = "logitlink", type.fitted = c("mean", "prob", "pobs0", "onempobs0"), imethod = 1, ipobs0 = NULL, iprob = NULL, zero = NULL) zageometricff(lprob = "logitlink", lonempobs0 = "logitlink", type.fitted = c("mean", "prob", "pobs0", "onempobs0"), imethod = 1, iprob = NULL, ionempobs0 = NULL, zero = "onempobs0") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpobs0}{ Link function for the parameter \eqn{p_0}{pobs0} or \eqn{\phi}{phi}, called \code{pobs0} or \code{phi} here. See \code{\link{Links}} for more choices. } \item{lprob}{ Parameter link function applied to the probability of success, called \code{prob} or \eqn{p}. See \code{\link{Links}} for more choices. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for information.
} % \item{epobs0, eprob}{ % List. Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % epobs0 = list(), eprob = list(), % } \item{ipobs0, iprob}{ Optional initial values for the parameters. If given, they must be in range. For multi-column responses, these are recycled sideways. } \item{lonempobs0, ionempobs0}{ Corresponding argument for the other parameterization. See details below. } \item{zero, imethod}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0}, or \eqn{Y} has a positive-geometric distribution with probability \eqn{1-p_0}{1-pobs0}. Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1}, which is modelled as a function of the covariates. The zero-altered geometric distribution differs from the zero-inflated geometric distribution in that the former has zeros coming from one source, whereas the latter has zeros coming from the geometric distribution too. The zero-inflated geometric distribution is implemented in the \pkg{VGAM} package. Some people call the zero-altered geometric a \emph{hurdle} model. The input can be a matrix (multiple responses). By default, the two linear/additive predictors of \code{zageometric} are \eqn{(logit(\phi), logit(p))^T}{(logit(phi), logit(prob))^T}. The \pkg{VGAM} family function \code{zageometricff()} has a few changes compared to \code{zageometric()}. These are: (i) the order of the linear/additive predictors is switched so the geometric probability comes first; (ii) argument \code{onempobs0} is now 1 minus the probability of an observed 0, i.e., the probability of the positive geometric distribution, i.e., \code{onempobs0} is \code{1-pobs0}; (iii) argument \code{zero} has a new default so that \code{onempobs0} is intercept-only by default. Now \code{zageometricff()} is generally recommended over \code{zageometric()}. Both functions implement Fisher scoring and can handle multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, returns the mean \eqn{\mu}{mu} (default) which is given by \deqn{\mu = (1-\phi) / p.}{% mu = (1 - phi) / p.} If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned. } %\references{ % % %} \section{Warning }{ Convergence for this \pkg{VGAM} family function seems to depend quite strongly on providing good initial values. Inference obtained from \code{summary.vglm} and \code{summary.vgam} may or may not be correct. In particular, the p-values, standard errors and degrees of freedom may need adjustment. Use simulation on artificial data to check that these are reasonable. } \author{ T. W. Yee } \note{ Note this family function allows \eqn{p_0}{pobs0} to be modelled as a function of the covariates. It is a conditional model, not a mixture model. This family function effectively combines \code{\link{binomialff}} and \code{posgeometric()} and \code{\link{geometric}} into one family function. However, \code{posgeometric()} is not written because it is trivially related to \code{\link{geometric}}. } \seealso{ \code{\link{dzageom}}, \code{\link{geometric}}, \code{\link{zigeometric}}, \code{\link[stats:Geometric]{dgeom}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}.
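Similarly, the mean formula \eqn{\mu = (1-\phi)/p}{mu = (1 - phi)/p} above can be checked by simulation; a minimal sketch (assuming \pkg{VGAM} is loaded):

library(VGAM)
prob <- 0.4; pobs0 <- 0.25
(1 - pobs0) / prob  # theoretical mean
set.seed(1)
mean(rzageom(1e5, prob = prob, pobs0 = pobs0))  # should be close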
} % \code{\link{posgeometric}}, \examples{ zdata <- data.frame(x2 = runif(nn <- 1000)) zdata <- transform(zdata, pobs0 = logitlink(-1 + 2*x2, inverse = TRUE), prob = logitlink(-2 + 3*x2, inverse = TRUE)) zdata <- transform(zdata, y1 = rzageom(nn, prob = prob, pobs0 = pobs0), y2 = rzageom(nn, prob = prob, pobs0 = pobs0)) with(zdata, table(y1)) fit <- vglm(cbind(y1, y2) ~ x2, zageometric, data = zdata, trace = TRUE) coef(fit, matrix = TRUE) head(fitted(fit)) head(predict(fit)) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/fisherzlink.Rd0000644000176200001440000000564213565414527014421 0ustar liggesusers\name{fisherzlink} \alias{fisherzlink} % \alias{fisherz} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Fisher's Z Link Function } \description{ Computes the Fisher Z transformation, including its inverse and the first two derivatives. } \usage{ fisherzlink(theta, bminvalue = NULL, bmaxvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bminvalue, bmaxvalue}{ Optional boundary values. Values of \code{theta} which are less than or equal to \eqn{-1} can be replaced by \code{bminvalue} before computing the link function value. Values of \code{theta} which are greater than or equal to \eqn{1} can be replaced by \code{bmaxvalue} before computing the link function value. See \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The \code{fisherz} link function is commonly used for parameters that lie between \eqn{-1} and \eqn{1}. Numerical values of \code{theta} close to \eqn{-1} or \eqn{1} or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. } \value{ For \code{deriv = 0}, \code{0.5 * log((1+theta)/(1-theta))} (same as \code{atanh(theta)}) when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{(exp(2*theta)-1)/(exp(2*theta)+1)} (same as \code{tanh(theta)}). For \code{deriv = 1}, then the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. Here, all logarithms are natural logarithms, i.e., to base \emph{e}. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ Numerical instability may occur when \code{theta} is close to \eqn{-1} or \eqn{1}. One way of overcoming this is to use, e.g., \code{bminvalue}. The link function \code{\link{rhobitlink}} is very similar to \code{fisherzlink}, e.g., just twice the value of \code{fisherzlink}. This link function may be renamed to \code{atanhlink} in the near future. } \seealso{ \code{\link{Links}}, \code{\link{rhobitlink}}, \code{\link{logitlink}}. 
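The \code{atanh()} and \code{tanh()} identities stated in the \bold{Value} section can be confirmed directly; a minimal sketch (assuming \pkg{VGAM} is loaded):

library(VGAM)
th <- seq(-0.9, 0.9, by = 0.3)
max(abs(fisherzlink(th) - atanh(th)))                 # ~0, the link
max(abs(fisherzlink(th, inverse = TRUE) - tanh(th)))  # ~0, its inverse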
% \code{\link{atanhlink}}, } \examples{ theta <- seq(-0.99, 0.99, by = 0.01) y <- fisherzlink(theta) \dontrun{ plot(theta, y, type = "l", las = 1, ylab = "", main = "fisherzlink(theta)", col = "blue") abline(v = (-1):1, h = 0, lty = 2, col = "gray") } x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01)) fisherzlink(x) # Has NAs fisherzlink(x, bminvalue = -1 + .Machine$double.eps, bmaxvalue = 1 - .Machine$double.eps) # Has no NAs } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/gumbelII.Rd0000644000176200001440000001024013565414527013554 0ustar liggesusers\name{gumbelII} \alias{gumbelII} %\alias{gumbelIIff} %\alias{gumbelII.lsh} %\alias{gumbelII3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gumbel-II Regression Family Function } \description{ Maximum likelihood estimation of the 2-parameter Gumbel-II distribution. } \usage{ gumbelII(lscale = "loglink", lshape = "loglink", iscale = NULL, ishape = NULL, probs.y = c(0.2, 0.5, 0.8), perc.out = NULL, imethod = 1, zero = "shape", nowarning = FALSE) } %- maybe also 'usage' for other objects documented here. % zero = "scale", nowarning = FALSE 20151128 \arguments{ \item{nowarning}{ Logical. Suppress a warning? } \item{lshape, lscale}{ Parameter link functions applied to the (positive) shape parameter (called \eqn{s} below) and (positive) scale parameter (called \eqn{b} below). See \code{\link{Links}} for more choices. } % \item{eshape, escale}{ % eshape = list(), escale = list(), % Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{ishape, iscale}{ Optional initial values for the shape and scale parameters. } \item{imethod}{ See \code{\link{weibullR}}. } \item{zero, probs.y}{ Details at \code{\link{CommonVGAMffArguments}}. } \item{perc.out}{ If the fitted values are to be quantiles then set this argument to be the percentiles of these, e.g., 50 for median. } } \details{ The Gumbel-II density for a response \eqn{Y} is \deqn{f(y;b,s) = s y^{-s-1} \exp[-(y/b)^{-s}] \, b^{s}}{% f(y;b,s) = s * y^(-s-1) * exp(-(y/b)^(-s)) * b^s} for \eqn{b > 0}, \eqn{s > 0}, \eqn{y > 0}. The cumulative distribution function is \deqn{F(y;b,s) = \exp[-(y/b)^{-s}].}{% F(y;b,s) = exp(-(y/b)^(-s)).} The mean of \eqn{Y} is \eqn{b \, \Gamma(1 - 1/s)}{b * gamma(1 - 1/s)} (returned as the fitted values) when \eqn{s>1}, and the variance is \eqn{b^2\,[\Gamma(1-2/s) - \Gamma^2(1-1/s)]}{b^2 * [gamma(1-2/s) - gamma(1-1/s)^2]} when \eqn{s>2}. This distribution looks similar to \code{\link{weibullR}}, and is due to Gumbel (1954). This \pkg{VGAM} family function currently does not handle censored data. Fisher scoring is used to estimate the two parameters. Probably similar regularity conditions hold for this distribution compared to the Weibull distribution. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Gumbel, E. J. (1954). Statistical theory of extreme values and some practical applications. \emph{Applied Mathematics Series}, volume 33, U.S. Department of Commerce, National Bureau of Standards, USA. } \author{ T. W. Yee } \note{ See \code{\link{weibullR}}. This \pkg{VGAM} family function handles multiple responses. } %\section{Warning}{ % This function is under development to handle other censoring situations. % The version of this function which will handle censored data will be % called \code{cengumbelII()}.
It is currently being written and will use % \code{\link{SurvS4}} as input. % It should be released in later versions of \pkg{VGAM}. % % % If the shape parameter is less than two then misleading inference may % result, e.g., in the \code{summary} and \code{vcov} of the object. % % %} \seealso{ \code{\link{dgumbelII}}, \code{\link{gumbel}}, \code{\link{gev}}. } \examples{ gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, heta1 = +1, heta2 = -1 + 0.1 * x2, ceta1 = 0, ceta2 = 1) gdata <- transform(gdata, shape1 = exp(heta1), shape2 = exp(heta2), scale1 = exp(ceta1), scale2 = exp(ceta2)) gdata <- transform(gdata, y1 = rgumbelII(nn, scale = scale1, shape = shape1), y2 = rgumbelII(nn, scale = scale2, shape = shape2)) fit <- vglm(cbind(y1, y2) ~ x2, gumbelII(zero = c(1, 2, 3)), data = gdata, trace = TRUE) coef(fit, matrix = TRUE) vcov(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/logistic.Rd0000644000176200001440000000742513565414527013707 0ustar liggesusers\name{logistic} \alias{logistic} \alias{logistic1} \alias{logistic} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Logistic Distribution Family Function } \description{ Estimates the location and scale parameters of the logistic distribution by maximum likelihood estimation. } \usage{ logistic1(llocation = "identitylink", scale.arg = 1, imethod = 1) logistic(llocation = "identitylink", lscale = "loglink", ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation, lscale}{ Parameter link functions applied to the location parameter \eqn{l} and scale parameter \eqn{s}. See \code{\link{Links}} for more choices, and \code{\link{CommonVGAMffArguments}} for more information. } \item{scale.arg}{ Known positive scale parameter (called \eqn{s} below). } \item{ilocation, iscale}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The two-parameter logistic distribution has a density that can be written as \deqn{f(y;l,s) = \frac{\exp[-(y-l)/s]}{ s\left( 1 + \exp[-(y-l)/s] \right)^2}}{% f(y;l,s) = exp[-(y-l)/s] / [s * ( 1 + exp[-(y-l)/s] )^2] } where \eqn{s > 0} is the scale parameter, and \eqn{l} is the location parameter. The response is \eqn{-\infty < y < \infty}{-Inf < y < Inf}. A random variable \eqn{Y} has a Pareto-type (power-law) tail if \deqn{P[Y>y] = C / y^{k}}{% P[Y>y] = C / y^k} for some positive \eqn{k} and \eqn{C}. This model is important in many applications due to the power law probability tail, especially for large values of \eqn{y}. The Pareto distribution, which is used a lot in economics, has a probability density function that can be written \deqn{f(y;\alpha,k) = k \alpha^k / y^{k+1}}{% f(y;alpha,k) = k * alpha^k / y^(k+1)} for \eqn{0 < \alpha < y}{0 < alpha < y} and \eqn{0 < k}{0 < k}. The mean of \eqn{Y} is \eqn{\alpha k/(k-1)}{alpha k/(k-1)} provided \eqn{k > 1}. Its variance is \eqn{\alpha^2 k /((k-1)^2 (k-2))}{alpha^2 k /((k-1)^2 (k-2))} provided \eqn{k > 2}. The upper truncated Pareto distribution has a probability density function that can be written \deqn{f(y) = k \alpha^k / [y^{k+1} (1-(\alpha/U)^k)]}{% f(y) = k * alpha^k / [y^(k+1) (1-(\alpha/U)^k)]} for \eqn{0 < \alpha < y < U < \infty}{0 < alpha < y < U < Inf} and \eqn{k>0}. Possibly, better names for \eqn{k} are the \emph{index} and \emph{tail} parameters. Here, \eqn{\alpha}{alpha} and \eqn{U} are known. The mean of \eqn{Y} is \eqn{k \alpha^k (U^{1-k}-\alpha^{1-k}) / [(1-k)(1-(\alpha/U)^k)]}{ k * lower^k * (U^(1-k)-alpha^(1-k)) / ((1-k) * (1-(alpha/U)^k))}.
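These truncated-Pareto formulas are straightforward to check numerically with \code{dtruncpareto()}; a minimal sketch (assuming \pkg{VGAM} is loaded):

library(VGAM)
lower <- 2; upper <- 8; k <- 3
f <- function(y) dtruncpareto(y, lower = lower, upper = upper, shape = k)
integrate(f, lower = lower, upper = upper)$value  # ~1: a proper density
integrate(function(y) y * f(y), lower = lower, upper = upper)$value  # mean
k * lower^k * (upper^(1-k) - lower^(1-k)) / ((1-k) * (1 - (lower/upper)^k))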
} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. Aban, I. B., Meerschaert, M. M. and Panorska, A. K. (2006) Parameter estimation for the truncated Pareto distribution, \emph{Journal of the American Statistical Association}, \bold{101}(473), 270--277. } \author{ T. W. Yee } \note{ Outside of economics, the Pareto distribution is known as the Bradford distribution. For \code{paretoff}, if the estimate of \eqn{k} is less than or equal to unity then the fitted values will be \code{NA}s. Also, \code{paretoff} fits the Pareto(I) distribution. See \code{\link{paretoIV}} for the more general Pareto(IV/III/II) distributions, but there is a slight change in notation: \eqn{s = k} and \eqn{b=\alpha}{b = alpha}. In some applications the Pareto law is truncated by a natural upper bound on the probability tail. The upper truncated Pareto distribution has three parameters (called \eqn{\alpha}{alpha}, \eqn{U} and \eqn{k} here) but the family function \code{truncpareto()} estimates only \eqn{k}. With known lower and upper limits, the ML estimator of \eqn{k} has the usual properties of MLEs. Aban (2006) discusses other inferential details. } \section{Warning }{ The usual or unbounded Pareto distribution has two parameters (called \eqn{\alpha}{alpha} and \eqn{k} here) but the family function \code{paretoff} estimates only \eqn{k} using iteratively reweighted least squares. The MLE of the \eqn{\alpha}{alpha} parameter lies on the boundary and is \code{min(y)} where \code{y} is the response. Consequently, using the default argument values, the standard errors are incorrect when one does a \code{summary} on the fitted object. If the user inputs a value for \code{alpha} then it is assumed known with this value and then \code{summary} on the fitted object should be correct. Numerical problems may occur for small \eqn{k}, e.g., \eqn{k < 1}. } \seealso{ \code{\link{Pareto}}, \code{\link{Truncpareto}}, \code{\link{paretoIV}}, \code{\link{gpd}}, \code{\link{benini1}}. } \examples{ alpha <- 2; kay <- exp(3) pdata <- data.frame(y = rpareto(n = 1000, scale = alpha, shape = kay)) fit <- vglm(y ~ 1, paretoff, data = pdata, trace = TRUE) fit@extra # The estimate of alpha is here head(fitted(fit)) with(pdata, mean(y)) coef(fit, matrix = TRUE) summary(fit) # Standard errors are incorrect!! # Here, alpha is assumed known fit2 <- vglm(y ~ 1, paretoff(scale = alpha), data = pdata, trace = TRUE) fit2@extra # alpha stored here head(fitted(fit2)) coef(fit2, matrix = TRUE) summary(fit2) # Standard errors are okay # Upper truncated Pareto distribution lower <- 2; upper <- 8; kay <- exp(2) pdata3 <- data.frame(y = rtruncpareto(n = 100, lower = lower, upper = upper, shape = kay)) fit3 <- vglm(y ~ 1, truncpareto(lower, upper), data = pdata3, trace = TRUE) coef(fit3, matrix = TRUE) c(fit3@misc$lower, fit3@misc$upper) } \keyword{models} \keyword{regression} % Package lmomco fits generalized pareto (three parameter) using % method of L-moments. VGAM/man/levy.Rd0000644000176200001440000000667413565414527013056 0ustar liggesusers\name{levy} \alias{levy} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Levy Distribution Family Function } \description{ Estimates the scale parameter of the Levy distribution by maximum likelihood estimation. 
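A useful fact behind the example below: if \eqn{Z} is standard normal then \eqn{b/Z^2}{b/Z^2} has this distribution with scale \eqn{b} and location 0, so tail probabilities reduce to \code{pnorm()}. A minimal sketch reproducing the 0.6827 and 0.5205 tail values quoted below:

# P(Y > y) = P(Z^2 < b/y) = 2 * pnorm(sqrt(b/y)) - 1
# for the Levy(scale = b, location = 0) distribution.
b <- 1
2 * pnorm(sqrt(b / 1)) - 1  # 0.6827
2 * pnorm(sqrt(b / 2)) - 1  # 0.5205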
} \usage{ levy(location = 0, lscale = "loglink", iscale = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{location}{ Location parameter. Must have a known value. Called \eqn{a} below. % otherwise it is estimated (the default). } \item{lscale}{ Parameter link function for the (positive) scale parameter \eqn{b}. See \code{\link{Links}} for more choices. } \item{iscale}{ Initial value for the \eqn{b} parameter. By default, an initial value is chosen internally. } } \details{ The Levy distribution is one of three stable distributions whose density function has a tractable form. The formula for the density is \deqn{f(y;b) = \sqrt{\frac{b}{2\pi}} \exp \left( \frac{-b}{2(y - a)} \right) / (y - a)^{3/2} }{% f(y;b) = sqrt(b / (2 pi)) exp( -b / (2(y - a))) / (y - a)^{3/2} } where \eqn{a < y < \infty}{a < y < Inf} and \eqn{b > 0}. Note that if \eqn{a} is very close to \code{min(y)} (where \code{y} is the response), then numerical problems will occur. The mean does not exist. The median is returned as the fitted values. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Nolan, J. P. (2005) \emph{Stable Distributions: Models for Heavy Tailed Data}. % p.5 } \author{ T. W. Yee } %\note{ % If \eqn{\delta}{delta} is given, then only one parameter is estimated % and the default is \eqn{\eta_1=\log(\gamma)}{eta1=log(gamma)}. % If \eqn{\delta}{delta} is not given, then \eqn{\eta_2=\delta}{eta2=delta}. % % %} \seealso{ The Nolan article was at \code{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}. % \code{\link{dlevy}}. } \examples{ nn <- 1000; loc1 <- 0; loc2 <- 10 myscale <- 1 # log link ==> 0 is the answer ldata <- data.frame(y1 = loc1 + myscale/rnorm(nn)^2, # Levy(myscale, a) y2 = rlevy(nn, loc = loc2, scale = exp(+2))) # Cf. Table 1.1 of Nolan for Levy(1,0) with(ldata, sum(y1 > 1) / length(y1)) # Should be 0.6827 with(ldata, sum(y1 > 2) / length(y1)) # Should be 0.5205 fit1 <- vglm(y1 ~ 1, levy(location = loc1), data = ldata, trace = TRUE) coef(fit1, matrix = TRUE) Coef(fit1) summary(fit1) head(weights(fit1, type = "work")) fit2 <- vglm(y2 ~ 1, levy(location = loc2), data = ldata, trace = TRUE) coef(fit2, matrix = TRUE) Coef(fit2) c(median = with(ldata, median(y2)), fitted.median = head(fitted(fit2), 1)) } \keyword{models} \keyword{regression} %%\eqn{\delta + \gamma \Gamma(-0.5) / (2\sqrt{\pi})}{delta + %% gamma * gamma(-0.5) / (2*sqrt(pi))} %%where \code{gamma} is a parameter but \code{gamma()} is the gamma function. %%mygamma = exp(1) # log link ==> 1 is the answer %% alternative: %%w = rgamma(n, shape=0.5) # W ~ Gamma(0.5) distribution %%mean(w) # 0.5 %%mean(1/w) %%y = delta + mygamma / (2 * w) # This is Levy(mygamma, delta) %%mean(y) %%set.seed(123) %%sum(y > 3) / length(y) # Should be 0.4363 %%sum(y > 4) / length(y) # Should be 0.3829 %%sum(y > 5) / length(y) # Should be 0.3453 %fit <- vglm(y ~ 1, levy(idelta = delta, igamma = mygamma), % data = ldata, trace = TRUE) # 2 parameters VGAM/man/gaitpois.mixUC.Rd0000644000176200001440000001633013565414527014730 0ustar liggesusers\name{Gaitpois.mix} \alias{Gaitpois.mix} \alias{dgaitpois.mix} \alias{pgaitpois.mix} \alias{qgaitpois.mix} \alias{rgaitpois.mix} %- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generally-Altered, -Inflated and -Truncated Poisson Distribution (GAIT--Pois--Pois--Pois mixture) } \description{ Density, distribution function, quantile function and random generation for the generally-altered, -inflated and -truncated Poisson distribution, based on mixtures of Poisson distributions having different support. This distribution is sometimes abbreviated as GAIT--Pois--Pois--Pois. } \usage{ dgaitpois.mix(x, lambda.p, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, lambda.a = lambda.p, lambda.i = lambda.p, log.arg = FALSE) pgaitpois.mix(q, lambda.p, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, lambda.a = lambda.p, lambda.i = lambda.p) qgaitpois.mix(p, lambda.p, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, lambda.a = lambda.p, lambda.i = lambda.p) rgaitpois.mix(n, lambda.p, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, lambda.a = lambda.p, lambda.i = lambda.p) } \arguments{ \item{x, p, q, n, log.arg}{ Same meaning as in \code{\link[stats]{Poisson}}. } \item{lambda.p, lambda.a, lambda.i}{ Same meaning as in \code{\link[stats]{rpois}}, i.e., for an ordinary Poisson distribution. The first is for the main \emph{p}arent (inner) distribution, and the outer distribution(s) (usually spikes) concern the \emph{a}ltered and/or \emph{i}nflated values. Short vectors are recycled. } \item{alter, inflate, truncate}{ See \code{\link{Gaitpois.mix}}. The order of precedence is the same, viz. truncation first, then altering, and finally inflation. If \code{alter} and \code{pobs.a} are both of unit length then the default probability mass function (PMF) evaluated at \code{alter} is \code{pobs.a}. % , but both \code{alter} and \code{inflate} cannot be % specified together. % Must be sorted and have unique values only. } \item{pobs.a, pstr.i}{ Numeric, probabilities of an observed altered value or a structural inflated value. See \code{\link{Gaitpois.mlm}}, however these are ordinary vectors of the usual length rather than matrices. The probability of obtaining a value equal to an element of \code{alter} is \code{pobs.a}, while the probability of obtaining a value equal to an element of \code{inflate} \emph{structurally} is \code{pstr.i}. Actually, the probability of obtaining \code{inflate[i]} comes from a scaled Poisson distribution: \code{pstr.i * dpois(inflate[i], lambda.p) / sum(dpois(inflate, lambda.p))} so that it isn't really a structural probability unless \code{length(inflate) == 1}. These two arguments are used only if \code{alter} and \code{inflate} are assigned values, respectively. } \item{max.support}{ Same as \code{\link{Gaitpois.mlm}}. } } \details{ These functions are an alternative to \code{\link{Gaitpois.mlm}}. The latter have an outer distribution that is based on the multinomial logit model (MLM) which allows the special values to be modelled nonparametrically in an unstructured manner. These functions here use the same parent distribution to model the special values, hence the special values are modelled in a more structured way; one could say they are more parametric than the MLM variant. Jargonwise, the outer distribution concerns those special values which appear in \code{alter} or \code{inflate}, and the inner distribution concerns the remaining support points. 
General truncation is allowed throughout; the parent distribution has PMF that is merely scaled up to exclude those points (\code{truncate} and values beyond \code{max.support}). In particular, the GAT inner distribution has a parent that is truncated at \code{c(alter, truncated)} and any values beyond \code{max.support}. Similarly, the GIT inner distribution has a parent that is truncated at \code{truncated} and any values beyond \code{max.support}. In the notation of Yee and Ma (2019) these functions concern the GAIT-Pois-Pois-Pois distribution. For the GAIT-Pois-MLM-MLM distribution see \code{\link{Gaitpois.mlm}}. } %\section{Warning }{ % See \code{\link{rgaitpois}}. % The function can run slowly for certain combinations % of \code{pstr.i} and \code{inflate}, e.g., % \code{rgaitpois(1e5, 1, inflate = 0:9, pstr.i = (1:10)/100)}. % Failure to obtain random variates will result in some % \code{NA} values instead. % An infinite loop can occur for certain combinations % of \code{lambda} and \code{inflate}, e.g., % \code{rgaitpois(10, 1, trunc = 0:100)}. % No action is made to avoid this occurring. %} \value{ \code{dgaitpois.mix} gives the density (PMF), \code{pgaitpois.mix} gives the distribution function, \code{qgaitpois.mix} gives the quantile function, and \code{rgaitpois.mix} generates random deviates. } \references{ Yee, T. W. and Ma, C. C. (2019) Generally-altered, -inflated and -truncated count regression, with application to heaped and seeped data. \emph{In preparation}. %, \bold{3}, 15--41. } \author{ T. W. Yee. } %\note{ %} \seealso{ \code{\link{gatpoisson.mix}}, \code{\link{Gaitpois.mlm}} for the GAIT-Pois-MLM-MLM distribution. } \examples{ ivec <- c(15, 10, 5); avec <- ivec; lambda <- 10 max.support <- 20; pobs.a <- 0.35; xvec <- 0:max.support (pmf.a <- dgaitpois.mix(xvec, lambda, # lambda.a = lambda, max.support = max.support, pobs.a = pobs.a, alter = avec)) sum(pmf.a) # Should be 1 \dontrun{ ind4 <- match(xvec, avec, nomatch = 0) > 0 # xvec \%in\% avec plot(xvec[ ind4], pmf.a[ ind4], type = "h", col = "orange", lwd = 1.1, las = 1, xlim = range(xvec), main = "GAT-Poisson-Poisson", ylim = c(0, max(pmf.a)), xlab = "y", ylab = "Probability") # Spikes lines(xvec[!ind4], pmf.a[!ind4], type = "h", col = "blue") } # GIT-Poisson-Poisson mixture pstr.i <- 0.20 (pmf.i <- dgaitpois.mix(xvec, lambda, # lambda.a = lambda, max.support = max.support, pstr.i = pstr.i, inflate = ivec)) sum(pmf.i) # Should be 1 \dontrun{ # Plot the components of pmf.i spikes <- dpois(ivec, lambda) * pstr.i / sum(dpois(ivec, lambda)) start.pt <- dpois(ivec, lambda) * (1 - pstr.i) / ppois(max.support, lambda) plot(xvec, (1 - pstr.i) * dpois(xvec, lambda), type = "h", col = "blue", las = 1, xlim = range(xvec), main = "GIT-Poisson-Poisson", # The inner distribution ylim = c(0, max(pmf.i)), xlab = "y", ylab = "Probability") segments(ivec, start.pt, # The outer distribution ivec, start.pt + spikes, col = "orange", lwd = 1.1) } } \keyword{distribution} % 20191019; % \item{mixprob}{ % Numeric, mixing probabilities. % The parent distribution PMF is multiplied by % \code{1-mixprob}, and then added to the product of % \code{mixprob} and the normalized parent distribution PMF % evaluated at the special values. % Thus \code{mixprob} more directly modifies the size of the % spikes and \code{1-mixprob} concerns more the inner parent % distribution. % See the examples figure below. 
% }
VGAM/man/dirmul.old.Rd0000644000176200001440000001223413565414527014135 0ustar liggesusers\name{dirmul.old}
\alias{dirmul.old}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Fitting a Dirichlet-Multinomial Distribution }
\description{
  Fits a Dirichlet-multinomial distribution to a matrix of
  non-negative integers.
}
\usage{
dirmul.old(link = "loglink", ialpha = 0.01, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{link}{
  Link function applied to each of the \eqn{M} (positive)
  shape parameters \eqn{\alpha_j}{alpha_j} for \eqn{j=1,\ldots,M}.
  See \code{\link{Links}} for more choices.
  Here, \eqn{M} is the number of columns of the response matrix.
  }
  \item{ialpha}{
  Numeric vector. Initial values for the \code{alpha} vector.
  Must be positive.
  Recycled to length \eqn{M}.
  }
  \item{parallel}{
  A logical, or formula specifying which terms have
  equal/unequal coefficients.
  }
  \item{zero}{
  An integer-valued vector specifying which linear/additive
  predictors are modelled as intercepts only.
  The values must be from the set \{1,2,\ldots,\eqn{M}\}.
  }
}
% formula is p.49 of Lange 2002.
\details{
  The Dirichlet-multinomial distribution, which is somewhat similar
  to a Dirichlet distribution, has probability function
  \deqn{P(Y_1=y_1,\ldots,Y_M=y_M) =
    {2y_{*} \choose {y_1,\ldots,y_M}}
    \frac{\Gamma(\alpha_{+})}{\Gamma(2y_{*}+\alpha_{+})}
    \prod_{j=1}^M \frac{\Gamma(y_j+\alpha_{j})}{\Gamma(\alpha_{j})}}{%
    P(Y_1=y_1,\ldots,Y_M=y_M) = C_{y_1,\ldots,y_M}^{2y_{*}}
    Gamma(alpha_+) / Gamma( 2y_* + alpha_+)
    prod_{j=1}^M [ Gamma( y_j+ alpha_j) / Gamma( alpha_j)]}
  for \eqn{\alpha_j > 0}{alpha_j > 0},
  \eqn{\alpha_+ = \alpha_1 + \cdots + \alpha_M}{alpha_+ =
  alpha_1 + \cdots + alpha_M},
  and \eqn{2y_{*} = y_1 + \cdots + y_M}{2y_* = y_1 + \cdots + y_M}.
  Here, \eqn{a \choose b}{C_b^a} means ``\eqn{a} choose \eqn{b}'' and
  refers to combinations (see \code{\link[base]{choose}}).
  The (posterior) mean is
  \deqn{E(Y_j) = (y_j + \alpha_j) / (2y_{*} + \alpha_{+})}{%
        E(Y_j) = (y_j + alpha_j) / (2y_{*} + alpha_+)}
  for \eqn{j=1,\ldots,M}{j=1,\ldots,M}, and these are returned
  as the fitted values in an \eqn{M}-column matrix.

% One situation that arises for the Dirichlet-multinomial distribution
% is a locus with M codominant alleles. If in a sample of y_* people,
% allele i appears y_j times, then the maximum likelihood estimate of
% the ith allele frequency is y_j / (2y_*).

}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  \code{\link{rrvglm}}
  and \code{\link{vgam}}.
}
\references{
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.

Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
\emph{Statistical Distributions},
Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.

Paul, S. R., Balasooriya, U. and Banerjee, T. (2005)
Fisher information matrix of the Dirichlet-multinomial distribution.
\emph{Biometrical Journal}, \bold{47}, 230--236.

Tvedebrink, T. (2010)
Overdispersion in allelic counts and \eqn{\theta}-correction in
forensic genetics.
\emph{Theoretical Population Biology}, \bold{78}, 200--210.

%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.

}
\author{ Thomas W. Yee }
\note{
  The response should be a matrix of non-negative values.
  Convergence seems to slow down if there are zero values.
Currently, initial values can be improved upon. This function is almost defunct and may be withdrawn soon. Use \code{\link{dirmultinomial}} instead. } \seealso{ \code{\link{dirmultinomial}}, \code{\link{dirichlet}}, \code{\link{betabinomialff}}, \code{\link{multinomial}}. } \examples{ # Data from p.50 of Lange (2002) alleleCounts <- c(2, 84, 59, 41, 53, 131, 2, 0, 0, 50, 137, 78, 54, 51, 0, 0, 0, 80, 128, 26, 55, 95, 0, 0, 0, 16, 40, 8, 68, 14, 7, 1) dim(alleleCounts) <- c(8, 4) alleleCounts <- data.frame(t(alleleCounts)) dimnames(alleleCounts) <- list(c("White","Black","Chicano","Asian"), paste("Allele", 5:12, sep = "")) set.seed(123) # @initialize uses random numbers fit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9, Allele10,Allele11,Allele12) ~ 1, dirmul.old, trace = TRUE, crit = "c", data = alleleCounts) (sfit <- summary(fit)) vcov(sfit) round(eta2theta(coef(fit), fit@misc$link, fit@misc$earg), digits = 2) # not preferred round(Coef(fit), digits = 2) # preferred round(t(fitted(fit)), digits = 4) # 2nd row of Table 3.5 of Lange (2002) coef(fit, matrix = TRUE) pfit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9, Allele10,Allele11,Allele12) ~ 1, dirmul.old(parallel = TRUE), trace = TRUE, data = alleleCounts) round(eta2theta(coef(pfit, matrix = TRUE), pfit@misc$link, pfit@misc$earg), digits = 2) # 'Right' answer round(Coef(pfit), digits = 2) # 'Wrong' answer due to parallelism constraint } \keyword{models} \keyword{regression} VGAM/man/loglinb3.Rd0000644000176200001440000000635713565414527013606 0ustar liggesusers\name{loglinb3} \alias{loglinb3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Loglinear Model for Three Binary Responses } \description{ Fits a loglinear model to three binary responses. } \usage{ loglinb3(exchangeable = FALSE, zero = c("u12", "u13", "u23")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{exchangeable}{ Logical. If \code{TRUE}, the three marginal probabilities are constrained to be equal. } \item{zero}{ Which linear/additive predictors are modelled as intercept-only? A \code{NULL} means none. See \code{\link{CommonVGAMffArguments}} for further information. } } \details{ The model is \eqn{P(Y_1=y_1,Y_2=y_2,Y_3=y_3) =}{P(Y1=y1,Y2=y2,Y3=y3) =} \deqn{\exp(u_0+u_1 y_1+u_2 y_2+u_3 y_3+u_{12} y_1 y_2+ u_{13} y_1 y_3+u_{23} y_2 y_3)}{% exp(u0 + u1*y1 + u2*y2 + u3*y3 + u12*y1*y2 + u13*y1*y3+ u23*y2*y3)} where \eqn{y_1}{y1}, \eqn{y_2}{y2} and \eqn{y_3}{y3} are 0 or 1, and the parameters are \eqn{u_1}{u1}, \eqn{u_2}{u2}, \eqn{u_3}{u3}, \eqn{u_{12}}{u12}, \eqn{u_{13}}{u13}, \eqn{u_{23}}{u23}. The normalizing parameter \eqn{u_0}{u0} can be expressed as a function of the other parameters. Note that a third-order association parameter, \eqn{u_{123}}{u123} for the product \eqn{y_1 y_2 y_3}{y1*y2*y3}, is assumed to be zero for this family function. The linear/additive predictors are \eqn{(\eta_1,\eta_2,\ldots,\eta_6)^T = (u_1,u_2,u_3,u_{12},u_{13},u_{23})^T}{(eta1,eta2,...,eta6) = (u1,u2,u3,u12,u13,u23)}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. When fitted, the \code{fitted.values} slot of the object contains the eight joint probabilities, labelled as \eqn{(Y_1,Y_2,Y_3)}{(Y1,Y2,Y3)} = (0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1), respectively. } \references{ Yee, T. W. and Wild, C. J. 
(2001)
Discussion to: ``Smoothing spline ANOVA for multivariate Bernoulli
observations, with application to ophthalmology data (with discussion)''
by Gao, F., Wahba, G., Klein, R., Klein, B.
\emph{Journal of the American Statistical Association},
\bold{96}, 127--160.

McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.

%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.

}
\author{ Thomas W. Yee }
\note{
  The response must be a 3-column matrix of ones and zeros only.
  Note that each of the 8 combinations of the multivariate response
  needs to appear in the data set; therefore data sets need to be
  large in order for this family function to work.
  After estimation, the response attached to the object is also a
  3-column matrix; possibly in the future it might change into an
  8-column matrix.
}
\seealso{
  \code{\link{loglinb2}},
  \code{\link{hunua}}.
}
\examples{
lfit <- vglm(cbind(cyadea, beitaw, kniexc) ~ altitude, loglinb3,
             data = hunua, trace = TRUE)
coef(lfit, matrix = TRUE)
head(fitted(lfit))
summary(lfit)
}
\keyword{models}
\keyword{regression}
VGAM/man/bifrankcop.Rd0000644000176200001440000000604213565414527014202 0ustar liggesusers\name{bifrankcop}
\alias{bifrankcop}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Frank's Bivariate Distribution Family Function }
\description{
  Estimate the association parameter of Frank's bivariate
  distribution by maximum likelihood estimation.
}
\usage{
bifrankcop(lapar = "loglink", iapar = 2, nsimEIM = 250)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lapar}{
  Link function applied to the (positive) association parameter
  \eqn{\alpha}{alpha}.
  See \code{\link{Links}} for more choices.
  }
  \item{iapar}{
  Numeric. Initial value for \eqn{\alpha}{alpha}.
  If a convergence failure occurs try assigning a different value.
  }
  \item{nsimEIM}{
  See \code{\link{CommonVGAMffArguments}}.
  }
}
\details{
  The cumulative distribution function is
  \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = H_{\alpha}(y_1,y_2) =
        \log_{\alpha} [1 + (\alpha^{y_1}-1)(\alpha^{y_2}-1)/
        (\alpha-1)] }{%
        P(Y1 <= y1, Y2 <= y2) = H_{alpha}(y1,y2) =
        log_{alpha} [1 + (alpha^(y1)-1)*(alpha^(y2)-1)/
        (alpha-1)] }
  for \eqn{\alpha \ne 1}{alpha != 1}.
  Note the logarithm here is to base \eqn{\alpha}{alpha}.
  The support of the function is the unit square.

  When \eqn{0 < \alpha < 1}{0 < alpha < 1} the responses are
  positively correlated;
  when \eqn{\alpha > 1}{alpha > 1} they are negatively correlated
  and the density satisfies
  \eqn{h_{\alpha}(y_1,y_2) =
       h_{1/\alpha}(1-y_1,y_2)}{h_{alpha}(y1,y2) =
       h_{1/alpha}(1-y1,y2)}.
  If \eqn{\alpha=1}{alpha=1} then
  \eqn{H(y_1,y_2) = y_1 y_2}{H(y1,y2)=y1*y2},
  i.e., uniform on the unit square.
  As \eqn{\alpha}{alpha} approaches 0 then
  \eqn{H(y_1,y_2) = \min(y_1,y_2)}{H(y1,y2)=min(y1,y2)}.
  As \eqn{\alpha}{alpha} approaches infinity then
  \eqn{H(y_1,y_2) = \max(0, y_1+y_2-1)}{H(y1,y2)=max(0,y1+y2-1)}.

  The default is to use Fisher scoring implemented using
  \code{\link{rbifrankcop}}.
  For intercept-only models an alternative is to set
  \code{nsimEIM=NULL} so that a variant of Newton-Raphson is used.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
%% improve the references
\references{
Genest, C. (1987)
Frank's family of bivariate distributions.
\emph{Biometrika},
\bold{74}, 549--555.
}
\author{ T. W. Yee }
\note{
  The response must be a two-column matrix.
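  As a quick simulation sketch (illustrative only; the value of
  \code{apar} below is arbitrary), the standard uniform marginals
  can be checked numerically:

\preformatted{
set.seed(1)
ymat <- rbifrankcop(n = 5000, apar = exp(2))
colMeans(ymat)       # Both values should be near 0.5
apply(ymat, 2, var)  # Both values should be near 1/12
}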
  Currently, the fitted value is a matrix with two columns and values
  equal to a half.
  This is because the marginal distributions correspond to a standard
  uniform distribution.
}
\seealso{
  \code{\link{rbifrankcop}},
  \code{\link{bifgmcop}},
  \code{\link{simulate.vlm}}.
}
\examples{
\dontrun{
ymat <- rbifrankcop(n = 2000, apar = exp(4))
plot(ymat, col = "blue")
fit <- vglm(ymat ~ 1, fam = bifrankcop, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
head(fitted(fit))
summary(fit)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/depvar.Rd0000644000176200001440000000263613565414527013352 0ustar liggesusers\name{depvar}
\alias{depvar}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Response Variable Extracted }
\description{
  A generic function that extracts the response/dependent
  variable from objects.
}
\usage{
depvar(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  An object that has some response/dependent variable.
  }
  \item{\dots}{
  Other arguments fed into the specific methods function of
  the model.
  In particular, sometimes \code{type = c("lm", "lm2")} is
  available, in which case the first one is chosen if the
  user does not input a value.
  The latter value corresponds to argument \code{form2}, and
  sometimes a response for that is optional.
  }
}
\details{
  By default, calling this function is preferred to accessing
  \code{fit@y} directly, say.
}
\value{
  The response/dependent variable, usually as a matrix or vector.
}
%\references{
%
%}
\author{ Thomas W. Yee }
%\note{
%}
%\section{Warning }{
% This
%}
\seealso{
  \code{\link[stats]{model.matrix}},
  \code{\link{vglm}}.
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
fit@y  # Sample proportions (not recommended)
depvar(fit)  # Better than using fit@y; dependent variable (response)
weights(fit, type = "prior")  # Number of observations
}
\keyword{models}
\keyword{regression}
VGAM/man/waldff.Rd0000644000176200001440000000347513565414527013336 0ustar liggesusers\name{waldff}
\alias{waldff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Wald Distribution Family Function }
\description{
  Estimates the parameter of the standard Wald distribution
  by maximum likelihood estimation.
}
\usage{
waldff(llambda = "loglink", ilambda = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{llambda,ilambda}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
}
\details{
  The standard Wald distribution is a special case of the
  inverse Gaussian distribution with \eqn{\mu=1}{mu=1}.
  It has a density that can be written as
  \deqn{f(y;\lambda) = \sqrt{\lambda/(2\pi y^3)} \;
       \exp\left(-\lambda (y-1)^2/(2 y)\right)}{%
       f(y;lambda) = sqrt(lambda/(2*pi*y^3)) *
       exp(-lambda*(y-1)^2/(2*y)) }
  where \eqn{y>0} and \eqn{\lambda>0}{lambda>0}.
  The mean of \eqn{Y} is \eqn{1} (returned as the fitted values) and
  its variance is \eqn{1/\lambda}{1/lambda}.
  By default, \eqn{\eta=\log(\lambda)}{eta=log(lambda)}.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as
  \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
Johnson, N. L., Kotz, S. and Balakrishnan, N. (1994)
\emph{Continuous Univariate Distributions},
2nd edition, Volume 1, New York: Wiley.
}
\author{ T. W. Yee }
\note{
  The \pkg{VGAM} family function \code{\link{inv.gaussianff}}
  estimates the location parameter \eqn{\mu}{mu} too.
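  The unit mean and the variance \eqn{1/\lambda}{1/lambda} stated
  above can be checked by simulation, e.g., (a sketch; the value of
  \eqn{\lambda}{lambda} is arbitrary)

\preformatted{
set.seed(1)
lambda <- exp(1)
y <- rinv.gaussian(n = 10000, mu = 1, lambda = lambda)
c(mean(y), 1)          # Sample mean vs theoretical mean
c(var(y), 1 / lambda)  # Sample variance vs 1/lambda
}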
} \seealso{ \code{\link{inv.gaussianff}}, \code{\link{rinv.gaussian}}. } \examples{ wdata <- data.frame(y = rinv.gaussian(n = 1000, mu = 1, lambda = exp(1))) wfit <- vglm(y ~ 1, waldff(ilambda = 0.2), data = wdata, trace = TRUE) coef(wfit, matrix = TRUE) Coef(wfit) summary(wfit) } \keyword{models} \keyword{regression} VGAM/man/ordpoisson.Rd0000644000176200001440000001231313565414527014261 0ustar liggesusers\name{ordpoisson} \alias{ordpoisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Ordinal Poisson Family Function } \description{ Fits a Poisson regression where the response is ordinal (the Poisson counts are grouped between known cutpoints). } \usage{ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL, Levels = NULL, init.mu = NULL, parallel = FALSE, zero = NULL, link = "loglink") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{cutpoints}{ Numeric. The cutpoints, \eqn{K_l}. These must be non-negative integers. \code{Inf} values may be included. See below for further details. } \item{countdata}{ Logical. Is the response (LHS of formula) in count-data format? If not then the response is a matrix or vector with values \code{1}, \code{2}, \ldots, \code{L}, say, where \code{L} is the number of levels. Such input can be generated with \code{\link[base]{cut}} with argument \code{labels = FALSE}. If \code{countdata = TRUE} then the response is expected to be in the same format as \code{depvar(fit)} where \code{fit} is a fitted model with \code{ordpoisson} as the \pkg{VGAM} family function. That is, the response is matrix of counts with \code{L} columns (if \code{NOS = 1}). } \item{NOS}{ Integer. The number of species, or more generally, the number of response random variates. This argument must be specified when \code{countdata = TRUE}. Usually \code{NOS = 1}. } \item{Levels}{ Integer vector, recycled to length \code{NOS} if necessary. The number of levels for each response random variate. This argument should agree with \code{cutpoints}. This argument must be specified when \code{countdata = TRUE}. } \item{init.mu}{ Numeric. Initial values for the means of the Poisson regressions. Recycled to length \code{NOS} if necessary. Use this argument if the default initial values fail (the default is to compute an initial value internally). } \item{parallel, zero, link}{ See \code{\link{poissonff}}. } } \details{ This \pkg{VGAM} family function uses maximum likelihood estimation (Fisher scoring) to fit a Poisson regression to each column of a matrix response. The data, however, is ordinal, and is obtained from known integer cutpoints. Here, \eqn{l=1,\ldots,L} where \eqn{L} (\eqn{L \geq 2}{L >= 2}) is the number of levels. In more detail, let \eqn{Y^*=l} if \eqn{K_{l-1} < Y \leq K_{l}}{K_{l-1} < Y <= K_{l}} where the \eqn{K_l} are the cutpoints. We have \eqn{K_0=-\infty}{K_0=-Inf} and \eqn{K_L=\infty}{K_L=Inf}. The response for this family function corresponds to \eqn{Y^*} but we are really interested in the Poisson regression of \eqn{Y}. If \code{NOS=1} then the argument \code{cutpoints} is a vector \eqn{(K_1,K_2,\ldots,K_L)} where the last value (\code{Inf}) is optional. If \code{NOS>1} then the vector should have \code{NOS-1} \code{Inf} values separating the cutpoints. For example, if there are \code{NOS=3} responses, then something like \code{ordpoisson(cut = c(0, 5, 10, Inf, 20, 30, Inf, 0, 10, 40, Inf))} is valid. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). 
The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Yee, T. W. (2012) \emph{Ordinal ordination with normalizing link functions for count data}, (in preparation). } \author{ Thomas W. Yee } \note{ Sometimes there are no observations between two cutpoints. If so, the arguments \code{Levels} and \code{NOS} need to be specified too. See below for an example. } \section{Warning }{ The input requires care as little to no checking is done. If \code{fit} is the fitted object, have a look at \code{fit@extra} and \code{depvar(fit)} to check. } \seealso{ \code{\link{poissonff}}, \code{\link{polf}}, \code{\link[base:factor]{ordered}}. } \examples{ set.seed(123) # Example 1 x2 <- runif(n <- 1000); x3 <- runif(n) mymu <- exp(3 - 1 * x2 + 2 * x3) y1 <- rpois(n, lambda = mymu) cutpts <- c(-Inf, 20, 30, Inf) fcutpts <- cutpts[is.finite(cutpts)] # finite cutpoints ystar <- cut(y1, breaks = cutpts, labels = FALSE) \dontrun{ plot(x2, x3, col = ystar, pch = as.character(ystar)) } table(ystar) / sum(table(ystar)) fit <- vglm(ystar ~ x2 + x3, fam = ordpoisson(cutpoi = fcutpts)) head(depvar(fit)) # This can be input if countdata = TRUE head(fitted(fit)) head(predict(fit)) coef(fit, matrix = TRUE) fit@extra # Example 2: multivariate and there are no obsns between some cutpoints cutpts2 <- c(-Inf, 0, 9, 10, 20, 70, 200, 201, Inf) fcutpts2 <- cutpts2[is.finite(cutpts2)] # finite cutpoints y2 <- rpois(n, lambda = mymu) # Same model as y1 ystar2 <- cut(y2, breaks = cutpts2, labels = FALSE) table(ystar2) / sum(table(ystar2)) fit <- vglm(cbind(ystar,ystar2) ~ x2 + x3, fam = ordpoisson(cutpoi = c(fcutpts,Inf,fcutpts2,Inf), Levels = c(length(fcutpts)+1,length(fcutpts2)+1), parallel = TRUE), trace = TRUE) coef(fit, matrix = TRUE) fit@extra constraints(fit) summary(depvar(fit)) # Some columns have all zeros } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/plotvgam.control.Rd0000644000176200001440000000722713565414527015402 0ustar liggesusers\name{plotvgam.control} \alias{plotvgam.control} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Control Function for plotvgam() } \description{ Provides default values for many arguments available for \code{plotvgam()}. } \usage{ plotvgam.control(which.cf = NULL, xlim = NULL, ylim = NULL, llty = par()$lty, slty = "dashed", pcex = par()$cex, pch = par()$pch, pcol = par()$col, lcol = par()$col, rcol = par()$col, scol = par()$col, llwd = par()$lwd, slwd = par()$lwd, add.arg = FALSE, one.at.a.time = FALSE, .include.dots = TRUE, noxmean = FALSE, shade = FALSE, shcol = "gray80", ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{which.cf}{ Integer vector specifying which component functions are to be plotted (for each covariate). Must have values from the set \{1,2,\ldots,\eqn{M}\}. } \item{xlim}{ Range for the x-axis. } \item{ylim}{ Range for the y-axis. } \item{llty}{ Line type for the fitted functions (lines). Fed into \code{par(lty)}. } \item{slty}{ Line type for the standard error bands. Fed into \code{par(lty)}. } \item{pcex}{ Character expansion for the points (residuals). Fed into \code{par(cex)}. } \item{pch}{ Character used for the points (residuals). Same as \code{par(pch)}. } \item{pcol}{ Color of the points. Fed into \code{par(col)}. } \item{lcol}{ Color of the fitted functions (lines). Fed into \code{par(col)}. } \item{rcol}{ Color of the rug plot. Fed into \code{par(col)}. } \item{scol}{ Color of the standard error bands. Fed into \code{par(col)}. 
} \item{llwd}{ Line width of the fitted functions (lines). Fed into \code{par(lwd)}. } \item{slwd}{ Line width of the standard error bands. Fed into \code{par(lwd)}. } \item{add.arg}{ Logical. If \code{TRUE} then the plot will be added to an existing plot, otherwise a new plot will be made. } \item{one.at.a.time}{ Logical. If \code{TRUE} then the plots are done one at a time, with the user having to hit the return key between the plots. } \item{.include.dots}{ Not to be used by the user. } \item{noxmean}{ Logical. If \code{TRUE} then the point at the mean of \eqn{x}, which is added when standard errors are specified and it thinks the function is linear, is not added. One might use this argument if \code{ylab} is specified. } \item{shade, shcol}{ \code{shade} is logical; if \code{TRUE} then the pointwise SE band is shaded gray by default. The colour can be adjusted by setting \code{shcol}. These arguments are ignored unless \code{se = TRUE} and \code{overlay = FALSE}; If \code{shade = TRUE} then \code{scol} is ignored. } \item{\dots}{ Other arguments that may be fed into \code{par()}. } In the above, \eqn{M} is the number of linear/additive predictors. } \details{ The most obvious features of \code{\link{plotvgam}} can be controlled by the above arguments. } \value{ A list with values matching the arguments. } \references{ Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. } \author{ Thomas W. Yee } %\note{ % This function enables \code{plotvgam()} to work in a similar % manner to S-PLUS's \code{plot.gam()}. % However, there is no interactive options yet. % %} \seealso{ \code{\link{plotvgam}}. } \examples{ plotvgam.control(lcol = c("red", "blue"), scol = "darkgreen", se = TRUE) } \keyword{models} \keyword{regression} \keyword{smooth} \keyword{dplot} VGAM/man/gipoisson.mlm.Rd0000644000176200001440000001501513565414527014662 0ustar liggesusers\name{gipoisson.mlm} \alias{gipoisson.mlm} %\alias{gipoisson.mlmff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Inflated Poisson Distribution Family Function} \description{ Fits a generally-inflated Poisson distribution based on a mixture model involving a multinomial distribution and a Poisson distribution. } \usage{ gipoisson.mlm(inflate = 0, zero = NULL, llambda = "loglink", type.fitted = c("mean", "lambda", "pstr.i", "onempstr.i", "Pstr.i"), imethod = 1, mux.inflate = 0.5, ipstr0 = NULL, ilambda = NULL, ishrinkage = 0.95, probs.y = 0.35) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{inflate}{ Vector of inflated values, i.e., nonnegative integers. Must have unique values only. May be a \code{NULL}, which stands for empty set (same as \code{\link{poissonff}}). The default is the 0-inflated Poisson. See below for recommendations of its use. % Must be sorted and have unique values only. } \item{llambda}{ See \code{\link{Links}} for more choices and information. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for information. The choice \code{"pstr.i"} is the sum of the structural probabilities and \code{"onempstr.i"} is its complement. See below for more details. } \item{mux.inflate}{ Numeric, between 0 and 1. For the initial values of the multinomial probabilities, the sample proportions are multiplied by this multiplication factor to estimate the amount of inflation. 
  The default is to assume that half of all the inflated values are
  due to inflation, and that the other half come from the Poisson
  distribution.
  A value close to 1 is recommended for severe amounts of inflation.
  }
  \item{imethod, ipstr0, ilambda}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{probs.y, ishrinkage}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{zero}{
  See \code{\link{CommonVGAMffArguments}} for information.
  Setting \code{zero = "pstr"} will model the multinomial
  probabilities as simply as possible (intercept-only), hence should
  be more numerically stable than the default; this is recommended
  for many analyses, especially when there are many explanatory
  variables.
  }
}
\details{
  The generally-inflated Poisson distribution is a mixture of an
  ordinary Poisson distribution and a multinomial distribution that
  inflates certain values (the \code{inflate} argument).
  The latter is modelled by a multinomial logit model.
  Thus each inflated value has two sources:
  either the Poisson distribution or the multinomial distribution.
  The 0-inflated Poisson distribution is a special case and
  is the default.

  This family function can be easily misused.
  The data should exhibit \emph{strong} evidence of inflation in
  certain values before the \code{inflate} argument is assigned
  these values.
  Deflation or very little inflation will result in numerical
  problems.
  The \code{inflate} argument should not be assigned a vector having
  too many values.
  In general, \code{\link{gatpoisson.mlm}} is recommended instead
  because it handles both deflation and inflation.
  This function implements Fisher scoring and currently does not
  handle multiple responses.
}
\value{
  An object of class \code{"vglmff"}
  (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as
  \code{\link{vglm}},
  and \code{\link{vgam}}.

  The \code{fitted.values} slot of the fitted object, which should be
  extracted by the generic function \code{fitted}, returns the mean
  (default).
  The choice \code{type.fitted = "pstr.i"} returns the sum of all
  the inflated probabilities.
  The choice \code{type.fitted = "Pstr.i"} returns the individual
  inflated probabilities, as a matrix.

% returns the mean \eqn{\mu}{mu} (default).

}
%\references{
%}
%20111123; this has been fixed up with proper FS using EIM.
%\section{Warning }{
% Inference obtained from \code{summary.vglm}
% and \code{summary.vgam} may or may not be correct.
% In particular, the p-values, standard errors and degrees of
% freedom may need adjustment. Use simulation on artificial
% data to check that these are reasonable.
%
%
%}
\author{ T. W. Yee and Chenchen Ma}
\note{
  This family function does not yet have the robustness of
  \code{\link{multinomial}} when computing the working weight
  matrices.
  Several improvements are needed, e.g., better labelling, better
  initial values, and the handling of multiple responses.
  Also, \code{\link{summaryvglm}} does not yet work on these
  fitted models.

% yettodo: see lines just above.

% This family function effectively combines \code{\link{poissonff}}
% and \code{\link{multinomial}} into one family function.

% This family function can handle multiple responses,
% e.g., more than one species.

}
\seealso{
  \code{\link{Gaitpois.mlm}},
  \code{\link{gatpoisson.mlm}},
  \code{\link{zipoisson}},
  \code{\link{zipoissonff}},
  \code{\link{multinomial}},
  \code{\link{CommonVGAMffArguments}},
  \code{\link{simulate.vlm}}.
% \code{\link{rgaitpois.mlm}}, } \examples{ ivec <- c(0, 3) # Inflate these values gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, x3 = runif(nn)) gdata <- transform(gdata, lambda1 = exp(1 + 2 * x2 + 0.5 * x3), lambda2 = exp(1 - 1 * x2 + 0.5 * x3), lambda3 = exp(1)) gdata <- transform(gdata, y1 = rgaitpois.mlm(nn, lambda1, pstr.i = c(0.2, 0.3), byrow = TRUE, inflate = ivec), y2 = rgaitpois.mlm(nn, lambda2, pstr.i = c(0.2, 0.3), byrow = TRUE, inflate = ivec), y3 = rgaitpois.mlm(nn, lambda3, pstr.i = c(0.2, 0.3), byrow = TRUE, inflate = ivec)) gipoisson.mlm(inflate = ivec) with(gdata, table(y1)) with(gdata, table(y2)) with(gdata, table(y3)) fit1 <- vglm(y1 ~ x2 + x3, gipoisson.mlm(inflate = ivec), crit = "coef", trace = TRUE, data = gdata) head(fitted(fit1)) head(predict(fit1)) coef(fit1, matrix = TRUE) # summary(fit1) # Currently does not work!! # Another example ------------------------------ fit3 <- vglm(y3 ~ 1, gipoisson.mlm(inflate = ivec), gdata, trace = TRUE) coef(fit3, matrix = TRUE) } \keyword{models} \keyword{regression} %gipoisson.mlm(lpobs0 = "logitlink", llambda = "loglink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL) %gipoisson.mlmff(llambda = "loglink", lonempobs0 = "logitlink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0") VGAM/man/truncparetoUC.Rd0000644000176200001440000000533713565414527014670 0ustar liggesusers\name{Truncpareto} \alias{Truncpareto} \alias{dtruncpareto} \alias{ptruncpareto} \alias{qtruncpareto} \alias{rtruncpareto} \title{The Truncated Pareto Distribution} \description{ Density, distribution function, quantile function and random generation for the upper truncated Pareto(I) distribution with parameters \code{lower}, \code{upper} and \code{shape}. } \usage{ dtruncpareto(x, lower, upper, shape, log = FALSE) ptruncpareto(q, lower, upper, shape, lower.tail = TRUE, log.p = FALSE) qtruncpareto(p, lower, upper, shape) rtruncpareto(n, lower, upper, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n, log}{Same meaning as \code{\link[stats:Uniform]{runif}}. } \item{lower, upper, shape}{ the lower, upper and shape (\eqn{k}) parameters. If necessary, values are recycled. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dtruncpareto} gives the density, \code{ptruncpareto} gives the distribution function, \code{qtruncpareto} gives the quantile function, and \code{rtruncpareto} generates random deviates. } \references{ Aban, I. B., Meerschaert, M. M. and Panorska, A. K. (2006) Parameter estimation for the truncated Pareto distribution, \emph{Journal of the American Statistical Association}, \bold{101}(473), 270--277. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{truncpareto}}, the \pkg{VGAM} family function for estimating the parameter \eqn{k} by maximum likelihood estimation, for the formula of the probability density function and the range restrictions imposed on the parameters. } %%\note{ %% The truncated Pareto distribution is %%} \seealso{ \code{\link{truncpareto}}. 
}
\examples{
lower <- 3; upper <- 8; kay <- exp(0.5)
\dontrun{ xx <- seq(lower - 0.5, upper + 0.5, len = 401)
plot(xx, dtruncpareto(xx, low = lower, upp = upper, shape = kay),
     main = "Truncated Pareto density split into 10 equal areas",
     type = "l", ylim = 0:1, xlab = "x")
abline(h = 0, col = "blue", lty = 2)
qq <- qtruncpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper,
                   shape = kay)
lines(qq, dtruncpareto(qq, low = lower, upp = upper, shape = kay),
      col = "purple", lty = 3, type = "h")
lines(xx, ptruncpareto(xx, low = lower, upp = upper, shape = kay),
      col = "orange") }
pp <- seq(0.1, 0.9, by = 0.1)
qq <- qtruncpareto(pp, lower = lower, upper = upper, shape = kay)
ptruncpareto(qq, lower = lower, upper = upper, shape = kay)
qtruncpareto(ptruncpareto(qq, lower = lower, upper = upper,
                          shape = kay),
             lower = lower, upper = upper, shape = kay) - qq  # Should be all 0
}
\keyword{distribution}
VGAM/man/calibrate.qrrvglm.control.Rd0000644000176200001440000000656613565414527017173 0ustar liggesusers\name{calibrate.qrrvglm.control}
\alias{calibrate.qrrvglm.control}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Control Function for CQO/CAO Calibration }
\description{
  Algorithmic constants and parameters for running
  \code{\link{calibrate.qrrvglm}} are set using this function.
}
\usage{
calibrate.qrrvglm.control(object, trace = FALSE, method.optim = "BFGS",
                          gridSize = ifelse(Rank == 1, 21, 9),
                          varI.latvar = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  The fitted CQO/CAO model. The user should ignore this argument.

% The fitted CQO/UQO/CAO model. The user should ignore this argument.

  }
  \item{trace}{
  Logical indicating if output should be produced for each iteration.
  It is a good idea to set this argument to be \code{TRUE} since the
  computations are expensive.
  }
  \item{method.optim}{
  Character. Fed into the \code{method} argument of
  \code{\link[stats]{optim}}.
  }
  \item{gridSize}{
  Numeric, recycled to length \code{Rank}.
  Controls the resolution of the grid used for initial values.
  For each latent variable, an equally spaced grid of length
  \code{gridSize} is cast from the smallest site score to the
  largest site score. Then the likelihood function is evaluated
  on the grid, and the best fit is chosen as the initial value.
  Thus increasing the value of \code{gridSize} increases the chance
  of obtaining the global solution; however, the computing time
  increases proportionately.
  }
  \item{varI.latvar}{
  Logical. For CQO objects only, this argument is fed into
  \code{\link{Coef.qrrvglm}}.
  }
  \item{\dots}{
  Avoids an error message for extraneous arguments.
  }
}
\details{
  Most CQO/CAO users will only need to make use of \code{trace}
  and \code{gridSize}. These arguments should be used inside their
  call to \code{\link{calibrate.qrrvglm}}, not this function
  directly.
}
\value{
  A list with the following components.
  \item{trace}{Numeric (even though the input can be logical). }
  \item{gridSize}{Positive integer. }
  \item{varI.latvar}{Logical.}
}
\references{
Yee, T. W. (2018)
On constrained and unconstrained quadratic ordination.
\emph{Manuscript in preparation}.
}
% \author{T. W. Yee}
\note{
  Despite the name of this function, CAO models are handled as well.

% Despite the name of this function, UQO and CAO models are handled

}
\seealso{
  \code{\link{calibrate.qrrvglm}},
  \code{\link{Coef.qrrvglm}}.
}
\examples{
\dontrun{
hspider[, 1:6] <- scale(hspider[, 1:6])  # Needed for I.tol=TRUE
set.seed(123)
p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
                Pardpull, Trocterr, Zoraspin) ~
          WaterCon + BareSand + FallTwig +
          CoveMoss + CoveHerb + ReflLux,
          family = poissonff, data = hspider, I.tol = TRUE)
sort(deviance(p1, history = TRUE))  # A history of all the iterations
siteNos <- 3:4  # Calibrate these sites
cp1 <- calibrate(p1, trace = TRUE,
                 new = data.frame(depvar(p1)[siteNos, ]))
}
\dontrun{
# Graphically compare the actual site scores with their calibrated values
persp(p1, main = "Site scores: solid=actual, dashed=calibrated",
      label = TRUE, col = "blue", las = 1)
abline(v = latvar(p1)[siteNos], col = seq(siteNos))  # Actual site scores
abline(v = cp1, lty = 2, col = seq(siteNos))  # Calibrated values
}
}
\keyword{models}
\keyword{regression}
VGAM/man/QvarUC.Rd0000644000176200001440000002125013565414527013223 0ustar liggesusers\name{Qvar}
\alias{Qvar}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Quasi-variances Preprocessing Function
%%  ~~function to do ... ~~
}
\description{
  Takes a \code{\link{vglm}} fit or a variance-covariance matrix, and
  preprocesses it for \code{\link{rcim}} and \code{\link{uninormal}}
  so that quasi-variances can be computed.
%%  ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Qvar(object, factorname = NULL, which.linpred = 1,
     coef.indices = NULL, labels = NULL, dispersion = NULL,
     reference.name = "(reference)", estimates = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  A \code{"\link[=vglmff-class]{vglm}"} object or a variance-covariance
  matrix, e.g., \code{vcov(vglm.object)}. The former is preferred since
  it contains all the information needed. If a matrix then
  \code{factorname} and/or \code{coef.indices} should be specified to
  identify the factor.
  }
  \item{which.linpred}{
  A single integer from the set \code{1:M}.
  Specifies which linear predictor to use.
  Let the value of \code{which.linpred} be called \eqn{j}.
  Then the factor should appear in that linear predictor, hence the
  \eqn{j}th row of the constraint matrix corresponding to the factor
  should have at least one nonzero value.
  Currently the \eqn{j}th row must have exactly one nonzero value
  because programming it for more than one nonzero value is difficult.
  }
  \item{factorname}{
  Character.
  If the \code{\link{vglm}} object contains more than one factor as
  explanatory variable then this argument should be the name of the
  factor of interest.
  If \code{object} is a variance-covariance matrix then this argument
  should also be specified.
  }
  \item{labels}{
  Character.
  Optional, for labelling the variance-covariance matrix.
  }
  \item{dispersion}{
  Numeric.
  Optional, passed into \code{vcov()} with the same argument name.
  }
  \item{reference.name}{
  Character.
  Label for the reference level.
  }
  \item{coef.indices}{
  Optional numeric vector of length at least 3 specifying the indices
  of the factor from the variance-covariance matrix.
  }
  \item{estimates}{
  An optional vector of estimated coefficients
  (redundant if \code{object} is a model).
  }
}
\details{
  Suppose a factor with \eqn{L} levels is an explanatory variable in a
  regression model. By default, R treats the first level as baseline so
  that its coefficient is set to zero. It estimates the other
  \eqn{L-1} coefficients, and together with their associated standard
  errors, this is the conventional output.
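  For instance, a small sketch of that conventional output, using the
  \code{ships} model that is fitted in the examples below:

\preformatted{
data("ships", package = "MASS")
shipmodel <- vglm(incidents ~ type + year + period, poissonff,
                  offset = log(service), subset = (service > 0),
                  data = ships)
coef(shipmodel)  # No coefficient (and no SE) for the baseline 'typeA'
}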
  From the complete variance-covariance matrix one can compute \eqn{L}
  quasi-variances based on all pairwise differences of the
  coefficients. They are based on an approximation, and can be treated
  as uncorrelated. In minimizing the relative (not absolute) errors
  it is not hard to see that the estimation involves an RCIM
  (\code{\link{rcim}}) with an exponential link function
  (\code{\link{explink}}).

  If \code{object} is a model, then at least one of \code{factorname}
  or \code{coef.indices} must be non-\code{NULL}. The value of
  \code{coef.indices}, if non-\code{NULL}, determines which rows and
  columns of the model's variance-covariance matrix to use. If
  \code{coef.indices} contains a zero, an extra row and column are
  included at the indicated position, to represent the zero variances
  and covariances associated with a reference level. If
  \code{coef.indices} is \code{NULL}, then \code{factorname} should be
  the name of a factor effect in the model, and is used in order to
  extract the necessary variance-covariance estimates.

  Quasi-variances were first implemented in R with \pkg{qvcalc}.
  This implementation draws heavily from that.
}
\value{
  An \eqn{L} by \eqn{L} matrix whose \eqn{i}-\eqn{j} element is the
  logarithm of the variance of the \eqn{i}th coefficient minus the
  \eqn{j}th coefficient, for all values of \eqn{i} and \eqn{j}. The
  diagonal elements are arbitrary and are set to zero.

  The matrix has an attribute that corresponds to the prior weight
  matrix; it is accessed by \code{\link{uninormal}} and replaces the
  usual \code{weights} argument of \code{\link{vglm}}. This weight
  matrix has ones on the off-diagonals and some small positive number
  on the diagonals.
}
\references{
Firth, D. (2003)
Overcoming the reference category problem in the presentation of
statistical models.
\emph{Sociological Methodology}, \bold{33}, 1--18.

Firth, D. and de Menezes, R. X. (2004)
Quasi-variances.
\emph{Biometrika}, \bold{91}, 65--80.

Yee, T. W. and Hadi, A. F. (2014)
Row-column interaction models, with an R implementation.
\emph{Computational Statistics}, \bold{29}, 1427--1445.
}
\author{
  T. W. Yee, based heavily on \code{qvcalc()} in \pkg{qvcalc}
  written by David Firth.
}
\note{
  This is an adaptation of \code{qvcalc()} in \pkg{qvcalc}.
  It should work for all \code{\link{vglm}} models with one linear
  predictor, i.e., \eqn{M = 1}. For \eqn{M > 1} the factor should
  appear only in one of the linear predictors.

  It is important to set \code{maxit} to be larger than usual for
  \code{\link{rcim}} since convergence is slow. Upon successful
  convergence the \eqn{i}th row effect and the \eqn{i}th column effect
  should be equal. A simple computation involving the fitted and
  predicted values allows the quasi-variances to be extracted (see
  example below).

  A function to plot \emph{comparison intervals} has not been written
  here.
}
\section{Warning }{
  Negative quasi-variances may occur (one of them and only one),
  though they are rare in practice. If so then numerical problems may
  occur. See \code{qvcalc()} for more information.
}
\seealso{
  \code{\link{rcim}},
  \code{\link{vglm}},
  \code{\link{qvar}},
  \code{\link{uninormal}},
  \code{\link{explink}},
  \code{qvcalc()} in \pkg{qvcalc},
  \code{\link[MASS]{ships}}.
%% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ # Example 1 data("ships", package = "MASS") Shipmodel <- vglm(incidents ~ type + year + period, poissonff, offset = log(service), # trace = TRUE, model = TRUE, data = ships, subset = (service > 0)) # Easiest form of input fit1 <- rcim(Qvar(Shipmodel, "type"), uninormal("explink"), maxit = 99) qvar(fit1) # Easy method to get the quasi-variances qvar(fit1, se = TRUE) # Easy method to get the quasi-standard errors (quasiVar <- exp(diag(fitted(fit1))) / 2) # Version 1 (quasiVar <- diag(predict(fit1)[, c(TRUE, FALSE)]) / 2) # Version 2 (quasiSE <- sqrt(quasiVar)) # Another form of input fit2 <- rcim(Qvar(Shipmodel, coef.ind = c(0, 2:5), reference.name = "typeA"), uninormal("explink"), maxit = 99) \dontrun{ qvplot(fit2, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) } # The variance-covariance matrix is another form of input (not recommended) fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])), labels = c("typeA", "typeB", "typeC", "typeD", "typeE"), estimates = c(typeA = 0, coef(Shipmodel)[2:5])), uninormal("explink"), maxit = 99) (QuasiVar <- exp(diag(fitted(fit3))) / 2) # Version 1 (QuasiVar <- diag(predict(fit3)[, c(TRUE, FALSE)]) / 2) # Version 2 (QuasiSE <- sqrt(quasiVar)) \dontrun{ qvplot(fit3) } # Example 2: a model with M > 1 linear predictors \dontrun{ require("VGAMdata") xs.nz.f <- subset(xs.nz, sex == "F") xs.nz.f <- subset(xs.nz.f, !is.na(babies) & !is.na(age) & !is.na(ethnicity)) xs.nz.f <- subset(xs.nz.f, ethnicity != "Other") clist <- list("sm.bs(age, df = 4)" = rbind(1, 0), "sm.bs(age, df = 3)" = rbind(0, 1), "ethnicity" = diag(2), "(Intercept)" = diag(2)) fit1 <- vglm(babies ~ sm.bs(age, df = 4) + sm.bs(age, df = 3) + ethnicity, zipoissonff(zero = NULL), xs.nz.f, constraints = clist, trace = TRUE) Fit1 <- rcim(Qvar(fit1, "ethnicity", which.linpred = 1), uninormal("explink", imethod = 1), maxit = 99, trace = TRUE) Fit2 <- rcim(Qvar(fit1, "ethnicity", which.linpred = 2), uninormal("explink", imethod = 1), maxit = 99, trace = TRUE) } \dontrun{ par(mfrow = c(1, 2)) qvplot(Fit1, scol = "blue", pch = 16, main = expression(eta[1]), slwd = 1.5, las = 1, length.arrows = 0.07) qvplot(Fit2, scol = "blue", pch = 16, main = expression(eta[2]), slwd = 1.5, las = 1, length.arrows = 0.07) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} \keyword{regression} % \code{\link[qvcalc:qvcalc]{qvcalc}} in \pkg{qvcalc} % quasipoissonff, offset = log(service), VGAM/man/kumarUC.Rd0000644000176200001440000000424513565414527013436 0ustar liggesusers\name{Kumar} \alias{Kumar} \alias{dkumar} \alias{pkumar} \alias{qkumar} \alias{rkumar} \title{The Kumaraswamy Distribution} \description{ Density, distribution function, quantile function and random generation for the Kumaraswamy distribution. } \usage{ dkumar(x, shape1, shape2, log = FALSE) pkumar(q, shape1, shape2, lower.tail = TRUE, log.p = FALSE) qkumar(p, shape1, shape2, lower.tail = TRUE, log.p = FALSE) rkumar(n, shape1, shape2) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{shape1, shape2}{ positive shape parameters. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. 
  }
}
\value{
  \code{dkumar} gives the density,
  \code{pkumar} gives the distribution function,
  \code{qkumar} gives the quantile function, and
  \code{rkumar} generates random deviates.
}
\author{ T. W. Yee and Kai Huang }
\details{
  See \code{\link{kumar}}, the \pkg{VGAM} family function
  for estimating the parameters,
  for the formula of the probability density function and other
  details.
}
%\note{
%}
\seealso{
  \code{\link{kumar}}.
}
\examples{
\dontrun{ shape1 <- 2; shape2 <- 2; nn <- 201; # shape1 <- shape2 <- 0.5;
x <- seq(-0.05, 1.05, len = nn)
plot(x, dkumar(x, shape1, shape2), type = "l", las = 1, ylim = c(0,1.5),
     ylab = paste("fkumar(shape1 = ", shape1,
                  ", shape2 = ", shape2, ")"),
     col = "blue", cex.main = 0.8,
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Purple lines are the 10,20,...,90 percentiles")
lines(x, pkumar(x, shape1, shape2), col = "orange")
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qkumar(probs, shape1, shape2)
lines(Q, dkumar(Q, shape1, shape2), col = "purple", lty = 3, type = "h")
lines(Q, pkumar(Q, shape1, shape2), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
max(abs(pkumar(Q, shape1, shape2) - probs))  # Should be 0
}
}
\keyword{distribution}
VGAM/man/biclaytoncop.Rd0000644000176200001440000000765113565414527014551 0ustar liggesusers\name{biclaytoncop}
\alias{biclaytoncop}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Clayton Copula (Bivariate) Family Function }
\description{
  Estimate the correlation parameter of the (bivariate) Clayton copula
  distribution by maximum likelihood estimation.
}
\usage{
biclaytoncop(lapar = "loglink", iapar = NULL, imethod = 1,
             parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lapar, iapar, imethod}{
  Details at \code{\link{CommonVGAMffArguments}}.
  See \code{\link{Links}} for more link function choices.
  }
  \item{parallel, zero}{
  Details at \code{\link{CommonVGAMffArguments}}.
  If \code{parallel = TRUE} then the constraint is also applied to the
  intercept.
  }
}
\details{
  The cumulative distribution function is
  \deqn{P(u_1, u_2;\alpha) = (u_1^{-\alpha} +
        u_2^{-\alpha}-1)^{-1/\alpha}}{%
        P(u1,u2,alpha) = (u1^(-alpha) + u2^(-alpha)-1)^(-1/alpha)}
  for \eqn{0 \leq \alpha }{0 <= alpha}.
  Here, \eqn{\alpha}{alpha} is the association parameter.
  The support of the function is the interior of the unit square;
  however, values of 0 and/or 1 are not allowed (currently).
  The marginal distributions are the standard uniform distributions.
  When \eqn{\alpha = 0}{alpha=0} the random variables are independent.

  This \pkg{VGAM} family function can handle multiple responses, for
  example, a six-column matrix where the first 2 columns form the
  first of three responses, the next 2 columns form the second
  response, etc.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\references{
%A Model for Association in Bivariate Survival Data.

Clayton, D. (1982)
A model for association in bivariate survival data.
\emph{Journal of the Royal Statistical Society, Series B,
Methodological},
\bold{44}, 414--422.

Stober, J. and Schepsmeier, U. (2013)
Derivatives and Fisher information of bivariate copulas.
\emph{Statistical Papers}.
}
\author{ R. Feyter and T. W. Yee }
\note{
  The response matrix must have a multiple of two columns.
  Currently, the fitted value is a matrix with the same number of
  columns and values equal to 0.5.
This is because each marginal distribution corresponds to a standard uniform distribution. This \pkg{VGAM} family function is fragile; each response must be in the interior of the unit square. % Setting \code{crit = "coef"} is sometimes a good idea because % inaccuracies in \code{\link{pbinorm}} might mean % unnecessary half-stepping will occur near the solution. } \seealso{ \code{\link{rbiclaytoncop}}, \code{\link{dbiclaytoncop}}, \code{\link{kendall.tau}}. } \examples{ ymat <- rbiclaytoncop(n = (nn <- 1000), apar = exp(2)) bdata <- data.frame(y1 = ymat[, 1], y2 = ymat[, 2], y3 = ymat[, 1], y4 = ymat[, 2], x2 = runif(nn)) summary(bdata) \dontrun{ plot(ymat, col = "blue") } fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1, # 2 responses, e.g., (y1,y2) is the first biclaytoncop, data = bdata, trace = TRUE, crit = "coef") # Sometimes a good idea coef(fit1, matrix = TRUE) Coef(fit1) head(fitted(fit1)) summary(fit1) # Another example; apar is a function of x2 bdata <- transform(bdata, apar = exp(-0.5 + x2)) ymat <- rbiclaytoncop(n = nn, apar = with(bdata, apar)) bdata <- transform(bdata, y5 = ymat[, 1], y6 = ymat[, 2]) fit2 <- vgam(cbind(y5, y6) ~ s(x2), data = bdata, biclaytoncop(lapar = "loglink"), trace = TRUE) \dontrun{ plot(fit2, lcol = "blue", scol = "orange", se = TRUE, las = 1) } } \keyword{models} \keyword{regression} % for real \eqn{\alpha}{alpha} in (-1,1). VGAM/man/studentt.Rd0000644000176200001440000001227513565414527013743 0ustar liggesusers\name{studentt} \alias{studentt} \alias{studentt2} \alias{studentt3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Student t Distribution } \description{ Estimating the parameters of a Student t distribution. } \usage{ studentt (ldf = "logloglink", idf = NULL, tol1 = 0.1, imethod = 1) studentt2(df = Inf, llocation = "identitylink", lscale = "loglink", ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale") studentt3(llocation = "identitylink", lscale = "loglink", ldf = "logloglink", ilocation = NULL, iscale = NULL, idf = NULL, imethod = 1, zero = c("scale", "df")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation, lscale, ldf}{ Parameter link functions for each parameter, e.g., for degrees of freedom \eqn{\nu}{nu}. See \code{\link{Links}} for more choices. The defaults ensures the parameters are in range. A \code{\link{loglog}} link keeps the degrees of freedom greater than unity; see below. } \item{ilocation, iscale, idf}{ Optional initial values. If given, the values must be in range. The default is to compute an initial value internally. } \item{tol1}{ A positive value, the tolerance for testing whether an initial value is 1. Best to leave this argument alone. } \item{df}{ Numeric, user-specified degrees of freedom. It may be of length equal to the number of columns of a response matrix. } \item{imethod, zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The Student t density function is \deqn{f(y;\nu) = \frac{\Gamma((\nu+1)/2)}{\sqrt{\nu \pi} \Gamma(\nu/2)} \left(1 + \frac{y^2}{\nu} \right)^{-(\nu+1)/2}}{% f(y;nu) = (gamma((nu+1)/2) / (sqrt(nu*pi) gamma(nu/2))) * (1 + y^2 / nu)^{-(nu+1)/2}} for all real \eqn{y}. Then \eqn{E(Y)=0} if \eqn{\nu>1}{nu>1} (returned as the fitted values), and \eqn{Var(Y)= \nu/(\nu-2)}{Var(Y)= nu/(nu-2)} for \eqn{\nu > 2}{nu > 2}. When \eqn{\nu=1}{nu=1} then the Student \eqn{t}-distribution corresponds to the standard Cauchy distribution, \code{\link{cauchy1}}. 
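  This special case is easily verified numerically, e.g., by a quick
  check with the base R density functions:

\preformatted{
xx <- seq(-3, 3, by = 0.5)
max(abs(dt(xx, df = 1) - dcauchy(xx)))  # Essentially 0
}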
When \eqn{\nu=2}{nu=2} with a scale parameter of \code{sqrt(2)} then the Student \eqn{t}-distribution corresponds to the standard (Koenker) distribution, \code{\link{sc.studentt2}}. The degrees of freedom can be treated as a parameter to be estimated, and as a real and not an integer. The Student t distribution is used for a variety of reasons in statistics, including robust regression. Let \eqn{Y = (T - \mu) / \sigma}{Y = (T - mu) / sigma} where \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are the location and scale parameters respectively. Then \code{studentt3} estimates the location, scale and degrees of freedom parameters. And \code{studentt2} estimates the location, scale parameters for a user-specified degrees of freedom, \code{df}. And \code{studentt} estimates the degrees of freedom parameter only. The fitted values are the location parameters. By default the linear/additive predictors are \eqn{(\mu, \log(\sigma), \log\log(\nu))^T}{(mu, log(sigma), log log(nu))^T} or subsets thereof. In general convergence can be slow, especially when there are covariates. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Student (1908) The probable error of a mean. \emph{Biometrika}, \bold{6}, 1--25. Zhu, D. and Galbraith, J. W. (2010) A generalized asymmetric Student-\emph{t} distribution with application to financial econometrics. \emph{Journal of Econometrics}, \bold{157}, 297--305. } \author{ T. W. Yee } \note{ \code{studentt3()} and \code{studentt2()} can handle multiple responses. Practical experience has shown reasonably good initial values are required. If convergence failure occurs try using arguments such as \code{idf}. Local solutions are also possible, especially when the degrees of freedom is close to unity or the scale parameter is close to zero. A standard normal distribution corresponds to a \emph{t} distribution with infinite degrees of freedom. Consequently, if the data is close to normal, there may be convergence problems; best to use \code{\link{uninormal}} instead. } \seealso{ \code{\link{uninormal}}, \code{\link{cauchy1}}, \code{\link{logistic}}, \code{\link{huber2}}, \code{\link{sc.studentt2}}, \code{\link[stats]{TDist}}, \code{\link{simulate.vlm}}. } \examples{ tdata <- data.frame(x2 = runif(nn <- 1000)) tdata <- transform(tdata, y1 = rt(nn, df = exp(exp(0.5 - x2))), y2 = rt(nn, df = exp(exp(0.5 - x2)))) fit1 <- vglm(y1 ~ x2, studentt, data = tdata, trace = TRUE) coef(fit1, matrix = TRUE) fit2 <- vglm(y1 ~ x2, studentt2(df = exp(exp(0.5))), data = tdata) coef(fit2, matrix = TRUE) # df inputted into studentt2() not quite right fit3 <- vglm(cbind(y1, y2) ~ x2, studentt3, data = tdata, trace = TRUE) coef(fit3, matrix = TRUE) } \keyword{models} \keyword{regression} %Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) %\emph{Statistical Distributions}, %Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. VGAM/man/tikuvUC.Rd0000644000176200001440000000546513565414527013466 0ustar liggesusers\name{Tikuv} \alias{Tikuv} \alias{dtikuv} \alias{ptikuv} \alias{qtikuv} \alias{rtikuv} \title{A Short-tailed Symmetric Distribution } \description{ Density, cumulative distribution function, quantile function and random generation for the short-tailed symmetric distribution of Tiku and Vaughan (1999). 
} \usage{ dtikuv(x, d, mean = 0, sigma = 1, log = FALSE) ptikuv(q, d, mean = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) qtikuv(p, d, mean = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE, ...) rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{d, mean, sigma }{ arguments for the parameters of the distribution. See \code{\link{tikuv}} for more details. For \code{rtikuv}, arguments \code{mean} and \code{sigma} must be of length 1. } \item{Smallno}{ Numeric, a small value used by the rejection method for determining the lower and upper limits of the distribution. That is, \code{ptikuv(L) < Smallno} and \code{ptikuv(U) > 1-Smallno} where \code{L} and \code{U} are the lower and upper limits respectively. } \item{\ldots}{ Arguments that can be passed into \code{\link[stats]{uniroot}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dtikuv} gives the density, \code{ptikuv} gives the cumulative distribution function, \code{qtikuv} gives the quantile function, and \code{rtikuv} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{tikuv}} for more details. } %\note{ % %} \seealso{ \code{\link{tikuv}}. } \examples{ \dontrun{ par(mfrow = c(2, 1)) x <- seq(-5, 5, len = 401) plot(x, dnorm(x), type = "l", col = "black", ylab = "", las = 1, main = "Black is standard normal, others are dtikuv(x, d)") lines(x, dtikuv(x, d = -10), col = "orange") lines(x, dtikuv(x, d = -1 ), col = "blue") lines(x, dtikuv(x, d = 1 ), col = "green") legend("topleft", col = c("orange","blue","green"), lty = rep(1, len = 3), legend = paste("d =", c(-10, -1, 1))) plot(x, pnorm(x), type = "l", col = "black", ylab = "", las = 1, main = "Black is standard normal, others are ptikuv(x, d)") lines(x, ptikuv(x, d = -10), col = "orange") lines(x, ptikuv(x, d = -1 ), col = "blue") lines(x, ptikuv(x, d = 1 ), col = "green") legend("topleft", col = c("orange","blue","green"), lty = rep(1, len = 3), legend = paste("d =", c(-10, -1, 1))) } probs <- seq(0.1, 0.9, by = 0.1) ptikuv(qtikuv(p = probs, d = 1), d = 1) - probs # Should be all 0 } \keyword{distribution} VGAM/man/betaII.Rd0000644000176200001440000000617513565414527013230 0ustar liggesusers\name{betaII} \alias{betaII} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Beta Distribution of the Second Kind } \description{ Maximum likelihood estimation of the 3-parameter beta II distribution. } \usage{ betaII(lscale = "loglink", lshape2.p = "loglink", lshape3.q = "loglink", iscale = NULL, ishape2.p = NULL, ishape3.q = NULL, imethod = 1, gscale = exp(-5:5), gshape2.p = exp(-5:5), gshape3.q = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. % probs.y = c(0.25, 0.5, 0.75), zero = -(2:3) \arguments{ \item{lscale, lshape2.p, lshape3.q}{ Parameter link functions applied to the (positive) parameters \code{scale}, \code{p} and \code{q}. See \code{\link{Links}} for more choices. } \item{iscale, ishape2.p, ishape3.q, imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{gscale, gshape2.p, gshape3.q}{ See \code{\link{CommonVGAMffArguments}} for information. 
} \item{probs.y}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 3-parameter beta II is the 4-parameter \emph{generalized} beta II distribution with shape parameter \eqn{a=1}. It is also known as the Pearson VI distribution. Other distributions which are special cases of the 3-parameter beta II include the Lomax (\eqn{p=1}) and inverse Lomax (\eqn{q=1}). More details can be found in Kleiber and Kotz (2003). The beta II distribution has density \deqn{f(y) = y^{p-1} / [b^p B(p,q) \{1 + y/b\}^{p+q}]}{% f(y) = y^(p-1) / [b^p B(p,q) (1 + y/b)^(p+q)]} for \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and the others are shape parameters. The mean is \deqn{E(Y) = b \, \Gamma(p + 1) \, \Gamma(q - 1) / (\Gamma(p) \, \Gamma(q))}{% E(Y) = b gamma(p + 1) gamma(q - 1) / ( gamma(p) gamma(q))} provided \eqn{q > 1}; these are returned as the fitted values. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}. } \seealso{ \code{\link{betaff}}, \code{\link{genbetaII}}, \code{\link{dagum}}, \code{\link{sinmad}}, \code{\link{fisk}}, \code{\link{inv.lomax}}, \code{\link{lomax}}, \code{\link{paralogistic}}, \code{\link{inv.paralogistic}}. } \examples{ bdata <- data.frame(y = rsinmad(2000, shape1.a = 1, shape3.q = exp(2), scale = exp(1))) # Not genuine data! fit <- vglm(y ~ 1, betaII, data = bdata, trace = TRUE) fit <- vglm(y ~ 1, betaII(ishape2.p = 0.7, ishape3.q = 0.7), data = bdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/vonmises.Rd0000644000176200001440000001045013565414527013725 0ustar liggesusers\name{vonmises} \alias{vonmises} %- Also NEED an '\alias' for EACH other topic documented here. \title{ von Mises Distribution Family Function } \description{ Estimates the location and scale parameters of the von Mises distribution by maximum likelihood estimation. } \usage{ vonmises(llocation = extlogitlink(min = 0, max = 2*pi), lscale = "loglink", ilocation = NULL, iscale = NULL, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation, lscale}{ Parameter link functions applied to the location \eqn{a} parameter and scale parameter \eqn{k}, respectively. See \code{\link{Links}} for more choices. For \eqn{k}, a log link is the default because the parameter is positive. } \item{ilocation}{ Initial value for the location \eqn{a} parameter. By default, an initial value is chosen internally using \code{imethod}. Assigning a value will override the argument \code{imethod}. } \item{iscale}{ Initial value for the scale \eqn{k} parameter. By default, an initial value is chosen internally using \code{imethod}. Assigning a value will override the argument \code{imethod}. } \item{imethod}{ An integer with value \code{1} or \code{2} which specifies the initialization method. If failure to converge occurs try the other value, or else specify a value for \code{ilocation} and \code{iscale}. } \item{zero}{ An integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. 
The default is none of them. If used, one can choose one value from the set \{1,2\}. See \code{\link{CommonVGAMffArguments}} for more information. } % \item{hstep}{ Positive numeric. % The \eqn{h} used for the finite difference % approximation, e.g., in \eqn{(f(x+h)-f(x))/h} for the first % derivative estimate of the modified Bessel function values. % If too small, some half stepsizing may occur; % if too large, numerical problems might occur. % } } \details{ The (two-parameter) von Mises is the most commonly used distribution in practice for circular data. It has a density that can be written as \deqn{f(y;a,k) = \frac{\exp[k\cos(y-a)]}{ 2\pi I_0(k)}}{% f(y;a,k) = exp[k*cos(y-a)] / (2*pi*I0(k))} where \eqn{0 \leq y < 2\pi}{0 <= y < 2*pi}, \eqn{k>0} is the scale parameter, \eqn{a} is the location parameter, and \eqn{I_0(k)}{I0(k)} is the modified Bessel function of order 0 evaluated at \eqn{k}. The mean of \eqn{Y} (which is the fitted value) is \eqn{a} and the circular variance is \eqn{1 - I_1(k) / I_0(k)}{1 - I1(k) / I0(k)} where \eqn{I_1(k)}{I1(k)} is the modified Bessel function of order 1. By default, \eqn{\eta_1=\log(a/(2\pi-a))}{eta1=log(a/(2*pi-a))} and \eqn{\eta_2=\log(k)}{eta2=log(k)} for this family function. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ The response and the fitted values are scaled so that \eqn{0\leq y< 2\pi}{0<=y<2*pi}. The linear/additive predictors are left alone. Fisher scoring is used. } \section{Warning }{ Numerically, the von Mises can be difficult to fit because the log-likelihood can have multiple maxima. The user is therefore encouraged to try different starting values, i.e., make use of \code{ilocation} and \code{iscale}. } \seealso{ \code{\link[base]{Bessel}}, \code{\link{cardioid}}. \pkg{CircStats} and \pkg{circular} currently have many more R functions for circular data than the \pkg{VGAM} package. } \examples{ vdata <- data.frame(x2 = runif(nn <- 1000)) vdata <- transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2))) # Bad data!! fit <- vglm(y ~ x2, vonmises(zero = 2), data = vdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) with(vdata, range(y)) # Original data range(depvar(fit)) # Processed data is in [0,2*pi) } \keyword{models} \keyword{regression} %later an extended logit link \eqn{\eta_1=\log(a/(2\pi-a))}{eta1=log(a/(2*pi-a))} %might be provided for \eqn{\eta_1}{eta1}. %\eqn{\eta_1=a}{eta1=a} and VGAM/man/bigumbelIexp.Rd0000644000176200001440000000560613565414527014505 0ustar liggesusers\name{bigumbelIexp} \alias{bigumbelIexp} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gumbel's Type I Bivariate Distribution Family Function } \description{ Estimate the association parameter of Gumbel's Type I bivariate distribution by maximum likelihood estimation. } \usage{ bigumbelIexp(lapar = "identitylink", iapar = NULL, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lapar}{ Link function applied to the association parameter \eqn{\alpha}{alpha}. See \code{\link{Links}} for more choices. } \item{iapar}{ Numeric. Optional initial value for \eqn{\alpha}{alpha}. By default, an initial value is chosen internally.
If a convergence failure occurs try assigning a different value. Assigning a value will override the argument \code{imethod}. } \item{imethod}{ An integer with value \code{1} or \code{2} which specifies the initialization method. If failure to converge occurs try the other value, or else specify a value for \code{iapar}. } } \details{ The cumulative distribution function is \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = e^{-y_1-y_2+\alpha y_1 y_2} + 1 - e^{-y_1} - e^{-y_2} }{% P(Y1 <= y1, Y2 <= y2) = exp(-y1-y2+alpha*y1*y2) + 1 - exp(-y1) - exp(-y2) } for real \eqn{\alpha}{alpha}. The support of the function is for \eqn{y_1>0}{y1>0} and \eqn{y_2>0}{y2>0}. The marginal distributions are exponential with unit mean. A variant of Newton-Raphson is used, which only seems to work for an intercept model. It is a very good idea to set \code{trace = TRUE}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ %Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005) %\emph{Extreme Value and Related Models with Applications in Engineering and Science}, %Hoboken, NJ, USA: Wiley-Interscience. Gumbel, E. J. (1960) Bivariate Exponential Distributions. \emph{Journal of the American Statistical Association}, \bold{55}, 698--707. % Journal of the American Statistical Association. % Vol. 55, No. 292, Dec., 1960 > Bivariate Exponentia. } \author{ T. W. Yee } \note{ The response must be a two-column matrix. Currently, the fitted value is a matrix with two columns and values equal to 1. This is because each marginal distribution corresponds to an exponential distribution with unit mean. This \pkg{VGAM} family function should be used with caution. } \seealso{ \code{\link{bifgmexp}}. } \examples{ nn <- 1000 gdata <- data.frame(y1 = rexp(nn), y2 = rexp(nn)) \dontrun{ with(gdata, plot(cbind(y1, y2))) } fit <- vglm(cbind(y1, y2) ~ 1, bigumbelIexp, data = gdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) head(fitted(fit)) } \keyword{models} \keyword{regression} VGAM/man/setup.smart.Rd0000644000176200001440000000441313565414527014351 0ustar liggesusers\name{setup.smart} \alias{setup.smart} \title{ Smart Prediction Setup } \description{ Sets up smart prediction in one of two modes: \code{"write"} and \code{"read"}. } \usage{ setup.smart(mode.arg, smart.prediction = NULL, max.smart = 30) } \arguments{ \item{mode.arg}{ \code{mode.arg} must be \code{"write"} or \code{"read"}. If in \code{"read"} mode then \code{smart.prediction} must be assigned the data structure \code{.smart.prediction} that was created while fitting. This is stored in \code{object@smart.prediction} or \code{object$smart.prediction} where \code{object} is the name of the fitted object. } \item{smart.prediction}{ If in \code{"read"} mode then \code{smart.prediction} must be assigned the list of data dependent parameters, which is stored on the fitted object. Otherwise, \code{smart.prediction} is ignored. } \item{max.smart}{ \code{max.smart} is the initial length of the list \code{.smart.prediction}. It is not important because \code{.smart.prediction} is made larger if needed. }} \value{ Nothing is returned. } \section{Side Effects}{ In \code{"write"} mode \code{.smart.prediction} in \code{smartpredenv} is assigned an empty list with \code{max.smart} components. In \code{"read"} mode \code{.smart.prediction} in \code{smartpredenv} is assigned \code{smart.prediction}.
Then \code{.smart.prediction.counter} in \code{smartpredenv} is assigned the value 0, and \code{.smart.prediction.mode} and \code{.max.smart} are written to \code{smartpredenv} too. } \details{ This function is only required by programmers writing a modelling function such as \code{\link[stats]{lm}} and \code{\link[stats]{glm}}, or a prediction function of such, e.g., \code{\link[stats]{predict.lm}}. The function \code{setup.smart} operates by mimicking the operations of a first-in first-out \emph{queue}. } \seealso{ \code{\link[stats]{lm}}, \code{\link[stats]{predict.lm}}. } \examples{ \dontrun{ setup.smart("write") # Put at the beginning of lm } \dontrun{# Put at the beginning of predict.lm setup.smart("read", smart.prediction = object$smart.prediction) } } %\keyword{smart} \keyword{models} \keyword{regression} \keyword{programming} % Converted by Sd2Rd version 1.10. VGAM/man/benfUC.Rd0000644000176200001440000000711713565414527013232 0ustar liggesusers\name{Benford} \alias{Benford} \alias{dbenf} \alias{pbenf} \alias{qbenf} \alias{rbenf} \title{ Benford's Distribution } \description{ Density, distribution function, quantile function, and random generation for Benford's distribution. } \usage{ dbenf(x, ndigits = 1, log = FALSE) pbenf(q, ndigits = 1, lower.tail = TRUE, log.p = FALSE) qbenf(p, ndigits = 1, lower.tail = TRUE, log.p = FALSE) rbenf(n, ndigits = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{ Vector of quantiles. See \code{ndigits}. } \item{p}{vector of probabilities.} \item{n}{number of observations. A single positive integer. Else if \code{length(n) > 1} then the length is taken to be the number required. } \item{ndigits}{ Number of leading digits, either 1 or 2. If 1 then the support of the distribution is \{1,\ldots,9\}, else \{10,\ldots,99\}. } \item{log, log.p}{ Logical. If \code{log.p = TRUE} then all probabilities \code{p} are given as \code{log(p)}. } \item{lower.tail}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \details{ Benford's Law (aka \emph{the significant-digit law}) is the empirical observation that in many naturally occurring tables of numerical data, the leading significant (nonzero) digit is not uniformly distributed in \eqn{\{1,2,\ldots,9\}}{1:9}. Instead, the leading significant digit (\eqn{=D}, say) obeys the law \deqn{P(D=d) = \log_{10} \left( 1 + \frac1d \right)}{% P(D=d) = log10(1 + 1/d)} for \eqn{d=1,\ldots,9}. This means the probability the first significant digit is 1 is approximately \eqn{0.301}, etc. Benford's Law was apparently first discovered in 1881 by astronomer/mathematician S. Newcomb. It arose from the observation that the pages of a book of logarithms were dirtiest at the beginning and progressively cleaner throughout. In 1938, a General Electric physicist called F. Benford rediscovered the law based on this same observation. Over several years he collected data from sources as different as atomic weights, baseball statistics, numerical data from \emph{Reader's Digest}, and drainage areas of rivers. Applications of Benford's Law have been as diverse as fraud detection in accounting and the design of computers. } \value{ \code{dbenf} gives the density, \code{pbenf} gives the distribution function, \code{qbenf} gives the quantile function, and \code{rbenf} generates random deviates. } \references{ Benford, F. (1938) The Law of Anomalous Numbers.
\emph{Proceedings of the American Philosophical Society}, \bold{78}, 551--572. Newcomb, S. (1881) Note on the Frequency of Use of the Different Digits in Natural Numbers. \emph{American Journal of Mathematics}, \bold{4}, 39--40. } \author{ T. W. Yee and Kai Huang } %\note{ % Currently only the leading digit is handled. % The first two leading digits would be the next simple extension. % %} %\seealso{ % \code{\link{logff}}. %} \examples{ dbenf(x <- c(0:10, NA, NaN, -Inf, Inf)) pbenf(x) \dontrun{ xx <- 1:9 barplot(dbenf(xx), col = "lightblue", las = 1, xlab = "Leading digit", ylab = "Probability", names.arg = as.character(xx), main = paste("Benford's distribution", sep = "")) hist(rbenf(n = 1000), border = "blue", prob = TRUE, main = "1000 random variates from Benford's distribution", xlab = "Leading digit", sub="Red is the true probability", breaks = 0:9 + 0.5, ylim = c(0, 0.35), xlim = c(0, 10.0)) lines(xx, dbenf(xx), col = "red", type = "h") points(xx, dbenf(xx), col = "red") } } \keyword{distribution} VGAM/man/lms.bcn.Rd0000644000176200001440000002301613565414527013420 0ustar liggesusers\name{lms.bcn} \alias{lms.bcn} %- Also NEED an '\alias' for EACH other topic documented here. \title{ LMS Quantile Regression with a Box-Cox Transformation to Normality } \description{ LMS quantile regression with the Box-Cox transformation to normality. } \usage{ lms.bcn(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"), llambda = "identitylink", lmu = "identitylink", lsigma = "loglink", idf.mu = 4, idf.sigma = 2, ilambda = 1, isigma = NULL, tol0 = 0.001) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{percentiles}{ A numerical vector containing values between 0 and 100, which are the quantiles. They will be returned as `fitted values'. % or expectiles. % 20140624; withdrawn 'expectiles'. % isigma = NULL, tol0 = 0.001, expectiles = FALSE } \item{zero}{ Can be an integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The values must be from the set \{1,2,3\}. The default value usually increases the chance of successful convergence. Setting \code{zero = NULL} means they all are functions of the covariates. For more information see \code{\link{CommonVGAMffArguments}}. } \item{llambda, lmu, lsigma}{ Parameter link functions applied to the first, second and third linear/additive predictors. See \code{\link{Links}} for more choices, and \code{\link{CommonVGAMffArguments}}. } \item{idf.mu}{ Degrees of freedom for the cubic smoothing spline fit applied to get an initial estimate of mu. See \code{\link{vsmooth.spline}}. } \item{idf.sigma}{ Degrees of freedom for the cubic smoothing spline fit applied to get an initial estimate of sigma. See \code{\link{vsmooth.spline}}. This argument may be assigned \code{NULL} to get an initial value using some other algorithm. } \item{ilambda}{ Initial value for lambda. If necessary, it is recycled to be a vector of length \eqn{n} where \eqn{n} is the number of (independent) observations. } \item{isigma}{ Optional initial value for sigma. If necessary, it is recycled to be a vector of length \eqn{n}. The default value, \code{NULL}, means an initial value is computed in the \code{@initialize} slot of the family function. } \item{tol0}{ Small positive number, the tolerance for testing if lambda is equal to zero. } % \item{expectiles}{ % Experimental; please do not use. % A single logical. 
If \code{TRUE} then the method is LMS-expectile % regression; \emph{expectiles} are returned rather than quantiles. % The default is LMS quantile regression based on the normal distribution. % } } \details{ Given a value of the covariate, this function applies a Box-Cox transformation to the response to best obtain normality. The parameters chosen to do this are estimated by maximum likelihood or penalized maximum likelihood. In more detail, the basic idea behind this method is that, for a fixed value of \eqn{x}, a Box-Cox transformation of the response \eqn{Y} is applied to obtain standard normality. The 3 parameters (\eqn{\lambda}{lambda}, \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, which start with the letters ``L-M-S'' respectively, hence its name) are chosen to maximize a penalized log-likelihood (with \code{\link{vgam}}). Then the appropriate quantiles of the standard normal distribution are back-transformed onto the original scale to get the desired quantiles. The three parameters may vary as a smooth function of \eqn{x}. The Box-Cox power transformation here of the \eqn{Y}, given \eqn{x}, is \deqn{Z = [(Y / \mu(x))^{\lambda(x)} - 1] / ( \sigma(x) \, \lambda(x) )}{ Z = [(Y / mu(x))^{lambda(x)} - 1] / (sigma(x) * lambda(x))} for \eqn{\lambda(x) \neq 0}{lambda(x) != 0}. (The singularity at \eqn{\lambda(x) = 0}{lambda(x) = 0} is handled by a simple function involving a logarithm.) Then \eqn{Z} is assumed to have a standard normal distribution. The parameter \eqn{\sigma(x)}{sigma(x)} must be positive, therefore \pkg{VGAM} chooses \eqn{\eta(x)^T = (\lambda(x), \mu(x), \log(\sigma(x)))}{eta(x)^T = (lambda(x), mu(x), log(sigma(x)))} by default. The parameter \eqn{\mu}{mu} is also positive, but while \eqn{\log(\mu)}{log(mu)} is available, it is not the default because \eqn{\mu}{mu} is more directly interpretable. Given the estimated linear/additive predictors, the \eqn{100\alpha}{100*alpha} percentile can be estimated by inverting the Box-Cox power transformation at the \eqn{100\alpha}{100*alpha} percentile of the standard normal distribution. Of the three functions, it is often a good idea to allow \eqn{\mu(x)}{mu(x)} to be more flexible because the functions \eqn{\lambda(x)}{lambda(x)} and \eqn{\sigma(x)}{sigma(x)} usually vary more smoothly with \eqn{x}. This is somewhat reflected in the default value for the argument \code{zero}, viz. \code{zero = c(1, 3)}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Cole, T. J. and Green, P. J. (1992) Smoothing Reference Centile Curves: The LMS Method and Penalized Likelihood. \emph{Statistics in Medicine}, \bold{11}, 1305--1319. Green, P. J. and Silverman, B. W. (1994) \emph{Nonparametric Regression and Generalized Linear Models: A Roughness Penalty Approach}, London: Chapman & Hall. Yee, T. W. (2004) Quantile regression via vector generalized additive models. \emph{Statistics in Medicine}, \bold{23}, 2295--2315. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ The response must be positive because the Box-Cox transformation cannot handle negative values. In theory, the LMS-Yeo-Johnson-normal method can handle both positive and negative values. % LMS-BCN expectile regression is a \emph{new} methodology proposed % by myself! 
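To illustrate the back-transformation described in the \code{Details} section, here is a minimal sketch; the values of the three functions at a given \eqn{x} are arbitrary and purely illustrative:
\preformatted{
lambda <- 0.5;  mu <- 25;  sigma <- 0.1  # Hypothetical values at some x
zalpha <- qnorm(0.90)  # Standard normal quantile for the 90th percentile
mu * (1 + lambda * sigma * zalpha)^(1 / lambda)  # Estimated 90th percentile
}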
In general, the lambda and sigma functions should be smoother than the mean function. Having \code{zero = 1}, \code{zero = 3} or \code{zero = c(1, 3)} is often a good idea. See the example below. % While it is usual to regress the response against a single % covariate, it is possible to add other explanatory variables, % e.g., gender. % See % \url{http://www.stat.auckland.ac.nz/~yee} % for further information and examples about this feature. } \section{Warning }{ The computations are not simple, therefore convergence may fail. Set \code{trace = TRUE} to monitor convergence if it isn't set already. Convergence failure will occur if, e.g., the response is bimodal at any particular value of \eqn{x}. In case of convergence failure, try different starting values. Also, the estimate may diverge quickly near the solution, in which case try prematurely stopping the iterations by assigning \code{maxits} to be the iteration number corresponding to the highest likelihood value. One trick is to fit a simple model and use it to provide initial values for a more complex model; see the examples below. } \seealso{ \code{\link{lms.bcg}}, \code{\link{lms.yjn}}, \code{\link{qtplot.lmscreg}}, \code{\link{deplot.lmscreg}}, \code{\link{cdf.lmscreg}}, \code{\link{alaplace1}}, \code{\link{amlnormal}}, \code{\link{denorm}}, \code{\link{CommonVGAMffArguments}}. % \code{\link{bmi.nz}}, } \examples{ \dontrun{ require("VGAMdata") mysubset <- subset(xs.nz, sex == "M" & ethnicity == "Maori" & study1) mysubset <- transform(mysubset, BMI = weight / height^2) BMIdata <- na.omit(mysubset) BMIdata <- subset(BMIdata, BMI < 80 & age < 65, select = c(age, BMI)) # Delete an outlier summary(BMIdata) fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = BMIdata) par(mfrow = c(1, 2)) plot(fit, scol = "blue", se = TRUE) # The two centered smooths head(predict(fit)) head(fitted(fit)) head(BMIdata) head(cdf(fit)) # Person 46 is probably overweight, given his age 100 * colMeans(depvar(fit, drop = TRUE) < fitted(fit)) # Empirical proportions # Correct for "vgam" objects but not very elegant: fit@family@linkinv(eta = predict(fit, data.frame(age = 60)), extra = list(percentiles = c(10, 50))) if (FALSE) { # These work for "vglm" objects: fit2 <- vglm(BMI ~ bs(age, df = 4), lms.bcn(zero = 3), data = BMIdata) predict(fit2, percentiles = c(10, 50), newdata = data.frame(age = 60), type = "response") # A specific age head(fitted(fit2, percentiles = c(10, 50))) # Get different percentiles } # Convergence problems?
Try this trick: fit0 is a simpler model used for fit1 fit0 <- vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1, 3)), data = BMIdata) fit1 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = BMIdata, etastart = predict(fit0)) } \dontrun{ # Quantile plot par(bty = "l", mar = c(5, 4, 4, 3) + 0.1, xpd = TRUE) qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles", xlim = c(15, 66), las = 1, ylab = "BMI", lwd = 2, lcol = 4) # Density plot ygrid <- seq(15, 43, len = 100) # BMI ranges par(mfrow = c(1, 1), lwd = 2) (aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black", main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)")) aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red") aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue", Attach = TRUE) aa@post$deplot # Contains density function values } } \keyword{models} \keyword{regression} % BMIdata <- subset(mysubset, select = c(age, BMI)) % BMIdata <- mysubset[, c("age", "BMI")] VGAM/man/lomax.Rd0000644000176200001440000000616313565414527013210 0ustar liggesusers\name{lomax} \alias{lomax} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Lomax Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter Lomax distribution. } \usage{ lomax(lscale = "loglink", lshape3.q = "loglink", iscale = NULL, ishape3.q = NULL, imethod = 1, gscale = exp(-5:5), gshape3.q = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75), zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lscale, lshape3.q}{ Parameter link function applied to the (positive) parameters \code{scale} and \code{q}. See \code{\link{Links}} for more choices. } \item{iscale, ishape3.q, imethod}{ See \code{\link{CommonVGAMffArguments}} for information. For \code{imethod = 2} a good initial value for \code{iscale} is needed to obtain a good estimate for the other parameter. } \item{gscale, gshape3.q, zero, probs.y}{ See \code{\link{CommonVGAMffArguments}}. } % \item{zero}{ % An integer-valued vector specifying which % linear/additive predictors are modelled as intercepts only. % Here, the values must be from the set \{1,2\} which correspond to % \code{scale}, \code{q}, respectively. % } } \details{ The 2-parameter Lomax distribution is the 4-parameter generalized beta II distribution with shape parameters \eqn{a=p=1}. It is probably more widely known as the Pareto (II) distribution. It is also the 3-parameter Singh-Maddala distribution with shape parameter \eqn{a=1}, as well as the beta distribution of the second kind with \eqn{p=1}. More details can be found in Kleiber and Kotz (2003). The Lomax distribution has density \deqn{f(y) = q / [b \{1 + y/b\}^{1+q}]}{% f(y) = q / [b (1 + y/b)^(1+q)]} for \eqn{b > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and \code{q} is a shape parameter. The cumulative distribution function is \deqn{F(y) = 1 - [1 + (y/b)]^{-q}.}{% F(y) = 1 - [1 + (y/b)]^(-q).} The mean is \deqn{E(Y) = b/(q-1)}{% E(Y) = b/(q-1)} provided \eqn{q > 1}; these are returned as the fitted values. This family function handles multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. 
} \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}. } \seealso{ \code{\link{Lomax}}, \code{\link{genbetaII}}, \code{\link{betaII}}, \code{\link{dagum}}, \code{\link{sinmad}}, \code{\link{fisk}}, \code{\link{inv.lomax}}, \code{\link{paralogistic}}, \code{\link{inv.paralogistic}}, \code{\link{simulate.vlm}}. } \examples{ ldata <- data.frame(y = rlomax(n = 1000, scale = exp(1), exp(2))) fit <- vglm(y ~ 1, lomax, data = ldata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/inv.gaussianff.Rd0000644000176200001440000000637713565414527015020 0ustar liggesusers\name{inv.gaussianff} \alias{inv.gaussianff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Inverse Gaussian Distribution Family Function } \description{ Estimates the two parameters of the inverse Gaussian distribution by maximum likelihood estimation. } \usage{ inv.gaussianff(lmu = "loglink", llambda = "loglink", imethod = 1, ilambda = NULL, parallel = FALSE, ishrinkage = 0.99, zero = NULL) } %- maybe also 'usage' for other objects documented here. %apply.parint = FALSE, \arguments{ \item{lmu, llambda}{ Parameter link functions for the \eqn{\mu}{mu} and \eqn{\lambda}{lambda} parameters. See \code{\link{Links}} for more choices. } \item{ilambda, parallel}{ See \code{\link{CommonVGAMffArguments}} for more information. If \code{parallel = TRUE} then the constraint is not applied to the intercept. } \item{imethod, ishrinkage, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The standard (``canonical'') form of the inverse Gaussian distribution has a density that can be written as \deqn{f(y;\mu,\lambda) = \sqrt{\lambda/(2\pi y^3)} \exp\left(-\lambda (y-\mu)^2/(2 \mu^2 y)\right)}{% f(y;mu,lambda) = sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-mu)^2/(2*mu^2*y)) } where \eqn{y>0}, \eqn{\mu>0}{mu>0}, and \eqn{\lambda>0}{lambda>0}. The mean of \eqn{Y} is \eqn{\mu}{mu} and its variance is \eqn{\mu^3/\lambda}{mu^3/lambda}. By default, \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and \eqn{\eta_2=\log(\lambda)}{eta2=log(lambda)}. The mean is returned as the fitted values. This \pkg{VGAM} family function can handle multiple responses (inputted as a matrix). } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994) \emph{Continuous Univariate Distributions}, 2nd edition, Volume 1, New York: Wiley. Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ The inverse Gaussian distribution can be fitted (to a certain extent) using the usual GLM framework involving a scale parameter. This family function is different from that approach in that it estimates both parameters by full maximum likelihood estimation. } \seealso{ \code{\link{Inv.gaussian}}, \code{\link{waldff}}, \code{\link{bisa}}. The \R{} package \pkg{SuppDists} has several functions for evaluating the density, distribution function, quantile function and generating random numbers from the inverse Gaussian distribution. 
} \examples{ idata <- data.frame(x2 = runif(nn <- 1000)) idata <- transform(idata, mymu = exp(2 + 1 * x2), Lambda = exp(2 + 1 * x2)) idata <- transform(idata, y = rinv.gaussian(nn, mu = mymu, lambda = Lambda)) fit1 <- vglm(y ~ x2, inv.gaussianff, data = idata, trace = TRUE) rrig <- rrvglm(y ~ x2, inv.gaussianff, data = idata, trace = TRUE) coef(fit1, matrix = TRUE) coef(rrig, matrix = TRUE) Coef(rrig) summary(fit1) } \keyword{models} \keyword{regression} VGAM/man/oiposbinomial.Rd0000644000176200001440000001445013565414527014732 0ustar liggesusers\name{oiposbinomial} \alias{oiposbinomial} %\alias{oiposbinomialff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Inflated Positive Binomial Distribution Family Function } \description{ Fits a one-inflated positive binomial distribution by maximum likelihood estimation. } \usage{ oiposbinomial(lpstr1 = "logitlink", lprob = "logitlink", type.fitted = c("mean", "prob", "pobs1", "pstr1", "onempstr1"), iprob = NULL, gpstr1 = ppoints(9), gprob = ppoints(9), multiple.responses = FALSE, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr1, lprob}{ Link functions for the parameter \eqn{\phi}{phi} and the positive binomial probability \eqn{\mu}{prob} parameter. See \code{\link{Links}} for more choices. See \code{\link{CommonVGAMffArguments}} also. For the one-\emph{deflated} model see below. } % \item{epstr1, eprob}{ % epstr1 = list(), eprob = list(), % List. Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}}. } \item{iprob, gpstr1, gprob}{ For initial values; see \code{\link{CommonVGAMffArguments}}. } % \item{lonempstr1, ionempstr1}{ % Corresponding arguments for the other parameterization. % See details below. % } % \item{zero}{ % An integer specifying which linear/additive predictor is modelled % as intercepts only. If given, the value must be either 1 or 2, % and the default is the first. Setting \code{zero = NULL} enables both % \eqn{\phi}{phi} and \eqn{\mu}{prob} to be modelled as a function of % the explanatory variables. % See \code{\link{CommonVGAMffArguments}} for more information. % } \item{multiple.responses}{ Logical. See \code{\link{binomialff}} and \code{\link{posbinomial}}. % Currently it must be \code{FALSE} to mean the % function does not handle multiple responses. This % is to remain compatible with the same argument in % \code{\link{binomialff}}. } \item{zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ These functions are based on \deqn{P(Y=y) = \phi + (1-\phi) N \mu (1-\mu)^N / (1-(1-\mu)^N),}{% P(Y=y) = phi + (1- phi) * N * prob * (1-prob)^N / (1-(1-prob)^N),} for \eqn{y=1/N}, and \deqn{P(Y=y) = (1-\phi) {N \choose Ny} \mu^{Ny} (1-\mu)^{N(1-y)} / (1-(1-\mu)^N).}{% P(Y=y) = (1-phi) * choose(N,Ny) * prob^(N*y) * (1-prob)^(N*(1-y)) / (1-(1-prob)^N).} for \eqn{y=2/N,\ldots,1}. That is, the response is a sample proportion out of \eqn{N} trials, and the argument \code{size} in \code{\link{roiposbinom}} is \eqn{N} here. Ideally \eqn{N > 2} is needed. The parameter \eqn{\phi}{phi} is the probability of a structural one, and it satisfies \eqn{0 < \phi < 1}{0 < phi < 1} (usually). The mean of \eqn{Y} is \eqn{E(Y)=\phi + (1-\phi) \mu / (1-(1-\mu)^N)}{E(Y) = phi + (1-phi) * prob / (1-(1-prob)^N)} and these are returned as the default fitted values. 
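As a quick numerical check of this mean formula, here is a minimal sketch (the parameter values are arbitrary):
\preformatted{
phi <- 0.2;  N <- 10;  prob <- 0.3  # Arbitrary illustrative values
phi + (1 - phi) * prob / (1 - (1 - prob)^N)  # Theoretical E(Y)
mean(roiposbinom(1e5, N, prob, pstr1 = phi) / N)  # Empirical mean proportion
}
The two values should agree closely.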
By default, the two linear/additive predictors for \code{oiposbinomial()} are \eqn{(logit(\phi), logit(\mu))^T}{(logit(phi), logit(prob))^T}. % The \pkg{VGAM} family function \code{oiposbinomialff()} has a few % changes compared to \code{oiposbinomial()}. % These are: % (i) the order of the linear/additive predictors is switched so the % binomial probability comes first; % (ii) argument \code{onempstr1} is now 1 minus % the probability of a structural zero, i.e., % the probability of the parent (binomial) component, % i.e., \code{onempstr1} is \code{1-pstr1}; % (iii) argument \code{zero} has a new default so that the \code{onempstr1} % is intercept-only by default. % Now \code{oiposbinomialff()} is generally recommended over % \code{oiposbinomial()}. % Both functions implement Fisher scoring. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } %\references{ % % %} \author{ T. W. Yee } \note{ The response variable should have one of the formats described by \code{\link{binomialff}}, e.g., a factor or two column matrix or a vector of sample proportions with the \code{weights} argument specifying the values of \eqn{N}. To work well, one ideally needs large values of \eqn{N} and \eqn{\mu}{prob} much greater than 0, i.e., the larger \eqn{N} and \eqn{\mu}{prob} are, the better. If \eqn{N = 1} then the model is unidentifiable since the number of parameters is excessive. % 20130316; adding this: Estimated probabilities of a structural one and an observed one are returned, as in \code{\link{zipoisson}}. The one-\emph{deflated} positive binomial distribution might be fitted by setting \code{lpstr1 = "identitylink"}, albeit, not entirely reliably. See \code{\link{zipoisson}} for information that can be applied here. % Else try the one-altered positive binomial distribution (see % \code{\link{oabinomial}}). } \seealso{ \code{\link{roiposbinom}}, \code{\link{posbinomial}}, \code{\link{binomialff}}, \code{\link[stats:Binomial]{rbinom}}. } \examples{ size <- 10 # Number of trials; N in the notation above nn <- 200 odata <- data.frame(pstr1 = logitlink( 0, inverse = TRUE), # 0.50 mubin1 = logitlink(-1, inverse = TRUE), # Mean of usual binomial svec = rep(size, length = nn), x2 = runif(nn)) odata <- transform(odata, mubin2 = logitlink(-1 + x2, inverse = TRUE)) odata <- transform(odata, y1 = roiposbinom(nn, svec, pr = mubin1, pstr1 = pstr1), y2 = roiposbinom(nn, svec, pr = mubin2, pstr1 = pstr1)) with(odata, table(y1)) fit1 <- vglm(y1 / svec ~ 1, oiposbinomial, data = odata, weights = svec, trace = TRUE, crit = "coef") fit2 <- vglm(y2 / svec ~ x2, oiposbinomial, data = odata, weights = svec, trace = TRUE) coef(fit1, matrix = TRUE) Coef(fit1) # Useful for intercept-only models head(fitted(fit1, type = "pobs1")) # Estimate of P(Y = 1) head(fitted(fit1)) with(odata, mean(y1)) # Compare this with fitted(fit1) summary(fit1) } \keyword{models} \keyword{regression} % fit@misc$pobs0 # Estimate of P(Y = 0) VGAM/man/bortUC.Rd0000644000176200001440000000315213565414527013261 0ustar liggesusers\name{Bort} \alias{Bort} \alias{dbort} %\alias{pbort} %\alias{qbort} \alias{rbort} \title{The Borel-Tanner Distribution} \description{ Density and random generation for the Borel-Tanner distribution. 
% distribution function, quantile function } \usage{ dbort(x, Qsize = 1, a = 0.5, log = FALSE) rbort(n, Qsize = 1, a = 0.5) } %pbort(q, Qsize = 1, a = 0.5) %qbort(p, Qsize = 1, a = 0.5) \arguments{ \item{x}{vector of quantiles.} % \item{p}{vector of probabilities.} \item{n}{number of observations. Must be a positive integer of length 1.} \item{Qsize, a}{ See \code{\link{borel.tanner}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{dbort} gives the density, \code{rbort} generates random deviates. % \code{pbort} gives the distribution function, % \code{qbort} gives the quantile function, and } \author{ T. W. Yee } \details{ See \code{\link{borel.tanner}}, the \pkg{VGAM} family function for estimating the parameter, for the formula of the probability density function and other details. } \section{Warning }{ Looping is used for \code{\link{rbort}}, therefore values of \code{a} close to 1 will result in long (or infinite!) computational times. The default value of \code{a} is subjective. } \seealso{ \code{\link{borel.tanner}}. } \examples{ \dontrun{ qsize <- 1; a <- 0.5; x <- qsize:(qsize+10) plot(x, dbort(x, qsize, a), type = "h", las = 1, col = "blue", ylab = paste("fbort(qsize=", qsize, ", a=", a, ")"), log = "y", main = "Borel-Tanner density function") } } \keyword{distribution} VGAM/man/simplex.Rd0000644000176200001440000000630313565414527013545 0ustar liggesusers\name{simplex} \alias{simplex} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Simplex Distribution Family Function } \description{ The two parameters of the univariate standard simplex distribution are estimated by full maximum likelihood estimation. } \usage{ simplex(lmu = "logitlink", lsigma = "loglink", imu = NULL, isigma = NULL, imethod = 1, ishrinkage = 0.95, zero = "sigma") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu, lsigma}{ Link function for \code{mu} and \code{sigma}. See \code{\link{Links}} for more choices. } \item{imu, isigma}{ Optional initial values for \code{mu} and \code{sigma}. A \code{NULL} means a value is obtained internally. } \item{imethod, ishrinkage, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The probability density function can be written \deqn{f(y; \mu, \sigma) = [2 \pi \sigma^2 (y (1-y))^3]^{-0.5} \exp[-0.5 (y-\mu)^2 / (\sigma^2 y (1-y) \mu^2 (1-\mu)^2)] }{% f(y; mu, sigma) = [2* pi * sigma^2 * (y*(1-y))^3]^(-0.5) * exp[-0.5 * (y-mu)^2 / (sigma^2 * y * (1-y) * mu^2 * (1-mu)^2)] } for \eqn{0 < y < 1}, \eqn{0 < \mu < 1}{0 < mu < 1}, and \eqn{\sigma > 0}{sigma > 0}. The mean of \eqn{Y} is \eqn{\mu}{mu} (called \code{mu}, and returned as the fitted values). % This comes from Jorgensen but it is not confirmed by simulations: % The variance of \eqn{Y} is \eqn{\mu (1 - \mu) - \sqrt{ \lambda / 2} % \exp\{ \lambda / (\mu^2 (1 - \mu)^2) \} % \Gamma(\lambda / (2 \mu^2 (1 - \mu)^2), 0.5)}{ % mu * (1 - mu) - sqrt(lambda / 2) * % exp(lambda / (mu^2 * (1 - mu)^2)) * % Gamma(lambda / (2 * mu^2 * (1 - mu)^2), 0.5)}. % Here, \eqn{\Gamma(x, a)}{Gamma(x, a)} is the % `upper' normalized incomplete gamma function given by % \code{pgamma(x, a, lower = FALSE) * gamma(a)}. The second parameter, \code{sigma}, of this standard simplex distribution is known as the dispersion parameter. The unit variance function is \eqn{V(\mu) = \mu^3 (1-\mu)^3}{V(mu) = mu^3 (1-mu)^3}. Fisher scoring is applied to both parameters. 
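As a small numerical check of the density formula above, here is a minimal sketch; it assumes that the \code{dispersion} argument of \code{\link{dsimplex}} corresponds to \eqn{\sigma}{sigma}:
\preformatted{
mu <- 0.4;  sigma <- 1.5;  y <- 0.7  # Arbitrary illustrative values
(2 * pi * sigma^2 * (y * (1 - y))^3)^(-0.5) *
  exp(-0.5 * (y - mu)^2 / (sigma^2 * y * (1 - y) * mu^2 * (1 - mu)^2))
dsimplex(y, mu = mu, dispersion = sigma)  # Should agree with the above
}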
} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Jorgensen, B. (1997) \emph{The Theory of Dispersion Models}. London: Chapman & Hall. Song, P. X.-K. (2007) \emph{Correlated Data Analysis: Modeling, Analytics, and Applications}. Springer. } \author{ T. W. Yee } \note{ This distribution is potentially useful for dispersion modelling. Numerical problems may occur when \code{mu} is very close to 0 or 1. } \seealso{ \code{\link{dsimplex}}, \code{\link{dirichlet}}, \code{\link{rig}}, \code{\link{binomialff}}. } \examples{ sdata <- data.frame(x2 = runif(nn <- 1000)) sdata <- transform(sdata, eta1 = 1 + 2 * x2, eta2 = 1 - 2 * x2) sdata <- transform(sdata, y = rsimplex(nn, mu = logitlink(eta1, inverse = TRUE), dispersion = exp(eta2))) (fit <- vglm(y ~ x2, simplex(zero = NULL), data = sdata, trace = TRUE)) coef(fit, matrix = TRUE) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/plotqtplot.lmscreg.Rd0000644000176200001440000000767013565414527015745 0ustar liggesusers\name{plotqtplot.lmscreg} \alias{plotqtplot.lmscreg} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Quantile Plot for LMS Quantile Regression } \description{ Plots the quantiles associated with an LMS quantile regression. } \usage{ plotqtplot.lmscreg(fitted.values, object, newdata = NULL, percentiles = object@misc$percentiles, lp = NULL, add.arg = FALSE, y = if (length(newdata)) FALSE else TRUE, spline.fit = FALSE, label = TRUE, size.label = 0.06, xlab = NULL, ylab = "", pch = par()$pch, pcex = par()$cex, pcol.arg = par()$col, xlim = NULL, ylim = NULL, llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd, tcol.arg = par()$col, tadj = 1, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{fitted.values}{ Matrix of fitted values. } \item{object}{ A \pkg{VGAM} quantile regression model, i.e., an object produced by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}} with a family function beginning with \code{"lms."}, e.g., \code{\link{lms.yjn}}. } \item{newdata}{ Data frame at which predictions are made. By default, the original data are used. } \item{percentiles}{ Numerical vector with values between 0 and 100 that specify the percentiles (quantiles). The default is to use the percentiles used when fitting the model. For example, the value 50 corresponds to the median. } \item{lp}{ Length of \code{percentiles}. } \item{add.arg}{ Logical. Add the quantiles to an existing plot? } \item{y}{ Logical. Add the response as points to the plot? } \item{spline.fit}{ Logical. Add a spline curve to the plot? } \item{label}{ Logical. Add the percentiles (as text) to the plot? } \item{size.label}{ Numeric. How much room to leave at the RHS for the label. It is in percent (of the range of the primary variable). } \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. } \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. } \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. } \item{pcex}{ Character expansion of the points. See \code{\link[graphics]{par}}. } \item{pcol.arg}{ Color of the points. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{xlim}{ Limits of the x-axis. See \code{\link[graphics]{par}}. } \item{ylim}{ Limits of the y-axis. See \code{\link[graphics]{par}}. } \item{llty.arg}{ Line type.
See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{lcol.arg}{ Color of the lines. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{llwd.arg}{ Line width. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{tcol.arg}{ Color of the text (if \code{label} is \code{TRUE}). See the \code{col} argument of \code{\link[graphics]{par}}. } \item{tadj}{ Text justification. See the \code{adj} argument of \code{\link[graphics]{par}}. } \item{\dots}{ Arguments passed into the \code{plot} function when setting up the entire plot. Useful arguments here include \code{main} and \code{las}. } } \details{ The above graphical parameters offer some flexibility when plotting the quantiles. } \value{ The matrix of fitted values. } \references{ Yee, T. W. (2004) Quantile regression via vector generalized additive models. \emph{Statistics in Medicine}, \bold{23}, 2295--2315. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ While the graphical arguments of this function are useful to the user, this function should not be called directly. } \seealso{ \code{\link{qtplot.lmscreg}}. } \examples{\dontrun{ fit <- vgam(BMI ~ s(age, df = c(4,2)), lms.bcn(zero = 1), data = bmi.nz) qtplot(fit) qtplot(fit, perc = c(25,50,75,95), lcol = "blue", tcol = "blue", llwd = 2) } } \keyword{graphs} \keyword{models} \keyword{regression} VGAM/man/exppoissonUC.Rd0000644000176200001440000000440513565414527014524 0ustar liggesusers\name{exppois} \alias{exppois} \alias{dexppois} \alias{pexppois} \alias{qexppois} \alias{rexppois} \title{The Exponential Poisson Distribution} \description{ Density, distribution function, quantile function and random generation for the exponential Poisson distribution. } \usage{ dexppois(x, rate = 1, shape, log = FALSE) pexppois(q, rate = 1, shape, lower.tail = TRUE, log.p = FALSE) qexppois(p, rate = 1, shape, lower.tail = TRUE, log.p = FALSE) rexppois(n, rate = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{shape, rate}{ positive parameters. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dexppois} gives the density, \code{pexppois} gives the distribution function, \code{qexppois} gives the quantile function, and \code{rexppois} generates random deviates. } \author{ Kai Huang and J. G. Lauder } \details{ See \code{\link{exppoisson}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } %\note{ %} \seealso{ \code{\link{exppoisson}}.
} \examples{ \dontrun{ rate <- 2; shape <- 0.5; nn <- 201 x <- seq(-0.05, 1.05, len = nn) plot(x, dexppois(x, rate = rate, shape), type = "l", las = 1, ylim = c(0, 3), ylab = paste("fexppoisson(rate = ", rate, ", shape = ", shape, ")"), col = "blue", cex.main = 0.8, main = "Blue is the density, orange the cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles") lines(x, pexppois(x, rate = rate, shape), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qexppois(probs, rate = rate, shape) lines(Q, dexppois(Q, rate = rate, shape), col = "purple", lty = 3, type = "h") lines(Q, pexppois(Q, rate = rate, shape), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3); abline(h = 0, col = "gray50") max(abs(pexppois(Q, rate = rate, shape) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/vgam.Rd0000644000176200001440000003125513565414527013022 0ustar liggesusers\name{vgam} \alias{vgam} %\alias{vgam.fit} \title{ Fitting Vector Generalized Additive Models } % 20030215; This file is based a lot from vglm.Rd \description{ Fit a vector generalized additive model (VGAM). Both 1st-generation VGAMs (based on backfitting) and 2nd-generation VGAMs (based on P-splines, with automatic smoothing parameter selection) are implemented. This is a large class of models that includes generalized additive models (GAMs) and vector generalized linear models (VGLMs) as special cases. } \usage{ vgam(formula, family = stop("argument 'family' needs to be assigned"), data = list(), weights = NULL, subset = NULL, na.action = na.fail, etastart = NULL, mustart = NULL, coefstart = NULL, control = vgam.control(...), offset = NULL, method = "vgam.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, contrasts = NULL, constraints = NULL, extra = list(), form2 = NULL, qr.arg = FALSE, smart = TRUE, ...) } %- maybe also `usage' for other objects documented here. \arguments{ % The following comes from vglm.Rd but with minor tweaks \item{formula}{ a symbolic description of the model to be fit. The RHS of the formula is applied to each linear/additive predictor, and should include at least one \code{\link[VGAM]{sm.os}} term or \code{\link[VGAM]{sm.ps}} term or \code{\link[VGAM]{s}} term. Mixing both together is not allowed. Different variables in each linear/additive predictor can be chosen by specifying constraint matrices. } \item{family}{ Same as for \code{\link{vglm}}. } \item{data}{ an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which \code{vgam} is called. } \item{weights, subset, na.action}{ Same as for \code{\link{vglm}}. Note that \code{subset} may be unreliable and to get around this problem it is best to use \code{\link[base]{subset}} to create a new smaller data frame and feed in the smaller data frame. See below for an example. This is a bug that needs fixing. } \item{etastart, mustart, coefstart}{ Same as for \code{\link{vglm}}. } \item{control}{ a list of parameters for controlling the fitting process. See \code{\link{vgam.control}} for details. } \item{method}{ the method to be used in fitting the model. The default (and presently only) method \code{vgam.fit} uses iteratively reweighted least squares (IRLS). } \item{constraints, model, offset}{ Same as for \code{\link{vglm}}. 
} \item{x.arg, y.arg}{ logical values indicating whether the model matrix and response vector/matrix used in the fitting process should be assigned in the \code{x} and \code{y} slots. Note the model matrix is the LM model matrix; to get the VGAM model matrix type \code{model.matrix(vgamfit)} where \code{vgamfit} is a \code{vgam} object. } \item{contrasts, extra, form2, qr.arg, smart}{ Same as for \code{\link{vglm}}. } \item{\dots}{ further arguments passed into \code{\link{vgam.control}}. } } \details{ A vector generalized additive model (VGAM) is loosely defined as a statistical model that is a function of \eqn{M} additive predictors. The central formula is given by \deqn{\eta_j = \sum_{k=1}^p f_{(j)k}(x_k)}{% eta_j = sum_{k=1}^p f_{(j)k}(x_k)} where \eqn{x_k}{x_k} is the \eqn{k}th explanatory variable (almost always \eqn{x_1=1} for the intercept term), and \eqn{f_{(j)k}} are smooth functions of \eqn{x_k} that are estimated by smoothers. The first term in the summation is just the intercept. Currently two types of smoothers are implemented: \code{\link[VGAM]{s}} represents the older and more traditional one, called a \emph{vector (cubic smoothing spline) smoother} and is based on Yee and Wild (1996); it is more similar to the \R{} package \pkg{gam}. The newer one is represented by \code{\link[VGAM]{sm.os}} and \code{\link[VGAM]{sm.ps}}, and these are based on O-splines and P-splines---they allow automatic smoothing parameter selection; it is more similar to the \R{} package \pkg{mgcv}. In the above, \eqn{j=1,\ldots,M} where \eqn{M} is finite. If all the functions are constrained to be linear then the resulting model is a vector generalized linear model (VGLM). VGLMs are best fitted with \code{\link{vglm}}. Vector (cubic smoothing spline) smoothers are represented by \code{s()} (see \code{\link[VGAM]{s}}). Local regression via \code{lo()} is \emph{not} supported. The results of \code{vgam} will differ from those of \code{gam()} (in the \pkg{gam} package) because \code{vgam()} uses a different knot selection algorithm. In general, fewer knots are chosen because the computation becomes expensive when the number of additive predictors \eqn{M} is large. Second-generation VGAMs are based on O-splines and P-splines. The latter is due to Eilers and Marx (1996). Backfitting is not required, and estimation is performed using IRLS. The function \code{\link{sm.os}} represents a \emph{smart} implementation of O-splines. The function \code{\link{sm.ps}} represents a \emph{smart} implementation of P-splines. Written as G2-VGAMs or P-VGAMs, this methodology should not be used unless the sample size is reasonably large. Usually a UBRE predictive criterion is optimized (at each IRLS iteration) because the scale parameter for VGAMs is usually assumed to be known. This search for optimal smoothing parameters does not always converge, and neither is it totally reliable. G2-VGAMs implicitly set \code{criterion = "coefficients"} so that convergence occurs when the change in the regression coefficients between 2 IRLS iterations is sufficiently small. Otherwise the search for the optimal smoothing parameters might cause the log-likelihood to decrease between 2 IRLS iterations. Currently \emph{outer iteration} is implemented, by default, rather than \emph{performance iteration} because the latter is more prone to converging to a local solution; see Wood (2004) for details. One can use \emph{performance iteration} by setting \code{Maxit.outer = 1} in \code{\link{vgam.control}}.
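For example, the following is a minimal sketch of requesting performance iteration for a G2-VGAM; it uses the \code{hunua} data frame that appears in the examples below, and relies on \code{Maxit.outer} being passed on to \code{\link{vgam.control}}:
\preformatted{
hfit.pi <- vgam(agaaus ~ sm.os(altitude), binomialff, data = hunua,
                Maxit.outer = 1)  # Performance iteration
}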
% outeriter % A suggested rule-of-thumb is at least 500 observations. The underlying algorithm of VGAMs is IRLS. First-generation VGAMs (called G1-VGAMs) are estimated by modified vector backfitting using vector splines. O-splines are used as the basis functions for the vector (smoothing) splines, which are a lower dimensional version of natural B-splines. The function \code{vgam.fit()} actually does the work. The smoothing code is based on F. O'Sullivan's BART code. % If more than one of \code{etastart}, \code{start} and \code{mustart} % is specified, the first in the list will be used. A closely related methodology based on VGAMs called \emph{constrained additive ordination} (CAO) first forms a linear combination of the explanatory variables (called \emph{latent variables}) and then fits a GAM to these. This is implemented in the function \code{\link{cao}} for a very limited choice of family functions. } \value{ For G1-VGAMs and G2-VGAMs, an object of class \code{"vgam"} or \code{"pvgam"} respectively (see \code{\link{vgam-class}} and \code{\link{pvgam-class}} for further information). } \references{ Wood, S. N. (2004). Stable and efficient multiple smoothing parameter estimation for generalized additive models. \emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686. Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. Yee, T. W. (2008) The \code{VGAM} Package. \emph{R News}, \bold{8}, 28--39. Yee, T. W. (2015) Vector Generalized Linear and Additive Models: With an Implementation in R. New York, USA: \emph{Springer}. Yee, T. W. (2016). Comments on ``Smoothing parameter and model selection for general smooth models'' by Wood, S. N. and Pya, N. and Safken, N., \emph{J. Amer. Statist. Assoc.}, \bold{110}(516). %Yee, T. W. and Somchit, C. and Wild, C. J. (2016) %Generation-2 %vector generalized additive models. %Manuscript in preparation. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. %Wood, S. N. (2004). %Stable and efficient multiple smoothing parameter estimation %for generalized additive models. %\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686. } \author{ Thomas W. Yee } \note{ This function can fit a wide variety of statistical models. Some of these are harder to fit than others because of inherent numerical difficulties associated with some of them. Successful model fitting benefits from cumulative experience. Varying the values of arguments in the \pkg{VGAM} family function itself is a good first step if difficulties arise, especially if initial values can be inputted. A second, more general step, is to vary the values of arguments in \code{\link{vgam.control}}. A third step is to make use of arguments such as \code{etastart}, \code{coefstart} and \code{mustart}. Some \pkg{VGAM} family functions end in \code{"ff"} to avoid interference with other functions, e.g., \code{\link{binomialff}}, \code{\link{poissonff}}. This is because \pkg{VGAM} family functions are incompatible with \code{\link[stats]{glm}} (and also \code{\link[gam]{gam}} in the \pkg{gam} library and \code{\link[mgcv]{gam}} in the \pkg{mgcv} library). % \code{\link{gaussianff}}, % \code{gammaff}. The smart prediction (\code{\link{smartpred}}) library is packed with the \pkg{VGAM} library. 
The theory behind the scaling parameter is currently being made
more rigorous, but it should give the same value as the scale
parameter for GLMs.

}
%~Make other sections like WARNING with \section{WARNING }{....} ~
\section{WARNING}{
For G1-VGAMs,
currently \code{vgam} can only handle constraint matrices
\code{cmat}, say, such that \code{crossprod(cmat)} is diagonal.
A violation of this can be detected by \code{\link{is.buggy}}.
VGAMs with constraint matrices that have non-orthogonal columns
should be fitted with
\code{\link{sm.os}} or
\code{\link{sm.ps}} terms
instead of \code{\link{s}}.

% This is a bug that I will try to fix up soon;

See warnings in \code{\link{vglm.control}}.

}
\seealso{
\code{\link{is.buggy}},
\code{\link{vgam.control}},
\code{\link{vgam-class}},
\code{\link{vglmff-class}},
\code{\link{plotvgam}},
\code{\link{summaryvgam}},
\code{\link{summarypvgam}},
\code{\link{sm.os}},
\code{\link{sm.ps}},
\code{\link[VGAM]{s}},
\code{\link[mgcv]{magic}},
\code{\link{vglm}},
\code{\link{vsmooth.spline}},
\code{\link{cao}}.

}
\examples{# Nonparametric proportional odds model
pneumo <- transform(pneumo, let = log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let),
     cumulative(parallel = TRUE), data = pneumo, trace = TRUE)

# Nonparametric logistic regression
hfit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
\dontrun{ plot(hfit, se = TRUE) }
phfit <- predict(hfit, type = "terms", raw = TRUE, se = TRUE)
names(phfit)
head(phfit$fitted)
head(phfit$se.fit)
phfit$df
phfit$sigma

# Fit two species simultaneously
hfit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
              binomialff(multiple.responses = TRUE), data = hunua)
coef(hfit2, matrix = TRUE)  # Not really interpretable
\dontrun{ plot(hfit2, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4)
ooo <- with(hunua, order(altitude))
with(hunua, matplot(altitude[ooo], fitted(hfit2)[ooo,],
     ylim = c(0, 0.8), xlab = "Altitude (m)",
     ylab = "Probability of presence", las = 1,
     main = "Two plant species' response curves", type = "l",
     lwd = 2))
with(hunua, rug(altitude))
}

# The 'subset' argument does not work here. Use subset() instead.
set.seed(1)
zdata <- data.frame(x2 = runif(nn <- 500))
zdata <- transform(zdata, y = rbinom(nn, 1, 0.5))
zdata <- transform(zdata, subS = runif(nn) < 0.7)
sub.zdata <- subset(zdata, subS)  # Use this instead
if (FALSE)
  fit4a <- vgam(cbind(y, y) ~ s(x2, df = 2),
                binomialff(multiple.responses = TRUE),
                data = zdata, subset = subS)  # This fails!!!
fit4b <- vgam(cbind(y, y) ~ s(x2, df = 2),
              binomialff(multiple.responses = TRUE),
              data = sub.zdata)  # This succeeds!!!
fit4c <- vgam(cbind(y, y) ~ sm.os(x2),
              binomialff(multiple.responses = TRUE),
              data = sub.zdata)  # This succeeds!!!
\dontrun{par(mfrow = c(2, 2))
plot(fit4b, se = TRUE, shade = TRUE, shcol = "pink")
plot(fit4c, se = TRUE, shade = TRUE, shcol = "pink")
}
}
\keyword{models}
\keyword{regression}
\keyword{smooth}
VGAM/man/Rank.Rd0000644000176200001440000000360413565414527012760 0ustar liggesusers\name{Rank}
\alias{Rank}
%\alias{AICvglm}
\alias{Rank.rrvglm}
\alias{Rank.qrrvglm}
\alias{Rank.rrvgam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Rank }
\description{
Returns the rank of reduced-rank regression-type models in
the \pkg{VGAM} package.

}
\usage{
Rank(object, \dots)
Rank.rrvglm(object, \dots)
Rank.qrrvglm(object, \dots)
Rank.rrvgam(object, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
Some \pkg{VGAM} object, for example, having
class \code{\link{rrvglm-class}}.
The class \code{\link{vglm-class}} is not included since this is not based on reduced-rank regression. } \item{\dots}{ Other possible arguments fed into the function later (used for added flexibility for the future). } } \details{ Regression models based on reduced-rank regression have a quantity called the \emph{rank}, which is 1 or 2 or 3 etc. The smaller the value the more dimension reduction, so that there are fewer parameters. This function was not called \code{rank()} to avoid conflict with \code{\link[base]{rank}}. } \value{ Returns an integer value, provided the rank of the model makes sense. } \author{T. W. Yee. } \note{ This function has not been defined for VGLMs yet. It might refer to the rank of the VL model matrix, but for now this function should not be applied to \code{\link{vglm}} fits. } %\references{ %} %\section{Warning }{ %} \seealso{ RR-VGLMs are described in \code{\link{rrvglm-class}}; QRR-VGLMs are described in \code{\link{qrrvglm-class}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo))) (fit1 <- rrvglm(cbind(normal, mild, severe) ~ let + x3, acat, data = pneumo)) coef(fit1, matrix = TRUE) constraints(fit1) Rank(fit1) } \keyword{models} \keyword{regression} VGAM/man/vgam-class.Rd0000644000176200001440000002036513565414527014125 0ustar liggesusers\name{vgam-class} \docType{class} \alias{vgam-class} \title{Class ``vgam'' } \description{ Vector generalized additive models. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{vgam(...)}. % ~~ describe objects here ~~ } \section{Slots}{ \describe{ \item{\code{nl.chisq}:}{Object of class \code{"numeric"}. Nonlinear chi-squared values. } \item{\code{nl.df}:}{Object of class \code{"numeric"}. Nonlinear chi-squared degrees of freedom values. } \item{\code{spar}:}{Object of class \code{"numeric"} containing the (scaled) smoothing parameters. } \item{\code{s.xargument}:}{Object of class \code{"character"} holding the variable name of any \code{s()} terms. } \item{\code{var}:}{Object of class \code{"matrix"} holding approximate pointwise standard error information. } \item{\code{Bspline}:}{Object of class \code{"list"} holding the scaled (internal and boundary) knots, and the fitted B-spline coefficients. These are used for prediction. } \item{\code{extra}:}{Object of class \code{"list"}; the \code{extra} argument on entry to \code{vglm}. This contains any extra information that might be needed by the family function. } \item{\code{family}:}{Object of class \code{"vglmff"}. The family function. } \item{\code{iter}:}{Object of class \code{"numeric"}. The number of IRLS iterations used. } \item{\code{predictors}:}{Object of class \code{"matrix"} with \eqn{M} columns which holds the \eqn{M} linear predictors. } \item{\code{assign}:}{Object of class \code{"list"}, from class \code{ "vlm"}. This named list gives information matching the columns and the (LM) model matrix terms. } \item{\code{call}:}{Object of class \code{"call"}, from class \code{ "vlm"}. The matched call. } \item{\code{coefficients}:}{Object of class \code{"numeric"}, from class \code{ "vlm"}. A named vector of coefficients. } \item{\code{constraints}:}{Object of class \code{"list"}, from class \code{ "vlm"}. A named list of constraint matrices used in the fitting. } \item{\code{contrasts}:}{Object of class \code{"list"}, from class \code{ "vlm"}. The contrasts used (if any). } \item{\code{control}:}{Object of class \code{"list"}, from class \code{ "vlm"}. 
A list of parameters for controlling the fitting process.
See \code{\link{vglm.control}} for details.

}
\item{\code{criterion}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
List of convergence criteria evaluated at the final
IRLS iteration.

}
\item{\code{df.residual}:}{Object of class \code{"numeric"},
from class \code{ "vlm"}.
The residual degrees of freedom.

}
\item{\code{df.total}:}{Object of class \code{"numeric"},
from class \code{ "vlm"}.
The total degrees of freedom.

}
\item{\code{dispersion}:}{Object of class \code{"numeric"},
from class \code{ "vlm"}.
The scaling parameter.

}
\item{\code{effects}:}{Object of class \code{"numeric"},
from class \code{ "vlm"}.
The effects.

}
\item{\code{fitted.values}:}{
Object of class \code{"matrix"}, from class \code{ "vlm"}.
The fitted values. This is usually the mean but may be quantiles,
or the location parameter, e.g., in the Cauchy model.

}
\item{\code{misc}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
A named list to hold miscellaneous parameters.

}
\item{\code{model}:}{Object of class \code{"data.frame"},
from class \code{ "vlm"}.
The model frame.

}
\item{\code{na.action}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
A list holding information about missing values.

}
\item{\code{offset}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
If non-zero, an \eqn{M}-column matrix of offsets.

}
\item{\code{post}:}{Object of class \code{"list"},
from class \code{ "vlm"}
where post-analysis results may be put.

}
\item{\code{preplot}:}{Object of class \code{"list"},
from class \code{ "vlm"}
used by \code{\link{plotvgam}}; the plotting parameters
may be put here.

}
\item{\code{prior.weights}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}
holding the initially supplied weights.

}
\item{\code{qr}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
QR decomposition at the final iteration.

}
\item{\code{R}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
The \bold{R} matrix in the QR decomposition used in the fitting.

}
\item{\code{rank}:}{Object of class \code{"integer"},
from class \code{ "vlm"}.
Numerical rank of the fitted model.

}
\item{\code{residuals}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
The \emph{working} residuals at the final IRLS iteration.

}
\item{\code{ResSS}:}{Object of class \code{"numeric"},
from class \code{ "vlm"}.
Residual sum of squares at the final IRLS iteration with the
adjusted dependent vectors and weight matrices.

}
\item{\code{smart.prediction}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
A list of data-dependent parameters (if any)
that are used by smart prediction.

}
\item{\code{terms}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
The \code{\link[stats]{terms}} object used.

}
\item{\code{weights}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
The weight matrices at the final IRLS iteration.
This is in matrix-band form.

}
\item{\code{x}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
The model matrix (LM, not VGLM).

}
\item{\code{xlevels}:}{Object of class \code{"list"},
from class \code{ "vlm"}.
The levels of the factors, if any, used in fitting.

}
\item{\code{y}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
The response, in matrix form.

}
\item{\code{Xm2}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
See \code{\link{vglm-class}}.

}
\item{\code{Ym2}:}{Object of class \code{"matrix"},
from class \code{ "vlm"}.
See \code{\link{vglm-class}}.
} \item{\code{callXm2}:}{ Object of class \code{"call"}, from class \code{ "vlm"}. The matched call for argument \code{form2}. } } } \section{Extends}{ Class \code{"vglm"}, directly. Class \code{"vlm"}, by class \code{"vglm"}. } \section{Methods}{ \describe{ \item{cdf}{\code{signature(object = "vglm")}: cumulative distribution function. Useful for quantile regression and extreme value data models.} \item{deplot}{\code{signature(object = "vglm")}: density plot. Useful for quantile regression models.} \item{deviance}{\code{signature(object = "vglm")}: deviance of the model (where applicable). } \item{plot}{\code{signature(x = "vglm")}: diagnostic plots. } \item{predict}{\code{signature(object = "vglm")}: extract the additive predictors or predict the additive predictors at a new data frame.} \item{print}{\code{signature(x = "vglm")}: short summary of the object. } \item{qtplot}{\code{signature(object = "vglm")}: quantile plot (only applicable to some models). } \item{resid}{\code{signature(object = "vglm")}: residuals. There are various types of these. } \item{residuals}{\code{signature(object = "vglm")}: residuals. Shorthand for \code{resid}. } \item{rlplot}{\code{signature(object = "vglm")}: return level plot. Useful for extreme value data models.} \item{summary}{\code{signature(object = "vglm")}: a more detailed summary of the object. } } } \references{ Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. %\url{http://www.stat.auckland.ac.nz/~yee} } \author{ Thomas W. Yee } \note{ VGAMs have all the slots that \code{\link{vglm}} objects have (\code{\link{vglm-class}}), plus the first few slots described in the section above. } %~Make other sections like WARNING with \section{WARNING }{....} ~ \seealso{ \code{\link{vgam.control}}, \code{\link{vglm}}, \code{\link[VGAM]{s}}, \code{\link{vglm-class}}, \code{\link{vglmff-class}}. } \examples{ # Fit a nonparametric proportional odds model pneumo <- transform(pneumo, let = log(exposure.time)) vgam(cbind(normal, mild, severe) ~ s(let), cumulative(parallel = TRUE), data = pneumo) } \keyword{classes} \keyword{models} \keyword{regression} \keyword{smooth} VGAM/man/bifgmcop.Rd0000644000176200001440000000505613565414527013656 0ustar liggesusers\name{bifgmcop} \alias{bifgmcop} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Farlie-Gumbel-Morgenstern's Bivariate Distribution Family Function } \description{ Estimate the association parameter of Farlie-Gumbel-Morgenstern's bivariate distribution by maximum likelihood estimation. } \usage{ bifgmcop(lapar = "rhobitlink", iapar = NULL, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lapar, iapar, imethod}{ Details at \code{\link{CommonVGAMffArguments}}. See \code{\link{Links}} for more link function choices. } } \details{ The cumulative distribution function is \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = y_1 y_2 ( 1 + \alpha (1 - y_1) (1 - y_2) ) }{% P(Y1 <= y1, Y2 <= y2) = y1 * y2 * ( 1 + alpha * (1 - y1) * (1 - y2) ) } for \eqn{-1 < \alpha < 1}{-1 < alpha < 1}. The support of the function is the unit square. The marginal distributions are the standard uniform distributions. When \eqn{\alpha = 0}{alpha=0} the random variables are independent. % A variant of Newton-Raphson is used, which only seems to work for an % intercept model. % It is a very good idea to set \code{trace=TRUE}. 
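Since the CDF has a simple closed form, it can be coded directly
as a sanity check (a sketch; \code{pbifgm()} below is a
hypothetical helper, not part of \pkg{VGAM}):
\preformatted{
# FGM copula CDF on the unit square, from the formula above
pbifgm <- function(y1, y2, apar)
  y1 * y2 * (1 + apar * (1 - y1) * (1 - y2))
pbifgm(0.5, 0.5, apar = 0)  # 0.25, i.e., independence when apar = 0
}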
% This \pkg{VGAM} family function is prone to numerical difficulties.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions
such as \code{\link{vglm}}
and \code{\link{vgam}}.

}
\references{

Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S.
(2005)
\emph{Extreme Value and Related Models with Applications in
Engineering and Science},
Hoboken, NJ, USA: Wiley-Interscience.

Smith, M. D. (2007)
Invariance theorems for Fisher information.
\emph{Communications in Statistics---Theory and Methods},
\bold{36}(12), 2213--2222.

}
\author{ T. W. Yee }
\note{
The response must be a two-column matrix.
Currently, the fitted value is a matrix with two columns and
values equal to 0.5.
This is because each marginal distribution corresponds to a
standard uniform distribution.

% This \pkg{VGAM} family function should be used with caution.

}
\seealso{
\code{\link{rbifgmcop}},
\code{\link{bifrankcop}},
\code{\link{bifgmexp}},
\code{\link{simulate.vlm}}.

}
\examples{
ymat <- rbifgmcop(n = 1000, apar = rhobitlink(3, inverse = TRUE))
\dontrun{plot(ymat, col = "blue")}
fit <- vglm(ymat ~ 1, fam = bifgmcop, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
}
\keyword{models}
\keyword{regression}

% for real \eqn{\alpha}{alpha} (the range is data-dependent).
VGAM/man/micmen.Rd0000644000176200001440000001130213565414527013327 0ustar liggesusers\name{micmen}
\alias{micmen}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Michaelis-Menten Model }
\description{
Fits a Michaelis-Menten nonlinear regression model.

}
\usage{
micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
       imethod = 1, oim = TRUE, link1 = "identitylink",
       link2 = "identitylink", firstDeriv = c("nsimEIM", "rpar"),
       probs.x = c(0.15, 0.85), nsimEIM = 500, dispersion = 0,
       zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{rpar}{
Numeric. Initial positive ridge parameter.
This is used to create positive-definite weight matrices.

}
\item{divisor}{
Numerical. The divisor used to divide the ridge parameter at each
iteration until it is very small but still positive.
The value of \code{divisor} should be greater than one.

}
\item{init1, init2}{
Numerical. Optional initial value for the first and second
parameters, respectively.
The default is to use a self-starting value.

}
\item{link1, link2}{
Parameter link function applied to the first and second
parameters, respectively.
See \code{\link{Links}} for more choices.

}
\item{dispersion}{
Numerical. Dispersion parameter.

}
\item{firstDeriv}{
Character. Algorithm for computing the first derivatives and
working weights.
The first is the default.

}
\item{imethod, probs.x}{
See \code{\link{CommonVGAMffArguments}} for information.

}
\item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}} for information.

}
\item{oim}{
Use the OIM?
See \code{\link{CommonVGAMffArguments}} for information.

}
}
\details{
The Michaelis-Menten model is given by
\deqn{E(Y_i) = (\theta_1 u_i) / (\theta_2 + u_i)}{%
E(Y_i) = theta1 * u_i / (theta2 + u_i)}
where \eqn{\theta_1}{theta1} and \eqn{\theta_2}{theta2} are the
two parameters.

The relationship between iteratively reweighted least squares and
the Gauss-Newton algorithm is given in Wedderburn (1974).
However, the algorithm used by this family function is different.
Details are given at the Author's web site.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions
such as \code{\link{vglm}} and \code{\link{vgam}}.

}
\references{

Seber, G. A. F. and Wild, C. J. (1989)
\emph{Nonlinear Regression},
New York: Wiley.

Wedderburn, R. W. M. (1974)
Quasi-likelihood functions, generalized linear models,
and the Gauss-Newton method.
\emph{Biometrika}, \bold{61}, 439--447.

Bates, D. M. and Watts, D. G. (1988)
\emph{Nonlinear Regression Analysis and Its Applications},
New York: Wiley.

% Documentation accompanying the \pkg{VGAM} package at
% \url{http://www.stat.auckland.ac.nz/~yee}
% contains further information and examples.

}
\author{ T. W. Yee }
\note{
The regressor values \eqn{u_i}{u_i} are inputted as the RHS of
the \code{form2} argument.
It should just be a simple term; no smart prediction is used.
It should be just a single vector; therefore omit the
intercept term.
The LHS of the formula \code{form2} is ignored.

To predict the response at new values of \eqn{u_i}{u_i} one must
assign these values to the \code{@extra$Xm2} slot in the fitted
object, e.g., see the example below.

Numerical problems may occur. If so, try setting some initial
values for the parameters.
In the future, several self-starting initial values will be
implemented.

}
\seealso{
\code{\link{enzyme}}.
% \code{skira}.

}
\section{Warning }{
This function is not (nor could ever be) entirely reliable.
Plotting the fitted function and monitoring convergence is
recommended.

}
\examples{
mfit <- vglm(velocity ~ 1, micmen, data = enzyme, trace = TRUE,
             crit = "coef", form2 = ~ conc - 1)
summary(mfit)

\dontrun{
plot(velocity ~ conc, enzyme, xlab = "concentration", las = 1,
     col = "blue",
     main = "Michaelis-Menten equation for the enzyme data",
     ylim = c(0, max(velocity)), xlim = c(0, max(conc)))
points(fitted(mfit) ~ conc, enzyme, col = "red", pch = "+", cex = 1.5)

# This predicts the response at a finer grid:
newenzyme <- data.frame(conc = seq(0, max(with(enzyme, conc)),
                                   len = 200))
mfit@extra$Xm2 <- newenzyme$conc  # This assignment is needed for prediction
lines(predict(mfit, newenzyme, "response") ~ conc, newenzyme,
      col = "red")
}
}
\keyword{models}
\keyword{regression}

%coef(mfit, matrix = TRUE)
%plot(velocity ~ I(1 / conc), data = enzyme)
%mfit2 <- vglm(velocity ~ I(1 / conc),
%              uninormal(lmean = "reciprocal"),  # zero = "",
%              data = enzyme, trace = TRUE, maxit = 44)
%coef(mfit2, matrix = TRUE)
%1 / coef(mfit2)[1]  # theta1
%coef(mfit2)[1] / coef(mfit2)[3]  # theta2
VGAM/man/fittedvlm.Rd0000644000176200001440000000750213565414527014064 0ustar liggesusers\name{fittedvlm}
\alias{fittedvlm}
\alias{fitted.values.vlm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Fitted Values of a VLM object}
\description{
Extractor function for the fitted values of a model object that
inherits from a \emph{vector linear model} (VLM), e.g.,
a model of class \code{"vglm"}.

}
\usage{
fittedvlm(object, drop = FALSE, type.fitted = NULL,
          percentiles = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ a model object that inherits from a VLM.

}
\item{drop}{ Logical.
If \code{FALSE} then the answer is a matrix.
If \code{TRUE} then the answer is a vector.

}
% \item{matrix.arg}{
% Logical. Return the answer as a matrix?
% If \code{FALSE} then it will be a vector.
% }
\item{type.fitted}{
Character.
Some \pkg{VGAM} family functions have a \code{type.fitted}
argument.
If so then a different type of fitted value can be returned.
It is recomputed from the model after convergence.
Note: this is an experimental feature and not all \pkg{VGAM}
family functions have this implemented yet.
See \code{\link{CommonVGAMffArguments}} for more details.

}
\item{percentiles}{
See \code{\link{CommonVGAMffArguments}} for details.

}
\item{\dots}{
Currently unused.

}
}
\details{
The ``fitted values'' usually correspond to the mean response;
however, because the \pkg{VGAM} package fits so many models,
this sometimes refers to quantities such as quantiles.
The mean may not even exist, e.g., for a Cauchy distribution.

Note that the fitted value is output from the \code{@linkinv}
slot of the \pkg{VGAM} family function,
where the \code{eta} argument is
the \eqn{n \times M}{n x M} matrix
of linear predictors.

}
\value{
The fitted values evaluated at the final IRLS iteration.

}
\references{
Chambers, J. M. and T. J. Hastie (eds) (1992)
\emph{Statistical Models in S}.
Wadsworth & Brooks/Cole.
}
\author{ Thomas W. Yee }
\note{
This function is one of several extractor functions for
the \pkg{VGAM} package.
Others include \code{coef}, \code{deviance}, \code{weights}
and \code{constraints} etc.
This function is equivalent to the methods function for the
generic function \code{fitted.values}.

If \code{fit} is a VLM or VGLM then \code{fitted(fit)} and
\code{predict(fit, type = "response")} should be equivalent
(see \code{\link{predictvglm}}).
The latter has the advantage that it handles a \code{newdata}
argument so that the fitted values can be computed for a
different data set.

}
\seealso{
\code{\link[stats]{fitted}},
\code{\link{predictvglm}},
\code{\link{vglmff-class}}.

}
\examples{
# Categorical regression example 1
pneumo <- transform(pneumo, let = log(exposure.time))
(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds,
              data = pneumo))
fitted(fit1)

# LMS quantile regression example 2
fit2 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1),
             data = bmi.nz, trace = TRUE)
head(predict(fit2, type = "response"))  # Equal to the following two:
head(fitted(fit2))
predict(fit2, type = "response", newdata = head(bmi.nz))

# Zero-inflated example 3
zdata <- data.frame(x2 = runif(nn <- 1000))
zdata <- transform(zdata,
                   pstr0.3  = logitlink(-0.5       , inverse = TRUE),
                   lambda.3 =   loglink(-0.5 + 2*x2, inverse = TRUE))
zdata <- transform(zdata,
                   y1 = rzipois(nn, lambda = lambda.3,
                                pstr0 = pstr0.3))
fit3 <- vglm(y1 ~ x2, zipoisson(zero = NULL), data = zdata,
             trace = TRUE)
head(fitted(fit3, type.fitted = "mean" ))  # E(Y), which is the default
head(fitted(fit3, type.fitted = "pobs0"))  # P(Y = 0)
head(fitted(fit3, type.fitted = "pstr0"))  # Prob of a structural 0
head(fitted(fit3, type.fitted = "onempstr0"))  # 1 - prob of a structural 0
}
\keyword{models}
\keyword{regression}
VGAM/man/toxop.Rd0000644000176200001440000000232213565414527013232 0ustar liggesusers\name{toxop}
\alias{toxop}
\docType{data}
\title{ Toxoplasmosis Data }
\description{
Toxoplasmosis data in 34 cities in El Salvador.

}
\usage{data(toxop)}
\format{
A data frame with 34 observations on the following 4 variables.
\describe{
\item{\code{rainfall}}{
a numeric vector; the amount of rainfall
in each city.

}
\item{\code{ssize}}{a numeric vector; sample size.}
\item{\code{cityNo}}{a numeric vector; the city number.}
\item{\code{positive}}{a numeric vector; the number of subjects
testing positive for the disease.

}
}
}
\details{
See the references for details.

}
\source{
See the references for details.

}
\seealso{
\code{\link[VGAM]{double.expbinomial}}.

}
\references{
Efron, B.
(1978)
Regression and ANOVA with zero-one data: measures of residual
variation.
\emph{Journal of the American Statistical Association},
\bold{73}, 113--121.

Efron, B. (1986)
Double exponential families and their use in generalized linear
regression.
\emph{Journal of the American Statistical Association},
\bold{81}, 709--721.

}
\examples{
\dontrun{ with(toxop, plot(rainfall, positive / ssize, col = "blue"))
plot(toxop, col = "blue") }
}
\keyword{datasets}
VGAM/man/get.smart.prediction.Rd0000644000176200001440000000163713565414527016134 0ustar liggesusers\name{get.smart.prediction}
\alias{get.smart.prediction}
\title{ Retrieves ``.smart.prediction'' }
\description{
Retrieves \code{.smart.prediction} from \code{smartpredenv}.

}
\usage{
get.smart.prediction()
}
\value{
Returns the list \code{.smart.prediction} from
\code{smartpredenv}.

}
\details{
A smart modelling function such as \code{\link[stats]{lm}} allows
smart functions such as \code{\link[VGAM]{sm.bs}} to write to a
data structure called \code{.smart.prediction} in
\code{smartpredenv}.
At the end of fitting, \code{get.smart.prediction} retrieves this
data structure.
It is then attached to the object, and used for prediction later.

}
\seealso{
\code{\link{get.smart}},
\code{\link[stats]{lm}}.

}
\examples{
\dontrun{ fit$smart <- get.smart.prediction()  # Put at the end of lm() }
}
%\keyword{smart}
\keyword{models}
\keyword{regression}
\keyword{programming}
% Converted by Sd2Rd version 1.10.
VGAM/man/summaryvgam.Rd0000644000176200001440000000413313565414527014433 0ustar liggesusers% 20160804; Adapted from summary.vglm.Rd
\name{summaryvgam}
\alias{summaryvgam}
\alias{show.summary.vgam}
\title{Summarizing Vector Generalized Additive Model Fits}
\usage{
summaryvgam(object, dispersion = NULL, digits = options()$digits - 2,
            presid = TRUE, nopredictors = FALSE)
\method{show}{summary.vgam}(x, quote = TRUE, prefix = "",
                            digits = options()$digits-2,
                            nopredictors = NULL)
}
\arguments{
\item{object}{an object of class \code{"vgam"},
which is the result of a
call to \code{\link{vgam}} with at least one
\code{\link[VGAM]{s}} term.

}
\item{x}{an object of class \code{"summary.vgam"},
which is the result of a call to \code{summaryvgam()}.

}
\item{dispersion, digits, presid}{
See \code{\link{summaryvglm}}.

}
\item{quote, prefix, nopredictors}{
See \code{\link{summaryvglm}}.

}
}
\description{
These functions are all \code{\link{methods}} for class
\code{vgam} or \code{summary.vgam} objects.

}
\details{
This methods function reports a summary more similar to
\code{summary.gam()} from \pkg{gam} than to
\code{\link[mgcv]{summary.gam}} from \pkg{mgcv}.
It applies to G1-VGAMs using \code{\link{s}} and vector
backfitting.
In particular, an approximate score test for \emph{linearity} is
conducted for each \code{\link{s}} term---see Section 4.3.4 of
Yee (2015) for details.
The p-values from this type of test tend to be biased upwards
(too large).

}
\value{
\code{summaryvgam} returns an object of class
\code{"summary.vgam"};
see \code{\link{summary.vgam-class}}.

}
\seealso{
\code{\link{vgam}},
\code{\link[stats]{summary.glm}},
\code{\link[stats]{summary.lm}},
\code{\link[mgcv]{summary.gam}} from \pkg{mgcv},  % A core R package
\code{\link{summarypvgam}} for P-VGAMs.
% \code{\link[gam]{summary.gam}}.  % May not be installed.
}
\examples{
hfit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
summary(hfit)
summary(hfit)@anova  # Table for (approximate) testing of linearity
}
\keyword{models}
\keyword{regression}

% summary(hfit)@post$s.table  # For sm.ps() terms.
VGAM/man/Links.Rd0000644000176200001440000002722413565414527013151 0ustar liggesusers\name{Links}
\alias{Links}
\alias{TypicalVGAMlink}
\title{Link functions for VGLM/VGAM/etc. families}
\description{
The \pkg{VGAM} package provides a number of (parameter) link
functions which are described in general here.
Collectively, they offer the user considerable choice and
flexibility for modelling data.

}
\usage{
TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL,
                inverse = FALSE, deriv = 0, short = TRUE,
                tag = FALSE)
}
\arguments{
\item{theta}{
Numeric or character.
This is usually \eqn{\theta}{theta} (default) but can sometimes
be \eqn{\eta}{eta},
depending on the other arguments.
If \code{theta} is character then \code{inverse} and
\code{deriv} are ignored.
The name \code{theta} should always be the name of the
first argument.

}
\item{someParameter}{
Some parameter, e.g., an offset.

}
\item{bvalue}{
Boundary value, positive if given.
If \code{0 < theta} then values of \code{theta} which are less
than or equal to 0 can be replaced by \code{bvalue} before
computing the link function value.
Values of \code{theta} which are greater than or equal to 1 can
be replaced by 1 minus \code{bvalue} before computing the link
function value.
The value \code{bvalue = .Machine$double.eps} is sometimes a
reasonable value, or something slightly higher.

}
% \item{earg}{
% List.
% Extra argument allowing for additional information, specific to the
% link function. For example, for \code{\link{logoff}}, this will
% contain the offset value. The argument \code{earg} is
% always a list with \emph{named} components. See each specific link
% function to find the component names for the list.
%
%
% Almost all \pkg{VGAM} family functions with a single link
% function have an argument (often called \code{earg}) which will
% allow parameters to be inputted for that link function.
% For \pkg{VGAM} family functions with more than one link
% function there usually will be an \code{earg}-type argument for
% each link. For example, if there are two links called
% \code{lshape} and \code{lscale} then
% the \code{earg}-type arguments for these might be called
% \code{eshape} and \code{escale}, say.
%
% }
\item{inverse}{
Logical. If \code{TRUE} and \code{deriv = 0} then the inverse
link value \eqn{\theta}{theta} is returned, hence the argument
\code{theta} is really \eqn{\eta}{eta}.
In all other cases, the argument \code{theta} is
really \eqn{\theta}{theta}.

}
\item{deriv}{
Integer. Either 0, 1, or 2, specifying the order of
the derivative.
Some link functions handle values up to 3 or 4.

}
\item{short, tag}{
Logical.
These are used for labelling the \code{blurb} slot of a
\code{\link{vglmff-class}} object.
These arguments are used only if \code{theta} is character, and
give the formula for the link in character form.
If \code{tag = TRUE} then the result is preceded by a little
more information.

}
}
\value{
Returns one of:
the link function value or its first or second derivative,
the inverse link or its first or second derivative,
or a character description of the link.

Here are the general details.
If \code{inverse = FALSE} and \code{deriv = 0} (default) then the
ordinary link function \eqn{\eta = g(\theta)}{eta = g(theta)}
is returned.
If \code{inverse = TRUE} and \code{deriv = 0} then the inverse link function value is returned, hence \code{theta} is really \eqn{\eta}{eta} (the only occasion this happens). If \code{inverse = FALSE} and \code{deriv = 1} then it is \eqn{d\eta / d\theta}{d eta / d theta} \emph{as a function of} \eqn{\theta}{theta}. If \code{inverse = FALSE} and \code{deriv = 2} then it is \eqn{d^2\eta / d\theta^2}{d^2 eta / d theta^2} \emph{as a function of} \eqn{\theta}{theta}. If \code{inverse = TRUE} and \code{deriv = 1} then it is \eqn{d\theta / d\eta}{d theta / d eta} \emph{as a function of} \eqn{\theta}{theta}. If \code{inverse = TRUE} and \code{deriv = 2} then it is \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2} \emph{as a function of} \eqn{\theta}{theta}. It is only when \code{deriv = 1} that \code{linkfun(theta, deriv = 1, inverse = TRUE)} and \code{linkfun(theta, deriv = 1, inverse = FALSE)} are \emph{reciprocals} of each other. In particular, \code{linkfun(theta, deriv = 2, inverse = TRUE)} and \code{linkfun(theta, deriv = 2, inverse = FALSE)} are \emph{not} reciprocals of each other in general. % Prior to 20150711; this was what it was: % If \code{inverse = FALSE} and \code{deriv = 1} then it is % \eqn{d\theta / d\eta}{d theta / d eta} % \emph{as a function of} \eqn{\theta}{theta}. % If \code{inverse = FALSE} and \code{deriv = 2} then it is % \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2} % \emph{as a function of} \eqn{\theta}{theta}. % If \code{inverse = TRUE} and \code{deriv = 0} then the inverse % link function is returned, hence \code{theta} is really % \eqn{\eta}{eta}. % If \code{inverse = TRUE} and \code{deriv} is positive then the % \emph{reciprocal} of the same link function with % \code{(theta = theta, someParameter, inverse = TRUE, deriv = deriv)} % is returned. } \details{ Almost all \pkg{VGAM} link functions have something similar to the argument list as given above. In this help file we have \eqn{\eta = g(\theta)}{eta = g(theta)} where \eqn{g} is the link function, \eqn{\theta}{theta} is the parameter and \eqn{\eta}{eta} is the linear/additive predictor. The link \eqn{g} must be strictly monotonic and twice-differentiable in its range. % The arguments \code{short} and \code{tag} are used only if % \code{theta} is character. % That is, there is a matching \code{earg} for each \code{link} argument. The following is a brief enumeration of all \pkg{VGAM} link functions. For parameters lying between 0 and 1 (e.g., probabilities): \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link{cauchitlink}}, \code{\link{foldsqrtlink}}, \code{\link{logclink}}, \code{\link{gordlink}}, \code{\link{pordlink}}, \code{\link{nbordlink}}. For positive parameters (i.e., greater than 0): \code{\link{loglink}}, \code{\link{negloglink}}, \code{\link{powerlink}}. For parameters greater than 1: \code{\link{logloglink}}, \code{\link{loglogloglink}} (greater than \eqn{e}). For parameters between \eqn{-1} and \eqn{1}: \code{\link{fisherzlink}}, \code{\link{rhobitlink}}. For parameters between \eqn{A} and \eqn{B}: \code{\link{extlogitlink}}, \code{\link{logofflink}} (\eqn{B = \infty}{B = Inf}). For unrestricted parameters (i.e., any value): \code{\link{identitylink}}, \code{\link{negidentitylink}}, \code{\link{reciprocallink}}, \code{\link{negreciprocallink}}. % Other links: } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. 
}
\section{Warning }{
The output of link functions changed at \pkg{VGAM} \code{0.9-9}
(date was around 2015-07).
Formerly, \code{linkfun(theta, deriv = 1)} is now
\code{linkfun(theta, deriv = 1, inverse = TRUE)},
or equivalently,
\code{1 / linkfun(theta, deriv = 1)}.
Also, formerly, \code{linkfun(theta, deriv = 2)} was
\code{1 / linkfun(theta, deriv = 2, inverse = TRUE)}.
This was a bug.
Altogether, these are big changes and the user should beware!

In \pkg{VGAM} \code{1.0-7} (January 2019) all link function names
were made to end in the characters \code{"link"},
e.g.,
\code{\link{loglink}} replaces \code{\link{loge}},
\code{\link{logitlink}} replaces \code{\link{logit}}.
To this end, most of them were renamed.
Upward compatibility holds for older link function names;
however, users should adopt the new names immediately.

% One day in the future, \emph{all} \pkg{VGAM} link functions
% may be renamed to end in the characters \code{"link"}.

}
\seealso{
\code{\link{TypicalVGAMfamilyFunction}},
\code{\link{linkfun}},
\code{\link{vglm}},
\code{\link{vgam}},
\code{\link{rrvglm}},
\code{\link{cqo}},
\code{\link{cao}}.
% \code{\link{uqo}}.

}
\author{T. W. Yee}
\note{
\pkg{VGAM} link functions are generally not compatible with other
functions outside the package.
In particular, they won't work with \code{\link[stats]{glm}} or
any other package for fitting GAMs.

From October 2006 onwards,
all \pkg{VGAM} family functions will only
contain one default value for each link argument rather than
giving a vector of choices.
For example, rather than
\code{binomialff(link = c("logitlink", "probitlink",
"clogloglink", "cauchitlink", "identitylink"), ...)}
it is now
\code{binomialff(link = "logitlink", ...)}.
No checking will be done to see if the user's choice is
reasonable.
This means that the user can write his/her own \pkg{VGAM} link
function and use it within any \pkg{VGAM} family function.
Altogether this provides greater flexibility.
The downside is that the user must specify the \emph{full} name
of the link function, by either assigning the link argument the
full name as a character string, or just the name itself.
See the examples below.

From August 2012 onwards, a major change in link functions
occurred.
Argument \code{esigma} (and the like such as \code{earg}) used to
be in \pkg{VGAM} prior to version 0.9-0 (released during the 2nd
half of 2012).
The major change is that arguments such as \code{offset} that
used to be passed in via those arguments can now be passed in
directly through the link function.
For example,
\code{gev(lshape = "logofflink", eshape = list(offset = 0.5))}
is replaced by
\code{gev(lshape = logofflink(offset = 0.5))}.
The \code{@misc} slot no longer has \code{link} and \code{earg}
components, but two other components replace these.
Functions such as
\code{dtheta.deta()},
\code{d2theta.deta2()},
\code{d3theta.deta3()},
\code{eta2theta()},
\code{theta2eta()}
are modified.

From January 2019 onwards, all link function names ended in
\code{"link"}.
See above for details.
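The derivative conventions described earlier under the returned
value can be checked numerically; a minimal sketch:
\preformatted{
p <- 0.3
# Only for deriv = 1 are the two calls reciprocals of each other:
logitlink(p, deriv = 1) * logitlink(p, deriv = 1, inverse = TRUE)  # 1
}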
} \examples{ logitlink("a") logitlink("a", short = FALSE) logitlink("a", short = FALSE, tag = TRUE) logofflink(1:5, offset = 1) # Same as log(1:5 + 1) powerlink(1:5, power = 2) # Same as (1:5)^2 \dontrun{ # This is old and no longer works: logofflink(1:5, earg = list(offset = 1)) powerlink(1:5, earg = list(power = 2)) } fit1 <- vgam(agaaus ~ altitude, binomialff(link = "clogloglink"), hunua) # best fit2 <- vgam(agaaus ~ altitude, binomialff(link = clogloglink ), hunua) # okay \dontrun{ # This no longer works since "clog" is not a valid VGAM link function: fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua) # not okay # No matter what the link, the estimated var-cov matrix is the same y <- rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1)) fit1 <- vglm(y ~ 1, betaR(lshape1 = "identitylink", lshape2 = "identitylink"), trace = TRUE, crit = "coef") fit2 <- vglm(y ~ 1, betaR(lshape1 = logofflink(offset = 1.1), lshape2 = logofflink(offset = 1.1)), trace=TRUE) vcov(fit1, untransform = TRUE) vcov(fit1, untransform = TRUE) - vcov(fit2, untransform = TRUE) # Should be all 0s \dontrun{ # This is old: fit1@misc$earg # Some 'special' parameters fit2@misc$earg # Some 'special' parameters are here } par(mfrow = c(2, 2)) p <- seq(0.05, 0.95, len = 200) # A rather restricted range x <- seq(-4, 4, len = 200) plot(p, logitlink(p), type = "l", col = "blue") plot(x, logitlink(x, inverse = TRUE), type = "l", col = "blue") plot(p, logitlink(p, deriv=1), type="l", col="blue") # 1 / (p*(1-p)) plot(p, logitlink(p, deriv=2), type="l", col="blue") # (2*p-1)/(p*(1-p))^2 } } \keyword{models} VGAM/man/dagumUC.Rd0000644000176200001440000000612413565414527013412 0ustar liggesusers\name{Dagum} \alias{Dagum} \alias{ddagum} \alias{pdagum} \alias{qdagum} \alias{rdagum} \title{The Dagum Distribution} \description{ Density, distribution function, quantile function and random generation for the Dagum distribution with shape parameters \code{a} and \code{p}, and scale parameter \code{scale}. } \usage{ ddagum(x, scale = 1, shape1.a, shape2.p, log = FALSE) pdagum(q, scale = 1, shape1.a, shape2.p, lower.tail = TRUE, log.p = FALSE) qdagum(p, scale = 1, shape1.a, shape2.p, lower.tail = TRUE, log.p = FALSE) rdagum(n, scale = 1, shape1.a, shape2.p) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1.a, shape2.p}{shape parameters.} \item{scale}{scale parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{ddagum} gives the density, \code{pdagum} gives the distribution function, \code{qdagum} gives the quantile function, and \code{rdagum} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{dagum}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The Dagum distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{dagum}}, \code{\link{genbetaII}}. 
} \examples{ probs <- seq(0.1, 0.9, by = 0.1) shape1.a <- 1; shape2.p <- 2 # Should be 0: max(abs(pdagum(qdagum(p = probs, shape1.a = shape1.a, shape2.p = shape2.p), shape1.a = shape1.a, shape2.p = shape2.p) - probs)) \dontrun{ par(mfrow = c(1, 2)) x <- seq(-0.01, 5, len = 401) plot(x, dexp(x), type = "l", col = "black", ylab = "", las = 1, ylim = c(0, 1), main = "Black is standard exponential, others are ddagum(x, ...)") lines(x, ddagum(x, shape1.a = shape1.a, shape2.p = 1), col = "orange") lines(x, ddagum(x, shape1.a = shape1.a, shape2.p = 2), col = "blue") lines(x, ddagum(x, shape1.a = shape1.a, shape2.p = 5), col = "green") legend("topright", col = c("orange","blue","green"), lty = rep(1, len = 3), legend = paste("shape1.a =", shape1.a, ", shape2.p =", c(1, 2, 5))) plot(x, pexp(x), type = "l", col = "black", ylab = "", las = 1, main = "Black is standard exponential, others are pdagum(x, ...)") lines(x, pdagum(x, shape1.a = shape1.a, shape2.p = 1), col = "orange") lines(x, pdagum(x, shape1.a = shape1.a, shape2.p = 2), col = "blue") lines(x, pdagum(x, shape1.a = shape1.a, shape2.p = 5), col = "green") legend("bottomright", col = c("orange","blue","green"), lty = rep(1, len = 3), legend = paste("shape1.a =", shape1.a, ", shape2.p =", c(1, 2, 5))) } } \keyword{distribution} VGAM/man/leukemia.Rd0000644000176200001440000000120113565414527013650 0ustar liggesusers%\name{aml} \name{leukemia} %\alias{aml} \alias{leukemia} \docType{data} \title{Acute Myelogenous Leukemia Survival Data} \description{Survival in patients with Acute Myelogenous Leukemia} \usage{ %data(aml) data(leukemia) } \format{ \tabular{ll}{ time:\tab survival or censoring time\cr status:\tab censoring status\cr x: \tab maintenance chemotherapy given? (factor)\cr } } \source{ Rupert G. Miller (1997), \emph{Survival Analysis}. John Wiley & Sons. ISBN: 0-471-25218-2. } \note{ This data set has been transferred from \pkg{survival} and renamed from \code{aml} to \code{leukemia}. } \keyword{datasets} VGAM/man/vglm.Rd0000644000176200001440000004774613565414527013051 0ustar liggesusers\name{vglm} \alias{vglm} %\alias{vglm.fit} \title{Fitting Vector Generalized Linear Models } \description{ \code{vglm} fits vector generalized linear models (VGLMs). This very large class of models includes generalized linear models (GLMs) as a special case. } \usage{ vglm(formula, family = stop("argument 'family' needs to be assigned"), data = list(), weights = NULL, subset = NULL, na.action = na.fail, etastart = NULL, mustart = NULL, coefstart = NULL, control = vglm.control(...), offset = NULL, method = "vglm.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, contrasts = NULL, constraints = NULL, extra = list(), form2 = NULL, qr.arg = TRUE, smart = TRUE, ...) } %- maybe also `usage' for other objects documented here. \arguments{ \item{formula}{ a symbolic description of the model to be fit. The RHS of the formula is applied to each linear predictor. The effect of different variables in each linear predictor can be controlled by specifying constraint matrices---see \code{constraints} below. } \item{family}{ a function of class \code{"vglmff"} (see \code{\link{vglmff-class}}) describing what statistical model is to be fitted. This is called a ``\pkg{VGAM} family function''. See \code{\link{CommonVGAMffArguments}} for general information about many types of arguments found in this type of function. 
The argument name \code{"family"} is used loosely and for the ease of existing \code{\link[stats]{glm}} users; there is no concept of a formal ``error distribution'' for VGLMs. Possibly the argument name should be better \code{"model"} but unfortunately that name has already been taken. } \item{data}{ an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which \code{vglm} is called. } \item{weights}{ an optional vector or matrix of (prior fixed and known) weights to be used in the fitting process. If the \pkg{VGAM} family function handles multiple responses (\eqn{Q > 1} of them, say) then \code{weights} can be a matrix with \eqn{Q} columns. Each column matches the respective response. If it is a vector (the usually case) then it is recycled into a matrix with \eqn{Q} columns. The values of \code{weights} must be positive; try setting a very small value such as \code{1.0e-8} to effectively delete an observation. % 20140507: Currently the \code{weights} argument does not support sampling weights from complex sampling designs. And currently sandwich estimators are not computed in any shape or form. The present weights are multiplied by the corresponding log-likelihood contributions and added to form the overall log-likelihood. % If \code{weights} is a matrix, % then it should be must be in \emph{matrix-band} form, whereby the % first \eqn{M} columns of the matrix are the diagonals, % followed by the upper-diagonal band, followed by the % band above that, etc. In this case, there can be up to % \eqn{M(M+1)} columns, with the last column corresponding % to the (1,\eqn{M}) elements of the weight matrices. } \item{subset}{ an optional logical vector specifying a subset of observations to be used in the fitting process. } \item{na.action}{ a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link[base]{options}}, and is \code{na.fail} if that is unset. The ``factory-fresh'' default is \code{na.omit}. } \item{etastart}{ optional starting values for the linear predictors. It is a \eqn{M}-column matrix with the same number of rows as the response. If \eqn{M = 1} then it may be a vector. Note that \code{etastart} and the output of \code{predict(fit)} should be comparable. Here, \code{fit} is the fitted object. Almost all \pkg{VGAM} family functions are self-starting. } \item{mustart}{ optional starting values for the fitted values. It can be a vector or a matrix; if a matrix, then it has the same number of rows as the response. Usually \code{mustart} and the output of \code{fitted(fit)} should be comparable. Most family functions do not make use of this argument because it is not possible to compute all \eqn{M} columns of \code{eta} from \code{mu}. } \item{coefstart}{ optional starting values for the coefficient vector. The length and order must match that of \code{coef(fit)}. } \item{control}{ a list of parameters for controlling the fitting process. See \code{\link{vglm.control}} for details. } \item{offset}{ a vector or \eqn{M}-column matrix of offset values. These are \emph{a priori} known and are added to the linear/additive predictors during fitting. } \item{method}{ the method to be used in fitting the model. The default (and presently only) method \code{vglm.fit()} uses iteratively reweighted least squares (IRLS). 
} \item{model}{ a logical value indicating whether the \emph{model frame} should be assigned in the \code{model} slot. } \item{x.arg, y.arg}{ logical values indicating whether the LM matrix and response vector/matrix used in the fitting process should be assigned in the \code{x} and \code{y} slots. Note that the model matrix is the LM matrix; to get the VGLM matrix type \code{model.matrix(vglmfit)} where \code{vglmfit} is a \code{vglm} object. } \item{contrasts}{ an optional list. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}. } \item{constraints}{ an optional \code{\link[base]{list}} of constraint matrices. The components of the list must be named (labelled) with the term it corresponds to (and it must match in character format \emph{exactly}---see below). There are two types of input: \code{"lm"}-type and \code{"vlm"}-type. The former is a subset of the latter. The former has a matrix for each term of the LM matrix. The latter has a matrix for each column of the big VLM matrix. After fitting, the \code{\link{constraints}} extractor function may be applied; it returns the \code{"vlm"}-type list of constraint matrices by default. If \code{"lm"}-type are returned by \code{\link{constraints}} then these can be fed into this argument and it should give the same model as before. If the \code{constraints} argument is used then the family function's \code{zero} argument (if it exists) needs to be set to \code{NULL}. This avoids what could be a probable contradiction. Sometimes setting other arguments related to constraint matrices to \code{FALSE} is also a good idea, e.g., \code{parallel = FALSE}, \code{exchangeable = FALSE}. Properties: each constraint matrix must have \eqn{M} rows, and be of full-column rank. By default, constraint matrices are the \eqn{M} by \eqn{M} identity matrix unless arguments in the family function itself override these values, e.g., \code{parallel} (see \code{\link{CommonVGAMffArguments}}). If \code{constraints} is used then it must contain \emph{all} the terms; an incomplete list is not accepted. As mentioned above, the labelling of each constraint matrix must match exactly, e.g., \code{list("s(x2,df=3)"=diag(2))} will fail as \code{as.character(~ s(x2,df=3))} produces white spaces: \code{"s(x2, df = 3)"}. Thus \code{list("s(x2, df = 3)" = diag(2))} is needed. See Example 6 below. More details are given in Yee (2015; Section 3.3.1.3) which is on p.101. Note that the label for the intercept is \code{"(Intercept)"}. } \item{extra}{ an optional list with any extra information that might be needed by the \pkg{VGAM} family function. } \item{form2}{ the second (optional) formula. If argument \code{xij} is used (see \code{\link{vglm.control}}) then \code{form2} needs to have \emph{all} terms in the model. Also, some \pkg{VGAM} family functions such as \code{\link{micmen}} use this argument to input the regressor variable. If given, the slots \code{@Xm2} and \code{@Ym2} may be assigned. Note that smart prediction applies to terms in \code{form2} too. } \item{qr.arg}{ logical value indicating whether the slot \code{qr}, which returns the QR decomposition of the VLM model matrix, is returned on the object. } \item{smart}{ logical value indicating whether smart prediction (\code{\link{smartpred}}) will be used. } \item{\dots}{ further arguments passed into \code{\link{vglm.control}}. 
} } \details{ A vector generalized linear model (VGLM) is loosely defined as a statistical model that is a function of \eqn{M} linear predictors and can be estimated by Fisher scoring. The central formula is given by \deqn{\eta_j = \beta_j^T x}{% eta_j = beta_j^T x} where \eqn{x}{x} is a vector of explanatory variables (sometimes just a 1 for an intercept), and \eqn{\beta_j}{beta_j} is a vector of regression coefficients to be estimated. Here, \eqn{j=1,\ldots,M}, where \eqn{M} is finite. Then one can write \eqn{\eta=(\eta_1,\ldots,\eta_M)^T}{eta=(eta_1,\ldots,\eta_M)^T} as a vector of linear predictors. Most users will find \code{vglm} similar in flavour to \code{\link[stats]{glm}}. The function \code{vglm.fit} actually does the work. % If more than one of \code{etastart}, \code{start} and \code{mustart} % is specified, the first in the list will be used. } \value{ An object of class \code{"vglm"}, which has the following slots. Some of these may not be assigned to save space, and will be recreated if necessary later. \item{extra}{the list \code{extra} at the end of fitting.} \item{family}{the family function (of class \code{"vglmff"}).} \item{iter}{the number of IRLS iterations used.} \item{predictors}{a \eqn{M}-column matrix of linear predictors.} \item{assign}{a named list which matches the columns and the (LM) model matrix terms.} \item{call}{the matched call.} \item{coefficients}{a named vector of coefficients.} \item{constraints}{ a named list of constraint matrices used in the fitting. } \item{contrasts}{the contrasts used (if any).} \item{control}{list of control parameter used in the fitting.} \item{criterion}{list of convergence criterion evaluated at the final IRLS iteration.} \item{df.residual}{the residual degrees of freedom.} \item{df.total}{the total degrees of freedom.} \item{dispersion}{the scaling parameter.} \item{effects}{the effects.} \item{fitted.values}{ the fitted values, as a matrix. This is often the mean but may be quantiles, or the location parameter, e.g., in the Cauchy model. } \item{misc}{a list to hold miscellaneous parameters.} \item{model}{the model frame.} \item{na.action}{a list holding information about missing values.} \item{offset}{if non-zero, a \eqn{M}-column matrix of offsets.} \item{post}{a list where post-analysis results may be put.} \item{preplot}{used by \code{\link{plotvgam}}, the plotting parameters may be put here.} \item{prior.weights}{ initially supplied weights (the \code{weights} argument). Also see \code{\link{weightsvglm}}. } \item{qr}{the QR decomposition used in the fitting.} \item{R}{the \bold{R} matrix in the QR decomposition used in the fitting.} \item{rank}{numerical rank of the fitted model.} \item{residuals}{the \emph{working} residuals at the final IRLS iteration.} \item{ResSS}{residual sum of squares at the final IRLS iteration with the adjusted dependent vectors and weight matrices.} \item{smart.prediction}{ a list of data-dependent parameters (if any) that are used by smart prediction. } \item{terms}{the \code{\link[stats]{terms}} object used.} \item{weights}{the working weight matrices at the final IRLS iteration. This is in matrix-band form.} \item{x}{the model matrix (linear model LM, not VGLM).} \item{xlevels}{the levels of the factors, if any, used in fitting.} \item{y}{the response, in matrix form.} This slot information is repeated at \code{\link{vglm-class}}. } \references{ Yee, T. W. (2015) Vector Generalized Linear and Additive Models: With an Implementation in R. New York, USA: \emph{Springer}. Yee, T. W. 
and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.

Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society,
Series B, Methodological},
\bold{58}, 481--493.

Yee, T. W. (2014)
Reduced-rank vector generalized linear models with
two linear predictors.
\emph{Computational Statistics and Data Analysis},
\bold{71}, 889--902.

Yee, T. W. (2008)
The \code{VGAM} Package.
\emph{R News}, \bold{8}, 28--39.

% Documentation accompanying the \pkg{VGAM} package at
% \url{http://www.stat.auckland.ac.nz/~yee}
% contains further information and examples.

}
\author{ Thomas W. Yee }
\note{
This function can fit a wide variety of statistical models.
Some of these are harder to fit than others because of inherent
numerical difficulties associated with some of them.
Successful model fitting benefits from cumulative experience.
Varying the values of arguments in the \pkg{VGAM} family function
itself is a good first step if difficulties arise,
especially if initial values can be inputted.
A second, more general, step is to vary the values of arguments in
\code{\link{vglm.control}}.
A third step is to make use of arguments
such as \code{etastart}, \code{coefstart} and \code{mustart}.

Some \pkg{VGAM} family functions end in \code{"ff"}
to avoid interference with other functions, e.g.,
\code{\link{binomialff}},
\code{\link{poissonff}}.
This is because \pkg{VGAM} family functions are incompatible with
\code{\link[stats]{glm}}
(and also \code{\link[gam]{gam}} in the \pkg{gam} library and
\code{\link[mgcv]{gam}} in the \pkg{mgcv} library).

% \code{gammaff}.
% \code{\link{gaussianff}},

The smart prediction (\code{\link{smartpred}}) library is
incorporated within the \pkg{VGAM} library.

The theory behind the scaling parameter is currently being made
more rigorous, but it should give the same value as the scale
parameter for GLMs.

In Example 5 below, the \code{xij} argument is used to illustrate
covariates that are specific to a linear predictor.
Here,
\code{lop}/\code{rop} are the ocular pressures
of the left/right eye (artificial data).
Variables \code{leye} and \code{reye} might be the
presence/absence of a particular disease on the LHS/RHS eye
respectively.
See
\code{\link{vglm.control}} and
\code{\link{fill}}
for more details and examples.

}
%~Make other sections like WARNING with \section{WARNING }{....} ~
\section{WARNING}{
See warnings in \code{\link{vglm.control}}.
Also, see warnings under \code{weights} above regarding sampling
weights from complex sampling designs.

}
\seealso{
\code{\link{vglm.control}},
\code{\link{vglm-class}},
\code{\link{vglmff-class}},
\code{\link{smartpred}},
\code{vglm.fit},
\code{\link{fill}},
\code{\link{rrvglm}},
\code{\link{vgam}}.
Methods functions include
\code{\link{add1.vglm}},
\code{\link{anova.vglm}},
\code{\link{AICvlm}},
\code{\link{coefvlm}},
\code{\link{confintvglm}},
\code{\link{constraints.vlm}},
\code{\link{drop1.vglm}},
\code{\link{fittedvlm}},
\code{\link{hatvaluesvlm}},
\code{\link{hdeff.vglm}},
\code{\link{linkfun.vglm}},
\code{\link{lrt.stat.vlm}},
\code{\link{score.stat.vlm}},
\code{\link{wald.stat.vlm}},
\code{\link{nobs.vlm}},
\code{\link{npred.vlm}},
\code{\link{plotvglm}},
\code{\link{predictvglm}},
\code{\link{residualsvglm}},
\code{\link{step4vglm}},
\code{\link{summaryvglm}},
\code{\link{lrtest_vglm}},
\code{\link[stats]{update}},
etc.

}
\examples{
# Example 1.
See help(glm) print(d.AD <- data.frame(treatment = gl(3, 3), outcome = gl(3, 1, 9), counts = c(18,17,15,20,10,20,25,13,12))) vglm.D93 <- vglm(counts ~ outcome + treatment, family = poissonff, data = d.AD, trace = TRUE) summary(vglm.D93) # Example 2. Multinomial logit model pneumo <- transform(pneumo, let = log(exposure.time)) vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo) # Example 3. Proportional odds model fit3 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo) coef(fit3, matrix = TRUE) constraints(fit3) model.matrix(fit3, type = "lm") # LM model matrix model.matrix(fit3) # Larger VGLM (or VLM) model matrix # Example 4. Bivariate logistic model fit4 <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers) coef(fit4, matrix = TRUE) depvar(fit4) # Responses are proportions weights(fit4, type = "prior") # Example 5. The use of the xij argument (simple case). # The constraint matrix for 'op' has one column. nn <- 1000 eyesdat <- round(data.frame(lop = runif(nn), rop = runif(nn), op = runif(nn)), digits = 2) eyesdat <- transform(eyesdat, eta1 = -1 + 2 * lop, eta2 = -1 + 2 * lop) eyesdat <- transform(eyesdat, leye = rbinom(nn, size = 1, prob = logitlink(eta1, inverse = TRUE)), reye = rbinom(nn, size = 1, prob = logitlink(eta2, inverse = TRUE))) head(eyesdat) fit5 <- vglm(cbind(leye, reye) ~ op, binom2.or(exchangeable = TRUE, zero = 3), data = eyesdat, trace = TRUE, xij = list(op ~ lop + rop + fill(lop)), form2 = ~ op + lop + rop + fill(lop)) coef(fit5) coef(fit5, matrix = TRUE) constraints(fit5) fit5@control$xij head(model.matrix(fit5)) # Example 6. The use of the 'constraints' argument. as.character(~ bs(year,df=3)) # Get the white spaces right clist <- list("(Intercept)" = diag(3), "bs(year, df = 3)" = rbind(1, 0, 0)) fit1 <- vglm(r1 ~ bs(year,df=3), gev(zero = NULL), data = venice, constraints = clist, trace = TRUE) coef(fit1, matrix = TRUE) # Check } \keyword{models} \keyword{regression} %eyesdat$leye <- ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$lop)), 1, 0) %eyesdat$reye <- ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$rop)), 1, 0) %coef(fit, matrix = TRUE, compress = FALSE) % 20090506 zz Put these examples elsewhere: % %# Example 6. The use of the xij argument (complex case). %# Here is one method to handle the xij argument with a term that %# produces more than one column in the model matrix. %# The constraint matrix for 'op' has essentially one column. %POLY3 <- function(x, ...) { % # A cubic; ensures that the basis functions are the same. % poly(c(x,...), 3)[1:length(x),] % head(poly(c(x,...), 3), length(x), drop = FALSE) %} % %fit6 <- vglm(cbind(leye, reye) ~ POLY3(op), trace = TRUE, % fam = binom2.or(exchangeable = TRUE, zero=3), data=eyesdat, % xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) + % fill(POLY3(lop,rop))), % form2 = ~ POLY3(op) + POLY3(lop,rop) + POLY3(rop,lop) + % fill(POLY3(lop,rop))) %coef(fit6) %coef(fit6, matrix = TRUE) %head(predict(fit6)) %\dontrun{ %plotvgam(fit6, se = TRUE) # Wrong since it plots against op, not lop. %} % % %# Example 7. The use of the xij argument (simple case). %# Each constraint matrix has 4 columns. %ymat <- rdiric(n <- 1000, shape=c(4,7,3,1)) %mydat <- data.frame(x1=runif(n), x2=runif(n), x3=runif(n), x4=runif(n), % z1=runif(n), z2=runif(n), z3=runif(n), z4=runif(n), % X2=runif(n), Z2=runif(n)) %mydat <- round(mydat, dig=2) %fit7 <- vglm(ymat ~ X2 + Z2, data=mydat, crit="c", % fam = dirichlet(parallel = TRUE), # Intercept is also parallel.
% xij = list(Z2 ~ z1 + z2 + z3 + z4, % X2 ~ x1 + x2 + x3 + x4), % form2 = ~ Z2 + z1 + z2 + z3 + z4 + % X2 + x1 + x2 + x3 + x4) %head(model.matrix(fit7, type="lm")) # LM model matrix %head(model.matrix(fit7, type="vlm")) # Big VLM model matrix %coef(fit7) %coef(fit7, matrix = TRUE) %max(abs(predict(fit7)-predict(fit7, new=mydat))) # Predicts correctly %summary(fit7) VGAM/man/skewnormUC.Rd0000644000176200001440000000463613565414527014170 0ustar liggesusers\name{skewnorm} \alias{skewnorm} \alias{dskewnorm} %\alias{pskewnorm} %\alias{qskewnorm} \alias{rskewnorm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Skew-Normal Distribution } \description{ Density and random generation for the univariate skew-normal distribution. % , distribution function, quantile function and } \usage{ dskewnorm(x, location = 0, scale = 1, shape = 0, log = FALSE) rskewnorm(n, location = 0, scale = 1, shape = 0) } %pskewnorm(q, lambda) %qskewnorm(p, lambda) %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{vector of quantiles.} % \item{x, q}{vector of quantiles.} % \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats]{runif}}. } \item{location}{ The location parameter \eqn{\xi}{xi}. A vector. } \item{scale}{ The scale parameter \eqn{\omega}{w}. A positive vector. } \item{shape}{ The shape parameter. It is called \eqn{\alpha}{alpha} in \code{\link{skewnormal}}. } \item{log}{ Logical. If \code{log=TRUE} then the logarithm of the density is returned. } } \details{ See \code{\link{skewnormal}}, which currently only estimates the shape parameter. More generally here, \eqn{Z = \xi + \omega Y}{Z = xi + w * Y} where \eqn{Y} has a standard skew-normal distribution (see \code{\link{skewnormal}}), \eqn{\xi}{xi} is the location parameter and \eqn{\omega}{w} is the scale parameter. } \value{ \code{dskewnorm} gives the density, \code{rskewnorm} generates random deviates. % \code{pskewnorm} gives the distribution function, % \code{qskewnorm} gives the quantile function, and } \references{ \code{http://tango.stat.unipd.it/SN}. % \url{http://tango.stat.unipd.it/SN}. } \author{ T. W. Yee } \note{ The default values of all three parameters correspond to the skew-normal being the standard normal distribution. } \seealso{ \code{\link{skewnormal}}. } \examples{ \dontrun{ N <- 200 # Grid resolution shape <- 7; x <- seq(-4, 4, len = N) plot(x, dskewnorm(x, shape = shape), type = "l", col = "blue", las = 1, ylab = "", lty = 1, lwd = 2) abline(v = 0, h = 0, col = "grey") lines(x, dnorm(x), col = "orange", lty = 2, lwd = 2) legend("topleft", leg = c(paste("Blue = dskewnorm(x, ", shape,")", sep = ""), "Orange = standard normal density"), lty = 1:2, lwd = 2, col = c("blue", "orange")) } } \keyword{distribution} VGAM/man/mix2normal.Rd0000644000176200001440000001475213565414527014163 0ustar liggesusers\name{mix2normal} \alias{mix2normal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Mixture of Two Univariate Normal Distributions } \description{ Estimates the five parameters of a mixture of two univariate normal distributions by maximum likelihood estimation. } \usage{ mix2normal(lphi = "logitlink", lmu = "identitylink", lsd = "loglink", iphi = 0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL, qmu = c(0.2, 0.8), eq.sd = TRUE, nsimEIM = 100, zero = "phi") } %- maybe also 'usage' for other objects documented here.
\arguments{ \item{lphi,lmu,lsd}{ Link functions for the parameters \eqn{\phi}{phi}, \eqn{\mu}{mu}, and \eqn{\sigma}{sd}. See \code{\link{Links}} for more choices. } % \item{ephi, emu1, emu2, esd1, esd2}{ % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. % If \code{eq.sd = TRUE} then \code{esd1} must equal \code{esd2}. % } \item{iphi}{ Initial value for \eqn{\phi}{phi}, whose value must lie between 0 and 1. } \item{imu1, imu2}{ Optional initial values for \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2}. The default is to compute initial values internally using the argument \code{qmu}. } \item{isd1, isd2}{ Optional initial values for \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2}. The default is to compute initial values internally based on the argument \code{qmu}. Currently these are not great, so supplying values for these arguments where practical is a good idea. } \item{qmu}{ Vector with two values giving the probabilities relating to the sample quantiles for obtaining initial values for \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2}. The two values are fed in as the \code{probs} argument into \code{\link[stats]{quantile}}. } \item{eq.sd}{ Logical indicating whether the two standard deviations should be constrained to be equal. If \code{TRUE} then the appropriate constraint matrices will be used. } \item{nsimEIM}{ See \code{\link{CommonVGAMffArguments}}. } \item{zero}{ May be an integer vector specifying which linear/additive predictors are modelled as intercept-only. If given, the value or values can be from the set \eqn{\{1,2,\ldots,5\}}{1,2,...,5}. The default is the first one only, meaning \eqn{\phi}{phi} is a single parameter even when there are explanatory variables. Set \code{zero = NULL} to model all linear/additive predictors as functions of the explanatory variables. See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The probability density function can be loosely written as \deqn{f(y) = \phi \, N(\mu_1,\sigma_1) + (1-\phi) \, N(\mu_2, \sigma_2)}{% f(y) = phi * N(mu1, sd1) + (1-phi) * N(mu2, sd2)} where \eqn{\phi}{phi} is the probability an observation belongs to the first group. The parameters \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} are the means, and \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2} are the standard deviations. The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}. The mean of \eqn{Y} is \eqn{\phi \mu_1 + (1-\phi) \mu_2}{phi*mu1 + (1-phi)*mu2} and this is returned as the fitted values. By default, the five linear/additive predictors are \eqn{(logit(\phi), \mu_1, \log(\sigma_1), \mu_2, \log(\sigma_2))^T}{ (logit(phi), mu1, log(sd1), mu2, log(sd2))^T}. If \code{eq.sd = TRUE} then \eqn{\sigma_1 = \sigma_2}{sd1=sd2} is enforced. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ McLachlan, G. J. and Peel, D. (2000) \emph{Finite Mixture Models}. New York: Wiley. Everitt, B. S. and Hand, D. J. (1981) \emph{Finite Mixture Distributions}. London: Chapman & Hall. } \section{Warning }{ Numerical problems can occur and half-stepping is not uncommon. If failure to converge occurs, try inputting better initial values, e.g., by using \code{iphi}, \code{qmu}, \code{imu1}, \code{imu2}, \code{isd1}, \code{isd2}, etc. % This function uses a quasi-Newton update for the working weight matrices % (BFGS variant).
It builds up approximations to the weight matrices, % and currently the code is not fully tested. % In particular, results based on the weight matrices (e.g., from % \code{vcov} and \code{summary}) may be quite incorrect, especially when % the arguments \code{weights} is used to input prior weights. This \pkg{VGAM} family function is experimental and should be used with care. } \author{ T. W. Yee } \note{ Fitting this model successfully to data can be difficult due to numerical problems and ill-conditioned data. It pays to fit the model several times with different initial values and check that the best fit looks reasonable. Plotting the results is recommended. This function works better as \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} become more different. Convergence can be slow, especially when the two component distributions are not well separated. Setting the control argument \code{trace = TRUE} is encouraged so that convergence can be monitored. Having \code{eq.sd = TRUE} often makes the overall optimization problem easier. } \seealso{ \code{\link{uninormal}}, \code{\link[stats:Normal]{Normal}}, \code{\link{mix2poisson}}. } \examples{ \dontrun{ mu1 <- 99; mu2 <- 150; nn <- 1000 sd1 <- sd2 <- exp(3) (phi <- logitlink(-1, inverse = TRUE)) mdata <- data.frame(y = ifelse(runif(nn) < phi, rnorm(nn, mu1, sd1), rnorm(nn, mu2, sd2))) fit <- vglm(y ~ 1, mix2normal(eq.sd = TRUE), data = mdata) # Compare the results cfit <- coef(fit) round(rbind('Estimated' = c(logitlink(cfit[1], inverse = TRUE), cfit[2], exp(cfit[3]), cfit[4]), 'Truth' = c(phi, mu1, sd1, mu2)), digits = 2) # Plot the results xx <- with(mdata, seq(min(y), max(y), len = 200)) plot(xx, (1-phi) * dnorm(xx, mu2, sd2), type = "l", xlab = "y", main = "Orange = estimate, blue = truth", col = "blue", ylab = "Density") phi.est <- logitlink(coef(fit)[1], inverse = TRUE) sd.est <- exp(coef(fit)[3]) lines(xx, phi*dnorm(xx, mu1, sd1), col = "blue") lines(xx, phi.est * dnorm(xx, Coef(fit)[2], sd.est), col = "orange") lines(xx, (1-phi.est) * dnorm(xx, Coef(fit)[4], sd.est), col = "orange") abline(v = Coef(fit)[c(2,4)], lty = 2, col = "orange") abline(v = c(mu1, mu2), lty = 2, col = "blue") } } \keyword{models} \keyword{regression} VGAM/man/Coef.Rd0000644000176200001440000000415713565414527012745 0ustar liggesusers\name{Coef} \alias{Coef} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Computes Model Coefficients and Quantities } \description{ \code{Coef} is a generic function which computes model coefficients from objects returned by modelling functions. It is an auxiliary function to \code{\link[stats]{coef}} that enables extra capabilities for some specific models. } \usage{ Coef(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the computation of other types of model coefficients or quantities is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. } } \details{ This function can often be useful for \code{\link{vglm}} objects with just an intercept term in the RHS of the formula, e.g., \code{y ~ 1}. Then often this function will apply the inverse link functions to the parameters. See the example below. For reduced-rank VGLMs, this function can return the \bold{A}, \bold{C} matrices, etc. For quadratic and additive ordination models, this function can return ecologically meaningful quantities such as tolerances, optimums and maximums. } \value{ The value returned depends specifically on the methods function invoked.
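% A hedged sketch of the reduced-rank case mentioned in the details
% above (the assumption here is that a rank-1 rrvglm() fit to the
% 'pneumo' data is sensible; it is not part of the example below):
% rrfit <- rrvglm(cbind(normal, mild, severe) ~ let, multinomial,
%                 data = transform(pneumo, let = log(exposure.time)))
% Coef(rrfit)  # Includes the estimated A and C matrices, etc.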
} \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{ Thomas W. Yee } %\note{ %} \section{Warning }{ This function may not work for \emph{all} \pkg{VGAM} family functions. You should check your results on some artificial data before applying it to models fitted to real data. } \seealso{ \code{\link[stats]{coef}}, \code{\link{Coef.vlm}}, \code{\link{Coef.rrvglm}}, \code{\link{Coef.qrrvglm}}, \code{\link{depvar}}. } \examples{ nn <- 1000 bdata <- data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3)) # Original scale fit <- vglm(y ~ 1, betaR, data = bdata, trace = TRUE) # Intercept-only model coef(fit, matrix = TRUE) # Both on a log scale Coef(fit) # On the original scale } \keyword{models} \keyword{regression} VGAM/man/negbinomial.Rd0000644000176200001440000006251513565414527014357 0ustar liggesusers\name{negbinomial} \alias{negbinomial} \alias{polya} \alias{polyaR} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Negative Binomial Distribution Family Function } \description{ Maximum likelihood estimation of the two parameters of a negative binomial distribution. } \usage{ negbinomial(zero = "size", parallel = FALSE, deviance.arg = FALSE, type.fitted = c("mean", "quantiles"), percentiles = c(25, 50, 75), mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, lmu = "loglink", lsize = "loglink", imethod = 1, imu = NULL, iprobs.y = NULL, gprobs.y = ppoints(6), isize = NULL, gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) polya(zero = "size", type.fitted = c("mean", "prob"), mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, lprob = "logitlink", lsize = "loglink", imethod = 1, iprob = NULL, iprobs.y = NULL, gprobs.y = ppoints(6), isize = NULL, gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imunb = NULL) polyaR(zero = "size", type.fitted = c("mean", "prob"), mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, lsize = "loglink", lprob = "logitlink", imethod = 1, iprob = NULL, iprobs.y = NULL, gprobs.y = ppoints(6), isize = NULL, gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imunb = NULL) } % deviance.arg = FALSE, %- maybe also 'usage' for other objects documented here. \arguments{ \item{zero}{ Can be an integer-valued vector, and if so, then it is usually assigned \eqn{-2} or \eqn{2}. Specifies which of the two linear/additive predictors are modelled as an intercept only. By default, the \eqn{k} parameter (after \code{lsize} is applied) is modelled as a single unknown number that is estimated. It can be modelled as a function of the explanatory variables by setting \code{zero = NULL}; this has been called a NB-H model by Hilbe (2011). A negative value means that the value is recycled, so setting \eqn{-2} means all \eqn{k} are intercept-only. See \code{\link{CommonVGAMffArguments}} for more information. % 20190119; getarg() fixes this problem: % Because of the new labelling for \code{\link{nbcanlink}} the default % is now \code{-2} rather than \code{"size"}; the latter is more % understandable really. } \item{lmu, lsize, lprob}{ Link functions applied to the \eqn{\mu}{mu}, \eqn{k} and \eqn{p} parameters. See \code{\link{Links}} for more choices. Note that the \eqn{\mu}{mu}, \eqn{k} and \eqn{p} parameters are the \code{mu}, \code{size} and \code{prob} arguments of \code{\link[stats:NegBinomial]{rnbinom}} respectively. 
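% A minimal NB-H sketch relating to the 'zero' argument above (an
% assumption: a data frame 'ndata' with count response 'y1' and
% covariate 'x2', as in Example 2 below). Setting 'zero = NULL' lets
% the size parameter k also depend on the covariates:
% fitH <- vglm(y1 ~ x2, negbinomial(zero = NULL), data = ndata)
% coef(fitH, matrix = TRUE)  # Both linear predictors are functions of x2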
Common alternatives for \code{lsize} are \code{\link{negloglink}} and \code{\link{reciprocallink}}, and \code{\link{logloglink}} (if \eqn{k > 1}). } \item{imu, imunb, isize, iprob}{ Optional initial values for the mean and \eqn{k} and \eqn{p}. For \eqn{k}, if failure to converge occurs then try different values (and/or use \code{imethod}). For an \eqn{S}-column response, \code{isize} can be of length \eqn{S}. A value \code{NULL} means an initial value for each response is computed internally using a gridsearch based on \code{gsize.mux}. The last argument is ignored if used within \code{\link{cqo}}; see the \code{iKvector} argument of \code{\link{qrrvglm.control}} instead. In the future \code{isize} and \code{iprob} might be deprecated. } \item{nsimEIM}{ This argument is used for computing the diagonal element of the \emph{expected information matrix} (EIM) corresponding to \eqn{k} based on the \emph{simulated Fisher scoring} (SFS) algorithm. See \code{\link{CommonVGAMffArguments}} for more information and the notes below. SFS is one of two algorithms for computing the EIM elements (so that both algorithms may be used on a given data set). SFS is faster than the exact method when \code{Qmax} is large. } \item{cutoff.prob}{ Fed into the \code{p} argument of \code{\link[stats:NegBinomial]{qnbinom}} in order to obtain an upper limit for the approximate support of the distribution, called \code{Qmax}, say. Similarly, the value \code{1-p} is fed into the \code{p} argument of \code{\link[stats:NegBinomial]{qnbinom}} in order to obtain a lower limit for the approximate support of the distribution, called \code{Qmin}, say. Hence the approximate support is \code{Qmin:Qmax}. This argument should be a numeric and close to 1 but never exactly 1. Used to specify how many terms of the infinite series for computing the second diagonal element of the EIM are actually used. The closer this argument is to 1, the more accurate the standard errors of the regression coefficients will be. If this argument is too small, convergence will take longer. % The sum of the probabilites are added until they reach % at least this value. % (but no more than \code{Maxiter} terms allowed). % Used in the finite series approximation. % It is like specifying \code{p} in an imaginary function \code{qnegbin(p)}. } \item{max.chunk.MB, max.support}{ \code{max.support} is used to describe the eligibility of individual observations to have their EIM computed by the \emph{exact method}. Here, we are concerned about computing the EIM wrt \eqn{k}. The exact method algorithm operates separately on each response variable, and it constructs a large matrix provided that the number of columns is less than \code{max.support}. If so, then the computations are done in chunks, so that no more than about \code{max.chunk.MB} megabytes of memory is used at a time (actually, it is proportional to this amount). Regarding eligibility of this algorithm, each observation must have the length of the vector, starting from the \code{1-cutoff.prob} quantile and finishing up at the \code{cutoff.prob} quantile, less than \code{max.support} (as its approximate support). If you have abundant memory then you might try setting \code{max.chunk.MB = Inf}, but then the computations might take a very long time. Setting \code{max.chunk.MB = 0} or \code{max.support = 0} will force the EIM to be computed using the SFS algorithm only (this \emph{used to be} the default method for \emph{all} the observations).
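% Illustrating 'cutoff.prob' and the approximate support Qmin:Qmax
% described above (a sketch; mu = 10 and k = 2 are arbitrary values):
% cutoff.prob <- 0.999
% (Qmax <- qnbinom(cutoff.prob, mu = 10, size = 2))      # Upper limit
% (Qmin <- qnbinom(1 - cutoff.prob, mu = 10, size = 2))  # Lower limit
% # The exact method then works with the support Qmin:Qmax.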
When the fitted values of the model are large and \eqn{k} is small, the computation of the EIM will be costly with respect to time and memory if the exact method is used. Hence the argument \code{max.support} limits the cost in terms of time. For intercept-only models \code{max.support} is multiplied by a number (such as 10) because only one inner product needs to be computed. Note: \code{max.support} is an upper bound and limits the number of terms dictated by the \code{eps.trig} argument. % Thus the number of columns of the matrix can be controlled by % the argument \code{cutoff.prob}. } \item{mds.min}{ Numeric. Minimum value of the NBD mean divided by \code{size} parameter. The closer this ratio is to 0, the closer the distribution is to a Poisson. Iterations will stop when an estimate of \eqn{k} is so large relative to the mean that this ratio falls below the threshold (this is treated as a boundary of the parameter space). } \item{eps.trig}{ Numeric. A small positive value used in the computation of the EIMs. It focusses on the denominator of the terms of a series. Each term in the series (that is used to approximate an infinite series) has a value greater than \code{size / sqrt(eps.trig)}, thus very small terms are ignored. It's a good idea to set a smaller value that will result in more accuracy, but it will require a greater computing time (when \eqn{k} is close to 0). An adjustment to \code{max.support} may be needed. In particular, the quantity computed by special means is \eqn{\psi'(k) - E[\psi'(Y+k)]}{trigamma(k) - E[trigamma(Y+k)]}, which is the difference between two \code{\link[base]{trigamma}} functions. It is part of the calculation of the EIM with respect to the \code{size} parameter. } \item{gsize.mux}{ Similar to \code{gsigma} in \code{\link{CommonVGAMffArguments}}. However, this grid is multiplied by the initial estimates of the NBD mean parameter. That is, it is on a relative scale rather than on an absolute scale. If the counts are very large in value then convergence failure might occur; if so, then try a smaller value such as \code{gsize.mux = exp(-40)}. } % \item{Maxiter}{ % Used in the finite series approximation. % Integer. The maximum number of terms allowed when computing % the second diagonal element of the EIM. % In theory, the value involves an infinite series. % If this argument is too small then the value may be inaccurate. % } \item{type.fitted, percentiles}{ See \code{\link{CommonVGAMffArguments}} for more information. } \item{deviance.arg}{ Logical. If \code{TRUE}, the deviance is computed \emph{after} convergence. It only works in the NB-2 model. It is also necessary to set \code{criterion = "coefficients"} or \code{half.step = FALSE} since one cannot use that criterion properly for the minimization within the IRLS algorithm. It should be set \code{TRUE} when used with \code{\link{cqo}} under the fast algorithm. % Pre-20131212: % If \code{TRUE}, the deviance function is attached % to the object. Under ordinary circumstances, it should be % left alone because it really assumes the index parameter % is at the maximum likelihood estimate. Consequently, % one cannot use that criterion to minimize within the % IRLS algorithm. It should be set \code{TRUE} only when % used with \code{\link{cqo}} under the fast algorithm. } \item{imethod}{ An integer with value \code{1} or \code{2} etc. which specifies the initialization method for the \eqn{\mu}{mu} parameter.
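% A simulation check of the 'eps.trig' quantity mentioned above, i.e.,
% trigamma(k) - E[trigamma(Y + k)] (a sketch; k = 2 and mu = 5 are
% arbitrary values, and 1e5 replicates are assumed adequate):
% k <- 2; mu <- 5; y <- rnbinom(1e5, mu = mu, size = k)
% trigamma(k) - mean(trigamma(y + k))  # Positive; small when k is large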
If failure to converge occurs, try another value and/or specify a value for \code{iprobs.y} and/or a value for \code{isize}. } \item{parallel}{ See \code{\link{CommonVGAMffArguments}} for more information. Setting \code{parallel = TRUE} is useful in order to get something similar to \code{\link[stats]{quasipoisson}} or what is known as NB-1. If \code{parallel = TRUE} then the parallelism constraint does not apply to any intercept term. You should set \code{zero = NULL} too if \code{parallel = TRUE} to avoid a conflict. } \item{gprobs.y}{ A vector representing a grid; passed into the \code{probs} argument of \code{\link[stats:quantile]{quantile}} when \code{imethod = 1} to obtain an initial value for the mean of each response. Is overwritten by any value of \code{iprobs.y}. } \item{iprobs.y}{ Passed into the \code{probs} argument of \code{\link[stats:quantile]{quantile}} when \code{imethod = 1} to obtain an initial value for the mean of each response. Overwrites any value of \code{gprobs.y}. This argument might be deleted in the future. } % \item{ishrinkage}{ % How much shrinkage is used when initializing \eqn{\mu}{mu}. % The value must be between 0 and 1 inclusive, and % a value of 0 means the individual response values are used, % and a value of 1 means the median or mean is used. % This argument is used in conjunction with \code{imethod}. % If convergence failure occurs try setting this argument to 1. % } } \details{ The negative binomial distribution (NBD) can be motivated in several ways, e.g., as a Poisson distribution with a mean that is gamma distributed. There are several common parametrizations of the NBD. The one used by \code{negbinomial()} uses the mean \eqn{\mu}{mu} and an \emph{index} parameter \eqn{k}, both of which are positive. Specifically, the density of a random variable \eqn{Y} is \deqn{f(y;\mu,k) = {y + k - 1 \choose y} \, \left( \frac{\mu}{\mu+k} \right)^y\, \left( \frac{k}{k+\mu} \right)^k }{% f(y;mu,k) = C_{y}^{y + k - 1} [mu/(mu+k)]^y [k/(k+mu)]^k} where \eqn{y=0,1,2,\ldots}, and \eqn{\mu > 0}{mu > 0} and \eqn{k > 0}. Note that the \emph{dispersion} parameter is \eqn{1/k}, so that as \eqn{k} approaches infinity the NBD approaches a Poisson distribution. The response has variance \eqn{Var(Y)=\mu+\mu^2/k}{Var(Y)=mu*(1+mu/k)}. When fitted, the \code{fitted.values} slot of the object contains the estimated value of the \eqn{\mu}{mu} parameter, i.e., of the mean \eqn{E(Y)}. It is common for some to use \eqn{\alpha=1/k}{alpha=1/k} as the ancillary or heterogeneity parameter; so common alternatives for \code{lsize} are \code{\link{negloglink}} and \code{\link{reciprocallink}}. For \code{polya} the density is \deqn{f(y;p,k) = {y + k - 1 \choose y} \, \left( 1 - p \right)^y\, p^k }{% f(y;p,k) = C_{y}^{y + k - 1} [1 - p]^y p^k} where \eqn{y=0,1,2,\ldots}, and \eqn{k > 0} and \eqn{0 < p < 1}{0 < p < 1}. Family function \code{polyaR()} is the same as \code{polya()} except the order of the two parameters is switched. The reason is that \code{polyaR()} tries to match with \code{\link[stats:NegBinomial]{rnbinom}} closely in terms of the argument order, etc. Should the probability parameter be of primary interest, users will probably prefer using \code{polya()} rather than \code{polyaR()}. Possibly \code{polyaR()} will be decommissioned one day. The NBD can be coerced into the classical GLM framework with one of the parameters being of interest and the other treated as a nuisance/scale parameter (this is implemented in the \pkg{MASS} library).
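% For comparison, the nuisance-parameter GLM approach just mentioned
% (a sketch; assumes MASS is installed and that a data frame 'ndata'
% with count response 'y1' and covariate 'x2' exists, as in Example 2
% below):
% library(MASS)
% fit.nb <- glm.nb(y1 ~ x2, data = ndata)
% summary(fit.nb)  # 'theta' plays the role of the index parameter k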
The \pkg{VGAM} family function \code{negbinomial()} treats both parameters on the same footing, and estimates them both by full maximum likelihood estimation. % SFS is employed as the default (see the \code{nsimEIM} % argument). The parameters \eqn{\mu}{mu} and \eqn{k} are independent (diagonal EIM), and the confidence region for \eqn{k} is extremely skewed so that its standard error is often of no practical use. The parameter \eqn{1/k} has been used as a measure of aggregation. For the NB-C the EIM is not diagonal. These \pkg{VGAM} family functions handle \emph{multiple} responses, so that a response matrix can be inputted. The number of columns is the number of species, say, and setting \code{zero = -2} means that \emph{all} species have a \eqn{k} equalling a (different) intercept only. } \section{Warning}{ Poisson regression corresponds to \eqn{k} equalling infinity. If the data is Poisson or close to Poisson, numerical problems may occur. Some corrective measures are taken, e.g., \eqn{k} is effectively capped (relative to the mean) during estimation to some large value and a warning is issued. And setting \code{stepsize = 0.5} for half stepping is probably a good idea too when the data is extreme. % Possibly setting \code{crit = "coef"} is a good idea because % the log-likelihood is often a \code{NaN} when the \code{size} % value is very large. % Note that \code{dnbinom(0, mu, size = Inf)} currently % is a \code{NaN} (a bug), % therefore if the data has some 0s then % setting \code{crit = "coef"} will avoid the problem that % the log-likelihood will be undefined during the last % stages of estimation. % Possibly choosing a log-log link may help in such cases, % otherwise try \code{\link{poissonff}} or % \code{\link{quasipoissonff}}. It is possible to fit a NBD % that has a similar variance function as a quasi-Poisson; see % the NB-1 example below. The NBD is a strictly unimodal distribution. Any data set that does not exhibit a mode (somewhere in the middle) makes the estimation problem difficult. Set \code{trace = TRUE} to monitor convergence. These functions are fragile; the maximum likelihood estimate of the index parameter is fraught with difficulty (see Lawless, 1987). Other alternatives to \code{negbinomial} are to fit a NB-1 or RR-NB (aka NB-P) model; see Yee (2014). Also available are the NB-C, NB-H and NB-G. Assigning values to the \code{isize} argument may lead to a local solution, and smaller values are preferred over large values when using this argument. % In general, the \code{\link{quasipoissonff}} is more robust. If one wants to force SFS to be used on all observations, then set \code{max.support = 0} or \code{max.chunk.MB = 0}. If one wants to force the exact method to be used for all observations, then set \code{max.support = Inf}. If the computer has \emph{much} memory, then trying \code{max.chunk.MB = Inf} and \code{max.support = Inf} may provide a small speed increase. If SFS is used at all, then the \code{@weights} slot of the fitted object will be a matrix; otherwise that slot will be a \code{0 x 0} matrix. Yet to do: write a family function which uses the method of moments estimator for \eqn{k}.
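% A method-of-moments starting value for k, obtained by equating the
% sample variance to mu + mu^2 / k and solving (a sketch; 'mom.k' is a
% hypothetical helper, not a VGAM function):
% mom.k <- function(y) {
%   m <- mean(y); v <- var(y)
%   if (v > m) m^2 / (v - m) else Inf  # Inf signals Poisson-like data
% }
% mom.k(rnbinom(1000, mu = 4, size = 2))  # Should be near 2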
(2011) \emph{Negative Binomial Regression}, 2nd Edition. Cambridge: Cambridge University Press. Bliss, C. and Fisher, R. A. (1953) Fitting the negative binomial distribution to biological data. \emph{Biometrics} \bold{9}, 174--200. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. } \author{ Thomas W. Yee, and with a lot of help by Victor Miranda to get it going with \code{\link{nbcanlink}} (NB-C). } \note{ % The \pkg{VGAM} package has a few other family functions for the % negative binomial distribution. Currently, none of these others work % very well. These 3 functions implement 2 common parameterizations of the negative binomial (NB). Some people call the NB with integer \eqn{k} the \emph{Pascal} distribution, whereas if \eqn{k} is real then this is the \emph{Polya} distribution. I don't. The one matching the details of \code{\link[stats:NegBinomial]{rnbinom}} in terms of \eqn{p} and \eqn{k} is \code{polya()}. For \code{polya()} the code may fail when \eqn{p} is close to 0 or 1. It is not yet compatible with \code{\link{cqo}} or \code{\link{cao}}. Suppose the response is called \code{ymat}. For \code{negbinomial()} the diagonal element of the \emph{expected information matrix} (EIM) for parameter \eqn{k} involves an infinite series; consequently SFS (see \code{nsimEIM}) is used as the backup algorithm only. SFS should be better if \code{max(ymat)} is large, e.g., \code{max(ymat) > 1000}, or if there are any outliers in \code{ymat}. The default algorithm involves a finite series approximation to the support \code{0:Inf}; the arguments \code{max.support}, \code{max.chunk.MB} and \code{cutoff.prob} are pertinent. % \code{slope.mu}, % the arguments \code{Maxiter} and % can be invoked by setting \code{nsimEIM = NULL}. Regardless of the algorithm used, convergence problems may occur, especially when the response has large outliers or is large in magnitude. If convergence failure occurs, try using arguments (in recommended decreasing order) \code{max.support}, \code{nsimEIM}, \code{cutoff.prob}, \code{iprobs.y}, \code{imethod}, \code{isize}, \code{zero}, \code{max.chunk.MB}. The function \code{negbinomial} can be used by the fast algorithm in \code{\link{cqo}}, however, setting \code{eq.tolerances = TRUE} and \code{I.tolerances = FALSE} is recommended. % For \code{\link{cqo}} and \code{\link{cao}}, taking the square-root % of the response means (approximately) a \code{\link{poissonff}} family % may be used on the transformed data. % If the negative binomial family function \code{\link{negbinomial}} % is used for \code{cqo} then set \code{negbinomial(deviance = TRUE)} % is necessary. This means to minimize the deviance, which the fast % algorithm can handle. In the first example below (Bliss and Fisher, 1953), from each of 6 McIntosh apple trees in an orchard that had been sprayed, 25 leaves were randomly selected. On each of the leaves, the number of adult female European red mites was counted. There are two special uses of \code{negbinomial} for handling count data. Firstly, when used by \code{\link{rrvglm}} this results in a continuum of models in between and inclusive of quasi-Poisson and negative binomial regression. This is known as a reduced-rank negative binomial model \emph{(RR-NB)}.
It fits a negative binomial log-linear regression with variance function \eqn{Var(Y)=\mu+\delta_1 \mu^{\delta_2}}{Var(Y) = mu + delta1 * mu^delta2} where \eqn{\delta_1}{delta1} and \eqn{\delta_2}{delta2} are parameters to be estimated by MLE. Confidence intervals are available for \eqn{\delta_2}{delta2}, therefore it can be decided whether the data are better described by quasi-Poisson or by negative binomial regression. Secondly, the use of \code{negbinomial} with \code{parallel = TRUE} inside \code{\link{vglm}} can result in a model similar to \code{\link[stats]{quasipoisson}}. This is named the \emph{NB-1} model. The dispersion parameter is estimated by MLE whereas \code{\link[stats:glm]{glm}} uses the method of moments. In particular, it fits a negative binomial log-linear regression with variance function \eqn{Var(Y) = \phi_0 \mu}{Var(Y) = phi0 * mu} where \eqn{\phi_0}{phi0} is a parameter to be estimated by MLE. Confidence intervals are available for \eqn{\phi_0}{phi0}. } \seealso{ \code{\link[stats]{quasipoisson}}, \code{\link{poissonff}}, \code{\link{zinegbinomial}}, \code{\link{negbinomial.size}} (e.g., NB-G), \code{\link{nbcanlink}} (NB-C), \code{\link{posnegbinomial}}, \code{\link{inv.binomial}}, \code{\link[stats:NegBinomial]{NegBinomial}}, \code{\link{nbordlink}}, \code{\link{rrvglm}}, \code{\link{cao}}, \code{\link{cqo}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}, \code{\link[stats:ppoints]{ppoints}}, % \code{\link[stats:NegBinomial]{rnbinom}}, % \code{\link[stats:NegBinomial]{qnbinom}}. % \code{\link[MASS]{rnegbin}}. % \code{\link{quasipoissonff}}, } \examples{ # Example 1: apple tree data (Bliss and Fisher, 1953) appletree <- data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1)) fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree, weights = w, crit = "coef") # Obtain the deviance fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree, weights = w, half.step = FALSE) # Alternative method summary(fit) coef(fit, matrix = TRUE) Coef(fit) # For intercept-only models deviance(fit) # NB2 only; needs 'crit = "coef"' & 'deviance = TRUE' above # Example 2: simulated data with multiple responses \dontrun{ ndata <- data.frame(x2 = runif(nn <- 200)) ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)), y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0))) fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE) coef(fit1, matrix = TRUE) } # Example 3: large counts imply SFS is used \dontrun{ ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(10+x2), size = exp(1))) with(ndata, range(y3)) # Large counts fit2 <- vglm(y3 ~ x2, negbinomial, data = ndata, trace = TRUE) coef(fit2, matrix = TRUE) head(fit2@weights) # Non-empty; SFS was used } # Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu nn <- 200 # Number of observations phi0 <- 10 # Specify this; should be greater than unity delta0 <- 1 / (phi0 - 1) mydata <- data.frame(x2 = runif(nn), x3 = runif(nn)) mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3)) mydata <- transform(mydata, y3 = rnbinom(nn, mu = mu, size = delta0 * mu)) \dontrun{ plot(y3 ~ x2, data = mydata, pch = "+", col = "blue", main = paste("Var(Y) = ", phi0, " * mu", sep = ""), las = 1) } nb1 <- vglm(y3 ~ x2 + x3, negbinomial(parallel = TRUE, zero = NULL), data = mydata, trace = TRUE) # Extracting out some quantities: cnb1 <- coef(nb1, matrix = TRUE) mydiff <- (cnb1["(Intercept)", "loglink(size)"] - cnb1["(Intercept)", "loglink(mu)"]) delta0.hat <- exp(mydiff) (phi.hat <- 1 + 1 /
delta0.hat) # MLE of phi summary(nb1) # Obtain a 95 percent confidence interval for phi0: myvec <- rbind(-1, 1, 0, 0) (se.mydiff <- sqrt(t(myvec) \%*\% vcov(nb1) \%*\% myvec)) ci.mydiff <- mydiff + c(-1.96, 1.96) * c(se.mydiff) ci.delta0 <- ci.exp.mydiff <- exp(ci.mydiff) (ci.phi0 <- 1 + 1 / rev(ci.delta0)) # The 95 percent conf. interval for phi0 Confint.nb1(nb1) # Quick way to get it summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper # cf. moment estimator } \keyword{models} \keyword{regression} %lmu = "loglink", lsize = "loglink", % imu = NULL, isize = NULL, % nsimEIM = 250, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % deviance.arg = FALSE, imethod = 1, % probs.y = 0.75, ishrinkage = 0.95, % gsize = exp((-4):4), % parallel = FALSE, ishrinkage = 0.95, zero = "size") %polya(lprob = "logitlink", lsize = "loglink", % iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100, % imethod = 1, ishrinkage = 0.95, zero = "size") %polyaR(lsize = "loglink", lprob = "logitlink", % isize = NULL, iprob = NULL, probs.y = 0.75, nsimEIM = 100, % imethod = 1, ishrinkage = 0.95, zero = "size") VGAM/man/zero.Rd0000644000176200001440000000614213565414527013044 0ustar liggesusers\name{zero} % \alias{zeroarg} \alias{zero} \title{ The zero Argument in VGAM Family Functions } \description{ The \code{zero} argument allows users to conveniently model certain linear/additive predictors as intercept-only. } % \usage{ % VGAMfamilyFunction(zero = 3) % } \value{ Nothing is returned. It is simply a convenient argument for constraining certain linear/additive predictors to be an intercept only. } \section{Warning }{ The use of other arguments may conflict with the \code{zero} argument. For example, using \code{constraints} to input constraint matrices may conflict with the \code{zero} argument. Another example is the argument \code{parallel}. In general users should not assume any particular order of precedence when there is potential conflict of definition. Currently no checking for consistency is made. The argument \code{zero} may be renamed in the future to something better. } \section{Side Effects}{ The argument creates the appropriate constraint matrices internally. } \details{ Often a certain parameter needs to be modelled simply while other parameters in the model may be more complex, for example, the \eqn{\lambda}{lambda} parameter in LMS-Box-Cox quantile regression should be modelled more simply compared to its \eqn{\mu}{mu} parameter. Another example is the \eqn{\xi}{xi} parameter in a GEV distribution which should be modelled more simply than its \eqn{\mu}{mu} parameter. Using the \code{zero} argument allows this to be fitted conveniently without having to input all the constraint matrices explicitly. The \code{zero} argument can be assigned an integer vector from the set \{\code{1:M}\} where \code{M} is the number of linear/additive predictors. Full details about constraint matrices can be found in the references. See \code{\link{CommonVGAMffArguments}} for more information. } \author{T. W. Yee } \note{ In all \pkg{VGAM} family functions \code{zero = NULL} means none of the linear/additive predictors are modelled as intercept-only. Almost all \pkg{VGAM} family functions have \code{zero = NULL} as the default, but there are some exceptions, e.g., \code{\link{binom2.or}}. Typing something like \code{coef(fit, matrix = TRUE)} is a useful way to ensure that the \code{zero} argument has worked as expected. } \references{ Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. %\url{http://www.stat.auckland.ac.nz/~yee} } \seealso{ \code{\link{CommonVGAMffArguments}}, \code{\link{constraints}}. } \examples{ args(multinomial) args(binom2.or) args(gpd) # LMS quantile regression example fit <- vglm(BMI ~ sm.bs(age, df = 4), lms.bcg(zero = c(1, 3)), data = bmi.nz, trace = TRUE) coef(fit, matrix = TRUE) } \keyword{models} \keyword{regression} \keyword{programming} %zz Here is a conflict which is not picked up (no internal checking done) VGAM/man/makeham.Rd0000644000176200001440000001101613565414527013464 0ustar liggesusers\name{makeham} \alias{makeham} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Makeham Regression Family Function } \description{ Maximum likelihood estimation of the 3-parameter Makeham distribution. } \usage{ makeham(lscale = "loglink", lshape = "loglink", lepsilon = "loglink", iscale = NULL, ishape = NULL, iepsilon = NULL, gscale = exp(-5:5), gshape = exp(-5:5), gepsilon = exp(-4:1), nsimEIM = 500, oim.mean = TRUE, zero = NULL, nowarning = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{nowarning}{ Logical. Suppress a warning? Ignored for \pkg{VGAM} 0.9-7 and higher. } \item{lshape, lscale, lepsilon}{ Parameter link functions applied to the shape parameter \code{shape}, scale parameter \code{scale}, and other parameter \code{epsilon}. All parameters are treated as positive here (cf. \code{\link{dmakeham}} allows \code{epsilon = 0}, etc.). See \code{\link{Links}} for more choices. } % \item{eshape, escale, eepsilon}{ % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{ishape, iscale, iepsilon}{ Optional initial values. A \code{NULL} means a value is computed internally. A value must be given for \code{iepsilon} currently, and this is a sensitive parameter! } \item{gshape, gscale, gepsilon}{ See \code{\link{CommonVGAMffArguments}}. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}}. Argument \code{probs.y} is used only when \code{imethod = 2}. } \item{oim.mean}{ Currently ignored. } } \details{ The Makeham distribution, which adds another parameter to the Gompertz distribution, has cumulative distribution function \deqn{F(y; \alpha, \beta, \varepsilon) = 1 - \exp \left\{ -y \varepsilon + \frac {\alpha}{\beta} \left[ 1 - e^{\beta y} \right] \right\} }{% F(y; alpha, beta, epsilon) = 1 - exp(-y * epsilon + (alpha / beta) * [1 - e^(beta * y)]) } which leads to a probability density function \deqn{f(y; \alpha, \beta, \varepsilon) = \left[ \varepsilon + \alpha e^{\beta y} \right] \; \exp \left\{ -y \varepsilon + \frac {\alpha}{\beta} \left[ 1 - e^{\beta y} \right] \right\}, }{% f(y; alpha, beta, epsilon) = (epsilon + alpha * e^(beta y) ) * exp(-y * epsilon + (alpha / beta) * [1 - e^(beta * y)]) } for \eqn{\alpha > 0}{alpha > 0}, \eqn{\beta > 0}{beta > 0}, \eqn{\varepsilon \geq 0}{epsilon >= 0}, \eqn{y > 0}. Here, \eqn{\beta}{beta} is called the scale parameter \code{scale}, and \eqn{\alpha}{alpha} is called a shape parameter. The moments for this distribution do not appear to be available in closed form. Simulated Fisher scoring is used and multiple responses are handled. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
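% A spot check of the density formula in the details above against
% dmakeham() (a sketch; the parameter values are arbitrary, and the
% 'eps' abbreviation for 'epsilon' follows the example further below):
% y <- 1.5; alpha <- 0.5; beta <- 1; epsil <- 0.2
% (epsil + alpha * exp(beta * y)) *
%   exp(-y * epsil + (alpha / beta) * (1 - exp(beta * y)))    # By hand
% dmakeham(y, scale = beta, shape = alpha, eps = epsil)  # Should match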
The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } %\references{ % %} \author{ T. W. Yee } \section{Warning }{ A lot of care is needed because this is a rather difficult distribution for parameter estimation, especially when the shape parameter is large relative to the scale parameter. If the self-starting initial values fail then try experimenting with the initial value arguments, especially \code{iepsilon}. Successful convergence depends on having very good initial values. More improvements could be made here. Also, monitor convergence by setting \code{trace = TRUE}. A trick is to fit a \code{\link{gompertz}} distribution and use it for initial values; see below. However, this family function is currently numerically fraught. } \seealso{ \code{\link{dmakeham}}, \code{\link{gompertz}}, \code{\link{simulate.vlm}}. } \examples{ \dontrun{ set.seed(123) mdata <- data.frame(x2 = runif(nn <- 1000)) mdata <- transform(mdata, eta1 = -1, ceta1 = 1, eeta1 = -2) mdata <- transform(mdata, shape1 = exp(eta1), scale1 = exp(ceta1), epsil1 = exp(eeta1)) mdata <- transform(mdata, y1 = rmakeham(nn, shape = shape1, scale = scale1, eps = epsil1)) # A trick is to fit a Gompertz distribution first fit0 <- vglm(y1 ~ 1, gompertz, data = mdata, trace = TRUE) fit1 <- vglm(y1 ~ 1, makeham, data = mdata, etastart = cbind(predict(fit0), log(0.1)), trace = TRUE) coef(fit1, matrix = TRUE) summary(fit1) } } \keyword{models} \keyword{regression} %# fit1 <- vglm(y1 ~ 1, makeham, data = mdata, trace = TRUE) %# fit2 <- vglm(y1 ~ 1, makeham(imeth = 2), data = mdata, trace = TRUE) VGAM/man/Select.Rd0000644000176200001440000001536613565414527013314 0ustar liggesusers\name{Select} \alias{Select} \alias{subsetcol} % \alias{subsetc} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Select Variables for a Formula Response or the RHS of a Formula %% ~~function to do ... ~~ } \description{ Select variables from a data frame whose names begin with a certain character string. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ Select(data = list(), prefix = "y", lhs = NULL, rhs = NULL, rhs2 = NULL, rhs3 = NULL, as.character = FALSE, as.formula.arg = FALSE, tilde = TRUE, exclude = NULL, sort.arg = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{ A data frame or a matrix. %% ~~Describe \code{data} here~~ } \item{prefix}{ A vector of character strings, or a logical. If a character then the variables chosen from \code{data} begin with the value of \code{prefix}. If a logical then only \code{TRUE} is accepted and all the variables in \code{data} are chosen. %% ~~Describe \code{prefix} here~~ } \item{lhs}{ A character string. The response of a formula. %% ~~Describe \code{lhs} here~~ } \item{rhs}{ A character string. Included as part of the RHS a formula. Set \code{rhs = "0"} to suppress the intercept. %% ~~Describe \code{rhs} here~~ } \item{rhs2, rhs3}{ Same as \code{rhs} but appended to its RHS, i.e., \code{paste0(rhs, " + ", rhs2, " + ", rhs3)}. If used, \code{rhs} should be used first, and then possibly \code{rhs2} and then possibly \code{rhs3}. %% ~~Describe \code{rhs} here~~ } \item{as.character}{ Logical. Return the answer as a character string? %% ~~Describe \code{as.character} here~~ } \item{as.formula.arg}{ Logical. Is the answer a formula? %% ~~Describe \code{as.formula.arg} here~~ } \item{tilde}{ Logical. 
If \code{as.character} and \code{as.formula.arg} are both \code{TRUE} then include the tilde in the formula? } \item{exclude}{ Vector of character strings. Exclude these variables explicitly. %% ~~Describe \code{exclude} here~~ } \item{sort.arg}{ Logical. Sort the variables? %% ~~Describe \code{sort.arg} here~~ } } \details{ This is meant as a utility function to avoid manually: (i) making a \code{\link[base]{cbind}} call to construct a big matrix response, and (ii) constructing a formula involving a lot of terms. The savings can be made because the variables of interest begin with some prefix, e.g., with the character \code{"y"}. } \value{ If \code{as.character = FALSE} and \code{as.formula.arg = FALSE} then a matrix such as \code{cbind(y1, y2, y3)}. If \code{as.character = TRUE} and \code{as.formula.arg = FALSE} then a character string such as \code{"cbind(y1, y2, y3)"}. If \code{as.character = FALSE} and \code{as.formula.arg = TRUE} then a \code{\link[stats]{formula}} such as \code{lhs ~ y1 + y2 + y3}. If \code{as.character = TRUE} and \code{as.formula.arg = TRUE} then a character string such as \code{"lhs ~ y1 + y2 + y3"}. See the examples below. By default, if no variables beginning with the value of \code{prefix} are found, then a \code{NULL} is returned. Setting \code{prefix = " "} is a way of selecting no variables. %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } %%\references{ %% ~put references to the literature/web site here ~ %%} \author{ T. W. Yee. %% ~~who you are~~ } \note{ This function is a bit experimental at this stage and may change in the near future. Some of its utility may be better achieved using \code{\link[base]{subset}} and its \code{select} argument, e.g., \code{subset(pdata, TRUE, select = y01:y10)}. For some models such as \code{\link{posbernoulli.t}} the order of the variables in the \code{xij} argument is crucial, therefore care must be taken with the argument \code{sort.arg}. In some instances, it may be good to rename variables \code{y1} to \code{y01}, \code{y2} to \code{y02}, etc. when there are variables such as \code{y14}. Currently \code{subsetcol()} and \code{Select()} are identical. One of these functions might be withdrawn in the future. %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{vglm}}, \code{\link[base]{cbind}}, \code{\link[base]{subset}}, \code{\link[stats]{formula}}, \code{\link{fill}}.
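% The four output forms of the value section, side by side (a sketch;
% 'd' is a hypothetical toy data frame, not used elsewhere):
% d <- data.frame(y1 = 1, y2 = 2, y3 = 3)
% Select(d)                                       # cbind(y1, y2, y3)
% Select(d, as.character = TRUE)                  # "cbind(y1, y2, y3)"
% Select(d, as.formula.arg = TRUE, lhs = "resp")  # resp ~ y1 + y2 + y3
% Select(d, as.formula.arg = TRUE, as.character = TRUE, lhs = "resp")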
%% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ Pneumo <- pneumo colnames(Pneumo) <- c("y1", "y2", "y3", "x2") # The "y" variables are response Pneumo$x1 <- 1; Pneumo$x3 <- 3; Pneumo$x <- 0; Pneumo$x4 <- 4 # Add these Select(data = Pneumo) # Same as with(Pneumo, cbind(y1, y2, y3)) Select(Pneumo, "x") Select(Pneumo, "x", sort = FALSE, as.char = TRUE) Select(Pneumo, "x", exclude = "x1") Select(Pneumo, "x", exclude = "x1", as.char = TRUE) Select(Pneumo, c("x", "y")) Select(Pneumo, "z") # Now returns a NULL Select(Pneumo, " ") # Now returns a NULL Select(Pneumo, prefix = TRUE, as.formula = TRUE) Select(Pneumo, "x", exclude = c("x3", "x1"), as.formula = TRUE, lhs = "cbind(y1, y2, y3)", rhs = "0") Select(Pneumo, "x", exclude = "x1", as.formula = TRUE, as.char = TRUE, lhs = "cbind(y1, y2, y3)", rhs = "0") # Now a 'real' example: Huggins89table1 <- transform(Huggins89table1, x3.tij = t01) tab1 <- subset(Huggins89table1, rowSums(Select(Huggins89table1, "y")) > 0) # Same as # subset(Huggins89table1, y1 + y2 + y3 + y4 + y5 + y6 + y7 + y8 + y9 + y10 > 0) # Long way to do it: fit.th <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2 + x3.tij, xij = list(x3.tij ~ t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10 - 1), posbernoulli.t(parallel.t = TRUE ~ x2 + x3.tij), data = tab1, trace = TRUE, form2 = ~ x2 + x3.tij + t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10) # Short way to do it: Fit.th <- vglm(Select(tab1, "y") ~ x2 + x3.tij, xij = list(Select(tab1, "t", as.formula = TRUE, sort = FALSE, lhs = "x3.tij", rhs = "0")), posbernoulli.t(parallel.t = TRUE ~ x2 + x3.tij), data = tab1, trace = TRUE, form2 = Select(tab1, prefix = TRUE, as.formula = TRUE)) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} \keyword{regression} % 20140524; For Fit.th before prefix = TRUE was allowed: % form2 = Select(tab1, "t", as.formula = TRUE, % rhs = "x2 + x3.tij")) % dim(subset(prinia, TRUE, select = grepl("^y", colnames(prinia)))) VGAM/man/chisq.Rd0000644000176200001440000000277213565414527013201 0ustar liggesusers\name{chisq} \alias{chisq} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Chi-squared Distribution } \description{ Maximum likelihood estimation of the degrees of freedom for a chi-squared distribution. } \usage{ chisq(link = "loglink", zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The degrees of freedom is treated as a parameter to be estimated, and as real (not integer). Being positive, a log link is used by default. Fisher scoring is used. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ Multiple responses are permitted. There may be convergence problems if the degrees of freedom is very large or close to zero. } \seealso{ \code{\link[stats]{Chisquare}}. \code{\link{uninormal}}. 
} \examples{ cdata <- data.frame(x2 = runif(nn <- 1000)) cdata <- transform(cdata, y1 = rchisq(nn, df = exp(1 - 1 * x2)), y2 = rchisq(nn, df = exp(2 - 2 * x2))) fit <- vglm(cbind(y1, y2) ~ x2, chisq, data = cdata, trace = TRUE) coef(fit, matrix = TRUE) } \keyword{models} \keyword{regression} VGAM/man/zoabetaUC.Rd0000644000176200001440000000643213565414527013744 0ustar liggesusers\name{Zoabeta} \alias{Zoabeta} \alias{dzoabeta} \alias{pzoabeta} \alias{qzoabeta} \alias{rzoabeta} \title{The Zero/One-Inflated Beta Distribution} \description{ Density, distribution function, and random generation for the zero/one-inflated beta distribution. } \usage{ dzoabeta(x, shape1, shape2, pobs0 = 0, pobs1 = 0, log = FALSE, tol = .Machine$double.eps) pzoabeta(q, shape1, shape2, pobs0 = 0, pobs1 = 0, lower.tail = TRUE, log.p = FALSE, tol = .Machine$double.eps) qzoabeta(p, shape1, shape2, pobs0 = 0, pobs1 = 0, lower.tail = TRUE, log.p = FALSE, tol = .Machine$double.eps) rzoabeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0, tol = .Machine$double.eps) } \arguments{ \item{x, q, p, n}{Same as \code{\link[stats]{Beta}}. } \item{pobs0, pobs1}{ vector of probabilities that 0 and 1 are observed (\eqn{\omega_0}{omega_0} and \eqn{\omega_1}{omega_1}). } \item{shape1, shape2}{ Same as \code{\link[stats]{Beta}}. They are called \code{a} and \code{b} in \code{\link[base:Special]{beta}} respectively. } \item{lower.tail, log, log.p}{ Same as \code{\link[stats]{Beta}}. } \item{tol}{ Numeric, tolerance for testing equality with 0 and 1. } } \value{ \code{dzoabeta} gives the density, \code{pzoabeta} gives the distribution function, \code{qzoabeta} gives the quantile function, and \code{rzoabeta} generates random deviates. } \author{ Xiangjie Xue and T. W. Yee } \details{ This distribution is a mixture of a discrete distribution with a continuous distribution. The cumulative distribution function of \eqn{Y} is \deqn{F(y) = (1 - \omega_0 - \omega_1) B(y) + \omega_0 \times I[0 \leq y] + \omega_1 \times I[1 \leq y]}{% F(y) = (1 - omega_0 - omega_1) B(y) + omega_0 * I[0 <= y] + omega_1 * I[1 <= y]} where \eqn{B(y)} is the cumulative distribution function of the beta distribution with the same shape parameters (\code{\link[stats]{pbeta}}), \eqn{\omega_0}{omega_0} is the inflated probability at 0 and \eqn{\omega_1}{omega_1} is the inflated probability at 1. The default values of \eqn{\omega_j}{omega_j} mean that these functions behave like the ordinary \code{\link[stats]{Beta}} when only the essential arguments are inputted. } %\note{ % % % %} \seealso{ \code{\link{zoabetaR}}, \code{\link[base:Special]{beta}}, \code{\link{betaR}}, \code{\link{Betabinom}}.
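% A numeric check of the mixture CDF above at an interior point (a
% sketch; the shape and inflation values are arbitrary):
% q <- 0.4; w0 <- 0.2; w1 <- 0.1
% (1 - w0 - w1) * pbeta(q, 2, 3) + w0        # By hand, since 0 <= q < 1
% pzoabeta(q, 2, 3, pobs0 = w0, pobs1 = w1)  # Should match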
} \examples{ \dontrun{ N <- 1000; y <- rzoabeta(N, 2, 3, 0.2, 0.2) hist(y, probability = TRUE, border = "blue", las = 1, main = "Blue = 0- and 1-altered; orange = ordinary beta") sum(y == 0) / N # Proportion of 0s sum(y == 1) / N # Proportion of 1s Ngrid <- 1000 lines(seq(0, 1, length = Ngrid), dbeta(seq(0, 1, length = Ngrid), 2, 3), col = "orange") lines(seq(0, 1, length = Ngrid), col = "blue", dzoabeta(seq(0, 1, length = Ngrid), 2 , 3, 0.2, 0.2)) } } \keyword{distribution} %dzoabeta(c(-1, NA, 0.5, 2), 2, 3, 0.2, 0.2) # should be NA %dzoabeta(0.5, c(NA, Inf), 4, 0.2, 0.1) # should be NA %dzoabeta(0.5, 2.2, 4.3, NA, 0.3) # should be NA %dzoabeta(0.5, 2, 3, 0.5, 0.6) # should NaN %set.seed(1234); k <- runif(1000) %sum(abs(qzoabeta(k, 2, 3) - qbeta(k, 2, 3)) > .Machine$double.eps) # Should be 0 %sum(abs(pzoabeta(k, 10, 7) - pbeta(k, 10, 7)) > .Machine$double.eps) # Should be 0 VGAM/man/oipospoisson.Rd0000644000176200001440000000467713565414527014644 0ustar liggesusers\name{oipospoisson} \alias{oipospoisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-inflated Positive Poisson Distribution Family Function } \description{ Fits a 1-inflated positive Poisson distribution. } \usage{ oipospoisson(lpstr1 = "logitlink", llambda = "loglink", type.fitted = c("mean", "lambda", "pobs1", "pstr1", "onempstr1"), ilambda = NULL, gpstr1 = (1:19)/20, gprobs.y = (1:19)/20, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr1, llambda}{ For \code{lpstr1}: the same idea as \code{\link{zipoisson}} except it applies to a structural 1. } \item{ilambda, gpstr1, gprobs.y, imethod}{ For initial values. See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The 1-inflated positive Poisson distribution is a mixture distribution of the positive (0-truncated) Poisson distribution with some probability of obtaining a (structural) 1. Thus there are two sources for obtaining the value 1. It is similar to a zero-inflated Poisson model, except the Poisson is replaced by a positive Poisson and the 0 is replaced by 1. This distribution is written here in a way that retains a similar notation to the zero-inflated Poisson, i.e., the probability \eqn{P[Y=1]} involves another parameter \eqn{\phi}{phi}. See \code{\link{zipoisson}}. This family function can handle multiple responses. } \section{Warning }{ Under- or over-flow may occur if the data is ill-conditioned. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } %\references{ %} \author{ Thomas W. Yee } %\note{ %} \seealso{ \code{\link{Oipospois}}, \code{\link{pospoisson}}, \code{\link{oapospoisson}}, \code{\link{otpospoisson}}, \code{\link{zipoisson}}, \code{\link{poissonff}}, \code{\link{simulate.vlm}}. 
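% A rough check (illustrative only, reusing fit1 and pdata from the
% example below): the fitted probability of an observed 1 should be
% comparable with the empirical proportion of ones.
% head(fitted(fit1, type.fitted = "pobs1"))
% with(pdata, mean(y1 == 1))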
} \examples{ set.seed(1) pdata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data pdata <- transform(pdata, pstr1 = 0.5, lambda = exp(3 - x2)) pdata <- transform(pdata, y1 = roipospois(nn, lambda, pstr1 = pstr1)) with(pdata, table(y1)) fit1 <- vglm(y1 ~ x2, oipospoisson, data = pdata, trace = TRUE) coef(fit1, matrix = TRUE) } \keyword{models} \keyword{regression} VGAM/man/oilogUC.Rd0000644000176200001440000000716613565414527013425 0ustar liggesusers\name{Oilog} \alias{Oilog} \alias{doilog} \alias{poilog} \alias{qoilog} \alias{roilog} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Inflated Logarithmic Distribution } \description{ Density, distribution function, quantile function and random generation for the one-inflated logarithmic distribution with parameter \code{pstr1}. } \usage{ doilog(x, shape, pstr1 = 0, log = FALSE) poilog(q, shape, pstr1 = 0) qoilog(p, shape, pstr1 = 0) roilog(n, shape, pstr1 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, p, n}{Same as \code{\link[stats]{Uniform}}.} \item{shape}{ Vector of parameters that lie in \eqn{(0,1)}. } \item{pstr1}{ Probability of a structural one (i.e., ignoring the logarithmic distribution), called \eqn{\phi}{phi}. The default value of \eqn{\phi = 0}{phi = 0} corresponds to the response having an ordinary logarithmic distribution. } \item{log}{Same as \code{\link[stats]{Uniform}}.} } \details{ The probability function of \eqn{Y} is 1 with probability \eqn{\phi}{phi}, and \eqn{Logarithmic(shape)} with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=1) = \phi + (1-\phi) P(W=1)}{% P(Y=1) = phi + (1-phi) * P(W=1)} where \eqn{W} is distributed as a \eqn{Logarithmic(shape)} random variable. The \pkg{VGAM} family function \code{\link{oilog}} estimates \eqn{\phi}{phi} by MLE. } \value{ \code{doilog} gives the density, \code{poilog} gives the distribution function, \code{qoilog} gives the quantile function, and \code{roilog} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pstr1} is recycled to the required length, and usually has values which lie in the interval \eqn{[0,1]}. These functions actually allow for the \emph{one-deflated logarithmic} distribution. Here, \code{pstr1} is also permitted to lie in the interval \code{[-dlog(1, shape) / (1 - dlog(1, shape)), 0]}. The resulting probability of a unit count is \emph{less than} the nominal logarithmic value, and the use of \code{pstr1} to stand for the probability of a structural 1 loses its meaning. % % % When \code{pstr1} equals \code{-dlog(1, shape) / (1 - dlog(1, shape))} this corresponds to the 1-truncated logarithmic distribution. } \seealso{ \code{\link{oilog}}, \code{\link{rlog}}, \code{\link{logff}}, \code{\link{Otlog}}. % \code{\link{zipf}}.
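% The deflation boundary described in the note above can be verified
% numerically (illustrative only): at the limit, the probability of a
% one vanishes, giving the 1-truncated logarithmic distribution.
% shape <- 0.5
% lim <- -dlog(1, shape) / (1 - dlog(1, shape))
% doilog(1, shape, pstr1 = lim)  # Should be approximately 0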
} \examples{ shape <- 0.5; pstr1 <- 0.3; x <- (-1):7 (ii <- doilog(x, shape, pstr1 = pstr1)) max(abs(poilog(1:200, shape) - cumsum(shape^(1:200) / (-(1:200) * log1p(-shape))))) # Should be 0 \dontrun{ x <- 0:10 par(mfrow = c(2, 1)) # One-Inflated logarithmic barplot(rbind(doilog(x, shape, pstr1 = pstr1), dlog(x, shape)), beside = TRUE, col = c("blue", "orange"), main = paste("OILogff(", shape, ", pstr1 = ", pstr1, ") (blue) vs", " Logff(", shape, ") (orange)", sep = ""), names.arg = as.character(x)) deflat.limit <- -dlog(1, shape) / plog(1, shape, lower.tail = FALSE) newpstr1 <- round(deflat.limit, 3) + 0.001 # Inside but near the boundary barplot(rbind(doilog(x, shape, pstr1 = newpstr1), dlog(x, shape)), beside = TRUE, col = c("blue", "orange"), main = paste("ODLogff(", shape, ", pstr1 = ", newpstr1, ") (blue) vs", " Logff(", shape, ") (orange)", sep = ""), names.arg = as.character(x)) } } \keyword{distribution} %qoilog(p, shape, pstr1 = 0) %roilog(n, shape, pstr1 = 0) % table(roilog(100, shape, pstr1 = pstr1)) % round(doilog(1:10, shape, pstr1 = pstr1) * 100) # Should be similar VGAM/man/coalminers.Rd0000644000176200001440000000216713565414527014224 0ustar liggesusers\name{coalminers} \alias{coalminers} \docType{data} \title{ Breathlessness and Wheeze Amongst Coalminers Data} \description{ Coalminers who are smokers without radiological pneumoconiosis, classified by age, breathlessness and wheeze. } \usage{data(coalminers)} \format{ A data frame with 9 age groups and the following 5 columns. \describe{ \item{BW}{Counts with breathlessness and wheeze. } \item{BnW}{Counts with breathlessness but no wheeze. } \item{nBW}{Counts with no breathlessness but wheeze. } \item{nBnW}{Counts with neither breathlessness nor wheeze. } \item{age}{Age of the coal miners (actually, the midpoints of the 5-year category ranges). } } } \details{ The data were published in Ashford and Sowden (1970). A more recent analysis is given in McCullagh and Nelder (1989, Section 6.6). } \source{ Ashford, J. R. and Sowden, R. R. (1970) Multi-variate probit analysis. \emph{Biometrics}, \bold{26}, 535--546. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}. 2nd ed. London: Chapman & Hall. } \examples{ str(coalminers) } \keyword{datasets} VGAM/man/ABO.Rd0000644000176200001440000000450113565414527012463 0ustar liggesusers\name{ABO} \alias{ABO} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The ABO Blood Group System } \description{ Estimates the two independent parameters of the ABO blood group system. } \usage{ ABO(link.pA = "logitlink", link.pB = "logitlink", ipA = NULL, ipB = NULL, ipO = NULL, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link.pA, link.pB}{ Link functions applied to \code{pA} and \code{pB}. See \code{\link{Links}} for more choices. } \item{ipA, ipB, ipO}{ Optional initial values for \code{pA}, \code{pB} and \code{pO}. A \code{NULL} value means values are computed internally. } \item{zero}{ Details at \code{\link{CommonVGAMffArguments}}. } } \details{ The parameters \code{pA} and \code{pB} are probabilities, so that \code{pO=1-pA-pB} is the third probability. The probabilities \code{pA} and \code{pB} correspond to A and B respectively, so that \code{pO} is the probability for O. It is easier to make use of initial values for \code{pO} than for \code{pB}. In documentation elsewhere I sometimes use \code{pA=p}, \code{pB=q}, \code{pO=r}.
} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Lange, K. (2002) \emph{Mathematical and Statistical Methods for Genetic Analysis}, 2nd ed. New York: Springer-Verlag. } \author{ T. W. Yee } \note{ The input can be a 4-column matrix of counts, where the columns are A, B, AB, O (in order). Alternatively, the input can be a 4-column matrix of proportions (so each row adds to 1) and the \code{weights} argument is used to specify the total number of counts for each row. } \seealso{ \code{\link{AA.Aa.aa}}, \code{\link{AB.Ab.aB.ab}}, \code{\link{A1A2A3}}, \code{\link{MNSs}}. % \code{\link{AB.Ab.aB.ab2}}, } \examples{ ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073) # Order matters, not the name fit <- vglm(ymat ~ 1, ABO(link.pA = "identitylink", link.pB = "identitylink"), trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) # Estimated pA and pB rbind(ymat, sum(ymat) * fitted(fit)) sqrt(diag(vcov(fit))) } \keyword{models} \keyword{regression} VGAM/man/betabinomUC.Rd0000644000176200001440000002343213565414527014256 0ustar liggesusers\name{Betabinom} \alias{Betabinom} \alias{dbetabinom} \alias{pbetabinom} %\alias{qbetabinom} \alias{rbetabinom} \alias{dbetabinom.ab} \alias{pbetabinom.ab} %\alias{qbetabinom.ab} \alias{rbetabinom.ab} %\alias{Zoibetabinom} \alias{dzoibetabinom} \alias{pzoibetabinom} %\alias{qzoibetabinom} \alias{rzoibetabinom} \alias{dzoibetabinom.ab} \alias{pzoibetabinom.ab} %\alias{qzoibetabinom.ab} \alias{rzoibetabinom.ab} \title{The Beta-Binomial Distribution} \description{ Density, distribution function, and random generation for the beta-binomial distribution and the inflated beta-binomial distribution. } \usage{ dbetabinom(x, size, prob, rho = 0, log = FALSE) pbetabinom(q, size, prob, rho = 0, log.p = FALSE) rbetabinom(n, size, prob, rho = 0) dbetabinom.ab(x, size, shape1, shape2, log = FALSE, Inf.shape = exp(20), limit.prob = 0.5) pbetabinom.ab(q, size, shape1, shape2, limit.prob = 0.5, log.p = FALSE) rbetabinom.ab(n, size, shape1, shape2, limit.prob = 0.5, .dontuse.prob = NULL) dzoibetabinom(x, size, prob, rho = 0, pstr0 = 0, pstrsize = 0, log = FALSE) pzoibetabinom(q, size, prob, rho, pstr0 = 0, pstrsize = 0, lower.tail = TRUE, log.p = FALSE) rzoibetabinom(n, size, prob, rho = 0, pstr0 = 0, pstrsize = 0) dzoibetabinom.ab(x, size, shape1, shape2, pstr0 = 0, pstrsize = 0, log = FALSE) pzoibetabinom.ab(q, size, shape1, shape2, pstr0 = 0, pstrsize = 0, lower.tail = TRUE, log.p = FALSE) rzoibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0) } % Infinity.shape = 1e5 .dontuse.prob = NULL \arguments{ \item{x, q}{vector of quantiles.} % \item{p}{vector of probabilities.} \item{size}{number of trials.} \item{n}{number of observations. Same as \code{\link[stats]{runif}}. } \item{prob}{ the probability of success \eqn{\mu}{mu}. Must be in the unit closed interval \eqn{[0,1]}. } \item{rho}{ the correlation parameter \eqn{\rho}{rho}, which should be in the interval \eqn{[0, 1)}. The default value of 0 corresponds to the usual binomial distribution with probability \code{prob}. Setting \code{rho = 1} would set both shape parameters equal to 0, and the ratio \code{0/0}, which is actually \code{NaN}, is interpreted by \code{\link[stats]{Beta}} as 0.5. See the warning below. % also corresponds to the % binomial distribution with probability \code{prob}. 
} \item{shape1, shape2}{ the two (positive) shape parameters of the standard beta distribution. They are called \code{a} and \code{b} in \code{\link[base:Special]{beta}} respectively. Note that \code{shape1 = prob*(1-rho)/rho} and \code{shape2 = (1-prob)*(1-rho)/rho} is an important relationship between the parameters, so that the shape parameters are infinite by default because \code{rho = 0}; hence \code{limit.prob = prob} is used to obtain the behaviour of the usual binomial distribution. } \item{log, log.p, lower.tail}{ Same meaning as \code{\link[stats]{runif}}. } \item{Inf.shape}{ Numeric. A large value such that, if \code{shape1} or \code{shape2} exceeds this, then special measures are taken, e.g., calling \code{\link[stats]{dbinom}}. Also, if \code{shape1} or \code{shape2} is less than its reciprocal, then special measures are also taken. This feature/approximation is needed to avoid numerical problems with catastrophic cancellation of multiple \code{\link[base:Special]{lbeta}} calls. } \item{limit.prob}{ Numerical vector; recycled if necessary. If either shape parameter is \code{Inf} then the binomial limit is taken, with \code{shape1 / (shape1 + shape2)} as the probability of success. In the case where both are \code{Inf} this probability will be a \code{NaN = Inf/Inf}; however, the value \code{limit.prob} is used instead. Hence the default for \code{dbetabinom.ab()} is to assume that both shape parameters are equal as the limit is taken (indeed, \code{\link[stats]{Beta}} uses 0.5). Note that for \code{[dpr]betabinom()}, because \code{rho = 0} by default, then \code{limit.prob = prob} so that the beta-binomial distribution behaves like the ordinary binomial distribution with respect to arguments \code{size} and \code{prob}. % Purists may assign \code{NaN} to this argument. % 20180216: % Note that for \code{dbetabinom()}, because \code{rho = 0} % by default, then ...... } \item{.dontuse.prob}{ An argument that should be ignored and \emph{not} used. } \item{pstr0}{ Probability of a structural zero (i.e., ignoring the beta-binomial distribution). The default value of \code{pstr0} corresponds to the response having a beta-binomial distribution inflated only at \code{size}. } \item{pstrsize}{ Probability of a structural maximum value \code{size}. The default value of \code{pstrsize} corresponds to the response having a beta-binomial distribution inflated only at 0. } } \value{ \code{dbetabinom} and \code{dbetabinom.ab} give the density, \code{pbetabinom} and \code{pbetabinom.ab} give the distribution function, and \code{rbetabinom} and \code{rbetabinom.ab} generate random deviates. % \code{qbetabinom} and \code{qbetabinom.ab} gives the %quantile function, and \code{dzoibetabinom} and \code{dzoibetabinom.ab} give the inflated density, \code{pzoibetabinom} and \code{pzoibetabinom.ab} give the inflated distribution function, and \code{rzoibetabinom} and \code{rzoibetabinom.ab} generate random inflated deviates. } \author{ T. W. Yee and Xiangjie Xue} \details{ The beta-binomial distribution is a binomial distribution whose probability of success is not a constant but is generated from a beta distribution with parameters \code{shape1} and \code{shape2}. Note that the mean of this beta distribution is \code{mu = shape1/(shape1+shape2)}, which therefore is the mean or the probability of success. See \code{\link{betabinomial}} and \code{\link{betabinomialff}}, the \pkg{VGAM} family functions for estimating the parameters, for the formula of the probability density function and other details.
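% The two parameterizations may be cross-checked numerically via the
% relationship between (prob, rho) and (shape1, shape2) given above
% (a minimal sketch):
% prob <- 0.4; rho <- 0.3; Size <- 10
% s1 <- prob * (1 - rho) / rho; s2 <- (1 - prob) * (1 - rho) / rho
% max(abs(dbetabinom(0:Size, Size, prob = prob, rho = rho) -
%         dbetabinom.ab(0:Size, Size, s1, s2)))  # Should be about 0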
For the inflated beta-binomial distribution, the probability mass function is \deqn{P(Y = y) = (1 - pstr0 - pstrsize) \times BB(y) + pstr0 \times I[y = 0] + pstrsize \times I[y = size]}{% P(Y = y) = (1 - pstr0 - pstrsize) * BB(y) + pstr0 * I[y = 0] + pstrsize * I[y = size]} where \eqn{BB(y)} is the probability mass function of the beta-binomial distribution with the same shape parameters (\code{\link[VGAM]{dbetabinom.ab}}), \code{pstr0} is the inflated probability at 0 and \code{pstrsize} is the inflated probability at \code{size}. The default values of \code{pstr0} and \code{pstrsize} mean that these functions behave like the ordinary \code{\link[VGAM]{Betabinom}} when only the essential arguments are inputted. } \note{ \code{pzoibetabinom}, \code{pzoibetabinom.ab}, \code{pbetabinom} and \code{pbetabinom.ab} can be particularly slow. The functions here ending in \code{.ab} are called by those which do not. The simple transformations \eqn{\mu=\alpha / (\alpha + \beta)}{mu=alpha/(alpha+beta)} and \eqn{\rho=1/(1 + \alpha + \beta)}{rho=1/(1+alpha+beta)} are used, where \eqn{\alpha}{alpha} and \eqn{\beta}{beta} are the two shape parameters. } \section{Warning }{ Setting \code{rho = 1} is not recommended; however, the code may be modified in the future to handle this special case. } \seealso{ \code{\link{betabinomial}}, \code{\link{betabinomialff}}, \code{\link{Zoabeta}}, \code{\link[stats]{Beta}}. } \examples{ set.seed(1); rbetabinom(10, 100, prob = 0.5) set.seed(1); rbinom(10, 100, prob = 0.5) # The same since rho = 0 \dontrun{ N <- 9; xx <- 0:N; s1 <- 2; s2 <- 3 dy <- dbetabinom.ab(xx, size = N, shape1 = s1, shape2 = s2) barplot(rbind(dy, dbinom(xx, size = N, prob = s1 / (s1+s2))), beside = TRUE, col = c("blue","green"), las = 1, main = paste("Beta-binomial (size=",N,", shape1=", s1, ", shape2=", s2, ") (blue) vs\n", " Binomial(size=", N, ", prob=", s1/(s1+s2), ") (green)", sep = ""), names.arg = as.character(xx), cex.main = 0.8) sum(dy * xx) # Check expected values are equal sum(dbinom(xx, size = N, prob = s1 / (s1+s2)) * xx) # Should be all 0: cumsum(dy) - pbetabinom.ab(xx, N, shape1 = s1, shape2 = s2) y <- rbetabinom.ab(n = 1e4, size = N, shape1 = s1, shape2 = s2) ty <- table(y) barplot(rbind(dy, ty / sum(ty)), beside = TRUE, col = c("blue", "orange"), las = 1, main = paste("Beta-binomial (size=", N, ", shape1=", s1, ", shape2=", s2, ") (blue) vs\n", " Random generated beta-binomial(size=", N, ", prob=", s1/(s1+s2), ") (orange)", sep = ""), cex.main = 0.8, names.arg = as.character(xx)) N <- 1e5; size <- 20; pstr0 <- 0.2; pstrsize <- 0.2 kk <- rzoibetabinom.ab(N, size, s1, s2, pstr0, pstrsize) hist(kk, probability = TRUE, border = "blue", ylim = c(0, 0.25), main = "Blue/green = inflated; orange = ordinary beta-binomial", breaks = -0.5 : (size + 0.5)) sum(kk == 0) / N # Proportion of 0 sum(kk == size) / N # Proportion of size lines(0 : size, dbetabinom.ab(0 : size, size, s1, s2), col = "orange") lines(0 : size, col = "green", type = "b", dzoibetabinom.ab(0 : size, size, s1, s2, pstr0, pstrsize)) } } \keyword{distribution} % \item{Inf.shape}{ % Numeric. A large value such that, % if \code{shape1} or \code{shape2} exceeds this, then % it is taken to be \code{Inf}. % Also, if \code{shape1} or \code{shape2} is less than its reciprocal, % then it might be loosely thought of as being effectively \code{0} % (although not treated exactly as so in the code).
% This feature/approximation is needed to avoid numerical % problem with catastrophic cancellation of % multiple \code{\link[base:Special]{lbeta}} calls. % } VGAM/man/oazetaUC.Rd0000644000176200001440000000407013565414527013576 0ustar liggesusers\name{Oazeta} \alias{Oazeta} \alias{doazeta} \alias{poazeta} \alias{qoazeta} \alias{roazeta} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Altered Zeta Distribution } \description{ Density, distribution function, quantile function and random generation for the one-altered zeta distribution with parameter \code{pobs1}. } \usage{ doazeta(x, shape, pobs1 = 0, log = FALSE) poazeta(q, shape, pobs1 = 0) qoazeta(p, shape, pobs1 = 0) roazeta(n, shape, pobs1 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, n, p}{ Same as \code{\link[stats:Uniform]{Unif}}.} \item{shape, log}{ Same as \code{\link{Otzeta}}. } \item{pobs1}{ Probability of (an observed) one, called \eqn{pobs1}. The default value of \code{pobs1 = 0} corresponds to the response having a 1-truncated zeta distribution. } } \details{ The probability that \eqn{Y} equals 1 is \code{pobs1}; otherwise \eqn{Y} follows a 1-truncated zeta distribution. } \value{ \code{doazeta} gives the density, \code{poazeta} gives the distribution function, \code{qoazeta} gives the quantile function, and \code{roazeta} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pobs1} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. } \seealso{ \code{\link{oazeta}}, \code{\link{Oizeta}}, \code{\link{Otzeta}}, \code{\link{zeta}}. } \examples{ shape <- 1.1; pobs1 <- 0.10; x <- (-1):7 doazeta(x, shape = shape, pobs1 = pobs1) table(roazeta(100, shape = shape, pobs1 = pobs1)) \dontrun{ x <- 0:10 barplot(rbind(doazeta(x, shape = shape, pobs1 = pobs1), dzeta(x, shape = shape)), beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1, ylab = "Probability", names.arg = as.character(x), main = paste("OAZ(shape = ", shape, ", pobs1 = ", pobs1, ") [blue] vs", " zeta(shape = ", shape, ") [orange] densities", sep = "")) } } \keyword{distribution} VGAM/man/has.intercept.Rd0000644000176200001440000000324613565414527014636 0ustar liggesusers\name{has.interceptvlm} %\name{confint} \alias{has.intercept} %\alias{has.intercept.vlm} \alias{has.interceptvlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Has a Fitted VGLM Got an Intercept Term? } \description{ Looks at the \code{formula} to see if it has an intercept term. } \usage{ has.intercept(object, \dots) has.interceptvlm(object, form.number = 1, \dots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A fitted model object. } \item{form.number}{Formula number: either 1 or 2, corresponding to the arguments \code{formula} and \code{form2} respectively. } \item{\dots}{Arguments that might be passed from one function to another. } } \details{ This methods function is a simple way to determine whether a fitted \code{\link{vglm}} object etc. has an intercept term or not. It is not entirely foolproof because one might suppress the intercept from the formula and then add a variable to the formula that has a constant value. } \value{ Returns a single logical. } %\references{ %} \author{ Thomas W. Yee } %\note{ %} %\section{Warning }{ %} \seealso{ \code{\link{formulavlm}}, \code{termsvlm}.
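% For a fit with a suppressed intercept the result should be FALSE
% (a minimal sketch, reusing pdata from the example below):
% fit0 <- vglm(counts ~ outcome + treatment - 1, poissonff, data = pdata)
% has.intercept(fit0)  # FALSE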
} \examples{ # Example: this is based on a glm example counts <- c(18,17,15,20,10,20,25,13,12) outcome <- gl(3, 1, 9); treatment <- gl(3, 3) pdata <- data.frame(counts, outcome, treatment) # Better style vglm.D93 <- vglm(counts ~ outcome + treatment, poissonff, data = pdata) formula(vglm.D93) term.names(vglm.D93) responseName(vglm.D93) has.intercept(vglm.D93) } \keyword{models} \keyword{regression} % \method{has.intercept}{vlm}(object, \dots) VGAM/man/felixUC.Rd0000644000176200001440000000256713565414527013433 0ustar liggesusers\name{Felix} \alias{Felix} \alias{dfelix} %\alias{pfelix} %\alias{qfelix} %\alias{rfelix} \title{The Felix Distribution} \description{ Density for the Felix distribution. % distribution function, quantile function % and random generation } \usage{ dfelix(x, rate = 0.25, log = FALSE) } %pfelix(q, rate = 0.25) %qfelix(p, rate = 0.25) %rfelix(n, rate = 0.25) \arguments{ \item{x}{vector of quantiles.} % \item{p}{vector of probabilities.} % \item{n}{number of observations. % Must be a positive integer of length 1.} \item{rate}{ See \code{\link{felix}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{dfelix} gives the density. % \code{pfelix} gives the distribution function, % \code{qfelix} gives the quantile function, and % \code{rfelix} generates random deviates. } \author{ T. W. Yee } \details{ See \code{\link{felix}}, the \pkg{VGAM} family function for estimating the parameter, for the formula of the probability density function and other details. } \section{Warning }{ The default value of \code{rate} is subjective. } \seealso{ \code{\link{felix}}. } \examples{ \dontrun{ rate <- 0.25; x <- 1:15 plot(x, dfelix(x, rate), type = "h", las = 1, col = "blue", ylab = paste("dfelix(rate=", rate, ")"), main = "Felix density function") } } \keyword{distribution} VGAM/man/loglaplace.Rd0000644000176200001440000002137013565414527014170 0ustar liggesusers\name{loglaplace} \alias{loglaplace1} \alias{logitlaplace1} % \alias{alaplace3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Log-Laplace and Logit-Laplace Distribution Family Functions } \description{ Maximum likelihood estimation of the 1-parameter log-Laplace and the 1-parameter logit-Laplace distributions. These may be used for quantile regression for counts and proportions respectively. } \usage{ loglaplace1(tau = NULL, llocation = "loglink", ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1, ishrinkage = 0.95, parallel.locat = FALSE, digt = 4, idf.mu = 3, rep0 = 0.5, minquantile = 0, maxquantile = Inf, imethod = 1, zero = NULL) logitlaplace1(tau = NULL, llocation = "logitlink", ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1, ishrinkage = 0.95, parallel.locat = FALSE, digt = 4, idf.mu = 3, rep01 = 0.5, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{tau, kappa}{ See \code{\link{alaplace1}}. } \item{llocation}{ Character. Parameter link functions for location parameter \eqn{\xi}{xi}. See \code{\link{Links}} for more choices. However, this argument should be left unchanged with count data because it restricts the quantiles to be positive. With proportions data \code{llocation} can be assigned a link such as \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, etc. } \item{ilocation}{ Optional initial values. If given, it must be numeric and values are recycled to the appropriate length. The default is to choose the value internally. 
} \item{parallel.locat}{ Logical. Should the quantiles be parallel on the transformed scale (argument \code{llocation})? Assigning this argument to \code{TRUE} circumvents the seriously embarrassing quantile crossing problem. } % \item{eq.scale}{ Logical. % Should the scale parameters be equal? It is advised to keep % \code{eq.scale = TRUE} unchanged because it does not make sense to % have different values for each \code{tau} value. % } \item{imethod}{ Initialization method. Either the value 1, 2, or \ldots. } \item{idf.mu, ishrinkage, Scale.arg, digt, zero}{ See \code{\link{alaplace1}}. } \item{rep0, rep01}{ Numeric, positive. Replacement values for 0s and 1s respectively. For count data, values of the response whose value is 0 are replaced by \code{rep0}; it avoids computing \code{log(0)}. For proportions data values of the response whose value is 0 or 1 are replaced by \code{min(rangey01[1]/2, rep01/w[y< = 0])} and \code{max((1 + rangey01[2])/2, 1-rep01/w[y >= 1])} respectively; e.g., it avoids computing \code{logitlink(0)} or \code{logitlink(1)}. Here, \code{rangey01} is the 2-vector \code{range(y[(y > 0) & (y < 1)])} of the response. } \item{minquantile, maxquantile}{ Numeric. The minimum and maximum values possible in the quantiles. These argument are effectively ignored by default since \code{\link{loglink}} keeps all quantiles positive. However, if \code{llocation = logofflink(offset = 1)} then it is possible that the fitted quantiles have value 0 because \code{minquantile = 0}. } } \details{ These \pkg{VGAM} family functions implement translations of the asymmetric Laplace distribution (ALD). The resulting variants may be suitable for quantile regression for count data or sample proportions. For example, a log link applied to count data is assumed to follow an ALD. Another example is a logit link applied to proportions data so as to follow an ALD. A positive random variable \eqn{Y} is said to have a log-Laplace distribution if \eqn{Y = e^W}{Y = exp(W)} where \eqn{W} has an ALD. There are many variants of ALDs and the one used here is described in \code{\link{alaplace1}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. In the \code{extra} slot of the fitted object are some list components which are useful. For example, the sample proportion of values which are less than the fitted quantile curves, which is \code{sum(wprior[y <= location]) / sum(wprior)} internally. Here, \code{wprior} are the prior weights (called \code{ssize} below), \code{y} is the response and \code{location} is a fitted quantile curve. This definition comes about naturally from the transformed ALD data. } \references{ Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001) \emph{The Laplace distribution and generalizations: a revisit with applications to communications, economics, engineering, and finance}, Boston: Birkhauser. Kozubowski, T. J. and Podgorski, K. (2003) Log-Laplace distributions. \emph{International Mathematical Journal}, \bold{3}, 467--495. Yee, T. W. (2012) Quantile regression for counts and proportions. In preparation. } \author{ Thomas W. Yee } \section{Warning}{ The \pkg{VGAM} family function \code{\link{logitlaplace1}} will not handle a vector of just 0s and 1s as the response; it will only work satisfactorily if the number of trials is large. See \code{\link{alaplace1}} for other warnings. 
Care is needed with \code{tau} values which are too small, e.g., for count data the sample proportion of zeros must be less than all values in \code{tau}. Similarly, this also holds with \code{\link{logitlaplace1}}, which also requires all \code{tau} values to be less than the sample proportion of ones. } \note{ The form of input for \code{\link{logitlaplace1}} as response is a vector of proportions (values in \eqn{[0,1]}) and the number of trials is entered into the \code{weights} argument of \code{\link{vglm}}/\code{\link{vgam}}. See Example 2 below. See \code{\link{alaplace1}} for other notes in general. } \seealso{ \code{\link{alaplace1}}, \code{\link{dloglap}}. } \examples{ # Example 1: quantile regression of counts with regression splines set.seed(123); my.k <- exp(0) adata <- data.frame(x2 = sort(runif(n <- 500))) mymu <- function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2) adata <- transform(adata, y = rnbinom(n, mu = mymu(x2), size = my.k)) mytau <- c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3 # halfstepping is usual: fitp <- vglm(y ~ sm.bs(x2, df = mydof), data = adata, trace = TRUE, loglaplace1(tau = mytau, parallel.locat = TRUE)) \dontrun{ par(las = 1) # Plot on a log1p() scale mylwd <- 1.5 plot(jitter(log1p(y), factor = 1.5) ~ x2, adata, col = "red", pch = "o", main = "Example 1; darkgreen=truth, blue=estimated", cex = 0.75) with(adata, matlines(x2, log1p(fitted(fitp)), col = "blue", lty = 1, lwd = mylwd)) finexgrid <- seq(0, 1, len = 201) for (ii in 1:length(mytau)) lines(finexgrid, col = "darkgreen", lwd = mylwd, log1p(qnbinom(p = mytau[ii], mu = mymu(finexgrid), si = my.k))) } fitp@extra # Contains useful information # Example 2: sample proportions set.seed(123); nnn <- 1000; ssize <- 100 # ssize = 1 will not work! adata <- data.frame(x2 = sort(runif(nnn))) mymu <- function(x) logitlink( 1.0 + 4*x, inv = TRUE) adata <- transform(adata, ssize = ssize, y2 = rbinom(nnn, size = ssize, prob = mymu(x2)) / ssize) mytau <- c(0.25, 0.50, 0.75) fit1 <- vglm(y2 ~ sm.bs(x2, df = 3), data = adata, weights = ssize, trace = TRUE, logitlaplace1(tau = mytau, lloc = "clogloglink", paral = TRUE)) \dontrun{ # Check the solution. Note: this may be like comparing apples with oranges. plotvgam(fit1, se = TRUE, scol = "red", lcol = "blue", main = "Truth = 'darkgreen'") # Centered approximately ! linkFunctionChar <- as.character(fit1@misc$link) adata <- transform(adata, trueFunction= theta2eta(theta = mymu(x2), link=linkFunctionChar)) with(adata, lines(x2, trueFunction - mean(trueFunction), col = "darkgreen")) # Plot the data + fitted quantiles (on the original scale) myylim <- with(adata, range(y2)) plot(y2 ~ x2, adata, col = "blue", ylim = myylim, las = 1, pch = ".", cex = 2.5) with(adata, matplot(x2, fitted(fit1), add = TRUE, lwd = 3, type = "l")) truecol <- rep(1:3, len = fit1@misc$M) # Add the 'truth' smallxgrid <- seq(0, 1, len = 501) for (ii in 1:length(mytau)) lines(smallxgrid, col = truecol[ii], lwd = 2, qbinom(p = mytau[ii], prob = mymu(smallxgrid), size = ssize) / ssize) # Plot on the eta (== logitlink()/probit()/...) 
scale with(adata, matplot(x2, predict(fit1), add = FALSE, lwd = 3, type = "l")) # Add the 'truth' for (ii in 1:length(mytau)) { true.quant <- qbinom(p = mytau[ii], pr = mymu(smallxgrid), si = ssize) / ssize lines(smallxgrid, theta2eta(theta = true.quant, link = linkFunctionChar), col = truecol[ii], lwd = 2) } } } \keyword{models} \keyword{regression} VGAM/man/alaplaceUC.Rd0000644000176200001440000000706113565414527014060 0ustar liggesusers\name{alaplaceUC} \alias{dalap} \alias{palap} \alias{qalap} \alias{ralap} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Laplace Distribution } \description{ Density, distribution function, quantile function and random generation for the 3-parameter asymmetric Laplace distribution with location parameter \code{location}, scale parameter \code{scale}, and asymmetry parameter \code{kappa}. } \usage{ dalap(x, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)), log = FALSE) palap(q, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)), lower.tail = TRUE, log.p = FALSE) qalap(p, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)), lower.tail = TRUE, log.p = FALSE) ralap(n, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau))) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{ number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{location}{ the location parameter \eqn{\xi}{xi}. } \item{scale}{ the scale parameter \eqn{\sigma}{sigma}. Must consist of positive values. } \item{tau}{ the quantile parameter \eqn{\tau}{tau}. Must consist of values in \eqn{(0,1)}. This argument is used to specify \code{kappa} and is ignored if \code{kappa} is assigned. } \item{kappa}{ the asymmetry parameter \eqn{\kappa}{kappa}. Must consist of positive values. } \item{log}{ if \code{TRUE}, probabilities \code{p} are given as \code{log(p)}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \details{ There are many variants of asymmetric Laplace distributions (ALDs) and this one is known as \emph{the} ALD by Kotz et al. (2001). See \code{\link{alaplace3}}, the \pkg{VGAM} family function for estimating the three parameters by maximum likelihood estimation, for formulae and details. } \value{ \code{dalap} gives the density, \code{palap} gives the distribution function, \code{qalap} gives the quantile function, and \code{ralap} generates random deviates. } \references{ Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001) \emph{The Laplace distribution and generalizations: a revisit with applications to communications, economics, engineering, and finance}, Boston: Birkhauser. } \author{ T. W. Yee and Kai Huang } %\note{ % The \pkg{VGAM} family function \code{\link{alaplace3}} % estimates the three parameters by maximum likelihood estimation. %} \seealso{ \code{\link{alaplace3}}. % \code{\link{dloglap}}. 
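% Since tau is the quantile parameter, the distribution function
% evaluated at the location parameter should return tau (an
% illustrative check, not from the original documentation):
% palap(2, location = 2, scale = 3, tau = 0.75)  # Should be 0.75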
} \examples{ x <- seq(-5, 5, by = 0.01) loc <- 0; sigma <- 1.5; kappa <- 2 \dontrun{ plot(x, dalap(x, loc, sigma, kappa = kappa), type = "l", col = "blue", main = "Blue is density, orange is cumulative distribution function", ylim = c(0, 1), sub = "Purple are 5, 10, ..., 95 percentiles", las = 1, ylab = "", cex.main = 0.5) abline(h = 0, col = "blue", lty = 2) lines(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa), dalap(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa), loc, sigma, kappa = kappa), col = "purple", lty = 3, type = "h") lines(x, palap(x, loc, sigma, kappa = kappa), type = "l", col = "orange") abline(h = 0, lty = 2) } pp <- seq(0.05, 0.95, by = 0.05) # Test two functions max(abs(palap(qalap(pp, loc, sigma, kappa = kappa), loc, sigma, kappa = kappa) - pp)) # Should be 0 } \keyword{distribution} VGAM/man/otzetaUC.Rd0000644000176200001440000000366613565414527013633 0ustar liggesusers\name{Otzeta} \alias{Otzeta} \alias{dotzeta} \alias{potzeta} \alias{qotzeta} \alias{rotzeta} \title{ One-truncated Zeta Distribution } \description{ Density, distribution function, quantile function, and random generation for the one-truncated zeta distribution. } \usage{ dotzeta(x, shape, log = FALSE) potzeta(q, shape, log.p = FALSE) qotzeta(p, shape) rotzeta(n, shape) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, p, n}{ Same as in \code{\link[stats]{runif}}. } \item{shape}{ The positive shape parameter described in in \code{\link{zetaff}}. Here it is called \code{shape} because it is positive. % For \code{rotzeta()} this pa%arameter must be of length 1. } \item{log, log.p}{ Same as in \code{\link[stats]{runif}}. } } \details{ The one-truncated zeta distribution is a zeta distribution but with the probability of a one being zero. The other probabilities are scaled to add to unity. Some more details are given in \code{\link{zetaff}}. } \value{ \code{dotzeta} gives the density, \code{potzeta} gives the distribution function, \code{qotzeta} gives the quantile function, and \code{rotzeta} generates random deviates. } %\references{ %} \author{ T. W. Yee } \note{ Given some response data, the \pkg{VGAM} family function \code{\link{otzeta}} estimates the parameter \code{shape}. % Function \code{potzeta()} suffers from the problems that % \code{\link{plog}} sometimes has. } \seealso{ \code{\link{Otzeta}}, \code{\link{zetaff}}, \code{\link{Oizeta}}. } \examples{ dotzeta(1:20, 0.5) rotzeta(20, 0.5) \dontrun{ shape <- 0.8; x <- 1:10 plot(x, dotzeta(x, shape = shape), type = "h", ylim = 0:1, sub = "shape=0.8", las = 1, col = "blue", ylab = "Probability", main = "1-truncated zeta distribution: blue=PMF; orange=CDF") lines(x + 0.1, potzeta(x, shape = shape), col = "orange", lty = 3, type = "h") } } \keyword{distribution} VGAM/man/cao.control.Rd0000644000176200001440000002713213565414527014310 0ustar liggesusers\name{cao.control} \alias{cao.control} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Control Function for RR-VGAMs (CAO) } \description{ Algorithmic constants and parameters for a constrained additive ordination (CAO), by fitting a \emph{reduced-rank vector generalized additive model} (RR-VGAM), are set using this function. This is the control function for \code{\link{cao}}. 
} \usage{ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL, Crow1positive = TRUE, epsilon = 1.0e-05, Etamat.colmax = 10, GradientFunction = FALSE, iKvector = 0.1, iShape = 0.1, noRRR = ~ 1, Norrr = NA, SmallNo = 5.0e-13, Use.Init.Poisson.QO = TRUE, Bestof = if (length(Cinit)) 1 else 10, maxitl = 10, imethod = 1, bf.epsilon = 1.0e-7, bf.maxit = 10, Maxit.optim = 250, optim.maxit = 20, sd.sitescores = 1.0, sd.Cinit = 0.02, suppress.warnings = TRUE, trace = TRUE, df1.nl = 2.5, df2.nl = 2.5, spar1 = 0, spar2 = 0, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{Rank}{ The numerical rank \eqn{R} of the model, i.e., the number of latent variables. Currently only \code{Rank = 1} is implemented. } \item{all.knots}{ Logical indicating if all distinct points of the smoothing variables are to be used as knots. Assigning the value \code{FALSE} means fewer knots are chosen when the number of distinct points is large, meaning less computational expense. See \code{\link{vgam.control}} for details. } \item{criterion}{ Convergence criterion. Currently, only one is supported: the deviance is minimized. } \item{Cinit}{ Optional initial \bold{C} matrix which may speed up convergence. } \item{Crow1positive}{ Logical vector of length \code{Rank} (recycled if necessary): are the elements of the first row of \bold{C} positive? For example, if \code{Rank} is 4, then specifying \code{Crow1positive = c(FALSE, TRUE)} will force \bold{C[1,1]} and \bold{C[1,3]} to be negative, and \bold{C[1,2]} and \bold{C[1,4]} to be positive. } \item{epsilon}{ Positive numeric. Used to test for convergence for GLMs fitted in FORTRAN. Larger values mean a loosening of the convergence criterion. % Used only if \code{FastAlgorithm} is \code{TRUE}. } \item{Etamat.colmax}{ Positive integer, no smaller than \code{Rank}. Controls the amount of memory used by \code{.Init.Poisson.QO()}. It is the maximum number of columns allowed for the pseudo-response and its weights. In general, the larger the value, the better the initial value. Used only if \code{Use.Init.Poisson.QO = TRUE}. } % \item{FastAlgorithm}{ % Logical. % Whether compiled code is used. % For \code{\link{cao}} this must be \code{TRUE}. % % } \item{GradientFunction}{ Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr} is used or not, i.e., to compute gradient values. Used only if \code{FastAlgorithm} is \code{TRUE}. Currently, this argument must be set to \code{FALSE}. } \item{iKvector, iShape}{ See \code{\link{qrrvglm.control}}. } % \item{Hstep}{ Positive value. Used as the step size in the % finite difference approximation to the derivatives by % \code{\link[stats]{optim}}. % Used only if \code{GradientFunction} is \code{TRUE}. % % } % \item{Kinit}{ % Initial values for the index parameters \code{k} in the negative % binomial distribution (one per species). In general, a smaller number % is preferred over a larger number. The vector is recycled to the % number of responses (species). %} \item{noRRR}{ Formula giving terms that are \emph{not} to be included in the reduced-rank regression (or formation of the latent variables). The default is to omit the intercept term from the latent variables. Currently, only \code{noRRR = ~ 1} is implemented. } \item{Norrr}{ Defunct. Please use \code{noRRR}. Use of \code{Norrr} will become an error soon. } % \item{Parscale}{ % Numerical and positive-valued vector of length \bold{C} % (recycled if necessary). 
Passed into \code{optim(..., % control = list(parscale = Parscale))}; the elements of \bold{C} become % \bold{C} / \code{Parscale}. Setting \code{I.tolerances = TRUE} results % in line searches that are very large, therefore \bold{C} has to be % scaled accordingly to avoid large step sizes. % } \item{SmallNo}{ Positive numeric between \code{.Machine$double.eps} and \code{0.0001}. Used to avoid under- or over-flow in the IRLS algorithm. % Used only if \code{FastAlgorithm} is \code{TRUE}. } \item{Use.Init.Poisson.QO }{ Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO} is used to obtain initial values for the canonical coefficients \bold{C}. If \code{FALSE} then random numbers are used instead. } \item{Bestof}{ Integer. The best of \code{Bestof} models fitted is returned. This argument helps guard against local solutions by (hopefully) finding the global solution from many fits. The argument works only when the function generates its own initial value for \bold{C}, i.e., when \bold{C} are \emph{not} passed in as initial values. The default is only a convenient minimal number and users are urged to increase this value. } \item{maxitl}{ Positive integer. Maximum number of Newton-Raphson/Fisher-scoring/local-scoring iterations allowed. } \item{imethod}{ See \code{\link{qrrvglm.control}}. } \item{bf.epsilon}{ Positive numeric. Tolerance used by the modified vector backfitting algorithm for testing convergence. } \item{bf.maxit}{ Positive integer. Number of backfitting iterations allowed in the compiled code. } \item{Maxit.optim}{ Positive integer. Number of iterations given to the function \code{\link[stats]{optim}} at each of the \code{optim.maxit} iterations. } \item{optim.maxit}{ Positive integer. Number of times \code{\link[stats]{optim}} is invoked. % At iteration \code{i}, the \code{i}th value of \code{Maxit.optim} % is fed into \code{\link[stats]{optim}}. } % \item{se.fit}{ % Logical indicating whether approximate % pointwise standard errors are to be saved on the object. % Currently this argument must have the value \code{FALSE}. % } \item{sd.sitescores}{ Numeric. Standard deviation of the initial values of the site scores, which are generated from a normal distribution. Used when \code{Use.Init.Poisson.QO} is \code{FALSE}. } \item{sd.Cinit}{ Standard deviation of the initial values for the elements of \bold{C}. These are normally distributed with mean zero. This argument is used only if \code{Use.Init.Poisson.QO = FALSE}. } \item{suppress.warnings}{ Logical. Suppress warnings? } \item{trace}{ Logical indicating if output should be produced for each iteration. Having the value \code{TRUE} is a good idea for large data sets. } \item{df1.nl, df2.nl}{ Numeric and non-negative, recycled to length \emph{S}. Nonlinear degrees of freedom for smooths of the first and second latent variables. A value of 0 means the smooth is linear. Roughly, a value between 1.0 and 2.0 often has the approximate flexibility of a quadratic. The user should not assign too large a value to this argument, e.g., the value 4.0 is probably too high. The argument \code{df1.nl} is ignored if \code{spar1} is assigned a positive value or values. Ditto for \code{df2.nl}. } \item{spar1, spar2}{ Numeric and non-negative, recycled to length \emph{S}. Smoothing parameters of the smooths of the first and second latent variables. The larger the value, the more smooth (less wiggly) the fitted curves. These arguments are an alternative to specifying \code{df1.nl} and \code{df2.nl}. 
A value 0 (the default) for \code{spar1} means that \code{df1.nl} is used. Ditto for \code{spar2}. The values are on a scaled version of the latent variables. See Green and Silverman (1994) for more information. } \item{\dots}{ Ignored at present. } } \details{ Many of these arguments are identical to \code{\link{qrrvglm.control}}. Here, \eqn{R} is the \code{Rank}, \eqn{M} is the number of additive predictors, and \eqn{S} is the number of responses (species). Thus \eqn{M=S} for binomial and Poisson responses, and \eqn{M=2S} for the negative binomial and 2-parameter gamma distributions. Allowing the smooths too much flexibility means the CAO optimization problem becomes more difficult to solve. This is because the number of local solutions increases as the nonlinearity of the smooths increases. In situations of high nonlinearity, many initial values should be used, so that \code{Bestof} should be assigned a larger value. In general, there should be a reasonable value of \code{df1.nl} somewhere between 0 and about 3 for most data sets. } \value{ A list with the components corresponding to its arguments, after some basic error checking. } \references{ Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. Green, P. J. and Silverman, B. W. (1994) \emph{Nonparametric Regression and Generalized Linear Models: A Roughness Penalty Approach}, London: Chapman & Hall. } \author{T. W. Yee} \note{ The argument \code{df1.nl} can be inputted in the format \code{c(spp1 = 2, spp2 = 3, 2.5)}, say, meaning the default value is 2.5, but two species have alternative values. If \code{spar1 = 0} and \code{df1.nl = 0} then this represents fitting linear functions (CLO). Currently, this is handled in the awkward manner of setting \code{df1.nl} to be a small positive value, so that the smooth is almost linear but not quite. A proper fix to this special case should be done in the near future. } \seealso{ \code{\link{cao}}. } \examples{\dontrun{ hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars set.seed(123) ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, df1.nl = c(Zoraspin = 2.3, 2.1), Bestof = 10, Crow1positive = FALSE) sort(deviance(ap1, history = TRUE)) # A history of all the iterations Coef(ap1) par(mfrow = c(2, 3)) # All or most of the curves are unimodal; some are plot(ap1, lcol = "blue") # quite symmetric. Hence a CQO model should be ok par(mfrow = c(1, 1), las = 1) index <- 1:ncol(depvar(ap1)) # lvplot is jagged because only 28 sites lvplot(ap1, lcol = index, pcol = index, y = TRUE) trplot(ap1, label = TRUE, col = index) abline(a = 0, b = 1, lty = 2) persp(ap1, label = TRUE, col = 1:4) } } \keyword{models} \keyword{regression} %cao.control(Rank = 1, all.knots = FALSE, % criterion = "deviance", Cinit = NULL, % Crow1positive = TRUE, epsilon = 1e-05, % Etamat.colmax = 10, %% FastAlgorithm = TRUE, %% is.loaded(symbol.For("cqo2f")), %% GradientFunction = FALSE, % iKvector = 0.1, % iShape = 0.1, % noRRR = ~1, %% Parscale = 1, % SmallNo = 5e-13, % Use.Init.Poisson.QO = TRUE, % Bestof = if (length(Cinit)) 1 else 10, maxitl = 40, % bf.epsilon = 1.0e-7, bf.maxit = 40, % Maxit.optim = 250, optim.maxit = 20, %% se.fit = FALSE, % sd.sitescores = 1, % sd.Cinit = 0.02, trace = TRUE, %% df1.nl = 2.5, spar1 = 0, ...)
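% Control arguments are usually passed through the cao() call itself,
% which forwards them to cao.control(); a minimal sketch with
% hypothetical response and covariate names:
% fit <- cao(cbind(spp1, spp2) ~ x2 + x3, family = poissonff,
%            data = mydata, Bestof = 10, df1.nl = 2.5)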
VGAM/man/bratUC.Rd0000644000176200001440000000545013565414527013246 0ustar liggesusers\name{Brat} \alias{Brat} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Inputting Data to Fit a Bradley Terry Model } \description{ Takes in a square matrix of counts and outputs it in a form that is accessible to the \code{\link{brat}} and \code{\link{bratt}} family functions. } \usage{ Brat(mat, ties = 0 * mat, string = c(">", "=="), whitespace = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{mat}{ Matrix of counts, which is considered \eqn{M} by \eqn{M} in dimension when there are ties, and \eqn{M+1} by \eqn{M+1} when there are no ties. The rows are winners and the columns are losers, e.g., the 2-1 element is how many times Competitor 2 has beaten Competitor 1. The matrices are best labelled with the competitors' names. } \item{ties}{ Matrix of counts. This should have the same dimension as \code{mat}. By default, there are no ties. The matrix must be symmetric, and the diagonal should contain \code{NA}s. } \item{string}{ Character. The matrices are labelled with the first value of the descriptor, e.g., \code{"NZ > Oz"} `means' NZ beats Australia in rugby. Suggested alternatives include \code{" beats "} or \code{" wins against "}. The second value is used to handle ties. } \item{whitespace}{ Logical. If \code{TRUE} then a white space is added before and after \code{string}; it generally enhances readability. See \code{\link{CommonVGAMffArguments}} for some similar-type information. } } \details{ In the \pkg{VGAM} package it is necessary for each matrix to be represented as a single row of data by \code{\link{brat}} and \code{\link{bratt}}. Hence the non-diagonal elements of the \eqn{M+1} by \eqn{M+1} matrix are concatenated into \eqn{M(M+1)} values (no ties), while if there are ties, the non-diagonal elements of the \eqn{M} by \eqn{M} matrix are concatenated into \eqn{M(M-1)} values. } \value{ A matrix with 1 row and either \eqn{M(M+1)} or \eqn{M(M-1)} columns. } \references{ Agresti, A. (2013) \emph{Categorical Data Analysis}, 3rd ed. Hoboken, NJ, USA: Wiley. } \author{ T. W. Yee } \note{ This is a data preprocessing function for \code{\link{brat}} and \code{\link{bratt}}. Yet to do: merge \code{InverseBrat} into \code{brat}. } \seealso{ \code{\link{brat}}, \code{\link{bratt}}, \code{InverseBrat}. } \examples{ journal <- c("Biometrika", "Comm Statist", "JASA", "JRSS-B") mat <- matrix(c( NA, 33, 320, 284, 730, NA, 813, 276, 498, 68, NA, 325, 221, 17, 142, NA), 4, 4) dimnames(mat) <- list(winner = journal, loser = journal) Brat(mat) # Less readable Brat(mat, whitespace = TRUE) # More readable vglm(Brat(mat, whitespace = TRUE) ~ 1, brat, trace = TRUE) } \keyword{models} \keyword{regression} VGAM/man/seglines.Rd0000644000176200001440000000553613565414527013702 0ustar liggesusers\name{seglines} \alias{seglines} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Hauck-Donner Effects: Segmented Lines Plot } \description{ Plots the piecewise segmented curve made up of Wald statistics versus estimates, using a colour code for the HDE severity. } \usage{ seglines(x, y, dy, ddy, lwd = 2, cex = 2, plot.it = TRUE, add.legend = TRUE, position.legend = "topleft", lty.table = c("solid", "dashed", "solid", "dashed", "solid", "dashed", "solid"), col.table = rainbow.sky[-5], pch.table = 7:1, severity.table = c("None", "Faint", "Weak", "Moderate", "Strong", "Extreme", "Undetermined"), tol0 = 0.1, FYI = FALSE, ...)
} %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, y, dy, ddy}{ Same as \code{\link{hdeffsev}}. } \item{lwd, cex}{ Graphical parameters: line width, and character expansion. } \item{plot.it}{ Logical, plot it? If \code{FALSE} then the other graphical arguments are ignored. } \item{add.legend, position.legend}{ Logical and character; add a legend? The other argument is fed into \code{\link[graphics]{legend}}. } \item{severity.table, tol0}{ Same as \code{\link{hdeffsev}}. } \item{lty.table, col.table, pch.table}{ Graphical parameters for the 7 different types of segments. Usually users should not assign anything to these arguments. } \item{FYI, \dots}{ Should be ignored. } } \details{ This function was written to complement \code{\link{hdeffsev}} and is rough-and-ready. It plots the Wald statistics as a function of the estimates, and uses a colour-code to indicate the severity of the Hauck-Donner effect (HDE). This can be obtained from its first two derivatives. } \value{ This function returns the severity of the HDE, possibly invisibly. } %\references{ %} \author{ Thomas W. Yee. } %\section{Warning }{ %} \note{ This function is likely to change in the short future because it is experimental and far from complete. } \seealso{ \code{\link{hdeff}}, \code{\link{hdeffsev}}. } \examples{ deg <- 4 # myfun is a function that approximates the HDE myfun <- function(x, deriv = 0) switch(as.character(deriv), '0' = x^deg * exp(-x), '1' = (deg * x^(deg-1) - x^deg) * exp(-x), '2' = (deg * (deg-1) * x^(deg-2) - 2*deg * x^(deg-1) + x^deg) * exp(-x)) \dontrun{ curve(myfun, 0, 10, col = "white") xgrid <- seq(0, 10, length = 101) seglines(xgrid, myfun(xgrid), myfun(xgrid, deriv = 1), myfun(xgrid, deriv = 2), position = "bottom") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} \keyword{regression} % col.table = c("black", "gray", "limegreen", "blue", % "orange", "red", "purple"), pch.table = 7:1, VGAM/man/lgammaUC.Rd0000644000176200001440000000551313565414527013554 0ustar liggesusers\name{lgammaUC} \alias{lgammaUC} \alias{dlgamma} \alias{plgamma} \alias{qlgamma} \alias{rlgamma} \title{The Log-Gamma Distribution } \description{ Density, distribution function, quantile function and random generation for the log-gamma distribution with location parameter \code{location}, scale parameter \code{scale} and shape parameter \code{k}. } \usage{ dlgamma(x, location = 0, scale = 1, shape = 1, log = FALSE) plgamma(q, location = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) qlgamma(p, location = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) rlgamma(n, location = 0, scale = 1, shape = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats]{runif}}. } \item{location}{the location parameter \eqn{a}.} \item{scale}{the (positive) scale parameter \eqn{b}.} \item{shape}{the (positive) shape parameter \eqn{k}.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dlgamma} gives the density, \code{plgamma} gives the distribution function, \code{qlgamma} gives the quantile function, and \code{rlgamma} generates random deviates. } \references{ Kotz, S. and Nadarajah, S. 
(2000) \emph{Extreme Value Distributions: Theory and Applications}, pages 48--49, London: Imperial College Press. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{lgamma1}}, the \pkg{VGAM} family function for estimating the one-parameter standard log-gamma distribution by maximum likelihood estimation, for formulae and other details. Apart from \code{n}, all the above arguments may be vectors and are recycled to the appropriate length if necessary. } \note{ The \pkg{VGAM} family function \code{\link{lgamma3}} is for the three-parameter (nonstandard) log-gamma distribution. } \seealso{ \code{\link{lgamma1}}, \code{\link{prentice74}}. } \examples{ \dontrun{ loc <- 1; Scale <- 1.5; shape <- 1.4 x <- seq(-3.2, 5, by = 0.01) plot(x, dlgamma(x, loc = loc, Scale, shape = shape), type = "l", col = "blue", ylim = 0:1, main = "Blue is density, orange is cumulative distribution function", sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "") abline(h = 0, col = "blue", lty = 2) lines(qlgamma(seq(0.05, 0.95, by = 0.05), loc = loc, Scale, shape = shape), dlgamma(qlgamma(seq(0.05, 0.95, by = 0.05), loc = loc, scale = Scale, shape = shape), loc = loc, Scale, shape = shape), col = "purple", lty = 3, type = "h") lines(x, plgamma(x, loc = loc, Scale, shape = shape), col = "orange") abline(h = 0, lty = 2) } } \keyword{distribution} VGAM/man/zigeometric.Rd0000644000176200001440000001260013565414527014402 0ustar liggesusers\name{zigeometric} \alias{zigeometric} \alias{zigeometricff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Geometric Distribution Family Function } \description{ Fits a zero-inflated geometric distribution by maximum likelihood estimation. } \usage{ zigeometric(lpstr0 = "logitlink", lprob = "logitlink", type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"), ipstr0 = NULL, iprob = NULL, imethod = 1, bias.red = 0.5, zero = NULL) zigeometricff(lprob = "logitlink", lonempstr0 = "logitlink", type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"), iprob = NULL, ionempstr0 = NULL, imethod = 1, bias.red = 0.5, zero = "onempstr0") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr0, lprob}{ Link functions for the parameters \eqn{\phi}{phi} and \eqn{p}{prob} (\code{prob}). The usual geometric probability parameter is the latter. The probability of a structural zero is the former. See \code{\link{Links}} for more choices. For the zero-\emph{deflated} model see below. } % \item{eprob, epstr0}{ eprob = list(), epstr0 = list(), % List. Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{lonempstr0, ionempstr0}{ Corresponding arguments for the other parameterization. See details below. } \item{bias.red}{ A constant used in the initialization process of \code{pstr0}. It should lie between 0 and 1, with 1 having no effect. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for information. } \item{ipstr0, iprob}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{zero, imethod}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ Function \code{zigeometric()} is based on \deqn{P(Y=0) = \phi + (1-\phi) p,}{% P(Y=0) = phi + (1-phi) * prob,} for \eqn{y=0}, and \deqn{P(Y=y) = (1-\phi) p (1 - p)^{y},}{% P(Y=y) = (1-phi) * prob * (1 - prob)^y,} for \eqn{y=1,2,\ldots}. The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) (1-p) / p}{E(Y) = (1-phi) * (1-prob) / prob} and these are returned as the fitted values by default. By default, the two linear/additive predictors are \eqn{(logit(\phi), logit(p))^T}{(logit(phi), logit(prob))^T}. Multiple responses are handled. % 20130316: Estimated probabilities of a structural zero and an observed zero can be returned, as in \code{\link{zipoisson}}; see \code{\link{fittedvlm}} for information. The \pkg{VGAM} family function \code{zigeometricff()} has a few changes compared to \code{zigeometric()}. These are: (i) the order of the linear/additive predictors is switched so the geometric probability comes first; (ii) argument \code{onempstr0} is now 1 minus the probability of a structural zero, i.e., the probability of the parent (geometric) component, i.e., \code{onempstr0} is \code{1-pstr0}; (iii) argument \code{zero} has a new default so that the \code{onempstr0} is intercept-only by default. Now \code{zigeometricff()} is generally recommended over \code{zigeometric()}. Both functions implement Fisher scoring and can handle multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } %\references{ %} \author{ T. W. Yee } \note{ % Numerical problems may occur since the initial values are currently % not very good. The zero-\emph{deflated} geometric distribution might be fitted by setting \code{lpstr0 = identitylink}, albeit not entirely reliably. See \code{\link{zipoisson}} for information that can be applied here. Else try the zero-altered geometric distribution (see \code{\link{zageometric}}). } %\section{Warning }{ % Numerical problems can occur. % Half-stepping is not uncommon. % If failure to converge occurs, make use of the argument \code{ipstr0}. % %} \seealso{ \code{\link{rzigeom}}, \code{\link{geometric}}, \code{\link{zageometric}}, \code{\link[stats]{rgeom}}, \code{\link{simulate.vlm}}. } \examples{ gdata <- data.frame(x2 = runif(nn <- 1000) - 0.5) gdata <- transform(gdata, x3 = runif(nn) - 0.5, x4 = runif(nn) - 0.5) gdata <- transform(gdata, eta1 = 1.0 - 1.0 * x2 + 2.0 * x3, eta2 = -1.0, eta3 = 0.5) gdata <- transform(gdata, prob1 = logitlink(eta1, inverse = TRUE), prob2 = logitlink(eta2, inverse = TRUE), prob3 = logitlink(eta3, inverse = TRUE)) gdata <- transform(gdata, y1 = rzigeom(nn, prob1, pstr0 = prob3), y2 = rzigeom(nn, prob2, pstr0 = prob3), y3 = rzigeom(nn, prob2, pstr0 = prob3)) with(gdata, table(y1)) with(gdata, table(y2)) with(gdata, table(y3)) head(gdata) fit1 <- vglm(y1 ~ x2 + x3 + x4, zigeometric(zero = 1), data = gdata, trace = TRUE) coef(fit1, matrix = TRUE) head(fitted(fit1, type = "pstr0")) fit2 <- vglm(cbind(y2, y3) ~ 1, zigeometric(zero = 1), data = gdata, trace = TRUE) coef(fit2, matrix = TRUE) summary(fit2) } \keyword{models} \keyword{regression} VGAM/man/zinegbinomial.Rd0000644000176200001440000002556113565414527014712 0ustar liggesusers\name{zinegbinomial} \alias{zinegbinomial} \alias{zinegbinomialff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Negative Binomial Distribution Family Function } \description{ Fits a zero-inflated negative binomial distribution by full maximum likelihood estimation.
} \usage{ zinegbinomial(zero = "size", type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"), mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, lpstr0 = "logitlink", lmunb = "loglink", lsize = "loglink", imethod = 1, ipstr0 = NULL, imunb = NULL, iprobs.y = NULL, isize = NULL, gprobs.y = (0:9)/10, gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) zinegbinomialff(lmunb = "loglink", lsize = "loglink", lonempstr0 = "logitlink", type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"), imunb = NULL, isize = NULL, ionempstr0 = NULL, zero = c("size", "onempstr0"), imethod = 1, iprobs.y = NULL, cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30, gprobs.y = (0:9)/10, gsize.mux = exp((-12:6)/2), mds.min = 1e-3, nsimEIM = 500) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr0, lmunb, lsize}{ Link functions for the parameters \eqn{\phi}{pstr0}, the mean and \eqn{k}; see \code{\link{negbinomial}} for details, and \code{\link{Links}} for more choices. For the zero-\emph{deflated} model see below. } % \item{epstr0, emunb, esize}{ % epstr0 = list(), emunb = list(), esize = list(), % List. Extra arguments for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for more information. } \item{ipstr0, isize, imunb}{ Optional initial values for \eqn{\phi}{pstr0} and \eqn{k}{k} and \eqn{\mu}{munb}. The default is to compute an initial value internally for each of them. If a vector then recycling is used. } \item{lonempstr0, ionempstr0}{ Corresponding arguments for the other parameterization. See details below. } \item{imethod}{ An integer with value \code{1} or \code{2} or \code{3} which specifies the initialization method for the mean parameter. If failure to converge occurs try another value. See \code{\link{CommonVGAMffArguments}} for more information. } \item{zero}{ Specifies which linear/additive predictors are to be modelled as intercept-only. They can be such that their absolute values are either 1 or 2 or 3. The default is the \eqn{\phi}{pstr0} and \eqn{k} parameters (both for each response). See \code{\link{CommonVGAMffArguments}} for more information. } \item{nsimEIM}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{iprobs.y, cutoff.prob, max.support, max.chunk.MB }{ See \code{\link{negbinomial}} and/or \code{\link{posnegbinomial}} for details. } \item{mds.min, eps.trig}{ See \code{\link{negbinomial}} for details. } \item{gprobs.y, gsize.mux}{ These arguments relate to grid searching in the initialization process. See \code{\link{negbinomial}} and/or \code{\link{posnegbinomial}} for details. } } \details{ These functions are based on \deqn{P(Y=0) = \phi + (1-\phi) (k/(k+\mu))^k,}{% P(Y=0) = phi + (1-phi) * (k/(k+munb))^k,} and for \eqn{y=1,2,\ldots}, \deqn{P(Y=y) = (1-\phi) \, dnbinom(y, \mu, k).}{% P(Y=y) = (1-phi) * dnbinom(y, munb, k).} The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}. The mean of \eqn{Y} is \eqn{(1-\phi) \mu}{(1-phi)*munb} (returned as the fitted values). By default, the three linear/additive predictors for \code{zinegbinomial()} are \eqn{(logit(\phi), \log(\mu), \log(k))^T}{(logit(phi), log(munb), log(k))^T}. See \code{\link{negbinomial}}, another \pkg{VGAM} family function, for the formula of the probability density function and other details of the negative binomial distribution.
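For instance, the zero-probability formula above can be checked numerically against the density function documented at \code{\link{Zinegbin}} (a small sketch only; the parameter values below are arbitrary):
\preformatted{
pstr0 <- 0.2; munb <- 3; size <- 2  # Arbitrary parameter values
dzinegbin(0, size = size, munb = munb, pstr0 = pstr0)  # P(Y=0)
pstr0 + (1 - pstr0) * (size / (size + munb))^size      # Same value
}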
Independent multiple responses are handled. If so then arguments \code{ipstr0} and \code{isize} may be vectors with length equal to the number of responses. The \pkg{VGAM} family function \code{zinegbinomialff()} has a few changes compared to \code{zinegbinomial()}. These are: (i) the order of the linear/additive predictors is switched so the NB mean comes first; (ii) \code{onempstr0} is now 1 minus the probability of a structural 0, i.e., the probability of the parent (NB) component, i.e., \code{onempstr0} is \code{1-pstr0}; (iii) argument \code{zero} has a new default so that the \code{onempstr0} is intercept-only by default. Now \code{zinegbinomialff()} is generally recommended over \code{zinegbinomial()}. Both functions implement Fisher scoring and can handle multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } %\references{ % %} \author{ T. W. Yee } \note{ % 20130316: commenting out this: % For intercept-models, the \code{misc} slot has a component called % \code{pobs0} which is the estimate of \eqn{P(Y=0)}. % Note that \eqn{P(Y=0)} is not the parameter \eqn{\phi}{phi}. % 20130316: adding this: Estimated probabilities of a structural zero and an observed zero can be returned, as in \code{\link{zipoisson}}; see \code{\link{fittedvlm}} for more information. If \eqn{k} is large then the use of the \pkg{VGAM} family function \code{\link{zipoisson}} is probably preferable. This follows because the Poisson is the limiting distribution of a negative binomial as \eqn{k} tends to infinity. The zero-\emph{deflated} negative binomial distribution might be fitted by setting \code{lpstr0 = identitylink}, albeit, not entirely reliably. See \code{\link{zipoisson}} for information that can be applied here. Else try the zero-altered negative binomial distribution (see \code{\link{zanegbinomial}}). } \section{Warning }{ This model can be difficult to fit to data, and this family function is fragile. The model is especially difficult to fit reliably when the estimated \eqn{k} parameter is very large (so the model approaches a zero-inflated Poisson distribution) or much less than 1 (and gets more difficult as it approaches 0). Numerical problems can also occur, e.g., when the probability of a zero is actually less than, and not more than, the nominal probability of zero. Similarly, numerical problems can occur if there is little or no 0-inflation, or when the sample size is small. Half-stepping is not uncommon. Successful convergence is sensitive to the initial values; therefore, if failure to converge occurs, try using combinations of arguments \code{stepsize} (in \code{\link{vglm.control}}), \code{imethod}, \code{imunb}, \code{ipstr0}, \code{isize}, and/or \code{zero} if there are explanatory variables. Else try fitting an ordinary \code{\link{negbinomial}} model or a \code{\link{zipoisson}} model. % An infinite loop might occur if some of the fitted values % (the means) are too close to 0. % \code{ishrinkage}, This \pkg{VGAM} family function can be computationally expensive and can run slowly; setting \code{trace = TRUE} is useful for monitoring convergence. % 20160208; A bug caused this, but has been fixed now: % And \code{\link{zinegbinomial}} may converge slowly when % the estimated \eqn{k} parameter is less than 1; % and get slower as it approaches 0.
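For example, a more cautious refit along the lines suggested above might look like the following (a sketch only; \code{ndata} is as in Example 1 below, and the control values are arbitrary):
\preformatted{
nfit2 <- vglm(y1 ~ x2, zinegbinomial(imethod = 2), data = ndata,
              control = vglm.control(stepsize = 0.5, maxit = 99))
}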
} \seealso{ \code{\link{Zinegbin}}, \code{\link{negbinomial}}, \code{\link[stats:Poisson]{rpois}}, \code{\link{CommonVGAMffArguments}}. } \examples{ \dontrun{ # Example 1 ndata <- data.frame(x2 = runif(nn <- 1000)) ndata <- transform(ndata, pstr0 = logitlink(-0.5 + 1 * x2, inverse = TRUE), munb = exp( 3 + 1 * x2), size = exp( 0 + 2 * x2)) ndata <- transform(ndata, y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0)) with(ndata, table(y1)["0"] / sum(table(y1))) nfit <- vglm(y1 ~ x2, zinegbinomial(zero = NULL), data = ndata) coef(nfit, matrix = TRUE) summary(nfit) head(cbind(fitted(nfit), with(ndata, (1 - pstr0) * munb))) round(vcov(nfit), 3) # Example 2: RR-ZINB could also be called a COZIVGLM-ZINB-2 ndata <- data.frame(x2 = runif(nn <- 2000)) ndata <- transform(ndata, x3 = runif(nn)) ndata <- transform(ndata, eta1 = 3 + 1 * x2 + 2 * x3) ndata <- transform(ndata, pstr0 = logitlink(-1.5 + 0.5 * eta1, inverse = TRUE), munb = exp(eta1), size = exp(4)) ndata <- transform(ndata, y1 = rzinegbin(nn, pstr0 = pstr0, mu = munb, size = size)) with(ndata, table(y1)["0"] / sum(table(y1))) rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), data = ndata, Index.corner = 2, str0 = 3, trace = TRUE) coef(rrzinb, matrix = TRUE) Coef(rrzinb) } } \keyword{models} \keyword{regression} %zinegbinomial(lpstr0 = "logitlink", lmunb = "loglink", lsize = "loglink", % type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"), % ipstr0 = NULL, isize = NULL, zero = "size", % imethod = 1, ishrinkage = 0.95, % probs.y = 0.75, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % gpstr0 = 1:19/20, gsize = exp((-4):4), % nsimEIM = 250) %zinegbinomial(zero = "size", % type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"), % nsimEIM = 250, cutoff.prob = 0.999, max.support = 2000, % max.chunk.MB = 30, % lpstr0 = "logitlink", lmunb = "loglink", lsize = "loglink", % imethod = 1, ipstr0 = NULL, imunb = NULL, % probs.y = 0.85, ishrinkage = 0.95, % isize = NULL, gpstr0 = 1:19/20, gsize = exp((-4):4)) %zinegbinomialff(lmunb = "loglink", lsize = "loglink", lonempstr0 = "logitlink", % type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"), % isize = NULL, ionempstr0 = NULL, % zero = c("size", "onempstr0"), % imethod = 1, ishrinkage = 0.95, % probs.y = 0.75, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % gonempstr0 = 1:19/20, gsize = exp((-4):4), % nsimEIM = 250) %ndata <- transform(ndata, % y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0), % y2 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0)) %with(ndata, table(y1)["0"] / sum(table(y1))) %fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), data = ndata) VGAM/man/bilogistic.Rd0000644000176200001440000001102413565414527014210 0ustar liggesusers\name{bilogistic} \alias{bilogistic} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bivariate Logistic Distribution Family Function } \description{ Estimates the four parameters of the bivariate logistic distribution by maximum likelihood estimation. } \usage{ bilogistic(llocation = "identitylink", lscale = "loglink", iloc1 = NULL, iscale1 = NULL, iloc2 = NULL, iscale2 = NULL, imethod = 1, nsimEIM = 250, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation}{ Link function applied to both location parameters \eqn{l_1}{l1} and \eqn{l_2}{l2}. See \code{\link{Links}} for more choices. % 20150227; yettodo: expand/change llocation to lloc1 and lloc2. 
} \item{lscale}{ Parameter link function applied to both (positive) scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}. See \code{\link{Links}} for more choices. } \item{iloc1, iloc2}{ Initial values for the location parameters. By default, initial values are chosen internally using \code{imethod}. Assigning values here will override the argument \code{imethod}. } \item{iscale1, iscale2}{ Initial values for the scale parameters. By default, initial values are chosen internally using \code{imethod}. Assigning values here will override the argument \code{imethod}. } \item{imethod}{ An integer with value \code{1} or \code{2} which specifies the initialization method. If failure to converge occurs try the other value. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}} for details. } % \item{zero}{ An integer-valued vector specifying which % linear/additive predictors are modelled as intercepts only. % The default is none of them. % If used, one can choose values from the set \{1,2,3,4\}. % See \code{\link{CommonVGAMffArguments}} for more information. % } } \details{ The four-parameter bivariate logistic distribution has a density that can be written as \deqn{f(y_1,y_2;l_1,s_1,l_2,s_2) = 2 \frac{\exp[-(y_1-l_1)/s_1 - (y_2-l_2)/s_2]}{ s_1 s_2 \left( 1 + \exp[-(y_1-l_1)/s_1] + \exp[-(y_2-l_2)/s_2] \right)^3}}{% f(y1,y2;l1,s1,l2,s2) = 2 * exp[-(y1-l1)/s1 - (y2-l2)/s2] / [s1 * s2 * ( 1 + exp[-(y1-l1)/s1] + exp[-(y2-l2)/s2] )^3] } where \eqn{s_1>0}{s1>0} and \eqn{s_2>0}{s2>0} are the scale parameters, and \eqn{l_1}{l1} and \eqn{l_2}{l2} are the location parameters. Each of the two responses is unbounded, i.e., \eqn{-\infty 1} then the length is taken to be the number required. } \item{scale, shape}{ positive scale and shape parameters. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{dexpgeom} gives the density, \code{pexpgeom} gives the distribution function, \code{qexpgeom} gives the quantile function, and \code{rexpgeom} generates random deviates. } \author{ J. G. Lauder and T. W. Yee } \details{ See \code{\link{expgeometric}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } \note{ We define \code{scale} as the reciprocal of the scale parameter used by Adamidis and Loukas (1998). } \seealso{ \code{\link{expgeometric}}, \code{\link{exponential}}, \code{\link{geometric}}. } \examples{ \dontrun{ shape <- 0.5; scale <- 1; nn <- 501 x <- seq(-0.10, 3.0, len = nn) plot(x, dexpgeom(x, scale, shape), type = "l", las = 1, ylim = c(0, 2), ylab = paste("[dp]expgeom(shape = ", shape, ", scale = ", scale, ")"), col = "blue", cex.main = 0.8, main = "Blue is density, red is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles") lines(x, pexpgeom(x, scale, shape), col = "red") probs <- seq(0.1, 0.9, by = 0.1) Q <- qexpgeom(probs, scale, shape) lines(Q, dexpgeom(Q, scale, shape), col = "purple", lty = 3, type = "h") lines(Q, pexpgeom(Q, scale, shape), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) max(abs(pexpgeom(Q, scale, shape) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/profilevglm.Rd0000644000176200001440000000452513565414527014416 0ustar liggesusers % file MASS/man/profilevglm.Rd % copyright (C) 1999-2008 W. N. Venables and B. D.
Ripley \name{profilevglm} \alias{profilevglm} \title{Method for Profiling vglm Objects} \description{ Investigates the profile log-likelihood function for a fitted model of class \code{"vglm"}. } \usage{ profilevglm(object, which = 1:p.vlm, alpha = 0.01, maxsteps = 10, del = zmax/5, trace = NULL, \dots) } \arguments{ \item{object}{the original fitted model object.} \item{which}{the original model parameters which should be profiled. This can be a numeric or character vector. By default, all parameters are profiled. } \item{alpha}{highest significance level allowed for the profiling. % profile t-statistics. } \item{maxsteps}{maximum number of points to be used for profiling each parameter.} \item{del}{suggested change on the scale of the profile t-statistics. Default value chosen to allow profiling at about 10 parameter values.} \item{trace}{logical: should the progress of profiling be reported? The default is to use the \code{trace} value from the fitted object; see \code{\link{vglm.control}} for details. } \item{\dots}{further arguments passed to or from other methods.} } \value{ A list of classes \code{"profile.glm"} and \code{"profile"} with an element for each parameter being profiled. The elements are data-frames with two variables \item{par.vals}{a matrix of parameter values for each fitted model.} \item{tau}{the profile t-statistics.} } \details{ This function is called by \code{\link{confintvglm}} to do the profiling. See also \code{\link[MASS]{profile.glm}} for details. } \author{ T. W. Yee adapted this function from \code{\link[MASS]{profile.glm}}, written originally by D. M. Bates and W. N. Venables. (For S in 1996.) The help file was also used as a template. } \seealso{ \code{\link{vglm}}, \code{\link{confintvglm}}, \code{\link{lrt.stat}}, \code{\link[stats]{profile}}, \code{\link[MASS]{profile.glm}}, \code{\link[MASS]{plot.profile}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, trace = TRUE, data = pneumo) pfit1 <- profile(fit1, trace = FALSE) confint(fit1, method = "profile", trace = FALSE) } \keyword{regression} \keyword{models} VGAM/man/bilogisUC.Rd0000644000176200001440000000463513565414527013752 0ustar liggesusers\name{bilogis} \alias{bilogis} \alias{dbilogis} \alias{pbilogis} \alias{rbilogis} \title{Bivariate Logistic Distribution} \description{ Density, distribution function, quantile function and random generation for the 4-parameter bivariate logistic distribution. } \usage{ dbilogis(x1, x2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1, log = FALSE) pbilogis(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) rbilogis(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) } \arguments{ \item{x1, x2, q1, q2}{vector of quantiles.} \item{n}{number of observations. Same as \code{\link[stats]{rlogis}}. } \item{loc1, loc2}{the location parameters \eqn{l_1}{l1} and \eqn{l_2}{l2}.} \item{scale1, scale2}{the scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{dbilogis} gives the density, \code{pbilogis} gives the distribution function, and \code{rbilogis} generates random deviates (a two-column matrix). } \references{ Gumbel, E. J. (1961) Bivariate logistic distributions. \emph{Journal of the American Statistical Association}, \bold{56}, 335--349. } \author{ T. W. 
Yee } \details{ See \code{\link{bilogistic}}, the \pkg{VGAM} family function for estimating the four parameters by maximum likelihood estimation, for the formula of the cumulative distribution function and other details. } \note{ Gumbel (1961) proposed two bivariate logistic distributions with logistic distribution marginals, which he called Type I and Type II. The Type I is this one. The Type II belongs to the Morgenstern type. The \code{\link{biamhcop}} distribution has, as a special case, this distribution, which is when the random variables are independent. % This note added 20140920 } \seealso{ \code{\link{bilogistic}}, \code{\link{biamhcop}}. } \examples{ \dontrun{ par(mfrow = c(1, 3)) ymat <- rbilogis(n = 2000, loc1 = 5, loc2 = 7, scale2 = exp(1)) myxlim <- c(-2, 15); myylim <- c(-10, 30) plot(ymat, xlim = myxlim, ylim = myylim) N <- 100 x1 <- seq(myxlim[1], myxlim[2], len = N) x2 <- seq(myylim[1], myylim[2], len = N) ox <- expand.grid(x1, x2) z <- dbilogis(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1)) contour(x1, x2, matrix(z, N, N), main = "density") z <- pbilogis(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1)) contour(x1, x2, matrix(z, N, N), main = "cdf") } } \keyword{distribution} VGAM/man/bmi.nz.Rd0000644000176200001440000000266113565414527013264 0ustar liggesusers\name{bmi.nz} \alias{bmi.nz} \docType{data} \title{ Body Mass Index of New Zealand Adults Data} \description{ The body mass indexes and ages from an approximate random sample of 700 New Zealand adults. } \usage{data(bmi.nz)} \format{ A data frame with 700 observations on the following 2 variables. \describe{ \item{age}{a numeric vector; their age (years). } \item{BMI}{a numeric vector; their body mass indexes, i.e., their weight divided by the square of their height (kg / \eqn{m^2}{m^2}).} } } \details{ They are a random sample from the Fletcher Challenge/Auckland Heart and Health survey conducted in the early 1990s. There are some outliers in the data set. A variable \code{gender} would be useful, and may be added later. } \source{ Clinical Trials Research Unit, University of Auckland, New Zealand, \code{http://www.ctru.auckland.ac.nz}. % \url{http://www.ctru.auckland.ac.nz}. } \references{ MacMahon, S., Norton, R., Jackson, R., Mackie, M. J., Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995) Fletcher Challenge-University of Auckland Heart & Health Study: design and baseline findings. \emph{New Zealand Medical Journal}, \bold{108}, 499--502. } \examples{ \dontrun{ with(bmi.nz, plot(age, BMI, col = "blue")) fit <- vgam(BMI ~ s(age, df = c(2, 4, 2)), lms.yjn, data = bmi.nz, trace = TRUE) qtplot(fit, pcol = "blue", tcol = "brown", lcol = "brown") } } \keyword{datasets} VGAM/man/bell.Rd0000644000176200001440000000324113565414527013000 0ustar liggesusers\name{bell} \alias{bell} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Bell Series of Integers } \description{ Returns the values of the Bell series. } \usage{ bell(n) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{n}{ Vector of non-negative integers. Values greater than 218 return an \code{Inf}. Non-integers or negative values return a \code{NaN}. } } \details{ The Bell numbers emerge from a series expansion of \eqn{ \exp(e^x - 1)}{exp(exp(x) - 1)} for real \eqn{x}. The first few values are \eqn{B_{0}=1}{B_0 = 1}, \eqn{B_{1}=1}{B_1 = 1}, \eqn{B_{2}=2}{B_2 = 2}, \eqn{B_{3}=5}{B_3 = 5}, \eqn{B_{4}=15}{B_4 = 15}.
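They also satisfy the well-known recurrence \deqn{B_{n+1} = \sum_{k=0}^{n} {n \choose k} B_k, \quad B_0 = 1.}{% B_{n+1} = sum_{k=0}^{n} choose(n,k) B_k, with B_0 = 1.}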
The series increases quickly so that overflow occurs when its argument is more than 218. } \value{ This function returns \eqn{B_{n}}{B_n}. } \references{ Bell, E. T. (1934) Exponential polynomials. \emph{Ann. Math.}, \bold{35}, 258--277. Bell, E. T. (1934) Exponential numbers. \emph{Amer. Math. Monthly}, \bold{41}, 411--419. } \author{ T. W. Yee } %\note{ %} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{bellff}}, \code{\link{rbell}}. % \code{\link{lambertW}}. } \examples{ \dontrun{ plot(0:10, bell(0:10), log = "y", type = "h", las = 1, col = "blue") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{math} % \item{all}{ % Logical. % If \code{TRUE} then a vector of length \eqn{n+1} is returned % starting with \eqn{B_{0}}{B_0}. % If \code{FALSE} then only the last value corresponding to % \eqn{B_{n}}{B_n} is returned. %}VGAM/man/VGAM-package.Rd0000644000176200001440000002622213565414527014211 0ustar liggesusers\name{VGAM-package} \alias{VGAM-package} \alias{VGAM} \docType{package} \title{ Vector Generalized Linear and Additive Models and Associated Models } \description{ \pkg{VGAM} provides functions for fitting vector generalized linear and additive models (VGLMs and VGAMs), and associated models (Reduced-rank VGLMs, Quadratic RR-VGLMs, Reduced-rank VGAMs). This package fits many models and distributions by maximum likelihood estimation (MLE) or penalized MLE. Also fits constrained ordination models in ecology such as constrained quadratic ordination (CQO). } \details{ This package centers on the \emph{iteratively reweighted least squares} (IRLS) algorithm. Other key words include Fisher scoring, additive models, reduced-rank regression, penalized likelihood, and constrained ordination. The central modelling functions are \code{\link{vglm}}, \code{\link{vgam}}, \code{\link{rrvglm}}, \code{\link{rcim}}, \code{\link{cqo}}, \code{\link{cao}}. Function \code{\link{vglm}} operates very similarly to \code{\link[stats]{glm}} but is much more general, and many methods functions such as \code{\link[VGAM:coefvlm]{coef}} and \code{\link[VGAM:predictvglm]{predict}} are available. The package uses S4 (see \code{\link[methods]{methods-package}}). A companion package called \pkg{VGAMdata} contains some larger data sets which were shifted from \pkg{VGAM}. Compared to other similar packages, such as \pkg{gamlss} and \pkg{mgcv}, \pkg{VGAM} has more models implemented (150+ of them) and they are not restricted to a location-scale-shape framework or (largely) the 1-parameter exponential family. There is a general statistical framework behind it all, that once grasped, makes regression modelling quite unified. Some features of the package are: (i) most family functions handle multiple responses; (ii) reduced-rank regression is available by operating on latent variables (optimal linear combinations of the explanatory variables); (iii) basic automatic smoothing parameter selection is implemented for VGAMs, although it has to be refined; (iv) \emph{smart} prediction allows correct prediction of nested terms in the formula provided smart functions are used. The GLM and GAM classes are special cases of VGLMs and VGAMs. The VGLM/VGAM framework is intended to be very general so that it encompasses as many distributions and models as possible. VGLMs are limited only by the assumption that the regression coefficients enter through a set of linear predictors. 
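That is, each of the \eqn{M} linear/additive predictors is of the form \deqn{\eta_j = \beta_j^T x, \quad j = 1, \ldots, M,}{% eta_j = beta_j^T x, j = 1, ..., M,} for some covariate vector \eqn{x}.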
The VGLM class is very large and encompasses a wide range of multivariate response types and models, e.g., it includes univariate and multivariate distributions, categorical data analysis, extreme values, correlated binary data, quantile and expectile regression, and time series problems. Potentially, it can handle generalized estimating equations, survival analysis, bioassay data and nonlinear least-squares problems. Crudely, VGAMs are to VGLMs what GAMs are to GLMs. Two types of VGAMs are implemented: 1st-generation VGAMs with \code{\link{s}} use vector backfitting, while 2nd-generation VGAMs with \code{\link{sm.os}} and \code{\link{sm.ps}} use O-splines and P-splines, do not use the backfitting algorithm, and have automatic smoothing parameter selection. The former is older and is based on Yee and Wild (1996). The latter is more modern (Yee, Somchit and Wild, 2019) but it requires a reasonably large number of observations to work well. This package is the first to check for the \emph{Hauck-Donner effect} (HDE) in regression models; see \code{\link{hdeff}}. This is an aberration of the Wald statistics when the parameter estimates are too close to the boundary of the parameter space. When present, the p-value of a regression coefficient is biased upwards so that a highly significant variable might be deemed nonsignificant. Thus the HDE can create havoc for variable selection! Somewhat related to the previous paragraph, hypothesis testing using the likelihood ratio test, Rao's score test (Lagrange multiplier test) and (modified) Wald's test are all available; see \code{\link{summaryvglm}}. For all regression coefficients of a model, taken one at a time, all three methods require further IRLS iterations to obtain new values of the other regression coefficients after one of the coefficients has had its value set (usually to 0). Hence the overall computational load is significant. %(e.g., \eqn{n > 500}, say); and it does not always converge %and is not entirely reliable. %Vector smoothing (see \code{\link{vsmooth.spline}}) allows several %additive predictors to be estimated as a sum of smooth functions of %the covariates. For a complete list of this package, use \code{library(help = "VGAM")}. New \pkg{VGAM} family functions are continually being written and added to the package. % A monograph about VGLM and VGAMs etc. appeared in October 2015. %but unfortunately will not be finished for a while. %~~ An overview of how to use the package, including the most important ~~ %~~ functions ~~ %For detailed control of fitting, %each of these has its own control function, e.g., %\code{\link{vglm.control}}. } \author{ Thomas W. Yee, \email{t.yee@auckland.ac.nz}.\cr Maintainer: Thomas Yee \email{t.yee@auckland.ac.nz}. } \section{Warning}{ This package is undergoing continual development and improvement; therefore users should treat everything as subject to change. This includes the family function names, argument names, many of the internals, the use of link functions, and slot names. For example, all link functions may be renamed so that they end in \code{"link"}, e.g., \code{loglink()} instead of \code{loge()}. Some future pain can be avoided by using good programming techniques, e.g., using extractor/accessor functions such as \code{coef()}, \code{weights()}, \code{vcov()}, \code{predict()}. Nevertheless, please expect changes in all aspects of the package. See the \code{NEWS} file for a list of changes from version to version. } \references{ Yee, T. W.
(2015) \emph{Vector Generalized Linear and Additive Models: With an Implementation in R}. New York, USA: Springer. Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. Yee, T. W. and Stephenson, A. G. (2007) Vector generalized linear and additive extreme value models. \emph{Extremes}, \bold{10}, 1--19. Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. Yee, T. W. (2008) The \code{VGAM} Package. \emph{R News}, \bold{8}, 28--39. Yee, T. W. (2010) The \pkg{VGAM} package for categorical data analysis. \emph{Journal of Statistical Software}, \bold{32}, 1--34. \url{http://www.jstatsoft.org/v32/i10/}. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. Yee, T. W. (2018) On the Hauck-Donner effect in Wald tests: Detection and parameter space characterization (\emph{under review}). Yee, T. W., Somchit, C. and Wild, C. J. (2019) Penalized vector generalized additive models. Manuscript in preparation. My website for the \pkg{VGAM} package and book is at \url{https://www.stat.auckland.ac.nz/~yee}. There are some resources there, especially relating to my book and new features added to \pkg{VGAM}. %(Oldish) documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee/VGAM} %contains some further information and examples. } \keyword{package} \keyword{models} \keyword{regression} \seealso{ \code{\link{vglm}}, \code{\link{vgam}}, \code{\link{rrvglm}}, \code{\link{rcim}}, \code{\link{cqo}}, \code{\link{TypicalVGAMfamilyFunction}}, \code{\link{CommonVGAMffArguments}}, \code{\link{Links}}, \code{\link{hdeff}}, \url{https://CRAN.R-project.org/package=VGAM}. %~~ Optional links to other man pages, e.g.
~~ %~~ \code{\link[:-package]{}} ~~ } \examples{ # Example 1; proportional odds model pneumo <- transform(pneumo, let = log(exposure.time)) (fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)) depvar(fit1) # Better than using fit1@y; dependent variable (response) weights(fit1, type = "prior") # Number of observations coef(fit1, matrix = TRUE) # p.179, in McCullagh and Nelder (1989) constraints(fit1) # Constraint matrices summary(fit1) # HDE could affect these results summary(fit1, lrt0 = TRUE, score0 = TRUE, wald0 = TRUE) # No HDE hdeff(fit1) # Check for any Hauck-Donner effect # Example 2; zero-inflated Poisson model zdata <- data.frame(x2 = runif(nn <- 2000)) zdata <- transform(zdata, pstr0 = logitlink(-0.5 + 1*x2, inverse = TRUE), lambda = loglink( 0.5 + 2*x2, inverse = TRUE)) zdata <- transform(zdata, y = rzipois(nn, lambda, pstr0 = pstr0)) with(zdata, table(y)) fit2 <- vglm(y ~ x2, zipoisson, data = zdata, trace = TRUE) coef(fit2, matrix = TRUE) # These should agree with the above values # Example 3; fit a two species GAM simultaneously fit3 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)), binomialff(multiple.responses = TRUE), data = hunua) coef(fit3, matrix = TRUE) # Not really interpretable \dontrun{ plot(fit3, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4) ooo <- with(hunua, order(altitude)) with(hunua, matplot(altitude[ooo], fitted(fit3)[ooo, ], type = "l", lwd = 2, col = 3:4, xlab = "Altitude (m)", ylab = "Probability of presence", las = 1, main = "Two plant species' response curves", ylim = c(0, 0.8))) with(hunua, rug(altitude)) } # Example 4; LMS quantile regression fit4 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = bmi.nz, trace = TRUE) head(predict(fit4)) head(fitted(fit4)) head(bmi.nz) # Person 1 is near the lower quartile among people his age head(cdf(fit4)) \dontrun{ par(mfrow = c(1,1), bty = "l", mar = c(5,4,4,3)+0.1, xpd=TRUE) qtplot(fit4, percentiles = c(5,50,90,99), main = "Quantiles", las = 1, xlim = c(15, 90), ylab = "BMI", lwd=2, lcol=4) # Quantile plot ygrid <- seq(15, 43, len = 100) # BMI ranges par(mfrow = c(1, 1), lwd = 2) # Density plot aa <- deplot(fit4, x0 = 20, y = ygrid, xlab = "BMI", col = "black", main = "Density functions at Age=20 (black), 42 (red) and 55 (blue)") aa aa <- deplot(fit4, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red") aa <- deplot(fit4, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue", Attach = TRUE) aa@post$deplot # Contains density function values } # Example 5; GEV distribution for extremes (fit5 <- vglm(maxtemp ~ 1, gevff, data = oxtemp, trace = TRUE)) head(fitted(fit5)) coef(fit5, matrix = TRUE) Coef(fit5) vcov(fit5) vcov(fit5, untransform = TRUE) sqrt(diag(vcov(fit5))) # Approximate standard errors \dontrun{ rlplot(fit5) } } % Until my monograph comes out and this package is released as version 1.0-0 % the user should treat everything subject to change. VGAM/man/skellamUC.Rd0000644000176200001440000000311013565414527013735 0ustar liggesusers\name{Skellam} \alias{Skellam} \alias{dskellam} %\alias{pskellam} %\alias{qskellam} \alias{rskellam} \title{The Skellam Distribution} \description{ Density and random generation for the Skellam distribution. % distribution function, quantile function } \usage{ dskellam(x, mu1, mu2, log = FALSE) rskellam(n, mu1, mu2) } %pskellam(q, mu1, mu2) %qskellam(p, mu1, mu2) \arguments{ \item{x}{vector of quantiles.} % \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats:Uniform]{runif}}. 
} \item{mu1, mu2}{ See \code{\link{skellam}}. } \item{log}{ Logical; if TRUE, the logarithm is returned. } } \value{ \code{dskellam} gives the density, and \code{rskellam} generates random deviates. % \code{pskellam} gives the distribution function, % \code{qskellam} gives the quantile function, and } %\author{ T. W. Yee } \details{ See \code{\link{skellam}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } \section{Warning }{ Numerical problems may occur if \eqn{\mu_1}{mu1} and/or \eqn{\mu_2}{mu2} are large. The normal approximation for this case has not been implemented yet. } \seealso{ \code{\link{skellam}}, \code{\link[stats:Poisson]{dpois}}. } \examples{ \dontrun{ mu1 <- 1; mu2 <- 2; x <- (-7):7 plot(x, dskellam(x, mu1, mu2), type = "h", las = 1, col = "blue", main = paste("Density of Skellam distribution with mu1 = ", mu1, " and mu2 = ", mu2, sep = "")) } } \keyword{distribution} VGAM/man/logF.Rd0000644000176200001440000000503013565414527012747 0ustar liggesusers\name{logF} \alias{logF} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Log F Distribution Family Function } \description{ Maximum likelihood estimation of the 2-parameter log F distribution. } \usage{ logF(lshape1 = "loglink", lshape2 = "loglink", ishape1 = NULL, ishape2 = 1, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape1, lshape2}{ % Character. Parameter link functions for the shape parameters. Called \eqn{\alpha}{alpha} and \eqn{\beta}{beta} respectively. See \code{\link{Links}} for more choices. } \item{ishape1, ishape2}{ Optional initial values for the shape parameters. If given, they must be numeric and are recycled to the appropriate length. The default is to choose the values internally. See \code{\link{CommonVGAMffArguments}} for more information. } \item{imethod}{ Initialization method. Either the value 1, 2, or \ldots. See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The density for this distribution is \deqn{f(y; \alpha, \beta) = \exp(\alpha y) / [B(\alpha,\beta) (1 + e^y)^{\alpha + \beta}] }{% f(y; alpha, beta) = exp(alpha y) / [B(alpha, beta) * (1 + exp(y))^(alpha + beta)] } where \eqn{y} is real, \eqn{\alpha > 0}, \eqn{\beta > 0}, and \eqn{B(., .)} is the beta function \code{\link[base:Special]{beta}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Jones, M. C. (2008). On a class of distributions with simple exponential tails. \emph{Statistica Sinica}, \bold{18}(3), 1101--1110. % Section 3.2. } \author{ Thomas W. Yee } %\section{Warning}{ % %} %\note{ %} \seealso{ \code{\link{dlogF}}, \code{\link{logff}}. % \code{\link{simulate.vlm}}.
} \examples{ nn <- 1000 ldata <- data.frame(y1 = rnorm(nn, m = +1, sd = exp(2)), # Not proper data x2 = rnorm(nn, m = -1, sd = exp(2)), y2 = rnorm(nn, m = -1, sd = exp(2))) # Not proper data fit1 <- vglm(y1 ~ 1, logF, data = ldata, trace = TRUE) fit2 <- vglm(y2 ~ x2, logF, data = ldata, trace = TRUE) coef(fit2, matrix = TRUE) summary(fit2) vcov(fit2) head(fitted(fit1)) with(ldata, mean(y1)) max(abs(head(fitted(fit1)) - with(ldata, mean(y1)))) } \keyword{models} \keyword{regression} VGAM/man/posbernoulli.tb.Rd0000644000176200001440000002343713565414527015210 0ustar liggesusers\name{posbernoulli.tb} %\alias{posbernoulli} \alias{posbernoulli.tb} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive Bernoulli Family Function with Time and Behavioural Effects } \description{ Fits a GLM/GAM-like model to multiple Bernoulli responses where each row in the capture history matrix response has at least one success (capture). Sampling occasion effects and behavioural effects are accommodated. } \usage{ posbernoulli.tb(link = "logitlink", parallel.t = FALSE ~ 1, parallel.b = FALSE ~ 0, drop.b = FALSE ~ 1, type.fitted = c("likelihood.cond", "mean.uncond"), imethod = 1, iprob = NULL, p.small = 1e-4, no.warning = FALSE, ridge.constant = 0.0001, ridge.power = -4) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link, imethod, iprob}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{parallel.t, parallel.b, drop.b}{ A logical, or formula with a logical as the response. See \code{\link{CommonVGAMffArguments}} for information. The \code{parallel.}-type arguments specify whether the constraint matrices have a parallelism assumption for the temporal and behavioural effects. Argument \code{parallel.t} means parallel with respect to time, and matches the same argument name in \code{\link{posbernoulli.t}}. Suppose the model is intercept-only. Setting \code{parallel.t = FALSE ~ 0} results in the \eqn{M_b} model. Setting \code{drop.b = FALSE ~ 0} results in the \eqn{M_t} model because it drops columns off the constraint matrices corresponding to any behavioural effect. Setting \code{parallel.t = FALSE ~ 0} and setting \code{parallel.b = FALSE ~ 0} results in the \eqn{M_b} model. Setting \code{parallel.t = FALSE ~ 0}, \code{parallel.b = FALSE ~ 0} and \code{drop.b = FALSE ~ 0} results in the \eqn{M_0} model. Note the default for \code{parallel.t} and \code{parallel.b} may be unsuitable for data sets which have a large \eqn{\tau} because of the large number of parameters; it might be too flexible. If it is desired to have the behaviour affect some of the other covariates then set \code{drop.b = TRUE ~ 0}. The default model has a different intercept for each sampling occasion, a time-parallelism assumption for all other covariates, and a dummy variable representing a single behavioural effect (also in the intercept). The most flexible model is to set \code{parallel.b = TRUE ~ 0}, \code{parallel.t = TRUE ~ 0} and \code{drop.b = TRUE ~ 0}. This means that all possible temporal and behavioural effects are estimated, for the intercepts and other covariates. Such a model is \emph{not} recommended; it will contain a lot of parameters. } \item{type.fitted}{ Character, one of the choices for the type of fitted value returned. The default is the first one. Partial matching is okay. For \code{"likelihood.cond"}: the probability defined by the conditional likelihood.
For \code{"mean.uncond"}: the unconditional mean, which should agree with \code{\link[base]{colMeans}} applied to the response matrix for intercept-only models. } \item{ridge.constant, ridge.power}{ Determines the ridge parameters at each IRLS iteration. They are the constant and power (exponent) for the ridge adjustment for the working weight matrices (the capture probability block matrix, hence the first \eqn{\tau} diagonal values). At iteration \eqn{a} of the IRLS algorithm a positive value is added to the first \eqn{\tau}{tau} diagonal elements of the working weight matrices to make them positive-definite. This adjustment is the mean of the diagonal elements of \code{wz} multipled by \eqn{K \times a^p}{K * a^p} where \eqn{K} is \code{ridge.constant} and \eqn{p} is \code{ridge.power}. This is always positive but decays to zero as iterations proceed (provided \eqn{p} is negative etc.). } \item{p.small, no.warning}{ See \code{\link{posbernoulli.t}}. } } \details{ This model (commonly known as \eqn{M_{tb}}/\eqn{M_{tbh}} in the capture--recapture literature) operates on a response matrix of 0s and 1s (\eqn{n \times \tau}{n x tau}). See \code{\link{posbernoulli.t}} for information that is in common. It allows time and behavioural effects to be modelled. Evidently, the expected information matrix (EIM) seems \emph{not} of full rank (especially in early iterations), so \code{ridge.constant} and \code{ridge.power} are used to \emph{try} fix up the problem. The default link functions are \eqn{(logit \,p_{c1},\ldots,logit \,p_{c\tau},logit \,p_{r2},\ldots,logit \,p_{r\tau})^T}{ (logit p_{c1},\ldots,logit p_{c,tau},logit p_{r2},\ldots,logit p_{r,tau})^T} where the subscript \eqn{c} denotes capture, the subscript \eqn{r} denotes recapture, and it is not possible to recapture the animal at sampling occasion 1. Thus \eqn{M = 2\tau - 1}{M=2*tau-1}. The parameters are currently prefixed by \code{pcapture} and \code{precapture} for the capture and recapture probabilities. This \pkg{VGAM} family function may be further modified in the future. % Not surprisingly, % the fitted values are similar to \code{\link{posbernoulli.t}} and % \code{\link{posbernoulli.b}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ See \code{\link{posbernoulli.t}}. } \author{ Thomas W. Yee. } \note{ It is a good idea to apply the parallelism assumption to each sampling occasion except possibly with respect to the intercepts. Also, a simple behavioural effect such as being modelled using the intercept is recommended; if the behavioural effect is not parallel and/or allowed to apply to other covariates then there will probably be too many parameters, and hence, numerical problems. See \code{M_tbh.1} below. %Data-wise, at each sampling occasion, the \eqn{M_{tb}} model requires at least %one first capture and at least one noncapture. % If not all of the \eqn{2^{\tau}-1}{2^(tau) - 1} combinations of % the response matrix are not present then it pays to add % such rows to the response matrix and assign a small but % positive prior weight. % For example, if \eqn{\tau=2}{tau=2} then there should be % (0,1) rows, % (1,0) rows and % (1,1) rows present in the response matrix. It is a good idea to monitor convergence. Simpler models such as the \eqn{M_0}/\eqn{M_h} models are best fitted with \code{\link{posbernoulli.t}} or \code{\link{posbernoulli.b}} or \code{\link{posbinomial}}. 
% yettodo: % Some time in the future it might be possible to allow for a % different tau value for each row. % Then the response would be a matrix padded with NAs on the RHS. } \seealso{ \code{\link{posbernoulli.b}} (including \code{N.hat}), \code{\link{posbernoulli.t}}, \code{\link{posbinomial}}, \code{\link{Select}}, \code{\link{fill1}}, \code{\link{Huggins89table1}}, \code{\link{Huggins89.t1}}, \code{\link{deermice}}, \code{\link{prinia}}. } \examples{ \dontrun{ # Example 1: simulated data nTimePts <- 5 # (aka tau == # of sampling occasions) nnn <- 1000 # Number of animals pdata <- rposbern(n = nnn, nTimePts = nTimePts, pvars = 2) dim(pdata); head(pdata) M_tbh.1 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, posbernoulli.tb, data = pdata, trace = TRUE) coef(M_tbh.1) # First element is the behavioural effect coef(M_tbh.1, matrix = TRUE) constraints(M_tbh.1, matrix = TRUE) summary(M_tbh.1, presid = FALSE) # Standard errors are approximate head(fitted(M_tbh.1)) head(model.matrix(M_tbh.1, type = "vlm"), 21) dim(depvar(M_tbh.1)) M_tbh.2 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, posbernoulli.tb(parallel.t = FALSE ~ 0), data = pdata, trace = TRUE) coef(M_tbh.2) # First element is the behavioural effect coef(M_tbh.2, matrix = TRUE) constraints(M_tbh.2, matrix = TRUE) summary(M_tbh.2, presid = FALSE) # Standard errors are approximate head(fitted(M_tbh.2)) head(model.matrix(M_tbh.2, type = "vlm"), 21) dim(depvar(M_tbh.2)) # Example 2: deermice subset data fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, posbernoulli.t, data = deermice, trace = TRUE) coef(fit1) coef(fit1, matrix = TRUE) constraints(fit1, matrix = TRUE) summary(fit1, presid = FALSE) # Standard errors are approximate # fit1 is the same as Fit1 (a M_{th} model): Fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, posbernoulli.tb(drop.b = TRUE ~ sex + weight, parallel.t = TRUE), # No parallelism for the intercept data = deermice, trace = TRUE) constraints(Fit1) } } \keyword{models} \keyword{regression} %\section{Warning }{ % As this model is likely to be overparameterized, probably this % function should not be used (for now?). % %% From Jakub: % Estimation for the population size (and its SE) for the % \eqn{M_{tb}} and \eqn{M_{tbh}} model may be wrong. % But models % \eqn{M_{0}}, % \eqn{M_{h}}, % \eqn{M_{b}}, % \eqn{M_{bh}}, % \eqn{M_{t}}, % \eqn{M_{th}} % seem fine. % % Inference, especially using standard errors, may be fraught here % because the EIM is, strictly speaking, not of full rank. % A similar adjustment is made by \code{\link{zipebcom}}. % It is a good idea to monitor convergence. % The \eqn{M_0}/\eqn{M_h} models are best fitted with % \code{\link{posbernoulli.t}} or \code{\link{posbinomial}} because % the standard errors are more accurate. % % %} %yyy <- depvar(fit1) %if (length(table(4 * yyy[, 1] + 2 * yyy[, 2] + 1 * yyy[, 3])) != 2^(ncol(yyy)) - 1) % warning("not every combination is represented by a row in the response matrix") % 20181020; was this for a long time until now: % ridge.constant = 0.01, VGAM/man/ucberk.Rd0000644000176200001440000000365213565414527013343 0ustar liggesusers\name{ucberk} \alias{ucberk} \docType{data} \title{ University California Berkeley Graduate Admissions } \description{ University California Berkeley Graduate Admissions: counts cross-classified by acceptance/rejection and gender, for the six largest departments. } \usage{data(ucberk)} \format{ A data frame with 6 departmental groups with the following 5 columns. \describe{ \item{m.deny}{Counts of men denied admission. 
} \item{m.admit}{Counts of men admitted. } \item{w.deny}{Counts of women denied admission. } \item{w.admit}{Counts of women admitted. } \item{dept}{Department (the six largest), called \code{A}, \code{B}, \dots, \code{F}. } } } \details{ From Bickel et al. (1975), the data consists of applications for admission to graduate study at the University of California, Berkeley, for the fall 1973 quarter. In the admissions cycle for that quarter, the Graduate Division at Berkeley received approximately 15,000 applications, some of which were later withdrawn or transferred to a different proposed entry quarter by the applicants. Of the applications finally remaining for the fall 1973 cycle, 12,763 were sufficiently complete to permit a decision. There were about 101 graduate majors, both departmental and interdepartmental. There were 8442 male applicants and 4321 female applicants. About 44 percent of the males and about 35 percent of the females were admitted. The data are well known for illustrating Simpson's paradox. } %\source{ % % %} \references{ Bickel, P. J., Hammel, E. A. and O'Connell, J. W. (1975) Sex bias in graduate admissions: data from Berkeley. \emph{Science}, \bold{187}(4175), 398--404. Freedman, D., Pisani, R. and Purves, R. (1998) Chapter 2 of \emph{Statistics}, 3rd ed., W. W. Norton & Company. } \examples{ summary(ucberk) } \keyword{datasets} % 7 February 1975 % Bickel, et al., 187 (4175): 398-404 VGAM/man/zigeomUC.Rd0000644000176200001440000000476313565414527013616 0ustar liggesusers\name{Zigeom} \alias{Zigeom} \alias{dzigeom} \alias{pzigeom} \alias{qzigeom} \alias{rzigeom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Geometric Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-inflated geometric distribution with parameter \code{pstr0}. } \usage{ dzigeom(x, prob, pstr0 = 0, log = FALSE) pzigeom(q, prob, pstr0 = 0) qzigeom(p, prob, pstr0 = 0) rzigeom(n, prob, pstr0 = 0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{prob}{see \code{\link[stats]{dgeom}}.} \item{n}{ Same as in \code{\link[stats]{runif}}. } \item{pstr0}{ Probability of structural zero (ignoring the geometric distribution), called \eqn{\phi}{phi}. The default value corresponds to the response having an ordinary geometric distribution. } \item{log}{ Logical. Return the logarithm of the answer? } } \details{ The probability function of \eqn{Y} is 0 with probability \eqn{\phi}{phi}, and \eqn{geometric(prob)} with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=0) = \phi + (1-\phi) P(W=0)}{% P(Y=0) = phi + (1-phi) * P(W=0)} where \eqn{W} is distributed \eqn{geometric(prob)}. } \value{ \code{dzigeom} gives the density, \code{pzigeom} gives the distribution function, \code{qzigeom} gives the quantile function, and \code{rzigeom} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pstr0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. These functions actually allow for \emph{zero-deflation}. That is, the resulting probability of a zero count is \emph{less than} the nominal value of the parent distribution. See \code{\link{Zipois}} for more information. } \seealso{ \code{\link{zigeometric}}, \code{\link[stats]{dgeom}}.
}
\examples{
prob <- 0.5; pstr0 <- 0.2; x <- (-1):20
(ii <- dzigeom(x, prob, pstr0))
max(abs(cumsum(ii) - pzigeom(x, prob, pstr0)))  # Should be 0
table(rzigeom(1000, prob, pstr0))

\dontrun{ x <- 0:10
barplot(rbind(dzigeom(x, prob, pstr0), dgeom(x, prob)),
        beside = TRUE, col = c("blue", "orange"),
        ylab = "P[Y = y]", xlab = "y", las = 1,
        main = paste("zigeometric(", prob, ", pstr0 = ", pstr0,
                     ") (blue) vs", " geometric(", prob,
                     ") (orange)", sep = ""),
        names.arg = as.character(x))
}
}
\keyword{distribution}
VGAM/man/flourbeetle.Rd0000644000176200001440000000271613565414527014400 0ustar liggesusers\name{flourbeetle}
\alias{flourbeetle}
\docType{data}
\title{Mortality of Flour Beetles from Carbon Disulphide}
\description{
The \code{flourbeetle} data frame has 8 rows and 4 columns.
Two columns are explanatory, the other two are responses.

}
\usage{data(flourbeetle)}
\format{
This data frame contains the following columns:
\describe{
\item{logdose}{\code{\link[base]{log10}} applied to
\code{CS2mgL}. }
\item{CS2mgL}{a numeric vector, the concentration of gaseous
carbon disulphide in mg per litre. }
\item{exposed}{a numeric vector, counts; the number of beetles
exposed to the poison. }
\item{killed}{a numeric vector, counts; the numbers killed. }
}
}
\details{
These data were originally given in Table IV of Bliss (1935)
and are the combination of two series of toxicological
experiments involving \emph{Tribolium confusum}, also known as
the flour beetle.
Groups of such adult beetles were exposed for 5 hours to
gaseous carbon disulphide at different concentrations, and
their mortality measured.

}
\source{
Bliss, C. I. (1935)
The calculation of the dosage-mortality curve.
\emph{Annals of Applied Biology}, \bold{22}, 134--167.

}
\seealso{
\code{\link{binomialff}},
\code{\link{probitlink}}.

}
%\references{
%
%
%
%
%
%}
\examples{
fit1 <- vglm(cbind(killed, exposed - killed) ~ logdose,
             binomialff(link = probitlink), flourbeetle,
             trace = TRUE)
summary(fit1)
}
\keyword{datasets}
VGAM/man/SurvS4.Rd0000644000176200001440000001224013565414527013227 0ustar liggesusers\name{SurvS4}
\alias{SurvS4}
\alias{is.SurvS4}
%%%% 20120216 \alias{print.SurvS4}
\alias{show.SurvS4}
\alias{Math.SurvS4}
\alias{Summary.SurvS4}
\alias{[.SurvS4}
\alias{format.SurvS4}
\alias{as.data.frame.SurvS4}
\alias{as.character.SurvS4}
\alias{is.na.SurvS4}
\alias{Ops.SurvS4}
\title{
Create a Survival Object
}
\description{
Create a survival object, usually used as a response variable
in a model formula.

}
\usage{
SurvS4(time, time2, event, type =, origin = 0)
is.SurvS4(x)
}
\arguments{
\item{time}{
for right censored data, this is the follow up time.
For interval data, the first argument is the starting time for
the interval.

}
\item{x}{
any R object.

}
\item{event}{
The status indicator, normally 0=alive, 1=dead.
Other choices are \code{TRUE}/\code{FALSE}
(\code{TRUE} = death) or 1/2 (2=death).
For interval censored data, the status indicator is 0=right
censored, 1=event at \code{time}, 2=left censored, 3=interval
censored.
Although unusual, the event indicator can be omitted, in which
case all subjects are assumed to have an event.

}
\item{time2}{
ending time of the interval for interval censored or counting
process data only.
Intervals are assumed to be open on the left and closed on the
right, \code{(start, end]}.
For counting process data, \code{event} indicates whether an
event occurred at the end of the interval.

}
\item{type}{
character string specifying the type of censoring.
Possible values are
\code{"right"}, \code{"left"}, \code{"counting"},
\code{"interval"}, or \code{"interval2"}.
The default is \code{"right"} or \code{"counting"} depending on
whether the \code{time2} argument is absent or present,
respectively.

}
\item{origin}{
for counting process data, the hazard function origin.
This is most often used in conjunction with a model containing
time dependent strata in order to align the subjects properly
when they cross over from one stratum to another.

}
}
\value{
An object of class \code{SurvS4} (formerly \code{Surv}).
There are methods for \code{print}, \code{is.na}, and
subscripting survival objects.
\code{SurvS4} objects are implemented as a matrix of 2 or 3
columns.

In the case of \code{is.SurvS4}, a logical value \code{TRUE} if
\code{x} inherits from class \code{"SurvS4"}, otherwise
\code{FALSE}.

}
\details{
Typical usages are
\preformatted{
SurvS4(time, event)
SurvS4(time, time2, event, type=, origin=0)
}

In theory it is possible to represent interval censored data
without a third column containing the explicit status.
Exact, right censored, left censored and interval censored
observations would be represented as intervals of (a,a),
(a, infinity), (-infinity,b), and (a,b) respectively; each
specifying the interval within which the event is known to have
occurred.

If \code{type = "interval2"} then the representation given
above is assumed, with NA taking the place of infinity.
If \code{type = "interval"} then \code{event} must be given.
If \code{event} is \code{0}, \code{1}, or \code{2}, the
relevant information is assumed to be contained in \code{time},
the value in \code{time2} is ignored, and the second column of
the result will contain a placeholder.

Presently, the only methods allowing interval censored data are
the parametric models computed by
\code{\link[survival]{survreg}}, so the distinction between
open and closed intervals is unimportant.
The distinction is important for counting process data and the
Cox model.

The function tries to distinguish between the use of 0/1 and
1/2 coding for left and right censored data using
\code{if (max(status)==2)}.
If 1/2 coding is used and all the subjects are censored, it
will guess wrong.
Use 0/1 coding in this case.

}
\author{
The code and documentation come from \pkg{survival}.
Slight modifications have been made for conversion to S4 by
T. W. Yee.
Also, for \code{"interval"} data, \code{as.character.SurvS4()}
has been modified to print intervals of the form
\code{(start, end]} and not \code{[start, end]} as previously.
(This makes a difference for discrete data, such as for
\code{\link{cens.poisson}}).

All \pkg{VGAM} family functions beginning with \code{"cen"}
require the packaging function \code{SurvS4} to format the
input.

}
\note{
The purpose of having \code{SurvS4} in \pkg{VGAM} is so that
the same input can be fed into \code{\link{vglm}} as functions
in \pkg{survival} such as \code{\link[survival]{survreg}}.
The class name has been changed from \code{"Surv"} to
\code{"SurvS4"}; see \code{\link{SurvS4-class}}.

The format \code{J+} is interpreted in \pkg{VGAM} as
\eqn{\ge J}.
If \code{type="interval"} then these should not be used in
\pkg{VGAM}: \code{(L,U-]} or \code{(L,U+]}.

% zz is this for type="count" only?
}
\seealso{
\code{\link{SurvS4-class}},
\code{\link{cens.poisson}},
\code{\link[survival]{survreg}},
\code{\link{leukemia}}.
% \code{\link[survival]{coxph}}, % \code{\link[survival]{survfit}}, } \examples{ with(leukemia, SurvS4(time, status)) class(with(leukemia, SurvS4(time, status))) } \keyword{survival} % Converted by Sd2Rd version 0.3-2. % with(heart, SurvS4(start,stop,event)) VGAM/man/pospoisUC.Rd0000644000176200001440000000551513565414527014014 0ustar liggesusers\name{Pospois} \alias{Pospois} \alias{dpospois} \alias{ppospois} \alias{qpospois} \alias{rpospois} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive-Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for the positive-Poisson distribution. } \usage{ dpospois(x, lambda, log = FALSE) ppospois(q, lambda) qpospois(p, lambda) rpospois(n, lambda) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Fed into \code{\link[stats]{runif}}. } \item{lambda}{ vector of positive means (of an ordinary Poisson distribution). Short vectors are recycled. } \item{log}{ logical. } } \details{ The positive-Poisson distribution is a Poisson distribution but with the probability of a zero being zero. The other probabilities are scaled to add to unity. The mean therefore is \deqn{\lambda / (1-\exp(-\lambda)).}{% lambda / (1-exp(-lambda)).} As \eqn{\lambda}{lambda} increases, the positive-Poisson and Poisson distributions become more similar. Unlike similar functions for the Poisson distribution, a zero value of \code{lambda} returns a \code{NaN}. % Unlike similar functions for the Poisson distribution, a zero value % of \code{lambda} is not permitted here. } \value{ \code{dpospois} gives the density, \code{ppospois} gives the distribution function, \code{qpospois} gives the quantile function, and \code{rpospois} generates random deviates. } %\references{ %None. %} \author{ T. W. Yee } \note{ % 20120405; no longer true to a superior method: % For \code{rpospois}, the arguments of the function are fed % into \code{\link[stats:Poisson]{rpois}} until \eqn{n} positive % values are obtained. This may take a long time if \code{lambda} % has values close to 0. The family function \code{\link{pospoisson}} estimates \eqn{\lambda}{lambda} by maximum likelihood estimation. } \seealso{ \code{\link{Gaitpois.mlm}}, \code{\link{pospoisson}}, \code{\link{zapoisson}}, \code{\link{zipoisson}}, \code{\link[stats:Poisson]{rpois}}. } \examples{ lambda <- 2; y = rpospois(n = 1000, lambda) table(y) mean(y) # Sample mean lambda / (1 - exp(-lambda)) # Population mean (ii <- dpospois(0:7, lambda)) cumsum(ii) - ppospois(0:7, lambda) # Should be 0s table(rpospois(100, lambda)) table(qpospois(runif(1000), lambda)) round(dpospois(1:10, lambda) * 1000) # Should be similar \dontrun{ x <- 0:7 barplot(rbind(dpospois(x, lambda), dpois(x, lambda)), beside = TRUE, col = c("blue", "orange"), main = paste("Positive Poisson(", lambda, ") (blue) vs", " Poisson(", lambda, ") (orange)", sep = ""), names.arg = as.character(x), las = 1, lwd = 2) } } \keyword{distribution} VGAM/man/binom2.rhoUC.Rd0000644000176200001440000000704313565414527014273 0ustar liggesusers\name{Binom2.rho} \alias{Binom2.rho} \alias{dbinom2.rho} \alias{rbinom2.rho} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bivariate Probit Model } \description{ Density and random generation for a bivariate probit model. The correlation parameter rho is the measure of dependency. 
} \usage{ rbinom2.rho(n, mu1, mu2 = if (exchangeable) mu1 else stop("argument 'mu2' not specified"), rho = 0, exchangeable = FALSE, twoCols = TRUE, colnames = if (twoCols) c("y1","y2") else c("00", "01", "10", "11"), ErrorCheck = TRUE) dbinom2.rho(mu1, mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"), rho = 0, exchangeable = FALSE, colnames = c("00", "01", "10", "11"), ErrorCheck = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{n}{ number of observations. Same as in \code{\link[stats]{runif}}. The arguments \code{mu1}, \code{mu2}, \code{rho} are recycled to this value. } \item{mu1, mu2}{ The marginal probabilities. Only \code{mu1} is needed if \code{exchangeable = TRUE}. Values should be between 0 and 1. } \item{rho}{ The correlation parameter. Must be numeric and lie between \eqn{-1} and \eqn{1}. The default value of zero means the responses are uncorrelated. } \item{exchangeable}{ Logical. If \code{TRUE}, the two marginal probabilities are constrained to be equal. } \item{twoCols}{ Logical. If \code{TRUE}, then a \eqn{n} \eqn{\times}{*} \eqn{2} matrix of 1s and 0s is returned. If \code{FALSE}, then a \eqn{n} \eqn{\times}{*} \eqn{4} matrix of 1s and 0s is returned. } \item{colnames}{ The \code{dimnames} argument of \code{\link[base]{matrix}} is assigned \code{list(NULL, colnames)}. } \item{ErrorCheck}{ Logical. Do some error checking of the input parameters? } } \details{ The function \code{rbinom2.rho} generates data coming from a bivariate probit model. The data might be fitted with the \pkg{VGAM} family function \code{\link{binom2.rho}}. The function \code{dbinom2.rho} does not really compute the density (because that does not make sense here) but rather returns the four joint probabilities. } \value{ The function \code{rbinom2.rho} returns either a 2 or 4 column matrix of 1s and 0s, depending on the argument \code{twoCols}. The function \code{dbinom2.rho} returns a 4 column matrix of joint probabilities; each row adds up to unity. } \author{ T. W. Yee } \seealso{ \code{\link{binom2.rho}}. } \examples{ (myrho <- rhobitlink(2, inverse = TRUE)) # Example 1 ymat <- rbinom2.rho(nn <- 2000, mu1 = 0.8, rho = myrho, exch = TRUE) (mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2"))) fit <- vglm(ymat ~ 1, binom2.rho(exch = TRUE)) coef(fit, matrix = TRUE) bdata <- data.frame(x2 = sort(runif(nn))) # Example 2 bdata <- transform(bdata, mu1 = probitlink(-2+4*x2, inverse = TRUE), mu2 = probitlink(-1+3*x2, inverse = TRUE)) dmat <- with(bdata, dbinom2.rho(mu1, mu2, myrho)) ymat <- with(bdata, rbinom2.rho(nn, mu1, mu2, myrho)) fit2 <- vglm(ymat ~ x2, binom2.rho, data = bdata) coef(fit2, matrix = TRUE) \dontrun{ matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4, type = "l", main = "Joint probabilities", ylim = 0:1, lwd = 2, ylab = "Probability") legend(x = 0.25, y = 0.9, lty = 1:4, col = 1:4, lwd = 2, legend = c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)", "3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)")) } } \keyword{distribution} VGAM/man/huberUC.Rd0000644000176200001440000000670213565414527013424 0ustar liggesusers\name{dhuber} \alias{dhuber} \alias{edhuber} \alias{rhuber} \alias{qhuber} \alias{phuber} \title{Huber's Least Favourable Distribution} \description{ Density, distribution function, quantile function and random generation for Huber's least favourable distribution, see Huber and Ronchetti (2009). 
} \usage{ dhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) edhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) rhuber(n, k = 0.862, mu = 0, sigma = 1) qhuber(p, k = 0.862, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) phuber(q, k = 0.862, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) } \arguments{ \item{x, q}{numeric vector, vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of random values to be generated. If \code{length(n) > 1} then the length is taken to be the number required. } \item{k}{numeric. Borderline value of central Gaussian part of the distribution. This is known as the tuning constant, and should be positive. For example, \code{k = 0.862} refers to a 20\% contamination neighborhood of the Gaussian distribution. If \code{k = 1.40} then this is 5\% contamination. } \item{mu}{numeric. distribution mean.} \item{sigma}{numeric. Distribution scale (\code{sigma = 1} defines the distribution in standard form, with standard Gaussian centre).} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the result is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \details{ Details are given in \code{\link{huber2}}, the \pkg{VGAM} family function for estimating the parameters \code{mu} and \code{sigma}. } \value{ \code{dhuber} gives out a vector of density values. \code{edhuber} gives out a list with components \code{val} (density values) and \code{eps} (contamination proportion). \code{rhuber} gives out a vector of random numbers generated by Huber's least favourable distribution. \code{phuber} gives the distribution function, \code{qhuber} gives the quantile function. } %\references{ % Huber, P. J. and Ronchetti, E. (2009) % \emph{Robust Statistics}, 2nd ed. New York: Wiley. % % % Huber, P. J. and Ronchetti, E. (2009) Robust Statistics % (2nd ed.). Wiley, New York. % % %} \author{ Christian Hennig wrote \code{[d,ed,r]huber()} (from \pkg{smoothmest}) and slight modifications were made by T. W. Yee to replace looping by vectorization and addition of the \code{log} argument. Arash Ardalan wrote \code{[pq]huber()}, and two arguments for these were implemented by Kai Huang. This helpfile was adapted from \pkg{smoothmest}. } \seealso{ \code{\link{huber2}}. } \examples{ set.seed(123456) edhuber(1:5, k = 1.5) rhuber(5) \dontrun{ mu <- 3; xx <- seq(-2, 7, len = 100) # Plot CDF and PDF plot(xx, dhuber(xx, mu = mu), type = "l", col = "blue", las = 1, ylab = "", main = "blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1) abline(h = 0, col = "blue", lty = 2) lines(xx, phuber(xx, mu = mu), type = "l", col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qhuber(probs, mu = mu) lines(Q, dhuber(Q, mu = mu), col = "purple", lty = 3, type = "h") lines(Q, phuber(Q, mu = mu), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) phuber(Q, mu = mu) - probs # Should be all 0s } } \keyword{distribution} VGAM/man/sc.studentt2.Rd0000644000176200001440000000715413565414527014431 0ustar liggesusers\name{sc.studentt2} \alias{sc.studentt2} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Scaled Student t Distribution with 2 df Family Function } \description{ Estimates the location and scale parameters of a scaled Student t distribution with 2 degrees of freedom, by maximum likelihood estimation. 
}
\usage{
sc.studentt2(percentile = 50, llocation = "identitylink",
    lscale = "loglink", ilocation = NULL, iscale = NULL,
    imethod = 1, zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{percentile}{
A numerical vector containing values between 0 and 100, which
are the quantiles and expectiles.
They will be returned as `fitted values'.

}
\item{llocation, lscale}{
See \code{\link{Links}} for more choices,
and \code{\link{CommonVGAMffArguments}}.

}
\item{ilocation, iscale, imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for details.

}
}
\details{
Koenker (1993) solved for the distribution whose quantiles are
equal to its expectiles.
Its canonical form has mean and mode at 0, and has a heavy tail
(in fact, its variance is infinite).

% This is called Koenker's distribution here.

The standard (``canonical'') form of this distribution can be
endowed with a location and scale parameter.
The standard form has a density that can be written as
\deqn{f(z) = 2 / (4 + z^2)^{3/2}}{%
      f(z) = 2 / (4 + z^2)^(3/2) }
for real \eqn{z}.
Then \eqn{z = (y-a)/b} for location and scale parameters
\eqn{a} and \eqn{b > 0}.
The mean of \eqn{Y} is \eqn{a}{a}.
By default, \eqn{\eta_1=a}{eta1=a} and
\eqn{\eta_2=\log(b)}{eta2=log(b)}.
The expectiles/quantiles corresponding to \code{percentile} are
returned as the fitted values; in particular,
\code{percentile = 50} corresponds to the mean (0.5 expectile)
and median (0.5 quantile).

Note that if \eqn{Y} has a standard \code{\link{dsc.t2}} then
\eqn{Y = \sqrt{2} T_2}{Y = sqrt(2) * T_2} where \eqn{T_2} has a
Student-t distribution with 2 degrees of freedom.
The two parameters here can also be estimated using
\code{\link{studentt2}} by specifying \code{df = 2} and making
an adjustment for the scale parameter; however, this \pkg{VGAM}
family function is more efficient since the EIM is known
(Fisher scoring is implemented).

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}}, \code{\link{rrvglm}} and
\code{\link{vgam}}.

}
\references{
Koenker, R. (1993)
When are expectiles percentiles? (solution)
\emph{Econometric Theory},
\bold{9}, 526--527.

}
\author{ T. W. Yee }
%\note{
%
%}
\seealso{
\code{\link{dsc.t2}},
\code{\link{studentt2}}.

}
\examples{
set.seed(123); nn <- 1000
kdata <- data.frame(x2 = sort(runif(nn)))
kdata <- transform(kdata, mylocat = 1 + 3 * x2,
                          myscale = 1)
kdata <- transform(kdata, y = rsc.t2(nn, loc = mylocat, scale = myscale))
fit <- vglm(y ~ x2, sc.studentt2(perc = c(1, 50, 99)), data = kdata)
fit2 <- vglm(y ~ x2, studentt2(df = 2), data = kdata)  # 'same' as fit

coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))

# Nice plot of the results
\dontrun{ plot(y ~ x2, data = kdata, col = "blue", las = 1,
     sub  = paste("n =", nn),
     main = "Fitted quantiles/expectiles using the sc.studentt2() distribution")
matplot(with(kdata, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
legend("bottomright", lty = 1:3, lwd = 3, legend = colnames(fitted(fit)),
       col = 1:3) }

fit@extra$percentile  # Sample quantiles
}
\keyword{models}
\keyword{regression}
VGAM/man/frechetUC.Rd0000644000176200001440000000445313565414527013740 0ustar liggesusers\name{Frechet}
\alias{Frechet}
\alias{dfrechet}
\alias{pfrechet}
\alias{qfrechet}
\alias{rfrechet}
\title{The Frechet Distribution}
\description{
Density, distribution function, quantile function and random
generation for the three-parameter Frechet distribution.
} \usage{ dfrechet(x, location = 0, scale = 1, shape, log = FALSE) pfrechet(q, location = 0, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qfrechet(p, location = 0, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rfrechet(n, location = 0, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Passed into \code{\link[stats:Uniform]{runif}}. } \item{location, scale, shape}{the location parameter \eqn{a}, scale parameter \eqn{b}, and shape parameter \eqn{s}.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Uniform]{punif}} or \code{\link[stats:Uniform]{qunif}}. } } \value{ \code{dfrechet} gives the density, \code{pfrechet} gives the distribution function, \code{qfrechet} gives the quantile function, and \code{rfrechet} generates random deviates. } \references{ Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005) \emph{Extreme Value and Related Models with Applications in Engineering and Science}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{frechet}}, the \pkg{VGAM} family function for estimating the 2 parameters (without location parameter) by maximum likelihood estimation, for the formula of the probability density function and range restrictions on the parameters. } %\note{ %} \seealso{ \code{\link{frechet}}. % \code{\link{frechet3}}. } \examples{ \dontrun{ shape <- 5 x <- seq(-0.1, 3.5, length = 401) plot(x, dfrechet(x, shape = shape), type = "l", ylab = "", las = 1, main = "Frechet density divided into 10 equal areas; orange = cdf") abline(h = 0, col = "blue", lty = 2) qq <- qfrechet(seq(0.1, 0.9, by = 0.1), shape = shape) lines(qq, dfrechet(qq, shape = shape), col = "purple", lty = 3, type = "h") lines(x, pfrechet(q = x, shape = shape), col = "orange") } } \keyword{distribution} VGAM/man/alaplace3.Rd0000644000176200001440000003363013565414527013714 0ustar liggesusers\name{alaplace} \alias{alaplace1} \alias{alaplace2} \alias{alaplace3} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Asymmetric Laplace Distribution Family Functions } \description{ Maximum likelihood estimation of the 1, 2 and 3-parameter asymmetric Laplace distributions (ALDs). The 2-parameter ALD may, with trepidation and lots of skill, sometimes be used as an approximation of quantile regression. } \usage{ alaplace1(tau = NULL, llocation = "identitylink", ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1, ishrinkage = 0.95, parallel.locat = TRUE ~ 0, digt = 4, idf.mu = 3, zero = NULL, imethod = 1) alaplace2(tau = NULL, llocation = "identitylink", lscale = "loglink", ilocation = NULL, iscale = NULL, kappa = sqrt(tau/(1 - tau)), ishrinkage = 0.95, parallel.locat = TRUE ~ 0, parallel.scale = FALSE ~ 0, digt = 4, idf.mu = 3, imethod = 1, zero = "scale") alaplace3(llocation = "identitylink", lscale = "loglink", lkappa = "loglink", ilocation = NULL, iscale = NULL, ikappa = 1, imethod = 1, zero = c("scale", "kappa")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{tau, kappa}{ Numeric vectors with \eqn{0 < \tau < 1}{0 < tau < 1} and \eqn{\kappa >0}{kappa >0}. Most users will only specify \code{tau} since the estimated location parameter corresponds to the \eqn{\tau}{tau}th regression quantile, which is easier to understand. See below for details. } \item{llocation, lscale, lkappa}{ Character. 
Parameter link functions for location parameter \eqn{\xi}{xi}, scale parameter \eqn{\sigma}{sigma}, asymmetry parameter \eqn{\kappa}{kappa}. See \code{\link{Links}} for more choices. For example, the argument \code{llocation} can help handle count data by restricting the quantiles to be positive (use \code{llocation = "loglink"}). However, \code{llocation} is best left alone since the theory only works properly with the identity link. } \item{ilocation, iscale, ikappa}{ Optional initial values. If given, it must be numeric and values are recycled to the appropriate length. The default is to choose the value internally. } \item{parallel.locat, parallel.scale}{ See the \code{parallel} argument of \code{\link{CommonVGAMffArguments}}. These arguments apply to the location and scale parameters. It generally only makes sense for the scale parameters to be equal, hence set \code{parallel.scale = TRUE}. Note that assigning \code{parallel.locat} the value \code{TRUE} circumvents the seriously embarrassing quantile crossing problem because all constraint matrices except for the intercept correspond to a parallelism assumption. } % \item{intparloc}{ Logical. % Defunct. % } % \item{eq.scale}{ Logical. % Should the scale parameters be equal? It is advised % to keep \code{eq.scale = TRUE} unchanged because it % does not make sense to have different values for each % \code{tau} value. % } \item{imethod}{ Initialization method. Either the value 1, 2, 3 or 4. } \item{idf.mu}{ Degrees of freedom for the cubic smoothing spline fit applied to get an initial estimate of the location parameter. See \code{\link{vsmooth.spline}}. Used only when \code{imethod = 3}. } \item{ishrinkage}{ How much shrinkage is used when initializing \eqn{\xi}{xi}. The value must be between 0 and 1 inclusive, and a value of 0 means the individual response values are used, and a value of 1 means the median or mean is used. This argument is used only when \code{imethod = 4}. See \code{\link{CommonVGAMffArguments}} for more information. } \item{Scale.arg}{ The value of the scale parameter \eqn{\sigma}{sigma}. This argument may be used to compute quantiles at different \eqn{\tau}{tau} values from an existing fitted \code{alaplace2()} model (practical only if it has a single value). If the model has \code{parallel.locat = TRUE} then only the intercept need be estimated; use an offset. See below for an example. % This is because the expected information matrix is diagonal, % i.e., the location and scale parameters are asymptotically independent. } \item{digt }{ Passed into \code{\link[base]{Round}} as the \code{digits} argument for the \code{tau} values; used cosmetically for labelling. } \item{zero}{ See \code{\link{CommonVGAMffArguments}} for more information. Where possible, the default is to model all the \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa} as an intercept-only term. } } \details{ These \pkg{VGAM} family functions implement one variant of asymmetric Laplace distributions (ALDs) suitable for quantile regression. Kotz et al. (2001) call it \emph{the} ALD. 
Its density function is
\deqn{f(y;\xi,\sigma,\kappa) = \frac{\sqrt{2}}{\sigma} \,
\frac{\kappa}{1 + \kappa^2} \,
\exp \left( - \frac{\sqrt{2}}{\sigma \, \kappa} |y - \xi |
\right) }{%
f(y;xi,sigma,kappa) = (sqrt(2)/sigma) * (kappa/(1+kappa^2)) *
exp( -(sqrt(2) / (sigma * kappa)) * |y-xi| ) }
for \eqn{y \leq \xi}{y <= xi}, and
\deqn{f(y;\xi,\sigma,\kappa) = \frac{\sqrt{2}}{\sigma} \,
\frac{\kappa}{1 + \kappa^2} \,
\exp \left( - \frac{\sqrt{2} \, \kappa}{\sigma} |y - \xi |
\right) }{%
f(y;xi,sigma,kappa) = (sqrt(2)/sigma) * (kappa/(1+kappa^2)) *
exp( - (sqrt(2) * kappa / sigma) * |y-xi| ) }
for \eqn{y > \xi}{y > xi}.
Here, the ranges are for all real \eqn{y} and \eqn{\xi}{xi},
positive \eqn{\sigma}{sigma} and positive \eqn{\kappa}{kappa}.
The special case \eqn{\kappa = 1}{kappa = 1} corresponds to the
(symmetric) Laplace distribution of Kotz et al. (2001).
The mean is \eqn{\xi + \sigma (1/\kappa - \kappa) /
\sqrt{2}}{xi + sigma * (1/kappa - kappa) / sqrt(2)}
and the variance is
\eqn{\sigma^2 (1 + \kappa^4) / (2 \kappa^2)}{sigma^2 *
(1 + kappa^4) / (2 * kappa^2)}.

The enumeration of the linear/additive predictors used for
\code{alaplace2()} is the first location parameter followed by
the first scale parameter, then the second location parameter
followed by the second scale parameter, etc.
For \code{alaplace3()}, only a vector response is handled and
the last (third) linear/additive predictor is for the asymmetry
parameter.

It is known that the maximum likelihood estimate of the
location parameter \eqn{\xi}{xi} corresponds to the regression
quantile estimate of the classical quantile regression approach
of Koenker and Bassett (1978).
An important property of the ALD is that
\eqn{P(Y \leq \xi) = \tau}{P(Y <= xi) = tau} where
\eqn{\tau = \kappa^2 / (1 + \kappa^2)}{tau = kappa^2 /
(1 + kappa^2)}
so that
\eqn{\kappa = \sqrt{\tau / (1-\tau)}}{kappa =
sqrt(tau / (1-tau))}.
Thus \code{alaplace2()} might be used as an alternative to
\code{rq} in the \pkg{quantreg} package, although scoring is
really an unsuitable algorithm for estimation here.

Both \code{alaplace1()} and \code{alaplace2()} can handle
multiple \code{tau} or \code{kappa} values, and the number of
linear/additive predictors is dictated by the length of
\code{tau} or \code{kappa}.
The functions can also handle a multiple (matrix) response, but
then only with a \emph{single-valued} \code{tau} or
\code{kappa}.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.

In the \code{extra} slot of the fitted object are some list
components which are useful, e.g., the sample proportion of
values which are less than the fitted quantile curves.

}
\references{
Koenker, R. and Bassett, G. (1978)
Regression quantiles.
\emph{Econometrica},
\bold{46}, 33--50.

Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001)
\emph{The Laplace distribution and generalizations:
a revisit with applications to communications,
economics, engineering, and finance},
Boston: Birkhauser.

% Yee, T. W. (2014)
% Quantile regression for counts and proportions.
% In preparation.

}
\author{ Thomas W. Yee }
\section{Warning}{
These functions are \emph{experimental} and especially subject
to change or withdrawal.
The usual MLE regularity conditions do \emph{not} hold for this
distribution so that misleading inferences may result, e.g., in
the \code{summary} and \code{vcov} of the object.
Care is needed with \code{tau} values which are too small, e.g., for count data with \code{llocation = "loglink"} and if the sample proportion of zeros is greater than \code{tau}. } \note{ % Commented out 20090326 % The function \code{alaplace2()} is recommended over \code{alaplace1()} % for quantile regression because the solution is % invariant to location and scale, % i.e., linear transformation of the response produces the % same linear transformation of the fitted quantiles. These \pkg{VGAM} family functions use Fisher scoring. Convergence may be slow and half-stepping is usual (although one can use \code{trace = TRUE} to see which is the best model and then use \code{maxit} to choose that model) due to the regularity conditions not holding. Often the iterations slowly crawl towards the solution so monitoring the convergence (set \code{trace = TRUE}) is highly recommended. For large data sets it is a very good idea to keep the length of \code{tau}/\code{kappa} low to avoid large memory requirements. Then for \code{parallel.locat = FALSE} one can repeatedly fit a model with \code{alaplace1()} with one \eqn{\tau}{tau} at a time; and for \code{parallel.locat = TRUE} one can refit a model with \code{alaplace1()} with one \eqn{\tau}{tau} at a time but using offsets and an intercept-only model. A second method for solving the noncrossing quantile problem is illustrated below in Example 3. This is called the \emph{accumulative quantile method} (AQM) and details are in Yee (2015). It does not make the strong parallelism assumption. The functions \code{alaplace2()} and \code{\link{laplace}} differ slightly in terms of the parameterizations. } \seealso{ \code{\link{ralap}}, \code{\link{laplace}}, \code{\link{CommonVGAMffArguments}}, \code{\link{lms.bcn}}, \code{\link{amlnormal}}, \code{\link{sc.studentt2}}, \code{\link{simulate.vlm}}. } % set.seed(1) \examples{ \dontrun{ # Example 1: quantile regression with smoothing splines set.seed(123); adata <- data.frame(x2 = sort(runif(n <- 500))) mymu <- function(x) exp(-2 + 6*sin(2*x-0.2) / (x+0.5)^2) adata <- transform(adata, y = rpois(n, lambda = mymu(x2))) mytau <- c(0.25, 0.75); mydof <- 4 fit <- vgam(y ~ s(x2, df = mydof), data = adata, trace = TRUE, maxit = 900, alaplace2(tau = mytau, llocat = "loglink", parallel.locat = FALSE)) fitp <- vgam(y ~ s(x2, df = mydof), data = adata, trace = TRUE, maxit = 900, alaplace2(tau = mytau, llocat = "loglink", parallel.locat = TRUE)) par(las = 1); mylwd <- 1.5 with(adata, plot(x2, jitter(y, factor = 0.5), col = "orange", main = "Example 1; green: parallel.locat = TRUE", ylab = "y", pch = "o", cex = 0.75)) with(adata, matlines(x2, fitted(fit ), col = "blue", lty = "solid", lwd = mylwd)) with(adata, matlines(x2, fitted(fitp), col = "green", lty = "solid", lwd = mylwd)) finexgrid <- seq(0, 1, len = 1001) for (ii in 1:length(mytau)) lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)), col = "blue", lwd = mylwd) fit@extra # Contains useful information # Example 2: regression quantile at a new tau value from an existing fit # Nb. regression splines are used here since it is easier. 
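# The trick used below: with parallel.locat = TRUE, a regression
# quantile at a new tau value can be obtained afterwards by fitting an
# intercept-only model whose offset is the first fitted linear
# predictor (cf. the 'Scale.arg' argument description above); this is
# what fitp3 does.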
fitp2 <- vglm(y ~ sm.bs(x2, df = mydof), data = adata, trace = TRUE,
              alaplace1(tau = mytau, llocation = "loglink",
                        parallel.locat = TRUE))

newtau <- 0.5  # Want to refit the model with this tau value
fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[, 1]),
              alaplace1(tau = newtau, llocation = "loglink"),
              data = adata)
with(adata, plot(x2, jitter(y, factor = 0.5), col = "orange",
                 pch = "o", cex = 0.75, ylab = "y",
                 main = "Example 2; parallel.locat = TRUE"))
with(adata, matlines(x2, fitted(fitp2), col = "blue",
                     lty = 1, lwd = mylwd))
with(adata, matlines(x2, fitted(fitp3), col = "black",
                     lty = 1, lwd = mylwd))

# Example 3: noncrossing regression quantiles using a trick: obtain
# successive solutions which are added to previous solutions; use a log
# link to ensure increasing quantiles at any value of x.
mytau <- seq(0.2, 0.9, by = 0.1)
answer <- matrix(0, nrow(adata), length(mytau))  # Stores the quantiles
adata <- transform(adata, offsety = y*0)
usetau <- mytau
for (ii in 1:length(mytau)) {
#  cat("\n\nii = ", ii, "\n")
  adata <- transform(adata, usey = y-offsety)
  iloc <- ifelse(ii == 1, with(adata, median(y)), 1.0)  # Well-chosen!
  mydf <- ifelse(ii == 1, 5, 3)  # Maybe less smoothing will help
# lloc <- ifelse(ii == 1, "loglink", "loglink")  # 2nd value must be "loglink"
  fit3 <- vglm(usey ~ sm.ns(x2, df = mydf), data = adata, trace = TRUE,
               alaplace2(tau = usetau[ii], lloc = "loglink", iloc = iloc))
  answer[, ii] <- (if (ii == 1) 0 else answer[, ii-1]) + fitted(fit3)
  adata <- transform(adata, offsety = answer[, ii])
}

# Plot the results.
with(adata, plot(x2, y, col = "blue",
     main = paste("Noncrossing and nonparallel; tau = ",
                  paste(mytau, collapse = ", "))))
with(adata, matlines(x2, answer, col = "orange", lty = 1))

# Zoom in near the origin.
with(adata, plot(x2, y, col = "blue", xlim = c(0, 0.2), ylim = 0:1,
     main = paste("Noncrossing and nonparallel; tau = ",
                  paste(mytau, collapse = ", "))))
with(adata, matlines(x2, answer, col = "orange", lty = 1))
}
}
\keyword{models}
\keyword{regression}
VGAM/man/confintvglm.Rd0000644000176200001440000001245413565414527014416 0ustar liggesusers\name{confintvglm}
%\name{confint}
% \alias{confint}
\alias{confintvglm}
\alias{confintrrvglm}
\alias{confintvgam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Confidence Intervals for Parameters of VGLMs }
\description{
Computes confidence intervals (CIs) for one or more parameters
in a fitted model.
Currently the object must be a \code{"\link{vglm}"} object.

}
% confint(object, parm, level = 0.95, \dots)
\usage{
confintvglm(object, parm, level = 0.95,
            method = c("wald", "profile"), trace = NULL, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ A fitted model object. }
\item{parm, level, \dots}{Same as \code{\link[stats]{confint}}.
}
\item{method}{Character.
The default is the first method.
Abbreviations are allowed.
Currently \code{"profile"} is basically working, and it is
likely to be more accurate, especially for small samples, as it
is based on a profile log-likelihood; however, it is
computationally intensive.

}
\item{trace}{
Logical. If \code{TRUE} then one can monitor the computation as
it progresses (because it is expensive).
The default is the original model's \code{trace} value
(see \code{\link{vglm.control}}).
Setting \code{FALSE} suppresses all intermediate output.

}
}
\details{
The default for this methods function is based on
\code{\link[stats]{confint.default}} and assumes asymptotic
normality.
In particular, the \code{\link[VGAM:coefvlm]{coef}} and
\code{vcov} methods functions are used for
\code{\link[VGAM]{vglm-class}} objects.

When \code{method = "profile"} the function
\code{\link{profilevglm}} is called to do the profiling.
The code is very heavily based on
\code{\link[MASS]{profile.glm}}
which was originally written by
D. M. Bates and W. N. Venables (for S in 1996)
and subsequently corrected by B. D. Ripley.
Sometimes the profiling method can give problems, for example,
\code{\link{cumulative}} requires the \eqn{M} linear predictors
not to intersect in the data cloud.
Such numerical problems are less common when
\code{method = "wald"}; however, it is well-known that
inference based on profile likelihoods is generally more
accurate than Wald, especially when the sample size is small.
The deviance (\code{deviance(object)}) is used if possible,
else the difference \code{2 * (logLik(object) - ell)} is
computed, where \code{ell} are the values of the
log-likelihood on a grid.

For Wald CIs and \code{\link[VGAM]{rrvglm-class}} objects,
currently an error message is produced because I haven't gotten
around to writing the methods function; it's not too hard, but
I am too busy!
An interim measure is to coerce the object into a
\code{"\link{vglm}"} object, but then the confidence intervals
will tend to be too narrow because the estimated constraint
matrices are treated as known.

For Wald CIs and \code{\link[VGAM]{vgam-class}} objects,
currently an error message is produced because the theory is
undeveloped.

}
\value{
Same as \code{\link[stats]{confint}}.

}
%\references{
%}
\author{
Thomas Yee adapted \code{\link[stats]{confint.lm}} to handle
\code{"vglm"} objects, for Wald-type confidence intervals.
Also, \code{\link[MASS]{profile.glm}} was originally written by
D. M. Bates and W. N. Venables (for S in 1996) and subsequently
corrected by B. D. Ripley.
This function effectively calls \code{confint.profile.glm()} in
\pkg{MASS}.

}
\note{
The order of the values of argument \code{method} may change in
the future without notice.
The functions
\code{plot.profile.glm}
and
\code{pairs.profile.glm}
from \pkg{MASS}
appear to work with output from this function.

}
%\section{Warning }{
%}
\seealso{
\code{\link{vcovvlm}},
\code{\link{summaryvglm}},
\code{\link[stats]{confint}},
\code{\link[MASS]{profile.glm}},
\code{\link{lrt.stat.vlm}},
\code{\link{wald.stat}},
\code{plot.profile.glm},
\code{pairs.profile.glm}.

% \code{\link{lrp.vglm}},

}
\examples{
# Example 1: this is based on a glm example
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3, 1, 9); treatment <- gl(3, 3)
glm.D93 <- glm(counts ~ outcome + treatment, family = poisson())
vglm.D93 <- vglm(counts ~ outcome + treatment, family = poissonff)
confint(glm.D93)  # needs MASS to be present on the system
confint.default(glm.D93)  # based on asymptotic normality
confint(vglm.D93)
confint(vglm.D93) - confint(glm.D93)  # Should be all 0s
confint(vglm.D93) - confint.default(glm.D93)  # based on asympt. normality

# Example 2: simulated negative binomial data with multiple responses
ndata <- data.frame(x2 = runif(nn <- 100))
ndata <- transform(ndata,
                   y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
                   y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
coef(fit1)
coef(fit1, matrix = TRUE)
confint(fit1)
confint(fit1, "x2:1")  # This might be improved to "x2" some day...
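# By way of illustration, the default Wald interval may be computed by
# hand from coef() and vcov(); the result should agree with
# confint(fit1, "x2:1") above.
theta.hat <- coef(fit1)["x2:1"]
se.hat <- sqrt(diag(vcov(fit1))["x2:1"])
theta.hat + c(-1, 1) * qnorm(0.975) * se.hat  # 95\% Wald CI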
\dontrun{ confint(fit1, method = "profile") # Computationally expensive confint(fit1, "x2:1", method = "profile", trace = FALSE) } fit2 <- rrvglm(y1 ~ x2, negbinomial(zero = NULL), data = ndata) confint(as(fit2, "vglm")) # Too narrow (SEs are biased downwards) } \keyword{models} \keyword{regression} VGAM/man/powerlink.Rd0000644000176200001440000000442013565414527014074 0ustar liggesusers\name{powerlink} \alias{powerlink} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Power Link Function } \description{ Computes the power transformation, including its inverse and the first two derivatives. } \usage{ powerlink(theta, power = 1, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{power}{ This denotes the power or exponent. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The power link function raises a parameter by a certain value of \code{power}. Care is needed because it is very easy to get numerical problems, e.g., if \code{power=0.5} and \code{theta} is negative. } \value{ For \code{powerlink} with \code{deriv = 0}, then \code{theta} raised to the power of \code{power}. And if \code{inverse = TRUE} then \code{theta} raised to the power of \code{1/power}. For \code{deriv = 1}, then the function returns \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. } %\references{ % McCullagh, P. and Nelder, J. A. (1989) % \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. % %} \author{ Thomas W. Yee } \note{ Numerical problems may occur for certain combinations of \code{theta} and \code{power}. Consequently this link function should be used with caution. } \seealso{ \code{\link{Links}}, \code{\link{loglink}}. } \examples{ powerlink("a", power = 2, short = FALSE, tag = TRUE) powerlink(x <- 1:5) powerlink(x, power = 2) max(abs(powerlink(powerlink(x, power = 2), power = 2, inverse = TRUE) - x)) # Should be 0 powerlink(x <- (-5):5, power = 0.5) # Has NAs # 1/2 = 0.5 pdata <- data.frame(y = rbeta(n = 1000, shape1 = 2^2, shape2 = 3^2)) fit <- vglm(y ~ 1, betaR(lshape1 = powerlink(power = 0.5), i1 = 3, lshape2 = powerlink(power = 0.5), i2 = 7), data = pdata) t(coef(fit, matrix = TRUE)) Coef(fit) # Useful for intercept-only models vcov(fit, untransform = TRUE) } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/slash.Rd0000644000176200001440000000665413565414527013207 0ustar liggesusers\name{slash} \alias{slash} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Slash Distribution Family Function } \description{ Estimates the two parameters of the slash distribution by maximum likelihood estimation. } \usage{ slash(lmu = "identitylink", lsigma = "loglink", imu = NULL, isigma = NULL, gprobs.y = ppoints(8), nsimEIM = 250, zero = NULL, smallno = .Machine$double.eps*1000) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmu, lsigma}{ Parameter link functions applied to the \eqn{\mu}{mu} and \eqn{\sigma}{sigma} parameters, respectively. See \code{\link{Links}} for more choices. } % \item{emu, esigma}{ % List. Extra argument for each of the link functions. % See \code{earg} in \code{\link{Links}} for general information. %emu = list(), esigma = list(), % } \item{imu, isigma}{ Initial values. 
A \code{NULL} means an initial value is chosen internally.
See \code{\link{CommonVGAMffArguments}} for more information.

}
\item{gprobs.y}{
Used to compute the initial values for \code{mu}.
This argument is fed into the \code{probs} argument of
\code{\link[stats]{quantile}} to construct a grid, which is
used to evaluate the log-likelihood.
This must have values between 0 and 1.

}
\item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}} for information.

}
\item{smallno}{
Small positive number, used to test for singularity.

}
}
\details{
The standard slash distribution is the distribution of the
ratio of a standard normal variable to an independent standard
uniform(0,1) variable.
It is mainly of use in simulation studies.
One of its properties is that it has heavy tails, similar to
those of the Cauchy.

The general slash distribution can be obtained by replacing the
univariate normal variable by a general normal
\eqn{N(\mu,\sigma)}{N(mu,sigma)} random variable.
It has a density that can be written as
\deqn{f(y) = \left\{ \begin{array}{cl}
1/(2 \sigma \sqrt{2 \pi}) & \mbox{if } y = \mu, \\
(1-\exp(-(((y-\mu)/\sigma)^2)/2)) / (\sqrt{2 \pi} \sigma
((y-\mu)/\sigma)^2) & \mbox{if } y \neq \mu. \end{array}
\right . }{%
f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
     = (1-exp(-(((y-mu)/sigma)^2)/2))/(sqrt(2*pi)*sigma*((y-mu)/sigma)^2) if y!=mu}
where \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are the mean and
standard deviation of the univariate normal distribution
respectively.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}}, and \code{\link{vgam}}.

}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
\emph{Continuous Univariate Distributions},
2nd edition, Volume 1, New York: Wiley.

Kafadar, K. (1982)
A Biweight Approach to the One-Sample Problem.
\emph{Journal of the American Statistical Association},
\bold{77}, 416--424.

% multivariate skew-slash distribution.
% jspi, 2006, 136: 209--220., by Wang, J. and Genton, M. G.

}
\author{ T. W. Yee and C. S. Chee }
\note{
Fisher scoring using simulation is used.
Convergence is often quite slow.
Numerical problems may occur.

}
\seealso{
\code{\link{rslash}},
\code{\link{simulate.vlm}}.

}
\examples{
\dontrun{
sdata <- data.frame(y = rslash(n = 1000, mu = 4, sigma = exp(2)))
fit <- vglm(y ~ 1, slash, data = sdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
}
\keyword{models}
\keyword{regression}
VGAM/man/sc.t2UC.Rd0000644000176200001440000000570313565414527013250 0ustar liggesusers\name{Expectiles-sc.t2}
\alias{Expectiles-sc.t2}
\alias{dsc.t2}
\alias{psc.t2}
\alias{qsc.t2}
\alias{rsc.t2}
\title{ Expectiles/Quantiles of the Scaled Student t Distribution
with 2 Df}
\description{
Density, distribution function, quantile/expectile function,
and random generation for the scaled Student t distribution
with 2 degrees of freedom.

}
\usage{
dsc.t2(x, location = 0, scale = 1, log = FALSE)
psc.t2(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
qsc.t2(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
rsc.t2(n, location = 0, scale = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q}{
Vector of expectiles/quantiles.
See the terminology note below.

}
\item{p}{
Vector of probabilities.
% (tau or \eqn{\tau}).
These should lie in \eqn{(0,1)}.

}
\item{n, log}{See \code{\link[stats:Uniform]{runif}}.}
\item{location, scale}{
Location and scale parameters.
The latter should have positive values.
Values of these vectors are recycled.

}
\item{lower.tail, log.p}{
Same meaning as in \code{\link[stats:TDist]{pt}}
or \code{\link[stats:TDist]{qt}}.

}
}
\details{
A Student-t distribution with 2 degrees of freedom and a scale
parameter of \code{sqrt(2)} is equivalent to the standard form
of this distribution (called Koenker's distribution below).
Further details about this distribution are given in
\code{\link{sc.studentt2}}.

}
\value{
\code{dsc.t2(x)} gives the density function.
\code{psc.t2(q)} gives the distribution function.
\code{qsc.t2(p)} gives the expectile and quantile function.
\code{rsc.t2(n)} gives \eqn{n} random variates.

}
\author{ T. W. Yee and Kai Huang }
%\note{
%}
\seealso{
\code{\link[stats:TDist]{dt}},
\code{\link{sc.studentt2}}.

}
\examples{
my.p <- 0.25; y <- rsc.t2(nn <- 5000)
(myexp <- qsc.t2(my.p))
sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my.p
# Equivalently:
I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
I2 <- mean(y >  myexp) * mean(-myexp + y[y >  myexp])
I1 / (I1 + I2)  # Should be my.p
# Or:
I1 <- sum( myexp - y[y <= myexp])
I2 <- sum(-myexp + y[y >  myexp])

# Non-standard Koenker distribution
myloc <- 1; myscale <- 2
yy <- rsc.t2(nn, myloc, myscale)
(myexp <- qsc.t2(my.p, myloc, myscale))
sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my.p
psc.t2(mean(yy), myloc, myscale)  # Should be 0.5
abs(qsc.t2(0.5, myloc, myscale) - mean(yy))  # Should be 0
abs(psc.t2(myexp, myloc, myscale) - my.p)  # Should be 0
integrate(f = dsc.t2, lower = -Inf, upper = Inf,
          locat = myloc, scale = myscale)  # Should be 1

y <- seq(-7, 7, len = 201)
max(abs(dsc.t2(y) - dt(y / sqrt(2), df = 2) / sqrt(2)))  # Should be 0
\dontrun{ plot(y, dsc.t2(y), type = "l", col = "blue", las = 1,
     ylim = c(0, 0.4), main = "Blue = Koenker; orange = N(0, 1)")
lines(y, dnorm(y), type = "l", col = "orange")
abline(h = 0, v = 0, lty = 2) }
}
\keyword{distribution}
VGAM/man/lms.yjn.Rd0000644000176200001440000001342413565414527013460 0ustar liggesusers\name{lms.yjn}
\alias{lms.yjn}
\alias{lms.yjn2}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ LMS Quantile Regression with a Yeo-Johnson Transformation
to Normality }
\description{
LMS quantile regression with the Yeo-Johnson transformation to
normality.
This family function is experimental and the LMS-BCN family
function is recommended instead.

}
\usage{
lms.yjn(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
   llambda = "identitylink", lsigma = "loglink",
   idf.mu = 4, idf.sigma = 2,
   ilambda = 1, isigma = NULL, rule = c(10, 5),
   yoffset = NULL, diagW = FALSE, iters.diagW = 6)
lms.yjn2(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
   llambda = "identitylink", lmu = "identitylink", lsigma = "loglink",
   idf.mu = 4, idf.sigma = 2, ilambda = 1.0,
   isigma = NULL, yoffset = NULL, nsimEIM = 250)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{percentiles}{
A numerical vector containing values between 0 and 100, which
are the quantiles.
They will be returned as `fitted values'.

}
\item{zero}{
See \code{\link{lms.bcn}}.

}
\item{llambda, lmu, lsigma}{
See \code{\link{lms.bcn}}.

}
\item{idf.mu, idf.sigma}{
See \code{\link{lms.bcn}}.

}
\item{ilambda, isigma}{
See \code{\link{lms.bcn}}.

}
\item{rule}{
Number of abscissae used in the Gaussian integration scheme to
work out elements of the weight matrices.
The values given are the possible choices, with the first value
being the default.
The larger the value, the more accurate the approximation is
likely to be, but at greater computational expense.

}
\item{yoffset}{
A value to be added to the response y, for the purpose of
centering the response before fitting the model to the data.
The default value, \code{NULL}, means \code{-median(y)} is
used, so that the response actually used has median zero.
The \code{yoffset} is saved on the object and used during
prediction.

}
\item{diagW}{
Logical.
This argument is offered because the expected information
matrix may not be positive-definite.
Using the diagonal elements of this matrix results in a higher
chance of it being positive-definite; however, convergence will
be very slow.

If \code{TRUE}, then the first \code{iters.diagW} iterations
will use the diagonal of the expected information matrix.
The default is \code{FALSE}, meaning faster convergence.

}
\item{iters.diagW}{
Integer.
Number of iterations in which the diagonal elements of the
expected information matrix are used.
Only used if \code{diagW = TRUE}.

}
\item{nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for more information.

}
}
\details{
Given a value of the covariate, this function applies a
Yeo-Johnson transformation to the response to best obtain
normality.
The parameters chosen to do this are estimated by maximum
likelihood or penalized maximum likelihood.
The function \code{lms.yjn2()} estimates the expected
information matrices using simulation (and is consequently
slower) while \code{lms.yjn()} uses numerical integration.
Try the other if one function fails.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.

}
\references{
Yeo, I.-K. and Johnson, R. A. (2000)
A new family of power transformations to improve normality or
symmetry.
\emph{Biometrika},
\bold{87}, 954--959.

Yee, T. W. (2004)
Quantile regression via vector generalized additive models.
\emph{Statistics in Medicine}, \bold{23}, 2295--2315.

Yee, T. W. (2002)
An Implementation for Regression Quantile Estimation.
Pages 3--14.
In: Haerdle, W. and Ronz, B.,
\emph{Proceedings in Computational Statistics COMPSTAT 2002}.
Heidelberg: Physica-Verlag.

%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.

}
\author{ Thomas W. Yee }
\note{
The response may contain both positive and negative values.
In contrast, the LMS-Box-Cox-normal and LMS-Box-Cox-gamma
methods only handle a positive response because the Box-Cox
transformation cannot handle negative values.

Some other notes can be found at \code{\link{lms.bcn}}.

}
\section{Warning }{
The computations are not simple; therefore convergence may
fail.
In that case, try different starting values.

The generic function \code{predict}, when applied to an
\code{lms.yjn} fit, does not add back the \code{yoffset} value.

As described above, this family function is experimental and
the LMS-BCN family function is recommended instead.

}
\seealso{
\code{\link{lms.bcn}},
\code{\link{lms.bcg}},
\code{\link{qtplot.lmscreg}},
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
\code{\link{bmi.nz}},
\code{\link{amlnormal}}.
}
\examples{
fit <- vgam(BMI ~ s(age, df = 4), lms.yjn, bmi.nz, trace = TRUE)
head(predict(fit))
head(fitted(fit))
head(bmi.nz)  # Person 1 is near the lower quartile of BMI amongst people his age
head(cdf(fit))

\dontrun{
# Quantile plot
par(bty = "l", mar = c(5, 4, 4, 3) + 0.1, xpd = TRUE)
qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles",
       xlim = c(15, 90), las = 1, ylab = "BMI", lwd = 2, lcol = 4)

# Density plot
ygrid <- seq(15, 43, len = 100)  # BMI ranges
par(mfrow = c(1, 1), lwd = 2)
(aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
    main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
             Attach = TRUE)
with(aa@post, deplot)  # Contains density function values; == aa@post$deplot
}
}
\keyword{models}
\keyword{regression}
VGAM/man/plotvgam.Rd0000644000176200001440000001343013565414527013714 0ustar liggesusers\name{plotvgam}
\alias{plotvgam}
\alias{plot.vgam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Default VGAM Plotting }
\description{
Component functions of a \code{\link{vgam-class}} object can be
plotted with \code{plotvgam()}.
These are on the scale of the linear/additive predictor.

}
\usage{
plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
         rugplot = TRUE, se = FALSE, scale = 0, raw = TRUE,
         offset.arg = 0, deriv.arg = 0, overlay = FALSE,
         type.residuals = c("deviance", "working", "pearson", "response"),
         plot.arg = TRUE, which.term = NULL, which.cf = NULL,
         control = plotvgam.control(...), varxij = 1, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A fitted \pkg{VGAM} object, e.g., produced by
\code{\link{vgam}}, \code{\link{vglm}}, or \code{\link{rrvglm}}.

}
\item{newdata}{
Data frame.
May be used to reconstruct the original data set.

}
\item{y}{ Unused.
}
\item{residuals}{
Logical. If \code{TRUE} then residuals are plotted.
See \code{type.residuals}.

}
\item{rugplot}{
Logical. If \code{TRUE} then a rug plot is plotted at the foot
of each plot.
These values are jittered to expose ties.

}
\item{se}{
Logical. If \code{TRUE} then approximate \eqn{\pm 2}{+-2}
pointwise standard error bands are included in the plot.

}
\item{scale}{
Numerical. By default, each plot will have its own y-axis
scale.
However, by specifying a value, each plot's y-axis scale will
be at least \code{scale} wide.

}
\item{raw}{
Logical. If \code{TRUE} then the smooth functions are those
obtained directly by the algorithm, and are plotted without
having to premultiply with the constraint matrices.
If \code{FALSE} then the smooth functions have been
premultiplied by the constraint matrices.
The \code{raw} argument is directly fed into
\code{predict.vgam()}.

}
\item{offset.arg}{
Numerical vector of length \eqn{r}.
These are added to the component functions.
Useful for separating out the functions when \code{overlay} is
\code{TRUE}.
If \code{overlay} is \code{TRUE} and there is one covariate
then using the intercept values as the offsets can be a good
idea.

}
\item{deriv.arg}{
Numerical. The order of the derivative.
Should be assigned a small integer such as 0, 1, 2.
Applying only to \code{s()} terms, it plots the derivative.

}
\item{overlay}{
Logical. If \code{TRUE} then component functions of the same
covariate are overlaid on each other.
The functions are centered, so \code{offset.arg} can be useful
when \code{overlay} is \code{TRUE}.
} \item{type.residuals}{ If \code{residuals} is \code{TRUE} then the first possible value of this vector is used to specify the type of residual. } \item{plot.arg}{ Logical. If \code{FALSE} then no plot is produced. } \item{which.term}{ Character or integer vector containing all terms to be plotted, e.g., \code{which.term = c("s(age)", "s(height)")} or \code{which.term = c(2, 5, 9)}. By default, all are plotted. } \item{which.cf}{ An integer-valued vector specifying which linear/additive predictors are to be plotted. The values must be from the set \{1,2,\ldots,\eqn{r}\}. By default, all are plotted. } \item{control}{ Other control parameters. See \code{\link{plotvgam.control}}. } \item{\dots}{ Other arguments that can be fed into \code{\link{plotvgam.control}}. This includes line colors, line widths, line types, etc. } \item{varxij}{ Positive integer. If the \code{xij} argument of \code{\link{vglm.control}} was used, this chooses which inner argument the component is plotted against. This argument is related to \code{raw = TRUE} and terms such as \code{NS(dum1, dum2)} and constraint matrices that have more than one column. The default would plot the smooth against \code{dum1} but setting \code{varxij = 2} could mean plotting the smooth against \code{dum2}. See the \pkg{VGAM} website for further information. } }
\details{ In this help file \eqn{M} is the number of linear/additive predictors, and \eqn{r} is the number of columns of the constraint matrix of interest. Many of \code{plotvgam()}'s options can be found in \code{\link{plotvgam.control}}, e.g., line types, line widths, colors. }
\value{ The original object, but with the \code{preplot} slot of the object assigned information regarding the plot. }
%\references{
%
%
%Yee, T. W. and Wild, C. J. (1996)
%Vector generalized additive models.
%\emph{Journal of the Royal Statistical Society, Series B, Methodological},
%\bold{58}, 481--493.
%
%
%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.
%
%
%}
\author{ Thomas W. Yee }
\note{ While \code{plot(fit)} will work if \code{class(fit)} is \code{"vgam"}, it is necessary to use \code{plotvgam(fit)} explicitly otherwise. \code{plotvgam()} is quite buggy at the moment.
% \code{plotvgam()} works in a similar
% manner to S-PLUS's \code{plot.gam()}, however, there is no
% options for interactive construction of the plots yet.
}
\seealso{ \code{\link{vgam}}, \code{\link{plotvgam.control}}, \code{predict.vgam}, \code{\link{plotvglm}}, \code{\link{vglm}}. }
\examples{
coalminers <- transform(coalminers, Age = (age - 42) / 5)
fit <- vgam(cbind(nBnW, nBW, BnW, BW) ~ s(Age), binom2.or(zero = NULL), data = coalminers)
\dontrun{
par(mfrow = c(1,3))
plot(fit, se = TRUE, ylim = c(-3, 2), las = 1)
plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange", ylim = c(-3, 2))
plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange", overlay = TRUE)
} }
\keyword{models}
\keyword{regression}
\keyword{smooth}
\keyword{graphs}
VGAM/man/formulavlm.Rd0000644000176200001440000000364113565414527014252 0ustar liggesusers\name{formulavlm}
%\name{confint}
\alias{formula.vlm}
\alias{formulavlm}
\alias{term.names}
\alias{term.namesvlm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Model Formulae and Term Names for VGLMs }
\description{ A methods function for \code{formula} that extracts the formula from a fitted object, together with a methods function that returns the names of the terms in the formula.
}
\usage{
\method{formula}{vlm}(x, \dots)
formulavlm(x, form.number = 1, \dots)
term.names(model, \dots)
term.namesvlm(model, form.number = 1, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{x, model}{ A fitted model object. } \item{form.number}{Formula number: either 1 or 2, corresponding to the arguments \code{formula} and \code{form2} respectively. } \item{\dots}{Same as \code{\link[stats]{formula}}. } }
\details{ The \code{formula} methods function is based on \code{\link[stats]{formula}}. }
\value{ The \code{formula} methods function should return something similar to \code{\link[stats]{formula}}. The \code{term.names} methods function should return a character string with the terms in the formula; this includes any intercept (which is denoted by \code{"(Intercept)"} as the first element). }
%\references{
%}
\author{ Thomas W. Yee }
%\note{
%}
%\section{Warning }{
%}
\seealso{ \code{\link{has.interceptvlm}}.
% \code{termsvlm}.
}
\examples{
# Example: this is based on a glm example
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3, 1, 9); treatment <- gl(3, 3)
vglm.D93 <- vglm(counts ~ outcome + treatment, family = poissonff)
formula(vglm.D93)
pdata <- data.frame(counts, outcome, treatment)  # Better style
vglm.D93 <- vglm(counts ~ outcome + treatment, poissonff, data = pdata)
formula(vglm.D93)
term.names(vglm.D93)
responseName(vglm.D93)
has.intercept(vglm.D93)
}
\keyword{models}
\keyword{regression}
VGAM/man/weightsvglm.Rd0000644000176200001440000001042713565414527014426 0ustar liggesusers\name{weightsvglm}
\alias{weightsvglm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Prior and Working Weights of a VGLM fit }
\description{ Returns either the prior weights or working weights of a VGLM object. }
\usage{ weightsvglm(object, type = c("prior", "working"), matrix.arg = TRUE, ignore.slot = FALSE, deriv.arg = FALSE, ...) }
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{object}{ a model object from the \pkg{VGAM} \R package that inherits from a \emph{vector generalized linear model} (VGLM), e.g., a model of class \code{"vglm"}. } \item{type}{ Character; which type of weight is to be returned? The default is the first one. } \item{matrix.arg}{ Logical, whether the answer is returned as a matrix. If not, it will be a vector. } \item{ignore.slot}{ Logical. If \code{TRUE} then \code{object@weights} is ignored even if it has been assigned, and the long calculation for \code{object@weights} is repeated. This may give a slightly different answer because the final IRLS step at convergence may or may not assign the latest values of quantities such as the mean and weights. } \item{deriv.arg}{ Logical. If \code{TRUE} then a list with components \code{deriv} and \code{weights} is returned. See below for more details. } \item{\dots}{ Currently ignored. } }
\details{ Prior weights are usually inputted with the \code{weights} argument in functions such as \code{\link{vglm}} and \code{\link{vgam}}. It may refer to frequencies of the individual data or be weight matrices specified beforehand. Working weights are used by the IRLS algorithm. They correspond to the second derivatives of the log-likelihood function with respect to the linear predictors. The working weights correspond to positive-definite weight matrices and are returned in \emph{matrix-band} form, e.g., the first \eqn{M} columns correspond to the diagonals, etc.
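% A minimal sketch of the matrix-band format just described (values are
% illustrative only): with M = 2, the symmetric weight matrix of one
% observation, having diagonals (w11, w22) and off-diagonal w12, is
% stored as the row (w11, w22, w12). Converting back to array form uses
% m2a(), as in the example below:
% wband <- cbind(w11 = 1, w22 = 2, w12 = 0.5)  # One observation, M = 2
% m2a(wband, M = 2)  # A 2 x 2 x 1 array of weight matrices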
% 20171226:
If one wants to perturb the linear predictors then the \code{fitted.values} slot should be assigned to the object before calling this function. The reason is that, for some family functions, the variable \code{mu} is used directly as one of the parameter estimates, without recomputing it from \code{eta}. }
\value{ If \code{type = "working"} and \code{deriv = TRUE} then a list is returned with the two components described below. Otherwise the prior or working weights are returned depending on the value of \code{type}. \item{deriv}{ Typically the first derivative of the log-likelihood with respect to the linear predictors. For example, this is the variable \code{deriv.mu} in \code{vglm.fit()}, or equivalently, the matrix returned in the \code{"deriv"} slot of a \pkg{VGAM} family function. } \item{weights }{ The working weights. } }
%\references{
% Yee, T. W. and Hastie, T. J. (2003)
% Reduced-rank vector generalized linear models.
% \emph{Statistical Modelling},
% \bold{3}, 15--41.
% Chambers, J. M. and T. J. Hastie (eds) (1992)
% \emph{Statistical Models in S}.
% Wadsworth & Brooks/Cole.
%}
\author{ Thomas W. Yee }
\note{ This function is intended to be similar to \code{weights.glm} (see \code{\link[stats]{glm}}). }
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{ \code{\link[stats]{glm}}, \code{\link{vglmff-class}}, \code{\link{vglm}}. }
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
depvar(fit)  # These are sample proportions
weights(fit, type = "prior", matrix = FALSE)  # Number of observations
# Look at the working residuals
nn <- nrow(model.matrix(fit, type = "lm"))
M <- ncol(predict(fit))
wwt <- weights(fit, type = "working", deriv = TRUE)  # In matrix-band format
wz <- m2a(wwt$weights, M = M)  # In array format
wzinv <- array(apply(wz, 3, solve), c(M, M, nn))
wresid <- matrix(NA, nn, M)  # Working residuals
for (ii in 1:nn)
  wresid[ii, ] <- wzinv[, , ii, drop = TRUE] \%*\% wwt$deriv[ii, ]
max(abs(c(resid(fit, type = "work")) - c(wresid)))  # Should be 0
(zedd <- predict(fit) + wresid)  # Adjusted dependent vector
}
\keyword{models}
\keyword{regression}
VGAM/man/gabinomial.mlm.Rd0000644000176200001440000001351313565414527014753 0ustar liggesusers\name{gabinomial.mlm}
\alias{gabinomial.mlm}
%\alias{gabinomial.mlmff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generally-Altered Binomial Regression Family Function (multinomial logit model based; GA-binom-MLM) }
\description{ Fits a generally-altered binomial regression (using a multinomial logit model for the altered values). }
\usage{ gabinomial.mlm(alter = 0, zero = NULL, lprob = "logitlink", type.fitted = c("mean", "prob", "pobs.a", "Pobs.a"), imethod = 1, iprob = NULL) }
%- maybe also 'usage' for other objects documented here.
% ipobs0 = NULL,
% ishrinkage = 0.95,, probs.y = 0.35
\arguments{ \item{alter}{ Vector of altered values, i.e., nonnegative integers. Must have unique values only. May be \code{NULL}, which stands for the empty set (same as \code{\link{binomialff}}). The default is the 0-altered binomial distribution.
% Must be sorted and have unique values only.
} \item{lprob}{ See \code{\link{Links}} for more choices and information. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for information. The choice \code{"pobs.a"} is the probability of an altered value. See \code{\link{gatpoisson.mlm}} for more details.
% and \code{"onempobs.a"} is its complement. } \item{imethod, iprob}{ See \code{\link{CommonVGAMffArguments}} for information. % ipobs0, } % \item{probs.y}{ % See \code{\link{CommonVGAMffArguments}} for information. % } \item{zero}{ See \code{\link{CommonVGAMffArguments}} for information. Setting \code{zero = "pobs"} will model the multinomial probabilities as simple as possible (intercept-only), hence should be more numerically stable than the default, and this is recommended for many analyses especially when there are many explanatory variables.. } } \details{ The generally-altered binomial distribution is the ordinary binomial distribution with the probability of certain values (\code{alter} argument) being modelled using a multinomial logit model. The other values are modelled using a \emph{generally-truncated binomial} distribution. The 0-altered binomial distribution is a special case and is the default (it is called a \emph{hurdle} model by some people). This function implements Fisher scoring and currently does not handle multiple responses. Compared to \code{gibinomial} (not complete yet) this family function handles deflation and inflation, therefore handles a wider range of data. % Compared to \code{\link{gibinomial}} } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, returns the mean \eqn{\mu}{mu} (default). The choice \code{type.fitted = "pobs.a"} returns the sum of all the altered probabilities. See \code{\link{gatpoisson.mlm}} for more details. } %\references{ %} %20111123; this has been fixed up with proper FS using EIM. %\section{Warning }{ % Inference obtained from \code{summary.vglm} % and \code{summary.vgam} may or may not be correct. % In particular, the p-values, standard errors and degrees of % freedom may need adjustment. Use simulation on artificial % data to check that these are reasonable. % % %} \author{ T. W. Yee and Chenchen Ma} \note{ This family function does not yet have the robustness of \code{\link{multinomial}} when computing the working weight matrices. Several improvements are needed, e.g., better labelling and initial values and handling multiple responses. The default value of \code{zero} is not the best for most data sets; so assign it a value (see above). % And \code{\link{summaryvglm}} does not work yet on these fitted % models. % yettodo: see lines just above. This family function effectively combines \code{\link{gtbinomial}} and \code{\link{multinomial}} into one family function. % This family function can handle multiple responses, % e.g., more than one species. } \seealso{ \code{\link{Gaitbinom.mlm}}, \code{\link{gtbinomial}}, \code{\link{zabinomial}}, \code{\link{multinomial}}, \code{\link{rposbinom}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. 
% \code{\link{gibinomial}},
}
\examples{
avec <- c(0, 5)  # Alter these values
Size <- 10
gdata <- data.frame(x2 = runif(nn <- 1000))
gdata <- transform(gdata, x3 = runif(nn))
gdata <- transform(gdata, prob1 = logitlink(1 + 2 * x2 + .5 * x3, inverse = TRUE),
                          prob2 = logitlink(1 - 1 * x2 + .5 * x3, inverse = TRUE),
                          prob3 = logitlink(1, inverse = TRUE))
gdata <- transform(gdata,
  y1 = rgaitbinom.mlm(nn, prob = prob1, Size, pobs.a = c(0.2, 0.3), byrow = TRUE, alter = avec),
  y2 = rgaitbinom.mlm(nn, prob = prob2, Size, pobs.a = c(0.2, 0.3), byrow = TRUE, alter = avec),
  y3 = rgaitbinom.mlm(nn, prob = prob3, Size, pobs.a = c(0.2, 0.3), byrow = TRUE, alter = avec))
gabinomial.mlm(alter = avec)
with(gdata, table(y1))
with(gdata, table(y2))
with(gdata, table(y3))
fit1 <- vglm(cbind(y1, Size-y1) ~ x2 + x3, gabinomial.mlm(alter = avec), crit = "coef", trace = TRUE, data = gdata)
head(fitted(fit1))
head(predict(fit1))
coef(fit1, matrix = TRUE)
summary(fit1)
# Another example ------------------------------
fit3 <- vglm(cbind(y3, Size-y3) ~ 1, gabinomial.mlm(alter = avec), gdata, trace = TRUE)
coef(fit3, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
%gabinomial.mlm(lpobs0 = "logitlink", lprob = "logitlink",
% type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL)
%gabinomial.mlmff(lprob = "logitlink", lonempobs0 = "logitlink",
% type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0")
VGAM/man/MNSs.Rd0000644000176200001440000000375513565414527012714 0ustar liggesusers\name{MNSs}
\alias{MNSs}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The MNSs Blood Group System }
\description{ Estimates the three independent parameters of the MNSs blood group system. }
\usage{ MNSs(link = "logitlink", imS = NULL, ims = NULL, inS = NULL) }
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{link}{ Link function applied to the three parameters. See \code{\link{Links}} for more choices. } \item{imS, ims, inS}{ Optional initial value for \code{mS}, \code{ms} and \code{nS} respectively. A \code{NULL} means they are computed internally. } }
\details{ There are three independent parameters: \code{m_S}, \code{m_s}, \code{n_S}, say, so that \code{n_s = 1 - m_S - m_s - n_S}. We let the eta vector (transposed) be \code{(g(m_S), g(m_s), g(n_S))} where \code{g} is the link function. }
\value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. }
\references{ Elandt-Johnson, R. C. (1971) \emph{Probability Models and Statistical Methods in Genetics}, New York: Wiley. }
\author{ T. W. Yee }
\note{ The input can be a 6-column matrix of counts, where the columns are MS, Ms, MNS, MNs, NS, Ns (in order). Alternatively, the input can be a 6-column matrix of proportions (so each row adds to 1) and the \code{weights} argument is used to specify the total number of counts for each row. }
\seealso{ \code{\link{AA.Aa.aa}}, \code{\link{AB.Ab.aB.ab}}, \code{\link{ABO}}, \code{\link{A1A2A3}}.
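% A hedged sketch of the alternative (proportions) input form described
% in the Note above; it assumes the 6-column count matrix 'y' defined in
% the example below:
% props <- y / sum(y)  # One row of proportions summing to 1
% fitp <- vglm(props ~ 1, MNSs, weights = sum(y), trace = TRUE)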
% \code{\link{AB.Ab.aB.ab2}},
}
\examples{
# Only the column order matters:
y <- cbind(MS = 295, Ms = 107, MNS = 379, MNs = 322, NS = 102, Ns = 214)
fit <- vglm(y ~ 1, MNSs("logitlink", .25, .28, .08), trace = TRUE)
fit <- vglm(y ~ 1, MNSs(link = logitlink), trace = TRUE, crit = "coef")
Coef(fit)
rbind(y, sum(y)*fitted(fit))
sqrt(diag(vcov(fit)))
}
\keyword{models}
\keyword{regression}
VGAM/man/polonoUC.Rd0000644000176200001440000001023413565414527013620 0ustar liggesusers\name{Polono}
\alias{Polono}
\alias{dpolono}
\alias{ppolono}
%\alias{qpolono}
\alias{rpolono}
\title{The Poisson Lognormal Distribution}
\description{ Density, distribution function and random generation for the Poisson lognormal distribution. }
\usage{
dpolono(x, meanlog = 0, sdlog = 1, bigx = 170, ...)
ppolono(q, meanlog = 0, sdlog = 1, isOne = 1 - sqrt( .Machine$double.eps ), ...)
rpolono(n, meanlog = 0, sdlog = 1)
}
\arguments{ \item{x, q}{vector of quantiles.}
% \item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{meanlog, sdlog }{ the mean and standard deviation of the normal distribution (on the log scale). They match the arguments in \code{\link[stats:Lognormal]{Lognormal}}. } \item{bigx}{ Numeric. This argument is for handling large values of \code{x} and/or when \code{\link[stats]{integrate}} fails. A first order Taylor series approximation [Equation (7) of Bulmer (1974)] is used at values of \code{x} that are greater than or equal to this argument. For \code{bigx = 10}, he showed that the approximation has a relative error less than 0.001 for values of \code{meanlog} and \code{sdlog} ``likely to be encountered in practice''. The argument can be assigned \code{Inf} in which case the approximation is not used. } \item{isOne }{ Used to test whether the cumulative probabilities have effectively reached unity. } \item{...}{ Arguments passed into \code{\link[stats]{integrate}}. } }
\value{ \code{dpolono} gives the density, \code{ppolono} gives the distribution function, and \code{rpolono} generates random deviates.
% \code{qpolono} gives the quantile function, and
}
\references{ Bulmer, M. G. (1974) On fitting the Poisson lognormal distribution to species-abundance data. \emph{Biometrics}, \bold{30}, 101--110. }
\author{ T. W. Yee. Some anonymous soul kindly wrote \code{ppolono()} and improved the original \code{dpolono()}. }
\details{ The Poisson lognormal distribution is similar to the negative binomial in that it can be motivated by a Poisson distribution whose mean parameter comes from a right skewed distribution (gamma for the negative binomial and lognormal for the Poisson lognormal distribution).
% See zz code{link{polonozz}}, the \pkg{VGAM} family function
% for estimating the parameters,
% for the formula of the probability density function and other details.
}
\note{ By default, \code{dpolono} involves numerical integration that is performed using \code{\link[stats]{integrate}}. Consequently, computations are very slow and numerical problems may occur (if so then the use of \code{...} may be needed). Alternatively, for extreme values of \code{x}, \code{meanlog}, \code{sdlog}, etc., assigning \code{bigx} a small value (so that the Taylor series approximation is always used) avoids the call to \code{\link[stats]{integrate}}; however, the answer may be a little inaccurate. A \pkg{VGAM} family function for maximum likelihood estimation of the two parameters, called \code{polono()} say, has not been written yet.
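% A quick sketch contrasting the two computational routes of dpolono()
% (the argument values are illustrative only):
% dpolono(5, meanlog = 0.5, sdlog = 0.5)  # Numerical integration
% dpolono(5, meanlog = 0.5, sdlog = 0.5, bigx = 5)  # Taylor approximation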
} \seealso{ \code{\link{lognormal}}, \code{\link{poissonff}}, \code{\link{negbinomial}}. } \examples{ meanlog <- 0.5; sdlog <- 0.5; yy <- 0:19 sum(proby <- dpolono(yy, m = meanlog, sd = sdlog)) # Should be 1 max(abs(cumsum(proby) - ppolono(yy, m = meanlog, sd = sdlog))) # Should be 0 \dontrun{ opar = par(no.readonly = TRUE) par(mfrow = c(2, 2)) plot(yy, proby, type = "h", col = "blue", ylab = "P[Y=y]", log = "", main = paste("Poisson lognormal(m = ", meanlog, ", sdl = ", sdlog, ")", sep = "")) y <- 0:190 # More extreme values; use the approximation and plot on a log scale (sum(proby <- dpolono(y, m = meanlog, sd = sdlog, bigx = 100))) # Should be 1 plot(y, proby, type = "h", col = "blue", ylab = "P[Y=y] (log)", log = "y", main = paste("Poisson lognormal(m = ", meanlog, ", sdl = ", sdlog, ")", sep = "")) # Note the kink at bigx # Random number generation table(y <- rpolono(n = 1000, m = meanlog, sd = sdlog)) hist(y, breaks = ((-1):max(y))+0.5, prob = TRUE, border = "blue") par(opar) } } \keyword{distribution} VGAM/man/multinomial.Rd0000644000176200001440000003017113565414527014416 0ustar liggesusers\name{multinomial} \alias{multinomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Multinomial Logit Model } \description{ Fits a multinomial logit model to a (preferably unordered) factor response. } \usage{ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL, refLevel = "(Last)", whitespace = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{zero}{ Can be an integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. Any values must be from the set \{1,2,\ldots,\eqn{M}\}. The default value means none are modelled as intercept-only terms. See \code{\link{CommonVGAMffArguments}} for more information. } \item{parallel}{ A logical, or formula specifying which terms have equal/unequal coefficients. } \item{nointercept, whitespace}{ See \code{\link{CommonVGAMffArguments}} for more details. } \item{refLevel}{ Either a (1) single positive integer or (2) a value of the factor or (3) a character string. If inputted as an integer then it specifies which column of the response matrix is the reference or baseline level. The default is the last one (the \eqn{(M+1)}th one). If used, this argument will be usually assigned the value \code{1}. If inputted as a value of a factor then beware of missing values of certain levels of the factor (\code{drop.unused.levels = TRUE} or \code{drop.unused.levels = FALSE}). See the example below. If inputted as a character string then this should be equal to (A) one of the levels of the factor response, else (B) one of the column names of the matrix response of counts; e.g., \code{vglm(cbind(normal, mild, severe) ~ let, multinomial(refLevel = "severe"), data = pneumo)} if it was (incorrectly because the response is ordinal) applied to the \code{\link{pneumo}} data set. Another example is \code{vglm(ethnicity ~ age, multinomial(refLevel = "European"), data = xs.nz)} if it was applied to the \code{\link[VGAMdata]{xs.nz}} data set. } } \details{ In this help file the response \eqn{Y} is assumed to be a factor with unordered values \eqn{1,2,\dots,M+1}, so that \eqn{M} is the number of linear/additive predictors \eqn{\eta_j}{eta_j}. The default model can be written \deqn{\eta_j = \log(P[Y=j]/ P[Y=M+1])}{% eta_j = log(P[Y=j]/ P[Y=M+1])} where \eqn{\eta_j}{eta_j} is the \eqn{j}th linear/additive predictor. 
Here, \eqn{j=1,\ldots,M}, and \eqn{\eta_{M+1}}{eta_{M+1}} is 0 by definition. That is, the last level of the factor, or last column of the response matrix, is taken as the reference level or baseline---this is for identifiability of the parameters. The reference or baseline level can be changed with the \code{refLevel} argument. In almost all the literature, the constraint matrices associated with this family of models are known. For example, setting \code{parallel = TRUE} will make all constraint matrices (including the intercept) equal to a vector of \eqn{M} 1's; to suppress the intercepts from being parallel then set \code{parallel = FALSE ~ 1}. If the constraint matrices are unknown and to be estimated, then this can be achieved by fitting the model as a reduced-rank vector generalized linear model (RR-VGLM; see \code{\link{rrvglm}}). In particular, a multinomial logit model with unknown constraint matrices is known as a \emph{stereotype} model (Anderson, 1984), and can be fitted with \code{\link{rrvglm}}. % Pre 20170816; Stuart Coles picked up an error: % For example, setting \code{parallel = TRUE} will make all % constraint matrices % (except for the intercept) % equal to a vector of \eqn{M} 1's. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Yee, T. W. (2010) The \pkg{VGAM} package for categorical data analysis. \emph{Journal of Statistical Software}, \bold{32}, 1--34. \url{http://www.jstatsoft.org/v32/i10/}. Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. Agresti, A. (2013) \emph{Categorical Data Analysis}, 3rd ed. Hoboken, NJ, USA: Wiley. Hastie, T. J., Tibshirani, R. J. and Friedman, J. H. (2009) \emph{The Elements of Statistical Learning: Data Mining, Inference and Prediction}, 2nd ed. New York, USA: Springer-Verlag. Simonoff, J. S. (2003) \emph{Analyzing Categorical Data}, New York, USA: Springer-Verlag. Anderson, J. A. (1984) Regression and ordered categorical variables. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{46}, 1--30. Tutz, G. (2012) \emph{Regression for Categorical Data}, Cambridge University Press. %Further information and examples on categorical data analysis %by the \pkg{VGAM} package can be found at %\url{http://www.stat.auckland.ac.nz/~yee/VGAM/doc/categorical.pdf}. } \author{ Thomas W. Yee } \note{ The response should be either a matrix of counts (with row sums that are all positive), or a factor. In both cases, the \code{y} slot returned by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}} is the matrix of sample proportions. The multinomial logit model is more appropriate for a nominal (unordered) factor response than for an ordinal (ordered) factor response. Models more suited for the latter include those based on cumulative probabilities, e.g., \code{\link{cumulative}}. \code{multinomial} is prone to numerical difficulties if the groups are separable and/or the fitted probabilities are close to 0 or 1. The fitted values returned are estimates of the probabilities \eqn{P[Y=j]} for \eqn{j=1,\ldots,M+1}. See \pkg{safeBinaryRegression} for the logistic regression case. Here is an example of the usage of the \code{parallel} argument. 
If there are covariates \code{x2}, \code{x3} and \code{x4}, then \code{parallel = TRUE ~ x2 + x3 - 1} and \code{parallel = FALSE ~ x4} are equivalent. This would constrain the regression coefficients for \code{x2} and \code{x3} to be equal; those of the intercepts and \code{x4} would be different. In Example 4 below, a conditional logit model is fitted to an artificial data set that explores how cost and travel time affect people's decision about how to travel to work. Walking is the baseline group. The variable \code{Cost.car} is the difference between the cost of travel to work by car and walking, etc. The variable \code{Time.car} is the difference between the travel duration/time to work by car and walking, etc. For other details about the \code{xij} argument see \code{\link{vglm.control}} and \code{\link{fill}}. The \code{\link[nnet]{multinom}} function in the \pkg{nnet} package uses the first level of the factor as baseline, whereas the last level of the factor is used here. Consequently the estimated regression coefficients differ. } % In the future, this family function may be renamed to % ``\code{mum}'' (for \bold{mu}ltinomial logit \bold{m}odel). % Please let me know if you strongly agree or disagree about this. \section{Warning }{ No check is made to verify that the response is nominal. See \code{\link{CommonVGAMffArguments}} for more warnings. } \seealso{ \code{\link{margeff}}, \code{\link{cumulative}}, \code{\link{acat}}, \code{\link{cratio}}, \code{\link{sratio}}, \code{\link{dirichlet}}, \code{\link{dirmultinomial}}, \code{\link{rrvglm}}, \code{\link{fill1}}, \code{\link[stats:Multinom]{Multinomial}}, \code{\link{multilogitlink}}, \code{\link[datasets]{iris}}. The author's homepage has further documentation about categorical data analysis using \pkg{VGAM}. } % \code{\link[base:Multinom]{rmultinom}} % \code{\link{pneumo}}, \examples{ # Example 1: fit a multinomial logit model to Edgar Anderson's iris data data(iris) \dontrun{ fit <- vglm(Species ~ ., multinomial, iris) coef(fit, matrix = TRUE) } # Example 2a: a simple example ycounts <- t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8))) # Counts fit <- vglm(ycounts ~ 1, multinomial) head(fitted(fit)) # Proportions fit@prior.weights # NOT recommended for extraction of prior weights weights(fit, type = "prior", matrix = FALSE) # The better method depvar(fit) # Sample proportions; same as fit@y constraints(fit) # Constraint matrices # Example 2b: Different reference level used as the baseline fit2 <- vglm(ycounts ~ 1, multinomial(refLevel = 2)) coef(fit2, matrix = TRUE) coef(fit , matrix = TRUE) # Easy to reconcile this output with fit2 # Example 3: The response is a factor. 
nn <- 10 dframe3 <- data.frame(yfac = gl(3, nn, labels = c("Ctrl", "Trt1", "Trt2")), x2 = runif(3 * nn)) myrefLevel <- with(dframe3, yfac[12]) fit3a <- vglm(yfac ~ x2, multinomial(refLevel = myrefLevel), dframe3) fit3b <- vglm(yfac ~ x2, multinomial(refLevel = 2), dframe3) coef(fit3a, matrix = TRUE) # "Trt1" is the reference level coef(fit3b, matrix = TRUE) # "Trt1" is the reference level margeff(fit3b) # Example 4: Fit a rank-1 stereotype model fit4 <- rrvglm(Country ~ Width + Height + HP, multinomial, data = car.all) coef(fit4) # Contains the C matrix constraints(fit4)$HP # The A matrix coef(fit4, matrix = TRUE) # The B matrix Coef(fit4)@C # The C matrix concoef(fit4) # Better to get the C matrix this way Coef(fit4)@A # The A matrix svd(coef(fit4, matrix = TRUE)[-1, ])$d # This has rank 1; = C %*% t(A) # Classification (but watch out for NAs in some of the variables): apply(fitted(fit4), 1, which.max) # Classification colnames(fitted(fit4))[apply(fitted(fit4), 1, which.max)] # Classification apply(predict(fit4, car.all, type = "response"), 1, which.max) # Ditto # Example 5: The use of the xij argument (aka conditional logit model) set.seed(111) nn <- 100 # Number of people who travel to work M <- 3 # There are M+1 models of transport to go to work ycounts <- matrix(0, nn, M+1) ycounts[cbind(1:nn, sample(x = M+1, size = nn, replace = TRUE))] = 1 dimnames(ycounts) <- list(NULL, c("bus","train","car","walk")) gotowork <- data.frame(cost.bus = runif(nn), time.bus = runif(nn), cost.train= runif(nn), time.train= runif(nn), cost.car = runif(nn), time.car = runif(nn), cost.walk = runif(nn), time.walk = runif(nn)) gotowork <- round(gotowork, digits = 2) # For convenience gotowork <- transform(gotowork, Cost.bus = cost.bus - cost.walk, Cost.car = cost.car - cost.walk, Cost.train = cost.train - cost.walk, Cost = cost.train - cost.walk, # for labelling Time.bus = time.bus - time.walk, Time.car = time.car - time.walk, Time.train = time.train - time.walk, Time = time.train - time.walk) # for labelling fit <- vglm(ycounts ~ Cost + Time, multinomial(parall = TRUE ~ Cost + Time - 1), xij = list(Cost ~ Cost.bus + Cost.train + Cost.car, Time ~ Time.bus + Time.train + Time.car), form2 = ~ Cost + Cost.bus + Cost.train + Cost.car + Time + Time.bus + Time.train + Time.car, data = gotowork, trace = TRUE) head(model.matrix(fit, type = "lm")) # LM model matrix head(model.matrix(fit, type = "vlm")) # Big VLM model matrix coef(fit) coef(fit, matrix = TRUE) constraints(fit) summary(fit) max(abs(predict(fit) - predict(fit, new = gotowork))) # Should be 0 } \keyword{models} \keyword{regression} % 20100915; this no longer works: % # Example 2c: Different input to Example 2a but same result % w <- apply(ycounts, 1, sum) # Prior weights % yprop <- ycounts / w # Sample proportions % fitprop <- vglm(yprop ~ 1, multinomial, weights=w) % head(fitted(fitprop)) # Proportions % weights(fitprop, type="prior", matrix=FALSE) % fitprop@y # Same as the input VGAM/man/logloglink.Rd0000644000176200001440000000640413565414527014227 0ustar liggesusers\name{logloglink} \alias{logloglink} \alias{loglog} \alias{loglogloglink} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Log-log and Log-log-log Link Functions } \description{ Computes the two transformations, including their inverse and the first two derivatives. 
}
\usage{
logloglink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
loglogloglink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bvalue}{ Values of \code{theta} which are less than or equal to 1 or \eqn{e} can be replaced by \code{bvalue} before computing the link function value. The component name \code{bvalue} stands for ``boundary value''. See \code{\link{Links}} for more information. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } }
\details{ The log-log link function is commonly used for parameters that are greater than unity. Similarly, the log-log-log link function is applicable for parameters that are greater than \eqn{e}. Numerical values of \code{theta} close to 1 or \eqn{e} or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. One possible application of \code{loglogloglink()} is to the \eqn{k} parameter (also called \code{size}) of \code{\link{negbinomial}} when applied to Poisson-like data that have only a small amount of overdispersion; then \eqn{k} is a large number relative to \code{munb}. In such situations a \code{\link{loglink}} or \code{\link{loglog}} link may not be sufficient to draw the estimate toward the interior of the parameter space. Using a stronger link function can help mitigate the Hauck-Donner effect (see \code{\link{hdeff}}). }
\value{ For \code{logloglink()}: for \code{deriv = 0}, the log of \code{log(theta)}, i.e., \code{log(log(theta))} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{exp(exp(theta))}. For \code{loglogloglink()}: for \code{deriv = 0}, the log of \code{log(log(theta))}, i.e., \code{log(log(log(theta)))} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{exp(exp(exp(theta)))}. For \code{deriv = 1}, the function returns \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. Here, all logarithms are natural logarithms, i.e., to base \emph{e}. }
\references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. }
% \author{ Thomas W. Yee }
\note{ Numerical instability may occur when \code{theta} is close to 1 or \eqn{e} unless \code{bvalue} is used. }
\seealso{ \code{\link{Links}}, \code{\link{loglink}}, \code{\link{logofflink}}. }
\examples{
x <- seq(0.8, 1.5, by = 0.1)
logloglink(x)  # Has NAs
logloglink(x, bvalue = 1.0 + .Machine$double.eps)  # Has no NAs
x <- seq(1.01, 10, len = 100)
logloglink(x)
max(abs(logloglink(logloglink(x), inverse = TRUE) - x))  # Should be 0
}
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/rlplot.gevff.Rd0000644000176200001440000001202313565414527014470 0ustar liggesusers\name{rlplot.gevff}
\alias{rlplot.gevff}
\alias{rlplot.gev}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Return Level Plot for GEV Fits }
\description{ A return level plot is constructed for a GEV-type model.
}
\usage{
rlplot.gevff(object, show.plot = TRUE,
    probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
    add.arg = FALSE, xlab = if(log.arg) "Return Period (log-scale)" else "Return Period",
    ylab = "Return Level", main = "Return Level Plot",
    pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
    llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
    slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
    ylim = NULL, log.arg = TRUE, CI = TRUE, epsilon = 1e-05, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{object}{ A \pkg{VGAM} extremes model of the GEV-type, produced by \code{\link{vglm}} with a family function either \code{"gev"} or \code{"gevff"}. } \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will be done. } \item{probability}{ Numeric vector of probabilities used. } \item{add.arg}{ Logical. Add the plot to an existing plot? } \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. } \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. } \item{main}{ Title of the plot. See \code{\link[graphics]{title}}. } \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. } \item{pcol.arg}{ Color of the points. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{pcex}{ Character expansion of the points. See the \code{cex} argument of \code{\link[graphics]{par}}. } \item{llty.arg}{ Line type. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{lcol.arg}{ Color of the lines. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{llwd.arg}{ Line width. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{slty.arg, scol.arg, slwd.arg}{ Corresponding arguments for the lines used for the confidence intervals. Used only if \code{CI = TRUE}. } \item{ylim}{ Limits for the y-axis. Numeric of length 2. } \item{log.arg}{ Logical. If \code{TRUE} then \code{log=""} otherwise \code{log="x"}. This changes the labelling of the x-axis only. } \item{CI}{ Logical. Add in a 95 percent confidence interval? } \item{epsilon}{ Numeric, close to zero. Used for the finite-difference approximation to the first derivatives with respect to each parameter. If too small, numerical problems will occur. } \item{\dots}{ Arguments passed into the \code{plot} function when setting up the entire plot. Useful arguments here include \code{sub} and \code{las}. } }
\details{ A return level plot plots \eqn{z_p}{zp} versus \eqn{\log(y_p)}{log(yp)}. It is linear if the shape parameter \eqn{\xi=0}{xi=0}. If \eqn{\xi<0}{xi<0} then the plot is convex with asymptotic limit as \eqn{p} approaches zero at \eqn{\mu-\sigma / \xi}{mu-sigma/xi}. And if \eqn{\xi>0}{xi>0} then the plot is concave and has no finite bound. Here, \eqn{G(z_p) = 1-p}{G(zp) = 1-p} where \eqn{0 < p < 1}{0 < p < 1}. }
\examples{ \dontrun{
rlplot(fit) -> i1
rlplot(fit2, pcol = "darkorange", lcol = "blue", log.arg = FALSE,
       scol = "darkgreen", slty = "dashed", las = 1) -> i2
range(i2@post$rlplot$upper - i1@post$rlplot$upper)  # Should be near 0
range(i2@post$rlplot$lower - i1@post$rlplot$lower)  # Should be near 0
} }
\keyword{graphs}
\keyword{models}
\keyword{regression}
VGAM/man/hdeff.Rd0000644000176200001440000002422613565414527013144 0ustar liggesusers\name{hdeff}
\alias{hdeff}
\alias{hdeff.vglm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Hauck-Donner Effects: A Detection Test for Wald Tests }
\description{ A detection test for the Hauck-Donner effect on each regression coefficient of a VGLM regression.
}
\usage{
hdeff(object, ...)
hdeff.vglm(object, derivative = NULL, se.arg = FALSE, subset = NULL, hstep = 0.005, fd.only = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{object}{ A \code{\link{vglm}} object. Although only a limited number of family functions have an analytical solution to the HDE detection test (\code{\link{binomialff}}, \code{\link{borel.tanner}}, \code{\link{cumulative}}, \code{\link{erlang}}, \code{\link{felix}}, \code{\link{lindley}}, \code{\link{poissonff}}, \code{\link{topple}}, \code{\link{uninormal}}, \code{\link{zipoissonff}}, and \code{\link{zipoisson}}; hopefully some more will be implemented in the near future!) the finite-differences (FDs) method can be applied to almost all \pkg{VGAM} family functions to get a numerical solution.
% \code{\link{multinomial}},
} \item{derivative}{ Numeric. Either 1 or 2. Currently only a few models having one linear predictor are handled analytically for \code{derivative = 2}, e.g., \code{\link{binomialff}}, \code{\link{poissonff}}. However, the numerical method can return the first two derivatives for almost all models. } \item{se.arg}{ Logical. If \code{TRUE} then the derivatives of the standard errors are returned as well, because usually the derivatives of the Wald statistics are of central interest. Requires \code{derivative} to be assigned the value 1 or 2 for this argument to operate. } \item{subset}{ Logical or vector of indices, to select the regression coefficients of interest. The default is to select all coefficients. If logical, it is recycled if necessary. If numeric then they should comprise elements from \code{1:length(coef(object))}. This argument can be useful for computing the derivatives of a Cox regression (\code{\link{coxph}}) fitted using artificially created Poisson data; then there are many coefficients that are effectively nuisance parameters. } \item{hstep}{ Positive numeric and recycled to length 2; it is the so-called \emph{step size} when using finite-differences and is often called \eqn{h} in the calculus literature, e.g., \eqn{f'(x)} is approximately \eqn{(f(x+h) - f(x)) / h}. For the 2nd-order partial derivatives, there are two step sizes and hence this argument is recycled to length 2. The default is to have the same values. The 1st-order derivatives use the first value only. It is recommended that a few values of this argument be tried because values of the first and second derivatives can vary accordingly. If any values are too large then the derivatives may be inaccurate; and if too small then the derivatives may be unstable and subject to too much round-off/cancellation error (in fact it may create an error or a \code{NA}). } \item{fd.only}{ Logical; if \code{TRUE} then finite-differences are used to estimate the derivatives even if an analytical solution has been coded. By default, finite-differences are used when an analytical solution has not been implemented.
% as the last resort
} \item{\dots}{ currently unused but may be used in the future for further arguments passed into the other methods functions.
% e.g., \code{subset}.
} }
\details{ Almost all statistical inference based on the likelihood assumes that the parameter estimates are located in the interior of the parameter space. The nonregular case of being located on the boundary is not considered very much and leads to very different results from the regular case. Practically, an important question is: how close is close to the boundary?
One might answer this as: the parameter estimates are too close to the boundary when the Hauck-Donner effect (HDE) is present, whereby the Wald statistic becomes aberrant. Hauck and Donner (1977) first observed an aberration of the Wald test statistic not monotonically increasing as a function of increasing distance between the parameter estimate and the null value. This "disturbing" and "undesirable" underappreciated effect has since been observed in other regression models by various authors. This function computes the first, and possibly second, derivative of the Wald statistic for each regression coefficient. A negative value of the first derivative is indicative of the HDE being present. More information can be obtained from \code{\link{hdeffsev}} regarding HDE severity: there may be none, faint, weak, moderate, strong and extreme amounts of HDE present. In general, most models have derivatives that are computed numerically using finite-difference approximations. The reason is that it takes a lot of work to program in the analytical solution (this includes a few very common models, such as \code{\link{poissonff}} and \code{\link{binomialff}}, where the first two derivatives have been implemented). % and that a likelihood ratio test is recommended. } \value{ By default this function returns a labelled logical vector; a \code{TRUE} means the HDE is affirmative for that coefficient (negative slope). Hence ideally all values are \code{FALSE}. Any \code{TRUE} values suggests that the MLE is too near the boundary of the parameter space, and that the p-value for that regression coefficient is biased upwards. When present a highly significant variable might be deemed nonsignificant, and thus the HDE can create havoc for variable selection. If the HDE is present then more accurate p-values can generally be obtained by conducting a likelihood ratio test (see \code{\link{lrt.stat.vlm}}) or Rao's score test (see \code{\link{score.stat.vlm}}); indeed the default of \code{\link{wald.stat.vlm}} does not suffer from the HDE. %zzLikelihood ratio p-values % By default, a vector of logicals. Setting \code{deriv = 1} returns a numerical vector of first derivatives of the Wald statistics. Setting \code{deriv = 2} returns a 2-column matrix of first and second derivatives of the Wald statistics. Then setting \code{se.arg = TRUE} returns an additional 1 or 2 columns. Some 2nd derivatives are \code{NA} if only a partial analytic solution has been programmed in. For those \pkg{VGAM} family functions whose HDE test has not yet been implemented explicitly (the vast majority of them), finite-difference approximations to the derivatives will be used---see the arguments \code{hstep} and \code{fd.only} for getting some control on them. } \references{ Hauck, J. W. W. and A. Donner (1977) Wald's test as applied to hypotheses in logit analysis. \emph{Journal of the American Statistical Association}, \bold{72}, 851--853. Corrigenda: JASA, \bold{75}, 482. % \textit{JASA 72(360): 851--3}] 75 (370), 482 %Whitehead, J. (1980) %Fitting Cox's regression model to survival data using GLIM. %\emph{Journal of the Royal Statistical Society. %Series C (Applied Statistics)}, %\bold{29}, %268--275. Yee, T. W. (2018) On the Hauck-Donner effect in Wald tests: Detection, and parameter space characterization (\emph{submitted for publication}). } \author{ Thomas W. Yee. 
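% The finite-difference idea behind 'hstep', as a minimal generic sketch
% (hdeff() applies the same idea to the Wald statistic as a function of
% each coefficient; this is illustrative only):
% h <- 0.005  # The default 'hstep'
% f <- function(x) x^2
% (f(3 + h) - f(3)) / h  # Approximately f'(3) = 6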
}
%\section{Warning }{
%}
\note{ The function \code{\link{summaryvglm}} conducts the HDE detection test if possible and prints out a line at the bottom if the HDE is detected for some regression coefficients. By ``if possible'' is meant that only a few family functions are exempt; these have an \code{infos} slot with component \code{hadof = FALSE}, such as \code{\link{normal.vcm}} and \code{\link{rec.normal}}, because they use the BFGS-IRLS method for computing the working weights. For these few a \code{NULL} is returned by \code{\link{hdeff}}. If the second derivatives are of interest then it is recommended that \code{crit = "c"} be added to the fitting so that a slightly more accurate model results (usually one more IRLS iteration). This is because the FD approximation is very sensitive to values of the working weights, so they need to be computed accurately. Occasionally, if the coefficient is close to 0, then its Wald statistic's second derivative may be unusually large in magnitude (this could be due to something such as roundoff error).
% detection test if possible and prints out a
% modified Wald table if some HDEs are detected.
% \code{\link{}},
% \code{\link{}},
% \code{\link{}},
% \code{\link{}},
This function is currently under development and may change a little in the near future. For HDE severity measures see \code{\link{hdeffsev}}. Yee (2018) gives details about HDE detection for the entire VGLM class, and proves a tipping point theorem with tipping points 1/4 and 3/5. The HDE severity measures allow partitioning of the parameter space into 6 regions from the interior and going outwards towards the boundary edges. It is also shown that with 1-parameter binary regression the HDE cannot occur unless the log odds ratio is at least 2.40, which corresponds to an odds ratio of 11.0 or more. }
\seealso{ \code{\link{summaryvglm}}, \code{\link{hdeffsev}}, \code{\link{vglm}}, \code{\link{lrt.stat}}, \code{\link{score.stat}}, \code{\link{wald.stat}}, \code{\link{confintvglm}}, \code{\link{profilevglm}}.
% \code{\link{multinomial}},
% \code{\link{cumulative}},
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let, data = pneumo,
            trace = TRUE, crit = "c",  # Get some more accuracy
            cumulative(reverse = TRUE, parallel = TRUE))
cumulative()@infos()$hadof  # Analytical solution implemented
hdeff(fit)
hdeff(fit, deriv = 1)  # Analytical solution
hdeff(fit, deriv = 2)  # It is a partial analytical solution
hdeff(fit, deriv = 2, se.arg = TRUE, fd.only = TRUE)  # All derivatives solved numerically by FDs
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
\keyword{regression}
VGAM/man/lrtest.Rd0000644000176200001440000001124713565414527013404 0ustar liggesusers\name{lrtest}
\alias{lrtest}
\alias{lrtest_vglm}
%\alias{update_formula}
%\alias{update_default}
\title{Likelihood Ratio Test of Nested Models}
\description{ \code{lrtest} is a generic function for carrying out likelihood ratio tests. The default method can be employed for comparing nested VGLMs (see details below). }
\usage{
lrtest(object, \dots)
lrtest_vglm(object, \dots, no.warning = FALSE, name = NULL)
}
%\method{lrtest}{default}(object, \dots, name = NULL)
%\method{lrtest}{formula}(object, \dots, data = list())
\arguments{ \item{object}{ a \code{\link{vglm}} object. See below for details. } \item{\dots}{ further object specifications passed to methods. See below for details.
} \item{no.warning}{ logical; if \code{TRUE} then no warning is issued. For example, setting \code{TRUE} might be a good idea when testing for linearity of a variable for a \code{"pvgam"} object. } \item{name}{ a function for extracting a suitable name/description from a fitted model object. By default the name is queried by calling \code{\link{formula}}. }
% \item{data}{
% a data frame containing the variables in the model.
%
% }
}
\details{ \code{lrtest} is intended to be a generic function for comparisons of models via asymptotic likelihood ratio tests. The default method consecutively compares the fitted model object \code{object} with the models passed in \code{\dots}. Instead of passing the fitted model objects in \code{\dots}, several other specifications are possible. The updating mechanism is the same as for \code{\link[lmtest]{waldtest}}: the models in \code{\dots} can be specified as integers, characters (both for terms that should be eliminated from the previous model), update formulas or fitted model objects. Except for the last case, the existence of an \code{\link[stats]{update}} method is assumed. See \code{\link[lmtest]{waldtest}} for details. Subsequently, an asymptotic likelihood ratio test for each pair of consecutive models is carried out: Twice the difference in log-likelihoods (as derived by the \code{\link[stats]{logLik}} methods) is compared with a Chi-squared distribution.
% The \code{"formula"} method fits a \code{\link{lm}}
% first and then calls the default method.
}
\note{ The code was adapted directly from \pkg{lmtest} (written by T. Hothorn, A. Zeileis, G. Millo, D. Mitchell) and made to work for VGLMs and S4. This help file also was adapted from \pkg{lmtest}. \emph{Approximate} LRTs might be applied to VGAMs, as produced by \code{\link{vgam}}, but for inference it is probably better to use \code{\link{vglm}} with regression splines (\code{\link[splines]{bs}} and \code{\link[splines]{ns}}). This methods function should not be applied to other models such as those produced by \code{\link{rrvglm}}, \code{\link{cqo}}, or \code{\link{cao}}. }
\section{Warning }{ Several \pkg{VGAM} family functions implement distributions which do not satisfy the usual regularity conditions needed for the LRT to work. No checking or warning is given for these. }
\value{ An object of class \code{"VGAManova"} which contains a slot with the log-likelihood, degrees of freedom, the difference in degrees of freedom, likelihood ratio Chi-squared statistic and corresponding p value. These are printed by \code{stats:::print.anova()}; see \code{\link[stats]{anova}}. }
\seealso{ \pkg{lmtest}, \code{\link{vglm}}, \code{\link{lrt.stat.vlm}}, \code{\link{score.stat.vlm}}, \code{\link{wald.stat.vlm}}, \code{\link{anova.vglm}}.
% \code{\link{waldtest}}
% \code{update_default},
% \code{update_formula}.
}
\examples{
set.seed(1)
pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
fit1 <- vglm(cbind(normal, mild, severe) ~ let , propodds, data = pneumo)
fit2 <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, data = pneumo)
fit3 <- vglm(cbind(normal, mild, severe) ~ let , cumulative, data = pneumo)
# Various equivalent specifications of the LR test for testing x3
(ans1 <- lrtest(fit2, fit1))
ans2 <- lrtest(fit2, 2)
ans3 <- lrtest(fit2, "x3")
ans4 <- lrtest(fit2, . ~ .
- x3)
c(all.equal(ans1, ans2), all.equal(ans1, ans3), all.equal(ans1, ans4))
# Doing it manually
(testStatistic <- 2 * (logLik(fit2) - logLik(fit1)))
(mypval <- pchisq(testStatistic, df = length(coef(fit2)) - length(coef(fit1)), lower.tail = FALSE))
(ans5 <- lrtest(fit3, fit1))  # Test proportional odds (parallelism) assumption
}
\keyword{htest}
%(testStatistic <- 2 * (logLik(fit3) - logLik(fit1)))
%(mypval <- pchisq(testStatistic, df = length(coef(fit3)) - length(coef(fit1)),
% lower.tail = FALSE))
VGAM/man/Coef.rrvglm-class.Rd0000644000176200001440000000424313565414527015354 0ustar liggesusers\name{Coef.rrvglm-class}
\docType{class}
\alias{Coef.rrvglm-class}
\title{Class ``Coef.rrvglm'' }
\description{ The most pertinent matrices and other quantities pertaining to an RR-VGLM. }
\section{Objects from the Class}{ Objects can be created by calls of the form \code{Coef(object, ...)} where \code{object} is an object of class \code{rrvglm} (see \code{\link{rrvglm-class}}). In this document, \eqn{M} is the number of linear predictors and \eqn{n} is the number of observations. }
\section{Slots}{ \describe{ \item{\code{A}:}{Of class \code{"matrix"}, \bold{A}. } \item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}. } \item{\code{C}:}{Of class \code{"matrix"}, \bold{C}. } \item{\code{Rank}:}{The rank of the RR-VGLM. } \item{\code{colx1.index}:}{Index of the columns of the \code{"vlm"}-type model matrix corresponding to the variables in \bold{x1}. These correspond to \bold{B1}. } \item{\code{colx2.index}:}{ Index of the columns of the \code{"vlm"}-type model matrix corresponding to the variables in \bold{x2}. These correspond to the reduced-rank regression. } \item{\code{Atilde}:}{Object of class \code{"matrix"}, the \bold{A} matrix with the corner rows removed. Thus each of the elements has been estimated. This matrix is returned only if corner constraints were used. } } }
%\section{Methods}{
%No methods defined with class "Coef.rrvglm" in the signature.
%}
\references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. }
\author{ Thomas W. Yee }
%\note{ ~~further notes~~ }
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{ \code{\link{Coef.rrvglm}}, \code{\link{rrvglm}}, \code{\link{rrvglm-class}}, \code{print.Coef.rrvglm}. }
\examples{
# Rank-1 stereotype model of Anderson (1984)
pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, data = pneumo)
coef(fit, matrix = TRUE)
Coef(fit)
# print(Coef(fit), digits = 3)
}
\keyword{classes}
VGAM/man/waitakere.Rd0000644000176200001440000000330213565414527014034 0ustar liggesusers\name{waitakere}
\alias{waitakere}
\docType{data}
\title{Waitakere Ranges Data}
\description{ The \code{waitakere} data frame has 579 rows and 18 columns. Altitude is explanatory, and there are binary responses (presence/absence = 1/0 respectively) for 17 plant species.
}
\usage{data(waitakere)}
\format{ This data frame contains the following columns:
\describe{
\item{agaaus}{Agathis australis, or Kauri}
\item{beitaw}{Beilschmiedia tawa, or Tawa}
\item{corlae}{Corynocarpus laevigatus}
\item{cyadea}{Cyathea dealbata}
\item{cyamed}{Cyathea medullaris}
\item{daccup}{Dacrydium cupressinum}
\item{dacdac}{Dacrycarpus dacrydioides}
\item{eladen}{Elaecarpus dentatus}
\item{hedarb}{Hedycarya arborea}
\item{hohpop}{Species name unknown}
\item{kniexc}{Knightia excelsa, or Rewarewa}
\item{kuneri}{Kunzea ericoides}
\item{lepsco}{Leptospermum scoparium}
\item{metrob}{Metrosideros robusta}
\item{neslan}{Nestegis lanceolata}
\item{rhosap}{Rhopalostylis sapida}
\item{vitluc}{Vitex lucens, or Puriri}
\item{altitude}{meters above sea level} } }
\details{ The data were collected from the Waitakere Ranges, a small forest in northern Auckland, New Zealand. At 579 sites in the forest, the presence/absence of 17 plant species was recorded, as well as the altitude. Each site had an area of 200\eqn{m^2}{m^2}. }
\source{ Dr Neil Mitchell, University of Auckland. }
%\references{
%None.
%}
\seealso{ \code{\link{hunua}}. }
\examples{
fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, waitakere)
head(predict(fit, waitakere, type = "response"))
\dontrun{ plot(fit, se = TRUE, lcol = "orange", scol = "blue") }
}
\keyword{datasets}
VGAM/man/hypersecant.Rd0000644000176200001440000000627413565414527014420 0ustar liggesusers\name{hypersecant}
\alias{hypersecant}
\alias{hypersecant01}
\alias{nef.hs}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Hyperbolic Secant Regression Family Function }
\description{ Estimation of the parameter of the hyperbolic secant distribution. }
\usage{
hypersecant(link.theta = extlogitlink(min = -pi/2, max = pi/2), init.theta = NULL)
hypersecant01(link.theta = extlogitlink(min = -pi/2, max = pi/2), init.theta = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ \item{link.theta}{ Parameter link function applied to the parameter \eqn{\theta}{theta}. See \code{\link{Links}} for more choices. } \item{init.theta}{ Optional initial value for \eqn{\theta}{theta}. If failure to converge occurs, try some other value. The default means an initial value is determined internally. } }
\details{ The probability density function of the hyperbolic secant distribution is given by
\deqn{f(y;\theta) = \exp(\theta y + \log(\cos(\theta ))) / (2 \cosh(\pi y/2)),}{%
f(y; theta) = exp(theta*y + log(cos(theta))) / (2*cosh(pi*y/2)),}
for parameter \eqn{-\pi/2 < \theta < \pi/2}{-pi/2 < theta < pi/2} and all real \eqn{y}. The mean of \eqn{Y} is \eqn{\tan(\theta)}{tan(theta)} (returned as the fitted values). Morris (1982) calls this model NEF-HS (Natural Exponential Family-Hyperbolic Secant). It is used to generate NEFs, giving rise to the class of NEF-GHS (G for Generalized). Another parameterization is used for \code{hypersecant01()}: let \eqn{Y = (logit U) / \pi}{Y = (logit U) / pi}. Then this uses
\deqn{f(u;\theta)=(\cos(\theta)/\pi) \times u^{-0.5+\theta/\pi} \times (1-u)^{-0.5-\theta/\pi},}{%
f(u;theta) = (cos(theta)/pi) * u^(-0.5+theta/pi) * (1-u)^(-0.5-theta/pi),}
for parameter \eqn{-\pi/2 < \theta < \pi/2}{-pi/2 < theta < pi/2} and \eqn{0 < u < 1}. Then the mean of \eqn{U} is \eqn{0.5 + \theta/\pi}{0.5 + theta/pi} (returned as the fitted values) and the variance is \eqn{(\pi^2 - 4 \theta^2) / (8\pi^2)}{(pi^2 - 4*theta^2) / (8*pi^2)}. For both parameterizations, Newton-Raphson is the same as Fisher scoring.
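% A quick numerical sketch of the two means stated above
% (illustrative only):
% theta <- 0.3
% tan(theta)        # Mean of Y under hypersecant()
% 0.5 + theta / pi  # Mean of U under hypersecant01()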
} \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Jorgensen, B. (1997) \emph{The Theory of Dispersion Models}. London: Chapman & Hall. % p.101, Eqn (3.37) for hypersecant(). % p.101, Eqn (3.38) for hypersecant01(). Morris, C. N. (1982) Natural exponential families with quadratic variance functions. \emph{The Annals of Statistics}, \bold{10}(1), 65--80. } \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link{extlogitlink}}. % \code{\link{nefghs}}, } \examples{ hdata <- data.frame(x2 = rnorm(nn <- 200)) hdata <- transform(hdata, y = rnorm(nn)) # Not very good data! fit1 <- vglm(y ~ x2, hypersecant, data = hdata, trace = TRUE, crit = "coef") coef(fit1, matrix = TRUE) fit1@misc$earg # Not recommended: fit2 <- vglm(y ~ x2, hypersecant(link = "identitylink"), data = hdata, trace = TRUE) coef(fit2, matrix = TRUE) fit2@misc$earg } \keyword{models} \keyword{regression} VGAM/man/genrayleighUC.Rd0000644000176200001440000000461013565414527014611 0ustar liggesusers\name{genray} \alias{genray} \alias{dgenray} \alias{pgenray} \alias{qgenray} \alias{rgenray} \title{The Generalized Rayleigh Distribution} \description{ Density, distribution function, quantile function and random generation for the generalized Rayleigh distribution. } \usage{ dgenray(x, scale = 1, shape, log = FALSE) pgenray(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qgenray(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rgenray(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{scale, shape}{ positive scale and shape parameters. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dgenray} gives the density, \code{pgenray} gives the distribution function, \code{qgenray} gives the quantile function, and \code{rgenray} generates random deviates. } \author{ Kai Huang and J. G. Lauder and T. W. Yee } \details{ See \code{\link{genrayleigh}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } \note{ We define \code{scale} as the reciprocal of the scale parameter used by Kundu and Raqab (2005). } \seealso{ \code{\link{genrayleigh}}, \code{\link{rayleigh}}. 
} \examples{ \dontrun{ shape <- 0.5; Scale <- 1; nn <- 501 x <- seq(-0.10, 3.0, len = nn) plot(x, dgenray(x, shape, scale = Scale), type = "l", las = 1, ylim = c(0, 1.2), ylab = paste("[dp]genray(shape = ", shape, ", scale = ", Scale, ")"), col = "blue", cex.main = 0.8, main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles") lines(x, pgenray(x, shape, scale = Scale), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qgenray(probs, shape, scale = Scale) lines(Q, dgenray(Q, shape, scale = Scale), col = "purple", lty = 3, type = "h") lines(Q, pgenray(Q, shape, scale = Scale), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) max(abs(pgenray(Q, shape, scale = Scale) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/expexpff.Rd0000644000176200001440000001201713565414527013710 0ustar liggesusers\name{expexpff} \alias{expexpff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Exponentiated Exponential Distribution } \description{ Estimates the two parameters of the exponentiated exponential distribution by maximum likelihood estimation. } \usage{ expexpff(lrate = "loglink", lshape = "loglink", irate = NULL, ishape = 1.1, tolerance = 1.0e-6, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape, lrate}{ Parameter link functions for the \eqn{\alpha}{shape} and \eqn{\lambda}{rate} parameters. See \code{\link{Links}} for more choices. The defaults ensure both parameters are positive. } \item{ishape}{ Initial value for the \eqn{\alpha}{shape} parameter. If convergence fails try setting a different value for this argument. } \item{irate}{ Initial value for the \eqn{\lambda}{rate} parameter. By default, an initial value is chosen internally using \code{ishape}. } \item{tolerance}{ Numeric. Small positive value for testing whether values are close enough to 1 and 2. } \item{zero}{ An integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The default is none of them. If used, choose one value from the set \{1,2\}. } } \details{ The exponentiated exponential distribution is an alternative to the Weibull and the gamma distributions. The formula for the density is \deqn{f(y;\lambda,\alpha) = \alpha \lambda (1-\exp(-\lambda y))^{\alpha-1} \exp(-\lambda y) }{% f(y;rate,shape) = shape rate (1-\exp(-rate y))^(shape-1) \exp(-rate y) } where \eqn{y>0}, \eqn{\lambda>0}{rate>0} and \eqn{\alpha>0}{shape>0}. The mean of \eqn{Y} is \eqn{(\psi(\alpha+1)-\psi(1))/\lambda}{(psi(shape+1)-psi(1))/rate} (returned as the fitted values) where \eqn{\psi}{psi} is the digamma function. The variance of \eqn{Y} is \eqn{(\psi'(1)-\psi'(\alpha+1))/\lambda^2}{(psi'(1)-psi'(shape+1))/ rate^2} where \eqn{\psi'}{psi'} is the trigamma function. This distribution has been called the two-parameter generalized exponential distribution by Gupta and Kundu (2006). A special case of the exponentiated exponential distribution: \eqn{\alpha=1}{shape=1} is the exponential distribution. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Gupta, R. D. and Kundu, D. (2001) Exponentiated exponential family: an alternative to gamma and Weibull distributions, \emph{Biometrical Journal}, \bold{43}, 117--130. Gupta, R. D. and Kundu, D. 
(2006) On the comparison of Fisher information of the Weibull and GE distributions, \emph{Journal of Statistical Planning and Inference}, \bold{136}, 3130--3144. } \author{ T. W. Yee } \note{ Fisher scoring is used; however, convergence is usually very slow. This is a good sign that there is a bug, but I have yet to check that the expected information is correct. Also, I have yet to implement Type-I right censored data using the results of Gupta and Kundu (2006). Another algorithm for fitting this model is implemented in \code{\link{expexpff1}}. } \section{Warning }{ Practical experience shows that reasonably good initial values really help. In particular, try setting different values for the \code{ishape} argument if numerical problems are encountered or failure to converge occurs. Even if convergence occurs, try perturbing the initial value to make sure the global solution is obtained and not a local solution. The algorithm may fail if the estimate of the shape parameter is too close to unity. } \seealso{ \code{\link{expexpff1}}, \code{\link{gammaR}}, \code{\link{weibullR}}, \code{\link{CommonVGAMffArguments}}. } \examples{ # A special case: exponential data edata <- data.frame(y = rexp(n <- 1000)) fit <- vglm(y ~ 1, fam = expexpff, data = edata, trace = TRUE, maxit = 99) coef(fit, matrix = TRUE) Coef(fit) # Ball bearings data (number of million revolutions before failure) edata <- data.frame(bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60, 48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64, 68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92, 128.04, 173.40)) fit <- vglm(bbearings ~ 1, fam = expexpff(irate = 0.05, ish = 5), trace = TRUE, maxit = 300, data = edata) coef(fit, matrix = TRUE) Coef(fit) # Authors get c(rate=0.0314, shape=5.2589) logLik(fit) # Authors get -112.9763 # Failure times of the air-conditioning system of an airplane eedata <- data.frame(acplane = c(23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95)) fit <- vglm(acplane ~ 1, fam = expexpff(ishape = 0.8, irate = 0.15), trace = TRUE, maxit = 99, data = eedata) coef(fit, matrix = TRUE) Coef(fit) # Authors get c(rate=0.0145, shape=0.8130) logLik(fit) # Authors get log-lik -152.264 } \keyword{models} \keyword{regression} VGAM/man/tobit.Rd0000644000176200001440000002562113565414527013211 0ustar liggesusers\name{tobit} \alias{tobit} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Tobit Model } \description{ Fits a Tobit model. } \usage{ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loglink", imu = NULL, isd = NULL, type.fitted = c("uncensored", "censored", "mean.obs"), byrow.arg = FALSE, imethod = 1, zero = "sd") } % 20151024 yettodo: maybe add a new option to 'type.fitted': % type.fitted = c("uncensored", "censored", "mean.obs", "truncated"), % where "truncated" is only concerned with values of y > Lower; % values of y <= Lower are ignored. % % % % % %- maybe also 'usage' for other objects documented here. \arguments{ \item{Lower}{ Numeric. It is the value \eqn{L} described below. Any value of the linear model \eqn{x_i^T \beta}{x_i^T beta} that is less than this lower bound is assigned this value. Hence this should be the smallest possible value in the response variable. May be a vector (see below for more information). } \item{Upper}{ Numeric. It is the value \eqn{U} described below. Any value of the linear model \eqn{x_i^T \beta}{x_i^T beta} that is greater than this upper bound is assigned this value.
Hence this should be the largest possible value in the response variable. May be a vector (see below for more information). } \item{lmu, lsd}{ Parameter link functions for the mean and standard deviation parameters. See \code{\link{Links}} for more choices. The standard deviation is a positive quantity, therefore a log link is its default. } \item{imu, isd, byrow.arg}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{type.fitted}{ Type of fitted value returned. The first choice is the default and is the ordinary uncensored or unbounded linear model. If \code{"censored"} then the fitted values are constrained to lie in the interval \eqn{[L, U]}. If \code{"mean.obs"} then the mean of the observations is returned; this is a doubly truncated normal distribution augmented by point masses at the truncation points (see \code{\link{dtobit}}). See \code{\link{CommonVGAMffArguments}} for more information. } \item{imethod}{ Initialization method. Either 1 or 2 or 3, this specifies some methods for obtaining initial values for the parameters. See \code{\link{CommonVGAMffArguments}} for information. } \item{zero}{ A vector, e.g., containing the value 1 or 2. If so, the mean or standard deviation respectively are modelled as an intercept-only. Setting \code{zero = NULL} means both linear/additive predictors are modelled as functions of the explanatory variables. See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The Tobit model can be written \deqn{y_i^* = x_i^T \beta + \varepsilon_i}{% y_i^* = x_i^T beta + e_i} where the \eqn{e_i \sim N(0,\sigma^2)}{e_i ~ N(0,sigma^2)} independently and \eqn{i=1,\ldots,n}{i=1,...,n}. However, we measure \eqn{y_i = y_i^*} only if \eqn{y_i^* > L} and \eqn{y_i^* < U} for some cutpoints \eqn{L} and \eqn{U}. Otherwise we let \eqn{y_i=L} or \eqn{y_i=U}, whichever is closer. The Tobit model is thus a multiple linear regression but with censored responses if they fall below or above certain cutpoints. The defaults for \code{Lower} and \code{Upper} and \code{lmu} correspond to the \emph{standard} Tobit model. Fisher scoring is used for the standard and nonstandard models. By default, the mean \eqn{x_i^T \beta}{x_i^T beta} is the first linear/additive predictor, and the log of the standard deviation is the second linear/additive predictor. The Fisher information matrix for uncensored data is diagonal. The fitted values are the estimates of \eqn{x_i^T \beta}{x_i^T beta}. } \section{Warning }{ If values of the response and \code{Lower} and/or \code{Upper} are not integers then there is the danger that the value is wrongly interpreted as uncensored. For example, if the first 10 values of the response were \code{runif(10)} and \code{Lower} was assigned these values then testing \code{y[1:10] == Lower[1:10]} is numerically fraught. Currently, if any \code{y < Lower} or \code{y > Upper} then a warning is issued. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Tobin, J. (1958) Estimation of relationships for limited dependent variables. \emph{Econometrica} \bold{26}, 24--36. } \author{ Thomas W. Yee } \note{ The response can be a matrix. If so, then \code{Lower} and \code{Upper} are recycled into a matrix with the number of columns equal to the number of responses, and the recycling is done row-wise \emph{if} \code{byrow.arg = TRUE}. The default order is as \code{\link[base]{matrix}}, which is \code{byrow.arg = FALSE}.
For example, these are returned in \code{fit4@misc$Lower} and \code{fit4@misc$Upper} below. If there is no censoring then \code{\link{uninormal}} is recommended instead. Any value of the response less than \code{Lower} or greater than \code{Upper} will be assigned the value \code{Lower} and \code{Upper} respectively, and a warning will be issued. The fitted object has components \code{censoredL} and \code{censoredU} in the \code{extra} slot which specify whether observations are censored in that direction. The function \code{\link{cens.normal}} is an alternative to \code{tobit()}. % 20150417; McClelland Kemp bug: When obtaining initial values, if the algorithm would otherwise want to fit an underdetermined system of equations, then it uses the entire data set instead. This might result in rather poor quality initial values, and consequently, monitoring convergence is advised. } \seealso{ \code{\link{rtobit}}, \code{\link{cens.normal}}, \code{\link{uninormal}}, \code{\link{double.cens.normal}}, \code{\link{posnormal}}, \code{\link{CommonVGAMffArguments}}, \code{\link{mills.ratio}}, \code{\link{margeff}}, \code{\link[stats:Normal]{rnorm}}. } \examples{ # Here, fit1 is a standard Tobit model and fit2 is a nonstandard Tobit model tdata <- data.frame(x2 = seq(-1, 1, length = (nn <- 100))) set.seed(1) Lower <- 1; Upper <- 4 # For the nonstandard Tobit model tdata <- transform(tdata, Lower.vec = rnorm(nn, Lower, 0.5), Upper.vec = rnorm(nn, Upper, 0.5)) meanfun1 <- function(x) 0 + 2*x meanfun2 <- function(x) 2 + 2*x meanfun3 <- function(x) 2 + 2*x meanfun4 <- function(x) 3 + 2*x tdata <- transform(tdata, y1 = rtobit(nn, mean = meanfun1(x2)), # Standard Tobit model y2 = rtobit(nn, mean = meanfun2(x2), Lower = Lower, Upper = Upper), y3 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec), y4 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec)) with(tdata, table(y1 == 0)) # How many censored values? with(tdata, table(y2 == Lower | y2 == Upper)) # How many censored values? with(tdata, table(attr(y2, "cenL"))) with(tdata, table(attr(y2, "cenU"))) fit1 <- vglm(y1 ~ x2, tobit, data = tdata, trace = TRUE) coef(fit1, matrix = TRUE) summary(fit1) fit2 <- vglm(y2 ~ x2, tobit(Lower = Lower, Upper = Upper, type.f = "cens"), data = tdata, trace = TRUE) table(fit2@extra$censoredL) table(fit2@extra$censoredU) coef(fit2, matrix = TRUE) fit3 <- vglm(y3 ~ x2, tobit(Lower = with(tdata, Lower.vec), Upper = with(tdata, Upper.vec), type.f = "cens"), data = tdata, trace = TRUE) table(fit3@extra$censoredL) table(fit3@extra$censoredU) coef(fit3, matrix = TRUE)
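# A rough sanity check (extra to the original examples): the observed
# proportions of censored values should roughly match the probabilities
# implied by the latent normal model, i.e., pnorm((L - mu)/sd) below
# and pnorm((mu - U)/sd) above.
eta3 <- predict(fit3)  # Linear predictors: mu and loglink(sd)
mu3 <- eta3[, 1]
sd3 <- exp(eta3[, 2])
c(mean(pnorm((with(tdata, Lower.vec) - mu3) / sd3)),
  mean(fit3@extra$censoredL))  # Should be roughly equal
c(mean(pnorm((mu3 - with(tdata, Upper.vec)) / sd3)),
  mean(fit3@extra$censoredU))  # Should be roughly equal
# fit4 is like fit3 but with a 2-column response and the default
# type.fitted ("uncensored").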
fit4 <- vglm(cbind(y3, y4) ~ x2, tobit(Lower = rep(with(tdata, Lower.vec), each = 2), Upper = rep(with(tdata, Upper.vec), each = 2), byrow.arg = TRUE), data = tdata, crit = "coeff", trace = TRUE) head(fit4@extra$censoredL) # A matrix head(fit4@extra$censoredU) # A matrix head(fit4@misc$Lower) # A matrix head(fit4@misc$Upper) # A matrix coef(fit4, matrix = TRUE) \dontrun{ # Plot fit1--fit4 par(mfrow = c(2, 2)) plot(y1 ~ x2, tdata, las = 1, main = "Standard Tobit model", col = as.numeric(attr(y1, "cenL")) + 3, pch = as.numeric(attr(y1, "cenL")) + 1) legend(x = "topleft", leg = c("censored", "uncensored"), pch = c(2, 1), col = c("blue", "green")) legend(-1.0, 2.5, c("Truth", "Estimate", "Naive"), col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2)) lines(meanfun1(x2) ~ x2, tdata, col = "purple", lwd = 2) lines(fitted(fit1) ~ x2, tdata, col = "orange", lwd = 2, lty = 2) lines(fitted(lm(y1 ~ x2, tdata)) ~ x2, tdata, col = "black", lty = 2, lwd = 2) # This is simplest but wrong! plot(y2 ~ x2, data = tdata, las = 1, main = "Tobit model", col = as.numeric(attr(y2, "cenL")) + 3 + as.numeric(attr(y2, "cenU")), pch = as.numeric(attr(y2, "cenL")) + 1 + as.numeric(attr(y2, "cenU"))) legend(x = "topleft", leg = c("censored", "uncensored"), pch = c(2, 1), col = c("blue", "green")) legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"), col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2)) lines(meanfun2(x2) ~ x2, tdata, col = "purple", lwd = 2) lines(fitted(fit2) ~ x2, tdata, col = "orange", lwd = 2, lty = 2) lines(fitted(lm(y2 ~ x2, tdata)) ~ x2, tdata, col = "black", lty = 2, lwd = 2) # This is simplest but wrong! plot(y3 ~ x2, data = tdata, las = 1, main = "Tobit model with nonconstant censor levels", col = as.numeric(attr(y3, "cenL")) + 2 + as.numeric(attr(y3, "cenU") * 2), pch = as.numeric(attr(y3, "cenL")) + 1 + as.numeric(attr(y3, "cenU") * 2)) legend(x = "topleft", leg = c("censoredL", "censoredU", "uncensored"), pch = c(2, 3, 1), col = c(3, 4, 2)) legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"), col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2)) lines(meanfun3(x2) ~ x2, tdata, col = "purple", lwd = 2) lines(fitted(fit3) ~ x2, tdata, col = "orange", lwd = 2, lty = 2) lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black", lty = 2, lwd = 2) # This is simplest but wrong! plot(y3 ~ x2, data = tdata, las = 1, main = "Tobit model with nonconstant censor levels", col = as.numeric(attr(y3, "cenL")) + 2 + as.numeric(attr(y3, "cenU") * 2), pch = as.numeric(attr(y3, "cenL")) + 1 + as.numeric(attr(y3, "cenU") * 2)) legend(x = "topleft", leg = c("censoredL", "censoredU", "uncensored"), pch = c(2, 3, 1), col = c(3, 4, 2)) legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"), col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2)) lines(meanfun3(x2) ~ x2, data = tdata, col = "purple", lwd = 2) lines(fitted(fit4)[, 1] ~ x2, tdata, col = "orange", lwd = 2, lty = 2) lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, data = tdata, col = "black", lty = 2, lwd = 2) # This is simplest but wrong! } } \keyword{models} \keyword{regression} VGAM/man/gpdUC.Rd0000644000176200001440000000754413565414527013076 0ustar liggesusers\name{gpdUC} \alias{gpdUC} \alias{dgpd} \alias{pgpd} \alias{qgpd} \alias{rgpd} \title{The Generalized Pareto Distribution } \description{ Density, distribution function, quantile function and random generation for the generalized Pareto distribution (GPD) with location parameter \code{location}, scale parameter \code{scale} and shape parameter \code{shape}. 
} \usage{ dgpd(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 = sqrt(.Machine$double.eps)) pgpd(q, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE) qgpd(p, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE) rgpd(n, location = 0, scale = 1, shape = 0) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required.} \item{location}{the location parameter \eqn{\mu}{mu}.} \item{scale}{the (positive) scale parameter \eqn{\sigma}{sigma}.} \item{shape}{the shape parameter \eqn{\xi}{xi}.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Uniform]{punif}} or \code{\link[stats:Uniform]{qunif}}. } \item{tolshape0}{ Positive numeric. Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero. If the absolute value of the estimate of \eqn{\xi}{xi} is less than this value then it will be assumed zero and an exponential distribution will be used. } % \item{oobounds.log, giveWarning}{ % Numeric and logical. % The GPD distribution has support in the region satisfying % \code{(x-location)/scale > 0} % and % \code{1+shape*(x-location)/scale > 0}. % Outside that region, the % logarithm of the density is assigned \code{oobounds.log}, which % equates to a zero density. % It should not be assigned a positive number, and ideally is very negative. % Since \code{\link{gpd}} uses this function it is necessary % to return a finite value outside this region so as to allow % for half-stepping. Both arguments are in support of this. % This argument and others match those of \code{\link{gpd}}. % } } \value{ \code{dgpd} gives the density, \code{pgpd} gives the distribution function, \code{qgpd} gives the quantile function, and \code{rgpd} generates random deviates. } \references{ Coles, S. (2001) \emph{An Introduction to Statistical Modeling of Extreme Values}. London: Springer-Verlag. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{gpd}}, the \pkg{VGAM} family function for estimating the two parameters by maximum likelihood estimation, for formulae and other details. Apart from \code{n}, all the above arguments may be vectors and are recycled to the appropriate length if necessary. } \note{ The default values of all three parameters, especially \eqn{\xi = 0}{xi = 0}, mean that the default distribution is the exponential. Currently, these functions have different argument names compared with those in the \pkg{evd} package. } \seealso{ \code{\link{gpd}}, \code{\link[stats]{Exponential}}.
} \examples{ \dontrun{ loc <- 2; sigma <- 1; xi <- -0.4 x <- seq(loc - 0.2, loc + 3, by = 0.01) plot(x, dgpd(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0, 1), main = "Blue is density, red is cumulative distribution function", sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1) abline(h = 0, col = "blue", lty = 2) lines(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), dgpd(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi), col = "purple", lty = 3, type = "h") lines(x, pgpd(x, loc, sigma, xi), type = "l", col = "red") abline(h = 0, lty = 2) pgpd(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi) } } \keyword{distribution} % oobounds.log = -Inf, giveWarning = FALSE VGAM/man/makehamUC.Rd0000644000176200001440000000534013565414527013717 0ustar liggesusers\name{Makeham} \alias{Makeham} \alias{dmakeham} \alias{pmakeham} \alias{qmakeham} \alias{rmakeham} \title{The Makeham Distribution} \description{ Density, cumulative distribution function, quantile function and random generation for the Makeham distribution. } \usage{ dmakeham(x, scale = 1, shape, epsilon = 0, log = FALSE) pmakeham(q, scale = 1, shape, epsilon = 0, lower.tail = TRUE, log.p = FALSE) qmakeham(p, scale = 1, shape, epsilon = 0, lower.tail = TRUE, log.p = FALSE) rmakeham(n, scale = 1, shape, epsilon = 0) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{scale, shape}{positive scale and shape parameters. } \item{epsilon}{another parameter. Must be non-negative. See below. } } \value{ \code{dmakeham} gives the density, \code{pmakeham} gives the cumulative distribution function, \code{qmakeham} gives the quantile function, and \code{rmakeham} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{makeham}} for details. The default value of \code{epsilon = 0} corresponds to the Gompertz distribution. The function \code{\link{pmakeham}} uses \code{\link{lambertW}}. } \references{ Jodra, P. (2009) A closed-form expression for the quantile function of the Gompertz-Makeham distribution. \emph{Mathematics and Computers in Simulation}, \bold{79}, 3069--3075. } %\note{ % %} \seealso{ \code{\link{makeham}}, \code{\link{lambertW}}. 
} \examples{ probs <- seq(0.01, 0.99, by = 0.01) Shape <- exp(-1); Scale <- exp(1); eps = Epsilon <- exp(-1) max(abs(pmakeham(qmakeham(p = probs, sca = Scale, Shape, eps = Epsilon), sca = Scale, Shape, eps = Epsilon) - probs)) # Should be 0 \dontrun{ x <- seq(-0.1, 2.0, by = 0.01); plot(x, dmakeham(x, sca = Scale, Shape, eps = Epsilon), type = "l", main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue", las = 1, ylab = "") abline(h = 0, col = "blue", lty = 2) lines(x, pmakeham(x, sca = Scale, Shape, eps = Epsilon), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qmakeham(probs, sca = Scale, Shape, eps = Epsilon) lines(Q, dmakeham(Q, sca = Scale, Shape, eps = Epsilon), col = "purple", lty = 3, type = "h") pmakeham(Q, sca = Scale, Shape, eps = Epsilon) - probs # Should be all zero abline(h = probs, col = "purple", lty = 3) } } \keyword{distribution} VGAM/man/crashes.Rd0000644000176200001440000000740513565414527013520 0ustar liggesusers\name{crashes} \alias{crashi} \alias{crashf} \alias{crashtr} \alias{crashmc} \alias{crashbc} \alias{crashp} \alias{alcoff} \alias{alclevels} \docType{data} \title{Crashes on New Zealand Roads in 2009} \description{ A variety of reported crash data cross-classified by time (hour of the day) and day of the week, accumulated over 2009. These include fatalities and injuries (by car), trucks, motorcycles, bicycles and pedestrians. There are some alcohol-related data too. } \usage{ data(crashi) data(crashf) data(crashtr) data(crashmc) data(crashbc) data(crashp) data(alcoff) data(alclevels) } \format{ Data frames with hourly times as rows and days of the week as columns. The \code{alclevels} dataset has hourly times and alcohol levels. \describe{ \item{Mon, Tue, Wed, Thu, Fri, Sat, Sun}{ Day of the week. } \item{0-30, 31-50, 51-80, 81-100, 101-120, 121-150, 151-200, 201-250, 251-300, 301-350, 350+}{ Blood alcohol level (milligrams alcohol per 100 millilitres of blood). % Aggregate number of alcohol offenders or number of dead % drivers/passengers on NZ roads. } } } \details{ Each cell is the aggregate number of crashes reported at each hour-day combination, over the 2009 calendar year. The \code{rownames} of each data frame are the start times (hourly from midnight onwards) on a 24-hour clock, e.g., 21 means 9.00pm to 9.59pm. For crashes, \code{crashi} are the number of injuries by car, \code{crashf} are the number of fatalities by car (not included in \code{crashi}), \code{crashtr} are the number of crashes involving trucks, \code{crashmc} are the number of crashes involving motorcyclists, \code{crashbc} are the number of crashes involving bicycles, and \code{crashp} are the number of crashes involving pedestrians. For alcohol-related offences, \code{alcoff} are the number of alcohol offenders from breath screening of drivers, and \code{alclevels} are the blood alcohol levels of fatally injured drivers. } \source{ \code{http://www.transport.govt.nz/research/Pages/Motor-Vehicle-Crashes-in-New-Zealand-2009.aspx}. Thanks to Warwick Goold and Alfian F. Hadi for assistance. % \url{http://www.transport.govt.nz/research/Pages/Motor-Vehicle-Crashes-in-New-Zealand-2009.aspx}. } \references{ Motor Vehicles Crashes in New Zealand 2009; Statistical Statement Calendar Year 2009. Ministry of Transport, NZ Government; Yearly Report 2010. ISSN: 1176-3949 } \seealso{ \code{\link[VGAM]{rrvglm}}, \code{\link[VGAM]{rcim}}, \code{\link[VGAM]{grc}}.
} \examples{ \dontrun{ plot(unlist(alcoff), type = "l", frame.plot = TRUE, axes = FALSE, col = "blue", bty = "o", main = "Alcoholic offenders on NZ roads, aggregated over 2009", sub = "Vertical lines at midnight (purple) and noon (orange)", xlab = "Day/hour", ylab = "Number of offenders") axis(1, at = 1 + (0:6) * 24 + 12, labels = colnames(alcoff)) axis(2, las = 1) axis(3:4, labels = FALSE, tick = FALSE) abline(v = sort(1 + c((0:7) * 24, (0:6) * 24 + 12)), lty = "dashed", col = c("purple", "orange")) } # Goodman's RC models \dontrun{ fitgrc1 <- grc(alcoff) # Rank-1 model fitgrc2 <- grc(alcoff, Rank = 2, Corner = FALSE, Uncor = TRUE) Coef(fitgrc2) } \dontrun{ biplot(fitgrc2, scaleA = 2.3, Ccol = "blue", Acol = "orange", Clabels = as.character(1:23), xlim = c(-1.3, 2.3), ylim = c(-1.2, 1)) } } \keyword{datasets} % % %\alias{crashi} Table 18, p.39 %\alias{crashf} Table 19, p.40 %\alias{crashtr} Table 30, p.66 %\alias{crashmc} Table 35, p.72 %\alias{crashbc} Table 40, p.77 %\alias{crashp} Table 45, p.84 %\alias{alcoff} Table 3, p.121 %\alias{alclevels} Table 2, p.132 % print(Coef(fitgrc2), digits = 2) VGAM/man/posnegbinomial.Rd0000644000176200001440000002053213565414527015072 0ustar liggesusers\name{posnegbinomial} \alias{posnegbinomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive Negative Binomial Distribution Family Function } \description{ Maximum likelihood estimation of the two parameters of a positive negative binomial distribution. } \usage{ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"), mds.min = 0.001, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-07, max.support = 4000, max.chunk.MB = 30, lmunb = "loglink", lsize = "loglink", imethod = 1, imunb = NULL, iprobs.y = NULL, gprobs.y = ppoints(8), isize = NULL, gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmunb}{ Link function applied to the \code{munb} parameter, which is the mean \eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution. See \code{\link{Links}} for more choices. } \item{lsize}{ Parameter link function applied to the dispersion parameter, called \code{k}. See \code{\link{Links}} for more choices. } \item{isize}{ Optional initial value for \code{k}, an index parameter. The value \code{1/k} is known as a dispersion parameter. If failure to converge occurs, try different values (and/or use \code{imethod}). If necessary this vector is recycled to length equal to the number of responses. A value \code{NULL} means an initial value for each response is computed internally using a range of values. } \item{nsimEIM, zero, eps.trig}{ See \code{\link{CommonVGAMffArguments}}. } \item{mds.min, iprobs.y, cutoff.prob}{ Similar to \code{\link{negbinomial}}. } \item{imunb, max.support}{ Similar to \code{\link{negbinomial}}. } \item{max.chunk.MB, gsize.mux}{ Similar to \code{\link{negbinomial}}. } \item{imethod, gprobs.y}{ See \code{\link{negbinomial}}. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for details. } } \details{ The positive negative binomial distribution is an ordinary negative binomial distribution but with the probability of a zero response being zero. The other probabilities are scaled to sum to unity. This family function is based on \code{\link{negbinomial}} and most details can be found there. To avoid confusion, the parameter \code{munb} here corresponds to the mean of an ordinary negative binomial distribution \code{\link{negbinomial}}.
The mean of \code{posnegbinomial} is \deqn{\mu_{nb} / (1-p(0))}{% munb / (1-p(0))} where \eqn{p(0) = (k/(k + \mu_{nb}))^k}{p(0) = (k/(k + munb))^k} is the probability an ordinary negative binomial distribution has a zero value. The parameters \code{munb} and \code{k} are not independent in the positive negative binomial distribution, whereas they are in the ordinary negative binomial distribution. This function handles \emph{multiple} responses, so that a matrix can be used as the response. The number of columns is the number of species, say, and setting \code{zero = -2} means that \emph{all} species have a \code{k} equalling a (different) intercept only. } \section{Warning}{ This family function is fragile; at least two cases will lead to numerical problems. Firstly, the positive-Poisson model corresponds to \code{k} equalling infinity. If the data is positive-Poisson or close to positive-Poisson, then the estimated \code{k} will diverge to \code{Inf} or some very large value. Secondly, if the data is clustered about the value 1 because the \code{munb} parameter is close to 0 then numerical problems will also occur. Users should set \code{trace = TRUE} to monitor convergence. In the situation when both cases hold, the result returned (which will be untrustworthy) will depend on the initial values. The negative binomial distribution (NBD) is a strictly unimodal distribution. Any data set that does not exhibit a mode (in the middle) makes the estimation problem difficult. The positive NBD inherits this feature. Set \code{trace = TRUE} to monitor convergence. See the example below of a data set where \code{posnegbinomial()} fails; the so-called solution is \emph{extremely} poor. This is partly due to a lack of a unimodal shape because the frequencies only decrease. This long tail makes it very difficult to estimate the mean parameter with any certainty. As a result, the \code{size} parameter is numerically fraught. % Then trying a \code{\link{loglog}} link might help % handle this problem. This \pkg{VGAM} family function inherits the same warnings as \code{\link{negbinomial}}. And if \code{k} is much less than 1 then the estimation may be slow. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Barry, S. C. and Welsh, A. H. (2002) Generalized additive modelling and zero inflated count data. \emph{Ecological Modelling}, \bold{157}, 179--188. Williamson, E. and Bretherton, M. H. (1964) Tables of the logarithmic series distribution. \emph{Annals of Mathematical Statistics}, \bold{35}, 284--297. } \author{ Thomas W. Yee } \note{ If the estimated \eqn{k} is very large then fitting a \code{\link{pospoisson}} model is a good idea. If both \code{munb} and \eqn{k} are large then it may be necessary to decrease \code{eps.trig} and increase \code{max.support} so that the EIMs are positive-definite, e.g., \code{eps.trig = 1e-8} and \code{max.support = Inf}. } \seealso{ \code{\link{gatnbinomial.mlm}}, \code{\link{Gaitnbinom.mlm}}, \code{\link{rposnegbin}}, \code{\link{pospoisson}}, \code{\link{negbinomial}}, \code{\link{zanegbinomial}}, \code{\link[stats:NegBinomial]{rnbinom}}, \code{\link{CommonVGAMffArguments}}, \code{\link{corbet}}, \code{\link{logff}}, \code{\link{simulate.vlm}}. % \code{\link[MASS]{rnegbin}}.
} \examples{ pdata <- data.frame(x2 = runif(nn <- 1000)) pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x2), size = exp(1)), y2 = rposnegbin(nn, munb = exp(1+2*x2), size = exp(3))) fit <- vglm(cbind(y1, y2) ~ x2, posnegbinomial, data = pdata, trace = TRUE) coef(fit, matrix = TRUE) dim(depvar(fit)) # Using dim(fit@y) is not recommended # Another artificial data example pdata2 <- data.frame(munb = exp(2), size = exp(3)); nn <- 1000 pdata2 <- transform(pdata2, y3 = rposnegbin(nn, munb = munb, size = size)) with(pdata2, table(y3)) fit <- vglm(y3 ~ 1, posnegbinomial, data = pdata2, trace = TRUE) coef(fit, matrix = TRUE) with(pdata2, mean(y3)) # Sample mean head(with(pdata2, munb/(1-(size/(size+munb))^size)), 1) # Population mean head(fitted(fit), 3) head(predict(fit), 3) # Example: Corbet (1943) butterfly Malaya data fit <- vglm(ofreq ~ 1, posnegbinomial, weights = species, data = corbet) coef(fit, matrix = TRUE) Coef(fit) (khat <- Coef(fit)["size"]) pdf2 <- dposnegbin(x = with(corbet, ofreq), mu = fitted(fit), size = khat) print(with(corbet, cbind(ofreq, species, fitted = pdf2*sum(species))), dig = 1) \dontrun{ with(corbet, matplot(ofreq, cbind(species, fitted = pdf2*sum(species)), las = 1, xlab = "Observed frequency (of individual butterflies)", type = "b", ylab = "Number of species", col = c("blue", "orange"), main = "blue 1s = observed; orange 2s = fitted")) } \dontrun{ # This data (courtesy of Maxim Gerashchenko) causes posnegbinomial() to fail pnbd.fail <- data.frame( y1 = c(1:16, 18:21, 23:28, 33:38, 42, 44, 49:51, 55, 56, 58, 59, 61:63, 66, 73, 76, 94, 107, 112, 124, 190, 191, 244), ofreq = c(130, 80, 38, 23, 22, 11, 21, 14, 6, 7, 9, 9, 9, 4, 4, 5, 1, 4, 6, 1, 3, 2, 4, 3, 4, 5, 3, 1, 2, 1, 1, 4, 1, 2, 2, 1, 3, 1, 1, 2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1)) fit.fail <- vglm(y1 ~ 1, weights = ofreq, posnegbinomial, trace = TRUE, data = pnbd.fail) } } \keyword{models} \keyword{regression} % bigN = with(corbet, sum(species)) %posnegbinomial(lmunb = "loglink", lsize = "loglink", imunb = NULL, % isize = NULL, zero = "size", nsimEIM = 250, % probs.y = 0.75, cutoff.prob = 0.999, % max.support = 2000, max.chunk.MB = 30, % gsize = exp((-4):4), ishrinkage = 0.95, imethod = 1) VGAM/man/binormalUC.Rd0000644000176200001440000000743713565414527014124 0ustar liggesusers\name{Binorm} \alias{Binorm} \alias{pnorm2} \alias{dbinorm} \alias{pbinorm} \alias{rbinorm} \title{Bivariate Normal Distribution Cumulative Distribution Function} \description{ Density, cumulative distribution function and random generation for the bivariate normal distribution. } % quantile function \usage{ dbinorm(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0, log = FALSE) pbinorm(q1, q2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0) rbinorm(n, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0) pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0) } % dbinorm(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1, rho = 0, log = FALSE) \arguments{ \item{x1, x2, q1, q2}{vector of quantiles.} \item{mean1, mean2, var1, var2, cov12}{ vector of means, variances and the covariance. % standard deviations and correlation parameter. } % \item{sd1, sd2, rho}{ % vector of standard deviations and correlation parameter. % } \item{n}{number of observations. Same as \code{\link[stats]{rnorm}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } % \item{rho}{ % See \code{\link{binormal}}.
% } } \value{ \code{dbinorm} gives the density, \code{pbinorm} gives the cumulative distribution function, \code{rbinorm} generates random deviates (\eqn{n} by 2 matrix). % \code{qnorm2} gives the quantile function, and } % \author{ T. W. Yee } \details{ The default arguments correspond to the standard bivariate normal distribution with correlation parameter \eqn{\rho = 0}{rho = 0}. That is, two independent standard normal distributions. Let \code{sd1} (say) be \code{sqrt(var1)}, written \eqn{\sigma_1}{sigma_1}, etc. Then the general formula for the correlation coefficient is \eqn{\rho = cov / (\sigma_1 \sigma_2)}{rho = cov / (sigma_1 * sigma_2)} where \eqn{cov} is argument \code{cov12}. Thus if arguments \code{var1} and \code{var2} are left alone then \code{cov12} can be inputted with \eqn{\rho}{rho}. One can think of this function as an extension of \code{\link[stats]{pnorm}} to two dimensions; however, note that the argument names have been changed for \pkg{VGAM} 0.9-1 onwards. } \references{ \code{pbinorm()} is based on Donnelly (1973); the code was translated from FORTRAN to ratfor using struct, and then from ratfor to C manually. The function was originally called \code{bivnor}, and TWY only wrote a wrapper function. Donnelly, T. G. (1973) Algorithm 462: Bivariate Normal Distribution. \emph{Communications of the ACM}, \bold{16}, 638. % It gives the probability that a bivariate normal exceeds (ah, ak). % Here, gh and gk are 0.5 times the right tail areas of ah, ak under a N(0, 1) % distribution. } \section{Warning}{ Being based on an approximation, the results of \code{pbinorm()} may be negative! Also, \code{pnorm2()} should be withdrawn soon; use \code{pbinorm()} instead because it is identical. % this function used to be called \code{pnorm2()}. % \code{dbinorm()}'s arguments might change! % Currently they differ from \code{pbinorm()} % and \code{rbinorm()}, so use the full argument name % to future-proof possible changes! } \note{ For \code{rbinorm()}, if the \eqn{i}th variance-covariance matrix is not positive-definite then the \eqn{i}th row is all \code{NA}s. } \seealso{ \code{\link[stats]{pnorm}}, \code{\link{binormal}}, \code{\link{uninormal}}. } \examples{ yvec <- c(-5, -1.96, 0, 1.96, 5) ymat <- expand.grid(yvec, yvec) cbind(ymat, pbinorm(ymat[, 1], ymat[, 2])) \dontrun{ rhovec <- seq(-0.95, 0.95, by = 0.01) plot(rhovec, pbinorm(0, 0, cov12 = rhovec), type = "l", col = "blue", las = 1) abline(v = 0, h = 0.25, col = "gray", lty = "dashed") } } \keyword{distribution} VGAM/man/lqnorm.Rd0000644000176200001440000001004013565414527013365 0ustar liggesusers\name{lqnorm} %\alias{lqnorm} \alias{lqnorm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Minimizing the L-q norm Family Function } \description{ Minimizes the L-q norm of residuals in a linear model. } \usage{ lqnorm(qpower = 2, link = "identitylink", imethod = 1, imu = NULL, ishrinkage = 0.95) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{qpower}{ A single numeric, must be greater than one, called \eqn{q} below. The absolute values of the residuals are raised to the power of this argument, and then summed. This quantity is minimized with respect to the regression coefficients. } \item{link}{ Link function applied to the `mean' \eqn{\mu}{mu}. See \code{\link{Links}} for more details. } \item{imethod}{ Must be 1, 2 or 3. See \code{\link{CommonVGAMffArguments}} for more information. Ignored if \code{imu} is specified.
} \item{imu}{ Numeric, optional initial values used for the fitted values. The default is to use \code{imethod = 1}. } \item{ishrinkage}{ How much shrinkage is used when initializing the fitted values. The value must be between 0 and 1 inclusive, and a value of 0 means the individual response values are used, and a value of 1 means the median or mean is used. This argument is used in conjunction with \code{imethod = 3}. } } \details{ This function minimizes the objective function \deqn{ \sum_{i=1}^n \; w_i (|y_i - \mu_i|)^q }{% sum_{i=1}^n w_i (|y_i - mu_i|)^q } where \eqn{q} is the argument \code{qpower}, \eqn{\eta_i = g(\mu_i)}{eta_i = g(mu_i)} where \eqn{g} is the link function, and \eqn{\eta_i}{eta_i} is the vector of linear/additive predictors. The prior weights \eqn{w_i} can be inputted using the \code{weights} argument of \code{vlm}/\code{\link{vglm}}/\code{\link{vgam}} etc.; it should be just a vector here since this function handles only a single vector or one-column response. Numerical problems will occur when \eqn{q} is too close to one. Reasonable values probably range from about 1.5 upwards. The value \eqn{q=2} corresponds to ordinary least squares while \eqn{q=1} corresponds to the MLE of a double exponential (Laplace) distribution. The procedure becomes more sensitive to outliers the larger the value of \eqn{q}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. } \author{ Thomas W. Yee } \note{ This \pkg{VGAM} family function is an initial attempt to provide a more robust alternative for regression and/or offer a little more flexibility than least squares. The \code{@misc} slot of the fitted object contains a list component called \code{objectiveFunction} which is the value of the objective function at the final iteration. } \section{Warning }{ Convergence failure is common; therefore, the user is advised to be cautious and monitor convergence! } \seealso{ \code{\link{uninormal}}. % \code{\link{gaussianff}}. } \examples{ set.seed(123) ldata <- data.frame(x = sort(runif(nn <- 10 ))) realfun <- function(x) 4 + 5*x ldata <- transform(ldata, y = realfun(x) + rnorm(nn, sd = exp(-1))) # Make the first observation an outlier ldata <- transform(ldata, y = c(4*y[1], y[-1]), x = c(-1, x[-1])) fit <- vglm(y ~ x, lqnorm(qpower = 1.2), data = ldata) coef(fit, matrix = TRUE) head(fitted(fit)) fit@misc$qpower fit@misc$objectiveFunction \dontrun{ # Graphical check with(ldata, plot(x, y, main = paste("LS = red, lqnorm = blue (qpower = ", fit@misc$qpower, "), truth = black", sep = ""), col = "blue")) lmfit <- lm(y ~ x, data = ldata) with(ldata, lines(x, fitted(fit), col = "blue")) with(ldata, lines(x, lmfit$fitted, col = "red")) with(ldata, lines(x, realfun(x), col = "black")) } } \keyword{models} \keyword{regression} VGAM/man/mills.ratio.Rd0000644000176200001440000000260213565414527014317 0ustar liggesusers\name{mills.ratio} \alias{mills.ratio} \alias{mills.ratio2} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Mills Ratio } \description{ Computes the Mills ratio. } \usage{ mills.ratio(x) mills.ratio2(x) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ Numeric (real).
} } \details{ The Mills ratio here is \code{dnorm(x) / pnorm(x)} (some use \code{(1 - pnorm(x)) / dnorm(x)}). Some care is needed as \code{x} approaches \code{-Inf}; when \eqn{x} is very negative then its value approaches \eqn{-x}. } %\section{Warning}{ % This function has not been fully tested. %} \value{ \code{mills.ratio} returns the Mills ratio, and \code{mills.ratio2} returns \code{dnorm(x) * dnorm(x) / pnorm(x)}. } \references{ Mills, J. P. (1926) Table of the ratio: area to bounding ordinate, for any portion of normal curve. \emph{Biometrika}. \bold{18}(3/4), 395--400. } \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link[stats:Normal]{Normal}}, \code{\link{tobit}}, \code{\link{cens.poisson}}. } \examples{ \dontrun{ curve(mills.ratio, -5, 5, col = "orange", las = 1) curve(mills.ratio, -5, 5, col = "orange", las = 1, log = "y") } } \keyword{math} % curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange") % curve(zeta, 1.2, 12, add = TRUE, col = "orange") % abline(v = 0, h = c(0,1), lty = "dashed") VGAM/man/Coef.qrrvglm-class.Rd0000644000176200001440000001072413565414527015536 0ustar liggesusers\name{Coef.qrrvglm-class} \docType{class} \alias{Coef.qrrvglm-class} \title{Class ``Coef.qrrvglm'' } \description{ The most pertinent matrices and other quantities pertaining to a QRR-VGLM (CQO model). } \section{Objects from the Class}{ Objects can be created by calls of the form \code{Coef(object, ...)} where \code{object} is an object of class \code{"qrrvglm"} (created by \code{\link{cqo}}). In this document, \eqn{R} is the \emph{rank}, \eqn{M} is the number of linear predictors and \eqn{n} is the number of observations. } \section{Slots}{ \describe{ \item{\code{A}:}{Of class \code{"matrix"}, \bold{A}, which are the linear `coefficients' of the matrix of latent variables. It is \eqn{M} by \eqn{R}. } \item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}. These correspond to terms of the argument \code{noRRR}. } \item{\code{C}:}{Of class \code{"matrix"}, \bold{C}, the canonical coefficients. It has \eqn{R} columns. } \item{\code{Constrained}:}{Logical. Whether the model is a constrained ordination model. } \item{\code{D}:}{Of class \code{"array"}, \code{D[,,j]} is an order-\code{Rank} matrix, for \code{j} = 1,\dots,\eqn{M}. Ideally, these are negative-definite in order to make the response curves/surfaces bell-shaped. } \item{\code{Rank}:}{The rank (dimension, number of latent variables) of the RR-VGLM. Called \eqn{R}. } \item{\code{latvar}:}{\eqn{n} by \eqn{R} matrix of latent variable values. } \item{\code{latvar.order}:}{Of class \code{"matrix"}, the permutation returned when the function \code{\link{order}} is applied to each column of \code{latvar}. This enables each column of \code{latvar} to be easily sorted. } \item{\code{Maximum}:}{Of class \code{"numeric"}, the \eqn{M} maximum fitted values. That is, the fitted values at the optimums for \code{noRRR = ~ 1} models. If \code{noRRR} is not \code{~ 1} then these will be \code{NA}s. } \item{\code{NOS}:}{Number of species.} \item{\code{Optimum}:}{Of class \code{"matrix"}, the values of the latent variables where the optimums are. If the curves are not bell-shaped, then the value will be \code{NA} or \code{NaN}.} \item{\code{Optimum.order}:}{Of class \code{"matrix"}, the permutation returned when the function \code{\link{order}} is applied to each column of \code{Optimum}. This enables each row of \code{Optimum} to be easily sorted. } % \item{\code{Diagonal}:}{Vector of logicals: are the % \code{D[,,j]} diagonal? 
} \item{\code{bellshaped}:}{Vector of logicals: is each response curve/surface bell-shaped? } \item{\code{dispersion}:}{Dispersion parameter(s). } \item{\code{Dzero}:}{Vector of logicals, is each of the response curves linear in the latent variable(s)? It will be if and only if \code{D[,,j]} equals \bold{O}, for \code{j} = 1,\dots,\eqn{M} . } \item{\code{Tolerance}:}{Object of class \code{"array"}, \code{Tolerance[,,j]} is an order-\code{Rank} matrix, for \code{j} = 1,\dots,\eqn{M}, being the matrix of tolerances (squared if on the diagonal). These are denoted by \bold{T} in Yee (2004). Ideally, these are positive-definite in order to make the response curves/surfaces bell-shaped. The tolerance matrices satisfy \eqn{T_s = -\frac12 D_s^{-1}}{T_s = -0.5 * D_s^(-1)}. } } } %\section{Methods}{ %No methods defined with class "Coef.qrrvglm" in the signature. %} \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. } \author{ Thomas W. Yee } %\note{ ~~further notes~~ } % ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{Coef.qrrvglm}}, \code{\link{cqo}}, \code{print.Coef.qrrvglm}. % \code{qrrvglm-class}, } \examples{ x2 <- rnorm(n <- 100) x3 <- rnorm(n) x4 <- rnorm(n) latvar1 <- 0 + x3 - 2*x4 lambda1 <- exp(3 - 0.5 * ( latvar1-0)^2) lambda2 <- exp(2 - 0.5 * ( latvar1-1)^2) lambda3 <- exp(2 - 0.5 * ((latvar1+4)/2)^2) y1 <- rpois(n, lambda1) y2 <- rpois(n, lambda2) y3 <- rpois(n, lambda3) yy <- cbind(y1, y2, y3) # vvv p1 <- cqo(yy ~ x2 + x3 + x4, fam = poissonff, trace = FALSE) \dontrun{ lvplot(p1, y = TRUE, lcol = 1:3, pch = 1:3, pcol = 1:3) } # vvv print(Coef(p1), digits = 3) } \keyword{classes} VGAM/man/model.matrixvlm.Rd0000644000176200001440000001141113565414527015202 0ustar liggesusers\name{model.matrixvlm} \alias{model.matrixvlm} \title{Construct the Design Matrix of a VLM Object} \usage{ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"), linpred.index = NULL, \dots) } \arguments{ \item{object}{an object of a class that inherits from the \emph{vector linear model} (VLM). } \item{type}{Type of design matrix returned. The first is the default. The value \code{"vlm"} is the VLM model matrix corresponding to the \code{formula} argument. The value \code{"lm"} is the LM model matrix corresponding to the \code{formula} argument. The value \code{"lm2"} is the second (LM) model matrix corresponding to the \code{form2} argument. The value \code{"bothlmlm2"} means both LM and VLM model matrices. } \item{linpred.index}{ Vector of integers. The index for a linear/additive predictor; it must have values from the set \code{1:M}. Also, if \code{length(linpred.index) == 1} then \code{type = "lm"} must be assigned, whereas if \code{length(linpred.index) > 1} then \code{type = "vlm"} must be assigned. Then it returns a subset of the VLM matrix corresponding to the \code{linpred.index}th linear/additive predictor(s); this is an LM-type matrix when it is of unit length. Currently some attributes are returned, but these may change in value in the future because of ongoing development work. % Single integer: 20190625; this is no longer true. } \item{\dots}{further arguments passed to or from other methods. These include \code{data} (which is a data frame created with \code{\link{model.framevlm}}), \code{contrasts.arg}, and \code{xlev}. See \code{\link[stats]{model.matrix}} for more information. } } \description{ Creates a design matrix.
Two types can be returned: a large one (class \code{"vlm"} or one that inherits from this such as \code{"vglm"}) or a small one (such as returned if it were of class \code{"lm"}). } \details{ This function creates a design matrix from \code{object}. This can be a small LM object or a big VLM object (default). The latter is constructed from the former and the constraint matrices. This code implements \emph{smart prediction} (see \code{\link{smartpred}}). } \value{ The design matrix for a regression model with the specified formula and data. If \code{type = "bothlmlm2"} then a list is returned with components \code{"X"} and \code{"Xm2"}. Sometimes (especially if \code{x = TRUE} when calling \code{\link{vglm}}) the model matrix has attributes: \code{"assign"} (\code{"lm"}-type) and \code{"vassign"} (\code{"vlm"}-type) and \code{"orig.assign.lm"} (\code{"lm"}-type). These are used internally a lot for bookkeeping, especially regarding the columns of both types of model matrices. In particular, constraint matrices and variable selection rely on this information a lot. The \code{"orig.assign.lm"} is the ordinary \code{"assign"} attribute for \code{\link[stats]{lm}} and \code{\link[stats]{glm}} objects. } \references{ %Yee, T. W. and Hastie, T. J. (2003) %Reduced-rank vector generalized linear models. %\emph{Statistical Modelling}, %\bold{3}, 15--41. Chambers, J. M. (1992) \emph{Data for models.} Chapter 3 of \emph{Statistical Models in S} eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole. } \seealso{ \code{\link[stats]{model.matrix}}, \code{\link{model.framevlm}}, \code{\link{predictvglm}}, \code{\link{smartpred}}, \code{\link{constraints.vlm}}, \code{\link{add1.vglm}}, \code{\link{drop1.vglm}}, \code{\link{step4vglm}}. } \examples{ # (I) Illustrates smart prediction ,,,,,,,,,,,,,,,,,,,,,,, pneumo <- transform(pneumo, let = log(exposure.time)) fit <- vglm(cbind(normal, mild, severe) ~ sm.poly(c(sm.scale(let)), 2), multinomial, data = pneumo, trace = TRUE, x = FALSE) class(fit) fit@smart.prediction # Data-dependent parameters fit@x # Not saved on the object model.matrix(fit) model.matrix(fit, linpred.index = 1, type = "lm") model.matrix(fit, linpred.index = 2, type = "lm") (Check1 <- head(model.matrix(fit, type = "lm"))) (Check2 <- model.matrix(fit, data = head(pneumo), type = "lm")) all.equal(c(Check1), c(Check2)) # Should be TRUE q0 <- head(predict(fit)) q1 <- head(predict(fit, newdata = pneumo)) q2 <- predict(fit, newdata = head(pneumo)) all.equal(q0, q1) # Should be TRUE all.equal(q1, q2) # Should be TRUE # (II) Attributes ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, fit2 <- vglm(cbind(normal, mild, severe) ~ let, # x = TRUE multinomial, data = pneumo, trace = TRUE) fit2@x # "lm"-type; saved on the object; note the attributes model.matrix(fit2, type = "lm") # Note the attributes model.matrix(fit2, type = "vlm") # Note the attributes } \keyword{models} VGAM/man/expgeometric.Rd0000644000176200001440000000447213565414527014564 0ustar liggesusers\name{expgeometric} \alias{expgeometric} %- Also NEED an '\alias' for EACH other topic documented here. \title{Exponential Geometric Distribution Family Function} \description{ Estimates the two parameters of the exponential geometric distribution by maximum likelihood estimation. } \usage{ expgeometric(lscale = "loglink", lshape = "logitlink", iscale = NULL, ishape = NULL, tol12 = 1e-05, zero = 1, nsimEIM = 400) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lscale, lshape}{ Link functions for the two parameters.
See \code{\link{Links}} for more choices. } \item{iscale, ishape}{ Numeric. Optional initial values for the scale and shape parameters. } \item{tol12}{ Numeric. Tolerance for testing whether a parameter has value 1 or 2. } \item{zero, nsimEIM}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The exponential geometric distribution has density function \deqn{f(y; c = scale, s = shape) = (1/c) (1 - s) e^{-y/c} (1 - s e^{-y/c})^{-2}}{% (1/c) * (1 - s) * e^(-y/c) * (1 - s * e^(-y/c))^(-2)} where \eqn{y > 0}, \eqn{c > 0} and \eqn{s \in (0, 1)}{0 < s < 1}. The mean, \eqn{(c (s - 1)/ s) \log(1 - s)}{(c * (s - 1)/ s) * log(1 - s)} is returned as the fitted values. Note the median is \eqn{c \log(2 - s)}{c * log(2 - s)}. Simulated Fisher scoring is implemented. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Adamidis, K., Loukas, S. (1998). A lifetime distribution with decreasing failure rate. \emph{Statistics and Probability Letters}, \bold{39}, 35--42. } \author{ J. G. Lauder and T. W. Yee } \note{ We define \code{scale} as the reciprocal of the scale parameter used by Adamidis and Loukas (1998). } \seealso{ \code{\link{dexpgeom}}, \code{\link{exponential}}, \code{\link{geometric}}. } \examples{ \dontrun{ Scale <- exp(2); shape = logitlink(-1, inverse = TRUE); edata <- data.frame(y = rexpgeom(n = 2000, scale = Scale, shape = shape)) fit <- vglm(y ~ 1, expgeometric, edata, trace = TRUE) c(with(edata, mean(y)), head(fitted(fit), 1)) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } } \keyword{models} \keyword{regression} VGAM/man/foldnormUC.Rd0000644000176200001440000000526113565414527014136 0ustar liggesusers\name{Foldnorm} \alias{Foldnorm} \alias{dfoldnorm} \alias{pfoldnorm} \alias{qfoldnorm} \alias{rfoldnorm} \title{The Folded-Normal Distribution} \description{ Density, distribution function, quantile function and random generation for the (generalized) folded-normal distribution. } \usage{ dfoldnorm(x, mean = 0, sd = 1, a1 = 1, a2 = 1, log = FALSE) pfoldnorm(q, mean = 0, sd = 1, a1 = 1, a2 = 1, lower.tail = TRUE, log.p = FALSE) qfoldnorm(p, mean = 0, sd = 1, a1 = 1, a2 = 1, lower.tail = TRUE, log.p = FALSE, ...) rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats:Normal]{rnorm}}. } \item{mean, sd}{ see \code{\link[stats:Normal]{rnorm}}. } \item{a1, a2}{ see \code{\link{foldnormal}}. } \item{log}{ Logical. If \code{TRUE} then the log density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{\ldots}{ Arguments that can be passed into \code{\link[stats]{uniroot}}. } } \value{ \code{dfoldnorm} gives the density, \code{pfoldnorm} gives the distribution function, \code{qfoldnorm} gives the quantile function, and \code{rfoldnorm} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{foldnormal}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } \note{ \code{qfoldnorm()} runs very slowly because it calls \code{\link[stats]{uniroot}} for each value of the argument \code{p}. The solution is consequently not exact; the \code{...} can be used to obtain a more accurate solution if necessary. 
} \seealso{ \code{\link{foldnormal}}, \code{\link[stats]{uniroot}}. } \examples{ \dontrun{ m <- 1.5; SD <- exp(0) x <- seq(-1, 4, len = 501) plot(x, dfoldnorm(x, m = m, sd = SD), type = "l", ylim = 0:1, las = 1, ylab = paste("foldnorm(m = ", m, ", sd = ", round(SD, digits = 3), ")"), main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue") abline(h = 0, col = "gray50") lines(x, pfoldnorm(x, m = m, sd = SD), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qfoldnorm(probs, m = m, sd = SD) lines(Q, dfoldnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h") lines(Q, pfoldnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) max(abs(pfoldnorm(Q, m = m, sd = SD) - probs)) # Should be 0 } } \keyword{distribution} VGAM/man/betaprime.Rd0000644000176200001440000000703513565414527014037 0ustar liggesusers\name{betaprime} \alias{betaprime} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The Beta-Prime Distribution } \description{ Estimation of the two shape parameters of the beta-prime distribution by maximum likelihood estimation. } \usage{ betaprime(lshape = "loglink", ishape1 = 2, ishape2 = NULL, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lshape}{ Parameter link function applied to the two (positive) shape parameters. See \code{\link{Links}} for more choices. } \item{ishape1, ishape2, zero}{ See \code{\link{CommonVGAMffArguments}}. % Initial values for the first and second shape parameters. % A \code{NULL} value means it is obtained in the \code{initialize} slot. % Note that \code{ishape2} is obtained using \code{ishape1}. } % \item{zero}{ % An integer-valued vector specifying which linear/additive predictors % are modelled as intercepts only. The value must be from the set % \{1,2\} corresponding respectively to \code{shape1} and \code{shape2} % respectively. If \code{zero=NULL} then both parameters are modelled % with the explanatory variables. % } } %% what is the mean if shape2 < 1? \details{ The beta-prime distribution is given by \deqn{f(y) = y^{shape1-1} (1+y)^{-shape1-shape2} / B(shape1,shape2)}{% f(y) = y^(shape1-1) * (1+y)^(-shape1-shape2) / B(shape1,shape2) } for \eqn{y > 0}. The shape parameters are positive, and here, \eqn{B} is the beta function. The mean of \eqn{Y} is \eqn{shape1 / (shape2-1)} provided \eqn{shape2>1}; these are returned as the fitted values. If \eqn{Y} has a \eqn{Beta(shape1,shape2)} distribution then \eqn{Y/(1-Y)} and \eqn{(1-Y)/Y} have a \eqn{Betaprime(shape1,shape2)} and \eqn{Betaprime(shape2,shape1)} distribution respectively. Also, if \eqn{Y_1}{Y1} has a \eqn{gamma(shape1)} distribution and \eqn{Y_2}{Y2} has a \eqn{gamma(shape2)} distribution then \eqn{Y_1/Y_2}{Y1/Y2} has a \eqn{Betaprime(shape1,shape2)} distribution. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } %% zz not sure about the JKB reference. \references{ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) Chapter 25 of: \emph{Continuous Univariate Distributions}, 2nd edition, Volume 2, New York: Wiley. %Documentation accompanying the \pkg{VGAM} package at %\url{https://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ The response must have positive values only. 
The beta-prime distribution is also known as the
\emph{beta distribution of the second kind} or the
\emph{inverted beta distribution}.
}
\seealso{
  \code{\link{betaff}},
  \code{\link[stats]{Beta}}.
}
\examples{
nn <- 1000
bdata <- data.frame(shape1 = exp(1), shape2 = exp(3))
bdata <- transform(bdata, yb = rbeta(nn, shape1, shape2))
bdata <- transform(bdata, y1 = (1-yb) /    yb,
                          y2 =    yb  / (1-yb),
                          y3 = rgamma(nn, exp(3)) / rgamma(nn, exp(2)))

fit1 <- vglm(y1 ~ 1, betaprime, data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)

fit2 <- vglm(y2 ~ 1, betaprime, data = bdata, trace = TRUE)
coef(fit2, matrix = TRUE)

fit3 <- vglm(y3 ~ 1, betaprime, data = bdata, trace = TRUE)
coef(fit3, matrix = TRUE)

# Compare the fitted values
with(bdata, mean(y3))
head(fitted(fit3))
Coef(fit3)  # Useful for intercept-only models
}
\keyword{models}
\keyword{regression}
VGAM/man/bisa.Rd0000644000176200001440000001067113565414527013005 0ustar liggesusers\name{bisa}
\alias{bisa}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Birnbaum-Saunders Distribution Family Function }
\description{
  Estimates the shape and scale parameters of the Birnbaum-Saunders
  distribution by maximum likelihood estimation.
}
\usage{
bisa(lscale = "loglink", lshape = "loglink", iscale = 1,
     ishape = NULL, imethod = 1, zero = "shape", nowarning = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{nowarning}{ Logical. Suppress a warning?
  Ignored for \pkg{VGAM} 0.9-7 and higher.
  }
  \item{lscale, lshape}{
  Parameter link functions applied to the scale and shape parameters
  (\eqn{b} and \eqn{a} below).
  See \code{\link{Links}} for more choices.
  A log link is the default for both because they are positive.
  }
  \item{iscale, ishape}{
  Initial values for \eqn{b} and \eqn{a}.
  A \code{NULL} means an initial value is chosen internally using
  \code{imethod}.
  }
  \item{imethod}{
  An integer with value \code{1} or \code{2} or \code{3} which
  specifies the initialization method.
  If failure to converge occurs try another value, or else specify a
  value for \code{ishape} and/or \code{iscale}.
  }
  \item{zero}{
  Specifies which linear/additive predictor is modelled as
  intercept-only.
  If used, choose one value from the set \{1,2\}.
  See \code{\link{CommonVGAMffArguments}} for more details.
% The default is none of them.
  }
}
\details{
  The (two-parameter) Birnbaum-Saunders distribution has a cumulative
  distribution function that can be written as
  \deqn{F(y;a,b) = \Phi[ \xi(y/b)/a] }{%
        F(y;a,b) = pnorm[xi(y/b)/a] }
  where \eqn{\Phi(\cdot)}{pnorm()} is the
  cumulative distribution function of a standard normal
  (see \code{\link[stats:Normal]{pnorm}}),
  \eqn{\xi(t) = \sqrt{t} - 1 / \sqrt{t}}{xi(t) = t^(0.5) - t^(-0.5)},
  \eqn{y > 0}, \eqn{a>0} is the shape parameter, and
  \eqn{b>0} is the scale parameter.
  The mean of \eqn{Y} (which is the fitted value) is
  \eqn{b(1 + a^2/2)}{b*(1 + a*a/2)}, and the variance is
  \eqn{a^2 b^2 (1 + \frac{5}{4}a^2)}{a^2 b^2 (1 + (5/4)*a^2)}.
  By default, \eqn{\eta_1 = \log(a)}{eta1 = log(a)} and
  \eqn{\eta_2 = \log(b)}{eta2 = log(b)} for this family function.

  Note that \eqn{a} and \eqn{b} are orthogonal,
  i.e., the Fisher information matrix is diagonal.
  This family function implements Fisher scoring, and it is unnecessary
  to compute any integrals numerically.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
Lemonte, A. J. and Cribari-Neto, F. and Vasconcellos, K. L. P.
(2007) Improved statistical inference for the two-parameter Birnbaum-Saunders distribution. \emph{Computational Statistics \& Data Analysis}, \bold{51}, 4656--4681. Birnbaum, Z. W. and Saunders, S. C. (1969) A new family of life distributions. \emph{Journal of Applied Probability}, \bold{6}, 319--327. Birnbaum, Z. W. and Saunders, S. C. (1969) Estimation for a family of life distributions with applications to fatigue. \emph{Journal of Applied Probability}, \bold{6}, 328--347. Engelhardt, M. and Bain, L. J. and Wright, F. T. (1981) Inferences on the parameters of the Birnbaum-Saunders fatigue life distribution based on maximum likelihood estimation. \emph{Technometrics}, \bold{23}, 251--256. Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) \emph{Continuous Univariate Distributions}, 2nd edition, Volume 2, New York: Wiley. } \author{ T. W. Yee } %\note{ % %} %\section{Warning }{ %} \seealso{ \code{\link{pbisa}}, \code{\link{inv.gaussianff}}, \code{\link{CommonVGAMffArguments}}. } \examples{ bdata1 <- data.frame(x2 = runif(nn <- 1000)) bdata1 <- transform(bdata1, shape = exp(-0.5 + x2), scale = exp(1.5)) bdata1 <- transform(bdata1, y = rbisa(nn, scale, shape)) fit1 <- vglm(y ~ x2, bisa(zero = 1), data = bdata1, trace = TRUE) coef(fit1, matrix = TRUE) \dontrun{ bdata2 <- data.frame(shape = exp(-0.5), scale = exp(0.5)) bdata2 <- transform(bdata2, y = rbisa(nn, scale, shape)) fit <- vglm(y ~ 1, bisa, data = bdata2, trace = TRUE) with(bdata2, hist(y, prob = TRUE, ylim = c(0, 0.5), col = "lightblue")) coef(fit, matrix = TRUE) with(bdata2, mean(y)) head(fitted(fit)) x <- with(bdata2, seq(0, max(y), len = 200)) lines(dbisa(x, Coef(fit)[1], Coef(fit)[2]) ~ x, data = bdata2, col = "orange", lwd = 2) } } \keyword{models} \keyword{regression} VGAM/man/oazeta.Rd0000644000176200001440000000646013565414527013353 0ustar liggesusers\name{oazeta} \alias{oazeta} %\alias{oazetaff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Altered Zeta Distribution } \description{ Fits a one-altered zeta distribution based on a conditional model involving a Bernoulli distribution and a 1-truncated zeta distribution. } \usage{ oazeta(lpobs1 = "logitlink", lshape = "loglink", type.fitted = c("mean", "shape", "pobs1", "onempobs1"), gshape = exp((-4:3)/4), ishape = NULL, ipobs1 = NULL, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpobs1}{ Link function for the parameter \eqn{p_1}{pobs1} or \eqn{\phi}{phi}, called \code{pobs1} or \code{phi} here. See \code{\link{Links}} for more choices. } \item{lshape}{ See \code{\link{zeta}} for details. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for information. } \item{gshape, ishape, ipobs1, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The response \eqn{Y} is one with probability \eqn{p_1}{pobs1}, or \eqn{Y} has a 1-truncated zeta distribution with probability \eqn{1-p_1}{1-pobs1}. Thus \eqn{0 < p_1 < 1}{0 < pobs1 < 1}, which is modelled as a function of the covariates. The one-altered zeta distribution differs from the one-inflated zeta distribution in that the former has ones coming from one source, whereas the latter has ones coming from the zeta distribution too. The one-inflated zeta distribution is implemented in the \pkg{VGAM} package. Some people call the one-altered zeta a \emph{hurdle} model. The input can be a matrix (multiple responses). 
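  For example, a two-column response such as \code{cbind(y1, y2)}
  specifies two responses that are fitted simultaneously, as in the
  example below.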
  By default, the two linear/additive predictors of \code{oazeta}
  are \eqn{(logit(\phi), log(shape))^T}{(logit(phi), log(shape))^T}.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.

  The \code{fitted.values} slot of the fitted object, which should be
  extracted by the generic function \code{fitted}, returns the mean
  \eqn{\mu}{mu} (default), which is given by
  \deqn{\mu = \phi + (1-\phi) A}{%
         mu = phi + (1- phi) A}
  where \eqn{A} is the mean of the one-truncated zeta distribution.
  If \code{type.fitted = "pobs1"} then \eqn{p_1}{pobs1} is returned.
}
%\references{
%
%
%}
%\section{Warning }{
%}
\author{ T. W. Yee }
\note{
  This family function effectively combines \code{\link{binomialff}}
  and \code{\link{otzeta}} into one family function.
}
\seealso{
  \code{\link{Oazeta}},
  \code{\link{zetaff}},
  \code{\link{oizeta}},
  \code{\link{otzeta}},
  \code{\link{CommonVGAMffArguments}},
  \code{\link{simulate.vlm}}.
}
\examples{
odata <- data.frame(x2 = runif(nn <- 1000))
odata <- transform(odata, pobs1 = logitlink(-1 + 2*x2, inverse = TRUE),
                          shape = loglink(  1 + 1*x2, inverse = TRUE))
odata <- transform(odata, y1 = roazeta(nn, shape = shape, pobs1 = pobs1),
                          y2 = roazeta(nn, shape = shape, pobs1 = pobs1))
with(odata, table(y1))
ofit <- vglm(cbind(y1, y2) ~ x2, oazeta, data = odata, trace = TRUE)
coef(ofit, matrix = TRUE)
head(fitted(ofit))
head(predict(ofit))
summary(ofit)
}
\keyword{models}
\keyword{regression}
VGAM/man/oapospoisUC.Rd0000644000176200001440000000424613565414527014334 0ustar liggesusers\name{Oapospois}
\alias{Oapospois}
\alias{doapospois}
\alias{poapospois}
\alias{qoapospois}
\alias{roapospois}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ One-Altered Positive-Poisson Distribution }
\description{
  Density, distribution function, quantile function and random
  generation for the one-altered positive-Poisson distribution with
  parameter \code{pobs1}.
}
\usage{
doapospois(x, lambda, pobs1 = 0, log = FALSE)
poapospois(q, lambda, pobs1 = 0)
qoapospois(p, lambda, pobs1 = 0)
roapospois(n, lambda, pobs1 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q, n, p}{ Same as in \code{\link[stats:Uniform]{Unif}}.}
  \item{lambda, log}{ Same as in \code{\link{Otpospois}}. }
  \item{pobs1}{
  Probability of (an observed) one, called \eqn{pobs1}.
  The default value of \code{pobs1 = 0} corresponds to the response
  having a 1-truncated positive-Poisson distribution.
  }
}
\details{
  The value of \eqn{Y} is 1 with probability \code{pobs1};
  otherwise \eqn{Y} has a 1-truncated
  positive-Poisson(\eqn{\lambda}{lambda}) distribution.
}
\value{
  \code{doapospois} gives the density,
  \code{poapospois} gives the distribution function,
  \code{qoapospois} gives the quantile function, and
  \code{roapospois} generates random deviates.
}
%\references{ }
\author{ T. W. Yee }
\note{
  The argument \code{pobs1} is recycled to the required length, and
  must have values which lie in the interval \eqn{[0,1]}.
}
\seealso{
  \code{\link{oapospoisson}},
  \code{\link{Oipospois}},
  \code{\link{Otpospois}}.
}
\examples{
lambda <- 3; pobs1 <- 0.30; x <- (-1):7
doapospois(x, lambda = lambda, pobs1 = pobs1)
table(roapospois(100, lambda = lambda, pobs1 = pobs1))

\dontrun{ x <- 0:10
barplot(rbind(doapospois(x, lambda = lambda, pobs1 = pobs1),
                dpospois(x, lambda = lambda)),
        beside = TRUE, col = c("blue", "orange"), cex.main = 0.7,
        las = 1, ylab = "Probability", names.arg = as.character(x),
        main = paste("OAPP(lambda = ", lambda, ", pobs1 = ", pobs1,
                     ") [blue] vs", " PosPoisson(lambda = ", lambda,
                     ") [orange] densities", sep = "")) }
}
\keyword{distribution}
VGAM/man/pneumo.Rd0000644000176200001440000000226013565414527013365 0ustar liggesusers\name{pneumo}
\alias{pneumo}
\docType{data}
\title{Pneumoconiosis in Coalminers Data}
\description{
The \code{pneumo} data frame has 8 rows and 4 columns.
Exposure time is explanatory, and there are 3 ordinal response
variables.
}
\usage{data(pneumo)}
\format{
  This data frame contains the following columns:
  \describe{
    \item{exposure.time}{a numeric vector, in years}
    \item{normal}{a numeric vector, counts}
    \item{mild}{a numeric vector, counts}
    \item{severe}{a numeric vector, counts}
  }
}
\details{
These were collected from coalface workers. In the original data set,
the two most severe categories were combined.
}
\source{
  Ashford, J. R. (1959)
  An approach to the analysis of data for semi-quantal responses in
  biological assay.
  \emph{Biometrics}, \bold{15}, 573--581.
}
\seealso{
  \code{\link{cumulative}}.
}
\references{
  McCullagh, P. and Nelder, J. A. (1989)
  \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo <- transform(pneumo, let = log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
}
\keyword{datasets}
VGAM/man/lino.Rd0000644000176200001440000001052713565414527013030 0ustar liggesusers\name{lino}
\alias{lino}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generalized Beta Distribution Family Function }
\description{
  Maximum likelihood estimation of the 3-parameter generalized beta
  distribution as proposed by Libby and Novick (1982).
}
\usage{
lino(lshape1 = "loglink", lshape2 = "loglink", llambda = "loglink",
     ishape1 = NULL, ishape2 = NULL, ilambda = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lshape1, lshape2}{
  Parameter link functions applied to the two (positive) shape
  parameters \eqn{a} and \eqn{b}.
  See \code{\link{Links}} for more choices.
  }
  \item{llambda}{
  Parameter link function applied to the parameter
  \eqn{\lambda}{lambda}.
  See \code{\link{Links}} for more choices.
  }
  \item{ishape1, ishape2, ilambda}{
  Initial values for the parameters.
  A \code{NULL} value means one is computed internally.
  The argument \code{ilambda} must be numeric, and the default
  corresponds to a standard beta distribution.
  }
  \item{zero}{
  Can be an integer-valued vector specifying which linear/additive
  predictors are modelled as intercepts only.
  Here, the values must be from the set \{1,2,3\} which correspond to
  \eqn{a}, \eqn{b}, \eqn{\lambda}{lambda}, respectively.
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
}
\details{
  Proposed by Libby and Novick (1982), this distribution has density
  \deqn{f(y;a,b,\lambda) = \frac{\lambda^{a} y^{a-1} (1-y)^{b-1}}{
        B(a,b) \{1 - (1-\lambda) y\}^{a+b}}}{%
        f(y;a,b,lambda) = lambda^a y^(a-1) (1-y)^(b-1) / [B(a,b)
        (1 - (1-lambda)*y)^(a+b)]}
  for \eqn{a > 0}, \eqn{b > 0}, \eqn{\lambda > 0}{lambda > 0},
  \eqn{0 < y < 1}.
  Here \eqn{B} is the beta function
  (see \code{\link[base:Special]{beta}}).
  The mean is a complicated function involving the Gauss
  hypergeometric function.
  If \eqn{X} has a \code{lino} distribution with parameters
  \code{shape1}, \code{shape2}, \code{lambda}, then
  \eqn{Y=\lambda X/(1-(1-\lambda)X)}{Y = \lambda*X / (1 -
  (1-\lambda)*X)}
  has a standard beta distribution with parameters \code{shape1},
  \code{shape2}.

  Since \eqn{\log(\lambda)=0}{log(lambda)=0} corresponds to the
  standard beta distribution, a \code{summary} of the fitted model
  performs a t-test for whether the data belong to a standard beta
  distribution (provided the \code{\link{loglink}} link for
  \eqn{\lambda}{lambda} is used; this is the default).
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
  Libby, D. L. and Novick, M. R. (1982)
  Multivariate generalized beta distributions with applications to
  utility assessment.
  \emph{Journal of Educational Statistics}, \bold{7}, 271--294.

  Gupta, A. K. and Nadarajah, S. (2004)
  \emph{Handbook of Beta Distribution and Its Applications},
  NY: Marcel Dekker, Inc.
}
\author{ T. W. Yee }
\note{
  The fitted values, which are usually the mean, have not been
  implemented yet; currently the median is returned as the fitted
  values.
% and consequently are \code{NA}s.

  Although Fisher scoring is used, the working weight matrices are
  positive-definite only in a certain region of the parameter space.
  Problems with this indicate poor initial values or an
  ill-conditioned model or insufficient data etc.

  This model can be difficult to fit.
  A reasonably good value of \code{ilambda} seems to be needed, so if
  the self-starting initial values fail, try experimenting with the
  initial value arguments.
  Experience suggests that it is better for \code{ilambda} to be a
  little larger, rather than smaller, than the true value.
}
\seealso{
  \code{\link{Lino}},
  \code{\link{genbetaII}}.
}
\examples{
ldata <- data.frame(y1 = rbeta(n = 1000, exp(0.5), exp(1)))  # ~ standard beta
fit <- vglm(y1 ~ 1, lino, data = ldata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
summary(fit)

# Nonstandard beta distribution
ldata <- transform(ldata, y2 = rlino(n = 1000, shape1 = exp(1),
                                     shape2 = exp(2), lambda = exp(1)))
fit2 <- vglm(y2 ~ 1,
             lino(lshape1 = "identitylink", lshape2 = "identitylink",
                  ilamb = 10), data = ldata, trace = TRUE)
coef(fit2, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
VGAM/man/rrar.Rd0000644000176200001440000000614613565414527013037 0ustar liggesusers\name{rrar}
\alias{rrar}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Nested Reduced-rank Autoregressive Models for Multiple
Time Series }
\description{
  Estimates the parameters of a nested reduced-rank autoregressive
  model for multiple time series.
}
\usage{
rrar(Ranks = 1, coefstart = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{Ranks}{
  Vector of integers: the ranks of the model.
  Each value must be at least one and no more than \code{M},
  where \code{M} is the number of response variables in the time
  series.
  The length of \code{Ranks} is the \emph{lag}, which is often denoted
  by the symbol \emph{L} in the literature.
  }
  \item{coefstart}{
  Optional numerical vector of initial values for the coefficients.
  By default, the family function chooses these automatically.
  }
}
\details{
  Full details are given in Ahn and Reinsel (1988).
  Convergence may be very slow, so setting \code{maxit = 50}, say,
  may help. If convergence is not obtained, you might like to try
  inputting different initial values.
  Setting \code{trace = TRUE} in \code{\link{vglm}} is useful for
  monitoring the progress at each iteration.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\references{
  Ahn, S. and Reinsel, G. C. (1988)
  Nested reduced-rank autoregressive models for multiple time series.
  \emph{Journal of the American Statistical Association},
  \bold{83}, 849--856.
}
\author{ T. W. Yee }
\note{
  This family function should be used within \code{\link{vglm}} and
  not with \code{\link{rrvglm}} because it does not fit into the
  RR-VGLM framework exactly. Instead, the reduced-rank model is
  formulated as a VGLM!

  A methods function \code{Coef.rrar}, say, has yet to be written.
  It would return the quantities \code{Ak1}, \code{C}, \code{D},
  \code{omegahat}, \code{Phi}, etc. as slots, and then
  \code{show.Coef.rrar} would also need to be written.
}
\seealso{
  \code{\link{vglm}},
  \code{\link{grain.us}}.
}
\examples{
\dontrun{
year <- seq(1961 + 1/12, 1972 + 10/12, by = 1/12)
par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
for (ii in 1:4) {
  plot(year, grain.us[, ii], main = names(grain.us)[ii], las = 1,
       type = "l", xlab = "", ylab = "", col = "blue")
  points(year, grain.us[, ii], pch = "*", col = "blue")
}
apply(grain.us, 2, mean)  # mu vector
cgrain <- scale(grain.us, scale = FALSE)  # Center the time series only
fit <- vglm(cgrain ~ 1, rrar(Ranks = c(4, 1)), trace = TRUE)
summary(fit)

print(fit@misc$Ak1, digits = 2)
print(fit@misc$Cmatrices, digits = 3)
print(fit@misc$Dmatrices, digits = 3)
print(fit@misc$omegahat, digits = 3)
print(fit@misc$Phimatrices, digits = 2)

par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(4, 1))
for (ii in 1:4) {
  plot(year, fit@misc$Z[, ii], main = paste("Z", ii, sep = ""),
       type = "l", xlab = "", ylab = "", las = 1, col = "blue")
  points(year, fit@misc$Z[, ii], pch = "*", col = "blue")
}
}
}
\keyword{ts}
\keyword{regression}
\keyword{models}
VGAM/man/lgammaff.Rd0000644000176200001440000000761313565414527013643 0ustar liggesusers\name{lgamma1}
\alias{lgamma1}
\alias{lgamma3}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Log-gamma Distribution Family Function }
\description{
  Estimation of the parameters of the standard and nonstandard
  log-gamma distributions.
}
\usage{
lgamma1(lshape = "loglink", ishape = NULL)
lgamma3(llocation = "identitylink", lscale = "loglink",
        lshape = "loglink", ilocation = NULL, iscale = NULL,
        ishape = 1, zero = c("scale", "shape"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{llocation, lscale}{
  Parameter link functions applied to the location parameter \eqn{a}
  and the positive scale parameter \eqn{b}.
  See \code{\link{Links}} for more choices.
  }
  \item{lshape}{
  Parameter link function applied to the positive shape parameter
  \eqn{k}.
  See \code{\link{Links}} for more choices.
  }
  \item{ishape}{
  Initial value for \eqn{k}.
  If given, it must be positive.
  If failure to converge occurs, try some other value.
  The default means an initial value is determined internally.
  }
  \item{ilocation, iscale}{
  Initial values for \eqn{a} and \eqn{b}.
  The defaults mean an initial value is determined internally for
  each.
  }
  \item{zero}{
  A vector specifying which linear/additive predictors are modelled
  as intercepts only.
  The values must be from the set \{1,2,3\}, or else the names of the
  parameters.
  The default means the scale and shape parameters are modelled as
  intercept-only terms.
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
}
\details{
  The probability density function of the standard log-gamma
  distribution is given by
  \deqn{f(y;k)=\exp[ky - \exp(y)] / \Gamma(k),}{%
        f(y;k) = exp[ky - exp(y)]/gamma(k),}
  for parameter \eqn{k>0}{k>0} and all real \eqn{y}.
  The mean of \eqn{Y} is \code{digamma(k)} (returned as the fitted
  values) and its variance is \code{trigamma(k)}.

  For the non-standard log-gamma distribution, one replaces \eqn{y}
  by \eqn{(y-a)/b}, where \eqn{a} is the location parameter and
  \eqn{b} is the positive scale parameter.
  Then the density function is
  \deqn{f(y)=\exp[k(y-a)/b - \exp((y-a)/b)] / (b \, \Gamma(k)).}{%
        f(y) = exp[k(y-a)/b - exp((y-a)/b)]/(b*gamma(k)).}
  The mean and variance of \eqn{Y} are \code{a + b*digamma(k)}
  (returned as the fitted values) and \code{b^2 * trigamma(k)},
  respectively.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
  Kotz, S. and Nadarajah, S. (2000)
  \emph{Extreme Value Distributions: Theory and Applications},
  pages 48--49, London: Imperial College Press.

  Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
  \emph{Continuous Univariate Distributions},
  2nd edition, Volume 2, p.89, New York: Wiley.
}
\author{ T. W. Yee }
\note{
  The standard log-gamma distribution can be viewed as a
  generalization of the standard type 1 extreme value density:
  when \eqn{k = 1} the distribution of \eqn{-Y} is the standard type 1
  extreme value distribution.

  The standard log-gamma distribution is fitted with \code{lgamma1}
  and the non-standard (3-parameter) log-gamma distribution is fitted
  with \code{lgamma3}.
}
\seealso{
  \code{\link{rlgamma}},
  \code{\link{gengamma.stacy}},
  \code{\link{prentice74}},
  \code{\link{gamma1}},
  \code{\link[base:Special]{lgamma}}.
}
\examples{
ldata <- data.frame(y = rlgamma(100, shape = exp(1)))
fit <- vglm(y ~ 1, lgamma1, data = ldata, trace = TRUE, crit = "coef")
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)

ldata <- data.frame(x2 = runif(nn <- 5000))  # Another example
ldata <- transform(ldata, loc = -1 + 2 * x2, Scale = exp(1))
ldata <- transform(ldata, y = rlgamma(nn, loc, scale = Scale, shape = exp(0)))
fit2 <- vglm(y ~ x2, lgamma3, data = ldata, trace = TRUE, crit = "c")
coef(fit2, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
VGAM/man/gatpoisson.mlm.Rd0000644000176200001440000002005013565414527015031 0ustar liggesusers\name{gatpoisson.mlm}
\alias{gatpoisson.mlm}
%\alias{gapoissonff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generally-Altered and -Truncated Poisson Regression
Family Function (GAT--Pois--MLM Variant) }
\description{
  Fits a generally-altered and -truncated Poisson regression
  (using a multinomial logit model for the altered values).
  The truncation may include values in the upper tail.
% based on a conditional
% model involving a multinomial distribution
% and a generally-truncated Poisson distribution.
}
\usage{
gatpoisson.mlm(alter = NULL, truncate = NULL, max.support = Inf,
     zero = NULL, llambda = "loglink",
     type.fitted = c("mean", "lambda", "pobs.a", "Pobs.a",
                     "prob.a", "prob.t"),
     imethod = 1, ilambda = NULL, ishrinkage = 0.95, probs.y = 0.35)
}
%- maybe also 'usage' for other objects documented here.
% ipobs0 = NULL,
\arguments{
  \item{alter, truncate}{
  Vector of altered and/or truncated values, i.e., nonnegative
  integers.
  The values must be unique.
  May be \code{NULL}, which stands for an empty set.
% (same as \code{\link{poissonff}}).
% Must be sorted and have unique values only.
  }
  \item{llambda}{
  See \code{\link{Links}} for more choices and information.
  }
  \item{type.fitted}{
  See \code{\link{CommonVGAMffArguments}} for information.
  The choice \code{"pobs.a"} is the probability of an altered value.
  See below for more details.
% and \code{"onempobs.a"} is its complement.
  }
  \item{max.support}{
  See \code{\link{Gaitpois.mlm}} for information.
  This enables RHS-truncation, i.e., something equivalent to
  \code{truncate = (U+1):Inf} for some upper support point \code{U}.
  }
  \item{imethod, ilambda}{
  See \code{\link{CommonVGAMffArguments}} for information.
% ipobs0,
  }
  \item{probs.y, ishrinkage}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{zero}{
  See \code{\link{CommonVGAMffArguments}} for information.
  Setting \code{zero = "pobs"} will model the multinomial
  probabilities as simply as possible (intercept-only), hence should
  be more numerically stable than the default, and this is
  recommended for many analyses especially when there are many
  explanatory variables.
  }
}
\details{
  The generally-truncated (GT) Poisson distribution is the ordinary
  Poisson distribution with the probability of certain values
  (\code{truncate} argument) being zero. Thus the other probabilities
  are scaled up.
  The (0-truncated) positive-Poisson distribution is a special case
  (\code{\link{pospoisson}}).

  This variant of the generally-altered (GA) Poisson distribution is
  the ordinary Poisson distribution with the probability of certain
  values (\code{alter} argument) being modelled using a multinomial
  logit model (MLM; see \code{\link{multinomial}}).
  The 0-altered Poisson distribution is a special case
  (\code{\link{zapoisson}}) and it is called a \emph{hurdle} model by
  some people.
% The other values are modelled using a
% \emph{generally-truncated Poisson} distribution.

  This function can fit both the GA and GT models simultaneously, and
  is called the GAT--Pois--MLM model.
  That is, each special value is altered or truncated but not both.
  The default settings make this family function equivalent to
  \code{\link{poissonff}}.
  The probabilities for the altered values are unstructured
  (nonparametric)---see \code{\link{gatpoisson.mix}} for an
  alternative variant that is more structured (parametric).
  This function currently does not handle multiple responses.
  Compared to \code{\link{gipoisson.mlm}}, this family function
  handles deflation and inflation, and therefore handles a wider
  range of data.
  For further details please see \code{\link{Gaitpois.mlm}}.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.

  The \code{fitted.values} slot of the fitted object, which should be
  extracted by the generic function \code{fitted}, returns the mean
  \eqn{\mu}{mu} (default).
  The choice \code{type.fitted = "pobs.a"} returns the sum of all the
  altered probabilities (Greek symbol omegas).
  The choice \code{type.fitted = "Pobs.a"} returns the individual
  altered probabilities, as a matrix.
  The choice \code{"prob.a"} means the probability of having an
  altered value, given the estimate of \code{lambda} from an ordinary
  Poisson distribution; it is summed over \code{alter} and evaluated
  at the parent PMF.
  The choice \code{"prob.t"} is similar to \code{"prob.a"} but summed
  over \code{truncate}.
  The choices \code{"prob.a"} and \code{"prob.t"} are likely to be
  used much less often than \code{type.fitted = "pobs.a"}.
}
\references{
  Yee, T. W. and Ma, C. C. (2019)
  Generally-altered, -inflated and -truncated count regression, with
  application to heaped and seeped data.
  \emph{In preparation}.
%, \bold{3}, 15--41.
}
%20111123; this has been fixed up with proper FS using EIM.
%\section{Warning }{
% Inference obtained from \code{summary.vglm}
% and \code{summary.vgam} may or may not be correct.
% In particular, the p-values, standard errors and degrees of
% freedom may need adjustment. Use simulation on artificial
% data to check that these are reasonable.
%
%
%}
\section{Warning }{
  Due to its flexibility, it is easy to misuse this function;
  the \code{truncate} vector should ideally not be very long and
  have values that can be justified by the application on hand.
  Likewise, the \code{alter} vector should be short too, and each
  value should have good justification for being included.
  Adding unnecessary values to these two arguments willy-nilly is a
  recipe for disaster.

  Regarding truncation, underflow or overflow may occur if the data
  are ill-conditioned.
  The response is checked to see that no values equal any values of
  the \code{truncate} vector.
}
\author{ T. W. Yee and Chenchen Ma}
\note{
  This family function does not yet have the robustness of
  \code{\link{multinomial}} when computing the working weight
  matrices.
  Several improvements are needed, e.g., better labelling, better
  initial values, and the handling of multiple responses.
% yettodo: see lines just above.

  This family function effectively combines what used to be
  \code{gapoisson()} and \code{gtpoisson()} together.
  The former effectively included \code{\link{multinomial}} inside it.
% This family function can handle multiple responses,
% e.g., more than one species.
}
\seealso{
  \code{\link{Gaitpois.mlm}},
  \code{\link{gatpoisson.mix}},
  \code{\link{gatnbinomial.mlm}},
  \code{\link{gatnbinomial.mix}},
  \code{\link{gipoisson.mlm}},
  \code{\link{zapoisson}},
  \code{\link{multinomial}},
  \code{\link{rpospois}},
  \code{\link{CommonVGAMffArguments}},
  \code{\link{simulate.vlm}}.
}
\examples{
avec <- 3  # Alter this value
tvec <- c(5, 7)  # Truncate these values
pobs.a <- logitlink(-2, inverse = TRUE)  # About 0.12
max.support <- 20
gdata <- data.frame(x2 = runif(nn <- 1000))
gdata <- transform(gdata, lambda1 = exp(1 + 0.5 * x2))
gdata <- transform(gdata,
  y1 = rgaitpois.mlm(nn, lambda = lambda1, pobs.a = pobs.a,
                     truncate = tvec, max.support = max.support,
                     byrow = TRUE, alter = avec))
gatpoisson.mlm(alter = avec)
with(gdata, table(y1))

fit1 <- vglm(y1 ~ x2, crit = "coef", trace = TRUE, data = gdata,
             gatpoisson.mlm(alter = avec, truncate = tvec,
                            max.support = max.support))
fit2 <- vglm(y1 ~ x2, crit = "coef", trace = TRUE, data = gdata,
             gatpoisson.mlm(alter = avec, truncate = tvec,
                            zero = "pobs", max.support = max.support))
head(fitted(fit2))
head(predict(fit2))
coef(fit2, matrix = TRUE)
summary(fit2)
}
\keyword{models}
\keyword{regression}

%gapoisson(lpobs0 = "logitlink", llambda = "loglink",
%  type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL)
%gapoissonff(llambda = "loglink", lonempobs0 = "logitlink",
%  type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0")
VGAM/man/poisson.points.Rd0000644000176200001440000000754113565414527015076 0ustar liggesusers\name{poisson.points}
\alias{poisson.points}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Poisson-points-on-a-plane/volume Distances Distribution }
\description{
  Estimates the density parameter using the distances from a fixed
  point to the u-th nearest point, in a plane or volume.
}
\usage{
poisson.points(ostatistic, dimension = 2, link = "loglink",
               idensity = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{ostatistic}{
  Order statistic.
  A single positive value, usually an integer.
  For example, the value 5 means the responses are the distances to
  the fifth nearest point (usually over many planes or volumes).
  Non-integers are allowed because the value 1.5 coincides with
  \code{\link{maxwell}} when \code{dimension = 2}.
  Note: if \code{ostatistic = 1} and \code{dimension = 2} then this
  \pkg{VGAM} family function coincides with \code{\link{rayleigh}}.
  }
  \item{dimension}{
  The value 2 or 3; 2 meaning a plane and 3 meaning a volume.
  }
  \item{link}{
  Parameter link function applied to the (positive) density parameter,
  called \eqn{\lambda}{lambda} below.
  See \code{\link{Links}} for more choices.
  }
  \item{idensity}{
  Optional initial value for the parameter.
  A \code{NULL} value means a value is obtained internally.
  Use this argument if convergence failure occurs.
  }
  \item{imethod}{
  An integer with value \code{1} or \code{2} which specifies the
  initialization method for \eqn{\lambda}{lambda}.
  If failure to converge occurs try the other value and/or specify a
  value for \code{idensity}.
  }
}
\details{
  Suppose the number of points in any region of area \eqn{A} of the
  plane is a Poisson random variable with mean \eqn{\lambda A}{lambda*A}
  (i.e., \eqn{\lambda}{lambda} is the \emph{density} of the points).
  Given a fixed point \eqn{P}, define \eqn{D_1}, \eqn{D_2}, \ldots to
  be the distances from \eqn{P} to its nearest point, second nearest
  point, etc.
  This \pkg{VGAM} family function estimates \eqn{\lambda}{lambda}
  since the probability density function for \eqn{D_u} is easily
  derived, \eqn{u=1,2,\ldots}{u=1,2,...}.
  Here, \eqn{u} corresponds to the argument \code{ostatistic}.

  Similarly, suppose the number of points in any volume \eqn{V} is a
  Poisson random variable with mean \eqn{\lambda V}{lambda*V} where,
  once again, \eqn{\lambda}{lambda} is the \emph{density} of the
  points.
  This \pkg{VGAM} family function estimates \eqn{\lambda}{lambda} by
  specifying the argument \code{ostatistic} and using
  \code{dimension = 3}.

  The mean of \eqn{D_u} is returned as the fitted values.
  Newton-Raphson is the same as Fisher scoring.
}
\section{Warning}{
  Convergence may be slow if the initial values are far from the
  solution. This often corresponds to the situation when the response
  values are all close to zero, i.e., there is a high density of
  points.

  Formulae such as the means have not been fully checked.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  \code{\link{rrvglm}} and \code{\link{vgam}}.
}
%\references{
%}
\author{ T. W. Yee }
%\note{
%}
\seealso{
  \code{\link{poissonff}},
  \code{\link{maxwell}},
  \code{\link{rayleigh}}.
}
\examples{
pdata <- data.frame(y = rgamma(10, shape = exp(-1)))  # Not proper data!
ostat <- 2
fit <- vglm(y ~ 1, poisson.points(ostat, 2), data = pdata,
            trace = TRUE, crit = "coef")
fit <- vglm(y ~ 1, poisson.points(ostat, 3), data = pdata,
            trace = TRUE, crit = "coef")  # Slow convergence?
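# If convergence is slow, supplying an initial value via idensity
# (documented above) may help, as in the next fit: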
fit <- vglm(y ~ 1, poisson.points(ostat, 3, idensi = 1), data = pdata,
            trace = TRUE, crit = "coef")
head(fitted(fit))
with(pdata, mean(y))
coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{models}
\keyword{regression}
VGAM/man/logitoffsetlink.Rd0000644000176200001440000000441113565414527015265 0ustar liggesusers\name{logitoffsetlink}
\alias{logitoffsetlink}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Logit-with-an-Offset Link Function }
\description{
  Computes the logitoffsetlink transformation, including its inverse
  and the first two derivatives.
}
\usage{
logitoffsetlink(theta, offset = 0, inverse = FALSE, deriv = 0,
                short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{theta}{
  Numeric or character.
  See below for further details.
  }
  \item{offset}{
  The offset value(s), which must be non-negative.
  It is called \eqn{K} below.
  }
  \item{inverse, deriv, short, tag}{
  Details at \code{\link{Links}}.
  }
}
\details{
  This link function allows for some asymmetry compared to the
  ordinary \code{\link{logitlink}} link.
  The formula is
  \deqn{\log(\theta/(1-\theta) - K)}{%
        log(theta/(1-theta) - K)}
  and the default value for the offset \eqn{K}, zero, corresponds to
  the ordinary \code{\link{logitlink}} link.
  When \code{inverse = TRUE} the value will lie in the interval
  \eqn{(K / (1+K), 1)}.
}
\value{
  For \code{logitoffsetlink} with \code{deriv = 0}, the
  logitoffsetlink of \code{theta}, i.e.,
  \code{log(theta/(1-theta) - K)} when \code{inverse = FALSE},
  and if \code{inverse = TRUE} then
  \code{(K + exp(theta))/(1 + exp(theta) + K)}.

  For \code{deriv = 1}, the function returns
  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of
  \code{theta} if \code{inverse = FALSE},
  else if \code{inverse = TRUE} then it returns the reciprocal.

  Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
}
\references{
  Komori, O. and Eguchi, S. et al. (2016)
  An asymmetric logistic model for ecological data.
  \emph{Methods in Ecology and Evolution}, \bold{7}.
}
\author{ Thomas W. Yee }
\note{
  This function is numerically less stable than
  \code{\link{logitlink}}.
}
\seealso{
  \code{\link{Links}},
  \code{\link{logitlink}}.
}
\examples{
p <- seq(0.05, 0.99, by = 0.01); myoff <- 0.05
logitoffsetlink(p, myoff)
max(abs(logitoffsetlink(logitoffsetlink(p, myoff),
        myoff, inverse = TRUE) - p))  # Should be 0
}
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/amlnormal.Rd0000644000176200001440000001427013565414527014050 0ustar liggesusers\name{amlnormal}
\alias{amlnormal}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Asymmetric Least Squares Quantile Regression }
\description{
  Asymmetric least squares, a special case of maximizing an asymmetric
  likelihood function of a normal distribution.
  This allows for expectile/quantile regression using asymmetric least
  squares error loss.
}
\usage{
amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identitylink",
          iexpectile = NULL, imethod = 1, digw = 4)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{w.aml}{
  Numeric, a vector of positive constants controlling the percentiles.
  The larger the value the larger the fitted percentile value
  (the proportion of points below the ``w-regression plane'').
  The default value of unity results in the ordinary least squares
  (OLS) solution.
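  For example, \code{w.aml = c(0.1, 1, 10)} fits three
  expectile curves simultaneously; see Example 3 below.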
  }
  \item{parallel}{
  If \code{w.aml} has more than one value then this argument allows
  the quantile curves to differ by the same amount as a function of
  the covariates.
  Setting this to be \code{TRUE} should force the quantile curves to
  not cross (although they may not cross anyway).
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
  \item{lexpectile, iexpectile}{
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
  \item{imethod}{
  Integer, either 1 or 2 or 3. Initialization method.
  Choose another value if convergence fails.
  }
  \item{digw}{
  Passed into \code{\link[base]{Round}} as the \code{digits} argument
  for the \code{w.aml} values; used cosmetically for labelling.
  }
}
\details{
  This is an implementation of Efron (1991) and full details can be
  obtained there.
  Equation numbers below refer to that article.
  The model is essentially a linear model
  (see \code{\link[stats]{lm}}); however, the asymmetric squared
  error loss function for a residual \eqn{r} is \eqn{r^2} if
  \eqn{r \leq 0}{r <= 0} and \eqn{w r^2}{w*r^2} if \eqn{r > 0}.
  The solution is the set of regression coefficients that minimize the
  sum of these over the data set, weighted by the \code{weights}
  argument (so that it can contain frequencies).
  Newton-Raphson estimation is used here.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\references{
  Efron, B. (1991)
  Regression percentiles using asymmetric squared error loss.
  \emph{Statistica Sinica}, \bold{1}, 93--125.
}
\author{ Thomas W. Yee }
\note{
  On fitting, the \code{extra} slot has list components \code{"w.aml"}
  and \code{"percentile"}. The latter is the percent of observations
  below the ``w-regression plane'', which is the fitted values.

  One difficulty is finding the \code{w.aml} value giving a specified
  percentile. One solution is to fit the model within a root finding
  function such as \code{\link[stats]{uniroot}};
  see the example below.

  For \code{amlnormal} objects, methods functions for the generic
  functions \code{qtplot} and \code{cdf} have not been written yet.

  See the note in \code{\link{amlpoisson}} on the jargon, including
  \emph{expectiles} and \emph{regression quantiles}.

  The \code{deviance} slot computes the total asymmetric squared error
  loss (2.5).
  If \code{w.aml} has more than one value then the value returned by
  the slot is the sum taken over all the \code{w.aml} values.

  In this documentation the word \emph{quantile} can often be
  interchangeably replaced by \emph{expectile}
  (things are informal here).
}
%\section{Warning }{
% The \code{loglikelihood} slot currently does not return the
% log-likelihood but negative the total asymmetric squared error
% loss (2.5).
% If \code{w} has more than one value then the value returned by
% \code{loglikelihood} is the sum taken over all the \code{w} values.
%}
\seealso{
  \code{\link{amlpoisson}},
  \code{\link{amlbinomial}},
  \code{\link{amlexponential}},
  \code{\link{bmi.nz}},
  \code{\link{alaplace1}},
  \code{\link{denorm}},
  \code{\link{lms.bcn}} and similar variants are alternative methods
  for quantile regression.
} \examples{ \dontrun{ # Example 1 ooo <- with(bmi.nz, order(age)) bmi.nz <- bmi.nz[ooo, ] # Sort by age (fit <- vglm(BMI ~ sm.bs(age), amlnormal(w.aml = 0.1), data = bmi.nz)) fit@extra # Gives the w value and the percentile coef(fit, matrix = TRUE) # Quantile plot with(bmi.nz, plot(age, BMI, col = "blue", main = paste(round(fit@extra$percentile, digits = 1), "expectile-percentile curve"))) with(bmi.nz, lines(age, c(fitted(fit)), col = "black")) # Example 2 # Find the w values that give the 25, 50 and 75 percentiles find.w <- function(w, percentile = 50) { fit2 <- vglm(BMI ~ sm.bs(age), amlnormal(w = w), data = bmi.nz) fit2@extra$percentile - percentile } # Quantile plot with(bmi.nz, plot(age, BMI, col = "blue", las = 1, main = "25, 50 and 75 expectile-percentile curves")) for (myp in c(25, 50, 75)) { # Note: uniroot() can only find one root at a time bestw <- uniroot(f = find.w, interval = c(1/10^4, 10^4), percentile = myp) fit2 <- vglm(BMI ~ sm.bs(age), amlnormal(w = bestw$root), data = bmi.nz) with(bmi.nz, lines(age, c(fitted(fit2)), col = "orange")) } # Example 3; this is Example 1 but with smoothing splines and # a vector w and a parallelism assumption. ooo <- with(bmi.nz, order(age)) bmi.nz <- bmi.nz[ooo, ] # Sort by age fit3 <- vgam(BMI ~ s(age, df = 4), data = bmi.nz, trace = TRUE, amlnormal(w = c(0.1, 1, 10), parallel = TRUE)) fit3@extra # The w values, percentiles and weighted deviances # The linear components of the fit; not for human consumption: coef(fit3, matrix = TRUE) # Quantile plot with(bmi.nz, plot(age, BMI, col="blue", main = paste(paste(round(fit3@extra$percentile, digits = 1), collapse = ", "), "expectile-percentile curves"))) with(bmi.nz, matlines(age, fitted(fit3), col = 1:fit3@extra$M, lwd = 2)) with(bmi.nz, lines(age, c(fitted(fit )), col = "black")) # For comparison } } \keyword{models} \keyword{regression} VGAM/man/huber.Rd0000644000176200001440000000645313565414527013177 0ustar liggesusers\name{huber2} \alias{huber2} \alias{huber1} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Huber's Least Favourable Distribution Family Function } \description{ M-estimation of the two parameters of Huber's least favourable distribution. The one parameter case is also implemented. } \usage{ huber1(llocation = "identitylink", k = 0.862, imethod = 1) huber2(llocation = "identitylink", lscale = "loglink", k = 0.862, imethod = 1, zero = "scale") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation, lscale}{ Link functions applied to the location and scale parameters. See \code{\link{Links}} for more choices. } \item{k}{ Tuning constant. See \code{\link{rhuber}} for more information. } \item{imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for information. The default value of \code{zero} means the scale parameter is modelled as intercept-only. } } \details{ Huber's least favourable distribution family function is popular for resistant/robust regression. The center of the distribution is normal and its tails are double exponential. By default, the mean is the first linear/additive predictor (returned as the fitted values; this is the location parameter), and the log of the scale parameter is the second linear/additive predictor. The Fisher information matrix is diagonal; Fisher scoring is implemented. The \pkg{VGAM} family function \code{huber1()} estimates only the location parameter. It assumes a scale parameter of unit value. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). 
The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Huber, P. J. and Ronchetti, E. (2009) \emph{Robust Statistics}, 2nd ed. New York: Wiley. } \author{ T. W. Yee. Help was given by Arash Ardalan. } \note{ Warning: actually, \code{huber2()} may be erroneous since the first derivative is not continuous when there are two parameters to estimate. \code{huber1()} is fine in this respect. The response should be univariate. } \seealso{ \code{\link{rhuber}}, \code{\link{uninormal}}, \code{\link{laplace}}, \code{\link{CommonVGAMffArguments}}. % \code{\link{gaussianff}}, } \examples{ set.seed(1231); NN <- 30; coef1 <- 1; coef2 <- 10 hdata <- data.frame(x2 = sort(runif(NN))) hdata <- transform(hdata, y = rhuber(NN, mu = coef1 + coef2 * x2)) hdata$x2[1] <- 0.0 # Add an outlier hdata$y[1] <- 10 fit.huber2 <- vglm(y ~ x2, huber2(imethod = 3), data = hdata, trace = TRUE) fit.huber1 <- vglm(y ~ x2, huber1(imethod = 3), data = hdata, trace = TRUE) coef(fit.huber2, matrix = TRUE) summary(fit.huber2) \dontrun{ # Plot the results plot(y ~ x2, data = hdata, col = "blue", las = 1) lines(fitted(fit.huber2) ~ x2, data = hdata, col = "darkgreen", lwd = 2) fit.lm <- lm(y ~ x2, hdata) # Compare to a LM: lines(fitted(fit.lm) ~ x2, data = hdata, col = "lavender", lwd = 3) # Compare to truth: lines(coef1 + coef2 * x2 ~ x2, data = hdata, col = "orange", lwd = 2, lty = "dashed") legend("bottomright", legend = c("truth", "huber", "lm"), col = c("orange", "darkgreen", "lavender"), lty = c("dashed", "solid", "solid"), lwd = c(2, 2, 3)) } } \keyword{models} \keyword{regression} VGAM/man/brat.Rd0000644000176200001440000001131713565414527013015 0ustar liggesusers\name{brat} \alias{brat} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bradley Terry Model } \description{ Fits a Bradley Terry model (intercept-only model) by maximum likelihood estimation. } \usage{ brat(refgp = "last", refvalue = 1, ialpha = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{refgp}{ Integer whose value must be from the set \{1,\ldots,\eqn{M+1}\}, where there are \eqn{M+1} competitors. The default value indicates the last competitor is used---but don't input a character string, in general. } \item{refvalue}{ Numeric. A positive value for the reference group. } \item{ialpha}{ Initial values for the \eqn{\alpha}{alpha}s. These are recycled to the appropriate length. } } \details{ The Bradley Terry model involves \eqn{M+1} competitors who either win or lose against each other (no draws/ties allowed in this implementation--see \code{\link{bratt}} if there are ties). The probability that Competitor \eqn{i} beats Competitor \eqn{j} is \eqn{\alpha_i / (\alpha_i+\alpha_j)}{alpha_i / (alpha_i + alpha_j)}, where all the \eqn{\alpha}{alpha}s are positive. Loosely, the \eqn{\alpha}{alpha}s can be thought of as the competitors' `abilities'. For identifiability, one of the \eqn{\alpha_i}{alpha_i} is set to a known value \code{refvalue}, e.g., 1. By default, this function chooses the last competitor to have this reference value. The data can be represented in the form of a \eqn{M+1} by \eqn{M+1} matrix of counts, where winners are the rows and losers are the columns. However, this is not the way the data should be inputted (see below). Excluding the reference value/group, this function chooses \eqn{\log(\alpha_j)}{log(alpha_j)} as the \eqn{M} linear predictors. The log link ensures that the \eqn{\alpha}{alpha}s are positive. 
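  For example, \code{refgp = 1} makes the first competitor the
  reference group, as in the example below.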
The Bradley Terry model can be fitted by logistic regression, but this approach is not taken here. The Bradley Terry model can be fitted with covariates, e.g., a home advantage variable, but unfortunately, this lies outside the VGLM theoretical framework and therefore cannot be handled with this code. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}. } \references{ Agresti, A. (2013) \emph{Categorical Data Analysis}, 3rd ed. Hoboken, NJ, USA: Wiley. Stigler, S. (1994) Citation patterns in the journals of statistics and probability. \emph{Statistical Science}, \bold{9}, 94--108. The \pkg{BradleyTerry2} package has more comprehensive capabilities than this function. } \author{ T. W. Yee } \note{ The function \code{\link{Brat}} is useful for coercing a \eqn{M+1} by \eqn{M+1} matrix of counts into a one-row matrix suitable for \code{brat}. Diagonal elements are skipped, and the usual S order of \code{c(a.matrix)} of elements is used. There should be no missing values apart from the diagonal elements of the square matrix. The matrix should have winners as the rows, and losers as the columns. In general, the response should be a 1-row matrix with \eqn{M(M+1)} columns. Only an intercept model is recommended with \code{brat}. It doesn't make sense really to include covariates because of the limited VGLM framework. Notationally, note that the \pkg{VGAM} family function \code{\link{brat}} has \eqn{M+1} contestants, while \code{bratt} has \eqn{M} contestants. } \section{Warning }{ Presently, the residuals are wrong, and the prior weights are not handled correctly. Ideally, the total number of counts should be the prior weights, after the response has been converted to proportions. This would make it similar to family functions such as \code{\link{multinomial}} and \code{\link{binomialff}}. } \seealso{ \code{\link{bratt}}, \code{\link{Brat}}, \code{\link{multinomial}}, \code{\link{binomialff}}. } \examples{ # Citation statistics: being cited is a 'win'; citing is a 'loss' journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B") mat <- matrix(c( NA, 33, 320, 284, 730, NA, 813, 276, 498, 68, NA, 325, 221, 17, 142, NA), 4, 4) dimnames(mat) <- list(winner = journal, loser = journal) fit <- vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE) fit <- vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE, crit = "coef") summary(fit) c(0, coef(fit)) # Log-abilities (in order of "journal") c(1, Coef(fit)) # Abilities (in order of "journal") fitted(fit) # Probabilities of winning in awkward form (check <- InverseBrat(fitted(fit))) # Probabilities of winning check + t(check) # Should be 1's in the off-diagonals } \keyword{models} \keyword{regression} VGAM/man/gpd.Rd0000644000176200001440000002311213565414527012633 0ustar liggesusers\name{gpd} \alias{gpd} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generalized Pareto Distribution Regression Family Function } \description{ Maximum likelihood estimation of the 2-parameter generalized Pareto distribution (GPD). } \usage{ gpd(threshold = 0, lscale = "loglink", lshape = logofflink(offset = 0.5), percentiles = c(90, 95), iscale = NULL, ishape = NULL, tolshape0 = 0.001, type.fitted = c("percentiles", "mean"), imethod = 1, zero = "shape") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{threshold}{ Numeric, values are recycled if necessary. The threshold value(s), called \eqn{\mu}{mu} below. 
} \item{lscale}{ Parameter link function for the scale parameter \eqn{\sigma}{sigma}. See \code{\link{Links}} for more choices. } \item{lshape}{ Parameter link function for the shape parameter \eqn{\xi}{xi}. See \code{\link{Links}} for more choices. The default constrains the parameter to be greater than \eqn{-0.5} because if \eqn{\xi \leq -0.5}{xi <= -0.5} then Fisher scoring does not work. See the Details section below for more information. For the shape parameter, the default \code{\link{logofflink}} link has an offset called \eqn{A} below; and then the second linear/additive predictor is \eqn{\log(\xi+A)}{log(xi+A)} which means that \eqn{\xi > -A}{xi > -A}. The working weight matrices are positive definite if \eqn{A = 0.5}. } % \item{Offset}{ % Numeric, of length 1. % Called \eqn{A} below. % Offset value if \code{lshape = "logofflink"}. % Then the second linear/additive predictor is % \eqn{\log(\xi+A)}{log(xi+A)} which means that % \eqn{\xi > -A}{xi > -A}. % The working weight matrices are positive definite if \code{Offset = 0.5}. % } \item{percentiles}{ Numeric vector of percentiles used for the fitted values. Values should be between 0 and 100. See the example below for illustration. This argument is ignored if \code{type.fitted = "mean"}. % However, if \code{percentiles = NULL} then the mean % \eqn{\mu + \sigma / (1-\xi)}{mu + sigma / (1-xi)} is returned; % this is only defined if \eqn{\xi<1}{xi<1}. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for information. The default is to use the \code{percentiles} argument. If \code{"mean"} is chosen, then the mean \eqn{\mu + \sigma / (1-\xi)}{mu + sigma / (1-xi)} is returned as the fitted values, and these are only defined for \eqn{\xi<1}{xi<1}. } \item{iscale, ishape}{ Numeric. Optional initial values for \eqn{\sigma}{sigma} and \eqn{\xi}{xi}. The default is to use \code{imethod} and compute a value internally for each parameter. Values of \code{ishape} should be between \eqn{-0.5} and \eqn{1}. Values of \code{iscale} should be positive. } % \item{rshape}{ % Numeric, of length 2. % Range of \eqn{\xi}{xi} if \code{lshape = "extlogitlink"} is chosen. % The default values ensures the algorithm works (\eqn{\xi > -0.5}{xi > -0.5}) % and the variance exists (\eqn{\xi < 0.5}{xi < 0.5}). % } \item{tolshape0}{ Passed into \code{\link{dgpd}} when computing the log-likelihood. } % \item{tolshape0}{ % Positive numeric. % Threshold/tolerance value for resting whether \eqn{\xi}{xi} is zero. % If the absolute value of the estimate of \eqn{\xi}{xi} is less than % this value then it will be assumed zero and exponential distribution % derivatives etc. will be used. % } \item{imethod}{ Method of initialization, either 1 or 2. The first is the method of moments, and the second is a variant of this. If neither work, try assigning values to arguments \code{ishape} and/or \code{iscale}. } \item{zero}{ Can be an integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. For one response, the value should be from the set \{1,2\} corresponding respectively to \eqn{\sigma}{sigma} and \eqn{\xi}{xi}. It is often a good idea for the \eqn{\sigma}{sigma} parameter only to be modelled through a linear combination of the explanatory variables because the shape parameter is probably best left as an intercept only: \code{zero = 2}. Setting \code{zero = NULL} means both parameters are modelled with explanatory variables. See \code{\link{CommonVGAMffArguments}} for more details. 
} } \details{ The distribution function of the GPD can be written
\deqn{G(y) = 1 - [1 + \xi (y-\mu) / \sigma ]_{+}^{- 1/ \xi} }{%
      G(y) = 1 - [1 + xi (y-mu)/ sigma ]_{+}^{- 1/ xi} }
where \eqn{\mu}{mu} is the location parameter
(known, with value \code{threshold}),
\eqn{\sigma > 0}{sigma > 0} is the scale parameter,
\eqn{\xi}{xi} is the shape parameter, and
\eqn{h_+ = \max(h,0)}{h_+ = max(h,0)}.
The function \eqn{1-G} is known as the \emph{survivor function}.
The limit \eqn{\xi \rightarrow 0}{xi --> 0} gives the
\emph{shifted exponential} as a special case:
\deqn{G(y) = 1 - \exp[-(y-\mu)/ \sigma]. }{%
      G(y) = 1 - exp[-(y-mu)/ sigma]. }
The support is \eqn{y>\mu}{y>mu} for \eqn{\xi>0}{xi>0},
and \eqn{\mu < y <\mu-\sigma / \xi}{mu < y < mu-sigma/xi}
for \eqn{\xi<0}{xi<0}.
Smith (1985) showed that if \eqn{\xi \leq -0.5}{xi <= -0.5} then
this is known as the nonregular case and problems/difficulties
can arise both theoretically and numerically.
For the (regular) case \eqn{\xi > -0.5}{xi > -0.5} the classical
asymptotic theory of maximum likelihood estimators is applicable;
this is the default.
Although for \eqn{\xi < -0.5}{xi < -0.5} the usual asymptotic properties
do not apply, the maximum likelihood estimator generally exists and
is superefficient for \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5},
so it is ``better'' than normal.
When \eqn{\xi < -1}{xi < -1} the maximum likelihood estimator generally
does not exist as it effectively becomes a two-parameter problem.
The mean of \eqn{Y} does not exist unless \eqn{\xi < 1}{xi < 1}, and
the variance does not exist unless \eqn{\xi < 0.5}{xi < 0.5}.
So if you want to fit a model with finite variance use
\code{lshape = "extlogitlink"}.
} \note{
The response in the formula of \code{\link{vglm}} and \code{\link{vgam}}
is \eqn{y}.
Internally, \eqn{y-\mu}{y-mu} is computed.
This \pkg{VGAM} family function can handle multiple responses,
which are inputted as a matrix.
The response stored on the object is the original uncentred data.
With functions \code{\link{rgpd}}, \code{\link{dgpd}}, etc.,
the argument \code{location} matches with the argument
\code{threshold} here.
} \section{Warning}{
Fitting the GPD by maximum likelihood estimation can be numerically
fraught.
If \eqn{1 + \xi (y-\mu)/ \sigma \leq 0}{1 + xi*(y-mu)/sigma <= 0}
then some crude evasive action is taken but the estimation process
can still fail.
This is particularly the case if \code{\link{vgam}} with
\code{\link{s}} is used.
Then smoothing is best done with \code{\link{vglm}} with
regression splines (\code{\link[splines]{bs}} or
\code{\link[splines]{ns}}) because \code{\link{vglm}} implements
half-stepsizing whereas \code{\link{vgam}} doesn't.
Half-stepsizing helps handle the problem of straying outside
the parameter space.
} \value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
However, for this \pkg{VGAM} family function, \code{\link{vglm}}
is probably preferred over \code{\link{vgam}} when there is smoothing.
} \references{
Yee, T. W. and Stephenson, A. G. (2007)
Vector generalized linear and additive extreme value models.
\emph{Extremes}, \bold{10}, 1--19.
Coles, S. (2001)
\emph{An Introduction to Statistical Modeling of Extreme Values}.
London: Springer-Verlag.
Smith, R. L. (1985)
Maximum likelihood estimation in a class of nonregular cases.
\emph{Biometrika}, \bold{72}, 67--90.
} \author{ T. W. Yee } \seealso{
\code{\link{rgpd}}, \code{\link{meplot}}, \code{\link{gev}},
\code{\link{paretoff}}, \code{\link{vglm}}, \code{\link{vgam}},
\code{\link{s}}.
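% The cdf displayed in Details can be spot-checked against pgpd()
% (argument names as in this package's dgpd()/pgpd()):
% mu <- 0.5; sigma <- 2; xi <- 0.3; y <- 3
% pgpd(y, location = mu, scale = sigma, shape = xi)
% 1 - (1 + xi * (y - mu) / sigma)^(-1 / xi)  # Should agree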
} \examples{ # Simulated data from an exponential distribution (xi = 0) Threshold <- 0.5 gdata <- data.frame(y1 = Threshold + rexp(n = 3000, rate = 2)) fit <- vglm(y1 ~ 1, gpd(threshold = Threshold), data = gdata, trace = TRUE) head(fitted(fit)) summary(depvar(fit)) # The original uncentred data coef(fit, matrix = TRUE) # xi should be close to 0 Coef(fit) summary(fit) head(fit@extra$threshold) # Note the threshold is stored here # Check the 90 percentile ii <- depvar(fit) < fitted(fit)[1, "90\%"] 100 * table(ii) / sum(table(ii)) # Should be 90% # Check the 95 percentile ii <- depvar(fit) < fitted(fit)[1, "95\%"] 100 * table(ii) / sum(table(ii)) # Should be 95% \dontrun{ plot(depvar(fit), col = "blue", las = 1, main = "Fitted 90\% and 95\% quantiles") matlines(1:length(depvar(fit)), fitted(fit), lty = 2:3, lwd = 2) } # Another example gdata <- data.frame(x2 = runif(nn <- 2000)) Threshold <- 0; xi <- exp(-0.8) - 0.5 gdata <- transform(gdata, y2 = rgpd(nn, scale = exp(1 + 0.1*x2), shape = xi)) fit <- vglm(y2 ~ x2, gpd(Threshold), data = gdata, trace = TRUE) coef(fit, matrix = TRUE) \dontrun{ # Nonparametric fits # Not so recommended: fit1 <- vgam(y2 ~ s(x2), gpd(Threshold), data = gdata, trace = TRUE) par(mfrow = c(2, 1)) plot(fit1, se = TRUE, scol = "blue") # More recommended: fit2 <- vglm(y2 ~ sm.bs(x2), gpd(Threshold), data = gdata, trace = TRUE) plot(as(fit2, "vgam"), se = TRUE, scol = "blue") } } \keyword{models} \keyword{regression} % % # gdata <- transform(gdata, yy = y2 + rnorm(nn, sd = 0.1)) % % giveWarning = TRUE, imethod = 1, zero = "shape" VGAM/man/sm.os.Rd0000644000176200001440000003376613565414527013140 0ustar liggesusers\name{sm.os} \alias{sm.os} % % % 20161028; 20161213 % % % %- Also NEED an `\alias' for EACH other topic documented here. \title{ Defining O'Sullivan Spline Smooths in VGAM Formulas } \description{ This function represents an O-spline smooth term in a \code{vgam} formula and confers automatic smoothing parameter selection. } \usage{ sm.os(x, ..., niknots = 6, spar = -1, o.order = 2, alg.niknots = c("s", ".nknots.smspl")[1], all.knots = FALSE, ridge.adj = 1e-5, spillover = 0.01, maxspar = 1e12, outer.ok = FALSE, fixspar = FALSE) } % degree = 3, %- maybe also `usage' for other objects documented here. \arguments{ \item{x}{ covariate (abscissae) to be smoothed. Also called the regressor. If the \code{xij} facility is used then these covariates are inputted via the \code{\dots} argument. % Currently at least 7 unique \code{x} values are needed. } \item{\dots}{ Used to accommodate the other \eqn{M-1} covariates when the \code{xij} facility is used. See Section 3.4.4 of Yee (2015) for something very similar. This argument, found in the second argument, means that the other argument names must be fully specified if used, e.g., \code{outer.ok} and not \code{outer}. See the example below. In the example below, the term in the main formula is \code{sm.os(gcost.air, gcost.trn, gcost.bus)} and one might be tempted to use something like \code{sm.os(gcost)} to represent that \code{xij} term. However, this is not recommended because \code{sm.os(gcost)} might not have the same number of columns as \code{sm.os(gcost.air, gcost.trn, gcost.bus)} etc. That is, it is best to select one of the diagonal elements of the block matrix to represent that term. } \item{niknots}{ numeric, the number of \emph{interior} knots, called \eqn{K} below. The default is to use this value. If you want \code{alg.niknots} to operate then assign \code{NULL} to this argument. 
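% A quick dimension sketch (assuming the default o.order = 2, so that
% the basis has K + 2 * o.order - 1 columns; see the Value section):
% ncol(sm.os(runif(100), niknots = 6))  # Expect 6 + 4 - 1 = 9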
} \item{alg.niknots}{
character.
The algorithm used to determine the number of interior knots.
Only used when \code{all.knots = FALSE} and \code{niknots = NULL}.
Note that \code{".nknots.smspl"} corresponds to the default of
\code{\link[stats]{smooth.spline}}.
The value \code{"s"} corresponds to the same algorithm as
\code{\link[VGAM]{s}}.
% the other algorithms tend to give fewer knots than this choice
% because when the model's \eqn{M} is large then the number
% of parameters to be estimated and the amount of memory
% used quickly grows.
} \item{all.knots}{
logical.
If \code{TRUE} then all distinct points in \code{x} are used as
the interior knots.
If \code{FALSE} (default) then a subset of \code{x[]} is used,
specifically \code{x[j]} where the \code{niknots} indices are
quantiles that are evenly spaced with respect to the argument
\code{probs}---see \code{\link[stats]{quantile}}.
If \code{all.knots = FALSE} and \code{niknots = NULL} then the
argument \code{alg.niknots} is used to compute \code{niknots}.
} \item{spar, maxspar}{
\code{spar} is a vector of smoothing parameters.
Negative values mean that \code{\link[mgcv]{magic}} will choose
initial values in order to do the optimization at
each P-IRLS iteration.
Positive values mean that they are used as initial values for
\code{\link[mgcv]{magic}}.
If \code{fixspar = TRUE} then \code{spar} should be assigned a vector
of positive values (but having values less than \code{maxspar});
then the smoothing parameters will be fixed and
\code{\link[mgcv]{magic}} will not be used.
% non-negative regularization parameters for difference penalty,
% whose values should be less than \code{maxspar}.
% Can be a vector.
% zz.
}
% \item{degree}{
% degree of B-spline basis.
% Currently only the value 3 is implemented.
% In the future one should usually assign 2 or 3; and
% the values 1 or 4 might possibly be recommended.
% zz--this argument may be unneeded.
% }
\item{o.order}{
The order of the O'Sullivan penalized spline.
Any one value from \code{1:4} is acceptable.
The degree of the spline is \code{2 * o.order - 1}, so that cubic
splines are the default.
Setting \code{o.order = 1} results in a linear spline, i.e., a
piecewise-linear function.
% (p.191 ANZJS).
} \item{ridge.adj}{
small positive number to stabilize
linear dependencies among B-spline bases.
} \item{spillover}{
small and positive proportion of the range used on the outside
of the boundary values.
This defines the endpoints \eqn{a} and \eqn{b} that cover the
data \eqn{x_i}, i.e., we are interested in the interval
\eqn{[a,b]} which contains all the abscissae.
The interior knots are strictly inside \eqn{(a,b)}.
% Untrue, see ANZJS.
% Set \code{spillover = 0} to obtain the natural boundary conditions
% (NBCs), hence a fit based on natural splines.
} \item{outer.ok}{
Fed into the argument (by the same name) of
\code{\link[splines]{splineDesign}}.
} \item{fixspar}{
logical.
If \code{TRUE} then \code{spar} should be a vector with positive
values and the smoothing parameters are fixed at those values.
If \code{FALSE} then \code{spar} contains the initial values for
the smoothing parameters, and \code{\link[mgcv]{magic}} is called
to determine (hopefully) some good values for the smoothing
parameters.
} } \details{
This function is currently used by \code{\link{vgam}} to allow
automatic smoothing parameter selection based on O-splines to
minimize an UBRE quantity.
In contrast, \code{\link{s}} operates by having a prespecified
amount of smoothing, e.g., its \code{df} argument.
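% A minimal sketch contrasting the two approaches on simulated data:
% set.seed(1); sdata <- data.frame(x2 = runif(500))
% sdata <- transform(sdata, y1 = rpois(500, exp(sin(2 * x2))))
% fit.os <- vgam(y1 ~ sm.os(x2), poissonff, data = sdata)      # Automatic spar
% fit.s  <- vgam(y1 ~ s(x2, df = 3), poissonff, data = sdata)  # Prespecified df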
When the sample size is reasonably large this function is recommended over \code{\link{s}} also because backfitting is not required. This function therefore allows 2nd-generation VGAMs to be fitted (called G2-VGAMs, or Penalized-VGAMs). % A similar function is \code{\link{s}} which has a prespecified % amount of smoothing. This function should only be used with \code{\link{vgam}}. This function uses \code{\link[stats]{quantile}} to choose the knots, whereas \code{\link{sm.ps}} chooses equally-spaced knots. As Wand and Ormerod (2008) write, in most situations the differences will be minor, but it is possible for problems to arise for either strategy by constructing certain regression functions and predictor variable distributions. Any differences between O-splines and P-splines tend to be at the boundaries. O-splines have \emph{natural boundary constraints} so that the solution is linear beyond the boundary knots. Some arguments in decreasing order of precedence are: \code{all.knots}, \code{niknots}, \code{alg.niknots}. Unlike \code{\link[VGAM]{s}}, which is symbolic and does not perform any smoothing itself, this function does compute the penalized spline when used by \code{\link{vgam}}---it creates the appropriate columns of the model matrix. When this function is used within \code{\link{vgam}}, automatic smoothing parameter selection is implemented by calling \code{\link[mgcv]{magic}} after the necessary link-ups are done. By default this function centres the component function. This function is also \emph{smart}; it can be used for smart prediction (Section 18.6 of Yee (2015)). Automatic smoothing parameter selection is performed using \emph{performance-oriented iteration} whereby an optimization problem is solved at each IRLS iteration. % Occasionally there are convergence problems for this. % Eventually, in most cases, both model parameter estimates and % smoothing parameter estimates converge. This function works better when the sample size is large, e.g., when in the hundreds, say. % Also, if \eqn{n} is the number of \emph{distinct} abscissae, then % \code{sm.os} will fail if \eqn{n < 7}. % Unlike \code{\link[VGAM]{s}}, which is symbolic and does not perform % any smoothing itself, this function does compute the penalized spline % when used by \code{\link{vgam}}---it creates the appropriate columns % of the model matrix. When this function is used within % \code{\link{vgam}}, automatic smoothing parameter selection is % implemented by calling \code{\link[mgcv]{magic}} after the necessary % link-ups are done. % By default this function centres every component function. % This function is also \emph{smart}; it can be used for smart prediction % (Section 18.6 of Yee (2015)). % Automatic smoothing parameter selection is performed using % \emph{performance-oriented iteration} whereby an optimization % problem is solved at each IRLS iteration. % Occasionally there are convergence problems for this. % Eventually, in most cases, both model parameter estimates and % smoothing parameter estimates converge. } \value{ A matrix with attributes that are (only) used by \code{\link{vgam}}. The number of rows of the matrix is \code{length(x)}. The number of columns is a function of the number of interior knots \eqn{K} and the order of the O-spline \eqn{m}: \eqn{K+2m-1}. In code, this is \code{niknots + 2 * o.order - 1}, or using \code{\link{sm.ps}}-like arguments, \code{ps.int + degree - 1} (where \code{ps.int} should be more generally interpreted as the number of intervals. 
The formula is the same as \code{\link{sm.ps}}.). It transpires then that \code{\link{sm.os}} and \code{\link{sm.ps}} are very similar. % are very similar wrt return value, and % the the number of the knots; % but not wrt the location of the knots. % The \eqn{-1} is because of the centring. } \references{ Wand, M. P. and Ormerod, J. T. (2008). On semiparametric regression with O'Sullivan penalized splines. \emph{Australian and New Zealand Journal of Statistics}, \bold{50}(2): 179--198. %Wood, S. N. (2004). %Stable and efficient multiple smoothing parameter estimation %for generalized additive models. %\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686. %Yee, T. W. (2016). %Comments on ``Smoothing parameter and model selection for %general smooth models'' %by Wood, S. N. and Pya, N. and Safken, N., %\emph{J. Amer. Statist. Assoc.}, \bold{110}(516). } \author{ T. W. Yee, with some of the essential R code coming from the appendix of Wand and Ormerod (2008). } \note{ This function is currently under development and may change in the future. One might try using this function with \code{\link{vglm}} so as to fit a regression spline, however, the default value of \code{niknots} will probably be too high for most data sets. % In particular, the default for \code{ps.int} is % subject to change. } % ~Make other sections like WARNING with \section{WARNING }{....} ~ \section{Warning }{ Being introduced into \pkg{VGAM} for the first time, this function (and those associated with it) should be used cautiously. Not all options are fully working or have been tested yet, and there are bound to be some bugs lurking around. } \seealso{ \code{\link{vgam}}, \code{\link{sm.ps}}, \code{\link{s}}, \code{\link{smartpred}}, \code{\link{is.smart}}, \code{\link{summarypvgam}}, \code{\link[stats]{smooth.spline}}, \code{\link[splines]{splineDesign}}, \code{\link[splines]{bs}}, \code{\link[mgcv]{magic}}. 
} \examples{ sm.os(runif(20)) \dontrun{ data("TravelMode", package = "AER") # Need to install "AER" first air.df <- subset(TravelMode, mode == "air") # Form 4 smaller data frames bus.df <- subset(TravelMode, mode == "bus") trn.df <- subset(TravelMode, mode == "train") car.df <- subset(TravelMode, mode == "car") TravelMode2 <- data.frame(income = air.df$income, wait.air = air.df$wait - car.df$wait, wait.trn = trn.df$wait - car.df$wait, wait.bus = bus.df$wait - car.df$wait, gcost.air = air.df$gcost - car.df$gcost, gcost.trn = trn.df$gcost - car.df$gcost, gcost.bus = bus.df$gcost - car.df$gcost, wait = air.df$wait) # Value is unimportant TravelMode2$mode <- subset(TravelMode, choice == "yes")$mode # The response TravelMode2 <- transform(TravelMode2, incom.air = income, incom.trn = 0, incom.bus = 0) set.seed(1) TravelMode2 <- transform(TravelMode2, junkx2 = runif(nrow(TravelMode2))) tfit2 <- vgam(mode ~ sm.os(gcost.air, gcost.trn, gcost.bus) + ns(junkx2, 4) + sm.os(incom.air, incom.trn, incom.bus) + wait , crit = "coef", multinomial(parallel = FALSE ~ 1), data = TravelMode2, xij = list(sm.os(gcost.air, gcost.trn, gcost.bus) ~ sm.os(gcost.air, gcost.trn, gcost.bus) + sm.os(gcost.trn, gcost.bus, gcost.air) + sm.os(gcost.bus, gcost.air, gcost.trn), sm.os(incom.air, incom.trn, incom.bus) ~ sm.os(incom.air, incom.trn, incom.bus) + sm.os(incom.trn, incom.bus, incom.air) + sm.os(incom.bus, incom.air, incom.trn), wait ~ wait.air + wait.trn + wait.bus), form2 = ~ sm.os(gcost.air, gcost.trn, gcost.bus) + sm.os(gcost.trn, gcost.bus, gcost.air) + sm.os(gcost.bus, gcost.air, gcost.trn) + wait + sm.os(incom.air, incom.trn, incom.bus) + sm.os(incom.trn, incom.bus, incom.air) + sm.os(incom.bus, incom.air, incom.trn) + junkx2 + ns(junkx2, 4) + incom.air + incom.trn + incom.bus + gcost.air + gcost.trn + gcost.bus + wait.air + wait.trn + wait.bus) par(mfrow = c(2, 2)) plot(tfit2, se = TRUE, lcol = "orange", scol = "blue", ylim = c(-4, 4)) summary(tfit2) } } \keyword{models} \keyword{regression} \keyword{smooth} % binom2.or(exchangeable = TRUE ~ s(x2, 3)) VGAM/man/paralogisticUC.Rd0000644000176200001440000000410113565414527014767 0ustar liggesusers\name{Paralogistic} \alias{Paralogistic} \alias{dparalogistic} \alias{pparalogistic} \alias{qparalogistic} \alias{rparalogistic} \title{The Paralogistic Distribution} \description{ Density, distribution function, quantile function and random generation for the paralogistic distribution with shape parameter \code{a} and scale parameter \code{scale}. } \usage{ dparalogistic(x, scale = 1, shape1.a, log = FALSE) pparalogistic(q, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE) qparalogistic(p, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE) rparalogistic(n, scale = 1, shape1.a) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1.a}{shape parameter.} \item{scale}{scale parameter.} \item{log}{ Logical. If \code{log=TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dparalogistic} gives the density, \code{pparalogistic} gives the distribution function, \code{qparalogistic} gives the quantile function, and \code{rparalogistic} generates random deviates. } \references{ Kleiber, C. and Kotz, S. 
(2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{paralogistic}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The paralogistic distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{paralogistic}}, \code{\link{genbetaII}}. } \examples{ pdata <- data.frame(y = rparalogistic(n = 3000, scale = exp(1), exp(2))) fit <- vglm(y ~ 1, paralogistic(lss = FALSE, ishape1.a = 4.1), data = pdata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) } \keyword{distribution} VGAM/man/Inv.gaussian.Rd0000644000176200001440000000414013565414527014426 0ustar liggesusers\name{Inv.gaussian} \alias{Inv.gaussian} \alias{dinv.gaussian} \alias{pinv.gaussian} \alias{rinv.gaussian} \title{The Inverse Gaussian Distribution} \description{ Density, distribution function and random generation for the inverse Gaussian distribution. } \usage{ dinv.gaussian(x, mu, lambda, log = FALSE) pinv.gaussian(q, mu, lambda) rinv.gaussian(n, mu, lambda) } \arguments{ \item{x, q}{vector of quantiles.} %%\item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{mu}{the mean parameter.} \item{lambda}{the \eqn{\lambda}{lambda} parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } } \value{ \code{dinv.gaussian} gives the density, \code{pinv.gaussian} gives the distribution function, and \code{rinv.gaussian} generates random deviates. % \code{qinv.gaussian} gives the quantile function, and } \references{ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994) \emph{Continuous Univariate Distributions}, 2nd edition, Volume 1, New York: Wiley. Taraldsen, G. and Lindqvist, B. H. (2005) The multiple roots simulation algorithm, the inverse Gaussian distribution, and the sufficient conditional Monte Carlo method. \emph{Preprint Statistics No. 4/2005}, Norwegian University of Science and Technology, Trondheim, Norway. } \author{ T. W. Yee } \details{ See \code{\link{inv.gaussianff}}, the \pkg{VGAM} family function for estimating both parameters by maximum likelihood estimation, for the formula of the probability density function. } \note{ Currently \code{qinv.gaussian} is unavailable. } \seealso{ \code{\link{inv.gaussianff}}, \code{\link{waldff}}. } \examples{ \dontrun{ x <- seq(-0.05, 4, len = 300) plot(x, dinv.gaussian(x, mu = 1, lambda = 1), type = "l", col = "blue",las = 1, main = "blue is density, orange is cumulative distribution function") abline(h = 0, col = "gray", lty = 2) lines(x, pinv.gaussian(x, mu = 1, lambda = 1), type = "l", col = "orange") } } \keyword{distribution} VGAM/man/genbetaIIUC.Rd0000644000176200001440000000445713565414527014153 0ustar liggesusers\name{GenbetaII} \alias{GenbetaII} \alias{dgenbetaII} %\alias{pgenbetaII} %\alias{qgenbetaII} %\alias{rgenbetaII} \title{The Generalized Beta II Distribution} \description{ Density for the generalized beta II distribution with shape parameters \code{a} and \code{p} and \code{q}, and scale parameter \code{scale}. 
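% As noted on the Paralogistic page, that distribution is a special
% case of this one; assuming the usual Kleiber--Kotz mapping
% (shape2.p = 1 and shape3.q = shape1.a), a numerical sketch is:
% dparalogistic(2, scale = 3, shape1.a = 4)
% dgenbetaII(2, scale = 3, shape1.a = 4,
%            shape2.p = 1, shape3.q = 4)  # Should agree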
% distribution function, quantile function and random generation
} \usage{
dgenbetaII(x, scale = 1, shape1.a, shape2.p, shape3.q, log = FALSE)
}
%pgenbetaII(q, scale = 1, shape1.a, shape2.p, shape3.q,
%           lower.tail = TRUE, log.p = FALSE)
%qgenbetaII(p, scale = 1, shape1.a, shape2.p, shape3.q,
%           lower.tail = TRUE, log.p = FALSE)
%rgenbetaII(n, scale = 1, shape1.a, shape2.p, shape3.q)
\arguments{
% \item{x, q}{vector of quantiles.}
\item{x}{vector of quantiles.}
% \item{p}{vector of probabilities.}
% \item{n}{number of observations. If \code{length(n) > 1}, the length
%   is taken to be the number required.}
\item{shape1.a, shape2.p, shape3.q}{positive shape parameters.}
\item{scale}{positive scale parameter.}
\item{log}{
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.
}
% \item{lower.tail, log.p}{
% Same meaning as in \code{\link[stats:Normal]{pnorm}}
% or \code{\link[stats:Normal]{qnorm}}.
% }
} \value{
\code{dgenbetaII} gives the density.
% \code{pgenbetaII} gives the distribution function,
% \code{qgenbetaII} gives the quantile function, and
% \code{rgenbetaII} generates random deviates.
} \references{
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
} \author{ T. W. Yee } \details{
See \code{\link{genbetaII}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
Several distributions, such as the Singh-Maddala, are special cases
of this flexible 4-parameter distribution.
The product of \code{shape1.a} and \code{shape2.p} determines the
behaviour of the density at the origin.
}
%\note{
%
%
%}
\seealso{
\code{\link{genbetaII}}.
} \examples{
dgenbetaII(0, shape1.a = 1/4, shape2.p = 4, shape3.q = 3)
dgenbetaII(0, shape1.a = 1/4, shape2.p = 2, shape3.q = 3)
dgenbetaII(0, shape1.a = 1/4, shape2.p = 8, shape3.q = 3)
} \keyword{distribution}
VGAM/man/Coef.rrvglm.Rd0000644000176200001440000000264113565414527014251 0ustar liggesusers\name{Coef.rrvglm}
\alias{Coef.rrvglm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Returns Important Matrices etc. of an RR-VGLM Object }
\description{
This methods function returns important matrices etc. of an
RR-VGLM object.
} \usage{
Coef.rrvglm(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ An object of class \code{"rrvglm"}. }
\item{\dots}{ Currently unused. }
} \details{
The \bold{A}, \bold{B1}, \bold{C} matrices are returned,
along with other slots.
See \code{\link{rrvglm}} for details about RR-VGLMs.
} \value{
An object of class \code{"Coef.rrvglm"}
(see \code{\link{Coef.rrvglm-class}}).
} \references{
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling}, \bold{3}, 15--41.
} \author{ Thomas W. Yee } \note{
This function is an alternative to \code{coef.rrvglm}.
}
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{Coef.rrvglm-class}},
\code{print.Coef.rrvglm},
\code{\link{rrvglm}}.
} \examples{
# Rank-1 stereotype model of Anderson (1984)
pneumo <- transform(pneumo, let = log(exposure.time),
                    x3 = runif(nrow(pneumo)))
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
              multinomial, data = pneumo)
coef(fit, matrix = TRUE)
Coef(fit)
} \keyword{models} \keyword{regression}
% # print(Coef(fit), digits = 3)
VGAM/man/oalogUC.Rd0000644000176200001440000000405413565414527013416 0ustar liggesusers\name{Oalog}
\alias{Oalog}
\alias{doalog}
\alias{poalog}
\alias{qoalog}
\alias{roalog}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ One-Altered Logarithmic Distribution }
\description{
Density, distribution function, quantile function and random
generation for the one-altered logarithmic distribution with
parameter \code{pobs1}.
} \usage{
doalog(x, shape, pobs1 = 0, log = FALSE)
poalog(q, shape, pobs1 = 0)
qoalog(p, shape, pobs1 = 0)
roalog(n, shape, pobs1 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q, n, p}{ Same as \code{\link[stats:Uniform]{Unif}}.}
\item{shape, log}{ Same as \code{\link{Otlog}}. }
\item{pobs1}{
Probability of (an observed) one, called \eqn{pobs1}.
The default value of \code{pobs1 = 0} corresponds to the response
having a 1-truncated logarithmic distribution.
}
} \details{
The probability function of \eqn{Y} is 1 with probability
\code{pobs1}, else a 1-truncated logarithmic(shape) distribution.
} \value{
\code{doalog} gives the density,
\code{poalog} gives the distribution function,
\code{qoalog} gives the quantile function, and
\code{roalog} generates random deviates.
}
%\references{ }
\author{ T. W. Yee }
\note{
The argument \code{pobs1} is recycled to the required length,
and must have values which lie in the interval \eqn{[0,1]}.
} \seealso{
\code{\link{oalog}},
\code{\link{oilog}},
\code{\link{Otlog}}.
} \examples{
shape <- 0.75; pobs1 <- 0.10; x <- (-1):7
doalog(x, shape = shape, pobs1 = pobs1)
table(roalog(100, shape = shape, pobs1 = pobs1))
\dontrun{ x <- 0:10
barplot(rbind(doalog(x, shape = shape, pobs1 = pobs1),
              dlog(x, shape = shape)),
        beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
        ylab = "Probability", names.arg = as.character(x),
        main = paste("OAL(shape = ", shape, ", pobs1 = ", pobs1, ") [blue] vs",
                     " Logarithmic(shape = ", shape, ") [orange] densities",
                     sep = "")) }
} \keyword{distribution}
VGAM/man/bistudentt.Rd0000644000176200001440000000567113565414527014250 0ustar liggesusers\name{bistudentt}
\alias{bistudentt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bivariate Student-t Family Function }
\description{
Estimate the degrees of freedom and correlation parameters of
the (bivariate) Student-t distribution by maximum likelihood
estimation.
} \usage{
bistudentt(ldf = "logloglink", lrho = "rhobitlink",
           idf = NULL, irho = NULL, imethod = 1,
           parallel = FALSE, zero = "rho")
}
%- maybe also 'usage' for other objects documented here.
%apply.parint = TRUE,
\arguments{
\item{ldf, lrho, idf, irho, imethod}{
Details at \code{\link{CommonVGAMffArguments}}.
See \code{\link{Links}} for more link function choices.
}
\item{parallel, zero}{
Details at \code{\link{CommonVGAMffArguments}}.
}
} \details{
The density function is
\deqn{f(y_1, y_2; \nu, \rho) = \frac{1}{2\pi\sqrt{1-\rho^2}}
      \left( 1 + \frac{y_1^2 + y_2^2 - 2\rho y_1 y_2}{\nu (1-\rho^2)}
      \right)^{-(\nu+2)/2} }{%
      f(y1, y2; nu, rho) = (1/(2*pi*sqrt(1-rho^2))) *
      (1 + (y1^2 + y2^2 - 2*rho*y1*y2) / (nu*(1-rho^2)))^(-(nu+2)/2) }
for \eqn{-1 < \rho < 1}{-1 < rho < 1},
and real \eqn{y_1}{y1} and \eqn{y_2}{y2}.
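% A numerical check of this density (assuming dbistudentt() takes
% arguments (x1, x2, df, rho), per its own help file):
% nu <- 4; rho <- 0.5; y1 <- 0.3; y2 <- -0.2
% dbistudentt(y1, y2, df = nu, rho = rho)
% (1 / (2 * pi * sqrt(1 - rho^2))) *
%   (1 + (y1^2 + y2^2 - 2 * rho * y1 * y2) /
%        (nu * (1 - rho^2)))^(-(nu + 2) / 2)  # Should agree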
% The support of the function is the interior of the unit square;
% however, values of 0 and/or 1 are not allowed.
% The marginal distributions are the standard uniform distributions.
% When \eqn{\rho = 0}{rho=0} the random variables are
% independent.
This \pkg{VGAM} family function can handle multiple responses,
for example, a six-column matrix where the first 2 columns are the
first of three responses, the next 2 columns are the next
response, etc.
} \value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as
\code{\link{vglm}} and \code{\link{vgam}}.
} \references{
Schepsmeier, U. and Stober, J. (2013)
Derivatives and Fisher information of bivariate copulas.
\emph{Statistical Papers}.
} \author{ T. W. Yee, with help from Thibault Vatter. } \note{
The response matrix must have a multiple of two columns.
Currently, the fitted value is a matrix with the same number of
columns and values equal to 0.0.
} \section{Warning }{
The working weight matrices have not been fully checked.
} \seealso{
\code{\link{dbistudentt}},
\code{\link{binormal}},
\code{\link[stats]{pt}}.
} \examples{
nn <- 1000
mydof <- logloglink(1, inverse = TRUE)
ymat <- cbind(rt(nn, df = mydof), rt(nn, df = mydof))
bdata <- data.frame(y1 = ymat[, 1], y2 = ymat[, 2],
                    y3 = ymat[, 1], y4 = ymat[, 2],
                    x2 = runif(nn))
summary(bdata)
\dontrun{ plot(ymat, col = "blue") }
fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1,  # 2 responses, e.g., (y1,y2) is the 1st
             fam = bistudentt,  # crit = "coef",  # Sometimes a good idea
             data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
Coef(fit1)
head(fitted(fit1))
summary(fit1)
} \keyword{models} \keyword{regression}
%
VGAM/man/rrvglm-class.Rd0000644000176200001440000001623013565414527014500 0ustar liggesusers\name{rrvglm-class}
\docType{class}
\alias{rrvglm-class}
\title{Class ``rrvglm'' }
\description{
Reduced-rank vector generalized linear models.
} \section{Objects from the Class}{
Objects can be created by calls to \code{\link{rrvglm}}.
} \section{Slots}{
\describe{
\item{\code{extra}:}{
Object of class \code{"list"};
the \code{extra} argument on entry to \code{vglm}. This
contains any extra information that might be needed
by the family function.
}
\item{\code{family}:}{
Object of class \code{"vglmff"}.
The family function.
}
\item{\code{iter}:}{
Object of class \code{"numeric"}.
The number of IRLS iterations used.
}
\item{\code{predictors}:}{
Object of class \code{"matrix"}
with \eqn{M} columns which holds the \eqn{M} linear predictors.
}
\item{\code{assign}:}{
Object of class \code{"list"},
from class \code{ "vlm"}.
This named list gives information matching the columns and the
(LM) model matrix terms.
}
\item{\code{call}:}{
Object of class \code{"call"}, from class \code{ "vlm"}.
The matched call.
}
\item{\code{coefficients}:}{
Object of class
\code{"numeric"}, from class \code{ "vlm"}.
A named vector of coefficients.
}
\item{\code{constraints}:}{
Object of class \code{"list"}, from
class \code{ "vlm"}.
A named list of constraint matrices used in the fitting.
}
\item{\code{contrasts}:}{
Object of class \code{"list"}, from
class \code{ "vlm"}.
The contrasts used (if any).
}
\item{\code{control}:}{
Object of class \code{"list"}, from class
\code{ "vlm"}.
A list of parameters for controlling the fitting process.
See \code{\link{vglm.control}} for details.
}
\item{\code{criterion}:}{
Object of class \code{"list"}, from
class \code{ "vlm"}.
List of convergence criteria evaluated at the final IRLS iteration.
} \item{\code{df.residual}:}{ Object of class \code{"numeric"}, from class \code{ "vlm"}. The residual degrees of freedom. } \item{\code{df.total}:}{ Object of class \code{"numeric"}, from class \code{ "vlm"}. The total degrees of freedom. } \item{\code{dispersion}:}{ Object of class \code{"numeric"}, from class \code{ "vlm"}. The scaling parameter. } \item{\code{effects}:}{ Object of class \code{"numeric"}, from class \code{ "vlm"}. The effects. } \item{\code{fitted.values}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. The fitted values. This is usually the mean but may be quantiles, or the location parameter, e.g., in the Cauchy model. } \item{\code{misc}:}{ Object of class \code{"list"}, from class \code{ "vlm"}. A named list to hold miscellaneous parameters. } \item{\code{model}:}{ Object of class \code{"data.frame"}, from class \code{ "vlm"}. The model frame. } \item{\code{na.action}:}{ Object of class \code{"list"}, from class \code{ "vlm"}. A list holding information about missing values. } \item{\code{offset}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. If non-zero, a \eqn{M}-column matrix of offsets. } \item{\code{post}:}{ Object of class \code{"list"}, from class \code{ "vlm"} where post-analysis results may be put. } \item{\code{preplot}:}{ Object of class \code{"list"}, from class \code{ "vlm"} used by \code{\link{plotvgam}}; the plotting parameters may be put here. } \item{\code{prior.weights}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"} holding the initially supplied weights. } \item{\code{qr}:}{ Object of class \code{"list"}, from class \code{ "vlm"}. QR decomposition at the final iteration. } \item{\code{R}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. The \bold{R} matrix in the QR decomposition used in the fitting. } \item{\code{rank}:}{ Object of class \code{"integer"}, from class \code{ "vlm"}. Numerical rank of the fitted model. } \item{\code{residuals}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. The \emph{working} residuals at the final IRLS iteration. } \item{\code{ResSS}:}{ Object of class \code{"numeric"}, from class \code{ "vlm"}. Residual sum of squares at the final IRLS iteration with the adjusted dependent vectors and weight matrices. } \item{\code{smart.prediction}:}{ Object of class \code{"list"}, from class \code{ "vlm"}. A list of data-dependent parameters (if any) that are used by smart prediction. } \item{\code{terms}:}{ Object of class \code{"list"}, from class \code{ "vlm"}. The \code{\link[stats]{terms}} object used. } \item{\code{weights}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. The weight matrices at the final IRLS iteration. This is in matrix-band form. } \item{\code{x}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. The model matrix (LM, not VGLM). } \item{\code{xlevels}:}{ Object of class \code{"list"}, from class \code{ "vlm"}. The levels of the factors, if any, used in fitting. } \item{\code{y}:}{ Object of class \code{"matrix"}, from class \code{ "vlm"}. The response, in matrix form. } \item{\code{Xm2}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. See \code{\link{vglm-class}}). } \item{\code{Ym2}:}{Object of class \code{"matrix"}, from class \code{ "vlm"}. See \code{\link{vglm-class}}). } \item{\code{callXm2}:}{ Object of class \code{"call"}, from class \code{ "vlm"}. The matched call for argument \code{form2}. } } } \section{Extends}{ Class \code{"vglm"}, directly. Class \code{"vlm"}, by class "vglm". 
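% The inheritance stated above can be confirmed directly (sketch):
% pneumo <- transform(pneumo, let = log(exposure.time))
% fit <- rrvglm(cbind(normal, mild, severe) ~ let,
%               multinomial, data = pneumo)
% is(fit, "vglm"); is(fit, "vlm")  # Both TRUE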
} \section{Methods}{
\describe{
\item{biplot}{\code{signature(x = "rrvglm")}: biplot. }
\item{Coef}{\code{signature(object = "rrvglm")}: more detailed
coefficients giving \bold{A},
\eqn{\bold{B}_1}{\bold{B}1}, \bold{C}, etc.
}
\item{biplot}{\code{signature(object = "rrvglm")}: biplot. }
\item{print}{\code{signature(x = "rrvglm")}:
short summary of the object. }
\item{summary}{\code{signature(object = "rrvglm")}:
a more detailed summary of the object. }
}
} \references{
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling}, \bold{3}, 15--41.
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B,
Methodological}, \bold{58}, 481--493.
%\url{http://www.stat.auckland.ac.nz/~yee}
} \author{ Thomas W. Yee } \note{
The slots of \code{"rrvglm"} objects are currently identical to
\code{"vglm"} objects.
}
% ~Make other sections like Warning with \section{Warning }{....} ~
% zzz need to make sure this function matches \code{\link{vglm-class}},
%where \code{\link{vglm-class}} is definitive.
\seealso{
\code{\link{rrvglm}},
\code{\link{lvplot.rrvglm}},
\code{\link{vglmff-class}}.
} \examples{
\dontrun{ # Rank-1 stereotype model of Anderson (1984)
pneumo <- transform(pneumo, let = log(exposure.time),
                    x3 = runif(nrow(pneumo)))  # x3 is unrelated
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
              multinomial, data = pneumo, Rank = 1)
Coef(fit)
}
} \keyword{classes}
% set.seed(111)
VGAM/man/oizetaUC.Rd0000644000176200001440000000671213565414527013613 0ustar liggesusers\name{Oizeta}
\alias{Oizeta}
\alias{doizeta}
\alias{poizeta}
\alias{qoizeta}
\alias{roizeta}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ One-Inflated Zeta Distribution }
\description{
Density, distribution function, quantile function and random
generation for the one-inflated zeta distribution with
parameter \code{pstr1}.
} \usage{
doizeta(x, shape, pstr1 = 0, log = FALSE)
poizeta(q, shape, pstr1 = 0)
qoizeta(p, shape, pstr1 = 0)
roizeta(n, shape, pstr1 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q, p, n}{Same as \code{\link[stats]{Uniform}}.}
\item{shape}{ Vector of positive shape parameters. }
\item{pstr1}{
Probability of a structural one
(i.e., ignoring the zeta distribution),
called \eqn{\phi}{phi}.
The default value of \eqn{\phi = 0}{phi = 0} corresponds
to the response having an ordinary zeta distribution.
}
\item{log}{Same as \code{\link[stats]{Uniform}}.}
} \details{
The probability function of \eqn{Y} is 1 with probability
\eqn{\phi}{phi}, and \eqn{Zeta(shape)} with probability
\eqn{1-\phi}{1-phi}.
Thus
\deqn{P(Y=1) = \phi + (1-\phi) P(W=1)}{%
      P(Y=1) = phi + (1-phi) * P(W=1)}
where \eqn{W} is distributed as a \eqn{zeta(shape)} random variable.
} \value{
\code{doizeta} gives the density,
\code{poizeta} gives the distribution function,
\code{qoizeta} gives the quantile function, and
\code{roizeta} generates random deviates.
}
%\references{ }
\author{ T. W. Yee }
\note{
The argument \code{pstr1} is recycled to the required length,
and usually has values which lie in the interval \eqn{[0,1]}.
These functions actually allow for the \emph{one-deflated zeta}
distribution. Here, \code{pstr1} is also permitted to lie in the
interval
\code{[-dzeta(1, shape) / (1 - dzeta(1, shape)), 0]}.
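% At the lower boundary the probability of a one vanishes, so the
% 1-truncated zeta is recovered (a numerical sketch):
% shape <- 1.5
% deflat.limit <- -dzeta(1, shape) / (1 - dzeta(1, shape))
% doizeta(1, shape, pstr1 = deflat.limit)  # Should be ~0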
The resulting probability of a unit count is \emph{less than} the nominal zeta value, and the use of \code{pstr1} to stand for the probability of a structural 1 loses its meaning. % % % When \code{pstr1} equals \code{-dzeta(1, shape) / (1 - dzeta(1, shape))} this corresponds to the 1-truncated zeta distribution. } \seealso{ \code{\link{Zeta}}, \code{\link{zetaff}}. \code{\link{Otzeta}}, % \code{\link{zipf}}. } \examples{ shape <- 1.5; pstr1 <- 0.3; x <- (-1):7 (ii <- doizeta(x, shape, pstr1 = pstr1)) max(abs(poizeta(1:200, shape) - cumsum(1/(1:200)^(1+shape)) / zeta(shape+1))) # Should be 0 \dontrun{ x <- 0:10 par(mfrow = c(2, 1)) # One-Inflated zeta barplot(rbind(doizeta(x, shape, pstr1 = pstr1), dzeta(x, shape)), beside = TRUE, col = c("blue", "orange"), main = paste("OIZeta(", shape, ", pstr1 = ", pstr1, ") (blue) vs", " Zeta(", shape, ") (orange)", sep = ""), names.arg = as.character(x)) deflat.limit <- -dzeta(1, shape) / pzeta(1, shape, lower.tail = FALSE) newpstr1 <- round(deflat.limit, 3) + 0.001 # Inside but near the boundary barplot(rbind(doizeta(x, shape, pstr1 = newpstr1), dzeta(x, shape)), beside = TRUE, col = c("blue","orange"), main = paste("ODZeta(", shape, ", pstr1 = ", newpstr1, ") (blue) vs", " Zeta(", shape, ") (orange)", sep = ""), names.arg = as.character(x)) } } \keyword{distribution} %qoizeta(p, shape, pstr1 = 0) %roizeta(n, shape, pstr1 = 0) % table(roizeta(100, shape, pstr1 = pstr1)) % round(doizeta(1:10, shape, pstr1 = pstr1) * 100) # Should be similar VGAM/man/paretoIV.Rd0000644000176200001440000001356513565414527013625 0ustar liggesusers\name{paretoIV} \alias{paretoIV} \alias{paretoIII} \alias{paretoII} %- Also NEED an '\alias' for EACH other topic documented here. \title{Pareto(IV/III/II) Distribution Family Functions } \description{ Estimates three of the parameters of the Pareto(IV) distribution by maximum likelihood estimation. Some special cases of this distribution are also handled. } \usage{ paretoIV(location = 0, lscale = "loglink", linequality = "loglink", lshape = "loglink", iscale = 1, iinequality = 1, ishape = NULL, imethod = 1) paretoIII(location = 0, lscale = "loglink", linequality = "loglink", iscale = NULL, iinequality = NULL) paretoII(location = 0, lscale = "loglink", lshape = "loglink", iscale = NULL, ishape = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{location}{ Location parameter, called \eqn{a} below. It is assumed known. } \item{lscale, linequality, lshape}{ Parameter link functions for the scale parameter (called \eqn{b} below), inequality parameter (called \eqn{g} below), and shape parameter (called \eqn{s} below). See \code{\link{Links}} for more choices. A log link is the default for all because all these parameters are positive. } \item{iscale, iinequality, ishape}{ Initial values for the parameters. A \code{NULL} value means that it is obtained internally. If convergence failure occurs, use these arguments to input some alternative initial values. } \item{imethod}{ Method of initialization for the shape parameter. Currently only values 1 and 2 are available. Try the other value if convergence failure occurs. } } \details{ The Pareto(IV) distribution, which is used in actuarial science, economics, finance and telecommunications, has a cumulative distribution function that can be written \deqn{F(y) = 1 - [1 + ((y-a)/b)^{1/g}]^{-s}}{% F(y) = 1 - [1 + ((y-a)/b)^(1/g)]^(-s)} for \eqn{y > a}, \eqn{b>0}, \eqn{g>0} and \eqn{s>0}. 
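% A spot-check of this cdf (assuming pparetoIV() has the argument
% names location, scale, inequality and shape, as in ParetoIV's help):
% a <- 0; b <- 2; g <- 0.5; s <- 3; y <- 1.7
% pparetoIV(y, location = a, scale = b, inequality = g, shape = s)
% 1 - (1 + ((y - a) / b)^(1 / g))^(-s)  # Should agree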
The \eqn{a}{a} is called the \emph{location} parameter, \eqn{b} the \emph{scale} parameter, \eqn{g} the \emph{inequality} parameter, and \eqn{s} the \emph{shape} parameter. The location parameter is assumed known otherwise the Pareto(IV) distribution will not be a regular family. This assumption is not too restrictive in modelling because in typical applications this parameter is known, e.g., in insurance and reinsurance it is pre-defined by a contract and can be represented as a deductible or a retention level. The inequality parameter is so-called because of its interpretation in the economics context. If we choose a unit shape parameter value and a zero location parameter value then the inequality parameter is the Gini index of inequality, provided \eqn{g \leq 1}{g<=1}. The fitted values are currently the median, e.g., \code{\link{qparetoIV}} is used for \code{paretoIV()}. % The fitted values are currently \code{NA} because I % haven't worked out what the mean of \eqn{Y} is yet. % The mean of \eqn{Y} is % \eqn{\alpha k/(k-1)}{alpha*k/(k-1)} provided \eqn{k>1}. % Its variance is % \eqn{\alpha^2 k /((k-1)^2 (k-2))}{alpha^2 k /((k-1)^2 (k-2))} % provided \eqn{k>2}. % The maximum likelihood estimator for the location parameter is % \code{min(y)}, i.e., the smallest response value. There are a number of special cases of the Pareto(IV) distribution. These include the Pareto(I), Pareto(II), Pareto(III), and Burr family of distributions. Denoting \eqn{PIV(a,b,g,s)} as the Pareto(IV) distribution, the Burr distribution \eqn{Burr(b,g,s)} is \eqn{PIV(a=0,b,1/g,s)}, the Pareto(III) distribution \eqn{PIII(a,b,g)} is \eqn{PIV(a,b,g,s=1)}, the Pareto(II) distribution \eqn{PII(a,b,s)} is \eqn{PIV(a,b,g=1,s)}, and the Pareto(I) distribution \eqn{PI(b,s)} is \eqn{PIV(b,b,g=1,s)}. Thus the Burr distribution can be fitted using the \code{\link{negloglink}} link function and using the default \code{location=0} argument. The Pareto(I) distribution can be fitted using \code{\link{paretoff}} but there is a slight change in notation: \eqn{s=k} and \eqn{b=\alpha}{b=alpha}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Johnson N. L., Kotz S., and Balakrishnan N. (1994) \emph{Continuous Univariate Distributions, Volume 1}, 2nd ed. New York: Wiley. Brazauskas, V. (2003) Information matrix for Pareto(IV), Burr, and related distributions. \emph{Comm. Statist. Theory and Methods} \bold{32}, 315--325. Arnold, B. C. (1983) \emph{Pareto Distributions}. Fairland, Maryland: International Cooperative Publishing House. } \author{ T. W. Yee } \note{ The \code{extra} slot of the fitted object has a component called \code{"location"} which stores the location parameter value(s). } \section{Warning }{ The Pareto(IV) distribution is very general, for example, special cases include the Pareto(I), Pareto(II), Pareto(III), and Burr family of distributions. [Johnson et al. (1994) says on p.19 that fitting Type IV by ML is very difficult and rarely attempted]. Consequently, reasonably good initial values are recommended, and convergence to a local solution may occur. For this reason setting \code{trace=TRUE} is a good idea for monitoring the convergence. Large samples are ideally required to get reasonable results. } \seealso{ \code{\link{ParetoIV}}, \code{\link{paretoff}}, \code{\link{gpd}}. 
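% The nesting relations above can be checked numerically, e.g.,
% Pareto(III) is Pareto(IV) with s = 1 (a sketch):
% y <- 2.5
% pparetoIV(y, scale = 2, inequality = 0.5, shape = 1)
% pparetoIII(y, scale = 2, inequality = 0.5)  # Should agree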
} \examples{ pdata <- data.frame(y = rparetoIV(2000, scale = exp(1), ineq = exp(-0.3), shape = exp(1))) \dontrun{par(mfrow = c(2, 1)) with(pdata, hist(y)); with(pdata, hist(log(y))) } fit <- vglm(y ~ 1, paretoIV, data = pdata, trace = TRUE) head(fitted(fit)) summary(pdata) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/sm.ps.Rd0000644000176200001440000001606213565414530013121 0ustar liggesusers\name{sm.ps} \alias{sm.ps} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Defining Penalized Spline Smooths in VGAM Formulas } \description{ This function represents a P-spline smooth term in a \code{vgam} formula and confers automatic smoothing parameter selection. } \usage{ sm.ps(x, ..., ps.int = NULL, spar = -1, degree = 3, p.order = 2, ridge.adj = 1e-5, spillover = 0.01, maxspar = 1e12, outer.ok = FALSE, mux = NULL, fixspar = FALSE) } %- maybe also `usage' for other objects documented here. \arguments{ \item{x, \dots}{ See \code{\link{sm.os}}. % Currently at least 7 unique \code{x} values are needed. } \item{ps.int}{ the number of equally-spaced B-spline intervals. Note that the number of knots is equal to \code{ps.int + 2*degree + 1}. The default, signified by \code{NULL}, means that the maximum of the value 7 and \code{degree} is chosen. This usually means 6 interior knots for big data sets. However, if this is too high compared to the length of \code{x}, then some adjustment is made. In the case where \code{mux} is assigned a numerical value (suggestions: some value between 1 and 2) then \code{ceiling(mux * log(length(unique(x.index))))} is used, where \code{x.index} is the combined data. No matter what, the above is not guaranteed to work on every data set. This argument may change in the future. See also argument \code{mux}. % 20160805; correct: Note that the number of knots is equal to % \code{ps.int + 2*degree + 1}. Its called Aknots. % 20160801: % \code{ceiling(2.5 * log1p(length(unique(x.index)))) + 3} % Prior to 20160801: % The default, signified by \code{NULL}, means that % \code{ceiling(1.5 * log(length(unique(x.index))))} } \item{spar, maxspar}{ See \code{\link{sm.os}}. } \item{mux}{ numeric. If given, then this argument multiplies \code{log(length(unique(x)))} to obtain \code{ps.int}. If \code{ps.int} is given then this argument is ignored. } \item{degree}{ degree of B-spline basis. Usually this will be 2 or 3; and the values 1 or 4 might possibly be used. } \item{p.order}{ order of difference penalty (0 is the ridge penalty). } \item{ridge.adj, spillover}{ See \code{\link{sm.os}}. % however, setting this argument equal to 0 does not result in % the natural boundary conditions (NBCs). } \item{outer.ok, fixspar}{ See \code{\link{sm.os}}. } } \details{ This function can be used by \code{\link{vgam}} to allow automatic smoothing parameter selection based on P-splines and minimizing an UBRE quantity. % For large sample sizes (\eqn{> 500}, say) % Also, if \eqn{n} is the number of \emph{distinct} abscissae, then % \code{sm.ps} will fail if \eqn{n < 7}. This function should only be used with \code{\link{vgam}} and is an alternative to \code{\link{sm.os}}; see that function for some details that also apply here. } \value{ A matrix with attributes that are (only) used by \code{\link{vgam}}. The number of rows of the matrix is \code{length(x)} and the number of columns is \code{ps.int + degree - 1}. The latter is because the function is centred. } \references{ %Eilers, P. H. C. and Marx, B. D. (2002). 
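% A quick dimension check of the Value description above (assuming
% the default degree = 3; the -1 is due to the centring):
% ncol(sm.ps(runif(100), ps.int = 5))  # Expect 5 + 3 - 1 = 7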
%Generalized Linear Additive Smooth Structures. %\emph{Journal of Computational and Graphical Statistics}, %\bold{11}(4): 758--783. %Marx, B. D. and Eilers, P. H. C. (1998). %Direct generalized linear modeling %with penalized likelihood. %\emph{CSDA}, \bold{28}(2): 193--209. Eilers, P. H. C. and Marx, B. D. (1996). Flexible smoothing with B-splines and penalties (with comments and rejoinder). \emph{Statistical Science}, \bold{11}(2): 89--121. } \author{ B. D. Marx wrote the original function. Subsequent edits were made by T. W. Yee and C. Somchit. } \note{ This function is currently under development and may change in the future. In particular, the default for \code{ps.int} is subject to change. } % ~Make other sections like WARNING with \section{WARNING }{....} ~ \section{Warning }{ See \code{\link{sm.os}}. } \seealso{ \code{\link{sm.os}}, \code{\link{s}}, \code{\link{vgam}}, \code{\link{smartpred}}, \code{\link{is.smart}}, \code{\link{summarypvgam}}, \code{\link[splines]{splineDesign}}, \code{\link[splines]{bs}}, \code{\link[mgcv]{magic}}. } \examples{ sm.ps(runif(20)) sm.ps(runif(20), ps.int = 5) \dontrun{ data("TravelMode", package = "AER") # Need to install "AER" first air.df <- subset(TravelMode, mode == "air") # Form 4 smaller data frames bus.df <- subset(TravelMode, mode == "bus") trn.df <- subset(TravelMode, mode == "train") car.df <- subset(TravelMode, mode == "car") TravelMode2 <- data.frame(income = air.df$income, wait.air = air.df$wait - car.df$wait, wait.trn = trn.df$wait - car.df$wait, wait.bus = bus.df$wait - car.df$wait, gcost.air = air.df$gcost - car.df$gcost, gcost.trn = trn.df$gcost - car.df$gcost, gcost.bus = bus.df$gcost - car.df$gcost, wait = air.df$wait) # Value is unimportant TravelMode2$mode <- subset(TravelMode, choice == "yes")$mode # The response TravelMode2 <- transform(TravelMode2, incom.air = income, incom.trn = 0, incom.bus = 0) set.seed(1) TravelMode2 <- transform(TravelMode2, junkx2 = runif(nrow(TravelMode2))) tfit2 <- vgam(mode ~ sm.ps(gcost.air, gcost.trn, gcost.bus) + ns(junkx2, 4) + sm.ps(incom.air, incom.trn, incom.bus) + wait , crit = "coef", multinomial(parallel = FALSE ~ 1), data = TravelMode2, xij = list(sm.ps(gcost.air, gcost.trn, gcost.bus) ~ sm.ps(gcost.air, gcost.trn, gcost.bus) + sm.ps(gcost.trn, gcost.bus, gcost.air) + sm.ps(gcost.bus, gcost.air, gcost.trn), sm.ps(incom.air, incom.trn, incom.bus) ~ sm.ps(incom.air, incom.trn, incom.bus) + sm.ps(incom.trn, incom.bus, incom.air) + sm.ps(incom.bus, incom.air, incom.trn), wait ~ wait.air + wait.trn + wait.bus), form2 = ~ sm.ps(gcost.air, gcost.trn, gcost.bus) + sm.ps(gcost.trn, gcost.bus, gcost.air) + sm.ps(gcost.bus, gcost.air, gcost.trn) + wait + sm.ps(incom.air, incom.trn, incom.bus) + sm.ps(incom.trn, incom.bus, incom.air) + sm.ps(incom.bus, incom.air, incom.trn) + junkx2 + ns(junkx2, 4) + incom.air + incom.trn + incom.bus + gcost.air + gcost.trn + gcost.bus + wait.air + wait.trn + wait.bus) par(mfrow = c(2, 2)) plot(tfit2, se = TRUE, lcol = "orange", scol = "blue", ylim = c(-4, 4)) summary(tfit2) } } \keyword{models} \keyword{regression} \keyword{smooth} % binom2.or(exchangeable = TRUE ~ s(x2, 3)) VGAM/man/gatnbinomial.mix.Rd0000644000176200001440000001431213565414527015323 0ustar liggesusers\name{gatnbinomial.mix} \alias{gatnbinomial.mix} %\alias{gapoissonff} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Generally-Altered and -Truncated Negative Binomial Regression Family Function (GAT--NB--NB Mixture Variant) } \description{ Fits a generally-altered and -truncated negative binomial regression (mixture of 2 NBs on differing support). The truncation may include values in the upper tail. } \usage{ gatnbinomial.mix(alter = NULL, truncate = NULL, zero = c("pobs.a", "size"), parallel = FALSE, lpobs.a = "logitlink", lmunb.p = "loglink", lsize.p = "loglink", lmunb.a = "loglink", lsize.a = "loglink", type.fitted = c("mean", "pobs.a", "Pobs.a", "prob.a", "prob.t"), imethod = 1, imunb.p = NULL, isize.p = NULL, imunb.a = imunb.p, isize.a = isize.p, ishrinkage = 0.95, probs.y = 0.35, cutoff.prob = 0.999, eps.trig = 1e-7, max.chunk.MB = 30) } %- maybe also 'usage' for other objects documented here. % ipobs0 = NULL, \arguments{ \item{alter, truncate}{ Very similar to \code{\link{gatpoisson.mix}}. \emph{Note:} \code{alter} \emph{must be assigned a vector of length 3 or more for this function to work}. In general, the regression tends to be more stable when \code{length(alter)} is not too small and the values of \code{alter} are spread out over the entire support. % Must be sorted and have unique values only. } \item{lpobs.a, lmunb.p, lmunb.a}{ Link functions; the \code{.p} and \code{.a} refer to the parent and altered distributions respectively. See \code{\link{Links}} for more choices and information. } \item{lsize.p, lsize.a}{ Same as above. } \item{parallel, type.fitted}{ Very similar to \code{\link{gatpoisson.mix}}; the former concerns: constrain the mean and size parameters to be equal? Setting \code{parallel = TRUE} is probably a good idea for many data sets, especially when \code{length(alter)} is low and the values of \code{alter} are not spread out. } \item{imethod, imunb.p, isize.p}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{imunb.a, isize.a}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{probs.y, ishrinkage}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{zero}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{gatpoisson.mix}} for information. } \item{cutoff.prob, eps.trig, max.chunk.MB}{ See \code{\link{negbinomial}} for information. } } \details{ This distribution, also known as the GAT--NB--NB mixture, is an alternative to \code{\link{gatpoisson.mix}} and allows for overdispersion relative to the Poisson distribution. Because the \code{size} parameters are estimated this family function is more difficult to fit and is numerically fraught, in comparison. The distribution is also a more structured model compared to \code{\link{gatnbinomial.mlm}} because the (outer) distribution of the altered values is also an NB too. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, are similar to \code{\link{gatnbinomial.mlm}}. } \references{ Yee, T. W. and Ma, C. C. (2019) Generally-altered, -inflated and -truncated count regression, with application to heaped and seeped data. \emph{In preparation}. %, \bold{3}, 15--41. } %20111123; this has been fixed up with proper FS using EIM. %\section{Warning }{ % Inference obtained from \code{summary.vglm} % and \code{summary.vgam} may or may not be correct. % In particular, the p-values, standard errors and degrees of % freedom may need adjustment. 
Use simulation on artificial % data to check that these are reasonable. % % %} \section{Warning }{ The same caution needed for \code{\link{gatnbinomial.mlm}} and \code{\link{gatpoisson.mix}} applies here; however, this function is a bit more parametric (structured) in comparison to \code{.mlm} functions, especially when \code{parallel = TRUE}. } \author{ T. W. Yee} \note{ Convergence is rather slow because of an infinite series that is approximated with finite terms. This family function is quite expensive. See also \code{\link{gatpoisson.mix}}. This family function offers potential for regressing heaped count data. % This is not true, as 'alter' needs a 2-vector at least: % This family function effectively % renders the following functions as obsolete % (or rather, they are just special cases): % \code{\link{pospoisson}}, % \code{\link{zapoisson}}. } \seealso{ \code{\link{Gaitnbinom.mix}}, \code{\link{gatnbinomial.mlm}}, \code{\link{gatpoisson.mix}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. % \code{\link{rposnegbin}}, % \code{\link{multinomial}}, % \code{\link{zapoisson}}, % \code{\link{gatnbinomial.mlm}}, % \code{\link{gipoisson}}, } \examples{ avec <- seq(5, 25, by = 5) # Alter these values tvec <- c(4, 7, 22) # Truncate these values pobs.a <- logitlink(-0.5, inverse = TRUE) # About 0.38 gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, munb.p = exp(2 + 0 * x2), size.p = exp(1)) gdata <- transform(gdata, y1 = rgaitnbinom.mix(nn, size.p = size.p, munb.p = munb.p, pobs.a = pobs.a, truncate = tvec, alter = avec)) gatnbinomial.mix(alter = avec) (ty1 <- with(gdata, table(y1))) \dontrun{ plot(as.numeric(names(ty1)), c(ty1) / sum(ty1), xlab = "y", ylab = "Proportion", las = 1, type = "h", col = "blue") fit1 <- vglm(y1 ~ 1, trace = TRUE, data = gdata, gatnbinomial.mix(alter = avec, truncate = tvec, parallel = TRUE)) head(fitted(fit1, type.fitted = "Pobs.a")) head(predict(fit1)) coef(fit1, matrix = TRUE) summary(fit1) } } \keyword{models} \keyword{regression} %gapoisson(lpobs0 = "logitlink", llambda = "loglink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL) %gapoissonff(llambda = "loglink", lonempobs0 = "logitlink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0") VGAM/man/trplot.qrrvglm.Rd0000644000176200001440000001423213565414527015101 0ustar liggesusers\name{trplot.qrrvglm} \alias{trplot.qrrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Trajectory plot for QRR-VGLMs } \description{ Produces a trajectory plot for \emph{quadratic reduced-rank vector generalized linear models} (QRR-VGLMs). It is only applicable for rank-1 models with argument \code{noRRR = ~ 1}. } \usage{ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE, label.sites = FALSE, sitenames = rownames(object@y), axes.equal = TRUE, cex = par()$cex, col = 1:(nos * (nos - 1)/2), log = "", lty = rep_len(par()$lty, nos * (nos - 1)/2), lwd = rep_len(par()$lwd, nos * (nos - 1)/2), tcol = rep_len(par()$col, nos * (nos - 1)/2), xlab = NULL, ylab = NULL, main = "", type = "b", check.ok = TRUE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Object of class \code{"qrrvglm"}, i.e., a CQO object. } \item{which.species}{ Integer or character vector specifying the species to be plotted. If integer, these are the columns of the response matrix. If character, these must match exactly with the species' names. The default is to use all species. } \item{add}{ Logical.
Add to an existing plot? If \code{FALSE} (default), a new plot is made. } \item{show.plot}{ Logical. Plot it? } \item{label.sites}{ Logical. If \code{TRUE}, the points on the curves/trajectories are labelled with the \code{sitenames}. } \item{sitenames}{ Character vector. The names of the sites. } \item{axes.equal}{ Logical. If \code{TRUE}, the x- and y-axes will be on the same scale. } \item{cex}{ Character expansion of the labelling of the site names. Used only if \code{label.sites} is \code{TRUE}. See the \code{cex} argument in \code{\link[graphics]{par}}. } \item{col}{Color of the lines. See the \code{col} argument in \code{\link[graphics]{par}}. Here, \code{nos} is the number of species. } \item{log}{ Character, specifying which (if any) of the x- and y-axes are to be on a logarithmic scale. See the \code{log} argument in \code{\link[graphics]{par}}. } \item{lty}{ Line type. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{lwd}{ Line width. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{tcol}{Color of the text for the site names. See the \code{col} argument in \code{\link[graphics]{par}}. Used only if \code{label.sites} is \code{TRUE}. } \item{xlab}{Character caption for the x-axis. By default, a suitable caption is found. See the \code{xlab} argument in \code{\link[graphics]{plot}} or \code{\link[graphics]{title}}. } \item{ylab}{Character caption for the y-axis. By default, a suitable caption is found. See the \code{ylab} argument in \code{\link[graphics]{plot}} or \code{\link[graphics]{title}}. } \item{main}{ Character, giving the title of the plot. See the \code{main} argument in \code{\link[graphics]{plot}} or \code{\link[graphics]{title}}. } \item{type}{ Character, giving the type of plot. A common option is to use \code{type = "l"} for lines only. See the \code{type} argument of \code{\link[graphics]{plot}}. } \item{check.ok}{ Logical. Whether a check is performed to see that \code{noRRR = ~ 1} was used. It doesn't make sense to have a trajectory plot unless this is so. } \item{\dots}{ Arguments passed into the \code{plot} function when setting up the entire plot. Useful arguments here include \code{xlim} and \code{ylim}. } } \details{ A trajectory plot plots the fitted values of a `second' species against a `first' species. The argument \code{which.species} must therefore contain at least two species. By default, all of the species that were fitted in \code{object} are plotted. With more than a few species the resulting plot will be very congested, and so it is recommended that only a few species be selected for plotting. In the above, \eqn{M} is the number of species selected for plotting, so there will be \eqn{M(M-1)/2}{M*(M-1)/2} curves/trajectories in total. A trajectory plot is produced only if \code{noRRR = ~ 1} because otherwise the trajectory will not be a smooth function of the latent variables. } \value{ A list with the following components. \item{species.names}{ A matrix of characters giving the `first' and `second' species. The number of different combinations of species is given by the number of rows. This is useful for creating a legend. } \item{sitenames}{A character vector of site names, sorted by the latent variable (from low to high). } } \references{ Yee, T. W. (2012) On constrained and unconstrained quadratic ordination. \emph{Manuscript in preparation}. } \author{ Thomas W. Yee } \note{ Plotting the axes on a log scale is often a good idea.
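% A hedged sketch (using the fit p1 from the example below) of putting
% both axes on a log scale:
% trplot(p1, which.species = 1:3, log = "xy")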
The use of \code{xlim} and \code{ylim} to control the axis limits is also a good idea, so as to limit the extent of the curves at low abundances or probabilities. Setting \code{label.sites = TRUE} is a good idea only if the number of sites is small, otherwise there is too much clutter. } \seealso{ \code{\link{cqo}}, \code{\link[graphics]{par}}, \code{\link[graphics]{title}}. } \examples{\dontrun{ set.seed(111) # This leads to the global solution # hspider[,1:6] <- scale(hspider[,1:6]) # Standardize the environmental variables p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, trace = FALSE) trplot(p1, which.species = 1:3, log = "xy", type = "b", lty = 1, main = "Trajectory plot of three hunting spiders species", col = c("blue","red","green"), lwd = 2, label = TRUE) -> ii legend(0.00005, 0.3, lwd = 2, lty = 1, col = c("blue", "red", "green"), with(ii, paste(species.names[,1], species.names[,2], sep = " and "))) abline(a = 0, b = 1, lty = "dashed", col = "grey") # Useful reference line } } \keyword{models} \keyword{regression} \keyword{graphs} VGAM/man/posbernoulli.b.Rd0000644000176200001440000001747513565414527015035 0ustar liggesusers\name{posbernoulli.b} %\alias{posbernoulli} \alias{posbernoulli.b} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Positive Bernoulli Family Function with Behavioural Effects } \description{ Fits a GLM-/GAM-like model to multiple Bernoulli responses where each row in the capture history matrix response has at least one success (capture). Capture history behavioural effects are accommodated. } \usage{ posbernoulli.b(link = "logitlink", drop.b = FALSE ~ 1, type.fitted = c("likelihood.cond", "mean.uncond"), I2 = FALSE, ipcapture = NULL, iprecapture = NULL, p.small = 1e-4, no.warning = FALSE) } %- maybe also 'usage' for other objects documented here. % apply.parint = FALSE, \arguments{ \item{link, drop.b, ipcapture, iprecapture}{ See \code{\link{CommonVGAMffArguments}} for information about these arguments. By default the parallelism assumption does not apply to the intercept. With an intercept-only model setting \code{drop.b = TRUE ~ 1} results in the \eqn{M_0}/\eqn{M_h} model. % it just deletes the 2nd column of the constraint matrix corresponding % to the intercept. % The default value of \code{zero} means that the behavioural % effect is modelled as the difference between the % two intercepts. % That is, it is modelled through the intercept, and a % negative value of the second linear/additive predictor means trap shy, etc. } \item{I2}{ Logical. This argument is used for terms that are not parallel. If \code{TRUE} then the constraint matrix \code{diag(2)} (the general default constraint matrix in \pkg{VGAM}) is used, else \code{cbind(0:1, 1)}. The latter means the first element/column corresponds to the behavioural effect. Consequently it and its standard error etc. can be accessed directly without subtracting two quantities. } \item{type.fitted}{ Details at \code{\link{posbernoulli.tb}}. } \item{p.small, no.warning}{ See \code{\link{posbernoulli.t}}. } } \details{ This model (commonly known as \eqn{M_b}/\eqn{M_{bh}} in the capture--recapture literature) operates on a capture history matrix response of 0s and 1s (\eqn{n \times \tau}{n x tau}). See \code{\link{posbernoulli.t}} for details, e.g., common assumptions with other models. 
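% A minimal sketch with hypothetical 0/1 data, illustrating the form of
% the capture history matrix response described above (each row must
% contain at least one success, i.e., one capture):
% chmat <- matrix(c(1, 0, 1,
%                   0, 1, 1,
%                   1, 1, 0), 3, 3, byrow = TRUE,
%                 dimnames = list(NULL, c("y1", "y2", "y3")))
% all(rowSums(chmat) > 0)  # TRUE: each animal was captured at least once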
Once an animal is captured for the first time, it is marked/tagged so that its future capture history can be recorded. The effect of the recapture probability is modelled through a second linear/additive predictor. It is well-known that some species of animals are affected by capture, e.g., trap-shy or trap-happy. This \pkg{VGAM} family function \emph{does} allow the capture history to be modelled via such behavioural effects. So does \code{\link{posbernoulli.tb}} but \code{\link{posbernoulli.t}} cannot. % If \code{drop.b = TRUE} the parallelism does not apply to the intercept. The number of linear/additive predictors is \eqn{M = 2}, and the default links are \eqn{(logit \,p_c, logit \,p_r)^T}{(logit p_c, logit p_r)^T} where \eqn{p_c} is the probability of capture and \eqn{p_r} is the probability of recapture. The fitted value returned is of the same dimension as the response matrix, and depends on the capture history: prior to being first captured, it is \code{pcapture}. Afterwards, it is \code{precapture}. By default, the constraint matrices for the intercept term and the other covariates are set up so that \eqn{p_r} differs from \eqn{p_c} by a simple binary effect, on a logit scale. However, this difference (the behavioural effect) is more directly estimated by having \code{I2 = FALSE}. Then it allows an estimate of the trap-happy/trap-shy effect; these are positive/negative values respectively. If \code{I2 = FALSE} then the (nonstandard) constraint matrix used is \code{cbind(0:1, 1)}, meaning the first element can be interpreted as the behavioural effect. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } %\section{Warning }{ % % See \code{\link{posbernoulli.t}}. % % %} \references{ See \code{\link{posbernoulli.t}}. } \author{ Thomas W. Yee. } \note{ The dependent variable is \emph{not} scaled to row proportions. This is the same as \code{\link{posbernoulli.t}} and \code{\link{posbernoulli.tb}} but different from \code{\link{posbinomial}} and \code{\link{binomialff}}. % Monitor convergence by setting \code{trace = TRUE}. % To fit \eqn{M_{tb}}{M_tb} and \eqn{M_{tbh}}{M_tbh} % use \code{\link{posbernoulli.t}} with the \code{xij} % argument of \code{\link{vglm.control}}. } \seealso{ \code{\link{posbernoulli.t}} and \code{\link{posbernoulli.tb}} (including estimating \eqn{N}), \code{\link{deermice}}, \code{\link{dposbern}}, \code{\link{rposbern}}, \code{\link{posbinomial}}, \code{\link{aux.posbernoulli.t}}, \code{\link{prinia}}. % \code{\link{huggins91}}. % \code{\link{vglm.control}} for \code{xij}, } \examples{ # deermice data ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, # Fit a M_b model M.b <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1, posbernoulli.b, data = deermice, trace = TRUE) coef(M.b)["(Intercept):1"] # Behavioural effect on the logit scale coef(M.b, matrix = TRUE) constraints(M.b, matrix = TRUE) summary(M.b, presid = FALSE) # Fit a M_bh model M.bh <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, posbernoulli.b, data = deermice, trace = TRUE) coef(M.bh, matrix = TRUE) coef(M.bh)["(Intercept):1"] # Behavioural effect on the logit scale constraints(M.bh) # (2,1) element of "(Intercept)" is for the behavioural effect summary(M.bh, presid = FALSE) # Significant positive (trap-happy) behavioural effect # Approx. 95 percent confidence for the behavioural effect: SE.M.bh <- coef(summary(M.bh))["(Intercept):1", "Std. 
Error"] coef(M.bh)["(Intercept):1"] + c(-1, 1) * 1.96 * SE.M.bh # Fit a M_h model M.h <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, posbernoulli.b(drop.b = TRUE ~ sex + weight), data = deermice, trace = TRUE) coef(M.h, matrix = TRUE) constraints(M.h, matrix = TRUE) summary(M.h, presid = FALSE) # Fit a M_0 model M.0 <- vglm(cbind( y1 + y2 + y3 + y4 + y5 + y6, 6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1, posbinomial, data = deermice, trace = TRUE) coef(M.0, matrix = TRUE) summary(M.0, presid = FALSE) # Simulated data set ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, set.seed(123); nTimePts <- 5; N <- 1000 # N is the popn size pdata <- rposbern(n = N, nTimePts = nTimePts, pvars = 2, is.popn = TRUE) nrow(pdata) # Less than N (because some animals were never captured) # The truth: xcoeffs are c(-2, 1, 2) and cap.effect = +1 M.bh.2 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, posbernoulli.b, data = pdata, trace = TRUE) coef(M.bh.2) coef(M.bh.2, matrix = TRUE) constraints(M.bh.2, matrix = TRUE) summary(M.bh.2, presid = FALSE) head(depvar(M.bh.2)) # Capture history response matrix head(M.bh.2@extra$cap.hist1) # Info on its capture history head(M.bh.2@extra$cap1) # When it was first captured head(fitted(M.bh.2)) # Depends on capture history (trap.effect <- coef(M.bh.2)["(Intercept):1"]) # Should be +1 head(model.matrix(M.bh.2, type = "vlm"), 21) head(pdata) summary(pdata) dim(depvar(M.bh.2)) vcov(M.bh.2) M.bh.2@extra$N.hat # Estimate of the population size; should be about N M.bh.2@extra$SE.N.hat # SE of the estimate of the population size # An approximate 95 percent confidence interval: round(M.bh.2@extra$N.hat + c(-1, 1) * 1.96 * M.bh.2@extra$SE.N.hat, 1) } \keyword{models} \keyword{regression} %# Compare the models using a LRT %lrtest(M.bh, M.h) %(wald.pvalue <- 2 * pnorm(abs(summary(M.bh)@coef3["(Intercept):2", "z value"]), % lower.tail = FALSE)) # Two-sided pvalue VGAM/man/double.expbinomial.Rd0000644000176200001440000001532513565414527015650 0ustar liggesusers\name{double.expbinomial} \alias{double.expbinomial} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Double Exponential Binomial Distribution Family Function } \description{ Fits a double exponential binomial distribution by maximum likelihood estimation. The two parameters here are the mean and dispersion parameter. } \usage{ double.expbinomial(lmean = "logitlink", ldispersion = "logitlink", idispersion = 0.25, zero = "dispersion") } % idispersion = 0.25, zero = 2 %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmean, ldispersion}{ Link functions applied to the two parameters, called \eqn{\mu}{mu} and \eqn{\theta}{theta} respectively below. See \code{\link{Links}} for more choices. The defaults cause the parameters to be restricted to \eqn{(0,1)}. } \item{idispersion}{ Initial value for the dispersion parameter. If given, it must be in range, and is recyled to the necessary length. Use this argument if convergence failure occurs. } \item{zero}{ A vector specifying which linear/additive predictor is to be modelled as intercept-only. If assigned, the single value can be either \code{1} or \code{2}. The default is to have a single dispersion parameter value. To model both parameters as functions of the covariates assign \code{zero = NULL}. See \code{\link{CommonVGAMffArguments}} for more details. } } \details{ This distribution provides a way for handling overdispersion in a binary response. 
The double exponential binomial distribution belongs to the family of double exponential distributions proposed by Efron (1986). Below, equation numbers refer to that original article. Briefly, the idea is that an ordinary one-parameter exponential family allows the addition of a second parameter \eqn{\theta}{theta} which varies the dispersion of the family without changing the mean. The extended family behaves like the original family with sample size changed from \eqn{n} to \eqn{n\theta}{n*theta}. The extended family is an exponential family in \eqn{\mu}{mu} when \eqn{n} and \eqn{\theta}{theta} are fixed, and an exponential family in \eqn{\theta}{theta} when \eqn{n} and \eqn{\mu}{mu} are fixed. Having \eqn{0 < \theta < 1}{0 < theta < 1} corresponds to overdispersion with respect to the binomial distribution. See Efron (1986) for full details. This \pkg{VGAM} family function implements an \emph{approximation} (2.10) to the exact density (2.4). It replaces the normalizing constant by unity since the true value nearly equals 1. The default model fitted is \eqn{\eta_1 = logit(\mu)}{eta1 = logit(mu)} and \eqn{\eta_2 = logit(\theta)}{eta2 = logit(theta)}. This restricts both parameters to lie between 0 and 1, although the dispersion parameter can be modelled over a larger parameter space by assigning the argument \code{ldispersion} a link with a wider range, e.g., \code{\link{extlogitlink}} as used in the examples below. Approximately, the mean (of \eqn{Y}) is \eqn{\mu}{mu}. The \emph{effective sample size} is the dispersion parameter multiplied by the original sample size, i.e., \eqn{n\theta}{n*theta}. This family function uses Fisher scoring, and the two estimates are asymptotically independent because the expected information matrix is diagonal. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}. } \references{ Efron, B. (1986) Double exponential families and their use in generalized linear regression. \emph{Journal of the American Statistical Association}, \bold{81}, 709--721. } \author{ T. W. Yee } \note{ This function processes the input in the same way as \code{\link{binomialff}}; however, multiple responses are not allowed (\code{binomialff(multiple.responses = FALSE)}). } \section{Warning }{ Numerical difficulties can occur; if so, try using \code{idispersion}. } \seealso{ \code{\link{binomialff}}, \code{\link{toxop}}, \code{\link{CommonVGAMffArguments}}. } \examples{ # This example mimics the example in Efron (1986). # The results here differ slightly. # Scale the variables toxop <- transform(toxop, phat = positive / ssize, srainfall = scale(rainfall), # (6.1) sN = scale(ssize)) # (6.2) # A fit similar (should be identical) to Section 6 of Efron (1986).
# But does not use poly(), and M = 1.25 here, as in (5.3) cmlist <- list("(Intercept)" = diag(2), "I(srainfall)" = rbind(1, 0), "I(srainfall^2)" = rbind(1, 0), "I(srainfall^3)" = rbind(1, 0), "I(sN)" = rbind(0, 1), "I(sN^2)" = rbind(0, 1)) fit <- vglm(cbind(phat, 1 - phat) * ssize ~ I(srainfall) + I(srainfall^2) + I(srainfall^3) + I(sN) + I(sN^2), double.expbinomial(ldisp = extlogitlink(min = 0, max = 1.25), idisp = 0.2, zero = NULL), toxop, trace = TRUE, constraints = cmlist) # Now look at the results coef(fit, matrix = TRUE) head(fitted(fit)) summary(fit) vcov(fit) sqrt(diag(vcov(fit))) # Standard errors # Effective sample size (not quite the last column of Table 1) head(predict(fit)) Dispersion <- extlogitlink(predict(fit)[,2], min = 0, max = 1.25, inverse = TRUE) c(round(weights(fit, type = "prior") * Dispersion, digits = 1)) # Ordinary logistic regression (gives same results as (6.5)) ofit <- vglm(cbind(phat, 1 - phat) * ssize ~ I(srainfall) + I(srainfall^2) + I(srainfall^3), binomialff, toxop, trace = TRUE) # Same as fit but it uses poly(), and can be plotted (cf. Figure 1) cmlist2 <- list("(Intercept)" = diag(2), "poly(srainfall, degree = 3)" = rbind(1, 0), "poly(sN, degree = 2)" = rbind(0, 1)) fit2 <- vglm(cbind(phat, 1 - phat) * ssize ~ poly(srainfall, degree = 3) + poly(sN, degree = 2), double.expbinomial(ldisp = extlogitlink(min = 0, max = 1.25), idisp = 0.2, zero = NULL), toxop, trace = TRUE, constraints = cmlist2) \dontrun{ par(mfrow = c(1, 2)) plot(as(fit2, "vgam"), se = TRUE, lcol = "blue", scol = "orange") # Cf. Figure 1 # Cf. Figure 1(a) par(mfrow = c(1,2)) ooo <- with(toxop, sort.list(rainfall)) with(toxop, plot(rainfall[ooo], fitted(fit2)[ooo], type = "l", col = "blue", las = 1, ylim = c(0.3, 0.65))) with(toxop, points(rainfall[ooo], fitted(ofit)[ooo], col = "orange", type = "b", pch = 19)) # Cf. Figure 1(b) ooo <- with(toxop, sort.list(ssize)) with(toxop, plot(ssize[ooo], Dispersion[ooo], type = "l", col = "blue", las = 1, xlim = c(0, 100))) } } \keyword{models} \keyword{regression} VGAM/man/dirichlet.Rd0000644000176200001440000001022513565414527014031 0ustar liggesusers\name{dirichlet} \alias{dirichlet} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Fitting a Dirichlet Distribution } \description{ Fits a Dirichlet distribution to a matrix of compositions. } \usage{ dirichlet(link = "loglink", parallel = FALSE, zero = NULL, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to each of the \eqn{M} (positive) shape parameters \eqn{\alpha_j}{alpha_j}. See \code{\link{Links}} for more choices. The default gives \eqn{\eta_j=\log(\alpha_j)}{eta_j=log(alpha_j)}. } \item{parallel, zero, imethod}{ See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ In this help file the response is assumed to be a \eqn{M}-column matrix with positive values and whose rows each sum to unity. Such data can be thought of as compositional data. There are \eqn{M} linear/additive predictors \eqn{\eta_j}{eta_j}. The Dirichlet distribution is commonly used to model compositional data, including applications in genetics. Suppose \eqn{(Y_1,\ldots,Y_{M})^T}{(Y_1,\ldots,Y_M)^T} is the response. 
Then it has a Dirichlet distribution if \eqn{(Y_1,\ldots,Y_{M-1})^T}{(Y_1,\ldots,Y_{M-1})^T} has density \deqn{\frac{\Gamma(\alpha_{+})} {\prod_{j=1}^{M} \Gamma(\alpha_{j})} \prod_{j=1}^{M} y_j^{\alpha_{j} -1}}{% (Gamma(alpha_+) / prod_{j=1}^M gamma(alpha_j)) prod_{j=1}^M y_j^(alpha_j -1)} where \eqn{\alpha_+=\alpha_1+\cdots+\alpha_M}{alpha_+= alpha_1 + \dots + alpha_M}, \eqn{\alpha_j > 0}{alpha_j > 0}, and the density is defined on the unit simplex \deqn{\Delta_{M} = \left\{ (y_1,\ldots,y_{M})^T : y_1 > 0, \ldots, y_{M} > 0, \sum_{j=1}^{M} y_j = 1 \right\}. }{% Delta_M = { (y_1,\ldots,y_M)^T : y_1 > 0, \dots, y_M > 0, \sum_{j=1}^M y_j = 1 }. } One has \eqn{E(Y_j) = \alpha_j / \alpha_{+}}{E(Y_j) = alpha_j / alpha_{+}}, which are returned as the fitted values. For this distribution Fisher scoring corresponds to Newton-Raphson. The Dirichlet distribution can be motivated by considering the random variables \eqn{(G_1,\ldots,G_{M})^T}{(G_1,\ldots,G_M)^T} which are each independent and identically distributed as a gamma distribution with density \eqn{f(g_j)=g_j^{\alpha_j - 1} e^{-g_j} / \Gamma(\alpha_j)}{f(g_j)= g_j^(alpha_j - 1) e^(-g_j) / gamma(alpha_j)}. Then the Dirichlet distribution arises when \eqn{Y_j=G_j / (G_1 + \cdots + G_M)}{Y_j = G_j / (G_1 + ... + G_M)}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. When fitted, the \code{fitted.values} slot of the object contains the \eqn{M}-column matrix of means. } \references{ Lange, K. (2002) \emph{Mathematical and Statistical Methods for Genetic Analysis}, 2nd ed. New York: Springer-Verlag. Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. %Documentation accompanying the \pkg{VGAM} package at %\url{http://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ The response should be a matrix of positive values whose rows each sum to unity. Similar to this is count data, where probably a multinomial logit model (\code{\link{multinomial}}) may be appropriate. Another similar distribution to the Dirichlet is the Dirichlet-multinomial (see \code{\link{dirmultinomial}}). } \seealso{ \code{\link{rdiric}}, \code{\link{dirmultinomial}}, \code{\link{multinomial}}, \code{\link{simplex}}. } % yettodo: use the data of \citet[p.81]{mosi:1962}. See % See also \citet[pp.8--9]{macd:2014}. \examples{ ddata <- data.frame(rdiric(n = 1000, shape = exp(c(y1 = -1, y2 = 1, y3 = 0)))) fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet, data = ddata, trace = TRUE, crit = "coef") Coef(fit) coef(fit, matrix = TRUE) head(fitted(fit)) } \keyword{models} \keyword{regression} % colnames(ddata) <- paste("y", 1:3, sep = "") VGAM/man/lakeO.Rd0000644000176200001440000000513313565414527013117 0ustar liggesusers\name{lakeO} \alias{lakeO} \docType{data} \title{ Annual catches on Lake Otamangakau from October 1974 to October 1989 %% ~~ data name/kind ... ~~ } \description{ Rainbow and brown trout catches by a Mr Swainson at Lake Otamangakau in the central North Island of New Zealand during the 1970s and 1980s. %% ~~ A concise (1-5 lines) description of the dataset. ~~ } \usage{data(lakeO)} \format{ A data frame with 15 observations on the following 5 variables. \describe{ \item{\code{year}}{a numeric vector, the season began on 1 October of the year and ended 12 months later. 
% Hence the fishing ended around October 1989. } \item{\code{total.fish}}{a numeric vector, the total number of fish caught during the season. Simply the sum of brown and rainbow trout. } \item{\code{brown}}{a numeric vector, the number of brown trout (\emph{Salmo trutta}) caught. } \item{\code{rainbow}}{a numeric vector, the number of rainbow trout (\emph{Oncorhynchus mykiss}) caught. } \item{\code{visits}}{a numeric vector, the number of visits during the season that the angler made to the lake. It is necessary to assume that the visits were of an equal time length in order to interpret the usual Poisson regressions. } } } \details{ %% ~~ If necessary, more details than the __description__ above ~~ The data was extracted from the season summaries at Lake Otamangakau by Anthony Swainson for the seasons 1974--75 to 1988--89. % Note however that the final year's data % was cut off from the scanned version. Mr Swainson was one of a small group of regular fly fishing anglers and kept a diary of his catches. Lake Otamangakau is a lake of area 1.8 square km and has a maximum depth of about 12 m, and is located in the central North Island of New Zealand. It is trout-infested and known for its trophy-sized fish. See also \code{\link[VGAMdata]{trapO}}. } \source{ Table 7.2 of the reference below. Thanks to Dr Michel Dedual for a copy of the report and for help reading the final year's data. The report is available from TWY on request. % p.43 %% ~~ reference to a publication or URL from which the data were obtained ~~ } \references{ {Dedual, M. and MacLean, G. and Rowe, D. and Cudby, E.}, \emph{The Trout Population and Fishery of {L}ake {O}tamangakau---Interim Report}. {National Institute of Water and Atmospheric Research}, {Hamilton, New Zealand}. Consultancy Report Project No. {ELE70207}, (Dec 1996). %% ~~ possibly secondary sources and usages ~~ } \examples{ data(lakeO) lakeO summary(lakeO) } \keyword{datasets} VGAM/man/lvplot.Rd0000644000176200001440000000400713565414527013403 0ustar liggesusers\name{lvplot} \alias{lvplot} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Latent Variable Plot } \description{ Generic function for a \emph{latent variable plot} (also known as an \emph{ordination diagram} by ecologists). } \usage{ lvplot(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which a latent variable plot is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. They usually are graphical parameters, and sometimes they are fed into the methods function for \code{\link{Coef}}. } } \details{ Latent variables occur in reduced-rank regression models, as well as in quadratic and additive ordination. For the latter, latent variables are often called the \emph{site scores}. The term \emph{latent variable plot} was coined by Yee (2004); such plots have the latent variable on at least one of their axes. } \value{ The value returned depends specifically on the methods function invoked. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W. Yee } \note{ Latent variables are not really applicable to \code{\link{vglm}}/\code{\link{vgam}} models. } \seealso{ \code{\link{lvplot.qrrvglm}}, \code{lvplot.cao}, \code{\link{latvar}}, \code{\link{trplot}}.
} \examples{ \dontrun{ hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars set.seed(123) p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, Bestof = 3, df1.nl = c(Zoraspin = 2.5, 3), Crow1positive = TRUE) index <- 1:ncol(depvar(p1)) lvplot(p1, lcol = index, pcol = index, y = TRUE, las = 1) } } \keyword{models} \keyword{regression} VGAM/man/gew.Rd0000644000176200001440000000502713565414527012650 0ustar liggesusers\name{gew} \alias{gew} \docType{data} \title{ General Electric and Westinghouse Data } \description{ General Electric and Westinghouse capital data. } \usage{data(gew)} \format{ A data frame with 20 observations on the following 7 variables. All variables are numeric vectors. Variables ending in \code{.g} correspond to General Electric and those ending in \code{.w} are Westinghouse. \describe{ \item{year}{The observations are the years from 1934 to 1953} \item{invest.g, invest.w}{investment figures. These are \eqn{I=} Gross investment = additions to plant and equipment plus maintenance and repairs in millions of dollars deflated by \eqn{P_1}. } \item{capital.g, capital.w}{capital stocks. These are \eqn{C=} The stock of plant and equipment = accumulated sum of net additions to plant and equipment deflated by \eqn{P_1} minus depreciation allowance deflated by \eqn{P_3}. } \item{value.g, value.w}{market values. These are \eqn{F=} Value of the firm = price of common and preferred shares at December 31 (or average price of December 31 and January 31 of the following year) times number of common and preferred shares outstanding plus total book value of debt at December 31 in millions of dollars deflated by \eqn{P_2}. } } } \details{ These data are a subset of a table in Boot and de Wit (1960), also known as the Grunfeld data. It is used a lot in econometrics, e.g., for seemingly unrelated regressions (see \code{\link[VGAM:SURff]{SURff}}). Here, \eqn{P_1 =} Implicit price deflator of producers durable equipment (base 1947), \eqn{P_2 =} Implicit price deflator of G.N.P. (base 1947), \eqn{P_3 =} Depreciation expense deflator = ten years moving average of wholesale price index of metals and metal products (base 1947). } \source{ Table 10 of: Boot, J. C. G. and de Wit, G. M. (1960) Investment Demand: An Empirical Contribution to the Aggregation Problem. \emph{International Economic Review}, \bold{1}, 3--30. Grunfeld, Y. (1958) The Determinants of Corporate Investment. Unpublished PhD Thesis (Chicago). } \seealso{ \code{\link[VGAM:SURff]{SURff}}, \code{http://statmath.wu.ac.at/~zeileis/grunfeld} (the link might now be stale). % orig.: \url{http://statmath.wu.ac.at/~zeileis/grunfeld}. } \references{ Zellner, A. (1962) An efficient method of estimating seemingly unrelated regressions and tests for aggregation bias. \emph{Journal of the American Statistical Association}, \bold{57}, 348--368. } \examples{ str(gew) } \keyword{datasets} VGAM/man/nbcanlink.Rd0000644000176200001440000001265413565414527014031 0ustar liggesusers\name{nbcanlink} \alias{nbcanlink} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Negative Binomial Canonical Link Function } \description{ Computes the negative binomial canonical link transformation, including its inverse and the first two derivatives. 
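% A small numerical sketch of the link and its inverse (a hedged
% illustration only; the values mu = 4 and k = 10 are arbitrary):
% eta <- nbcanlink(4, size = matrix(10, 1, 1))             # log(4/(4 + 10))
% nbcanlink(eta, size = matrix(10, 1, 1), inverse = TRUE)  # Should return 4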
} \usage{ nbcanlink(theta, size = NULL, wrt.param = NULL, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. Typically the mean of a negative binomial distribution (NBD). See below for further details. } \item{size, wrt.param}{ \code{size} contains the \eqn{k} matrix, which must be of a dimension conformable with \code{theta}. Also, if \code{deriv > 0} then \code{wrt.param} is either 1 or 2 (1 for with respect to the first parameter, and 2 for with respect to the second parameter (\code{size})). } \item{bvalue}{ Details at \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The NBD canonical link is \eqn{\log(\theta/ (\theta + k))}{log(theta/(theta + k))} where \eqn{\theta}{theta} is the NBD mean. The canonical link is used for theoretically relating the NBD to the GLM class. This link function was specifically written for \code{\link{negbinomial}} and \code{\link{negbinomial.size}}, and should not be used elsewhere (these \pkg{VGAM} family functions have code that specifically handles \code{nbcanlink()}). } \value{ For \code{deriv = 0}, the above equation when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{kmatrix / expm1(-theta)} where \code{theta} is really \code{eta}. For \code{deriv = 1}, the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. } \references{ Miranda, V. S. and Yee, T. W. (2018) On mean function modelling for several one-parameter discrete distributions. \emph{Manuscript in preparation}. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. Hilbe, J. M. (2011) \emph{Negative Binomial Regression}, 2nd Edition. Cambridge: Cambridge University Press. } \author{ Victor Miranda and Thomas W. Yee. } \section{Warning}{ This function works with \code{\link{negbinomial}} but care is needed because it is numerically fraught. In particular, the first linear/additive predictor must have negative values, and finding good initial values may be difficult, which can cause it to crash at the start. Hence the NB-C model is sensitive to the initial values and may converge to a local solution. Pages 210 and 309 of Hilbe (2011) note convergence difficulties (of Newton--Raphson type algorithms), and some of that applies here. Setting \code{trace = TRUE} is a good idea, as is trying various values of \code{imethod} in \code{\link{negbinomial}}. % This function should work okay with \code{\link{negbinomial.size}}. % Standard errors may be unreliable. } \note{ While theoretically nice, this function is not recommended in general since its value is always negative (linear predictors ought to be unbounded in general). A \code{\link{loglink}} link for argument \code{lmu} is recommended instead. Numerical instability may occur when \code{theta} is close to 0 or 1. Values of \code{theta} which are less than or equal to 0 can be replaced by \code{bvalue} before computing the link function value. See \code{\link{Links}}. } \seealso{ \code{\link{negbinomial}}, \code{\link{negbinomial.size}}.
} \examples{ nbcanlink("mu", short = FALSE) mymu <- 1:10 # Test some basic operations: kmatrix <- cbind(runif(length(mymu))) eta1 <- nbcanlink(mymu, size = kmatrix) ans2 <- nbcanlink(eta1, size = kmatrix, inverse = TRUE) max(abs(ans2 - mymu)) # Should be 0 \dontrun{ mymu <- seq(0.5, 10, length = 101) kmatrix <- matrix(10, length(mymu), 1) plot(nbcanlink(mymu, size = kmatrix) ~ mymu, las = 1, type = "l", col = "blue", xlab = expression({mu})) } # Estimate the parameters from some simulated data ndata <- data.frame(x2 = runif(nn <- 100)) ndata <- transform(ndata, eta1 = -1 - 1 * x2, # eta1 < 0 size1 = exp(1), size2 = exp(2)) ndata <- transform(ndata, mu1 = nbcanlink(eta1, size = size1, inverse = TRUE), mu2 = nbcanlink(eta1, size = size2, inverse = TRUE)) ndata <- transform(ndata, y1 = rnbinom(nn, mu = mu1, size = size1), y2 = rnbinom(nn, mu = mu2, size = size2)) summary(ndata) nbcfit <- vglm(cbind(y1, y2) ~ x2, negbinomial(lmu = "nbcanlink", imethod = 1), # Try this # negbinomial(lmu = "nbcanlink", imethod = 2), # Try this data = ndata, trace = TRUE) coef(nbcfit, matrix = TRUE) summary(nbcfit) } \keyword{math} \keyword{models} \keyword{regression} % abline(h = 0, col = "lightgray", lty = "dashed", lwd = 2.0) % The variance-covariance matrix may be wrong when the % canonical link is used. % vcov(fit) # May be wrong % 20150714; yettodo: fix up this and getting it going. % Hint: the working weights are treated as diagonal, whereas it isn't! %aa=nbcfit@misc$earg %aa[[1]] -> bb %(bb$theta) %head(bb$size) %dim(bb$size) VGAM/man/Tol.Rd0000644000176200001440000000622013565414527012620 0ustar liggesusers\name{Tol} \alias{Tol} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Tolerances } \description{ Generic function for the \emph{tolerances} of a model. } \usage{ Tol(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which the computation or extraction of a tolerance or tolerances is meaningful. } \item{\dots}{ Other arguments fed into the specific methods function of the model. Sometimes they are fed into the methods function for \code{\link{Coef}}. } } \details{ Different models can define an optimum in different ways. Many models have no such notion or definition. Tolerances occur in quadratic ordination, i.e., CQO and UQO. They have ecological meaning because a high tolerance for a species means the species can survive over a large environmental range (stenoecous species), whereas a small tolerance means the species' niche is small (eurycous species). Mathematically, the tolerance is like the variance of a normal distribution. } \value{ The value returned depends specifically on the methods function invoked. For a \code{\link{cqo}} binomial or Poisson fit, this function returns a \eqn{R \times R \times S} array, where \eqn{R} is the rank and \eqn{S} is the number of species. Each tolerance matrix ought to be positive-definite, and for a rank-1 fit, taking the square root of each tolerance matrix results in each species' tolerance (like a standard deviation). } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. } \author{ Thomas W. Yee } \note{ Tolerances are undefined for `linear' and additive ordination models. They are well-defined for quadratic ordination models. 
} \section{Warning }{ There is a direct inverse relationship between the scaling of the latent variables (site scores) and the tolerances. One normalization is for the latent variables to have unit variance. Another normalization is for all the tolerances to be equal to unity. These two normalizations cannot hold simultaneously in general. For rank-\emph{R>1} models it becomes more complicated because the latent variables are also uncorrelated. An important argument when fitting quadratic ordination models is whether \code{eq.tolerances} is \code{TRUE} or \code{FALSE}. See Yee (2004) for details. } \seealso{ \code{Tol.qrrvglm}. \code{\link{Max}}, \code{\link{Opt}}, \code{\link{cqo}}, \code{\link{rcim}} for UQO. } \examples{ \dontrun{ set.seed(111) # This leads to the global solution hspider[,1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, Crow1positive = FALSE) Tol(p1) } } \keyword{models} \keyword{regression} VGAM/man/triangleUC.Rd0000644000176200001440000000437213565414527014125 0ustar liggesusers\name{Triangle} \alias{Triangle} \alias{dtriangle} \alias{ptriangle} \alias{qtriangle} \alias{rtriangle} \title{The Triangle Distribution} \description{ Density, distribution function, quantile function and random generation for the Triangle distribution with parameter \code{theta}. } \usage{ dtriangle(x, theta, lower = 0, upper = 1, log = FALSE) ptriangle(q, theta, lower = 0, upper = 1, lower.tail = TRUE, log.p = FALSE) qtriangle(p, theta, lower = 0, upper = 1, lower.tail = TRUE, log.p = FALSE) rtriangle(n, theta, lower = 0, upper = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as \code{\link[stats]{runif}}. } \item{theta}{the theta parameter which lies between \code{lower} and \code{upper}. } \item{lower, upper}{lower and upper limits of the distribution. Must be finite. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dtriangle} gives the density, \code{ptriangle} gives the distribution function, \code{qtriangle} gives the quantile function, and \code{rtriangle} generates random deviates. } %\references{ % %} \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{triangle}}, the \pkg{VGAM} family function for estimating the parameter \eqn{\theta}{theta} by maximum likelihood estimation. } %\note{ % %} \seealso{ \code{\link{triangle}}, \code{\link{topple}}.
} \examples{ \dontrun{ x <- seq(-0.1, 1.1, by = 0.01); theta <- 0.75 plot(x, dtriangle(x, theta = theta), type = "l", col = "blue", las = 1, main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylim = c(0,2), ylab = "") abline(h = 0, col = "blue", lty = 2) lines(x, ptriangle(x, theta = theta), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qtriangle(probs, theta = theta) lines(Q, dtriangle(Q, theta = theta), col = "purple", lty = 3, type = "h") ptriangle(Q, theta = theta) - probs # Should be all zero abline(h = probs, col = "purple", lty = 3) } } \keyword{distribution} VGAM/man/notdocumentedyet.Rd0000644000176200001440000004367013565414527015466 0ustar liggesusers\name{notdocumentedyet} \alias{notdocumentedyet} % % % % 201909, 201910, 201911 \alias{moments.nbin.gait} \alias{moments.pois.gait} %\alias{dgaitpois.mix} %\alias{pgaitpois.mix} \alias{EIM.GATNB.speciald} \alias{GATNB.deriv012} \alias{gait.errorcheck} % 201908 % part a is based on \pkg{vcd}: %\alias{vcdrootogram} %\alias{vcdrootogram.default} % % % % % part a is based on \pkg{countreg}: \alias{rootogram0} \alias{rootogram0.default} % \alias{rootogram4vglm} % 201906 % \alias{Step} % \alias{Stepvglm} % \alias{drop1.vglm} \alias{fitmodel.VGAM.expression} \alias{subsetassign} \alias{findterms} \alias{assign2assign} \alias{extractAIC.vglm} \alias{dfterms} \alias{dftermsvglm} % 201904; to stop hdeff() working on them, by default: \alias{wz.merge} %\alias{gabinomial.control} %\alias{gibinomial.control} %\alias{gapoisson.control} \alias{gipoisson.control} % 201902; these old link function names are put here to deemphasize them. % They do not appear at all when help.start() is used, so thats good. % Typing ?loge sees absolutely minimal about it (undocumented). 
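% A small sketch (assuming the old names are still exported): the old and
% new link names are intended to be synonymous, e.g.,
% logit(0.25)      # Old, deprecated name
% logitlink(0.25)  # New, preferred name; both should equal qlogis(0.25)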
\alias{loge} \alias{negloge} \alias{logneg} \alias{logit} \alias{extlogit} \alias{cauchit} \alias{probit} \alias{cloglog} \alias{multilogit} \alias{fisherz} \alias{rhobit} \alias{logoff} \alias{polf} \alias{golf} \alias{reciprocal} \alias{negreciprocal} \alias{negidentity} \alias{logc} \alias{foldsqrt} \alias{nbolf} % % % 201901 \alias{getarg} \alias{rainbow.sky} %\alias{loglink} %\alias{logneglink} %\alias{logofflink} %\alias{negidentitylink} %\alias{logitlink} % %\alias{logloglink} %\alias{clogloglink, } %\alias{reciprocallink} %\alias{negloglink} % %\alias{negreciprocallink} %\alias{rhobitlink} %\alias{fisherzlink} %\alias{multilogitlink} % %\alias{foldsqrtlink} %\alias{extlogitlink} %\alias{logclink} %\alias{cauchitlink} % %\alias{gordlink} %\alias{pordlink} %\alias{nbordlink} \alias{nbord2link} \alias{stieltjes} \alias{zeta.specials} %\alias{loglogloglink} %\alias{mills.ratio} %\alias{mills.ratio2} % 201810 %\alias{seglines} %\alias{hdeffsev} % 201805 \alias{fnumat2R} % 201802 \alias{attr.assign.x.vglm} \alias{car.relatives} % 201712 \alias{which.etas} \alias{which.xij} % 201709 % \alias{TIC} % 201706 and 201707 % \alias{lrp.vglm} \alias{retain.col} \alias{d3theta.deta3} % 201704 \alias{ghn100} \alias{ghw100} %\alias{hdeff} %\alias{hdeff.vglm} \alias{dprentice74} % 201612 \alias{label.cols.y} \alias{valid.vknotl2} % 201611 %\alias{profilevglm} %\alias{vpairs.profile} %\alias{vplot.profile} % 201609 \alias{prob.munb.size.VGAM} \alias{negbinomial.initialize.yj} % 201607, 201608 \alias{mroot2} \alias{psint} \alias{psintpvgam} \alias{df.residual_pvgam} \alias{startstoppvgam} \alias{summary.pvgam-class} %%%%% \alias{summarypvgam} %%%% % \alias{show.summary.pvgam} \alias{endf} \alias{endfpvgam} \alias{vcov.pvgam} \alias{vcov.pvgam-class} \alias{vlabel} \alias{show.pvgam} \alias{model.matrixpvgam} % 201606 \alias{gharmonic} \alias{gharmonic2} \alias{bisection.basic} \alias{Zeta.aux} \alias{deflat.limit.oizeta} % 201605 \alias{deflat.limit.oipospois} % 20160418 (keyword: mgcvvgam) % \alias{ps} \alias{get.X.VLM.aug} \alias{psv2magic} % \alias{psvglm.fit} % \alias{psvlm.wfit} \alias{pvgam-class} % \alias{PS} % \alias{checkwz} \alias{vforsub} \alias{vbacksub} \alias{vchol} \alias{process.constraints} \alias{mux5} \alias{mux22} \alias{mux111} % % % 201602, 201603, 201604: \alias{genbetaII.Loglikfun4} \alias{posNBD.Loglikfun2} \alias{NBD.Loglikfun2} \alias{AR1.gammas} \alias{Init.mu} \alias{.min.criterion.VGAM} \alias{predictvglmS4VGAM} % 201601: \alias{EIM.NB.speciald} \alias{EIM.NB.specialp} \alias{EIM.posNB.speciald} \alias{EIM.posNB.specialp} \alias{showvglmS4VGAM} \alias{showvgamS4VGAM} %\alias{coefvgam} % % 201512: \alias{margeffS4VGAM} \alias{showsummaryvglmS4VGAM} \alias{summaryvglmS4VGAM} \alias{findFirstMethod} \alias{cratio.derivs} \alias{subsetarray3} \alias{tapplymat1} % 201509, for a bug in car::linearHypothesis() and car:::Anova(): \alias{as.char.expression} \alias{coef.vlm} \alias{vcov.vlm} \alias{model.matrix.vlm} %\alias{has.intercept} %\alias{has.interceptvlm} %\alias{term.names} %\alias{term.namesvlm} \alias{responseName} \alias{responseNamevlm} % % 201503, 201504, 201505, 201508; %\alias{confintvglm} \alias{qlms.bcn} \alias{dlms.bcn} \alias{dbetaII} % \alias{AR1.control} % \alias{param.names} % 20151105 %\alias{is.buggy} %\alias{is.buggy.vlm} % % 201412; %\alias{linkfun.vglm} % 201408; \alias{dlevy} \alias{plevy} \alias{qlevy} \alias{rlevy} % 201407; expected.betabin.ab is needed for zibetabinomialff() in YBook. 
\alias{grid.search} \alias{grid.search2} \alias{grid.search3} \alias{grid.search4} \alias{expected.betabin.ab} % 201406; % \alias{interleave.VGAM} DONE 20151204 \alias{interleave.cmat} % 201506; \alias{marcumQ} \alias{QR.Q} \alias{QR.R} % %\alias{sm.bs} %\alias{sm.ns} %\alias{sm.poly} %\alias{sm.scale} %\alias{sm.scale.default} % % % % % 201312; % \alias{simulate.vlm} % 201311; 20150316: modified to familyname %\alias{familyname} %\alias{familyname.vlm} %\alias{familyname.vglmff} % 201309; \alias{I.col} \alias{BIC} \alias{check.omit.constant} % % 201308; %\alias{dbiclaytoncop} %\alias{rbiclaytoncop} %\alias{biclaytoncop} % % 201307; %\alias{posnormal.control} \alias{rec.normal.control} \alias{rec.exp1.control} %\alias{kendall.tau} %\alias{binormalcop} %\alias{dbinormcop} %\alias{pbinormcop} %\alias{rbinormcop} %\alias{expint, expexpint, expint.E1} % % 201302; % \alias{pgamma.deriv.unscaled} % \alias{pgamma.deriv} % \alias{digami} % % 201212; \alias{binom2.rho.ss} % % 20121105; % \alias{posbernoulli.b.control} \alias{N.hat.posbernoulli} %\alias{Rank} %\alias{Rank.rrvglm} %\alias{Rank.qrrvglm} %\alias{Rank.rrvgam} % 20121015; delete this later %\alias{huggins91.old} % % 20120912 \alias{arwz2wz} % % 20120813 New links (no earg) %\alias{Dtheta.deta} % Commented out 20170701 %\alias{D2theta.deta2} % Commented out 20170701 %\alias{Eta2theta} % Commented out 20170701 %\alias{Theta2eta} % Commented out 20170701 \alias{link2list} %\alias{Namesof} % Commented out 20170701 % % % % % 20120514, 20120528, \alias{w.wz.merge} \alias{w.y.check} \alias{vweighted.mean.default} % % 20120418 \alias{nvar_vlm} % 20120310 %\alias{hatvalues} %\alias{hatvalues.vlm} % % % 20120307 \alias{npred} \alias{npred.vlm} % % % % 20120215 % \alias{print.vglmff} \alias{show.vglmff} % \alias{print.vfamily} % \alias{show.Coef.rrar} % \alias{family.vglm} \alias{show.vgam} \alias{show.vglm} \alias{show.vlm} % \alias{print.vgam} % \alias{print.vglm} % \alias{print.vlm} % \alias{print.vlm.wfit} % % % % % 20120112 \alias{AIC} \alias{AICc} \alias{coef} \alias{logLik} \alias{plot} %\alias{vcov} % 20150828 %\alias{vcovvlm} % 20150828 \alias{VGAMenv} \alias{nobs} \alias{show.Coef.rrvgam} \alias{show.Coef.qrrvglm} \alias{show.Coef.rrvglm} \alias{show.rrvglm} \alias{show.summary.rrvgam} % \alias{show.summary.lms} \alias{show.summary.qrrvglm} % \alias{show.summary.rc.exponential} \alias{show.summary.rrvglm} %\alias{show.summary.uqo} % \alias{show.summary.vgam} % \alias{show.summary.vglm} % 20150831 \alias{show.summary.vlm} %\alias{show.uqo} \alias{show.vanova} \alias{show.vsmooth.spline} % % % % % % % % % % 20111224; lrtest and waldtest stuff %\alias{lrtest} %\alias{lrtest_vglm} %\alias{print_anova} \alias{update_default} \alias{update_formula} % %\alias{waldtest} %\alias{waldtest_vglm} %\alias{waldtest_default} %\alias{waldtest_formula} % % % % % 20110202; 20110317; James Lauder work %\alias{dexpgeom} %\alias{pexpgeom} %\alias{qexpgeom} %\alias{rexpgeom} %\alias{expgeometric} % %\alias{dweibull3} %\alias{pweibull3} %\alias{qweibull3} %\alias{rweibull3} %\alias{weibull3} % % % % 20110321; misc. datasets. %\alias{fibre1.5} %\alias{fibre15} % % % 20120206; for RR-NB, or rrn.tex. \alias{plota21} % % % 20110202; for Melbourne; these include datasets. 
\alias{azprocedure} \alias{Confint.rrnb} \alias{Confint.nb1} %\alias{gala} % \alias{melbmaxtemp} % % % %20111128; basics \alias{is.empty.list} % % % % %20101222; Alfian work %\alias{Rcim} % Has been written %\alias{plotrcim0} % Has been written %\alias{moffset} % Has been written % \alias{Qvar} \alias{plotqvar} \alias{qvplot} \alias{depvar.vlm} % % % % %20110411 %\alias{dbinorm} \alias{dnorm2} % %20090330 \alias{dclogloglap} \alias{dlogitlap} \alias{dprobitlap} \alias{logitlaplace1.control} \alias{loglaplace1.control} \alias{pclogloglap} \alias{plogitlap} \alias{pprobitlap} \alias{qclogloglap} \alias{qlogitlap} \alias{qprobitlap} \alias{rclogloglap} \alias{rlogitlap} \alias{rprobitlap} % % % % \alias{A1A2A3.orig} 20140909; same as A1A2A3(hwe = TRUE) \alias{AAaa.nohw} %\alias{AIC} %\alias{AIC.qrrvglm} %\alias{AIC.rrvglm} %\alias{AIC.vgam} %\alias{AIC.vglm} %\alias{AIC.vlm} % \alias{Build.terms} \alias{Build.terms.vlm} \alias{Coef.rrvgam} \alias{Coefficients} \alias{Cut} \alias{Deviance.categorical.data.vgam} \alias{InverseBrat} \alias{Max.Coef.qrrvglm} \alias{Max.qrrvglm} \alias{Opt.Coef.qrrvglm} \alias{Opt.qrrvglm} % \alias{R170.or.later} \alias{Tol.Coef.qrrvglm} %\alias{Tol.Coef.uqo} \alias{Tol.qrrvglm} %\alias{Tol.uqo} \alias{a2m} % \alias{abbott} % 20150320; no longer releasing family.quantal.R. % \alias{acat.deriv} % \alias{add.arg} % \alias{add.constraints} % \alias{add.hookey} %%%%%%\alias{add1} %%%%%%\alias{add1.vgam} %%%%%%\alias{add1.vglm} % \alias{adjust.Dmat.expression} \alias{alaplace1.control} \alias{alaplace2.control} \alias{alaplace3.control} % \alias{alias.vgam} % \alias{alias.vglm} \alias{anova.vgam} % \alias{anova.vglm} % \alias{as.vanova} % \alias{attrassign} % \alias{attrassigndefault} % \alias{attrassignlm} % \alias{beta4} % \alias{betaffqn} \alias{biplot} \alias{biplot.qrrvglm} % \alias{block.diag} % \alias{borel.tanner} % \alias{callcaof} % \alias{callcqof} % \alias{calldcaof} % \alias{calldcqof} % \alias{callduqof} % \alias{calluqof} % \alias{canonical.Hlist} % \alias{cao.fit} \alias{car.all} \alias{care.exp} \alias{care.exp2} % \alias{concoef.Coef.rrvgam} \alias{concoef.Coef.qrrvglm} \alias{concoef.rrvgam} \alias{concoef.qrrvglm} % \alias{cdf} \alias{cdf.lms.bcg} \alias{cdf.lms.bcn} \alias{cdf.lms.yjn} \alias{cdf.vglm} \alias{cm.VGAM} \alias{cm.zero.VGAM} \alias{cm.nointercept.VGAM} \alias{coefficients} \alias{coefqrrvglm} % \alias{coefvlm} % 20140124 \alias{coefvsmooth.spline} \alias{coefvsmooth.spline.fit} % \alias{constraints.vlm} % \alias{cqo.fit} \alias{d2theta.deta2} % \alias{dcda.fast} % \alias{dctda.fast.only} \alias{deplot} \alias{deplot.default} \alias{deplot.lms.bcg} \alias{deplot.lms.bcn} \alias{deplot.lms.yjn} \alias{deplot.lms.yjn2} \alias{deplot.vglm} \alias{deviance} %\alias{deviance.uqo} %\alias{deviance.vglm} \alias{deviance.vlm} \alias{deviance.qrrvglm} %\alias{df.residual} %\alias{df.residual_vlm} % \alias{dimm} % 20151105 % \alias{dneg.binomial} \alias{dnorm2} %\alias{dotC} %\alias{dotFortran} % \alias{dpsi.dlambda.yjn} % \alias{drop1.vgam} % \alias{drop1.vglm} \alias{dtheta.deta} % \alias{dy.dyj} % \alias{dyj.dy} \alias{effects} % \alias{effects.vgam} % \alias{effects.vlm} % \alias{eifun} \alias{eijfun} \alias{eta2theta} %\alias{explink} % \alias{extract.arg} \alias{fff.control} \alias{fill2} \alias{fill3} \alias{fitted} \alias{fitted.values} %\alias{fitted.values.uqo} \alias{fittedvsmooth.spline} % \alias{variable.names} \alias{variable.namesvlm} \alias{variable.namesrrvglm} \alias{case.names} \alias{case.namesvlm} % \alias{formula} 
\alias{formulaNA.VGAM} \alias{gammaff} % \alias{get.arg} % \alias{get.rrvglm.se1} % \alias{get.rrvglm.se2} % \alias{getind} % \alias{gh.weight.yjn.11} % \alias{gh.weight.yjn.12} % \alias{gh.weight.yjn.13} % \alias{glag.weight.yjn.11} % \alias{glag.weight.yjn.12} % \alias{glag.weight.yjn.13} % \alias{gleg.weight.yjn.11} % \alias{gleg.weight.yjn.12} % \alias{gleg.weight.yjn.13} \alias{glm} % \alias{hypersecant} % \alias{hypersecant01} % \alias{ima} % \alias{inv.binomial} \alias{inverse.gaussianff} \alias{is.Numeric} \alias{is.Numeric2} \alias{is.bell} \alias{is.bell.rrvgam} \alias{is.bell.qrrvglm} \alias{is.bell.rrvglm} \alias{is.bell.vlm} \alias{Kayfun.studentt} % \alias{is.linear.term} % \alias{jitteruqo} \alias{lm} \alias{lm2qrrvlm.model.matrix} \alias{lm2vlm.model.matrix} \alias{vlm2lm.model.matrix} \alias{lms.bcg.control} \alias{lms.bcn.control} \alias{lms.yjn.control} \alias{lmscreg.control} % \alias{logLik.vlm} \alias{logLik.qrrvglm} % \alias{lv.Coef.rrvgam} 20090505 \alias{latvar.Coef.qrrvglm} \alias{latvar.rrvgam} \alias{latvar.rrvglm} \alias{latvar.qrrvglm} \alias{lvplot.rrvgam} \alias{m2a} %\alias{m2avglm} % \alias{matrix.power} \alias{mbesselI0} \alias{mix2exp.control} \alias{mix2normal.control} \alias{mix2poisson.control} % \alias{model.matrix.qrrvglm} \alias{model.matrixvgam} % \alias{mux11} % \alias{mux15} % \alias{mux2} % \alias{mux5} % \alias{mux55} % \alias{mux7} % \alias{mux9} % \alias{my.dbinom} \alias{my1} \alias{my2} \alias{namesof} % \alias{natural.ig} % \alias{neg.binomial} % \alias{neg.binomial.k} % \alias{negbin.ab} % \alias{new.assign} \alias{nlminbcontrol} \alias{nbolf2} \alias{nobs.vlm} \alias{nvar} \alias{nvar.vlm} \alias{nvar.vgam} \alias{nvar.rrvglm} \alias{nvar.qrrvglm} \alias{nvar.rrvgam} \alias{nvar.rcim} % \alias{num.deriv.rrr} \alias{persp} \alias{persp.rrvgam} \alias{plot.rrvgam} \alias{plotpreplotvgam} %\alias{plotvglm} \alias{plotvlm} \alias{plotvsmooth.spline} % \alias{pnorm2} done 20120910 % \alias{poissonqn} \alias{predict} \alias{predict.rrvgam} \alias{predict.glm} \alias{predict.lm} \alias{predict.mlm} % \alias{predictqrrvglm} \alias{predict.rrvglm} %\alias{predict.uqo} \alias{predict.vgam} \alias{predict.vlm} \alias{predictrrvgam} \alias{predictors} \alias{predictors.vglm} \alias{predictvsmooth.spline} \alias{predictvsmooth.spline.fit} % \alias{preplotvgam} \alias{print} \alias{procVec} \alias{negzero.expression.VGAM} \alias{process.binomial2.data.VGAM} \alias{process.categorical.data.VGAM} % \alias{proj.vgam} % \alias{proj.vglm} \alias{put.caption} % \alias{pweights} % \alias{qrrvglm.xprod} \alias{qtplot} \alias{qtplot.default} \alias{qtplot.lms.bcg} \alias{qtplot.lms.bcn} \alias{explot.lms.bcn} \alias{qtplot.lms.yjn} \alias{qtplot.lms.yjn2} \alias{qtplot.vextremes} \alias{qtplot.vglm} \alias{quasiff} % \alias{rainfall} % \alias{remove.arg} % \alias{replace.constraints} %\alias{resid} %\alias{residuals} % \alias{residualsqrrvglm} % \alias{residualsuqo} % \alias{residualsvglm} % \alias{residualsvlm} % \alias{residvsmooth.spline} \alias{rlplot} \alias{rlplot.vextremes} \alias{rlplot.vglm} % \alias{rrar.Ak1} % \alias{rrar.Ci} % \alias{rrar.Di} % \alias{rrar.Ht} % \alias{rrar.Mi} % \alias{rrar.Mmat} % \alias{rrar.UU} % \alias{rrar.Ut} % \alias{rrar.Wmat} \alias{rrar.control} % \alias{rrr.alternating.expression} % \alias{rrr.deriv.gradient.fast} % \alias{rrr.deriv.rss} % \alias{rrr.derivC.rss} % \alias{rrr.derivative.expression} % \alias{rrr.end.expression} % \alias{rrr.init.expression} % \alias{rrr.normalize} % \alias{rrvglm.control.Gaussian} % 
\alias{rrvglm.fit} \alias{ResSS.vgam} \alias{s.vam} \alias{simple.exponential} \alias{better.exponential} \alias{simple.poisson} \alias{size.binomial} % % \alias{sm.min1} \alias{sm.min2} \alias{sm.scale1} \alias{sm.scale2} %\alias{stdze1} %\alias{stdze2} % % % \alias{step.vgam} % \alias{step.vglm} % \alias{subconstraints} \alias{summary.rrvgam} \alias{summary.grc} \alias{summary.lms} \alias{summary.qrrvglm} \alias{summary.rc.exponential} \alias{summaryrcim} \alias{summary.rrvglm} %\alias{summary.uqo} % \alias{summaryvgam} %\alias{summaryvglm} % 20150831 \alias{summaryvlm} % \alias{tapplymat1} \alias{terms.vlm} \alias{termsvlm} \alias{theta2eta} \alias{trivial.constraints} % \alias{update.vgam} % \alias{update.vglm} % \alias{uqo.fit} % \alias{valid.vglmff} % \alias{valid.vknotl2} \alias{valt.control} % \alias{valt} % \alias{valt.1iter} % \alias{valt.2iter} % \alias{valt.control} % \alias{varassign} % \alias{vchol.greenstadt} \alias{vcontrol.expression} % \alias{vcovdefault} % \alias{vcovqrrvglm} %\alias{vcovrrvglm} % 20150828 % \alias{vcovvlm} % \alias{veigen} % \alias{vellipse} % \alias{vgam.fit} % \alias{vgam.match} % \alias{vgam.nlchisq} % \alias{vgety} \alias{vgam.fit} \alias{vglm.fit} \alias{vglm.garma.control} \alias{vglm.multinomial.control} \alias{vglm.multinomial.deviance.control} \alias{dmultinomial} \alias{vglm.VGAMcategorical.control} % \alias{vindex} % \alias{vlabel} \alias{vlm} \alias{vlm.control} % \alias{vlm.wfit} \alias{vnonlinear.control} \alias{vplot} \alias{vplot.default} \alias{vplot.factor} \alias{vplot.list} \alias{vplot.matrix} \alias{vplot.numeric} \alias{vvplot.factor} \alias{weights} \alias{Wr1} \alias{Wr2} % \alias{wweighted.mean} \alias{wweights} % \alias{yformat} % \alias{ylim.scale} % % % %\alias{Coef.uqo-class} \alias{rrvgam-class} \alias{rcim0-class} \alias{rcim-class} \alias{grc-class} \alias{qrrvglm-class} \alias{summary.qrrvglm-class} \alias{summary.rrvglm-class} \alias{summary.vgam-class} \alias{summary.vglm-class} \alias{summary.vlm-class} %%% 20101216 \alias{summary.rcim-class} %\alias{summary.rcim-class} %\alias{summaryrcim-class} %\alias{uqo-class} \alias{vcov.qrrvglm-class} \alias{vlm-class} \alias{vlmsmall-class} \alias{vsmooth.spline-class} \alias{vsmooth.spline.fit-class} \alias{Coef.rrvgam-class} \alias{summary.rrvgam-class} % % %- Also NEED an '\alias' for EACH other topic documented here. \title{ Undocumented and Internally Used Functions and Classes } \description{ Those currently undocumented and internally used functions are aliased to this help file. Ditto for some classes. } %\usage{ %uninormal(lmean = "identitylink", lsd = "loglink", zero = NULL) %} %- maybe also 'usage' for other objects documented here. %\arguments{ % \item{lmean}{ % Link function applied to the mean. % See \code{\link{Links}} for more choices. % % } %} \details{ In the \pkg{VGAM} package there are currently many objects/methods/classes which are internal and/or undocumented. The help file suppresses the warnings when the package is 'CHECK'ed. } \value{ Each object/method/class may or may not have its own individual value. These will be documented over time. } %\references{ %} \author{ T. W. Yee } %\note{ % %} %\seealso{ % \code{gaussianff}, % \code{\link{posnormal}}. 
% %} %\examples{ %} \keyword{models} \keyword{regression} \keyword{internal} VGAM/man/hspider.Rd0000644000176200001440000000645513565414527013532 0ustar liggesusers\name{hspider} \alias{hspider} \docType{data} \title{ Hunting Spider Data } \description{ Abundance of hunting spiders in a Dutch dune area. } \usage{data(hspider)} \format{ A data frame with 28 observations (sites) on the following 18 variables. \describe{ \item{WaterCon}{Log percentage of soil dry mass.} \item{BareSand}{Log percentage cover of bare sand.} \item{FallTwig}{Log percentage cover of fallen leaves and twigs.} \item{CoveMoss}{Log percentage cover of the moss layer.} \item{CoveHerb}{Log percentage cover of the herb layer.} \item{ReflLux}{Reflection of the soil surface with cloudless sky.} \item{Alopacce}{Abundance of \emph{Alopecosa accentuata}.} \item{Alopcune}{Abundance of \emph{Alopecosa cuneata}.} \item{Alopfabr}{Abundance of \emph{Alopecosa fabrilis}.} \item{Arctlute}{Abundance of \emph{Arctosa lutetiana}.} \item{Arctperi}{Abundance of \emph{Arctosa perita}.} \item{Auloalbi}{Abundance of \emph{Aulonia albimana}.} \item{Pardlugu}{Abundance of \emph{Pardosa lugubris}.} \item{Pardmont}{Abundance of \emph{Pardosa monticola}.} \item{Pardnigr}{Abundance of \emph{Pardosa nigriceps}.} \item{Pardpull}{Abundance of \emph{Pardosa pullata}.} \item{Trocterr}{Abundance of \emph{Trochosa terricola}.} \item{Zoraspin}{Abundance of \emph{Zora spinimana}.} } } \details{ The data, which originally came from Van der Aart and Smeenk-Enserink (1975), consists of abundances (numbers trapped over a 60-week period) and 6 environmental variables. There were 28 sites. This data set has often been used to illustrate ordination, e.g., using canonical correspondence analysis (CCA). In the example below, the data is used for constrained quadratic ordination (CQO; formerly called canonical Gaussian ordination or CGO), a numerically intensive method that has many superior qualities. See \code{\link{cqo}} for details. } %\source{ %} \references{ Van der Aart, P. J. M. and Smeenk-Enserink, N. (1975) Correlations between distributions of hunting spiders (Lycosidae, Ctenidae) and environmental characteristics in a dune area. \emph{Netherlands Journal of Zoology}, \bold{25}, 1--45. } \examples{ summary(hspider) \dontrun{ # Standardize the environmental variables: hspider[, 1:6] <- scale(subset(hspider, select = WaterCon:ReflLux)) # Fit a rank-1 binomial CAO hsbin <- hspider # Binary species data hsbin[, -(1:6)] <- as.numeric(hsbin[, -(1:6)] > 0) set.seed(123) ahsb1 <- cao(cbind(Alopcune, Arctlute, Auloalbi, Zoraspin) ~ WaterCon + ReflLux, family = binomialff(multiple.responses = TRUE), df1.nl = 2.2, Bestof = 3, data = hsbin) par(mfrow = 2:1, las = 1) lvplot(ahsb1, type = "predictors", llwd=2, ylab="logitlink(p)", lcol=1:9) persp(ahsb1, rug = TRUE, col = 1:10, lwd = 2) coef(ahsb1) } } \keyword{datasets} %# Fit a rank-1 Poisson CQO %set.seed(111) # This leads to the global solution %# vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, %# vvv Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ %# vvv WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, %# vvv fam = poissonff, data = hspider, Crow1posit=FALSE) %# vvv nos = ncol(p1@y) %# vvv lvplot(p1, y=TRUE, lcol=1:nos, pch=1:nos, pcol=1:nos) %# vvv Coef(p1) %# vvv summary(p1) VGAM/man/lindley.Rd0000644000176200001440000000425413565414527013527 0ustar liggesusers\name{lindley} \alias{lindley} %- Also NEED an '\alias' for EACH other topic documented here. 
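% A small hedged sketch (not run): the mean formula stated in the details
% below can be checked by simulation; theta = 2 is an arbitrary value:
% theta <- 2; yy <- rlind(n = 1e5, theta = theta)
% c(mean(yy), (theta + 2) / (theta * (theta + 1)))  # Both about 0.667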
\title{ 1-parameter Lindley Distribution } \description{ Estimates the (1-parameter) Lindley distribution by maximum likelihood estimation. } \usage{ lindley(link = "loglink", itheta = NULL, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to the (positive) parameter. See \code{\link{Links}} for more choices. } % \item{earg}{ % List. Extra argument for the link. % See \code{earg} in \code{\link{Links}} for general information. % } \item{itheta, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The density function is given by \deqn{f(y; \theta) = \theta^2 (1 + y) \exp(-\theta y) / (1 + \theta)}{% f(y; theta) = theta^2 * (1 + y) * exp(-theta * y) / (1 + theta)} for \eqn{\theta > 0}{theta > 0} and \eqn{y > 0}. The mean of \eqn{Y} (returned as the fitted values) is \eqn{\mu = (\theta + 2) / (\theta (\theta + 1))}{mu = (theta + 2) / (theta * (theta + 1))}. The variance is \eqn{(\theta^2 + 4 \theta + 2) / (\theta (\theta + 1))^2}{(theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Lindley, D. V. (1958) Fiducial distributions and Bayes' theorem. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{20}, 102--107. Ghitany, M. E. and Atieh, B. and Nadarajah, S. (2008) Lindley distribution and its application. \emph{Math. Comput. Simul.}, \bold{78}, 493--506. } \author{ T. W. Yee } \note{ This \pkg{VGAM} family function can handle multiple responses (inputted as a matrix). Fisher scoring is implemented. } \seealso{ \code{\link{dlind}}, \code{\link{gammaR}}, \code{\link{simulate.vlm}}. } \examples{ ldata <- data.frame(y = rlind(n = 1000, theta = exp(3))) fit <- vglm(y ~ 1, lindley, data = ldata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/gamma1.Rd0000644000176200001440000000522313565414527013227 0ustar liggesusers\name{gamma1} \alias{gamma1} %- Also NEED an '\alias' for EACH other topic documented here. \title{ 1-parameter Gamma Regression Family Function } \description{ Estimates the 1-parameter gamma distribution by maximum likelihood estimation. } \usage{ gamma1(link = "loglink", zero = NULL, parallel = FALSE, type.fitted = c("mean", "percentiles", "Qlink"), percentiles = 50) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to the (positive) \emph{shape} parameter. See \code{\link{Links}} for more choices and general information. } \item{zero, parallel}{ Details at \code{\link{CommonVGAMffArguments}}. } \item{type.fitted, percentiles}{ See \code{\link{CommonVGAMffArguments}} for information. Using \code{"Qlink"} is for quantile-links in \pkg{VGAMextra}. } } \details{ The density function is given by \deqn{f(y) = \exp(-y) \times y^{shape-1} / \Gamma(shape)}{% f(y) = exp(-y) y^(shape-1) / gamma(shape)} for \eqn{shape > 0} and \eqn{y > 0}. Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma function, as in \code{\link[base:Special]{gamma}}. The mean of \eqn{Y} (returned as the default fitted values) is \eqn{\mu=shape}{mu=shape}, and the variance is \eqn{\sigma^2 = shape}{sigma^2 = shape}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). 
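% A small hedged sketch (not run) of the moments stated in the details
% above: with the rate fixed at 1, the mean and variance both equal 'shape':
% sh <- exp(2); yy <- rgamma(n = 1e5, shape = sh)
% c(mean(yy), var(yy), sh)  # All about 7.39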
The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Most standard texts on statistical distributions describe the 1-parameter gamma distribution, e.g., Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ This \pkg{VGAM} family function can handle multiple responses, which are inputted as a matrix. The parameter \eqn{shape} matches with \code{shape} in \code{\link[stats]{rgamma}}. The argument \code{rate} in \code{\link[stats]{rgamma}} is assumed 1 for this family function, so that \code{scale = 1} is used for calls to \code{\link[stats]{dgamma}}, \code{\link[stats]{qgamma}}, etc. If \eqn{rate} is unknown, use the family function \code{\link{gammaR}} to estimate it too. } \seealso{ \code{\link{gammaR}} for the 2-parameter gamma distribution, \code{\link{lgamma1}}, \code{\link{lindley}}, \code{\link{simulate.vlm}}. } \examples{ gdata <- data.frame(y = rgamma(n = 100, shape = exp(3))) fit <- vglm(y ~ 1, gamma1, data = gdata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/gumbelIIUC.Rd0000644000176200001440000000425313565414527014013 0ustar liggesusers\name{Gumbel-II} \alias{Gumbel-II} \alias{dgumbelII} \alias{pgumbelII} \alias{qgumbelII} \alias{rgumbelII} \title{The Gumbel-II Distribution} \description{ Density, cumulative distribution function, quantile function and random generation for the Gumbel-II distribution. } \usage{ dgumbelII(x, scale = 1, shape, log = FALSE) pgumbelII(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qgumbelII(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rgumbelII(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{shape, scale}{positive shape and scale parameters. } } \value{ \code{dgumbelII} gives the density, \code{pgumbelII} gives the cumulative distribution function, \code{qgumbelII} gives the quantile function, and \code{rgumbelII} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{gumbelII}} for details. } %\note{ % %} \seealso{ \code{\link{gumbelII}}, \code{\link{dgumbel}}. 
} \examples{ probs <- seq(0.01, 0.99, by = 0.01) Scale <- exp(1); Shape <- exp( 0.5); max(abs(pgumbelII(qgumbelII(p = probs, shape = Shape, Scale), shape = Shape, Scale) - probs)) # Should be 0 \dontrun{ x <- seq(-0.1, 10, by = 0.01); plot(x, dgumbelII(x, shape = Shape, Scale), type = "l", col = "blue", las = 1, main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "", ylim = 0:1) abline(h = 0, col = "blue", lty = 2) lines(x, pgumbelII(x, shape = Shape, Scale), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qgumbelII(probs, shape = Shape, Scale) lines(Q, dgumbelII(Q, Scale, Shape), col = "purple", lty = 3, type = "h") pgumbelII(Q, shape = Shape, Scale) - probs # Should be all zero abline(h = probs, col = "purple", lty = 3) } } \keyword{distribution} VGAM/man/gevUC.Rd0000644000176200001440000000773013565414527013102 0ustar liggesusers\name{gevUC} \alias{gevUC} \alias{dgev} \alias{pgev} \alias{qgev} \alias{rgev} \title{The Generalized Extreme Value Distribution } \description{ Density, distribution function, quantile function and random generation for the generalized extreme value distribution (GEV) with location parameter \code{location}, scale parameter \code{scale} and shape parameter \code{shape}. } \usage{ dgev(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 = sqrt(.Machine$double.eps)) pgev(q, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE) qgev(p, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE) rgev(n, location = 0, scale = 1, shape = 0) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{location}{the location parameter \eqn{\mu}{mu}.} \item{scale}{the (positive) scale parameter \eqn{\sigma}{sigma}. Must consist of positive values. } \item{shape}{the shape parameter \eqn{\xi}{xi}.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Uniform]{punif}} or \code{\link[stats:Uniform]{qunif}}. } \item{tolshape0}{ Positive numeric. Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero. If the absolute value of the estimate of \eqn{\xi}{xi} is less than this value then it will be assumed zero and a Gumbel distribution will be used. } % 20160412; Deprecated: % \item{oobounds.log, giveWarning}{ % Numeric and logical. % The GEV distribution has support in the region satisfying % \code{1+shape*(x-location)/scale > 0}. Outside that region, the % logarithm of the density is assigned \code{oobounds.log}, which % equates to a zero density. % It should not be assigned a positive number, and ideally is very negative. % Since \code{\link{egev}} uses this function it is necessary % to return a finite value outside this region so as to allow % for half-stepping. Both arguments are in support of this. % This argument and others match those of \code{\link{egev}}. % } } \value{ \code{dgev} gives the density, \code{pgev} gives the distribution function, \code{qgev} gives the quantile function, and \code{rgev} generates random deviates. } \references{ Coles, S. (2001) \emph{An Introduction to Statistical Modeling of Extreme Values}. London: Springer-Verlag. } \author{ T. W. 
Yee } \details{ See \code{\link{gev}}, the \pkg{VGAM} family function for estimating the 3 parameters by maximum likelihood estimation, for formulae and other details. Apart from \code{n}, all the above arguments may be vectors and are recycled to the appropriate length if necessary. } \note{ The default value of \eqn{\xi = 0}{xi = 0} means the default distribution is the Gumbel. Currently, these functions have different argument names compared with those in the \pkg{evd} package. } \seealso{ \code{\link{gev}}, \code{\link{gevff}}, \code{\link{vglm.control}}. } \examples{ loc <- 2; sigma <- 1; xi <- -0.4 pgev(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi) \dontrun{ x <- seq(loc - 3, loc + 3, by = 0.01) plot(x, dgev(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0, 1), main = "Blue is density, orange is cumulative distribution function", sub = "Purple are 10,...,90 percentiles", ylab = "", las = 1) abline(h = 0, col = "blue", lty = 2) lines(qgev(seq(0.1, 0.9, by = 0.1), loc, sigma, xi), dgev(qgev(seq(0.1, 0.9, by = 0.1), loc, sigma, xi), loc, sigma, xi), col = "purple", lty = 3, type = "h") lines(x, pgev(x, loc, sigma, xi), type = "l", col = "orange") abline(h = (0:10)/10, lty = 2, col = "gray50") } } \keyword{distribution} %dgev(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 = % sqrt(.Machine$double.eps), oobounds.log = -Inf, giveWarning = FALSE) VGAM/man/eunifUC.Rd0000644000176200001440000001301113565414527013414 0ustar liggesusers\name{Expectiles-Uniform} \alias{Expectiles-Uniform} \alias{eunif} \alias{deunif} \alias{peunif} \alias{qeunif} \alias{reunif} \title{ Expectiles of the Uniform Distribution } \description{ Density function, distribution function, expectile function, and random generation for the distribution associated with the expectiles of a uniform distribution. } \usage{ deunif(x, min = 0, max = 1, log = FALSE) peunif(q, min = 0, max = 1, lower.tail = TRUE, log.p = FALSE) qeunif(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6, lower.tail = TRUE, log.p = FALSE) reunif(n, min = 0, max = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{ Vector of expectiles. See the terminology note below. } \item{p}{ Vector of probabilities. % (tau or \eqn{\tau}). These should lie in \eqn{(0,1)}. } \item{n, min, max, log}{ See \code{\link[stats:Uniform]{runif}}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Uniform]{punif}} or \code{\link[stats:Uniform]{qunif}}. } \item{Maxit.nr}{ Numeric. Maximum number of Newton-Raphson iterations allowed. A warning is issued if convergence is not obtained for all \code{p} values. } \item{Tol.nr}{ Numeric. Small positive value specifying the tolerance or precision to which the expectiles are computed. } } \details{ Jones (1994) elucidated the property that the expectiles of a random variable \eqn{X} with distribution function \eqn{F(x)} correspond to the quantiles of a distribution \eqn{G(x)} where \eqn{G} is related by an explicit formula to \eqn{F}. In particular, let \eqn{y} be the \eqn{p}-expectile of \eqn{F}. Then \eqn{y} is the \eqn{p}-quantile of \eqn{G} where \deqn{p = G(y) = (P(y) - y F(y)) / (2[P(y) - y F(y)] + y - \mu),}{ p = G(y) = (P(y) - y F(y)) / (2[P(y) - y F(y)] + y - mu),} and \eqn{\mu}{mu} is the mean of \eqn{X}. 
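% A concrete sketch (not run) of the formula above: for the standard
% uniform, F(y) = y, P(y) = y^2/2 and mu = 1/2, so G(y) simplifies to
% y^2 / (2 * y^2 - 2 * y + 1); this agrees with peunif():
% yy <- seq(0.01, 0.99, by = 0.01)
% max(abs(peunif(yy) - yy^2 / (2 * yy^2 - 2 * yy + 1)))  # Should be 0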
The derivative of \eqn{G} is \deqn{g(y) = (\mu F(y) - P(y)) / (2[P(y) - y F(y)] + y - \mu)^2 .}{ g(y) = ( mu F(y) - P(y)) / (2[P(y) - y F(y)] + y - mu)^2 .} Here, \eqn{P(y)} is the partial moment \eqn{\int_{-\infty}^{y} x f(x) \, dx}{int^{y} x f(x) dx} and \eqn{0 < p < 1}. The 0.5-expectile is the mean \eqn{\mu}{mu} and the 0.5-quantile is the median. A note about the terminology used here. Recall in the \emph{S} language there are the \code{dpqr}-type functions associated with a distribution, e.g., \code{\link[stats:Uniform]{dunif}}, \code{\link[stats:Uniform]{punif}}, \code{\link[stats:Uniform]{qunif}}, \code{\link[stats:Uniform]{runif}}, for the uniform distribution. Here, \code{unif} corresponds to \eqn{F} and \code{eunif} corresponds to \eqn{G}. The addition of ``\code{e}'' (for \emph{expectile}) is for the `other' distribution associated with the parent distribution. Thus \code{deunif} is for \eqn{g}, \code{peunif} is for \eqn{G}, \code{qeunif} is for the inverse of \eqn{G}, \code{reunif} generates random variates from \eqn{g}. For \code{qeunif} the Newton-Raphson algorithm is used to solve for \eqn{y} satisfying \eqn{p = G(y)}. Numerical problems may occur when values of \code{p} are very close to 0 or 1. } \value{ \code{deunif(x)} gives the density function \eqn{g(x)}. \code{peunif(q)} gives the distribution function \eqn{G(q)}. \code{qeunif(p)} gives the expectile function: the expectile \eqn{y} such that \eqn{G(y) = p}. \code{reunif(n)} gives \eqn{n} random variates from \eqn{G}. } \references{ Jones, M. C. (1994) Expectiles and M-quantiles are quantiles. \emph{Statistics and Probability Letters}, \bold{20}, 149--153. Yee, T. W. (2012) Vector generalized linear and additive quantile and expectile regression. \emph{In preparation}. } \author{ T. W. Yee and Kai Huang } %\note{ %The ``\code{q}'', as the first character of ``\code{qeunif}'', %may be changed to ``\code{e}'' in the future, %the reason being to emphasize that the expectiles are returned. %Ditto for the argument ``\code{q}'' in \code{peunif}. % %} \seealso{ \code{\link{deexp}}, \code{\link{denorm}}, \code{\link{dunif}}, \code{\link{dsc.t2}}. 
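% A small sketch (not run) of the Newton-Raphson inversion just described:
% qeunif() should undo peunif() to within about 'Tol.nr':
% yy <- seq(0.05, 0.95, by = 0.05)
% max(abs(qeunif(peunif(yy)) - yy))  # Should be essentially 0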
} \examples{ my.p <- 0.25; y <- runif(nn <- 1000) (myexp <- qeunif(my.p)) sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my.p # Equivalently: I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp]) I2 <- mean(y > myexp) * mean(-myexp + y[y > myexp]) I1 / (I1 + I2) # Should be my.p # Or: I1 <- sum( myexp - y[y <= myexp]) I2 <- sum(-myexp + y[y > myexp]) I1 / (I1 + I2) # Should be my.p # Non-standard uniform mymin <- 1; mymax <- 8 yy <- runif(nn, mymin, mymax) (myexp <- qeunif(my.p, mymin, mymax)) sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my.p peunif(mymin, mymin, mymax) # Should be 0 peunif(mymax, mymin, mymax) # Should be 1 peunif(mean(yy), mymin, mymax) # Should be 0.5 abs(qeunif(0.5, mymin, mymax) - mean(yy)) # Should be 0 abs(qeunif(0.5, mymin, mymax) - (mymin+mymax)/2) # Should be 0 abs(peunif(myexp, mymin, mymax) - my.p) # Should be 0 integrate(f = deunif, lower = mymin - 3, upper = mymax + 3, min = mymin, max = mymax) # Should be 1 \dontrun{ par(mfrow = c(2,1)) yy <- seq(0.0, 1.0, len = nn) plot(yy, deunif(yy), type = "l", col = "blue", ylim = c(0, 2), xlab = "y", ylab = "g(y)", main = "g(y) for Uniform(0,1)") lines(yy, dunif(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original' plot(yy, peunif(yy), type = "l", col = "blue", ylim = 0:1, xlab = "y", ylab = "G(y)", main = "G(y) for Uniform(0,1)") abline(a = 0.0, b = 1.0, col = "darkgreen", lty = "dotted", lwd = 2) abline(v = 0.5, h = 0.5, col = "red", lty = "dashed") } } \keyword{distribution} VGAM/man/cao.Rd0000644000176200001440000003004213565414527012623 0ustar liggesusers\name{cao} \alias{cao} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Fitting Constrained Additive Ordination (CAO) } \description{ A constrained additive ordination (CAO) model is fitted using the \emph{reduced-rank vector generalized additive model} (RR-VGAM) framework. } \usage{ cao(formula, family = stop("argument 'family' needs to be assigned"), data = list(), weights = NULL, subset = NULL, na.action = na.fail, etastart = NULL, mustart = NULL, coefstart = NULL, control = cao.control(...), offset = NULL, method = "cao.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, contrasts = NULL, constraints = NULL, extra = NULL, qr.arg = FALSE, smart = TRUE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{formula}{ a symbolic description of the model to be fit. The RHS of the formula is used to construct the latent variables, upon which the smooths are applied. All the variables in the formula are used for the construction of latent variables except for those specified by the argument \code{noRRR}, which is itself a formula. The LHS of the formula contains the response variables, which should be a matrix with each column being a response (species). } \item{family}{ a function of class \code{"vglmff"} (see \code{\link{vglmff-class}}) describing what statistical model is to be fitted. This is called a ``\pkg{VGAM} family function''. See \code{\link{CommonVGAMffArguments}} for general information about many types of arguments found in this type of function. See \code{\link{cqo}} for a list of those presently implemented. } \item{data}{ an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which \code{cao} is called. } \item{weights}{ an optional vector or matrix of (prior) weights to be used in the fitting process. For \code{cao}, this argument currently should not be used. 
} \item{subset}{ an optional logical vector specifying a subset of observations to be used in the fitting process. } \item{na.action}{ a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link[base]{options}}, and is \code{na.fail} if that is unset. The ``factory-fresh'' default is \code{na.omit}. } \item{etastart}{ starting values for the linear predictors. It is an \eqn{M}-column matrix. If \eqn{M=1} then it may be a vector. For \code{cao}, this argument currently should not be used. } \item{mustart}{ starting values for the fitted values. It can be a vector or a matrix. Some family functions do not make use of this argument. For \code{cao}, this argument currently should not be used. } \item{coefstart}{ starting values for the coefficient vector. For \code{cao}, this argument currently should not be used. } \item{control}{ a list of parameters for controlling the fitting process. See \code{\link{cao.control}} for details. } \item{offset}{ a vector or \eqn{M}-column matrix of offset values. These are \emph{a priori} known and are added to the linear predictors during fitting. For \code{cao}, this argument currently should not be used. } \item{method}{ the method to be used in fitting the model. The default (and presently only) method \code{cao.fit} uses iteratively reweighted least squares (IRLS) within FORTRAN code called from \code{\link[stats]{optim}}. } \item{model}{ a logical value indicating whether the \emph{model frame} should be assigned in the \code{model} slot. } \item{x.arg, y.arg}{ logical values indicating whether the model matrix and response vector/matrix used in the fitting process should be assigned in the \code{x} and \code{y} slots. Note the model matrix is the linear model (LM) matrix. } \item{contrasts}{ an optional list. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}. } \item{constraints}{ an optional list of constraint matrices. For \code{cao}, this argument currently should not be used. The components of the list must be named with the term it corresponds to (and it must match in character format). Each constraint matrix must have \eqn{M} rows, and be of full-column rank. By default, constraint matrices are the \eqn{M} by \eqn{M} identity matrix unless arguments in the family function itself override these values. If \code{constraints} is used it must contain \emph{all} the terms; an incomplete list is not accepted. } \item{extra}{ an optional list with any extra information that might be needed by the family function. For \code{cao}, this argument currently should not be used. } \item{qr.arg}{ For \code{cao}, this argument currently should not be used. } \item{smart}{ logical value indicating whether smart prediction (\code{\link{smartpred}}) will be used. } \item{\dots}{ further arguments passed into \code{\link{cao.control}}. } } \details{ The arguments of \code{cao} are a mixture of those from \code{\link{vgam}} and \code{\link{cqo}}, but with some extras in \code{\link{cao.control}}. Currently, not all of the arguments work properly. CAO can loosely be thought of as the result of fitting generalized additive models (GAMs) to several responses (e.g., species) against a very small number of latent variables. Each latent variable is a linear combination of the explanatory variables; the coefficients \bold{C} (called \eqn{C} below) are called \emph{constrained coefficients} or \emph{canonical coefficients}, and are interpreted as weights or loadings. 
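% A hedged sketch (not run) of this construction, assuming a fitted
% rank-1 object such as 'ap1' in the example below; the row names of
% concoef() give the variables comprising the latent variable:
% C.hat <- concoef(ap1)  # Constrained (canonical) coefficients
% nu.hat <- as.matrix(hspider[, rownames(C.hat)]) %*% C.hat
% plot(nu.hat, latvar(ap1))  # Should lie close to a straight line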
The \bold{C} are estimated by maximum likelihood estimation. It is often a good idea to apply \code{\link[base]{scale}} to each explanatory variable first. For each response (e.g., species), each latent variable is smoothed by a cubic smoothing spline, thus CAO is data-driven. If each smooth were a quadratic then CAO would simplify to \emph{constrained quadratic ordination} (CQO; formerly called \emph{canonical Gaussian ordination} or CGO). If each smooth were linear then CAO would simplify to \emph{constrained linear ordination} (CLO). CLO can theoretically be fitted with \code{cao} by specifying \code{df1.nl=0}, however it is more efficient to use \code{\link{rrvglm}}. Currently, only \code{Rank=1} is implemented, and only \code{noRRR = ~1} models are handled. % Poisson and binary responses are implemented (viz., % \code{\link{poissonff}}, \code{\link{binomialff}}), and % dispersion parameters for these must be assumed known. Hence using % \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}} will % currently fail. Also, currently, only \code{noRRR = ~ 1} models are % handled. With binomial data, the default formula is \deqn{logit(P[Y_s=1]) = \eta_s = f_s(\nu), \ \ \ s=1,2,\ldots,S}{% logit(P[Y_s=1]) = eta_s = f_s(nu), s = 1, 2, ..., S} where \eqn{x_2}{x_2} is a vector of environmental variables, and \eqn{\nu=C^T x_2}{nu=C^T x_2} is an \eqn{R}-vector of latent variables. The \eqn{\eta_s}{eta_s} is an additive predictor for species \eqn{s}, and it models the probabilities of presence as an additive model on the logit scale. The matrix \eqn{C} is estimated from the data, as well as the smooth functions \eqn{f_s}. The argument \code{noRRR = ~ 1} specifies that the vector \eqn{x_1}{x_1}, defined for RR-VGLMs and QRR-VGLMs, is simply a 1 for an intercept. Here, the intercept in the model is absorbed into the functions. A \code{\link{clogloglink}} link may be preferable to a \code{\link{logitlink}} link. With Poisson count data, the formula is \deqn{\log(E[Y_s]) = \eta_s = f_s(\nu)}{% log(E[Y_s]) = eta_s = f_s(nu)} which models the mean response as an additive model on the log scale. The fitted latent variables (site scores) are scaled to have unit variance. The concept of a tolerance is undefined for CAO models, but the optimums and maximums are defined. The generic functions \code{\link{Max}} and \code{\link{Opt}} should work for CAO objects, but note that if the maximum occurs at the boundary then \code{\link{Max}} will return \code{NA}. Inference for CAO models is currently undeveloped. } \value{ An object of class \code{"cao"} (this may change to \code{"rrvgam"} in the future). Several generic functions can be applied to the object, e.g., \code{\link{Coef}}, \code{\link{concoef}}, \code{\link{lvplot}}, \code{\link{summary}}. } \references{ Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213. % Documentation accompanying the \pkg{VGAM} package at % \url{http://www.stat.auckland.ac.nz/~yee} % contains further information and examples. } \author{T. W. Yee} \note{ CAO models are computationally expensive, therefore setting \code{trace = TRUE} is a good idea, as well as running it on a simple random sample of the data set instead. Sometimes the IRLS algorithm does not converge within the FORTRAN code. This results in warnings being issued. In particular, if an error code of 3 is issued, then this indicates the IRLS algorithm has not converged. 
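% A hedged monitoring sketch (not run): the deviance history of a fit
% such as 'ap1' in the example below can help flag nonconvergence:
% sort(deviance(ap1, history = TRUE))  # One value per IRLS iteration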
One possible remedy is to increase or decrease the nonlinear degrees of freedom so that the curves become more or less flexible, respectively. } \section{Warning }{ CAO is very costly to compute. With version 0.7-8 it took 28 minutes on a fast machine. I hope to look at ways of speeding things up in the future. Use \code{\link[base:Random]{set.seed}} just prior to calling \code{cao()} to make your results reproducible. The reason for this is that finding the optimal CAO model presents a difficult optimization problem, partly because the log-likelihood function contains many local solutions. To obtain the (global) solution the user is advised to try \emph{many} initial values. This can be done by setting \code{Bestof} to some appropriate value (see \code{\link{cao.control}}). Trying many initial values becomes progressively more important as the nonlinear degrees of freedom of the smooths increase. % The code is a little fragile at this stage, so the function might % hang/lock up in the Microsoft Windows version. % Currently the dispersion parameter for a % \code{\link{gaussianff}} CAO model is estimated slightly differently % and may be slightly biased downwards (usually a little too small). } \seealso{ \code{\link{cao.control}}, \code{Coef.cao}, \code{\link{cqo}}, \code{\link{latvar}}, \code{\link{Opt}}, \code{\link{Max}}, \code{\link{calibrate.qrrvglm}}, \code{persp.cao}, \code{\link{poissonff}}, \code{\link{binomialff}}, \code{\link{negbinomial}}, \code{\link{gamma2}}, \code{\link[base:Random]{set.seed}}, \code{\link[gam]{gam}}, \code{\link[VGAMdata]{trapO}}. % \code{\link{gaussianff}}, } \examples{ \dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars set.seed(149) # For reproducible results ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, Rank = 1, df1.nl = c(Pardpull= 2.7, 2.5), Bestof = 7, Crow1positive = FALSE) sort(deviance(ap1, history = TRUE)) # A history of all the iterations Coef(ap1) concoef(ap1) par(mfrow = c(2, 2)) plot(ap1) # All the curves are unimodal; some quite symmetric par(mfrow = c(1, 1), las = 1) index <- 1:ncol(depvar(ap1)) lvplot(ap1, lcol = index, pcol = index, y = TRUE) trplot(ap1, label = TRUE, col = index) abline(a = 0, b = 1, lty = 2) trplot(ap1, label = TRUE, col = "blue", log = "xy", which.sp = c(1, 3)) abline(a = 0, b = 1, lty = 2) persp(ap1, col = index, lwd = 2, label = TRUE) abline(v = Opt(ap1), lty = 2, col = index) abline(h = Max(ap1), lty = 2, col = index) } } \keyword{models} \keyword{regression} VGAM/man/coefvgam.Rd0000644000176200001440000000311213565414527013646 0ustar liggesusers\name{coefvgam} \alias{coefvgam} \alias{coef,vgam-method} \alias{coefficients,vgam-method} %- Also NEED an '\alias' for EACH other topic documented here. 
A list if \code{type = "nonlinear"}, and each component of this list corresponds to an \code{\link{s}} term; the component contains an S4 object with slot names such as \code{"Bcoefficients"}, \code{"knots"}, \code{"xmin"}, \code{"xmax"}. } %\references{ % % %} \author{ Thomas W. Yee } %\note{ %} %\section{Warning }{ %} \seealso{ \code{\link{vgam}}, \code{\link{coefvlm}}, \code{\link[stats]{coef}}. % \code{\link{coef-method}}, } \examples{ fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua) coef(fit) # Same as coef(fit, type = "linear") (ii <- coef(fit, type = "nonlinear")) is.list(ii) names(ii) slotNames(ii[[1]]) } \keyword{models} \keyword{regression} VGAM/man/perksUC.Rd0000644000176200001440000000403513565414527013440 0ustar liggesusers\name{Perks} \alias{Perks} \alias{dperks} \alias{pperks} \alias{qperks} \alias{rperks} \title{The Perks Distribution} \description{ Density, cumulative distribution function, quantile function and random generation for the Perks distribution. } \usage{ dperks(x, scale = 1, shape, log = FALSE) pperks(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qperks(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rperks(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{shape, scale}{positive shape and scale parameters. } } \value{ \code{dperks} gives the density, \code{pperks} gives the cumulative distribution function, \code{qperks} gives the quantile function, and \code{rperks} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{perks}} for details. } %\note{ % %} \seealso{ \code{\link{perks}}. } \examples{ probs <- seq(0.01, 0.99, by = 0.01) Shape <- exp(-1.0); Scale <- exp(1); max(abs(pperks(qperks(p = probs, Shape, Scale), Shape, Scale) - probs)) # Should be 0 \dontrun{ x <- seq(-0.1, 07, by = 0.01); plot(x, dperks(x, Shape, Scale), type = "l", col = "blue", las = 1, main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "", ylim = 0:1) abline(h = 0, col = "blue", lty = 2) lines(x, pperks(x, Shape, Scale), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qperks(probs, Shape, Scale) lines(Q, dperks(Q, Shape, Scale), col = "purple", lty = 3, type = "h") pperks(Q, Shape, Scale) - probs # Should be all zero abline(h = probs, col = "purple", lty = 3) } } \keyword{distribution} VGAM/man/rec.normal.Rd0000644000176200001440000000620313565414527014123 0ustar liggesusers\name{rec.normal} \alias{rec.normal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Upper Record Values from a Univariate Normal Distribution } \description{ Maximum likelihood estimation of the two parameters of a univariate normal distribution when the observations are upper record values. } \usage{ rec.normal(lmean = "identitylink", lsd = "loglink", imean = NULL, isd = NULL, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmean, lsd}{ Link functions applied to the mean and sd parameters. See \code{\link{Links}} for more choices. } \item{imean, isd}{ Numeric. Optional initial values for the mean and sd. 
The default value \code{NULL} means they are computed internally, with the help of \code{imethod}. } \item{imethod}{ Integer, either 1 or 2 or 3. Initial method; three algorithms are implemented. Choose another value if convergence fails, or use \code{imean} and/or \code{isd}. } \item{zero}{ Can be an integer vector, containing the value 1 or 2. If so, the mean or standard deviation respectively are modelled as an intercept only. Usually, setting \code{zero = 2} will be used, if used at all. The default value \code{NULL} means both linear/additive predictors are modelled as functions of the explanatory variables. See \code{\link{CommonVGAMffArguments}} for more information. } } \details{ The response must be a vector or one-column matrix with strictly increasing values. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Arnold, B. C. and Balakrishnan, N. and Nagaraja, H. N. (1998) \emph{Records}, New York: John Wiley & Sons. } \author{ T. W. Yee } \note{ This family function tries to solve a difficult problem, and the larger the data set the better. Convergence failure can commonly occur, and convergence may be very slow, so set \code{maxit = 200, trace = TRUE}, say. Inputting good initial values is advised. This family function uses the BFGS quasi-Newton update formula for the working weight matrices. Consequently the estimated variance-covariance matrix may be inaccurate or simply wrong! The standard errors must therefore be treated with caution; these are computed in functions such as \code{vcov()} and \code{summary()}. } \seealso{ \code{\link{uninormal}}, \code{\link{double.cens.normal}}. } \examples{ nn <- 10000; mymean <- 100 # First value is reference value or trivial record Rdata <- data.frame(rawy = c(mymean, rnorm(nn, me = mymean, sd = exp(3)))) # Keep only observations that are records: rdata <- data.frame(y = unique(cummax(with(Rdata, rawy)))) fit <- vglm(y ~ 1, rec.normal, data = rdata, trace = TRUE, maxit = 200) coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} %# Keep only observations that are records %delete = c(FALSE, rep(TRUE, len = n)) %for (i in 2:length(rawy)) % if (rawy[i] > max(rawy[1:(i-1)])) delete[i] = FALSE %(y = rawy[!delete]) VGAM/man/calibrate.qrrvglm.Rd0000644000176200001440000003003013565414527015475 0ustar liggesusers\name{calibrate.qrrvglm} \alias{calibrate.qrrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Calibration for CQO and CAO models } \description{ Performs maximum likelihood calibration for constrained quadratic and additive ordination models (CQO and CAO models are better known as QRR-VGLMs and RR-VGAMs respectively). % for constrained and unconstrained } \usage{ calibrate.qrrvglm(object, newdata = NULL, type = c("latvar", "predictors", "response", "vcov", "everything"), lr.confint = FALSE, cf.confint = FALSE, level = 0.95, initial.vals = NULL, ...) } % se.type = c("dzwald", "asbefore"), %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ The fitted CQO/CAO model. } \item{newdata}{ A data frame with new response data, such as new species data. The default is to use the original data used to fit the model; however, the calibration may take a long time to compute because the computations are expensive. % 20190211: Note that the creation of the model frame associated with \code{newdata} is fragile. 
Factors may not be created properly. If a variable is binary then it's best for it to be straightforward and have only 0 and 1 as values. } \item{type}{ What type of result is to be returned. The first are the calibrated latent variables or site scores. This is always computed. The \code{"predictors"} are the linear/quadratic or additive predictors evaluated at the calibrated latent variables or site scores. The \code{"response"} are the fitted values (usually means) evaluated at the calibrated latent variables or site scores. The \code{"vcov"} are the Wald-type estimated variance-covariance matrices of the calibrated latent variables or site scores. The \code{"everything"} is for all of them, i.e., all \code{type}s. Note that for CAO models, the \code{"vcov"} type is unavailable. } % \item{se.type}{ What type of standard errors are to be returned. % The choice \code{"asbefore"} comes from a long time ago. % The choice \code{"dzwald"} is based on work by David Zucker. % } \item{lr.confint, level}{ Compute \emph{approximate} likelihood ratio based confidence intervals? If \code{TRUE} then \code{level} is the confidence level required and one should have \code{type = "latvar"} or \code{type = "everything"}; and currently only rank-1 models are supported. This option works for CLO and CQO models and not for CAO models. The function \code{\link[stats]{uniroot}} is called to solve for the root of a nonlinear equation to obtain each confidence limit, and this is not entirely reliable. It is assumed that the likelihood function is unimodal about its MLE because only one root is returned if there is more than one. One root is found on each side of the MLE. Technically, the default is to find the value of the latent variable whose difference in deviance (or twice the difference in log-likelihoods) from the optimal model is equal to \code{qchisq(level, df = 1)}. The intervals are not true profile likelihood intervals because it is not possible to estimate the regression coefficients of the QRR-VGLM/RR-VGLM based on one response vector. See \code{\link[stats]{confint}} to get the flavour of these two arguments in general. % 20180501 } \item{cf.confint}{ Compute \emph{approximate} characteristic function based confidence intervals? If \code{TRUE} then \code{level} is the confidence level required and one should have \code{type = "latvar"} or \code{type = "everything"}; and currently only rank-1 models are supported. This option works for \code{\link{binomialff}} and \code{\link{poissonff}} CLO and CQO models and not for CAO models. The function \code{\link[stats]{uniroot}} is called to solve for the root of a nonlinear equation to obtain each confidence limit, and this is not entirely reliable. It is assumed that the likelihood function is unimodal because only one root is returned if there is more than one. Technically, the CDF of a normalized score statistic is obtained by Gauss--Hermite numerical integration of a complex-valued integrand, and this is based on the inversion formula described in Abate and Whitt (1992). % 20180602 } \item{initial.vals}{ Initial values for the search. For rank-1 models, this should be a vector having length equal to \code{nrow(newdata)}, and for rank-2 models this should be a two-column matrix with the number of rows equalling the number of rows in \code{newdata}. The default is a grid defined by arguments in \code{\link{calibrate.qrrvglm.control}}. } \item{\dots}{ Arguments that are fed into \code{\link{calibrate.qrrvglm.control}}. 
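% A small sketch (not run) of the deviance cutoff mentioned under
% 'lr.confint' above:
% qchisq(0.95, df = 1)  # About 3.84 when level = 0.95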
} } \details{ Given a fitted regression CQO/CAO model, maximum likelihood calibration is theoretically easy and elegant. However, the method assumes that all the responses are independent, which is often not true in practice. More details and references are given in Yee (2018) and ch. 6 of Yee (2015). The function \code{\link[stats]{optim}} is used to search for the maximum likelihood solution. Good initial values are needed, and arguments in \code{\link{calibrate.qrrvglm.control}} allow the user some control over the choice of these. } \value{ Several methods are implemented to obtain confidence intervals/regions for the calibration estimates. One method applies when \code{lr.confint = TRUE}: a 4-column matrix is then returned with the confidence limits being the final 2 columns (if \code{type = "everything"} then the matrix is returned in the \code{lr.confint} list component). Another similar method is when \code{cf.confint = TRUE}. There may be some redundancy in whatever is returned. Other methods are returned by using \code{type} and they are described as follows. % and \code{se.type}; The argument \code{type} determines what is returned. If \code{type = "everything"} then all the \code{type} values are returned in a list, with the following components. Each component has length \code{nrow(newdata)}. \item{latvar}{Calibrated latent variables or site scores (the default). This may have the attribute \code{"objectiveFunction"} which is usually the log-likelihood or the deviance. } \item{predictors }{linear/quadratic or additive predictors. For example, for Poisson families, this will be on a log scale, and for binomial families, this will be on a logit scale. } \item{response}{Fitted values of the response, evaluated at the calibrated latent variables. % or site scores. } \item{vcov}{Wald-type estimated variance-covariance matrices of the calibrated latent variables or site scores. Actually, these are stored in a 3-D array whose dimension is \code{c(Rank(object), Rank(object), nrow(newdata))}. This type has only been implemented for \code{\link{binomialff}} and \code{\link{poissonff}} models with canonical links and \code{noRRR = ~ 1} and, for CQOs, \code{I.tolerances = TRUE} or \code{eq.tolerances = TRUE}. } } \references{ Abate, J. and Whitt, W. (1992). The Fourier-series method for inverting transforms of probability distributions. \emph{Queueing Systems}, \bold{10}, 5--88. %Yee, T. W. (2018). %On constrained and unconstrained %quadratic ordination. %\emph{Manuscript in preparation}. ter Braak, C. J. F. (1995). Calibration. In: \emph{Data Analysis in Community and Landscape Ecology} by Jongman, R. H. G., ter Braak, C. J. F. and van Tongeren, O. F. R. (Eds.) Cambridge University Press, Cambridge. } \author{T. W. Yee. Recent work on the standard errors by David Zucker and Sam Oman at HUJI is gratefully acknowledged---these are returned in the \code{vcov} component and provided inspiration for \code{lr.confint} and \code{cf.confint}. A joint publication is being prepared on this subject. } \note{ Despite the name of this function, CAO models are handled as well to a certain extent. Some combinations of parameters are not handled, e.g., \code{lr.confint = TRUE} only works for rank-1, \code{type = "vcov"} only works for \code{\link{binomialff}} and \code{\link{poissonff}} models with canonical links and \code{noRRR = ~ 1}, and higher-order rank models need \code{eq.tolerances = TRUE} or \code{I.tolerances = TRUE} as well. 
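% A hedged access sketch (not run), assuming 'cp1wald' as created with
% type = "everything" in the example below:
% cp1wald$latvar  # Calibrated site scores
% dim(cp1wald$vcov)  # c(Rank(object), Rank(object), nrow(newdata))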
For rank-1 objects, \code{lr.confint = TRUE} is recommended over \code{type = "vcov"} in terms of accuracy and overall generality. For class \code{"qrrvglm"} objects it is necessary that all responses' tolerance matrices are positive-definite, which corresponds to bell-shaped response curves/surfaces. For \code{\link{binomialff}} and \code{\link{poissonff}} models the \code{deviance} slot is used for the optimization rather than the \code{loglikelihood} slot, therefore one can calibrate using real-valued responses. (If the \code{loglikelihood} slot were used then functions such as \code{\link[stats]{dpois}} would be used with \code{log = TRUE} and then would be restricted to feeding in integer-valued response values.) % 20190208: Maximum likelihood calibration for Gaussian logit regression models may be performed by \pkg{rioja} but this applies to a single environmental variable such as \code{pH} in \code{data("SWAP", package = "rioja")}. In \pkg{VGAM} \code{calibrate()} estimates values of the \emph{latent variable} rather than individual explanatory variables, hence the setting is geared more towards ordination. % Despite the name of this function, UQO and CAO models are handled } \section{Warning }{ This function is computationally expensive. Setting \code{trace = TRUE} to get a running log can be a good idea. This function has been tested but not extensively. } \seealso{ \code{\link{calibrate.qrrvglm.control}}, \code{\link{calibrate.rrvglm}}, \code{\link{calibrate}}, \code{\link{cqo}}, \code{\link{cao}}, \code{\link[stats]{optim}}, \code{\link[stats]{uniroot}}. % \code{\link{uqo}}, } \examples{ \dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Stdze environmental variables set.seed(123) siteNos <- c(1, 5) # Calibrate these sites pet1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, trace = FALSE, data = hspider[-siteNos, ], # Sites not in fitted model family = poissonff, I.toler = TRUE, Crow1positive = TRUE) y0 <- hspider[siteNos, colnames(depvar(pet1))] # Species counts (cpet1 <- calibrate(pet1, trace = TRUE, newdata = data.frame(y0))) (clrpet1 <- calibrate(pet1, lr.confint = TRUE, newdata = data.frame(y0))) (ccfpet1 <- calibrate(pet1, cf.confint = TRUE, newdata = data.frame(y0))) (cp1wald <- calibrate(pet1, newdata = y0, type = "everything")) } \dontrun{ # Graphically compare the actual site scores with their calibrated # values. 95 percent likelihood-based confidence intervals in green. 
persp(pet1, main = "Site scores: solid=actual, dashed=calibrated", label = TRUE, col = "gray50", las = 1) # Actual site scores: xvars <- rownames(concoef(pet1)) # Variables comprising the latvar est.latvar <- as.matrix(hspider[siteNos, xvars]) \%*\% concoef(pet1) abline(v = est.latvar, col = seq(siteNos)) abline(v = cpet1, lty = 2, col = seq(siteNos)) # Calibrated values arrows(clrpet1[, 3], c(60, 60), clrpet1[, 4], c(60, 60), # Add CIs length = 0.08, col = "orange", angle = 90, code = 3, lwd = 2) arrows(ccfpet1[, 3], c(70, 70), ccfpet1[, 4], c(70, 70), # Add CIs length = 0.08, col = "limegreen", angle = 90, code = 3, lwd = 2) arrows(cp1wald$latvar - 1.96 * sqrt(cp1wald$vcov), c(65, 65), cp1wald$latvar + 1.96 * sqrt(cp1wald$vcov), c(65, 65), # Wald CIs length = 0.08, col = "blue", angle = 90, code = 3, lwd = 2) legend("topright", lwd = 2, leg = c("CF interval", "Wald interval", "LR interval"), col = c("limegreen", "blue", "orange"), lty = 1) } } \keyword{models} \keyword{regression} VGAM/man/simplexUC.Rd0000644000176200001440000000362713565414527014003 0ustar liggesusers\name{Simplex } \alias{dsimplex} %\alias{psimplex} %\alias{qsimplex} \alias{rsimplex} \title{ Simplex Distribution } \description{ Density function, and random generation for the simplex distribution. } \usage{ dsimplex(x, mu = 0.5, dispersion = 1, log = FALSE) rsimplex(n, mu = 0.5, dispersion = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ Vector of quantiles. The support of the distribution is the interval \eqn{(0,1)}. } \item{mu, dispersion}{ Mean and dispersion parameters. The former lies in the interval \eqn{(0,1)} and the latter is positive. } \item{n, log}{ Same usage as \code{\link[stats:Uniform]{runif}}. } } \details{ The \pkg{VGAM} family function \code{\link{simplex}} fits this model; see that online help for more information. For \code{rsimplex()} the rejection method is used; it may be very slow if the density is highly peaked, and will fail if the density asymptotes at the boundary. } \value{ \code{dsimplex(x)} gives the density function, \code{rsimplex(n)} gives \eqn{n} random variates. } % \references{ % %} \author{ T. W. Yee } \seealso{ \code{\link{simplex}}. } \examples{ sigma <- c(4, 2, 1) # Dispersion parameter mymu <- c(0.1, 0.5, 0.7); xxx <- seq(0, 1, len = 501) \dontrun{ par(mfrow = c(3, 3)) # Figure 2.1 of Song (2007) for (iii in 1:3) for (jjj in 1:3) { plot(xxx, dsimplex(xxx, mymu[jjj], sigma[iii]), type = "l", col = "blue", xlab = "", ylab = "", main = paste("mu = ", mymu[jjj], ", sigma = ", sigma[iii], sep = "")) } } } \keyword{distribution} % mean(rsimplex(1000, mymu[2], sigma[2])) # Should be mu below % var(rsimplex(1000, mymu[2], sigma[2])) # Should be as below % (mu <- mymu[2]) % lambda <- (1 / sigma[2])^2 % mu * (1 - mu) - sqrt(lambda / 2) * exp(lambda / (mu^2 * (1 - mu)^2)) * % pgamma(lambda / (2 * mu^2 * (1 - mu)^2), 0.5, lower = FALSE) * gamma(0.5) VGAM/man/lindUC.Rd0000644000176200001440000000313213565414527013237 0ustar liggesusers\name{Lindley} \alias{Lindley} \alias{dlind} \alias{plind} %\alias{qlind} \alias{rlind} \title{The Lindley Distribution} \description{ Density, cumulative distribution function, and random generation for the Lindley distribution. % quantile function } % yettodo: 20170103; use csam-23-517.pdf to write plind() and/or qlind(). 
\usage{ dlind(x, theta, log = FALSE) plind(q, theta, lower.tail = TRUE, log.p = FALSE) rlind(n, theta) } %qlind(p, theta) \arguments{ \item{x, q}{vector of quantiles.} % \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{theta}{positive parameter. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dlind} gives the density, \code{plind} gives the cumulative distribution function, and \code{rlind} generates random deviates. % \code{qlind} gives the quantile function, and } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{lindley}} for details. } %\note{ % %} \seealso{ \code{\link{lindley}}. } \examples{ theta <- exp(-1); x <- seq(0.0, 17, length = 700) dlind(0:10, theta) \dontrun{ plot(x, dlind(x, theta), type = "l", las = 1, col = "blue", main = "dlind(x, theta = exp(-1))") abline(h = 1, col = "grey", lty = "dashed") } } \keyword{distribution} % probs <- seq(0.01, 0.99, by = 0.01) % max(abs(plind(qlind(p = probs, theta), theta) - probs)) # Should be 0 VGAM/man/zibinomUC.Rd0000644000176200001440000000627413565414527013772 0ustar liggesusers\name{Zibinom} \alias{Zibinom} \alias{dzibinom} \alias{pzibinom} \alias{qzibinom} \alias{rzibinom} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Binomial Distribution } \description{ Density, distribution function, quantile function and random generation for the zero-inflated binomial distribution with parameter \code{pstr0}. } \usage{ dzibinom(x, size, prob, pstr0 = 0, log = FALSE) pzibinom(q, size, prob, pstr0 = 0) qzibinom(p, size, prob, pstr0 = 0) rzibinom(n, size, prob, pstr0 = 0) } %- maybe also 'usage' for other objects documented here. %pzibinom(q, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE) %qzibinom(p, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE) \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{size}{number of trials. It is the \eqn{N} symbol in the formula given in \code{\link{zibinomial}}. } \item{prob}{probability of success on each trial. } \item{n}{ Same as in \code{\link[stats]{runif}}. } % \item{log, log.p, lower.tail}{ \item{log}{ Same as \code{\link[stats:Binomial]{pbinom}}.} \item{pstr0}{ Probability of a structural zero (i.e., ignoring the binomial distribution), called \eqn{\phi}{phi}. The default value of \eqn{\phi=0}{phi=0} corresponds to the response having an ordinary binomial distribution. } } \details{ The probability function of \eqn{Y} is 0 with probability \eqn{\phi}{phi}, and \eqn{Binomial(size, prob)}{Binomial(size, prob)} with probability \eqn{1-\phi}{1-phi}. Thus \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{% P(Y=0) = phi + (1-phi) * P(W=0)} where \eqn{W} is distributed \eqn{Binomial(size, prob)}{Binomial(size, prob)}. } \value{ \code{dzibinom} gives the density, \code{pzibinom} gives the distribution function, \code{qzibinom} gives the quantile function, and \code{rzibinom} generates random deviates. } %\references{ } \author{ T. W. Yee } \note{ The argument \code{pstr0} is recycled to the required length, and must have values which lie in the interval \eqn{[0,1]}. These functions actually allow for \emph{zero-deflation}. That is, the resulting probability of a zero count is \emph{less than} the nominal value of the parent distribution. 
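% A hedged sketch (not run) of zero-deflation: a slightly negative
% 'pstr0' can still give a valid value here, since
% P(Y=0) = pstr0 + (1 - pstr0) * P(W=0):
% dzibinom(0, size = 10, prob = 0.2, pstr0 = -0.05)  # Still positive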
See \code{\link{Zipois}} for more information.
}
\seealso{
  \code{\link{zibinomial}},
  \code{\link{Gaitbinom.mlm}},
  \code{\link[stats:Binomial]{Binomial}}.
}
\examples{
prob <- 0.2; size <- 10; pstr0 <- 0.5
(ii <- dzibinom(0:size, size, prob, pstr0 = pstr0))
max(abs(cumsum(ii) - pzibinom(0:size, size, prob, pstr0 = pstr0)))  # Should be 0
table(rzibinom(100, size, prob, pstr0 = pstr0))
table(qzibinom(runif(100), size, prob, pstr0 = pstr0))
round(dzibinom(0:10, size, prob, pstr0 = pstr0) * 100)  # Should be similar

\dontrun{ x <- 0:size
barplot(rbind(dzibinom(x, size, prob, pstr0 = pstr0),
              dbinom(x, size, prob)),
        beside = TRUE, col = c("blue", "green"), ylab = "Probability",
        main = paste("ZIB(", size, ", ", prob, ", pstr0 = ", pstr0,
                     ") (blue) vs", " Binomial(", size, ", ", prob,
                     ") (green)", sep = ""),
        names.arg = as.character(x), las = 1, lwd = 2) }
}
\keyword{distribution}
VGAM/man/ordsup.Rd0000644000176200001440000001060513565414527013400 0ustar liggesusers\name{ordsup}
\alias{ordsup}
\alias{ordsup.vglm}
%\alias{score.stat}
%\alias{score.stat.vlm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Ordinal Superiority Measures }
\description{
  Ordinal superiority measures for the linear model and cumulative
  link models: the probability that an observation from one
  distribution falls above an independent observation from the other
  distribution, adjusted for explanatory variables in a model.
}
\usage{
ordsup(object, ...)
ordsup.vglm(object, all.vars = FALSE, confint = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
  A \code{\link{vglm}} fit.
  Currently it must be one of:
  \code{\link{cumulative}},
  \code{\link{uninormal}}.
  The links for \code{\link{cumulative}} must be
  \code{\link{logitlink}} or \code{\link{probitlink}},
  and \code{parallel = TRUE} is also needed.
  For \code{\link{uninormal}} the mean must use
  \code{\link{identitylink}} and model the
  \code{sd} as intercept-only.
% An object that is ideally an
% \code{\link{vglm}} fit.
  }
  \item{all.vars}{
  Logical. The default is to use explanatory variables
  which are binary, but all variables are used
  (except the intercept) if set to \code{TRUE}.
  }
  \item{confint}{
  Logical. If \code{TRUE} then \code{\link{confintvglm}}
  is called to return confidence intervals for
  \eqn{\gamma}{gamma} and \eqn{\Delta}{Delta}.
  By default, Wald intervals are produced, but they can be
  replaced by profile intervals by setting
  \code{method = "profile"}.
%Currently must be \code{FALSE}.
  }
  \item{\dots}{
  Parameters that can be fed into \code{\link{confintvglm}},
  e.g., \code{level = 0.95} and
  \code{method = c("wald", "profile")}.
  }
}
\details{
  Details are given in Agresti and Kateri (2017), and this help file
  draws directly from it.
  This function returns two quantities for comparing two groups on an
  ordinal categorical response variable, while adjusting for other
  explanatory variables.
  They are called ``ordinal superiority'' measures, and the two groups
  can be compared without supplementary explanatory variables.
  Let \eqn{Y_1}{Y1} and \eqn{Y_2}{Y2} be independent random variables
  from groups A and B, say, for a quantitative ordinal categorical
  scale. Then
  \eqn{\Delta = P(Y_1 > Y_2) - P(Y_2 > Y_1)}{Delta = P(Y1 > Y2) - P(Y2 > Y1)}
  summarizes their relative size.
  A second quantity is
  \eqn{\gamma = P(Y_1 > Y_2) + 0.5 \times P(Y_2 = Y_1)}{gamma =
  P(Y1 > Y2) + 0.5 * P(Y2 = Y1)}.
  Then
  \eqn{\Delta = 2 \times \gamma - 1}{Delta = 2 * gamma - 1},
  or equivalently,
  \eqn{\gamma = (\Delta + 1)/2}{gamma = (Delta + 1)/2}.
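  For intuition, both measures can be computed directly for two given
  discrete distributions; a minimal sketch in base R (the probability
  vectors are hypothetical, and independence is assumed):
% pA <- c(0.2, 0.3, 0.5); pB <- c(0.5, 0.3, 0.2)  # P(Y1 = j) and P(Y2 = j)
% joint <- outer(pA, pB)            # P(Y1 = j, Y2 = k) under independence
% gam <- sum(joint[lower.tri(joint)]) + 0.5 * sum(diag(joint))
% Del <- sum(joint[lower.tri(joint)]) - sum(joint[upper.tri(joint)])
% c(gam, (Del + 1) / 2)             # Equal, verifying the identity above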
  The range of \eqn{\gamma}{gamma} is \eqn{[0, 1]}, while the range of
  \eqn{\Delta}{Delta} is \eqn{[-1, 1]}.
  The examples below are based on that paper.
  This function is currently implemented for only a very limited number
  of specific models.
}
\value{
  By default, a list with components
  \code{gamma} and \code{Delta},
  where each is a vector with elements corresponding to binary
  explanatory variables (i.e., 0 or 1), and
  if no explanatory variables are binary then a
  \code{NULL} is returned.
  If \code{confint = TRUE} then the list contains 4 more components:
  \code{lower.gamma}, \code{upper.gamma},
  \code{Lower.Delta}, \code{Upper.Delta}.
}
\references{
  Agresti, A. and Kateri, M. (2017)
  Ordinal probability effect measures for group
  comparisons in multinomial cumulative link models.
  \emph{Biometrics}, \bold{73}, 214--219.
}
\author{ Thomas W. Yee }
%\note{
%}
%\section{Warning }{
% This function has not yet been thoroughly tested.
%}
\seealso{
  \code{\link{cumulative}},
  \code{\link{propodds}},
  \code{\link{uninormal}}.
}
\examples{
\dontrun{
Mental <- read.table("http://www.stat.ufl.edu/~aa/glm/data/Mental.dat",
                     header = TRUE)  # May take a while to load in
Mental$impair <- ordered(Mental$impair)
pfit3 <- vglm(impair ~ ses + life, data = Mental,
              cumulative(link = "probitlink", reverse = FALSE,
                         parallel = TRUE))
coef(pfit3, matrix = TRUE)
ordsup(pfit3)  # The 'ses' variable is binary

# Fit a crude LM
fit7 <- vglm(as.numeric(impair) ~ ses + life, uninormal, data = Mental)
coef(fit7, matrix = TRUE)  # 'sd' is estimated by MLE
ordsup(fit7)
ordsup(fit7, all.vars = TRUE)  # Some output may not be meaningful
ordsup(fit7, confint = TRUE, method = "profile")
}
}
\keyword{models}
\keyword{regression}
VGAM/man/smart.mode.is.Rd0000644000176200001440000000254213565414527014550 0ustar liggesusers\name{smart.mode.is}
\alias{smart.mode.is}
\title{ Determine What Mode the Smart Prediction is In }
\description{
  Determine which of three modes the smart prediction is currently in.
}
\usage{
smart.mode.is(mode.arg = NULL)
}
\arguments{
  \item{mode.arg}{ a character string, either \code{"read"},
  \code{"write"} or \code{"neutral"}. }}
\value{
  If \code{mode.arg} is given, then either \code{TRUE} or \code{FALSE}
  is returned.
  If \code{mode.arg} is not given, then the mode
  (\code{"neutral"}, \code{"read"} or \code{"write"})
  is returned.  Usually, the mode is \code{"neutral"}.
}
\seealso{
  \code{\link{put.smart}},
  \code{\link[splines]{bs}},
  \code{\link[stats]{poly}}.
}
\details{
  Smart functions such as
  \code{\link[splines]{bs}} and
  \code{\link[stats]{poly}} need to know what mode
  smart prediction is in.
  If it is in \code{"write"} mode then the parameters are saved to
  \code{.smart.prediction} using \code{\link{put.smart}}.
  If in \code{"read"} mode then the parameters are read in using
  \code{\link{get.smart}}.
  If in \code{"neutral"} mode then the smart function behaves like an
  ordinary function.
}
\examples{
print(sm.min1)
smart.mode.is()  # Returns "neutral"
smart.mode.is(smart.mode.is())  # Returns TRUE
}
%\keyword{smart}
\keyword{models}
\keyword{regression}
% Converted by Sd2Rd version 1.10.
VGAM/man/calibrate.rrvglm.Rd0000644000176200001440000001323613565414527015325 0ustar liggesusers\name{calibrate.rrvglm}
\alias{calibrate.rrvglm}
% 20170418
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Calibration for CLO models (RR-VGLMs) }
\description{
  Performs maximum likelihood calibration
  for constrained linear ordination models
  (CLO models are better known as RR-VGLMs).
} \usage{ calibrate.rrvglm(object, newdata = NULL, type = c("latvar", "predictors", "response", "vcov", "everything"), lr.confint = FALSE, cf.confint = FALSE, level = 0.95, initial.vals = NULL, ...) } %- maybe also 'usage' for other objects documented here. % se.type = c("asbefore", "wald"), \arguments{ \item{object}{ The fitted \code{\link{rrvglm}} model. Note that \code{object} should be fitted with corner constraints. } \item{newdata}{ See \code{\link{calibrate.qrrvglm}}. % A data frame with new response data % (e.g., new species data). % The default is to use the original data used to fit the model; % however, the calibration may take a long time to compute % because the computations are expensive. } \item{type}{ See \code{\link{calibrate.qrrvglm}}. If \code{type = "vcov"} then \code{object} should have been fitted using \code{\link{binomialff}} or \code{\link{poissonff}} with canonical links, and have \code{noRRR = ~ 1}. % Same as \code{\link{calibrate.qrrvglm}}. % The \code{"all3or4"} is for all of them, i.e., all \code{type}s. % For CLO models, % \code{"vcov"} is unavailable, so all 3 are returned. } % \item{se.type}{ % Same as \code{\link{calibrate.qrrvglm}}. % } \item{lr.confint, cf.confint, level}{ Same as \code{\link{calibrate.qrrvglm}}. } \item{initial.vals}{ Same as \code{\link{calibrate.qrrvglm}}. The default is a grid defined by arguments in \code{\link{calibrate.rrvglm.control}}. } \item{\dots}{ Arguments that are fed into \code{\link{calibrate.rrvglm.control}}. } } \details{ Given a fitted regression CLO model, maximum likelihood calibration is theoretically easy and elegant. However, the method assumes that all responses are independent. More details and references are given in Yee (2015). Calibration requires \emph{grouped} or \emph{non-sparse} data as the response. For example, if the family function is \code{\link{multinomial}} then one cannot usually calibrate \code{y0} if it is a vector of 0s except for one 1. Instead, the response vector should be from grouped data so that there are few 0s. Indeed, it is found empirically that the stereotype model (also known as a reduced-rank \code{\link{multinomial}} logit model) calibrates well only with grouped data, and if the response vector is all 0s except for one 1 then the MLE will probably be at \code{-Inf} or \code{+Inf}. As another example, if the family function is \code{\link{poissonff}} then \code{y0} must not be a vector of all 0s; instead, the response vector should have few 0s ideally. In general, you can use simulation to see what type of data calibrates acceptably. Internally, this function is a simplification of \code{\link{calibrate.qrrvglm}} and users should look at that function for details. Good initial values are needed, and a grid is constructed to obtain these. The function \code{\link{calibrate.rrvglm.control}} allows the user some control over the choice of these. % Also, \code{\link[stats]{optim}} is used to search for % the maximum likelihood solution. } \value{ See \code{\link{calibrate.qrrvglm}}. Of course, the quadratic term in the latent variables vanishes for RR-VGLMs, so the model is simpler. } %\references{ %} \author{T. W. Yee} \note{ See \code{\link{calibrate.qrrvglm}} about, e.g., calibration using real-valued responses. } \section{Warning }{ See \code{\link{calibrate.qrrvglm}}. % This function assumes that the \emph{prior weights} are all unity; % see \code{\link{weightsvglm}}. 
% This function is computationally expensive for
% \code{Rank >= 1}, and since it uses
% a \code{for()} loop several times it can be slow.
% Setting \code{trace = TRUE} to get a running log is a good idea.
}
\seealso{
  \code{\link{calibrate.qrrvglm}},
  \code{\link{calibrate}},
  \code{\link{rrvglm}},
  \code{\link{weightsvglm}},
  \code{\link[stats]{optim}},
  \code{\link[stats]{uniroot}}.
% \code{\link{cqo}},
% \code{\link{cao}}.
% \code{\link{uqo}},
}
\examples{
\dontrun{
# Example 1
nona.xs.nz <- na.omit(xs.nz)  # Overkill!! (Data in VGAMdata package)
nona.xs.nz$dmd     <- with(nona.xs.nz, round(drinkmaxday))
nona.xs.nz$feethr  <- with(nona.xs.nz, round(feethour))
nona.xs.nz$sleephr <- with(nona.xs.nz, round(sleep))
nona.xs.nz$beats   <- with(nona.xs.nz, round(pulse))

p2 <- rrvglm(cbind(dmd, feethr, sleephr, beats) ~ age + smokenow +
      depressed + embarrassed + fedup + hurt + miserable +  # 11 psychological
      nofriend + moody + nervous + tense + worry + worrier,  # variables
      noRRR = ~ age + smokenow, trace = FALSE,
      poissonff, data = nona.xs.nz, Rank = 2)
cp2 <- calibrate(p2, newdata = head(nona.xs.nz, 9), trace = TRUE)
cp2

two.cases <- nona.xs.nz[1:2, ]  # Another calibration example
two.cases$dmd     <- c(4, 10)
two.cases$feethr  <- c(4, 7)
two.cases$sleephr <- c(7, 8)
two.cases$beats   <- c(62, 71)
(cp2b <- calibrate(p2, newdata = two.cases))

# Example 2
p1 <- rrvglm(cbind(dmd, feethr, sleephr, beats) ~ age + smokenow +
      depressed + embarrassed + fedup + hurt + miserable +  # 11 psychological
      nofriend + moody + nervous + tense + worry + worrier,  # variables
      noRRR = ~ age + smokenow, trace = FALSE,
      poissonff, data = nona.xs.nz, Rank = 1)
(cp1c <- calibrate(p1, newdata = two.cases, lr.confint = TRUE))
}
}
\keyword{models}
\keyword{regression}
VGAM/man/skellam.Rd0000644000176200001440000000627513565414527013520 0ustar liggesusers\name{skellam}
\alias{skellam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Skellam Distribution Family Function}
\description{
  Estimates the two parameters of a Skellam distribution
  by maximum likelihood estimation.
}
\usage{
skellam(lmu1 = "loglink", lmu2 = "loglink", imu1 = NULL, imu2 = NULL,
        nsimEIM = 100, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lmu1, lmu2}{
  Link functions for the \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2}
  parameters.
  See \code{\link{Links}} for more choices and for general information.
  }
  \item{imu1, imu2}{
  Optional initial values for the parameters.
  See \code{\link{CommonVGAMffArguments}} for more information.
  If convergence failure occurs (this \pkg{VGAM} family function seems
  to require good initial values) try using these arguments.
  }
  \item{nsimEIM, parallel, zero}{
  See \code{\link{CommonVGAMffArguments}} for information.
  In particular, setting \code{parallel = TRUE} will constrain the
  two means to be equal.
  }
}
\details{
  The Skellam distribution models the difference between two
  independent Poisson random variables
  (with means \eqn{\mu_{j}}{mu_j}, say).
  It has density function
  \deqn{f(y;\mu_1,\mu_2) =
  \left( \frac{ \mu_1 }{\mu_2} \right)^{y/2} \,
  \exp(-\mu_1-\mu_2 ) \,
  I_{|y|}( 2 \sqrt{ \mu_1 \mu_2}) }{%
  f(y;mu1,mu2) =
  (mu1/mu2)^(y/2) * exp(-mu1-mu2) * I_(|y|)(2 * sqrt(mu1*mu2)) }
  where \eqn{y} is an integer,
  \eqn{\mu_1 > 0}{mu1 > 0},
  \eqn{\mu_2 > 0}{mu2 > 0}.
  Here, \eqn{I_v} is the modified Bessel function of the
  first kind with order \eqn{v}.
  The mean is \eqn{\mu_1 - \mu_2}{mu1 - mu2}
  (returned as the fitted values),
  and the variance is \eqn{\mu_1 + \mu_2}{mu1 + mu2}.
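  A quick simulation check of these moments (a sketch only;
  \code{rskellam} is documented at \code{\link{dskellam}}):
% set.seed(1)
% y <- rskellam(1e5, 3, 2)
% c(mean(y), var(y))  # Should be close to 1 and 5 respectively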
  Simulated Fisher scoring is implemented.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\section{Warning }{
  This \pkg{VGAM} family function seems fragile and very sensitive to
  the initial values.
  Use very cautiously!!
}
\references{
Skellam, J. G. (1946)
The frequency distribution of the difference between
two Poisson variates belonging to different populations.
\emph{Journal of the Royal Statistical Society, Series A},
\bold{109}, 296.
}
%\author{ T. W. Yee }
\note{
  Numerical problems may occur if \eqn{\mu_1}{mu1} and/or
  \eqn{\mu_2}{mu2} are large.
}
\seealso{
  \code{\link{dskellam}},
  \code{\link[stats:Poisson]{dpois}},
  \code{\link{poissonff}}.
}
\examples{
\dontrun{
sdata <- data.frame(x2 = runif(nn <- 1000))
sdata <- transform(sdata, mu1 = exp(1 + x2), mu2 = exp(1 + x2))
sdata <- transform(sdata, y = rskellam(nn, mu1, mu2))
fit1 <- vglm(y ~ x2, skellam, data = sdata, trace = TRUE, crit = "coef")
fit2 <- vglm(y ~ x2, skellam(parallel = TRUE), data = sdata,
             trace = TRUE)
coef(fit1, matrix = TRUE)
coef(fit2, matrix = TRUE)
summary(fit1)
# Likelihood ratio test for equal means:
pchisq(2 * (logLik(fit1) - logLik(fit2)),
       df = df.residual(fit2) - df.residual(fit1), lower.tail = FALSE)
lrtest(fit1, fit2)  # Alternative
}
}
\keyword{models}
\keyword{regression}
VGAM/man/ducklings.Rd0000644000176200001440000000271013565414527014045 0ustar liggesusers\name{ducklings}
\alias{ducklings}
\docType{data}
\title{ Relative Frequencies of Serum Proteins in White Pekin Ducklings
%% ~~ data name/kind ... ~~
}
\description{
  Relative frequencies of serum proteins in white Pekin ducklings
  as determined by electrophoresis.
}
\usage{data(ducklings)}
\format{
  The format is:
 chr "ducklings"
}
\details{
  Columns \code{p1}, \code{p2}, \code{p3} stand for
  pre-albumin, albumin, and globulins, respectively.
  These were collected from 3-week old white Pekin ducklings.

  Let \eqn{Y_1}{Y1} be proportional to the total milligrams of
  pre-albumin in the blood serum of a duckling.
  Similarly, let \eqn{Y_2}{Y2} and \eqn{Y_3}{Y3} be directly
  proportional, by the same factor as for \eqn{Y_1}{Y1},
  to the total milligrams of albumin and globulins in its
  blood serum, respectively.
  The proportion of pre-albumin is given by
  \eqn{Y_1/(Y_1 + Y_2 + Y_3)}{Y1/(Y1 + Y2 + Y3)},
  and similarly for the others.
% Each set of 3 measurements is based on from 7 to 12 individual ducklings.
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
  Mosimann, J. E. (1962)
  On the compound multinomial distribution,
  the multivariate \eqn{\beta}{beta}-distribution,
  and correlations among proportions,
  \emph{Biometrika}, \bold{49}, 65--82.
}
\seealso{
  \code{\link{dirichlet}}.
}
%%\references{
%% ~~ possibly secondary sources and usages ~~
%%}
\examples{
print(ducklings)
}
\keyword{datasets}
VGAM/man/beggs.Rd0000644000176200001440000000335013565414527013152 0ustar liggesusers\name{beggs}
\alias{beggs}
\docType{data}
\title{Bacon and Eggs Data}
\description{
  Purchasing of bacon and eggs.
}
\usage{
data(beggs)
}
\format{
  Data frame of a two way table.
  \describe{
    \item{b0, b1, b2, b3, b4}{
  The \code{b} refers to bacon.
  The number of times bacon was purchased was 0, 1, 2, 3, or 4.
    }
    \item{e0, e1, e2, e3, e4}{
  The \code{e} refers to eggs.
  The number of times eggs were purchased was 0, 1, 2, 3, or 4.
  }
  }
}
\details{
  The data are from Information Resources, Inc.,
  a consumer panel based in a large US city
  [see Bell and Lattin (1998) for further details].
  Starting in June 1991, the purchases in the bacon and
  fresh eggs product categories for a sample of 548 households over
  four consecutive store trips were tracked.
  Only those grocery shopping trips with a total basket value of
  at least five dollars were considered.
  For each household, the total number of bacon purchases in their
  four eligible shopping trips and the total number of egg purchases
  (usually a package of eggs) for the same trips were counted.
% Data from Bell and Lattin (1998).
% Also see Danaher and Hardie (2005).
}
\source{
  Bell, D. R. and Lattin, J. M. (1998)
  Shopping Behavior and Consumer Preference for Store
  Price Format: Why `Large Basket' Shoppers Prefer EDLP.
  \emph{Marketing Science},
  \bold{17}, 66--88.
}
\references{
  Danaher, P. J. and Hardie, B. G. S. (2005)
  Bacon with Your Eggs?
  Applications of a New Bivariate Beta-Binomial Distribution.
  \emph{American Statistician},
  \bold{59}(4), 282--286.
}
\seealso{
  \code{\link[VGAM]{rrvglm}},
  \code{\link[VGAM]{rcim}},
  \code{\link[VGAM]{grc}}.
}
\examples{
beggs
colSums(beggs)
rowSums(beggs)
}
\keyword{datasets}
%
%
VGAM/man/paretoIVUC.Rd0000644000176200001440000000756013565414527014053 0ustar liggesusers\name{ParetoIV}
\alias{ParetoIV}
\alias{dparetoIV}
\alias{pparetoIV}
\alias{qparetoIV}
\alias{rparetoIV}
\alias{ParetoIII}
\alias{dparetoIII}
\alias{pparetoIII}
\alias{qparetoIII}
\alias{rparetoIII}
\alias{ParetoII}
\alias{dparetoII}
\alias{pparetoII}
\alias{qparetoII}
\alias{rparetoII}
\alias{ParetoI}
\alias{dparetoI}
\alias{pparetoI}
\alias{qparetoI}
\alias{rparetoI}
\title{The Pareto(IV/III/II) Distributions}
\description{
  Density, distribution function, quantile function and random
  generation for the Pareto(IV/III/II) distributions.
}
\usage{
dparetoIV(x, location = 0, scale = 1, inequality = 1, shape = 1,
          log = FALSE)
pparetoIV(q, location = 0, scale = 1, inequality = 1, shape = 1,
          lower.tail = TRUE, log.p = FALSE)
qparetoIV(p, location = 0, scale = 1, inequality = 1, shape = 1,
          lower.tail = TRUE, log.p = FALSE)
rparetoIV(n, location = 0, scale = 1, inequality = 1, shape = 1)
dparetoIII(x, location = 0, scale = 1, inequality = 1, log = FALSE)
pparetoIII(q, location = 0, scale = 1, inequality = 1,
           lower.tail = TRUE, log.p = FALSE)
qparetoIII(p, location = 0, scale = 1, inequality = 1,
           lower.tail = TRUE, log.p = FALSE)
rparetoIII(n, location = 0, scale = 1, inequality = 1)
dparetoII(x, location = 0, scale = 1, shape = 1, log = FALSE)
pparetoII(q, location = 0, scale = 1, shape = 1,
          lower.tail = TRUE, log.p = FALSE)
qparetoII(p, location = 0, scale = 1, shape = 1,
          lower.tail = TRUE, log.p = FALSE)
rparetoII(n, location = 0, scale = 1, shape = 1)
dparetoI(x, scale = 1, shape = 1, log = FALSE)
pparetoI(q, scale = 1, shape = 1,
         lower.tail = TRUE, log.p = FALSE)
qparetoI(p, scale = 1, shape = 1,
         lower.tail = TRUE, log.p = FALSE)
rparetoI(n, scale = 1, shape = 1)
}
\arguments{
  \item{x, q}{vector of quantiles. }
  \item{p}{vector of probabilities. }
  \item{n}{number of observations.
  Same as in \code{\link[stats]{runif}}.
% Must be a single positive integer.
  }
  \item{location}{the location parameter. }
  \item{scale, shape, inequality}{the (positive)
  scale, inequality and shape parameters. }
  \item{log}{
  Logical.
  If \code{log = TRUE} then the logarithm of the density is returned.
  }
  \item{lower.tail, log.p}{
  Same meaning as in \code{\link[stats:Normal]{pnorm}}
  or \code{\link[stats:Normal]{qnorm}}.
  }
}
\value{
  Functions beginning with the letter \code{d} give the density,
  functions beginning with the letter \code{p} give the distribution
  function,
  functions beginning with the letter \code{q} give the quantile
  function, and
  functions beginning with the letter \code{r} generate random deviates.
}
\references{
Brazauskas, V. (2003)
Information matrix for Pareto(IV), Burr, and related distributions.
\emph{Comm. Statist. Theory and Methods}
\bold{32}, 315--325.

Arnold, B. C. (1983)
\emph{Pareto Distributions}.
Fairland, Maryland: International Cooperative Publishing House.
}
\author{ T. W. Yee and Kai Huang }
\details{
  For the formulas and other details
  see \code{\link{paretoIV}}.
}
\note{
  The functions \code{[dpqr]paretoI} are the same as
  \code{[dpqr]pareto} except for a slight change in notation:
  \eqn{s=k} and \eqn{b=\alpha}{b=alpha}; see \code{\link{Pareto}}.
}
\seealso{
  \code{\link{paretoIV}},
  \code{\link{Pareto}}.
}
\examples{
\dontrun{
x <- seq(-0.2, 4, by = 0.01)
loc <- 0; Scale <- 1; ineq <- 1; shape <- 1.0
plot(x, dparetoIV(x, loc, Scale, ineq, shape), type = "l",
     col = "blue",
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Purple are 5,10,...,95 percentiles", ylim = 0:1, las = 1,
     ylab = "")
abline(h = 0, col = "blue", lty = 2)
Q <- qparetoIV(seq(0.05, 0.95,by = 0.05), loc, Scale, ineq, shape)
lines(Q, dparetoIV(Q, loc, Scale, ineq, shape),
      col = "purple", lty = 3, type = "h")
lines(x, pparetoIV(x, loc, Scale, ineq, shape), col = "orange")
abline(h = 0, lty = 2)
}
}
\keyword{distribution}
VGAM/man/exponential.Rd0000644000176200001440000000674013565414527014417 0ustar liggesusers\name{exponential}
\alias{exponential}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Exponential Distribution }
\description{
  Maximum likelihood estimation for the exponential distribution.
}
\usage{
exponential(link = "loglink", location = 0, expected = TRUE,
            type.fitted = c("mean", "percentiles", "Qlink"),
            percentiles = 50,
            ishrinkage = 0.95, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{link}{
  Parameter link function applied to the positive parameter \eqn{rate}.
  See \code{\link{Links}} for more choices.
  }
  \item{location}{
  Numeric of length 1, the known location parameter, \eqn{A}, say.
  }
  \item{expected}{
  Logical. If \code{TRUE}, Fisher scoring is used;
  otherwise Newton-Raphson.
  The latter is usually faster.
  }
  \item{ishrinkage, parallel, zero}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{type.fitted, percentiles}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
}
\details{
  The family function assumes the response \eqn{Y} has density
  \deqn{f(y) = \lambda \exp(-\lambda (y-A))}{%
        f(y) = rate * exp(-rate * (y-A)) }
  for \eqn{y > A}, where \eqn{A} is the known location parameter.
  By default, \eqn{A=0}.
  Then \eqn{E(Y) = A + 1/ \lambda}{E(Y) = A + 1/rate} and
  \eqn{Var(Y) = 1/ \lambda^2}{Var(Y) = 1/rate^2}.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\references{
  Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
  \emph{Statistical Distributions},
  Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
}
\author{ T. W. Yee }
\note{
  Suppose \eqn{A = 0}.
  For a time interval of unit length, the number of events is
  Poisson with mean \eqn{\lambda}{rate}
  if the times between events are independent and have an
  exponential distribution with mean \eqn{\lambda^{-1}}{1/rate}.
  The argument \code{rate} in \code{exponential} is the same as
  \code{\link[stats:Exponential]{rexp}} etc.
  The argument \code{lambda} in \code{\link{rpois}} then plays the
  same role as \code{rate} here.
}
\seealso{
  \code{\link{amlexponential}},
  \code{\link{gpd}},
  \code{\link{laplace}},
  \code{\link{expgeometric}},
  \code{\link{explogff}},
  \code{\link{poissonff}},
  \code{\link{mix2exp}},
  \code{\link{freund61}},
  \code{\link{simulate.vlm}},
  \code{\link[stats]{Exponential}}.
% \code{\link{cens.exponential}},
}
\examples{
edata <- data.frame(x2 = runif(nn <- 100) - 0.5)
edata <- transform(edata, x3 = runif(nn) - 0.5)
edata <- transform(edata, eta = 0.2 - 0.7 * x2 + 1.9 * x3)
edata <- transform(edata, rate = exp(eta))
edata <- transform(edata, y = rexp(nn, rate = rate))
with(edata, stem(y))

fit.slow <- vglm(y ~ x2 + x3, exponential, data = edata, trace = TRUE)
fit.fast <- vglm(y ~ x2 + x3, exponential(exp = FALSE), data = edata,
                 trace = TRUE, crit = "coef")
coef(fit.slow, mat = TRUE)
summary(fit.slow)

# Compare results with a GPD. Has a threshold.
threshold <- 0.5
gdata <- data.frame(y1 = threshold + rexp(n = 3000, rate = exp(1.5)))

fit.exp <- vglm(y1 ~ 1, exponential(location = threshold), data = gdata)
coef(fit.exp, matrix = TRUE)
Coef(fit.exp)
logLik(fit.exp)

fit.gpd <- vglm(y1 ~ 1, gpd(threshold = threshold), data = gdata)
coef(fit.gpd, matrix = TRUE)
Coef(fit.gpd)
logLik(fit.gpd)
}
\keyword{models}
\keyword{regression}
VGAM/man/binormcopUC.Rd0000644000176200001440000000403213565414527014301 0ustar liggesusers\name{Binormcop}
\alias{Binormcop}
\alias{dbinormcop}
\alias{pbinormcop}
\alias{rbinormcop}
\title{Gaussian Copula (Bivariate) Distribution}
\description{
  Density, distribution function, and random generation for the
  (one parameter) bivariate Gaussian copula distribution.
}
\usage{
dbinormcop(x1, x2, rho = 0, log = FALSE)
pbinormcop(q1, q2, rho = 0)
rbinormcop(n, rho = 0)
}
\arguments{
  \item{x1, x2, q1, q2}{vector of quantiles.
  The \code{x1} and \code{x2} should be in the interval \eqn{(0,1)}.
  Ditto for \code{q1} and \code{q2}.
  }
  \item{n}{number of observations.
   Same as \code{\link[stats]{rnorm}}.
  }
  \item{rho}{the correlation parameter.
   Should be in the interval \eqn{(-1,1)}.
  }
  \item{log}{
  Logical.
  If \code{TRUE} then the logarithm is returned.
% Same as \code{\link[stats]{rnorm}}.
  }
}
\value{
  \code{dbinormcop} gives the density,
  \code{pbinormcop} gives the distribution function, and
  \code{rbinormcop} generates random deviates (a two-column matrix).
}
%\references{
%
%}
\author{ T. W. Yee }
\details{
  See \code{\link{binormalcop}}, the \pkg{VGAM}
  family function for estimating the parameter by maximum likelihood
  estimation, for the formula of the cumulative distribution function
  and other details.
}
\note{
  Yettodo: allow \code{x1} and/or \code{x2} to have values 1,
  and to allow any values for \code{x1} and/or \code{x2} to be
  outside the unit square.
}
\seealso{
  \code{\link{binormalcop}},
  \code{\link{binormal}}.
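% When rho = 0 the Gaussian copula reduces to the independence copula,
% so a quick sanity check (a sketch) is:
% u <- runif(5); v <- runif(5)
% max(abs(pbinormcop(u, v, rho = 0) - u * v))  # Should be ~ 0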
}
\examples{
\dontrun{ edge <- 0.01  # A small positive value
N <- 101; x <- seq(edge, 1.0 - edge, len = N); Rho <- 0.7
ox <- expand.grid(x, x)
zedd <- dbinormcop(ox[, 1], ox[, 2], rho = Rho, log = TRUE)
contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5)
zedd <- pbinormcop(ox[, 1], ox[, 2], rho = Rho)
contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5)
}
}
\keyword{distribution}
%plot(r <- rbinormcop(n = 3000, rho = Rho), col = "blue")
%par(mfrow = c(1, 2))
%hist(r[, 1])  # Should be uniform
%hist(r[, 2])  # Should be uniform
VGAM/man/hzetaUC.Rd0000644000176200001440000000406713565414527013432 0ustar liggesusers\name{Hzeta}
\alias{Hzeta}
\alias{dhzeta}
\alias{phzeta}
\alias{qhzeta}
\alias{rhzeta}
\title{ Haight's Zeta Distribution }
\description{
  Density, distribution function, quantile function and random
  generation for Haight's zeta distribution with parameter
  \code{shape}.
}
\usage{
dhzeta(x, shape, log = FALSE)
phzeta(q, shape, log.p = FALSE)
qhzeta(p, shape)
rhzeta(n, shape)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q, p, n}{
  Same meaning as \code{\link[stats]{runif}}.
  }
  \item{shape}{
   The positive shape parameter.
   Called \eqn{\alpha}{alpha} below.
  }
  \item{log,log.p}{
  Same meaning as in \code{\link[stats:Normal]{pnorm}}
  or \code{\link[stats:Normal]{qnorm}}.
  }
}
\details{
   The probability function is
   \deqn{f(x) = (2x-1)^{(-\alpha)} - (2x+1)^{(-\alpha)},}{%
         f(x) = (2x-1)^(-alpha) - (2x+1)^(-alpha),}
   where \eqn{\alpha>0}{alpha>0} and \eqn{x=1,2,\ldots}{x=1,2,...}.
}
\value{
  \code{dhzeta} gives the density,
  \code{phzeta} gives the distribution function,
  \code{qhzeta} gives the quantile function, and
  \code{rhzeta} generates random deviates.
}
%\references{
%
% Pages 533--4 of
% Johnson N. L., Kemp, A. W. and Kotz S. (2005)
% \emph{Univariate Discrete Distributions},
% 3rd edition,
% Hoboken, New Jersey: Wiley.
%
%}
\author{ T. W. Yee and Kai Huang }
\note{
   Given some response data, the \pkg{VGAM} family function
   \code{\link{hzeta}} estimates the parameter \code{shape}.
}
\seealso{
  \code{\link{hzeta}},
  \code{\link{zeta}},
  \code{\link{zetaff}},
  \code{\link{simulate.vlm}}.
}
\examples{
dhzeta(1:20, 2.1)
rhzeta(20, 2.1)

round(1000 * dhzeta(1:8, 2))
table(rhzeta(1000, 2))

\dontrun{ shape <- 1.1; x <- 1:10
plot(x, dhzeta(x, shape = shape), type = "h", ylim = 0:1,
     lwd = 2, sub = paste("shape =", shape), las = 1, col = "blue",
     ylab = "Probability",
     main = "Haight's zeta: blue = density; orange = distribution function")
lines(x+0.1, phzeta(x, shape = shape), col = "orange", lty = 3, lwd = 2,
      type = "h")
}
}
\keyword{distribution}
VGAM/man/trinormalUC.Rd0000644000176200001440000000612113565414527014321 0ustar liggesusers\name{Trinorm}
\alias{Trinorm}
\alias{dtrinorm}
%\alias{ptrinorm}
\alias{rtrinorm}
\title{Trivariate Normal Distribution Density and Random Variates}
\description{
  Density and random generation for the trivariate normal
  distribution.
}
% quantile function
\usage{
dtrinorm(x1, x2, x3, mean1 = 0, mean2 = 0, mean3 = 0,
         var1 = 1, var2 = 1, var3 = 1,
         cov12 = 0, cov23 = 0, cov13 = 0, log = FALSE)
rtrinorm(n, mean1 = 0, mean2 = 0, mean3 = 0,
         var1 = 1, var2 = 1, var3 = 1,
         cov12 = 0, cov23 = 0, cov13 = 0)
}
\arguments{
  \item{x1, x2, x3}{vector of quantiles.}
  \item{mean1, mean2, mean3}{
  vectors of means.
  }
  \item{var1, var2, var3}{
  vectors of variances.
  }
  \item{cov12, cov23, cov13}{
  vectors of covariances.
  }
% \item{sd1, sd2, rho}{
% vector of standard deviations and correlation parameter.
% }
  \item{n}{number of observations.
  Same as \code{\link[stats]{rnorm}}.
  }
  \item{log}{
  Logical.
  If \code{log = TRUE} then the logarithm of the density is returned.
  }
% \item{rho}{
% See \code{\link{trinormal}}.
% }
}
\value{
  \code{dtrinorm} gives the density,
  \code{rtrinorm} generates random deviates (an \eqn{n} by 3 matrix).
% \code{qnorm2} gives the quantile function, and
}
% \author{ T. W. Yee }
\details{
  The default arguments correspond to the standard trivariate normal
  distribution with correlation parameters equal to 0,
  which corresponds to three independent standard normal distributions.
  Let \code{sd1} (say) denote \code{sqrt(var1)},
  written \eqn{\sigma_1}{sigma_1}, etc.
  Then the general formula for each correlation coefficient is
  of the form
  \eqn{\rho_{12} = cov_{12} / (\sigma_1 \sigma_2)}{rho12 = cov12 / (sigma_1 * sigma_2)},
  and similarly for the two others.
  Thus if the \code{var} arguments are left alone then the \code{cov}
  arguments can be entered as \eqn{\rho}{rho} values.
}
%\references{
%}
\section{Warning}{
  \code{dtrinorm()}'s arguments might change in the future!
  It's safest to use the full argument names
  to future-proof possible changes!
}
\note{
  For \code{rtrinorm()},
  if the \eqn{i}th variance-covariance matrix is not
  positive-definite then the \eqn{i}th row is all \code{NA}s.
}
\seealso{
  \code{\link[stats]{pnorm}},
  \code{\link{trinormal}},
  \code{\link{uninormal}},
  \code{\link{binormal}},
  \code{\link{rbinorm}}.
}
\examples{
\dontrun{nn <- 1000
tdata <- data.frame(x2 = sort(runif(nn)))
tdata <- transform(tdata, mean1 = 1 + 2 * x2,
                          mean2 = 3 + 1 * x2,
                          mean3 = 4,
                          var1 = exp( 1), var2 = exp( 1), var3 = exp( 1),
                          rho12 = rhobit( 1, inverse = TRUE),
                          rho23 = rhobit( 1, inverse = TRUE),
                          rho13 = rhobit(-1, inverse = TRUE))
ymat <- with(tdata, rtrinorm(nn, mean1, mean2, mean3,
                             var1, var2, var3,
                             sqrt(var1)*sqrt(var1)*rho12,
                             sqrt(var2)*sqrt(var3)*rho23,
                             sqrt(var1)*sqrt(var3)*rho13))
pairs(ymat, col = "blue")
}
}
\keyword{distribution}
VGAM/man/inv.lomax.Rd0000644000176200001440000000576413565414527014005 0ustar liggesusers\name{inv.lomax}
\alias{inv.lomax}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Inverse Lomax Distribution Family Function }
\description{
  Maximum likelihood estimation of the 2-parameter
  inverse Lomax distribution.
}
\usage{
inv.lomax(lscale = "loglink", lshape2.p = "loglink", iscale = NULL,
          ishape2.p = NULL, imethod = 1, gscale = exp(-5:5),
          gshape2.p = exp(-5:5), probs.y = c(0.25, 0.5, 0.75),
          zero = "shape2.p")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{lscale, lshape2.p}{
  Parameter link functions applied to the
  (positive) parameters \eqn{b}, and \eqn{p}.
  See \code{\link{Links}} for more choices.
  }
  \item{iscale, ishape2.p, imethod, zero}{
  See \code{\link{CommonVGAMffArguments}} for information.
  For \code{imethod = 2} a good initial value for
  \code{ishape2.p} is needed to obtain a good estimate for
  the other parameter.
  }
  \item{gscale, gshape2.p}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{probs.y}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
}
\details{
  The 2-parameter inverse Lomax distribution is the 4-parameter
  generalized beta II distribution with shape parameters \eqn{a=q=1}.
  It is also the 3-parameter Dagum distribution
  with shape parameter \eqn{a=1}, as well as the
  beta distribution of the second kind with \eqn{q=1}.
  More details can be found in Kleiber and Kotz (2003).
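  As a numerical check of the Dagum special case just mentioned
  (a sketch; the argument names are assumed to follow the usual
  \code{dpqr} conventions of this package):
% y <- c(0.5, 1, 2, 5)
% max(abs(dinv.lomax(y, scale = 2, shape2.p = 1.5) -
%         ddagum(y, scale = 2, shape1.a = 1, shape2.p = 1.5)))  # ~ 0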
The inverse Lomax distribution has density \deqn{f(y) = p y^{p-1} / [b^p \{1 + y/b\}^{p+1}]}{% f(y) = p y^(p-1) / [b^p (1 + y/b)^(p+1)]} for \eqn{b > 0}, \eqn{p > 0}, \eqn{y \geq 0}{y >= 0}. Here, \eqn{b} is the scale parameter \code{scale}, and \code{p} is a shape parameter. The mean does not seem to exist; the \emph{median} is returned as the fitted values. This family function handles multiple responses. % 20140826 % The mean does not exist; \code{NA}s are returned as the fitted values. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \note{ See the notes in \code{\link{genbetaII}}. } \seealso{ \code{\link{inv.lomax}}, \code{\link{genbetaII}}, \code{\link{betaII}}, \code{\link{dagum}}, \code{\link{sinmad}}, \code{\link{fisk}}, \code{\link{lomax}}, \code{\link{paralogistic}}, \code{\link{inv.paralogistic}}, \code{\link{simulate.vlm}}. } \examples{ idata <- data.frame(y = rinv.lomax(n = 2000, scale = exp(2), exp(1))) fit <- vglm(y ~ 1, inv.lomax, data = idata, trace = TRUE) fit <- vglm(y ~ 1, inv.lomax(iscale = exp(3)), data = idata, trace = TRUE, epsilon = 1e-8, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) summary(fit) } \keyword{models} \keyword{regression} VGAM/man/gompertzUC.Rd0000644000176200001440000000430213565414527014160 0ustar liggesusers\name{Gompertz} \alias{Gompertz} \alias{dgompertz} \alias{pgompertz} \alias{qgompertz} \alias{rgompertz} \title{The Gompertz Distribution} \description{ Density, cumulative distribution function, quantile function and random generation for the Gompertz distribution. } \usage{ dgompertz(x, scale = 1, shape, log = FALSE) pgompertz(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qgompertz(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rgompertz(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{scale, shape}{positive scale and shape parameters. } } \value{ \code{dgompertz} gives the density, \code{pgompertz} gives the cumulative distribution function, \code{qgompertz} gives the quantile function, and \code{rgompertz} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{gompertz}} for details. } %\note{ % %} \seealso{ \code{\link{gompertz}}, \code{\link{dgumbel}}, \code{\link{dmakeham}}. 
} \examples{ probs <- seq(0.01, 0.99, by = 0.01) Shape <- exp(1); Scale <- exp(1) max(abs(pgompertz(qgompertz(p = probs, Scale, shape = Shape), Scale, shape = Shape) - probs)) # Should be 0 \dontrun{ x <- seq(-0.1, 1.0, by = 0.001) plot(x, dgompertz(x, Scale,shape = Shape), type = "l", col = "blue", las = 1, main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "") abline(h = 0, col = "blue", lty = 2) lines(x, pgompertz(x, Scale, shape = Shape), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qgompertz(probs, Scale, shape = Shape) lines(Q, dgompertz(Q, Scale, shape = Shape), col = "purple", lty = 3, type = "h") pgompertz(Q, Scale, shape = Shape) - probs # Should be all zero abline(h = probs, col = "purple", lty = 3) } } \keyword{distribution} VGAM/man/ParetoUC.Rd0000644000176200001440000000451313565414527013547 0ustar liggesusers\name{Pareto} \alias{Pareto} \alias{dpareto} \alias{ppareto} \alias{qpareto} \alias{rpareto} \title{The Pareto Distribution} \description{ Density, distribution function, quantile function and random generation for the Pareto(I) distribution with parameters \code{scale} and \code{shape}. } \usage{ dpareto(x, scale = 1, shape, log = FALSE) ppareto(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qpareto(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rpareto(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{scale, shape}{the \eqn{\alpha}{alpha} and \eqn{k} parameters.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dpareto} gives the density, \code{ppareto} gives the distribution function, \code{qpareto} gives the quantile function, and \code{rpareto} generates random deviates. } \references{ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee and Kai Huang } \details{ See \code{\link{paretoff}}, the \pkg{VGAM} family function for estimating the parameter \eqn{k} by maximum likelihood estimation, for the formula of the probability density function and the range restrictions imposed on the parameters. } %%\note{ %% The Pareto distribution is %%} \seealso{ \code{\link{paretoff}}, \code{\link{ParetoIV}}. } \examples{ alpha <- 3; k <- exp(1); x <- seq(2.8, 8, len = 300) \dontrun{ plot(x, dpareto(x, scale = alpha, shape = k), type = "l", main = "Pareto density split into 10 equal areas") abline(h = 0, col = "blue", lty = 2) qvec <- qpareto(seq(0.1, 0.9, by = 0.1), scale = alpha, shape = k) lines(qvec, dpareto(qvec, scale = alpha, shape = k), col = "purple", lty = 3, type = "h") } pvec <- seq(0.1, 0.9, by = 0.1) qvec <- qpareto(pvec, scale = alpha, shape = k) ppareto(qvec, scale = alpha, shape = k) qpareto(ppareto(qvec, scale = alpha, shape = k), scale = alpha, shape = k) - qvec # Should be 0 } \keyword{distribution} VGAM/man/vplot.profile.Rd0000644000176200001440000000267713565414527014701 0ustar liggesusers% file MASS/man/plot.profile.Rd % copyright (C) 1999-2008 W. N. Venables and B. D. 
Ripley
%
\name{vplot.profile}
\alias{vplot.profile}
\alias{vpairs.profile}
\title{Plotting Functions for 'profile' Objects}
\description{
  \code{\link{plot}} and \code{\link{pairs}} methods for objects of
  class \code{"profile"}, but renamed as
  \code{vplot} and \code{vpairs}.
% \code{\link{vplot}} and \code{\link{vpairs}} methods for objects of
% class \code{"profile"}.
}
\usage{
vplot.profile(x, ...)
vpairs.profile(x, colours = 2:3, ...)
}
\arguments{
  \item{x}{an object inheriting from class \code{"profile"}.}
  \item{colours}{Colours to be used for the mean curves conditional on
    \code{x} and \code{y} respectively.}
  \item{\dots}{arguments passed to or from other methods.}
}
\details{
  See \code{\link[MASS]{profile.glm}} for details.
}
\author{
  T. W. Yee adapted this function from \code{\link[MASS]{profile.glm}},
  written originally by D. M. Bates and W. N. Venables.
  (For S in 1996.)
}
\seealso{
  \code{\link{profilevglm}},
  \code{\link{confintvglm}},
  \code{\link{lrt.stat}},
  \code{\link[MASS]{profile.glm}},
  \code{\link[stats]{profile.nls}}.
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
fit1 <- vglm(cbind(normal, mild, severe) ~ let, acat,
             trace = TRUE, data = pneumo)
pfit1 <- profile(fit1, trace = FALSE)
\dontrun{
vplot.profile(pfit1)
vpairs.profile(pfit1)
}
}
\keyword{models}
\keyword{hplot}
VGAM/man/freund61.Rd0000644000176200001440000001552113565414527013520 0ustar liggesusers\name{freund61}
\alias{freund61}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Freund's (1961) Bivariate Extension of the Exponential Distribution }
\description{
  Estimate the four parameters of the Freund (1961) bivariate extension
  of the exponential distribution
  by maximum likelihood estimation.
}
\usage{
freund61(la = "loglink",  lap = "loglink",  lb = "loglink",
         lbp = "loglink", ia = NULL, iap = NULL, ib = NULL,
         ibp = NULL, independent = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{la, lap, lb, lbp}{
  Link functions applied to the (positive) parameters
  \eqn{\alpha}{alpha}, \eqn{\alpha'}{alpha'},
  \eqn{\beta}{beta} and \eqn{\beta'}{beta'}, respectively
  (the ``\code{p}'' stands for ``prime'').
  See \code{\link{Links}} for more choices.
  }
  \item{ia, iap, ib, ibp}{
  Initial values for the four parameters respectively.
  The default is to estimate them all internally.
  }
  \item{independent}{
  Logical.
  If \code{TRUE} then the parameters are constrained to satisfy
  \eqn{\alpha=\alpha'}{alpha=alpha'} and \eqn{\beta=\beta'}{beta=beta'},
  which implies that \eqn{y_1}{y1} and \eqn{y_2}{y2} are independent
  and each have an ordinary exponential distribution.
  }
  \item{zero}{
  A vector specifying which linear/additive predictors
  are modelled as intercepts only.
  The values can be from the set \{1,2,3,4\}.
  The default is none of them.
  See \code{\link{CommonVGAMffArguments}} for more information.
  }
}
\details{
  This model represents one type of bivariate extension of the
  exponential distribution that is applicable to certain problems,
  in particular, to two-component systems which can function if one of
  the components has failed.
  For example, engine failures in two-engine planes, paired organs
  such as people's eyes, ears and kidneys.
  Suppose \eqn{y_1}{y1} and \eqn{y_2}{y2} are random variables
  representing the lifetimes of two components \eqn{A} and \eqn{B}
  in a two-component system.
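  The dependence mechanism described next is easy to simulate
  directly; a minimal sketch (\code{rfreund61} is a hypothetical
  helper, not a package function):
% rfreund61 <- function(n, alpha, alphap, beta, betap) {
%   t1 <- rexp(n, alpha + beta)                   # Time of first failure
%   A.first <- runif(n) < alpha / (alpha + beta)  # Did A fail first?
%   t2 <- t1 + ifelse(A.first, rexp(n, betap), rexp(n, alphap))
%   cbind(y1 = ifelse(A.first, t1, t2), y2 = ifelse(A.first, t2, t1))
% }
% ymat <- rfreund61(1000, alpha = 1, alphap = 2, beta = 1, betap = 2)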
  The dependence between \eqn{y_1}{y1} and \eqn{y_2}{y2} is essentially
  such that the failure of the \eqn{B} component changes the parameter
  of the exponential life distribution of the \eqn{A} component from
  \eqn{\alpha}{alpha} to \eqn{\alpha'}{alpha'}, while the failure of
  the \eqn{A} component changes the parameter of the exponential life
  distribution of the \eqn{B} component from \eqn{\beta}{beta} to
  \eqn{\beta'}{beta'}.

  The joint probability density function is given by
  \deqn{f(y_1,y_2) = \alpha \beta' \exp(-\beta' y_2 -
                     (\alpha+\beta-\beta')y_1) }{%
        f(y1,y2) = alpha * beta' * exp(-beta' * y2 -
                     (alpha+beta-beta') * y1) }
  for \eqn{0 < y_1 < y_2}{0 < y1 < y2}, and
  \deqn{f(y_1,y_2) = \beta \alpha' \exp(-\alpha' y_1 -
                     (\alpha+\beta-\alpha')y_2) }{%
        f(y1,y2) = beta * alpha' * exp(-alpha' * y1 -
                     (alpha+beta-alpha') * y2) }
  for \eqn{0 < y_2 < y_1}{0 < y2 < y1}.
  Here, all four parameters are positive, as well as the responses
  \eqn{y_1}{y1} and \eqn{y_2}{y2}.
  Under this model, the probability that component \eqn{A}
  is the first to fail is
  \eqn{\alpha/(\alpha+\beta)}{alpha/(alpha+beta)}.
  The time to the first failure has an exponential distribution with
  rate \eqn{\alpha+\beta}{alpha+beta}.
  Furthermore, the distribution of the time from first failure to
  failure of the other component is a mixture of
  Exponential(\eqn{\alpha'}{alpha'}) and
  Exponential(\eqn{\beta'}{beta'}) with proportions
  \eqn{\beta/(\alpha+\beta)}{beta/(alpha+beta)}
  and \eqn{\alpha/(\alpha+\beta)}{alpha/(alpha+beta)}
  respectively.

  The marginal distributions are, in general, not exponential.

  By default, the linear/additive predictors are
  \eqn{\eta_1=\log(\alpha)}{eta1=log(alpha)},
  \eqn{\eta_2=\log(\alpha')}{eta2=log(alpha')},
  \eqn{\eta_3=\log(\beta)}{eta3=log(beta)},
  \eqn{\eta_4=\log(\beta')}{eta4=log(beta')}.

  A special case is when \eqn{\alpha=\alpha'}{alpha=alpha'}
  and \eqn{\beta=\beta'}{beta=beta'},
  which means that \eqn{y_1}{y1} and \eqn{y_2}{y2} are independent, and
  both have an ordinary exponential distribution with means
  \eqn{1 / \alpha}{1/alpha} and \eqn{1 / \beta}{1/beta} respectively.

  Fisher scoring is used, and the initial values correspond to the
  MLEs of an intercept model.
  Consequently, convergence may take only one iteration.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\references{
Freund, J. E. (1961)
A bivariate extension of the exponential distribution.
\emph{Journal of the American Statistical Association},
\bold{56}, 971--977.
}
\author{ T. W. Yee }
\note{
  To estimate all four parameters, it is necessary to have some data
  where \eqn{y_1 < y_2}{y1 < y2} and \eqn{y_2 < y_1}{y2 < y1}.
}
% (The remaining sections of freund61.Rd and the header of SURff.Rd
% are missing here.)
\details{
  Proposed by Zellner (1962), the basic
  seemingly unrelated regressions (SUR)
  model is a set of LMs (\eqn{M > 1}{M > 1} of them) tied together
  at the error term level.
  Each LM's model matrix may potentially have its own set
  of predictor variables.

  Zellner's efficient (ZEF) estimator
  (also known as \emph{Zellner's two-stage Aitken estimator})
  can be obtained by setting \code{maxit = 1}
  (and possibly \code{divisor = "sqrt"} or \code{divisor = "n-max"}).

  The default value of \code{maxit} (in \code{\link{vglm.control}})
  probably means the \emph{iterative GLS} (IGLS) estimator is computed
  because IRLS will probably iterate to convergence.
  IGLS means, at each iteration, the residuals are used to estimate
  the error variance-covariance matrix, and then the matrix is used
  in the GLS.
  The IGLS estimator is also known as
  \emph{Zellner's iterative Aitken estimator}, or IZEF.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}}
  and \code{\link{vgam}}.
}
\references{
Zellner, A. (1962)
An Efficient Method of Estimating Seemingly Unrelated Regressions
and Tests for Aggregation Bias.
\emph{J. Amer. Statist. Assoc.},
\bold{57}(298), 348--368.

Kmenta, J. and Gilbert, R. F. (1968)
Small Sample Properties of Alternative Estimators
of Seemingly Unrelated Regressions.
\emph{J. Amer. Statist. Assoc.},
\bold{63}(324), 1180--1200.
}
\author{ T. W. Yee. }
\section{Warning }{
  The default convergence criterion may be a little loose.
  Try setting \code{epsilon = 1e-11}, especially with
  \code{mle.normal = TRUE}.
}
\note{
  The fitted object has slot \code{@extra$ncols.X.lm} which is an
  \eqn{M}-vector with the number of parameters for each LM.
  Also, \code{@misc$values.divisor} is the \eqn{M}-vector of
  \code{divisor} values.

  Constraint matrices are needed in order to specify which response
  variables each term on the RHS of the formula is a regressor for.
  See the \code{constraints} argument of \code{\link{vglm}}
  for more information.
% This \pkg{VGAM} family function is currently experimental.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
  \code{\link{uninormal}},
  \code{\link{gew}}.
}
\examples{
# Obtain some of the results of p.1199 of Kmenta and Gilbert (1968)
clist <- list("(Intercept)" = diag(2),
              "capital.g"   = rbind(1, 0),
              "value.g"     = rbind(1, 0),
              "capital.w"   = rbind(0, 1),
              "value.w"     = rbind(0, 1))
zef1 <- vglm(cbind(invest.g, invest.w) ~
             capital.g + value.g + capital.w + value.w,
             SURff(divisor = "sqrt"), maxit = 1,
             data = gew, trace = TRUE, constraints = clist)

round(coef(zef1, matrix = TRUE), digits = 4)  # ZEF
zef1@extra$ncols.X.lm
zef1@misc$divisor
zef1@misc$values.divisor
round(sqrt(diag(vcov(zef1))), digits = 4)  # SEs
nobs(zef1, type = "lm")
df.residual(zef1, type = "lm")

mle1 <- vglm(cbind(invest.g, invest.w) ~
             capital.g + value.g + capital.w + value.w,
             SURff(mle.normal = TRUE),
             epsilon = 1e-11,
             data = gew, trace = TRUE, constraints = clist)
round(coef(mle1, matrix = TRUE), digits = 4)  # MLE
round(sqrt(diag(vcov(mle1))), digits = 4)  # SEs
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
\keyword{regression}
% Prior to 20141108:
% SURff(mle.normal = TRUE, divisor = "n-max"),
VGAM/man/Coef.qrrvglm.Rd0000644000176200001440000001140513565414527014430 0ustar liggesusers\name{Coef.qrrvglm}
\alias{Coef.qrrvglm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Returns Important Matrices etc. of a QO Object }
\description{
  This methods function returns important matrices etc. of a
  QO object.
}
\usage{
Coef.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{object}{
% A CQO or UQO object.
  A CQO object, i.e., one of class \code{"qrrvglm"}.
  }
  \item{varI.latvar}{
  Logical indicating whether to scale the site scores (latent variables)
  to have variance-covariance matrix equal to the rank-\eqn{R} identity
  matrix. All models have uncorrelated site scores (latent variables),
  and this option stretches or shrinks the ordination axes if
  \code{TRUE}. See below for further details.
  }
  \item{refResponse}{
  Integer or character.
  Specifies the \emph{reference response} or \emph{reference species}.
  By default, the reference species is found by searching sequentially
  starting from the first species until a positive-definite tolerance
  matrix is found.
  Then this tolerance matrix is transformed to the identity matrix,
  and the site scores (latent variables) are made uncorrelated.
  See below for further details.
% If \code{eq.tolerances=FALSE}, then transformations occur so that
% the reference species has a tolerance matrix equal to the rank-\eqn{R}
% identity matrix.
  }
  \item{\dots}{ Currently unused. }
}
\details{
  If \code{I.tolerances=TRUE} or \code{eq.tolerances=TRUE}
  (and its estimated tolerance matrix is positive-definite)
  then all species' tolerances are unity by transformation or by
  definition, and the spread of the site scores can be compared to
  them. Vice versa, if one wishes to compare the tolerances with the
  site score variability then setting \code{varI.latvar=TRUE} is more
  appropriate.

  For rank-2 QRR-VGLMs, one of the species can be chosen so that the
  angle of its major axis and minor axis is zero, i.e., parallel to
  the ordination axes. This means that the two latent variables affect
  that species independently of each other, i.e., its tolerance matrix
  is diagonal.
  The argument \code{refResponse} allows one to choose which is the
  reference species, which must have a positive-definite tolerance
  matrix, i.e., is bell-shaped.
  If \code{refResponse} is not specified, then the code will try to
  choose some reference species starting from the first species.
  Although the \code{refResponse} argument could possibly be offered
  as an option when fitting the model, it is currently only available
  after fitting the model, e.g., in the functions
  \code{\link{Coef.qrrvglm}} and \code{\link{lvplot.qrrvglm}}.
}
\value{
  The \bold{A}, \bold{B1}, \bold{C}, \bold{T}, \bold{D} matrices/arrays
  are returned, along with other slots.
  The returned object has class \code{"Coef.qrrvglm"}
  (see \code{\link{Coef.qrrvglm-class}}).
% For UQO, \bold{C} is undefined.
}
\references{
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.

Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
}
\author{ Thomas W. Yee }
\note{
  Consider an equal-tolerances Poisson/binomial CQO model with
  \code{noRRR = ~ 1}.
  For \eqn{R=1} it has about \eqn{2S+p_2}{2*S+p2} parameters.
  For \eqn{R=2} it has about \eqn{3S+2 p_2}{3*S+2*p2} parameters.
  Here, \eqn{S} is the number of species, and \eqn{p_2=p-1}{p2=p-1} is
  the number of environmental variables making up the latent variable.
  For an unequal-tolerances Poisson/binomial CQO model with
  \code{noRRR = ~ 1}, it has about \eqn{3S -1 +p_2}{3*S-1+p2}
  parameters for \eqn{R=1}, and about \eqn{6S -3 +2p_2}{6*S-3+2*p2}
  parameters for \eqn{R=2}.
  Since the total number of data points is \eqn{nS}{n*S}, where
  \eqn{n} is the number of sites, it pays to divide the number of
  data points by the number of parameters to get some idea about
  how much information the parameters contain.
}
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
  \code{\link{cqo}},
  \code{\link{Coef.qrrvglm-class}},
  \code{print.Coef.qrrvglm},
  \code{\link{lvplot.qrrvglm}}.
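% A small worked instance of the parameter counting in the Note above
% (equal-tolerances model, noRRR = ~ 1), as a sketch:
% S <- 3; p2 <- 3; n <- 100  # Species; latent-variable predictors; sites
% c(rank1 = 2*S + p2, rank2 = 3*S + 2*p2)  # Approximate parameter counts
% (n * S) / (2*S + p2)  # Data points per parameter for rank 1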
}
\examples{
set.seed(123)
x2 <- rnorm(n <- 100)
x3 <- rnorm(n)
x4 <- rnorm(n)
latvar1 <- 0 + x3 - 2*x4
lambda1 <- exp(3 - 0.5 * ( latvar1-0)^2)
lambda2 <- exp(2 - 0.5 * ( latvar1-1)^2)
lambda3 <- exp(2 - 0.5 * ((latvar1+4)/2)^2)  # Unequal tolerances
y1 <- rpois(n, lambda1)
y2 <- rpois(n, lambda2)
y3 <- rpois(n, lambda3)
set.seed(111)
# vvv p1 <- cqo(cbind(y1, y2, y3) ~ x2 + x3 + x4, poissonff, trace = FALSE)
\dontrun{ lvplot(p1, y = TRUE, lcol = 1:3, pch = 1:3, pcol = 1:3) }
# vvv Coef(p1)
# vvv print(Coef(p1), digits=3)
}
\keyword{models}
\keyword{regression}
VGAM/man/laplaceUC.Rd0000644000176200001440000000651713565414527013720 0ustar liggesusers\name{laplaceUC}
\alias{dlaplace}
\alias{plaplace}
\alias{qlaplace}
\alias{rlaplace}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ The Laplace Distribution }
\description{
  Density, distribution function, quantile function and random
  generation for the Laplace distribution with location parameter
  \code{location} and scale parameter \code{scale}.
}
\usage{
dlaplace(x, location = 0, scale = 1, log = FALSE)
plaplace(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
qlaplace(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
rlaplace(n, location = 0, scale = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x, q}{vector of quantiles.}
  \item{p}{vector of probabilities.}
  \item{n}{number of observations.
  Same as in \code{\link[stats]{runif}}.
  }
  \item{location}{
    the location parameter \eqn{a}, which is the mean.
  }
  \item{scale}{
  the scale parameter \eqn{b}.
  Must consist of positive values.
  }
  \item{log}{
  Logical.
  If \code{log = TRUE} then the logarithm of the density is returned.
  }
  \item{lower.tail, log.p}{
  Same meaning as in \code{\link[stats:Normal]{pnorm}}
  or \code{\link[stats:Normal]{qnorm}}.
  }
}
\details{
  The Laplace distribution is often known as the double-exponential
  distribution and, for modelling, has heavier tails than the normal
  distribution.
  The Laplace density function is
  \deqn{f(y) = \frac{1}{2b} \exp \left( - \frac{|y-a|}{b}
               \right) }{%
        f(y) = (1/(2b)) exp( -|y-a|/b ) }
  where \eqn{-\infty < y < \infty}{-Inf < y < Inf},
  \eqn{-\infty < a < \infty}{-Inf < a < Inf}, and \eqn{b>0}{b>0}.
  The mean is \eqn{a}{a} and the variance is \eqn{2b^2}.

  See \code{\link{laplace}}, the \pkg{VGAM} family function
  for estimating the two parameters by maximum likelihood estimation,
  for formulae and details.
  Apart from \code{n}, all the above arguments may be vectors and
  are recycled to the appropriate length if necessary.
}
\value{
  \code{dlaplace} gives the density,
  \code{plaplace} gives the distribution function,
  \code{qlaplace} gives the quantile function, and
  \code{rlaplace} generates random deviates.
}
\references{
Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
\emph{Statistical Distributions},
Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
}
\author{ T. W. Yee and Kai Huang}
%\note{
%  The \pkg{VGAM} family function \code{\link{laplace}}
%  estimates the two parameters by maximum likelihood estimation.
%}
\seealso{
  \code{\link{laplace}}.
}
\examples{
loc <- 1; b <- 2
y <- rlaplace(n = 100, loc = loc, scale = b)
mean(y)    # sample mean
loc        # population mean
var(y)     # sample variance
2 * b^2    # population variance

\dontrun{
loc <- 0; b <- 1.5; x <- seq(-5, 5, by = 0.01)
plot(x, dlaplace(x, loc, b), type = "l", col = "blue", ylim = c(0,1),
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
abline(h = 0, col = "blue", lty = 2)
lines(qlaplace(seq(0.05,0.95,by = 0.05), loc, b),
      dlaplace(qlaplace(seq(0.05, 0.95, by = 0.05), loc, b), loc, b),
      col = "purple", lty = 3, type = "h")
lines(x, plaplace(x, loc, b), type = "l", col = "orange")
abline(h = 0, lty = 2)
}
plaplace(qlaplace(seq(0.05, 0.95, by = 0.05), loc, b), loc, b)
}
\keyword{distribution}
VGAM/man/garma.Rd0000644000176200001440000001504313565414527013154 0ustar liggesusers\name{garma}
\alias{garma}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{GARMA (Generalized Autoregressive Moving-Average) Models}
\description{
Fits GARMA models to time series data.

}
\usage{
garma(link = "identitylink", p.ar.lag = 1, q.ma.lag = 0,
      coefstart = NULL, step = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{link}{
Link function applied to the mean response.
The default is suitable for continuous responses.
The link \code{\link{loglink}} should be chosen if the data
are counts.
The link \code{\link{reciprocallink}} can be chosen if the data
are counts and the variance assumed for this is
\eqn{\mu^2}{mu^2}.
The links \code{\link{logitlink}}, \code{\link{probitlink}},
\code{\link{clogloglink}}, and \code{\link{cauchitlink}} are
supported and suitable for binary responses.

Note that for the log and logit links, zero values can be
replaced by \code{bvalue}.
See \code{\link{loglink}} and \code{\link{logitlink}} etc. for
specific information about each link function.

}
\item{p.ar.lag}{
A positive integer, the lag for the autoregressive component.
Called \eqn{p} below.

}
\item{q.ma.lag}{
A non-negative integer, the lag for the moving-average component.
Called \eqn{q} below.

}
\item{coefstart}{
Starting values for the coefficients.
Assigning this argument is highly recommended.
For technical reasons, the argument \code{coefstart} in
\code{\link{vglm}} cannot be used.

}
\item{step}{
Numeric. Step length, e.g., \code{0.5} means half-stepsizing.

}
% \item{constant}{
%   Used when the log or logit link is chosen.
%   For log, zero values are replaced by \code{constant}.
%   For logit, zero values are replaced by \code{constant} and
%   unit values replaced by \code{1-constant}.
% }
}
\details{
This function draws heavily on Benjamin \emph{et al.} (1998).
See also Benjamin \emph{et al.} (2003).
GARMA models extend the ARMA time series model to generalized
responses in the exponential family, e.g., Poisson counts,
binary responses.
Currently, this function is rudimentary and can handle only
certain continuous, count and binary responses.
The user must choose an appropriate link for the \code{link}
argument.

The GARMA(\eqn{p, q}) model is defined by firstly having a
response belonging to the exponential family
\deqn{f(y_t|D_t) = \exp
\left\{ \frac{y_t \theta_t - b(\theta_t)}{\phi / A_t} +
c(y_t, \phi / A_t)
\right\}}{%
f(y_t|D_t) = \exp
[ (y_t theta_t - b(theta_t)) / (phi / A_t) +
c(y_t, phi / A_t) ]
}
where \eqn{\theta_t}{theta_t} and \eqn{\phi}{phi} are the
canonical and scale parameters respectively, and \eqn{A_t}
are known prior weights.
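For example, a Poisson response with the canonical log link
corresponds to the special case
\eqn{\theta_t = \log \mu_t}{theta_t = log(mu_t)},
\eqn{b(\theta_t) = \exp(\theta_t)}{b(theta_t) = exp(theta_t)},
\eqn{\phi = 1}{phi = 1} and \eqn{A_t = 1}{A_t = 1}.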
The mean
\eqn{\mu_t=E(Y_t|D_t)=b'(\theta_t)}{mu_t=E(Y_t|D_t)=b'(theta_t)}
is related to the linear predictor \eqn{\eta_t}{eta_t} by the
link function \eqn{g}.
Here,
\eqn{D_t=\{x_t,\ldots,x_1,y_{t-1},\ldots,y_1,\mu_{t-1},\ldots,\mu_1\}}{
D_t={x_t,\ldots,x_1,y_(t-1),\ldots,y_1,mu_(t-1),\ldots,mu_1}}
is the previous information set.

Secondly, the GARMA(\eqn{p, q}) model is defined by
\deqn{g(\mu_t) = \eta_t = x_t^T \beta +
\sum_{k=1}^p \phi_k (g(y_{t-k}) - x_{t-k}^T \beta) +
\sum_{k=1}^q \theta_k (g(y_{t-k}) - \eta_{t-k}).}{%
g(mu_t) = eta_t = x_t^T beta +
\sum_{k=1}^p phi_k (g(y_{t-k}) - x_{t-k}^T beta) +
\sum_{k=1}^q theta_k (g(y_{t-k}) - eta_{t-k}).}
Parameter vectors \eqn{\beta}{beta}, \eqn{\phi}{phi} and
\eqn{\theta}{theta} are estimated by maximum likelihood.

}
\value{
An object of class \code{"vglmff"}
(see \code{\link{vglmff-class}}).
The object is used by modelling functions
such as \code{\link{vglm}}.

}
\references{
Benjamin, M. A., Rigby, R. A. and Stasinopoulos, M. D. (1998)
Fitting Non-Gaussian Time Series Models. Pages 191--196 in:
\emph{Proceedings in Computational Statistics COMPSTAT 1998},
eds. Payne, R. and Green, P. J. Physica-Verlag.

Benjamin, M. A., Rigby, R. A. and Stasinopoulos, M. D. (2003)
Generalized Autoregressive Moving Average Models.
\emph{Journal of the American Statistical Association},
\bold{98}, 214--223.

Zeger, S. L. and Qaqish, B. (1988)
Markov regression models for time series: a quasi-likelihood
approach.
\emph{Biometrics}, \bold{44}, 1019--1031.

}
\author{ T. W. Yee }
\note{
This function is unpolished and requires \emph{lots} of
improvements.
In particular, initialization is \emph{very poor}.
Results appear \emph{very} sensitive to the quality of the
initial values.
A limited amount of experience has shown that half-stepsizing
is often needed for convergence; therefore choosing
\code{crit = "coef"} is not recommended.
Overdispersion is not handled.

For binomial responses it is currently best to input a vector
of 1s and 0s rather than \code{cbind(successes, failures)}
because the initialize slot is rudimentary.

}
\section{Warning}{
This \pkg{VGAM} family function is 'non-standard' in that the
model does need some coercing to get it into the VGLM framework.
Special code is required to get it running.
A consequence is that some methods functions may give wrong
results when applied to the fitted object.

}
%\seealso{
%  The site \url{http://www.stat.auckland.ac.nz/~yee} contains
%  more documentation about this family function.
%  \code{\link{identity}},
%  \code{\link{logitlink}}.
%} \examples{ gdata <- data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78, 59, 73, 6, 44, 72, 66, 59, 60, 39, 52, 50, 29, 30, 56, 76, 55, 73, 104, 104, 52, 25, 33, 20, 60, 47, 6, 47, 22, 35, 30, 29, 58, 24, 34, 36, 34, 6, 19, 28, 16, 36, 33, 12, 26, 36, 39, 24, 14, 28, 13, 2, 30, 18, 17, 28, 9, 28, 20, 17, 12, 19, 18, 14, 23, 18, 22, 18, 19, 26, 27, 23, 24, 35, 22, 29, 28, 17, 30, 34, 17, 20, 49, 29, 35, 49, 25, 55, 42, 29, 16)) # See Zeger and Qaqish (1988) gdata <- transform(gdata, spikenum = seq(interspike)) bvalue <- 0.1 # .Machine$double.xmin # Boundary value fit <- vglm(interspike ~ 1, trace = TRUE, data = gdata, garma(loglink(bvalue = bvalue), p = 2, coefstart = c(4, 0.3, 0.4))) summary(fit) coef(fit, matrix = TRUE) Coef(fit) # A bug here \dontrun{ with(gdata, plot(interspike, ylim = c(0, 120), las = 1, xlab = "Spike Number", ylab = "Inter-Spike Time (ms)", col = "blue")) with(gdata, lines(spikenum[-(1:fit@misc$plag)], fitted(fit), col = "orange")) abline(h = mean(with(gdata, interspike)), lty = "dashed", col = "gray") } } \keyword{models} \keyword{regression} VGAM/man/undocumented-methods.Rd0000644000176200001440000003441713565414527016226 0ustar liggesusers\name{undocumented-methods} \docType{methods} %\alias{ccoef,ANY-method} %\alias{ccoef-method} % % % 201908 \alias{rootogram4,ANY-method} \alias{rootogram4,vglm-method} % 201906 \alias{step4,ANY-method} \alias{step4,vglm-method} \alias{add1,vglm-method} \alias{drop1,vglm-method} \alias{extractAIC,vglm-method} \alias{dfterms,ANY-method} \alias{dfterms,vglm-method} % 201802 \alias{ordsup,ANY-method} \alias{ordsup,vglm-method} \alias{anova,vglm-method} % 201801 \alias{lrt.stat,ANY-method} \alias{lrt.stat,vlm-method} \alias{wald.stat,ANY-method} \alias{wald.stat,vlm-method} \alias{score.stat,ANY-method} \alias{score.stat,vlm-method} % 20170915 \alias{TIC,ANY-method} \alias{TIC,vlm-method} % 201707 % \alias{lrp,vglm-method} % 201704 \alias{hdeff,vglm-method} % 201607, 201608: \alias{psint,pvgam-method} \alias{summary,pvgam-method} \alias{show,summary.pvgam-method} \alias{df.residual,pvgam-method} \alias{endf,ANY-method} \alias{endf,pvgam-method} \alias{endf,summary.pvgam-method} \alias{vcov,pvgam-method} \alias{show,pvgam,ANY-method} \alias{show,pvgam-method} \alias{model.matrix,pvgam-method} % 201604: \alias{plot,pvgam,ANY-method} % 201602: \alias{predictvglmS4VGAM,ANY,binom2.or-method} % 201601: \alias{showvglmS4VGAM,ANY,acat-method} \alias{showvgamS4VGAM,ANY,acat-method} \alias{showvglmS4VGAM,ANY,multinomial-method} \alias{showvgamS4VGAM,ANY,multinomial-method} % %\alias{coef,vgam-method} %\alias{coefficients,vgam-method} % 201512: \alias{summaryvglmS4VGAM,ANY,binom2.or-method} \alias{showsummaryvglmS4VGAM,ANY,binom2.or-method} % \alias{summaryvglmS4VGAM,ANY,posbernoulli.tb-method} \alias{showsummaryvglmS4VGAM,ANY,posbernoulli.tb-method} % \alias{showsummaryvglmS4VGAM,ANY,posbernoulli.b-method} \alias{showsummaryvglmS4VGAM,ANY,posbernoulli.t-method} % \alias{summaryvglmS4VGAM,ANY,VGAMcategorical-method} \alias{summaryvglmS4VGAM,ANY,cumulative-method} \alias{summaryvglmS4VGAM,ANY,multinomial-method} % \alias{showsummaryvglmS4VGAM,ANY,VGAMcategorical-method} \alias{showsummaryvglmS4VGAM,ANY,cumulative-method} \alias{showsummaryvglmS4VGAM,ANY,multinomial-method} % \alias{margeffS4VGAM,ANY,ANY,VGAMcategorical-method} \alias{margeffS4VGAM,ANY,ANY,VGAMordinal-method} \alias{margeffS4VGAM,ANY,ANY,acat-method} \alias{margeffS4VGAM,ANY,ANY,cratio-method} \alias{margeffS4VGAM,ANY,ANY,sratio-method} 
\alias{margeffS4VGAM,ANY,ANY,cumulative-method} \alias{margeffS4VGAM,ANY,ANY,multinomial-method} \alias{margeffS4VGAM,ANY,ANY,tobit-method} % %\alias{margeffS4VGAM,ANY,VGAMcategorical-method} %\alias{margeffS4VGAM,ANY,VGAMordinal-method} %\alias{margeffS4VGAM,ANY,acat-method} %\alias{margeffS4VGAM,ANY,cratio-method} %\alias{margeffS4VGAM,ANY,sratio-method} %\alias{margeffS4VGAM,ANY,cumulative-method} %\alias{margeffS4VGAM,ANY,multinomial-method} % % 201509: \alias{term.names,ANY-method} \alias{term.names,vlm-method} \alias{responseName,ANY-method} \alias{responseName,vlm-method} \alias{has.intercept,ANY-method} \alias{has.intercept,vlm-method} % 201508, for R 3.2.2: \alias{confint,ANY-method} \alias{confint,vglm-method} \alias{confint,vgam-method} \alias{confint,rrvglm-method} % % 201503, for R 3.1.3: \alias{is.buggy,ANY-method} \alias{is.buggy,vlm-method} \alias{familyname,ANY-method} \alias{familyname,vlm-method} \alias{familyname,vglmff-method} % % 201412 \alias{nparam,ANY-method} \alias{nparam,vlm-method} \alias{nparam,qrrvglm-method} \alias{nparam,rrvgam-method} \alias{nparam,vgam-method} \alias{nparam,vglm-method} \alias{nparam,rrvglm-method} \alias{linkfun,ANY-method} \alias{linkfun,vglm-method} % % % 201407 \alias{concoef,ANY-method} \alias{concoef,rrvgam-method} \alias{concoef,Coef.rrvgam-method} % % % 201406 \alias{QR.R,ANY-method} \alias{QR.R,vglm-method} \alias{QR.Q,ANY-method} \alias{QR.Q,vglm-method} % % % 201312 \alias{simulate,ANY-method} \alias{simulate,vlm-method} % % 20131104 \alias{family.name,ANY-method} \alias{family.name,vlm-method} \alias{family.name,vglmff-method} % 20130903 \alias{BIC,ANY-method} \alias{BIC,vlm-method} \alias{BIC,vglm-method} \alias{BIC,vgam-method} \alias{BIC,rrvglm-method} \alias{BIC,qrrvglm-method} \alias{BIC,rrvgam-method} % % 20121105 \alias{Rank,qrrvglm-method} \alias{Rank,rrvglm-method} \alias{Rank,rrvgam-method} % 20120821 \alias{model.matrix,vsmooth.spline-method} % % 20120511 \alias{is.parallel,matrix-method} \alias{is.parallel,vglm-method} \alias{is.parallel,ANY-method} \alias{is.zero,matrix-method} \alias{is.zero,vglm-method} \alias{is.zero,ANY-method} % % % 20120215 %\alias{print,vglmff-method} \alias{show,vglmff-method} % % % % 20120112 \alias{AIC,ANY-method} \alias{AICc,ANY-method} \alias{coef,ANY-method} \alias{logLik,ANY-method} \alias{plot,ANY-method} \alias{vcov,ANY-method} \alias{plot,rrvgam,ANY-method} \alias{plot,qrrvglm,ANY-method} \alias{plot,rcim,ANY-method} \alias{plot,rcim0,ANY-method} %\alias{plot,uqo,ANY-method} \alias{plot,vgam,ANY-method} \alias{plot,vglm,ANY-method} \alias{plot,vlm,ANY-method} \alias{plot,vsmooth.spline,ANY-method} % % % % % \alias{AIC,vlm-method} \alias{AIC,vglm-method} \alias{AIC,vgam-method} \alias{AIC,rrvglm-method} \alias{AIC,qrrvglm-method} \alias{AIC,rrvgam-method} \alias{AICc,vlm-method} % \alias{AICc,vglm-method} % 20190410 %\alias{AICc,vgam-method} %\alias{AICc,rrvglm-method} %\alias{AICc,qrrvglm-method} \alias{attrassign,lm-method} \alias{calibrate,ANY-method} %\alias{calibrate,rrvglm-method} %\alias{calibrate,qrrvglm-method} % \alias{calibrate,rrvgam-method} %\alias{calibrate,uqo-method} \alias{cdf,vglm-method} \alias{cdf,vgam-method} \alias{coefficients,rrvgam-method} \alias{coefficients,vlm-method} \alias{coefficients,vglm-method} \alias{coefficients,qrrvglm-method} %\alias{coefficients,uqo-method} \alias{coefficients,vsmooth.spline-method} \alias{coefficients,vsmooth.spline.fit-method} \alias{coefficients,summary.vglm-method} \alias{coefficients,summary.rrvglm-method} 
\alias{Coefficients,vlm-method} \alias{coef,rrvgam-method} \alias{coef,vlm-method} \alias{coef,vglm-method} \alias{coef,qrrvglm-method} %\alias{coef,uqo-method} \alias{coef,vsmooth.spline-method} \alias{coef,vsmooth.spline.fit-method} \alias{coef,summary.vglm-method} \alias{coef,summary.rrvglm-method} \alias{Coef,rrvgam-method} \alias{Coef,vlm-method} \alias{Coef,qrrvglm-method} \alias{Coef,rrvglm-method} %\alias{Coef,uqo-method} \alias{constraints,vlm-method} \alias{deplot,vglm-method} \alias{deplot,vgam-method} % \alias{depvar,ANY-method} \alias{depvar,rrvgam-method} \alias{depvar,qrrvglm-method} \alias{depvar,rcim-method} \alias{depvar,rrvglm-method} \alias{depvar,vlm-method} \alias{depvar,vsmooth.spline-method} % \alias{deviance,rrvgam-method} \alias{deviance,qrrvglm-method} \alias{deviance,vlm-method} %\alias{deviance,vglm-method} %\alias{deviance,uqo-method} \alias{df.residual,vlm-method} \alias{effects,vlm-method} \alias{fitted.values,qrrvglm-method} \alias{fitted.values,vlm-method} \alias{fitted.values,vglm-method} %\alias{fitted.values,uqo-method} \alias{fitted.values,vsmooth.spline-method} \alias{fitted,qrrvglm-method} \alias{fitted,vlm-method} \alias{fitted,vglm-method} %\alias{fitted,uqo-method} \alias{fitted,vsmooth.spline-method} % % %\alias{case.names,ANY-method} \alias{case.names,vlm-method} \alias{case.names,vgam-method} \alias{case.names,vglm-method} \alias{case.names,rrvglm-method} \alias{case.names,qrrvglm-method} \alias{case.names,grc-method} % %\alias{variable.names,ANY-method} \alias{variable.names,vlm-method} \alias{variable.names,vgam-method} \alias{variable.names,vglm-method} \alias{variable.names,rrvglm-method} \alias{variable.names,qrrvglm-method} \alias{variable.names,grc-method} % % %\alias{formula,ANY-method} \alias{formula,vlm-method} \alias{formula,vgam-method} \alias{formula,vglm-method} \alias{formula,rrvglm-method} \alias{formula,qrrvglm-method} \alias{formula,grc-method} %\alias{formula,uqo-method} % \alias{formula,vsmooth.spline-method} % % % \alias{hatvalues,ANY-method} \alias{hatvalues,vlm-method} \alias{hatvalues,vglm-method} \alias{hatvalues,rrvgam-method} \alias{hatvalues,qrrvglm-method} \alias{hatvalues,rcim-method} \alias{hatvalues,rrvglm-method} % % \alias{hatplot,ANY-method} \alias{hatplot,matrix-method} \alias{hatplot,vlm-method} \alias{hatplot,vglm-method} \alias{hatplot,rrvgam-method} \alias{hatplot,qrrvglm-method} \alias{hatplot,rcim-method} \alias{hatplot,rrvglm-method} % % \alias{dfbeta,ANY-method} \alias{dfbeta,matrix-method} \alias{dfbeta,vlm-method} \alias{dfbeta,vglm-method} \alias{dfbeta,rrvgam-method} \alias{dfbeta,qrrvglm-method} \alias{dfbeta,rcim-method} \alias{dfbeta,rrvglm-method} % % % \alias{guplot,numeric-method} \alias{guplot,vlm-method} %\alias{model.frame,ANY-method} \alias{model.frame,vlm-method} %\alias{plot,rcim0,ANY-method} %\alias{plot,rcim,ANY-method} %\alias{plot,rrvgam,ANY-method} %\alias{plot,vlm,ANY-method} %\alias{plot,vglm,ANY-method} %\alias{plot,vgam,ANY-method} %\alias{plot,qrrvglm,ANY-method} %\alias{plot,uqo,ANY-method} %\alias{plot,vsmooth.spline,ANY-method} \alias{predictors,vglm-method} \alias{rlplot,vglm-method} \alias{terms,vlm-method} %\alias{is.bell,uqo-method} \alias{is.bell,qrrvglm-method} \alias{is.bell,rrvglm-method} \alias{is.bell,vlm-method} \alias{is.bell,rrvgam-method} \alias{is.bell,Coef.qrrvglm-method} \alias{logLik,vlm-method} \alias{logLik,summary.vglm-method} \alias{logLik,vglm-method} \alias{logLik,vgam-method} \alias{logLik,qrrvglm-method} \alias{logLik,rrvgam-method} % 
\alias{lvplot,rrvgam-method} \alias{lvplot,qrrvglm-method} \alias{lvplot,rrvglm-method} %\alias{lvplot,uqo-method} % \alias{lv,rrvglm-method} \alias{lv,qrrvglm-method} \alias{lv,rrvgam-method} \alias{lv,Coef.rrvglm-method} \alias{lv,Coef.qrrvglm-method} \alias{lv,Coef.rrvgam-method} % \alias{lv,uqo-method} defunct %\alias{latvar,uqo-method} \alias{latvar,rrvgam-method} \alias{latvar,Coef.qrrvglm-method} \alias{latvar,Coef.rrvglm-method} \alias{latvar,rrvglm-method} \alias{latvar,qrrvglm-method} % \alias{Max,qrrvglm-method} \alias{Max,Coef.qrrvglm-method} %\alias{Max,uqo-method} \alias{Max,rrvgam-method} \alias{meplot,numeric-method} \alias{meplot,vlm-method} %\alias{model.matrix,ANY-method} \alias{model.matrix,qrrvglm-method} \alias{model.matrix,vlm-method} \alias{model.matrix,vgam-method} \alias{nobs,ANY-method} \alias{nobs,vlm-method} \alias{npred,ANY-method} \alias{npred,vlm-method} \alias{npred,rrvgam-method} \alias{npred,qrrvglm-method} \alias{npred,rcim-method} \alias{npred,rrvglm-method} \alias{nvar,ANY-method} \alias{nvar,vlm-method} \alias{nvar,vgam-method} \alias{nvar,rrvglm-method} \alias{nvar,qrrvglm-method} \alias{nvar,rrvgam-method} \alias{nvar,vlm-method} \alias{nvar,rcim-method} \alias{Opt,qrrvglm-method} \alias{Opt,Coef.qrrvglm-method} %\alias{Opt,uqo-method} \alias{Opt,rrvgam-method} \alias{persp,rrvgam-method} \alias{persp,qrrvglm-method} %\alias{persp,uqo-method} \alias{predict,rrvgam-method} \alias{predict,qrrvglm-method} \alias{predict,vgam-method} \alias{predict,vglm-method} \alias{predict,rrvglm-method} \alias{predict,vlm-method} %\alias{predict,uqo-method} \alias{predict,vsmooth.spline-method} \alias{predict,vsmooth.spline.fit-method} % % % Added 20090505: %\alias{print,ANY-method} % % % Added 20111224: \alias{lrtest,ANY-method} \alias{lrtest,vglm-method} %\alias{waldtest,ANY-method} \alias{print,VGAManova-method} \alias{show,VGAManova-method} % % % \alias{print,Coef.rrvgam-method} \alias{print,summary.rrvgam-method} \alias{print,qrrvglm-method} \alias{print,Coef.qrrvglm-method} \alias{print,rrvglm-method} % 20090505 \alias{print,summary.qrrvglm-method} \alias{print,Coef.rrvglm-method} \alias{print,vlm-method} \alias{print,vglm-method} \alias{print,vgam-method} \alias{print,summary.rrvglm-method} \alias{print,summary.vgam-method} \alias{print,summary.vglm-method} \alias{print,summary.vlm-method} %\alias{print,uqo-method} %\alias{print,Coef.uqo-method} %\alias{print,summary.uqo-method} \alias{print,vsmooth.spline-method} \alias{print,rrvgam-method} \alias{qtplot,vglm-method} \alias{qtplot,vgam-method} \alias{residuals,qrrvglm-method} \alias{residuals,vlm-method} \alias{residuals,vglm-method} \alias{residuals,vgam-method} %\alias{residuals,uqo-method} \alias{residuals,vsmooth.spline-method} \alias{resid,qrrvglm-method} \alias{resid,vlm-method} \alias{resid,vglm-method} \alias{resid,vgam-method} %\alias{resid,uqo-method} \alias{resid,vsmooth.spline-method} \alias{show,Coef.rrvgam-method} \alias{show,summary.rrvgam-method} \alias{show,qrrvglm-method} \alias{show,Coef.qrrvglm-method} \alias{show,rrvglm-method} % 20090505 \alias{show,summary.qrrvglm-method} \alias{show,Coef.rrvglm-method} \alias{show,vlm-method} \alias{show,vglm-method} \alias{show,vgam-method} \alias{show,summary.rrvglm-method} \alias{show,summary.vgam-method} \alias{show,summary.vglm-method} \alias{show,summary.vlm-method} %\alias{show,uqo-method} %\alias{show,Coef.uqo-method} %\alias{show,summary.uqo-method} \alias{show,vsmooth.spline-method} \alias{show,rrvgam-method} \alias{summary,grc-method} 
\alias{summary,rrvgam-method} \alias{summary,qrrvglm-method} \alias{summary,rcim-method} \alias{summary,rcim0-method} \alias{summary,rrvglm-method} \alias{summary,vgam-method} \alias{summary,vglm-method} \alias{summary,vlm-method} %\alias{summary,uqo-method} \alias{Tol,rrvgam-method} \alias{Tol,qrrvglm-method} \alias{Tol,Coef.qrrvglm-method} %\alias{Tol,uqo-method} %\alias{Tol,Coef.uqo-method} \alias{trplot,qrrvglm-method} %\alias{trplot,uqo-method} \alias{trplot,rrvgam-method} \alias{vcov,rrvglm-method} \alias{vcov,qrrvglm-method} \alias{vcov,vlm-method} \alias{vcov,vglm-method} \alias{vplot,factor-method} \alias{vplot,list-method} \alias{vplot,matrix-method} \alias{vplot,numeric-method} \alias{weights,vlm-method} \alias{weights,vglm-method} % % % This does not work (need one line for each one): %\alias{trplot,qrrvglm,uqo-method} % % % \title{ Undocumented Methods Functions } \description{ Lots of undocumented methods functions are aliased here. In the \pkg{VGAM} package there are currently many objects/methods/classes which are currently internal and/or undocumented. The help file suppresses the warnings when the package is 'CHECK'ed. } %\usage{ % \S4method{ccoef}{rrvgam,Coef.rrvgam,rrvglm,qrrvglm, % Coef.rrvglm,Coef.qrrvglm}(object, ...) %} \section{Methods}{ There are many methods and these will be documented over time. \describe{ \item{object}{ This argument is often used, and it is the primary object from which the function operates on. } } } \keyword{methods} \keyword{classes} %\keyword{ ~~ other possible keyword(s)} \keyword{models} \keyword{regression} \keyword{internal} VGAM/man/vglmff-class.Rd0000644000176200001440000002264013565414527014452 0ustar liggesusers\name{vglmff-class} \docType{class} \alias{vglmff-class} \title{Class ``vglmff'' } \description{ Family functions for the \pkg{VGAM} package } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("vglmff", ...)}. } \section{Slots}{ In the following, \eqn{M} is the number of linear/additive predictors. \describe{ \item{\code{blurb}:}{ Object of class \code{"character"} giving a small description of the model. Important arguments such as parameter link functions can be expressed here. } \item{\code{charfun}:}{ Object of class \code{"function"} which returns the characteristic function or variance function (usually for some GLMs only). The former uses a dummy variable x. Both use the linear/additive predictors. The function must have arguments \code{function(x, eta, extra = NULL, varfun = FALSE)}. The \code{eta} and \code{extra} arguments are used to obtain the parameter values. If \code{varfun = TRUE} then the function returns the variance function, else the characteristic function (default). Note that one should check that the \code{infos} slot has a list component called \code{charfun} which is \code{TRUE} before attempting to use this slot. This is an easier way to test that this slot is operable. } \item{\code{constraints}:}{ Object of class \code{"expression"} which sets up any constraint matrices defined by arguments in the family function. A \code{zero} argument is always fed into \code{cm.zero.vgam}, whereas other constraints are fed into \code{cm.vgam}. } \item{\code{deviance}:}{ Object of class \code{"function"} returning the deviance of the model. This slot is optional. If present, the function must have arguments \code{function(mu, y, w, residuals = FALSE, eta, extra = NULL)}. Deviance residuals are returned if \code{residuals = TRUE}. 
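For instance, a rough sketch of such a slot function, here
assuming a hypothetical Poisson-like family (this is an
illustration only, not the actual slot of any particular
\pkg{VGAM} family function):
\preformatted{
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
  # Poisson-type unit deviances, guarding against y == 0:
  devi <- 2 * w * (y * log(ifelse(y == 0, 1, y / mu)) - (y - mu))
  if (residuals) sign(y - mu) * sqrt(abs(devi)) else sum(devi)
}
}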
} \item{\code{fini}:}{ Object of class \code{"expression"} to insert code at a special position in \code{vglm.fit} or \code{vgam.fit}. This code is evaluated immediately after the fitting. } \item{\code{first}:}{ Object of class \code{"expression"} to insert code at a special position in \code{\link{vglm}} or \code{\link{vgam}}. } \item{\code{infos}:}{ Object of class \code{"function"} which returns a list with components such as \code{M1}. At present only a very few \pkg{VGAM} family functions have this feature implemented. Those that do do not require specifying the \code{M1} argument when used with \code{\link{rcim}}. } \item{\code{initialize}:}{ Object of class \code{"expression"} used to perform error checking (especially for the variable \code{y}) and obtain starting values for the model. In general, \code{etastart} or \code{mustart} are assigned values based on the variables \code{y}, \code{x} and \code{w}. } \item{\code{linkinv}:}{ Object of class \code{"function"} which returns the fitted values, given the linear/additive predictors. The function must have arguments \code{function(eta, extra = NULL)}. } \item{\code{last}:}{ Object of class \code{"expression"} to insert code at a special position (at the very end) of \code{vglm.fit()} or \code{vgam.fit()}. This code is evaluated after the fitting. The list \code{misc} is often assigned components in this slot, which becomes the \code{misc} slot on the fitted object. } \item{\code{linkfun}:}{ Object of class \code{"function"} which, given the fitted values, returns the linear/additive predictors. If present, the function must have arguments \code{function(mu, extra = NULL)}. Most \pkg{VGAM} family functions do not have a \code{linkfun} function. They largely are for classical exponential families, i.e., GLMs. } \item{\code{loglikelihood}:}{ Object of class \code{"function"} returning the log-likelihood of the model. This slot is optional. If present, the function must have arguments \code{function(mu, y, w, residuals = FALSE, eta, extra = NULL)}. The argument \code{residuals} can be ignored because log-likelihood residuals aren't defined. } \item{\code{middle}:}{ Object of class \code{"expression"} to insert code at a special position in \code{vglm.fit} or \code{vgam.fit}. } \item{\code{middle2}:}{ Object of class \code{"expression"} to insert code at a special position in \code{vglm.fit} or \code{vgam.fit}. } \item{\code{simslot}:}{ Object of class \code{"function"} to allow \code{\link[stats]{simulate}} to work. } \item{\code{hadof}:}{ Object of class \code{"function"}; experimental. } \item{\code{summary.dispersion}:}{ Object of class \code{"logical"} indicating whether the general VGLM formula (based on a residual sum of squares) can be used for computing the scaling/dispersion parameter. It is \code{TRUE} for most models except for nonlinear regression models. } \item{\code{vfamily}:}{ Object of class \code{"character"} giving class information about the family function. Although not developed at this stage, more flexible classes are planned in the future. For example, family functions \code{sratio}, \code{cratio}, \code{cumulative}, and \code{acat} all operate on categorical data, therefore will have a special class called \code{"VGAMcat"}, say. Then if \code{fit} was a \code{vglm} object, then \code{coef(fit)} would print out the \code{vglm} coefficients plus \code{"VGAMcat"} information as well. 
}
\item{\code{deriv}:}{
Object of class \code{"expression"} which returns an
\eqn{M}-column matrix of first derivatives of the log-likelihood
function with respect to the linear/additive predictors, i.e.,
the score vector.
In Yee and Wild (1996) this is the \eqn{\bold{d}_i}{\bold{d}i}
vector.
Thus each row of the matrix returned by this slot is such a
vector.

}
\item{\code{weight}:}{
Object of class \code{"expression"} which returns the second
derivatives of the log-likelihood function with respect to
the linear/additive predictors.
This can be either the observed or expected information matrix,
i.e., Newton-Raphson or Fisher-scoring respectively.
In Yee and Wild (1996) this is the \eqn{\bold{W}_i}{\bold{W}i}
matrix.
Thus each row of the matrix returned by this slot is such a
matrix.
Like the \code{weights} slot of \code{vglm}/\code{vgam}, it is
stored in \emph{matrix-band} form, whereby the first \eqn{M}
columns of the matrix are the diagonals, followed by the
upper-diagonal band, followed by the band above that, etc.
In this case, there can be up to \eqn{M(M+1)/2}{M(M+1)/2}
columns, with the last column corresponding to the
(1,\eqn{M}) elements of the weight matrices.
For example, when \eqn{M=3} there can be up to 6 columns,
corresponding to the (1,1), (2,2), (3,3), (1,2), (2,3) and
(1,3) elements, in that order.

}
\item{\code{validfitted, validparams}:}{
Functions that test that the fitted values and all parameters
are within range.
These functions can issue a warning if violations are detected.

}
}
}
\section{Methods}{
\describe{
\item{print}{\code{signature(x = "vglmff")}:
short summary of the family function.

}
}
}
\references{
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society,
Series B, Methodological},
\bold{58}, 481--493.

%\url{http://www.stat.auckland.ac.nz/~yee} contains further
%information on how to write \pkg{VGAM} family functions.
%The file is amongst other \pkg{VGAM} PDF documentation.

}
\author{ Thomas W. Yee }
\note{
With link functions etc., one must use \code{substitute} to
embed the options into the code.
There are two different forms:
\code{eval(substitute(expression({...}), list(...)))}
for expressions, and
\code{eval(substitute(function(...) { ... }, list(...)))}
for functions.

% 20130322; this is obsolete, and can delete it:
% A unified method of handling arguments is to use
% \code{match.arg}. This allows, for example,
% \code{vglm(..., family = cratio(link = logit))}
% and
% \code{vglm(..., family = cratio(link = "logi"))}
% to be equivalent (Nb. there is a \code{logit} function).

The \code{extra} argument in \code{linkinv}, \code{linkfun},
\code{deviance}, \code{loglikelihood}, etc. matches with the
argument \code{extra} in \code{\link{vglm}}, \code{\link{vgam}}
and \code{\link{rrvglm}}.
This allows input to be fed into all slots of a \pkg{VGAM}
family function.

The \code{deriv} expression is evaluated immediately prior to
\code{weight}, so there is provision for re-use of variables etc.
Programmers must be careful to choose variable names that do not
interfere with \code{vglm.fit()}, \code{vgam.fit()}, etc.

Programmers of \pkg{VGAM} family functions are encouraged to
keep to previous conventions regarding the naming of arguments,
e.g., \code{link} is the argument for parameter link functions,
\code{zero} for allowing some of the linear/additive predictors
to be an intercept term only, etc.

In general, Fisher-scoring is recommended over Newton-Raphson
where tractable.
Although usually slightly slower in convergence, the weight
matrices from using the expected information are
positive-definite over a larger parameter space.
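As a rough illustration of the first (expression) form, a
hypothetical family function with an argument \code{mylink}
might embed it as follows; this is a minimal sketch only, not
an actual \pkg{VGAM} family function:
\preformatted{
myfamily <- function(mylink = "loglink") {
  new("vglmff",
      blurb = c("Hypothetical family function; link: ", mylink),
      deriv = eval(substitute(expression({
        # compute the score matrix here, using .mylink
      }), list( .mylink = mylink ))),
      vfamily = "myfamily")
}
}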
}
\section{Warning }{
\pkg{VGAM} family functions are not compatible with
\code{\link[stats]{glm}}, nor \code{gam} (from either the
\pkg{gam} or \pkg{mgcv} package).

}
\seealso{
\code{\link{vglm}},
\code{\link{vgam}},
\code{\link{rrvglm}},
\code{\link{rcim}}.

}
\examples{
cratio()
cratio(link = "clogloglink")
cratio(link = "clogloglink", reverse = TRUE)
}
\keyword{classes}
VGAM/man/rayleighUC.Rd0000644000176200001440000000423313565414527014120 0ustar liggesusers\name{Rayleigh}
\alias{Rayleigh}
\alias{drayleigh}
\alias{prayleigh}
\alias{qrayleigh}
\alias{rrayleigh}
\title{The Rayleigh Distribution}
\description{
Density, distribution function, quantile function and random
generation for the Rayleigh distribution with scale parameter
\code{scale}.

}
\usage{
drayleigh(x, scale = 1, log = FALSE)
prayleigh(q, scale = 1, lower.tail = TRUE, log.p = FALSE)
qrayleigh(p, scale = 1, lower.tail = TRUE, log.p = FALSE)
rrayleigh(n, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
Fed into \code{\link[stats]{runif}}.

}
\item{scale}{the scale parameter \eqn{b}.}
\item{log}{
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.

}
\item{lower.tail, log.p}{
Same meaning as in \code{\link[stats:Normal]{pnorm}}
or \code{\link[stats:Normal]{qnorm}}.

}
}
\value{
\code{drayleigh} gives the density,
\code{prayleigh} gives the distribution function,
\code{qrayleigh} gives the quantile function, and
\code{rrayleigh} generates random deviates.

}
\references{
Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
\emph{Statistical Distributions},
Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.

}
\author{ T. W. Yee and Kai Huang }
\details{
See \code{\link{rayleigh}}, the \pkg{VGAM} family function for
estimating the scale parameter \eqn{b} by maximum likelihood
estimation, for the formula of the probability density function
and range restrictions on the parameter \eqn{b}.

}
\note{
The Rayleigh distribution is related to the
Maxwell distribution.

}
\seealso{
\code{\link{rayleigh}},
\code{\link{maxwell}}.

}
\examples{
\dontrun{ Scale <- 2; x <- seq(-1, 8, by = 0.1)
plot(x, drayleigh(x, scale = Scale), type = "l", ylim = c(0,1),
     las = 1, ylab = "",
     main = "Rayleigh density divided into 10 equal areas; orange = cdf")
abline(h = 0, col = "blue", lty = 2)
qq <- qrayleigh(seq(0.1, 0.9, by = 0.1), scale = Scale)
lines(qq, drayleigh(qq, scale = Scale), col = "purple", lty = 3, type = "h")
lines(x, prayleigh(x, scale = Scale), col = "orange") }
}
\keyword{distribution}
VGAM/man/model.matrixqrrvglm.Rd0000644000176200001440000000527113565414527016105 0ustar liggesusers\name{model.matrixqrrvglm}
\alias{model.matrixqrrvglm}
\title{Construct the Model Matrix of a QRR-VGLM Object}
\usage{
model.matrixqrrvglm(object, type = c("latvar", "lm", "vlm"), \dots)
}
\arguments{
\item{object}{an object of a class \code{"qrrvglm"},
i.e., a \code{\link{cqo}} object.

}
\item{type}{Type of model (or design) matrix returned.
The first is the default.
The value \code{"latvar"} is the model matrix mainly comprising
the latent variable values
(sometimes called the \emph{site scores}).
The value \code{"lm"} is the LM matrix directly corresponding
to the \code{formula} argument.
The value \code{"vlm"} is the big VLM model matrix \emph{given C}.

}
\item{\dots}{further arguments passed to or from other methods.

}
}
\description{
Creates a model matrix.
Two types can be returned: a large one (class \code{"vlm"} or
one that inherits from this such as \code{"vglm"}) or a small
one (such as would be returned for an object of class
\code{"lm"}).

}
\details{
This function creates one of several design matrices
from \code{object}.
For example, this can be a small LM object or a big VLM object.

When \code{type = "vlm"} this function calls \code{fnumat2R()}
to construct the big model matrix \emph{given C}.
That is, the constrained coefficients are assumed known,
so that something like a large Poisson or logistic regression
is set up.
This is because all responses are fitted simultaneously here.

The columns are labelled in the following order and
with the following prefixes:
\code{"A"} for the \eqn{A} matrix (linear in the latent variables),
\code{"D"} for the \eqn{D} matrix (quadratic in the latent variables),
\code{"x1."} for the \eqn{B_1}{B1} matrix (usually contains the
intercept; see the argument \code{noRRR} in
\code{\link{qrrvglm.control}}).

}
\value{
The design matrix \emph{after scaling} for a regression model
with the specified formula and data.
By \emph{after scaling}, it is meant that it matches the output
of \code{coef(qrrvglmObject)} rather than the original scaling
of the fitted object.

% This is Coef.qrrvglm() and not coefqrrvglm().
% coefqrrvglm() returns labelled or named coefficients.

}
%\references{
%}
\seealso{
\code{\link{model.matrixvlm}},
\code{\link{cqo}},
\code{\link{vcovqrrvglm}}.

}
\examples{
\dontrun{
set.seed(1); n <- 40; p <- 3; S <- 4; myrank <- 1
mydata <- rcqo(n, p, S, Rank = myrank, es.opt = TRUE, eq.max = TRUE)
(myform <- attr(mydata, "formula"))
mycqo <- cqo(myform, poissonff, data = mydata,
             I.tol = TRUE, Rank = myrank, Bestof = 5)
model.matrix(mycqo, type = "latvar")
model.matrix(mycqo, type = "lm")
model.matrix(mycqo, type = "vlm")
}
}
\keyword{models}
VGAM/man/is.smart.Rd0000644000176200001440000000304213565414527013621 0ustar liggesusers\name{is.smart}
\alias{is.smart}
\title{ Test For a Smart Object }
\description{
Tests an object to see if it is smart.

}
\usage{
is.smart(object)
}
\arguments{
\item{object}{
a function or a fitted model.

}
}
\value{
Returns \code{TRUE} or \code{FALSE}, according to whether the
\code{object} is smart or not.

}
\details{
If \code{object} is a function then this function looks to see
whether \code{object} has the logical attribute \code{"smart"}.
If so then this is returned, else \code{FALSE}.

If \code{object} is a fitted model then this function looks
to see whether \code{object@smart.prediction} or
\code{object\$smart.prediction} exists.
If it does and it is not equal to \code{list(smart.arg=FALSE)}
then a \code{TRUE} is returned, else \code{FALSE}.
The reason is that, e.g., \code{lm(..., smart = FALSE)}
and \code{vglm(..., smart = FALSE)} will return such a
specific list.

Writers of smart functions have to assign this attribute to
their smart function manually after it has been written.

}
\examples{
is.smart(sm.min1)  # TRUE
is.smart(sm.poly)  # TRUE
library(splines)
is.smart(sm.bs)  # TRUE
is.smart(sm.ns)  # TRUE
is.smart(tan)  # FALSE
\dontrun{
udata <- data.frame(x2 = rnorm(9))
fit1 <- vglm(rnorm(9) ~ x2, uninormal, data = udata)
is.smart(fit1)  # TRUE
fit2 <- vglm(rnorm(9) ~ x2, uninormal, data = udata, smart = FALSE)
is.smart(fit2)  # FALSE
fit2@smart.prediction
}
}
%\keyword{smart}
\keyword{models}
\keyword{regression}
\keyword{programming}
% Converted by Sd2Rd version 1.10.6.1.
VGAM/man/oalog.Rd0000644000176200001440000000703213565414527013165 0ustar liggesusers\name{oalog} \alias{oalog} %\alias{oalogff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-Altered Logarithmic Distribution } \description{ Fits a one-altered logarithmic distribution based on a conditional model involving a Bernoulli distribution and a 1-truncated logarithmic distribution. } \usage{ oalog(lpobs1 = "logitlink", lshape = "logitlink", type.fitted = c("mean", "shape", "pobs1", "onempobs1"), ipobs1 = NULL, gshape = ppoints(8), zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpobs1}{ Link function for the parameter \eqn{p_1}{pobs1} or \eqn{\phi}{phi}, called \code{pobs1} or \code{phi} here. See \code{\link{Links}} for more choices. } \item{lshape}{ See \code{\link{logff}} for details. } \item{gshape, type.fitted}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for information. } % \item{epobs1, eshape}{ % List. Extra argument for the respective links. % See \code{earg} in \code{\link{Links}} for general information. % epobs1 = list(), eshape = list(), % } \item{ipobs1, zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The response \eqn{Y} is one with probability \eqn{p_1}{pobs1}, or \eqn{Y} has a 1-truncated logarithmic distribution with probability \eqn{1-p_1}{1-pobs1}. Thus \eqn{0 < p_1 < 1}{0 < pobs1 < 1}, which is modelled as a function of the covariates. The one-altered logarithmic distribution differs from the one-inflated logarithmic distribution in that the former has ones coming from one source, whereas the latter has ones coming from the logarithmic distribution too. The one-inflated logarithmic distribution is implemented in the \pkg{VGAM} package. Some people call the one-altered logarithmic a \emph{hurdle} model. The input can be a matrix (multiple responses). By default, the two linear/additive predictors of \code{oalog} are \eqn{(logit(\phi), logit(s))^T}{(logit(phi), logit(shape))^T}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, returns the mean \eqn{\mu}{mu} (default) which is given by \deqn{\mu = \phi + (1-\phi) A}{% mu = phi + (1- phi) A} where \eqn{A} is the mean of the one-truncated logarithmic distribution. If \code{type.fitted = "pobs1"} then \eqn{p_1}{pobs1} is returned. } %\references{ % % %} %\section{Warning }{ %} \author{ T. W. Yee } \note{ This family function effectively combines \code{\link{binomialff}} and \code{\link{otlog}} into one family function. } \seealso{ \code{\link{Oalog}}, \code{\link{logff}}, \code{\link{oilog}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. 
} % \code{\link{poslogarithmic}}, \examples{ odata <- data.frame(x2 = runif(nn <- 1000)) odata <- transform(odata, pobs1 = logitlink(-1 + 2*x2, inverse = TRUE), shape = logitlink(-2 + 3*x2, inverse = TRUE)) odata <- transform(odata, y1 = roalog(nn, shape = shape, pobs1 = pobs1), y2 = roalog(nn, shape = shape, pobs1 = pobs1)) with(odata, table(y1)) ofit <- vglm(cbind(y1, y2) ~ x2, oalog, data = odata, trace = TRUE) coef(ofit, matrix = TRUE) head(fitted(ofit)) head(predict(ofit)) summary(ofit) } \keyword{models} \keyword{regression} VGAM/man/logF.UC.Rd0000644000176200001440000000225013565414527013256 0ustar liggesusers\name{dlogF} \alias{dlogF} % \alias{qnefghs} \title{ log F Distribution } \description{ Density for the log F distribution. % quantile function } \usage{ dlogF(x, shape1, shape2, log = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ Vector of quantiles. } \item{shape1, shape2}{Positive shape parameters. } % \item{p}{vector of probabilities.} % \item{n}{number of observations. A single positive integer.} \item{log}{ if \code{TRUE} then the log density is returned, else the density. } } \details{ The details are given in \code{\link{logF}}. } \value{ \code{dlogF} gives the density. % \code{pnefghs} gives the distribution function, and % \code{qnefghs} gives the quantile function, and % \code{rnefghs} generates random deviates. } %\references{ % % % %} \author{ T. W. Yee } %\note{ % %} \seealso{ \code{\link{hypersecant}}. % \code{\link{simulate.vlm}}. } \examples{ \dontrun{ shape1 <- 1.5; shape2 <- 0.5; x <- seq(-5, 8, length = 1001) plot(x, dlogF(x, shape1, shape2), type = "l", las = 1, col = "blue", ylab = "pdf", main = "log F density function") } } \keyword{distribution} VGAM/man/plotqrrvglm.Rd0000644000176200001440000000445213565414527014460 0ustar liggesusers\name{plotqrrvglm} \alias{plotqrrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Model Diagnostic Plots for QRR-VGLMs } \description{ The residuals of a QRR-VGLM are plotted for model diagnostic purposes. } \usage{ plotqrrvglm(object, rtype = c("response", "pearson", "deviance", "working"), ask = FALSE, main = paste(Rtype, "residuals vs latent variable(s)"), xlab = "Latent Variable", I.tolerances = object@control$eq.tolerances, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object of class \code{"qrrvglm"}. } \item{rtype}{ Character string giving residual type. By default, the first one is chosen. } \item{ask}{ Logical. If \code{TRUE}, the user is asked to hit the return key for the next plot. } \item{main}{ Character string giving the title of the plot. } \item{xlab}{ Character string giving the x-axis caption. } \item{I.tolerances}{ Logical. This argument is fed into \code{Coef(object, I.tolerances = I.tolerances)}. } \item{\dots}{ Other plotting arguments (see \code{\link[graphics]{par}}). } } \details{ Plotting the residuals can be potentially very useful for checking that the model fit is adequate. } \value{ The original object. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. } \author{Thomas W. Yee} \note{ An ordination plot of a QRR-VGLM can be obtained by \code{\link{lvplot.qrrvglm}}. } \seealso{ \code{\link{lvplot.qrrvglm}}, \code{\link{cqo}}. 
} \examples{\dontrun{ # QRR-VGLM on the hunting spiders data # This is computationally expensive set.seed(111) # This leads to the global solution hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardize environ vars p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, Crow1positive = FALSE) par(mfrow = c(3, 4)) plot(p1, rtype = "response", col = "blue", pch = 4, las = 1, main = "") } } \keyword{dplot} \keyword{models} \keyword{regression} VGAM/man/loglink.Rd0000644000176200001440000000564613565414527013534 0ustar liggesusers\name{loglink} %\name{loge} \alias{loglink} %\alias{loge} \alias{negloglink} %\alias{negloge} \alias{logneglink} %\alias{logneg} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Log Link Function, and Variants } \description{ Computes the log transformation, including its inverse and the first two derivatives. } \usage{ loglink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) negloglink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) logneglink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bvalue}{ See \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The log link function is very commonly used for parameters that are positive. Here, all logarithms are natural logarithms, i.e., to base \eqn{e}. Numerical values of \code{theta} close to 0 or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The function \code{loglink} computes \eqn{\log(\theta)}{log(theta)} whereas \code{negloglink} computes \eqn{-\log(\theta)=\log(1/\theta)}{-log(theta)=log(1/theta)}. The function \code{logneglink} computes \eqn{\log(-\theta)}{log(-theta)}, hence is suitable for parameters that are negative, e.g., a trap-shy effect in \code{\link{posbernoulli.b}}. } \value{ The following concerns \code{loglink}. For \code{deriv = 0}, the log of \code{theta}, i.e., \code{log(theta)} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{exp(theta)}. For \code{deriv = 1}, then the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ This function was called \code{loge} to avoid conflict with the \code{\link[base:Log]{log}} function. Numerical instability may occur when \code{theta} is close to 0 unless \code{bvalue} is used. } \seealso{ \code{\link{Links}}, \code{\link{explink}}, \code{\link{logitlink}}, \code{\link{logclink}}, \code{\link{logloglink}}, \code{\link[base:Log]{log}}, \code{\link{logofflink}}, \code{\link{lambertW}}, \code{\link{posbernoulli.b}}. 
}
\examples{
\dontrun{ loglink(seq(-0.2, 0.5, by = 0.1))
loglink(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin)
negloglink(seq(-0.2, 0.5, by = 0.1))
negloglink(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin) }
logneglink(seq(-0.5, -0.2, by = 0.1))
}
\keyword{math}
\keyword{models}
\keyword{regression}
VGAM/man/wine.Rd0000644000176200001440000000420713565414527013027 0ustar liggesusers\name{wine}
\alias{wine}
\docType{data}
\title{ Bitterness in Wine Data }
\description{
This oenological data frame concerns the amount of bitterness
in white wine.

}
\usage{
data(wine)
}
\format{
A data frame with 4 rows and the following 7 variables.

\describe{
\item{temp}{temperature, with levels cold and warm.

}
\item{contact}{whether contact of the juice with the
skin was allowed or avoided, for a specified period.
Two levels: no or yes.

}
\item{bitter1, bitter2, bitter3, bitter4, bitter5}{
numeric vectors, the counts.
The order is none to most intense.

}
}
}
\details{
The data set comes from Randall (1989) and concerns a factorial
experiment for investigating factors that affect the bitterness
of white wines.
There are two factors in the experiment:
temperature at the time of crushing the grapes and contact
of the juice with the skin.
Two bottles of wine were fermented for each of the treatment
combinations.
A panel of 9 judges was selected and trained to detect
bitterness, giving 72 bitterness ratings in total
(9 judges by 8 bottles).
Originally, bitterness was recorded on a continuous scale
from 0 (none) to 100 (intense), but the scores were later
grouped into five ordered categories, 1 to 5, using intervals
of equal length.

}
\source{

% Further information is at:
% September 30, 2013

Christensen, R. H. B. (2013)
Analysis of ordinal data with cumulative link
models---estimation with the R-package \pkg{ordinal}.
R Package Version 2013.9-30.
\url{https://CRAN.R-project.org/package=ordinal}.
%\url{https://www.R-project.org/package=ordinal}.
%\url{https://www.CRAN.R-project.org/package=ordinal}.

% Prior to 20150728
Randall, J. H. (1989)
The analysis of sensory data by generalized linear model.
\emph{Biometrical Journal}, \bold{31}(7), 781--793.

Kosmidis, I. (2014)
Improved estimation in cumulative link models.
\emph{Journal of the Royal Statistical Society,
Series B, Methodological},
\bold{76}, in press.

}
\examples{
wine
summary(wine)
}
\keyword{datasets}
VGAM/man/rcqo.Rd0000644000176200001440000003357113565414527013035 0ustar liggesusers\name{rcqo}
\alias{rcqo}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Constrained Quadratic Ordination }
\description{
Random generation for constrained quadratic
ordination (CQO).

}
\usage{
rcqo(n, p, S, Rank = 1,
     family = c("poisson", "negbinomial", "binomial-poisson",
                "Binomial-negbinomial", "ordinal-poisson",
                "Ordinal-negbinomial", "gamma2"),
     eq.maximums = FALSE, eq.tolerances = TRUE, es.optimums = FALSE,
     lo.abundance = if (eq.maximums) hi.abundance else 10,
     hi.abundance = 100, sd.latvar = head(1.5/2^(0:3), Rank),
     sd.optimums = ifelse(es.optimums, 1.5/Rank, 1) *
                   ifelse(scale.latvar, sd.latvar, 1),
     sd.tolerances = 0.25, Kvector = 1, Shape = 1,
     sqrt.arg = FALSE, log.arg = FALSE, rhox = 0.5, breaks = 4,
     seed = NULL, optimums1.arg = NULL, Crow1positive = TRUE,
     xmat = NULL, scale.latvar = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
Number of sites.
It is denoted by \eqn{n} below.

}
\item{p}{
Number of environmental variables, including an intercept term.
It is denoted by \eqn{p} below.
Must be no less than \eqn{1+R} in value.

}
\item{S}{
Number of species.
It is denoted by \eqn{S} below.

}
\item{Rank}{
The rank or the number of latent variables or true dimension
of the data in the reduced space.
This must be either 1, 2, 3 or 4.
It is denoted by \eqn{R}.

}
\item{family}{
What type of species data is to be returned.
The first choice is the default.
If binomial then a 0 means absence and 1 means presence.
If ordinal then the \code{breaks} argument is passed into
the \code{breaks} argument of \code{\link[base]{cut}}.
Note that either the Poisson or negative binomial distributions
are used to generate binomial and ordinal data, and that an
upper-case choice is used for the negative binomial distribution
(this makes it easier for the user).
If \code{"gamma2"} then this is the 2-parameter gamma
distribution.

% ,
% and the resulting values are
% 1,2,\ldots,\code{breaks} if \code{breaks} is a single integer zz
% else zz.

}
\item{eq.maximums}{
Logical. Does each species have the same maximum?
See arguments \code{lo.abundance} and \code{hi.abundance}.

}
\item{eq.tolerances}{
Logical. Does each species have the same tolerance?
If \code{TRUE} then the common value is 1 along every latent
variable, i.e., all species' tolerance matrices are the
order-\eqn{R} identity matrix.

}
\item{es.optimums}{
Logical. Do the species have equally spaced optimums?
If \code{TRUE} then the quantity \eqn{S^{1/R}}{S^(1/R)} must be
an integer with value 2 or more.
That is, there has to be an appropriate number of species in
total.
This is so that a grid of optimum values is possible in
\eqn{R}-dimensional latent variable space in order to place
the species' optimums.
Also see the argument \code{sd.tolerances}.

}
\item{lo.abundance, hi.abundance}{
Numeric. These are recycled to a vector of length \eqn{S}.
The species have a maximum between \code{lo.abundance} and
\code{hi.abundance}.
That is, at their optimal environment, the mean abundance of
each species is between the two componentwise values.
If \code{eq.maximums} is \code{TRUE} then \code{lo.abundance}
and \code{hi.abundance} must have the same values.
If \code{eq.maximums} is \code{FALSE} then the logarithms of
the maximums are uniformly distributed between
\code{log(lo.abundance)} and \code{log(hi.abundance)}.

}
\item{sd.latvar}{
Numeric, of length \eqn{R} (recycled if necessary).
Site scores along each latent variable have these standard
deviation values.
This must be a decreasing sequence of values because the first
ordination axis contains the greatest spread of the species'
site scores, followed by the second axis, followed by the third
axis, etc.

}
\item{sd.optimums}{
Numeric, of length \eqn{R} (recycled if necessary).
If \code{es.optimums = FALSE} then, for the \eqn{r}th latent
variable axis, the optimums of the species are generated from
a normal distribution centered about 0.
If \code{es.optimums = TRUE} then the \eqn{S} optimums are
equally spaced about 0 along every latent variable axis.
Regardless of the value of \code{es.optimums}, the optimums
are then scaled to give standard deviation
\code{sd.optimums[r]}.

}
\item{sd.tolerances}{
Numeric.
If \code{eq.tolerances = FALSE} then, for the \eqn{r}th latent
variable, the species' tolerances are chosen from a normal
distribution with mean 1 and standard deviation
\code{sd.tolerances[r]}.
However, the first species \code{y1} has its tolerance matrix
set equal to the order-\eqn{R} identity matrix.
All tolerance matrices for all species are diagonal in this
function.
This argument is ignored if \code{eq.tolerances} is \code{TRUE},
otherwise it is recycled to length \eqn{R} if necessary.

}
\item{Kvector}{
A vector of positive \eqn{k} values (recycled to length \eqn{S}
if necessary) for the negative binomial distribution
(see \code{\link{negbinomial}} for details).
Note that a natural default value does not exist; however, the
default value here is probably a realistic one, and for large
values of \eqn{\mu} one has
\eqn{Var(Y) = \mu^2 / k}{Var(Y) = mu^2 / k} approximately.

}
\item{Shape}{
A vector of positive \eqn{\lambda}{lambda} values (recycled to
length \eqn{S} if necessary) for the 2-parameter gamma
distribution (see \code{\link{gamma2}} for details).
Note that a natural default value does not exist; however, the
default value here is probably a realistic one, and
\eqn{Var(Y) = \mu^2 / \lambda}{Var(Y) = mu^2 / lambda}.

}
\item{sqrt.arg}{
Logical. Take the square-root of the negative binomial counts?
Assigning \code{sqrt.arg = TRUE} when \code{family="negbinomial"}
means that the resulting species data can be considered very
crudely to be approximately Poisson distributed.
They will not be integers in general but are much easier (fewer
numerical problems) to estimate using something like
\code{cqo(..., family="poissonff")}.

}
\item{log.arg}{
Logical. Take the logarithm of the gamma random variates?
Assigning \code{log.arg = TRUE} when \code{family="gamma2"}
means that the resulting species data can be considered very
crudely to be approximately Gaussian distributed about its
(quadratic) mean.

% The result is that it is much easier (less numerical
% problems) to estimate using something like
% \code{cqo(..., family="gaussianff")}.

}
\item{rhox}{
Numeric, less than 1 in absolute value.
The correlation between the environmental variables.
The correlation matrix is a matrix of 1's along the diagonal
and \code{rhox} in the off-diagonals.
Note that each environmental variable is normally distributed
with mean 0.
The standard deviation of each environmental variable is chosen
so that the site scores have the determined standard deviation,
as given by argument \code{sd.latvar}.

}
\item{breaks}{
If \code{family} is assigned an ordinal value then this argument
is used to define the cutpoints.
It is fed into the \code{breaks} argument of
\code{\link[base]{cut}}.

}
\item{seed}{
If given, it is passed into \code{\link[base:Random]{set.seed}}.
This argument can be used to obtain reproducible results.
If set, the value is saved as the \code{"seed"} attribute of
the returned value.
The default will not change the random generator state, and
returns \code{\link[base:Random]{.Random.seed}} as the
\code{"seed"} attribute.

}
\item{optimums1.arg}{
If assigned and \code{Rank = 1} then these are the explicit
optimums.
Recycled to length \code{S}.

}
\item{Crow1positive}{
See \code{\link{qrrvglm.control}} for details.

}
\item{xmat}{
The \eqn{n} by \eqn{p-1} environmental matrix can be input.

}
\item{scale.latvar}{
Logical. If \code{FALSE} the argument \code{sd.latvar} is
ignored and no scaling of the latent variable values is
performed.

}
}
\details{
This function generates data coming from a constrained quadratic
ordination (CQO) model. In particular, data coming from a
\emph{species packing model} can be generated with this function.
The species packing model states that species have equal tolerances, equal maximums, and optimums which are uniformly distributed over the latent variable space. This can be achieved by assigning the arguments \code{es.optimums = TRUE}, \code{eq.maximums = TRUE}, \code{eq.tolerances = TRUE}. At present, the Poisson and negative binomial abundances are generated first using \code{lo.abundance} and \code{hi.abundance}, and if \code{family} is binomial or ordinal then it is converted into these forms.

In CQO theory the \eqn{n} by \eqn{p} matrix \eqn{X} is partitioned into two parts \eqn{X_1} and \eqn{X_2}. The matrix \eqn{X_2} contains the `real' environmental variables whereas the variables in \eqn{X_1} are just for adjustment purposes; they contain the intercept terms and other variables that one wants to adjust for when (primarily) looking at the variables in \eqn{X_2}. In this function, \eqn{X_1} is simply a matrix of ones, i.e., it contains an intercept only. }
\value{ An \eqn{n} by \eqn{p-1+S} data frame with components and attributes. In the following the attributes are labelled with double quotes.
\item{x2, x3, x4, \ldots, xp}{ The environmental variables. This makes up the \eqn{n} by \eqn{p-1} \eqn{X_2} matrix. Note that \code{x1} is not present; it is effectively a vector of ones since it corresponds to an intercept term when \code{\link{cqo}} is applied to the data. }
\item{y1, y2, y3, \ldots, yS}{ The species data. This makes up the \eqn{n} by \eqn{S} matrix \eqn{Y}. This will be of the form described by the argument \code{family}. }
\item{"concoefficients"}{ The \eqn{p-1} by \eqn{R} matrix of constrained coefficients (or canonical coefficients). These are also known as weights or loadings. }
\item{"formula"}{ The formula involving the species and environmental variable names. This can be used directly in the \code{formula} argument of \code{\link{cqo}}. }
\item{"log.maximums"}{ The \eqn{S}-vector of species' maximums, on a log scale. These are uniformly distributed between \code{log(lo.abundance)} and \code{log(hi.abundance)}. }
\item{"latvar"}{ The \eqn{n} by \eqn{R} matrix of site scores. Each successive column (latent variable) has sample standard deviation equal to successive values of \code{sd.latvar}. }
\item{"eta"}{ The linear/additive predictor values. }
\item{"optimums"}{ The \eqn{S} by \eqn{R} matrix of species' optimums. }
\item{"tolerances"}{ The \eqn{S} by \eqn{R} matrix of species' tolerances. These are the square roots of the diagonal elements of the tolerance matrices (recall that all tolerance matrices are restricted to being diagonal in this function). }
Other attributes are \code{"break"}, \code{"family"}, \code{"Rank"}, \code{"lo.abundance"}, \code{"hi.abundance"}, \code{"eq.tolerances"}, \code{"eq.maximums"}, \code{"seed"} as used. }
\references{
Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701.

Yee, T. W. (2006) Constrained additive ordination. \emph{Ecology}, \bold{87}, 203--213.

ter Braak, C. J. F. and Prentice, I. C. (1988) A theory of gradient analysis. \emph{Advances in Ecological Research}, \bold{18}, 271--317. }
\author{ T. W. Yee }
\note{ This function is under development and is not finished yet. There may be a few bugs. Yet to do: add an argument that allows absences to be equal to the first level if ordinal data is requested.
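% A hedged sketch (not run) of generating ordinal data as described
% in the details above; the choice name "ordinal-poisson" and the
% single-integer breaks value are assumptions for illustration:
% odata <- rcqo(n = 100, p = 4, S = 6, family = "ordinal-poisson",
%               breaks = 4, seed = 1)
% table(odata$y1)  # Ordinal levels created via cut()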
} \seealso{ \code{\link{cqo}}, \code{\link{qrrvglm.control}}, \code{\link[base]{cut}}, \code{\link{binomialff}}, \code{\link{poissonff}}, \code{\link{negbinomial}}, \code{\link{gamma2}}. % \code{\link{gaussianff}}. } \examples{ \dontrun{ # Example 1: Species packing model: n <- 100; p <- 5; S <- 5 mydata <- rcqo(n, p, S, es.opt = TRUE, eq.max = TRUE) names(mydata) (myform <- attr(mydata, "formula")) fit <- cqo(myform, poissonff, mydata, Bestof = 3) # eq.tol = TRUE matplot(attr(mydata, "latvar"), mydata[,-(1:(p-1))], col = 1:S) persp(fit, col = 1:S, add = TRUE) lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) # The same plot as above # Compare the fitted model with the 'truth' concoef(fit) # The fitted model attr(mydata, "concoefficients") # The 'truth' c(apply(attr(mydata, "latvar"), 2, sd), apply(latvar(fit), 2, sd)) # Both values should be approx equal # Example 2: negative binomial data fitted using a Poisson model: n <- 200; p <- 5; S <- 5 mydata <- rcqo(n, p, S, fam = "negbin", sqrt = TRUE) myform <- attr(mydata, "formula") fit <- cqo(myform, fam = poissonff, dat = mydata) # I.tol = TRUE, lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) # Compare the fitted model with the 'truth' concoef(fit) # The fitted model attr(mydata, "concoefficients") # The 'truth' } } \keyword{distribution} \keyword{datagen} %# Example 3: gamma2 data fitted using a Gaussian model: %n <- 200; p <- 5; S <- 3 %mydata <- rcqo(n, p, S, fam = "gamma2", log.arg = TRUE) %fit <- cqo(attr(mydata, "formula"), % fam = gaussianff, data = mydata) # I.tol = TRUE, %matplot(attr(mydata, "latvar"), % exp(mydata[, -(1:(p-1))]), col = 1:S) # 'raw' data %# Fitted model to transformed data: %lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) %# Compare the fitted model with the 'truth' %concoef(fit) # The fitted model %attr(mydata, "concoefficients") # The 'truth' VGAM/man/linoUC.Rd0000644000176200001440000000507013565414527013255 0ustar liggesusers\name{Lino} \alias{Lino} \alias{dlino} \alias{plino} \alias{qlino} \alias{rlino} \title{The Generalized Beta Distribution (Libby and Novick, 1982)} \description{ Density, distribution function, quantile function and random generation for the generalized beta distribution, as proposed by Libby and Novick (1982). } \usage{ dlino(x, shape1, shape2, lambda = 1, log = FALSE) plino(q, shape1, shape2, lambda = 1, lower.tail = TRUE, log.p = FALSE) qlino(p, shape1, shape2, lambda = 1, lower.tail = TRUE, log.p = FALSE) rlino(n, shape1, shape2, lambda = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. } \item{shape1, shape2, lambda}{ see \code{\link{lino}}. } \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dlino} gives the density, \code{plino} gives the distribution function, \code{qlino} gives the quantile function, and \code{rlino} generates random deviates. } %\references{ % Libby, D. L. and Novick, M. R. (1982) % Multivariate generalized beta distributions with applications to % utility assessment. % \emph{Journal of Educational Statistics}, % \bold{7}, 271--294. % % Gupta, A. K. and Nadarajah, S. (2004) % \emph{Handbook of Beta Distribution and Its Applications}, % NY: Marcel Dekker, Inc. % %} \author{ T. W. 
Yee and Kai Huang }
\details{ See \code{\link{lino}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. }
%\note{
%
%}
\seealso{ \code{\link{lino}}. }
\examples{ \dontrun{ lambda <- 0.4; shape1 <- exp(1.3); shape2 <- exp(1.3)
x <- seq(0.0, 1.0, len = 101)
plot(x, dlino(x, shape1 = shape1, shape2 = shape2, lambda = lambda),
     type = "l", col = "blue", las = 1, ylab = "",
     main = "Blue is density, red is cumulative distribution function",
     sub = "Purple lines are the 10,20,...,90 percentiles")
abline(h = 0, col = "blue", lty = 2)
lines(x, plino(x, shape1 = shape1, shape2 = shape2, lambda = lambda),
      col = "red")
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qlino(probs, shape1 = shape1, shape2 = shape2, lambda = lambda)
lines(Q, dlino(Q, shape1 = shape1, shape2 = shape2, lambda = lambda),
      col = "purple", lty = 3, type = "h")
plino(Q, shape1 = shape1, shape2 = shape2, lambda = lambda) - probs  # Should be all 0
} }
\keyword{distribution}
VGAM/man/toppleUC.Rd0000644000176200001440000000427413565414527013624 0ustar liggesusers\name{Topple}
\alias{Topple} \alias{dtopple} \alias{ptopple} \alias{qtopple} \alias{rtopple}
\title{The Topp-Leone Distribution}
\description{ Density, distribution function, quantile function and random generation for the Topp-Leone distribution. }
\usage{
dtopple(x, shape, log = FALSE)
ptopple(q, shape, lower.tail = TRUE, log.p = FALSE)
qtopple(p, shape)
rtopple(n, shape)
}
\arguments{ \item{x, q, p, n}{ Same as \code{\link[stats:Uniform]{Uniform}}. }
\item{shape}{the (shape) parameter, which lies in \eqn{(0, 1)}.}
\item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. }
\item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } }
\value{ \code{dtopple} gives the density, \code{ptopple} gives the distribution function, \code{qtopple} gives the quantile function, and \code{rtopple} generates random deviates. }
\references{ Topp, C. W. and Leone, F. C. (1955) A family of J-shaped frequency functions. \emph{Journal of the American Statistical Association}, \bold{50}, 209--219. }
\author{ T. W. Yee }
\details{ See \code{\link{topple}}, the \pkg{VGAM} family function for estimating the (shape) parameter \eqn{s} by maximum likelihood estimation, for the formula of the probability density function. }
\note{ The Topp-Leone distribution is related to the triangle distribution. }
\seealso{ \code{\link{topple}}, \code{\link{Triangle}}. }
\examples{ \dontrun{ shape <- 0.7; x <- seq(0.02, 0.999, length = 300)
plot(x, dtopple(x, shape = shape), type = "l", col = "blue", las = 1,
     main = "Blue is density, orange is cumulative distribution function",
     sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "")
abline(h = 0, col = "blue", lty = 2)
lines(x, ptopple(x, shape = shape), type = "l", col = "orange")
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qtopple(probs, shape = shape)
lines(Q, dtopple(Q, shape), col = "purple", lty = 3, type = "h")
lines(Q, ptopple(Q, shape), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
max(abs(ptopple(Q, shape) - probs))  # Should be zero
} }
\keyword{distribution}
VGAM/man/gatnbinomial.mlm.Rd0000644000176200001440000001627513565414527015325 0ustar liggesusers\name{gatnbinomial.mlm}
\alias{gatnbinomial.mlm}
%\alias{ganbinomialff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generally-Altered and -Truncated Negative Binomial Regression Family Function (GAT--NB--MLM Variant) }
\description{ Fits a generally-altered and -truncated negative binomial regression (using a multinomial logit model for the altered values).
% The truncation may include values in the upper tail.
% based on a conditional
% model involving a multinomial distribution
% and a generally-truncated negative binomial distribution.
}
\usage{
gatnbinomial.mlm(alter = NULL, truncate = NULL, zero = "size",
                 lmunb = "loglink", lsize = "loglink",
                 type.fitted = c("mean", "pobs.a", "Pobs.a",
                                 "prob.a", "prob.t"),
                 imethod = 1, imunb = NULL, isize = exp(1),
                 ishrinkage = 0.95, probs.y = 0.35,
                 cutoff.prob = 0.999, eps.trig = 1e-7,
                 max.chunk.MB = 30)
}
%- maybe also 'usage' for other objects documented here.
% ipobs0 = NULL,
\arguments{ \item{alter, truncate}{ Same as \code{\link{gatpoisson.mlm}}.
% Must be sorted and have unique values only.
}
\item{lmunb, lsize}{ See \code{\link{Links}} for more choices and information. Similar to \code{\link{negbinomial}}. }
\item{type.fitted, zero}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{gatpoisson.mlm}} for information. }
\item{imethod, imunb, isize}{ See \code{\link{CommonVGAMffArguments}} for information.
% ipobs0,
}
\item{probs.y, ishrinkage}{ See \code{\link{CommonVGAMffArguments}} for information. }
\item{cutoff.prob, eps.trig}{ See \code{\link{negbinomial}} for information. }
\item{max.chunk.MB}{ See \code{\link{negbinomial}} for information. } }
\details{ The generally-truncated (GT) negative binomial distribution is an ordinary negative binomial distribution with the probability of certain values (given by the \code{truncate} argument) being zero. Thus the other probabilities are scaled up. The (0-truncated) positive-negative binomial distribution is a special case (\code{\link{posnegbinomial}}).

The generally-altered (GA) negative binomial distribution is an ordinary negative binomial distribution with the probability of certain values (given by the \code{alter} argument) being modelled using a multinomial logit model (see \code{\link{multinomial}}). The 0-altered negative binomial distribution is a special case (\code{\link{zanegbinomial}}) and it is called a \emph{hurdle} model by some people.
% The other values are modelled using a
% \emph{generally-truncated negative binomial} distribution.

This function can fit both the GA and GT models simultaneously, called the GAT--NB--MLM. It might be applied to heaped data. That is, each special value can be altered or truncated but not both. The default settings make this family function equivalent to \code{\link{negbinomial}}. This function implements Fisher scoring and currently does not handle multiple responses for the GT or GA models. Compared to what could be called \code{ginbinomial.mlm}, this family function handles deflation and inflation, and therefore handles a wider range of data. For further details please see \code{\link{Gaitnbinom.mlm}}. }
\value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. See \code{\link{gatpoisson.mlm}} for more information. }
%\references{
%}
\section{Warning }{ Due to its flexibility, it is easy to misuse this function; the \code{truncate} vector should ideally be not very long and have values that can be justified by the application at hand.
Likewise, the \code{alter} vector should be short too, and each value should have good justification for being included. Adding unnecessary values to these two arguments willy-nilly is a recipe for disaster. Regarding truncation, under- or over-flow may occur if the data is ill-conditioned. The response is checked to see that no values equal any values of the \code{truncate} vector. Compared to \code{\link{gatpoisson.mlm}} this family function is even more difficult to fit because it is more flexible and involves approximating an infinite series when computing the expected information matrix. % See \code{\link{gatpoisson.mlm}} for more information; } \author{ T. W. Yee } \note{ See \code{\link{gatpoisson.mlm}} for more information about robustness. % yettodo: see lines just above. This family function effectively combines what could have been \code{ganbinomial.mlm()} and \code{gtnbinomial.mlm()} together. The former would have effectively included \code{\link{multinomial}} inside it. % This family function can handle multiple responses, % e.g., more than one species. It is possible in the near future that \code{\link{zanegbinomial}}, \code{\link{zanegbinomialff}}, \code{\link{Zanegbin}}, \code{\link{posnegbinomial}}, \code{\link{Posnegbin}} be moved into \pkg{VGAMdata}. Likewise for \code{\link{zapoisson}}, \code{\link{zapoissonff}}, \code{\link{Zapois}}. An argument \code{max.support} is not implemented because the upper RHS tail of the NB distribution does not admit practical formulas that can be computed. However, instead of using something like \code{max.support = A} one could try something like \code{truncate = (A+1):(A+large)} for some positive integer \code{large} that is large enough, but within reason. For example, instead of \code{max.support = 20} one could try \code{truncate = 21:30} or \code{truncate = 21:40}. } \seealso{ \code{\link{Gaitnbinom.mlm}}, \code{\link{gatpoisson.mlm}}, \code{\link{gatnbinomial.mix}}, \code{\link{zanegbinomial}}, \code{\link{multinomial}}, \code{\link{Posnegbin}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. 
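% A hedged sketch (not run) of the Note's substitute for a
% max.support argument; mydat and its response y are hypothetical:
% fit.rhs <- vglm(y ~ 1, gatnbinomial.mlm(truncate = 21:40),
%                 data = mydat, trace = TRUE)
% # Approximates max.support = 20, provided no y values lie in 21:40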
% \code{\link{gtnbinomial}}, } \examples{ \dontrun{ avec <- c(10, 20, 30) # Alter these values tvec <- 0 # Truncate this value pobs.a <- logitlink(-(2:4), inverse = TRUE) # Between 0.02 and 0.12 size1 <- exp(1) gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, lambda1 = exp(2 + 0.5 * x2)) gdata <- transform(gdata, y1 = rgaitnbinom.mlm(nn, size1, mu = lambda1, pobs.a = pobs.a, truncate = tvec, byrow = TRUE, alter = avec)) gatnbinomial.mlm(alter = avec) (ty1 <- with(gdata, table(y1))) propn1 <- c(ty1) / sum(ty1) plot(as.numeric(names(ty1)), propn1, las = 1, xlab = "y", yaxs = "i", ylim = c(0, max(propn1) * 1.1), main = "Heaped data", ylab = "Proportion", lwd = 3, type = "h", col = "blue") fit1 <- vglm(y1 ~ x2, trace = TRUE, data = gdata, crit = "coef", gatnbinomial.mlm(alter = avec, truncate = tvec, zero = c("size", "pobs"))) head(fitted(fit1)) head(predict(fit1)) coef(fit1, matrix = TRUE) summary(fit1) } } \keyword{models} \keyword{regression} %ganbinomial(lpobs0 = "logitlink", llambda = "loglink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL) %ganbinomialff(llambda = "loglink", lonempobs0 = "logitlink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0") % cutoff.prob = 0.999, eps.trig = 1e-7, VGAM/man/lognormal.Rd0000644000176200001440000001114113565414527014052 0ustar liggesusers\name{lognormal} \alias{lognormal} %\alias{lognormal3} %%- Also NEED an '\alias' for EACH other topic documented here. \title{ Lognormal Distribution } \description{ Maximum likelihood estimation of the (univariate) lognormal distribution. } \usage{ lognormal(lmeanlog = "identitylink", lsdlog = "loglink", zero = "sdlog") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lmeanlog, lsdlog}{ Parameter link functions applied to the mean and (positive) \eqn{\sigma}{sigma} (standard deviation) parameter. Both of these are on the log scale. See \code{\link{Links}} for more choices. } % \item{emeanlog, esdlog}{ % emeanlog = list(), esdlog = list(), % emeanlog = list(), esdlog = list(), % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{zero}{ Specifies which linear/additive predictor is modelled as intercept-only. For \code{lognormal()}, the values can be from the set \{1,2\} which correspond to \code{mu}, \code{sigma}, respectively. See \code{\link{CommonVGAMffArguments}} for more information. % For \code{lognormal3()}, % the values must be from the set \{1,2,3\} where 3 is for % \eqn{\lambda}{\lambda}. } % \item{powers.try}{ % Numerical vector. The initial \eqn{lambda} is chosen % as the best value from \code{min(y) - 10^powers.try} where % \code{y} is the response. % } % \item{delta}{ % Numerical vector. An alternative method for % obtaining an initial \eqn{lambda}. Here, \code{delta = min(y)-lambda}. % If given, this supersedes the \code{powers.try} argument. % The value must be positive. % } } \details{ A random variable \eqn{Y} has a 2-parameter lognormal distribution if \eqn{\log(Y)}{log(Y)} is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}. The expected value of \eqn{Y}, which is \deqn{E(Y) = \exp(\mu + 0.5 \sigma^2)}{% E(Y) = exp(mu + 0.5 sigma^2)} and not \eqn{\mu}{mu}, make up the fitted values. 
The variance of \eqn{Y} is \deqn{Var(Y) = [\exp(\sigma^2) -1] \exp(2\mu + \sigma^2).}{% Var(Y) = [exp(sigma^2) -1] * exp(2 mu + sigma^2).} % A random variable \eqn{Y} has a 3-parameter lognormal distribution % if \eqn{\log(Y-\lambda)}{log(Y-lambda)} % is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}. Here, % \eqn{\lambda < Y}{lambda < Y}. % The expected value of \eqn{Y}, which is % \deqn{E(Y) = \lambda + \exp(\mu + 0.5 \sigma^2)}{% % E(Y) = lambda + exp(mu + 0.5 sigma^2)} % and not \eqn{\mu}{mu}, make up the fitted values. % \code{lognormal()} and \code{lognormal3()} fit the 2- and 3-parameter % lognormal distribution respectively. Clearly, if the location % parameter \eqn{\lambda=0}{lambda=0} then both distributions coincide. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } %\note{ % The more commonly used 2-parameter lognormal distribution is the % 3-parameter lognormal distribution with \eqn{\lambda}{lambda} equal % to zero---see \code{\link{lognormal3}}. % % %} %\section{Warning}{ % Regularity conditions are not satisfied for the 3-parameter case: % results may be erroneous. % May withdraw it in later versions. % % %} \seealso{ \code{\link[stats]{Lognormal}}, \code{\link{uninormal}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. % \code{\link{lognormal3}}, % \code{\link[stats]{rlnorm}}, } \examples{ ldata2 <- data.frame(x2 = runif(nn <- 1000)) ldata2 <- transform(ldata2, y1 = rlnorm(nn, mean = 1 + 2 * x2, sd = exp(-1)), y2 = rlnorm(nn, mean = 1, sd = exp(-1 + x2))) fit1 <- vglm(y1 ~ x2, lognormal(zero = 2), data = ldata2, trace = TRUE) fit2 <- vglm(y2 ~ x2, lognormal(zero = 1), data = ldata2, trace = TRUE) coef(fit1, matrix = TRUE) coef(fit2, matrix = TRUE) } \keyword{models} \keyword{regression} %lognormal3(lmeanlog = "identitylink", lsdlog = "loglink", % powers.try = (-3):3, delta = NULL, zero = 2) %lambda <- 4 %ldata3 <- data.frame(y3 = lambda + rlnorm(1000, m = 1.5, sd = exp(-0.8))) %fit3 <- vglm(y3 ~ 1, lognormal3, data = ldata3, trace = TRUE, crit = "c") %coef(fit3, matrix = TRUE) %summary(fit3) %ldata <- data.frame(y1 = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8))) %fit1 <- vglm(y1 ~ 1, lognormal, data = ldata, trace = TRUE, crit = "c") %coef(fit1, matrix = TRUE) %Coef(fit1) VGAM/man/gaitbinom.mlmUC.Rd0000644000176200001440000001355413565414527015057 0ustar liggesusers\name{Gaitbinom.mlm} \alias{Gaitbinom.mlm} \alias{dgaitbinom.mlm} \alias{pgaitbinom.mlm} \alias{qgaitbinom.mlm} \alias{rgaitbinom.mlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Altered, -Inflated and -Truncated Binomial Distribution (GAIT--binom--MLM--MLM) % (multinomial logit model based; GAIT--binom--MLM--MLM) } \description{ Density, distribution function, quantile function and random generation for the generally-altered, -inflated and -truncated binomial distribution based on the multinomial logit model (MLM). This distribution is sometimes abbreviated as GAIT--Binom--MLM--MLM. 
} \usage{ dgaitbinom.mlm(x, size, prob, alter = NULL, inflate = NULL, truncate = NULL, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE, log.arg = FALSE, .errorcheck = TRUE) pgaitbinom.mlm(q, size, prob, alter = NULL, inflate = NULL, truncate = NULL, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE, .errorcheck = TRUE) qgaitbinom.mlm(p, size, prob, alter = NULL, inflate = NULL, truncate = NULL, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE) rgaitbinom.mlm(n, size, prob, alter = NULL, inflate = NULL, truncate = NULL, pobs.a = 0, pstr.i = 0, byrow.arg = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q, p, n, log.arg}{ Same meaning as in \code{\link[stats]{rbinom}}. } \item{size, prob}{ Same meaning as in \code{\link[stats]{rbinom}}, i.e., for an ordinary binomial distribution. Short vectors are recycled. } \item{alter, inflate, truncate}{ Vectors of nonnegative integers; the altered, inflated and truncated values. Each argument must have unique values only. The default value of \code{NULL} means an empty set. % Value in the upper tail of the distribution may be % truncated by using \code{max.support}. % Must be sorted and have unique values only. } \item{pstr.i, byrow.arg}{ The first argument can be a \code{length(inflate)}-vector of probabilities; else a matrix of dimension \code{n x length(inflate)} of probabilities. If a vector then this matrix is constructed from the vector using \code{byrow.arg} to determine the enumeration of the elements (similar to \code{\link[base]{matrix}}). These arguments are not used unless \code{inflate} is assigned. %%% This paragraph only holds if inflation is the sole operator: % One can think of this matrix as comprising of % \emph{structural} probabilities. % Then the matrix augmented with one more column on the RHS so it has % dimension \code{n x (length(inflate) + 1)} % and whose \code{\link[base]{rowSums}} is a vector of 1s. % Finally, % for \code{\link{rgaitbinom.mlm}}, % a multinomial sample is taken and if it belongs to the final % column then binomial random variates are drawn. } \item{pobs.a}{ This argument is similar to \code{pstr.i} but is used when \code{alter} is assigned a vector. The argument \code{byrow.arg} is used similarly to construct a matrix of dimension \code{n x length(alter)} of probabilities. This argument is not used unless \code{alter} is assigned. } \item{.errorcheck}{ Logical. Should be ignored by the user. } } \details{ See \code{\link{Gaitpois.mlm}} for generic details applying to both distributions. These functions do what \code{dgabinom}, \code{dgibinom}, \code{dgtbinom}, \code{pgabinom}, \code{pgibinom}, \code{pgtbinom}, \code{qgabinom}, \code{qgibinom}, \code{qgtbinom}, \code{rgabinom}, \code{rgibinom}, \code{rgtbinom} collectively did because the arguments \code{alter}, \code{inflate} and \code{truncate} have been combined. } %\section{Warning }{ % See \code{\link{rgaitbinom.mlm}}. % The function can run slowly for certain combinations % of \code{pstr.i} and \code{inflate}, e.g., % \code{rgaitbinom.mlm(1e5, 1, inflate = 0:9, pstr.i = (1:10)/100)}. % Failure to obtain random variates will result in some % \code{NA} values instead. % An infinite loop can occur for certain combinations % of \code{lambda} and \code{inflate}, e.g., % \code{rgaitbinom.mlm(10, 1, trunc = 0:100)}. % No action is made to avoid this occurring. 
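% A hedged check (not run) that the minimal defaults reduce to the
% ordinary binomial, as stated in the Value section below:
% all.equal(dgaitbinom.mlm(0:10, size = 10, prob = 0.5),
%           dbinom(0:10, size = 10, prob = 0.5))  # Should be TRUE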
%} \value{ \code{dgaitbinom.mlm} gives the density, \code{pgaitbinom.mlm} gives the distribution function, \code{qgaitbinom.mlm} gives the quantile function, and \code{rgaitbinom.mlm} generates random deviates. The default values of the arguments correspond to ordinary \code{\link[stats:Binomial]{dbinom}}, \code{\link[stats:Binomial]{pbinom}}, \code{\link[stats:Binomial]{qbinom}}, \code{\link[stats:Binomial]{rbinom}} respectively. } %\references{ %None. %} \author{ T. W. Yee. } %\note{ % 20120405; no longer true to a superior method: % For \code{rposbinom}, the arguments of the function are fed % into \code{\link[stats:Binomial]{rbinom}} until \eqn{n} positive % values are obtained. This may take a long time if \code{lambda} % has values close to 0. % The family function \code{\link{posbinomson}} estimates % \eqn{\lambda}{lambda} by maximum likelihood estimation. %} % \code{\link{gaitbinomson}}, \seealso{ \code{\link{Gaitnbinom.mlm}}, \code{\link{Posbinom}}, \code{\link[stats:Binomial]{Binomial}}. % \code{\link{gabinomial}}, % \code{\link{Gaitbinom.mlm}}, % \code{\link{zabinomial}}, % \code{\link{zibinomial}}, } \examples{ avec <- 5:6 # Alter these (special) values ivec <- 9:10 # Inflate these (special) values tvec <- 2:4 # Truncate these (special) values pobs.a <- c(0.1, 0.2) pstr.i <- (1:2)/10 size <- 10; prob <- 0.5; xx <- 0:11 y <- rgaitbinom.mlm(1000, size, prob, alter = avec, inflate = ivec, truncate = tvec, pstr.i = pstr.i, pobs.a = pobs.a, byrow = TRUE) table(y) (ii <- dgaitbinom.mlm(xx, size, prob, alter = avec, inflate = ivec, truncate = tvec, pstr.i = pstr.i, pobs.a = pobs.a, byrow = TRUE)) } \keyword{distribution} VGAM/man/enormUC.Rd0000644000176200001440000000760713565414527013444 0ustar liggesusers\name{Expectiles-Normal} \alias{Expectiles-Normal} \alias{enorm} \alias{denorm} \alias{penorm} \alias{qenorm} \alias{renorm} \title{ Expectiles of the Normal Distribution } \description{ Density function, distribution function, and expectile function and random generation for the distribution associated with the expectiles of a normal distribution. } \usage{ denorm(x, mean = 0, sd = 1, log = FALSE) penorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE) qenorm(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6, lower.tail = TRUE, log.p = FALSE) renorm(n, mean = 0, sd = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, p, q}{ See \code{\link{deunif}}. } \item{n, mean, sd, log}{ See \code{\link[stats:Normal]{rnorm}}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } \item{Maxit.nr, Tol.nr}{ See \code{\link{deunif}}. } } \details{ General details are given in \code{\link{deunif}} including a note regarding the terminology used. Here, \code{norm} corresponds to the distribution of interest, \eqn{F}, and \code{enorm} corresponds to \eqn{G}. The addition of ``\code{e}'' is for the `other' distribution associated with the parent distribution. Thus \code{denorm} is for \eqn{g}, \code{penorm} is for \eqn{G}, \code{qenorm} is for the inverse of \eqn{G}, \code{renorm} generates random variates from \eqn{g}. For \code{qenorm} the Newton-Raphson algorithm is used to solve for \eqn{y} satisfying \eqn{p = G(y)}. Numerical problems may occur when values of \code{p} are very close to 0 or 1. } \value{ \code{denorm(x)} gives the density function \eqn{g(x)}. \code{penorm(q)} gives the distribution function \eqn{G(q)}. 
\code{qenorm(p)} gives the expectile function: the value \eqn{y} such that \eqn{G(y)=p}. \code{renorm(n)} gives \eqn{n} random variates from \eqn{G}. } %\references{ % %Jones, M. C. (1994) %Expectiles and M-quantiles are quantiles. %\emph{Statistics and Probability Letters}, %\bold{20}, 149--153. % %} \author{ T. W. Yee and Kai Huang } %\note{ %The ``\code{q}'', as the first character of ``\code{qeunif}'', %may be changed to ``\code{e}'' in the future, %the reason being to emphasize that the expectiles are returned. %Ditto for the argument ``\code{q}'' in \code{peunif}. % %} \seealso{ \code{\link{deunif}}, \code{\link{deexp}}, \code{\link{dnorm}}, \code{\link{amlnormal}}, \code{\link{lms.bcn}}. } \examples{ my.p <- 0.25; y <- rnorm(nn <- 1000) (myexp <- qenorm(my.p)) sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my.p # Non-standard normal mymean <- 1; mysd <- 2 yy <- rnorm(nn, mymean, mysd) (myexp <- qenorm(my.p, mymean, mysd)) sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my.p penorm(-Inf, mymean, mysd) # Should be 0 penorm( Inf, mymean, mysd) # Should be 1 penorm(mean(yy), mymean, mysd) # Should be 0.5 abs(qenorm(0.5, mymean, mysd) - mean(yy)) # Should be 0 abs(penorm(myexp, mymean, mysd) - my.p) # Should be 0 integrate(f = denorm, lower = -Inf, upper = Inf, mymean, mysd) # Should be 1 \dontrun{ par(mfrow = c(2, 1)) yy <- seq(-3, 3, len = nn) plot(yy, denorm(yy), type = "l", col="blue", xlab = "y", ylab = "g(y)", main = "g(y) for N(0,1); dotted green is f(y) = dnorm(y)") lines(yy, dnorm(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original' plot(yy, penorm(yy), type = "l", col = "blue", ylim = 0:1, xlab = "y", ylab = "G(y)", main = "G(y) for N(0,1)") abline(v = 0, h = 0.5, col = "red", lty = "dashed") lines(yy, pnorm(yy), col = "darkgreen", lty = "dotted", lwd = 2) } } \keyword{distribution} %# Equivalently: %I1 = mean(y <= myexp) * mean( myexp - y[y <= myexp]) %I2 = mean(y > myexp) * mean(-myexp + y[y > myexp]) %I1 / (I1 + I2) # Should be my.p %# Or: %I1 = sum( myexp - y[y <= myexp]) %I2 = sum(-myexp + y[y > myexp]) VGAM/man/bisaUC.Rd0000644000176200001440000000473713565414527013243 0ustar liggesusers\name{Bisa} \alias{Bisa} \alias{dbisa} \alias{pbisa} \alias{qbisa} \alias{rbisa} \title{The Birnbaum-Saunders Distribution} \description{ Density, distribution function, and random generation for the Birnbaum-Saunders distribution. } \usage{ dbisa(x, scale = 1, shape, log = FALSE) pbisa(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) qbisa(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) rbisa(n, scale = 1, shape) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{ Same as in \code{\link[stats]{runif}}. } \item{scale, shape}{ the (positive) scale and shape parameters. } \item{log}{ Logical. If \code{TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dbisa} gives the density, \code{pbisa} gives the distribution function, and \code{qbisa} gives the quantile function, and \code{rbisa} generates random deviates. } \author{ T. W. Yee and Kai Huang } \details{ The Birnbaum-Saunders distribution is a distribution which is used in survival analysis. See \code{\link{bisa}}, the \pkg{VGAM} family function for estimating the parameters, for more details. } %\note{ %} \seealso{ \code{\link{bisa}}. 
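% A hedged check (not run) that qbisa() inverts pbisa(); the shape
% value is illustrative only:
% p <- seq(0.1, 0.9, by = 0.2)
% all.equal(pbisa(qbisa(p, shape = 1), shape = 1), p)  # Should be TRUE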
} \examples{ \dontrun{ x <- seq(0, 6, len = 400) plot(x, dbisa(x, shape = 1), type = "l", col = "blue", ylab = "Density", lwd = 2, ylim = c(0,1.3), lty = 3, main = "X ~ Birnbaum-Saunders(shape, scale = 1)") lines(x, dbisa(x, shape = 2), col = "orange", lty = 2, lwd = 2) lines(x, dbisa(x, shape = 0.5), col = "green", lty = 1, lwd = 2) legend(x = 3, y = 0.9, legend = paste("shape = ",c(0.5, 1,2)), col = c("green","blue","orange"), lty = 1:3, lwd = 2) shape <- 1; x <- seq(0.0, 4, len = 401) plot(x, dbisa(x, shape = shape), type = "l", col = "blue", las = 1, ylab = "", main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1) abline(h = 0, col = "blue", lty = 2) lines(x, pbisa(x, shape = shape), col = "orange") probs <- seq(0.1, 0.9, by = 0.1) Q <- qbisa(probs, shape = shape) lines(Q, dbisa(Q, shape = shape), col = "purple", lty = 3, type = "h") pbisa(Q, shape = shape) - probs # Should be all zero abline(h = probs, col = "purple", lty = 3) lines(Q, pbisa(Q, shape = shape), col = "purple", lty = 3, type = "h") } } \keyword{distribution} VGAM/man/inv.paralogisticUC.Rd0000644000176200001440000000426413565414527015574 0ustar liggesusers\name{Inv.paralogistic} \alias{Inv.paralogistic} \alias{dinv.paralogistic} \alias{pinv.paralogistic} \alias{qinv.paralogistic} \alias{rinv.paralogistic} \title{The Inverse Paralogistic Distribution} \description{ Density, distribution function, quantile function and random generation for the inverse paralogistic distribution with shape parameters \code{a} and \code{p}, and scale parameter \code{scale}. } \usage{ dinv.paralogistic(x, scale = 1, shape1.a, log = FALSE) pinv.paralogistic(q, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE) qinv.paralogistic(p, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE) rinv.paralogistic(n, scale = 1, shape1.a) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1.a}{shape parameter.} \item{scale}{scale parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dinv.paralogistic} gives the density, \code{pinv.paralogistic} gives the distribution function, \code{qinv.paralogistic} gives the quantile function, and \code{rinv.paralogistic} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \details{ See \code{\link{inv.paralogistic}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The inverse paralogistic distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{inv.paralogistic}}, \code{\link{genbetaII}}. 
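% A hedged check (not run) of the Note above: the inverse paralogistic
% is the generalized beta II with shape2.p = shape1.a and shape3.q = 1;
% this assumes dgenbetaII() is available with these argument names:
% x <- c(0.5, 1, 2)
% all.equal(dinv.paralogistic(x, scale = 2, shape1.a = 3),
%           dgenbetaII(x, scale = 2, shape1.a = 3,
%                      shape2.p = 3, shape3.q = 1))  # Should be TRUE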
} \examples{ idata <- data.frame(y = rinv.paralogistic(n = 3000, exp(1), scale = exp(2))) fit <- vglm(y ~ 1, inv.paralogistic(lss = FALSE, ishape1.a = 2.1), data = idata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) } \keyword{distribution} VGAM/man/wrapup.smart.Rd0000644000176200001440000000203113565414527014521 0ustar liggesusers\name{wrapup.smart} \alias{wrapup.smart} \title{ Cleans Up After Smart Prediction } \description{ \code{wrapup.smart} deletes any variables used by smart prediction. Needed by both the modelling function and the prediction function. } \usage{ wrapup.smart() } \details{ The variables to be deleted are \code{.smart.prediction}, \code{.smart.prediction.counter}, and \code{.smart.prediction.mode}. The function \code{wrapup.smart} is useful in \R because these variables are held in \code{smartpredenv}. % In S-PLUS, % \code{wrapup.smart} is not really necessary because the variables are % placed in frame 1, which disappears when finished anyway. } %\references{ % See the technical help file at \url{http://www.stat.auckland.ac.nz/~yee} % for details. % % % %} \seealso{ \code{\link{setup.smart}}. } \examples{ \dontrun{# Place this inside modelling functions such as lm, glm, vglm. wrapup.smart() # Put at the end of lm } } \keyword{models} \keyword{regression} \keyword{programming} % Converted by Sd2Rd version 1.10. VGAM/man/pgamma.deriv.Rd0000644000176200001440000000623213565414527014437 0ustar liggesusers\name{pgamma.deriv} \alias{pgamma.deriv} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Derivatives of the Incomplete Gamma Integral } \description{ The first two derivatives of the incomplete gamma integral. } \usage{ pgamma.deriv(q, shape, tmax = 100) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{q, shape}{ As in \code{\link[stats]{pgamma}} but these must be vectors of positive values only and finite. } % \item{shape}{ % A vector of positive values. % %} \item{tmax}{ Maximum number of iterations allowed in the computation (per \code{q} value). } } \details{ Write \eqn{x = q} and \code{shape =} \eqn{a}. The first and second derivatives with respect to \eqn{q} and \eqn{a} are returned. This function is similar in spirit to \code{\link[stats]{pgamma}}; define \deqn{P(a,x) = \frac{1}{\Gamma(a)} \int_0^x t^{a-1} e^{-t} dt}{P(a,x) = 1/Gamma(a) integral_0^x t^(a-1) exp(-t) dt} so that \eqn{P(a, x)} is \code{pgamma(x, a)}. Currently a 6-column matrix is returned (in the future this may change and an argument may be supplied so that only what is required by the user is computed.) The computations use a series expansion for \eqn{a \leq x \leq 1}{a <= x <= 1} or or \eqn{x < a}, else otherwise a continued fraction expansion. Machine overflow can occur for large values of \eqn{x} when \eqn{x} is much greater than \eqn{a}. } \value{ The first 5 columns, running from left to right, are the derivatives with respect to: \eqn{x}, \eqn{x^2}, \eqn{a}, \eqn{a^2}, \eqn{xa}. The 6th column is \eqn{P(a, x)} (but it is not as accurate as calling \code{\link[stats]{pgamma}} directly). } \references{ Moore, R. J. (1982) Algorithm AS 187: Derivatives of the Incomplete Gamma Integral. \emph{Journal of the Royal Statistical Society, Series C} \emph{(Applied Statistics)}, \bold{31}(3), 330--335. } \author{ T. W. Yee wrote the wrapper function to the Fortran subroutine written by R. J. Moore. The subroutine was modified to run using double precision. The original code came from \code{http://lib.stat.cmu.edu/apstat/187}. 
but this website has since become stale. } \note{ If convergence does not occur then try increasing the value of \code{tmax}. Yet to do: add more arguments to give greater flexibility in the accuracy desired and to compute only quantities that are required by the user. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{pgamma.deriv.unscaled}}, \code{\link[stats]{pgamma}}. } \examples{ x <- seq(2, 10, length = 501) head(ans <- pgamma.deriv(x, 2)) \dontrun{ par(mfrow = c(2, 3)) for (jay in 1:6) plot(x, ans[, jay], type = "l", col = "blue", cex.lab = 1.5, cex.axis = 1.5, las = 1, log = "x", main = colnames(ans)[jay], xlab = "q", ylab = "") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{math} % Some part of R-2.15.2/src/library/stats/man/GammaDist.Rd used % An error in the article? % I believe comments in the code (C in fortran). % for \eqn{a \leq x \leq 1}{a <= x <= 1}, and VGAM/man/hunua.Rd0000644000176200001440000000403613565414527013205 0ustar liggesusers\name{hunua} \alias{hunua} \docType{data} \title{Hunua Ranges Data} \description{ The \code{hunua} data frame has 392 rows and 18 columns. Altitude is explanatory, and there are binary responses (presence/absence = 1/0 respectively) for 17 plant species. } \usage{data(hunua)} \format{ This data frame contains the following columns: \describe{ \item{agaaus}{Agathis australis, or Kauri} \item{beitaw}{Beilschmiedia tawa, or Tawa} \item{corlae}{Corynocarpus laevigatus} \item{cyadea}{Cyathea dealbata} \item{cyamed}{Cyathea medullaris} \item{daccup}{Dacrydium cupressinum} \item{dacdac}{Dacrycarpus dacrydioides} \item{eladen}{Elaecarpus dentatus} \item{hedarb}{Hedycarya arborea} \item{hohpop}{Species name unknown} \item{kniexc}{Knightia excelsa, or Rewarewa} \item{kuneri}{Kunzea ericoides} \item{lepsco}{Leptospermum scoparium} \item{metrob}{Metrosideros robusta} \item{neslan}{Nestegis lanceolata} \item{rhosap}{Rhopalostylis sapida} \item{vitluc}{Vitex lucens, or Puriri} \item{altitude}{meters above sea level} } } \details{ These were collected from the Hunua Ranges, a small forest in southern Auckland, New Zealand. At 392 sites in the forest, the presence/absence of 17 plant species was recorded, as well as the altitude. Each site was of area size 200\eqn{m^2}{m^2}. } \source{ Dr Neil Mitchell, University of Auckland. } %\references{ % None. %} \seealso{ \code{\link{waitakere}}. } \examples{ # Fit a GAM using vgam() and compare it with the Waitakere Ranges one fit.h <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua) \dontrun{ plot(fit.h, se = TRUE, lcol = "orange", scol = "orange", llwd = 2, slwd = 2, main = "Orange is Hunua, Blue is Waitakere") } head(predict(fit.h, hunua, type = "response")) fit.w <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = waitakere) \dontrun{ plot(fit.w, se = TRUE, lcol = "blue", scol = "blue", add = TRUE) } head(predict(fit.w, hunua, type = "response")) # Same as above? } \keyword{datasets} VGAM/man/Coef.vlm.Rd0000644000176200001440000000321713565414527013536 0ustar liggesusers\name{Coef.vlm} \alias{Coef.vlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Extract Model Coefficients for VLM Objects } \description{ Amongst other things, this function applies inverse link functions to the parameters of intercept-only VGLMs. } \usage{ Coef.vlm(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A fitted model. 
} \item{\dots}{ Arguments which may be passed into \code{\link[stats]{coef}}. } } \details{ Most \pkg{VGAM} family functions apply a link function to the parameters, e.g., positive parameter are often have a log link, parameters between 0 and 1 have a logit link. This function can back-transform the parameter estimate to the original scale. } \value{ For intercept-only models (e.g., formula is \code{y ~ 1}) the back-transformed parameter estimates can be returned. } \references{ Yee, T. W. and Hastie, T. J. (2003) Reduced-rank vector generalized linear models. \emph{Statistical Modelling}, \bold{3}, 15--41. } \author{ Thomas W. Yee } %\note{ ~~further notes~~ } \section{Warning }{ This function may not work for \emph{all} \pkg{VGAM} family functions. You should check your results on some artificial data before applying it to models fitted to real data. } \seealso{ \code{\link{Coef}}, \code{\link[stats]{coef}}. } \examples{ set.seed(123); nn <- 1000 bdata <- data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3)) fit <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE) # intercept-only model coef(fit, matrix = TRUE) # log scale Coef(fit) # On the original scale } \keyword{models} \keyword{regression} VGAM/man/cumulative.Rd0000644000176200001440000003012513565414527014241 0ustar liggesusers\name{cumulative} \alias{cumulative} %\alias{scumulative} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Ordinal Regression with Cumulative Probabilities } \description{ Fits a cumulative link regression model to a (preferably ordered) factor response. } \usage{ cumulative(link = "logitlink", parallel = FALSE, reverse = FALSE, multiple.responses = FALSE, whitespace = FALSE) } %apply.parint = FALSE, %scumulative(link = "logitlink", % lscale = "loglink", escale = list(), % parallel = FALSE, sparallel = TRUE, reverse = FALSE, iscale = 1) %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to the \eqn{J} cumulative probabilities. See \code{\link{Links}} for more choices, e.g., for the cumulative \code{\link{probitlink}}/\code{\link{clogloglink}}/\code{\link{cauchitlink}}/\ldots models. } % \item{lscale}{ % Link function applied to the \eqn{J} scaling parameters. % See \code{\link{Links}} for more choices. % % } \item{parallel}{ A logical or formula specifying which terms have equal/unequal coefficients. See below for more information about the parallelism assumption. The default results in what some people call the \emph{generalized ordered logit model} to be fitted. If \code{parallel = TRUE} then it does not apply to the intercept. The \emph{partial proportional odds model} can be fitted by assigning this argument something like \code{parallel = TRUE ~ -1 + x3 + x5} so that there is one regression coefficient for \code{x3} and \code{x5}. Equivalently, setting \code{parallel = FALSE ~ 1 + x2 + x4} means \eqn{M} regression coefficients for the intercept and \code{x2} and \code{x4}. It is important that the intercept is never parallel. } % \item{sparallel}{ % For the scaling parameters. % A logical, or formula specifying which terms have % equal/unequal coefficients. % This argument is not applied to the intercept. % The \code{scumulative()} function requires covariates; for % intercept models use \code{cumulative()}. % } \item{reverse}{ Logical. By default, the cumulative probabilities used are \eqn{P(Y\leq 1)}{P(Y<=1)}, \eqn{P(Y\leq 2)}{P(Y<=2)}, \dots, \eqn{P(Y\leq J)}{P(Y<=J)}. 
If \code{reverse} is \code{TRUE} then \eqn{P(Y\geq 2)}{P(Y>=2)}, \eqn{P(Y\geq 3)}{P(Y>=3)}, \dots, \eqn{P(Y\geq J+1)}{P(Y>=J+1)} are used. This should be set to \code{TRUE} for \code{link=} \code{\link{gordlink}}, \code{\link{pordlink}}, \code{\link{nbordlink}}. For these links the cutpoints must be an increasing sequence; if \code{reverse = FALSE} for then the cutpoints must be an decreasing sequence. } \item{multiple.responses}{ Logical. Multiple responses? If \code{TRUE} then the input should be a matrix with values \eqn{1,2,\dots,L}, where \eqn{L=J+1} is the number of levels. Each column of the matrix is a response, i.e., multiple responses. A suitable matrix can be obtained from \code{Cut}. } % \item{apply.parint}{ % Logical. % Whether the \code{parallel} argument should be applied to the intercept term. % This should be set to \code{TRUE} for \code{link=} % \code{\link{gordlink}}, % \code{\link{pordlink}}, % \code{\link{nbordlink}}. % See \code{\link{CommonVGAMffArguments}} for more information. % % % } % \item{iscale}{ % Numeric. Initial values for the scale parameters. % } \item{whitespace}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ In this help file the response \eqn{Y} is assumed to be a factor with ordered values \eqn{1,2,\dots,J+1}. Hence \eqn{M} is the number of linear/additive predictors \eqn{\eta_j}{eta_j}; for \code{cumulative()} one has \eqn{M=J}. % and for \code{scumulative()} \eqn{M=2J}. This \pkg{VGAM} family function fits the class of \emph{cumulative link models} to (hopefully) an ordinal response. By default, the \emph{non-parallel} cumulative logit model is fitted, i.e., \deqn{\eta_j = logit(P[Y \leq j])}{% eta_j = logit(P[Y<=j])} where \eqn{j=1,2,\dots,M} and the \eqn{\eta_j}{eta_j} are not constrained to be parallel. This is also known as the \emph{non-proportional odds model}. If the logit link is replaced by a complementary log-log link (\code{\link{clogloglink}}) then this is known as the \emph{proportional-hazards model}. In almost all the literature, the constraint matrices associated with this family of models are known. For example, setting \code{parallel = TRUE} will make all constraint matrices (except for the intercept) equal to a vector of \eqn{M} 1's. If the constraint matrices are equal, unknown and to be estimated, then this can be achieved by fitting the model as a reduced-rank vector generalized linear model (RR-VGLM; see \code{\link{rrvglm}}). Currently, reduced-rank vector generalized additive models (RR-VGAMs) have not been implemented here. % The scaled version of \code{cumulative()}, called \code{scumulative()}, % has \eqn{J} positive scaling factors. % They are described in pages 154 and 177 of McCullagh and Nelder (1989); % see their equation (5.4) in particular, % which they call the \emph{generalized rational model}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Agresti, A. (2013) \emph{Categorical Data Analysis}, 3rd ed. Hoboken, NJ, USA: Wiley. Agresti, A. (2010) \emph{Analysis of Ordinal Categorical Data}, 2nd ed. Hoboken, NJ, USA: Wiley. Dobson, A. J. and Barnett, A. (2008) \emph{An Introduction to Generalized Linear Models}, 3rd ed. Boca Raton, FL, USA: Chapman & Hall/CRC Press. McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. Simonoff, J. S. (2003) \emph{Analyzing Categorical Data}, New York: Springer-Verlag. 
Yee, T. W. (2010) The \pkg{VGAM} package for categorical data analysis. \emph{Journal of Statistical Software}, \bold{32}, 1--34. \url{http://www.jstatsoft.org/v32/i10/}. Yee, T. W. and Wild, C. J. (1996) Vector generalized additive models. \emph{Journal of the Royal Statistical Society, Series B, Methodological}, \bold{58}, 481--493. %Further information and examples on categorical data analysis %by the \pkg{VGAM} package can be found at %\url{http://www.stat.auckland.ac.nz/~yee/VGAM/doc/categorical.pdf}. } \author{ Thomas W. Yee } \note{ The response should be either a matrix of counts (with row sums that are all positive), or a factor. In both cases, the \code{y} slot returned by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}} is the matrix of counts. The formula must contain an intercept term. Other \pkg{VGAM} family functions for an ordinal response include \code{\link{acat}}, \code{\link{cratio}}, \code{\link{sratio}}. For a nominal (unordered) factor response, the multinomial logit model (\code{\link{multinomial}}) is more appropriate. With the logit link, setting \code{parallel = TRUE} will fit a proportional odds model. Note that the \code{TRUE} here does not apply to the intercept term. In practice, the validity of the proportional odds assumption needs to be checked, e.g., by a likelihood ratio test (LRT). If acceptable on the data, then numerical problems are less likely to occur during the fitting, and there are less parameters. Numerical problems occur when the linear/additive predictors cross, which results in probabilities outside of \eqn{(0,1)}; setting \code{parallel = TRUE} will help avoid this problem. Here is an example of the usage of the \code{parallel} argument. If there are covariates \code{x2}, \code{x3} and \code{x4}, then \code{parallel = TRUE ~ x2 + x3 -1} and \code{parallel = FALSE ~ x4} are equivalent. This would constrain the regression coefficients for \code{x2} and \code{x3} to be equal; those of the intercepts and \code{x4} would be different. If the data is inputted in \emph{long} format (not \emph{wide} format, as in \code{\link{pneumo}} below) and the self-starting initial values are not good enough then try using \code{mustart}, \code{coefstart} and/or \code{etatstart}. See the example below. To fit the proportional odds model one can use the \pkg{VGAM} family function \code{\link{propodds}}. Note that \code{propodds(reverse)} is equivalent to \code{cumulative(parallel = TRUE, reverse = reverse)} (which is equivalent to \code{cumulative(parallel = TRUE, reverse = reverse, link = "logitlink")}). It is for convenience only. A call to \code{cumulative()} is preferred since it reminds the user that a parallelism assumption is made, as well as being a lot more flexible. % In the future, this family function may be renamed to % ``\code{cups}'' (for \bold{cu}mulative \bold{p}robabilitie\bold{s}) % or ``\code{cute}'' (for \bold{cu}mulative probabili\bold{t}i\bold{e}s). % Please let me know if you strongly agree or disagree about this. } \section{Warning }{ No check is made to verify that the response is ordinal if the response is a matrix; see \code{\link[base:factor]{ordered}}. 
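% A hedged illustration (not run) of the Warning: make the level
% ordering explicit before fitting; the response name and levels
% here are hypothetical:
% mydat$y <- ordered(mydat$y, levels = c("low", "medium", "high"))
% # For a matrix response the column order is taken as the ordering.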
} \seealso{ \code{\link{propodds}}, \code{\link{R2latvar}}, \code{\link{ordsup}}, \code{\link{prplot}}, \code{\link{margeff}}, \code{\link{acat}}, \code{\link{cratio}}, \code{\link{sratio}}, \code{\link{multinomial}}, \code{\link{pneumo}}, \code{\link{Links}}, \code{\link{hdeff.vglm}}, \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link{cauchitlink}}, \code{\link{gordlink}}, \code{\link{pordlink}}, \code{\link{nbordlink}}, \code{\link{logistic1}}. } \examples{ # Fit the proportional odds model, p.179, in McCullagh and Nelder (1989) pneumo <- transform(pneumo, let = log(exposure.time)) (fit <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)) depvar(fit) # Sample proportions (good technique) fit@y # Sample proportions (bad technique) weights(fit, type = "prior") # Number of observations coef(fit, matrix = TRUE) constraints(fit) # Constraint matrices apply(fitted(fit), 1, which.max) # Classification apply(predict(fit, newdata = pneumo, type = "response"), 1, which.max) # Classification R2latvar(fit) # Check that the model is linear in let ---------------------- fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2), cumulative(reverse = TRUE), data = pneumo) \dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2) } # Check the proportional odds assumption with a LRT ---------- (fit3 <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = FALSE, reverse = TRUE), data = pneumo)) pchisq(2 * (logLik(fit3) - logLik(fit)), df = length(coef(fit3)) - length(coef(fit)), lower.tail = FALSE) lrtest(fit3, fit) # More elegant # A factor() version of fit ---------------------------------- # This is in long format (cf. wide format above) Nobs <- round(depvar(fit) * c(weights(fit, type = "prior"))) sumNobs <- colSums(Nobs) # apply(Nobs, 2, sum) pneumo.long <- data.frame(symptoms = ordered(rep(rep(colnames(Nobs), nrow(Nobs)), times = c(t(Nobs))), levels = colnames(Nobs)), let = rep(rep(with(pneumo, let), each = ncol(Nobs)), times = c(t(Nobs)))) with(pneumo.long, table(let, symptoms)) # Should be same as pneumo (fit.long1 <- vglm(symptoms ~ let, data = pneumo.long, trace = TRUE, cumulative(parallel = TRUE, reverse = TRUE))) coef(fit.long1, matrix = TRUE) # Should be as coef(fit, matrix = TRUE) # Could try using mustart if fit.long1 failed to converge. mymustart <- matrix(sumNobs / sum(sumNobs), nrow(pneumo.long), ncol(Nobs), byrow = TRUE) fit.long2 <- vglm(symptoms ~ let, mustart = mymustart, cumulative(parallel = TRUE, reverse = TRUE), data = pneumo.long, trace = TRUE) coef(fit.long2, matrix = TRUE) # Should be as coef(fit, matrix = TRUE) } \keyword{models} \keyword{regression} % pneumo$let <- log(pneumo$exposure.time) VGAM/man/gaitnbinom.mixUC.Rd0000644000176200001440000001460413565414527015242 0ustar liggesusers\name{Gaitnbinom.mix} \alias{Gaitnbinom.mix} \alias{dgaitnbinom.mix} \alias{pgaitnbinom.mix} \alias{qgaitnbinom.mix} \alias{rgaitnbinom.mix} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Altered, -Inflated and -Truncated Negative Binomial Distribution (GAIT--NB--NB--NB mixture) } \description{ Density, distribution function, quantile function and random generation for the generally-altered, -inflated and -truncated negative binomial (NB) distribution, based on mixtures of NB distributions having different support. This distribution is sometimes abbreviated as GAIT--NB--NB--NB. 
} \usage{ dgaitnbinom.mix(x, size.p, prob.p = NULL, munb.p = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, size.a = size.p, size.i = size.p, prob.a = prob.p, prob.i = prob.p, munb.a = munb.p, munb.i = munb.p, log.arg = FALSE) pgaitnbinom.mix(q, size.p, prob.p = NULL, munb.p = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, size.a = size.p, size.i = size.p, prob.a = prob.p, prob.i = prob.p, munb.a = munb.p, munb.i = munb.p) qgaitnbinom.mix(p, size.p, prob.p = NULL, munb.p = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, size.a = size.p, size.i = size.p, prob.a = prob.p, prob.i = prob.p, munb.a = munb.p, munb.i = munb.p) rgaitnbinom.mix(n, size.p, prob.p = NULL, munb.p = NULL, alter = NULL, inflate = NULL, truncate = NULL, max.support = Inf, pobs.a = 0, pstr.i = 0, size.a = size.p, size.i = size.p, prob.a = prob.p, prob.i = prob.p, munb.a = munb.p, munb.i = munb.p) } \arguments{ \item{x, p, q, n, log.arg}{ Same meaning as in \code{\link[stats]{NegBinomial}}. } \item{size.p, size.a, size.i}{ Same meaning as \code{size }in \code{\link[stats]{NegBinomial}}, i.e., for an ordinary NB distribution. The first is for the main \emph{p}arent (inner) distribution, and the outer distribution(s) (usually spikes) concern the \emph{a}ltered and/or \emph{i}nflated values. Short vectors are recycled. } \item{prob.p, prob.a, prob.i}{ Same meaning as \code{prob} in \code{\link[stats]{NegBinomial}}. } \item{munb.p, munb.a, munb.i}{ Same meaning as \code{mu} in \code{\link[stats]{NegBinomial}}. Only one of the \code{prob}-type and \code{munb}-type arguments should be specified. Short vectors are recycled. } \item{alter, inflate, truncate}{ See \code{\link{Gaitnbinom.mlm}}. The order of precedence is the same, viz. truncation first, then altering, and finally inflation. If \code{alter} and \code{pobs.a} are both of unit length then the default probability mass function (PMF) evaluated at \code{alter} is \code{pobs.a}. % , but both \code{alter} and \code{inflate} cannot be % specified together. % Must be sorted and have unique values only. } \item{pobs.a, pstr.i}{ See \code{\link{Gaitpois.mix}}. } \item{max.support}{ Same as \code{\link{Gaitnbinom.mlm}}. } } \details{ These functions are an alternative to \code{\link{Gaitnbinom.mlm}}, and are to \code{\link{Gaitnbinom.mlm}} what \code{\link{Gaitpois.mix}} are to \code{\link{Gaitpois.mlm}}. See \code{\link{Gaitpois.mix}} for parallel information. This distribution may be suitable for heaped data whose parent distribution is overdispersed with respect to the Poisson. In the notation of Yee and Ma (2019) these functions concern the GAIT-NB-NB-NB distribution. For information on the GAIT-NB-MLM-MLM distribution see \code{\link{Gaitnbinom.mlm}}. } %\section{Warning }{ % See \code{\link{rgaitnegbin}}. % The function can run slowly for certain combinations % of \code{pstr.i} and \code{inflate}, e.g., % \code{rgaitnegbin(1e5, 1, inflate = 0:9, pstr.i = (1:10)/100)}. % Failure to obtain random variates will result in some % \code{NA} values instead. % An infinite loop can occur for certain combinations % of \code{lambda} and \code{inflate}, e.g., % \code{rgaitnegbin(10, 1, trunc = 0:100)}. % No action is made to avoid this occurring. 
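% A hedged check (not run) of the minimal-defaults equivalence
% described in the Note below:
% all.equal(dgaitnbinom.mix(0:9, size.p = 10, munb.p = 10),
%           dnbinom(0:9, size = 10, mu = 10))  # Should be TRUE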
%} \value{ \code{dgaitnbinom.mix} gives the density (PMF), \code{pgaitnbinom.mix} gives the distribution function, \code{qgaitnbinom.mix} gives the quantile function, and \code{rgaitnbinom.mix} generates random deviates. } %\references{ %Yee, T. W. and Ma, C. C. (2019) %Generally-altered, -inflated and -truncated count regression, %with application to heaped and seeped data. %\emph{In preparation}. %, \bold{3}, 15--41. %} \author{ T. W. Yee. } \note{ It is intended that the minimal defaults of these functions are equivalent to \code{\link[stats]{NegBinomial}}, e.g., \code{dgaitnbinom.mix(x, size, munb.p = munb)} and \code{dnbinom(x, size, mu = munb)} should be identical, and ditto for the other 3 functions. } \seealso{ \code{\link{Gaitnbinom.mlm}} for the GAIT-NB-MLM-MLM distribution, \code{\link[stats]{NegBinomial}}. } \examples{ ivec <- c(5, 15, 10); avec <- ivec; size = 10; munb <- 10 max.support <- 20; pobs.a <- 0.35; xvec <- 0:max.support # GAT-NB-NB mixture (pmf.a <- dgaitnbinom.mix(xvec, size.p = size, munb.p = munb, max.support = max.support, pobs.a = pobs.a, alter = avec)) sum(pmf.a) # Should be 1 \dontrun{ ind4 <- match(xvec, avec, nomatch = 0) > 0 # xvec \%in\% avec plot(xvec[ ind4], pmf.a[ ind4], type = "h", col = "orange", lwd = 1.1, las = 1, xlim = range(xvec), main = "GAT-NB-NB", ylim = c(0, max(pmf.a)), xlab = "y", ylab = "Probability") # Spikes lines(xvec[!ind4], pmf.a[!ind4], type = "h", col = "blue") } # GIT-NB-NB mixture pstr.i <- 0.15 (pmf.i <- dgaitnbinom.mix(xvec, size.p = size, munb.p = munb, max.support = max.support, pstr.i = pstr.i, inflate = ivec)) sum(pmf.i) # Should be 1 \dontrun{ # Plot the components of pmf.i plot(xvec, (1 - pstr.i) * dnbinom(xvec, size, mu = munb), type = "h", col = "blue", las = 1, xlim = range(xvec), main = "GIT-NB-NB", # The inner distribution ylim = c(0, max(pmf.i)), xlab = "y", ylab = "Probability") spikes <- dnbinom(ivec, size, mu = munb) * pstr.i / sum( dnbinom(ivec, size, mu = munb)) start.pt <- dnbinom(ivec, size, mu = munb) * (1 - pstr.i) / pnbinom(max.support, size, mu = munb) segments(ivec, start.pt, # The outer distribution ivec, start.pt + spikes, col = "orange", lwd = 1.1) } } \keyword{distribution} % 20191026 VGAM/man/biamhcopUC.Rd0000644000176200001440000000306413565414527014077 0ustar liggesusers\name{Biamhcop} \alias{Biamhcop} \alias{dbiamhcop} \alias{pbiamhcop} \alias{rbiamhcop} \title{Ali-Mikhail-Haq Bivariate Distribution} \description{ Density, distribution function, and random generation for the (one parameter) bivariate Ali-Mikhail-Haq distribution. } \usage{ dbiamhcop(x1, x2, apar, log = FALSE) pbiamhcop(q1, q2, apar) rbiamhcop(n, apar) } \arguments{ \item{x1, x2, q1, q2}{vector of quantiles.} \item{n}{number of observations. Same as \code{\link[stats]{runif}} } \item{apar}{the association parameter.} \item{log}{ Logical. If \code{TRUE} then the logarithm is returned. } } \value{ \code{dbiamhcop} gives the density, \code{pbiamhcop} gives the distribution function, and \code{rbiamhcop} generates random deviates (a two-column matrix). } %\references{ % %} \author{ T. W. Yee and C. S. Chee} \details{ See \code{\link{biamhcop}}, the \pkg{VGAM} family functions for estimating the parameter by maximum likelihood estimation, for the formula of the cumulative distribution function and other details. } %\note{ %} \seealso{ \code{\link{biamhcop}}. 
} \examples{ x <- seq(0, 1, len = (N <- 101)); apar <- 0.7 ox <- expand.grid(x, x) zedd <- dbiamhcop(ox[, 1], ox[, 2], apar = apar) \dontrun{ contour(x, x, matrix(zedd, N, N), col = "blue") zedd <- pbiamhcop(ox[, 1], ox[, 2], apar = apar) contour(x, x, matrix(zedd, N, N), col = "blue") plot(r <- rbiamhcop(n = 1000, apar = apar), col = "blue") par(mfrow = c(1, 2)) hist(r[, 1]) # Should be uniform hist(r[, 2]) # Should be uniform } } \keyword{distribution} VGAM/man/concoef-methods.Rd0000644000176200001440000000232113565414527015135 0ustar liggesusers\name{concoef-methods} \docType{methods} %\alias{concoef,ANY-method} \alias{concoef-method} \alias{concoef,cao-method} \alias{concoef,Coef.cao-method} \alias{concoef,rrvglm-method} \alias{concoef,qrrvglm-method} \alias{concoef,Coef.rrvglm-method} \alias{concoef,Coef.qrrvglm-method} % %%\alias{ccoef-method} %%\alias{ccoef,cao-method} %%\alias{ccoef,Coef.cao-method} %%\alias{ccoef,rrvglm-method} %%\alias{ccoef,qrrvglm-method} %%\alias{ccoef,Coef.rrvglm-method} %%\alias{ccoef,Coef.qrrvglm-method} % % This does not work: %\alias{ccoef,cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm-method} % \title{ Constrained (Canonical) Coefficients } \description{ \code{concoef} is a generic function used to return the constrained (canonical) coefficients of a constrained ordination model. The function invokes particular methods which depend on the class of the first argument. } %\usage{ % \S4method{ccoef}{cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm}(object, ...) %} \section{Methods}{ \describe{ \item{object}{ The object from which the constrained coefficients are extracted. } } } \keyword{methods} \keyword{classes} %\keyword{ ~~ other possible keyword(s)} \keyword{models} \keyword{regression} VGAM/man/posnormUC.Rd0000644000176200001440000000375313565414527014017 0ustar liggesusers\name{Posnorm} \alias{Posnorm} \alias{dposnorm} \alias{pposnorm} \alias{qposnorm} \alias{rposnorm} \title{The Positive-Normal Distribution} \description{ Density, distribution function, quantile function and random generation for the univariate positive-normal distribution. } \usage{ dposnorm(x, mean = 0, sd = 1, log = FALSE) pposnorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE) qposnorm(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE) rposnorm(n, mean = 0, sd = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1} then the length is taken to be the number required. } \item{mean, sd, log, lower.tail, log.p}{ see \code{\link[stats:Normal]{rnorm}}. } } \value{ \code{dposnorm} gives the density, \code{pposnorm} gives the distribution function, \code{qposnorm} gives the quantile function, and \code{rposnorm} generates random deviates. } \author{ T. W. Yee } \details{ See \code{\link{posnormal}}, the \pkg{VGAM} family function for estimating the parameters, for the formula of the probability density function and other details. } %\note{ %} \seealso{ \code{\link{posnormal}}. 
} \examples{ \dontrun{ m <- 0.8; x <- seq(-1, 4, len = 501) plot(x, dposnorm(x, m = m), type = "l", las = 1, ylim = 0:1, ylab = paste("posnorm(m = ", m, ", sd = 1)"), col = "blue", main = "Blue is density, orange is cumulative distribution function", sub = "Purple lines are the 10,20,...,90 percentiles") abline(h = 0, col = "grey") lines(x, pposnorm(x, m = m), col = "orange", type = "l") probs <- seq(0.1, 0.9, by = 0.1) Q <- qposnorm(probs, m = m) lines(Q, dposnorm(Q, m = m), col = "purple", lty = 3, type = "h") lines(Q, pposnorm(Q, m = m), col = "purple", lty = 3, type = "h") abline(h = probs, col = "purple", lty = 3) max(abs(pposnorm(Q, m = m) - probs)) # Should be 0 } } \keyword{distribution} % 20150207; bug involving ifelse() picked up for qposnorm(). VGAM/man/rrvglm.Rd0000644000176200001440000002376413565414527013407 0ustar liggesusers\name{rrvglm} \alias{rrvglm} %- Also NEED an `\alias' for EACH other topic documented here. \title{ Fitting Reduced-Rank Vector Generalized Linear Models (RR-VGLMs) } \description{ A \emph{reduced-rank vector generalized linear model} (RR-VGLM) is fitted. RR-VGLMs are VGLMs but some of the constraint matrices are estimated. In this documentation, \eqn{M} is the number of linear predictors. } \usage{ rrvglm(formula, family = stop("argument 'family' needs to be assigned"), data = list(), weights = NULL, subset = NULL, na.action = na.fail, etastart = NULL, mustart = NULL, coefstart = NULL, control = rrvglm.control(...), offset = NULL, method = "rrvglm.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, contrasts = NULL, constraints = NULL, extra = NULL, qr.arg = FALSE, smart = TRUE, ...) } %- maybe also `usage' for other objects documented here. \arguments{ \item{formula, family, weights}{ See \code{\link{vglm}}. } \item{data}{ an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which \code{rrvglm} is called. } \item{subset, na.action}{ See \code{\link{vglm}}. } \item{etastart, mustart, coefstart}{ See \code{\link{vglm}}. } \item{control}{ a list of parameters for controlling the fitting process. See \code{\link{rrvglm.control}} for details. } \item{offset, model, contrasts}{ See \code{\link{vglm}}. } \item{method}{ the method to be used in fitting the model. The default (and presently only) method \code{rrvglm.fit} uses iteratively reweighted least squares (IRLS). } \item{x.arg, y.arg}{ logical values indicating whether the model matrix and response vector/matrix used in the fitting process should be assigned in the \code{x} and \code{y} slots. Note the model matrix is the LM model matrix; to get the VGLM model matrix type \code{model.matrix(vglmfit)} where \code{vglmfit} is a \code{vglm} object. } \item{constraints}{ See \code{\link{vglm}}. } \item{extra, smart, qr.arg}{ See \code{\link{vglm}}. } \item{\dots}{ further arguments passed into \code{\link{rrvglm.control}}. } } \details{ The central formula is given by \deqn{\eta = B_1^T x_1 + A \nu}{% eta = B_1^T x_1 + A nu} where \eqn{x_1}{x1} is a vector (usually just a 1 for an intercept), \eqn{x_2}{x2} is another vector of explanatory variables, and \eqn{\nu = C^T x_2}{nu = C^T x_2} is an \eqn{R}-vector of latent variables. Here, \eqn{\eta}{eta} is a vector of linear predictors, e.g., the \eqn{m}th element is \eqn{\eta_m = \log(E[Y_m])}{eta_m = log(E[Y_m])} for the \eqn{m}th Poisson response. 
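Substituting \eqn{\nu = C^T x_2}{nu = C^T x_2} into the central formula
makes the rank reduction explicit: the regression coefficients of
\eqn{x_2}{x2} form the matrix
\deqn{B_2 = C A^T}{B_2 = C A^T}
so that \eqn{\eta = B_1^T x_1 + B_2^T x_2}{eta = B_1^T x_1 + B_2^T x_2},
where the (usually large) matrix \eqn{B_2}{B_2} has rank at most
\eqn{R}; this is the source of the parsimony of RR-VGLMs relative to an
unconstrained VGLM.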
The matrices \eqn{B_1}, \eqn{A} and \eqn{C} are estimated from the
data, i.e., contain the regression coefficients.  For ecologists, the
central formula represents a \emph{constrained linear ordination} (CLO)
since it is linear in the latent variables.  It means that the response
is a monotonically increasing or decreasing function of the latent
variables.

For identifiability it is common to enforce \emph{corner constraints}
on \eqn{A}: by default, the top \eqn{R} by \eqn{R} submatrix is fixed
to be the order-\eqn{R} identity matrix and the remainder of \eqn{A}
is estimated.

The underlying algorithm of RR-VGLMs is iteratively reweighted least
squares (IRLS) with an optimizing algorithm applied within each IRLS
iteration (e.g., alternating algorithm).

In theory, any \pkg{VGAM} family function that works for
\code{\link{vglm}} and \code{\link{vgam}} should work for
\code{rrvglm} too.

The function that actually does the work is \code{rrvglm.fit};
it is \code{vglm.fit} with some extra code.

}
\value{
  An object of class \code{"rrvglm"}, which has the same slots as a
  \code{"vglm"} object.  The only difference is that some of the
  constraint matrices are estimated rather than known.
  Internally, however, \pkg{VGAM} stores both types of model the same
  way.
  The slots of \code{"vglm"} objects are described in
  \code{\link{vglm-class}}.

}
\references{

Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.

Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.

Anderson, J. A. (1984)
Regression and ordered categorical variables.
\emph{Journal of the Royal Statistical Society, Series B,
Methodological},
\bold{46}, 1--30.

Yee, T. W. (2014)
Reduced-rank vector generalized linear models with two linear
predictors.
\emph{Computational Statistics and Data Analysis},
\bold{71}, 889--902.

% Documentation accompanying the \pkg{VGAM} package at
% \url{http://www.stat.auckland.ac.nz/~yee}
% contains further information and examples.

}
\author{ Thomas W. Yee }
\note{
  The arguments of \code{rrvglm} are in general the same as those of
  \code{\link{vglm}} but with some extras in
  \code{\link{rrvglm.control}}.

  The smart prediction (\code{\link{smartpred}}) library is packaged
  with the \pkg{VGAM} library.

  In an example below, a rank-2 \emph{stereotype} model of Anderson
  (1984) is fitted to some car data.  The reduced-rank regression is
  performed, adjusting for two covariates.  Setting a trivial
  constraint matrix (\code{diag(M)}) for the variables in
  \eqn{x_2}{x2} avoids a warning message when it is overwritten by a
  (common) estimated constraint matrix.  It shows that German cars
  tend to be more expensive than American cars, given a car of fixed
  weight and width.

  If \code{fit <- rrvglm(..., data = mydata)} then \code{summary(fit)}
  requires corner constraints and no missing values in \code{mydata}.
  Often the estimated variance-covariance matrix of the parameters is
  not positive-definite; if this occurs, try refitting the model with
  a different value for \code{Index.corner}.

  For \emph{constrained quadratic ordination} (CQO) see
  \code{\link{cqo}} for more details about QRR-VGLMs.

  With multiple binary responses, one must use
  \code{binomialff(multiple.responses = TRUE)} to indicate that the
  response is a matrix with one response per column.  Otherwise, it is
  interpreted as a single binary response variable.

}

% zzz; arguments of \code{\link{vglm}} are definitive.
They're copied here. \seealso{ \code{\link{rrvglm.control}}, \code{\link{lvplot.rrvglm}} (same as \code{\link{biplot.rrvglm}}), \code{\link{rrvglm-class}}, \code{\link{grc}}, \code{\link{cqo}}, \code{\link{vglmff-class}}, \code{\link{vglm}}, \code{\link{vglm-class}}, \code{\link{smartpred}}, \code{rrvglm.fit}. Special family functions include \code{\link{negbinomial}} \code{\link{zipoisson}} and \code{\link{zinegbinomial}}. (see Yee (2014) and \pkg{COZIGAM}). Methods functions include \code{\link{Coef.rrvglm}}, \code{\link{calibrate.rrvglm}}, \code{summary.rrvglm}, etc. Data include \code{\link{crashi}}. % \code{\link{qrrvglm.control}}, % \code{\link{vcovqrrvglm}}, } \examples{ \dontrun{ # Example 1: RR negative binomial with Var(Y) = mu + delta1 * mu^delta2 nn <- 1000 # Number of observations delta1 <- 3.0 # Specify this delta2 <- 1.5 # Specify this; should be greater than unity a21 <- 2 - delta2 mydata <- data.frame(x2 = runif(nn), x3 = runif(nn)) mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3)) mydata <- transform(mydata, y2 = rnbinom(nn, mu = mu, size = (1/delta1)*mu^a21)) plot(y2 ~ x2, data = mydata, pch = "+", col = 'blue', las = 1, main = paste("Var(Y) = mu + ", delta1, " * mu^", delta2, sep = "")) rrnb2 <- rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL), data = mydata, trace = TRUE) a21.hat <- (Coef(rrnb2)@A)["loglink(size)", 1] beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "loglink(mu)"] beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "loglink(size)"] (delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat)) (delta2.hat <- 2 - a21.hat) # exp(a21.hat * predict(rrnb2)[1,1] - predict(rrnb2)[1,2]) # delta1.hat summary(rrnb2) # Obtain a 95 percent confidence interval for delta2: se.a21.hat <- sqrt(vcov(rrnb2)["I(latvar.mat)", "I(latvar.mat)"]) ci.a21 <- a21.hat + c(-1, 1) * 1.96 * se.a21.hat (ci.delta2 <- 2 - rev(ci.a21)) # The 95 percent confidence interval Confint.rrnb(rrnb2) # Quick way to get it # Plot the abundances and fitted values against the latent variable plot(y2 ~ latvar(rrnb2), data = mydata, col = "blue", xlab = "Latent variable", las = 1) ooo <- order(latvar(rrnb2)) lines(fitted(rrnb2)[ooo] ~ latvar(rrnb2)[ooo], col = "orange") # Example 2: stereotype model (reduced-rank multinomial logit model) data(car.all) scar <- subset(car.all, is.element(Country, c("Germany", "USA", "Japan", "Korea"))) fcols <- c(13,14,18:20,22:26,29:31,33,34,36) # These are factors scar[, -fcols] <- scale(scar[, -fcols]) # Standardize all numerical vars ones <- matrix(1, 3, 1) clist <- list("(Intercept)" = diag(3), Width = ones, Weight = ones, Disp. = diag(3), Tank = diag(3), Price = diag(3), Frt.Leg.Room = diag(3)) set.seed(111) fit <- rrvglm(Country ~ Width + Weight + Disp. 
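                # Width and Weight are adjusted-for covariates (cf. 'noRRR'
                # below); the others enter the rank-2 reduced-rank term.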
+ Tank + Price + Frt.Leg.Room, multinomial, data = scar, Rank = 2, trace = TRUE, constraints = clist, noRRR = ~ 1 + Width + Weight, Uncor = TRUE, Corner = FALSE, Bestof = 2) fit@misc$deviance # A history of the fits Coef(fit) biplot(fit, chull = TRUE, scores = TRUE, clty = 2, Ccex = 2, ccol = "blue", scol = "orange", Ccol = "darkgreen", Clwd = 2, main = "1=Germany, 2=Japan, 3=Korea, 4=USA") } } \keyword{models} \keyword{regression} %index <- with(car.all, Country == "Germany" | Country == "USA" | % Country == "Japan" | Country == "Korea") %scar <- car.all[index, ] # standardized car data %scar <- subset(car.all, % is.element(Country, c("Germany", "USA", "Japan", "Korea")) | % is.na(Country)) VGAM/man/cauchitlink.Rd0000644000176200001440000001106413565414527014362 0ustar liggesusers\name{cauchitlink} \alias{cauchitlink} %\alias{cauchit} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Cauchit Link Function } \description{ Computes the cauchit (tangent) link transformation, including its inverse and the first two derivatives. } \usage{ cauchitlink(theta, bvalue = .Machine$double.eps, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bvalue}{ See \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ This link function is an alternative link function for parameters that lie in the unit interval. This type of link bears the same relation to the Cauchy distribution as the probit link bears to the Gaussian. One characteristic of this link function is that the tail is heavier relative to the other links (see examples below). Numerical values of \code{theta} close to 0 or 1 or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. } \value{ For \code{deriv = 0}, the tangent of \code{theta}, i.e., \code{tan(pi * (theta-0.5))} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{0.5 + atan(theta)/pi}. For \code{deriv = 1}, then the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ Numerical instability may occur when \code{theta} is close to 1 or 0. One way of overcoming this is to use \code{bvalue}. As mentioned above, in terms of the threshold approach with cumulative probabilities for an ordinal response this link function corresponds to the Cauchy distribution (see \code{\link{cauchy1}}). } \seealso{ \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link{loglink}}, \code{\link{cauchy}}, \code{\link{cauchy1}}, \code{\link[stats]{Cauchy}}. 
} \examples{ p <- seq(0.01, 0.99, by = 0.01) cauchitlink(p) max(abs(cauchitlink(cauchitlink(p), inverse = TRUE) - p)) # Should be 0 p <- c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by = 0.01)) cauchitlink(p) # Has no NAs \dontrun{ par(mfrow = c(2, 2), lwd = (mylwd <- 2)) y <- seq(-4, 4, length = 100) p <- seq(0.01, 0.99, by = 0.01) for (d in 0:1) { matplot(p, cbind(logitlink(p, deriv = d), probitlink(p, deriv = d)), type = "n", col = "purple", ylab = "transformation", las = 1, main = if (d == 0) "Some probability link functions" else "First derivative") lines(p, logitlink(p, deriv = d), col = "limegreen") lines(p, probitlink(p, deriv = d), col = "purple") lines(p, clogloglink(p, deriv = d), col = "chocolate") lines(p, cauchitlink(p, deriv = d), col = "tan") if (d == 0) { abline(v = 0.5, h = 0, lty = "dashed") legend(0, 4.5, c("logitlink", "probitlink", "clogloglink", "cauchitlink"), lwd = mylwd, col = c("limegreen", "purple", "chocolate", "tan")) } else abline(v = 0.5, lty = "dashed") } for (d in 0) { matplot(y, cbind( logitlink(y, deriv = d, inverse = TRUE), probitlink(y, deriv = d, inverse = TRUE)), type = "n", col = "purple", xlab = "transformation", ylab = "p", main = if (d == 0) "Some inverse probability link functions" else "First derivative", las=1) lines(y, logitlink(y, deriv = d, inverse = TRUE), col = "limegreen") lines(y, probitlink(y, deriv = d, inverse = TRUE), col = "purple") lines(y, clogloglink(y, deriv = d, inverse = TRUE), col = "chocolate") lines(y, cauchitlink(y, deriv = d, inverse = TRUE), col = "tan") if (d == 0) { abline(h = 0.5, v = 0, lty = "dashed") legend(-4, 1, c("logitlink", "probitlink", "clogloglink", "cauchitlink"), lwd = mylwd, col = c("limegreen", "purple", "chocolate", "tan")) } } par(lwd = 1) } } \keyword{math} \keyword{models} \keyword{regression} %plot(y, logitlink(y, inverse = TRUE), type = "l", col = "limegreen", % xlab = "transformation", ylab = "p", % lwd=2, las=1, main = "Some inverse probability link functions") %lines(y, probitlink(y, inverse = TRUE), col = "purple", lwd=2) %lines(y, clogloglink(y, inverse = TRUE), col = "chocolate", lwd=2) %abline(h=0.5, v = 0, lty = "dashed") VGAM/man/zipoisson.Rd0000644000176200001440000003105313565414527014121 0ustar liggesusers\name{zipoisson} \alias{zipoisson} \alias{zipoissonff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zero-Inflated Poisson Distribution Family Function } \description{ Fits a zero-inflated or zero-deflated Poisson distribution by full maximum likelihood estimation. } \usage{ zipoisson(lpstr0 = "logitlink", llambda = "loglink", type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"), ipstr0 = NULL, ilambda = NULL, gpstr0 = NULL, imethod = 1, ishrinkage = 0.95, probs.y = 0.35, parallel = FALSE, zero = NULL) zipoissonff(llambda = "loglink", lonempstr0 = "logitlink", type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"), ilambda = NULL, ionempstr0 = NULL, gonempstr0 = NULL, imethod = 1, ishrinkage = 0.95, probs.y = 0.35, zero = "onempstr0") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lpstr0, llambda}{ Link function for the parameter \eqn{\phi}{phi} and the usual \eqn{\lambda}{lambda} parameter. See \code{\link{Links}} for more choices; see \code{\link{CommonVGAMffArguments}} for more information. For the zero-\emph{deflated} model see below. } \item{ipstr0, ilambda}{ Optional initial values for \eqn{\phi}{phi}, whose values must lie between 0 and 1. 
Optional initial values for \eqn{\lambda}{lambda}, whose values must be positive. The defaults are to compute an initial value internally for each. If a vector then recycling is used. } \item{lonempstr0, ionempstr0}{ Corresponding arguments for the other parameterization. See details below. } \item{type.fitted}{ Character. The type of fitted value to be returned. The first choice (the expected value) is the default. The estimated probability of an observed 0 is an alternative, else the estimated probability of a structural 0, or one minus the estimated probability of a structural 0. See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}} for more information. } \item{imethod}{ An integer with value \code{1} or \code{2} which specifies the initialization method for \eqn{\lambda}{lambda}. If failure to converge occurs try another value and/or else specify a value for \code{ishrinkage} and/or else specify a value for \code{ipstr0}. See \code{\link{CommonVGAMffArguments}} for more information. } \item{ishrinkage}{ How much shrinkage is used when initializing \eqn{\lambda}{lambda}. The value must be between 0 and 1 inclusive, and a value of 0 means the individual response values are used, and a value of 1 means the median or mean is used. This argument is used in conjunction with \code{imethod}. See \code{\link{CommonVGAMffArguments}} for more information. } \item{zero}{ Specifies which linear/additive predictors are to be modelled as intercept-only. If given, the value can be either 1 or 2, and the default is none of them. Setting \code{zero = 1} makes \eqn{\phi}{phi} a single parameter. See \code{\link{CommonVGAMffArguments}} for more information. } \item{gpstr0, gonempstr0, probs.y}{ Details at \code{\link{CommonVGAMffArguments}}. } \item{parallel}{ Details at \code{\link{CommonVGAMffArguments}}, but unlikely to be practically used actually. } } \details{ These models are a mixture of a Poisson distribution and the value 0; it has value 0 with probability \eqn{\phi}{phi} else is Poisson(\eqn{\lambda}{lambda}) distributed. Thus there are two sources for zero values, and \eqn{\phi}{phi} is the probability of a \emph{structural zero}. The model for \code{zipoisson()} can be written \deqn{P(Y = 0) = \phi + (1-\phi) \exp(-\lambda),}{% P(Y = 0) = phi + (1-phi) * exp(-lambda),} and for \eqn{y=1,2,\ldots}, \deqn{P(Y = y) = (1-\phi) \exp(-\lambda) \lambda^y / y!.}{% P(Y = y) = (1-phi) * exp(-lambda) * lambda^y / y!.} Here, the parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}. The mean of \eqn{Y} is \eqn{(1-\phi) \lambda}{(1-phi)*lambda} and these are returned as the fitted values, by default. The variance of \eqn{Y} is \eqn{(1-\phi) \lambda (1 + \phi \lambda)}{ (1-phi)*lambda*(1 + phi lambda)}. By default, the two linear/additive predictors of \code{zipoisson()} are \eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi), log(lambda))^T}. The \pkg{VGAM} family function \code{zipoissonff()} has a few changes compared to \code{zipoisson()}. These are: (i) the order of the linear/additive predictors is switched so the Poisson mean comes first; (ii) \code{onempstr0} is now 1 minus the probability of a structural 0, i.e., the probability of the parent (Poisson) component, i.e., \code{onempstr0} is \code{1-pstr0}; (iii) argument \code{zero} has a new default so that the \code{onempstr0} is intercept-only by default. Now \code{zipoissonff()} is generally recommended over \code{zipoisson()} (and definitely recommended over \code{\link{yip88}}). 
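As a quick numerical check of the above formula for
\eqn{P(Y=0)}{P(Y=0)}, the value of
\code{dzipois(0, lambda = 1, pstr0 = 0.2)} should equal
\code{0.2 + 0.8 * exp(-1)}; see \code{\link{Zipois}}.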
Both functions implement Fisher scoring and can handle multiple responses. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Thas, O. and Rayner, J. C. W. (2005) Smooth tests for the zero-inflated Poisson distribution. \emph{Biometrics}, \bold{61}, 808--815. Data: Angers, J-F. and Biswas, A. (2003) A Bayesian analysis of zero-inflated generalized Poisson model. \emph{Computational Statistics & Data Analysis}, \bold{42}, 37--46. Cameron, A. C. and Trivedi, P. K. (1998) \emph{Regression Analysis of Count Data}. Cambridge University Press: Cambridge. Yee, T. W. (2014) Reduced-rank vector generalized linear models with two linear predictors. \emph{Computational Statistics and Data Analysis}, \bold{71}, 889--902. } \author{ T. W. Yee } \note{ % The \code{misc} slot has a component called % \code{pobs0} which is the estimate of \eqn{P(Y = 0)}. % Note that \eqn{P(Y = 0)} is not the parameter \eqn{\phi}{phi}. % The estimated probability of a structural 0 is returned in % the \code{misc} slot with component name \code{pstr0}. This family function can be used to estimate the 0-\emph{deflated} model, hence \code{pstr0} is not to be interpreted as a probability. One should set, e.g., \code{lpstr0 = "identitylink"}. Likewise, the functions in \code{\link{Zipois}} can handle the zero-deflated Poisson distribution too. Although the iterations might fall outside the parameter space, the \code{validparams} slot should keep them inside. A (somewhat) similar alternative for zero-deflation is to try the zero-altered Poisson model (see \code{\link{zapoisson}}). % Practically, it is restricted to intercept-models only % (see example below). % Also, one might need inputting good initial values % or using a simpler model to obtain initial values. % If there is a covariate then it is best to % constrain \code{pstr0} to be intercept-only, e.g., % by \code{zipoisson(lpstr0 = identitylink, zero = -1)}. The use of this \pkg{VGAM} family function with \code{\link{rrvglm}} can result in a so-called COZIGAM or COZIGLM. That is, a reduced-rank zero-inflated Poisson model (RR-ZIP) is a constrained zero-inflated generalized linear model. See \pkg{COZIGAM}. A RR-ZINB model can also be fitted easily; see \code{\link{zinegbinomial}}. Jargon-wise, a COZIGLM might be better described as a COZIVGLM-ZIP. } \section{Warning }{ Numerical problems can occur, e.g., when the probability of zero is actually less than, not more than, the nominal probability of zero. For example, in the Angers and Biswas (2003) data below, replacing 182 by 1 results in nonconvergence. Half-stepping is not uncommon. If failure to converge occurs, try using combinations of \code{imethod}, \code{ishrinkage}, \code{ipstr0}, and/or \code{zipoisson(zero = 1)} if there are explanatory variables. The default for \code{zipoissonff()} is to model the structural zero probability as an intercept-only. } \seealso{ \code{\link{gatpoisson.mlm}}, \code{\link{zapoisson}}, \code{\link{Zipois}}, \code{\link{yip88}}, \code{\link{rrvglm}}, \code{\link{zipebcom}}, \code{\link[stats:Poisson]{rpois}}, \code{\link{simulate.vlm}}, \code{\link{hdeff.vglm}}. 
}
\examples{
# Example 1: simulated ZIP data
zdata <- data.frame(x2 = runif(nn <- 1000))
zdata <- transform(zdata,
                   pstr01  = logitlink(-0.5 + 1*x2, inverse = TRUE),
                   pstr02  = logitlink( 0.5 - 1*x2, inverse = TRUE),
                   Ps01    = logitlink(-0.5       , inverse = TRUE),
                   Ps02    = logitlink( 0.5       , inverse = TRUE),
                   lambda1 = loglink(-0.5 + 2*x2, inverse = TRUE),
                   lambda2 = loglink( 0.5 + 2*x2, inverse = TRUE))
zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01),
                          y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02))
with(zdata, table(y1))  # Eyeball the data
with(zdata, table(y2))

fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
coef(fit1, matrix = TRUE)  # These should agree with the above values
coef(fit2, matrix = TRUE)  # These should agree with the above values

# Fit both simultaneously, using a different parameterization:
fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, data = zdata, crit = "coef")
coef(fit12, matrix = TRUE)  # These should agree with the above values

# For the first observation compute the probability that y1 is
# due to a structural zero.
(fitted(fit1, type = "pstr0") / fitted(fit1, type = "pobs0"))[1]

# Example 2: McKendrick (1926). Data from 223 Indian village households
cholera <- data.frame(ncases = 0:4,  # Number of cholera cases,
                      wfreq  = c(168, 32, 16, 6, 1))  # Frequencies
fit <- vglm(ncases ~ 1, zipoisson, wei = wfreq, cholera, trace = TRUE)
coef(fit, matrix = TRUE)
with(cholera, cbind(actual = wfreq,
                    fitted = round(dzipois(ncases, lambda = Coef(fit)[2],
                                           pstr0 = Coef(fit)[1]) *
                                   sum(wfreq), digits = 2)))

# Example 3: data from Angers and Biswas (2003)
abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
abdata <- subset(abdata, w > 0)
fit <- vglm(y ~ 1, zipoisson(lpstr0 = probitlink, ipstr0 = 0.8),
            data = abdata, weight = w, trace = TRUE)
fitted(fit, type = "pobs0")  # Estimate of P(Y = 0)
coef(fit, matrix = TRUE)
Coef(fit)  # Estimate of pstr0 and lambda
fitted(fit)
with(abdata, weighted.mean(y, w))  # Compare this with fitted(fit)
summary(fit)

# Example 4: zero-deflated model for intercept-only data
zdata <- transform(zdata, lambda3 = loglink(0.0, inverse = TRUE))
zdata <- transform(zdata, deflat.limit = -1 / expm1(lambda3))  # Boundary
# The 'pstr0' parameter is negative and in parameter space:
zdata <- transform(zdata, usepstr0 = deflat.limit / 2)  # Not too near the boundary
zdata <- transform(zdata, y3 = rzipois(nn, lambda3, pstr0 = usepstr0))
head(zdata)
with(zdata, table(y3))  # A lot of deflation
fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = "identitylink"),
             data = zdata, trace = TRUE, crit = "coef")
coef(fit3, matrix = TRUE)
# Check how accurate it was:
zdata[1, "usepstr0"]  # Answer
coef(fit3)[1]         # Estimate
Coef(fit3)
vcov(fit3)  # Is positive-definite

# Example 5: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
set.seed(123)
rrzip <- rrvglm(Alopacce ~ sm.bs(WaterCon, df = 3), zipoisson(zero = NULL),
                data = hspider, trace = TRUE, Index.corner = 2)
coef(rrzip, matrix = TRUE)
Coef(rrzip)
summary(rrzip)
\dontrun{plotvgam(rrzip, lcol = "blue")}
}
\keyword{models}
\keyword{regression}

%# head(zdata, 1); pfit1 <- predict(fit1, zdata[1, ]);
%# lambda <- loglink(pfit1[2], inverse = TRUE)
%# lambda <- (fitted(fit1, type = "mean") / fitted(fit1, type = "onempstr0"))[1]
%# (prob.struc.0 <- pstr0 / dzipois(x = 0, lambda = lambda, pstr0 = pstr0))
% fit@misc$pobs0  # Estimate of P(Y = 0)
%zipoisson(lpstr0 = "logitlink", llambda = "loglink",
%          type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
"pobs0", "pstr0", "onempstr0"), % ipstr0 = NULL, ilambda = NULL, % imethod = 1, ishrinkage = 0.8, zero = NULL) %zipoissonff(llambda = "loglink", lonempstr0 = "logitlink", % type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"), % ilambda = NULL, ionempstr0 = NULL, % imethod = 1, ishrinkage = 0.8, zero = "onempstr0") VGAM/man/laplace.Rd0000644000176200001440000000656613565414527013500 0ustar liggesusers\name{laplace} \alias{laplace} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Laplace Regression Family Function } \description{ Maximum likelihood estimation of the 2-parameter classical Laplace distribution. } \usage{ laplace(llocation = "identitylink", lscale = "loglink", ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation, lscale}{ Character. Parameter link functions for location parameter \eqn{a} and scale parameter \eqn{b}. See \code{\link{Links}} for more choices. } \item{ilocation, iscale}{ Optional initial values. If given, it must be numeric and values are recycled to the appropriate length. The default is to choose the value internally. } \item{imethod}{ Initialization method. Either the value 1 or 2. } \item{zero}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The Laplace distribution is often known as the \emph{double-exponential} distribution and, for modelling, has heavier tail than the normal distribution. The Laplace density function is \deqn{f(y) = \frac{1}{2b} \exp \left( - \frac{|y-a|}{b} \right) }{% f(y) = (1/(2b)) exp( -|y-a|/b ) } where \eqn{-\infty0}. Its mean is \eqn{a} and its variance is \eqn{2b^2}. This parameterization is called the \emph{classical Laplace distribution} by Kotz et al. (2001), and the density is symmetric about \eqn{a}. For \code{y ~ 1} (where \code{y} is the response) the maximum likelihood estimate (MLE) for the location parameter is the sample median, and the MLE for \eqn{b} is \code{mean(abs(y-location))} (replace location by its MLE if unknown). } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001) \emph{The Laplace distribution and generalizations: a revisit with applications to communications, economics, engineering, and finance}, Boston: Birkhauser. } \author{ T. W. Yee } \section{Warning}{ This family function has not been fully tested. The MLE regularity conditions do \emph{not} hold for this distribution, therefore misleading inferences may result, e.g., in the \code{summary} and \code{vcov} of the object. Hence this family function might be withdrawn from \pkg{VGAM} in the future. } \note{ This family function uses Fisher scoring. Convergence may be slow for non-intercept-only models; half-stepping is frequently required. } \seealso{ \code{\link{rlaplace}}, \code{\link{alaplace2}} (which differs slightly from this parameterization), \code{\link{exponential}}, \code{\link[stats]{median}}. 
}
\examples{
ldata <- data.frame(y = rlaplace(nn <- 100, loc = 2, scale = exp(1)))
fit <- vglm(y ~ 1, laplace, data = ldata, trace = TRUE, crit = "l")
coef(fit, matrix = TRUE)
Coef(fit)
with(ldata, median(y))

ldata <- data.frame(x = runif(nn <- 1001))
ldata <- transform(ldata, y = rlaplace(nn, loc = 2, scale = exp(-1 + 1*x)))
coef(vglm(y ~ x, laplace(iloc = 0.2, imethod = 2, zero = 1),
          data = ldata, trace = TRUE), matrix = TRUE)
}
\keyword{models}
\keyword{regression}

VGAM/man/drop1.Rd0000644000176200001440000001315513565414527013114 0ustar liggesusers% File src/library/stats/man/add1.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2013 R Core Team
% Distributed under GPL 2 or later

\name{add1.vglm}
%\alias{add1}
%\alias{add1.default}
%\alias{add1.lm}
\alias{add1.vglm}
%\alias{drop1}
%\alias{drop1.default}
%\alias{drop1.lm}
%\alias{drop1.glm}
\alias{drop1.vglm}
\title{Add or Drop All Possible Single Terms to/from a Model}
%\title{Drop All Possible Single Terms from a Model}
\usage{
\method{add1}{vglm}(object, scope, test = c("none", "LRT"), k = 2, \dots)
\method{drop1}{vglm}(object, scope, test = c("none", "LRT"), k = 2, \dots)
}
% scale = 0,
\arguments{
  \item{object}{a fitted \code{\link{vglm}} model object.}
  \item{scope, k}{See \code{\link[stats]{drop1.glm}}.}
% \item{scale}{ignored.}
  \item{test}{Same as \code{\link[stats]{drop1.glm}}
  but with fewer choices.
  }
% \item{k}{Same as \code{\link{drop1.glm}}.}
% \item{trace}{if \code{TRUE}, print out progress reports.}
  \item{\dots}{further arguments passed to or from other methods.}
}
\description{
  Compute all the single terms in the \code{scope} argument that can
  be added to or dropped from the model, fit those models and compute
  a table of the changes in fit.
}
\details{
  These functions are a direct adaptation of
  \code{\link[stats]{add1.glm}} and
  \code{\link[stats]{drop1.glm}}
  for \code{\link{vglm-class}} objects.
  For \code{drop1} methods, a missing \code{scope} is taken to
  be all terms in the model. The hierarchy is respected when
  considering terms to be added or dropped: all main effects contained
  in a second-order interaction must remain, and so on.
  In a \code{scope} formula \code{.} means
  \sQuote{what is already there}.

  Compared to
  \code{\link[stats]{add1.glm}} and
  \code{\link[stats]{drop1.glm}}
  these functions are simpler, e.g., there are no \emph{Cp}, F or Rao
  (score) tests, nor \code{x} and \code{scale} arguments.
  Most models do not have a deviance; instead, twice the
  log-likelihood differences are used to test the significance of
  terms.

% The methods for \code{\link{lm}} and \code{\link{glm}} are more
% efficient in that they do not recompute the model matrix and call the
% \code{fit} methods directly.

  The default output table gives AIC, defined as minus twice log
  likelihood plus \eqn{2p} where \eqn{p} is the rank of the model (the
  number of effective parameters).
  This is only defined up to an additive constant (like
  log-likelihoods).

% For linear Gaussian models
% with fixed scale, the constant is chosen to give Mallows' \eqn{C_p}{Cp},
% \eqn{RSS/scale + 2p - n}.  Where \eqn{C_p}{Cp} is used,
% the column is labelled as \code{Cp} rather than \code{AIC}.
% The F tests for the \code{"glm"} methods are based on analysis of
% deviance tests, so if the dispersion is estimated it is based on the
% residual deviance, unlike the F tests of \code{\link{anova.glm}}.
}
\value{
  An object of class \code{"anova"} summarizing the differences in fit
  between the models.
}
%\author{
% The design was inspired by the S functions of the same names described
% in Chambers (1992).
%}
%\references{
% Chambers, J. M. (1992)
% \emph{Linear models.}
% Chapter 4 of \emph{Statistical Models in S}
% eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.
%}
\note{
  Most \pkg{VGAM} family functions do not compute a deviance, but
  instead the likelihood function is evaluated at the MLE.
  Hence a column name \code{"Deviance"} only appears for a few models;
  and almost always there is a column labelled \code{"logLik"}.

% These are not fully equivalent to the functions in S.  There is no
% \code{keep} argument, and the methods used are not quite so
% computationally efficient.
% Their authors' definitions of Mallows' \eqn{C_p}{Cp} and Akaike's AIC
% are used, not those of the authors of the models chapter of S.
}
\section{Warning}{
  In general, the same warnings in
  \code{\link[stats]{add1.glm}} and
  \code{\link[stats]{drop1.glm}} apply here.
  Furthermore, these functions have not been rigorously tested for all
  models, so treat the results cautiously and please report any bugs.

  Care is needed to check that the constraint matrices of added terms
  are correct.
  Also, if \code{object} is of the form
  \code{vglm(..., constraints = list(x1 = cm1, x2 = cm2))}
  then \code{\link{add1.vglm}} may fail because the \code{constraints}
  argument needs to have the constraint matrices for \emph{all} terms.

% The model fitting must apply the models to the same dataset. Most
% methods will attempt to use a subset of the data with no missing
% values for any of the variables if \code{na.action = na.omit}, but
% this may give biased results.  Only use these functions with data
% containing missing values with great care.
% The default methods make calls to the function \code{\link{nobs}} to
% check that the number of observations involved in the fitting process
% remained unchanged.
}
\seealso{
  \code{\link{step4vglm}},
  \code{\link{vglm}},
  \code{\link{extractAIC.vglm}},
  \code{\link{anova.vglm}},
  \code{\link{backPain2}},
  \code{\link[stats]{update}}.

% \code{\link{step4vglm}}.
% \code{\link{step4}}, \code{\link{aov}}, \code{\link{lm}},
% \code{\link{extractAIC}}, \code{\link{anova}}
}
\examples{
data("backPain2", package = "VGAM")
summary(backPain2)
fit1 <- vglm(pain ~ x2 + x3 + x4, propodds, data = backPain2)
coef(fit1)
add1(fit1, scope = ~ x2 * x3 * x4, test = "LRT")
drop1(fit1, test = "LRT")
fit2 <- vglm(pain ~ x2 * x3 * x4, propodds, data = backPain2)
drop1(fit2)
}
\keyword{models}
%\dontshow{od <- options(digits = 5)}
%\dontshow{options(od)}
VGAM/man/cauchy.Rd0000644000176200001440000001122313565414527013335 0ustar liggesusers\name{cauchy}
\alias{cauchy}
\alias{cauchy1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Cauchy Distribution Family Function }
\description{
  Estimates either the location parameter or both the location and
  scale parameters of the Cauchy distribution by maximum likelihood
  estimation.
}
\usage{
cauchy(llocation = "identitylink", lscale = "loglink",
       imethod = 1, ilocation = NULL, iscale = NULL,
       gprobs.y = ppoints(19), gscale.mux = exp(-3:3), zero = "scale")
cauchy1(scale.arg = 1, llocation = "identitylink",
        ilocation = NULL, imethod = 1,
        gprobs.y = ppoints(19), zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{llocation, lscale}{
  Parameter link functions for the location parameter \eqn{a}{a}
  and the scale parameter \eqn{b}{b}.
  See \code{\link{Links}} for more choices.
  }
  \item{ilocation, iscale}{
  Optional initial value for \eqn{a}{a} and \eqn{b}{b}.
  By default, an initial value is chosen internally for each.
  }
  \item{imethod}{
  Integer, either 1 or 2 or 3.
  Initialization method; three algorithms are implemented.
  The user should try all possible values to help avoid converging to
  a local solution.
  Also, choose another value if convergence fails, or use
  \code{ilocation} and/or \code{iscale}.
  }
  \item{gprobs.y, gscale.mux, zero}{
  See \code{\link{CommonVGAMffArguments}} for information.
  }
  \item{scale.arg}{
  Known (positive) scale parameter, called \eqn{b}{b} below.
  }
}
\details{
  The Cauchy distribution has density function
 \deqn{f(y;a,b) = \left\{ \pi b [1 + ((y-a)/b)^2] \right\}^{-1} }{%
       f(y;a,b) = 1 / [pi * b * [1 + ((y-a)/b)^2]] }
  where \eqn{y} and \eqn{a} are real and finite,
  and \eqn{b>0}{b>0}.
  The distribution is symmetric about \eqn{a} and has a heavy tail.
  Its median and mode are \eqn{a}, but the mean does not exist.
  The fitted values are the estimates of \eqn{a}.
  Fisher scoring is used.

% Fisher scoring is the default but if \code{nsimEIM} is specified then
% Fisher scoring with simulation is used.

  If the scale parameter is known (\code{cauchy1}) then there may
  be multiple local maximum likelihood solutions for the location
  parameter.  However, if both location and scale parameters are
  to be estimated (\code{cauchy}) then there is a unique maximum
  likelihood solution provided \eqn{n > 2} and less than half the data
  are located at any one point.
}
\value{
  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
  The object is used by modelling functions such as \code{\link{vglm}},
  and \code{\link{vgam}}.
}
\section{Warning }{
  It is well-known that the Cauchy distribution may have local
  maxima in its likelihood function; make full use of \code{imethod},
  \code{ilocation}, \code{iscale} etc.
}
\references{
  Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
  \emph{Statistical Distributions},
  Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.

  Barnett, V. D. (1966)
  Evaluation of the maximum-likelihood estimator where the likelihood
  equation has multiple roots.
  \emph{Biometrika},
  \bold{53}, 151--165.

  Copas, J. B. (1975)
  On the unimodality of the likelihood for the Cauchy distribution.
  \emph{Biometrika},
  \bold{62}, 701--704.

  Efron, B. and Hinkley, D. V. (1978)
  Assessing the accuracy of the maximum likelihood estimator:
  Observed versus expected Fisher information.
  \emph{Biometrika},
  \bold{65}, 457--481.
}
\author{ T. W. Yee }
\note{
  Good initial values are needed.
  By default \code{cauchy} searches for a starting value for
  \eqn{a}{a} and \eqn{b}{b} on a 2-D grid.
  Likewise, by default, \code{cauchy1} searches for a starting value
  for \eqn{a}{a} on a 1-D grid.
  If convergence to the global maximum is not achieved then it also
  pays to select a wide range of initial values via the
  \code{ilocation} and/or \code{iscale} and/or \code{imethod}
  arguments.
}
\seealso{
  \code{\link[stats:Cauchy]{Cauchy}},
  \code{\link{cauchitlink}},
  \code{\link{studentt}},
  \code{\link{simulate.vlm}}.
} \examples{ # Both location and scale parameters unknown set.seed(123) cdata <- data.frame(x2 = runif(nn <- 1000)) cdata <- transform(cdata, loc = exp(1 + 0.5 * x2), scale = exp(1)) cdata <- transform(cdata, y2 = rcauchy(nn, loc, scale)) fit2 <- vglm(y2 ~ x2, cauchy(lloc = "loglink"), data = cdata, trace = TRUE) coef(fit2, matrix = TRUE) head(fitted(fit2)) # Location estimates summary(fit2) # Location parameter unknown cdata <- transform(cdata, scale1 = 0.4) cdata <- transform(cdata, y1 = rcauchy(nn, loc, scale1)) fit1 <- vglm(y1 ~ x2, cauchy1(scale = 0.4), data = cdata, trace = TRUE) coef(fit1, matrix = TRUE) } \keyword{models} \keyword{regression} VGAM/man/lvplot.qrrvglm.Rd0000644000176200001440000003311113565414527015072 0ustar liggesusers\name{lvplot.qrrvglm} \alias{lvplot.qrrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Latent Variable Plot for QO models } \description{ Produces an ordination diagram (latent variable plot) for quadratic ordination (QO) models. For rank-1 models, the x-axis is the first ordination/constrained/canonical axis. For rank-2 models, the x- and y-axis are the first and second ordination axes respectively. } \usage{ lvplot.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL, add = FALSE, show.plot = TRUE, rug = TRUE, y = FALSE, type = c("fitted.values", "predictors"), xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""), ylab = if (Rank == 1) switch(type, predictors = "Predictors", fitted.values = "Fitted values") else "Latent Variable 2", pcex = par()$cex, pcol = par()$col, pch = par()$pch, llty = par()$lty, lcol = par()$col, llwd = par()$lwd, label.arg = FALSE, adj.arg = -0.1, ellipse = 0.95, Absolute = FALSE, elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200, chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd, cpch = " ", C = FALSE, OriginC = c("origin", "mean"), Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd, Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1, sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex, sfont = par()$font, check.ok = TRUE, jitter.y = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A CQO object. % A CQO or UQO object. } \item{varI.latvar}{ Logical that is fed into \code{\link{Coef.qrrvglm}}. } \item{refResponse}{ Integer or character that is fed into \code{\link{Coef.qrrvglm}}. } \item{add}{ Logical. Add to an existing plot? If \code{FALSE}, a new plot is made. } \item{show.plot}{ Logical. Plot it? } \item{rug}{ Logical. If \code{TRUE}, a rug plot is plotted at the foot of the plot (applies to rank-1 models only). These values are jittered to expose ties. } \item{y}{ Logical. If \code{TRUE}, the responses will be plotted (applies only to rank-1 models and if \code{type = "fitted.values"}.) } \item{type}{ Either \code{"fitted.values"} or \code{"predictors"}, specifies whether the y-axis is on the response or eta-scales respectively. } \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. } \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. } \item{pcex}{ Character expansion of the points. Here, for rank-1 models, points are the response \emph{y} data. For rank-2 models, points are the optimums. See the \code{cex} argument in \code{\link[graphics]{par}}. } \item{pcol}{ Color of the points. See the \code{col} argument in \code{\link[graphics]{par}}. 
} \item{pch}{ Either an integer specifying a symbol or a single character to be used as the default in plotting points. See \code{\link[graphics]{par}}. The \code{pch} argument can be of length \eqn{M}, the number of species. } \item{llty}{ Line type. Rank-1 models only. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{lcol}{ Line color. Rank-1 models only. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{llwd}{ Line width. Rank-1 models only. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{label.arg}{ Logical. Label the optimums and \bold{C}? (applies only to rank-2 models only). } \item{adj.arg}{ Justification of text strings for labelling the optimums (applies only to rank-2 models only). See the \code{adj} argument of \code{\link[graphics]{par}}. } \item{ellipse}{ Numerical, of length 0 or 1 (applies only to rank-2 models only). If \code{Absolute} is \code{TRUE} then \code{ellipse} should be assigned a value that is used for the elliptical contouring. If \code{Absolute} is \code{FALSE} then \code{ellipse} should be assigned a value between 0 and 1, for example, setting \code{ellipse = 0.9} means an ellipse with contour = 90\% of the maximum will be plotted about each optimum. If \code{ellipse} is a negative value, then the function checks that the model is an equal-tolerances model and \code{varI.latvar = FALSE}, and if so, plots circles with radius \code{-ellipse}. For example, setting \code{ellipse = -1} will result in circular contours that have unit radius (in latent variable units). If \code{ellipse} is \code{NULL} or \code{FALSE} then no ellipse is drawn around the optimums. } \item{Absolute}{ Logical. If \code{TRUE}, the contours corresponding to \code{ellipse} are on an absolute scale. If \code{FALSE}, the contours corresponding to \code{ellipse} are on a relative scale. } \item{elty}{ Line type of the ellipses. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{ecol}{ Line color of the ellipses. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{elwd}{ Line width of the ellipses. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{egrid}{ Numerical. Line resolution of the ellipses. Choosing a larger value will result in smoother ellipses. Useful when ellipses are large. } \item{chull.arg}{ Logical. Add a convex hull around the site scores? } \item{clty}{ Line type of the convex hull. See the \code{lty} argument of \code{\link[graphics]{par}}. } \item{ccol}{ Line color of the convex hull. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{clwd}{ Line width of the convex hull. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{cpch}{ Character to be plotted at the intersection points of the convex hull. Having white spaces means that site labels are not obscured there. See the \code{pch} argument of \code{\link[graphics]{par}}. } \item{C}{ Logical. Add \bold{C} (represented by arrows emanating from \code{OriginC}) to the plot? } \item{OriginC}{ Character or numeric. Where the arrows representing \bold{C} emanate from. If character, it must be one of the choices given. By default the first is chosen. The value \code{"origin"} means \code{c(0,0)}. The value \code{"mean"} means the sample mean of the latent variables (centroid). Alternatively, the user may specify a numerical vector of length 2. } \item{Clty}{ Line type of the arrows representing \bold{C}. See the \code{lty} argument of \code{\link[graphics]{par}}. 
} \item{Ccol}{ Line color of the arrows representing \bold{C}. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{Clwd}{ Line width of the arrows representing \bold{C}. See the \code{lwd} argument of \code{\link[graphics]{par}}. } \item{Ccex}{ Numeric. Character expansion of the labelling of \bold{C}. See the \code{cex} argument of \code{\link[graphics]{par}}. } \item{Cadj.arg}{ Justification of text strings when labelling \bold{C}. See the \code{adj} argument of \code{\link[graphics]{par}}. } \item{stretchC}{ Numerical. Stretching factor for \bold{C}. Instead of using \bold{C}, \code{stretchC * } \bold{C} is used. } \item{sites}{ Logical. Add the site scores (aka latent variable values, nu's) to the plot? (applies only to rank-2 models only). } \item{spch}{ Plotting character of the site scores. The default value of \code{NULL} means the row labels of the data frame are used. They often are the site numbers. See the \code{pch} argument of \code{\link[graphics]{par}}. } \item{scol}{ Color of the site scores. See the \code{col} argument of \code{\link[graphics]{par}}. } \item{scex}{ Character expansion of the site scores. See the \code{cex} argument of \code{\link[graphics]{par}}. } \item{sfont}{ Font used for the site scores. See the \code{font} argument of \code{\link[graphics]{par}}. } % \item{Rotate}{ % Numeric or logical. % A value from the set \{1,2,\ldots,\eqn{M}\} indicating % which species (quadratic predictor) is to be chosen so that % its major and semi-minor axes are parallel to the latent variable % axes, i.e., that species' Tolerance matrix will be diagonal. % If \code{Rotate} is \code{TRUE}, the first species is selected for rotation. % By default a rotation is performed only if the tolerance matrices are equal, % and \code{Rotation} only applies when the rank is greater than one. % See \code{\link{Coef.qrrvglm}} for details. % } % \item{I.tolerances}{ % Logical. % If \code{TRUE}, the tolerances matrices are transformed so that they are % the order-\code{Rank} identity matrix. This means that a rank-2 % latent variable plot % can be interpreted naturally in terms of distances and directions. % See \code{\link{Coef.qrrvglm}} for details. % } \item{check.ok}{ Logical. Whether a check is performed to see that \code{noRRR = ~ 1} was used. It doesn't make sense to have a latent variable plot unless this is so. } \item{jitter.y}{ Logical. If \code{y} is plotted, jitter it first? This may be useful for counts and proportions. } \item{\dots}{ Arguments passed into the \code{plot} function when setting up the entire plot. Useful arguments here include \code{xlim} and \code{ylim}. } } \details{ This function only works for rank-1 and rank-2 QRR-VGLMs with argument \code{noRRR = ~ 1}. For unequal-tolerances models, the latent variable axes can be rotated so that at least one of the tolerance matrices is diagonal; see \code{\link{Coef.qrrvglm}} for details. Arguments beginning with ``\code{p}'' correspond to the points e.g., \code{pcex} and \code{pcol} correspond to the size and color of the points. Such ``\code{p}'' arguments should be vectors of length 1, or \eqn{n}, the number of sites. For the rank-2 model, arguments beginning with ``\code{p}'' correspond to the optimums. } \value{ Returns a matrix of latent variables (site scores) regardless of whether a plot was produced or not. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. } \author{ Thomas W. 
Yee } \note{ A species which does not have an optimum will not have an ellipse drawn even if requested, i.e., if its tolerance matrix is not positive-definite. % Stationary points which are not bell-shaped will not be plotted % at all. Plotting \bold{C} gives a visual display of the weights (loadings) of each of the variables used in the linear combination defining each latent variable. The arguments \code{elty}, \code{ecol} and \code{elwd}, may be replaced in the future by \code{llty}, \code{lcol} and \code{llwd}, respectively. For rank-1 models, a similar function to this one is \code{\link{perspqrrvglm}}. It plots the fitted values on a more fine grid rather than at the actual site scores here. The result is a collection of smooth bell-shaped curves. However, it has the weakness that the plot is more divorced from the data; the user thinks it is the truth without an appreciation of the statistical variability in the estimates. % Yet to do: allow for the contour line to correspond to the tolerance % matrix itself. zz ?? In the example below, the data comes from an equal-tolerances model. The species' tolerance matrices are all the identity matrix, and the optimums are at (0,0), (1,1) and (-2,0) for species 1, 2, 3 respectively. } \section{Warning}{ Interpretation of a latent variable plot (CQO diagram) is potentially very misleading in terms of distances if (i) the tolerance matrices of the species are unequal and (ii) the contours of these tolerance matrices are not included in the ordination diagram. } \seealso{ \code{\link{lvplot}}, \code{\link{perspqrrvglm}}, \code{\link{Coef.qrrvglm}}, \code{\link[graphics]{par}}, \code{\link{cqo}}. } \examples{ set.seed(123); nn <- 200 cdata <- data.frame(x2 = rnorm(nn), # Has mean 0 (needed when I.tol=TRUE) x3 = rnorm(nn), # Has mean 0 (needed when I.tol=TRUE) x4 = rnorm(nn)) # Has mean 0 (needed when I.tol=TRUE) cdata <- transform(cdata, latvar1 = x2 + x3 - 2*x4, latvar2 = -x2 + x3 + 0*x4) # Nb. latvar2 is weakly correlated with latvar1 cdata <- transform(cdata, lambda1 = exp(6 - 0.5 * (latvar1-0)^2 - 0.5 * (latvar2-0)^2), lambda2 = exp(5 - 0.5 * (latvar1-1)^2 - 0.5 * (latvar2-1)^2), lambda3 = exp(5 - 0.5 * (latvar1+2)^2 - 0.5 * (latvar2-0)^2)) cdata <- transform(cdata, spp1 = rpois(nn, lambda1), spp2 = rpois(nn, lambda2), spp3 = rpois(nn, lambda3)) set.seed(111) \dontrun{ p2 <- cqo(cbind(spp1, spp2, spp3) ~ x2 + x3 + x4, poissonff, data = cdata, Rank = 2, I.tolerances = TRUE, Crow1positive = c(TRUE, FALSE)) # deviance = 505.81 if (deviance(p2) > 506) stop("suboptimal fit obtained") sort(deviance(p2, history = TRUE)) # A history of all the iterations Coef(p2) } \dontrun{ lvplot(p2, sites = TRUE, spch = "*", scol = "darkgreen", scex = 1.5, chull = TRUE, label = TRUE, Absolute = TRUE, ellipse = 140, adj = -0.5, pcol = "blue", pcex = 1.3, las = 1, Ccol = "orange", C = TRUE, Cadj = c(-0.3, -0.3, 1), Clwd = 2, Ccex = 1.4, main = paste("Contours at Abundance = 140 with", "convex hull of the site scores")) } \dontrun{ var(latvar(p2)) # A diagonal matrix, i.e., uncorrelated latent vars var(latvar(p2, varI.latvar = TRUE)) # Identity matrix Tol(p2)[, , 1:2] # Identity matrix Tol(p2, varI.latvar = TRUE)[, , 1:2] # A diagonal matrix } } \keyword{models} \keyword{regression} \keyword{graphs} VGAM/man/calibrate.rrvglm.control.Rd0000644000176200001440000000266413565414527017007 0ustar liggesusers\name{calibrate.rrvglm.control} \alias{calibrate.rrvglm.control} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Control Function for CLO (RR-VGLM) Calibration } \description{ Algorithmic constants and parameters for running \code{\link{calibrate.rrvglm}} are set using this function. } \usage{ calibrate.rrvglm.control(object, trace = FALSE, method.optim = "BFGS", gridSize = ifelse(Rank == 1, 17, 9), ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ The fitted \code{\link{rrvglm}} model. The user should ignore this argument. % The fitted CLO model. The user should ignore this argument. } \item{trace, method.optim}{ Same as \code{\link{calibrate.qrrvglm.control}}. } \item{gridSize}{ Same as \code{\link{calibrate.qrrvglm.control}}. } \item{\dots}{ Avoids an error message for extraneous arguments. } } \details{ Most CLO users will only need to make use of \code{trace} and \code{gridSize}. These arguments should be used inside their call to \code{\link{calibrate.rrvglm}}, not this function directly. } \value{ Similar to \code{\link{calibrate.qrrvglm.control}}. } %\references{ %} % \author{T. W. Yee} %\note{ % Despite the name of this function, UQO and CAO models are handled % } \seealso{ \code{\link{calibrate.rrvglm}}, \code{\link{Coef.rrvglm}}. } %\examples{ %} \keyword{models} \keyword{regression} VGAM/man/wald.stat.Rd0000644000176200001440000001443113565414527013766 0ustar liggesusers\name{wald.stat} \alias{wald.stat} \alias{wald.stat.vlm} %\alias{score.stat} %\alias{score.stat.vlm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Wald Test Statistics Evaluated at the Null Values } \description{ Generic function that computes Wald test statistics evaluated at the null values (consequently they do not suffer from the Hauck-Donner effect). } \usage{ wald.stat(object, ...) wald.stat.vlm(object, values0 = 0, subset = NULL, omit1s = TRUE, all.out = FALSE, iterate = TRUE, trace = FALSE, as.summary = FALSE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \code{\link{vglm}} fit. % An object that is ideally an % \code{\link{vglm}} fit. } \item{values0}{ Numeric vector. The null values corresponding to the null hypotheses. Recycled if necessary. } \item{subset}{ Same as in \code{\link{hdeff}}. } \item{omit1s}{ Logical. Does one omit the intercepts? Because the default would be to test that each intercept is equal to 0, which often does not make sense or is unimportant, the intercepts are not tested by default. If they are tested then each linear predictor must have at least one coefficient (from another variable) to be estimated. } \item{all.out}{ Logical. If \code{TRUE} then a list is returned containing various quantities such as the SEs, instead of just the Wald statistics. } \item{iterate}{ Logical. If \code{TRUE} then IRLS iterations are performed to get MLEs of the \emph{other} regression coefficients, subject to that one coefficient being equal to the appropriate \code{values0} value. If \code{FALSE} then the other regression coefficients have values obtained at the original fit. It is recommended that \code{TRUE} be used, as the answer tends to be more accurate. If the large model matrix only has one column and \code{iterate = TRUE} then an error will occur because there are no \emph{other} regression coefficients to estimate. } \item{trace}{ Logical. If \code{TRUE} then some output is produced as the IRLS iterations proceed. The value \code{NULL} means to use the \code{trace} value of the fitted object; see \code{\link{vglm.control}}. % Same as in \code{\link{lrp.vglm}}. 
} \item{as.summary}{ Logical. If \code{TRUE} then the usual (or unmodified) Wald statistics etc. are extracted from \code{summary(object)}. These may suffer from the HDE, and the SEs are evaluated at the MLE of the original object so that effectively \code{iterate = FALSE}. If \code{TRUE} then most other arguments will be ignored or overwritten. This argument may be renamed to \code{as.glm} because the Wald statistics will be computed in the same way as \code{summary(glm())}. % 20190112. } \item{\dots}{ Ignored for now. } } \details{ By default, \code{\link{summaryvglm}} and most regression modelling functions such as \code{\link[stats]{summary.glm}} compute the standard errors (SEs) of the estimates at the MLE and not at 0. This makes it vulnerable to the Hauck-Donner effect (HDE) (see \code{\link{hdeff}}). One solution is to compute the SEs at 0 (or more generally, at the values of the argument \code{values0}). This function does that. The two variants of Wald statistics are asymptotically equivalent; however, in small samples there can be an appreciable difference, and the difference can be large if the estimates are near to the boundary of the parameter space. None of the tests here are joint, hence the degrees of freedom are always unity. For a factor with more than 2 levels one can use \code{\link{anova.vglm}} to test for the significance of the factor. If \code{iterate = FALSE} then one retains the MLEs of the original fit for the values of the other coefficients, and replaces one coefficient at a time by the value 0 (or whatever is specified by \code{values0}). One alternative would be to recompute the MLEs of the other coefficients after replacing one of the values; this is the default because \code{iterate = TRUE}. Just like the original IRLS iterations, note that the iterations here are not guaranteed to converge. Almost all \pkg{VGAM} family functions use the EIM and not the OIM; this affects the resulting standard errors. Also, regularity conditions are assumed for the Wald, likelihood ratio and score tests; some \pkg{VGAM} family functions such as \code{\link{alaplace1}} are experimental and do not satisfy such conditions, therefore naive inference is hazardous. The default output of this function can be seen by setting \code{wald0.arg = TRUE} in \code{\link{summaryvglm}}. } \value{ By default, the signed square roots of the Wald statistics, whose SEs are computed at each of the null values, are returned. If \code{all.out = TRUE} then a list is returned with the following components: \code{wald.stat} the Wald statistic, \code{SE0} the standard error of that coefficient, \code{values0} the null values. Approximately, the Wald statistics in the default output are standard normal random variates if each null hypothesis is true. } %\references{ % %} \author{ Thomas W. Yee } %\note{ %} \section{Warning }{ This function has not yet been thoroughly tested. Convergence failure is possible for some models applied to certain data sets; it is a good idea to set \code{trace = TRUE} to monitor convergence. For example, for a particular explanatory variable, the estimated regression coefficients of a non-parallel cumulative logit model (see \code{\link{cumulative}}) are ordered, and perturbing one coefficient might disrupt the order and create numerical problems. } \seealso{ \code{\link{lrt.stat}}, \code{\link{score.stat}}, \code{\link{summaryvglm}}, \code{\link[stats]{summary.glm}}, \code{\link{anova.vglm}}, \code{\link{vglm}}, \code{\link{hdeff}}, \code{\link{hdeffsev}}. 
} \examples{ set.seed(1) pneumo <- transform(pneumo, let = log(exposure.time), x3 = rnorm(nrow(pneumo))) (fit <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, data = pneumo)) wald.stat(fit) # No HDE here summary(fit, wald0 = TRUE) # See them here coef(summary(fit)) # Usual Wald statistics evaluated at the MLE wald.stat(fit, as.summary = TRUE) # Same as previous line } \keyword{models} \keyword{regression} VGAM/man/normal.vcm.Rd0000644000176200001440000002227113565414527014142 0ustar liggesusers\name{normal.vcm} \alias{normal.vcm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Univariate Normal Distribution as a Varying-Coefficient Model } \description{ Maximum likelihood estimation of all the coefficients of a LM where each of the usual regression coefficients is modelled with other explanatory variables via parameter link functions. Thus this is a basic varying-coefficient model. } \usage{ normal.vcm(link.list = list("(Default)" = "identitylink"), earg.list = list("(Default)" = list()), lsd = "loglink", lvar = "loglink", esd = list(), evar = list(), var.arg = FALSE, imethod = 1, icoefficients = NULL, isd = NULL, zero = "sd", sd.inflation.factor = 2.5) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link.list, earg.list}{ Link functions and extra arguments applied to the coefficients of the LM, excluding the standard deviation/variance. See \code{\link{CommonVGAMffArguments}} for more information. The default is for an identity link to be applied to each of the regression coefficients. } \item{lsd, esd, lvar, evar}{ Link function and extra argument applied to the standard deviation/variance. See \code{\link{CommonVGAMffArguments}} for more information. Same as \code{\link{uninormal}}. } \item{icoefficients}{ Optional initial values for the coefficients. Recycled to length \eqn{M-1} (does not include the standard deviation/variance). Try using this argument if there is a link function that is not programmed explicitly to handle range restrictions in the \code{initialize} slot. } \item{var.arg, imethod, isd}{ Same as, or similar to, \code{\link{uninormal}}. } \item{zero}{ See \code{\link{CommonVGAMffArguments}} for more information. The default applies to the last one, viz. the standard deviation/variance parameter. } \item{sd.inflation.factor}{ Numeric, should be greater than 1. The initial value of the standard deviation is multiplied by this, unless \code{isd} is inputted. Experience has shown that it is safer to start off with a larger value rather than a smaller one. } } \details{ This function allows all the usual LM regression coefficients to be modelled as functions of other explanatory variables via parameter link functions. For example, we may want some of them to be positive. Or we may want a subset of them to be positive and add to unity. So a class of such models have been named \emph{varying-coefficient models} (VCMs). The usual linear model is specified through argument \code{form2}. As with all other \pkg{VGAM} family functions, the linear/additive predictors are specified through argument \code{formula}. The \code{\link{multilogitlink}} link allows a subset of the coefficients to be positive and add to unity. Either none or more than one call to \code{\link{multilogitlink}} is allowed. The last variable will be used as the baseline/reference group, and therefore excluded from the estimation. By default, the log of the standard deviation is the last linear/additive predictor. 
It is recommended that this parameter be estimated as intercept-only, for numerical stability. Technically, the Fisher information matrix is of unit-rank for all but the last parameter (the standard deviation/variance). Hence an approximation is used that pools over all the observations. This \pkg{VGAM} family function cannot handle multiple responses. Also, this function will probably not have the full capabilities of the class of varying-coefficient models as described by Hastie and Tibshirani (1993). However, it should be able to manage some simple models, especially involving the following links: \code{\link{identitylink}}, \code{\link{loglink}}, \code{\link{logofflink}}, \code{\link{logloglink}}, \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{cauchitlink}}, \code{\link{clogloglink}}, \code{\link{rhobitlink}}, \code{\link{fisherzlink}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Hastie, T. and Tibshirani, R. (1993) Varying-coefficient models. \emph{J. Roy. Statist. Soc. Ser. B}, \bold{55}, 757--796. } \author{ T. W. Yee } \section{Warning}{ This \pkg{VGAM} family function is fragile. One should monitor convergence, and possibly enter initial values, especially when there are non-\code{\link{identity}}-link functions. If the initial value of the standard deviation/variance is too small then numerical problems may occur. One trick is to fit an intercept-only model and feed its \code{predict()} output into argument \code{etastart} of a more complicated model. The use of the \code{zero} argument is recommended in order to keep models as simple as possible. % 20130730; No longer a bug: % Inference for an ordinary LM here differs from \code{\link[stats]{lm}}. % In particular, the SEs differ. } \note{ The standard deviation/variance parameter is best modelled as intercept-only. Yet to do: allow an argument such as \code{parallel} that enables many of the coefficients to be equal. Fix a bug: \code{Coef()} does not work for intercept-only models. } \seealso{ \code{\link{uninormal}}, \code{\link[stats:lm]{lm}}. % \code{link[locfit]{ethanol}}. } \examples{ ndata <- data.frame(x2 = runif(nn <- 2000)) # Note that coeff1 + coeff2 + coeff5 == 1. So try "multilogitlink". 
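# (Illustrative aside, not in the original example: "multilogitlink" constrains the chosen coefficients to be positive and to sum to unity, so only two of coeff1, coeff2 and coeff5 are freely estimated; the last one assigned this link, coeff5, is the baseline/reference and is recovered by subtraction.)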
myoffset <- 10 ndata <- transform(ndata, coeff1 = 0.25, # "multilogitlink" coeff2 = 0.25, # "multilogitlink" coeff3 = exp(-0.5), # "loglink" # "logofflink" link: coeff4 = logofflink(+0.5, offset = myoffset, inverse = TRUE), coeff5 = 0.50, # "multilogitlink" coeff6 = 1.00, # "identitylink" v2 = runif(nn), v3 = runif(nn), v4 = runif(nn), v5 = rnorm(nn), v6 = rnorm(nn)) ndata <- transform(ndata, Coeff1 = 0.25 - 0 * x2, Coeff2 = 0.25 - 0 * x2, Coeff3 = logitlink(-0.5 - 1 * x2, inverse = TRUE), Coeff4 = logloglink( 0.5 - 1 * x2, inverse = TRUE), Coeff5 = 0.50 - 0 * x2, Coeff6 = 1.00 + 1 * x2) ndata <- transform(ndata, y1 = coeff1 * 1 + coeff2 * v2 + coeff3 * v3 + coeff4 * v4 + coeff5 * v5 + coeff6 * v6 + rnorm(nn, sd = exp(0)), y2 = Coeff1 * 1 + Coeff2 * v2 + Coeff3 * v3 + Coeff4 * v4 + Coeff5 * v5 + Coeff6 * v6 + rnorm(nn, sd = exp(0))) # An intercept-only model fit1 <- vglm(y1 ~ 1, form2 = ~ 1 + v2 + v3 + v4 + v5 + v6, normal.vcm(link.list = list("(Intercept)" = "multilogitlink", "v2" = "multilogitlink", "v3" = "loglink", "v4" = "logofflink", "(Default)" = "identitylink", "v5" = "multilogitlink"), earg.list = list("(Intercept)" = list(), "v2" = list(), "v4" = list(offset = myoffset), "v3" = list(), "(Default)" = list(), "v5" = list()), zero = c(1:2, 6)), data = ndata, trace = TRUE) coef(fit1, matrix = TRUE) summary(fit1) # This works only for intercept-only models: multilogitlink(rbind(coef(fit1, matrix = TRUE)[1, c(1, 2)]), inverse = TRUE) # A model with covariate x2 for the regression coefficients fit2 <- vglm(y2 ~ 1 + x2, form2 = ~ 1 + v2 + v3 + v4 + v5 + v6, normal.vcm(link.list = list("(Intercept)" = "multilogitlink", "v2" = "multilogitlink", "v3" = "logitlink", "v4" = "logloglink", "(Default)" = "identitylink", "v5" = "multilogitlink"), earg.list = list("(Intercept)" = list(), "v2" = list(), "v3" = list(), "v4" = list(), "(Default)" = list(), "v5" = list()), zero = c(1:2, 6)), data = ndata, trace = TRUE) coef(fit2, matrix = TRUE) summary(fit2) } \keyword{models} \keyword{regression} VGAM/man/acat.Rd0000644000176200001440000000712513565414527012777 0ustar liggesusers\name{acat} \alias{acat} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Ordinal Regression with Adjacent Categories Probabilities } \description{ Fits an adjacent categories regression model to an ordered (preferably) factor response. } \usage{ acat(link = "loglink", parallel = FALSE, reverse = FALSE, zero = NULL, whitespace = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{link}{ Link function applied to the ratios of the adjacent categories probabilities. See \code{\link{Links}} for more choices. } \item{parallel}{ A logical, or formula specifying which terms have equal/unequal coefficients. } \item{reverse}{ Logical. By default, the linear/additive predictors used are \eqn{\eta_j = \log(P[Y=j+1]/P[Y=j])}{eta_j = log(P[Y=j+1]/P[Y=j])} for \eqn{j=1,\ldots,M}. If \code{reverse} is \code{TRUE} then \eqn{\eta_j = \log(P[Y=j]/P[Y=j+1])}{eta_j=log(P[Y=j]/P[Y=j+1])} will be used. } \item{zero}{ An integer-valued vector specifying which linear/additive predictors are modelled as intercepts only. The values must be from the set \{1,2,\ldots,\eqn{M}\}. } \item{whitespace}{ See \code{\link{CommonVGAMffArguments}} for information. } } \details{ In this help file the response \eqn{Y} is assumed to be a factor with ordered values \eqn{1,2,\ldots,M+1}, so that \eqn{M} is the number of linear/additive predictors \eqn{\eta_j}{eta_j}. 
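For example, a factor response with four ordered levels has \eqn{M = 3} linear/additive predictors, viz. \eqn{\eta_1 = \log(P[Y=2]/P[Y=1])}{eta_1 = log(P[Y=2]/P[Y=1])}, \eqn{\eta_2 = \log(P[Y=3]/P[Y=2])}{eta_2 = log(P[Y=3]/P[Y=2])} and \eqn{\eta_3 = \log(P[Y=4]/P[Y=3])}{eta_3 = log(P[Y=4]/P[Y=3])} under the default \code{reverse = FALSE}.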
By default, the log link is used because the ratio of two probabilities is positive. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, \code{\link{rrvglm}} and \code{\link{vgam}}. } \references{ Agresti, A. (2013) \emph{Categorical Data Analysis}, 3rd ed. Hoboken, NJ, USA: Wiley. \cr Simonoff, J. S. (2003) \emph{Analyzing Categorical Data}, New York: Springer-Verlag. \cr Yee, T. W. (2010) The \pkg{VGAM} package for categorical data analysis. \emph{Journal of Statistical Software}, \bold{32}, 1--34. \url{http://www.jstatsoft.org/v32/i10/}. %Documentation accompanying the \pkg{VGAM} package at %\url{https://www.stat.auckland.ac.nz/~yee} %contains further information and examples. } \author{ Thomas W. Yee } \note{ The response should be either a matrix of counts (with row sums that are all positive), or an ordered factor. In both cases, the \code{y} slot returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix of counts. For a nominal (unordered) factor response, the multinomial logit model (\code{\link{multinomial}}) is more appropriate. Here is an example of the usage of the \code{parallel} argument. If there are covariates \code{x1}, \code{x2} and \code{x3}, then \code{parallel = TRUE ~ x1 + x2 -1} and \code{parallel = FALSE ~ x3} are equivalent. This would constrain the regression coefficients for \code{x1} and \code{x2} to be equal; those of the intercepts and \code{x3} would be different. } \section{Warning }{ No check is made to verify that the response is ordinal if the response is a matrix; see \code{\link[base:factor]{ordered}}. } \seealso{ \code{\link{cumulative}}, \code{\link{cratio}}, \code{\link{sratio}}, \code{\link{multinomial}}, \code{\link{margeff}}, \code{\link{pneumo}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit <- vglm(cbind(normal, mild, severe) ~ let, acat, data = pneumo)) coef(fit, matrix = TRUE) constraints(fit) model.matrix(fit) } \keyword{models} \keyword{regression} %pneumo$let <- log(pneumo$exposure.time) VGAM/man/slashUC.Rd0000644000176200001440000000524313565414527013430 0ustar liggesusers\name{Slash} \alias{Slash} \alias{dslash} \alias{pslash} \alias{rslash} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Slash Distribution } \description{ Density function, distribution function, and random generation for the slash distribution. } \usage{ dslash(x, mu = 0, sigma = 1, log = FALSE, smallno = .Machine$double.eps*1000) pslash(q, mu = 0, sigma = 1, very.negative = -10000, lower.tail = TRUE, log.p = FALSE) rslash(n, mu = 0, sigma = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x, q}{vector of quantiles.} \item{n}{ Same as \code{\link[stats]{runif}}. % number of observations. Must be a single positive integer. } \item{mu, sigma}{the mean and standard deviation of the univariate normal distribution. } \item{log}{ Logical. If \code{TRUE} then the logarithm of the density is returned. } \item{very.negative}{ Numeric, of length 1. A large negative value. For \code{(q-mu)/sigma} values less than this, the value 0 is returned because \code{\link[stats]{integrate}} tends to fail. A warning is issued. Similarly, if \code{(q-mu)/sigma} is greater than \code{abs(very.negative)} then 1 is returned with a warning. } \item{smallno}{ See \code{\link{slash}}. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. 
} } \details{ See \code{\link{slash}}, the \pkg{VGAM} family function for estimating the two parameters by maximum likelihood estimation, for the formula of the probability density function and other details. Function \code{\link{pslash}} uses a \code{for ()} loop and \code{\link[stats]{integrate}}, meaning it's very slow. It may also be inaccurate for extreme values of \code{q}, and it returns 0 or 1 when \code{q} is too extreme relative to \code{very.negative}. } \value{ \code{dslash} gives the density, \code{pslash} gives the distribution function, and \code{rslash} generates random deviates. } %\references{ } \author{ Thomas W. Yee and C. S. Chee} \note{ \code{pslash} is very slow. } \seealso{ \code{\link{slash}}. } \examples{ \dontrun{ curve(dslash, col = "blue", ylab = "f(x)", -5, 5, ylim = c(0, 0.4), las = 1, main = "Standard slash, normal and Cauchy densities", lwd = 2) curve(dnorm, col = "black", lty = 2, lwd = 2, add = TRUE) curve(dcauchy, col = "orange", lty = 3, lwd = 2, add = TRUE) legend("topleft", c("slash", "normal", "Cauchy"), lty = 1:3, col = c("blue","black","orange"), lwd = 2) curve(pslash, col = "blue", -5, 5, ylim = 0:1) pslash(c(-Inf, -20000, 20000, Inf)) # Gives a warning } } \keyword{distribution} VGAM/man/bifgmexp.Rd0000644000176200001440000000661213565414527013670 0ustar liggesusers\name{bifgmexp} \alias{bifgmexp} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bivariate Farlie-Gumbel-Morgenstern Exponential Distribution Family Function } \description{ Estimate the association parameter of the FGM bivariate exponential distribution by maximum likelihood estimation. } \usage{ bifgmexp(lapar = "rhobitlink", iapar = NULL, tola0 = 0.01, imethod = 1) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lapar}{ Link function for the association parameter \eqn{\alpha}{alpha}, which lies between \eqn{-1} and \eqn{1}. See \code{\link{Links}} for more choices and other information. } \item{iapar}{ Numeric. Optional initial value for \eqn{\alpha}{alpha}. By default, an initial value is chosen internally. If a convergence failure occurs try assigning a different value. Assigning a value will override the argument \code{imethod}. } \item{tola0}{ Positive numeric. If the estimate of \eqn{\alpha}{alpha} has an absolute value less than this then it is replaced by this value. This is an attempt to fix a numerical problem when the estimate is too close to zero. } \item{imethod}{ An integer with value \code{1} or \code{2} which specifies the initialization method. If failure to converge occurs try the other value, or else specify a value for \code{iapar}. } } \details{ The cumulative distribution function is \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = e^{-y_1-y_2} ( 1 + \alpha [1 - e^{-y_1}] [1 - e^{-y_2}] ) + 1 - e^{-y_1} - e^{-y_2} }{% P(Y1 <= y1, Y2 <= y2) = exp(-y1-y2) * ( 1 + alpha * [1 - exp(-y1)] * [1 - exp(-y2)] ) + 1 - exp(-y1) - exp(-y2) } for \eqn{\alpha}{alpha} between \eqn{-1} and \eqn{1}. The support of the function is for \eqn{y_1>0}{y1>0} and \eqn{y_2>0}{y2>0}. The marginal distributions are exponential with unit mean. When \eqn{\alpha = 0}{alpha=0} the random variables are independent, and this causes some problems in the estimation process since the distribution no longer depends on the parameter. A variant of Newton-Raphson is used, which only seems to work for an intercept-only model. It is a very good idea to set \code{trace = TRUE}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). 
The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005) \emph{Extreme Value and Related Models with Applications in Engineering and Science}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \note{ The response must be a two-column matrix. Currently, the fitted value is a matrix with two columns and values equal to 1. This is because each marginal distribution corresponds to an exponential distribution with unit mean. This \pkg{VGAM} family function should be used with caution. } \seealso{ \code{\link{bifgmcop}}, \code{\link{bigumbelIexp}}. } \examples{ N <- 1000; mdata <- data.frame(y1 = rexp(N), y2 = rexp(N)) \dontrun{plot(mdata)} fit <- vglm(cbind(y1, y2) ~ 1, bifgmexp, data = mdata, trace = TRUE) fit <- vglm(cbind(y1, y2) ~ 1, bifgmexp, data = mdata, # This may fail trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) head(fitted(fit)) } \keyword{models} \keyword{regression} VGAM/man/identitylink.Rd0000644000176200001440000000474513565414527014573 0ustar liggesusers\name{identitylink} \alias{identitylink} \alias{negidentitylink} % \alias{negidentity} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Identity Link Function } \description{ Computes the identity transformation, including its inverse and the first two derivatives. } \usage{ identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) negidentitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The identity link function \eqn{g(\theta)=\theta}{g(theta)=theta} should be available to every parameter estimated by the \pkg{VGAM} library. However, it usually results in numerical problems because the estimates lie outside the permitted range. Consequently, the result may contain \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The function \code{negidentitylink} is the negative-identity link function and corresponds to \eqn{g(\theta)=-\theta}{g(theta)=-theta}. This is useful for some models, e.g., in the literature supporting the \code{\link{gevff}} function it seems that half of the authors use \eqn{\xi=-k}{xi=-k} for the shape parameter and the other half use \eqn{k} instead of \eqn{\xi}{xi}. } \value{ For \code{identitylink()}: for \code{deriv = 0}, the identity of \code{theta}, i.e., \code{theta} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{theta}. For \code{deriv = 1}, the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. For \code{negidentitylink()}: the results are similar to \code{identitylink()} except for a sign change in most cases. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \seealso{ \code{\link{Links}}, \code{\link{loglink}}, \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{powerlink}}. 
} \examples{ identitylink((-5):5) identitylink((-5):5, deriv = 1) identitylink((-5):5, deriv = 2) negidentitylink((-5):5) negidentitylink((-5):5, deriv = 1) negidentitylink((-5):5, deriv = 2) } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/gatpoisson.mix.Rd0000644000176200001440000001703413565414527015051 0ustar liggesusers\name{gatpoisson.mix} \alias{gatpoisson.mix} %\alias{gapoissonff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generally-Altered and -Truncated Poisson Regression Family Function (GAT--Pois--Pois Mixture Variant) } \description{ Fits a generally-altered and -truncated Poisson regression (mixtures of Poissons on differing support). The truncation may include values in the upper tail. } \usage{ gatpoisson.mix(alter = NULL, truncate = NULL, max.support = Inf, zero = "pobs.a", parallel = FALSE, lpobs.a = "logitlink", llambda.p = "loglink", llambda.a = "loglink", type.fitted = c("mean", "pobs.a", "Pobs.a", "prob.a", "prob.t", "lhs.prob"), imethod = 1, ilambda.p = NULL, ilambda.a = NULL, ishrinkage = 0.95, probs.y = 0.35) } %- maybe also 'usage' for other objects documented here. % ipobs0 = NULL, \arguments{ \item{alter, truncate}{ Vector of altered and truncated values, i.e., nonnegative integers. \emph{Note:} \code{alter} \emph{must be assigned a vector of length 2 or more for this function to work}. Both arguments must have unique values only, and no values in common. In contrast, \code{truncate} may be \code{NULL}, which stands for an empty set. The default settings should make this function equivalent to \code{\link{poissonff}}. % Must be sorted and have unique values only. } \item{lpobs.a, llambda.p, llambda.a}{ Link functions; the \code{.p} and \code{.a} refer to the parent and altered distributions respectively. See \code{\link{Links}} for more choices and information. } \item{parallel}{ Constrain the rate parameters to be equal? See \code{\link{CommonVGAMffArguments}} for information. After plotting the responses, if the distribution of the spikes has roughly the same shape as the ordinary values then setting this argument to \code{TRUE} would be a good idea. And if \code{alter} is of length 2 or thereabouts then \code{TRUE} should definitely be entertained. } \item{type.fitted, max.support}{ See \code{\link{CommonVGAMffArguments}} and \code{\link{gatpoisson.mlm}} for information. % The choice \code{"pnotAT"} is the denominator of one of the terms of % the mean; it is one minus the sum of the parent PMF evaluated over % values of \code{alter} and \code{truncate}. % The choice \code{"pobs.a"} is the probability of an altered value, % and \code{"onempobs.a"} is its complement. % See below for more details. The choice \code{"lhs.prob"} is 1 minus the probability of a value greater than \code{max.support}, using the parent distribution. } \item{imethod, ilambda.p, ilambda.a}{ See \code{\link{CommonVGAMffArguments}} for information. % ipobs0, } \item{probs.y, ishrinkage}{ See \code{\link{CommonVGAMffArguments}} for information. } \item{zero}{ See \code{\link{CommonVGAMffArguments}} for information. Having \code{zero = "pobs.a"} will model the mixing probability as simply as possible (intercept-only), hence should be more numerically stable than \code{NULL}; and \code{zero = "pobs.a"} is recommended for many analyses, especially when there are many explanatory variables. 
Note that the default value of this argument is not \code{NULL}, hence this family function should ideally be called \code{gatpoisson.mixff} to keep it consistent with other family function names such as \code{\link{zipoissonff}}, \code{\link{zapoissonff}}, etc. } } \details{ The distribution being fitted can be abbreviated GAT-Pois-Pois, where the inner distribution for ordinary values is the Poisson distribution, and the outer distribution for the altered values is another Poisson distribution with a different rate parameter by default. Thus the distribution being fitted is a mixture of two Poissons with differing support. The two rate parameters may be constrained to be equal using \code{parallel}. By default, a logistic regression models the probability that the response is altered. This function currently does not handle multiple responses. Further details are at \code{\link{Gaitpois.mix}}. An alternative variant of this distribution is based on the MLM---details can be found in \code{\link{gatpoisson.mlm}}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. The \code{fitted.values} slot of the fitted object, which should be extracted by the generic function \code{fitted}, is similar to \code{\link{gatpoisson.mlm}}. } \references{ Yee, T. W. and Ma, C. C. (2019) Generally-altered, -inflated and -truncated count regression, with application to heaped and seeped data. \emph{In preparation}. %, \bold{3}, 15--41. } %20111123; this has been fixed up with proper FS using EIM. %\section{Warning }{ % Inference obtained from \code{summary.vglm} % and \code{summary.vgam} may or may not be correct. % In particular, the p-values, standard errors and degrees of % freedom may need adjustment. Use simulation on artificial % data to check that these are reasonable. % % %} \section{Warning }{ The same caution needed for \code{\link{gatpoisson.mlm}} applies here; however, this function is a bit more parametric (structured) in comparison, especially when \code{parallel = TRUE}. } \author{ T. W. Yee} \note{ The defaults for this family function may change in the future as more experience is obtained using it. If \code{length(alter)} is very small then it is probably a good idea to set \code{parallel = FALSE ~ 0} (equivalently, \code{parallel = TRUE}) so that the estimation can borrow strength from both the altered and non-altered values. Numerical problems can easily arise because of the flexibility of this distribution. % This is not true, as 'alter' needs a 2-vector at least: % This family function effectively % renders the following functions as obsolete % (or rather, they are just special cases): % \code{\link{pospoisson}}, % \code{\link{zapoisson}}. } \seealso{ \code{\link{Gaitpois.mix}}, \code{\link{gatpoisson.mlm}}, \code{\link{gatnbinomial.mix}}, \code{\link{rpospois}}, \code{\link{CommonVGAMffArguments}}, \code{\link{simulate.vlm}}. 
% \code{\link{multinomial}}, % \code{\link{zapoisson}}, % \code{\link{gatnbinomial.mlm}}, % \code{\link{gipoisson}}, } \examples{ avec <- c(3, 15) # Alter these values tvec <- c(5, 7) # Truncate these values pobs.a <- logitlink(-1, inverse = TRUE) # About 0.27 max.support <- 20 gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, lambda.p = exp(2 + 0.5 * x2)) gdata <- transform(gdata, y1 = rgaitpois.mix(nn, lambda.p = lambda.p, pobs.a = pobs.a, truncate = tvec, max.support = max.support, alter = avec)) gatpoisson.mix(alter = avec) with(gdata, table(y1)) fit1 <- vglm(y1 ~ x2, crit = "coef", trace = TRUE, data = gdata, gatpoisson.mix(alter = avec, truncate = tvec, zero = "pobs.a", parallel = TRUE, max.support = max.support)) head(fitted(fit1, type.fitted = "Pobs.a")) head(predict(fit1)) coef(fit1, matrix = TRUE) summary(fit1) } \keyword{models} \keyword{regression} %gapoisson(lpobs0 = "logitlink", llambda = "loglink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL) %gapoissonff(llambda = "loglink", lonempobs0 = "logitlink", % type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0") VGAM/man/TICvlm.Rd0000644000176200001440000001130513565414527013220 0ustar liggesusers\name{TIC} \alias{TIC} \alias{TICvlm} %\alias{TICvglm} %\alias{TICvgam} %\alias{TICrrvglm} %\alias{TICqrrvglm} %\alias{TICrrvgam} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Takeuchi's Information Criterion } \description{ Calculates the Takeuchi information criterion for a fitted model object for which a log-likelihood value has been obtained. } \usage{ TIC(object, \dots) TICvlm(object, \dots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \pkg{VGAM} object having class \code{\link{vglm-class}}. % , for example, } \item{\dots}{ Other possible arguments fed into \code{logLik} in order to compute the log-likelihood. } % \item{corrected}{ % Logical, perform the finite sample correction? % } % \item{k}{ % Numeric, the penalty per parameter to be used; % the default is the classical TIC. % } } \details{ The following formula is used for VGLMs: \eqn{-2 \mbox{log-likelihood} + 2 trace(V K)}{-2*log-likelihood + 2 * trace(V K)}, where \eqn{V} is the inverse of the EIM from the fitted model, and \eqn{K} is the outer product of the score vectors. Both \eqn{V} and \eqn{K} are order-\eqn{p.VLM} matrices. One has \eqn{V} equal to \code{vcov(object)}, and \eqn{K} is computed by taking the outer product of the output from the \code{deriv} slot multiplied by the large VLM matrix and then taking their sum. Hence for the huge majority of models, the penalty is computed at the MLE and is empirical in nature. Theoretically, if the fitted model is the true model then AIC equals TIC. When there are prior weights the score vectors are divided by the square root of these, because \eqn{ (a_i U_i / \sqrt{a_i})^2 = a_i U_i^2}. % This is the function \code{TICvlm()}. This code relies on the log-likelihood being defined, and computed, for the object. When comparing fitted objects, the smaller the TIC, the better the fit. The log-likelihood and hence the TIC is only defined up to an additive constant. Currently any estimated scale parameter (in GLM parlance) is ignored by treating its value as unity. Also, currently this function is written only for \code{\link{vglm}} objects and not \code{\link{vgam}} or \code{\link{rrvglm}}, etc., objects. } \value{ Returns a numeric TIC value. } \author{T. W. Yee. 
} \note{ TIC has not been defined for RR-VGLMs, QRR-VGLMs, etc., yet. See \code{\link{AICvlm}} about models such as \code{\link{posbernoulli.tb}} that require \code{posbinomial(omit.constant = TRUE)}. } \references{ Takeuchi, K. (1976) Distribution of informational statistics and a criterion of model fitting. (In Japanese). \emph{Suri-Kagaku} (Mathematic Sciences), \bold{153}, 12--18. %Distribution of informational statistics and a criterion of model %fitting. %Suri-Kagaku (Mathematic Sciences) 153, 12--18. (In Japanese). Burnham, K. P. and Anderson, D. R. (2002) \emph{Model Selection and Multi-Model Inference: A Practical Information-Theoretic Approach}, 2nd ed. New York, USA: Springer. } \section{Warning }{ This code has not been double-checked. The general applicability of \code{TIC} for the VGLM/VGAM classes has not been developed fully. In particular, \code{TIC} should not be run on some \pkg{VGAM} family functions because of violation of certain regularity conditions, etc. Some authors note that quite large sample sizes are needed for this IC to work reasonably well. % Sociological Methods and Research article, p.270. % Some authors note that numerical instability may occur for this IC. } \seealso{ VGLMs are described in \code{\link{vglm-class}}; \code{\link[stats]{AIC}}, \code{\link{AICvlm}}. \code{\link{BICvlm}}. % VGAMs are described in \code{\link{vgam-class}}; % RR-VGLMs are described in \code{\link{rrvglm-class}}; } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit1 <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)) coef(fit1, matrix = TRUE) TIC(fit1) (fit2 <- vglm(cbind(normal, mild, severe) ~ let, cumulative(parallel = FALSE, reverse = TRUE), data = pneumo)) coef(fit2, matrix = TRUE) TIC(fit2) } \keyword{models} \keyword{regression} %uiowa.edu 2011 thesis by Cristina Laura Acion: %Shibata (1989) noted that the error incurred by this additional %estimation can cause instability of the model selection results yielded %by TIC. Therefore, TIC is not universally recommended (Burnham and %Anderson, 2002). %However, a data-dependent estimator might also be highly variable. This %issue discourages some authors to recommend the use of TIC (Burnham and %Anderson, 2002). VGAM/man/mix2poisson.Rd0000644000176200001440000001227313565414527014361 0ustar liggesusers\name{mix2poisson} \alias{mix2poisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Mixture of Two Poisson Distributions } \description{ Estimates the three parameters of a mixture of two Poisson distributions by maximum likelihood estimation. } \usage{ mix2poisson(lphi = "logitlink", llambda = "loglink", iphi = 0.5, il1 = NULL, il2 = NULL, qmu = c(0.2, 0.8), nsimEIM = 100, zero = "phi") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lphi, llambda}{ Link functions for the parameter \eqn{\phi}{phi} and \eqn{\lambda}{lambda}. See \code{\link{Links}} for more choices. } % \item{ephi, el1, el2}{ % ephi = list(), el1 = list(), el2 = list(), % List. Extra argument for each of the links. % See \code{earg} in \code{\link{Links}} for general information. % } \item{iphi}{ Initial value for \eqn{\phi}{phi}, whose value must lie between 0 and 1. } \item{il1, il2}{ Optional initial value for \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2}. These values must be positive. The default is to compute initial values internally using the argument \code{qmu}. 
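For example, the London Times example below supplies \code{iphi = 0.3}, \code{il1 = 1} and \code{il2 = 2.5}.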
% If these arguments are supplied then practical experience % suggests they should be quite well-separated. } \item{qmu}{ Vector with two values giving the probabilities relating to the sample quantiles for obtaining initial values for \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2}. The two values are fed in as the \code{probs} argument into \code{\link[stats]{quantile}}. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The probability function can be loosely written as \deqn{P(Y=y) = \phi \, Poisson(\lambda_1) + (1-\phi) \, Poisson(\lambda_2)}{% P(Y=y) = phi * Poisson(lambda1) + (1-phi) * Poisson(lambda2)} where \eqn{\phi}{phi} is the probability an observation belongs to the first group, and \eqn{y=0,1,2,\ldots}{y=0,1,2,...}. The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}. The mean of \eqn{Y} is \eqn{\phi \lambda_1 + (1-\phi) \lambda_2}{phi*lambda1 + (1-phi)*lambda2} and this is returned as the fitted values. By default, the three linear/additive predictors are \eqn{(logit(\phi), \log(\lambda_1), \log(\lambda_2))^T}{(logit(phi), log(lambda1), log(lambda2))^T}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } % \references{ ~put references to the literature/web site here ~ } \section{Warning }{ This \pkg{VGAM} family function requires care for a successful application. In particular, good initial values are required because of the presence of local solutions. Therefore running this function with several different combinations of arguments such as \code{iphi}, \code{il1}, \code{il2}, \code{qmu} is highly recommended. Graphical methods such as \code{\link[graphics]{hist}} can be used as an aid. With grouped data (i.e., using the \code{weights} argument) one has to use a large value of \code{nsimEIM}; see the example below. This \pkg{VGAM} family function is experimental and should be used with care. } \author{ T. W. Yee } \note{ The response must be integer-valued since \code{\link[stats]{dpois}} is invoked. Fitting this model successfully to data can be difficult due to local solutions and ill-conditioned data. It pays to fit the model several times with different initial values, and check that the best fit looks reasonable. Plotting the results is recommended. This function works better as \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2} become more different. The default control argument \code{trace = TRUE} is to encourage monitoring convergence. } \seealso{ \code{\link[stats:Poisson]{rpois}}, \code{\link{poissonff}}, \code{\link{mix2normal}}. 
} \examples{ \dontrun{ # Example 1: simulated data nn <- 1000 mu1 <- exp(2.5) # Also known as lambda1 mu2 <- exp(3) (phi <- logitlink(-0.5, inverse = TRUE)) mdata <- data.frame(y = rpois(nn, ifelse(runif(nn) < phi, mu1, mu2))) mfit <- vglm(y ~ 1, mix2poisson, data = mdata) coef(mfit, matrix = TRUE) # Compare the results with the truth round(rbind('Estimated' = Coef(mfit), 'Truth' = c(phi, mu1, mu2)), digits = 2) ty <- with(mdata, table(y)) plot(names(ty), ty, type = "h", main = "Orange=estimate, blue=truth", ylab = "Frequency", xlab = "y") abline(v = Coef(mfit)[-1], lty = 2, col = "orange", lwd = 2) abline(v = c(mu1, mu2), lty = 2, col = "blue", lwd = 2) # Example 2: London Times data (Lange, 1997, p.31) ltdata1 <- data.frame(deaths = 0:9, freq = c(162, 267, 271, 185, 111, 61, 27, 8, 3, 1)) ltdata2 <- data.frame(y = with(ltdata1, rep(deaths, freq))) # Usually this does not work well unless nsimEIM is large Mfit <- vglm(deaths ~ 1, weight = freq, data = ltdata1, mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5, nsimEIM = 5000)) # This works better in general Mfit <- vglm(y ~ 1, mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5), data = ltdata2) coef(Mfit, matrix = TRUE) Coef(Mfit) } } \keyword{models} \keyword{regression} VGAM/man/inv.lomaxUC.Rd0000644000176200001440000000374013565414527014231 0ustar liggesusers\name{Inv.lomax} \alias{Inv.lomax} \alias{dinv.lomax} \alias{pinv.lomax} \alias{qinv.lomax} \alias{rinv.lomax} \title{The Inverse Lomax Distribution} \description{ Density, distribution function, quantile function and random generation for the inverse Lomax distribution with shape parameter \code{p} and scale parameter \code{scale}. } \usage{ dinv.lomax(x, scale = 1, shape2.p, log = FALSE) pinv.lomax(q, scale = 1, shape2.p, lower.tail = TRUE, log.p = FALSE) qinv.lomax(p, scale = 1, shape2.p, lower.tail = TRUE, log.p = FALSE) rinv.lomax(n, scale = 1, shape2.p) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape2.p}{shape parameter.} \item{scale}{scale parameter.} \item{log}{ Logical. If \code{log = TRUE} then the logarithm of the density is returned. } \item{lower.tail, log.p}{ Same meaning as in \code{\link[stats:Normal]{pnorm}} or \code{\link[stats:Normal]{qnorm}}. } } \value{ \code{dinv.lomax} gives the density, \code{pinv.lomax} gives the distribution function, \code{qinv.lomax} gives the quantile function, and \code{rinv.lomax} generates random deviates. } \references{ Kleiber, C. and Kotz, S. (2003) \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Hoboken, NJ, USA: Wiley-Interscience. } \author{ T. W. Yee } \details{ See \code{\link{inv.lomax}}, which is the \pkg{VGAM} family function for estimating the parameters by maximum likelihood estimation. } \note{ The inverse Lomax distribution is a special case of the 4-parameter generalized beta II distribution. } \seealso{ \code{\link{inv.lomax}}, \code{\link{genbetaII}}. } \examples{ idata <- data.frame(y = rinv.lomax(n = 1000, exp(2), exp(1))) fit <- vglm(y ~ 1, inv.lomax, data = idata, trace = TRUE, crit = "coef") coef(fit, matrix = TRUE) Coef(fit) } \keyword{distribution} VGAM/man/erlang.Rd0000644000176200001440000000557313565414527013344 0ustar liggesusers\name{erlang} \alias{erlang} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Erlang Distribution } \description{ Estimates the scale parameter of the Erlang distribution by maximum likelihood estimation. } \usage{ erlang(shape.arg, lscale = "loglink", imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{shape.arg}{ The shape parameters. The user must specify a positive integer, or integers for multiple responses. They are recycled \code{by.row = TRUE} according to \code{\link[base]{matrix}}. } \item{lscale}{ Link function applied to the (positive) \eqn{scale} parameter. See \code{\link{Links}} for more choices. } \item{imethod, zero}{ See \code{\link{CommonVGAMffArguments}} for more details. } } \details{ The Erlang distribution is a special case of the gamma distribution with \emph{shape} that is a positive integer. If \code{shape.arg = 1} then it simplifies to the exponential distribution. As illustrated in the example below, the Erlang distribution is the distribution of the sum of \code{shape.arg} independent and identically distributed exponential random variates. The probability density function of the Erlang distribution is given by \deqn{f(y) = \exp(-y/scale) y^{shape-1} scale^{-shape} / \Gamma(shape)}{% f(y) = exp(-y/scale) y^(shape-1) scale^(-shape) / gamma(shape)} for known positive integer \eqn{shape}, unknown \eqn{scale > 0} and \eqn{y > 0}. Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma function, as in \code{\link[base:Special]{gamma}}. The mean of \emph{Y} is \eqn{\mu=shape \times scale}{mu=shape*scale} and its variance is \eqn{shape \times scale^2}{shape*scale^2}. The linear/additive predictor, by default, is \eqn{\eta=\log(scale)}{eta=log(scale)}. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ Most standard texts on statistical distributions describe this distribution, e.g., Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011) \emph{Statistical Distributions}, Hoboken, NJ, USA: John Wiley and Sons, Fourth edition. } \author{ T. W. Yee } \note{ Multiple responses are permitted. The \code{rate} parameter found in \code{\link{gammaR}} is \code{1/scale} here---see also \code{\link[stats]{rgamma}}. } \seealso{ \code{\link{gammaR}}, \code{\link{exponential}}, \code{\link{simulate.vlm}}. } \examples{ rate <- exp(2); myshape <- 3 edata <- data.frame(y = rep(0, nn <- 1000)) for (ii in 1:myshape) edata <- transform(edata, y = y + rexp(nn, rate = rate)) fit <- vglm(y ~ 1, erlang(shape = myshape), data = edata, trace = TRUE) coef(fit, matrix = TRUE) Coef(fit) # Answer = 1/rate 1/rate summary(fit) } \keyword{models} \keyword{regression} VGAM/man/gev.Rd0000644000176200001440000002711313565414527012647 0ustar liggesusers\name{gev} \alias{gev} \alias{gevff} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Generalized Extreme Value Regression Family Function } \description{ Maximum likelihood estimation of the 3-parameter generalized extreme value (GEV) distribution. 
} \usage{ gev(llocation = "identitylink", lscale = "loglink", lshape = logofflink(offset = 0.5), percentiles = c(95, 99), ilocation = NULL, iscale = NULL, ishape = NULL, imethod = 1, gprobs.y = (1:9)/10, gscale.mux = exp((-5:5)/6), gshape = (-5:5) / 11 + 0.01, iprobs.y = NULL, tolshape0 = 0.001, type.fitted = c("percentiles", "mean"), zero = c("scale", "shape")) gevff(llocation = "identitylink", lscale = "loglink", lshape = logofflink(offset = 0.5), percentiles = c(95, 99), ilocation = NULL, iscale = NULL, ishape = NULL, imethod = 1, gprobs.y = (1:9)/10, gscale.mux = exp((-5:5)/6), gshape = (-5:5) / 11 + 0.01, iprobs.y = NULL, tolshape0 = 0.001, type.fitted = c("percentiles", "mean"), zero = c("scale", "shape")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llocation, lscale, lshape}{ Parameter link functions for \eqn{\mu}{mu}, \eqn{\sigma}{sigma} and \eqn{\xi}{xi} respectively. See \code{\link{Links}} for more choices. For the shape parameter, the default \code{\link{logofflink}} link has an offset called \eqn{A} below; and then the linear/additive predictor is \eqn{\log(\xi+A)}{log(xi+A)} which means that \eqn{\xi > -A}{xi > -A}. For technical reasons (see \bold{Details}) it is a good idea for \eqn{A = 0.5}. } % \item{Offset}{ % Numeric, of length 1. % Called \eqn{A} below. % Offset value if \code{lshape = "logofflink"}. % Then the linear/additive predictor is % \eqn{\log(\xi+A)}{log(xi+A)} which means that % \eqn{\xi > -A}{xi > -A}. % For technical reasons (see \bold{Details}) it is a good idea for % \code{Offset = 0.5}. % } \item{percentiles}{ Numeric vector of percentiles used for the fitted values. Values should be between 0 and 100. This argument is ignored if \code{type.fitted = "mean"}. % 20140912: this is still true, but using 'type.fitted' is better. % However, if \code{percentiles = NULL}, then the mean % \eqn{\mu + \sigma (\Gamma(1-\xi)-1) / \xi}{mu + sigma * (gamma(1-xi)-1)/xi} % is returned, and this is only defined if \eqn{\xi<1}{xi<1}. } \item{type.fitted}{ See \code{\link{CommonVGAMffArguments}} for information. The default is to use the \code{percentiles} argument. If \code{"mean"} is chosen, then the mean \eqn{\mu + \sigma (\Gamma(1-\xi)-1) / \xi}{mu + sigma * (gamma(1-xi)-1)/xi} is returned as the fitted values, and these are only defined for \eqn{\xi<1}{xi<1}. } \item{ilocation, iscale, ishape}{ Numeric. Initial value for the location parameter, \eqn{\sigma}{sigma} and \eqn{\xi}{xi}. A \code{NULL} means a value is computed internally. The argument \code{ishape} is more important than the other two. If a failure to converge occurs, or even a failure to obtain initial values, try assigning \code{ishape} some value (positive or negative; the sign can be very important). Also, in general, a larger value of \code{iscale} tends to be better than a smaller value. % because they are initialized from the initial \eqn{\xi}{xi}. } % \item{rshape}{ % Numeric, of length 2. % Range of \eqn{\xi}{xi} if \code{lshape = "extlogitlink"} is chosen. % The rationale for the default values is given below. % } % \item{mean}{ % Logical. If \code{TRUE}, the mean is computed and returned % as the fitted values. This argument overrides the % \code{percentiles} argument. % See \bold{Details} for more details. % } \item{imethod}{ Initialization method. Either the value 1 or 2. If both methods fail then try using \code{ishape}. See \code{\link{CommonVGAMffArguments}} for information. 
% Method 1 involves choosing the best \eqn{\xi}{xi} on the grid values % given by \code{gshape}. % Method 2 is similar to the method of moments. } \item{gshape}{ Numeric vector. The values are used for a grid search for an initial value for \eqn{\xi}{xi}. See \code{\link{CommonVGAMffArguments}} for information. % Used only if \code{imethod} equals 1. } \item{gprobs.y, gscale.mux, iprobs.y}{ Numeric vectors, used for the initial values. See \code{\link{CommonVGAMffArguments}} for information. } \item{tolshape0}{ Passed into \code{\link{dgev}} when computing the log-likelihood. } \item{zero}{ A vector specifying which linear/additive predictors are modelled as intercepts only. The values can be from the set \{1,2,3\} corresponding respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi}. If \code{zero = NULL} then all linear/additive predictors are modelled as a linear combination of the explanatory variables. For many data sets having \code{zero = 3} is a good idea. See \code{\link{CommonVGAMffArguments}} for information. } } \details{ The GEV distribution function can be written \deqn{G(y) = \exp( -[ (y-\mu)/ \sigma ]_{+}^{- 1/ \xi}) }{% G(y) = exp( -[ (y- mu)/ sigma ]_{+}^{- 1/ xi}) } where \eqn{\sigma > 0}{sigma > 0}, \eqn{-\infty < \mu < \infty}{-Inf < mu < Inf}, and \eqn{1 + \xi(y-\mu)/\sigma > 0}{1 + xi*(y-mu)/sigma > 0}. Here, \eqn{x_+ = \max(x,0)}{x_+ = max(x,0)}. The \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi} are known as the \emph{location}, \emph{scale} and \emph{shape} parameters respectively. The cases \eqn{\xi>0}{xi>0}, \eqn{\xi<0}{xi<0}, \eqn{\xi = 0}{xi = 0} correspond to the Frechet, reverse Weibull, and Gumbel types respectively. It can be noted that the Gumbel (or Type I) distribution accommodates many commonly-used distributions such as the normal, lognormal, logistic, gamma, exponential and Weibull. For the GEV distribution, the \eqn{k}th moment about the mean exists if \eqn{\xi < 1/k}{xi < 1/k}. Provided they exist, the mean and variance are given by \eqn{\mu+\sigma\{ \Gamma(1-\xi)-1\}/ \xi}{mu + sigma \{ Gamma(1-xi)-1\} / xi} and \eqn{\sigma^2 \{ \Gamma(1-2\xi) - \Gamma^2(1-\xi) \} / \xi^2}{sigma^2 \{ Gamma(1-2 xi) - Gamma^2 (1- xi) \} / xi^2} respectively, where \eqn{\Gamma}{Gamma} is the gamma function. Smith (1985) established that when \eqn{\xi > -0.5}{xi > -0.5}, the maximum likelihood estimators are completely regular. To have some control over the estimated \eqn{\xi}{xi} try using \code{lshape = logofflink(offset = 0.5)}, say, or \code{lshape = extlogitlink(min = -0.5, max = 0.5)}, say. % and when \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5} they exist but are % non-regular; and when \eqn{\xi < -1}{xi < -1} then the maximum % likelihood estimators do not exist. In most environmental data % sets \eqn{\xi > -1}{xi > -1} so maximum likelihood works fine. } \section{Warning }{ Currently, if an estimate of \eqn{\xi}{xi} is too close to 0 then an error may occur for \code{gev()} with multivariate responses. In general, \code{gevff()} is more reliable than \code{gev()}. Fitting the GEV by maximum likelihood estimation can be numerically fraught. If \eqn{1 + \xi (y-\mu)/ \sigma \leq 0}{1 + xi*(y-mu)/sigma <= 0} then some crude evasive action is taken but the estimation process can still fail. 
This is particularly the case if \code{\link{vgam}} with \code{\link{s}} is used; then smoothing is best done with \code{\link{vglm}} with regression splines (\code{\link[splines]{bs}} or \code{\link[splines]{ns}}) because \code{\link{vglm}} implements half-stepsizing whereas \code{\link{vgam}} doesn't (half-stepsizing helps handle the problem of straying outside the parameter space). } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Yee, T. W. and Stephenson, A. G. (2007) Vector generalized linear and additive extreme value models. \emph{Extremes}, \bold{10}, 1--19. Tawn, J. A. (1988) An extreme-value theory model for dependent observations. \emph{Journal of Hydrology}, \bold{101}, 227--250. Prescott, P. and Walden, A. T. (1980) Maximum likelihood estimation of the parameters of the generalized extreme-value distribution. \emph{Biometrika}, \bold{67}, 723--724. Smith, R. L. (1985) Maximum likelihood estimation in a class of nonregular cases. \emph{Biometrika}, \bold{72}, 67--90. } \author{ T. W. Yee } \note{ The \pkg{VGAM} family function \code{gev} can handle a multivariate (matrix) response, cf. multiple responses. If so, each row of the matrix is sorted into descending order and \code{NA}s are put last. With a vector or one-column matrix response, using \code{gevff} will give the same result but be faster, and it handles the \eqn{\xi = 0}{xi = 0} case. The function \code{gev} implements Tawn (1988) while \code{gevff} implements Prescott and Walden (1980). Function \code{egev()} has been replaced by the new family function \code{gevff()}. It now conforms to the usual \pkg{VGAM} philosophy of having \code{M1} linear predictors per (independent) response. This is the usual way multiple responses are handled. Hence \code{vglm(cbind(y1, y2)\ldots, gevff, \ldots)} will have 6 linear predictors and it is possible to constrain the linear predictors so that the answer is similar to \code{gev()}. Missing values in the response of \code{gevff()} will be deleted; this behaviour is the same as with almost every other \pkg{VGAM} family function. The shape parameter \eqn{\xi}{xi} is difficult to estimate accurately unless there is a lot of data. Convergence is slow when \eqn{\xi}{xi} is near \eqn{-0.5}. Given many explanatory variables, it is often a good idea to make sure \code{zero = 3}. The range restrictions of the parameter \eqn{\xi}{xi} are not enforced; thus it is possible for a violation to occur. Successful convergence often depends on having a reasonably good initial value for \eqn{\xi}{xi}. If failure occurs try various values for the argument \code{ishape}, and if there are covariates, having \code{zero = 3} is advised. } \seealso{ \code{\link{rgev}}, \code{\link{gumbel}}, \code{\link{gumbelff}}, \code{\link{guplot}}, \code{\link{rlplot.gevff}}, \code{\link{gpd}}, \code{\link{weibullR}}, \code{\link{frechet}}, \code{\link{extlogitlink}}, \code{\link{oxtemp}}, \code{\link{venice}}, \code{\link{CommonVGAMffArguments}}. 
%\code{\link{gevff}},
%\code{\link{ogev}},
}
\examples{
\dontrun{
# Multivariate example
fit1 <- vgam(cbind(r1, r2) ~ s(year, df = 3), gev(zero = 2:3),
             data = venice, trace = TRUE)
coef(fit1, matrix = TRUE)
head(fitted(fit1))
par(mfrow = c(1, 2), las = 1)
plot(fit1, se = TRUE, lcol = "blue", scol = "forestgreen",
     main = "Fitted mu(year) function (centered)", cex.main = 0.8)
with(venice, matplot(year, depvar(fit1)[, 1:2], ylab = "Sea level (cm)",
     col = 1:2, main = "Highest 2 annual sea levels", cex.main = 0.8))
with(venice, lines(year, fitted(fit1)[,1], lty = "dashed", col = "blue"))
legend("topleft", lty = "dashed", col = "blue", "Fitted 95th percentile")
# Univariate example
(fit <- vglm(maxtemp ~ 1, gevff, data = oxtemp, trace = TRUE))
head(fitted(fit))
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
vcov(fit, untransform = TRUE)
sqrt(diag(vcov(fit)))  # Approximate standard errors
rlplot(fit)
} }
\keyword{models}
\keyword{regression}
% type.fitted = c("percentiles", "mean"), giveWarning = TRUE,
% \item{gshape}{
% Numeric, of length 2.
% Range of \eqn{\xi}{xi} used for a grid search for a good initial value
% for \eqn{\xi}{xi}.
% Used only if \code{imethod} equals 1.
% }
VGAM/man/zipfUC.Rd0000644000176200001440000000275013565414527013266 0ustar liggesusers\name{Zipf}
\alias{Zipf}
\alias{dzipf}
\alias{pzipf}
\alias{qzipf}
\alias{rzipf}
\title{The Zipf Distribution}
\description{
Density, distribution function, quantile function and random generation
for the Zipf distribution.
}
\usage{
dzipf(x, N, shape, log = FALSE)
pzipf(q, N, shape, log.p = FALSE)
qzipf(p, N, shape)
rzipf(n, N, shape)
}
\arguments{
\item{x, q, p, n}{Same as \code{\link[stats]{Poisson}}. }
\item{N, shape}{
the number of elements, and the exponent characterizing the
distribution.
See \code{\link{zipf}} for more details.
}
\item{log, log.p}{
Same meaning as in \code{\link[stats]{Normal}}.
}
}
\value{
\code{dzipf} gives the density,
\code{pzipf} gives the cumulative distribution function,
\code{qzipf} gives the quantile function, and
\code{rzipf} generates random deviates.
}
\author{ T. W. Yee }
\details{
This is a finite version of the zeta distribution.
See \code{\link{zetaff}} for more details.
In general, these functions run slower and slower as \code{N}
increases.
}
%\note{
%
%}
\seealso{
\code{\link{zipf}},
\code{\link{Zipfmb}}.
}
\examples{
N <- 10; shape <- 0.5; y <- 1:N
proby <- dzipf(y, N = N, shape = shape)
\dontrun{ plot(proby ~ y, type = "h", col = "blue", ylab = "Probability",
ylim = c(0, 0.2), main = paste("Zipf(N = ",N,", shape = ",shape,")", sep = ""),
lwd = 2, las = 1) }
sum(proby)  # Should be 1
max(abs(cumsum(proby) - pzipf(y, N = N, shape = shape)))  # Should be 0
}
\keyword{distribution}
VGAM/man/bifrankcopUC.Rd0000644000176200001440000000322213565414527014427 0ustar liggesusers\name{Frank}
\alias{Frank}
\alias{dbifrankcop}
\alias{pbifrankcop}
\alias{rbifrankcop}
\title{Frank's Bivariate Distribution}
\description{
Density, distribution function, and random generation for the
(one parameter) bivariate Frank distribution.
}
\usage{
dbifrankcop(x1, x2, apar, log = FALSE)
pbifrankcop(q1, q2, apar)
rbifrankcop(n, apar)
}
\arguments{
\item{x1, x2, q1, q2}{vector of quantiles.}
\item{n}{number of observations.
Same as in \code{\link[stats]{runif}}.
}
\item{apar}{the positive association parameter. }
\item{log}{
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.
}
}
\value{
\code{dbifrankcop} gives the density,
\code{pbifrankcop} gives the distribution function, and
\code{rbifrankcop} generates random deviates (a two-column matrix).
}
\references{
Genest, C. (1987)
Frank's family of bivariate distributions.
\emph{Biometrika}, \bold{74}, 549--555.
}
\author{ T. W. Yee }
\details{
See \code{\link{bifrankcop}}, the \pkg{VGAM} family function for
estimating the association parameter by maximum likelihood estimation,
for the formula of the cumulative distribution function and other
details.
}
%\note{
%}
\seealso{
\code{\link{bifrankcop}}.
}
\examples{
\dontrun{N <- 100; apar <- exp(2)
xx <- seq(-0.30, 1.30, len = N)
ox <- expand.grid(xx, xx)
zedd <- dbifrankcop(ox[, 1], ox[, 2], apar = apar)
contour(xx, xx, matrix(zedd, N, N))
zedd <- pbifrankcop(ox[, 1], ox[, 2], apar = apar)
contour(xx, xx, matrix(zedd, N, N))
plot(rr <- rbifrankcop(n = 3000, apar = exp(4)))
par(mfrow = c(1, 2))
hist(rr[, 1]); hist(rr[, 2])  # Should be uniform
}
}
\keyword{distribution}
VGAM/man/cfibrosis.Rd0000644000176200001440000000255413565414527014053 0ustar liggesusers\name{cfibrosis}
\alias{cfibrosis}
\docType{data}
\title{ Cystic Fibrosis Data
%% ~~ data name/kind ... ~~
}
\description{
This data frame concerns family data and cystic fibrosis.
}
\usage{
data(cfibrosis)
}
\format{
A data frame with 24 rows on the following 4 variables.
\describe{
\item{siblings, affected, ascertained, families}{
Over the ascertained families, the \eqn{k}th ascertained family has
\eqn{s_k} siblings of whom \eqn{r_k} are affected and \eqn{a_k} are
ascertained.
}
}
}
\details{
The data set allows a classical segregation analysis to be performed,
in particular, to test Mendelian segregation ratios in nuclear family
data.
The likelihood has similarities with \code{\link{seq2binomial}}.
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
The data is originally from Crow (1965) and appears as Table 2.3 of
Lange (2002).
Crow, J. F. (1965)
Problems of ascertainment in the analysis of family data.
Epidemiology and Genetics of Chronic Disease.
Public Health Service Publication 1163, Neel J. V., Shaw M. W.,
Schull W. J., editors, Department of Health, Education, and Welfare,
Washington, DC, USA.
Lange, K. (2002)
Mathematical and Statistical Methods for Genetic Analysis.
Second Edition. Springer-Verlag: New York, USA.
}
\examples{
cfibrosis
summary(cfibrosis)
}
\keyword{datasets}
VGAM/man/loglinb2.Rd0000644000176200001440000000745213565414527013602 0ustar liggesusers\name{loglinb2}
\alias{loglinb2}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Loglinear Model for Two Binary Responses }
\description{
Fits a loglinear model to two binary responses.
}
\usage{
loglinb2(exchangeable = FALSE, zero = "u12")
}
%loglinb2(exchangeable = FALSE, zero = 3)
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{exchangeable}{ Logical.
If \code{TRUE}, the two marginal probabilities are constrained to be
equal. Should be set \code{TRUE} for data on ears, eyes, etc.
}
\item{zero}{ Which linear/additive predictors are modelled as
intercept-only?
A \code{NULL} means none of them.
See \code{\link{CommonVGAMffArguments}} for more information.
}
}
\details{
The model is
\deqn{P(Y_1=y_1,Y_2=y_2) = \exp(u_0+u_1 y_1+u_2 y_2+u_{12} y_1 y_2)}{%
P(Y1=y1,Y2=y2) = exp(u0 + u1*y1 + u2*y2 + u12*y1*y2)}
where \eqn{y_1}{y1} and \eqn{y_2}{y2} are 0 or 1, and the parameters
are \eqn{u_1}{u1}, \eqn{u_2}{u2}, \eqn{u_{12}}{u12}.
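% An illustrative numerical sketch (not from the original file): with
% hypothetical values of u1, u2 and u12, the four joint probabilities
% can be computed by direct normalization of the above:
% u1 <- 0.5; u2 <- -0.3; u12 <- 0.2
% unnorm <- exp(c(0, u2, u1, u1 + u2 + u12))  # (y1,y2)=(0,0),(0,1),(1,0),(1,1)
% (probs <- unnorm / sum(unnorm))
% sum(probs)  # Should be 1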
The normalizing parameter \eqn{u_0}{u0} can be expressed as a function
of the other parameters, viz.,
\deqn{u_0 = -\log[1 + \exp(u_1) + \exp(u_2) + \exp(u_1 + u_2 + u_{12})].}{%
u0 = -log[1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)].}
The linear/additive predictors are
\eqn{(\eta_1,\eta_2,\eta_3)^T = (u_1,u_2,u_{12})^T}{(eta1,eta2,eta3) =
(u1,u2,u12)}.
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}} and \code{\link{vgam}}.
When fitted, the \code{fitted.values} slot of the object contains the
four joint probabilities, labelled as
\eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively.
}
\references{
Yee, T. W. and Wild, C. J. (2001)
Discussion to: ``Smoothing spline ANOVA for multivariate Bernoulli
observations, with application to ophthalmology data (with discussion)''
by Gao, F., Wahba, G., Klein, R., Klein, B.
\emph{Journal of the American Statistical Association},
\bold{96}, 127--160.
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
}
\author{ Thomas W. Yee }
\note{
The response must be a two-column matrix of ones and zeros only.
This is more restrictive than \code{\link{binom2.or}}, which can handle
more types of input formats.
Note that each of the 4 combinations of the multivariate response
needs to appear in the data set.
After estimation, the response attached to the object is also a
two-column matrix; possibly in the future it might change into a
four-column matrix.
}
\seealso{
\code{\link{binom2.or}},
\code{\link{binom2.rho}},
\code{\link{loglinb3}}.
}
\examples{
coalminers <- transform(coalminers, Age = (age - 42) / 5)
# Get the n x 4 matrix of counts
fit0 <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, data = coalminers)
counts <- round(c(weights(fit0, type = "prior")) * depvar(fit0))
# Create a n x 2 matrix response for loglinb2()
# bwmat <- matrix(c(0,0, 0,1, 1,0, 1,1), 4, 2, byrow = TRUE)
bwmat <- cbind(bln = c(0,0,1,1), wheeze = c(0,1,0,1))
matof1 <- matrix(1, nrow(counts), 1)
newminers <- data.frame(bln    = kronecker(matof1, bwmat[, 1]),
                        wheeze = kronecker(matof1, bwmat[, 2]),
                        wt     = c(t(counts)),
                        Age    = with(coalminers, rep(age, rep(4, length(age)))))
newminers <- newminers[with(newminers, wt) > 0,]
fit <- vglm(cbind(bln,wheeze) ~ Age, loglinb2(zero = NULL),
            weight = wt, data = newminers)
coef(fit, matrix = TRUE)  # Same! (at least for the log odds-ratio)
summary(fit)
# Try to reconcile this with McCullagh and Nelder (1989), p.234
(0.166-0.131) / 0.027458   # 1.275 is approximately 1.25
}
\keyword{models}
\keyword{regression}
VGAM/man/df.residual.Rd0000644000176200001440000000442313565414527014265 0ustar liggesusers\name{df.residual}
\alias{df.residual}
\alias{df.residual_vlm}
%\alias{df.residual.default}
\title{Residual Degrees-of-Freedom}
\description{
Returns the residual degrees-of-freedom extracted from a fitted
VGLM object.
}
\usage{
df.residual_vlm(object, type = c("vlm", "lm"), \dots)
}
\arguments{
\item{object}{
an object for which the degrees-of-freedom are desired,
e.g., a \code{\link{vglm}} object.
}
\item{type}{
the type of residual degrees-of-freedom wanted.
In some applications the 'usual' LM-type value may be more appropriate.
The default is the first choice.
}
\item{\dots}{
additional optional arguments.
}
}
\details{
When a VGLM is fitted, a \emph{large} (VLM) generalized least squares
(GLS) fit is done at each IRLS iteration.
To do this, an ordinary least squares (OLS) fit is performed by
transforming the GLS using Cholesky factors.
The number of rows is \eqn{M} times the `ordinary' number of rows of
the LM-type model: \eqn{nM}.
Here, \eqn{M} is the number of linear/additive predictors.
So the formula for the VLM-type residual degrees-of-freedom is
\eqn{nM - p^{*}} where \eqn{p^{*}} is the number of columns of the
`big' VLM matrix.
The formula for the LM-type residual degrees-of-freedom is
\eqn{n - p_{j}} where \eqn{p_{j}} is the number of columns of the
`ordinary' LM matrix corresponding to the \eqn{j}th linear/additive
predictor.
}
\value{
The value of the residual degrees-of-freedom extracted from the object.
When \code{type = "vlm"} this is a single integer, and when
\code{type = "lm"} this is an \eqn{M}-vector of integers.
}
\seealso{
\code{\link{vglm}},
\code{\link[stats]{deviance}},
\code{\link[stats]{lm}},
\code{\link{anova.vglm}}.
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
head(model.matrix(fit, type = "vlm"))
head(model.matrix(fit, type =  "lm"))
df.residual(fit, type = "vlm")  # n * M - p_VLM
nobs(fit, type = "vlm")  # n * M
nvar(fit, type = "vlm")  # p_VLM
df.residual(fit, type = "lm")  # n - p_LM(j); Useful in some situations
nobs(fit, type = "lm")  # n
nvar(fit, type = "lm")  # p_LM
nvar_vlm(fit, type = "lm")  # p_LM(j) (<= p_LM elementwise)
}
\keyword{models}
\keyword{regression}
VGAM/man/posnormal.Rd0000644000176200001440000001207413565414527014100 0ustar liggesusers\name{posnormal}
\alias{posnormal}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Positive Normal Distribution Family Function }
\description{
Fits a positive (univariate) normal distribution.
}
\usage{
posnormal(lmean = "identitylink", lsd = "loglink",
          eq.mean = FALSE, eq.sd = FALSE,
          gmean = exp((-5:5)/2), gsd = exp((-1:5)/2),
          imean = NULL, isd = NULL, probs.y = 0.10, imethod = 1,
          nsimEIM = NULL, zero = "sd")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lmean, lsd}{
Link functions for the mean and standard deviation parameters of the
usual univariate normal distribution.
They are \eqn{\mu}{mu} and \eqn{\sigma}{sigma} respectively.
See \code{\link{Links}} for more choices.
}
% \item{emean, esd}{
% List. Extra argument for each of the links.
% See \code{earg} in \code{\link{Links}} for general information.
%emean = list(), esd = list(),
%
% }
\item{gmean, gsd, imethod}{
See \code{\link{CommonVGAMffArguments}} for more information.
\code{gmean} and \code{gsd} currently operate on a multiplicative
scale, on the sample mean and the sample standard deviation,
respectively.
}
\item{imean, isd}{
Optional initial values for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
A \code{NULL} means a value is computed internally.
See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{eq.mean, eq.sd}{
See \code{\link{CommonVGAMffArguments}} for more information.
The fact that these arguments are supported results in default
constraint matrices being a \emph{permutation} of the identity matrix
(effectively \emph{trivial} constraints).
}
\item{zero, nsimEIM, probs.y}{
See \code{\link{CommonVGAMffArguments}} for information.
}
% \item{zero}{
% See \code{\link{CommonVGAMffArguments}} for more information.
% An integer-valued vector specifying which
% linear/additive predictors are modelled as intercepts only.
% The values must be from the set \{1,2\} corresponding
% respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}.
% If \code{zero = NULL} then all linear/additive predictors are modelled as
% a linear combination of the explanatory variables.
% For many data sets having \code{zero = 2} is a good idea.
% }
}
\details{
The positive normal distribution is the ordinary normal distribution
but with the probability of zero or less being zero.
The rest of the probability density function is scaled up.
Hence the probability density function can be written
\deqn{f(y) = \frac{1}{\sqrt{2\pi} \sigma} \exp\left( -\frac12
(y-\mu)^2 / \sigma^2 \right) / \left[ 1-\Phi(-\mu/ \sigma) \right]}{%
f(y) = (1/(sqrt(2*pi)*sigma)) * exp( -0.5 * (y-mu)^2/ sigma^2) /
[1-Phi(-mu/ sigma)] }
where \eqn{\Phi()}{Phi} is the cumulative distribution function of a
standard normal (\code{\link[stats:Normal]{pnorm}}).
Equivalently, this is
\deqn{f(y) = \frac{1}{\sigma} \frac{\phi((y-\mu) / \sigma)}{
1-\Phi(-\mu/ \sigma)}.}{%
f(y) = (1/sigma) * dnorm((y-mu)/sigma) / [1-pnorm(-mu/ sigma)].}
where \eqn{\phi()}{dnorm()} is the probability density function of a
standard normal distribution (\code{\link[stats:Normal]{dnorm}}).
The mean of \eqn{Y} is
\deqn{E(Y) = \mu + \sigma \frac{\phi(-\mu/ \sigma)}{
1-\Phi(-\mu/ \sigma)}. }{%
E(Y) = mu + sigma * dnorm(-mu/sigma) / [1-pnorm(-mu/ sigma)]. }
This family function handles multiple responses.
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
}
%\references{
%
% Documentation accompanying the \pkg{VGAM} package at
% \url{http://www.stat.auckland.ac.nz/~yee}
% contains further information and examples.
%
%
%}
\author{ Thomas W. Yee }
\note{
The response variable for this family function is the same as
\code{\link{uninormal}} except positive values are required.
Reasonably good initial values are needed.
The distribution of the reciprocal of a positive normal random variable
is known as an alpha distribution.
}
\section{Warning }{
It is recommended that \code{trace = TRUE} be used to monitor
convergence; sometimes the estimated mean is \code{-Inf} and the
estimated standard deviation is \code{Inf}, especially when the sample
size is small.
Under- or over-flow may occur if the data is ill-conditioned.
}
\seealso{
\code{\link{uninormal}},
\code{\link{tobit}}.
}
\examples{
pdata <- data.frame(Mean = 1.0, SD = exp(1.0))
pdata <- transform(pdata, y = rposnorm(n <- 1000, m = Mean, sd = SD))
\dontrun{with(pdata, hist(y, prob = TRUE, border = "blue",
main = paste("posnorm(m =", Mean[1], ", sd =", round(SD[1], 2),")"))) }
fit <- vglm(y ~ 1, posnormal, data = pdata, trace = TRUE)
coef(fit, matrix = TRUE)
(Cfit <- Coef(fit))
mygrid <- with(pdata, seq(min(y), max(y), len = 200))  # Add the fit to the histogram
\dontrun{lines(mygrid, dposnorm(mygrid, Cfit[1], Cfit[2]), col = "orange")}
}
\keyword{models}
\keyword{regression}
VGAM/man/zabinomial.Rd0000644000176200001440000001155013565414527014211 0ustar liggesusers\name{zabinomial}
\alias{zabinomial}
\alias{zabinomialff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Zero-Altered Binomial Distribution }
\description{
Fits a zero-altered binomial distribution based on a conditional model
involving a Bernoulli distribution and a positive-binomial
distribution.
}
\usage{
zabinomial(lpobs0 = "logitlink", lprob = "logitlink",
           type.fitted = c("mean", "prob", "pobs0"),
           ipobs0 = NULL, iprob = NULL, imethod = 1, zero = NULL)
zabinomialff(lprob = "logitlink", lonempobs0 = "logitlink",
             type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
             iprob = NULL, ionempobs0 = NULL, imethod = 1,
             zero = "onempobs0")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lprob}{
Parameter link function applied to the probability parameter of the
binomial distribution.
See \code{\link{Links}} for more choices.
}
\item{lpobs0}{
Link function for the parameter \eqn{p_0}{pobs0}, called
\code{pobs0} here.
See \code{\link{Links}} for more choices.
}
\item{type.fitted}{
See \code{\link{CommonVGAMffArguments}} and
\code{\link{fittedvlm}} for information.
}
\item{iprob, ipobs0}{
See \code{\link{CommonVGAMffArguments}}.
}
\item{lonempobs0, ionempobs0}{
Corresponding argument for the other parameterization.
See details below.
}
\item{imethod, zero}{
See \code{\link{CommonVGAMffArguments}}.
}
}
\details{
The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0}, else
\eqn{Y} has a positive-binomial distribution with probability
\eqn{1-p_0}{1-pobs0}. Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1}, which may
be modelled as a function of the covariates.
The zero-altered binomial distribution differs from the zero-inflated
binomial distribution in that the former has zeros coming from one
source, whereas the latter has zeros coming from the binomial
distribution too. The zero-inflated binomial distribution is
implemented in \code{\link{zibinomial}}.
Some people call the zero-altered binomial a \emph{hurdle} model.
The input is currently a vector or one-column matrix.
By default, the two linear/additive predictors for \code{zabinomial()}
are \eqn{(logit(p_0), \log(p))^T}{(logit(pobs0), log(prob))^T}.
The \pkg{VGAM} family function \code{zabinomialff()} has a few changes
compared to \code{zabinomial()}.
These are:
(i) the order of the linear/additive predictors is switched so the
binomial probability comes first;
(ii) argument \code{onempobs0} is now 1 minus the probability of an
observed 0, i.e., the probability of the positive binomial
distribution, i.e., \code{onempobs0} is \code{1-pobs0};
(iii) argument \code{zero} has a new default so that the
\code{onempobs0} is intercept-only by default.
Now \code{zabinomialff()} is generally recommended over
\code{zabinomial()}.
Both functions implement Fisher scoring and neither can handle
multiple responses.
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
The \code{fitted.values} slot of the fitted object, which should be
extracted by the generic function \code{fitted}, returns the mean
\eqn{\mu}{mu} (default) which is given by
\deqn{\mu = (1-p_0) \mu_{b} / [1 - (1 - \mu_{b})^N]}{%
mu = (1-pobs0) * mub / [1 - (1 - mub)^N]}
where \eqn{\mu_{b}}{mub} is the usual binomial mean.
If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
}
%\references{
%
%
%}
%\section{Warning }{
%
%}
\author{ T. W. Yee }
\note{
The response should be a two-column matrix of counts, with the first
column giving the number of successes.
Note this family function allows \eqn{p_0}{pobs0} to be modelled as a
function of the covariates by having \code{zero = NULL}.
It is a conditional model, not a mixture model.
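% An illustrative evaluation (not from the original file) of the fitted
% mean formula given in the Value section above, with hypothetical
% values:
% pobs0 <- 0.2; mub <- 0.4; N <- 10
% (1 - pobs0) * mub / (1 - (1 - mub)^N)  # mu, by the formula above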
These family functions effectively combine \code{\link{posbinomial}} and \code{\link{binomialff}} into one family function. } \seealso{ \code{\link{dzabinom}}, \code{\link{zibinomial}}, \code{\link{posbinomial}}, \code{\link{binomialff}}, \code{\link[stats:Binomial]{dbinom}}, \code{\link{CommonVGAMffArguments}}. } \examples{ zdata <- data.frame(x2 = runif(nn <- 1000)) zdata <- transform(zdata, size = 10, prob = logitlink(-2 + 3*x2, inverse = TRUE), pobs0 = logitlink(-1 + 2*x2, inverse = TRUE)) zdata <- transform(zdata, y1 = rzabinom(nn, size = size, prob = prob, pobs0 = pobs0)) with(zdata, table(y1)) zfit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL), data = zdata, trace = TRUE) coef(zfit, matrix = TRUE) head(fitted(zfit)) head(predict(zfit)) summary(zfit) } \keyword{models} \keyword{regression} VGAM/man/logitlink.Rd0000644000176200001440000001504013565414527014056 0ustar liggesusers\name{logitlink} \alias{logitlink} %\alias{logit} \alias{extlogitlink} %\alias{extlogit} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Logit Link Function } \description{ Computes the logit transformation, including its inverse and the first two derivatives. } \usage{ logitlink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) extlogitlink(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bvalue, bminvalue, bmaxvalue}{ See \code{\link{Links}}. These are boundary values. For \code{extlogitlink}, values of \code{theta} less than or equal to \eqn{A} or greater than or equal to \eqn{B} can be replaced by \code{bminvalue} and \code{bmaxvalue}. } % Extra argument for passing in additional information. % For \code{logitlink}, values of \code{theta} which are equal to 0 or 1 are % replaced by \code{earg} or \code{1-earg} % (respectively, and if given) before computing the logit. \item{min, max}{ For \code{extlogitlink}, \code{min} gives \eqn{A}, \code{max} gives \eqn{B}, and for out of range values, \code{bminvalue} and \code{bmaxvalue}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The logit link function is very commonly used for parameters that lie in the unit interval. It is the inverse CDF of the logistic distribution. Numerical values of \code{theta} close to 0 or 1 or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The \emph{extended} logit link function \code{extlogitlink} should be used more generally for parameters that lie in the interval \eqn{(A,B)}, say. The formula is \deqn{\log((\theta-A)/(B-\theta))}{% log((theta-A)/(B-theta))} and the default values for \eqn{A} and \eqn{B} correspond to the ordinary logit function. Numerical values of \code{theta} close to \eqn{A} or \eqn{B} or out of range result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. However these can be replaced by values \eqn{bminvalue} and \eqn{bmaxvalue} first before computing the link function. } \value{ For \code{logitlink} with \code{deriv = 0}, the logit of \code{theta}, i.e., \code{log(theta/(1-theta))} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{exp(theta)/(1+exp(theta))}. 
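% A small numerical sketch (not from the original file) of the extended
% logit formula given in Details, with hypothetical bounds A and B:
% p <- 0.35; A <- 0.2; B <- 0.6
% log((p - A) / (B - p))             # By the formula
% extlogitlink(p, min = A, max = B)  # Should agree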
For \code{deriv = 1}, then the function returns \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. Here, all logarithms are natural logarithms, i.e., to base \emph{e}. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } \author{ Thomas W. Yee } \note{ Numerical instability may occur when \code{theta} is close to 1 or 0 (for \code{logitlink}), or close to \eqn{A} or \eqn{B} for \code{extlogitlink}. One way of overcoming this is to use, e.g., \code{bvalue}. In terms of the threshold approach with cumulative probabilities for an ordinal response this link function corresponds to the univariate logistic distribution (see \code{\link{logistic}}). } \seealso{ \code{\link{Links}}, \code{\link{logitoffsetlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link{cauchitlink}}, \code{\link{logistic1}}, \code{\link{loglink}}, \code{\link[stats]{Logistic}}, \code{\link{multilogitlink}}. } \examples{ p <- seq(0.01, 0.99, by = 0.01) logitlink(p) max(abs(logitlink(logitlink(p), inverse = TRUE) - p)) # Should be 0 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01)) logitlink(p) # Has NAs logitlink(p, bvalue = .Machine$double.eps) # Has no NAs p <- seq(0.9, 2.2, by = 0.1) extlogitlink(p, min = 1, max = 2, bminvalue = 1 + .Machine$double.eps, bmaxvalue = 2 - .Machine$double.eps) # Has no NAs \dontrun{ par(mfrow = c(2,2), lwd = (mylwd <- 2)) y <- seq(-4, 4, length = 100) p <- seq(0.01, 0.99, by = 0.01) for (d in 0:1) { myinv <- (d > 0) matplot(p, cbind( logitlink(p, deriv = d, inverse = myinv), probitlink(p, deriv = d, inverse = myinv)), type = "n", col = "purple", ylab = "transformation", las = 1, main = if (d == 0) "Some probability link functions" else "1 / first derivative") lines(p, logitlink(p, deriv = d, inverse = myinv), col = "limegreen") lines(p, probitlink(p, deriv = d, inverse = myinv), col = "purple") lines(p, clogloglink(p, deriv = d, inverse = myinv), col = "chocolate") lines(p, cauchitlink(p, deriv = d, inverse = myinv), col = "tan") if (d == 0) { abline(v = 0.5, h = 0, lty = "dashed") legend(0, 4.5, c("logitlink", "probitlink", "clogloglink", "cauchitlink"), col = c("limegreen", "purple", "chocolate", "tan"), lwd = mylwd) } else abline(v = 0.5, lty = "dashed") } for (d in 0) { matplot(y, cbind(logitlink(y, deriv = d, inverse = TRUE), probitlink(y, deriv = d, inverse = TRUE)), las = 1, type = "n", col = "purple", xlab = "transformation", ylab = "p", main = if (d == 0) "Some inverse probability link functions" else "First derivative") lines(y, logitlink(y, deriv = d, inverse = TRUE), col = "limegreen") lines(y, probitlink(y, deriv = d, inverse = TRUE), col = "purple") lines(y, clogloglink(y, deriv = d, inverse = TRUE), col = "chocolate") lines(y, cauchitlink(y, deriv = d, inverse = TRUE), col = "tan") if (d == 0) { abline(h = 0.5, v = 0, lty = "dashed") legend(-4, 1, c("logitlink", "probitlink", "clogloglink", "cauchitlink"), col = c("limegreen", "purple", "chocolate", "tan"), lwd = mylwd) } } p <- seq(0.21, 0.59, by = 0.01) plot(p, extlogitlink(p, min = 0.2, max = 0.6), type = "l", col = "black", ylab = "transformation", xlim = c(0, 1), las = 1, main = "extlogitlink(p, min = 0.2, max = 0.6)") par(lwd = 1) } } \keyword{math} \keyword{models} \keyword{regression} %plot(y, logitlink(y, inverse = TRUE), type = "l", col = "limegreen", % xlab = "transformation", ylab = "p", % lwd = 2, las = 1, 
main = "Some inverse probability link functions") %lines(y, probitlink(y, inverse = TRUE), col = "purple", lwd = 2) %lines(y, clogloglink(y, inverse = TRUE), col = "chocolate", lwd = 2) %abline(h = 0.5, v = 0, lty = "dashed") VGAM/man/predictqrrvglm.Rd0000644000176200001440000000445713565414527015141 0ustar liggesusers\name{predictqrrvglm} \alias{predictqrrvglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Predict Method for a CQO fit } \description{ Predicted values based on a constrained quadratic ordination (CQO) object. } \usage{ predictqrrvglm(object, newdata = NULL, type = c("link", "response", "latvar", "terms"), se.fit = FALSE, deriv = 0, dispersion = NULL, extra = object@extra, varI.latvar = FALSE, refResponse = NULL, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ Object of class inheriting from \code{"qrrvglm"}. } \item{newdata}{ An optional data frame in which to look for variables with which to predict. If omitted, the fitted linear predictors are used. } \item{type, se.fit, dispersion, extra}{ See \code{\link{predictvglm}}. } \item{deriv}{ Derivative. Currently only 0 is handled. } \item{varI.latvar, refResponse}{ Arguments passed into \code{\link{Coef.qrrvglm}}. } \item{\dots}{ Currently undocumented. } } \details{ Obtains predictions from a fitted CQO object. Currently there are lots of limitations of this function; it is unfinished. % and optionally estimates standard errors of those predictions } \value{ See \code{\link{predictvglm}}. } \references{ Yee, T. W. (2004) A new technique for maximum-likelihood canonical Gaussian ordination. \emph{Ecological Monographs}, \bold{74}, 685--701. } \author{ T. W. Yee } \note{ This function is not robust and has not been checked fully. } \seealso{ \code{\link{cqo}}, \code{\link{calibrate.qrrvglm}}. } \examples{ \dontrun{ set.seed(1234) hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardize the X vars p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, poissonff, data = hspider, Crow1positive = FALSE, I.toler = TRUE) sort(deviance(p1, history = TRUE)) # A history of all the iterations head(predict(p1)) # The following should be all 0s: max(abs(predict(p1, newdata = head(hspider)) - head(predict(p1)))) max(abs(predict(p1, newdata = head(hspider), type = "res")-head(fitted(p1)))) } } \keyword{models} \keyword{regression} VGAM/man/bratt.Rd0000644000176200001440000001235713565414527013206 0ustar liggesusers\name{bratt} \alias{bratt} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bradley Terry Model With Ties } \description{ Fits a Bradley Terry model with ties (intercept-only model) by maximum likelihood estimation. } \usage{ bratt(refgp = "last", refvalue = 1, ialpha = 1, i0 = 0.01) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{refgp}{ Integer whose value must be from the set \{1,\ldots,\eqn{M}\}, where there are \eqn{M} competitors. The default value indicates the last competitor is used---but don't input a character string, in general. } \item{refvalue}{ Numeric. A positive value for the reference group. } \item{ialpha}{ Initial values for the \eqn{\alpha}{alpha}s. These are recycled to the appropriate length. } \item{i0}{ Initial value for \eqn{\alpha_0}{alpha_0}. If convergence fails, try another positive value. 
}
}
\details{
There are several models that extend the ordinary Bradley Terry model
to handle ties. This family function implements one of these models.
It involves \eqn{M} competitors who either win or lose or tie against
each other.
(If there are no draws/ties then use \code{\link{brat}}).
The probability that Competitor \eqn{i} beats Competitor \eqn{j} is
\eqn{\alpha_i / (\alpha_i+\alpha_j+\alpha_0)}{alpha_i / (alpha_i + alpha_j + alpha_0)},
where all the \eqn{\alpha}{alpha}s are positive.
The probability that Competitor \eqn{i} ties with Competitor \eqn{j} is
\eqn{\alpha_0 / (\alpha_i+\alpha_j+\alpha_0)}{alpha_0 / (alpha_i + alpha_j + alpha_0)}.
Loosely, the \eqn{\alpha}{alpha}s can be thought of as the competitors'
`abilities', and \eqn{\alpha_0}{alpha_0} is an added parameter to model
ties.
For identifiability, one of the \eqn{\alpha_i}{alpha_i} is set to a
known value \code{refvalue}, e.g., 1.
By default, this function chooses the last competitor to have this
reference value.
The data can be represented in the form of an \eqn{M} by \eqn{M} matrix
of counts, where winners are the rows and losers are the columns.
However, this is not the way the data should be inputted (see below).
Excluding the reference value/group, this function chooses
\eqn{\log(\alpha_j)}{log(alpha_j)} as the first \eqn{M-1} linear
predictors.
The log link ensures that the \eqn{\alpha}{alpha}s are positive.
The last linear predictor is \eqn{\log(\alpha_0)}{log(alpha_0)}.
The Bradley Terry model can be fitted with covariates, e.g., a home
advantage variable, but unfortunately, this lies outside the VGLM
theoretical framework and therefore cannot be handled with this code.
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
}
\references{
Torsney, B. (2004)
Fitting Bradley Terry models using a multiplicative algorithm.
In: Antoch, J. (ed.)
\emph{Proceedings in Computational Statistics COMPSTAT 2004},
Physica-Verlag: Heidelberg. Pages 513--526.
}
\author{ T. W. Yee }
\note{
The function \code{\link{Brat}} is useful for coercing an \eqn{M} by
\eqn{M} matrix of counts into a one-row matrix suitable for
\code{bratt}.
Diagonal elements are skipped, and the usual S order of
\code{c(a.matrix)} of elements is used.
There should be no missing values apart from the diagonal elements of
the square matrix.
The matrix should have winners as the rows, and losers as the columns.
In general, the response should be a matrix with \eqn{M(M-1)} columns.
Also, a symmetric matrix of ties should be passed into
\code{\link{Brat}}. The diagonal of this matrix should be all
\code{NA}s.
Only an intercept model is recommended with \code{bratt}.
It doesn't really make sense to include covariates because of the
limited VGLM framework.
Notationally, note that the \pkg{VGAM} family function
\code{\link{brat}} has \eqn{M+1} contestants, while \code{bratt} has
\eqn{M} contestants.
}
\seealso{
\code{\link{brat}},
\code{\link{Brat}},
\code{\link{binomialff}}.
}
\examples{
# citation statistics: being cited is a 'win'; citing is a 'loss'
journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
mat <- matrix(c( NA, 33, 320, 284,
                730, NA, 813, 276,
                498, 68,  NA, 325,
                221, 17, 142,  NA), 4, 4)
dimnames(mat) <- list(winner = journal, loser = journal)
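# An illustrative aside (not part of the original example): with
# hypothetical abilities 'alpha' and tie parameter 'alpha0', the
# formulas in the Details section give:
alpha <- c(2, 1); alpha0 <- 0.5
alpha[1] / (alpha[1] + alpha[2] + alpha0)  # P(Competitor 1 beats 2)
alpha0 / (alpha[1] + alpha[2] + alpha0)    # P(Competitors 1 and 2 tie)

# Add some ties. This is fictitious data.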
ties <- 5 + 0 * mat
ties[2, 1] <- ties[1, 2] <- 9
# Now fit the model
fit <- vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE)
fit <- vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE,
            crit = "coef")
summary(fit)
c(0, coef(fit))  # Log-abilities (in order of "journal"); last is log(alpha0)
c(1, Coef(fit))  # Abilities (in order of "journal"); last is alpha0
fit@misc$alpha   # alpha_1,...,alpha_M
fit@misc$alpha0  # alpha_0
fitted(fit)  # Probabilities of winning and tying, in awkward form
predict(fit)
(check <- InverseBrat(fitted(fit)))  # Probabilities of winning
qprob <- attr(fitted(fit), "probtie")  # Probabilities of a tie
qprobmat <- InverseBrat(c(qprob), NCo = nrow(ties))  # Probabilities of a tie
check + t(check) + qprobmat  # Should be 1s in the off-diagonals
}
\keyword{models}
\keyword{regression}
VGAM/man/prinia.Rd0000644000176200001440000000622413565414527013350 0ustar liggesusers\name{prinia}
\alias{prinia}
\docType{data}
\title{Yellow-bellied Prinia
%% ~~ data name/kind ... ~~
}
\description{
A data frame of capture--recapture data on the yellow-bellied Prinia.
}
\usage{ data(prinia) }
\format{
A data frame with 151 observations on the following 23 variables.
\describe{
\item{length}{a numeric vector, the scaled wing length
(zero mean and unit variance).
}
\item{fat}{a numeric vector, fat index;
originally 1 (no fat) to 4 (very fat) but converted to
0 (no fat) versus 1 otherwise.
}
\item{cap}{a numeric vector, number of times the bird was captured or
recaptured.
}
\item{noncap}{a numeric vector, number of times the bird was not
captured.
}
\item{y01, y02, y03, y04, y05, y06}{
a numeric vector of 0s and 1s, for noncapture and capture,
respectively.
}
\item{y07, y08, y09, y10, y11, y12}{
same as above.
}
\item{y13, y14, y15, y16, y17, y18, y19}{
same as above.
}
}
}
\details{
The yellow-bellied Prinia \emph{Prinia flaviventris} is a common bird
species found in Southeast Asia. A capture--recapture experiment was
conducted at the Mai Po Nature Reserve in Hong Kong during 1991, in
which captured individuals had their wing lengths measured and fat
index recorded. A total of 19 weekly capture occasions were
considered, where 151 distinct birds were captured.
More generally, the prinias are a genus of small insectivorous birds,
and are sometimes referred to as \emph{wren-warblers}.
They are a little-known group of the tropical and subtropical Old
World, the roughly 30 species being divided fairly equally between
Africa and Asia.
% 20131030; this is old:
% The example below illustrates the necessity of creating
% variables \code{y1}, \code{y2}, \ldots in order for
% \code{\link{posbernoulli.b}},
% \code{\link{posbernoulli.t}} and
% \code{\link{posbernoulli.tb}} to work.
% In contrast, \code{\link{posbinomial}} may have a simple 2-column
% matrix as the response.
% \emph{Prinia inornate} is from the SS paper, not exactly this bird.
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
Thanks to Paul Yip for permission to make this data available.
% Further information is at:
% Huggins, R. M. and Yip, P. S. F. (1997).
% Statistical analysis of removal experiments with the use of auxillary variables.
% \emph{Statistica Sinica} \bold{7}, 705--712.
Hwang, W.-H. and Huggins, R. M. (2007)
Application of semiparametric regression models in the analysis of
capture--recapture experiments.
\emph{Australian and New Zealand Journal of Statistics},
\bold{49}, 191--202.
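% An illustrative consistency check (not from the original file); it
% assumes columns 5 to 23 of the data frame are y01--y19:
% all(prinia$cap == rowSums(prinia[, 5:23]))  # Should be TRUE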
} \examples{ head(prinia) summary(prinia) rowSums(prinia[, c("cap", "noncap")]) # 19s # Fit a positive-binomial distribution (M.h) to the data: fit1 <- vglm(cbind(cap, noncap) ~ length + fat, posbinomial, data = prinia) # Fit another positive-binomial distribution (M.h) to the data: # The response input is suitable for posbernoulli.*-type functions. fit2 <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10, y11, y12, y13, y14, y15, y16, y17, y18, y19) ~ length + fat, posbernoulli.b(drop.b = FALSE ~ 0), data = prinia) } \keyword{datasets} VGAM/man/anovavglm.Rd0000644000176200001440000001777413565414527014074 0ustar liggesusers\name{anova.vglm} \alias{anova.vglm} %\alias{update_formula} %\alias{update_default} \title{Analysis of Deviance for Vector Generalized Linear Model Fits} \description{ Compute an analysis of deviance table for one or more vector generalized linear model fits. } \usage{ \method{anova}{vglm}(object, \dots, type = c("II", "I", "III", 2, 1, 3), test = c("LRT", "none"), trydev = TRUE, silent = TRUE) } %\method{anova.vglm}{default}(object, \dots, name = NULL) %\method{anova.vglm}{formula}(object, \dots, data = list()) \arguments{ \item{object, \dots}{objects of class \code{vglm}, typically the result of a call to \code{\link{vglm}}, or a list of \code{objects} for the \code{"vglmlist"} method. Each model must have an intercept term. If \code{"vglmlist"} is used then \code{type = 1} or \code{type = "I"} must be specified. % zz Each model must have an intercept term. } % \item{dispersion}{the dispersion parameter for the fitting family. % By default it is obtained from the object(s).} \item{type}{ character or numeric; any one of the (effectively three) choices given. Note that \code{\link[stats]{anova.glm}} has \code{1} or \code{"I"} as its default; and that \code{\link[car]{Anova.glm}} has \code{2} or \code{"II"} as its default (and allows for \code{type = "III"}), so one can think of this function as a combination of \code{\link[stats]{anova.glm}} and \code{\link[car]{Anova.glm}}, but with the default of the latter. See Details below for more information. % The default is the first, which corresponds % to the same as \code{\link[stats]{anova.glm}}. % This might change later--see Warnings below. % zz Currently only \code{"I"} and \code{"III"} work. % The default is the first, which corresponds % to the same as \code{\link[stats]{anova.glm}}. } \item{test}{a character string, (partially) matching one of \code{"LRT"} and \code{"none"}. In the future it is hoped that \code{"Rao"} be also supported, to conduct score tests. The first value is the default. % yettodo: it is hoped that \code{test = "Rao"} be supported one day. % See \code{\link[stats]{stat.anova}}. % } \item{trydev}{ logical; if \code{TRUE} then the deviance is used if possible. Note that only a few \pkg{VGAM} family functions have a deviance that is defined and implemented. Setting it \code{FALSE} means the log-likelihood will be used. } \item{silent}{ logical; if \code{TRUE} then any warnings will be suppressed. These may arise by IRLS iterations not converging during the fitting of submodels. Setting it \code{FALSE} means that any warnings are given. } } \details{ \code{anova.vglm} is intended to be similar to \code{\link[stats]{anova.glm}} so specifying a single object and \code{type = 1} gives a \emph{sequential} analysis of deviance table for that fit. 
By \emph{analysis of deviance}, it is meant loosely that if the
deviance of the model is not defined or implemented, then twice the
difference between the log-likelihoods of two nested models remains
asymptotically chi-squared distributed with degrees of freedom equal
to the difference in the number of parameters of the two models.
Of course, the usual regularity conditions are assumed to hold.
For Type I, the reductions in the residual deviance as each term of
the formula is added in turn are given as the rows of the analysis of
deviance table, plus the residual deviances themselves.
\emph{Type I} or sequential tests
(as in \code{\link[stats]{anova.glm}}) are computationally the easiest
of the three methods.
For this, the order of the terms is important, and each term is added
sequentially from first to last.
The \code{Anova()} function in \pkg{car} allows for testing
\emph{Type II} and \emph{Type III} (SAS jargon) hypothesis tests,
although the definitions used are \emph{not} precisely those of SAS.
As \pkg{car} notes, \emph{Type I} tests rarely test interesting
hypotheses in unbalanced designs.
Type III tests enter each term \emph{last}, keeping all the other
terms in the model.
Type II tests, according to SAS, add the term after all other terms
have been added to the model except terms that contain the effect
being tested; an effect is contained in another effect if it can be
derived by deleting variables from the latter effect.
Type II tests are currently the default.
As in \code{\link[stats]{anova.glm}}, but not as
\code{\link[car]{Anova.glm}}, if more than one object is specified,
then the table has a row for the residual degrees of freedom and
deviance for each model.
For all but the first model, the change in degrees of freedom and
deviance is also given.
(This only makes statistical sense if the models are nested.)
It is conventional to list the models from smallest to largest, but
this is up to the user.
It is necessary to have \code{type = 1} when more than one object is
specified.
See \code{\link[stats]{anova.glm}} for more details and warnings.
The \pkg{VGAM} package now implements full likelihood models only;
therefore no dispersion parameters are estimated.
% about optional test statistics (and P values), as well
}
\note{
It is possible for this function to \code{\link[base]{stop}} when
\code{type = 2} or \code{3}, e.g.,
\code{anova(vglm(cans ~ myfactor, poissonff, data = boxcar))}
where \code{myfactor} is a factor.
The code was adapted directly from \code{\link[stats]{anova.glm}}
and \code{\link[car]{Anova.glm}} by T. W. Yee.
Hence the Type II and Type III tests do \emph{not} correspond
precisely with the SAS definition.
}
\section{Warning }{
See \code{\link[stats]{anova.glm}}.
Several \pkg{VGAM} family functions implement distributions which do
not satisfy the usual regularity conditions needed for the LRT to
work. No checking or warning is given for these.
As \pkg{car} says, be careful of Type III tests because they violate
marginality.
Type II tests (the default) do not have this problem.
% A default value for \code{type} may be given in the future.
% testing each term in the model after all of the others.
% The default value of \code{type} may change in the future,
% hence users should assign that argument an explicit value
% to guard against any change.
% In fact, \code{type} might not have a default value in the future,
% therefore it might always need to be set by the user.
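% An illustrative sketch (not from the original file) of the likelihood
% ratio test described in Details, assuming two nested vglm() fits
% called 'fit.small' and 'fit.big' (hypothetical names):
% LRT <- 2 * (logLik(fit.big) - logLik(fit.small))
% df1 <- df.residual(fit.small) - df.residual(fit.big)
% pchisq(LRT, df = df1, lower.tail = FALSE)  # Asymptotic p-value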
}
\value{
An object of class \code{"anova"} inheriting from class
\code{"data.frame"}.
}
\seealso{
\code{\link[stats]{anova.glm}},
\code{\link[stats]{stat.anova}},
\code{stats:::print.anova},
\code{\link[car]{Anova.glm}} if \pkg{car} is installed,
\code{\link{vglm}},
\code{\link{lrtest}},
\code{\link{add1.vglm}},
\code{\link{drop1.vglm}},
\code{\link{lrt.stat.vlm}},
\code{\link{score.stat.vlm}},
\code{\link{wald.stat.vlm}},
\code{\link{backPain2}},
\code{\link[stats]{update}}.
% \code{\link{score.stat.vlm}},
% \code{\link{step4vglm}},
}
\examples{
# Example 1: a proportional odds model fitted to pneumo.
set.seed(1)
pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(8))
fit1 <- vglm(cbind(normal, mild, severe) ~ let     , propodds, pneumo)
fit2 <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, pneumo)
fit3 <- vglm(cbind(normal, mild, severe) ~ let + x3, cumulative, pneumo)
anova(fit1, fit2, fit3, type = 1)  # Remember to specify 'type'!!
anova(fit2)
anova(fit2, type = "I")
anova(fit2, type = "III")
# Example 2: a proportional odds model fitted to backPain2.
data("backPain2", package = "VGAM")
summary(backPain2)
fitlogit <- vglm(pain ~ x2 * x3 * x4, propodds, data = backPain2)
coef(fitlogit)
anova(fitlogit)
anova(fitlogit, type = "I")
anova(fitlogit, type = "III")
}
\keyword{htest}
%(testStatistic <- 2 * (logLik(fit3) - logLik(fit1)))
%(mypval<-pchisq(testStatistic,df=length(coef(fit3))-length(coef(fit1)),
% lower.tail = FALSE))
%type = c("I", "II","III", 1, 2, 3),
VGAM/man/erf.Rd0000644000176200001440000000344413565414527012643 0ustar liggesusers\name{erf}
\alias{erf}
\alias{erfc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Error Function, and variants }
\description{
Computes the error function, or its inverse,
based on the normal distribution.
Also computes the complement of the error function, or its inverse.
}
\usage{
erf(x, inverse = FALSE)
erfc(x, inverse = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ Numeric. }
\item{inverse}{ Logical. Of length 1. }
}
\details{
\eqn{Erf(x)} is defined as
\deqn{Erf(x) = \frac{2}{\sqrt{\pi}} \int_0^x \exp(-t^2) dt}{%
Erf(x) = (2/sqrt(pi)) int_0^x exp(-t^2) dt}
so that it is closely related to
\code{\link[stats:Normal]{pnorm}}.
The inverse function is defined for \eqn{x} in \eqn{(-1,1)}.
}
\value{
Returns the value of the function evaluated at \code{x}.
}
\references{
Abramowitz, M. and Stegun, I. A. (1972)
\emph{Handbook of Mathematical Functions with Formulas, Graphs, and
Mathematical Tables},
New York: Dover Publications Inc.
}
\author{ T. W. Yee}
\note{
Some authors omit the term \eqn{2/\sqrt{\pi}}{2/sqrt(pi)} from the
definition of \eqn{Erf(x)}. Although defined for complex arguments,
this function only works for real arguments.
The \emph{complementary error function} \eqn{erfc(x)} is defined as
\eqn{1-erf(x)}, and is implemented by \code{erfc}.
Its inverse function is defined for \eqn{x} in \eqn{(0,2)}.
}
\seealso{
\code{\link[stats:Normal]{pnorm}}.
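% A quick numerical check (not from the original file) of the close
% relation to pnorm() described above:
% erf(x) = 2 * pnorm(x * sqrt(2)) - 1.
% x <- 0.7
% c(erf(x), 2 * pnorm(x * sqrt(2)) - 1)  # Should agree
% c(erfc(x), 1 - erf(x))                 # Should agree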
}
\examples{
\dontrun{ curve(erf, -3, 3, col = "orange", ylab = "", las = 1)
curve(pnorm, -3, 3, add = TRUE, col = "blue", lty = "dotted", lwd = 2)
abline(v = 0, h = 0, lty = "dashed")
legend("topleft", c("erf(x)", "pnorm(x)"), col = c("orange", "blue"),
       lty = c("solid", "dotted"), lwd = 1:2) }
}
\keyword{math}
VGAM/man/rrvglm.optim.control.Rd0000644000176200001440000000402513565414527016202 0ustar liggesusers\name{rrvglm.optim.control}
\alias{rrvglm.optim.control}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Control Function for rrvglm() Calling optim() }
\description{
Algorithmic constants and parameters for running \code{optim} within
\code{rrvglm} are set using this function.
}
\usage{
rrvglm.optim.control(Fnscale = 1, Maxit = 100,
                     Switch.optimizer = 3, Abstol = -Inf,
                     Reltol = sqrt(.Machine$double.eps), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Fnscale}{ Passed into \code{optim} as \code{fnscale}. }
\item{Maxit}{ Passed into \code{optim} as \code{maxit}. }
\item{Switch.optimizer}{ Iteration number when the "Nelder-Mead"
method of \code{optim} is switched to the quasi-Newton "BFGS" method.
Assigning \code{Switch.optimizer} a negative number means always BFGS,
while assigning \code{Switch.optimizer} a value greater than
\code{Maxit} means always use Nelder-Mead.
}
\item{Abstol}{ Passed into \code{optim} as \code{abstol}. }
\item{Reltol}{ Passed into \code{optim} as \code{reltol}. }
\item{\dots}{ Ignored. }
}
\details{
See \code{\link[stats]{optim}} for more details.
}
\value{
A list with components equal to the arguments.
}
%\references{ ~put references to the literature/web site here ~ }
\author{ Thomas W. Yee }
\note{
The transition between optimization methods may be unstable, so users
may have to vary the value of \code{Switch.optimizer}.
Practical experience with \code{Switch.optimizer} shows that setting
it to too large a value may lead to a local solution, whereas setting
it to a low value will obtain the global solution.
It appears that, if BFGS kicks in too late when the Nelder-Mead
algorithm is starting to converge to a local solution, then switching
to BFGS will not be sufficient to bypass convergence to that local
solution.
}
\seealso{
\code{\link{rrvglm.control}},
\code{\link[stats]{optim}}.
}
%\examples{
%}
\keyword{models}
\keyword{regression}
VGAM/man/finney44.Rd0000644000176200001440000000261013565414527013521 0ustar liggesusers\name{finney44}
\alias{finney44}
\docType{data}
\title{ Toxicity trial for insects
%% ~~ data name/kind ... ~~
}
\description{
A data frame of a toxicity trial.
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data(finney44)}
\format{
A data frame with 6 observations on the following 3 variables.
\describe{
\item{\code{pconc}}{a numeric vector, percent concentration of
pyrethrins. }
\item{\code{hatched}}{number of eggs that hatched. }
\item{\code{unhatched}}{number of eggs that did not hatch. }
}
}
\details{
Finney (1944) describes a toxicity trial of five different
concentrations of pyrethrins (percent) plus a control that were
administered to eggs of \emph{Ephestia kuhniella}.
The natural mortality rate is large, and a common adjustment is to
use Abbott's formula.
}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
\references{
Finney, D. J. (1944)
The application of the probit method to toxicity test data adjusted
for mortality in the controls.
\emph{Annals of Applied Biology}, \bold{31}, 68--74.
Abbott, W. S. (1925)
A method of computing the effectiveness of an insecticide.
\emph{Journal of Economic Entomology}, \bold{18}, 265--267.
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(finney44)
transform(finney44, mortality = unhatched / (hatched + unhatched))
}
\keyword{datasets}
VGAM/man/rrvglm.control.Rd0000644000176200001440000002165013565414527015056 0ustar liggesusers\name{rrvglm.control}
\alias{rrvglm.control}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{ Control Function for rrvglm() }
\description{
Algorithmic constants and parameters for running \code{rrvglm} are set
using this function.
}
\usage{
rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
               Corner = TRUE, Uncorrelated.latvar = FALSE, Wmat = NULL,
               Svd.arg = FALSE, Index.corner = if (length(str0))
               head((1:1000)[-str0], Rank) else 1:Rank, Ainit = NULL,
               Alpha = 0.5, Bestof = 1, Cinit = NULL,
               Etamat.colmax = 10, sd.Ainit = 0.02, sd.Cinit = 0.02,
               str0 = NULL, noRRR = ~1, Norrr = NA, noWarning = FALSE,
               trace = FALSE, Use.Init.Poisson.QO = FALSE,
               checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
               wzepsilon = .Machine$double.eps^0.75, ...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{Rank}{
The numerical rank \eqn{R} of the model.
Must be an element from the set \{1,2,\ldots,min(\eqn{M},\emph{p2})\}.
Here, the vector of explanatory variables \bold{x} is partitioned into
(\bold{x1},\bold{x2}), which is of dimension \emph{p1}+\emph{p2}.
The variables making up \bold{x1} are given by the terms in the
\code{noRRR} argument, and the rest of the terms comprise \bold{x2}.
}
\item{Algorithm}{
Character string indicating what algorithm is to be used.
The default is the first one.
}
\item{Corner}{
Logical indicating whether corner constraints are to be used.
This is one method for ensuring a unique solution.
If \code{TRUE}, \code{Index.corner} specifies the \eqn{R} rows of the
constraint matrices that are used as the corner constraints, i.e.,
they hold an order-\eqn{R} identity matrix.
}
\item{Uncorrelated.latvar}{
Logical indicating whether uncorrelated latent variables are to be
used.
This normalization forces the variance-covariance matrix of the latent
variables to be \code{diag(Rank)}, i.e., unit variance and
uncorrelated.
This constraint does not lead to a unique solution because it can be
rotated.
}
\item{Wmat}{ Yet to be done. }
\item{Svd.arg}{
Logical indicating whether a singular value decomposition of the outer
product is to be computed.
This is another normalization which ensures uniqueness.
See the argument \code{Alpha} below.
}
\item{Index.corner}{
Specifies the \eqn{R} rows of the constraint matrices that are used
for the corner constraints, i.e., they hold an order-\eqn{R} identity
matrix.
}
\item{Alpha}{
The exponent in the singular value decomposition that is used in the
first part: if the SVD is
\eqn{U D V^T}{ U \%*\% D \%*\% t(V) }
then the first and second parts are
\eqn{U D^{\alpha}}{ U \%*\% D^Alpha}
and
\eqn{D^{1-\alpha} V^T}{D^(1-Alpha) \%*\% t(V)} respectively.
A value of 0.5 is `symmetrical'.
This argument is used only when \code{Svd.arg=TRUE}.
}
\item{Bestof}{
Integer. The best of \code{Bestof} models fitted is returned.
This argument helps guard against local solutions by (hopefully)
finding the global solution from many fits. The argument works only
when the function generates its own initial value for \bold{C}, i.e.,
when \bold{C} is \emph{not} passed in as initial values.
}
\item{Ainit, Cinit}{
Initial \bold{A} and \bold{C} matrices which may speed up convergence.
They must be of the correct dimension.
}
\item{Etamat.colmax}{
Positive integer, no smaller than \code{Rank}.
Controls the amount of memory used by \code{.Init.Poisson.QO()}.
It is the maximum number of columns allowed for the pseudo-response
and its weights.
In general, the larger the value, the better the initial value.
Used only if \code{Use.Init.Poisson.QO=TRUE}.
}
% \item{Quadratic}{
% Logical indicating whether a \emph{Quadratic}
% RR-VGLM is to be fitted. If \code{TRUE}, an object of class
% \code{"qrrvglm"} will be returned, otherwise \code{"rrvglm"}.
% }
\item{str0}{
Integer vector specifying which rows of the estimated constraint
matrices (\bold{A}) are to be all zeros.
These are called \emph{structural zeros}.
Must not have any common value with \code{Index.corner}, and be a
subset of the vector \code{1:M}.
The default, \code{str0 = NULL}, means no structural zero rows at all.
}
\item{sd.Ainit, sd.Cinit}{
Standard deviation of the initial values for the elements of
\bold{A} and \bold{C}.
These are normally distributed with mean zero.
This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
}
% \item{ppar}{ Ignore this. }
\item{noRRR}{
Formula giving terms that are \emph{not} to be included in the
reduced-rank regression.
That is, \code{noRRR} specifies which explanatory variables are in the
\eqn{x_1}{x1} vector of \code{\link{rrvglm}}, and the rest go into
\eqn{x_2}{x2}.
The \eqn{x_1}{x1} variables constitute the
\eqn{\bold{B}_1}{\bold{B}1} matrix in Yee and Hastie (2003).
Those \eqn{x_2}{x2} variables which are subject to the reduced-rank
regression correspond to the \eqn{\bold{B}_2}{\bold{B}2} matrix.
Set \code{noRRR = NULL} for the reduced-rank regression to be applied
to every explanatory variable including the intercept.
}
\item{Norrr}{
Defunct. Please use \code{noRRR}.
Use of \code{Norrr} will become an error soon.
}
\item{trace}{
Logical indicating if output should be produced for each iteration.
% Useful when \code{Quadratic=TRUE} because QRR-VGLMs are
% computationally expensive and it's good to see that the program
% is working!
}
\item{Use.Init.Poisson.QO}{
Logical indicating whether \code{.Init.Poisson.QO()} should be used to
obtain initial values for the \bold{C}.
The function uses a new method that can work well if the data are
Poisson counts coming from an equal-tolerances QRR-VGLM (CQO).
This option is less realistic for RR-VGLMs compared to QRR-VGLMs.
}
\item{checkwz}{
logical indicating whether the diagonal elements of the working weight
matrices should be checked to see whether they are sufficiently
positive, i.e., greater than \code{wzepsilon}.
If not, any values less than \code{wzepsilon} are replaced with this
value.
}
\item{noWarning, Check.rank, Check.cm.rank}{
Same as \code{\link{vglm.control}}.
Ignored for \pkg{VGAM} 0.9-7 and higher.
}
\item{wzepsilon}{
Small positive number used to test whether the diagonals of the
working weight matrices are sufficiently positive.
}
\item{\dots}{
Variables in \dots are passed into \code{\link{vglm.control}}.
If the derivative algorithm is used then \dots are also passed into
\code{\link{rrvglm.optim.control}};
and if the alternating algorithm is used then \dots are also passed
into \code{\link{valt.control}}.
}
In the above, \eqn{R} is the \code{Rank} and \eqn{M} is the number of
linear predictors.
}
\details{
% QRR-VGLMs are an extension of RR-VGLMs and are useful for constrained
% ordination.
} } \keyword{models} \keyword{regression} VGAM/man/gompertz.Rd0000644000176200001440000000730413565414527013735 0ustar liggesusers\name{gompertz} \alias{gompertz} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gompertz Regression Family Function } \description{ Maximum likelihood estimation of the 2-parameter Gompertz distribution. } \usage{ gompertz(lscale = "loglink", lshape = "loglink", iscale = NULL, ishape = NULL, nsimEIM = 500, zero = NULL, nowarning = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{nowarning}{ Logical. Suppress a warning? Ignored for \pkg{VGAM} 0.9-7 and higher. } \item{lshape, lscale}{ Parameter link functions applied to the shape parameter \code{a}, scale parameter \code{scale}. All parameters are positive. See \code{\link{Links}} for more choices. } % \item{eshape, escale}{ % List. Extra argument for each of the links. % eshape = list(), escale = list(), % See \code{earg} in \code{\link{Links}} for general information. % } \item{ishape, iscale}{ Optional initial values. A \code{NULL} means a value is computed internally. } \item{nsimEIM, zero}{ See \code{\link{CommonVGAMffArguments}}. } } \details{ The Gompertz distribution has a cumulative distribution function \deqn{F(x;\alpha, \beta) = 1 - \exp[-(\alpha/\beta) \times (\exp(\beta x) - 1) ]}{% F(x;alpha, beta) = 1 - exp(-(alpha/beta) * (exp(beta * x) - 1) )} which leads to a probability density function \deqn{f(x; \alpha, \beta) = \alpha \exp(\beta x) \exp [-(\alpha/\beta) \times (\exp(\beta x) - 1) ]}{% f(x; alpha, beta) = alpha * exp[beta * x] * exp[-(alpha/beta) * (exp(beta * x) - 1) ]} for \eqn{\alpha > 0}{a > 0}, \eqn{\beta > 0}{b > 0}, \eqn{x > 0}.
Here, \eqn{\beta} is called the scale parameter \code{scale}, and \eqn{\alpha} is called the shape parameter (one could refer to \eqn{\alpha}{a} as a location parameter and \eqn{\beta}{b} as a shape parameter---see Lenart (2012)). The mean involves an exponential integral function. Simulated Fisher scoring is used and multiple responses are handled. The Makeham distribution has an additional parameter compared to the Gompertz distribution. If \eqn{X} is defined to be the result of sampling from a Gumbel distribution until a negative value \eqn{Z} is produced, then \eqn{X = -Z} has a Gompertz distribution. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } \references{ Lenart, A. (2012) The moments of the Gompertz distribution and maximum likelihood estimation of its parameters. \emph{Scandinavian Actuarial Journal}, in press. } \author{ T. W. Yee } \section{Warning }{ The same warnings in \code{\link{makeham}} apply here too. } \seealso{ \code{\link{dgompertz}}, \code{\link{makeham}}, \code{\link{simulate.vlm}}. } \examples{ \dontrun{ gdata <- data.frame(x2 = runif(nn <- 1000)) gdata <- transform(gdata, eta1 = -1, eta2 = -1 + 0.2 * x2, ceta1 = 1, ceta2 = -1 + 0.2 * x2) gdata <- transform(gdata, shape1 = exp(eta1), shape2 = exp(eta2), scale1 = exp(ceta1), scale2 = exp(ceta2)) gdata <- transform(gdata, y1 = rgompertz(nn, scale = scale1, shape = shape1), y2 = rgompertz(nn, scale = scale2, shape = shape2)) fit1 <- vglm(y1 ~ 1, gompertz, data = gdata, trace = TRUE) fit2 <- vglm(y2 ~ x2, gompertz, data = gdata, trace = TRUE) coef(fit1, matrix = TRUE) Coef(fit1) summary(fit1) coef(fit2, matrix = TRUE) summary(fit2)
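# ---- Added check (not part of the original example) ----
# A small numerical check of the cdf formula stated in the details,
# F(x; alpha, beta) = 1 - exp(-(alpha/beta) * (exp(beta*x) - 1)),
# assuming pgompertz() takes scale (= beta) and shape (= alpha)
# arguments in the same way as rgompertz() above.
aa <- 2; bb <- 0.5; xx <- c(0.1, 0.5, 1, 2)  # Hypothetical values
max(abs(pgompertz(xx, scale = bb, shape = aa) -
        (1 - exp(-(aa/bb) * (exp(bb * xx) - 1)))))  # Should be ~0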
} } \keyword{models} \keyword{regression} % probs.y = c(0.20, 0.50, 0.80) VGAM/man/reciprocallink.Rd0000644000176200001440000000430013565414527015060 0ustar liggesusers\name{reciprocallink} \alias{reciprocallink} %\alias{reciprocal} \alias{negreciprocallink} %\alias{negreciprocal} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Reciprocal Link Function } \description{ Computes the reciprocal transformation, including its inverse and the first two derivatives. } \usage{ reciprocallink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) negreciprocallink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{theta}{ Numeric or character. See below for further details. } \item{bvalue}{ See \code{\link{Links}}. } \item{inverse, deriv, short, tag}{ Details at \code{\link{Links}}. } } \details{ The \code{reciprocallink} link function is a special case of the power link function. Numerical values of \code{theta} close to 0 result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The \code{negreciprocallink} link function computes the negative reciprocal, i.e., \eqn{-1/ \theta}{-1/theta}. } \value{ For \code{reciprocallink}: for \code{deriv = 0}, the reciprocal of \code{theta}, i.e., \code{1/theta} when \code{inverse = FALSE}, and if \code{inverse = TRUE} then \code{1/theta} (the reciprocal transformation is its own inverse). For \code{deriv = 1}, the function returns \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal. } \references{ McCullagh, P. and Nelder, J. A. (1989) \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall. } %\section{Warning}{ %} \author{ Thomas W. Yee } \note{ Numerical instability may occur when \code{theta} is close to 0. } \seealso{ \code{\link{identitylink}}, \code{\link{powerlink}}. } \examples{ reciprocallink(1:5) reciprocallink(1:5, inverse = TRUE, deriv = 2) negreciprocallink(1:5) negreciprocallink(1:5, inverse = TRUE, deriv = 2) x <- (-3):3 reciprocallink(x) # Has Inf reciprocallink(x, bvalue = .Machine$double.eps) # Has no Inf } \keyword{math} \keyword{models} \keyword{regression} VGAM/man/otpospoisson.Rd0000644000176200001440000000336413565414527014647 0ustar liggesusers\name{otpospoisson} \alias{otpospoisson} %- Also NEED an '\alias' for EACH other topic documented here. \title{ One-truncated Poisson Distribution } \description{ Estimating the (single) parameter of the 1-truncated positive Poisson distribution. } \usage{ otpospoisson(llambda = "loglink", type.fitted = c("mean", "lambda", "prob0", "prob1"), ilambda = NULL, imethod = 1, zero = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{llambda, type.fitted, ilambda}{ Same as \code{\link{pospoisson}}. } \item{imethod, zero}{ Same as \code{\link{pospoisson}}. } } \details{ The 1-truncated positive Poisson distribution has support on 2, 3, \ldots. It is a Poisson distribution but with the probability of a one or zero being 0. The other probabilities are scaled to add to unity. Some more details can be found at \code{\link{pospoisson}}. Multiple responses are permitted. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}}, and \code{\link{vgam}}. } %\references{ %} \author{ T. W. Yee } %\note{ %} \seealso{ \code{\link{Otpospois}}, \code{\link{oipospoisson}}, \code{\link{simulate.vlm}}. } \examples{ odata <- data.frame(y1 = rotpospois(n = 1000, lambda = loglink(1, inverse = TRUE))) ofit <- vglm(y1 ~ 1, otpospoisson, data = odata, trace = TRUE, crit = "c") coef(ofit, matrix = TRUE) Coef(ofit) \dontrun{with(odata, hist(y1, prob = TRUE, breaks = seq(0.5, max(y1) + 0.5, by = 1), border = "blue")) x <- seq(1, with(odata, max(y1)), by = 1) with(odata, lines(x, dotpospois(x, Coef(ofit)[1]), col = "orange", type = "h", lwd = 2)) }
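# ---- Added sketch (not part of the original example) ----
# The pmf described in the details: a Poisson pmf with the masses at
# 0 and 1 removed and the remainder rescaled to sum to unity.
lam <- 2; xx <- 2:100  # Hypothetical lambda; xx approximates the support
pmf <- dpois(xx, lam) / (1 - dpois(0, lam) - dpois(1, lam))
sum(pmf)  # Should be essentially 1
max(abs(pmf - dotpospois(xx, lam)))  # Should be essentially 0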
} \keyword{models} \keyword{regression} VGAM/man/zipf.Rd0000644000176200001440000000612013565414527013031 0ustar liggesusers\name{zipf} \alias{zipf} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Zipf Distribution Family Function } \description{ Estimates the parameter of the Zipf distribution. } \usage{ zipf(N = NULL, lshape = "loglink", ishape = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{N}{ Number of elements, an integer satisfying \code{1 < N < Inf}. The default is to use the maximum value of the response. If given, \code{N} must be no less than the largest response value. If \code{N = Inf} and \eqn{s>1} then this is the zeta distribution (use \code{\link{zetaff}} instead). } \item{lshape}{ Parameter link function applied to the (positive) shape parameter \eqn{s}. See \code{\link{Links}} for more choices. } \item{ishape}{ Optional initial value for the parameter \eqn{s}. The default is to choose an initial value internally. If convergence failure occurs use this argument to input a value. } } \details{ The probability function for a response \eqn{Y} is \deqn{P(Y=y) = y^{-s} / \sum_{i=1}^N i^{-s},\ \ s>0,\ \ y=1,2,\ldots,N,}{% P(Y=y) = (y^(-s)) / sum((1:N)^(-s)), s>0, y=1,2,...,N,} where \eqn{s} is the exponent characterizing the distribution. The mean of \eqn{Y}, which is returned as the fitted values, is \eqn{\mu = H_{N,s-1} / H_{N,s}}{H(N,s-1) / H(N,s)} where \eqn{H_{n,m}= \sum_{i=1}^n i^{-m}}{H(n,m)=sum((1:n)^(-m))} is the \eqn{n}th generalized harmonic number. Zipf's law is an experimental law which is often applied to the study of the frequency of words in a corpus of natural language utterances. It states that the frequency of any word is inversely proportional to its rank in the frequency table. For example, \code{"the"} and \code{"of"} are the two most common words, and Zipf's law states that \code{"the"} is twice as common as \code{"of"}. Many other natural phenomena conform to Zipf's law. } \value{ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}). The object is used by modelling functions such as \code{\link{vglm}} and \code{\link{vgam}}. } \references{ pp.526-- of Chapter 11 of Johnson N. L., Kemp, A. W. and Kotz S. (2005) \emph{Univariate Discrete Distributions}, 3rd edition, Hoboken, New Jersey, USA: Wiley. } \author{ T. W. Yee } \note{ Upon convergence, the \code{N} is stored as \code{@misc$N}. } \seealso{ \code{\link{dzipf}}, \code{\link{zetaff}}, \code{\link{simulate.vlm}}. } \examples{ zdata <- data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2)) zfit <- vglm(y ~ 1, zipf, data = zdata, trace = TRUE, weight = ofreq) zfit <- vglm(y ~ 1, zipf(lshape = "identitylink", ishape = 3.4), data = zdata, trace = TRUE, weight = ofreq, crit = "coef") zfit@misc$N (shape.hat <- Coef(zfit)) with(zdata, weighted.mean(y, ofreq)) fitted(zfit, matrix = FALSE)
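# ---- Added sketch (not part of the original example) ----
# Verifying the mean formula mu = H(N,s-1) / H(N,s) from the details
# directly against the pmf, using hypothetical N and s:
NN <- 10; ss <- 1.2; xx <- 1:NN
pmf <- xx^(-ss) / sum(xx^(-ss))
sum(xx * pmf)  # Mean computed from the pmf
sum(xx^(-(ss - 1))) / sum(xx^(-ss))  # H(N,s-1) / H(N,s); should match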
} \keyword{models} \keyword{regression} %pp.465--471, Chapter 11 of %Johnson N. L., Kotz S., and Kemp A. W. (1993) %\emph{Univariate Discrete Distributions}, %2nd ed. %New York: Wiley. %http://www.math.uah.edu/stat/special/Zeta.html calls s 'shape' VGAM/man/calibrate.Rd0000644000176200001440000000607113565414527014014 0ustar liggesusers\name{calibrate} \alias{calibrate} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Model Calibrations } \description{ \code{calibrate} is a generic function used to produce calibrations from various model fitting functions. The function invokes particular `methods' which depend on the `class' of the first argument. } \usage{ calibrate(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ An object for which a calibration is desired. } \item{\dots}{ Additional arguments affecting the calibration produced. Usually the most important argument in \code{\dots} is \code{newdata} which, for \code{calibrate}, contains new \emph{response} data, \bold{Y}, say. } } \details{ Given a regression model with explanatory variables \bold{X} and response \bold{Y}, calibration involves estimating \bold{X} from \bold{Y} using the regression model. It can be loosely thought of as the opposite of \code{\link{predict}} (which takes an \bold{X} and returns a \bold{Y} of some sort). In general, the central algorithm is maximum likelihood calibration. } \value{ In general, given a new response \bold{Y}, some function of the explanatory variables \bold{X} is returned. For example, for constrained ordination models such as CQO and CAO models, it is usually not possible to return \bold{X}, so the latent variables are returned instead (they are linear combinations of the \bold{X}). See the specific \code{calibrate} methods functions to see what they return. } \references{ ter Braak, C. J. F. and van Dam, H. (1989). Inferring pH from diatoms: a comparison of old and new calibration methods. \emph{Hydrobiologia}, \bold{178}, 209--223. } \author{ T. W. Yee } \note{ This function was not called \code{predictx} because of the inability of constrained ordination models to return \bold{X}; they can only return the latent variable values (also known as site scores) instead. } \seealso{ \code{\link{predict}}, \code{\link{calibrate.rrvglm}}, \code{\link{calibrate.qrrvglm}}. } \examples{ \dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Stdzed environmental vars set.seed(123) pcao1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux, family = poissonff, data = hspider, Rank = 1, Bestof = 3, df1.nl = c(Zoraspin = 2, 1.9), Crow1positive = TRUE) siteNos <- 1:2 # Calibrate these sites cpcao1 <- calibrate(pcao1, trace = TRUE, newdata = data.frame(depvar(pcao1)[siteNos, ], model.matrix(pcao1)[siteNos, ])) # Graphically compare the actual site scores with their calibrated values persp(pcao1, main = "Site scores: solid=actual, dashed=calibrated", label = TRUE, col = "blue", las = 1) abline(v = latvar(pcao1)[siteNos], col = seq(siteNos)) # Actual scores abline(v = cpcao1, lty = 2, col = seq(siteNos)) # Calibrated values } } \keyword{models} \keyword{regression} VGAM/man/R2latvar.Rd0000644000176200001440000000516413565414527013565 0ustar liggesusers\name{R2latvar} \alias{R2latvar} %- Also NEED an '\alias' for EACH other topic documented here. \title{ R-squared for Latent Variable Models } \description{ R-squared goodness of fit for latent variable models, such as cumulative link models. Some software such as Stata calls the quantity the McKelvey--Zavoina R-squared, which was proposed in their 1975 paper for cumulative probit models. } \usage{ R2latvar(object) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A \code{\link{cumulative}} or \code{\link{binomialff}} fit using \code{\link{vglm}}. Only a few selected link functions are currently permitted: \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}. For models with more than one linear predictor, a parallelism assumption is needed also, i.e., the constraint matrices must be a 1-column matrix of 1s (except for the intercept). The model is assumed to have an intercept term. } } \details{ Models such as the proportional odds model have a latent variable interpretation (see, e.g., Section 6.2.6 of Agresti (2018), Section 14.4.1.1 of Yee (2015), Section 5.2.2 of McCullagh and Nelder (1989)). It is possible to summarize the predictive power of the model by computing \eqn{R^2} on the transformed scale, e.g., on a standard normal distribution for a \code{\link{probitlink}} link. For more details see Section 6.3.7 of Agresti (2018). } \value{ The \eqn{R^2} value. Approximately, that amount is the proportion of the variability in the latent variable of the model explained by all the explanatory variables. Then taking the positive square-root gives an approximate multiple correlation \eqn{R}. } \references{ % Agresti, A.
(2007) % \emph{An Introduction to Categorical Data Analysis, 2nd ed.}, % New York: John Wiley & Sons. % Page 38. Agresti, A. (2018) \emph{An Introduction to Categorical Data Analysis, 3rd ed.}, New York: John Wiley & Sons. McKelvey, R. D. and W. Zavoina (1975) A statistical model for the analysis of ordinal level dependent variables. \emph{The Journal of Mathematical Sociology}, \bold{4}, 103--120. } \author{ Thomas W. Yee } %\note{ %} %\section{Warning }{ % This %} \seealso{ \code{\link{vglm}}, \code{\link{cumulative}}, \code{\link{propodds}}, \code{\link{logitlink}}, \code{\link{probitlink}}, \code{\link{clogloglink}}, \code{\link[stats]{summary.lm}}. } \examples{ pneumo <- transform(pneumo, let = log(exposure.time)) (fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)) R2latvar(fit) } \keyword{models} \keyword{regression} VGAM/man/moffset.Rd0000644000176200001440000000733613565414527013532 0ustar liggesusers\name{moffset} \alias{moffset} \title{ Matrix Offset } \description{ Modify a matrix by shifting successive elements. } \usage{ moffset(mat, roffset = 0, coffset = 0, postfix = "", rprefix = "Row.", cprefix = "Col.") } \arguments{ \item{mat}{ Data frame or matrix. This ought to have at least three rows and three columns. The elements are shifted in the order of \code{c(mat)}, i.e., going down successive columns, as the columns go from left to right. Wrapping of values is done. } \item{roffset, coffset}{ Numeric or character. If numeric, the amount of shift (offset) for each row and column. The default is no change to \code{mat}. If character, the offset is computed by matching with the row or column names. For example, for \code{\link{alcoff}}, putting \code{roffset = "6"} means that we make an effective day's dataset start from 6:00 am, and this wraps around to include midnight to 05.59 am on the next day. } \item{postfix}{ Character. Modified rows and columns are renamed by pasting this argument to the end of each name. The default is no change. } \item{rprefix, cprefix}{ Same as \code{\link{rcim}}. } } \details{ This function allows a matrix to be rearranged so that element (\code{roffset} + 1, \code{coffset} + 1) becomes the (1, 1) element. The elements are assumed to be ordered in the same way as the elements of \code{c(mat)}. This function is applicable to, e.g., \code{\link{alcoff}}, where it is useful to define the \emph{effective day} as starting at some other hour than midnight, e.g., 6.00am. This is because partying on Friday night continues on into Saturday morning; therefore it is more interpretable to use the effective day when considering a daily effect. This is a data preprocessing function for \code{\link{rcim}} and \code{\link{plotrcim0}}. The difference between \code{\link{Rcim}} and \code{\link{moffset}} is that \code{\link{Rcim}} only reorders the levels of the rows and columns so that the data is shifted but not moved. That is, a value in one row stays in that row, and ditto for column. But in \code{\link{moffset}} values in one column can be moved to a previous column. See the examples below. } \value{ A matrix of the same dimension as its input. } \author{ T. W. Yee, Alfian F. Hadi. } \note{ % This function was originally for a 24 x 7 dimensional matrix % (24 hours of the day by 7 days per week) such as \code{\link{alcoff}}. % Of course, this function can be applied to any moderately % large matrix. The input \code{mat} should have row names and column names.
} \seealso{ \code{\link{Rcim}}, \code{\link{rcim}}, \code{\link{plotrcim0}}, \code{\link{alcoff}}, \code{\link{crashi}}. } \examples{ moffset(alcoff, 3, 2, "*") # Some day's data is moved to the previous day. Rcim(alcoff, 3 + 1, 2 + 1) # Data does not move as much. alcoff # Original data moffset(alcoff, 3, 2, "*") - Rcim(alcoff, 3+1, 2+1) # Note the differences # An 'effective day' data set: alcoff.e <- moffset(alcoff, roffset = "6", postfix = "*") fit.o <- rcim(alcoff) # default baselines are first row and col fit.e <- rcim(alcoff.e) # default baselines are first row and col \dontrun{ par(mfrow = c(2, 2), mar = c(9, 4, 2, 1)) plot(fit.o, rsub = "Not very interpretable", csub = "Not very interpretable") plot(fit.e, rsub = "More interpretable", csub = "More interpretable") } # Some checking all.equal(moffset(alcoff), alcoff) # Should be no change moffset(alcoff, 1, 1, "*") moffset(alcoff, 2, 3, "*") moffset(alcoff, 1, 0, "*") moffset(alcoff, 0, 1, "*") moffset(alcoff, "6", "Mon", "*") # This one is good # Customise row and column baselines fit2 <- rcim(Rcim(alcoff.e, rbaseline = "11", cbaseline = "Mon*"))
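# ---- Added toy illustration (not part of the original example) ----
# A small named matrix makes the wrap-around easy to see: element
# (roffset + 1, coffset + 1) of the input becomes the (1, 1) element.
toymat <- matrix(1:12, 3, 4, dimnames = list(paste0("x", 1:3),
                                             paste0("y", 1:4)))
toymat
moffset(toymat, 1, 1, "*")  # Element (2, 2) moves to position (1, 1)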
} VGAM/DESCRIPTION0000755000176200001440000000366513565474753012542 0ustar liggesusersPackage: VGAM Version: 1.1-2 Date: 2019-11-21 Title: Vector Generalized Linear and Additive Models Authors@R: c(person("Thomas", "Yee", role = c("aut", "cre"), email = "t.yee@auckland.ac.nz"), person("Cleve", "Moler", role = "ctb", comment = "author of several LINPACK routines")) Author: Thomas Yee [aut, cre], Cleve Moler [ctb] (author of several LINPACK routines) Maintainer: Thomas Yee <t.yee@auckland.ac.nz> Depends: R (>= 3.4.0), methods, stats, stats4, splines Suggests: VGAMdata, MASS, mgcv Description: An implementation of about 6 major classes of statistical regression models. The central algorithm is Fisher scoring and iterative reweighted least squares. At the heart of this package are the vector generalized linear and additive model (VGLM/VGAM) classes. VGLMs can be loosely thought of as multivariate GLMs. VGAMs are data-driven VGLMs that use smoothing. The book "Vector Generalized Linear and Additive Models: With an Implementation in R" (Yee, 2015) gives details of the statistical framework and the package. Currently only fixed-effects models are implemented. Many (150+) models and distributions are estimated by maximum likelihood estimation (MLE) or penalized MLE. The other classes are RR-VGLMs (reduced-rank VGLMs), quadratic RR-VGLMs, reduced-rank VGAMs, RCIMs (row-column interaction models)---these classes perform constrained and unconstrained quadratic ordination (CQO/UQO) models in ecology, as well as constrained additive ordination (CAO). Note that these functions are subject to change; see the NEWS and ChangeLog files for latest changes. License: GPL-3 URL: https://www.stat.auckland.ac.nz/~yee/VGAM NeedsCompilation: yes BuildVignettes: yes LazyLoad: yes LazyData: yes Packaged: 2019-11-21 04:59:18 UTC; tyee001 Repository: CRAN Date/Publication: 2019-11-21 11:50:03 UTC VGAM/build/0000755000176200001440000000000013565414646012121 5ustar liggesusersVGAM/build/vignette.rds0000644000176200001440000000073713565414646014467 0ustar liggesusers[binary file contents omitted]VGAM/build/partial.rdb0000644000176200001440000002123613565414640014244 0ustar liggesusers[binary file contents omitted]
VGAM/BUGS0000755000176200001440000000750713565414530011505 0ustar liggesusersHere is a list of known bugs. 2016-06 lrtest(zipoissonff.object, poissonff.object) fails. 2016-05 rcim() with alaplace2() may fail. 2014-02 The subset argument of vgam() may not work, especially with multiple responses. To get around this, use subset() to create a smaller data frame and then feed that into vgam(). 2013-11 vgam() can only handle constraint matrices cmat, say, such that t(cmat) %*% cmat is diagonal. 2013-07 quasipoisson()'s scale parameter estimate does not handle prior weights correctly. 2012-09 loge('a', short = FALSE, inverse = FALSE) loge('a', short = FALSE, inverse = TRUE) give the same answer. Coef(vglm.dirmultinomial.fit) fails. Evidently, multiple "mlogit"s saved on vglm.dirmultinomial.fit@misc do not suffice. 2011-12 VGAM version 0.8-4 said it needed R version 2-11.1 or later. But really, R version 2-13.0 or later is needed. This is because the generic nobs() was not defined properly. Another fix is to install the (latest) prerelease version at http://www.stat.auckland.ac.nz/~yee/VGAM/prerelease 2010-04-12 cqo() should be working now. It uses new C code. Also, vgam() and vsmooth.spline() should not be noticeably different from before. But cao() is still not working... getting it going soon hopefully. 2009/07/13 cqo() fails... I think it is due to initial values being faulty. Hope to look into it soon. 2009/06/18 For a given VGAM family function, arguments such as parallel, exchangeable etc. will not work if the RHS of the formula is an intercept only. For example, parallel = FALSE ~ 1 and exchangeable = TRUE ~ 1 will fail. Instead, try something like parallel = TRUE ~ x2 + x3 + x4 -1 and exchangeable = FALSE ~ x2 + x3 + x4 + x5 -1 respectively. 2009/01/01 prediction with vgam( ~ offset(myoffsetmatrix) + ... ) fails inside a function because myoffsetmatrix cannot be found. 2008/08/12 Under Windows, the vgam() example involving the Hunua data seems to fail. It is under investigation. 2008/08/04 VGAM interferes with other packages, e.g., predict() and summary(). This is due to S3 and S4 interference, and currently I haven't sussed out the full details (e.g., NAMESPACES). For now it is best to attach VGAM only when needed and detach it when other packages are to be used. This can be done with library(VGAM) and detach("package:VGAM") 2008/05/16 zipf() did not handle 0 < s < 1. The prerelease version fixes this. 2008/03/12 A call such as mydof = 4 Fit = vgam(y ~ s(x, df=mydof), fam=poissonff) will result in failure when plot(Fit) Instead, one needs Fit = vgam(y ~ s(x, df=4), fam=poissonff) 2008/02/16 The VGAM package interferes with other functions, for example, if VGAM is loaded and lmobject is an "lm" object then fitted(lmobject) predict(lmobject) resid(lmobject) residuals(lmobject) will fail. 2006/05/18 dirmul() is not working yet.
2005/11/16 cao() now works in Windows. The argument xij does not work properly. 2005/8/31 The windows version of cao() seems to hang. It does not hang in Linux. 2005/6/10 cao() works in Linux but seems to hang in Windows. The latter (distributed in a .zip file format) is made from a R Cross Build process which may be a reason for the bug. I'm slowly looking into the bug. 2005/5/6 The VGAM package interferes with other code, including glm(). This may be due to the smart prediction code, or be due to the NAMESPACE facility. In order to use other functions outside the VGAM package you may need to type "detach()". 2003/7/14 vgam(y ~ s(x, df=2), subset= x > 2) will fail in R because the subset argument has the effect that the "df" and "spar" attributes are removed from the data frame containing the smoothing variables. Current fix: create a separate data frame satisfying the subset= condition, and then run vgam() on this smaller data frame. Thanks for Eugene Zwane for finding this bug. VGAM/src/0000755000176200001440000000000013565414646011611 5ustar liggesusersVGAM/src/vmux.f0000644000176200001440000004620113565414527012760 0ustar liggesusersC Output from Public domain Ratfor, version 1.01 subroutine qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu) implicit logical (a-z) integer wy1vqfzu, tgiyxdw1(*), dufozmt7(*) integer urohxe6t, bpvaqm5z, ayfnwr1v ayfnwr1v = 1 urohxe6t = wy1vqfzu 23000 if(.not.(urohxe6t .ge. 1))goto 23002 do23003 bpvaqm5z=1,urohxe6t tgiyxdw1(ayfnwr1v) = bpvaqm5z ayfnwr1v = ayfnwr1v+1 23003 continue 23004 continue 23001 urohxe6t=urohxe6t-1 goto 23000 23002 continue ayfnwr1v = 1 do23005 urohxe6t=1,wy1vqfzu do23007 bpvaqm5z=urohxe6t,wy1vqfzu dufozmt7(ayfnwr1v) = bpvaqm5z ayfnwr1v = ayfnwr1v+1 23007 continue 23008 continue 23005 continue 23006 continue return end integer function viamf(cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1, duf *ozmt7) integer cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1(*), dufozmt7(*) integer urohxe6t, imk5wjxg imk5wjxg = wy1vqfzu*(wy1vqfzu+1)/2 do23009 urohxe6t=1,imk5wjxg if((tgiyxdw1(urohxe6t).eq.cz8qdfyj .and. dufozmt7(urohxe6t).eq.rvy *1fpli) .or. (tgiyxdw1(urohxe6t).eq.rvy1fpli .and. dufozmt7(urohxe6 *t).eq.cz8qdfyj))then viamf = urohxe6t return endif 23009 continue 23010 continue viamf = 0 return end subroutine vm2af(mat, a, dimm, tgiyxdw1, dufozmt7, kuzxj1lo, wy1vq *fzu, rb1onzwu) implicit logical (a-z) integer dimm, tgiyxdw1(dimm), dufozmt7(dimm), kuzxj1lo, wy1vqfzu, *rb1onzwu double precision mat(dimm,kuzxj1lo), a(wy1vqfzu,wy1vqfzu,kuzxj1lo) integer ayfnwr1v, yq6lorbx, gp1jxzuh, imk5wjxg imk5wjxg = wy1vqfzu * (wy1vqfzu + 1) / 2 if(rb1onzwu .eq. 1 .or. dimm .ne. imk5wjxg)then ayfnwr1v = 1 23015 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23017 yq6lorbx = 1 23018 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23020 gp1jxzuh = 1 23021 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23023 a(gp1jxzuh,yq6lorbx,ayfnwr1v) = 0.0d0 23022 gp1jxzuh=gp1jxzuh+1 goto 23021 23023 continue 23019 yq6lorbx=yq6lorbx+1 goto 23018 23020 continue 23016 ayfnwr1v=ayfnwr1v+1 goto 23015 23017 continue endif do23024 ayfnwr1v=1,kuzxj1lo do23026 yq6lorbx=1,dimm a(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,a *yfnwr1v) if(rb1onzwu .eq. 
0)then a(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,a *yfnwr1v) endif 23026 continue 23027 continue 23024 continue 23025 continue return end subroutine mux22f(wpuarq2m, tlgduey8, lfu2qhid, dimu, tgiyxdw1, du *fozmt7, kuzxj1lo, wy1vqfzu, wk1200) implicit logical (a-z) integer dimu, tgiyxdw1(*), dufozmt7(*), kuzxj1lo, wy1vqfzu double precision wpuarq2m(dimu,kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqf *zu), lfu2qhid(wy1vqfzu,kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu) double precision q6zdcwxk integer ayfnwr1v, yq6lorbx, bpvaqm5z, one, rb1onzwu one = 1 rb1onzwu = 1 ayfnwr1v = 1 23030 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23032 call vm2af(wpuarq2m(1,ayfnwr1v), wk1200, dimu, tgiyxdw1, dufozmt7, * one, wy1vqfzu, rb1onzwu) yq6lorbx = 1 23033 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23035 q6zdcwxk = 0.0d0 bpvaqm5z = yq6lorbx 23036 if(.not.(bpvaqm5z .le. wy1vqfzu))goto 23038 q6zdcwxk = q6zdcwxk + wk1200(yq6lorbx,bpvaqm5z) * tlgduey8(ayfnwr1 *v,bpvaqm5z) 23037 bpvaqm5z=bpvaqm5z+1 goto 23036 23038 continue lfu2qhid(yq6lorbx,ayfnwr1v) = q6zdcwxk 23034 yq6lorbx=yq6lorbx+1 goto 23033 23035 continue 23031 ayfnwr1v=ayfnwr1v+1 goto 23030 23032 continue return end subroutine vbksf(wpuarq2m, bvecto, wy1vqfzu, kuzxj1lo, wk1200, tgi *yxdw1, dufozmt7, dimu) implicit logical (a-z) integer wy1vqfzu, kuzxj1lo, tgiyxdw1(*), dufozmt7(*), dimu double precision wpuarq2m(dimu,kuzxj1lo), bvecto(wy1vqfzu,kuzxj1lo *), wk1200(wy1vqfzu,wy1vqfzu) double precision q6zdcwxk integer ayfnwr1v, yq6lorbx, gp1jxzuh, rb1onzwu, one rb1onzwu = 1 one = 1 ayfnwr1v = 1 23039 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23041 call vm2af(wpuarq2m(1,ayfnwr1v), wk1200, dimu, tgiyxdw1, dufozmt7, * one, wy1vqfzu, rb1onzwu) yq6lorbx = wy1vqfzu 23042 if(.not.(yq6lorbx .ge. 1))goto 23044 q6zdcwxk = bvecto(yq6lorbx,ayfnwr1v) gp1jxzuh = yq6lorbx+1 23045 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23047 q6zdcwxk = q6zdcwxk - wk1200(yq6lorbx,gp1jxzuh) * bvecto(gp1jxzuh, *ayfnwr1v) 23046 gp1jxzuh=gp1jxzuh+1 goto 23045 23047 continue bvecto(yq6lorbx,ayfnwr1v) = q6zdcwxk / wk1200(yq6lorbx,yq6lorbx) 23043 yq6lorbx=yq6lorbx-1 goto 23042 23044 continue 23040 ayfnwr1v=ayfnwr1v+1 goto 23039 23041 continue return end subroutine vcholf(wmat, bvecto, wy1vqfzu, dvhw1ulq, isolve) implicit logical (a-z) integer isolve integer wy1vqfzu, dvhw1ulq double precision wmat(wy1vqfzu,wy1vqfzu), bvecto(wy1vqfzu) double precision q6zdcwxk, dsqrt integer ayfnwr1v, yq6lorbx, gp1jxzuh dvhw1ulq=1 do23048 ayfnwr1v=1,wy1vqfzu q6zdcwxk = 0d0 do23050 gp1jxzuh=1,ayfnwr1v-1 q6zdcwxk = q6zdcwxk + wmat(gp1jxzuh,ayfnwr1v) * wmat(gp1jxzuh,ayfn *wr1v) 23050 continue 23051 continue wmat(ayfnwr1v,ayfnwr1v) = wmat(ayfnwr1v,ayfnwr1v) - q6zdcwxk if(wmat(ayfnwr1v,ayfnwr1v) .le. 0d0)then dvhw1ulq = 0 return endif wmat(ayfnwr1v,ayfnwr1v) = dsqrt(wmat(ayfnwr1v,ayfnwr1v)) do23054 yq6lorbx=ayfnwr1v+1,wy1vqfzu q6zdcwxk = 0d0 do23056 gp1jxzuh=1,ayfnwr1v-1 q6zdcwxk = q6zdcwxk + wmat(gp1jxzuh,ayfnwr1v) * wmat(gp1jxzuh,yq6l *orbx) 23056 continue 23057 continue wmat(ayfnwr1v,yq6lorbx) = (wmat(ayfnwr1v,yq6lorbx) - q6zdcwxk) / w *mat(ayfnwr1v,ayfnwr1v) 23054 continue 23055 continue 23048 continue 23049 continue if(isolve .eq. 
0)then do23060 ayfnwr1v=2,wy1vqfzu do23062 yq6lorbx=1,ayfnwr1v-1 wmat(ayfnwr1v,yq6lorbx) = 0.0d0 23062 continue 23063 continue return 23060 continue 23061 continue endif do23064 yq6lorbx=1,wy1vqfzu q6zdcwxk = bvecto(yq6lorbx) do23066 gp1jxzuh=1,yq6lorbx-1 q6zdcwxk = q6zdcwxk - wmat(gp1jxzuh,yq6lorbx) * bvecto(gp1jxzuh) 23066 continue 23067 continue bvecto(yq6lorbx) = q6zdcwxk / wmat(yq6lorbx,yq6lorbx) 23064 continue 23065 continue yq6lorbx = wy1vqfzu 23068 if(.not.(yq6lorbx .ge. 1))goto 23070 q6zdcwxk = bvecto(yq6lorbx) gp1jxzuh = yq6lorbx+1 23071 if(.not.(gp1jxzuh .le. wy1vqfzu))goto 23073 q6zdcwxk = q6zdcwxk - wmat(yq6lorbx,gp1jxzuh) * bvecto(gp1jxzuh) 23072 gp1jxzuh=gp1jxzuh+1 goto 23071 23073 continue bvecto(yq6lorbx) = q6zdcwxk / wmat(yq6lorbx,yq6lorbx) 23069 yq6lorbx=yq6lorbx-1 goto 23068 23070 continue return end subroutine mux17f(wpuarq2m, he7mqnvy, wy1vqfzu, xjc4ywlh, kuzxj1lo *, wk1200, wk3400, tgiyxdw1, dufozmt7, dimu, rutyk8mg) implicit logical (a-z) integer dimu, wy1vqfzu, xjc4ywlh, kuzxj1lo, tgiyxdw1(*), dufozmt7( **), rutyk8mg double precision wpuarq2m(dimu,kuzxj1lo), he7mqnvy(rutyk8mg,xjc4yw *lh), wk1200(wy1vqfzu,wy1vqfzu), wk3400(wy1vqfzu,xjc4ywlh) double precision q6zdcwxk integer ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z do23074 yq6lorbx=1,wy1vqfzu do23076 ayfnwr1v=1,wy1vqfzu wk1200(ayfnwr1v,yq6lorbx) = 0.0d0 23076 continue 23077 continue 23074 continue 23075 continue do23078 ayfnwr1v=1,kuzxj1lo do23080 bpvaqm5z=1,dimu wk1200(tgiyxdw1(bpvaqm5z), dufozmt7(bpvaqm5z)) = wpuarq2m(bpvaqm5z *,ayfnwr1v) 23080 continue 23081 continue do23082 gp1jxzuh=1,xjc4ywlh do23084 yq6lorbx=1,wy1vqfzu wk3400(yq6lorbx,gp1jxzuh) = he7mqnvy((ayfnwr1v-1)*wy1vqfzu+yq6lorb *x,gp1jxzuh) 23084 continue 23085 continue 23082 continue 23083 continue do23086 gp1jxzuh=1,xjc4ywlh do23088 yq6lorbx=1,wy1vqfzu q6zdcwxk = 0d0 do23090 bpvaqm5z=yq6lorbx,wy1vqfzu q6zdcwxk = q6zdcwxk + wk1200(yq6lorbx,bpvaqm5z) * wk3400(bpvaqm5z, *gp1jxzuh) 23090 continue 23091 continue he7mqnvy((ayfnwr1v-1)*wy1vqfzu+yq6lorbx,gp1jxzuh) = q6zdcwxk 23088 continue 23089 continue 23086 continue 23087 continue 23078 continue 23079 continue return end subroutine vrinvf9(wpuarq2m, ldr, wy1vqfzu, dvhw1ulq, ks3wejcv, wo *rk) implicit logical (a-z) integer ldr, wy1vqfzu, dvhw1ulq double precision wpuarq2m(ldr,wy1vqfzu), ks3wejcv(wy1vqfzu,wy1vqfz *u), work(wy1vqfzu,wy1vqfzu) double precision q6zdcwxk integer yq6lorbx, gp1jxzuh, col, uaoynef0 dvhw1ulq = 1 yq6lorbx = 1 23092 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23094 col = 1 23095 if(.not.(col .le. wy1vqfzu))goto 23097 work(yq6lorbx,col) = 0.0d0 23096 col=col+1 goto 23095 23097 continue 23093 yq6lorbx=yq6lorbx+1 goto 23092 23094 continue col = 1 23098 if(.not.(col .le. wy1vqfzu))goto 23100 yq6lorbx = col 23101 if(.not.(yq6lorbx .ge. 1))goto 23103 if(yq6lorbx .eq. col)then q6zdcwxk = 1.0d0 else q6zdcwxk = 0.0d0 endif gp1jxzuh = yq6lorbx+1 23106 if(.not.(gp1jxzuh .le. col))goto 23108 q6zdcwxk = q6zdcwxk - wpuarq2m(yq6lorbx,gp1jxzuh) * work(gp1jxzuh, *col) 23107 gp1jxzuh=gp1jxzuh+1 goto 23106 23108 continue if(wpuarq2m(yq6lorbx,yq6lorbx) .eq. 0.0d0)then dvhw1ulq = 0 else work(yq6lorbx,col) = q6zdcwxk / wpuarq2m(yq6lorbx,yq6lorbx) endif 23102 yq6lorbx=yq6lorbx-1 goto 23101 23103 continue 23099 col=col+1 goto 23098 23100 continue yq6lorbx = 1 23111 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23113 col = yq6lorbx 23114 if(.not.(col .le. wy1vqfzu))goto 23116 if(yq6lorbx .lt. col)then uaoynef0 = col else uaoynef0 = yq6lorbx endif q6zdcwxk = 0.0d0 gp1jxzuh = uaoynef0 23119 if(.not.(gp1jxzuh .le. 
wy1vqfzu))goto 23121 q6zdcwxk = q6zdcwxk + work(yq6lorbx,gp1jxzuh) * work(col,gp1jxzuh) 23120 gp1jxzuh=gp1jxzuh+1 goto 23119 23121 continue ks3wejcv(yq6lorbx,col) = q6zdcwxk ks3wejcv(col,yq6lorbx) = q6zdcwxk 23115 col=col+1 goto 23114 23116 continue 23112 yq6lorbx=yq6lorbx+1 goto 23111 23113 continue return end subroutine tldz5ion(xx, lfu2qhid) implicit logical (a-z) double precision xx, lfu2qhid double precision x, y, hofjnx2e, q6zdcwxk, xd4mybgj(6) integer yq6lorbx xd4mybgj(1)= 76.18009172947146d0 xd4mybgj(2)= -86.50532032941677d0 xd4mybgj(3)= 24.01409824083091d0 xd4mybgj(4)= -1.231739572450155d0 xd4mybgj(5)= 0.1208650973866179d-2 xd4mybgj(6)= -0.5395239384953d-5 x = xx y = xx hofjnx2e = x+5.50d0 hofjnx2e = hofjnx2e - (x+0.50d0) * dlog(hofjnx2e) q6zdcwxk=1.000000000190015d0 yq6lorbx=1 23122 if(.not.(yq6lorbx .le. 6))goto 23124 y = y + 1.0d0 q6zdcwxk = q6zdcwxk + xd4mybgj(yq6lorbx)/y 23123 yq6lorbx=yq6lorbx+1 goto 23122 23124 continue lfu2qhid = -hofjnx2e + dlog(2.5066282746310005d0 * q6zdcwxk / x) return end subroutine enbin9(bzmd6ftv, hdqsx7bk, nm0eljqk, n2kersmx, n, dvhw1 *ulq, zy1mchbf, ux3nadiw, rsynp1go, sguwj9ty) implicit logical (a-z) integer n, dvhw1ulq, zy1mchbf, sguwj9ty double precision bzmd6ftv(n, zy1mchbf), hdqsx7bk(n, zy1mchbf), nm0 *eljqk(n, zy1mchbf), n2kersmx, ux3nadiw, rsynp1go integer ayfnwr1v, kij0gwer double precision oxjgzv0e, btiehdm2, ydb, vjz5sxty, esql7umk, pvcj *l2na, mwuvskg1, ft3ijqmy, hmayv1xt, q6zdcwxk, plo6hkdr real csi9ydge if(n2kersmx .le. 0.80d0 .or. n2kersmx .ge. 1.0d0)then dvhw1ulq = 0 return endif btiehdm2 = 100.0d0 * rsynp1go oxjgzv0e = 0.001d0 dvhw1ulq = 1 kij0gwer=1 23127 if(.not.(kij0gwer.le.zy1mchbf))goto 23129 ayfnwr1v=1 23130 if(.not.(ayfnwr1v.le.n))goto 23132 vjz5sxty = nm0eljqk(ayfnwr1v,kij0gwer) / hdqsx7bk(ayfnwr1v,kij0gwe *r) if((vjz5sxty .lt. oxjgzv0e) .or. (nm0eljqk(ayfnwr1v,kij0gwer) .gt. * 1.0d5))then bzmd6ftv(ayfnwr1v,kij0gwer) = -nm0eljqk(ayfnwr1v,kij0gwer) * (1.0d *0 + hdqsx7bk(ayfnwr1v,kij0gwer)/(hdqsx7bk(ayfnwr1v,kij0gwer) + nm0 *eljqk(ayfnwr1v,kij0gwer))) / hdqsx7bk(ayfnwr1v,kij0gwer)**2 if(bzmd6ftv(ayfnwr1v,kij0gwer) .gt. -btiehdm2)then bzmd6ftv(ayfnwr1v,kij0gwer) = -btiehdm2 endif goto 20 endif q6zdcwxk = 0.0d0 pvcjl2na = hdqsx7bk(ayfnwr1v,kij0gwer) / (hdqsx7bk(ayfnwr1v,kij0gw *er) + nm0eljqk(ayfnwr1v,kij0gwer)) mwuvskg1 = 1.0d0 - pvcjl2na csi9ydge = hdqsx7bk(ayfnwr1v,kij0gwer) if(pvcjl2na .lt. btiehdm2)then pvcjl2na = btiehdm2 endif if(mwuvskg1 .lt. btiehdm2)then mwuvskg1 = btiehdm2 endif esql7umk = 100.0d0 + 15.0d0 * nm0eljqk(ayfnwr1v,kij0gwer) if(esql7umk .lt. sguwj9ty)then esql7umk = sguwj9ty endif ft3ijqmy = pvcjl2na ** csi9ydge ux3nadiw = ft3ijqmy plo6hkdr = (1.0d0 - ux3nadiw) / hdqsx7bk(ayfnwr1v,kij0gwer)**2 q6zdcwxk = q6zdcwxk + plo6hkdr ydb = 1.0d0 ft3ijqmy = hdqsx7bk(ayfnwr1v,kij0gwer) * mwuvskg1 * ft3ijqmy ux3nadiw = ux3nadiw + ft3ijqmy plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb *)**2 q6zdcwxk = q6zdcwxk + plo6hkdr ydb = 2.0d0 23143 if(((ux3nadiw .le. n2kersmx) .or. (plo6hkdr .gt. 1.0d-4)) .and. (y *db .lt. 
esql7umk))then ft3ijqmy = (hdqsx7bk(ayfnwr1v,kij0gwer) - 1.0d0 + ydb) * mwuvskg1 ** ft3ijqmy / ydb ux3nadiw = ux3nadiw + ft3ijqmy plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb *)**2 q6zdcwxk = q6zdcwxk + plo6hkdr ydb = ydb + 1.0d0 goto 23143 endif 23144 continue bzmd6ftv(ayfnwr1v,kij0gwer) = -q6zdcwxk 20 hmayv1xt = 0.0d0 23131 ayfnwr1v=ayfnwr1v+1 goto 23130 23132 continue 23128 kij0gwer=kij0gwer+1 goto 23127 23129 continue return end subroutine enbin8(bzmd6ftv, hdqsx7bk, hsj9bzaq, n2kersmx, kuzxj1lo *, dvhw1ulq, zy1mchbf, ux3nadiw, rsynp1go) implicit logical (a-z) integer kuzxj1lo, dvhw1ulq, zy1mchbf double precision bzmd6ftv(kuzxj1lo, zy1mchbf), hdqsx7bk(kuzxj1lo, *zy1mchbf), hsj9bzaq(kuzxj1lo, zy1mchbf), n2kersmx, ux3nadiw, rsynp *1go integer ayfnwr1v, kij0gwer, esql7umk double precision ft3ijqmy, tad5vhsu, o3jyipdf, pq0hfucn, q6zdcwxk, * d1, d2, plo6hkdr, hnu1vjyw logical pok1, pok2, pok12 double precision oxjgzv0e, onemse, nm0eljqk, btiehdm2, ydb, kbig d1 = 0.0d0 d2 = 0.0d0 btiehdm2 = -100.0d0 * rsynp1go esql7umk = 3000 if(n2kersmx .le. 0.80d0 .or. n2kersmx .ge. 1.0d0)then dvhw1ulq = 0 return endif kbig = 1.0d4 oxjgzv0e = 0.001d0 hnu1vjyw = 1.0d0 - rsynp1go onemse = 1.0d0 / (1.0d0 + oxjgzv0e) dvhw1ulq = 1 kij0gwer=1 23147 if(.not.(kij0gwer.le.zy1mchbf))goto 23149 ayfnwr1v=1 23150 if(.not.(ayfnwr1v.le.kuzxj1lo))goto 23152 if(hdqsx7bk(ayfnwr1v,kij0gwer) .gt. kbig)then hdqsx7bk(ayfnwr1v,kij0gwer) = kbig endif if(hsj9bzaq(ayfnwr1v,kij0gwer) .lt. oxjgzv0e)then hsj9bzaq(ayfnwr1v,kij0gwer) = oxjgzv0e endif if((hsj9bzaq(ayfnwr1v,kij0gwer) .gt. onemse))then nm0eljqk = hdqsx7bk(ayfnwr1v,kij0gwer) * (1.0d0/hsj9bzaq(ayfnwr1v, *kij0gwer) - 1.0d0) bzmd6ftv(ayfnwr1v,kij0gwer) = -nm0eljqk * (1.0d0 + hdqsx7bk(ayfnwr *1v,kij0gwer)/(hdqsx7bk(ayfnwr1v,kij0gwer) + nm0eljqk)) / hdqsx7bk( *ayfnwr1v,kij0gwer)**2 if(bzmd6ftv(ayfnwr1v,kij0gwer) .gt. btiehdm2)then bzmd6ftv(ayfnwr1v,kij0gwer) = btiehdm2 endif goto 20 endif q6zdcwxk = 0.0d0 pok1 = .true. pok2 = hsj9bzaq(ayfnwr1v,kij0gwer) .lt. (1.0d0-rsynp1go) pok12 = pok1 .and. pok2 if(pok12)then d2 = hdqsx7bk(ayfnwr1v,kij0gwer) * dlog(hsj9bzaq(ayfnwr1v,kij0gwer *)) ux3nadiw = dexp(d2) else ux3nadiw = 0.0d0 endif plo6hkdr = (1.0d0 - ux3nadiw) / hdqsx7bk(ayfnwr1v,kij0gwer)**2 q6zdcwxk = q6zdcwxk + plo6hkdr call tldz5ion(hdqsx7bk(ayfnwr1v,kij0gwer), o3jyipdf) ydb = 1.0d0 call tldz5ion(ydb + hdqsx7bk(ayfnwr1v,kij0gwer), tad5vhsu) pq0hfucn = 0.0d0 if(pok12)then d1 = dlog(1.0d0 - hsj9bzaq(ayfnwr1v,kij0gwer)) ft3ijqmy = dexp(ydb * d1 + d2 + tad5vhsu - o3jyipdf - pq0hfucn) else ft3ijqmy = 0.0d0 endif ux3nadiw = ux3nadiw + ft3ijqmy plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb *)**2 q6zdcwxk = q6zdcwxk + plo6hkdr ydb = 2.0d0 23165 if((ux3nadiw .le. n2kersmx) .or. (plo6hkdr .gt. 1.0d-4))then tad5vhsu = tad5vhsu + dlog(ydb + hdqsx7bk(ayfnwr1v,kij0gwer) - 1.0 *d0) pq0hfucn = pq0hfucn + dlog(ydb) if(pok12)then ft3ijqmy = dexp(ydb * d1 + d2 + tad5vhsu - o3jyipdf - pq0hfucn) else ft3ijqmy = 0.0d0 endif ux3nadiw = ux3nadiw + ft3ijqmy plo6hkdr = (1.0d0 - ux3nadiw) / (hdqsx7bk(ayfnwr1v,kij0gwer) + ydb *)**2 q6zdcwxk = q6zdcwxk + plo6hkdr ydb = ydb + 1.0d0 if(ydb .gt. 
1.0d3)then goto 21 endif goto 23165 endif 23166 continue 21 bzmd6ftv(ayfnwr1v,kij0gwer) = -q6zdcwxk 20 tad5vhsu = 0.0d0 23151 ayfnwr1v=ayfnwr1v+1 goto 23150 23152 continue 23148 kij0gwer=kij0gwer+1 goto 23147 23149 continue return end subroutine mbessi0(bvecto, kuzxj1lo, kpzavbj3, d0, d1, d2, zjkrtol *8, qaltf0nz) implicit logical (a-z) integer kuzxj1lo, kpzavbj3, zjkrtol8, c5aesxkus double precision bvecto(kuzxj1lo), d0(kuzxj1lo), d1(kuzxj1lo), d2( *kuzxj1lo), qaltf0nz integer ayfnwr1v, gp1jxzuh double precision f0, t0, m0, f1, t1, m1, f2, t2, m2 double precision toobig toobig = 20.0d0 zjkrtol8 = 0 if(.not.(kpzavbj3 .eq. 0 .or. kpzavbj3 .eq. 1 .or. kpzavbj3 .eq. 2 *))then zjkrtol8 = 1 return endif do23173 gp1jxzuh=1,kuzxj1lo if(dabs(bvecto(gp1jxzuh)) .gt. toobig)then zjkrtol8 = 1 return endif t1 = bvecto(gp1jxzuh) / 2.0d0 f1 = t1 t0 = t1 * t1 f0 = 1.0d0 + t0 t2 = 0.50d0 f2 = t2 c5aesxkus = 15 if(dabs(bvecto(gp1jxzuh)) .gt. 10)then c5aesxkus = 25 endif if(dabs(bvecto(gp1jxzuh)) .gt. 15)then c5aesxkus = 35 endif if(dabs(bvecto(gp1jxzuh)) .gt. 20)then c5aesxkus = 40 endif if(dabs(bvecto(gp1jxzuh)) .gt. 30)then c5aesxkus = 55 endif do23185 ayfnwr1v=1,c5aesxkus m0 = (bvecto(gp1jxzuh) / (2.0d0*(ayfnwr1v+1.0d0))) ** 2.0 m1 = m0 * (1.0d0 + 1.0d0/ayfnwr1v) m2 = m1 * (2.0d0*ayfnwr1v + 1.0d0) / (2.0d0*ayfnwr1v - 1.0d0) t0 = t0 * m0 t1 = t1 * m1 t2 = t2 * m2 f0 = f0 + t0 f1 = f1 + t1 f2 = f2 + t2 if((dabs(t0) .lt. qaltf0nz) .and. (dabs(t1) .lt. qaltf0nz) .and. ( *dabs(t2) .lt. qaltf0nz))then goto 23186 endif 23185 continue 23186 continue if(0 .le. kpzavbj3)then d0(gp1jxzuh) = f0 endif if(1 .le. kpzavbj3)then d1(gp1jxzuh) = f1 endif if(2 .le. kpzavbj3)then d2(gp1jxzuh) = f2 endif 23173 continue 23174 continue return end VGAM/src/vcall2.f0000644000176200001440000000117313565414527013143 0ustar liggesusersC Output from Public domain Ratfor, version 1.01 subroutine vcall2(onemor,w,y,eta,beta,u) logical onemor double precision w(*), y(*), eta(*), beta(*), u(*) onemor = .true. w(1) = 1.0d0 y(1) = 1.0d0 eta(1) = 1.0d0 beta(1) = 1.0d0 u(1) = 1.0d0 return end subroutine vcall1(onemor,y,eta,beta,u,xbig,cpxbig) logical onemor, cpxbig double precision y(*), eta(*), beta(*), u(*), xbig(*) onemor = .true. y(1) = 1.0d0 eta(1) = 1.0d0 beta(1) = 1.0d0 u(1) = 1.0d0 xbig(1) = 1.0d0 cpxbig = .true. return end VGAM/src/lms.f0000644000176200001440000001477713565414527012571 0ustar liggesusersC Output from Public domain Ratfor, version 1.01 subroutine dpdlyjn(psi, i9mwnvqt, mymu, sigma, kpzavbj3ative, lfu2 *qhid) implicit logical (a-z) integer kpzavbj3ative double precision psi, i9mwnvqt, mymu, sigma, lfu2qhid(3) integer hbsl0gto, izero0 double precision aa, bb, uqnkc6zg, n3iasxug logical cc, pos hbsl0gto = 1 izero0 = 0 n3iasxug = 1.0d-04 mymu = 0.0d0 sigma = 1.0d0 cc = (psi .ge. 0.0d0) if(cc)then bb = i9mwnvqt pos = (dabs(i9mwnvqt) .le. n3iasxug) else bb = -2.0d0 + i9mwnvqt pos = (dabs(i9mwnvqt-2.0d0) .le. n3iasxug) endif aa = 1.0d0 + psi * bb if(kpzavbj3ative .ge. 0)then if(pos)then lfu2qhid(1) = psi else lfu2qhid(1) = aa / bb endif endif if(kpzavbj3ative .ge. 1)then if(pos)then lfu2qhid(2) = (lfu2qhid(1)**2) / 2 else uqnkc6zg = lfu2qhid(1) lfu2qhid(2) = (aa * (dlog(aa)/bb) - uqnkc6zg) / bb endif endif if(kpzavbj3ative .ge. 
2)then if(pos)then lfu2qhid(3) = (lfu2qhid(1)**3) / 3 else uqnkc6zg = lfu2qhid(2) * 2.0d0 lfu2qhid(3) = (aa * (dlog(aa)/bb) ** 2 - uqnkc6zg) / bb endif endif return end subroutine gleg11(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, le *nkpzavbj3mat, lfu2qhid) implicit logical (a-z) integer lenkpzavbj3mat double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4), *lfu2qhid integer hbsl0gto, itwo2, three3 double precision psi, pim12, o3jyipdf(3), two12 three3 = 3 itwo2 = 2 hbsl0gto = 1 two12 = 1.41421356237309515d0 if(lenkpzavbj3mat .gt. 0)then lfu2qhid = kpzavbj3mat(4) * (kpzavbj3mat(2)**2 + two12 * sigma * g *hz9vuba * kpzavbj3mat(3)) else pim12 = 0.564189583547756279d0 psi = mymu + two12 * sigma * ghz9vuba call dpdlyjn(psi, i9mwnvqt, mymu, sigma, itwo2, o3jyipdf) lfu2qhid = (dexp(-ghz9vuba*ghz9vuba) * pim12) * (o3jyipdf(2)**2 + *(psi - mymu) * o3jyipdf(3)) / sigma**2 endif return end subroutine gleg12(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, le *nkpzavbj3mat, lfu2qhid) implicit logical (a-z) integer lenkpzavbj3mat double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4), *lfu2qhid integer hbsl0gto, itwo2 double precision psi, pim12, two12 double precision tad5vhsu(3) itwo2 = 2 hbsl0gto = 1 if(lenkpzavbj3mat .gt. 0)then lfu2qhid = kpzavbj3mat(4) * (-kpzavbj3mat(2)) else pim12 = 0.564189583547756279d0 two12 = 1.41421356237309515d0 psi = mymu + two12 * sigma * ghz9vuba call dpdlyjn(psi, i9mwnvqt, mymu, sigma, hbsl0gto, tad5vhsu) lfu2qhid = (dexp(-ghz9vuba*ghz9vuba) * pim12) * (-tad5vhsu(2)) / s *igma**2 endif return end subroutine gleg13(ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat, le *nkpzavbj3mat, lfu2qhid) implicit logical (a-z) integer lenkpzavbj3mat double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4), *lfu2qhid integer hbsl0gto, itwo2 double precision psi, mtpim12, two12 double precision tad5vhsu(3) itwo2 = 2 hbsl0gto = 1 if(lenkpzavbj3mat .gt. 0)then lfu2qhid = kpzavbj3mat(4) * (-kpzavbj3mat(2)) * dsqrt(8.0d0) * ghz *9vuba else mtpim12 = -1.12837916709551256d0 two12 = 1.41421356237309515d0 psi = mymu + two12 * sigma * ghz9vuba call dpdlyjn(psi, i9mwnvqt, mymu, sigma, hbsl0gto, tad5vhsu) lfu2qhid = dexp(-ghz9vuba*ghz9vuba) * mtpim12 * tad5vhsu(2) * (psi * - mymu) / sigma**3 endif return end subroutine gint3(minx, maxx, wts, ahl0onwx, i9mwnvqt, mymu, sigma, * kk, lfu2qhid, elemnt) implicit logical (a-z) integer kk, elemnt double precision minx, maxx, wts(kk), ahl0onwx(kk), lfu2qhid, i9mw *nvqt, mymu, sigma integer gp1jxzuh, lenkpzavbj3mat double precision atx, dint, tint, kpzavbj3mat(4), midpt, range12 lenkpzavbj3mat = 0 midpt = 0.50d0 * (minx + maxx) range12 = 0.50d0 * (maxx - minx) dint = 0.0d0 if(elemnt .eq. 1)then do23022 gp1jxzuh=1,kk atx = midpt + range12 * ahl0onwx(gp1jxzuh) call gleg11(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, lenkpzavbj3ma *t, tint) dint = dint + tint * wts(gp1jxzuh) 23022 continue 23023 continue else if(elemnt .eq. 2)then do23026 gp1jxzuh=1,kk atx = midpt + range12 * ahl0onwx(gp1jxzuh) call gleg12(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, lenkpzavbj3ma *t, tint) dint = dint + tint * wts(gp1jxzuh) 23026 continue 23027 continue else if(elemnt .eq. 
3)then do23030 gp1jxzuh=1,kk atx = midpt + range12 * ahl0onwx(gp1jxzuh) call gleg13(atx, i9mwnvqt, mymu, sigma, kpzavbj3mat, lenkpzavbj3ma *t, tint) dint = dint + tint * wts(gp1jxzuh) 23030 continue 23031 continue endif endif endif lfu2qhid = lfu2qhid + range12 * dint return end subroutine yjngintf(minx, maxx, ahl0onwx, wts, kuzxj1lo, kk, i9mwn *vqt, mymu, sigma, lfu2qhid, qaltf0nz) implicit logical (a-z) integer kuzxj1lo, kk double precision minx(kuzxj1lo), maxx(kuzxj1lo), wts(kk), ahl0onwx *(kk), i9mwnvqt(kuzxj1lo), mymu(kuzxj1lo), sigma(kuzxj1lo), lfu2qhi *d(3,kuzxj1lo), qaltf0nz integer ayfnwr1v, iii, gp1jxzuh, lencomp, ipzbcvw3, hmayv1xt, elem *nt, hbsl0gto, itwo2 double precision xd4mybgj, j4qgxvlk, wiptsjx8 hbsl0gto = 1 itwo2 = 2 lencomp = 12 do23032 ayfnwr1v = 1,kuzxj1lo do23034 elemnt=1,3 j4qgxvlk = -10.0d0 do23036 iii=2,lencomp ipzbcvw3 = 2 ** iii xd4mybgj = (maxx(ayfnwr1v) - minx(ayfnwr1v)) / ipzbcvw3 lfu2qhid(elemnt,ayfnwr1v) = 0.0d0 do23038 gp1jxzuh=1,ipzbcvw3 call gint3(minx(ayfnwr1v)+(gp1jxzuh-1)*xd4mybgj, minx(ayfnwr1v)+gp *1jxzuh*xd4mybgj, wts, ahl0onwx, i9mwnvqt(ayfnwr1v), mymu(ayfnwr1v) *, sigma(ayfnwr1v), kk, lfu2qhid(elemnt,ayfnwr1v), elemnt) 23038 continue 23039 continue wiptsjx8 = dabs(lfu2qhid(elemnt,ayfnwr1v) - j4qgxvlk) / (1.0d0 + d *abs(lfu2qhid(elemnt,ayfnwr1v))) if(wiptsjx8 .lt. qaltf0nz)then goto 234 else j4qgxvlk = lfu2qhid(elemnt,ayfnwr1v) endif 23036 continue 23037 continue 234 hmayv1xt = 0 23034 continue 23035 continue 23032 continue 23033 continue return end VGAM/src/tyeepolygamma.f0000644000176200001440000001152213565414527014634 0ustar liggesusersC Output from Public domain Ratfor, version 1.01 subroutine vdgam1(x, lfu2qhid, dvhw1ulq) implicit logical (a-z) double precision x, lfu2qhid integer dvhw1ulq double precision w, series, obr6tcex dvhw1ulq = 1 if(x .le. 0.0d0)then dvhw1ulq = 0 return endif if(x .lt. 6.0d0)then call vdgam2(x + 6.0d0, obr6tcex, dvhw1ulq) lfu2qhid = obr6tcex - 1.0d0/x - 1.0d0/(x + 1.0d0) - 1.0d0/(x + 2.0 *d0) - 1.0d0/(x + 3.0d0) - 1.0d0/(x + 4.0d0) - 1.0d0/(x + 5.0d0) return endif w = 1.0d0 / (x * x) series = ((w * (-1.0d0/12.0d0 + ((w * (1.0d0/120.0d0 + ((w * (-1.0 *d0/252.0d0 + ((w * (1.0d0/240.0d0 + ((w * (-1.0d0/132.0d0 + ((w * *(691.0d0/32760.0d0 + ((w * (-1.0d0/12.0d0 + (3617.0d0 * w)/8160.0d *0))))))))))))))))))))) lfu2qhid = ( dlog(x) - 0.5d0/x + series ) return end subroutine vdgam2(x, lfu2qhid, dvhw1ulq) implicit logical (a-z) double precision x, lfu2qhid integer dvhw1ulq double precision w, series, obr6tcex dvhw1ulq = 1 if(x .le. 0.0d0)then dvhw1ulq = 0 return endif if(x .lt. 6.0d0)then call vdgam1(x + 6.0d0, obr6tcex, dvhw1ulq) lfu2qhid = obr6tcex - 1.0d0/x - 1.0d0/(x + 1.0d0) - 1.0d0/(x + 2.0 *d0) - 1.0d0/(x + 3.0d0) - 1.0d0/(x + 4.0d0) - 1.0d0/(x + 5.0d0) return endif w = 1.0d0 / (x * x) series = ((w * (-1.0d0/12.0d0 + ((w * (1.0d0/120.0d0 + ((w * (-1.0 *d0/252.0d0 + ((w * (1.0d0/240.0d0 + ((w * (-1.0d0/132.0d0 + ((w * *(691.0d0/32760.0d0 + ((w * (-1.0d0/12.0d0 + (3617.0d0 * w)/8160.0d *0))))))))))))))))))))) lfu2qhid = ( dlog(x) - 0.5d0/x + series ) return end subroutine vtgam1(x, lfu2qhid, dvhw1ulq) implicit logical (a-z) double precision x, lfu2qhid integer dvhw1ulq double precision w, series, obr6tcex dvhw1ulq = 1 if(x .le. 0.0d0)then dvhw1ulq = 0 return endif if(x .lt. 
6.0d0)then call vtgam2(x + 6.0d0, obr6tcex, dvhw1ulq) lfu2qhid = obr6tcex + 1.0d0/x**2 + 1.0d0/(x + 1.0d0)**2 + 1.0d0/(x * + 2.0d0)**2 + 1.0d0/(x + 3.0d0)**2 + 1.0d0/(x + 4.0d0)**2 + 1.0d0 */(x + 5.0d0)**2 return endif w = 1.0d0 / (x * x) series = 1.0d0 + (w * (1.0d0/6.0d0 + (w * (-1.0d0/30.0d0 + (w * (1 *.0d0/42.0d0 + (w * (-1.0d0/30.0d0 + (w * (5.0d0/66.0d0 + (w * (-69 *1.0d0/2370.0d0 + (w * (7.0d0/6.0d0 - (3617.0d0 * w)/510.0d0))))))) *))))))) lfu2qhid = 0.5d0 * w + series / x return end subroutine vtgam2(x, lfu2qhid, dvhw1ulq) implicit logical (a-z) double precision x, lfu2qhid integer dvhw1ulq double precision w, series, obr6tcex dvhw1ulq = 1 if(x .le. 0.0d0)then dvhw1ulq = 0 return endif if(x .lt. 6.0d0)then call vtgam1(x + 6.0d0, obr6tcex, dvhw1ulq) lfu2qhid = obr6tcex + 1.0d0/x**2 + 1.0d0/(x + 1.0d0)**2 + 1.0d0/(x * + 2.0d0)**2 + 1.0d0/(x + 3.0d0)**2 + 1.0d0/(x + 4.0d0)**2 + 1.0d0 */(x + 5.0d0)**2 return endif w = 1.0d0 / (x * x) series = 1.0d0 + (w * (1.0d0/6.0d0 + (w * (-1.0d0/30.0d0 + (w * (1 *.0d0/42.0d0 + (w * (-1.0d0/30.0d0 + (w * (5.0d0/66.0d0 + (w * (-69 *1.0d0/2370.0d0 + (w * (7.0d0/6.0d0 - (3617.0d0 * w)/510.0d0))))))) *))))))) lfu2qhid = 0.5d0 * w + series / x return end subroutine dgam1w(x, lfu2qhid, n, dvhw1ulq) implicit logical (a-z) integer n, dvhw1ulq double precision x(n), lfu2qhid(n) integer i, okobr6tcex dvhw1ulq = 1 do23016 i=1,n call vdgam1(x(i), lfu2qhid(i), okobr6tcex) if(okobr6tcex .ne. 1)then dvhw1ulq = okobr6tcex endif 23016 continue 23017 continue return end subroutine tgam1w(x, lfu2qhid, n, dvhw1ulq) implicit logical (a-z) integer n, dvhw1ulq double precision x(n), lfu2qhid(n) integer i, okobr6tcex dvhw1ulq = 1 do23020 i=1,n call vtgam1(x(i), lfu2qhid(i), okobr6tcex) if(okobr6tcex .ne. 1)then dvhw1ulq = okobr6tcex endif 23020 continue 23021 continue return end subroutine cum8sum(ci1oyxas, lfu2qhid, nlfu2qhid, valong, ntot, no *tdvhw1ulq) implicit logical (a-z) integer nlfu2qhid, ntot, notdvhw1ulq double precision ci1oyxas(ntot), lfu2qhid(nlfu2qhid), valong(ntot) integer ayfnwr1v, iii iii = 1 lfu2qhid(iii) = ci1oyxas(iii) do23024 ayfnwr1v=2,ntot if(valong(ayfnwr1v) .gt. valong(ayfnwr1v-1))then lfu2qhid(iii) = lfu2qhid(iii) + ci1oyxas(ayfnwr1v) else iii = iii + 1 lfu2qhid(iii) = ci1oyxas(ayfnwr1v) endif 23024 continue 23025 continue if(iii .eq. 
nlfu2qhid)then notdvhw1ulq = 0 else notdvhw1ulq = 1 endif return end VGAM/src/rgam3.c0000644000176200001440000006644513565414527013003 0ustar liggesusers #include #include #include #include #include void n5aioudkdnaoqj0l(double *qgnl3toc, double sjwyig9t[], double bhcji9gl[], double po8rwsmy[], int *kuzxj1lo, int *acpios9q, double gkdx5jal[], double rpyis2kc[], double imdvf4hx[], double ifys6woa[], double *wbkq9zyi, double jstx4uwe[4], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, int *cvnjhg2u, int l3zpbstu[3], int *xtov9rbf, int *wep0oibc, int *fbd5yktj); void n5aioudkhbzuprs6(double *qgnl3toc, double sjwyig9t[], double bhcji9gl[], double po8rwsmy[], int *kuzxj1lo, int *acpios9q, double gkdx5jal[], double *rpyis2kc, double *imdvf4hx, double *ifys6woa, double *i9mwnvqt, int *pn9eowxc, int *ic5aesxku, double *mynl7uaq, double *zustx4fw, double *nbe4gvpq, double *qaltf0nz, int *cvnjhg2u, double xwy[], double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf, int *wep0oibc, int *fbd5yktj); void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double gkdx5jal[], int *acpios9q); void n5aioudkvmnweiy2(double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf, int *acpios9q, int *wep0oibc, int *iflag); void n5aioudkwmhctl9x( double *qgnl3toc, double sjwyig9t[], double po8rwsmy[], int *kuzxj1lo, int *acpios9q, int *pn9eowxc, // int *icrit, double gkdx5jal[], double rpyis2kc[], double imdvf4hx[], double ifys6woa[], double *i9mwnvqt, double xwy[], double *qcpiaj7f, double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf, int *wep0oibc, int *algpft4y); void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[], double gkdx5jal[], int *rvy1fpli, int *kuzxj1lo, double zyupcmk6[], double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[]); void F77_NAME(vinterv)(double*, int*, double*, int*, int*); void F77_NAME(vbsplvd)(double*, int*, double*, int*, double*, double*, int*); void F77_NAME(dpbfa8)(double*, int*, int*, int*, int*); void F77_NAME(dpbsl8)(double*, int*, int*, int*, double*); void F77_NAME(wbvalue)(double*, double*, int*, int*, double*, int*, double*); void n5aioudkdnaoqj0l(double *qgnl3toc, double sjwyig9t[], double bhcji9gl[], double po8rwsmy[], int *kuzxj1lo, int *acpios9q, double gkdx5jal[], double rpyis2kc[], double imdvf4hx[], double ifys6woa[], double *wbkq9zyi, double jstx4uwe[4], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, int *cvnjhg2u, int l3zpbstu[3], int *xtov9rbf, int *wep0oibc, int *fbd5yktj) { double *wkumc9idxwy, *wkumc9idbuhyalv4, *wkumc9idzvau2lct, *wkumc9idf6lsuzax, *wkumc9idfvh2rwtc, *wkumc9iddcfir2no, *wkumc9idfulcp8wa, *wkumc9idplj0trqx; wkumc9idxwy = Calloc(*acpios9q, double); wkumc9idzvau2lct = Calloc(*acpios9q, double); wkumc9idf6lsuzax = Calloc(*acpios9q, double); wkumc9idfvh2rwtc = Calloc(*acpios9q, double); wkumc9iddcfir2no = Calloc(*acpios9q, double); wkumc9idbuhyalv4 = Calloc(*xtov9rbf * *acpios9q, double); wkumc9idfulcp8wa = Calloc(*xtov9rbf * *acpios9q, double); wkumc9idplj0trqx = Calloc( (int) 1 , double); n5aioudkhbzuprs6(qgnl3toc, 
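/* (Editor's sketch of the driver above, inferred from the code:
   n5aioudkdnaoqj0l only Calloc's the eight scratch buffers and
   forwards everything to n5aioudkhbzuprs6 below, unpacking
   l3zpbstu into the pn9eowxc and ic5aesxku slots and jstx4uwe
   into the four scalar tuning values mynl7uaq, zustx4fw,
   nbe4gvpq, qaltf0nz; the Free calls afterwards release the
   same eight buffers.) */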
sjwyig9t, bhcji9gl, po8rwsmy, kuzxj1lo, acpios9q, gkdx5jal, rpyis2kc, imdvf4hx, ifys6woa, wbkq9zyi, l3zpbstu + 1, l3zpbstu + 2, jstx4uwe, jstx4uwe + 1, jstx4uwe + 2, jstx4uwe + 3, cvnjhg2u, wkumc9idxwy, wkumc9idzvau2lct, wkumc9idf6lsuzax, wkumc9idfvh2rwtc, wkumc9iddcfir2no, xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, tt2, wkumc9idbuhyalv4, wkumc9idfulcp8wa, wkumc9idplj0trqx, xtov9rbf, wep0oibc, fbd5yktj); Free(wkumc9idxwy); Free(wkumc9idbuhyalv4); Free(wkumc9idzvau2lct); Free(wkumc9idf6lsuzax); Free(wkumc9idfvh2rwtc); Free(wkumc9iddcfir2no); Free(wkumc9idfulcp8wa); Free(wkumc9idplj0trqx); } void n5aioudkhbzuprs6(double *qgnl3toc, double sjwyig9t[], double bhcji9gl[], double po8rwsmy[], int *kuzxj1lo, int *acpios9q, double gkdx5jal[], double *rpyis2kc, double *imdvf4hx, double *ifys6woa, double *wbkq9zyi, int *pn9eowxc, int *ic5aesxku, double *mynl7uaq, double *zustx4fw, double *nbe4gvpq, double *qaltf0nz, int *cvnjhg2u, double xwy[], double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf, int *wep0oibc, int *fbd5yktj) { static const double c_Gold = 0.381966011250105151795413165634; double tt1 = 0.0, g2dnwteb, wkumc9ida, wkumc9idb, wkumc9idd, wkumc9ide, wkumc9idxm, wkumc9idp, wkumc9idq, wkumc9idr, // qaltf0nz, Tol1, Tol2, wkumc9idu, wkumc9idv, wkumc9idw, wkumc9idfu, wkumc9idfv, wkumc9idfw, wkumc9idfx, wkumc9idx, wkumc9idax, wkumc9idbx; int ayfnwr1v, viter = 0; double yjpnro8d = 8.0e88, bk3ymcih = 0.0e0, *qcpiaj7f, qcpiaj7f0 = 0.0; qcpiaj7f = &qcpiaj7f0; g2dnwteb = bk3ymcih; bk3ymcih += bk3ymcih; bk3ymcih *= bk3ymcih; bk3ymcih += g2dnwteb; wkumc9idd = 0.0; wkumc9idfu = 0.0e0; wkumc9idu = 0.0e0; if (*cvnjhg2u == 0) { n5aioudkzosq7hub(xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, gkdx5jal, acpios9q); *tt2 = 0.0; for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) { *tt2 += xecbg0pf[ayfnwr1v-1]; } *cvnjhg2u = 1; } else { } n5aioudkgt9iulbf(sjwyig9t, bhcji9gl, po8rwsmy, gkdx5jal, kuzxj1lo, acpios9q, xwy, zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no); for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) { tt1 += zvau2lct[ayfnwr1v-1]; } g2dnwteb = tt1 / *tt2; if (*pn9eowxc == 1) { *mynl7uaq = g2dnwteb * pow(16.0, *wbkq9zyi * 6.0 - 2.0); n5aioudkwmhctl9x(qgnl3toc, sjwyig9t, po8rwsmy, kuzxj1lo, acpios9q, pn9eowxc, // icrit, (icrit used to be used solely) gkdx5jal, rpyis2kc, imdvf4hx, ifys6woa, mynl7uaq, xwy, qcpiaj7f, // Not used here zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no, xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, buhyalv4, fulcp8wa, plj0trqx, xtov9rbf, wep0oibc, fbd5yktj); return; } wkumc9idax = *mynl7uaq; wkumc9idbx = *zustx4fw; /* Initialization. 
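   What follows is, in effect, Brent's univariate minimizer; this
   is an editor's reading of the code rather than original
   commentary. c_Gold = (3 - sqrt(5))/2 is the golden-section
   fraction, and [wkumc9idax, wkumc9idbx] brackets the optimum over
   the spar-like parameter *wbkq9zyi, which is mapped to the penalty
   weight by
       lambda = rho * 16^(6*spar - 2) = rho * 256^(3*spar - 1),
   the same reparameterization R's smooth.spline uses, with the
   trace ratio rho = tt1/tt2 of the data and penalty matrices formed
   above. Each iteration first attempts the parabolic step through
   the three best points; writing r = (x - w)(fx - fv) and
   q = (x - v)(fx - fw), that step is
       x_new = x + ( (x - v)*q - (x - w)*r ) / ( 2*(q - r) ),
   with a golden-section step as the fallback, and the loop exits
   when |x - xm| <= 2*Tol1 - (b - a)/2 or after ic5aesxku
   iterations.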
*/ wkumc9ida = wkumc9idax; wkumc9idb = wkumc9idbx; wkumc9idv = wkumc9ida + c_Gold * (wkumc9idb - wkumc9ida); wkumc9idw = wkumc9idx = wkumc9idv; wkumc9ide = 0.0e0; *wbkq9zyi = wkumc9idx; *mynl7uaq = g2dnwteb * pow((double) 16.0, (double) *wbkq9zyi * 6.0 - 2.0); n5aioudkwmhctl9x(qgnl3toc, sjwyig9t, po8rwsmy, kuzxj1lo, acpios9q, pn9eowxc, // icrit, gkdx5jal, rpyis2kc, imdvf4hx, ifys6woa, mynl7uaq, xwy, qcpiaj7f, zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no, xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, buhyalv4, fulcp8wa, plj0trqx, xtov9rbf, wep0oibc, fbd5yktj); wkumc9idfx = *qcpiaj7f; wkumc9idfv = wkumc9idfw = wkumc9idfx; while (*fbd5yktj == 0) { viter++; wkumc9idxm = 0.5e0 * (wkumc9ida + wkumc9idb); Tol1 = *qaltf0nz * fabs(wkumc9idx) + *nbe4gvpq / 3.0e0; Tol2 = 2.0e0 * Tol1; if ((fabs(wkumc9idx - wkumc9idxm) <= (Tol2 - 0.5 * (wkumc9idb - wkumc9ida))) || (viter > *ic5aesxku)) goto L_End; if ((fabs(wkumc9ide) <= Tol1) || (wkumc9idfx >= yjpnro8d) || (wkumc9idfv >= yjpnro8d) || (wkumc9idfw >= yjpnro8d)) goto a3bdsirf; wkumc9idr = (wkumc9idx - wkumc9idw) * (wkumc9idfx - wkumc9idfv); wkumc9idq = (wkumc9idx - wkumc9idv) * (wkumc9idfx - wkumc9idfw); wkumc9idp = (wkumc9idx - wkumc9idv) * wkumc9idq - (wkumc9idx - wkumc9idw) * wkumc9idr; wkumc9idq = 2.0e0 * (wkumc9idq - wkumc9idr); if (wkumc9idq > 0.0e0) wkumc9idp = -wkumc9idp; wkumc9idq = fabs(wkumc9idq); wkumc9idr = wkumc9ide; wkumc9ide = wkumc9idd; if (fabs(wkumc9idp) >= fabs(0.5 * wkumc9idq * wkumc9idr) || wkumc9idq == 0.0e0) { goto a3bdsirf; } if (wkumc9idp <= wkumc9idq * (wkumc9ida - wkumc9idx) || wkumc9idp >= wkumc9idq * (wkumc9idb - wkumc9idx)) goto a3bdsirf; wkumc9idd = wkumc9idp / wkumc9idq; wkumc9idu = wkumc9idx + wkumc9idd; if (wkumc9idu - wkumc9ida < Tol2 || wkumc9idb - wkumc9idu < Tol2) wkumc9idd = fsign(Tol1, wkumc9idxm - wkumc9idx); goto ceqzd1hi50; a3bdsirf: wkumc9ide = (wkumc9idx >= wkumc9idxm) ? wkumc9ida - wkumc9idx : wkumc9idb - wkumc9idx; wkumc9idd = c_Gold * wkumc9ide; ceqzd1hi50: wkumc9idu = wkumc9idx + ((fabs(wkumc9idd) >= Tol1) ? 
wkumc9idd : fsign(Tol1, wkumc9idd)); *wbkq9zyi = wkumc9idu; *mynl7uaq = g2dnwteb * pow((double) 16.0, (double) *wbkq9zyi * 6.0 - 2.0); n5aioudkwmhctl9x(qgnl3toc, sjwyig9t, po8rwsmy, kuzxj1lo, acpios9q, pn9eowxc, // icrit, gkdx5jal, rpyis2kc, imdvf4hx, ifys6woa, mynl7uaq, xwy, qcpiaj7f, zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no, xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, buhyalv4, fulcp8wa, plj0trqx, xtov9rbf, wep0oibc, fbd5yktj); wkumc9idfu = *qcpiaj7f; if (wkumc9idfu > yjpnro8d) wkumc9idfu = 2.0e0 * yjpnro8d; if (wkumc9idfu <= wkumc9idfx) { if (wkumc9idu >= wkumc9idx) wkumc9ida = wkumc9idx; else wkumc9idb = wkumc9idx; wkumc9idv = wkumc9idw; wkumc9idfv = wkumc9idfw; wkumc9idw = wkumc9idx; wkumc9idfw = wkumc9idfx; wkumc9idx = wkumc9idu; wkumc9idfx = wkumc9idfu; } else { if (wkumc9idu < wkumc9idx) wkumc9ida = wkumc9idu; else wkumc9idb = wkumc9idu; if (wkumc9idfu <= wkumc9idfw || wkumc9idw == wkumc9idx) { wkumc9idv = wkumc9idw; wkumc9idfv = wkumc9idfw; wkumc9idw = wkumc9idu; wkumc9idfw = wkumc9idfu; } else if (wkumc9idfu <= wkumc9idfv || wkumc9idv == wkumc9idx || wkumc9idv == wkumc9idw) { wkumc9idv = wkumc9idu; wkumc9idfv = wkumc9idfu; } } } L_End: bk3ymcih = 0.0e0; *wbkq9zyi = wkumc9idx; *qcpiaj7f = wkumc9idfx; return; } void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double gkdx5jal[], int *acpios9q) { int dqlr5bse, pqzfxw4i, bvsquk3z = 3, h2dpsbkr = 4, nkplus1 = *acpios9q + 1; int ayfnwr1v, gp1jxzuh, yq6lorbx; int urohxe6t; double g9fvdrbw[12], ms0qypiw[16], yw1[4], yw2[4], wrk1, othird = 1.0 / 3.0, *qnwamo0e0, *qnwamo0e1, *qnwamo0e2, *qnwamo0e3; qnwamo0e0 = xecbg0pf; qnwamo0e1 = z4grbpiq; qnwamo0e2 = d7glzhbj; qnwamo0e3 = v2eydbxs; for (ayfnwr1v = 0; ayfnwr1v < *acpios9q; ayfnwr1v++) { *qnwamo0e0++ = *qnwamo0e1++ = *qnwamo0e2++ = *qnwamo0e3++ = 0.0e0; } for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) { F77_CALL(vinterv)(gkdx5jal, &nkplus1, gkdx5jal + ayfnwr1v-1, &dqlr5bse, &pqzfxw4i); F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, gkdx5jal + ayfnwr1v - 1, &dqlr5bse, ms0qypiw, g9fvdrbw, &bvsquk3z); for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) { yw1[gp1jxzuh-1] = g9fvdrbw[gp1jxzuh-1 + 2*4]; } F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, gkdx5jal + ayfnwr1v, &dqlr5bse, ms0qypiw, g9fvdrbw, &bvsquk3z); for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) { yw2[gp1jxzuh-1] = g9fvdrbw[gp1jxzuh-1 + 2*4] - yw1[gp1jxzuh-1]; } wrk1 = gkdx5jal[ayfnwr1v] - gkdx5jal[ayfnwr1v-1]; if (dqlr5bse >= 4) { for (gp1jxzuh = 1; gp1jxzuh <= 4; gp1jxzuh++) { yq6lorbx = gp1jxzuh; urohxe6t = dqlr5bse - 4 + gp1jxzuh; xecbg0pf[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); yq6lorbx = gp1jxzuh + 1; if (yq6lorbx <= 4) { z4grbpiq[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } yq6lorbx = gp1jxzuh + 2; if (yq6lorbx <= 4) { d7glzhbj[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } yq6lorbx = gp1jxzuh + 3; if (yq6lorbx <= 4) { v2eydbxs[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } } } else if (dqlr5bse == 3) { for (gp1jxzuh = 1; gp1jxzuh <= 3; gp1jxzuh++) { yq6lorbx = gp1jxzuh; 
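/* (Editor's note on the band assembly in these branches: vbsplvd is
   called with three derivative columns, so the third column of
   g9fvdrbw holds second derivatives of the four B-splines; yw1 is
   their value at the left knot and yw2 the increment across the
   interval. A cubic B-spline's second derivative is linear on each
   knot interval, so with h = wrk1 the product integrates exactly:
     integral_0^h (a1 + b1*t/h)(a2 + b2*t/h) dt
       = h*( a1*a2 + (a1*b2 + a2*b1)/2 + b1*b2/3 ),
   which is the  wrk1*(yw1*yw1 + (...)*0.50 + yw2*yw2*othird)
   pattern repeated here. xecbg0pf, z4grbpiq, d7glzhbj and v2eydbxs
   thus receive the diagonal and first three super-diagonals of the
   roughness penalty matrix Omega, the Gram matrix of second
   derivatives.) */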
urohxe6t = dqlr5bse - 3 + gp1jxzuh; xecbg0pf[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); yq6lorbx = gp1jxzuh + 1; if (yq6lorbx <= 3) { z4grbpiq[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } yq6lorbx = gp1jxzuh + 2; if (yq6lorbx <= 3) { d7glzhbj[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } } } else if (dqlr5bse == 2) { for (gp1jxzuh = 1; gp1jxzuh <= 2; gp1jxzuh++) { yq6lorbx = gp1jxzuh; urohxe6t = dqlr5bse - 2 + gp1jxzuh; xecbg0pf[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); yq6lorbx = gp1jxzuh + 1; if (yq6lorbx <= 2) { z4grbpiq[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } } } else if (dqlr5bse == 1) { for (gp1jxzuh = 1; gp1jxzuh <= 1; gp1jxzuh++) { yq6lorbx = gp1jxzuh; urohxe6t = dqlr5bse - 1 + gp1jxzuh; xecbg0pf[urohxe6t-1] += wrk1 * (yw1[gp1jxzuh-1]*yw1[yq6lorbx-1] + (yw2[gp1jxzuh-1]*yw1[yq6lorbx-1] + yw2[yq6lorbx-1]*yw1[gp1jxzuh-1]) * 0.50 + yw2[gp1jxzuh-1]*yw2[yq6lorbx-1] * othird); } } } } void n5aioudkvmnweiy2(double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf, int *acpios9q, int *wep0oibc, int *iflag) { int ayfnwr1v, yq6lorbx, gp1jxzuh; double wjm3[3], wjm2[2], wjm1[1], c0, c1, c2, c3; double pcsuow9k, qdbgu6oi, upwkh5xz, rul5fnyd, ueydbrg6, plce2srm, k3yvomnh, bfdjhu7l, ctfvwdu0; c1 = c2 = c3 = 0.0e0; wjm3[0] = wjm3[1] = wjm3[2] = wjm2[0] = wjm2[1] = wjm1[0] = 0.0e0; for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) { yq6lorbx = *acpios9q - ayfnwr1v + 1; c0 = 1.0e0 / buhyalv4[3 + (yq6lorbx-1) * *xtov9rbf]; if (yq6lorbx <= (*acpios9q-3)) { c1 = buhyalv4[0 + (yq6lorbx+2) * *xtov9rbf] * c0; c2 = buhyalv4[1 + (yq6lorbx+1) * *xtov9rbf] * c0; c3 = buhyalv4[2 + (yq6lorbx+0) * *xtov9rbf] * c0; } else if (yq6lorbx == (*acpios9q - 2)) { c1 = 0.0e0; c2 = buhyalv4[1 + (yq6lorbx+1) * *xtov9rbf] * c0; c3 = buhyalv4[2 + yq6lorbx * *xtov9rbf] * c0; } else if (yq6lorbx == (*acpios9q - 1)) { c1 = c2 = 0.0e0; c3 = buhyalv4[2 + yq6lorbx * *xtov9rbf] * c0; } else if (yq6lorbx == *acpios9q) { c1 = c2 = c3 = 0.0e0; } pcsuow9k = c1 * wjm3[0]; qdbgu6oi = c2 * wjm3[1]; upwkh5xz = c3 * wjm3[2]; rul5fnyd = c1 * wjm3[1]; ueydbrg6 = c2 * wjm2[0]; plce2srm = c3 * wjm2[1]; k3yvomnh = c1 * wjm3[2]; bfdjhu7l = c2 * wjm2[1]; ctfvwdu0 = c3 * wjm1[0]; fulcp8wa[0 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (pcsuow9k+qdbgu6oi+upwkh5xz); fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (rul5fnyd+ueydbrg6+plce2srm); fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf] = 0.0 - (k3yvomnh+bfdjhu7l+ctfvwdu0); fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf] = pow(c0, (double) 2.0) + c1 * (pcsuow9k + 2.0e0 * (qdbgu6oi + upwkh5xz)) + c2 * (ueydbrg6 + 2.0e0 * plce2srm) + c3 * ctfvwdu0; wjm3[0] = wjm2[0]; wjm3[1] = wjm2[1]; wjm3[2] = fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf]; wjm2[0] = wjm1[0]; wjm2[1] = fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf]; wjm1[0] = fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf]; } if (*iflag == 0) { return; } Rprintf("plj0trqx must not be a double of length one!\n"); for 
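/* (Editor's reading: the backward recursion just completed computes,
   from the banded Cholesky-type factor in buhyalv4, the central
   bands of the inverse matrix and stores them in fulcp8wa; this is
   the standard device for smoothing splines (cf. Hutchinson and
   de Hoog, 1985) that yields leverages and effective degrees of
   freedom in O(n) without forming the full inverse. The code below,
   reached only when iflag is nonzero, would expand the full inverse
   into plj0trqx, and the Rprintf above flags that plj0trqx must
   then be a genuine matrix rather than the length-one placeholder
   allocated by the caller.) */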
(ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) { yq6lorbx = *acpios9q - ayfnwr1v + 1; for (gp1jxzuh = 1; gp1jxzuh <= 4 && yq6lorbx + gp1jxzuh-1 <= *acpios9q; gp1jxzuh++) { plj0trqx[yq6lorbx-1 + (yq6lorbx+gp1jxzuh-2) * *wep0oibc] = fulcp8wa[4-gp1jxzuh + (yq6lorbx-1) * *xtov9rbf]; } } for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) { yq6lorbx = *acpios9q - ayfnwr1v + 1; for (gp1jxzuh = yq6lorbx-4; gp1jxzuh >= 1; gp1jxzuh--) { c0 = 1.0 / buhyalv4[3 + (gp1jxzuh-1) * *xtov9rbf]; c1 = buhyalv4[0 + (gp1jxzuh+2) * *xtov9rbf] * c0; c2 = buhyalv4[1 + (gp1jxzuh+1) * *xtov9rbf] * c0; c3 = buhyalv4[2 + gp1jxzuh * *xtov9rbf] * c0; plj0trqx[gp1jxzuh-1 + (yq6lorbx-1) * *wep0oibc] = 0.0e0 - ( c1 * plj0trqx[gp1jxzuh+2 + (yq6lorbx-1) * *wep0oibc] + c2 * plj0trqx[gp1jxzuh+1 + (yq6lorbx-1) * *wep0oibc] + c3 * plj0trqx[gp1jxzuh + (yq6lorbx-1) * *wep0oibc] ); } } } void n5aioudkwmhctl9x(double *qgnl3toc, double sjwyig9t[], double po8rwsmy[], int *kuzxj1lo, int *acpios9q, int *pn9eowxc, // int *icrit, double gkdx5jal[], double rpyis2kc[], double imdvf4hx[], double ifys6woa[], double *i9mwnvqt, double xwy[], double *qcpiaj7f, double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf, int *wep0oibc, int *algpft4y) { double ms0qypiw[16], b0, b1, b2, b3, qaltf0nz = 0.1e-10, g9fvdrbw[4], qtce8hzo, *chw8lzty, egwbdua212 = 0.0e0; int yu6izdrc = 0, pqneb2ra = 1, bvsquk3z = 3, h2dpsbkr = 4, pqzfxw4i, ayfnwr1v, yq6lorbx, dqlr5bse, nkp1 = *acpios9q + 1; double *qnwamo0e1, *qnwamo0e2; qnwamo0e1 = rpyis2kc; qnwamo0e2 = xwy; for (ayfnwr1v = 0; ayfnwr1v < *acpios9q; ayfnwr1v++) { *qnwamo0e1++ = *qnwamo0e2++; } qnwamo0e1 = zvau2lct; qnwamo0e2 = xecbg0pf; for (ayfnwr1v = 0; ayfnwr1v < *acpios9q; ayfnwr1v++) { buhyalv4[3 + ayfnwr1v * *xtov9rbf] = *qnwamo0e1++ + *i9mwnvqt * *qnwamo0e2++; } qnwamo0e1 = f6lsuzax; qnwamo0e2 = z4grbpiq; for (ayfnwr1v = 1; ayfnwr1v <= (*acpios9q-1); ayfnwr1v++) { buhyalv4[2 + ayfnwr1v * *xtov9rbf] = *qnwamo0e1++ + *i9mwnvqt * *qnwamo0e2++; } qnwamo0e1 = fvh2rwtc; qnwamo0e2 = d7glzhbj; for (ayfnwr1v = 1; ayfnwr1v <= (*acpios9q-2); ayfnwr1v++) { buhyalv4[1 + (ayfnwr1v+1) * *xtov9rbf] = *qnwamo0e1++ + *i9mwnvqt * *qnwamo0e2++; } qnwamo0e1 = dcfir2no; qnwamo0e2 = v2eydbxs; for (ayfnwr1v = 1; ayfnwr1v <= (*acpios9q-3); ayfnwr1v++) { buhyalv4[ (ayfnwr1v+2) * *xtov9rbf] = *qnwamo0e1++ + *i9mwnvqt * *qnwamo0e2++; } F77_CALL(dpbfa8)(buhyalv4, xtov9rbf, acpios9q, &bvsquk3z, algpft4y); if (*algpft4y != 0) { Rprintf("In C function wmhctl9x; Error:\n"); Rprintf("Leading minor of order %d is not pos-def\n", *algpft4y); return; } F77_CALL(dpbsl8)(buhyalv4, xtov9rbf, acpios9q, &bvsquk3z, rpyis2kc); chw8lzty = sjwyig9t; qnwamo0e1 = imdvf4hx; for (ayfnwr1v = 1; ayfnwr1v <= *kuzxj1lo; ayfnwr1v++) { F77_CALL(wbvalue)(gkdx5jal, rpyis2kc, acpios9q, &h2dpsbkr, chw8lzty++, &yu6izdrc, qnwamo0e1++); } n5aioudkvmnweiy2(buhyalv4, fulcp8wa, plj0trqx, xtov9rbf, acpios9q, wep0oibc, &yu6izdrc); //Rprintf("first one n5aioudkwmhctl9x pow(po8rwsmy[0], (double) 1.0) = "); //Rprintf("%9.5e\n", pow(po8rwsmy[0], (double) 1.0)); chw8lzty = sjwyig9t; for (ayfnwr1v = 1; ayfnwr1v <= *kuzxj1lo; ayfnwr1v++) { F77_CALL(vinterv)(gkdx5jal, &nkp1, chw8lzty, &dqlr5bse, &pqzfxw4i); if (pqzfxw4i == -1) { dqlr5bse = 4; *chw8lzty = gkdx5jal[3] + qaltf0nz; } else if (pqzfxw4i == 1) { dqlr5bse = *acpios9q; *chw8lzty = gkdx5jal[*acpios9q] - qaltf0nz; } yq6lorbx = dqlr5bse-3; 
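/* (Editor's sketch of this loop: for each x_i, vinterv and vbsplvd
   locate the knot interval and return the four nonzero cubic
   B-spline basis values b0..b3, and ifys6woa[i] is set to the
   weighted quadratic form over the stored inverse bands in
   fulcp8wa,
     h_i = w_i * b(x_i)' S^{-1} b(x_i),
   i.e. the i-th leverage (a diagonal entry of the smoother matrix).
   When pn9eowxc != 1, the criterion *qcpiaj7f returned to the
   optimizer is (qgnl3toc - sum_i h_i)^2, the squared gap between
   the requested and the fitted degrees of freedom.) */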
F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, chw8lzty++, &dqlr5bse, ms0qypiw, g9fvdrbw, &pqneb2ra); b0 = g9fvdrbw[0]; b1 = g9fvdrbw[1]; b2 = g9fvdrbw[2]; b3 = g9fvdrbw[3]; qtce8hzo = (b0 * (fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf] * b0 + 2.0e0* (fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf] * b1 + fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf] * b2 + fulcp8wa[0 + (yq6lorbx-1) * *xtov9rbf] * b3)) + b1 * (fulcp8wa[3 + yq6lorbx * *xtov9rbf] * b1 + 2.0e0* (fulcp8wa[2 + yq6lorbx * *xtov9rbf] * b2 + fulcp8wa[1 + yq6lorbx * *xtov9rbf] * b3)) + b2 * (fulcp8wa[3 + (yq6lorbx+1) * *xtov9rbf] * b2 + 2.0e0* fulcp8wa[2 + (yq6lorbx+1) * *xtov9rbf] * b3) + fulcp8wa[3 + (yq6lorbx+2) * *xtov9rbf] * pow(b3, (double) 2.0)) * po8rwsmy[ayfnwr1v-1]; ifys6woa[ayfnwr1v-1] = qtce8hzo; } if (*pn9eowxc == 1) { return; } for (ayfnwr1v = 1; ayfnwr1v <= *kuzxj1lo; ayfnwr1v++) { egwbdua212 += ifys6woa[ayfnwr1v-1]; } *qcpiaj7f = pow(*qgnl3toc - egwbdua212, (double) 2.0); } void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[], double gkdx5jal[], int *rvy1fpli, int *kuzxj1lo, double zyupcmk6[], double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[]) { double g9fvdrbw[12]; /* 20140522 Effectively g9fvdrbw(4,3), just in case */ double ms0qypiw[16], wsvdbx3tk, wv2svdbx3tk, qaltf0nz = 0.1e-9; int ayfnwr1v, yq6lorbx, dqlr5bse, pqzfxw4i, nhnpt1zym1 = *kuzxj1lo + 1, pqneb2ra = 1, h2dpsbkr = 4; double *qnwamo0e0, *qnwamo0e1, *qnwamo0e2, *qnwamo0e3, *qnwamo0e4; qnwamo0e0 = zvau2lct; qnwamo0e1 = f6lsuzax; qnwamo0e2 = fvh2rwtc; qnwamo0e3 = dcfir2no; qnwamo0e4 = zyupcmk6; for (ayfnwr1v = 0; ayfnwr1v < *kuzxj1lo; ayfnwr1v++) { *qnwamo0e0++ = *qnwamo0e1++ = *qnwamo0e2++ = *qnwamo0e3++ = *qnwamo0e4++ = 0.0e0; } //Rprintf("first one n5aioudkgt9iulbf pow(po8rwsmy[0], (double) 1.0) = "); //Rprintf("%9.5e\n", pow(po8rwsmy[0], (double) 1.0)); for (ayfnwr1v = 1; ayfnwr1v <= *rvy1fpli; ayfnwr1v++) { F77_CALL(vinterv)(gkdx5jal, &nhnpt1zym1, sjwyig9t + ayfnwr1v - 1, &dqlr5bse, &pqzfxw4i); if (pqzfxw4i == 1) { if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jal[dqlr5bse-1] + qaltf0nz)) { dqlr5bse--; } else { return; } } F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, sjwyig9t + ayfnwr1v - 1, &dqlr5bse, ms0qypiw, g9fvdrbw, &pqneb2ra); yq6lorbx = dqlr5bse - 4 + 1; wsvdbx3tk = po8rwsmy[ayfnwr1v-1]; wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[0]; zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1]; zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[0]; f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1]; fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2]; dcfir2no[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3]; yq6lorbx = dqlr5bse - 4 + 2; wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[1]; zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1]; zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1]; f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2]; fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3]; yq6lorbx = dqlr5bse - 4 + 3; wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[2]; zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1]; zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2]; f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3]; yq6lorbx = dqlr5bse; wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[3]; zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1]; zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3]; } } VGAM/src/VGAM_init.c0000644000176200001440000001635713565414527013544 0ustar liggesusers#include #include // for NULL #include /* FIXME: Check these declarations against the C/Fortran source code. 
*/ /* .C calls */ extern void a2mccc(void *, void *, void *, void *, void *, void *, void *); extern void cqo_1(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void cqo_2(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void dcqo1(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void eimpnbinomspecialp(void *, void *, void *, void *, void *, void *); extern void lerchphi123(void *, void *, void *, void *, void *, void *, void *, void *); extern void m2accc(void *, void *, void *, void *, void *, void *, void *, void *); extern void mux111ccc(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void mux15ccc(void *, void *, void *, void *, void *); extern void mux2ccc(void *, void *, void *, void *, void *, void *); extern void mux22ccc(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void mux5ccc(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void mux55ccc(void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void mux7ccc(void *, void *, void *, void *, void *, void *, void *); extern void pnorm2ccc(void *, void *, void *, void *, void *, void *); extern void sf_C_expexpint(void *, void *, void *); extern void sf_C_expint(void *, void *, void *); extern void sf_C_expint_e1(void *, void *, void *); extern void tapply_mat1(void *, void *, void *, void *); extern void tyee_C_cum8sum(void *, void *, void *, void *, void *, void *); extern void vbacksubccc(void *, void *, void *, void *, void *, void *, void *, void *); extern void vcao6(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void vcholccc(void *, void *, void *, void *, void *, void *, void *, void *); extern void vdcao6(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void vforsubccc(void *, void *, void *, void *, void *, void *, void *, void *); extern void VGAM_C_kend_tau(void *, void *, void *, void *); extern void VGAM_C_mux34(void *, void *, void *, void *, void *, void *); extern void VGAM_C_vdigami(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void vknootl2(void *, void *, void *, void *, void *); extern void vsuff9(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, 
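/* (Registration sketch, an editor's reading rather than original
   commentary: each CEntries row below pairs a name callable from R,
   for example .C("mux22ccc", ...) with exactly 10 arguments, with
   its C symbol and argument count; FortranEntries does the same for
   .Fortran("veigenf", ...) and .Fortran("yjngintf", ...); and
   R_useDynamicSymbols(dll, FALSE) in R_init_VGAM restricts symbol
   lookup to the routines registered here.) */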
void *); extern void vzetawr(void *, void *, void *, void *); extern void Yee_pknootl2(void *, void *, void *, void *); extern void Yee_spline(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void Yee_vbfa(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void Yee_vbvs(void *, void *, void *, void *, void *, void *, void *, void *); /* .Fortran calls */ extern void F77_NAME(veigenf)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void F77_NAME(yjngintf)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); static const R_CMethodDef CEntries[] = { {"a2mccc", (DL_FUNC) &a2mccc, 7}, {"cqo_1", (DL_FUNC) &cqo_1, 24}, {"cqo_2", (DL_FUNC) &cqo_2, 24}, {"dcqo1", (DL_FUNC) &dcqo1, 29}, {"eimpnbinomspecialp", (DL_FUNC) &eimpnbinomspecialp, 6}, {"lerchphi123", (DL_FUNC) &lerchphi123, 8}, {"m2accc", (DL_FUNC) &m2accc, 8}, {"mux111ccc", (DL_FUNC) &mux111ccc, 11}, {"mux15ccc", (DL_FUNC) &mux15ccc, 5}, {"mux2ccc", (DL_FUNC) &mux2ccc, 6}, {"mux22ccc", (DL_FUNC) &mux22ccc, 10}, {"mux5ccc", (DL_FUNC) &mux5ccc, 16}, {"mux55ccc", (DL_FUNC) &mux55ccc, 9}, {"mux7ccc", (DL_FUNC) &mux7ccc, 7}, {"pnorm2ccc", (DL_FUNC) &pnorm2ccc, 6}, {"sf_C_expexpint", (DL_FUNC) &sf_C_expexpint, 3}, {"sf_C_expint", (DL_FUNC) &sf_C_expint, 3}, {"sf_C_expint_e1", (DL_FUNC) &sf_C_expint_e1, 3}, {"tapply_mat1", (DL_FUNC) &tapply_mat1, 4}, {"tyee_C_cum8sum", (DL_FUNC) &tyee_C_cum8sum, 6}, {"vbacksubccc", (DL_FUNC) &vbacksubccc, 8}, {"vcao6", (DL_FUNC) &vcao6, 42}, {"vcholccc", (DL_FUNC) &vcholccc, 8}, {"vdcao6", (DL_FUNC) &vdcao6, 47}, {"vforsubccc", (DL_FUNC) &vforsubccc, 8}, {"VGAM_C_kend_tau", (DL_FUNC) &VGAM_C_kend_tau, 4}, {"VGAM_C_mux34", (DL_FUNC) &VGAM_C_mux34, 6}, {"VGAM_C_vdigami", (DL_FUNC) &VGAM_C_vdigami, 12}, {"vknootl2", (DL_FUNC) &vknootl2, 5}, {"vsuff9", (DL_FUNC) &vsuff9, 21}, {"vzetawr", (DL_FUNC) &vzetawr, 4}, {"Yee_pknootl2", (DL_FUNC) &Yee_pknootl2, 4}, {"Yee_spline", (DL_FUNC) &Yee_spline, 28}, {"Yee_vbfa", (DL_FUNC) &Yee_vbfa, 30}, {"Yee_vbvs", (DL_FUNC) &Yee_vbvs, 8}, {NULL, NULL, 0} }; static const R_FortranMethodDef FortranEntries[] = { {"veigenf", (DL_FUNC) &F77_NAME(veigenf), 13}, {"yjngintf", (DL_FUNC) &F77_NAME(yjngintf), 11}, {NULL, NULL, 0} }; void R_init_VGAM(DllInfo *dll) { R_registerRoutines(dll, CEntries, NULL, FortranEntries, NULL); R_useDynamicSymbols(dll, FALSE); } VGAM/src/vmux3.c0000644000176200001440000006253213565414527013045 0ustar liggesusers #include #include #include #include #include void fvlmz9iyC_qpsedg8x(int tgiyxdw1[], int dufozmt7[], int *wy1vqfzu); int fvlmz9iyC_VIAM(int *cz8qdfyj, int *rvy1fpli, int *wy1vqfzu); void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f8yswcat, int *wy1vqfzu, int *irb1onzwu, int tgiyxdw1[], int dufozmt7[], int *oey3ckps); void fvlmz9iyC_mux22(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat[], int *npjlv3mr, int *f8yswcat, int *wy1vqfzu); void fvlmz9iyC_vbks(double wpuarq2m[], double unvxka0m[], int *wy1vqfzu, int *f8yswcat, int *dimu); void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[], int *wy1vqfzu, int *dvhw1ulq, int *i_solve); void 
fvlmz9iyC_mux17(double wpuarq2m[], double he7mqnvy[], int *wy1vqfzu, int *xjc4ywlh, int *f8yswcat, int *dimu, int *rutyk8mg); void fvlmz9iyC_lkhnw9yq(double wpuarq2m[], double ks3wejcv[], int *npjlv3mr, int *wy1vqfzu, int *dvhw1ulq); double fvlmz9iyC_tldz5ion(double xx); void fvlmz9iyC_enbin9(double bzmd6ftv[], double hdqsx7bk[], double nm0eljqk[], double *n2kersmx, int *f8yswcat, int *dvhw1ulq, int *zy1mchbf, double *ux3nadiw, double *rsynp1go, int *sguwj9ty); void fvlmz9iyC_enbin8(double bzmd6ftv[], double hdqsx7bk[], double hsj9bzaq[], double *n2kersmx, int *f8yswcat, int *dvhw1ulq, int *zy1mchbf, double *ux3nadiw, double *rsynp1go); void fvlmz9iyC_mbessI0(double unvxka0m[], int *f8yswcat, int *kpzavbj3, double dvector0[], double dvector1[], double dvector2[], int *zjkrtol8, double *qaltf0nz); void VGAM_C_mux34(double he7mqnvy[], double Dmat[], int *vnc1izfy, int *e0nmabdk, int *ui4ntmvd, double bqelz3cy[]); void fvlmz9iyC_qpsedg8x(int tgiyxdw1[], int dufozmt7[], int *wy1vqfzu) { int urohxe6t, bpvaqm5z, *ptri; ptri = tgiyxdw1; for (urohxe6t = *wy1vqfzu; urohxe6t >= 1; urohxe6t--) { for (bpvaqm5z = 1; bpvaqm5z <= urohxe6t; bpvaqm5z++) { *ptri++ = bpvaqm5z; } } ptri = dufozmt7; for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) { for (bpvaqm5z = urohxe6t; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { *ptri++ = bpvaqm5z; } } } int fvlmz9iyC_VIAM(int *cz8qdfyj, int *rvy1fpli, int *wy1vqfzu) { int urohxe6t; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7; int imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2; wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int); wkumc9iddufozmt7 = Calloc(imk5wjxg, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, wy1vqfzu); for (urohxe6t = 1; urohxe6t <= imk5wjxg; urohxe6t++) { if ((wkumc9idtgiyxdw1[urohxe6t-1]== *cz8qdfyj && wkumc9iddufozmt7[urohxe6t-1] == *rvy1fpli) || (wkumc9idtgiyxdw1[urohxe6t-1]== *rvy1fpli && wkumc9iddufozmt7[urohxe6t-1] == *cz8qdfyj)) { Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); return urohxe6t; } } Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); return 0; } void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f8yswcat, int *wy1vqfzu, int *irb1onzwu, int tgiyxdw1[], int dufozmt7[], int *oey3ckps) { int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t; int bpvaqm5z, usvdbx3tk, i_size_bzmd6ftvmat, imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2, zyojx5hw = *wy1vqfzu * *wy1vqfzu; double *qnwamo0e; if (*oey3ckps == 1) { if (*irb1onzwu == 1 || *dim1m != imk5wjxg) { i_size_bzmd6ftvmat = zyojx5hw * *f8yswcat; qnwamo0e = bzmd6ftvmat; for (ayfnwr1v = 0; ayfnwr1v < i_size_bzmd6ftvmat; ayfnwr1v++) { *qnwamo0e++ = 0.0e0; } } } if (irb1onzwu == 0) { for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { urohxe6t = (ayfnwr1v-1) * zyojx5hw; for (yq6lorbx = 1; yq6lorbx <= *dim1m; yq6lorbx++) { bpvaqm5z = tgiyxdw1[yq6lorbx-1] - 1 + (dufozmt7[yq6lorbx-1] - 1) * *wy1vqfzu + urohxe6t; usvdbx3tk = dufozmt7[yq6lorbx-1] - 1 + (tgiyxdw1[yq6lorbx-1] - 1) * *wy1vqfzu + urohxe6t; gp1jxzuh = (yq6lorbx-1) + (ayfnwr1v-1) * *dim1m; bzmd6ftvmat[usvdbx3tk] = bzmd6ftvmat[bpvaqm5z] = mtlgduey8[gp1jxzuh]; } } } else { for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { urohxe6t = (ayfnwr1v-1) * zyojx5hw; for (yq6lorbx = 1; yq6lorbx <= *dim1m; yq6lorbx++) { bpvaqm5z = tgiyxdw1[yq6lorbx-1] - 1 + (dufozmt7[yq6lorbx-1] - 1) * *wy1vqfzu + urohxe6t; gp1jxzuh = (ayfnwr1v-1) * *dim1m + (yq6lorbx-1); bzmd6ftvmat[bpvaqm5z] = mtlgduey8[gp1jxzuh]; } } } } void fvlmz9iyC_mux22(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat[], int *npjlv3mr, int *f8yswcat, int *wy1vqfzu) { 
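/* (Editor's sketch of the storage convention used throughout this
   file: a symmetric M x M working-weight matrix is kept in compact
   "matrix-band" form as npjlv3mr <= M(M+1)/2 numbers per
   observation, the diagonal first and then successive
   super-diagonals. fvlmz9iyC_qpsedg8x above fills
   tgiyxdw1/dufozmt7 with the (row, col) index pairs of the upper
   triangle in exactly that order, and fvlmz9iyC_vm2a scatters one
   compact column into a full M x M scratch matrix. Here mux22 uses
   that scratch copy as an upper triangular U_i and computes the
   product U_i y_i for each observation i, which is why the inner
   sum runs over bpvaqm5z >= yq6lorbx only.) */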
int ayfnwr1v, yq6lorbx, bpvaqm5z, pqneb2ra = 1, djaq7ckz = 1, oey3ckps = 0; int zyojx5hw = *wy1vqfzu * *wy1vqfzu, imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7; double q6zdcwxk; double *wkumc9idwk12; wkumc9idwk12 = Calloc(zyojx5hw, double); wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int); wkumc9iddufozmt7 = Calloc(imk5wjxg, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, wy1vqfzu); for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { fvlmz9iyC_vm2a(wpuarq2m + (ayfnwr1v - 1) * *npjlv3mr, wkumc9idwk12, npjlv3mr, &pqneb2ra, wy1vqfzu, &djaq7ckz, wkumc9idtgiyxdw1, wkumc9iddufozmt7, &oey3ckps); for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { q6zdcwxk = 0.0e0; for (bpvaqm5z = yq6lorbx; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { q6zdcwxk += wkumc9idwk12[yq6lorbx-1 + (bpvaqm5z-1) * *wy1vqfzu] * tlgduey8[ayfnwr1v-1 + (bpvaqm5z-1) * *f8yswcat]; } bzmd6ftvmat[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] = q6zdcwxk; } } Free(wkumc9idwk12); Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); } void fvlmz9iyC_vbks(double wpuarq2m[], double unvxka0m[], int *wy1vqfzu, int *f8yswcat, int *npjlv3mr) { int ayfnwr1v, yq6lorbx, gp1jxzuh, pqneb2ra = 1, djaq7ckz = 1, oey3ckps = 0, zyojx5hw = *wy1vqfzu * *wy1vqfzu, imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7; double q6zdcwxk; double *wkumc9idwk12; wkumc9idwk12 = Calloc(zyojx5hw , double); wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int); wkumc9iddufozmt7 = Calloc(imk5wjxg, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, wy1vqfzu); for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { fvlmz9iyC_vm2a(wpuarq2m + (ayfnwr1v - 1) * *npjlv3mr, wkumc9idwk12, npjlv3mr, &pqneb2ra, wy1vqfzu, &djaq7ckz, wkumc9idtgiyxdw1, wkumc9iddufozmt7, &oey3ckps); for (yq6lorbx = *wy1vqfzu; yq6lorbx >= 1; yq6lorbx--) { q6zdcwxk = unvxka0m[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu]; for (gp1jxzuh = yq6lorbx+1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { q6zdcwxk -= wkumc9idwk12[yq6lorbx-1 + (gp1jxzuh-1) * *wy1vqfzu] * unvxka0m[gp1jxzuh-1 + (ayfnwr1v-1) * *wy1vqfzu]; } unvxka0m[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] = q6zdcwxk / wkumc9idwk12[yq6lorbx-1 + (yq6lorbx-1) * *wy1vqfzu]; } } Free(wkumc9idwk12); Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); } void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[], int *wy1vqfzu, int *dvhw1ulq, int *i_solve) { double q6zdcwxk; int ayfnwr1v, yq6lorbx, gp1jxzuh; *dvhw1ulq = 1; for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { q6zdcwxk = 0.0e0; for (gp1jxzuh = 1; gp1jxzuh <= ayfnwr1v-1; gp1jxzuh++) { q6zdcwxk += pow(rbne6ouj[gp1jxzuh-1 + (ayfnwr1v-1) * *wy1vqfzu], (double) 2.0); } rbne6ouj[ayfnwr1v-1 + (ayfnwr1v-1) * *wy1vqfzu] -= q6zdcwxk; if (rbne6ouj[ayfnwr1v-1 + (ayfnwr1v-1) * *wy1vqfzu] <= 0.0e0) { Rprintf("Error in fvlmz9iyjdbomp0g: not pos-def.\n"); *dvhw1ulq = 0; return; } rbne6ouj[ayfnwr1v-1 + (ayfnwr1v-1) * *wy1vqfzu] = sqrt(rbne6ouj[ayfnwr1v-1 + (ayfnwr1v-1) * *wy1vqfzu]); for (yq6lorbx = ayfnwr1v+1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { q6zdcwxk = 0.0e0; for (gp1jxzuh = 1; gp1jxzuh <= ayfnwr1v-1; gp1jxzuh++) { q6zdcwxk += rbne6ouj[gp1jxzuh-1 + (ayfnwr1v-1) * *wy1vqfzu] * rbne6ouj[gp1jxzuh-1 + (yq6lorbx-1) * *wy1vqfzu]; } rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] = (rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] - q6zdcwxk) / rbne6ouj[ayfnwr1v-1 + (ayfnwr1v-1) * *wy1vqfzu]; } } if (*i_solve == 0) { for (ayfnwr1v = 2; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= ayfnwr1v-1; yq6lorbx++) { rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * 
*wy1vqfzu] = 0.0e0; } return; } } for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { q6zdcwxk = unvxka0m[yq6lorbx-1]; for (gp1jxzuh = 1; gp1jxzuh <= yq6lorbx-1; gp1jxzuh++) { q6zdcwxk -= rbne6ouj[gp1jxzuh-1 + (yq6lorbx-1) * *wy1vqfzu] * unvxka0m[gp1jxzuh-1]; } unvxka0m[yq6lorbx-1] = q6zdcwxk / rbne6ouj[yq6lorbx-1 + (yq6lorbx-1) * *wy1vqfzu]; } for(yq6lorbx = *wy1vqfzu; yq6lorbx >= 1; yq6lorbx--) { q6zdcwxk = unvxka0m[yq6lorbx-1]; for(gp1jxzuh = yq6lorbx+1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { q6zdcwxk -= rbne6ouj[yq6lorbx-1 + (gp1jxzuh-1) * *wy1vqfzu] * unvxka0m[gp1jxzuh-1]; } unvxka0m[yq6lorbx-1] = q6zdcwxk / rbne6ouj[yq6lorbx-1 + (yq6lorbx-1) * *wy1vqfzu]; } } void fvlmz9iyC_mux17(double wpuarq2m[], double he7mqnvy[], int *wy1vqfzu, int *xjc4ywlh, int *f8yswcat, int *npjlv3mr, int *rutyk8mg) { double q6zdcwxk; int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z; double *wkumc9idwk12, *wkumc9idwk34; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7, imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2, zyojx5hw = *wy1vqfzu * *wy1vqfzu, dz1lbtph = *wy1vqfzu * *xjc4ywlh; wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int); wkumc9iddufozmt7 = Calloc(imk5wjxg, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, wy1vqfzu); wkumc9idwk12 = Calloc(zyojx5hw, double); wkumc9idwk34 = Calloc(dz1lbtph, double); for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { for (bpvaqm5z = 1; bpvaqm5z <= *npjlv3mr; bpvaqm5z++) { yq6lorbx = wkumc9idtgiyxdw1[bpvaqm5z-1] - 1 + (wkumc9iddufozmt7[bpvaqm5z-1] - 1) * *wy1vqfzu; wkumc9idwk12[yq6lorbx] = wpuarq2m[bpvaqm5z-1 + (ayfnwr1v-1) * *npjlv3mr]; } for (gp1jxzuh = 1; gp1jxzuh <= *xjc4ywlh; gp1jxzuh++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { wkumc9idwk34[yq6lorbx-1 + (gp1jxzuh-1) * *wy1vqfzu] = he7mqnvy[(ayfnwr1v-1) * *wy1vqfzu + yq6lorbx-1 + (gp1jxzuh-1) * *rutyk8mg]; } } for (gp1jxzuh = 1; gp1jxzuh <= *xjc4ywlh; gp1jxzuh++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { q6zdcwxk = 0.0e0; for (bpvaqm5z = yq6lorbx; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { q6zdcwxk += wkumc9idwk12[yq6lorbx-1 + (bpvaqm5z-1) * *wy1vqfzu] * wkumc9idwk34[bpvaqm5z-1 + (gp1jxzuh-1) * *wy1vqfzu]; } he7mqnvy[(ayfnwr1v-1) * *wy1vqfzu + yq6lorbx-1 + (gp1jxzuh-1) * *rutyk8mg] = q6zdcwxk; } } } Free(wkumc9idwk12); Free(wkumc9idwk34); Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); } void fvlmz9iyC_lkhnw9yq(double wpuarq2m[], double ks3wejcv[], int *npjlv3mr, int *wy1vqfzu, int *dvhw1ulq) { int ayfnwr1v, yq6lorbx, gp1jxzuh, uaoynef0, zyojx5hw = *wy1vqfzu * *wy1vqfzu; double q6zdcwxk, vn3iasxugno = 1.0e-14; double *wkumc9idwrk; wkumc9idwrk = Calloc(zyojx5hw, double); *dvhw1ulq = 1; for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { for (yq6lorbx = ayfnwr1v; yq6lorbx >= 1; yq6lorbx--) { q6zdcwxk = (yq6lorbx == ayfnwr1v) ? 1.0e0 : 0.0e0; for (gp1jxzuh = yq6lorbx+1; gp1jxzuh <= ayfnwr1v; gp1jxzuh++) { q6zdcwxk -= wpuarq2m[yq6lorbx-1 + (gp1jxzuh-1) * *npjlv3mr] * wkumc9idwrk[gp1jxzuh-1 + (ayfnwr1v-1) * *wy1vqfzu]; } if (fabs(wpuarq2m[yq6lorbx-1 + (yq6lorbx-1) * *npjlv3mr]) < vn3iasxugno) { Rprintf("Error in fvlmz9iyC_lkhnw9yq: U(cz8qdfyj,cz8qdfyj) is zero.\n"); *dvhw1ulq = 0; } else { wkumc9idwrk[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] = q6zdcwxk / wpuarq2m[yq6lorbx-1 + (yq6lorbx-1) * *npjlv3mr]; } } } for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (ayfnwr1v = yq6lorbx; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { uaoynef0 = (yq6lorbx < ayfnwr1v) ? 
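/* (Editor's reading of fvlmz9iyC_lkhnw9yq: wpuarq2m holds an upper
   triangular factor U in band form; the loop above back-solves
   U W = I column by column so that wkumc9idwrk contains W = U^{-1},
   and this loop then forms
     ks3wejcv = W W' = (U' U)^{-1},
   the inverse of the original matrix, filling both triangles
   symmetrically; dvhw1ulq is zeroed if a diagonal entry of U is
   smaller than about 1e-14 in magnitude.) */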
ayfnwr1v : yq6lorbx; q6zdcwxk = 0.0e0; for(gp1jxzuh = uaoynef0; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { q6zdcwxk += wkumc9idwrk[yq6lorbx-1 + (gp1jxzuh-1) * *wy1vqfzu] * wkumc9idwrk[ayfnwr1v-1 + (gp1jxzuh-1) * *wy1vqfzu]; } ks3wejcv[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] = ks3wejcv[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] = q6zdcwxk; } } Free(wkumc9idwrk); } double fvlmz9iyC_tldz5ion(double xval) { double hofjnx2e, xd4mybgj[6], q6zdcwxk = 1.000000000190015, tmp_y = xval; int yq6lorbx; xd4mybgj[0]= 76.18009172947146e0; xd4mybgj[1]= -86.50532032941677e0; xd4mybgj[2]= 24.01409824083091e0; xd4mybgj[3]= -1.231739572450155e0; xd4mybgj[4]= 0.1208650973866179e-2; xd4mybgj[5]= -0.5395239384953e-5; hofjnx2e = xval + 5.50; hofjnx2e -= (xval + 0.50) * log(hofjnx2e); for (yq6lorbx = 0; yq6lorbx < 6; yq6lorbx++) { tmp_y += 1.0e0; q6zdcwxk += xd4mybgj[yq6lorbx] / tmp_y; } return -hofjnx2e + log(2.5066282746310005e0 * q6zdcwxk / xval); } void fvlmz9iyC_enbin9(double bzmd6ftvmat[], double hdqsx7bk[], double nm0eljqk[], double *n2kersmx, int *f8yswcat, int *dvhw1ulq, int *zy1mchbf, double *ux3nadiw, double *rsynp1go, int *sguwj9ty) { int ayfnwr1v, kij0gwer, esql7umk; double vjz5sxty, pvcjl2na, mwuvskg1, btiehdm2 = 100.0e0 * *rsynp1go, ydb, ft3ijqmy, q6zdcwxk, plo6hkdr, csi9ydge, oxjgzv0e = 0.001e0; double bk3ymcih = -1.0; csi9ydge = bk3ymcih; bk3ymcih += bk3ymcih; bk3ymcih += csi9ydge; if (*n2kersmx <= 0.80e0 || *n2kersmx >= 1.0e0) { Rprintf("Error in fvlmz9iyC_enbin9: bad n2kersmx value.\n"); *dvhw1ulq = 0; return; } *dvhw1ulq = 1; for (kij0gwer = 1; kij0gwer <= *zy1mchbf; kij0gwer++) { for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { vjz5sxty = nm0eljqk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] / hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]; if ((vjz5sxty < oxjgzv0e) || ( nm0eljqk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] > 1.0e5)) { bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = -nm0eljqk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] * (1.0e0 + hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] / (hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] + nm0eljqk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat])) / pow(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat], (double) 2.0); if (bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] > -btiehdm2) bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = -btiehdm2; goto ceqzd1hi20; } q6zdcwxk = 0.0e0; pvcjl2na = hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] / (hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] + nm0eljqk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]); mwuvskg1 = 1.0e0 - pvcjl2na; csi9ydge = hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]; if (pvcjl2na < btiehdm2) pvcjl2na = btiehdm2; if (mwuvskg1 < btiehdm2) mwuvskg1 = btiehdm2; esql7umk = 100 + 15 * floor(nm0eljqk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]); if (esql7umk < *sguwj9ty) { esql7umk = *sguwj9ty; } ft3ijqmy = pow(pvcjl2na, csi9ydge); *ux3nadiw = ft3ijqmy; plo6hkdr = (1.0e0 - *ux3nadiw) / pow(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat], (double) 2.0); q6zdcwxk += plo6hkdr; ydb = 1.0e0; ft3ijqmy = hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] * mwuvskg1 * ft3ijqmy; *ux3nadiw += ft3ijqmy; plo6hkdr = (1.0e0 - *ux3nadiw) / pow((hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] + ydb), (double) 2.0); q6zdcwxk += plo6hkdr; ydb = 2.0e0; while (((*ux3nadiw <= *n2kersmx) || (plo6hkdr > 1.0e-4)) && (ydb < esql7umk)) { ft3ijqmy = (hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] - 1.0 + ydb) * mwuvskg1 * ft3ijqmy / ydb; *ux3nadiw += ft3ijqmy; plo6hkdr = (1.0e0 - *ux3nadiw) / pow((hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * 
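/* (Editor's notes on the helpers here, inferred from the code:
   fvlmz9iyC_tldz5ion above is the familiar six-coefficient Lanczos
   approximation to the log-gamma function,
     lgamma(x) ~ (x + 0.5)*log(x + 5.5) - (x + 5.5)
                 + log( sqrt(2*pi) * (c0 + sum_k ck/(x + k)) / x ),
   with 2.5066282746310005 = sqrt(2*pi). The surrounding loop in
   fvlmz9iyC_enbin9 builds the NB(k, mu) probabilities term by term,
   with p = k/(k + mu), and accumulates
     q6zdcwxk = sum_{y >= 0} (1 - F(y)) / (k + y)^2,
   stopping once the cdf *ux3nadiw exceeds *n2kersmx and the tail
   term plo6hkdr falls below 1e-4; -q6zdcwxk is then stored as the
   working weight for the size parameter k.) */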
*f8yswcat] + ydb), (double) 2.0); q6zdcwxk += plo6hkdr; ydb += 1.0e0; } bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = -q6zdcwxk; ceqzd1hi20: bk3ymcih = 0.0e0; } } } void fvlmz9iyC_enbin8(double bzmd6ftvmat[], double hdqsx7bk[], double hsj9bzaq[], double *n2kersmx, int *f8yswcat, int *dvhw1ulq, int *zy1mchbf, double *ux3nadiw, double *rsynp1go) { int ayfnwr1v, kij0gwer; double ft3ijqmy, tad5vhsu, o3jyipdf, pq0hfucn, q6zdcwxk, plo6hkdr, qtce8hzo1 = 0.0e0, qtce8hzo2 = 0.0e0; int fw2rodat, rx8qfndg, mqudbv4y; double onemse, nm0eljqk, ydb, btiehdm2 = -100.0 * *rsynp1go, kbig = 1.0e4, oxjgzv0e = 0.0010; if (*n2kersmx <= 0.80e0 || *n2kersmx >= 1.0e0) { Rprintf("returning since n2kersmx <= 0.8 or >= 1\n"); *dvhw1ulq = 0; return; } onemse = 1.0e0 / (1.0e0 + oxjgzv0e); *dvhw1ulq = 1; for (kij0gwer = 1; kij0gwer <= *zy1mchbf; kij0gwer++) { for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { if ( hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] > kbig) hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = kbig; if (hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] < oxjgzv0e) hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = oxjgzv0e; if (hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] > onemse) { nm0eljqk = hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] * (1.0e0 / hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] - 1.0e0); bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = -nm0eljqk * (1.0e0 + hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] / (hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] + nm0eljqk)) / pow(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat], (double) 2.0); if (bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] > btiehdm2) bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = btiehdm2; goto ceqzd1hi20; } q6zdcwxk = 0.0e0; fw2rodat = 1; rx8qfndg = hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1)**f8yswcat] < (1.0 - *rsynp1go) ? 1 : 0; mqudbv4y = fw2rodat && rx8qfndg ? 
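/* (Editor's note: fvlmz9iyC_enbin8 computes the same tail sum
   sum_{y >= 0} (1 - F(y))/(k + y)^2 as enbin9 above, but is
   parameterized by the probability hsj9bzaq = p = k/(k + mu) and
   rebuilds each NB probability on the log scale to avoid overflow
   for large k or y:
     log P(Y = y) = y*log(1 - p) + k*log(p)
                    + lgamma(y + k) - lgamma(k) - lgamma(y + 1),
   where qtce8hzo1 = log(1 - p), qtce8hzo2 = k*log(p), tad5vhsu and
   pq0hfucn grow lgamma(y + k) and lgamma(y + 1) incrementally, and
   o3jyipdf = lgamma(k) comes from the Lanczos helper tldz5ion.) */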
1 : 0; if (mqudbv4y) { qtce8hzo2 = hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] * log(hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]); *ux3nadiw = exp(qtce8hzo2); } else { *ux3nadiw = 0.0e0; } plo6hkdr = (1.0e0 - *ux3nadiw) / pow(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat], (double) 2.0); q6zdcwxk += plo6hkdr; o3jyipdf = fvlmz9iyC_tldz5ion(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]); ydb = 1.0e0; tad5vhsu = fvlmz9iyC_tldz5ion(ydb + hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]); pq0hfucn = 0.0e0; if (mqudbv4y) { qtce8hzo1 = log(1.0e0 - hsj9bzaq[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat]); ft3ijqmy = exp(ydb * qtce8hzo1 + qtce8hzo2 + tad5vhsu - o3jyipdf - pq0hfucn); } else { ft3ijqmy = 0.0e0; } *ux3nadiw += ft3ijqmy; plo6hkdr = (1.0e0 - *ux3nadiw) / pow(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] + ydb, (double) 2.0); q6zdcwxk += plo6hkdr; ydb = 2.0e0; while((*ux3nadiw <= *n2kersmx) || (plo6hkdr > 1.0e-4)) { tad5vhsu += log(ydb + hdqsx7bk[ayfnwr1v-1+(kij0gwer-1) * *f8yswcat] - 1.0); pq0hfucn += log(ydb); if (mqudbv4y) { ft3ijqmy = exp(ydb * qtce8hzo1 + qtce8hzo2 + tad5vhsu - o3jyipdf - pq0hfucn); } else { ft3ijqmy = 0.0e0; } *ux3nadiw += ft3ijqmy; plo6hkdr = (1.0e0 - *ux3nadiw) / pow(hdqsx7bk[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] + ydb, (double) 2.0); q6zdcwxk += plo6hkdr; ydb += 1.0e0; if (ydb > 1.0e3) goto ceqzd1hi21; } ceqzd1hi21: bzmd6ftvmat[ayfnwr1v-1 + (kij0gwer-1) * *f8yswcat] = -q6zdcwxk; ceqzd1hi20: tad5vhsu = 0.0e0; } } } void fvlmz9iyC_mbessI0(double unvxka0m[], int *f8yswcat, int *kpzavbj3, double dvector0[], double dvector1[], double dvector2[], int *zjkrtol8, double *qaltf0nz) { int ayfnwr1v, gp1jxzuh, c5aesxkus; double f0, t0, m0, f1, t1, m1, f2, t2, m2, Toobig = 20.0e0; *zjkrtol8 = 0; if (!(*kpzavbj3 == 0 || *kpzavbj3 == 1 || *kpzavbj3 == 2)) { Rprintf("Error in fvlmz9iyC_mbessI0: kpzavbj3 not in 0:2. 
Returning.\n"); *zjkrtol8 = 1; return; } for (gp1jxzuh = 1; gp1jxzuh <= *f8yswcat; gp1jxzuh++) { if (fabs(unvxka0m[gp1jxzuh-1]) > Toobig) { Rprintf("Error in fvlmz9iyC_mbessI0: unvxka0m[] value > too big.\n"); *zjkrtol8 = 1; return; } t1 = unvxka0m[gp1jxzuh-1] / 2.0e0; f1 = t1; t0 = t1 * t1; f0 = 1.0e0 + t0; t2 = 0.50e0; f2 = t2; c5aesxkus = 15; if (fabs(unvxka0m[gp1jxzuh-1]) > 10.0) c5aesxkus = 25; if (fabs(unvxka0m[gp1jxzuh-1]) > 15.0) c5aesxkus = 35; if (fabs(unvxka0m[gp1jxzuh-1]) > 20.0) c5aesxkus = 40; if (fabs(unvxka0m[gp1jxzuh-1]) > 30.0) c5aesxkus = 55; for (ayfnwr1v = 1; ayfnwr1v <= c5aesxkus; ayfnwr1v++) { m0 = pow(unvxka0m[gp1jxzuh-1] / (2.0 * (ayfnwr1v + 1.0)), (double) 2); m1 = m0 * (1.0e0 + 1.0e0 / ayfnwr1v); m2 = m1 * (2.0e0 * ayfnwr1v + 1.0e0) / (2.0e0 * ayfnwr1v - 1.0e0); t0 = t0 * m0; t1 = t1 * m1; t2 = t2 * m2; f0 = f0 + t0; f1 = f1 + t1; f2 = f2 + t2; if ((fabs(t0) < *qaltf0nz) && (fabs(t1) < *qaltf0nz) && (fabs(t2) < *qaltf0nz)) break; } if (0 <= *kpzavbj3) dvector0[gp1jxzuh-1] = f0; if (1 <= *kpzavbj3) dvector1[gp1jxzuh-1] = f1; if (2 <= *kpzavbj3) dvector2[gp1jxzuh-1] = f2; } } void VGAM_C_mux34(double he7mqnvy[], double Dmat[], int *vnc1izfy, int *e0nmabdk, int *ui4ntmvd, double bqelz3cy[]) { int ayfnwr1v, yq6lorbx, gp1jxzuh; double *qnwamo0e1, *qnwamo0e2; if (*e0nmabdk == 1) { qnwamo0e1 = bqelz3cy; qnwamo0e2 = he7mqnvy; for (ayfnwr1v = 0; ayfnwr1v < *vnc1izfy; ayfnwr1v++) { *qnwamo0e1++ = *Dmat * pow(*qnwamo0e2++, (double) 2.0); } return; } if (*ui4ntmvd == 1) { for (ayfnwr1v = 1; ayfnwr1v <= *vnc1izfy; ayfnwr1v++) { bqelz3cy[ayfnwr1v-1] = 0.0e0; for (yq6lorbx = 1; yq6lorbx <= *e0nmabdk; yq6lorbx++) { bqelz3cy[ayfnwr1v-1] += Dmat[yq6lorbx-1 + (yq6lorbx-1) * *e0nmabdk] * pow(he7mqnvy[ayfnwr1v-1 + (yq6lorbx-1) * *vnc1izfy], (double) 2.0); } if (*e0nmabdk > 1) { for (yq6lorbx = 1; yq6lorbx <= *e0nmabdk; yq6lorbx++) { for (gp1jxzuh = yq6lorbx+1; gp1jxzuh <= *e0nmabdk; gp1jxzuh++) { bqelz3cy[ayfnwr1v-1] += Dmat[yq6lorbx-1 + (gp1jxzuh-1) * *e0nmabdk] * he7mqnvy[ayfnwr1v-1 + (yq6lorbx-1) * *vnc1izfy] * he7mqnvy[ayfnwr1v-1 + (gp1jxzuh-1) * *vnc1izfy] * 2.0; } } } } } else { for (ayfnwr1v = 1; ayfnwr1v <= *vnc1izfy; ayfnwr1v++) { bqelz3cy[ayfnwr1v-1] = 0.0e0; for (yq6lorbx = 1; yq6lorbx <= *e0nmabdk; yq6lorbx++) { for (gp1jxzuh = 1; gp1jxzuh <= *e0nmabdk; gp1jxzuh++) { bqelz3cy[ayfnwr1v-1] += Dmat[yq6lorbx-1 + (gp1jxzuh-1) * *e0nmabdk] * he7mqnvy[ayfnwr1v-1 + (yq6lorbx-1) * *vnc1izfy] * he7mqnvy[ayfnwr1v-1 + (gp1jxzuh-1) * *vnc1izfy]; } } } } } VGAM/src/ei.f0000644000176200001440000006042513565414527012362 0ustar liggesusers SUBROUTINE calcei(ARG,RESULT,INT) C---------------------------------------------------------------------- C C This Fortran 77 packet computes the exponential integrals Ei(x), C E1(x), and exp(-x)*Ei(x) for real arguments x where C C integral (from t=-infinity to t=x) (exp(t)/t), x > 0, C Ei(x) = C -integral (from t=-x to t=infinity) (exp(t)/t), x < 0, C C and where the first integral is a principal value integral. C The packet contains three function type subprograms: EI, EONE, C and EXPEI; and one subroutine type subprogram: CALCEI. The C calling statements for the primary entries are C C Y = EI(X), where X .NE. 0, C C Y = EONE(X), where X .GT. 0, C and C Y = EXPEI(X), where X .NE. 0, C C and where the entry points correspond to the functions Ei(x), C E1(x), and exp(-x)*Ei(x), respectively. The routine CALCEI C is intended for internal packet use only, all computations within C the packet being concentrated in this routine. 
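C (Editor's usage sketch, not part of the original documentation:
C after declaring DOUBLE PRECISION EI, EONE, EXPEI, one has
C     Y = EI(1.0D0)      approximately  1.8951178,
C     Y = EONE(1.0D0)    approximately  0.21938393,
C and EXPEI(X) returns EXP(-X)*EI(X) directly, avoiding the
C intermediate overflow that forming EXP(-X)*EI(X) from the two
C factors would suffer for large X.)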
The function C subprograms invoke CALCEI with the Fortran statement C CALL CALCEI(ARG,RESULT,INT) C where the parameter usage is as follows C C Function Parameters for CALCEI C Call ARG RESULT INT C C EI(X) X .NE. 0 Ei(X) 1 C EONE(X) X .GT. 0 -Ei(-X) 2 C EXPEI(X) X .NE. 0 exp(-X)*Ei(X) 3 C C The main computation involves evaluation of rational Chebyshev C approximations published in Math. Comp. 22, 641-649 (1968), and C Math. Comp. 23, 289-303 (1969) by Cody and Thacher. This C transportable program is patterned after the machine-dependent C FUNPACK packet NATSEI, but cannot match that version for C efficiency or accuracy. This version uses rational functions C that theoretically approximate the exponential integrals to C at least 18 significant decimal digits. The accuracy achieved C depends on the arithmetic system, the compiler, the intrinsic C functions, and proper selection of the machine-dependent C constants. C C C******************************************************************* C******************************************************************* C C Explanation of machine-dependent constants C C beta = radix for the floating-point system. C minexp = smallest representable power of beta. C maxexp = smallest power of beta that overflows. C XBIG = largest argument acceptable to EONE; solution to C equation: C exp(-x)/x * (1 + 1/x) = beta ** minexp. C XINF = largest positive machine number; approximately C beta ** maxexp C XMAX = largest argument acceptable to EI; solution to C equation: exp(x)/x * (1 + 1/x) = beta ** maxexp. C C Approximate values for some important machines are: C C beta minexp maxexp C C CRAY-1 (S.P.) 2 -8193 8191 C Cyber 180/185 C under NOS (S.P.) 2 -975 1070 C IEEE (IBM/XT, C SUN, etc.) (S.P.) 2 -126 128 C IEEE (IBM/XT, C SUN, etc.) (D.P.) 2 -1022 1024 C IBM 3033 (D.P.) 16 -65 63 C VAX D-Format (D.P.) 2 -128 127 C VAX G-Format (D.P.) 2 -1024 1023 C C XBIG XINF XMAX C C CRAY-1 (S.P.) 5670.31 5.45E+2465 5686.21 C Cyber 180/185 C under NOS (S.P.) 669.31 1.26E+322 748.28 C IEEE (IBM/XT, C SUN, etc.) (S.P.) 82.93 3.40E+38 93.24 C IEEE (IBM/XT, C SUN, etc.) (D.P.) 701.84 1.79D+308 716.35 C IBM 3033 (D.P.) 175.05 7.23D+75 179.85 C VAX D-Format (D.P.) 84.30 1.70D+38 92.54 C VAX G-Format (D.P.) 703.22 8.98D+307 715.66 C C******************************************************************* C******************************************************************* C C Error returns C C The following table shows the types of error that may be C encountered in this routine and the function value supplied C in each case. C C Error Argument Function values for C Range EI EXPEI EONE C C UNDERFLOW (-)X .GT. XBIG 0 - 0 C OVERFLOW X .GE. XMAX XINF - - C ILLEGAL X X = 0 -XINF -XINF XINF C ILLEGAL X X .LT. 0 - - USE ABS(X) C C Intrinsic functions required are: C C ABS, SQRT, EXP C C C Author: W. J. 
Cody C Mathematics abd Computer Science Division C Argonne National Laboratory C Argonne, IL 60439 C C Latest modification: September 9, 1988 C C---------------------------------------------------------------------- INTEGER I,INT CS REAL DOUBLE PRECISION 1 A,ARG,B,C,D,EXP40,E,EI,F,FOUR,FOURTY,FRAC,HALF,ONE,P, 2 PLG,PX,P037,P1,P2,Q,QLG,QX,Q1,Q2,R,RESULT,S,SIX,SUMP, 3 SUMQ,T,THREE,TWELVE,TWO,TWO4,W,X,XBIG,XINF,XMAX,XMX0, 4 X0,X01,X02,X11,Y,YSQ,ZERO DIMENSION A(7),B(6),C(9),D(9),E(10),F(10),P(10),Q(10),R(10), 1 S(9),P1(10),Q1(9),P2(10),Q2(9),PLG(4),QLG(4),PX(10),QX(10) C---------------------------------------------------------------------- C Mathematical constants C EXP40 = exp(40) C X0 = zero of Ei C X01/X11 + X02 = zero of Ei to extra precision C---------------------------------------------------------------------- CS DATA ZERO,P037,HALF,ONE,TWO/0.0E0,0.037E0,0.5E0,1.0E0,2.0E0/, CS 1 THREE,FOUR,SIX,TWELVE,TWO4/3.0E0,4.0E0,6.0E0,12.E0,24.0E0/, CS 2 FOURTY,EXP40/40.0E0,2.3538526683701998541E17/, CS 3 X01,X11,X02/381.5E0,1024.0E0,-5.1182968633365538008E-5/, CS 4 X0/3.7250741078136663466E-1/ DATA ZERO,P037,HALF,ONE,TWO/0.0D0,0.037D0,0.5D0,1.0D0,2.0D0/, 1 THREE,FOUR,SIX,TWELVE,TWO4/3.0D0,4.0D0,6.0D0,12.D0,24.0D0/, 2 FOURTY,EXP40/40.0D0,2.3538526683701998541D17/, 3 X01,X11,X02/381.5D0,1024.0D0,-5.1182968633365538008D-5/, 4 X0/3.7250741078136663466D-1/ C---------------------------------------------------------------------- C Machine-dependent constants C---------------------------------------------------------------------- CS DATA XINF/3.40E+38/,XMAX/93.246E0/,XBIG/82.93E0/ DATA XINF/1.79D+308/,XMAX/716.351D0/,XBIG/701.84D0/ C---------------------------------------------------------------------- C Coefficients for -1.0 <= X < 0.0 C---------------------------------------------------------------------- CS DATA A/1.1669552669734461083368E2, 2.1500672908092918123209E3, CS 1 1.5924175980637303639884E4, 8.9904972007457256553251E4, CS 2 1.5026059476436982420737E5,-1.4815102102575750838086E5, CS 3 5.0196785185439843791020E0/ CS DATA B/4.0205465640027706061433E1, 7.5043163907103936624165E2, CS 1 8.1258035174768735759855E3, 5.2440529172056355429883E4, CS 2 1.8434070063353677359298E5, 2.5666493484897117319268E5/ DATA A/1.1669552669734461083368D2, 2.1500672908092918123209D3, 1 1.5924175980637303639884D4, 8.9904972007457256553251D4, 2 1.5026059476436982420737D5,-1.4815102102575750838086D5, 3 5.0196785185439843791020D0/ DATA B/4.0205465640027706061433D1, 7.5043163907103936624165D2, 1 8.1258035174768735759855D3, 5.2440529172056355429883D4, 2 1.8434070063353677359298D5, 2.5666493484897117319268D5/ C---------------------------------------------------------------------- C Coefficients for -4.0 <= X < -1.0 C---------------------------------------------------------------------- CS DATA C/3.828573121022477169108E-1, 1.107326627786831743809E+1, CS 1 7.246689782858597021199E+1, 1.700632978311516129328E+2, CS 2 1.698106763764238382705E+2, 7.633628843705946890896E+1, CS 3 1.487967702840464066613E+1, 9.999989642347613068437E-1, CS 4 1.737331760720576030932E-8/ CS DATA D/8.258160008564488034698E-2, 4.344836335509282083360E+0, CS 1 4.662179610356861756812E+1, 1.775728186717289799677E+2, CS 2 2.953136335677908517423E+2, 2.342573504717625153053E+2, CS 3 9.021658450529372642314E+1, 1.587964570758947927903E+1, CS 4 1.000000000000000000000E+0/ DATA C/3.828573121022477169108D-1, 1.107326627786831743809D+1, 1 7.246689782858597021199D+1, 1.700632978311516129328D+2, 2 1.698106763764238382705D+2, 7.633628843705946890896D+1, 3 
1.487967702840464066613D+1, 9.999989642347613068437D-1, 4 1.737331760720576030932D-8/ DATA D/8.258160008564488034698D-2, 4.344836335509282083360D+0, 1 4.662179610356861756812D+1, 1.775728186717289799677D+2, 2 2.953136335677908517423D+2, 2.342573504717625153053D+2, 3 9.021658450529372642314D+1, 1.587964570758947927903D+1, 4 1.000000000000000000000D+0/ C---------------------------------------------------------------------- C Coefficients for X < -4.0 C---------------------------------------------------------------------- CS DATA E/1.3276881505637444622987E+2,3.5846198743996904308695E+4, CS 1 1.7283375773777593926828E+5,2.6181454937205639647381E+5, CS 2 1.7503273087497081314708E+5,5.9346841538837119172356E+4, CS 3 1.0816852399095915622498E+4,1.0611777263550331766871E03, CS 4 5.2199632588522572481039E+1,9.9999999999999999087819E-1/ CS DATA F/3.9147856245556345627078E+4,2.5989762083608489777411E+5, CS 1 5.5903756210022864003380E+5,5.4616842050691155735758E+5, CS 2 2.7858134710520842139357E+5,7.9231787945279043698718E+4, CS 3 1.2842808586627297365998E+4,1.1635769915320848035459E+3, CS 4 5.4199632588522559414924E+1,1.0E0/ DATA E/1.3276881505637444622987D+2,3.5846198743996904308695D+4, 1 1.7283375773777593926828D+5,2.6181454937205639647381D+5, 2 1.7503273087497081314708D+5,5.9346841538837119172356D+4, 3 1.0816852399095915622498D+4,1.0611777263550331766871D03, 4 5.2199632588522572481039D+1,9.9999999999999999087819D-1/ DATA F/3.9147856245556345627078D+4,2.5989762083608489777411D+5, 1 5.5903756210022864003380D+5,5.4616842050691155735758D+5, 2 2.7858134710520842139357D+5,7.9231787945279043698718D+4, 3 1.2842808586627297365998D+4,1.1635769915320848035459D+3, 4 5.4199632588522559414924D+1,1.0D0/ C---------------------------------------------------------------------- C Coefficients for rational approximation to ln(x/a), |1-x/a| < .1 C---------------------------------------------------------------------- CS DATA PLG/-2.4562334077563243311E+01,2.3642701335621505212E+02, CS 1 -5.4989956895857911039E+02,3.5687548468071500413E+02/ CS DATA QLG/-3.5553900764052419184E+01,1.9400230218539473193E+02, CS 1 -3.3442903192607538956E+02,1.7843774234035750207E+02/ DATA PLG/-2.4562334077563243311D+01,2.3642701335621505212D+02, 1 -5.4989956895857911039D+02,3.5687548468071500413D+02/ DATA QLG/-3.5553900764052419184D+01,1.9400230218539473193D+02, 1 -3.3442903192607538956D+02,1.7843774234035750207D+02/ C---------------------------------------------------------------------- C Coefficients for 0.0 < X < 6.0, C ratio of Chebyshev polynomials C---------------------------------------------------------------------- CS DATA P/-1.2963702602474830028590E01,-1.2831220659262000678155E03, CS 1 -1.4287072500197005777376E04,-1.4299841572091610380064E06, CS 2 -3.1398660864247265862050E05,-3.5377809694431133484800E08, CS 3 3.1984354235237738511048E08,-2.5301823984599019348858E10, CS 4 1.2177698136199594677580E10,-2.0829040666802497120940E11/ CS DATA Q/ 7.6886718750000000000000E01,-5.5648470543369082846819E03, CS 1 1.9418469440759880361415E05,-4.2648434812177161405483E06, CS 2 6.4698830956576428587653E07,-7.0108568774215954065376E08, CS 3 5.4229617984472955011862E09,-2.8986272696554495342658E10, CS 4 9.8900934262481749439886E10,-8.9673749185755048616855E10/ DATA P/-1.2963702602474830028590D01,-1.2831220659262000678155D03, 1 -1.4287072500197005777376D04,-1.4299841572091610380064D06, 2 -3.1398660864247265862050D05,-3.5377809694431133484800D08, 3 3.1984354235237738511048D08,-2.5301823984599019348858D10, 4 
1.2177698136199594677580D10,-2.0829040666802497120940D11/ DATA Q/ 7.6886718750000000000000D01,-5.5648470543369082846819D03, 1 1.9418469440759880361415D05,-4.2648434812177161405483D06, 2 6.4698830956576428587653D07,-7.0108568774215954065376D08, 3 5.4229617984472955011862D09,-2.8986272696554495342658D10, 4 9.8900934262481749439886D10,-8.9673749185755048616855D10/ C---------------------------------------------------------------------- C J-fraction coefficients for 6.0 <= X < 12.0 C---------------------------------------------------------------------- CS DATA R/-2.645677793077147237806E00,-2.378372882815725244124E00, CS 1 -2.421106956980653511550E01, 1.052976392459015155422E01, CS 2 1.945603779539281810439E01,-3.015761863840593359165E01, CS 3 1.120011024227297451523E01,-3.988850730390541057912E00, CS 4 9.565134591978630774217E00, 9.981193787537396413219E-1/ CS DATA S/ 1.598517957704779356479E-4, 4.644185932583286942650E00, CS 1 3.697412299772985940785E02,-8.791401054875438925029E00, CS 2 7.608194509086645763123E02, 2.852397548119248700147E01, CS 3 4.731097187816050252967E02,-2.369210235636181001661E02, CS 4 1.249884822712447891440E00/ DATA R/-2.645677793077147237806D00,-2.378372882815725244124D00, 1 -2.421106956980653511550D01, 1.052976392459015155422D01, 2 1.945603779539281810439D01,-3.015761863840593359165D01, 3 1.120011024227297451523D01,-3.988850730390541057912D00, 4 9.565134591978630774217D00, 9.981193787537396413219D-1/ DATA S/ 1.598517957704779356479D-4, 4.644185932583286942650D00, 1 3.697412299772985940785D02,-8.791401054875438925029D00, 2 7.608194509086645763123D02, 2.852397548119248700147D01, 3 4.731097187816050252967D02,-2.369210235636181001661D02, 4 1.249884822712447891440D00/ C---------------------------------------------------------------------- C J-fraction coefficients for 12.0 <= X < 24.0 C---------------------------------------------------------------------- CS DATA P1/-1.647721172463463140042E00,-1.860092121726437582253E01, CS 1 -1.000641913989284829961E01,-2.105740799548040450394E01, CS 2 -9.134835699998742552432E-1,-3.323612579343962284333E01, CS 3 2.495487730402059440626E01, 2.652575818452799819855E01, CS 4 -1.845086232391278674524E00, 9.999933106160568739091E-1/ CS DATA Q1/ 9.792403599217290296840E01, 6.403800405352415551324E01, CS 1 5.994932325667407355255E01, 2.538819315630708031713E02, CS 2 4.429413178337928401161E01, 1.192832423968601006985E03, CS 3 1.991004470817742470726E02,-1.093556195391091143924E01, CS 4 1.001533852045342697818E00/ DATA P1/-1.647721172463463140042D00,-1.860092121726437582253D01, 1 -1.000641913989284829961D01,-2.105740799548040450394D01, 2 -9.134835699998742552432D-1,-3.323612579343962284333D01, 3 2.495487730402059440626D01, 2.652575818452799819855D01, 4 -1.845086232391278674524D00, 9.999933106160568739091D-1/ DATA Q1/ 9.792403599217290296840D01, 6.403800405352415551324D01, 1 5.994932325667407355255D01, 2.538819315630708031713D02, 2 4.429413178337928401161D01, 1.192832423968601006985D03, 3 1.991004470817742470726D02,-1.093556195391091143924D01, 4 1.001533852045342697818D00/ C---------------------------------------------------------------------- C J-fraction coefficients for X .GE. 
24.0 C---------------------------------------------------------------------- CS DATA P2/ 1.75338801265465972390E02,-2.23127670777632409550E02, CS 1 -1.81949664929868906455E01,-2.79798528624305389340E01, CS 2 -7.63147701620253630855E00,-1.52856623636929636839E01, CS 3 -7.06810977895029358836E00,-5.00006640413131002475E00, CS 4 -3.00000000320981265753E00, 1.00000000000000485503E00/ CS DATA Q2/ 3.97845977167414720840E04, 3.97277109100414518365E00, CS 1 1.37790390235747998793E02, 1.17179220502086455287E02, CS 2 7.04831847180424675988E01,-1.20187763547154743238E01, CS 3 -7.99243595776339741065E00,-2.99999894040324959612E00, CS 4 1.99999999999048104167E00/ DATA P2/ 1.75338801265465972390D02,-2.23127670777632409550D02, 1 -1.81949664929868906455D01,-2.79798528624305389340D01, 2 -7.63147701620253630855D00,-1.52856623636929636839D01, 3 -7.06810977895029358836D00,-5.00006640413131002475D00, 4 -3.00000000320981265753D00, 1.00000000000000485503D00/ DATA Q2/ 3.97845977167414720840D04, 3.97277109100414518365D00, 1 1.37790390235747998793D02, 1.17179220502086455287D02, 2 7.04831847180424675988D01,-1.20187763547154743238D01, 3 -7.99243595776339741065D00,-2.99999894040324959612D00, 4 1.99999999999048104167D00/ C---------------------------------------------------------------------- X = ARG IF (X .EQ. ZERO) THEN EI = -XINF IF (INT .EQ. 2) EI = -EI ELSE IF ((X .LT. ZERO) .OR. (INT .EQ. 2)) THEN C---------------------------------------------------------------------- C Calculate EI for negative argument or for E1. C---------------------------------------------------------------------- Y = ABS(X) IF (Y .LE. ONE) THEN SUMP = A(7) * Y + A(1) SUMQ = Y + B(1) DO 110 I = 2, 6 SUMP = SUMP * Y + A(I) SUMQ = SUMQ * Y + B(I) 110 CONTINUE EI = LOG(Y) - SUMP / SUMQ IF (INT .EQ. 3) EI = EI * EXP(Y) ELSE IF (Y .LE. FOUR) THEN W = ONE / Y SUMP = C(1) SUMQ = D(1) DO 130 I = 2, 9 SUMP = SUMP * W + C(I) SUMQ = SUMQ * W + D(I) 130 CONTINUE EI = - SUMP / SUMQ IF (INT .NE. 3) EI = EI * EXP(-Y) ELSE IF ((Y .GT. XBIG) .AND. (INT .LT. 3)) THEN EI = ZERO ELSE W = ONE / Y SUMP = E(1) SUMQ = F(1) DO 150 I = 2, 10 SUMP = SUMP * W + E(I) SUMQ = SUMQ * W + F(I) 150 CONTINUE EI = -W * (ONE - W * SUMP / SUMQ ) IF (INT .NE. 3) EI = EI * EXP(-Y) END IF END IF IF (INT .EQ. 2) EI = -EI ELSE IF (X .LT. SIX) THEN C---------------------------------------------------------------------- C To improve conditioning, rational approximations are expressed C in terms of Chebyshev polynomials for 0 <= X < 6, and in C continued fraction form for larger X. C---------------------------------------------------------------------- T = X + X T = T / THREE - TWO PX(1) = ZERO QX(1) = ZERO PX(2) = P(1) QX(2) = Q(1) DO 210 I = 2, 9 PX(I+1) = T * PX(I) - PX(I-1) + P(I) QX(I+1) = T * QX(I) - QX(I-1) + Q(I) 210 CONTINUE SUMP = HALF * T * PX(10) - PX(9) + P(10) SUMQ = HALF * T * QX(10) - QX(9) + Q(10) FRAC = SUMP / SUMQ XMX0 = (X - X01/X11) - X02 IF (ABS(XMX0) .GE. P037) THEN EI = LOG(X/X0) + XMX0 * FRAC IF (INT .EQ. 3) EI = EXP(-X) * EI ELSE C---------------------------------------------------------------------- C Special approximation to ln(X/X0) for X close to X0 C---------------------------------------------------------------------- Y = XMX0 / (X + X0) YSQ = Y*Y SUMP = PLG(1) SUMQ = YSQ + QLG(1) DO 220 I = 2, 4 SUMP = SUMP*YSQ + PLG(I) SUMQ = SUMQ*YSQ + QLG(I) 220 CONTINUE EI = (SUMP / (SUMQ*(X+X0)) + FRAC) * XMX0 IF (INT .EQ. 3) EI = EXP(-X) * EI END IF ELSE IF (X .LT. 
TWELVE) THEN FRAC = ZERO DO 230 I = 1, 9 FRAC = S(I) / (R(I) + X + FRAC) 230 CONTINUE EI = (R(10) + FRAC) / X IF (INT .NE. 3) EI = EI * EXP(X) ELSE IF (X .LE. TWO4) THEN FRAC = ZERO DO 240 I = 1, 9 FRAC = Q1(I) / (P1(I) + X + FRAC) 240 CONTINUE EI = (P1(10) + FRAC) / X IF (INT .NE. 3) EI = EI * EXP(X) ELSE IF ((X .GE. XMAX) .AND. (INT .LT. 3)) THEN EI = XINF ELSE Y = ONE / X FRAC = ZERO DO 250 I = 1, 9 FRAC = Q2(I) / (P2(I) + X + FRAC) 250 CONTINUE FRAC = P2(10) + FRAC EI = Y + Y * Y * FRAC IF (INT .NE. 3) THEN IF (X .LE. XMAX-TWO4) THEN EI = EI * EXP(X) ELSE C---------------------------------------------------------------------- C Calculation reformulated to avoid premature overflow C---------------------------------------------------------------------- EI = (EI * EXP(X-FOURTY)) * EXP40 END IF END IF END IF END IF RESULT = EI RETURN C---------- Last line of CALCEI ---------- END SUBROUTINE einlib(X, RESULT) C FUNCTION EINLIB(X) C-------------------------------------------------------------------- C C This function program computes approximate values for the C exponential integral Ei(x), where x is real. C C Author: W. J. Cody C C Latest modification: January 12, 1988 C Latest modification: 20130629 by TWY C C-------------------------------------------------------------------- INTEGER INT CS REAL EI CS REAL X CS REAL RESULT DOUBLE PRECISION X CD DOUBLE PRECISION EI DOUBLE PRECISION RESULT C-------------------------------------------------------------------- INT = 1 CALL calcei(X,RESULT,INT) CD EI = RESULT RETURN C---------- Last line of EI ---------- END SUBROUTINE expeinl(X, RESULT) C FUNCTION EXPEINL(X) C-------------------------------------------------------------------- C C This function program computes approximate values for the C function exp(-x) * Ei(x), where Ei(x) is the exponential C integral, and x is real. C C Author: W. J. Cody C C Latest modification: January 12, 1988 C Latest modification: 20130629 by TWY C C-------------------------------------------------------------------- INTEGER INT CS REAL EXPEI CS REAL X CS REAL RESULT CD DOUBLE PRECISION EXPEI DOUBLE PRECISION X DOUBLE PRECISION RESULT C-------------------------------------------------------------------- INT = 3 CALL calcei(X,RESULT,INT) CD EXPEI = RESULT RETURN C---------- Last line of EXPEI ---------- END SUBROUTINE eonenl(X, RESULT) C FUNCTION EONENL(X) C-------------------------------------------------------------------- C C This function program computes approximate values for the C exponential integral E1(x), where x is real. C C Author: W. J. Cody C C Latest modification: January 12, 1988 C Latest modification: 20130629 by TWY C C-------------------------------------------------------------------- INTEGER INT CS REAL EONE CS REAL X CS REAL RESULT CD DOUBLE PRECISION EONE DOUBLE PRECISION X DOUBLE PRECISION RESULT C-------------------------------------------------------------------- INT = 2 CALL calcei(X,RESULT,INT) CD EONE = RESULT RETURN C---------- Last line of EONE ---------- END VGAM/src/lerchphi.c0000644000176200001440000002320713565414527013555 0ustar liggesusers/* ------------------------------- Lerch's transcendent Phi(z,s,v) ------------------------------- This program is copyright by Sergej V. Aksenov (http://www.geocities.com/saksenov) and Ulrich D. Jentschura (jentschura@physik.tu-dresden.de), 2002. 
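   Compilation note: this file relies on pow(), fabs() and floor()
   from <math.h>, on DBL_EPSILON and DBL_MIN from <float.h>, and on
   malloc() and free() from <stdlib.h>.

   Minimal usage sketch (illustrative only; the argument values
   below are hypothetical and are not part of the original
   distribution):

       double z = 0.75, s = 2.0, v = 1.0, acc = 1.0e-12, result;
       int iter, flag;
       flag = lerchphi(&z, &s, &v, &acc, &result, &iter);
       if (flag != 0)
           ;  (handle one of the nonzero return codes listed below)

   All six arguments are passed by pointer, matching the calling
   sequence documented below.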
Version 1.00 (May 1, 2002) Calling sequence: int lerchphi(double *z, double *s, double *v, double *acc, double *result, int *iter) calculates Lerch's Phi transcendent Phi(z,s,v) with *result to a specified accuracy *acc after *iter iterations. Double precision is used throughout the calculation. The program uses direct summation of the defining series for |z| <= 0.5 and CNCT for 0.5 < |z| < 1.0. The integer return code has to be interpreted as follows. ------------- Return codes: ------------- 0 - Normal termination. 1 - Lerch Phi diverges for 1 <= |z|. 2 - Lerch Phi is not defined for integer v <= 0. 3 - pow() is not defined for v < 0 and s not integer. 4 - Long integer overflow in aj1234(). 5 - Underflow in remainder estimate omega in lerchphi(). 6 - No convergence within the maximum number of iterations. Implementation note: In subroutine aj1234(), defining variables ind and two2k as type double instead of long int might eliminate overflow error which occurs for high indices (error code 4). */ #include #include #include #define macheps DBL_EPSILON #define machmin DBL_MIN /* If preprocessor macro ADD_UNDERSCORE was defined, add underscore to the function name --- needed for linking to Fortran programs on a Sun. */ #if (ADD_UNDERSCORE) #define lerchphi lerchphi_ #endif /* Function that computes van Wijngaarden's A_j for a given j. */ static int aj1234(double *z, double *s, double *v, int j, double *acc, double *res) { double sum, bjk, z2ind; int k, flag; unsigned long int ind, two2k; sum = bjk = 0.0; k = -1; two2k = 1; flag = 0; /* Sum b^j_k's over k. */ for (;;) { k++; /* Index for the term of the original series. */ if (k > 0) two2k *= 2; ind = two2k * (j + 1) - 1; /* If long integer overflow occurs, variables become zero. Not relevant in v1.0 because two2k and ind are double type. */ if (k > 0 && (two2k == 0 || ind == 0)) { flag = 4; break; } /* Increment the sum. */ z2ind = pow(*z, ind); bjk = two2k * z2ind / pow(*v + ind, *s); sum += bjk; /* Stop summation if either sum is zero or |term/sum| is below requested accuracy. */ if (fabs(sum) <= machmin || fabs(bjk/sum) < 1.0e-2 * (*acc)) break; } *res = sum; return flag; } /* Function that computes approximation to Lerch Phi as a converging sequence of CNC transforms S^n_k. */ int lerchphi(double *z, double *s, double *v, double *acc, double *result, int *iter) { const unsigned short int beta = 1, n = 0, imax = 100; unsigned short int j, m; int i, sign, flag; double v1, sn, eps0, eps, skn, skn0, omega, *num, *den, *StoreAj, factor, factor1, x, est, iom, sum1, cacc; /* Added 20090205 by T.Yee to suppress 4 warnings */ sum1 = est = 0.0; StoreAj = &v1; m = 0; /* Local copy of v. */ v1 = *v; /* Special cases. */ /* 1 <= |z|. (Return error, Lerch Phi diverges.) */ if (1.0 <= fabs(*z)) { *result = 1.0; *iter = 0; return 1; } /* v <= 0 is integer. (Return error, Lerch Phi is not defined.) */ if (fabs(floor(*v) - *v) <= macheps*fabs(*v) && *v <= 0.0) { *result = 1.0; *iter = 0; return 2; } /* v < 0 is not integer or zero and z != 0 (z == 0 considered below) ... */ if (*v < 0.0 && fabs(*z) > machmin) { /* s is not an integer. (Return error because pow() is not defined.) */ if (fabs(floor(*s) - *s) > macheps*fabs(*s)) { *result = 1.0; *iter = 0; return 3; } /* s is an integer. (Transform v to positive). */ else { m = - (int) floor(*v); v1 += m; sum1 = 0.0; if ((int) *s % 2 == 0) sign = 1; else sign = -1; for (i = 0; i <= m-1; i++) { if ((i > 0) && (*z < 0)) sign = -sign; sum1 += sign*pow(fabs(*z),i)/pow(fabs(*v+i),*s); } } } /* z = 0 and ... 
*/ if (fabs(*z) <= machmin) { /* ... v < 0 is not integer or zero and ... */ if (*v < 0) { /* s is not an integer. (Return error because pow() is not defined.) */ if (fabs(floor(*s) - *s) > macheps*fabs(*s)) { *result = 1.0; *iter = 0; return 3; } /* s is an integer. (Return first term of series.)*/ else { if ((int) *s % 2 == 0) sign = 1; else sign = -1; *result = sign * 1.0 / pow(fabs(*v), *s); } } /* ... v > 0. (Return first term of series.) */ else { *result = 1.0 / pow(*v, *s); *iter = 1; return 0; } } /* General case. */ /* Some initializations. */ /* sn denotes current partial sum of defining series: z > 0.5: sn is partial sum S_n of the van Wijngaarden transformed series. z <= 0.5: sn is the partial sum of the power series defining LerchPhi. skn0 and skn denote successive partial sums S^k_n that are same as sn in case of direct summation and delta-transformed in case of CNCT. eps0 and eps denote successive differences between partial sums S^k_n. */ eps0 = skn = skn0 = sn = 0.0; /* omega is next term of a partial sum (of defining power series for direct summation, of van Wijngaarden transformed series for CNCT) and also becomes a remainder estimate in the delta transformation in CNCT). */ /* For z <= 0.5 van Wijngaarden transformation is not used [hence no calls to aj1234()]. */ /* Direct summation and CNCT (z < -0.5) case. */ if (*z <= 0.5) omega = 1.0 / pow(v1, *s); /* CNCT (z > 0.5) case. */ else { flag = aj1234(z, s, &v1, 0, acc, &omega); if (flag) { *result = 1.0; *iter = 0; return flag; } } /* Allocate memory for working arrays. */ num = (double *) malloc(imax * sizeof(double)); den = (double *) malloc(imax * sizeof(double)); /* StoreAj is used only in CNCT */ if (*z > 0.5) StoreAj = (double *) malloc(imax * sizeof(double)); flag = 0; i = -1; sign = -1; /* Main loop: iterations for S^k_n. */ for (;;) { /* i points to current iterate. */ i++; /* Increment the sum. */ sign = -sign; sn += omega; /* Next term: omega. */ if (*z < 0.0) /* Direct summation and CNCT (z < -0.5) case. */ /* Recurrence for power series. */ omega = (*z) * pow((v1+i)/(v1+i+1), *s) * omega; else /* z > 0 */ { if (*z <= 0.5) /* "Direct summation". */ omega = (*z) * pow((v1+i)/(v1+i+1), *s) * omega; else /* CNCT (z > 0.5) case. */ { *(StoreAj+i) = sign * omega; if (i % 2 == 0) /* Recurrence for odd pointer i. */ {omega = -sign * 0.5 * (*(StoreAj+i/2) - pow(*z, i/2) / pow(v1+i/2, *s));} else { flag = aj1234(z, s, &v1, i+1, acc, &omega); if (flag) break; else omega = -sign * omega; } } } /* Direct summation case: store current sum and remainder estimate. */ if (fabs(*z) <= 0.5) { skn = sn; est = 2.0 * pow(fabs(*z), (i+1)) / pow(v1+i+1, *s); } /* CNCT case. */ else { /* Make sure omega is representable machine number. */ if (fabs(omega) <= machmin) { flag = 5; break; } else iom = 1.0 / omega; /* Last terms in sums of numerator and denominator of i-th partial sum. */ *(num+i) = sn * iom; *(den+i) = iom; /* Recurrence computation of numerator and denominator of a S_k^n. */ if (i > 0) { factor = 1.0; *(num+i-1) = *(num+i) - factor * (*(num+i-1)); *(den+i-1) = *(den+i) - factor * (*(den+i-1)); } factor1 = (double) (beta+n+i-1) * (beta+n+i-2); for(j = 2; j <= i; j++) { factor = factor1 / (beta+n+i+j-2) / (beta+n+i+j-3); *(num+i-j) = *(num+i-j+1) - factor * (*(num+i-j)); *(den+i-j) = *(den+i-j+1) - factor * (*(den+i-j)); } /* Current approximation of the sum S_k^n. */ skn = *num / *den; } /* else CNCT case. */ eps = fabs(skn - skn0); /* Check the three termination criteria. 
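   (In order: the scaled remainder estimate |est/skn| has fallen
   below the requested accuracy *acc; two successive partial sums
   agree exactly, eps <= 0; or the iteration cap imax is reached,
   which sets flag = 6.)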
*/ /* |est/skn| is less than the requested accuracy (est is a remainder estimate). */ if (i > 0 && eps < eps0) { if (fabs(*z) > 0.5) { x = eps/eps0; est = 2.0/x/(1.0-x)*eps; } cacc = fabs(est/skn); if (cacc < (*acc)) break; } /* Successive iterates skn are the same. */ if (eps <= 0.0) break; /* Maximum number of iterations is exceeded. */ if (i > imax-2) { flag = 6; break; } /* Go on to the next iteration. */ skn0 = skn; eps0 = eps; } /* for */ /* Store the resulting sum. */ if (*v < 0) { sign = 1; if ((*z < 0) && (m % 2 != 0)) sign = -1; *result = sum1 + skn * sign * pow(fabs(*z),m); } else *result = skn; /* Store the number of iterations. */ *iter = i + 1; /* Clean up. */ free(num); free(den); if (*z > 0.5) free(StoreAj); return flag; } #undef macheps #undef machmin /* Code below written by T. Yee 14/6/06; is a wrapper function */ void lerchphi123(int *err, int *L, double *z, double *s, double *v, double *acc, double *result, int *iter) { int ell; for(ell = 0; ell < *L; ell++) { err[ell] = lerchphi(z+ell, s+ell, v+ell, acc, result+ell, iter); } } VGAM/src/fgam.f0000644000176200001440000005740213565414527012700 0ustar liggesusersc 24/8/99 c This is the original fgam.f file c It needs to be compiled and loaded into R in order to smooth. c All of this is automatically in Splus subroutine vbsplvd ( t, k, x, left, a, dbiatx, nderiv ) implicit double precision(a-h,o-z) calls bsplvb calculates value and deriv.s of all b-splines which do not vanish at x c c****** i n p u t ****** c t the knot array, of length left+k (at least) c k the order of the b-splines to be evaluated c x the point at which these values are sought c left an integer indicating the left endpoint of the interval of c interest. the k b-splines whose support contains the interval c (t(left), t(left+1)) c are to be considered. c a s s u m p t i o n - - - it is assumed that c t(left) .lt. t(left+1) c division by zero will result otherwise (in b s p l v b ). c also, the output is as advertised only if c t(left) .le. x .le. t(left+1) . c nderiv an integer indicating that values of b-splines and their c derivatives up to but not including the nderiv-th are asked c for. ( nderiv is replaced internally by the integer in (1,k) c closest to it.) c c****** w o r k a r e a ****** c a an array of order (k,k), to contain b-coeff.s of the derivat- c ives of a certain order of the k b-splines of interest. c c****** o u t p u t ****** c dbiatx an array of order (k,nderiv). its entry (i,m) contains c value of (m-1)st derivative of (left-k+i)-th b-spline of c order k for knot sequence t , i=m,...,k; m=1,...,nderiv. c c****** m e t h o d ****** c values at x of all the relevant b-splines of order k,k-1,..., c k+1-nderiv are generated via bsplvb and stored temporarily c in dbiatx . then, the b-coeffs of the required derivatives of the c b-splines of interest are generated by differencing, each from the c preceding one of lower order, and combined with the values of b- c splines of corresponding order in dbiatx to produce the desired c values. c integer k,left,nderiv, i,ideriv,il,j,jlow,jp1mid,kp1,kp1mm, * ldummy,m,mhigh double precision a(k,k),dbiatx(k,nderiv),t(*),x double precision factor,fkp1mm,sum mhigh = max0(min0(nderiv,k),1) c mhigh is usually equal to nderiv. kp1 = k+1 call bsplvb(t,kp1-mhigh,1,x,left,dbiatx) if (mhigh .eq. 1) go to 99 c the first column of dbiatx always contains the b-spline values c for the current order. 
these are stored in column k+1-current c order before bsplvb is called to put values for the next c higher order on top of it. ideriv = mhigh do 15 m=2,mhigh jp1mid = 1 do 11 j=ideriv,k dbiatx(j,ideriv) = dbiatx(jp1mid,1) jp1mid = jp1mid + 1 11 continue ideriv = ideriv - 1 call bsplvb(t,kp1-ideriv,2,x,left,dbiatx) 15 continue c c at this point, b(left-k+i, k+1-j)(x) is in dbiatx(i,j) for c i=j,...,k and j=1,...,mhigh ('=' nderiv). in particular, the c first column of dbiatx is already in final form. to obtain cor- c responding derivatives of b-splines in subsequent columns, gene- c rate their b-repr. by differencing, then evaluate at x. c jlow = 1 do 20 i=1,k do 19 j=jlow,k a(j,i) = 0d0 19 continue jlow = i a(i,i) = 1d0 20 continue c at this point, a(.,j) contains the b-coeffs for the j-th of the c k b-splines of interest here. c c c c 20161111: was originally c do 40 m=2,mhigh do 400 m=2,mhigh kp1mm = kp1 - m fkp1mm = dble(kp1mm) il = left i = k c c for j=1,...,k, construct b-coeffs of (m-1)st derivative of c b-splines from those for preceding derivative by differencing c and store again in a(.,j) . the fact that a(i,j) = 0 for c i .lt. j is used.sed. do 25 ldummy=1,kp1mm factor = fkp1mm/(t(il+kp1mm) - t(il)) c the assumption that t(left).lt.t(left+1) makes denominator c in factor nonzero. do 24 j=1,i a(i,j) = (a(i,j) - a(i-1,j))*factor 24 continue il = il - 1 i = i - 1 25 continue c c for i=1,...,k, combine b-coeffs a(.,i) with b-spline values c stored in dbiatx(.,m) to get value of (m-1)st derivative of c i-th b-spline (of interest here) at x , and store in c dbiatx(i,m). storage of this value over the value of a b-spline c of order m there is safe since the remaining b-spline derivat- c ive of the same order do not use this value due to the fact c that a(j,i) = 0 for j .lt. i . c Originally: c 30 do 40 i=1,k do 40 i=1,k sum = 0. jlow = max0(i,m) do 35 j=jlow,k sum = a(j,i)*dbiatx(j,m) + sum 35 continue dbiatx(i,m) = sum 40 continue c 20161111: twyee added this line (expanded 40 to two lines). 400 continue 99 return end subroutine bsplvb ( t, jhigh, index, x, left, biatx ) implicit double precision(a-h,o-z) calculates the value of all possibly nonzero b-splines at x of order c c jout = dmax( jhigh , (j+1)*(index-1) ) c c with knot sequence t . c c****** i n p u t ****** c t.....knot sequence, of length left + jout , assumed to be nonde- c creasing. a s s u m p t i o n . . . . c t(left) .lt. t(left + 1) . c d i v i s i o n b y z e r o will result if t(left) = t(left+1) c jhigh, c index.....integers which determine the order jout = max(jhigh, c (j+1)*(index-1)) of the b-splines whose values at x are to c be returned. index is used to avoid recalculations when seve- c ral columns of the triangular array of b-spline values are nee- c ded (e.g., in bvalue or in vbsplvd ). precisely, c if index = 1 , c the calculation starts from scratch and the entire triangular c array of b-spline values of orders 1,2,...,jhigh is generated c order by order , i.e., column by column . c if index = 2 , c only the b-spline values of order j+1, j+2, ..., jout are ge- c nerated, the assumption being that biatx , j , deltal , deltar c are, on entry, as they were on exit at the previous call. c in particular, if jhigh = 0, then jout = j+1, i.e., just c the next column of b-spline values is generated. c c w a r n i n g . . . the restriction jout .le. jmax (= 20) is im- c posed arbitrarily by the dimension statement for deltal and c deltar below, but is n o w h e r e c h e c k e d for . 
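c
c   a minimal usage sketch (the knot sequence and evaluation point
c   here are hypothetical, chosen only for illustration):  with
c   knots  t = (0,0,0,0,1,2,3,3,3,3),  jhigh = 4,  index = 1,  and
c   x = 0.5  so that  t(4) .le. x .lt. t(5)  and hence  left = 4 ,
c        call bsplvb ( t, 4, 1, x, 4, biatx )
c   fills biatx(1),...,biatx(4) with the values at x of the four
c   cubic b-splines that are nonzero on (t(4),t(5)); being a
c   partition of unity, they sum to 1.
c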
c c x.....the point at which the b-splines are to be evaluated. c left.....an integer chosen (usually) so that c t(left) .le. x .le. t(left+1) . c c****** o u t p u t ****** c biatx.....array of length jout , with biatx(i) containing the val- c ue at x of the polynomial of order jout which agrees with c the b-spline b(left-jout+i,jout,t) on the interval (t(left), c t(left+1)) . c c****** m e t h o d ****** c the recurrence relation c c x - t(i) t(i+j+1) - x c b(i,j+1)(x) = -----------b(i,j)(x) + ---------------b(i+1,j)(x) c t(i+j)-t(i) t(i+j+1)-t(i+1) c c is used (repeatedly) to generate the (j+1)-vector b(left-j,j+1)(x), c ...,b(left,j+1)(x) from the j-vector b(left-j+1,j)(x),..., c b(left,j)(x), storing the new values in biatx over the old. the c facts that c b(i,1) = 1 if t(i) .le. x .lt. t(i+1) c and that c b(i,j)(x) = 0 unless t(i) .le. x .lt. t(i+j) c are used. the particular organization of the calculations follows al- c gorithm (8) in chapter x of the text. c parameter(jmax = 20) integer index,jhigh,left, i,j,jp1 double precision biatx(jhigh),t(*),x, deltal(jmax) double precision deltar(jmax),saved,term c dimension biatx(jout), t(left+jout) current fortran standard makes it impossible to specify the length of c t and of biatx precisely without the introduction of otherwise c superfluous additional arguments. data j/1/ c save j,deltal,deltar (valid in fortran 77) c c c c 20161111; originally: c go to (10,20), index c See https://www.obliquity.com/computer/fortran/control.html if (index .eq. 1) then go to 10 else if (index .eq. 2) then go to 20 end if c c c c 10 j = 1 biatx(1) = 1d0 if (j .ge. jhigh) go to 99 c 20 jp1 = j + 1 deltar(j) = t(left+j) - x deltal(j) = x - t(left+1-j) saved = 0d0 do 26 i=1,j term = biatx(i)/(deltar(i) + deltal(jp1-i)) biatx(i) = saved + deltar(i)*term saved = deltal(jp1-i)*term 26 continue biatx(jp1) = saved j = jp1 if (j .lt. jhigh) go to 20 c 99 return end c 20090105; converted bvalue into a subroutine. subroutine wbvalue ( t, bcoef, n, k, x, jderiv, bvalue) implicit double precision(a-h,o-z) double precision bvalue calls vinterv c calculates value at x of jderiv-th derivative of spline from b-repr. c the spline is taken to be continuous from the right. c c****** i n p u t ****** c t, bcoef, n, k......forms the b-representation of the spline f to c be evaluated. specifically, c t.....knot sequence, of length n+k, assumed nondecreasing. c bcoef.....b-coefficient sequence, of length n . c n.....length of bcoef and dimension of s(k,t), c a s s u m e d positive . c k.....order of the spline . c c w a r n i n g . . . the restriction k .le. kmax (=20) is imposed c arbitrarily by the dimension statement for aj, dm, dm below, c but is n o w h e r e c h e c k e d for. c c x.....the point at which to evaluate . c jderiv.....integer giving the order of the derivative to be evaluated c a s s u m e d to be zero or positive. c c****** o u t p u t ****** c bvalue.....the value of the (jderiv)-th derivative of f at x . c c****** m e t h o d ****** c the nontrivial knot interval (t(i),t(i+1)) containing x is lo- c cated with the aid of vinterv . the k b-coeffs of f relevant for c this interval are then obtained from bcoef (or taken to be zero if c not explicitly available) and are then differenced jderiv times to c obtain the b-coeffs of (d**jderiv)f relevant for that interval. c precisely, with j = jderiv, we have from x.(12) of the text that c c (d**j)f = sum ( bcoef(.,j)*b(.,k-j,t) ) c c where c / bcoef(.), , j .eq. 
0 c / c bcoef(.,j) = / bcoef(.,j-1) - bcoef(.-1,j-1) c / ----------------------------- , j .gt. 0 c / (t(.+k-j) - t(.))/(k-j) c c then, we use repeatedly the fact that c c sum ( a(.)*b(.,m,t)(x) ) = sum ( a(.,x)*b(.,m-1,t)(x) ) c with c (x - t(.))*a(.) + (t(.+m-1) - x)*a(.-1) c a(.,x) = --------------------------------------- c (x - t(.)) + (t(.+m-1) - x) c c to write (d**j)f(x) eventually as a linear combination of b-splines c of order 1 , and the coefficient for b(i,1,t)(x) must then c be the desired number (d**j)f(x). (see x.(17)-(19) of text). c parameter(kmax = 20) integer jderiv,k,n, i,ilo,imk,j,jc,jcmin,jcmax,jj,km1,mflag,nmi double precision bcoef(n),t(*),x double precision aj(kmax),dm(kmax),dp(kmax),fkmj c dimension t(n+k) current fortran standard makes it impossible to specify the length of c t precisely without the introduction of otherwise superfluous c additional arguments. bvalue = 0.0d0 if (jderiv .ge. k) go to 99 c c *** find i s.t. 1 .le. i .lt. n+k and t(i) .lt. t(i+1) and c t(i) .le. x .lt. t(i+1) . if no such i can be found, x lies c outside the support of the spline f and bvalue = 0. c (the asymmetry in this choice of i makes f rightcontinuous) if( (x.ne.t(n+1)) .or. (t(n+1).ne.t(n+k)) ) go to 700 i = n go to 701 700 call vinterv ( t, n+k, x, i, mflag ) if (mflag .ne. 0) go to 99 701 continue c *** if k = 1 (and jderiv = 0), bvalue = bcoef(i). km1 = k - 1 if (km1 .gt. 0) go to 1 bvalue = bcoef(i) go to 99 c c *** store the k b-spline coefficients relevant for the knot interval c (t(i),t(i+1)) in aj(1),...,aj(k) and compute dm(j) = x - t(i+1-j), c dp(j) = t(i+j) - x, j=1,...,k-1 . set any of the aj not obtainable c from input to zero. set any t.s not obtainable equal to t(1) or c to t(n+k) appropriately. 1 jcmin = 1 imk = i - k if (imk .ge. 0) go to 8 jcmin = 1 - imk do 5 j=1,i dm(j) = x - t(i+1-j) 5 continue do 6 j=i,km1 aj(k-j) = 0. dm(j) = dm(i) 6 continue go to 10 8 do 9 j=1,km1 dm(j) = x - t(i+1-j) 9 continue c 10 jcmax = k nmi = n - i if (nmi .ge. 0) go to 18 jcmax = k + nmi do 15 j=1,jcmax dp(j) = t(i+j) - x 15 continue do 16 j=jcmax,km1 aj(j+1) = 0. dp(j) = dp(jcmax) 16 continue go to 20 18 do 19 j=1,km1 dp(j) = t(i+j) - x 19 continue c 20 do 21 jc=jcmin,jcmax aj(jc) = bcoef(imk + jc) 21 continue c c *** difference the coefficients jderiv times. if (jderiv .eq. 0) go to 30 c 20161111; was: c do 23 j=1,jderiv do 233 j=1,jderiv kmj = k-j fkmj = dble(kmj) ilo = kmj do 23 jj=1,kmj aj(jj) = ((aj(jj+1) - aj(jj))/(dm(ilo) + dp(jj)))*fkmj ilo = ilo - 1 23 continue 233 continue c c *** compute value at x in (t(i),t(i+1)) of jderiv-th derivative, c given its relevant b-spline coeffs in aj(1),...,aj(k-jderiv). 30 if (jderiv .eq. km1) go to 39 jdrvp1 = jderiv + 1 c 20161111: was: c do 33 j=jdrvp1,km1 do 34 j=jdrvp1,km1 kmj = k-j ilo = kmj do 33 jj=1,kmj aj(jj) = (aj(jj+1)*dm(ilo) + aj(jj)*dp(jj))/(dm(ilo)+dp(jj)) ilo = ilo - 1 33 continue 34 continue 39 bvalue = aj(1) c 99 return end subroutine vinterv ( xt, lxt, x, left, mflag ) implicit double precision(a-h,o-z) computes left = max( i ; 1 .le. i .le. lxt .and. xt(i) .le. x ) . c c****** i n p u t ****** c xt.....a double precision sequence, of length lxt , assumed to be nondecreasing c lxt.....number of terms in the sequence xt . c x.....the point whose location with respect to the sequence xt is c to be determined. c c****** o u t p u t ****** c left, mflag.....both integers, whose value is c c 1 -1 if x .lt. xt(1) c i 0 if xt(i) .le. x .lt. xt(i+1) c lxt 1 if xt(lxt) .le. 
x c c in particular, mflag = 0 is the 'usual' case. mflag .ne. 0 c indicates that x lies outside the halfopen interval c xt(1) .le. y .lt. xt(lxt) . the asymmetric treatment of the c interval is due to the decision to make all pp functions cont- c inuous from the right. c c****** m e t h o d ****** c the program is designed to be efficient in the common situation that c it is called repeatedly, with x taken from an increasing or decrea- c sing sequence. this will happen, e.g., when a pp function is to be c graphed. the first guess for left is therefore taken to be the val- c ue returned at the previous call and stored in the l o c a l varia- c ble ilo . a first check ascertains that ilo .lt. lxt (this is nec- c essary since the present call may have nothing to do with the previ- c ous call). then, if xt(ilo) .le. x .lt. xt(ilo+1), we set left = c ilo and are done after just three comparisons. c otherwise, we repeatedly double the difference istep = ihi - ilo c while also moving ilo and ihi in the direction of x , until c xt(ilo) .le. x .lt. xt(ihi) , c after which we use bisection to get, in addition, ilo+1 = ihi . c left = ilo is then returned. c integer left,lxt,mflag, ihi,ilo,istep,middle double precision x,xt(lxt) data ilo /1/ c save ilo (a valid fortran statement in the new 1977 standard) ihi = ilo + 1 if (ihi .lt. lxt) go to 20 if (x .ge. xt(lxt)) go to 110 if (lxt .le. 1) go to 90 ilo = lxt - 1 ihi = lxt c 20 if (x .ge. xt(ihi)) go to 40 if (x .ge. xt(ilo)) go to 100 c c **** now x .lt. xt(ilo) . decrease ilo to capture x . c c c Originally: c 30 istep = 1 istep = 1 c c 31 ihi = ilo ilo = ihi - istep if (ilo .le. 1) go to 35 if (x .ge. xt(ilo)) go to 50 istep = istep*2 go to 31 35 ilo = 1 if (x .lt. xt(1)) go to 90 go to 50 c **** now x .ge. xt(ihi) . increase ihi to capture x . 40 istep = 1 41 ilo = ihi ihi = ilo + istep if (ihi .ge. lxt) go to 45 if (x .lt. xt(ihi)) go to 50 istep = istep*2 go to 41 45 if (x .ge. xt(lxt)) go to 110 ihi = lxt c c **** now xt(ilo) .le. x .lt. xt(ihi) . narrow the interval. 50 middle = (ilo + ihi)/2 if (middle .eq. ilo) go to 100 c note. it is assumed that middle = ilo in case ihi = ilo+1 . if (x .lt. xt(middle)) go to 53 ilo = middle go to 50 53 ihi = middle go to 50 c**** set output and return. 90 mflag = -1 left = 1 return 100 mflag = 0 left = ilo return 110 mflag = 1 left = lxt return end c ===================================================================== c These two subroutines, dpbfa8 and dpbsl8, are called by sslvrg. c Note: a rational cholesky version of these functions are available, c called vdpbfa7 and vdpbsl7 c T.Yee 7/10/99 c 1/7/02 c T.Yee has renamed dbpbfa to dbpbfa8 and dpbsl to dpbsl8, to ensure uniqueness subroutine dpbfa8(abd,lda,n,m,info) integer lda,n,m,info double precision abd(lda,*) c c c 20130419; Originally: c double precision abd(lda,1) c c c c c dpbfa8 factors a double precision symmetric positive definite c matrix stored in band form. c c dpbfa8 is usually called by dpbco, but it can be called c directly with a saving in time if rcond is not needed. c c on entry c c abd double precision(lda, n) c the matrix to be factored. the columns of the upper c triangle are stored in the columns of abd and the c diagonals of the upper triangle are stored in the c rows of abd . see the comments below for details. c c lda integer c the leading dimension of the array abd . c lda must be .ge. m + 1 . c c n integer c the order of the matrix a . c c m integer c the number of diagonals above the main diagonal. c 0 .le. m .lt. n . 
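c
c     illustration (not part of the original comments):  for a
c     symmetric positive definite tridiagonal matrix,  n = 4  and
c     m = 1 , so  lda .ge. 2 , and the storage scheme below gives
c            abd(1,j) = a(j-1,j)   (superdiagonal; abd(1,1) unused)
c            abd(2,j) = a(j,j)     (main diagonal)
c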
c c on return c c abd an upper triangular matrix r , stored in band c form, so that a = trans(r)*r . c c info integer c = 0 for normal return. c = k if the leading minor of order k is not c positive definite. c c band storage c c if a is a symmetric positive definite band matrix, c the following program segment will set up the input. c c m = (band width above diagonal) c do 20 j = 1, n c i1 = max0(1, j-m) c do 10 i = i1, j c k = i-j+m+1 c abd(k,j) = a(i,j) c 10 continue c 20 continue c c linpack. this version dated 08/14/78 . c cleve moler, university of new mexico, argonne national lab. c c subroutines and functions c c blas ddot c fortran max0,dsqrt c c internal variables c double precision ddot8,t double precision s integer ik,j,jk,k,mu c begin block with ...exits to 40 c c do 30 j = 1, n info = j s = 0.0d0 ik = m + 1 jk = max0(j-m,1) mu = max0(m+2-j,1) if (m .lt. mu) go to 20 do 10 k = mu, m t = abd(k,j) - ddot8(k-mu,abd(ik,jk),1,abd(mu,j),1) t = t/abd(m+1,jk) abd(k,j) = t s = s + t*t ik = ik - 1 jk = jk + 1 10 continue 20 continue s = abd(m+1,j) - s c ......exit if (s .le. 0.0d0) go to 40 abd(m+1,j) = dsqrt(s) 30 continue info = 0 40 continue return end subroutine dpbsl8(abd,lda,n,m,b) integer lda,n,m double precision abd(lda,*),b(*) c c c 20130419; originally: c double precision abd(lda,1),b(1) c c c dpbsl8 solves the double precision symmetric positive definite c band system a*x = b c using the factors computed by dpbco or dpbfa8. c c on entry c c abd double precision(lda, n) c the output from dpbco or dpbfa8. c c lda integer c the leading dimension of the array abd . c c n integer c the order of the matrix a . c c m integer c the number of diagonals above the main diagonal. c c b double precision(n) c the right hand side vector. c c on return c c b the solution vector x . c c error condition c c a division by zero will occur if the input factor contains c a zero on the diagonal. technically this indicates c singularity but it is usually caused by improper subroutine c arguments. it will not occur if the subroutines are called c correctly and info .eq. 0 . c c to compute inverse(a) * c where c is a matrix c with p columns c call dpbco(abd,lda,n,rcond,z,info) c if (rcond is too small .or. info .ne. 0) go to ... c do 10 j = 1, p c call dpbsl8(abd,lda,n,c(1,j)) c 10 continue c c linpack. this version dated 08/14/78 . c cleve moler, university of new mexico, argonne national lab. 
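c
c     a typical factor/solve pairing (an illustrative sketch, not
c     part of the original comments):
c
c           call dpbfa8(abd,lda,n,m,info)
c           if (info .ne. 0) go to ...
c        (the leading minor of order info is not positive definite)
c           call dpbsl8(abd,lda,n,m,b)
c        (on return b holds the solution x)
c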
c c subroutines and functions c c blas daxpy,ddot c fortran min0 c c internal variables c double precision ddot8,t integer k,kb,la,lb,lm c c solve trans(r)*y = b c do 10 k = 1, n lm = min0(k-1,m) la = m + 1 - lm lb = k - lm t = ddot8(lm,abd(la,k),1,b(lb),1) b(k) = (b(k) - t)/abd(m+1,k) 10 continue c c solve r*x = y c do 20 kb = 1, n k = n + 1 - kb lm = min0(k-1,m) la = m + 1 - lm lb = k - lm b(k) = b(k)/abd(m+1,k) t = -b(k) call daxpy8(lm,t,abd(la,k),1,b(lb),1) 20 continue return end VGAM/src/vgam3.c0000644000176200001440000024023413565414527012775 0ustar liggesusers #include #include #include #include #include void Yee_vbvs(int *f8yswcat, double gkdx5jal[], double rpyis2kc[], double sjwyig9t[], double kispwgx3[], int *acpios9q, int *order, int *wy1vqfzu); void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk, double wbkq9zyi[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[]); void fapc0tnbybnagt8k(int *iii, int *cz8qdfyj, int *tesdm5kv, double g9fvdrbw[], double osiz4fxy[], double rbne6ouj[], int *kxvq6sfw, int *nyfu9rod, int *wy1vqfzu, int *ldk, int *kvowz9ht, int *kuzxj1lo, int tgiyxdw1[], int dufozmt7[]); void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gkdx5jal, int *lqsahu0r, int *acpios9q, int *ldk, int *wy1vqfzu, int *kvowz9ht, double wbkq9zyi[], double lamvec[], int *aalgpft4y, double t8hwvalr[], double rpyis2kc[], double ui8ysltq[], double ifys6woa[], double hdnw2fts[], int *yzoe1rsp, int *fbd5yktj, int *ftnjamu2, double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double tt2[], int *cvnjhg2u, int itdcb8ilk[], // Added 20100313 double tdcb8ilk[] // Added 20100313 ); void fapc0tnbcn8kzpab(double gkdx5jal[], double sjwyig9t[], double rpyis2kc[], int *lqsahu0r, int *acpios9q, int *wy1vqfzu, double t8hwvalr[]); void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[], double sjwyig9t[], double tlgduey8[], double rbne6ouj[], double pygsw6ko[], double pasjmo8g[], double eshvo2ic[], double ueshvo2ic[], double onxjvw8u[], int *dvhw1ulq, int *wy1vqfzu, int *kvowz9ht, int *npjlv3mr, double conmat[], int *kgwmz4ip, int *iz2nbfjc, int *wueshvo2ic, int *npjlv3mreshvo2ic, int *dim2eshvo2ic); void fapc0tnbicpd0omv(double enaqpzk9[], double sjwyig9t[], double gkdx5jal[], double grmuyvx9[], int *ldk, int *lqsahu0r, int *acpios9q, int *wy1vqfzu, int *jzwsy6tp, double rbne6ouj[], double ifys6woa[], int *kvowz9ht, int *ftnjamu2); void fapc0tnbo0xlszqr(int *wy1vqfzu, double *g9fvdrbw, double *quc6khaf, double *bmb); void fapc0tnbvsel(int *nurohxe6t, int *nbpvaqm5z, int *wy1vqfzu, int *ldk, double minv[], double quc6khaf[]); void fapc0tnbovjnsmt2(double bmb[], double rbne6ouj[], double ifys6woa[], int *wy1vqfzu, int *kuzxj1lo, int *dimw, int *iii, int tgiyxdw1_[], int dufozmt7_[]); void fapc0tnbvicb2(double enaqpzk9[], double wpuarq2m[], double Dvector[], int *wy1vqfzu, int *f8yswcat); void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[], int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int *lqsahu0r, double wbkq9zyi[], double lamvec[], double hdnw2fts[], double kispwgx3[], double ui8ysltq[], int *kvowz9ht, int *fbd5yktj, int *ldk, int *aalgpft4y, int *yzoe1rsp, double rpyis2kc[], double gkdx5jals[], double ifys6woa[], double conmat[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, int *cvnjhg2u, int *acpios9q, int *iz2nbfjc, int *kgwmz4ip, int *npjlv3mr, int itdcb8ilk[], // Added 20100313 double tdcb8ilk[] // Added 20100313 ); void 
Yee_vbfa( int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[], double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double kispwgx3[], double m0ibglfx[], double zshtfg8c[], double ui8ysltq[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], double wpuarq2m[], double hjm2ktyr[], int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[]); void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[], int which[], double he7mqnvy[], double tlgduey8[], double rbne6ouj[], double wbkq9zyi[], double lamvec[], double hdnw2fts[], double kispwgx3[], double m0ibglfx[], double zshtfg8c[], double ui8ysltq[], double *zpcqv3uj, double vc6hatuj[], double fasrkub3[], int *qemj9asg, int ges1xpkr[], double wpuarq2m[], double hjm2ktyr[], int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], double *ghdetj8v, int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[], int *nhja0izq, int *yzoe1rsp, int *ueb8hndv, int *gtrlbz3e, int *rutyk8mg, int *xjc4ywlh, int *kvowz9ht, int *npjlv3mr, int *fbd5yktj, int *ldk, int *algpft4y, int itdcb8ilk[], double tdcb8ilk[]); void fapc0tnbx6kanjdh(double sjwyig9t[], double xout[], int *f8yswcat, int *wy1vqfzu); double fapc0tnbrd9beyfk(int *f8yswcat, double bhcji9gl[], double po8rwsmy[], double m0ibglfx[]); void fapc0tnbpitmeh0q(int *f8yswcat, double bhcji9gl[], double po8rwsmy[], double *lfu2qhid, double *lm9vcjob); void fapc0tnbdsrt0gem(int *f8yswcat, double sjwyig9t[], double po8rwsmy[], double bhcji9gl[], double ub4xioar[], double ui8ysltq[], int *yzoe1rsp); void fapc0tnbshm8ynte(int *ftnjamu2, int ezlgm2up[], double pygsw6ko[], double sjwyig9t[]); void vknootl2(double x[], int *f8yswcat, double gkdx5jal[], int *rvy1fpli, int *ukgwt7na); void Yee_pknootl2(double *gkdx5jal, int *f8yswcat, int *zo8wpibx, double *Toler_ankcghz2); void F77_NAME(wbvalue)(double*, double*, int*, int*, double*, int*, double*); void F77_NAME(vinterv)(double*, int*, double*, int*, int*); void F77_NAME(vbsplvd)(double*, int*, double*, int*, double*, double*,int*); void F77_NAME(vdpbfa7)(double*, int*, int*, int*, int*, double*); void F77_NAME(vdpbsl7)(double*, int*, int*, int*, double*, double*); void F77_NAME(vdqrsl)(double*, int*, int*, int*, double*, double*, double*, double*, double*, double*, double*, int*, int*); void F77_NAME(vqrdca)(double*, int*, int*, int*, double*, int*, double*, int*, double*); void Free_fapc0tnbyee_spline(double *wkumc9idosiz4fxy, double *wkumc9idenaqpzk9, double *wkumc9idbtwy, double *wkumc9idwk0, double *wkumc9idbk3ymcih, int *wkumc9idtgiyxdw1, int *wkumc9iddufozmt7); void Free_fapc0tnbewg7qruh(double *wkumc9idWrk1, int *wkumc9idges1xpkr, double *wkumc9idbeta, double *wkumc9idfasrkub3, double *wkumc9idsout, double *wkumc9idr0oydcxb, double *wkumc9idub4xioar, double *wkumc9ideffect, double *wkumc9idueshvo2ic, double *wkumc9ids0, double *wkumc9idpygsw6ko, double *wkumc9idpasjmo8g, double *wkumc9ideshvo2ic, double *wkumc9idonxjvw8u, double *wkumc9idwk4); void F77_NAME(vdigami)(double*, double*, double*, double*, double*, double*, double*, double*, double*, int*, double*); void VGAM_C_vdigami(double d[], double x[], double p[], double gplog[], double gp1log[], double psip[], double psip1[], double psidp[], double psidp1[], int *ifault, double *tmax, int *f8yswcat); void n5aioudkgt9iulbf(double sjwyig9t[], 
double ghz9vuba[], double po8rwsmy[], double gkdx5jal[], int *rvy1fpli, int *kuzxj1lo, double zyupcmk6[], double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[]); extern void n5aioudkdnaoqj0l(double *pjb6wfoq, double *xs, double *ys, double ws[], int *kuzxj1lo, int *nk, double gkdx5jal[], double coef[], double sz[], double ifys6woa[], double *wbkq9zyi, double parms[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, int *cvnjhg2u, int l3zpbstu[], int *xtov9rbf, int *wep0oibc, int *fbd5yktj); extern void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double tb[], int *nb); extern void fvlmz9iyC_qpsedg8x(int tgiyxdw1[], int dufozmt7[], int *wy1vqfzu); extern void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[], int *wy1vqfzu, int *dvhw1ulq, int *isolve); extern void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[], int *wy1vqfzu, int *dvhw1ulq, int *isolve); extern void fvlmz9iyC_mux22(double wpuarq2m[], double tlgduey8[], double lfu2qhid[], int *dimu, int *f8yswcat, int *wy1vqfzu); extern void fvlmz9iyC_vbks(double wpuarq2m[], double unvxka0m[], int *wy1vqfzu, int *f8yswcat, int *dimu); extern void fvlmz9iyC_lkhnw9yq(double wpuarq2m[], double ks3wejcv[], int *npjlv3mr, int *wy1vqfzu, int *dvhw1ulq); extern void fvlmz9iyC_mux17(double wpuarq2m[], double he7mqnvy[], int *wy1vqfzu, int *xjc4ywlh, int *f8yswcat, int *dimu, int *rutyk8mg); void VGAM_C_vdigami(double d[], double x[], double p[], double gplog[], double gp1log[], double psip[], double psip1[], double psidp[], double psidp1[], int *ifault, double *tmax, int *f8yswcat) { int ayfnwr1v; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { F77_CALL(vdigami)(d, x, p, gplog, gp1log, psip, psip1, psidp, psidp1, ifault, tmax); d += 6; x++; p++; gplog++; gp1log++; psip++; psip1++; psidp++; psidp1++; ifault++; } } void Yee_vbvs(int *f8yswcat, double gkdx5jal[], double rpyis2kc[], double sjwyig9t[], double kispwgx3[], int *acpios9q, int *order, int *wy1vqfzu) { double *chw8lzty; int ayfnwr1v, yq6lorbx, h2dpsbkr = 4; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { chw8lzty = sjwyig9t; for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { F77_CALL(wbvalue)(gkdx5jal, rpyis2kc, acpios9q, &h2dpsbkr, chw8lzty++, order, kispwgx3++); } rpyis2kc += *acpios9q; } } void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk, double wbkq9zyi[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[]) { int ayfnwr1v, yq6lorbx, ayfnwr1vupp; double *fpdlcqk9wbkq9zyi, *fpdlcqk9xecbg0pf, *fpdlcqk9z4grbpiq, *fpdlcqk9d7glzhbj, *fpdlcqk9v2eydbxs, *fpdlcqk9osiz4fxy; fpdlcqk9osiz4fxy = osiz4fxy + *ldk - 1; fpdlcqk9xecbg0pf = xecbg0pf; for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) { fpdlcqk9wbkq9zyi = wbkq9zyi; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9xecbg0pf; fpdlcqk9osiz4fxy += *ldk; } fpdlcqk9xecbg0pf++; } fpdlcqk9osiz4fxy = osiz4fxy + *wy1vqfzu * *ldk; fpdlcqk9osiz4fxy = fpdlcqk9osiz4fxy + *ldk - *wy1vqfzu - 1; fpdlcqk9z4grbpiq = z4grbpiq; ayfnwr1vupp = *acpios9q - 1; // 20140523; I changed the following line plus 2 other lines: for (ayfnwr1v = 1; ayfnwr1v <= ayfnwr1vupp; ayfnwr1v++) { fpdlcqk9wbkq9zyi = wbkq9zyi; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9z4grbpiq; fpdlcqk9osiz4fxy += *ldk; } fpdlcqk9z4grbpiq++; } fpdlcqk9osiz4fxy = osiz4fxy + *ldk + 2 * 
*wy1vqfzu * *ldk; fpdlcqk9osiz4fxy = fpdlcqk9osiz4fxy - 2 * *wy1vqfzu - 1; fpdlcqk9d7glzhbj = d7glzhbj; ayfnwr1vupp = *acpios9q - 2; for (ayfnwr1v = 1; ayfnwr1v <= ayfnwr1vupp; ayfnwr1v++) { fpdlcqk9wbkq9zyi = wbkq9zyi; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9d7glzhbj; fpdlcqk9osiz4fxy += *ldk; } fpdlcqk9d7glzhbj++; } fpdlcqk9osiz4fxy = osiz4fxy + *ldk + 3 * *wy1vqfzu * *ldk; fpdlcqk9osiz4fxy = fpdlcqk9osiz4fxy - 3 * *wy1vqfzu - 1; fpdlcqk9v2eydbxs = v2eydbxs; ayfnwr1vupp = *acpios9q - 3; for (ayfnwr1v = 1; ayfnwr1v <= ayfnwr1vupp; ayfnwr1v++) { fpdlcqk9wbkq9zyi = wbkq9zyi; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9v2eydbxs; fpdlcqk9osiz4fxy += *ldk; } fpdlcqk9v2eydbxs++; } } void fapc0tnbybnagt8k(int *iii, int *cz8qdfyj, int *tesdm5kv, double g9fvdrbw[], double osiz4fxy[], double rbne6ouj[], int *kxvq6sfw, int *nyfu9rod, int *wy1vqfzu, int *ldk, int *kvowz9ht, int *kuzxj1lo, int tgiyxdw1[], int dufozmt7[]) { double tmp_wrk; int urohxe6t, nead, bcol, brow, biuvowq2, nbj8tdsk; bcol = *cz8qdfyj + *tesdm5kv; brow = *cz8qdfyj; for (urohxe6t = 1; urohxe6t <= *kvowz9ht; urohxe6t++) { tmp_wrk = rbne6ouj[*iii -1 + (urohxe6t-1) * *kuzxj1lo] * g9fvdrbw[*kxvq6sfw-1] * g9fvdrbw[*nyfu9rod-1]; biuvowq2 = (brow-1) * *wy1vqfzu + tgiyxdw1[urohxe6t-1]; nbj8tdsk = (bcol-1) * *wy1vqfzu + dufozmt7[urohxe6t-1]; nead = nbj8tdsk - biuvowq2; osiz4fxy[*ldk - nead - 1 + (nbj8tdsk-1) * *ldk] += tmp_wrk; if (*tesdm5kv > 0 && dufozmt7[urohxe6t-1] != tgiyxdw1[urohxe6t-1]) { biuvowq2 = (brow-1) * *wy1vqfzu + dufozmt7[urohxe6t-1]; nbj8tdsk = (bcol-1) * *wy1vqfzu + tgiyxdw1[urohxe6t-1]; nead = nbj8tdsk - biuvowq2; osiz4fxy[*ldk - nead - 1 + (nbj8tdsk-1) * *ldk] += tmp_wrk; } } } void Free_fapc0tnbyee_spline(double *wkumc9idosiz4fxy, double *wkumc9idenaqpzk9, double *wkumc9idbtwy, double *wkumc9idwk0, double *wkumc9idbk3ymcih, int *wkumc9idtgiyxdw1, int *wkumc9iddufozmt7) { Free(wkumc9idosiz4fxy); Free(wkumc9idenaqpzk9); Free(wkumc9idbtwy); Free(wkumc9idwk0); Free(wkumc9idbk3ymcih); Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); } void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gkdx5jal, int *lqsahu0r, int *acpios9q, int *ldk, int *wy1vqfzu, int *kvowz9ht, double wbkq9zyi[], double lamvec[], int *aalgpft4y, double t8hwvalr[], double rpyis2kc[], double ui8ysltq[], double ifys6woa[], double hdnw2fts[], int *yzoe1rsp, int *fbd5yktj, int *ftnjamu2, double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, int *cvnjhg2u, int itdcb8ilk[], // Added 20100313 double tdcb8ilk[] // Added 20100313 ) { int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, dqlr5bse, pqzfxw4i, wep0oibc; int have_setup_sg = 0; /* == 1 if sg[0123] have been initialized */ int junkicrit = -1, xtov9rbf = 4, l3zpbstu[3], pn9eowxc; double jstx4uwe[4], g9fvdrbw[4], qaltf0nz = 0.1e-9, ms0qypiw[16], *fpdlcqk9btwy; int yu6izdrc = 0, pqneb2ra = 1, qhzja4ny = 2, bvsquk3z = 3, h2dpsbkr = 4; int arm0lkbg1, arm0lkbg2; double *wkumc9idosiz4fxy, *wkumc9idenaqpzk9, *wkumc9idbtwy, *wkumc9idwk0, *wkumc9idbk3ymcih; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7; int imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2; double kpftdm0jmynl7uaq = tdcb8ilk[0], kpftdm0jzustx4fw = tdcb8ilk[1], kpftdm0jtol = tdcb8ilk[2], kpftdm0jeps = tdcb8ilk[3]; double svdbx3tk_tt1, svdbx3tk_tt2 = 0.0, svdbx3tk_g2dnwteb = -1.0; double *wkumc9idzvau2lct, *wkumc9idf6lsuzax, *wkumc9idfvh2rwtc, *wkumc9iddcfir2no; double 
*wkumc9idxwy; double *fpdlcqk9ifys6woa; wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int); wkumc9iddufozmt7 = Calloc(imk5wjxg, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, wy1vqfzu); wkumc9idosiz4fxy = Calloc(*ldk * (*wy1vqfzu * *acpios9q), double); wkumc9idenaqpzk9 = Calloc(*ldk * (*acpios9q * *wy1vqfzu), double); wkumc9idbtwy = Calloc(*wy1vqfzu * *acpios9q , double); wkumc9idbk3ymcih = Calloc( *lqsahu0r , double); wkumc9idwk0 = Calloc(*acpios9q * *wy1vqfzu , double); for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { if (wbkq9zyi[yq6lorbx-1] == 0.0) { pn9eowxc = 0; } else { /// vvv pn9eowxc = 1; if (have_setup_sg == 0) { have_setup_sg = 1; // Need only be done once n5aioudkzosq7hub(xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, gkdx5jal, acpios9q); for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) { svdbx3tk_tt2 += xecbg0pf[ayfnwr1v-1]; } } wkumc9idxwy = Calloc(*acpios9q, double); wkumc9idzvau2lct = Calloc(*acpios9q, double); wkumc9idf6lsuzax = Calloc(*acpios9q, double); wkumc9idfvh2rwtc = Calloc(*acpios9q, double); wkumc9iddcfir2no = Calloc(*acpios9q, double); n5aioudkgt9iulbf(sjwyig9t, tlgduey8 + (yq6lorbx-1) * *lqsahu0r, // bhcji9gl rbne6ouj + (yq6lorbx-1) * *lqsahu0r, // po8rwsmy, gkdx5jal, lqsahu0r, acpios9q, wkumc9idxwy, // lqsahu0r === kuzxj1lo wkumc9idzvau2lct, wkumc9idf6lsuzax, wkumc9idfvh2rwtc, wkumc9iddcfir2no); svdbx3tk_tt1 = 0.0; for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) { svdbx3tk_tt1 += wkumc9idzvau2lct[ayfnwr1v-1]; } Free(wkumc9idxwy); Free(wkumc9idzvau2lct); Free(wkumc9idf6lsuzax); Free(wkumc9idfvh2rwtc); Free(wkumc9iddcfir2no); svdbx3tk_g2dnwteb = svdbx3tk_tt1 / svdbx3tk_tt2; lamvec[yq6lorbx-1] = svdbx3tk_g2dnwteb * pow(16.0, wbkq9zyi[yq6lorbx-1] * 6.0 - 2.0); } /// vvv if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu || pn9eowxc == 0) { // ggg wep0oibc = 1; l3zpbstu[0] = junkicrit; l3zpbstu[1] = pn9eowxc; l3zpbstu[2] = itdcb8ilk[0]; jstx4uwe[0] = kpftdm0jmynl7uaq; // Prior to 20100313: was waiez6nt; jstx4uwe[1] = kpftdm0jzustx4fw; // Prior to 20100313: was fp6nozvx; jstx4uwe[2] = kpftdm0jtol; // Prior to 20100313: was Toler_df; jstx4uwe[3] = kpftdm0jeps; // Introduced as an arg, 20100313 if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu) { // hhh for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] /= rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r]; } have_setup_sg = 1; n5aioudkdnaoqj0l(hdnw2fts + yq6lorbx-1, sjwyig9t, tlgduey8 + (yq6lorbx-1) * *lqsahu0r, rbne6ouj + (yq6lorbx-1) * *lqsahu0r, lqsahu0r, acpios9q, gkdx5jal, rpyis2kc + (yq6lorbx-1) * *acpios9q, t8hwvalr + (yq6lorbx-1) * *lqsahu0r, ifys6woa + (yq6lorbx-1) * *lqsahu0r, // *ftnjamu2, wbkq9zyi + yq6lorbx-1, jstx4uwe, xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, tt2, cvnjhg2u, l3zpbstu, &xtov9rbf, &wep0oibc, fbd5yktj); lamvec[yq6lorbx-1] = jstx4uwe[0]; if (*fbd5yktj) { Rprintf("Error in n5aioudkdnaoqj0l; inside Yee_spline\n"); Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9, wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih, wkumc9idtgiyxdw1, wkumc9iddufozmt7); return; } if (*yzoe1rsp) { for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { gp1jxzuh = ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2; bpvaqm5z = ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r; ui8ysltq[gp1jxzuh] = ifys6woa[bpvaqm5z] / rbne6ouj[bpvaqm5z]; } } } else { // hhh and uuu have_setup_sg = 1; n5aioudkdnaoqj0l(hdnw2fts + yq6lorbx-1, sjwyig9t, wkumc9idbk3ymcih, rbne6ouj + (yq6lorbx-1) * *lqsahu0r, lqsahu0r, acpios9q, gkdx5jal, rpyis2kc + (yq6lorbx-1) * *acpios9q, t8hwvalr + 
(yq6lorbx-1) * *lqsahu0r,
                         ifys6woa + (yq6lorbx-1) * *lqsahu0r,  // 20130427
                         wbkq9zyi + yq6lorbx-1, jstx4uwe,
                         xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
                         tt2, cvnjhg2u, l3zpbstu,
                         &xtov9rbf, &wep0oibc, fbd5yktj);
        lamvec[yq6lorbx-1] = jstx4uwe[0];
        if (*fbd5yktj) {
          Rprintf("Error in n5aioudkdnaoqj0l; inside Yee_spline\n");
          Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
                                  wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
                                  wkumc9idtgiyxdw1, wkumc9iddufozmt7);
          return;
        }
      }  // uuu
      if (*fbd5yktj) {
        Rprintf("Error in n5aioudkdnaoqj0l: fbd5yktj = %3d.\n", *fbd5yktj);
        Rprintf("Called within Yee_spline.\n");
        Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
                                wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
                                wkumc9idtgiyxdw1, wkumc9iddufozmt7);
        return;
      }
    }  // ggg
  }

  if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu) {
    Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
                            wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
                            wkumc9idtgiyxdw1, wkumc9iddufozmt7);
    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
      hdnw2fts[yq6lorbx-1] -= 1.0;  // Decrement it.
    }
    return;
  }

  for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
    arm0lkbg1 = *acpios9q + 1;
    F77_CALL(vinterv)(gkdx5jal, &arm0lkbg1, sjwyig9t + ayfnwr1v-1,
                      &dqlr5bse, &pqzfxw4i);
    if (pqzfxw4i == 1) {
      if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jal[dqlr5bse-1] + qaltf0nz)) {
        dqlr5bse--;
      } else {
        Rprintf("Freeing memory in Yee_spline and returning.\n");
        Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
                                wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
                                wkumc9idtgiyxdw1, wkumc9iddufozmt7);
        return;
      }
    }
    F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, sjwyig9t + ayfnwr1v-1, &dqlr5bse,
                      ms0qypiw, g9fvdrbw, &pqneb2ra);

    yq6lorbx = dqlr5bse - 4 + 1;
    fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
    for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
      *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] *
                       g9fvdrbw[0];
      fpdlcqk9btwy++;
    }
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &pqneb2ra, &pqneb2ra,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &pqneb2ra, &qhzja4ny,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &qhzja4ny, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &pqneb2ra, &bvsquk3z,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &bvsquk3z, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &pqneb2ra, &h2dpsbkr,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);

    yq6lorbx = dqlr5bse - 4 + 2;
    fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
    for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
      *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] *
                       g9fvdrbw[1];
      fpdlcqk9btwy++;
    }
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &qhzja4ny, &qhzja4ny,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &qhzja4ny, &bvsquk3z,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);
    fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &qhzja4ny, g9fvdrbw,
                     wkumc9idosiz4fxy, rbne6ouj, &qhzja4ny, &h2dpsbkr,
                     wy1vqfzu, ldk, kvowz9ht, lqsahu0r,
                     wkumc9idtgiyxdw1, wkumc9iddufozmt7);

    yq6lorbx = dqlr5bse - 4 + 3;
    fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
    for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
*fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[2]; fpdlcqk9btwy++; } fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc, g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj, &bvsquk3z, &bvsquk3z, wy1vqfzu, ldk, kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7); fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra, g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj, &bvsquk3z, &h2dpsbkr, wy1vqfzu, ldk, kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7); yq6lorbx = dqlr5bse - 4 + 4; fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu; for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) { *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[3]; fpdlcqk9btwy++; } fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc, g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj, &h2dpsbkr, &h2dpsbkr, wy1vqfzu, ldk, kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7); } fapc0tnbtfeswo7c(wkumc9idosiz4fxy, acpios9q, wy1vqfzu, ldk, lamvec, xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs); arm0lkbg1 = *acpios9q * *wy1vqfzu; arm0lkbg2 = *ldk - 1; F77_CALL(vdpbfa7)(wkumc9idosiz4fxy, ldk, &arm0lkbg1, &arm0lkbg2, aalgpft4y, wkumc9idwk0); if (*aalgpft4y) { Rprintf("Error in subroutine vdpbfa7; inside Yee_spline.\n"); Rprintf("*aalgpft4y = %3d\n", *aalgpft4y); Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9, wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih, wkumc9idtgiyxdw1, wkumc9iddufozmt7); return; } arm0lkbg1 = *acpios9q * *wy1vqfzu; arm0lkbg2 = *ldk - 1; F77_CALL(vdpbsl7)(wkumc9idosiz4fxy, ldk, &arm0lkbg1, &arm0lkbg2, wkumc9idbtwy, wkumc9idwk0); fpdlcqk9btwy = wkumc9idbtwy; for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { rpyis2kc[ ayfnwr1v-1 + (yq6lorbx-1) * *acpios9q] = *fpdlcqk9btwy++; } } fapc0tnbcn8kzpab(gkdx5jal, sjwyig9t, rpyis2kc, lqsahu0r, acpios9q, wy1vqfzu, t8hwvalr); arm0lkbg1 = *acpios9q * *wy1vqfzu; arm0lkbg2 = *ldk - 1; fapc0tnbvicb2(wkumc9idenaqpzk9, wkumc9idosiz4fxy, wkumc9idwk0, &arm0lkbg2, &arm0lkbg1); fapc0tnbicpd0omv(wkumc9idenaqpzk9, sjwyig9t, gkdx5jal, ui8ysltq, ldk, lqsahu0r, acpios9q, wy1vqfzu, yzoe1rsp, rbne6ouj, ifys6woa, kvowz9ht, ftnjamu2); for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { hdnw2fts[yq6lorbx-1] = -1.0; // Initialize; subtract the linear part } fpdlcqk9ifys6woa = ifys6woa; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { hdnw2fts[yq6lorbx-1] += *fpdlcqk9ifys6woa++; } } Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9, wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih, wkumc9idtgiyxdw1, wkumc9iddufozmt7); } void fapc0tnbcn8kzpab(double gkdx5jals[], double sjwyig9t[], double rpyis2kc[], int *lqsahu0r, int *acpios9q, int *wy1vqfzu, double t8hwvalr[]) { int ayfnwr1v, yq6lorbx, yu6izdrc = 0, h2dpsbkr = 4; double *chw8lzty; for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) { chw8lzty = sjwyig9t; for (ayfnwr1v = 0; ayfnwr1v < *lqsahu0r; ayfnwr1v++) { F77_CALL(wbvalue)(gkdx5jals, rpyis2kc, acpios9q, &h2dpsbkr, chw8lzty++, &yu6izdrc, t8hwvalr++); } rpyis2kc += *acpios9q; } } void Free_fapc0tnbvsuff9(double *wkumc9idwk1a, double *wkumc9idwk1b, double *wkumc9idwk2a, double *wkumc9idwk2b, double *wkumc9ideshvo2ic, double *wkumc9idonxjvw8u, int *wkumc9idtgiyxdw11, int *wkumc9iddufozmt71, int *wkumc9idtgiyxdw12, int *wkumc9iddufozmt72, int *iz2nbfjc) { Free(wkumc9idwk1a); Free(wkumc9idwk1b); Free(wkumc9idwk2a); Free(wkumc9idwk2b); if (! 
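/* Annotation (editorial, not original VGAM source): the vdpbfa7/vdpbsl7 pair
 * invoked a few lines above follows the classic LINPACK dpbfa/dpbsl protocol
 * for a symmetric positive-definite band system -- factor once, check the
 * info flag, then solve in place -- which matches the aalgpft4y error check
 * between the two calls.  Hedged sketch of that call pattern (ab, ldab, n,
 * kd, b, info, work are illustrative names, not from this source):
 *
 *   F77_CALL(vdpbfa7)(ab, &ldab, &n, &kd, &info, work);  // band Cholesky factor
 *   if (info != 0) { ... bail out: matrix not positive definite ... }
 *   F77_CALL(vdpbsl7)(ab, &ldab, &n, &kd, b, work);      // solve A x = b in place
 *
 * Here n is acpios9q * wy1vqfzu (total coefficients) and kd is ldk - 1
 * (the bandwidth), judging from the arm0lkbg1/arm0lkbg2 assignments above.
 */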
*iz2nbfjc) { Free(wkumc9ideshvo2ic); Free(wkumc9idonxjvw8u); } Free(wkumc9idtgiyxdw11); Free(wkumc9iddufozmt71); Free(wkumc9idtgiyxdw12); Free(wkumc9iddufozmt72); } void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[], double sjwyig9t[], double tlgduey8[], double rbne6ouj[], double pygsw6ko[], double pasjmo8g[], double eshvo2ic[], double ueshvo2ic[], double onxjvw8u[], int *dvhw1ulq, int *wy1vqfzu, int *kvowz9ht, int *npjlv3mr, double conmat[], int *kgwmz4ip, int *iz2nbfjc, int *wueshvo2ic, int *npjlv3mreshvo2ic, int *dim2eshvo2ic) { double *qnwamo0e, *qnwamo0e1, *qnwamo0e2; int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, *ptri; int pqneb2ra = 1; double *wkumc9idwk1a, *wkumc9idwk1b, *wkumc9idwk2a, *wkumc9idwk2b, *wkumc9ideshvo2ic, *wkumc9idonxjvw8u; int *wkumc9idtgiyxdw11, *wkumc9iddufozmt71, *wkumc9idtgiyxdw12, *wkumc9iddufozmt72; int zyojx5hw = *wy1vqfzu * *wy1vqfzu, imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2, n2colb = *kgwmz4ip * *kgwmz4ip, n3colb = *kgwmz4ip * (*kgwmz4ip + 1) / 2; double hmayv1xt1 = 1.0, hmayv1xt2; hmayv1xt2 = hmayv1xt1 + 1.0; wkumc9ideshvo2ic = &hmayv1xt2; wkumc9idonxjvw8u = &hmayv1xt2; wkumc9idwk1a = Calloc(zyojx5hw , double); wkumc9idwk1b = Calloc(*wy1vqfzu , double); wkumc9idwk2a = Calloc(n2colb , double); wkumc9idwk2b = Calloc(*kgwmz4ip , double); wkumc9idtgiyxdw11 = Calloc(imk5wjxg , int); wkumc9iddufozmt71 = Calloc(imk5wjxg , int); wkumc9idtgiyxdw12 = Calloc(n3colb , int); wkumc9iddufozmt72 = Calloc(n3colb , int); if (*iz2nbfjc) { if (*npjlv3mr < *kvowz9ht || *kgwmz4ip != *wy1vqfzu) { Rprintf("Error in fapc0tnbvsuff9: "); Rprintf("must have npjlv3mr >= kvowz9ht & kgwmz4ip = M\n"); Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b, wkumc9idwk2a, wkumc9idwk2b, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idtgiyxdw11, wkumc9iddufozmt71, wkumc9idtgiyxdw12, wkumc9iddufozmt72, iz2nbfjc); *dvhw1ulq = 0; return; } } else { if (*npjlv3mreshvo2ic < n3colb || *dim2eshvo2ic < n3colb) { Rprintf("Error in fapc0tnbvsuff9 with nontrivial constraints:\n"); Rprintf("must have npjlv3mreshvo2ic and dim2eshvo2ic both >= n3colb\n"); Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b, wkumc9idwk2a, wkumc9idwk2b, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idtgiyxdw11, wkumc9iddufozmt71, wkumc9idtgiyxdw12, wkumc9iddufozmt72, iz2nbfjc); *dvhw1ulq = 0; return; } wkumc9ideshvo2ic = Calloc(*lqsahu0r * zyojx5hw , double); wkumc9idonxjvw8u = Calloc(*lqsahu0r * *wy1vqfzu , double); } fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw11, wkumc9iddufozmt71, wy1vqfzu); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw12, wkumc9iddufozmt72, kgwmz4ip); ptri = ezlgm2up; qnwamo0e = sjwyig9t; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { pygsw6ko[(*ptri++) - 1] = *qnwamo0e++; } if (*iz2nbfjc) { qnwamo0e = onxjvw8u; for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) { for (ayfnwr1v = 0; ayfnwr1v < *lqsahu0r; ayfnwr1v++) { *qnwamo0e++ = 0.0e0; } } } if (*iz2nbfjc) { qnwamo0e = eshvo2ic; for (yq6lorbx = 1; yq6lorbx <= *dim2eshvo2ic; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { *qnwamo0e++ = 0.0e0; } } } for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) { wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 + (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] = wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 + (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] = rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]; } qnwamo0e1 = (*iz2nbfjc) ? eshvo2ic : wkumc9ideshvo2ic; qnwamo0e2 = (*iz2nbfjc) ? 
onxjvw8u : wkumc9idonxjvw8u; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { qnwamo0e2[ezlgm2up[ayfnwr1v-1]-1 + (yq6lorbx-1) * *lqsahu0r] += wkumc9idwk1a[yq6lorbx -1 + (gp1jxzuh-1) * *wy1vqfzu] * tlgduey8[ayfnwr1v -1 + (gp1jxzuh-1) * *ftnjamu2]; } } for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) { qnwamo0e1[ezlgm2up[ayfnwr1v-1]-1 + (yq6lorbx-1) * *lqsahu0r] += rbne6ouj[ayfnwr1v -1 + (yq6lorbx-1) * *ftnjamu2]; } } *dvhw1ulq = 1; if (*iz2nbfjc) { for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) { wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 + (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] = wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 + (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] = eshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r]; } for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { wkumc9idwk1b[yq6lorbx-1] = onxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r]; } fvlmz9iyjdbomp0g(wkumc9idwk1a, wkumc9idwk1b, wy1vqfzu, dvhw1ulq, &pqneb2ra); if (*dvhw1ulq != 1) { Rprintf("*dvhw1ulq != 1 after fvlmz9iyjdbomp0g in vsuff9.\n"); Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b, wkumc9idwk2a, wkumc9idwk2b, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idtgiyxdw11, wkumc9iddufozmt71, wkumc9idtgiyxdw12, wkumc9iddufozmt72, iz2nbfjc); return; } if (*wueshvo2ic) { for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) { ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] = wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 + (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu]; } } for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk1b[yq6lorbx-1]; } } } else { qnwamo0e = wkumc9idwk1a; for (yq6lorbx = 1; yq6lorbx <= zyojx5hw; yq6lorbx++) { *qnwamo0e++ = 0.0e0; } for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) { wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 + (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] = wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 + (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] = wkumc9ideshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r]; } for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { wkumc9idwk1b[yq6lorbx-1] = wkumc9idonxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r]; } for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { for (gp1jxzuh = yq6lorbx; gp1jxzuh <= *kgwmz4ip; gp1jxzuh++) { wkumc9idwk2a[yq6lorbx-1 + (gp1jxzuh-1) * *kgwmz4ip] = 0.0e0; for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) { for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { wkumc9idwk2a[yq6lorbx-1 + (gp1jxzuh-1) * *kgwmz4ip] += conmat[urohxe6t-1 + (yq6lorbx-1) * *wy1vqfzu] * wkumc9idwk1a[urohxe6t-1 + (bpvaqm5z-1) * *wy1vqfzu] * conmat[bpvaqm5z-1 + (gp1jxzuh-1) * *wy1vqfzu]; } } } } for (yq6lorbx = 1; yq6lorbx <= *dim2eshvo2ic; yq6lorbx++) { eshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2a[wkumc9idtgiyxdw12[yq6lorbx-1]-1 + (wkumc9iddufozmt72[yq6lorbx-1]-1) * *kgwmz4ip]; } for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { wkumc9idwk2b[yq6lorbx-1] = 0.0e0; for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) { wkumc9idwk2b[yq6lorbx-1] += conmat[urohxe6t-1 + (yq6lorbx-1) * *wy1vqfzu] * wkumc9idwk1b[urohxe6t-1]; } } for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { onxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2b[yq6lorbx-1]; } fvlmz9iyjdbomp0g(wkumc9idwk2a, wkumc9idwk2b, kgwmz4ip, dvhw1ulq, &pqneb2ra); if 
(*dvhw1ulq != 1) { Rprintf("*dvhw1ulq!=1 in vchol-vsuff9. Something gone wrong\n"); Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b, wkumc9idwk2a, wkumc9idwk2b, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idtgiyxdw11, wkumc9iddufozmt71, wkumc9idtgiyxdw12, wkumc9iddufozmt72, iz2nbfjc); return; } if (*wueshvo2ic) { for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) { ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] = wkumc9idwk2a[wkumc9idtgiyxdw12[yq6lorbx-1]-1 + (wkumc9iddufozmt72[yq6lorbx-1]-1) * *kgwmz4ip]; } } for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk2b[yq6lorbx-1]; } } } Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b, wkumc9idwk2a, wkumc9idwk2b, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idtgiyxdw11, wkumc9iddufozmt71, wkumc9idtgiyxdw12, wkumc9iddufozmt72, iz2nbfjc); } void fapc0tnbicpd0omv(double enaqpzk9[], double sjwyig9t[], double gkdx5jals[], double grmuyvx9[], int *ldk, int *lqsahu0r, int *acpios9q, int *wy1vqfzu, int *jzwsy6tp, double rbne6ouj[], double ifys6woa[], int *kvowz9ht, int *ftnjamu2) { int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, dqlr5bse, pqzfxw4i; double ms0qypiw[16], g9fvdrbw[4], qaltf0nz = 0.10e-9; int arm0lkbg1, arm0lkbg4, *ptri1, *ptri2; double tmp_var4, tmp_var5, *qnwamo0e; double *wkumc9idwrk, *wkumc9idbmb; int *wkumc9idtgiyxdw1_, *wkumc9iddufozmt7_, imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2, zyojx5hw = *wy1vqfzu * *wy1vqfzu; wkumc9idtgiyxdw1_ = Calloc(imk5wjxg, int); wkumc9iddufozmt7_ = Calloc(imk5wjxg, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1_, wkumc9iddufozmt7_, wy1vqfzu); ptri1 = wkumc9idtgiyxdw1_; ptri2 = wkumc9iddufozmt7_; for (ayfnwr1v = 0; ayfnwr1v < imk5wjxg; ayfnwr1v++) { (*ptri1++)--; (*ptri2++)--; } wkumc9idwrk = Calloc(zyojx5hw, double); wkumc9idbmb = Calloc(zyojx5hw, double); if (*jzwsy6tp) { qnwamo0e = grmuyvx9; for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *qnwamo0e++ = 0.0e0; } } } for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { qnwamo0e = wkumc9idbmb; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { *qnwamo0e++ = 0.0e0; } } arm0lkbg1 = *acpios9q + 1; F77_CALL(vinterv)(gkdx5jals, &arm0lkbg1, sjwyig9t + ayfnwr1v-1, &dqlr5bse, &pqzfxw4i); if (pqzfxw4i == 1) { if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jals[dqlr5bse-1] + qaltf0nz)) { dqlr5bse--; } else { Rprintf("pqzfxw4i!=1 after vinterv called in fapc0tnbicpd0omv\n"); Free(wkumc9idtgiyxdw1_); Free(wkumc9iddufozmt7_); Free(wkumc9idwrk); return; } } arm0lkbg1 = 1; arm0lkbg4 = 4; F77_CALL(vbsplvd)(gkdx5jals, &arm0lkbg4, sjwyig9t + ayfnwr1v-1, &dqlr5bse, ms0qypiw, g9fvdrbw, &arm0lkbg1); yq6lorbx = dqlr5bse - 4 + 1; for (urohxe6t = yq6lorbx; urohxe6t <= (yq6lorbx + 3); urohxe6t++) { fapc0tnbvsel(&urohxe6t, &urohxe6t, wy1vqfzu, ldk, enaqpzk9, wkumc9idwrk); tmp_var4 = pow(g9fvdrbw[urohxe6t-yq6lorbx], (double) 2.0); fapc0tnbo0xlszqr(wy1vqfzu, &tmp_var4, wkumc9idwrk, wkumc9idbmb); } for (urohxe6t = yq6lorbx; urohxe6t <= (yq6lorbx+3); urohxe6t++) { for (bpvaqm5z = urohxe6t+1; bpvaqm5z <= (yq6lorbx+3); bpvaqm5z++) { fapc0tnbvsel(&urohxe6t, &bpvaqm5z, wy1vqfzu, ldk, enaqpzk9, wkumc9idwrk); tmp_var5 = 2.0 * g9fvdrbw[urohxe6t-yq6lorbx] * g9fvdrbw[bpvaqm5z-yq6lorbx]; fapc0tnbo0xlszqr(wy1vqfzu, &tmp_var5, wkumc9idwrk, wkumc9idbmb); } } if (*jzwsy6tp) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { grmuyvx9[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] = wkumc9idbmb[yq6lorbx-1 + 
(yq6lorbx-1) * *wy1vqfzu]; } } fapc0tnbovjnsmt2(wkumc9idbmb, rbne6ouj, ifys6woa, wy1vqfzu, lqsahu0r, kvowz9ht, &ayfnwr1v, wkumc9idtgiyxdw1_, wkumc9iddufozmt7_); } Free(wkumc9idtgiyxdw1_); Free(wkumc9iddufozmt7_); Free(wkumc9idwrk); Free(wkumc9idbmb); } void fapc0tnbo0xlszqr(int *wy1vqfzu, double *g9fvdrbw, double *quc6khaf, double *bmb) { int yq6lorbx, gp1jxzuh; double *qnwamo0e; qnwamo0e = quc6khaf; for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) { for (gp1jxzuh = 0; gp1jxzuh < *wy1vqfzu; gp1jxzuh++) { *quc6khaf *= *g9fvdrbw; quc6khaf++; } } quc6khaf = qnwamo0e; for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) { for (gp1jxzuh = 0; gp1jxzuh < *wy1vqfzu; gp1jxzuh++) { *bmb += *quc6khaf++; bmb++; } } } void fapc0tnbvsel(int *nurohxe6t, int *nbpvaqm5z, int *wy1vqfzu, int *ldk, double minv[], double quc6khaf[]) { int ayfnwr1v, yq6lorbx, biuvowq2, nbj8tdsk; double *qnwamo0e; qnwamo0e = quc6khaf; for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *qnwamo0e++ = 0.0; } } if (*nurohxe6t != *nbpvaqm5z) { for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { biuvowq2 = (*nurohxe6t - 1) * *wy1vqfzu + ayfnwr1v; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { nbj8tdsk = (*nbpvaqm5z - 1) * *wy1vqfzu + yq6lorbx; quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] = minv[*ldk - (nbj8tdsk-biuvowq2)-1 + (nbj8tdsk-1) * *ldk]; } } } else { for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { biuvowq2 = (*nurohxe6t - 1) * *wy1vqfzu + ayfnwr1v; for (yq6lorbx = ayfnwr1v; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { nbj8tdsk = (*nbpvaqm5z - 1) * *wy1vqfzu + yq6lorbx; quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu] = minv[*ldk - (nbj8tdsk-biuvowq2)-1 + (nbj8tdsk-1) * *ldk]; } } for (ayfnwr1v = 1; ayfnwr1v <= *wy1vqfzu; ayfnwr1v++) { for (yq6lorbx = ayfnwr1v+1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { quc6khaf[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] = quc6khaf[ayfnwr1v-1 + (yq6lorbx-1) * *wy1vqfzu]; } } } } void fapc0tnbovjnsmt2(double bmb[], double rbne6ouj[], double ifys6woa[], int *wy1vqfzu, int *lqsahu0r, int *kvowz9ht, int *iii, int tgiyxdw1_[], int dufozmt7_[]) { int yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z; double q6zdcwxk; int zyojx5hw = *wy1vqfzu * *wy1vqfzu; double *wkumc9idwrk; wkumc9idwrk = Calloc(zyojx5hw, double); for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { for (urohxe6t = 1; urohxe6t <= *kvowz9ht; urohxe6t++) { yq6lorbx = tgiyxdw1_[urohxe6t-1] + (dufozmt7_[urohxe6t-1] ) * *wy1vqfzu; gp1jxzuh = dufozmt7_[urohxe6t-1] + (tgiyxdw1_[urohxe6t-1] ) * *wy1vqfzu; wkumc9idwrk[yq6lorbx] = wkumc9idwrk[gp1jxzuh] = rbne6ouj[*iii-1 + (urohxe6t-1) * *lqsahu0r]; } q6zdcwxk = 0.0e0; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { q6zdcwxk += bmb[bpvaqm5z-1 + (yq6lorbx-1) * *wy1vqfzu] * wkumc9idwrk[yq6lorbx-1 + (bpvaqm5z-1) * *wy1vqfzu]; } ifys6woa[*iii-1 + (bpvaqm5z-1) * *lqsahu0r] = q6zdcwxk; } Free(wkumc9idwrk); } void fapc0tnbvicb2(double enaqpzk9[], double wpuarq2m[], double Dvector[], int *wy1vqfzu, int *f8yswcat) { int ayfnwr1v, gp1jxzuh, urohxe6t, uplim, sedf7mxb, hofjnx2e, kij0gwer; int Mplus1 = *wy1vqfzu + 1; int Mp1Mp1 = Mplus1 * Mplus1; double *wkumc9iduu; wkumc9iduu = Calloc(Mp1Mp1, double); enaqpzk9[*wy1vqfzu + (*f8yswcat-1) * Mplus1] = 1.0e0 / Dvector[*f8yswcat-1]; hofjnx2e = *wy1vqfzu + 1; sedf7mxb = *f8yswcat + 1 - hofjnx2e; for (kij0gwer = sedf7mxb; kij0gwer <= *f8yswcat; kij0gwer++) { for (ayfnwr1v = 1; ayfnwr1v <= hofjnx2e; ayfnwr1v++) { wkumc9iduu[ayfnwr1v-1 + (kij0gwer-sedf7mxb) * Mplus1] = wpuarq2m[ayfnwr1v-1 + 
(kij0gwer-1 ) * Mplus1]; } } for (ayfnwr1v = *f8yswcat-1; ayfnwr1v >= 1; ayfnwr1v--) { uplim = *wy1vqfzu < (*f8yswcat - ayfnwr1v) ? *wy1vqfzu : *f8yswcat - ayfnwr1v; for (urohxe6t = 1; urohxe6t <= uplim; urohxe6t++) { enaqpzk9[-urohxe6t+*wy1vqfzu + (ayfnwr1v+urohxe6t-1) * Mplus1] = 0.0e0; for (gp1jxzuh = 1; gp1jxzuh <= urohxe6t; gp1jxzuh++) { enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1 ) * Mplus1] -= wkumc9iduu[-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh - sedf7mxb) * Mplus1] * enaqpzk9[gp1jxzuh-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1 ) * Mplus1]; } for ( ; gp1jxzuh <= uplim; gp1jxzuh++) { enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t-1 ) * Mplus1] -= wkumc9iduu[-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh - sedf7mxb) * Mplus1] * enaqpzk9[urohxe6t-gp1jxzuh + *wy1vqfzu + (ayfnwr1v+gp1jxzuh-1 ) * Mplus1]; } } enaqpzk9[*wy1vqfzu + (ayfnwr1v-1) * Mplus1] = 1.0e0 / Dvector[ayfnwr1v-1]; for (urohxe6t = 1; urohxe6t <= uplim; urohxe6t++) { enaqpzk9[ *wy1vqfzu + (ayfnwr1v - 1 ) * Mplus1] -= wkumc9iduu[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t - sedf7mxb) * Mplus1] * enaqpzk9[-urohxe6t + *wy1vqfzu + (ayfnwr1v+urohxe6t - 1 ) * Mplus1]; } if (ayfnwr1v == sedf7mxb) { if (--sedf7mxb < 1) { sedf7mxb = 1; } else { for (kij0gwer = hofjnx2e - 1; kij0gwer >= 1; kij0gwer--) { for (gp1jxzuh = 1; gp1jxzuh <= hofjnx2e; gp1jxzuh++) { wkumc9iduu[gp1jxzuh-1 + kij0gwer * Mplus1] = wkumc9iduu[gp1jxzuh-1 + (kij0gwer-1) * Mplus1]; } } for (gp1jxzuh = 1; gp1jxzuh <= hofjnx2e; gp1jxzuh++) { wkumc9iduu[gp1jxzuh-1] = wpuarq2m[gp1jxzuh-1 + (sedf7mxb-1) * Mplus1]; } } } } Free(wkumc9iduu); } void Free_fapc0tnbewg7qruh(double *wkumc9idWrk1, int *wkumc9idges1xpkr, double *wkumc9idbeta, double *wkumc9idfasrkub3, double *wkumc9idsout, double *wkumc9idr0oydcxb, double *wkumc9idub4xioar, double *wkumc9ideffect, double *wkumc9idueshvo2ic, double *wkumc9ids0, double *wkumc9idpygsw6ko, double *wkumc9idpasjmo8g, double *wkumc9ideshvo2ic, double *wkumc9idonxjvw8u, double *wkumc9idwk4) { Free(wkumc9idWrk1); Free(wkumc9idges1xpkr); Free(wkumc9idbeta); Free(wkumc9idfasrkub3); Free(wkumc9idsout); Free(wkumc9idr0oydcxb); Free(wkumc9idub4xioar); Free(wkumc9ideffect); Free(wkumc9idueshvo2ic); Free(wkumc9ids0); Free(wkumc9idpygsw6ko); Free(wkumc9idpasjmo8g); Free(wkumc9ideshvo2ic); Free(wkumc9idonxjvw8u); Free(wkumc9idwk4); } void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[], int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int *lqsahu0r, double wbkq9zyi[], double lamvec[], double hdnw2fts[], double kispwgx3[], double ui8ysltq[], int *kvowz9ht, int *fbd5yktj, int *ldk, int *aalgpft4y, int *yzoe1rsp, double rpyis2kc[], double gkdx5jals[], double ifys6woa[], double conmat[], double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[], double *tt2, int *cvnjhg2u, int *acpios9q, int *iz2nbfjc, int *kgwmz4ip, int *npjlv3mr, int itdcb8ilk[], double tdcb8ilk[]) { int ayfnwr1v, yq6lorbx, gp1jxzuh, qemj9asg, dvhw1ulq, infoqr_svdbx3tk, rutyk8mg = *lqsahu0r * *kgwmz4ip; int pqneb2ra = 1, ybnsqgo9 = 101; int xjc4ywlh = 2 * *kgwmz4ip, kgwmz4ip2 = 2 * *kgwmz4ip; int npjlv3mreshvo2ic = (*iz2nbfjc == 1) ? *npjlv3mr : *kgwmz4ip * (*kgwmz4ip + 1) / 2, dim2eshvo2ic = (*iz2nbfjc == 1) ? 
*kvowz9ht : *kgwmz4ip * (*kgwmz4ip + 1) / 2;
  double xmin, xrange,
         *fpdlcqk9ui8ysltq, *fpdlcqk9hdnw2fts, *fpdlcqk9ub4xioar,
         *fpdlcqk9ifys6woa, *fpdlcqk9pygsw6ko,
         dtad5vhsu, do3jyipdf, dpq0hfucn, pvofyg8z = 1.0e-7;
  int *wkumc9idges1xpkr, maxrutyk8mgxjc4ywlh;
  double *wkumc9idWrk1, *wkumc9idwk4;
  double *wkumc9idbeta, *wkumc9idfasrkub3, *wkumc9idsout, *wkumc9idr0oydcxb,
         *wkumc9idub4xioar, *wkumc9ideffect, *wkumc9idueshvo2ic, *wkumc9ids0;
  double *wkumc9idpygsw6ko, *wkumc9idpasjmo8g, *wkumc9ideshvo2ic,
         *wkumc9idonxjvw8u;

  maxrutyk8mgxjc4ywlh = (rutyk8mg > xjc4ywlh) ? rutyk8mg : xjc4ywlh;
  wkumc9idWrk1      = Calloc(maxrutyk8mgxjc4ywlh , double);
  wkumc9idwk4       = Calloc(rutyk8mg * xjc4ywlh , double);
  wkumc9idges1xpkr  = Calloc(kgwmz4ip2 , int);
  wkumc9idbeta      = Calloc(kgwmz4ip2 , double);
  wkumc9idfasrkub3  = Calloc(kgwmz4ip2 , double);
  wkumc9idsout      = Calloc(*lqsahu0r * *kgwmz4ip , double);
  wkumc9idr0oydcxb  = Calloc(*kgwmz4ip * *lqsahu0r , double);
  wkumc9idub4xioar  = Calloc(*kgwmz4ip * *lqsahu0r , double);
  wkumc9ideffect    = Calloc(*lqsahu0r * *kgwmz4ip , double);
  wkumc9idueshvo2ic = Calloc(npjlv3mreshvo2ic * *lqsahu0r , double);
  wkumc9ids0        = Calloc(kgwmz4ip2 * kgwmz4ip2 * 2 , double);
  wkumc9idpygsw6ko  = Calloc(*lqsahu0r , double);
  wkumc9idpasjmo8g  = Calloc(*lqsahu0r * *kgwmz4ip , double);
  wkumc9idonxjvw8u  = Calloc(*lqsahu0r * *kgwmz4ip , double);
  wkumc9ideshvo2ic  = Calloc(*lqsahu0r * dim2eshvo2ic , double);

  vsuff9(ftnjamu2, lqsahu0r, ezlgm2up,
         ci1oyxas, tlgduey8, rbne6ouj,
         wkumc9idpygsw6ko, wkumc9idpasjmo8g, wkumc9ideshvo2ic,
         wkumc9idueshvo2ic, wkumc9idonxjvw8u,
         &dvhw1ulq, wy1vqfzu, kvowz9ht, npjlv3mr,
         conmat, kgwmz4ip, iz2nbfjc, &pqneb2ra,
         &npjlv3mreshvo2ic, &dim2eshvo2ic);
  if (dvhw1ulq != 1) {
    Rprintf("Error in fapc0tnbewg7qruh after calling vsuff9.\n");
    Free_fapc0tnbewg7qruh(wkumc9idWrk1, wkumc9idges1xpkr, wkumc9idbeta,
                          wkumc9idfasrkub3, wkumc9idsout, wkumc9idr0oydcxb,
                          wkumc9idub4xioar, wkumc9ideffect, wkumc9idueshvo2ic,
                          wkumc9ids0, wkumc9idpygsw6ko, wkumc9idpasjmo8g,
                          wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idwk4);
    return;
  }

  xmin = wkumc9idpygsw6ko[0];
  xrange = wkumc9idpygsw6ko[*lqsahu0r-1] - wkumc9idpygsw6ko[0];
  for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
    wkumc9idpygsw6ko[ayfnwr1v-1] =
    (wkumc9idpygsw6ko[ayfnwr1v-1] - xmin) / xrange;
  }

  *ldk = 4 * *kgwmz4ip;
  *ldk = 3 * *kgwmz4ip + 1;
  *fbd5yktj = 0;

  Yee_spline(wkumc9idpygsw6ko, wkumc9idonxjvw8u, wkumc9ideshvo2ic, gkdx5jals,
             lqsahu0r, acpios9q, ldk, kgwmz4ip, &dim2eshvo2ic,
             wbkq9zyi, lamvec, aalgpft4y,
             wkumc9idsout, rpyis2kc, ui8ysltq, ifys6woa, hdnw2fts,
             yzoe1rsp, fbd5yktj, ftnjamu2,
             xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, tt2, cvnjhg2u,
             itdcb8ilk, tdcb8ilk);

  for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
  }

  if (1) {  // Do not execute this code block
    fpdlcqk9hdnw2fts = hdnw2fts;
    fpdlcqk9ifys6woa = ifys6woa;
    for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
      *fpdlcqk9hdnw2fts = 0.0e0;
      *fpdlcqk9hdnw2fts = -1.0e0;
      for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
        *fpdlcqk9hdnw2fts += *fpdlcqk9ifys6woa++;
      }
      fpdlcqk9hdnw2fts++;
    }
  }

  if (*kgwmz4ip >= 1) {
    fapc0tnbx6kanjdh(wkumc9idpygsw6ko, wkumc9idwk4, lqsahu0r, kgwmz4ip);
    rutyk8mg = *lqsahu0r * *kgwmz4ip;
    fvlmz9iyC_mux17(wkumc9idueshvo2ic, wkumc9idwk4, kgwmz4ip, &xjc4ywlh,
                    lqsahu0r, &npjlv3mreshvo2ic, &rutyk8mg);
    for (gp1jxzuh = 1; gp1jxzuh <= xjc4ywlh; gp1jxzuh++) {
      wkumc9idges1xpkr[gp1jxzuh-1] = gp1jxzuh;
    }
    F77_CALL(vqrdca)(wkumc9idwk4, &rutyk8mg, &rutyk8mg, &xjc4ywlh,
                     wkumc9idfasrkub3, wkumc9idges1xpkr, wkumc9idWrk1,
                     &qemj9asg, &pvofyg8z);
    fvlmz9iyC_mux22(wkumc9idueshvo2ic,
wkumc9idsout, wkumc9idr0oydcxb, &npjlv3mreshvo2ic, lqsahu0r, kgwmz4ip); F77_CALL(vdqrsl)(wkumc9idwk4, &rutyk8mg, &rutyk8mg, &qemj9asg, wkumc9idfasrkub3, wkumc9idr0oydcxb, wkumc9idWrk1, wkumc9ideffect, wkumc9idbeta, wkumc9idWrk1, wkumc9idub4xioar, &ybnsqgo9, &infoqr_svdbx3tk); fvlmz9iyC_vbks(wkumc9idueshvo2ic, wkumc9idub4xioar, kgwmz4ip, lqsahu0r, &npjlv3mreshvo2ic); if (*yzoe1rsp) { fvlmz9iyC_lkhnw9yq(wkumc9idwk4, wkumc9ids0, &rutyk8mg, &xjc4ywlh, &dvhw1ulq); if (dvhw1ulq != 1) { Rprintf("Error in fapc0tnbewg7qruh calling fvlmz9iyC_lkhnw9yq.\n"); Free_fapc0tnbewg7qruh(wkumc9idWrk1, wkumc9idges1xpkr, wkumc9idbeta, wkumc9idfasrkub3, wkumc9idsout, wkumc9idr0oydcxb, wkumc9idub4xioar, wkumc9ideffect, wkumc9idueshvo2ic, wkumc9ids0, wkumc9idpygsw6ko, wkumc9idpasjmo8g, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idwk4); return; } for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { dtad5vhsu = wkumc9ids0[yq6lorbx-1 + (yq6lorbx-1 ) * kgwmz4ip2]; do3jyipdf = wkumc9ids0[yq6lorbx-1 + (yq6lorbx-1 + *kgwmz4ip) * kgwmz4ip2]; dpq0hfucn = wkumc9ids0[yq6lorbx-1 + *kgwmz4ip + (yq6lorbx-1 + *kgwmz4ip) * kgwmz4ip2]; fpdlcqk9ui8ysltq = ui8ysltq + (yq6lorbx-1) * *ftnjamu2; fpdlcqk9pygsw6ko = wkumc9idpygsw6ko; for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { *fpdlcqk9ui8ysltq -= dtad5vhsu + *fpdlcqk9pygsw6ko * (2.0 * do3jyipdf + *fpdlcqk9pygsw6ko * dpq0hfucn); fpdlcqk9ui8ysltq++; fpdlcqk9pygsw6ko++; } } } } else { fapc0tnbdsrt0gem(lqsahu0r, wkumc9idpygsw6ko, wkumc9ideshvo2ic, wkumc9idsout, wkumc9idub4xioar, ui8ysltq, yzoe1rsp); } fpdlcqk9ub4xioar = wkumc9idub4xioar; for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { wkumc9idsout[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] -= *fpdlcqk9ub4xioar++; } } for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) { fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r, */ ezlgm2up, wkumc9idsout + (yq6lorbx-1) * *lqsahu0r, kispwgx3 + (yq6lorbx-1) * *ftnjamu2); } Free_fapc0tnbewg7qruh(wkumc9idWrk1, wkumc9idges1xpkr, wkumc9idbeta, wkumc9idfasrkub3, wkumc9idsout, wkumc9idr0oydcxb, wkumc9idub4xioar, wkumc9ideffect, wkumc9idueshvo2ic, wkumc9ids0, wkumc9idpygsw6ko, wkumc9idpasjmo8g, wkumc9ideshvo2ic, wkumc9idonxjvw8u, wkumc9idwk4); } void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[], double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double kispwgx3[], double m0ibglfx[], double zshtfg8c[], double ui8ysltq[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], double wpuarq2m[], double hjm2ktyr[], int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[]) { double *ghdetj8v, *zpcqv3uj; int nhja0izq, rutyk8mg, xjc4ywlh, lyzoe1rsp, ueb8hndv, gtrlbz3e, algpft4y = 0, qemj9asg, npjlv3mr, kvowz9ht, ldk, fbd5yktj = 0; int *ftnjamu2, *wy1vqfzu; int itdcb8ilk[1]; double tdcb8ilk[4]; itdcb8ilk[0] = psdvgce3[15]; /* contr.sp$c5aesxku in s.vam() */ tdcb8ilk[0] = fjcasv7g[2]; /* contr.sp$low in s.vam() */ tdcb8ilk[1] = fjcasv7g[3]; /* contr.sp$high in s.vam() */ tdcb8ilk[2] = fjcasv7g[4]; /* contr.sp$tol in s.vam() */ tdcb8ilk[3] = fjcasv7g[5]; /* contr.sp$eps in s.vam() */ wy1vqfzu = psdvgce3 + 7; ftnjamu2 = psdvgce3; nhja0izq = psdvgce3[2]; lyzoe1rsp = psdvgce3[3]; gtrlbz3e = psdvgce3[5]; qemj9asg = psdvgce3[6]; rutyk8mg = psdvgce3[8]; xjc4ywlh = psdvgce3[9]; kvowz9ht = psdvgce3[11]; npjlv3mr = psdvgce3[12]; ldk = psdvgce3[14]; 
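/* Annotation (editorial, not original VGAM source): Yee_vbfa above is the
 * .C() entry point for vector backfitting, called from s.vam() on the R side
 * per the inline comments.  Scalar controls travel in two packed arrays; the
 * slots visibly read or written are, as best as can be reconstructed from
 * their use (all interpretations beyond the quoted comments are editorial
 * guesses, not author documentation):
 *
 *   psdvgce3 (int):  [0] n, [2] number of smooth terms, [3] se flag,
 *                    [5] max iterations, [6] QR rank, [7] M,
 *                    [8] nrow(X_vlm), [9] ncol(X_vlm), [11] dimw, [12] dimu,
 *                    [14] ldk, [15] contr.sp$c5aesxku;
 *                    [4], [6], [13], [16] are written back on exit.
 *   fjcasv7g (double): [0] bf convergence tolerance (bf.qaltf0nz),
 *                    [1] deviance-type output (ghdetj8v),
 *                    [2..5] contr.sp low/high/tol/eps.
 *
 *   // hypothetical read-back by a caller after the .C() call returns:
 *   // int iterations = psdvgce3[4];  int errflag = psdvgce3[16];
 */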
zpcqv3uj = fjcasv7g + 0; /* bf.qaltf0nz */ ghdetj8v = fjcasv7g + 1; /* ghdetj8v */ fapc0tnbvbfa1(ftnjamu2, wy1vqfzu, ezlgm2up, lqsahu0r, which, he7mqnvy, tlgduey8, rbne6ouj, wbkq9zyi, lamvec, hdnw2fts, kispwgx3, m0ibglfx, zshtfg8c, ui8ysltq, zpcqv3uj, vc6hatuj, fasrkub3, &qemj9asg, ges1xpkr, wpuarq2m, hjm2ktyr, ulm3dvzg, hnpt1zym, iz2nbfjc, ifys6woa, rpyis2kc, gkdx5jals, ghdetj8v, nbzjkpi3, lindex, acpios9q, jwbkl9fp, &nhja0izq, &lyzoe1rsp, &ueb8hndv, >rlbz3e, &rutyk8mg, &xjc4ywlh, &kvowz9ht, &npjlv3mr, &fbd5yktj, &ldk, &algpft4y, itdcb8ilk, tdcb8ilk); psdvgce3[6] = qemj9asg; psdvgce3[4] = ueb8hndv; psdvgce3[13] = fbd5yktj; psdvgce3[16] = algpft4y; } void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[], int which[], double he7mqnvy[], double tlgduey8[], double rbne6ouj[], double wbkq9zyi[], double lamvec[], double hdnw2fts[], double kispwgx3[], double m0ibglfx[], double zshtfg8c[], double ui8ysltq[], double *zpcqv3uj, double vc6hatuj[], double fasrkub3[], int *qemj9asg, int ges1xpkr[], double wpuarq2m[], double hjm2ktyr[], int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], double *ghdetj8v, int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[], int *nhja0izq, int *yzoe1rsp, int *ueb8hndv, int *gtrlbz3e, int *rutyk8mg, int *xjc4ywlh, int *kvowz9ht, int *npjlv3mr, int *fbd5yktj, int *ldk, int *algpft4y, int itdcb8ilk[], double tdcb8ilk[]) { int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, wg1xifdy, ybnsqgo9 = 101, maxrutyk8mgxjc4ywlh, infoqr_svdbx3tk, sumzv2xfhei = 0; double qtce8hzo, deltaf, z4vrscot, pvofyg8z = 1.0e-7, g2dnwteb = 1.0, *fpdlcqk9m0ibglfx, *fpdlcqk9ub4xioar, *fpdlcqk9tlgduey8, *fpdlcqk9ghz9vuba, *fpdlcqk9hjm2ktyr, *fpdlcqk9kispwgx3, *qnwamo0e1; double *wkumc9idTwk, *wkumc9idwkbzmd6ftv, *wkumc9idwk9; double *wkumc9idghz9vuba, *wkumc9idoldmat, *wkumc9idub4xioar, *wkumc9idwk2; double *wkumc9idall_xecbg0pf, *wkumc9idall_z4grbpiq, *wkumc9idall_d7glzhbj, *wkumc9idall_v2eydbxs, *wkumc9idall_tt2; int cvnjhg2u; maxrutyk8mgxjc4ywlh = (*ftnjamu2 * *wy1vqfzu > *xjc4ywlh) ? 
(*ftnjamu2 * *wy1vqfzu) : *xjc4ywlh; wkumc9idTwk = Calloc(maxrutyk8mgxjc4ywlh , double); wkumc9idwkbzmd6ftv = Calloc(*xjc4ywlh * *rutyk8mg, double); wkumc9idwk9 = Calloc(*xjc4ywlh , double); wkumc9idghz9vuba = Calloc(*ftnjamu2 * *wy1vqfzu, double); wkumc9idoldmat = Calloc(*ftnjamu2 * *wy1vqfzu, double); wkumc9idub4xioar = Calloc(*wy1vqfzu * *ftnjamu2, double); wkumc9idwk2 = Calloc(*ftnjamu2 * *wy1vqfzu, double); if ( *nhja0izq == 0 || *nhja0izq == 1 ) { *gtrlbz3e = 1; } if (*qemj9asg == 0) { fvlmz9iyC_mux17(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, ftnjamu2, npjlv3mr, rutyk8mg); for (gp1jxzuh = 1; gp1jxzuh <= *xjc4ywlh; gp1jxzuh++) { ges1xpkr[gp1jxzuh-1] = gp1jxzuh; } F77_CALL(vqrdca)(vc6hatuj, rutyk8mg, rutyk8mg, xjc4ywlh, fasrkub3, ges1xpkr, wkumc9idTwk, qemj9asg, &pvofyg8z); } fpdlcqk9m0ibglfx = m0ibglfx; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 0; yq6lorbx < *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx++ = 0.0e0; } } for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) { if (iz2nbfjc[gp1jxzuh-1] == 1) { fpdlcqk9m0ibglfx = m0ibglfx; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { bpvaqm5z = hnpt1zym[gp1jxzuh-1] - 1; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx += kispwgx3[ayfnwr1v-1 + bpvaqm5z * *ftnjamu2]; fpdlcqk9m0ibglfx++; bpvaqm5z++; } } } else { for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) { urohxe6t = hnpt1zym[gp1jxzuh-1] + wg1xifdy - 2; fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9kispwgx3 = kispwgx3 + urohxe6t * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { fpdlcqk9hjm2ktyr = hjm2ktyr + urohxe6t * *wy1vqfzu; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx += *fpdlcqk9hjm2ktyr++ * *fpdlcqk9kispwgx3; fpdlcqk9m0ibglfx++; } fpdlcqk9kispwgx3++; } } } } sumzv2xfhei = jwbkl9fp[(1 + *nhja0izq) - 1]; wkumc9idall_xecbg0pf = Calloc(sumzv2xfhei, double); wkumc9idall_z4grbpiq = Calloc(sumzv2xfhei, double); wkumc9idall_d7glzhbj = Calloc(sumzv2xfhei, double); wkumc9idall_v2eydbxs = Calloc(sumzv2xfhei, double); wkumc9idall_tt2 = Calloc(*nhja0izq , double); *ueb8hndv = 0; while ((g2dnwteb > *zpcqv3uj ) && (*ueb8hndv < *gtrlbz3e)) { (*ueb8hndv)++; deltaf = 0.0e0; fpdlcqk9ghz9vuba = wkumc9idghz9vuba; fpdlcqk9tlgduey8 = tlgduey8; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { fpdlcqk9m0ibglfx = m0ibglfx + yq6lorbx-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9ghz9vuba++ = *fpdlcqk9tlgduey8++ - *fpdlcqk9m0ibglfx; fpdlcqk9m0ibglfx += *wy1vqfzu; } } fvlmz9iyC_mux22(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu); F77_CALL(vdqrsl)(vc6hatuj, rutyk8mg, rutyk8mg, qemj9asg, fasrkub3, wkumc9idTwk, wkumc9idwk2, wkumc9idwk2, zshtfg8c, wkumc9idwk2, wkumc9idub4xioar, &ybnsqgo9, &infoqr_svdbx3tk); *ghdetj8v = 0.0e0; qnwamo0e1 = wkumc9idTwk; fpdlcqk9ub4xioar = wkumc9idub4xioar; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { qtce8hzo = *qnwamo0e1++ - *fpdlcqk9ub4xioar++; *ghdetj8v += pow(qtce8hzo, (double) 2.0); } } fvlmz9iyC_vbks(wpuarq2m, wkumc9idub4xioar, wy1vqfzu, ftnjamu2, npjlv3mr); for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { urohxe6t = hnpt1zym[gp1jxzuh-1] + yq6lorbx -2; if (iz2nbfjc[gp1jxzuh-1] == 1) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] = kispwgx3[ayfnwr1v-1 + urohxe6t * *ftnjamu2]; wkumc9idghz9vuba[ayfnwr1v-1 + (yq6lorbx-1) * 
*ftnjamu2] =
            tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] -
            wkumc9idub4xioar[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] -
            m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] +
            wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2];
        }
      } else {
        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
          wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] = 0.0e0;
          for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
            bpvaqm5z = hnpt1zym[gp1jxzuh-1] + wg1xifdy -2;
            wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] +=
              hjm2ktyr[yq6lorbx-1 + bpvaqm5z * *wy1vqfzu] *
              kispwgx3[ayfnwr1v-1 + bpvaqm5z * *ftnjamu2];
          }
          wkumc9idghz9vuba[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] =
            tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] -
            wkumc9idub4xioar[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] -
            m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] +
            wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2];
        }
      }
    }

    cvnjhg2u = (*ueb8hndv == 1) ? 0 : 1;
    fapc0tnbewg7qruh(he7mqnvy+(which[gp1jxzuh-1]-1) * *ftnjamu2,
                     wkumc9idghz9vuba, rbne6ouj,
                     ftnjamu2, wy1vqfzu,
                     ezlgm2up + (gp1jxzuh-1) * *ftnjamu2,
                     lqsahu0r + gp1jxzuh-1,
                     wbkq9zyi + hnpt1zym[gp1jxzuh-1]-1,
                     lamvec   + hnpt1zym[gp1jxzuh-1]-1,
                     hdnw2fts + hnpt1zym[gp1jxzuh-1]-1,
                     kispwgx3 + (hnpt1zym[gp1jxzuh-1]-1) * *ftnjamu2,
                     ui8ysltq + (hnpt1zym[gp1jxzuh-1]-1) * *ftnjamu2,
                     kvowz9ht, fbd5yktj, ldk, algpft4y, yzoe1rsp,
                     rpyis2kc + nbzjkpi3[gp1jxzuh-1]-1,
                     gkdx5jals + jwbkl9fp[gp1jxzuh-1]-1,
                     ifys6woa + lindex[gp1jxzuh-1]-1,
                     hjm2ktyr + (hnpt1zym[gp1jxzuh-1]-1) * *wy1vqfzu,
                     wkumc9idall_xecbg0pf + jwbkl9fp[gp1jxzuh-1]-1,
                     wkumc9idall_z4grbpiq + jwbkl9fp[gp1jxzuh-1]-1,
                     wkumc9idall_d7glzhbj + jwbkl9fp[gp1jxzuh-1]-1,
                     wkumc9idall_v2eydbxs + jwbkl9fp[gp1jxzuh-1]-1,
                     wkumc9idall_tt2 + gp1jxzuh-1,
                     // If 0 then compute wkumc9idall_sg[0:3] else already done:
                     &cvnjhg2u,
                     acpios9q + gp1jxzuh-1, iz2nbfjc + gp1jxzuh-1,
                     ulm3dvzg + gp1jxzuh-1, npjlv3mr,
                     itdcb8ilk, tdcb8ilk);

    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
      if (iz2nbfjc[gp1jxzuh-1] == 1) {
        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
          m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] +=
            kispwgx3[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+yq6lorbx-2) * *ftnjamu2];
        }
      } else {
        for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
          for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
            m0ibglfx[yq6lorbx-1+ (ayfnwr1v-1) * *wy1vqfzu] +=
              hjm2ktyr[yq6lorbx-1+ (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *wy1vqfzu] *
              kispwgx3[ayfnwr1v-1+ (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2];
          }
        }
      }
      for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
        m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] -=
          wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2];
      }
    }

    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
      if (iz2nbfjc[gp1jxzuh-1] == 1) {
        deltaf += fapc0tnbrd9beyfk(ftnjamu2,
                                   wkumc9idoldmat + (yq6lorbx-1) * *ftnjamu2,
                                   rbne6ouj + (yq6lorbx-1) * *ftnjamu2,
                                   kispwgx3 +
                                   (hnpt1zym[gp1jxzuh-1]+yq6lorbx-2) * *ftnjamu2);
      } else {
        for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
          wkumc9idTwk[ayfnwr1v-1] = 0.0e0;
          for (wg1xifdy=1; wg1xifdy<=ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
            wkumc9idTwk[ayfnwr1v-1] +=
              hjm2ktyr[yq6lorbx-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *wy1vqfzu] *
              kispwgx3[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2];
          }
        }
        deltaf += fapc0tnbrd9beyfk(ftnjamu2,
                                   wkumc9idoldmat + (yq6lorbx-1) * *ftnjamu2,
                                   rbne6ouj + (yq6lorbx-1) * *ftnjamu2,
                                   wkumc9idTwk);
      }
    }

    fpdlcqk9ghz9vuba = wkumc9idghz9vuba;
    fpdlcqk9tlgduey8 = tlgduey8;
    for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
      fpdlcqk9m0ibglfx = m0ibglfx + yq6lorbx-1;
      for
(ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9ghz9vuba++ = *fpdlcqk9tlgduey8++ - *fpdlcqk9m0ibglfx; fpdlcqk9m0ibglfx += *wy1vqfzu; } } fvlmz9iyC_mux22(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu); F77_CALL(vdqrsl)(vc6hatuj, rutyk8mg, rutyk8mg, qemj9asg, fasrkub3, wkumc9idTwk, wkumc9idwk2, wkumc9idwk2, zshtfg8c, wkumc9idwk2, wkumc9idub4xioar, &ybnsqgo9, &infoqr_svdbx3tk); fvlmz9iyC_vbks(wpuarq2m, wkumc9idub4xioar, wy1vqfzu, ftnjamu2, npjlv3mr); } if (*nhja0izq > 0) { z4vrscot = 0.0e0; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { z4vrscot += rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] * pow(m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu], (double) 2.0); } } g2dnwteb = (z4vrscot > 0.0e0) ? sqrt(deltaf / z4vrscot) : 0.0; } if (*ueb8hndv == 1) { g2dnwteb = 1.0e0; } } for (yq6lorbx = 1; yq6lorbx <= *xjc4ywlh; yq6lorbx++) { wkumc9idwk9[yq6lorbx-1] = zshtfg8c[yq6lorbx-1]; } for (yq6lorbx = 1; yq6lorbx <= *xjc4ywlh; yq6lorbx++) { zshtfg8c[ges1xpkr[yq6lorbx-1]-1] = wkumc9idwk9[yq6lorbx-1]; } fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9ub4xioar = wkumc9idub4xioar; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx += *fpdlcqk9ub4xioar++; fpdlcqk9m0ibglfx++; } } if (*yzoe1rsp && (*nhja0izq > 0)) { for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) { for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) { fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r + gp1jxzuh-1, */ ezlgm2up + (gp1jxzuh-1) * *ftnjamu2, ui8ysltq + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2, wkumc9idoldmat); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { ui8ysltq[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] = wkumc9idoldmat[ayfnwr1v-1]; } } } if (0) { for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) { for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) { fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r + gp1jxzuh-1, */ ezlgm2up + (gp1jxzuh-1) * *ftnjamu2, ifys6woa + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2, wkumc9idoldmat); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { ifys6woa[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] = wkumc9idoldmat[ayfnwr1v-1]; } } } } } Free(wkumc9idwkbzmd6ftv); Free(wkumc9idwk9); Free(wkumc9idTwk); Free(wkumc9idghz9vuba); Free(wkumc9idoldmat); Free(wkumc9idub4xioar); Free(wkumc9idwk2); Free(wkumc9idall_xecbg0pf); Free(wkumc9idall_z4grbpiq); Free(wkumc9idall_d7glzhbj); Free(wkumc9idall_v2eydbxs); Free(wkumc9idall_tt2); } void fapc0tnbx6kanjdh(double sjwyig9t[], double xout[], int *f8yswcat, int *wy1vqfzu) { int ayfnwr1v, yq6lorbx, gp1jxzuh, iptr = 0; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { xout[iptr++] = (yq6lorbx == gp1jxzuh) ? 1.0e0 : 0.0e0; } } } for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) { for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) { xout[iptr++] = (yq6lorbx == gp1jxzuh) ? sjwyig9t[ayfnwr1v-1] : 0.0e0; } } } } double fapc0tnbrd9beyfk(int *f8yswcat, double bhcji9gl[], double po8rwsmy[], double m0ibglfx[]) { int ayfnwr1v; double rd9beyfk, rxeqjn0y = 0.0, lm9vcjob = 0.0; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { lm9vcjob += *po8rwsmy; rxeqjn0y += *po8rwsmy++ * pow(*bhcji9gl++ - *m0ibglfx++, (double) 2.0); } rd9beyfk = (lm9vcjob > 0.0e0) ? 
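/* Annotation (editorial, not original VGAM source): despite the obfuscation,
 * the helpers in this stretch are recognizable: fapc0tnbrd9beyfk returns a
 * weighted mean squared difference (used for the backfitting convergence
 * ratio), fapc0tnbpitmeh0q a weighted mean, and fapc0tnbdsrt0gem a weighted
 * simple linear regression with slope
 * sum w (x - xbar)(y - ybar) / sum w (x - xbar)^2.  Equivalent sketch with
 * readable, illustrative names (wmean is not a function in this source):
 *
 *   static double wmean(const double *x, const double *w, int n) {
 *     double sw = 0.0, swx = 0.0;
 *     for (int i = 0; i < n; i++) { sw += w[i]; swx += w[i] * x[i]; }
 *     return sw > 0.0 ? swx / sw : 0.0;   // same zero-weight guard as above
 *   }
 */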
(rxeqjn0y / lm9vcjob) : 0.0e0; return rd9beyfk; } void fapc0tnbpitmeh0q(int *f8yswcat, double bhcji9gl[], double po8rwsmy[], double *lfu2qhid, double *lm9vcjob) { double rxeqjn0yy = 0.0; int ayfnwr1v; *lm9vcjob = 0.0e0; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { *lm9vcjob += *po8rwsmy; rxeqjn0yy += *po8rwsmy++ * *bhcji9gl++; } *lfu2qhid = (*lm9vcjob > 0.0e0) ? (rxeqjn0yy / *lm9vcjob) : 0.0e0; } void fapc0tnbdsrt0gem(int *f8yswcat, double sjwyig9t[], double po8rwsmy[], double bhcji9gl[], double ub4xioar[], double ui8ysltq[], int *yzoe1rsp) { int ayfnwr1v; double pygsw6ko, pasjmo8g, intercept, eck8vubt, qtce8hzo, lm9vcjob = 0.0, q6zdcwxk = 0.0, nsum = 0.0, *fpdlcqk9po8rwsmy, *fpdlcqk9sjwyig9t, *fpdlcqk9bhcji9gl; fapc0tnbpitmeh0q(f8yswcat, sjwyig9t, po8rwsmy, &pygsw6ko, &lm9vcjob); fapc0tnbpitmeh0q(f8yswcat, bhcji9gl, po8rwsmy, &pasjmo8g, &lm9vcjob); fpdlcqk9sjwyig9t = sjwyig9t; fpdlcqk9bhcji9gl = bhcji9gl; fpdlcqk9po8rwsmy = po8rwsmy; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { qtce8hzo = *fpdlcqk9sjwyig9t++ - pygsw6ko; nsum += qtce8hzo * (*fpdlcqk9bhcji9gl++ - pasjmo8g) * *fpdlcqk9po8rwsmy; qtce8hzo = pow(qtce8hzo, (double) 2.0); q6zdcwxk += qtce8hzo * *fpdlcqk9po8rwsmy++; } eck8vubt = nsum / q6zdcwxk; intercept = pasjmo8g - eck8vubt * pygsw6ko; fpdlcqk9sjwyig9t = sjwyig9t; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { *ub4xioar++ = intercept + eck8vubt * *fpdlcqk9sjwyig9t++; } if (*yzoe1rsp) { fpdlcqk9sjwyig9t = sjwyig9t; fpdlcqk9po8rwsmy = po8rwsmy; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { qtce8hzo = *fpdlcqk9sjwyig9t++ - pygsw6ko; if (*fpdlcqk9po8rwsmy++ > 0.0e0) { *ui8ysltq -= (1.0e0 / lm9vcjob + pow(qtce8hzo, (double) 2.0) / q6zdcwxk); ui8ysltq++; } else { *ui8ysltq++ = 0.0e0; } } } } void fapc0tnbshm8ynte(int *ftnjamu2, int ezlgm2up[], double pygsw6ko[], double sjwyig9t[]) { int ayfnwr1v; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *sjwyig9t++ = pygsw6ko[*ezlgm2up++ -1]; } } void vknootl2(double sjwyig9t[], int *f8yswcat, double gkdx5jal[], int *rvy1fpli, int *ukgwt7na) { int ayfnwr1v, yq6lorbx, ndzv2xfhei; if (*ukgwt7na) { ndzv2xfhei = *rvy1fpli - 6; } else { ndzv2xfhei = (*f8yswcat <= 40) ? 
*f8yswcat :
    floor((double) 40.0 + pow((double) *f8yswcat - 40.0, (double) 0.25));
  }
  *rvy1fpli = ndzv2xfhei + 6;
  for (yq6lorbx = 1; yq6lorbx <= 3; yq6lorbx++) {
    *gkdx5jal++ = sjwyig9t[0];
  }
  for (yq6lorbx = 1; yq6lorbx <= ndzv2xfhei; yq6lorbx++) {
    ayfnwr1v = (yq6lorbx - 1) * (*f8yswcat - 1) / (ndzv2xfhei - 1);
    *gkdx5jal++ = sjwyig9t[ayfnwr1v];
  }
  for (yq6lorbx = 1; yq6lorbx <= 3; yq6lorbx++) {
    *gkdx5jal++ = sjwyig9t[*f8yswcat -1];
  }
}

void Yee_pknootl2(double *gkdx5jal, int *f8yswcat, int *zo8wpibx,
                  double *Toler_ankcghz2) {
  int ayfnwr1v, yq6lorbx = *f8yswcat - 4, cjop5bwm = 4;
  for (ayfnwr1v = 1; ayfnwr1v <= 4; ayfnwr1v++) {
    *zo8wpibx++ = 1;
  }
  for (ayfnwr1v = 5; ayfnwr1v <= yq6lorbx; ayfnwr1v++) {
    if ((gkdx5jal[ayfnwr1v -1] - gkdx5jal[cjop5bwm -1] >= *Toler_ankcghz2) &&
        (gkdx5jal[ *f8yswcat -1] - gkdx5jal[ayfnwr1v -1] >= *Toler_ankcghz2)) {
      *zo8wpibx++ = 1;
      cjop5bwm = ayfnwr1v;
    } else {
      *zo8wpibx++ = 0;
    }
  }
  for (ayfnwr1v = *f8yswcat - 3; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
    *zo8wpibx++ = 1;
  }
}
VGAM/src/muxr3.c0000644000176200001440000003140713565414527013036 0ustar liggesusers
/* NOTE: the five include targets below were stripped by text extraction
   (each "#include" had lost its <...> header name).  <math.h>, <stdio.h>,
   <stdlib.h>, <R.h>, <Rmath.h> is the standard set for R C sources using
   Calloc/Free/Rprintf, and is a reconstruction, not original text. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <R.h>
#include <Rmath.h>

void vdecccc(int *hqipzx3n, int *exz2jrhq, int *dimm);
void m2accc(double *m, double *a, int *dimm, int *hqipzx3n, int *exz2jrhq,
            int *n, int *M, int *rb1onzwu);
void a2mccc(double *a, double *m, int *dimm, int *hqipzx3n, int *exz2jrhq,
            int *n, int *M);
void mux2ccc(double *cc, double *tlgduey8, double *bzmd6ftv,
             int *p, int *n, int *M);
void mux22ccc(double *cc, double *tlgduey8, double *bzmd6ftv, int *dimm,
              int *hqipzx3n, int *exz2jrhq, int *n, int *M,
              double *wk, int *rb1onzwu);
void mux5ccc(double *cc, double *x, double *bzmd6ftv, int *M, int *n, int *r,
             int *dimm, int *dimr, int *matrix, double *wk, double *wk2,
             int *hqipzx3n_M, int *exz2jrhq_M, int *hqipzx3n_r, int *exz2jrhq_r);
void mux55ccc(double *evects, double *evals, double *bzmd6ftv,
              double *wk, double *wk2, int *hqipzx3n, int *exz2jrhq,
              int *M, int *n);
void mux7ccc(double *cc, double *x, double *bzmd6ftv,
             int *M, int *q, int *n, int *r);
void mux111ccc(double *cc, double *the7mqnvy, int *M, int *R, int *n,
               double *wkcc, double *wk2, int *hqipzx3n, int *exz2jrhq,
               int *dimm, int *rb1onzwu);
void mux15ccc(double *cc, double *x, double *bzmd6ftv, int *M, int *n);
void vcholccc(double *cc, int *M, int *n, int *ok, double *wk,
              int *hqipzx3n, int *exz2jrhq, int *dimm);
void vforsubccc(double *cc, double *b, int *M, int *n, double *wk,
                int *hqipzx3n, int *exz2jrhq, int *dimm);
void vbacksubccc(double *cc, double *b, int *M, int *n, double *wk,
                 int *hqipzx3n, int *exz2jrhq, int *dimm);
void tapply_mat1(double *mat, int *nr, int *nc, int *type);

void vdecccc(int *hqipzx3n, int *exz2jrhq, int *dimm) {
  int ayfnwr1v;
  for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) {
    hqipzx3n[ayfnwr1v] -= 1;
    exz2jrhq[ayfnwr1v] -= 1;
  }
}

void m2accc(double *m, double *a, int *dimm, int *hqipzx3n, int *exz2jrhq,
            int *n, int *M, int *rb1onzwu) {
  int ayfnwr1v, gp1jxzuh, MM = *M * *M, MMn = *M * *M * *n;
  if(*rb1onzwu == 1 || *dimm != *M * (*M + 1) / 2)
    for(gp1jxzuh = 0; gp1jxzuh < MMn; gp1jxzuh++)
      a[gp1jxzuh] = 0.0;
  for(gp1jxzuh = 0; gp1jxzuh < *n; gp1jxzuh++) {
    for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) {
      a[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M] = m[ayfnwr1v];
      if(*rb1onzwu == 0)
        a[exz2jrhq[ayfnwr1v] + hqipzx3n[ayfnwr1v] * *M] = m[ayfnwr1v];
    }
    a += MM;
    m += *dimm;
  }
}

void a2mccc(double *a, double *m, int *dimm, int *hqipzx3n, int *exz2jrhq,
            int *n, int *M) {
  int ayfnwr1v, gp1jxzuh, MM= *M * *M;
  for(gp1jxzuh = 0; gp1jxzuh < *n;
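/* Annotation (editorial, not original VGAM source): the knot-count rule in
 * vknootl2 above uses all n data points as knots when n <= 40, and
 * floor(40 + (n - 40)^(1/4)) knots otherwise, then pads three replicated
 * boundary knots at each end (the "+ 6"), the usual setup for cubic
 * B-splines.  Sketch (nknots is an illustrative name; needs <math.h>):
 *
 *   int nknots(int n) {
 *     return (n <= 40) ? n : (int) floor(40.0 + pow(n - 40.0, 0.25));
 *   }
 */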
gp1jxzuh++) { for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) m[ayfnwr1v] = a[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M]; a += MM; m += *dimm; } } void mux2ccc(double *cc, double *tlgduey8, double *bzmd6ftv, int *p, int *n, int *M) { double urohxe6t; int ayfnwr1v, yq6lorbx, bpvaqm5z, Mp = *M * *p; for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) { for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) { urohxe6t = 0.0; for(bpvaqm5z = 0; bpvaqm5z < *p; bpvaqm5z++) urohxe6t += cc[yq6lorbx + bpvaqm5z * *M] * tlgduey8[bpvaqm5z]; *bzmd6ftv++ = urohxe6t; } tlgduey8 += *p; cc += Mp; } } void mux22ccc(double *cc, double *tlgduey8, double *bzmd6ftv, int *dimm, int *hqipzx3n, int *exz2jrhq, int *n, int *M, double *wk, int *rb1onzwu) { double urohxe6t; int yq6lorbx, bpvaqm5z, gp1jxzuh, one = 1, nzqklc9x; vdecccc(hqipzx3n, exz2jrhq, dimm); for(gp1jxzuh = 0; gp1jxzuh < *n; gp1jxzuh++) { m2accc(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, rb1onzwu); for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) { urohxe6t = 0.0; nzqklc9x = *rb1onzwu == 0 ? 0 : yq6lorbx; for(bpvaqm5z = nzqklc9x; bpvaqm5z < *M; bpvaqm5z++) urohxe6t += wk[yq6lorbx + bpvaqm5z * *M] * tlgduey8[bpvaqm5z]; *bzmd6ftv++ = urohxe6t; } tlgduey8 += *M; cc += *dimm; } } void mux5ccc(double *cc, double *x, double *bzmd6ftv, int *M, int *n, int *r, int *dimm, int *dimr, int *matrix, double *wk, double *wk2, int *hqipzx3n_M, int *exz2jrhq_M, int *hqipzx3n_r, int *exz2jrhq_r) { double urohxe6t, *pd, *pd2; int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z, Mr = *M * *r, rr = *r * *r, MM = *M * *M, usvdbx3tk, jM, jr, kM, kr, one=1, rb1onzwu=0; if(*matrix == 1) { vdecccc(hqipzx3n_M, exz2jrhq_M, dimm); vdecccc(hqipzx3n_r, exz2jrhq_r, dimr); pd = wk; pd2 = wk2; } else { pd = pd2 = wk; } for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) { if(*matrix == 1) m2accc(cc, pd, dimm, hqipzx3n_M, exz2jrhq_M, &one, M, &rb1onzwu); else { pd = cc; pd2 = bzmd6ftv; } for(yq6lorbx = 0; yq6lorbx < *r; yq6lorbx++) { jM = yq6lorbx * *M; jr = yq6lorbx * *r; for(gp1jxzuh = yq6lorbx; gp1jxzuh < *r; gp1jxzuh++) { kM = gp1jxzuh * *M; kr = gp1jxzuh * *r; urohxe6t = 0.0; for(bpvaqm5z = 0; bpvaqm5z < *M; bpvaqm5z++) for(usvdbx3tk = 0; usvdbx3tk < *M; usvdbx3tk++) urohxe6t += x[bpvaqm5z + jM] * pd[bpvaqm5z + usvdbx3tk * *M] * x[usvdbx3tk + kM]; pd2[yq6lorbx + kr] = pd2[gp1jxzuh + jr] = urohxe6t; } } if(*matrix == 1) a2mccc(pd2, bzmd6ftv, dimr, hqipzx3n_r, exz2jrhq_r, &one, r); cc += (*matrix == 1 ? *dimm : MM); x += Mr; bzmd6ftv += (*matrix == 1 ? 
*dimr : rr); } } void mux55ccc(double *evects, double *evals, double *bzmd6ftv, double *wk, double *wk2, int *hqipzx3n, int *exz2jrhq, int *M, int *n) { double *pd, *pd2, bpvaqm5z; int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, MM = *M * *M, one = 1, imk5wjxg = *M * (*M + 1)/2; vdecccc(hqipzx3n, exz2jrhq, &imk5wjxg); for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) { pd = evects; pd2 = wk2; for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) for(gp1jxzuh = 0; gp1jxzuh < *M; gp1jxzuh++) *pd2++ = *pd++ * evals[yq6lorbx]; for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) for(gp1jxzuh = yq6lorbx; gp1jxzuh < *M; gp1jxzuh++) { bpvaqm5z = 0.0; for(urohxe6t = 0; urohxe6t < *M; urohxe6t++) bpvaqm5z += wk2[yq6lorbx + urohxe6t * *M] * evects[gp1jxzuh + urohxe6t * *M]; wk[yq6lorbx + gp1jxzuh * *M] = wk[gp1jxzuh + yq6lorbx * *M] = bpvaqm5z; } a2mccc(wk, bzmd6ftv, &imk5wjxg, hqipzx3n, exz2jrhq, &one, M); bzmd6ftv += imk5wjxg; evals += *M; evects += MM; } } void mux7ccc(double *cc, double *x, double *bzmd6ftv, int *M, int *q, int *n, int *r) { double urohxe6t; int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z, Mq = *M * *q, qr = *q * *r, Mr = *M * *r, kq, kM; for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) { for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) { for(gp1jxzuh = 0; gp1jxzuh < *r; gp1jxzuh++) { kq = gp1jxzuh * *q; kM = gp1jxzuh * *M; urohxe6t = 0.0; for(bpvaqm5z = 0; bpvaqm5z < *q; bpvaqm5z++) urohxe6t += cc[yq6lorbx + bpvaqm5z * *M] * x[bpvaqm5z + kq]; bzmd6ftv[yq6lorbx + kM] = urohxe6t; } } cc += Mq; bzmd6ftv += Mr; x += qr; } } void mux111ccc(double *cc, double *the7mqnvy, int *M, int *R, int *n, double *wkcc, double *wk2, int *hqipzx3n, int *exz2jrhq, int *dimm, int *rb1onzwu) { double urohxe6t, *pd2, obr6tcexdouble; int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z, MM = *M * *M, MR = *M * *R, lowlim; vdecccc(hqipzx3n, exz2jrhq, dimm); for(ayfnwr1v = 0; ayfnwr1v < MM; ayfnwr1v++) wkcc[ayfnwr1v] = 0.0; for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) { for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) { if(*rb1onzwu == 0) { obr6tcexdouble = *cc++; wkcc[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M] = wkcc[exz2jrhq[ayfnwr1v] + hqipzx3n[ayfnwr1v] * *M] = obr6tcexdouble; } else { wkcc[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M] = *cc++; } } /* ayfnwr1v */ pd2 = the7mqnvy; for(ayfnwr1v = 0; ayfnwr1v < *M; ayfnwr1v++) for(yq6lorbx = 0; yq6lorbx < *R; yq6lorbx++) wk2[ayfnwr1v + yq6lorbx * *M] = *pd2++; for(ayfnwr1v = 0; ayfnwr1v < *M; ayfnwr1v++) { lowlim = *rb1onzwu == 0 ? 
0 : ayfnwr1v; for(yq6lorbx = 0; yq6lorbx < *R; yq6lorbx++) { urohxe6t = 0.0; for(gp1jxzuh = lowlim; gp1jxzuh < *M; gp1jxzuh++) urohxe6t += wk2[gp1jxzuh + yq6lorbx * *M] * wkcc[ayfnwr1v + gp1jxzuh * *M]; the7mqnvy[yq6lorbx + ayfnwr1v * *R] = urohxe6t; } /* yq6lorbx */ } /* ayfnwr1v */ the7mqnvy += MR; } /* bpvaqm5z */ } void mux15ccc(double *cc, double *x, double *bzmd6ftv, int *M, int *n) { double *pd, *pd2; int ayfnwr1v, yq6lorbx, gp1jxzuh, MM = *M * *M; for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) { pd = cc; pd2 = bzmd6ftv; for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) for(gp1jxzuh = 0; gp1jxzuh < *M; gp1jxzuh++) *pd2++ = *pd++ * x[yq6lorbx]; pd2 = bzmd6ftv; for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) for(gp1jxzuh = 0; gp1jxzuh < *M; gp1jxzuh++) { *pd2 *= x[gp1jxzuh]; pd2++; } bzmd6ftv += MM; x += *M; } } void vcholccc(double *cc, int *M, int *n, int *ok, double *wk, int *hqipzx3n, int *exz2jrhq, int *dimm) { double urohxe6t, *pd; int bpvaqm5z, ayfnwr1v, yq6lorbx, gp1jxzuh, iM, iiM, rb1onzwu = 0, one = 1; vdecccc(hqipzx3n, exz2jrhq, dimm); pd = wk; for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) { *ok = 1; m2accc(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, &rb1onzwu); for(ayfnwr1v = 0; ayfnwr1v < *M; ayfnwr1v++) { urohxe6t = 0.0; iM = ayfnwr1v * *M; iiM = ayfnwr1v + iM; for(gp1jxzuh = 0; gp1jxzuh < ayfnwr1v; gp1jxzuh++) urohxe6t += pd[gp1jxzuh + iM] * pd[gp1jxzuh + iM]; pd[iiM] -= urohxe6t; if(pd[iiM] < 0.0) { *ok = 0; break; } pd[iiM] = sqrt(pd[iiM]); for(yq6lorbx = ayfnwr1v+1; yq6lorbx < *M; yq6lorbx++) { urohxe6t = 0.0; for(gp1jxzuh = 0; gp1jxzuh < ayfnwr1v; gp1jxzuh++) urohxe6t += pd[gp1jxzuh + iM] * pd[gp1jxzuh + yq6lorbx * *M]; pd[ayfnwr1v + yq6lorbx * *M] = (pd[ayfnwr1v + yq6lorbx * *M] - urohxe6t) / pd[iiM]; } } a2mccc(wk, cc, dimm, hqipzx3n, exz2jrhq, &one, M); cc += *dimm; ok++; } } void vforsubccc(double *cc, double *b, int *M, int *n, double *wk, int *hqipzx3n, int *exz2jrhq, int *dimm) { double urohxe6t, *pd; int yq6lorbx, gp1jxzuh, bpvaqm5z, rb1onzwu = 1, one = 1; pd = wk; vdecccc(hqipzx3n, exz2jrhq, dimm); for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) { m2accc(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, &rb1onzwu); for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) { urohxe6t = b[yq6lorbx]; for(gp1jxzuh = 0; gp1jxzuh < yq6lorbx; gp1jxzuh++) urohxe6t -= pd[gp1jxzuh + yq6lorbx * *M] * b[gp1jxzuh]; b[yq6lorbx] = urohxe6t / pd[yq6lorbx + yq6lorbx * *M]; } cc += *dimm; b += *M; } } void vbacksubccc(double *cc, double *b, int *M, int *n, double *wk, int *hqipzx3n, int *exz2jrhq, int *dimm) { double urohxe6t, *pd; int yq6lorbx, gp1jxzuh, bpvaqm5z, rb1onzwu = 1, one = 1; pd = wk; vdecccc(hqipzx3n, exz2jrhq, dimm); for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) { m2accc(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, &rb1onzwu); for(yq6lorbx = *M - 1; yq6lorbx >= 0; yq6lorbx--) { urohxe6t = b[yq6lorbx]; for(gp1jxzuh = yq6lorbx + 1; gp1jxzuh < *M; gp1jxzuh++) urohxe6t -= pd[yq6lorbx + gp1jxzuh * *M] * b[gp1jxzuh]; b[yq6lorbx] = urohxe6t / pd[yq6lorbx + yq6lorbx * *M]; } cc += *dimm; b += *M; } } void tapply_mat1(double *mat, int *nr, int *nc, int *type) { double *pd = mat, *pd2 = mat + *nr; int ayfnwr1v, yq6lorbx; if(*type == 1) for(yq6lorbx = 2; yq6lorbx <= *nc; yq6lorbx++) for(ayfnwr1v = 0; ayfnwr1v < *nr; ayfnwr1v++, pd2++) *pd2 += *pd++; if(*type == 2) { pd2 = mat + *nr * *nc - 1; pd = pd2 - *nr; for(yq6lorbx = *nc; yq6lorbx >= 2; yq6lorbx--) for(ayfnwr1v = 0; ayfnwr1v < *nr; ayfnwr1v++, pd2--) *pd2 -= *pd--; } if(*type == 3) for(yq6lorbx = 2; yq6lorbx <= *nc; yq6lorbx++) for(ayfnwr1v = 0; 
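/* Annotation (editorial, not original VGAM source): vcholccc, vforsubccc and
 * vbacksubccc defined above implement, per observation, an unblocked Cholesky
 * factorization A = U'U of a compactly stored symmetric matrix, followed by
 * the two triangular solves for A x = b.  Hedged usage sketch for a single
 * M x M matrix (variable names here are illustrative):
 *
 *   int ok, one = 1;
 *   vcholccc(cc, &M, &one, &ok, wk, row, col, &dimm);     // factor; ok == 0 => not p.d.
 *   if (ok) {
 *     vforsubccc(cc, b, &M, &one, wk, row, col, &dimm);   // solve U' z = b
 *     vbacksubccc(cc, b, &M, &one, wk, row, col, &dimm);  // solve U  x = z (in place)
 *   }
 *
 * Caveat, visible in the code above: each of the three routines calls
 * vdecccc(), which 0-bases the row/col index vectors *in place*, so fresh
 * 1-based copies must be supplied to each call (as the R-side callers do).
 */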
VGAM/src/vlinpack1.f

C Output from Public domain Ratfor, version 1.01
      subroutine vqrdca(x,ldx,n,p,fasrkub3,jpvt,work,xwdf5ltg,eps)
      implicit double precision (a-h,o-z)
      implicit integer (i-n)
      double precision dsign, dabs, dmax1, dsqrt
      integer min0
      integer ldx,n,p,xwdf5ltg
      integer jpvt(*)
      integer j,jj,jp,l,lup,curpvt
      double precision x(ldx,p),fasrkub3(p),work(*),eps
      double precision vdnrm2,tt
      double precision ddot8,nrmxl,t
      do23000 j=1,p
      fasrkub3(j) = vdnrm2(n,x(1,j),ldx,1)
      work(j) = fasrkub3(j)
23000 continue
23001 continue
      l=1
      lup = min0(n,p)
      curpvt = p
23002 if(l.le.lup)then
      fasrkub3(l) = 0.0d0
      nrmxl = vdnrm2(n-l+1, x(l,l), ldx, 1)
      if(nrmxl .lt. eps)then
      call dshift8(x,ldx,n,l,curpvt)
      jp = jpvt(l)
      t=fasrkub3(l)
      tt=work(l)
      j=l+1
23006 if(.not.(j.le.curpvt))goto 23008
      jj=j-1
      jpvt(jj)=jpvt(j)
      fasrkub3(jj)=fasrkub3(j)
      work(jj)=work(j)
23007 j=j+1
      goto 23006
23008 continue
      jpvt(curpvt)=jp
      fasrkub3(curpvt)=t
      work(curpvt)=tt
      curpvt=curpvt-1
      if(lup.gt.curpvt)then
      lup=curpvt
      endif
      else
      if(l.eq.n)then
      goto 23003
      endif
      if(x(l,l).ne.0.0d0)then
      nrmxl = dsign(nrmxl,x(l,l))
      endif
      call dscal8(n-l+1,1.0d0/nrmxl,x(l,l),1)
      x(l,l) = 1.0d0+x(l,l)
      j=l+1
23015 if(.not.(j.le.curpvt))goto 23017
      t = -ddot8(n-l+1,x(l,l),1,x(l,j),1)/x(l,l)
      call daxpy8(n-l+1,t,x(l,l),1,x(l,j),1)
      if(fasrkub3(j).ne.0.0d0)then
      tt = 1.0d0-(dabs(x(l,j))/fasrkub3(j))**2
      tt = dmax1(tt,0.0d0)
      t = tt
      tt = 1.0d0+0.05d0*tt*(fasrkub3(j)/work(j))**2
      if(tt.ne.1.0d0)then
      fasrkub3(j) = fasrkub3(j)*dsqrt(t)
      else
      fasrkub3(j) = vdnrm2(n-l,x(l+1,j),ldx,1)
      work(j) = fasrkub3(j)
      endif
      endif
23016 j=j+1
      goto 23015
23017 continue
      fasrkub3(l) = x(l,l)
      x(l,l) = -nrmxl
      l=l+1
      endif
      goto 23002
      endif
23003 continue
      xwdf5ltg = lup
      return
      end

VGAM/src/cqof.f

C Output from Public domain Ratfor, version 1.01
      subroutine pnm1or(objzgdk0, lfu2qhid)
      implicit logical (a-z)
      double precision objzgdk0, lfu2qhid
      integer sn
      double precision r1, r2, y, y2, y3, y4, y5, y6, y7
      double precision erf, erfc, z, z2, z3, z4
      double precision sqrt2, sqrtpi, ulimit, p10,p11,p12,p13, q10,q11,q
     *12,q13
      double precision p20,p21,p22,p23,p24,p25,p26,p27
      double precision q20,q21,q22,q23,q24,q25,q26,q27
      double precision p30,p31,p32,p33,p34
      double precision q30,q31,q32,q33,q34
      sqrt2 = 1.414213562373095049d0
      sqrtpi = 1.772453850905516027d0
      ulimit = 20.0d0
      p10 = 242.66795523053175d0
      p11 = 21.979261618294152d0
      p12 = 6.9963834886191355d0
      p13 = -.035609843701815385d0
      q10 = 215.05887586986120d0
      q11 = 91.164905404514901d0
      q12 = 15.082797630407787d0
      q13 = 1.0d0
      p20 = 300.4592610201616005d0
      p21 = 451.9189537118729422d0
      p22 = 339.3208167343436870d0
      p23 = 152.9892850469404039d0
      p24 = 43.16222722205673530d0
      p25 = 7.211758250883093659d0
      p26 = .5641955174789739711d0
      p27 = -.0000001368648573827167067d0
      q20 = 300.4592609569832933d0
      q21 = 790.9509253278980272d0
      q22 = 931.3540948506096211d0
      q23 = 638.9802644656311665d0
      q24 = 277.5854447439876434d0
      q25 = 77.00015293522947295d0
      q26 = 12.78272731962942351d0
      q27 = 1.0d0
      p30 = -.00299610707703542174d0
      p31 = -.0494730910623250734d0
      p32 = -.226956593539686930d0
      p33 = -.278661308609647788d0
      p34 = -.0223192459734184686d0
      q30 = .0106209230528467918d0
      q31 = .191308926107829841d0
      q32 = 1.05167510706793207d0
      q33 = 1.98733201817135256d0
      q34 = 1.0d0
      if(objzgdk0 .lt.
-ulimit)then lfu2qhid = 2.753624d-89 return endif if(objzgdk0 .gt. ulimit)then lfu2qhid = 1.0d0 return endif y = objzgdk0 / sqrt2 if(y .lt. 0.0d0)then y = -y sn = -1 else sn = 1 endif y2 = y * y y4 = y2 * y2 y6 = y4 * y2 if(y .lt. 0.46875d0)then r1 = p10 + p11 * y2 + p12 * y4 + p13 * y6 r2 = q10 + q11 * y2 + q12 * y4 + q13 * y6 erf = y * r1 / r2 if(sn .eq. 1)then lfu2qhid = 0.5d0 + 0.5*erf else lfu2qhid = 0.5d0 - 0.5*erf endif else if(y .lt. 4.0d0)then y3 = y2 * y y5 = y4 * y y7 = y6 * y r1 = p20 + p21 * y + p22 * y2 + p23 * y3 + p24 * y4 + p25 * y5 + p *26 * y6 + p27 * y7 r2 = q20 + q21 * y + q22 * y2 + q23 * y3 + q24 * y4 + q25 * y5 + q *26 * y6 + q27 * y7 erfc = dexp(-y2) * r1 / r2 if(sn .eq. 1)then lfu2qhid = 1.0 - 0.5*erfc else lfu2qhid = 0.5*erfc endif else z = y4 z2 = z * z z3 = z2 * z z4 = z2 * z2 r1 = p30 + p31 * z + p32 * z2 + p33 * z3 + p34 * z4 r2 = q30 + q31 * z + q32 * z2 + q33 * z3 + q34 * z4 erfc = (dexp(-y2)/y) * (1.0 / sqrtpi + r1 / (r2 * y2)) if(sn .eq. 1)then lfu2qhid = 1.0d0 - 0.5*erfc else lfu2qhid = 0.5*erfc endif endif endif return end subroutine pnm1ow(objzgdk0, lfu2qhid, kuzxj1lo) implicit logical (a-z) integer kuzxj1lo, ayfnwr1v double precision objzgdk0(kuzxj1lo), lfu2qhid(kuzxj1lo) do23016 ayfnwr1v=1,kuzxj1lo call pnm1or(objzgdk0(ayfnwr1v), lfu2qhid(ayfnwr1v)) 23016 continue 23017 continue return end subroutine n2howibc2a(objzgdk0, i9mwnvqt, lfu2qhid) implicit logical (a-z) double precision objzgdk0, i9mwnvqt, lfu2qhid double precision xd4mybgja if(1.0d0 - objzgdk0 .ge. 1.0d0)then lfu2qhid = -8.12589d0 / (3.0*dsqrt(i9mwnvqt)) else if(1.0d0 - objzgdk0 .le. 0.0d0)then lfu2qhid = 8.12589d0 / (3.0*dsqrt(i9mwnvqt)) else call pnm1or(1.0d0-objzgdk0, xd4mybgja) xd4mybgja = xd4mybgja / (3.0*dsqrt(i9mwnvqt)) lfu2qhid = -3.0d0 * dlog(1.0d0 + xd4mybgja) endif endif return end subroutine zi8qrpsb(objzgdk0, lfu2qhid) implicit logical (a-z) double precision objzgdk0, lfu2qhid if(1.0d0 - objzgdk0 .ge. 1.0d0)then lfu2qhid = -35.0d0 else if(1.0d0 - objzgdk0 .le. 0.0d0)then lfu2qhid = 3.542106d0 else lfu2qhid = dlog(-dlog(1.0d0 - objzgdk0)) endif endif return end subroutine g2vwexyk9(objzgdk0, lfu2qhid) implicit logical (a-z) double precision objzgdk0, lfu2qhid if(1.0d0 - objzgdk0 .ge. 1.0d0)then lfu2qhid = -34.53958d0 else if(1.0d0 - objzgdk0 .le. 0.0d0)then lfu2qhid = 34.53958d0 else lfu2qhid = dlog(objzgdk0 / (1.0d0 - objzgdk0)) endif endif return end subroutine pkc4ejib(w8znmyce, beta, m0ibglfx, kuzxj1lo, wy1vqfzu, *br5ovgcj, xlpjcg3s, vtsou9pz, hj3ftvzu, qfx3vhct, unhycz0e, vm4xjo *sb) implicit logical (a-z) integer kuzxj1lo, wy1vqfzu, br5ovgcj, xlpjcg3s, vtsou9pz, hj3ftvzu *, qfx3vhct, unhycz0e double precision w8znmyce(br5ovgcj,xlpjcg3s), beta(xlpjcg3s), m0ib *glfx(wy1vqfzu,kuzxj1lo), vm4xjosb(kuzxj1lo) integer ayfnwr1v, yq6lorbx, gp1jxzuh, i1loc, sedf7mxb double precision vogkfwt8 if(vtsou9pz .eq. 1)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 
5))then sedf7mxb = 2*hj3ftvzu-1 do23034 ayfnwr1v=1,kuzxj1lo vogkfwt8 = 0.0d0 do23036 gp1jxzuh=1,xlpjcg3s vogkfwt8 = vogkfwt8 + w8znmyce(2*ayfnwr1v-1,gp1jxzuh) * beta(gp1jx *zuh) 23036 continue 23037 continue m0ibglfx(sedf7mxb,ayfnwr1v) = vogkfwt8 23034 continue 23035 continue sedf7mxb = 2*hj3ftvzu do23038 ayfnwr1v=1,kuzxj1lo vogkfwt8 = 0.0d0 do23040 gp1jxzuh=1,xlpjcg3s vogkfwt8 = vogkfwt8 + w8znmyce(2*ayfnwr1v ,gp1jxzuh) * beta(gp1jxz *uh) 23040 continue 23041 continue m0ibglfx(sedf7mxb,ayfnwr1v) = vogkfwt8 23038 continue 23039 continue else do23042 ayfnwr1v=1,br5ovgcj vogkfwt8 = 0.0d0 do23044 gp1jxzuh=1,xlpjcg3s vogkfwt8 = vogkfwt8 + w8znmyce(ayfnwr1v,gp1jxzuh) * beta(gp1jxzuh) 23044 continue 23045 continue m0ibglfx(hj3ftvzu,ayfnwr1v) = vogkfwt8 23042 continue 23043 continue endif else i1loc = 1 do23046 ayfnwr1v=1,kuzxj1lo do23048 yq6lorbx=1,wy1vqfzu vogkfwt8 = 0.0d0 do23050 gp1jxzuh=1,xlpjcg3s vogkfwt8 = vogkfwt8 + w8znmyce(i1loc,gp1jxzuh) * beta(gp1jxzuh) 23050 continue 23051 continue i1loc = i1loc + 1 m0ibglfx(yq6lorbx,ayfnwr1v) = vogkfwt8 23048 continue 23049 continue 23046 continue 23047 continue endif if(unhycz0e .eq. 1)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23056 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) *+ vm4xjosb(ayfnwr1v) 23056 continue 23057 continue else do23058 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = m0ibglfx(hj3ftvzu,ayfnwr1v) + vm4xjo *sb(ayfnwr1v) 23058 continue 23059 continue endif endif return end subroutine nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0k *ns, qfx3vhct, hj3ftvzu) implicit logical (a-z) integer kuzxj1lo, wy1vqfzu, afpc0kns, qfx3vhct, hj3ftvzu double precision m0ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns,ku *zxj1lo) integer ayfnwr1v, yq6lorbx double precision o3jyipdf0 if(hj3ftvzu .eq. 0)then if(qfx3vhct .eq. 1)then do23064 ayfnwr1v=1,kuzxj1lo do23066 yq6lorbx=1,wy1vqfzu o3jyipdf0 = dexp(m0ibglfx(yq6lorbx,ayfnwr1v)) t8hwvalr(yq6lorbx,ayfnwr1v) = o3jyipdf0 / (1.0d0 + o3jyipdf0) 23066 continue 23067 continue 23064 continue 23065 continue endif if(qfx3vhct .eq. 2)then do23070 ayfnwr1v=1,kuzxj1lo do23072 yq6lorbx=1,wy1vqfzu t8hwvalr(yq6lorbx,ayfnwr1v) = dexp(m0ibglfx(yq6lorbx,ayfnwr1v)) 23072 continue 23073 continue 23070 continue 23071 continue endif if(qfx3vhct .eq. 4)then do23076 ayfnwr1v=1,kuzxj1lo do23078 yq6lorbx=1,wy1vqfzu t8hwvalr(yq6lorbx,ayfnwr1v) = 1.0d0-dexp(-dexp(m0ibglfx(yq6lorbx,a *yfnwr1v))) 23078 continue 23079 continue 23076 continue 23077 continue endif if(qfx3vhct .eq. 5)then do23082 ayfnwr1v=1,kuzxj1lo do23084 yq6lorbx=1,afpc0kns t8hwvalr(yq6lorbx,ayfnwr1v) = dexp(m0ibglfx(2*yq6lorbx-1,ayfnwr1v) *) 23084 continue 23085 continue 23082 continue 23083 continue endif if(qfx3vhct .eq. 3)then do23088 ayfnwr1v=1,kuzxj1lo do23090 yq6lorbx=1,afpc0kns t8hwvalr(yq6lorbx,ayfnwr1v) = dexp(m0ibglfx(2*yq6lorbx-1,ayfnwr1v) *) 23090 continue 23091 continue 23088 continue 23089 continue endif if(qfx3vhct .eq. 8)then do23094 ayfnwr1v=1,kuzxj1lo do23096 yq6lorbx=1,wy1vqfzu t8hwvalr(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) 23096 continue 23097 continue 23094 continue 23095 continue endif else if(qfx3vhct .eq. 1)then do23100 ayfnwr1v=1,kuzxj1lo o3jyipdf0 = dexp(m0ibglfx(hj3ftvzu,ayfnwr1v)) t8hwvalr(hj3ftvzu,ayfnwr1v) = o3jyipdf0 / (1.0d0 + o3jyipdf0) 23100 continue 23101 continue endif if(qfx3vhct .eq. 2)then do23104 ayfnwr1v=1,kuzxj1lo t8hwvalr(hj3ftvzu,ayfnwr1v) = dexp(m0ibglfx(hj3ftvzu,ayfnwr1v)) 23104 continue 23105 continue endif if(qfx3vhct .eq. 
4)then do23108 ayfnwr1v=1,kuzxj1lo t8hwvalr(hj3ftvzu,ayfnwr1v) = 1.0d0 - dexp(-dexp(m0ibglfx(hj3ftvzu *,ayfnwr1v))) 23108 continue 23109 continue endif if(qfx3vhct .eq. 5)then do23112 ayfnwr1v=1,kuzxj1lo t8hwvalr(hj3ftvzu,ayfnwr1v) = dexp(m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) *) 23112 continue 23113 continue endif if(qfx3vhct .eq. 3)then do23116 ayfnwr1v=1,kuzxj1lo t8hwvalr(hj3ftvzu,ayfnwr1v) = dexp(m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) *) 23116 continue 23117 continue endif if(qfx3vhct .eq. 8)then do23120 ayfnwr1v=1,kuzxj1lo t8hwvalr(hj3ftvzu,ayfnwr1v) = m0ibglfx(hj3ftvzu,ayfnwr1v) 23120 continue 23121 continue endif endif return end subroutine shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, *wy1vqfzu, afpc0kns, dimw, m0ibglfx, dev, hj3ftvzu, n3iasxug, vsoih *n1r, cll) implicit logical (a-z) integer qfx3vhct, kuzxj1lo, wy1vqfzu, afpc0kns, dimw, hj3ftvzu, cl *l double precision tlgduey8(kuzxj1lo, afpc0kns), wmat(kuzxj1lo, dimw *), t8hwvalr(afpc0kns, kuzxj1lo), m0ibglfx(wy1vqfzu,kuzxj1lo), dev, * n3iasxug, vsoihn1r integer ayfnwr1v, yq6lorbx double precision bzmd6ftv, txlvcey5, xd4mybgj, uqnkc6zg, hofjnx2e, * smu, afwp5imx, ivqk2ywz, qvd7yktm double precision hdqsx7bk, anopu9vi, jtnbu2hz logical lbgwvp3q bzmd6ftv = 0.0d0 if(hj3ftvzu .eq. 0)then if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then do23126 yq6lorbx=1,wy1vqfzu do23128 ayfnwr1v=1,kuzxj1lo if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then ivqk2ywz = tlgduey8(ayfnwr1v,yq6lorbx) * dlog(tlgduey8(ayfnwr1v,yq *6lorbx)) else ivqk2ywz = 0.0d0 endif if(tlgduey8(ayfnwr1v,yq6lorbx) .lt. 1.0d0)then ivqk2ywz = ivqk2ywz + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * dlog *(1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) endif xd4mybgj = t8hwvalr(yq6lorbx,ayfnwr1v) * (1.0d0 - t8hwvalr(yq6lorb *x,ayfnwr1v)) if(xd4mybgj .lt. n3iasxug)then smu = t8hwvalr(yq6lorbx,ayfnwr1v) if(smu .lt. n3iasxug)then qvd7yktm = tlgduey8(ayfnwr1v,yq6lorbx) * vsoihn1r else qvd7yktm = tlgduey8(ayfnwr1v,yq6lorbx) * dlog(smu) endif afwp5imx = 1.0d0 - smu if(afwp5imx .lt. n3iasxug)then qvd7yktm = qvd7yktm + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * vsoi *hn1r else qvd7yktm = qvd7yktm + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * dlog *(afwp5imx) endif else qvd7yktm = (tlgduey8(ayfnwr1v,yq6lorbx) * dlog(t8hwvalr(yq6lorbx,a *yfnwr1v)) + (1.0d0 - tlgduey8(ayfnwr1v,yq6lorbx)) * dlog(1.0d0 - t *8hwvalr(yq6lorbx,ayfnwr1v))) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (ivqk2ywz - qvd7yktm) 23128 continue 23129 continue 23126 continue 23127 continue endif if(qfx3vhct .eq. 2)then do23142 yq6lorbx=1,wy1vqfzu do23144 ayfnwr1v=1,kuzxj1lo if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then xd4mybgj = t8hwvalr(yq6lorbx,ayfnwr1v) - tlgduey8(ayfnwr1v,yq6lorb *x) + tlgduey8(ayfnwr1v,yq6lorbx) * dlog(tlgduey8(ayfnwr1v,yq6lorbx *) / t8hwvalr(yq6lorbx,ayfnwr1v)) else xd4mybgj = t8hwvalr(yq6lorbx,ayfnwr1v) - tlgduey8(ayfnwr1v,yq6lorb *x) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj 23144 continue 23145 continue 23142 continue 23143 continue endif if(qfx3vhct .eq. 5)then do23150 yq6lorbx=1,afpc0kns do23152 ayfnwr1v=1,kuzxj1lo jtnbu2hz = dexp(m0ibglfx(2*yq6lorbx,ayfnwr1v)) call tldz5ion(jtnbu2hz, uqnkc6zg) if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 
0.0d0)then xd4mybgj = (jtnbu2hz - 1.0d0) * dlog(tlgduey8(ayfnwr1v,yq6lorbx)) *+ (dlog(jtnbu2hz)-tlgduey8(ayfnwr1v,yq6lorbx) / t8hwvalr(yq6lorbx, *ayfnwr1v) - dlog(t8hwvalr(yq6lorbx,ayfnwr1v)) ) * jtnbu2hz - uqnkc *6zg else xd4mybgj = -1000.0d0 endif xd4mybgj = -xd4mybgj bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj 23152 continue 23153 continue 23150 continue 23151 continue endif if(qfx3vhct .eq. 3)then if(cll .eq. 0)then anopu9vi = 34.0d0 do23160 yq6lorbx=1,afpc0kns do23162 ayfnwr1v=1,kuzxj1lo if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .gt. anopu9vi)then hdqsx7bk = dexp(anopu9vi) lbgwvp3q = .true. else if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .lt. -anopu9vi)then hdqsx7bk = dexp(-anopu9vi) lbgwvp3q = .true. else hdqsx7bk = dexp(m0ibglfx(2*yq6lorbx,ayfnwr1v)) lbgwvp3q = .false. endif endif if(tlgduey8(ayfnwr1v,yq6lorbx) .lt. 1.0d0)then xd4mybgj = 1.0d0 else xd4mybgj = tlgduey8(ayfnwr1v,yq6lorbx) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (tlgduey8(ayfnwr1v,yq6lor *bx) * dlog(xd4mybgj/t8hwvalr(yq6lorbx,ayfnwr1v)) + (tlgduey8(ayfnw *r1v,yq6lorbx) + hdqsx7bk) * dlog((t8hwvalr(yq6lorbx,ayfnwr1v)+hdqs *x7bk) / (hdqsx7bk+ tlgduey8(ayfnwr1v,yq6lorbx)))) 23162 continue 23163 continue 23160 continue 23161 continue else anopu9vi = 34.0d0 do23170 yq6lorbx=1,afpc0kns do23172 ayfnwr1v=1,kuzxj1lo if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .gt. anopu9vi)then hdqsx7bk = dexp(anopu9vi) lbgwvp3q = .true. else if(m0ibglfx(2*yq6lorbx,ayfnwr1v) .lt. -anopu9vi)then hdqsx7bk = dexp(-anopu9vi) lbgwvp3q = .true. else hdqsx7bk = dexp(m0ibglfx(2*yq6lorbx,ayfnwr1v)) lbgwvp3q = .false. endif endif if( lbgwvp3q )then uqnkc6zg = 0.0d0 hofjnx2e = 0.0d0 else call tldz5ion(hdqsx7bk + tlgduey8(ayfnwr1v,yq6lorbx), uqnkc6zg) call tldz5ion(hdqsx7bk, hofjnx2e) endif call tldz5ion(1.0d0 + tlgduey8(ayfnwr1v,yq6lorbx), txlvcey5) xd4mybgj = hdqsx7bk * dlog(hdqsx7bk / (hdqsx7bk + t8hwvalr(yq6lorb *x,ayfnwr1v))) + uqnkc6zg - hofjnx2e - txlvcey5 if(tlgduey8(ayfnwr1v,yq6lorbx) .gt. 0.0d0)then xd4mybgj = xd4mybgj + tlgduey8(ayfnwr1v,yq6lorbx) * dlog(t8hwvalr( *yq6lorbx,ayfnwr1v) / (hdqsx7bk + t8hwvalr(yq6lorbx,ayfnwr1v))) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj 23172 continue 23173 continue 23170 continue 23171 continue bzmd6ftv = -bzmd6ftv / 2.0d0 endif endif if(qfx3vhct .eq. 8)then do23184 yq6lorbx=1,wy1vqfzu do23186 ayfnwr1v=1,kuzxj1lo xd4mybgj = tlgduey8(ayfnwr1v,yq6lorbx) - t8hwvalr(yq6lorbx,ayfnwr1 *v) bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj**2 23186 continue 23187 continue 23184 continue 23185 continue endif else if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then do23190 ayfnwr1v=1,kuzxj1lo if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then ivqk2ywz = tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(tlgduey8(ayfnwr1v,hj *3ftvzu)) else ivqk2ywz = 0.0d0 endif if(tlgduey8(ayfnwr1v,hj3ftvzu) .lt. 1.0d0)then ivqk2ywz = ivqk2ywz + (1.0d0 - tlgduey8(ayfnwr1v,hj3ftvzu)) * dlog *(1.0d0 - tlgduey8(ayfnwr1v,hj3ftvzu)) endif xd4mybgj = t8hwvalr(hj3ftvzu,ayfnwr1v) * (1.0d0 - t8hwvalr(hj3ftvz *u,ayfnwr1v)) if(xd4mybgj .lt. n3iasxug)then smu = t8hwvalr(hj3ftvzu,ayfnwr1v) if(smu .lt. n3iasxug)then qvd7yktm = tlgduey8(ayfnwr1v,hj3ftvzu) * vsoihn1r else qvd7yktm = tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(smu) endif afwp5imx = 1.0d0 - smu if(afwp5imx .lt. 
n3iasxug)then qvd7yktm = qvd7yktm + (1.0d0-tlgduey8(ayfnwr1v,hj3ftvzu))*vsoihn1r else qvd7yktm = qvd7yktm + (1.0d0-tlgduey8(ayfnwr1v,hj3ftvzu))*dlog(afw *p5imx) endif else qvd7yktm = (tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(t8hwvalr(hj3ftvzu,a *yfnwr1v)) + (1.0d0 - tlgduey8(ayfnwr1v,hj3ftvzu)) * dlog(1.0d0 - t *8hwvalr(hj3ftvzu,ayfnwr1v))) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (ivqk2ywz - qvd7yktm) 23190 continue 23191 continue endif if(qfx3vhct .eq. 2)then do23204 ayfnwr1v=1,kuzxj1lo if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then xd4mybgj = t8hwvalr(hj3ftvzu,ayfnwr1v) - tlgduey8(ayfnwr1v,hj3ftvz *u) + tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(tlgduey8(ayfnwr1v,hj3ftvzu *) / t8hwvalr(hj3ftvzu,ayfnwr1v)) else xd4mybgj = t8hwvalr(hj3ftvzu,ayfnwr1v) - tlgduey8(ayfnwr1v,hj3ftvz *u) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj 23204 continue 23205 continue endif if(qfx3vhct .eq. 5)then do23210 ayfnwr1v=1,kuzxj1lo jtnbu2hz = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v)) call tldz5ion(jtnbu2hz, uqnkc6zg) if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then xd4mybgj = (jtnbu2hz - 1.0d0) * dlog(tlgduey8(ayfnwr1v,hj3ftvzu)) *+ jtnbu2hz * (dlog(jtnbu2hz) - tlgduey8(ayfnwr1v,hj3ftvzu) / t8hwv *alr(hj3ftvzu,ayfnwr1v) - dlog(t8hwvalr(hj3ftvzu,ayfnwr1v))) - uqnk *c6zg else xd4mybgj = -1000.0d0 endif xd4mybgj = -xd4mybgj bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj 23210 continue 23211 continue endif if(qfx3vhct .eq. 3)then if(cll .eq. 0)then anopu9vi = 34.0d0 do23218 ayfnwr1v=1,kuzxj1lo if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .gt. anopu9vi)then hdqsx7bk = dexp(anopu9vi) lbgwvp3q = .true. else if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .lt. -anopu9vi)then hdqsx7bk = dexp(-anopu9vi) lbgwvp3q = .true. else hdqsx7bk = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v)) lbgwvp3q = .false. endif endif if(tlgduey8(ayfnwr1v,hj3ftvzu) .lt. 1.0d0)then xd4mybgj = 1.0d0 else xd4mybgj = tlgduey8(ayfnwr1v,hj3ftvzu) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * (tlgduey8(ayfnwr1v,hj3ftv *zu) * dlog(xd4mybgj/t8hwvalr(hj3ftvzu,ayfnwr1v)) + (tlgduey8(ayfnw *r1v,hj3ftvzu)+hdqsx7bk) * dlog((t8hwvalr(hj3ftvzu,ayfnwr1v) + hdqs *x7bk) / ( hdqsx7bk+tlgduey8(ayfnwr1v,hj3ftvzu)))) 23218 continue 23219 continue else do23226 ayfnwr1v=1,kuzxj1lo hdqsx7bk = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v)) call tldz5ion(hdqsx7bk + tlgduey8(ayfnwr1v,hj3ftvzu), uqnkc6zg) call tldz5ion(hdqsx7bk, hofjnx2e) call tldz5ion(1.0d0 + tlgduey8(ayfnwr1v,hj3ftvzu), txlvcey5) xd4mybgj = hdqsx7bk * dlog(hdqsx7bk / (hdqsx7bk + t8hwvalr(hj3ftvz *u,ayfnwr1v))) + uqnkc6zg - hofjnx2e - txlvcey5 if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then xd4mybgj = xd4mybgj + tlgduey8(ayfnwr1v,hj3ftvzu) * dlog(t8hwvalr( *hj3ftvzu,ayfnwr1v) / (hdqsx7bk + t8hwvalr(hj3ftvzu,ayfnwr1v))) endif bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj 23226 continue 23227 continue bzmd6ftv = -bzmd6ftv / 2.0d0 endif endif if(qfx3vhct .eq. 8)then do23232 ayfnwr1v=1,kuzxj1lo xd4mybgj = tlgduey8(ayfnwr1v,hj3ftvzu) - t8hwvalr(hj3ftvzu,ayfnwr1 *v) bzmd6ftv = bzmd6ftv + wmat(ayfnwr1v,1) * xd4mybgj**2 23232 continue 23233 continue endif endif dev = 2.0d0 * bzmd6ftv return end subroutine flncwkfq76(lncwkfq7, w8znmyce, kuzxj1lo, br5ovgcj, xwdf *5ltg, qfx3vhct) implicit logical (a-z) integer kuzxj1lo, br5ovgcj, xwdf5ltg, qfx3vhct double precision lncwkfq7(kuzxj1lo,xwdf5ltg), w8znmyce(br5ovgcj,*) integer ayfnwr1v, sedf7mxb, hpmwnav2 if((qfx3vhct .eq. 3) .or. 
(qfx3vhct .eq.5 ))then sedf7mxb = 1 do23236 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 1.0d0 w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0 23236 continue 23237 continue sedf7mxb = sedf7mxb + 1 do23238 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 0.0d0 w8znmyce(2*ayfnwr1v, sedf7mxb) = 1.0d0 23238 continue 23239 continue sedf7mxb = sedf7mxb + 1 do23240 hpmwnav2=1,xwdf5ltg do23242 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = lncwkfq7(ayfnwr1v,hpmwnav2) w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0 23242 continue 23243 continue sedf7mxb = sedf7mxb + 1 23240 continue 23241 continue else sedf7mxb = 1 do23244 ayfnwr1v=1,kuzxj1lo w8znmyce(ayfnwr1v,sedf7mxb) = 1.0d0 23244 continue 23245 continue sedf7mxb = sedf7mxb + 1 do23246 hpmwnav2=1,xwdf5ltg do23248 ayfnwr1v=1,kuzxj1lo w8znmyce(ayfnwr1v,sedf7mxb)=lncwkfq7(ayfnwr1v,hpmwnav2) 23248 continue 23249 continue sedf7mxb = sedf7mxb + 1 23246 continue 23247 continue endif return end subroutine flncwkfq71(lncwkfq7, w8znmyce, kuzxj1lo, xwdf5ltg, qfx3 *vhct, vm4xjosb, br5ovgcj, xlpjcg3s, hyqwtp6i, tgiyxdw1, dufozmt7, *kifxa0he, p1, unhycz0e) implicit logical (a-z) integer kuzxj1lo, xwdf5ltg, qfx3vhct, br5ovgcj, xlpjcg3s, hyqwtp6i *, tgiyxdw1(hyqwtp6i), dufozmt7(hyqwtp6i), p1, unhycz0e double precision lncwkfq7(kuzxj1lo,xwdf5ltg), w8znmyce(br5ovgcj,xl *pjcg3s), kifxa0he(kuzxj1lo,p1) double precision vm4xjosb(kuzxj1lo) integer i0spbklx, ayfnwr1v, sedf7mxb, hpmwnav2 double precision tad5vhsu, uqnkc6zg if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23252 hpmwnav2=1,xwdf5ltg do23254 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,hpmwnav2) = lncwkfq7(ayfnwr1v,hpmwnav2) w8znmyce(2*ayfnwr1v ,hpmwnav2) = 0.0d0 23254 continue 23255 continue 23252 continue 23253 continue sedf7mxb = xwdf5ltg + 1 if(unhycz0e .eq. 0)then do23258 i0spbklx=1,hyqwtp6i do23260 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spb *klx)) * lncwkfq7(ayfnwr1v,dufozmt7(i0spbklx)) w8znmyce(2*ayfnwr1v ,sedf7mxb) = 0.0d0 23260 continue 23261 continue sedf7mxb = sedf7mxb + 1 23258 continue 23259 continue else do23262 ayfnwr1v=1,kuzxj1lo tad5vhsu = 0.0d0 do23264 hpmwnav2=1,xwdf5ltg uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2) tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg 23264 continue 23265 continue vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu 23262 continue 23263 continue endif else do23266 hpmwnav2=1,xwdf5ltg do23268 ayfnwr1v=1,kuzxj1lo w8znmyce(ayfnwr1v,hpmwnav2) = lncwkfq7(ayfnwr1v,hpmwnav2) 23268 continue 23269 continue 23266 continue 23267 continue sedf7mxb = xwdf5ltg + 1 if(unhycz0e .eq. 0)then do23272 i0spbklx=1,hyqwtp6i do23274 ayfnwr1v=1,kuzxj1lo w8znmyce(ayfnwr1v,sedf7mxb) = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx) *) * lncwkfq7(ayfnwr1v,dufozmt7(i0spbklx)) 23274 continue 23275 continue sedf7mxb = sedf7mxb + 1 23272 continue 23273 continue else do23276 ayfnwr1v=1,kuzxj1lo tad5vhsu = 0.0d0 do23278 hpmwnav2=1,xwdf5ltg uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2) tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg 23278 continue 23279 continue vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu 23276 continue 23277 continue endif endif if(p1 .gt. 0)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23284 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 1.0d0 w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0 23284 continue 23285 continue sedf7mxb = sedf7mxb + 1 do23286 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = 0.0d0 w8znmyce(2*ayfnwr1v, sedf7mxb) = 1.0d0 23286 continue 23287 continue sedf7mxb = sedf7mxb + 1 if(p1 .gt. 
1)then do23290 i0spbklx=2,p1 do23292 ayfnwr1v=1,kuzxj1lo w8znmyce(2*ayfnwr1v-1,sedf7mxb) = kifxa0he(ayfnwr1v,i0spbklx) w8znmyce(2*ayfnwr1v, sedf7mxb) = 0.0d0 23292 continue 23293 continue sedf7mxb = sedf7mxb + 1 23290 continue 23291 continue endif else do23294 i0spbklx=1,p1 do23296 ayfnwr1v=1,kuzxj1lo w8znmyce(ayfnwr1v,sedf7mxb) = kifxa0he(ayfnwr1v,i0spbklx) 23296 continue 23297 continue sedf7mxb = sedf7mxb + 1 23294 continue 23295 continue endif endif return end subroutine flncwkfq72(lncwkfq7, w8znmyce, kuzxj1lo, wy1vqfzu, br5o *vgcj, xwdf5ltg, qfx3vhct, afpc0kns, fmzq7aob, eu3oxvyb, hyqwtp6i, *tgiyxdw1, dufozmt7, unhycz0e, vm4xjosb) implicit logical (a-z) integer kuzxj1lo, wy1vqfzu, br5ovgcj, xwdf5ltg, qfx3vhct, afpc0kns *, fmzq7aob, eu3oxvyb, hyqwtp6i, tgiyxdw1(hyqwtp6i), dufozmt7(hyqwt *p6i), unhycz0e double precision lncwkfq7(kuzxj1lo,xwdf5ltg), w8znmyce(br5ovgcj,*) *, vm4xjosb(kuzxj1lo) integer i0spbklx, ayfnwr1v, yq6lorbx, gp1jxzuh, ptr, sedf7mxb, hpm *wnav2 double precision uqnkc6zg, tad5vhsu do23298 gp1jxzuh=1,eu3oxvyb do23300 ayfnwr1v=1,br5ovgcj w8znmyce(ayfnwr1v,gp1jxzuh) = 0.0d0 23300 continue 23301 continue 23298 continue 23299 continue sedf7mxb = 0 if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23304 hpmwnav2=1,xwdf5ltg ptr = 1 do23306 ayfnwr1v=1,kuzxj1lo do23308 yq6lorbx=1,afpc0kns w8znmyce(ptr,sedf7mxb+yq6lorbx) = lncwkfq7(ayfnwr1v,hpmwnav2) ptr = ptr + 2 23308 continue 23309 continue 23306 continue 23307 continue sedf7mxb = sedf7mxb + afpc0kns 23304 continue 23305 continue else do23310 hpmwnav2=1,xwdf5ltg ptr = 0 do23312 ayfnwr1v=1,kuzxj1lo do23314 yq6lorbx=1,wy1vqfzu ptr = ptr + 1 w8znmyce(ptr,sedf7mxb+yq6lorbx) = lncwkfq7(ayfnwr1v,hpmwnav2) 23314 continue 23315 continue 23312 continue 23313 continue sedf7mxb = sedf7mxb + wy1vqfzu 23310 continue 23311 continue endif if(fmzq7aob .eq. 0)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23320 i0spbklx=1,hyqwtp6i ptr = 1 do23322 ayfnwr1v=1,kuzxj1lo uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr *1v,dufozmt7(i0spbklx)) do23324 yq6lorbx=1,afpc0kns w8znmyce(ptr,sedf7mxb+yq6lorbx) = uqnkc6zg ptr = ptr + 2 23324 continue 23325 continue 23322 continue 23323 continue sedf7mxb = sedf7mxb + afpc0kns 23320 continue 23321 continue else do23326 i0spbklx=1,hyqwtp6i ptr = 0 do23328 ayfnwr1v=1,kuzxj1lo uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr *1v,dufozmt7(i0spbklx)) do23330 yq6lorbx=1,wy1vqfzu ptr = ptr + 1 w8znmyce(ptr,sedf7mxb+yq6lorbx) = uqnkc6zg 23330 continue 23331 continue 23328 continue 23329 continue sedf7mxb = sedf7mxb + wy1vqfzu 23326 continue 23327 continue endif else if(unhycz0e .eq. 1)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23336 ayfnwr1v=1,kuzxj1lo tad5vhsu = 0.0d0 do23338 hpmwnav2=1,xwdf5ltg uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2) tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg 23338 continue 23339 continue vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu 23336 continue 23337 continue else do23340 ayfnwr1v=1,kuzxj1lo tad5vhsu = 0.0d0 do23342 hpmwnav2=1,xwdf5ltg uqnkc6zg = lncwkfq7(ayfnwr1v,hpmwnav2) tad5vhsu = tad5vhsu + uqnkc6zg * uqnkc6zg 23342 continue 23343 continue vm4xjosb(ayfnwr1v) = -0.50d0 * tad5vhsu 23340 continue 23341 continue endif else if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 
5))then do23346 i0spbklx=1,hyqwtp6i ptr = 1 do23348 ayfnwr1v=1,kuzxj1lo uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr *1v,dufozmt7(i0spbklx)) do23350 yq6lorbx=1,afpc0kns w8znmyce(ptr,sedf7mxb+i0spbklx) = uqnkc6zg ptr = ptr + 2 23350 continue 23351 continue 23348 continue 23349 continue 23346 continue 23347 continue sedf7mxb = sedf7mxb + hyqwtp6i else do23352 i0spbklx=1,hyqwtp6i ptr = 0 do23354 ayfnwr1v=1,kuzxj1lo uqnkc6zg = lncwkfq7(ayfnwr1v,tgiyxdw1(i0spbklx)) * lncwkfq7(ayfnwr *1v,dufozmt7(i0spbklx)) do23356 yq6lorbx=1,wy1vqfzu ptr = ptr + 1 w8znmyce(ptr,sedf7mxb+i0spbklx) = uqnkc6zg 23356 continue 23357 continue 23354 continue 23355 continue 23352 continue 23353 continue sedf7mxb = sedf7mxb + hyqwtp6i endif endif endif return end subroutine ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu *, afpc0kns, qfx3vhct, hj3ftvzu, wmat, wr0lbopv) implicit logical (a-z) integer kuzxj1lo, wy1vqfzu, afpc0kns, qfx3vhct, hj3ftvzu, wr0lbopv double precision tlgduey8(kuzxj1lo,afpc0kns), m0ibglfx(wy1vqfzu,ku *zxj1lo), y7sdgtqi(15) double precision wmat(kuzxj1lo,*) double precision vogkfwt8, cumw, gyuq8dex, g2vwexykp, qa8ltuhj, kw *vo4ury, cpz4fgkx, fguvm9tyi, kinit integer ayfnwr1v if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4) .or. (qfx3vhct .eq. 3) * .or. (qfx3vhct .eq. 5))then vogkfwt8 = 0.0d0 cumw = 0.0d0 do23360 ayfnwr1v=1,kuzxj1lo vogkfwt8 = vogkfwt8 + tlgduey8(ayfnwr1v,hj3ftvzu) * wmat(ayfnwr1v, *1) cumw = cumw + wmat(ayfnwr1v,1) 23360 continue 23361 continue gyuq8dex = vogkfwt8 / cumw endif if(qfx3vhct .eq. 1)then call g2vwexyk9(gyuq8dex, g2vwexykp) do23364 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = g2vwexykp 23364 continue 23365 continue endif if(qfx3vhct .eq. 2)then do23368 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = dlog(tlgduey8(ayfnwr1v,hj3ftvzu) + 0 *.125d0) 23368 continue 23369 continue endif if(qfx3vhct .eq. 4)then call zi8qrpsb(gyuq8dex, qa8ltuhj) do23372 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = qa8ltuhj 23372 continue 23373 continue endif if(qfx3vhct .eq. 5)then if(wr0lbopv .eq. 1)then kwvo4ury = dlog(gyuq8dex + 0.03125d0) cpz4fgkx = dlog(y7sdgtqi(3+afpc0kns+hj3ftvzu)+0.01d0) do23378 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury m0ibglfx(2*hj3ftvzu, ayfnwr1v) = cpz4fgkx 23378 continue 23379 continue else if(wr0lbopv .eq. 2)then kwvo4ury = dlog((6.0/8.0)*gyuq8dex+0.000d0) cpz4fgkx = dlog(y7sdgtqi(3+afpc0kns+hj3ftvzu)+0.01d0) do23382 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx 23382 continue 23383 continue else cpz4fgkx = dlog(y7sdgtqi(3+afpc0kns+hj3ftvzu)+0.01d0) do23384 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(tlgduey8(ayfnwr1v,hj3ftvzu) * + 0.03125d0) m0ibglfx(2*hj3ftvzu, ayfnwr1v) = cpz4fgkx 23384 continue 23385 continue endif endif endif if(qfx3vhct .eq. 3)then if(wr0lbopv .eq. 1)then kwvo4ury = dlog(gyuq8dex + 0.03125d0) cpz4fgkx = dlog(y7sdgtqi(3+hj3ftvzu)+0.03125d0) do23390 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury m0ibglfx(2*hj3ftvzu,ayfnwr1v) = cpz4fgkx 23390 continue 23391 continue else if(wr0lbopv .eq. 2)then kwvo4ury = dlog(gyuq8dex + 0.03125d0) kinit = y7sdgtqi(3+hj3ftvzu) cpz4fgkx = dlog(kinit) do23394 ayfnwr1v=1,kuzxj1lo fguvm9tyi = tlgduey8(ayfnwr1v,hj3ftvzu) - gyuq8dex if(fguvm9tyi .gt. 
3.0 * gyuq8dex)then m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(dsqrt(tlgduey8(ayfnwr1v,hj3 *ftvzu))) m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx else m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx endif 23394 continue 23395 continue else if(wr0lbopv .eq. 3)then kwvo4ury = dlog(gyuq8dex + 0.03125d0) kinit = y7sdgtqi(3+hj3ftvzu) cpz4fgkx = dlog(kinit) do23400 ayfnwr1v=1,kuzxj1lo fguvm9tyi = tlgduey8(ayfnwr1v,hj3ftvzu) - gyuq8dex if(fguvm9tyi .gt. gyuq8dex)then m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(0.5*(tlgduey8(ayfnwr1v,hj3f *tvzu)+gyuq8dex)) m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = dlog(kinit / (fguvm9tyi / gyuq8de *x)) else if(tlgduey8(ayfnwr1v,hj3ftvzu) .lt. (gyuq8dex / 4.0))then m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(gyuq8dex / 4.0) m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx else m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = kwvo4ury m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = cpz4fgkx endif endif 23400 continue 23401 continue else cpz4fgkx = dlog(y7sdgtqi(3+hj3ftvzu)) do23406 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = dlog(tlgduey8(ayfnwr1v,hj3ftvzu) * + 0.03125d0) m0ibglfx(2*hj3ftvzu, ayfnwr1v) = cpz4fgkx 23406 continue 23407 continue endif endif endif endif if(qfx3vhct .eq. 8)then do23410 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = tlgduey8(ayfnwr1v,hj3ftvzu) 23410 continue 23411 continue endif return end subroutine dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, *rbne6ouj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqf *zu, afpc0kns, br5ovgcj, dimu, hj3ftvzu, qfx3vhct, zjkrtol8, unhycz *0e, vm4xjosb) implicit logical (a-z) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, hj3ftvzu, zj *krtol8, unhycz0e double precision tlgduey8(kuzxj1lo,afpc0kns), wmat(kuzxj1lo,*), m0 *ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo), vm4xjosb(k *uzxj1lo), ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu) *, wpuarq2m(dimu,kuzxj1lo), rsynp1go, n3iasxug, uaf2xgqy integer ayfnwr1v, qfx3vhct double precision xd4mybgja, xd4mybgjb, xd4mybgjc, anopu9vi logical lbgwvp3q double precision hdqsx7bk, dkdeta, dldk, ux3nadiw, ed2ldk2, n2kers *mx, bzmd6ftvmat(1,1), kkmat(1,1), nm0eljqk(1,1) integer hbsl0gto, dvhw1ulq, sguwj9ty double precision jtnbu2hz, uqnkc6zgd, uqnkc6zgt, dldshape double precision fvn3iasxug, xk7dnvei integer okobr6tcex br5ovgcj = 1 hbsl0gto = 1 n2kersmx = 0.990d0 n2kersmx = 0.995d0 if(qfx3vhct .eq. 1)then do23414 ayfnwr1v=1,kuzxj1lo xd4mybgja = t8hwvalr(hj3ftvzu,ayfnwr1v) * (1.0d0 - t8hwvalr(hj3ftv *zu,ayfnwr1v)) xd4mybgjb = xd4mybgja * wmat(ayfnwr1v,1) if(xd4mybgja .lt. n3iasxug)then xd4mybgja = n3iasxug endif if(xd4mybgjb .lt. n3iasxug)then xd4mybgjb = n3iasxug wpuarq2m(hj3ftvzu,ayfnwr1v) = uaf2xgqy else wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb) endif rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu *ey8(ayfnwr1v,hj3ftvzu)-t8hwvalr(hj3ftvzu,ayfnwr1v)) / xd4mybgja 23414 continue 23415 continue endif if(qfx3vhct .eq. 2)then do23422 ayfnwr1v=1,kuzxj1lo xd4mybgja = t8hwvalr(hj3ftvzu,ayfnwr1v) xd4mybgjb = xd4mybgja * wmat(ayfnwr1v,1) if(xd4mybgjb .lt. n3iasxug)then xd4mybgjb = n3iasxug wpuarq2m(hj3ftvzu,ayfnwr1v) = uaf2xgqy else wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb) endif rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb if(tlgduey8(ayfnwr1v,hj3ftvzu) .gt. 0.0d0)then xd4mybgjc = xd4mybgja if(xd4mybgjc .lt. 
n3iasxug)then xd4mybgjc = n3iasxug endif ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu *ey8(ayfnwr1v,hj3ftvzu)-xd4mybgjc)/xd4mybgjc else ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) - 1.0d0 endif 23422 continue 23423 continue endif if(qfx3vhct .eq. 4)then do23432 ayfnwr1v=1,kuzxj1lo if((t8hwvalr(hj3ftvzu,ayfnwr1v) .lt. n3iasxug) .or. (t8hwvalr(hj3f *tvzu,ayfnwr1v) .gt. 1.0d0 - n3iasxug))then xd4mybgja = n3iasxug xd4mybgjb = xd4mybgja * wmat(ayfnwr1v,1) if(xd4mybgjb .lt. n3iasxug)then xd4mybgjb = n3iasxug wpuarq2m(hj3ftvzu,ayfnwr1v) = uaf2xgqy else wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb) endif rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu *ey8(ayfnwr1v,hj3ftvzu)-t8hwvalr(hj3ftvzu,ayfnwr1v)) / xd4mybgja else xd4mybgja = -(1.0d0 - t8hwvalr(hj3ftvzu,ayfnwr1v)) * dlog(1.0d0 - *t8hwvalr(hj3ftvzu,ayfnwr1v)) if(xd4mybgja .lt. n3iasxug)then xd4mybgja = n3iasxug endif xd4mybgjb = -xd4mybgja * wmat(ayfnwr1v,1) * dlog(1.0d0 - t8hwvalr( *hj3ftvzu,ayfnwr1v)) / t8hwvalr(hj3ftvzu,ayfnwr1v) if(xd4mybgjb .lt. n3iasxug)then xd4mybgjb = n3iasxug endif rbne6ouj(ayfnwr1v,hj3ftvzu) = xd4mybgjb wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(xd4mybgjb) ghz9vuba(ayfnwr1v,hj3ftvzu) = m0ibglfx(hj3ftvzu,ayfnwr1v) + (tlgdu *ey8(ayfnwr1v,hj3ftvzu)-t8hwvalr(hj3ftvzu,ayfnwr1v)) / xd4mybgja endif 23432 continue 23433 continue endif if(qfx3vhct .eq. 5)then fvn3iasxug = 1.0d-20 anopu9vi = 34.0d0 do23444 ayfnwr1v=1,kuzxj1lo if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .gt. anopu9vi)then jtnbu2hz = dexp(anopu9vi) lbgwvp3q = .true. else if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .lt. -anopu9vi)then jtnbu2hz = dexp(-anopu9vi) lbgwvp3q = .true. else jtnbu2hz = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v)) lbgwvp3q = .false. endif endif call vdgam1(jtnbu2hz, uqnkc6zgd, okobr6tcex) if(okobr6tcex .ne. 1)then call intpr("error in dlgpwe0c okobr6tcex 1: ",-1,okobr6tcex,1) endif xk7dnvei = t8hwvalr(hj3ftvzu,ayfnwr1v) if(xk7dnvei .lt. fvn3iasxug)then xk7dnvei = fvn3iasxug endif dldshape = dlog(tlgduey8(ayfnwr1v,hj3ftvzu)) + dlog(jtnbu2hz) - dl *og(xk7dnvei) + 1.0d0 - uqnkc6zgd - tlgduey8(ayfnwr1v,hj3ftvzu) / x *k7dnvei call vtgam1(jtnbu2hz, uqnkc6zgt, okobr6tcex) if(okobr6tcex .ne. 1)then call intpr("error in dlgpwe0c okobr6tcex 2: ",-1,okobr6tcex,1) endif rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = wmat(ayfnwr1v,1) * jtnbu2hz xd4mybgja = jtnbu2hz * uqnkc6zgt - 1.0d0 rbne6ouj(ayfnwr1v,2*hj3ftvzu ) = wmat(ayfnwr1v,1) * jtnbu2hz * xd4 *mybgja if(rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) .lt. n3iasxug)then rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = n3iasxug wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = uaf2xgqy else wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftv *zu-1)) endif if(rbne6ouj(ayfnwr1v,2*hj3ftvzu) .lt. n3iasxug)then rbne6ouj(ayfnwr1v,2*hj3ftvzu) = n3iasxug wpuarq2m(2*hj3ftvzu,ayfnwr1v) = uaf2xgqy else wpuarq2m(2*hj3ftvzu,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftvzu *)) endif if(xd4mybgja .lt. fvn3iasxug)then xd4mybgja = fvn3iasxug endif ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) *+ tlgduey8(ayfnwr1v,hj3ftvzu) / xk7dnvei - 1.0d0 ghz9vuba(ayfnwr1v,2*hj3ftvzu ) = m0ibglfx(2*hj3ftvzu ,ayfnwr1v) + *dldshape / xd4mybgja 23444 continue 23445 continue endif if(qfx3vhct .eq. 3)then anopu9vi = 34.0d0 fvn3iasxug = 1.0d-20 do23464 ayfnwr1v=1,kuzxj1lo if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .gt. anopu9vi)then hdqsx7bk = dexp(anopu9vi) lbgwvp3q = .true. else if(m0ibglfx(2*hj3ftvzu,ayfnwr1v) .lt. -anopu9vi)then hdqsx7bk = dexp(-anopu9vi) lbgwvp3q = .true. 
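c In this negative binomial branch of dlgpwe0c (qfx3vhct .eq. 3) the
c test above caps the second linear predictor, log(k), at +/- 34
c before it is exponentiated, so dexp() can neither overflow nor
c underflow; lbgwvp3q records that the cap fired, and the deviance
c routine shjlwft5 applies the same cap and uses its local copy of
c the flag to skip the log-gamma terms in that case.  A minimal
c sketch of the guard, with the hypothetical name eta2:
c      eta2 = m0ibglfx(2*hj3ftvzu,ayfnwr1v)
c      if(eta2 .gt. 34.0d0) eta2 = 34.0d0
c      if(eta2 .lt. -34.0d0) eta2 = -34.0d0
c      hdqsx7bk = dexp(eta2)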
else hdqsx7bk = dexp(m0ibglfx(2*hj3ftvzu,ayfnwr1v)) lbgwvp3q = .false. endif endif xk7dnvei = t8hwvalr(hj3ftvzu,ayfnwr1v) if(xk7dnvei .lt. fvn3iasxug)then xk7dnvei = fvn3iasxug endif call vdgam1(tlgduey8(ayfnwr1v,hj3ftvzu) + hdqsx7bk, xd4mybgja, oko *br6tcex) if(okobr6tcex .ne. 1)then endif call vdgam1(hdqsx7bk, xd4mybgjb, okobr6tcex) if(okobr6tcex .ne. 1)then endif dldk = xd4mybgja - xd4mybgjb - (tlgduey8(ayfnwr1v,hj3ftvzu) + hdqs *x7bk) / (xk7dnvei + hdqsx7bk) + 1.0d0 + dlog(hdqsx7bk / (xk7dnvei *+ hdqsx7bk)) dkdeta = hdqsx7bk kkmat(1,1) = hdqsx7bk nm0eljqk(1,1) = xk7dnvei sguwj9ty = 5000 call enbin9(bzmd6ftvmat, kkmat, nm0eljqk, n2kersmx, hbsl0gto, dvhw *1ulq, hbsl0gto, ux3nadiw, rsynp1go, sguwj9ty) if(dvhw1ulq .ne. 1)then zjkrtol8 = 5 return endif ed2ldk2 = -bzmd6ftvmat(1,1) - 1.0d0 / hdqsx7bk + 1.0d0 / (hdqsx7bk * + xk7dnvei) rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = wmat(ayfnwr1v,1) * xk7dnvei * hd *qsx7bk / (xk7dnvei + hdqsx7bk) rbne6ouj(ayfnwr1v,2*hj3ftvzu ) = wmat(ayfnwr1v,1) * hdqsx7bk * (-b *zmd6ftvmat(1,1)*hdqsx7bk - 1.0d0 + hdqsx7bk / (hdqsx7bk + xk7dnvei *)) if(rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) .lt. n3iasxug)then rbne6ouj(ayfnwr1v,2*hj3ftvzu-1) = n3iasxug wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = uaf2xgqy else wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftv *zu-1)) endif if(rbne6ouj(ayfnwr1v,2*hj3ftvzu) .lt. n3iasxug)then rbne6ouj(ayfnwr1v,2*hj3ftvzu) = n3iasxug wpuarq2m(2*hj3ftvzu,ayfnwr1v) = uaf2xgqy else wpuarq2m(2*hj3ftvzu,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,2*hj3ftvzu *)) endif ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) *+ tlgduey8(ayfnwr1v,hj3ftvzu) / xk7dnvei - 1.0d0 ghz9vuba(ayfnwr1v,2*hj3ftvzu ) = m0ibglfx(2*hj3ftvzu ,ayfnwr1v) + *dldk / (dkdeta * ed2ldk2) 23464 continue 23465 continue endif if(qfx3vhct .eq. 8)then do23484 ayfnwr1v=1,kuzxj1lo rbne6ouj(ayfnwr1v,hj3ftvzu) = wmat(ayfnwr1v,1) wpuarq2m(hj3ftvzu,ayfnwr1v) = dsqrt(rbne6ouj(ayfnwr1v,hj3ftvzu)) ghz9vuba(ayfnwr1v,hj3ftvzu) = tlgduey8(ayfnwr1v,hj3ftvzu) 23484 continue 23485 continue endif if(unhycz0e .eq. 1)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 
5))then do23490 ayfnwr1v=1,kuzxj1lo ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) = ghz9vuba(ayfnwr1v,2*hj3ftvzu-1) *- vm4xjosb(ayfnwr1v) 23490 continue 23491 continue else do23492 ayfnwr1v=1,kuzxj1lo ghz9vuba(ayfnwr1v,hj3ftvzu) = ghz9vuba(ayfnwr1v,hj3ftvzu) - vm4xjo *sb(ayfnwr1v) 23492 continue 23493 continue endif endif return end subroutine cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4 *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm *, y7sdgtqi) implicit logical (a-z) integer xui7hqwl(18), tgiyxdw1(*), dufozmt7(*) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge *s1xpkr(*) double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns) *, kifxa0he(kuzxj1lo,*), wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1 *lo), vm4xjosb(kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo) double precision ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy *1vqfzu), wpuarq2m(dimu,kuzxj1lo), w8znmyce(br5ovgcj,*) double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta *(*), y7sdgtqi(*) double precision twk(wy1vqfzu,kuzxj1lo,2), wkmm(wy1vqfzu * (wy1vqf *zu + 1)) integer ayfnwr1v, yq6lorbx, gp1jxzuh, hyqwtp6i, ptr, i1loc, i2, iz *ero0, iter, fmzq7aob, xwdf5ltg, dimw, f7svlajr, qfx3vhct, c5aesxku *l integer job, info, qemj9asg, xlpjcg3s, eu3oxvyb, vtsou9pz, unhycz0 *e, zaupqv9b integer hbsl0gto, wr0lbopv double precision rpto5qwb, n3iasxug, pvofyg8z, wiptsjx8, uylxqtc7, * bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go double precision hmayv1xt1, hmayv1xt2 integer x1jrewny hbsl0gto = 1 x1jrewny = 0 kifxa0he(1,1) = 1 wkmm(1) = 0.0d0 xwdf5ltg = xui7hqwl(1) fmzq7aob = xui7hqwl(2) xlpjcg3s = xui7hqwl(3) dimw = xui7hqwl(4) f7svlajr = xui7hqwl(5) qfx3vhct = xui7hqwl(6) c5aesxkul = xui7hqwl(7) xui7hqwl(9) = 0 eu3oxvyb = xui7hqwl(11) vtsou9pz = xui7hqwl(12) unhycz0e = xui7hqwl(14) zaupqv9b = xui7hqwl(15) wr0lbopv = xui7hqwl(18) n3iasxug = y7sdgtqi(1) uaf2xgqy = dsqrt(n3iasxug) if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then vsoihn1r = dlog(n3iasxug) endif bh2vgiay = y7sdgtqi(2) rsynp1go = y7sdgtqi(3) uylxqtc7 = 0.0d0 izero0 = 0 zjkrtol8 = 1 call qpsedg8xf(tgiyxdw1, dufozmt7, xwdf5ltg) hyqwtp6i = xwdf5ltg * (xwdf5ltg+1) / 2 call flncwkfq72(lncwkfq7, w8znmyce, kuzxj1lo, wy1vqfzu, br5ovgcj, *xwdf5ltg, qfx3vhct, afpc0kns, fmzq7aob, eu3oxvyb, hyqwtp6i, tgiyxd *w1, dufozmt7, unhycz0e, vm4xjosb) 653 hmayv1xt2 = 1.0d0 if(f7svlajr .eq. 0)then do23498 yq6lorbx=1,afpc0kns call ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu, afpc *0kns, qfx3vhct, yq6lorbx, wmat, wr0lbopv) 23498 continue 23499 continue else if(f7svlajr .eq. 2)then call pkc4ejib(w8znmyce, beta, m0ibglfx, kuzxj1lo, wy1vqfzu, br5ovg *cj, xlpjcg3s, vtsou9pz, izero0, qfx3vhct, unhycz0e, vm4xjosb) endif endif call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf *x3vhct, izero0) if(f7svlajr .eq. 
2)then call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, rpto5qwb, izero0, n3iasxug, vsoihn1r *, hbsl0gto) else rpto5qwb = -1.0d0 endif do23504 iter=1,c5aesxkul do23506 yq6lorbx=1,afpc0kns call dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, rbne6o *uj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqfzu, af *pc0kns, br5ovgcj, dimu, yq6lorbx, qfx3vhct, zjkrtol8, unhycz0e, vm *4xjosb) 23506 continue 23507 continue do23508 yq6lorbx=1,xlpjcg3s do23510 ayfnwr1v=1,br5ovgcj vc6hatuj(ayfnwr1v,yq6lorbx) = w8znmyce(ayfnwr1v,yq6lorbx) 23510 continue 23511 continue 23508 continue 23509 continue do23512 yq6lorbx=1,xlpjcg3s ptr = 1 do23514 i1loc=1,kuzxj1lo do23516 i2=1,wy1vqfzu vc6hatuj(ptr,yq6lorbx) = wpuarq2m(i2,i1loc) * vc6hatuj(ptr,yq6lorb *x) ptr = ptr + 1 23516 continue 23517 continue 23514 continue 23515 continue 23512 continue 23513 continue do23518 gp1jxzuh=1,xlpjcg3s ges1xpkr(gp1jxzuh) = gp1jxzuh 23518 continue 23519 continue pvofyg8z = 1.0d-7 call vqrdca(vc6hatuj,br5ovgcj,br5ovgcj,xlpjcg3s,fasrkub3,ges1xpkr, *twk,qemj9asg,pvofyg8z) if(qemj9asg .ne. xlpjcg3s)then zjkrtol8 = 2 return endif do23522 ayfnwr1v=1,kuzxj1lo do23524 yq6lorbx=1,wy1vqfzu twk(yq6lorbx,ayfnwr1v,1) = wpuarq2m(yq6lorbx,ayfnwr1v) * ghz9vuba( *ayfnwr1v,yq6lorbx) 23524 continue 23525 continue 23522 continue 23523 continue job = 101 call vdqrsl(vc6hatuj,br5ovgcj,br5ovgcj,qemj9asg,fasrkub3, twk, uyl *xqtc7, twk(1,1,2), beta, uylxqtc7,m0ibglfx,job,info) do23526 ayfnwr1v=1,kuzxj1lo do23528 yq6lorbx=1,wy1vqfzu m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) / wpuarq *2m(yq6lorbx,ayfnwr1v) 23528 continue 23529 continue 23526 continue 23527 continue if(unhycz0e .eq. 1)then if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23534 ayfnwr1v=1,kuzxj1lo do23536 yq6lorbx=1,afpc0kns m0ibglfx(2*yq6lorbx-1,ayfnwr1v) = m0ibglfx(2*yq6lorbx-1,ayfnwr1v) *+ vm4xjosb(ayfnwr1v) 23536 continue 23537 continue 23534 continue 23535 continue else do23538 ayfnwr1v=1,kuzxj1lo do23540 yq6lorbx=1,wy1vqfzu m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + vm4xjo *sb(ayfnwr1v) 23540 continue 23541 continue 23538 continue 23539 continue endif endif call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf *x3vhct, izero0) call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,izero0,n3iasxug,vsoihn1r, h *bsl0gto) wiptsjx8 = dabs(tlq9wpes - rpto5qwb) / (1.0d0 + dabs(tlq9wpes)) if(wiptsjx8 .lt. bh2vgiay)then zjkrtol8 = 0 xui7hqwl(8) = iter if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,izero0,n3iasxug,vsoihn1r, i *zero0) endif x1jrewny = 1 goto 20097 else rpto5qwb = tlq9wpes x1jrewny = 0 endif 23504 continue 23505 continue 20097 hmayv1xt1 = 0.0d0 if(x1jrewny .eq. 1)then return endif if(f7svlajr .eq. 1 .or. f7svlajr .eq. 
2)then f7svlajr = 0 xui7hqwl(9) = 1 goto 653 endif zjkrtol8 = 3 return end subroutine cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4 *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm *, y7sdgtqi) implicit logical (a-z) integer xui7hqwl(18), tgiyxdw1(*), dufozmt7(*) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge *s1xpkr(*) double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns) *, wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1lo), vm4xjosb(kuzxj1lo *), t8hwvalr(afpc0kns,kuzxj1lo), kifxa0he(kuzxj1lo,*), ghz9vuba(kuz *xj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu), wpuarq2m(dimu,kuzxj1 *lo), w8znmyce(br5ovgcj,*) double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta *(*), y7sdgtqi(*) double precision twk(br5ovgcj,3), wkmm(wy1vqfzu*(wy1vqfzu+1)) integer ayfnwr1v, hj3ftvzu, hyqwtp6i, izero0, iter, fmzq7aob, unhy *cz0e, xwdf5ltg, dimw, f7svlajr, qfx3vhct, c5aesxkul integer job, info, qemj9asg, xlpjcg3s, vtsou9pz, zaupqv9b integer hbsl0gto, p1, wr0lbopv double precision rpto5qwb, n3iasxug, pvofyg8z, wiptsjx8, uylxqtc7, * bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go integer gp1jxzuh double precision aqg1vdmo, hmayv1xt aqg1vdmo = 0.0d0 hbsl0gto = 1 wkmm(1) = 1.0d0 call intpr("entering cqo1f hbsl0gto ------------------------------ *-: ",-1,hbsl0gto,1) call intpr("in cqo1f afpc0kns: ",-1,afpc0kns,1) xwdf5ltg = xui7hqwl(1) fmzq7aob = xui7hqwl(2) xlpjcg3s = xui7hqwl(3) dimw = xui7hqwl(4) f7svlajr = xui7hqwl(5) qfx3vhct = xui7hqwl(6) c5aesxkul = xui7hqwl(7) xui7hqwl(9) = 0 vtsou9pz = xui7hqwl(12) if(vtsou9pz .ne. 1)then zjkrtol8 = 4 return endif unhycz0e = xui7hqwl(14) zaupqv9b = xui7hqwl(15) p1 = xui7hqwl(16) wr0lbopv = xui7hqwl(18) call intpr("Entry to cqo1f: f7svlajr ",-1,f7svlajr,1) n3iasxug = y7sdgtqi(1) uaf2xgqy = dsqrt(n3iasxug) if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then vsoihn1r = dlog(n3iasxug) endif bh2vgiay = y7sdgtqi(2) rsynp1go = y7sdgtqi(3) uylxqtc7 = 0.0d0 izero0 = 0 zjkrtol8 = 1 call qpsedg8xf(tgiyxdw1, dufozmt7, xwdf5ltg) hyqwtp6i = xwdf5ltg * (xwdf5ltg+1) / 2 call flncwkfq71(lncwkfq7, w8znmyce, kuzxj1lo, xwdf5ltg, qfx3vhct, *vm4xjosb, br5ovgcj, xlpjcg3s, hyqwtp6i, tgiyxdw1, dufozmt7, kifxa0 *he, p1, unhycz0e) call dblepr("cqo1f: vm4xjosb()",-1,vm4xjosb,kuzxj1lo) call dblepr("cqo1f: w8znmyce(,)",-1,w8znmyce,br5ovgcj*xlpjcg3s) call dblepr("cqo1f: wmat(,1)",-1,wmat(1,1),kuzxj1lo) do23554 hj3ftvzu=1,afpc0kns call intpr("cqo1f: hj3ftvzu======================: ",-1,hj3ftvzu,1 *) 653 hmayv1xt = 1.0d0 if(f7svlajr .eq. 0)then call intpr("cqo1f: calling ietam6 ",-1,hj3ftvzu,1) call ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu, afpc *0kns, qfx3vhct, hj3ftvzu, wmat, wr0lbopv) else if(f7svlajr .eq. 2)then call intpr("cqo1f: calling pkc4ejib; vtsou9pz== ",-1,vtsou9pz,1) call pkc4ejib(w8znmyce, beta(1+(hj3ftvzu-1)*xlpjcg3s), m0ibglfx, k *uzxj1lo, wy1vqfzu, br5ovgcj, xlpjcg3s, vtsou9pz, hj3ftvzu, qfx3vhc *t, unhycz0e, vm4xjosb) endif endif call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf *x3vhct, hj3ftvzu) if(f7svlajr .eq. 
2)then call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, rpto5qwb, hj3ftvzu, n3iasxug, vsoihn *1r, hbsl0gto) else rpto5qwb = -1.0d0 endif do23562 iter=1,c5aesxkul call intpr("iter: ",-1,iter,1) call intpr("posn 7: ",-1,hbsl0gto,1) call intpr("qfx3vhct: ",-1,qfx3vhct,1) call dblepr("rpto5qwb",-1,rpto5qwb,1) call dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, rbne6o *uj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqfzu, af *pc0kns, br5ovgcj, dimu, hj3ftvzu, qfx3vhct, zjkrtol8, unhycz0e, vm *4xjosb) call dblepr("cqo1f: m0ibglfx",-1,m0ibglfx,wy1vqfzu*kuzxj1lo) call dblepr("cqo1f: wpuarq2m",-1,wpuarq2m,dimu*kuzxj1lo) call dblepr("cqo1f: ghz9vuba",-1,ghz9vuba,kuzxj1lo*wy1vqfzu) call dblepr("cqo1f: rbne6ouj",-1,rbne6ouj,kuzxj1lo*wy1vqfzu) do23564 gp1jxzuh=1,xlpjcg3s do23566 ayfnwr1v=1,br5ovgcj vc6hatuj(ayfnwr1v,gp1jxzuh) = w8znmyce(ayfnwr1v,gp1jxzuh) 23566 continue 23567 continue 23564 continue 23565 continue call intpr("posn 3: ",-1,hbsl0gto,1) if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23570 gp1jxzuh=1,xlpjcg3s do23572 ayfnwr1v=1,kuzxj1lo vc6hatuj(2*ayfnwr1v-1,gp1jxzuh) = wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) ** vc6hatuj(2*ayfnwr1v-1,gp1jxzuh) vc6hatuj(2*ayfnwr1v ,gp1jxzuh) = wpuarq2m(2*hj3ftvzu ,ayfnwr1v) * *vc6hatuj(2*ayfnwr1v ,gp1jxzuh) 23572 continue 23573 continue 23570 continue 23571 continue else do23574 gp1jxzuh=1,xlpjcg3s do23576 ayfnwr1v=1,kuzxj1lo vc6hatuj(ayfnwr1v,gp1jxzuh) = wpuarq2m(hj3ftvzu,ayfnwr1v) * vc6hat *uj(ayfnwr1v,gp1jxzuh) 23576 continue 23577 continue 23574 continue 23575 continue endif call intpr("posn 4: ",-1,hbsl0gto,1) do23578 gp1jxzuh=1,xlpjcg3s ges1xpkr(gp1jxzuh) = gp1jxzuh 23578 continue 23579 continue call dblepr("cqo1f: vc6hatuj",-1,vc6hatuj,br5ovgcj*xlpjcg3s) call intpr("iter: ",-1,iter,1) pvofyg8z = 1.0d-7 call vqrdca(vc6hatuj,br5ovgcj,br5ovgcj,xlpjcg3s,fasrkub3,ges1xpkr, *twk,qemj9asg,pvofyg8z) call intpr("ges1xpkr: ",-1,ges1xpkr,xlpjcg3s) if(qemj9asg .ne. xlpjcg3s)then zjkrtol8 = 2 return endif if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23584 ayfnwr1v=1,kuzxj1lo twk(2*ayfnwr1v-1,1) = wpuarq2m(2*hj3ftvzu-1,ayfnwr1v) * ghz9vuba(a *yfnwr1v,2*hj3ftvzu-1) twk(2*ayfnwr1v ,1) = wpuarq2m(2*hj3ftvzu ,ayfnwr1v) * ghz9vuba(ayf *nwr1v,2*hj3ftvzu ) 23584 continue 23585 continue else do23586 ayfnwr1v=1,kuzxj1lo twk(ayfnwr1v,1) = wpuarq2m(hj3ftvzu,ayfnwr1v) * ghz9vuba(ayfnwr1v, *hj3ftvzu) 23586 continue 23587 continue endif call intpr("posn 5: ",-1,hbsl0gto,1) job = 101 call intpr("posn 6: ",-1,hbsl0gto,1) call vdqrsl(vc6hatuj,br5ovgcj,br5ovgcj,qemj9asg,fasrkub3, twk(1,1) *, uylxqtc7, twk(1,2), beta(1+(hj3ftvzu-1)*xlpjcg3s), uylxqtc7,twk( *1,3),job,info) call dblepr("beta(1+(hj3ftvzu-1)*xlpjcg3s)",-1,beta(1+(hj3ftvzu-1) **xlpjcg3s),xlpjcg3s) if(zaupqv9b .gt. 1)then endif do23590 gp1jxzuh=1,xlpjcg3s twk(gp1jxzuh,1) = beta((hj3ftvzu-1)*xlpjcg3s + gp1jxzuh) 23590 continue 23591 continue do23592 gp1jxzuh=1,xlpjcg3s beta((hj3ftvzu-1)*xlpjcg3s + ges1xpkr(gp1jxzuh)) = twk(gp1jxzuh,1) 23592 continue 23593 continue call intpr("posn 7: ",-1,hbsl0gto,1) if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then do23596 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = twk(2*ayfnwr1v-1,3) / wpuarq2m(2 **hj3ftvzu-1,ayfnwr1v) m0ibglfx(2*hj3ftvzu ,ayfnwr1v) = twk(2*ayfnwr1v ,3) / wpuarq2m(2*h *j3ftvzu ,ayfnwr1v) 23596 continue 23597 continue if(unhycz0e .eq. 
1)then do23600 ayfnwr1v=1,kuzxj1lo m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) = m0ibglfx(2*hj3ftvzu-1,ayfnwr1v) *+ vm4xjosb(ayfnwr1v) 23600 continue 23601 continue endif else do23602 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = twk(ayfnwr1v,3) / wpuarq2m(hj3ftvzu, *ayfnwr1v) 23602 continue 23603 continue if(unhycz0e .eq. 1)then do23606 ayfnwr1v=1,kuzxj1lo m0ibglfx(hj3ftvzu,ayfnwr1v) = m0ibglfx(hj3ftvzu,ayfnwr1v) + vm4xjo *sb(ayfnwr1v) 23606 continue 23607 continue endif endif call intpr("posn 8: ",-1,hbsl0gto,1) call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf *x3vhct, hj3ftvzu) call intpr("posn 8b: ",-1,hbsl0gto,1) call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,hj3ftvzu,n3iasxug,vsoihn1r, *hbsl0gto) call intpr("posn 8c: ",-1,hbsl0gto,1) wiptsjx8 = dabs(tlq9wpes - rpto5qwb) / (1.0d0 + dabs(tlq9wpes)) call intpr("cqo1f: iter -------------",-1,iter,1) call dblepr("cqo1f: wiptsjx8",-1,wiptsjx8,1) if(wiptsjx8 .lt. bh2vgiay)then zjkrtol8 = 0 xui7hqwl(8)=iter call intpr("cqo1f xui7hqwl(8): ",-1,xui7hqwl(8),1) if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,hj3ftvzu,n3iasxug,vsoihn1r, * izero0) endif aqg1vdmo = aqg1vdmo + tlq9wpes goto 1011 else rpto5qwb = tlq9wpes endif call intpr("posn 9: ",-1,hbsl0gto,1) 23562 continue 23563 continue call intpr("cqo1f; unsuccessful convergence: ",-1,hbsl0gto,1) if(f7svlajr .eq. 1)then f7svlajr = 0 xui7hqwl(9) = 1 goto 653 endif zjkrtol8 = 3 1011 hmayv1xt = 1.0d0 23554 continue 23555 continue call intpr("exiting cqo1f hbsl0gto ============================ : *",-1,hbsl0gto,1) tlq9wpes = aqg1vdmo return end subroutine vcao6f(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, gh *z9vuba, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo *, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, tlq9wpes *, beta, twk, wkmm, y7sdgtqi, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up *, nef, which, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, *hjm2ktyr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkm *m, work3, sgdub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbz *jkpi3, acpios9q, itwk, jwbkl9fp) implicit logical (a-z) integer xui7hqwl(19) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge *s1xpkr(*) double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns) *, wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns *,kuzxj1lo) double precision ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy *1vqfzu), wpuarq2m(dimu,kuzxj1lo) double precision vc6hatuj(br5ovgcj,2), fasrkub3(*), tlq9wpes, beta *(*), y7sdgtqi(*) double precision twk(br5ovgcj,3), wkmm(wy1vqfzu*(wy1vqfzu+1)) integer hj3ftvzu, ehtjigf4, izero0, iter, xwdf5ltg, dimw, f7svlajr *, qfx3vhct, c5aesxkul integer vtsou9pz, zaupqv9b, xlpjcg3s integer hbsl0gto, sedf7mxb double precision rpto5qwb, n3iasxug, wiptsjx8, uylxqtc7, bh2vgiay, * uaf2xgqy, vsoihn1r, rsynp1go double precision aqg1vdmo, hmayv1xt integer psdvgce3(15), qfozcl5b, ezlgm2up(*),nef(*),which(*), jnxpu *ym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*) integer wr0lbopv, acpios9q(*), itwk(*), jwbkl9fp(*) integer nbzjkpi3(*) double precision kiye1wjz(*) double precision ub4xioar(qfozcl5b,kuzxj1lo), kispwgx3(kuzxj1lo,*) *,s0(wy1vqfzu), zyodca3j(qfozcl5b,kuzxj1lo), lxyst1eb(qfozcl5b,kuzx *j1lo), mbvnaor6(kuzxj1lo,*), hjm2ktyr(qfozcl5b,*), work1(*), wk2(k *uzxj1lo,qfozcl5b), work3(*), sgdub(*), bmb(*), ifys6woa(*), mwk(*) *, rpyis2kc(*), 
zv2xfhei(*) integer qes4mujl integer ayfnwr1v, kij0gwer, xumj5dnk integer irhm4cfa, lyma1kwc double precision xbignn(2), lncrw8mg, ufkq9rpg, r3eoxkzp, wld4qctn double precision zpcqv3uj, resss double precision vm4xjosb(2) lncrw8mg=0.0d0 ufkq9rpg=0.0d0 r3eoxkzp=0.0d0 wld4qctn=0.0d0 irhm4cfa = xui7hqwl(19) aqg1vdmo = 0.0d0 hbsl0gto = 1 wkmm(1) = 1.0d0 twk(1,1) = 1.0d0 xwdf5ltg = xui7hqwl(1) xlpjcg3s = xui7hqwl(3) dimw = xui7hqwl(4) f7svlajr = xui7hqwl(5) qfx3vhct = xui7hqwl(6) c5aesxkul = xui7hqwl(7) xui7hqwl(9) = 0 lyma1kwc = xui7hqwl(11) vtsou9pz = xui7hqwl(12) if((vtsou9pz .ne. 1) .or. (lyma1kwc .ne. xwdf5ltg))then zjkrtol8 = 4 return endif zaupqv9b = xui7hqwl(15) wr0lbopv = xui7hqwl(18) zpcqv3uj = y7sdgtqi(3+afpc0kns+afpc0kns+2) n3iasxug = y7sdgtqi(1) uaf2xgqy = dsqrt(n3iasxug) if((qfx3vhct .eq. 1) .or. (qfx3vhct .eq. 4))then vsoihn1r = dlog(n3iasxug) endif bh2vgiay = y7sdgtqi(2) rsynp1go = y7sdgtqi(3) uylxqtc7 = 0.0d0 izero0 = 0 zjkrtol8 = 1 do23618 hj3ftvzu=1,afpc0kns 653 hmayv1xt = 1.0d0 if(f7svlajr .eq. 0)then call ietam6(tlgduey8, m0ibglfx, y7sdgtqi, kuzxj1lo, wy1vqfzu, afpc *0kns, qfx3vhct, hj3ftvzu, wmat, wr0lbopv) else if(f7svlajr .ne. 1)then zjkrtol8 = 6 return endif endif call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf *x3vhct, hj3ftvzu) if(f7svlajr .eq. 2)then call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, rpto5qwb, hj3ftvzu, n3iasxug, vsoihn *1r, hbsl0gto) else rpto5qwb = -1.0d0 endif do23626 iter=1,c5aesxkul call flncwkfq76(lncwkfq7, vc6hatuj, kuzxj1lo, br5ovgcj, xwdf5ltg, *qfx3vhct) psdvgce3(7) = 0 call dlgpwe0c(tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba, rbne6o *uj, wpuarq2m, rsynp1go, n3iasxug, uaf2xgqy, kuzxj1lo, wy1vqfzu, af *pc0kns, br5ovgcj, dimu, hj3ftvzu, qfx3vhct, zjkrtol8, izero0, vm4x *josb) if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then qes4mujl = 2*hj3ftvzu-1 else qes4mujl = hj3ftvzu endif do23630 kij0gwer=1,qfozcl5b do23632 ayfnwr1v=1,kuzxj1lo zyodca3j(kij0gwer,ayfnwr1v) = wpuarq2m(qes4mujl-1+kij0gwer,ayfnwr1 *v) lxyst1eb(kij0gwer,ayfnwr1v) = m0ibglfx(qes4mujl-1+kij0gwer,ayfnwr1 *v) 23632 continue 23633 continue 23630 continue 23631 continue sedf7mxb = lyma1kwc * afpc0kns ehtjigf4 = xwdf5ltg * (hj3ftvzu-1) if(iter .eq. 1)then lncrw8mg = kiye1wjz( ehtjigf4 + hnpt1zym(1)) ufkq9rpg = kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(1)) if(xwdf5ltg .eq. 2)then r3eoxkzp = kiye1wjz( ehtjigf4 + hnpt1zym(2)) wld4qctn = kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(2)) endif do23638 kij0gwer=1,lyma1kwc do23640 ayfnwr1v=1,kuzxj1lo kispwgx3(ayfnwr1v,ehtjigf4 + hnpt1zym(kij0gwer)) = 0.0d0 23640 continue 23641 continue 23638 continue 23639 continue else kiye1wjz( ehtjigf4 + hnpt1zym(1)) = lncrw8mg kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(1)) = ufkq9rpg if(xwdf5ltg .eq. 
2)then kiye1wjz( ehtjigf4 + hnpt1zym(2)) = r3eoxkzp kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(2)) = wld4qctn endif endif call vbfa(irhm4cfa,kuzxj1lo,qfozcl5b,psdvgce3, mbvnaor6, ghz9vuba( *1,qes4mujl), rbne6ouj(1,qes4mujl), kiye1wjz( ehtjigf4 + hnpt1zym(1 *)), kiye1wjz(sedf7mxb + ehtjigf4 + hnpt1zym(1)), ezlgm2up,nef,whic *h, ub4xioar,kispwgx3(1,ehtjigf4 + hnpt1zym(1)), lxyst1eb,s0, beta( *1+(hj3ftvzu-1)*xlpjcg3s), cov,zpcqv3uj, vc6hatuj,fasrkub3, ges1xpk *r, xbignn, zyodca3j, hjm2ktyr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nb *fjc, work1, wk2, wwkmm, work3, sgdub, bmb, ifys6woa, mwk, ttwk, rp *yis2kc(1+(hj3ftvzu-1)*(nbzjkpi3(1+xwdf5ltg)-1)), zv2xfhei, resss, *nbzjkpi3, acpios9q, itwk, jwbkl9fp) y7sdgtqi(3+afpc0kns+afpc0kns+1) = resss xumj5dnk = psdvgce3(14) if(xumj5dnk .ne. 0)then call intpr("vcao6f: exiting because of an error",-1,xumj5dnk,1) zjkrtol8 = 8 return endif do23646 kij0gwer=1,qfozcl5b do23648 ayfnwr1v=1,kuzxj1lo m0ibglfx(qes4mujl-1+kij0gwer,ayfnwr1v) = lxyst1eb(kij0gwer,ayfnwr1 *v) 23648 continue 23649 continue 23646 continue 23647 continue call nipyajc1(m0ibglfx, t8hwvalr, kuzxj1lo, wy1vqfzu, afpc0kns, qf *x3vhct, hj3ftvzu) call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes, hj3ftvzu, n3iasxug, vsoihn *1r, hbsl0gto) wiptsjx8 = dabs(tlq9wpes - rpto5qwb) / (1.0d0 + dabs(tlq9wpes)) if(wiptsjx8 .lt. bh2vgiay)then zjkrtol8 = 0 xui7hqwl(8) = iter if((qfx3vhct .eq. 3) .or. (qfx3vhct .eq. 5))then call shjlwft5(qfx3vhct, tlgduey8, wmat, t8hwvalr, kuzxj1lo, wy1vqf *zu, afpc0kns, dimw, m0ibglfx, tlq9wpes,hj3ftvzu,n3iasxug,vsoihn1r, * izero0) endif aqg1vdmo = aqg1vdmo + tlq9wpes goto 1011 else rpto5qwb = tlq9wpes endif 23626 continue 23627 continue if(f7svlajr .eq. 1)then f7svlajr = 0 xui7hqwl(9) = 1 goto 653 endif zjkrtol8 = 3 1011 hmayv1xt = 1.0d0 23618 continue 23619 continue tlq9wpes = aqg1vdmo return end subroutine dcqof(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4 *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm *, y7sdgtqi, atujnxb8, yxiwebc5, k7hulceq, p2, kpzavbj3, ydcnh9xl, *ajul8wkv) implicit logical (a-z) integer xui7hqwl(19), tgiyxdw1(*), dufozmt7(*) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge *s1xpkr(*) integer vtsou9pz double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns) *, kifxa0he(kuzxj1lo,*), wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1 *lo), vm4xjosb(kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo), ghz9vuba(kuz *xj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu), wpuarq2m(dimu,kuzxj1 *lo), w8znmyce(br5ovgcj,*) double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta *(*), y7sdgtqi(*) double precision twk(wy1vqfzu,kuzxj1lo,*), wkmm(wy1vqfzu*(wy1vqfzu *+1)) integer p2 double precision atujnxb8(kuzxj1lo,p2), yxiwebc5(kuzxj1lo,*), k7hu *lceq(p2,*), kpzavbj3(p2,*), ydcnh9xl, ajul8wkv(*) integer ayfnwr1v, xvr7bonh, hpmwnav2, xwdf5ltg, idlosrw8, gp1jxzuh *, exrkcn5d, wr0lbopv double precision summ, dev0 xwdf5ltg = xui7hqwl(1) idlosrw8 = xui7hqwl(5) vtsou9pz = xui7hqwl(12) exrkcn5d = xui7hqwl(13) wr0lbopv = xui7hqwl(18) do23656 hpmwnav2=1,xwdf5ltg do23658 ayfnwr1v=1,kuzxj1lo summ = 0.0d0 do23660 xvr7bonh=1,p2 summ = summ + atujnxb8(ayfnwr1v,xvr7bonh) * k7hulceq(xvr7bonh,hpmw *nav2) 23660 continue 23661 continue yxiwebc5(ayfnwr1v,hpmwnav2) = summ lncwkfq7(ayfnwr1v,hpmwnav2) = summ 23658 continue 23659 continue 23656 continue 23657 continue 
if(vtsou9pz.eq.1)then call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd *gtqi) else call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd *gtqi) endif do23664 xvr7bonh=1,p2 do23666 ayfnwr1v=1,kuzxj1lo atujnxb8(ayfnwr1v,xvr7bonh) = ydcnh9xl * atujnxb8(ayfnwr1v,xvr7bon *h) 23666 continue 23667 continue 23664 continue 23665 continue do23668 hpmwnav2=1,xwdf5ltg do23670 xvr7bonh=1,p2 do23672 ayfnwr1v=1,kuzxj1lo lncwkfq7(ayfnwr1v,hpmwnav2)=yxiwebc5(ayfnwr1v,hpmwnav2)+atujnxb8(a *yfnwr1v,xvr7bonh) 23672 continue 23673 continue xui7hqwl(5) = 2 do23674 gp1jxzuh=1,exrkcn5d beta(gp1jxzuh) = ajul8wkv(gp1jxzuh) 23674 continue 23675 continue if(vtsou9pz.eq.1)then call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd *gtqi) else call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd *gtqi) endif if(zjkrtol8 .ne. 0)then return endif kpzavbj3(xvr7bonh,hpmwnav2) = (tlq9wpes - dev0) / ydcnh9xl 23670 continue 23671 continue if(xwdf5ltg .gt. 
1)then do23682 ayfnwr1v=1,kuzxj1lo lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) 23682 continue 23683 continue endif 23668 continue 23669 continue xui7hqwl(5) = idlosrw8 return end subroutine vdcaof(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, gh *z9vuba, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo *, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, tlq9wpes *, beta, twk, wkmm, y7sdgtqi, atujnxb8, yxiwebc5, k7hulceq, p2, kpz *avbj3, ajul8wkv, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up, nef, which *, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, hjm2ktyr, jn *xpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkmm, work3, sg *dub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbzjkpi3, acpio *s9q, itwk, jwbkl9fp) implicit logical (a-z) integer xui7hqwl(19) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge *s1xpkr(*) integer vtsou9pz double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns) *, wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1lo), t8hwvalr(afpc0kns *,kuzxj1lo), ghz9vuba(kuzxj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfz *u), wpuarq2m(dimu,kuzxj1lo) double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta *(*), y7sdgtqi(*) double precision twk(wy1vqfzu,kuzxj1lo,*) double precision wkmm(wy1vqfzu*(wy1vqfzu+1)) integer p2 double precision atujnxb8(kuzxj1lo,p2), yxiwebc5(kuzxj1lo,*), k7hu *lceq(p2,*), kpzavbj3(p2,*), ydcnh9xl, ajul8wkv(*) integer ayfnwr1v, pp, hpmwnav2, xwdf5ltg, idlosrw8, exrkcn5d, wr0l *bopv double precision summ, dev0 integer psdvgce3(15), qfozcl5b, ezlgm2up(*),nef(*),which(*), jnxpu *ym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*), nbzjkpi3(2), acpios *9q(*), itwk(*), jwbkl9fp(2) double precision kiye1wjz(*) double precision ub4xioar(qfozcl5b,kuzxj1lo), kispwgx3(kuzxj1lo,*) *,s0(wy1vqfzu), zyodca3j(qfozcl5b,kuzxj1lo) double precision lxyst1eb(qfozcl5b,kuzxj1lo), mbvnaor6(kuzxj1lo,*) *, hjm2ktyr(qfozcl5b,*), work1(*), wk2(kuzxj1lo,qfozcl5b), work3(*) *, sgdub(*), bmb(*), ifys6woa(*), mwk(*), rpyis2kc(*), zv2xfhei(*), * resss integer irhm4cfa double precision zpcqv3uj resss = 0.0d0 irhm4cfa = 0 xwdf5ltg = xui7hqwl(1) idlosrw8 = xui7hqwl(5) vtsou9pz = xui7hqwl(12) exrkcn5d = xui7hqwl(13) wr0lbopv = xui7hqwl(18) zpcqv3uj = y7sdgtqi(3+afpc0kns+afpc0kns+2) ydcnh9xl = y7sdgtqi(3+afpc0kns+afpc0kns+3) do23684 hpmwnav2=1,xwdf5ltg do23686 ayfnwr1v=1,kuzxj1lo summ = 0.0d0 do23688 pp=1,p2 summ = summ + atujnxb8(ayfnwr1v,pp) * k7hulceq(pp,hpmwnav2) 23688 continue 23689 continue yxiwebc5(ayfnwr1v,hpmwnav2) = summ lncwkfq7(ayfnwr1v,hpmwnav2) = summ 23686 continue 23687 continue 23684 continue 23685 continue if(vtsou9pz.eq.1)then call vcao6f(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba *, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo, wy1v *qfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, dev0, ajul8wkv *, twk, wkmm, y7sdgtqi, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up, nef, * which, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, hjm2kt *yr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkmm, wor *k3, sgdub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbzjkpi3, * acpios9q, itwk, jwbkl9fp) y7sdgtqi(3+afpc0kns+afpc0kns+1) = resss else endif do23692 pp=1,p2 do23694 ayfnwr1v=1,kuzxj1lo atujnxb8(ayfnwr1v,pp) = ydcnh9xl * atujnxb8(ayfnwr1v,pp) 23694 continue 23695 continue 23692 continue 23693 continue do23696 hpmwnav2=1,xwdf5ltg do23698 pp=1,p2 do23700 ayfnwr1v=1,kuzxj1lo lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) + atujnx *b8(ayfnwr1v,pp) 23700 continue 23701 continue 
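c Editor's reading of the generated code (a descriptive comment only, no
c behavioural change): lncwkfq7 has just been shifted by the (already
c ydcnh9xl-scaled) pp-th column of atujnxb8 along latent axis hpmwnav2;
c the model is refitted below and the forward difference
c (tlq9wpes - dev0) / ydcnh9xl approximates the derivative of the
c deviance with respect to that perturbation, stored in kpzavbj3.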
xui7hqwl(5) = 0 if(vtsou9pz.eq.1)then call vcao6f(lncwkfq7, tlgduey8, wmat, m0ibglfx, t8hwvalr, ghz9vuba *, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, kuzxj1lo, wy1v *qfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, xui7hqwl, tlq9wpes, beta *, twk, wkmm, y7sdgtqi, psdvgce3,qfozcl5b, kiye1wjz, ezlgm2up, nef, * which, ub4xioar,kispwgx3,s0, zyodca3j, lxyst1eb, mbvnaor6, hjm2kt *yr, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wwkmm, wor *k3, sgdub, bmb, ifys6woa, mwk, ttwk, rpyis2kc, zv2xfhei, nbzjkpi3, * acpios9q, itwk, jwbkl9fp) y7sdgtqi(3+afpc0kns+afpc0kns+1) = resss else endif if(zjkrtol8 .ne. 0)then return endif kpzavbj3(pp,hpmwnav2) = (tlq9wpes - dev0) / ydcnh9xl 23698 continue 23699 continue if(xwdf5ltg .gt. 1)then do23708 ayfnwr1v=1,kuzxj1lo lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) 23708 continue 23709 continue endif 23696 continue 23697 continue xui7hqwl(5) = idlosrw8 return end subroutine duqof(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4 *xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, * fasrkub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, * zjkrtol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm *, y7sdgtqi, yxiwebc5, kpzavbj3, ydcnh9xl, ajul8wkv) implicit logical (a-z) integer xui7hqwl(19), tgiyxdw1(*), dufozmt7(*) integer kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrtol8, ge *s1xpkr(*) integer vtsou9pz double precision lncwkfq7(kuzxj1lo,*), tlgduey8(kuzxj1lo,afpc0kns) *, kifxa0he(kuzxj1lo,*), wmat(kuzxj1lo,*), m0ibglfx(wy1vqfzu,kuzxj1 *lo), vm4xjosb(kuzxj1lo), t8hwvalr(afpc0kns,kuzxj1lo), ghz9vuba(kuz *xj1lo,wy1vqfzu), rbne6ouj(kuzxj1lo,wy1vqfzu), wpuarq2m(dimu,kuzxj1 *lo), w8znmyce(br5ovgcj,*) double precision vc6hatuj(br5ovgcj,*), fasrkub3(*), tlq9wpes, beta *(*), y7sdgtqi(*) double precision twk(wy1vqfzu,kuzxj1lo,*), wkmm(wy1vqfzu*(wy1vqfzu *+1)) double precision yxiwebc5(kuzxj1lo,*), kpzavbj3(kuzxj1lo,*), ydcnh *9xl, ajul8wkv(*) integer ayfnwr1v, hpmwnav2, xwdf5ltg, idlosrw8, gp1jxzuh, exrkcn5d double precision dev0 xwdf5ltg = xui7hqwl(1) idlosrw8 = xui7hqwl(5) vtsou9pz = xui7hqwl(12) exrkcn5d = xui7hqwl(13) if(vtsou9pz.eq.1)then call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd *gtqi) else call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, dev0, ajul8wkv, twk, wkmm, y7sd *gtqi) endif do23712 hpmwnav2=1,xwdf5ltg do23714 ayfnwr1v=1,kuzxj1lo lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) + ydcnh9 *xl xui7hqwl(5) = 2 do23716 gp1jxzuh=1,exrkcn5d beta(gp1jxzuh) = ajul8wkv(gp1jxzuh) 23716 continue 23717 continue if(vtsou9pz.eq.1)then call cqo1f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd *gtqi) else call cqo2f(lncwkfq7, tlgduey8, kifxa0he, wmat, m0ibglfx, vm4xjosb, * t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrk *ub3, ges1xpkr, kuzxj1lo, wy1vqfzu, afpc0kns, br5ovgcj, dimu, zjkrt *ol8, xui7hqwl, tgiyxdw1, dufozmt7, tlq9wpes, beta, twk, wkmm, y7sd *gtqi) endif 
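c Descriptive comment (an editorial reading of duqof, not part of the
c original source): one site at a time, row ayfnwr1v of latent axis
c hpmwnav2 has been nudged by ydcnh9xl and beta reset to the saved
c ajul8wkv before the refit above; the forward difference
c (tlq9wpes - dev0) / ydcnh9xl taken below appears to give an
c observation-level derivative of the deviance, kept in kpzavbj3,
c after which lncwkfq7 is restored.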
if(zjkrtol8 .ne. 0)then return endif kpzavbj3(ayfnwr1v,hpmwnav2) = (tlq9wpes - dev0) / ydcnh9xl lncwkfq7(ayfnwr1v,hpmwnav2) = yxiwebc5(ayfnwr1v,hpmwnav2) 23714 continue 23715 continue 23712 continue 23713 continue xui7hqwl(5) = idlosrw8 return end VGAM/src/tyeepolygamma3.c0000644000176200001440000001202613565414527014714 0ustar liggesusers
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <R.h>
#include <Rmath.h>

void tyee_C_vdgam1(double *xval, double *lfu2qhid, int *dvhw1ulq);
void tyee_C_vtgam1(double *xval, double *lfu2qhid, int *dvhw1ulq);
void tyee_C_dgam1w(double sjwyig9t[], double lfu2qhid[], int *f8yswcat, int *dvhw1ulq);
void tyee_C_tgam1w(double sjwyig9t[], double lfu2qhid[], int *f8yswcat, int *dvhw1ulq);
void tyee_C_cum8sum(double ci1oyxas[], double lfu2qhid[], int *nlfu2qhid, double valong[], int *ntot, int *notdvhw1ulq);
void eimpnbinomspecialp(int *interceptonly, double *nrows, double *ncols, double *sizevec, double *pnbinommat, double *rowsums);

/* Digamma, psi(x): recurse upward from x to x+6 so that the asymptotic
   region (x >= 6) applies, then evaluate the standard series in
   w = 1/x^2 whose coefficients come from the Bernoulli numbers. */
void tyee_C_vdgam1(double *xval, double *lfu2qhid, int *dvhw1ulq) {
  double wval, series, obr6tcex = 0.0, tmp1;
  *dvhw1ulq = 1;
  if (*xval <= 0.0e0) { *dvhw1ulq = 0; return; }
  if (*xval < 6.0e0) {
    tmp1 = *xval + 6.0e0;
    tyee_C_vdgam1(&tmp1, &obr6tcex, dvhw1ulq);
    *lfu2qhid = obr6tcex - 1.0e0 / *xval - 1.0e0 / (*xval + 1.0e0) - 1.0e0 / (*xval + 2.0e0) - 1.0e0 / (*xval + 3.0e0) - 1.0e0 / (*xval + 4.0e0) - 1.0e0 / (*xval + 5.0e0);
    return;
  }
  wval = 1.0e0 / (*xval * *xval);
  series = ((wval * ( -1.0e0 / 12.0e0 + ((wval * ( 1.0e0 / 120.0e0 + ((wval * ( -1.0e0 / 252.0e0 + ((wval * ( 1.0e0 / 240.0e0 + ((wval * ( -1.0e0 / 132.0e0 + ((wval * (691.0e0 /32760.0e0 + ((wval * ( -1.0e0 / 12.0e0 + (wval * 3617.0e0)/ 8160.0e0)))))))))))))))))))));
  *lfu2qhid = log(*xval) - 0.5e0 / *xval + series;
}

/* Trigamma, psi'(x): the same recurrence-plus-asymptotic-series scheme.
   The coefficient of the sixth series term is the Bernoulli number
   B_12 = -691/2730, consistent with the 691/32760 = 691/(12 * 2730)
   term in the digamma series above. */
void tyee_C_vtgam1(double *xval, double *lfu2qhid, int *dvhw1ulq) {
  double wval, series, obr6tcex = 0.0, tmp1;
  *dvhw1ulq = 1;
  if (*xval <= 0.0e0) { *dvhw1ulq = 0; return; }
  if (*xval < 6.0e0) {
    tmp1 = *xval + 6.0e0;
    tyee_C_vtgam1(&tmp1, &obr6tcex, dvhw1ulq);
    *lfu2qhid = obr6tcex + 1.0e0 / pow( (double) *xval, (double) 2.0) + 1.0e0 / pow( (double) (*xval + 1.0e0), (double) 2.0) + 1.0e0 / pow( (double) (*xval + 2.0e0), (double) 2.0) + 1.0e0 / pow( (double) (*xval + 3.0e0), (double) 2.0) + 1.0e0 / pow( (double) (*xval + 4.0e0), (double) 2.0) + 1.0e0 / pow( (double) (*xval + 5.0e0), (double) 2.0);
    return;
  }
  wval = 1.0e0 / (*xval * *xval);
  series = 1.0e0 + (wval * ( 1.0e0 / 6.0e0 + (wval * ( -1.0e0 / 30.0e0 + (wval * ( 1.0e0 / 42.0e0 + (wval * ( -1.0e0 / 30.0e0 + (wval * ( 5.0e0 / 66.0e0 + (wval * (-691.0e0 /2730.0e0 + (wval * ( 7.0e0 / 6.0e0 - (wval * 3617.0e0)/ 510.0e0))))))))))))));
  *lfu2qhid = 0.5e0 * wval + series / *xval;
}

void tyee_C_dgam1w(double sjwyig9t[], double lfu2qhid[], int *f8yswcat, int *dvhw1ulq) {
  int ayfnwr1v, okobr6tcex;
  double *qnwamo0e1, *qnwamo0e2;
  *dvhw1ulq = 1;
  qnwamo0e1 = sjwyig9t;
  qnwamo0e2 = lfu2qhid;
  for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
    tyee_C_vdgam1(qnwamo0e1++, qnwamo0e2++, &okobr6tcex);
    if (okobr6tcex != 1) *dvhw1ulq = okobr6tcex;
  }
}

void tyee_C_tgam1w(double sjwyig9t[], double lfu2qhid[], int *f8yswcat, int *dvhw1ulq) {
  int ayfnwr1v, okobr6tcex;
  double *qnwamo0e1, *qnwamo0e2;
  *dvhw1ulq = 1;
  qnwamo0e1 = sjwyig9t;
  qnwamo0e2 = lfu2qhid;
  for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
    tyee_C_vtgam1(qnwamo0e1++, qnwamo0e2++, &okobr6tcex);
    if (okobr6tcex != 1) *dvhw1ulq = okobr6tcex;
  }
}

void tyee_C_cum8sum(double ci1oyxas[], double lfu2qhid[], int *nlfu2qhid, double valong[], int *ntot, int *notdvhw1ulq) {
  int ayfnwr1v, iii = 1;
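  /* Grouped cumulative sums: keep adding ci1oyxas[] into the current
     cell of lfu2qhid[] while the key in valong[] is strictly increasing,
     and open a new cell whenever it is not; on exit, *notdvhw1ulq is 1
     when the number of groups found disagrees with *nlfu2qhid. */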
  lfu2qhid[iii-1] = ci1oyxas[iii-1];
  for (ayfnwr1v = 2; ayfnwr1v <= *ntot; ayfnwr1v++) {
    if (valong[ayfnwr1v-1] > valong[ayfnwr1v-2]) {
      lfu2qhid[iii-1] += ci1oyxas[ayfnwr1v-1];
    } else {
      iii++;
      lfu2qhid[iii-1] = ci1oyxas[ayfnwr1v-1];
    }
  }
  *notdvhw1ulq = (iii == *nlfu2qhid) ? 0 : 1;
}

/* Row sums of pnbinommat[i][j] / (sizevec[i] + j)^2, j = 0, ..., ncols-1;
   apparently a building block for the negative binomial EIM (compare the
   R-side exports EIM.NB.specialp and EIM.posNB.specialp).  The
   intercept-only case collapses to a single row. */
void eimpnbinomspecialp(int *interceptonly, double *nrows, double *ncols,
                        double *sizevec, /* length is nrows */
                        double *pnbinommat, double *rowsums) {
  double ayfnwr1v, yq6lorbx, tmp1 = 0.0, tmp2;
  double *fpdlcqk9rowsums, *fpdlcqk9sizevec;
  if (*interceptonly == 1) {
    for (yq6lorbx = 0; yq6lorbx < *ncols; yq6lorbx++) {
      tmp2 = (*sizevec + yq6lorbx);
      tmp1 += *pnbinommat++ / (tmp2 * tmp2);
    }
    *rowsums = tmp1;
    return;
  }
  fpdlcqk9rowsums = rowsums;
  for (ayfnwr1v = 0; ayfnwr1v < *nrows; ayfnwr1v++)
    *fpdlcqk9rowsums++ = 0.0;
  for (yq6lorbx = 0; yq6lorbx < *ncols; yq6lorbx++) {
    fpdlcqk9rowsums = rowsums;
    fpdlcqk9sizevec = sizevec;
    for (ayfnwr1v = 0; ayfnwr1v < *nrows; ayfnwr1v++) {
      tmp2 = (yq6lorbx + *fpdlcqk9sizevec++);
      tmp1 = *pnbinommat++ / (tmp2 * tmp2);
      *fpdlcqk9rowsums++ += tmp1;
    }
  }
}
VGAM/src/vlinpack3.f0000644000176200001440000004505013565414527013654 0ustar liggesusersc 1/4/00 c The following code is linpack.f from GAMFIT c For R.1.0-0, subroutine dshift is needed c 12/7/02; T.Yee c I've modified the routines in this file so that reals become double c precisions. The subroutines and functions may have an "8" put after them c to (hopefully) make them unique. c All this for the VGAM package. c For example, "real function ddot" to "double precision function ddot8". c I might add a "implicit logical (a-z)" line to pick up errors. subroutine daxpy8(n,da,dx,incx,dy,incy) implicit logical (a-z) c c constant times a vector plus a vector. c uses unrolled loops for increments equal to one. c jack dongarra, linpack, 3/11/78. c c c c 20130419: orig.: c double precision dx(1),dy(1),da c c c c double precision dx(*),dy(*),da integer i,incx,incy,m,mp1,n c Undeclared, so added by T.Yee integer ix, iy c if(n.le.0)return if (da .eq. 0.0d0) return if(incx.eq.1.and.incy.eq.1)go to 20 c c code for unequal increments or equal increments c not equal to 1 c ix = 1 iy = 1 if(incx.lt.0)ix = (-n+1)*incx + 1 if(incy.lt.0)iy = (-n+1)*incy + 1 do 10 i = 1,n dy(iy) = dy(iy) + da*dx(ix) ix = ix + incx iy = iy + incy 10 continue return c c code for both increments equal to 1 c c c clean-up loop c 20 m = mod(n,4) if( m .eq. 0 ) go to 40 do 30 i = 1,m dy(i) = dy(i) + da*dx(i) 30 continue if( n .lt. 4 ) return 40 mp1 = m + 1 do 50 i = mp1,n,4 dy(i) = dy(i) + da*dx(i) dy(i + 1) = dy(i + 1) + da*dx(i + 1) dy(i + 2) = dy(i + 2) + da*dx(i + 2) dy(i + 3) = dy(i + 3) + da*dx(i + 3) 50 continue return end subroutine dcopy8(n,dx,incx,dy,incy) implicit logical (a-z) c c copies a vector, x, to a vector, y. c uses unrolled loops for increments equal to one. c jack dongarra, linpack, 3/11/78. c double precision dx(*),dy(*) integer i,incx,incy,ix,iy,m,mp1,n c if(n.le.0)return if(incx.eq.1.and.incy.eq.1)go to 20 c c code for unequal increments or equal increments c not equal to 1 c ix = 1 iy = 1 if(incx.lt.0)ix = (-n+1)*incx + 1 if(incy.lt.0)iy = (-n+1)*incy + 1 do 10 i = 1,n dy(iy) = dx(ix) ix = ix + incx iy = iy + incy 10 continue return c c code for both increments equal to 1 c c c clean-up loop c 20 m = mod(n,7) if( m .eq. 0 ) go to 40 do 30 i = 1,m dy(i) = dx(i) 30 continue if( n .lt. 
7 ) return 40 mp1 = m + 1 do 50 i = mp1,n,7 dy(i) = dx(i) dy(i + 1) = dx(i + 1) dy(i + 2) = dx(i + 2) dy(i + 3) = dx(i + 3) dy(i + 4) = dx(i + 4) dy(i + 5) = dx(i + 5) dy(i + 6) = dx(i + 6) 50 continue return end double precision function ddot8(n,dx,incx,dy,incy) c c 12/7/02; T.Yee c I've modified "real function ddot" to c "double precision function ddot8" for the VGAM package c I've added the "implicit logical (a-z)" line implicit logical (a-z) c c forms the dot product of two vectors. c uses unrolled loops for increments equal to one. c jack dongarra, linpack, 3/11/78. c double precision dx(*),dy(*),dtemp integer i,incx,incy,ix,iy,m,mp1,n c ddot8 = 0.0d0 dtemp = 0.0d0 if(n.le.0)return if(incx.eq.1.and.incy.eq.1)go to 20 c c code for unequal increments or equal increments c not equal to 1 c ix = 1 iy = 1 if(incx.lt.0)ix = (-n+1)*incx + 1 if(incy.lt.0)iy = (-n+1)*incy + 1 do 10 i = 1,n dtemp = dtemp + dx(ix)*dy(iy) ix = ix + incx iy = iy + incy 10 continue ddot8 = dtemp return c c code for both increments equal to 1 c c c clean-up loop c 20 m = mod(n,5) if( m .eq. 0 ) go to 40 do 30 i = 1,m dtemp = dtemp + dx(i)*dy(i) 30 continue if( n .lt. 5 ) go to 60 40 mp1 = m + 1 do 50 i = mp1,n,5 dtemp = dtemp + dx(i)*dy(i) + dx(i + 1)*dy(i + 1) + * dx(i + 2)*dy(i + 2) + dx(i + 3)*dy(i + 3)+dx(i + 4)*dy(i + 4) 50 continue 60 ddot8 = dtemp return end double precision function dnrm28 ( n, dx,ldx, incx) implicit logical (a-z) c Undeclared, so added by T.Yee integer n, ldx, incx, i, j, nn integer next double precision dx(ldx), cutlo, cuthi, hitest, sum, * xmax,zero,one data zero, one /0.0d0, 1.0d0/ c c euclidean norm of the n-vector stored in dx() with storage c increment incx . c if n .le. 0 return with result = 0. c if n .ge. 1 then incx must be .ge. 1 c c c.l.lawson, 1978 jan 08 c c four phase method using two built-in constants that are c hopefully applicable to all machines. c cutlo = maximum of sqrt(u/eps) over all known machines. c cuthi = minimum of sqrt(v) over all known machines. c where c eps = smallest no. such that eps + 1. .gt. 1. c u = smallest positive no. (underflow limit) c v = largest no. (overflow limit) c c brief outline of algorithm.. c c phase 1 scans zero components. c move to phase 2 when a component is nonzero and .le. cutlo c move to phase 3 when a component is .gt. cutlo c move to phase 4 when a component is .ge. cuthi/m c where m = n for x() double precision and m = 2*n for complex. c c values for cutlo and cuthi.. c from the environmental parameters listed in the imsl converter c document the limiting values are as follows.. c cutlo, s.p. u/eps = 2**(-102) for honeywell. close seconds are c univac and dec at 2**(-103) c thus cutlo = 2**(-51) = 4.44089d-16 c cuthi, s.p. v = 2**127 for univac, honeywell, and dec. c thus cuthi = 2**(63.5) = 1.30438d19 c cutlo, d.p. u/eps = 2**(-67) for honeywell and dec. c thus cutlo = 2**(-33.5) = 8.23181d-11 c cuthi, d.p. same as s.p. cuthi = 1.30438d19 c data cutlo, cuthi / 8.232d-11, 1.304d19 / c data cutlo, cuthi / 4.441d-16, 1.304d19 / data cutlo, cuthi / 8.232d-11, 1.304d19 / c if(n .gt. 0) go to 10 dnrm28 = zero go to 300 c 10 next = 30 sum = zero nn = n * incx c begin main loop i = 1 c 20 go to next,(30, 50, 70, 110) 20 if(next .eq. 30) go to 30 if(next .eq. 50) go to 50 if(next .eq. 70) go to 70 if(next .eq. 110) go to 110 c An error!!! dnrm28 = 0.0d0 return c 30 if( dabs(dx(i)) .gt. cutlo) go to 85 next = 50 xmax = zero c c phase 1. sum is zero c 50 if( dx(i) .eq. zero) go to 200 if( dabs(dx(i)) .gt. 
cutlo) go to 85 c c prepare for phase 2. next = 70 go to 105 c c prepare for phase 4. c 100 i = j next = 110 sum = (sum / dx(i)) / dx(i) 105 xmax = dabs(dx(i)) go to 115 c c phase 2. sum is small. c scale to avoid destructive underflow. c 70 if( dabs(dx(i)) .gt. cutlo ) go to 75 c c common code for phases 2 and 4. c in phase 4 sum is large. scale to avoid overflow. c 110 if( dabs(dx(i)) .le. xmax ) go to 115 sum = one + sum * (xmax / dx(i))**2 xmax = dabs(dx(i)) go to 200 c 115 sum = sum + (dx(i)/xmax)**2 go to 200 c c c prepare for phase 3. c 75 sum = (sum * xmax) * xmax c c c for real or d.p. set hitest = cuthi/n c for complex set hitest = cuthi/(2*n) c c "float" changed to "dfloat" by T.Yee 85 hitest = cuthi/dfloat( n ) c c phase 3. sum is mid-range. no scaling. c do 95 j =i,nn,incx if(dabs(dx(j)) .ge. hitest) go to 100 sum = sum + dx(j)**2 95 continue dnrm28 = dsqrt( sum ) go to 300 c 200 continue i = i + incx if ( i .le. nn ) go to 20 c c end of main loop. c c compute square root and adjust for scaling. c dnrm28 = xmax * dsqrt(sum) 300 continue return end subroutine dscal8(n,da,dx,incx) implicit logical (a-z) c c scales a vector by a constant. c uses unrolled loops for increment equal to one. c jack dongarra, linpack, 3/11/78. c double precision da,dx(*) integer i,incx,m,mp1,n,nincx c if(n.le.0)return if(incx.eq.1)go to 20 c c code for increment not equal to 1 c nincx = n*incx do 10 i = 1,nincx,incx dx(i) = da*dx(i) 10 continue return c c code for increment equal to 1 c c c clean-up loop c 20 m = mod(n,5) if( m .eq. 0 ) go to 40 do 30 i = 1,m dx(i) = da*dx(i) 30 continue if( n .lt. 5 ) return 40 mp1 = m + 1 do 50 i = mp1,n,5 dx(i) = da*dx(i) dx(i + 1) = da*dx(i + 1) dx(i + 2) = da*dx(i + 2) dx(i + 3) = da*dx(i + 3) dx(i + 4) = da*dx(i + 4) 50 continue return end subroutine dshift8(x,ldx,n,j,k) implicit logical (a-z) integer ldx,n,j,k double precision x(ldx,k), tt integer i,jj if(k.le.j)return do 100 i=1,n tt=x(i,j) do 50 jj=j+1,k x(i,jj-1)=x(i,jj) 50 continue x(i,k)=tt 100 continue return end subroutine vdqrsl(x,ldx,n,k,qraux,y,qy,qty,b,rsd,xb,job,info) implicit logical (a-z) integer ldx,n,k,job,info double precision x(ldx,*),qraux(*),y(*),qy(*),qty(*),b(*),rsd(*), * xb(*) c c c c 20130419: orig.: c double precision x(ldx,1),qraux(1),y(1),qy(1),qty(1),b(1),rsd(1), c * xb(1) c c c c dqrsl applies the output of dqrdc to compute coordinate c transformations, projections, and least squares solutions. c for k .le. min(n,p), let xk be the matrix c c xk = (x(jpvt(1)),x(jpvt(2)), ... ,x(jpvt(k))) c c formed from columns jpvt(1), ... ,jpvt(k) of the original c n x p matrix x that was input to dqrdc (if no pivoting was c done, xk consists of the first k columns of x in their c original order). dqrdc produces a factored orthogonal matrix q c and an upper triangular matrix r such that c c xk = q * (r) c (0) c c this information is contained in coded form in the arrays c x and qraux. c c on entry c c x double precision(ldx,p). c x contains the output of dqrdc. c c ldx integer. c ldx is the leading dimension of the array x. c c n integer. c n is the number of rows of the matrix xk. it must c have the same value as n in dqrdc. c c k integer. c k is the number of columns of the matrix xk. k c must not be greater than min(n,p), where p is the c same as in the calling sequence to dqrdc. c c qraux double precision(p). c qraux contains the auxiliary output from dqrdc. c c y double precision(n) c y contains an n-vector that is to be manipulated c by dqrsl. c c job integer. 
c job specifies what is to be computed. job has c the decimal expansion abcde, with the following c meaning. c c if a.ne.0, compute qy. c if b,c,d, or e .ne. 0, compute qty. c if c.ne.0, compute b. c if d.ne.0, compute rsd. c if e.ne.0, compute xb. c c note that a request to compute b, rsd, or xb c automatically triggers the computation of qty, for c which an array must be provided in the calling c sequence. c c on return c c qy double precision(n). c qy contains q*y, if its computation has been c requested. c c qty double precision(n). c qty contains trans(q)*y, if its computation has c been requested. here trans(q) is the c transpose of the matrix q. c c b double precision(k) c b contains the solution of the least squares problem c c minimize norm2(y - xk*b), c c if its computation has been requested. (note that c if pivoting was requested in dqrdc, the j-th c component of b will be associated with column jpvt(j) c of the original matrix x that was input into dqrdc.) c c rsd double precision(n). c rsd contains the least squares residual y - xk*b, c if its computation has been requested. rsd is c also the orthogonal projection of y onto the c orthogonal complement of the column space of xk. c c xb double precision(n). c xb contains the least squares approximation xk*b, c if its computation has been requested. xb is also c the orthogonal projection of y onto the column space c of x. c c info integer. c info is zero unless the computation of b has c been requested and r is exactly singular. in c this case, info is the index of the first zero c diagonal element of r and b is left unaltered. c c the parameters qy, qty, b, rsd, and xb are not referenced c if their computation is not requested and in this case c can be replaced by dummy variables in the calling program. c to save storage, the user may in some cases use the same c array for different parameters in the calling sequence. a c frequently occurring example is when one wishes to compute c any of b, rsd, or xb and does not need y or qty. in this c case one may identify y, qty, and one of b, rsd, or xb, while c providing separate arrays for anything else that is to be c computed. thus the calling sequence c c call dqrsl(x,ldx,n,k,qraux,y,dum,y,b,y,dum,110,info) c c will result in the computation of b and rsd, with rsd c overwriting y. more generally, each item in the following c list contains groups of permissible identifications for c a single calling sequence. c c 1. (y,qty,b) (rsd) (xb) (qy) c c 2. (y,qty,rsd) (b) (xb) (qy) c c 3. (y,qty,xb) (b) (rsd) (qy) c c 4. (y,qy) (qty,b) (rsd) (xb) c c 5. (y,qy) (qty,rsd) (b) (xb) c c 6. (y,qy) (qty,xb) (b) (rsd) c c in any group the value returned in the array allocated to c the group corresponds to the last member of the group. c c linpack. this version dated 08/14/78 . c g.w. stewart, university of maryland, argonne national lab. c c dqrsl uses the following functions and subprograms. c c blas daxpy8,dcopy8,ddot8 c fortran dabs,min0,mod c c internal variables c integer i,j,jj,ju,kp1 double precision ddot8,t,temp logical cb,cqy,cqty,cr,cxb c c c set info flag. c info = 0 c c determine what is to be computed. c cqy = job/10000 .ne. 0 cqty = mod(job,10000) .ne. 0 cb = mod(job,1000)/100 .ne. 0 cr = mod(job,100)/10 .ne. 0 cxb = mod(job,10) .ne. 0 ju = min0(k,n-1) c c special action when n=1. c if (ju .ne. 0) go to 40 if (cqy) qy(1) = y(1) if (cqty) qty(1) = y(1) if (cxb) xb(1) = y(1) if (.not.cb) go to 30 if (x(1,1) .ne. 
0.0d0) go to 10 info = 1 go to 20 10 continue b(1) = y(1)/x(1,1) 20 continue 30 continue if (cr) rsd(1) = 0.0d0 go to 250 40 continue c c set up to compute qy or qty. c if (cqy) call dcopy8(n,y,1,qy,1) if (cqty) call dcopy8(n,y,1,qty,1) if (.not.cqy) go to 70 c c compute qy. c do 60 jj = 1, ju j = ju - jj + 1 if (qraux(j) .eq. 0.0d0) go to 50 temp = x(j,j) x(j,j) = qraux(j) t = -ddot8(n-j+1,x(j,j),1,qy(j),1)/x(j,j) call daxpy8(n-j+1,t,x(j,j),1,qy(j),1) x(j,j) = temp 50 continue 60 continue 70 continue if (.not.cqty) go to 100 c c compute trans(q)*y. c do 90 j = 1, ju if (qraux(j) .eq. 0.0d0) go to 80 temp = x(j,j) x(j,j) = qraux(j) t = -ddot8(n-j+1,x(j,j),1,qty(j),1)/x(j,j) call daxpy8(n-j+1,t,x(j,j),1,qty(j),1) x(j,j) = temp 80 continue 90 continue 100 continue c c set up to compute b, rsd, or xb. c if (cb) call dcopy8(k,qty,1,b,1) kp1 = k + 1 if (cxb) call dcopy8(k,qty,1,xb,1) if(cr .and. k .lt. n) call dcopy8(n-k,qty(kp1),1,rsd(kp1),1) if (.not.cxb .or. kp1 .gt. n) go to 120 do 110 i = kp1, n xb(i) = 0.0d0 110 continue 120 continue if (.not.cr) go to 140 do 130 i = 1, k rsd(i) = 0.0d0 130 continue 140 continue if (.not.cb) go to 190 c c compute b. c do 170 jj = 1, k j = k - jj + 1 if (x(j,j) .ne. 0.0d0) go to 150 info = j c ......exit go to 180 150 continue b(j) = b(j)/x(j,j) if (j .eq. 1) go to 160 t = -b(j) call daxpy8(j-1,t,x(1,j),1,b,1) 160 continue 170 continue 180 continue 190 continue if (.not.cr .and. .not.cxb) go to 240 c c compute rsd or xb as required. c do 230 jj = 1, ju j = ju - jj + 1 if (qraux(j) .eq. 0.0d0) go to 220 temp = x(j,j) x(j,j) = qraux(j) if (.not.cr) go to 200 t = -ddot8(n-j+1,x(j,j),1,rsd(j),1)/x(j,j) call daxpy8(n-j+1,t,x(j,j),1,rsd(j),1) 200 continue if (.not.cxb) go to 210 t = -ddot8(n-j+1,x(j,j),1,xb(j),1)/x(j,j) call daxpy8(n-j+1,t,x(j,j),1,xb(j),1) 210 continue x(j,j) = temp 220 continue 230 continue 240 continue 250 continue return end VGAM/src/vgam.f0000644000176200001440000014005713565414527012717 0ustar liggesusersC Output from Public domain Ratfor, version 1.01 subroutine vbvs(kuzxj1lo,ankcghz2,rpyis2kc,nk,he7mqnvy,smat,order, *wy1vqfzu) integer kuzxj1lo, nk, order, wy1vqfzu double precision ankcghz2(nk+4), rpyis2kc(nk,wy1vqfzu), he7mqnvy(k *uzxj1lo), smat(kuzxj1lo,wy1vqfzu) double precision chw8lzty integer ayfnwr1v, yq6lorbx, ifour4 ifour4 = 4 do23000 yq6lorbx=1,wy1vqfzu do23002 ayfnwr1v=1,kuzxj1lo chw8lzty = he7mqnvy(ayfnwr1v) call wbvalue(ankcghz2, rpyis2kc(1,yq6lorbx), nk, ifour4, chw8lzty, * order, smat(ayfnwr1v,yq6lorbx)) 23002 continue 23003 continue 23000 continue 23001 continue return end subroutine tfeswo7c(osiz4fxy, nk, wy1vqfzu, ldk, wbkq9zyi, sgmat) implicit logical (a-z) integer nk, wy1vqfzu, ldk double precision osiz4fxy(ldk,nk*wy1vqfzu), wbkq9zyi(wy1vqfzu), sg *mat(nk,4) integer ayfnwr1v, yq6lorbx do23004 ayfnwr1v=1,nk do23006 yq6lorbx=1,wy1vqfzu osiz4fxy(ldk,(ayfnwr1v-1)*wy1vqfzu+yq6lorbx) = osiz4fxy(ldk,(ayfnw *r1v-1)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorbx) * sgmat(ayfnwr1v,1) 23006 continue 23007 continue 23004 continue 23005 continue do23008 ayfnwr1v=1,(nk-1) do23010 yq6lorbx=1,wy1vqfzu osiz4fxy(ldk-wy1vqfzu,(ayfnwr1v-0)*wy1vqfzu+yq6lorbx) = osiz4fxy(l *dk-wy1vqfzu,(ayfnwr1v-0)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorbx) * * sgmat(ayfnwr1v,2) 23010 continue 23011 continue 23008 continue 23009 continue do23012 ayfnwr1v=1,(nk-2) do23014 yq6lorbx=1,wy1vqfzu osiz4fxy(ldk-2*wy1vqfzu,(ayfnwr1v+1)*wy1vqfzu+yq6lorbx) = osiz4fxy *(ldk-2*wy1vqfzu,(ayfnwr1v+1)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorb *x) * sgmat(ayfnwr1v,3) 23014 continue 23015 
continue 23012 continue 23013 continue do23016 ayfnwr1v=1,(nk-3) do23018 yq6lorbx=1,wy1vqfzu osiz4fxy(ldk-3*wy1vqfzu,(ayfnwr1v+2)*wy1vqfzu+yq6lorbx) = osiz4fxy *(ldk-3*wy1vqfzu,(ayfnwr1v+2)*wy1vqfzu+yq6lorbx) + wbkq9zyi(yq6lorb *x) * sgmat(ayfnwr1v,4) 23018 continue 23019 continue 23016 continue 23017 continue return end subroutine ybnagt8k(iii, cz8qdfyj, tesdm5kv, g9fvdrbw, osiz4fxy, w *mat, kxvq6sfw, nyfu9rod, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxd *w1, dufozmt7) implicit logical (a-z) integer iii, cz8qdfyj, tesdm5kv, kxvq6sfw, nyfu9rod, wy1vqfzu, ldk *, dimw, kuzxj1lo, nk, tgiyxdw1(*), dufozmt7(*) double precision g9fvdrbw(4,*), osiz4fxy(ldk, nk*wy1vqfzu), wmat(k *uzxj1lo,dimw) double precision obr6tcex integer urohxe6t, nead, bcol, brow, biuvowq2, nbj8tdsk bcol = cz8qdfyj + tesdm5kv brow = cz8qdfyj do23020 urohxe6t=1,dimw obr6tcex = wmat(iii,urohxe6t) * g9fvdrbw(kxvq6sfw,1) * g9fvdrbw(ny *fu9rod,1) biuvowq2 = (brow-1)*wy1vqfzu + tgiyxdw1(urohxe6t) nbj8tdsk = (bcol-1)*wy1vqfzu + dufozmt7(urohxe6t) nead = nbj8tdsk - biuvowq2 osiz4fxy(ldk-nead, nbj8tdsk) = osiz4fxy(ldk-nead, nbj8tdsk) + obr6 *tcex if(tesdm5kv .gt. 0 .and. dufozmt7(urohxe6t) .ne. tgiyxdw1(urohxe6t *))then biuvowq2 = (brow-1)*wy1vqfzu + dufozmt7(urohxe6t) nbj8tdsk = (bcol-1)*wy1vqfzu + tgiyxdw1(urohxe6t) nead = nbj8tdsk - biuvowq2 osiz4fxy(ldk-nead, nbj8tdsk) = osiz4fxy(ldk-nead, nbj8tdsk) + obr6 *tcex endif 23020 continue 23021 continue return end subroutine vsplin(he7mqnvy,rbne6ouj,wmat,kuzxj1lo,gkdx5jal, nk,ldk *,wy1vqfzu,dimw, tgiyxdw1,dufozmt7, wkmm, wbkq9zyi, info, t8hwvalr, * rpyis2kc, osiz4fxy, btwy, sgdub, ui8ysltq, yzoe1rsp, bmb, ifys6wo *a, dof, scrtch, fbd5yktj, truen) implicit logical (a-z) integer kuzxj1lo, nk, ldk, wy1vqfzu, dimw, tgiyxdw1(*), dufozmt7(* *), info, fbd5yktj, truen integer yzoe1rsp double precision he7mqnvy(kuzxj1lo), rbne6ouj(kuzxj1lo,wy1vqfzu), *wmat(kuzxj1lo,dimw), gkdx5jal(nk+4), wkmm(wy1vqfzu,wy1vqfzu,16), w *bkq9zyi(wy1vqfzu), t8hwvalr(kuzxj1lo,wy1vqfzu), rpyis2kc(nk,wy1vqf *zu), osiz4fxy(ldk,nk*wy1vqfzu), btwy(wy1vqfzu,nk) double precision sgdub(nk,wy1vqfzu), ui8ysltq(truen,wy1vqfzu), bmb *(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu), dof(wy1vqfzu), s *crtch(*) integer yq6lorbx, ayfnwr1v, dqlr5bse, pqzfxw4i, urohxe6t, icrit integer gp0xjetb, e5knafcg, wep0oibc, l3zpbstu(3), ispar, i1loc double precision qaltf0nz, g9fvdrbw(4,1), ms0qypiw(16), penalt, qc *piaj7f, fp6nozvx, waiez6nt, toldf, parms(3) do23024 yq6lorbx=1,wy1vqfzu if(wbkq9zyi(yq6lorbx) .eq. 0.0d0)then ispar=0 icrit=3 else ispar=1 icrit=1 endif if((wy1vqfzu .eq. 1) .or. (dimw.eq.wy1vqfzu) .or. (ispar .eq. 0))t *hen e5knafcg = 4 fp6nozvx = 1.50d0 waiez6nt = 0.00d0 wep0oibc = 1 toldf=0.001d0 if(wy1vqfzu.eq.1)then toldf=0.005d0 else if(wy1vqfzu.eq.2)then toldf=0.015d0 else if(wy1vqfzu.eq.3)then toldf=0.025d0 else toldf=0.045d0 endif endif endif l3zpbstu(1) = icrit l3zpbstu(2) = ispar l3zpbstu(3) = 300 parms(1) = waiez6nt parms(2) = fp6nozvx parms(3) = toldf gp0xjetb=0 if((wy1vqfzu .eq. 1) .or. (dimw.eq.wy1vqfzu))then do23038 ayfnwr1v=1,kuzxj1lo rbne6ouj(ayfnwr1v,yq6lorbx) = rbne6ouj(ayfnwr1v,yq6lorbx) / wmat(a *yfnwr1v,yq6lorbx) 23038 continue 23039 continue call dnaoqj0l(penalt, dof(yq6lorbx), he7mqnvy, rbne6ouj(1,yq6lorbx *), wmat(1,yq6lorbx), kuzxj1lo,nk, gkdx5jal,rpyis2kc(1,yq6lorbx), t *8hwvalr(1,yq6lorbx), ifys6woa(1,yq6lorbx), qcpiaj7f,wbkq9zyi(yq6lo *rbx),parms, scrtch, gp0xjetb,l3zpbstu, e5knafcg,wep0oibc,fbd5yktj) if(fbd5yktj .ne. 
0)then return endif do23042 ayfnwr1v=1,kuzxj1lo wmat(ayfnwr1v,yq6lorbx) = wmat(ayfnwr1v,yq6lorbx) * wmat(ayfnwr1v, *yq6lorbx) 23042 continue 23043 continue if(yzoe1rsp .ne. 0)then do23046 ayfnwr1v=1,kuzxj1lo ui8ysltq(ayfnwr1v,yq6lorbx) = ifys6woa(ayfnwr1v,yq6lorbx) / wmat(a *yfnwr1v,yq6lorbx) 23046 continue 23047 continue endif else call dnaoqj0l(penalt, dof(yq6lorbx), he7mqnvy, btwy(1,yq6lorbx), w *mat(1,yq6lorbx), kuzxj1lo,nk, gkdx5jal,rpyis2kc(1,yq6lorbx),t8hwva *lr(1,yq6lorbx), ifys6woa(1,yq6lorbx), qcpiaj7f,wbkq9zyi(yq6lorbx), *parms, scrtch, gp0xjetb,l3zpbstu, e5knafcg,wep0oibc,fbd5yktj) if(fbd5yktj .ne. 0)then return endif do23050 ayfnwr1v=1,kuzxj1lo wmat(ayfnwr1v,yq6lorbx) = wmat(ayfnwr1v,yq6lorbx) * wmat(ayfnwr1v, *yq6lorbx) 23050 continue 23051 continue endif if(fbd5yktj .ne. 0)then return endif endif 23024 continue 23025 continue if((wy1vqfzu .eq. 1) .or. (dimw .eq. wy1vqfzu))then return endif do23056 ayfnwr1v=1,nk do23058 yq6lorbx=1,wy1vqfzu btwy(yq6lorbx,ayfnwr1v)=0.0d0 23058 continue 23059 continue 23056 continue 23057 continue do23060 ayfnwr1v=1,(nk*wy1vqfzu) do23062 yq6lorbx=1,ldk osiz4fxy(yq6lorbx,ayfnwr1v) = 0.0d0 23062 continue 23063 continue 23060 continue 23061 continue qaltf0nz = 0.1d-9 do23064 ayfnwr1v=1,kuzxj1lo call vinterv(gkdx5jal(1),(nk+1),he7mqnvy(ayfnwr1v),dqlr5bse,pqzfxw *4i) if(pqzfxw4i .eq. 1)then if(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz))then dqlr5bse=dqlr5bse-1 else return endif endif call vbsplvd(gkdx5jal,4,he7mqnvy(ayfnwr1v),dqlr5bse,ms0qypiw,g9fvd *rbw,1) yq6lorbx= dqlr5bse-4+1 do23070 urohxe6t=1,wy1vqfzu btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1 *v,urohxe6t) * g9fvdrbw(1,1) 23070 continue 23071 continue call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 1, *1, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) call ybnagt8k(ayfnwr1v, yq6lorbx, 1, g9fvdrbw, osiz4fxy, wmat, 1, *2, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) call ybnagt8k(ayfnwr1v, yq6lorbx, 2, g9fvdrbw, osiz4fxy, wmat, 1, *3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) call ybnagt8k(ayfnwr1v, yq6lorbx, 3, g9fvdrbw, osiz4fxy, wmat, 1, *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) yq6lorbx= dqlr5bse-4+2 do23072 urohxe6t=1,wy1vqfzu btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1 *v,urohxe6t) * g9fvdrbw(2,1) 23072 continue 23073 continue call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 2, *2, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) call ybnagt8k(ayfnwr1v, yq6lorbx, 1, g9fvdrbw, osiz4fxy, wmat, 2, *3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) call ybnagt8k(ayfnwr1v, yq6lorbx, 2, g9fvdrbw, osiz4fxy, wmat, 2, *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) yq6lorbx= dqlr5bse-4+3 do23074 urohxe6t=1,wy1vqfzu btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1 *v,urohxe6t) * g9fvdrbw(3,1) 23074 continue 23075 continue call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 3, *3, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) call ybnagt8k(ayfnwr1v, yq6lorbx, 1, g9fvdrbw, osiz4fxy, wmat, 3, *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) yq6lorbx= dqlr5bse-4+4 do23076 urohxe6t=1,wy1vqfzu btwy(urohxe6t,yq6lorbx)=btwy(urohxe6t,yq6lorbx) + rbne6ouj(ayfnwr1 *v,urohxe6t) * g9fvdrbw(4,1) 23076 continue 23077 continue call ybnagt8k(ayfnwr1v, yq6lorbx, 0, g9fvdrbw, osiz4fxy, wmat, 4, *4, wy1vqfzu, ldk, dimw, kuzxj1lo, nk, tgiyxdw1, dufozmt7) 23064 continue 23065 continue call zosq7hub(sgdub(1,1), 
sgdub(1,2), sgdub(1,3), sgdub(1,4), gkdx *5jal, nk) call tfeswo7c(osiz4fxy, nk, wy1vqfzu, ldk, wbkq9zyi, sgdub) call vdpbfa7(osiz4fxy, ldk, nk*wy1vqfzu, ldk-1, info, sgdub) if(info .ne. 0)then return endif call vdpbsl7(osiz4fxy, ldk, nk*wy1vqfzu, ldk-1, btwy, sgdub) i1loc = 0 do23080 ayfnwr1v=1,nk do23082 yq6lorbx=1,wy1vqfzu i1loc = i1loc + 1 rpyis2kc(ayfnwr1v,yq6lorbx) = btwy(yq6lorbx,ayfnwr1v) 23082 continue 23083 continue 23080 continue 23081 continue call cn8kzpab(gkdx5jal, he7mqnvy, rpyis2kc, kuzxj1lo, nk, wy1vqfzu *, t8hwvalr) call vicb2(osiz4fxy, osiz4fxy, sgdub, wkmm, ldk-1, nk*wy1vqfzu) call icpd0omv(osiz4fxy, he7mqnvy, gkdx5jal, ui8ysltq, ldk, kuzxj1l *o, nk, wy1vqfzu, yzoe1rsp, bmb, wkmm, wmat, ifys6woa, dimw, tgiyxd *w1, dufozmt7, truen) return end subroutine cn8kzpab(ankcghz2, he7mqnvy, rpyis2kc, kuzxj1lo, nk, wy *1vqfzu, t8hwvalr) implicit logical (a-z) integer kuzxj1lo, nk, wy1vqfzu double precision ankcghz2(nk+4), he7mqnvy(kuzxj1lo), rpyis2kc(nk,w *y1vqfzu), t8hwvalr(kuzxj1lo,wy1vqfzu) double precision chw8lzty integer ayfnwr1v, yq6lorbx, izero0, ifour4 izero0 = 0 ifour4 = 4 do23084 ayfnwr1v=1,kuzxj1lo chw8lzty = he7mqnvy(ayfnwr1v) do23086 yq6lorbx=1,wy1vqfzu call wbvalue(ankcghz2, rpyis2kc(1,yq6lorbx), nk, ifour4, chw8lzty, * izero0, t8hwvalr(ayfnwr1v,yq6lorbx)) 23086 continue 23087 continue 23084 continue 23085 continue return end subroutine vsuff9(kuzxj1lo,nef,ezlgm2up, he7mqnvy,tlgduey8,wmat, p *ygsw6ko,pasjmo8g,wbar,uwbar,wpasjmo8g, wy1vqfzu, dimw, dimu, tgiyx *dw1, dufozmt7, work, work2, hjm2ktyr, kgwmz4ip, iz2nbfjc, wuwbar, *dvhw1ulq) implicit logical (a-z) integer kuzxj1lo, nef, ezlgm2up(kuzxj1lo), wy1vqfzu, dimw, dimu, k *gwmz4ip, iz2nbfjc, wuwbar, dvhw1ulq, tgiyxdw1(*),dufozmt7(*) double precision he7mqnvy(kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqfzu), *wmat(kuzxj1lo,dimw), pygsw6ko(nef), pasjmo8g(nef,wy1vqfzu), wbar(n *ef,*), uwbar(dimu,nef), wpasjmo8g(nef,wy1vqfzu), work(wy1vqfzu,wy1 *vqfzu+1), work2(kgwmz4ip,kgwmz4ip+1), hjm2ktyr(wy1vqfzu,kgwmz4ip) integer ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, imk5wjxg integer oneint oneint = 1 if(iz2nbfjc .eq. 1)then if((dimu .ne. dimw) .or. (kgwmz4ip .ne. wy1vqfzu))then dvhw1ulq = 0 return endif endif imk5wjxg = wy1vqfzu * (wy1vqfzu+1) / 2 if(dimw .gt. imk5wjxg)then endif call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu) do23094 ayfnwr1v=1,kuzxj1lo pygsw6ko(ezlgm2up(ayfnwr1v))=he7mqnvy(ayfnwr1v) 23094 continue 23095 continue do23096 yq6lorbx=1,wy1vqfzu do23098 ayfnwr1v=1,nef wpasjmo8g(ayfnwr1v,yq6lorbx) = 0.0d0 23098 continue 23099 continue 23096 continue 23097 continue do23100 yq6lorbx=1,dimw do23102 ayfnwr1v=1,nef wbar(ayfnwr1v,yq6lorbx) = 0.0d0 23102 continue 23103 continue 23100 continue 23101 continue if(dimw .ne. 
imk5wjxg)then do23106 gp1jxzuh=1,wy1vqfzu do23108 yq6lorbx=1,wy1vqfzu work(yq6lorbx,gp1jxzuh) = 0.0d0 23108 continue 23109 continue 23106 continue 23107 continue endif do23110 ayfnwr1v=1,kuzxj1lo do23112 yq6lorbx=1,dimw work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wmat(ayfnwr1v,yq6lor *bx) work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(yq6lor *bx),dufozmt7(yq6lorbx)) 23112 continue 23113 continue do23114 yq6lorbx=1,wy1vqfzu do23116 gp1jxzuh=1,wy1vqfzu wpasjmo8g(ezlgm2up(ayfnwr1v),yq6lorbx) = wpasjmo8g(ezlgm2up(ayfnwr *1v),yq6lorbx) + work(yq6lorbx,gp1jxzuh)*tlgduey8(ayfnwr1v,gp1jxzuh *) 23116 continue 23117 continue 23114 continue 23115 continue do23118 yq6lorbx=1,dimw wbar(ezlgm2up(ayfnwr1v),yq6lorbx) = wbar(ezlgm2up(ayfnwr1v),yq6lor *bx) + wmat(ayfnwr1v,yq6lorbx) 23118 continue 23119 continue 23110 continue 23111 continue dvhw1ulq = 1 if(iz2nbfjc .eq. 1)then do23122 ayfnwr1v=1,nef do23124 yq6lorbx=1,dimw work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wbar(ayfnwr1v,yq6lor *bx) work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(yq6lor *bx),dufozmt7(yq6lorbx)) 23124 continue 23125 continue do23126 yq6lorbx=1,wy1vqfzu work(yq6lorbx,wy1vqfzu+1)=wpasjmo8g(ayfnwr1v,yq6lorbx) 23126 continue 23127 continue call vcholf(work, work(1,wy1vqfzu+1), wy1vqfzu, dvhw1ulq, oneint) if(dvhw1ulq .ne. 1)then return endif if(wuwbar .ne. 0)then do23132 yq6lorbx=1,dimw uwbar(yq6lorbx,ayfnwr1v) = work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lor *bx)) 23132 continue 23133 continue endif do23134 yq6lorbx=1,wy1vqfzu pasjmo8g(ayfnwr1v,yq6lorbx)=work(yq6lorbx,wy1vqfzu+1) 23134 continue 23135 continue 23122 continue 23123 continue else if(dimw .ne. imk5wjxg)then do23138 yq6lorbx=1,wy1vqfzu do23140 gp1jxzuh=1,wy1vqfzu work(yq6lorbx,gp1jxzuh) = 0.0d0 23140 continue 23141 continue 23138 continue 23139 continue endif do23142 ayfnwr1v=1,nef call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu) do23144 yq6lorbx=1,dimw work(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx)) = wbar(ayfnwr1v,yq6lor *bx) work(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx)) = work(tgiyxdw1(yq6lor *bx),dufozmt7(yq6lorbx)) 23144 continue 23145 continue do23146 yq6lorbx=1,wy1vqfzu work(yq6lorbx,wy1vqfzu+1)=wpasjmo8g(ayfnwr1v,yq6lorbx) 23146 continue 23147 continue do23148 yq6lorbx=1,kgwmz4ip do23150 gp1jxzuh=yq6lorbx,kgwmz4ip work2(yq6lorbx,gp1jxzuh) = 0.0d0 do23152 urohxe6t=1,wy1vqfzu do23154 bpvaqm5z=1,wy1vqfzu work2(yq6lorbx,gp1jxzuh) = work2(yq6lorbx,gp1jxzuh) + hjm2ktyr(uro *hxe6t,yq6lorbx) * work(urohxe6t,bpvaqm5z) * hjm2ktyr(bpvaqm5z,gp1j *xzuh) 23154 continue 23155 continue 23152 continue 23153 continue 23150 continue 23151 continue 23148 continue 23149 continue call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip) do23156 yq6lorbx=1,dimu wbar(ayfnwr1v,yq6lorbx) = work2(tgiyxdw1(yq6lorbx),dufozmt7(yq6lor *bx)) 23156 continue 23157 continue do23158 yq6lorbx=1,kgwmz4ip work2(yq6lorbx,kgwmz4ip+1) = 0.0d0 do23160 urohxe6t=1,wy1vqfzu work2(yq6lorbx,kgwmz4ip+1) = work2(yq6lorbx,kgwmz4ip+1) + hjm2ktyr *(urohxe6t,yq6lorbx) * work(urohxe6t,wy1vqfzu+1) 23160 continue 23161 continue 23158 continue 23159 continue do23162 yq6lorbx=1,kgwmz4ip wpasjmo8g(ayfnwr1v,yq6lorbx) = work2(yq6lorbx,kgwmz4ip+1) 23162 continue 23163 continue call vcholf(work2, work2(1,kgwmz4ip+1), kgwmz4ip, dvhw1ulq, oneint *) if(dvhw1ulq .ne. 1)then return endif if(wuwbar .ne. 
0)then do23168 yq6lorbx=1,dimu uwbar(yq6lorbx,ayfnwr1v) = work2(tgiyxdw1(yq6lorbx),dufozmt7(yq6lo *rbx)) 23168 continue 23169 continue endif do23170 yq6lorbx=1,kgwmz4ip pasjmo8g(ayfnwr1v,yq6lorbx) = work2(yq6lorbx,kgwmz4ip+1) 23170 continue 23171 continue 23142 continue 23143 continue endif return end subroutine icpd0omv(enaqpzk9, he7mqnvy, gkdx5jal, grmuyvx9, ldk, k *uzxj1lo, nk, wy1vqfzu, jzwsy6tp, bmb, work, wmat, ifys6woa, dimw, *tgiyxdw1, dufozmt7, truen) implicit logical (a-z) integer ldk, kuzxj1lo, nk, wy1vqfzu, jzwsy6tp, dimw, tgiyxdw1(*), *dufozmt7(*), truen double precision enaqpzk9(ldk,nk*wy1vqfzu), he7mqnvy(kuzxj1lo), gk *dx5jal(nk+4), grmuyvx9(truen,wy1vqfzu), bmb(wy1vqfzu,wy1vqfzu), wo *rk(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), ifys6woa(kuzxj1lo,wy1v *qfzu) integer ayfnwr1v, yq6lorbx, gp1jxzuh, dqlr5bse, pqzfxw4i, urohxe6t *, bpvaqm5z double precision qaltf0nz, ms0qypiw(16), g9fvdrbw(4,1) if(jzwsy6tp .ne. 0)then do23174 gp1jxzuh=1,wy1vqfzu do23176 ayfnwr1v=1,kuzxj1lo grmuyvx9(ayfnwr1v,gp1jxzuh) = 0.0d0 23176 continue 23177 continue 23174 continue 23175 continue endif qaltf0nz = 0.10d-9 call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu) do23178 ayfnwr1v=1,kuzxj1lo do23180 yq6lorbx=1,wy1vqfzu do23182 gp1jxzuh=1,wy1vqfzu bmb(yq6lorbx,gp1jxzuh)=0.0d0 23182 continue 23183 continue 23180 continue 23181 continue call vinterv(gkdx5jal(1), (nk+1), he7mqnvy(ayfnwr1v), dqlr5bse, pq *zfxw4i) if(pqzfxw4i.eq. 1)then if(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz))then dqlr5bse=dqlr5bse-1 else return endif endif call vbsplvd(gkdx5jal, 4, he7mqnvy(ayfnwr1v), dqlr5bse, ms0qypiw, *g9fvdrbw, 1) yq6lorbx= dqlr5bse-4+1 do23188 urohxe6t=yq6lorbx,yq6lorbx+3 call vsel(urohxe6t, urohxe6t, wy1vqfzu, nk, ldk, enaqpzk9, work) call o0xlszqr(wy1vqfzu, g9fvdrbw(urohxe6t-yq6lorbx+1,1) * g9fvdrbw *(urohxe6t-yq6lorbx+1,1), work, bmb) 23188 continue 23189 continue do23190 urohxe6t=yq6lorbx,yq6lorbx+3 do23192 bpvaqm5z=urohxe6t+1,yq6lorbx+3 call vsel(urohxe6t, bpvaqm5z, wy1vqfzu, nk, ldk, enaqpzk9, work) call o0xlszqr(wy1vqfzu, 2.0d0 * g9fvdrbw(urohxe6t-yq6lorbx+1,1) * *g9fvdrbw(bpvaqm5z-yq6lorbx+1,1), work, bmb) 23192 continue 23193 continue 23190 continue 23191 continue if(jzwsy6tp .ne. 0)then do23196 yq6lorbx=1,wy1vqfzu grmuyvx9(ayfnwr1v,yq6lorbx) = bmb(yq6lorbx,yq6lorbx) 23196 continue 23197 continue endif call ovjnsmt2(bmb, wmat, work, ifys6woa, wy1vqfzu, kuzxj1lo, dimw, * tgiyxdw1, dufozmt7, ayfnwr1v) 23178 continue 23179 continue return end subroutine o0xlszqr(wy1vqfzu, g9fvdrbw, work, bmb) implicit logical (a-z) integer wy1vqfzu double precision g9fvdrbw, work(wy1vqfzu,wy1vqfzu), bmb(wy1vqfzu,w *y1vqfzu) integer yq6lorbx, gp1jxzuh do23198 yq6lorbx=1,wy1vqfzu do23200 gp1jxzuh=1,wy1vqfzu work(yq6lorbx,gp1jxzuh) = work(yq6lorbx,gp1jxzuh) * g9fvdrbw 23200 continue 23201 continue 23198 continue 23199 continue do23202 yq6lorbx=1,wy1vqfzu do23204 gp1jxzuh=1,wy1vqfzu bmb(gp1jxzuh,yq6lorbx) = bmb(gp1jxzuh,yq6lorbx) + work(gp1jxzuh,yq *6lorbx) 23204 continue 23205 continue 23202 continue 23203 continue return end subroutine vsel(s, t, wy1vqfzu, nk, ldk, minv, work) implicit logical (a-z) integer s, t, wy1vqfzu, nk, ldk double precision minv(ldk,nk*wy1vqfzu), work(wy1vqfzu,wy1vqfzu) integer ayfnwr1v, yq6lorbx, biuvowq2, nbj8tdsk do23206 ayfnwr1v=1,wy1vqfzu do23208 yq6lorbx=1,wy1vqfzu work(ayfnwr1v,yq6lorbx) = 0.0d0 23208 continue 23209 continue 23206 continue 23207 continue if(s .ne. 
t)then do23212 ayfnwr1v=1,wy1vqfzu biuvowq2 = (s-1)*wy1vqfzu + ayfnwr1v do23214 yq6lorbx=1,wy1vqfzu nbj8tdsk = (t-1)*wy1vqfzu + yq6lorbx work(ayfnwr1v,yq6lorbx) = minv(ldk-(nbj8tdsk-biuvowq2), nbj8tdsk) 23214 continue 23215 continue 23212 continue 23213 continue else do23216 ayfnwr1v=1,wy1vqfzu biuvowq2 = (s-1)*wy1vqfzu + ayfnwr1v do23218 yq6lorbx=ayfnwr1v,wy1vqfzu nbj8tdsk = (t-1)*wy1vqfzu + yq6lorbx work(ayfnwr1v,yq6lorbx) = minv(ldk-(nbj8tdsk-biuvowq2), nbj8tdsk) 23218 continue 23219 continue 23216 continue 23217 continue do23220 ayfnwr1v=1,wy1vqfzu do23222 yq6lorbx=ayfnwr1v+1,wy1vqfzu work(yq6lorbx,ayfnwr1v) = work(ayfnwr1v,yq6lorbx) 23222 continue 23223 continue 23220 continue 23221 continue endif return end subroutine ovjnsmt2(bmb, wmat, work, ifys6woa, wy1vqfzu, kuzxj1lo, * dimw, tgiyxdw1, dufozmt7, iii) implicit logical (a-z) integer wy1vqfzu, kuzxj1lo, dimw, tgiyxdw1(*), dufozmt7(*), iii double precision bmb(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), work *(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu) double precision q6zdcwxk, obr6tcex integer yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z do23224 bpvaqm5z=1,wy1vqfzu do23226 yq6lorbx=1,wy1vqfzu do23228 gp1jxzuh=1,wy1vqfzu work(gp1jxzuh,yq6lorbx) = 0.0d0 23228 continue 23229 continue 23226 continue 23227 continue do23230 urohxe6t=1,dimw obr6tcex = wmat(iii,urohxe6t) work(tgiyxdw1(urohxe6t),dufozmt7(urohxe6t)) = obr6tcex work(dufozmt7(urohxe6t),tgiyxdw1(urohxe6t)) = obr6tcex 23230 continue 23231 continue q6zdcwxk = 0.0d0 do23232 yq6lorbx=1,wy1vqfzu q6zdcwxk = q6zdcwxk + bmb(bpvaqm5z,yq6lorbx) * work(yq6lorbx,bpvaq *m5z) 23232 continue 23233 continue ifys6woa(iii,bpvaqm5z) = q6zdcwxk 23224 continue 23225 continue return end subroutine vicb2(enaqpzk9, wpuarq2m, d, uu, wy1vqfzu, kuzxj1lo) implicit logical (a-z) integer wy1vqfzu, kuzxj1lo double precision enaqpzk9(wy1vqfzu+1,kuzxj1lo), wpuarq2m(wy1vqfzu+ *1,kuzxj1lo), d(kuzxj1lo), uu(wy1vqfzu+1,wy1vqfzu+1) integer ayfnwr1v, gp1jxzuh, lsvdbx3tk, uplim, sedf7mxb, hofjnx2e, *kij0gwer enaqpzk9(wy1vqfzu+1,kuzxj1lo) = 1.0d0 / d(kuzxj1lo) hofjnx2e = wy1vqfzu+1 sedf7mxb = kuzxj1lo+1 - hofjnx2e do23234 kij0gwer=sedf7mxb,kuzxj1lo do23236 ayfnwr1v=1,hofjnx2e uu(ayfnwr1v, kij0gwer-sedf7mxb+1) = wpuarq2m(ayfnwr1v, kij0gwer) 23236 continue 23237 continue 23234 continue 23235 continue ayfnwr1v = kuzxj1lo-1 23238 if(.not.(ayfnwr1v .ge. 1))goto 23240 if(wy1vqfzu .lt. kuzxj1lo-ayfnwr1v)then uplim = wy1vqfzu else uplim = kuzxj1lo-ayfnwr1v endif lsvdbx3tk=1 23243 if(.not.(lsvdbx3tk .le. uplim))goto 23245 enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = 0.0d0 gp1jxzuh=1 23246 if(.not.(gp1jxzuh .le. lsvdbx3tk))goto 23248 enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = enaqpzk9(-lsv *dbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) - uu(-gp1jxzuh+wy1vqfzu+1,ay *fnwr1v+gp1jxzuh -sedf7mxb+1) * enaqpzk9(gp1jxzuh-lsvdbx3tk+wy1vqfz *u+1,ayfnwr1v+lsvdbx3tk) 23247 gp1jxzuh=gp1jxzuh+1 goto 23246 23248 continue 23249 if(.not.(gp1jxzuh .le. uplim))goto 23251 enaqpzk9(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) = enaqpzk9(-lsv *dbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) - uu(-gp1jxzuh+wy1vqfzu+1,ay *fnwr1v+gp1jxzuh -sedf7mxb+1) * enaqpzk9(lsvdbx3tk-gp1jxzuh+wy1vqfz *u+1,ayfnwr1v+gp1jxzuh) 23250 gp1jxzuh=gp1jxzuh+1 goto 23249 23251 continue 23244 lsvdbx3tk=lsvdbx3tk+1 goto 23243 23245 continue enaqpzk9(wy1vqfzu+1,ayfnwr1v) = 1.0d0 / d(ayfnwr1v) lsvdbx3tk = 1 23252 if(.not.(lsvdbx3tk .le. 
uplim))goto 23254 enaqpzk9(wy1vqfzu+1,ayfnwr1v) = enaqpzk9(wy1vqfzu+1,ayfnwr1v) - uu *(-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk -sedf7mxb+1) * enaqpzk9( *-lsvdbx3tk+wy1vqfzu+1,ayfnwr1v+lsvdbx3tk) 23253 lsvdbx3tk=lsvdbx3tk+1 goto 23252 23254 continue if(ayfnwr1v .eq. sedf7mxb)then sedf7mxb = sedf7mxb-1 if(sedf7mxb .lt. 1)then sedf7mxb = 1 else kij0gwer=hofjnx2e-1 23259 if(.not.(kij0gwer .ge. 1))goto 23261 gp1jxzuh=1 23262 if(.not.(gp1jxzuh .le. hofjnx2e))goto 23264 uu(gp1jxzuh,kij0gwer+1) = uu(gp1jxzuh,kij0gwer) 23263 gp1jxzuh=gp1jxzuh+1 goto 23262 23264 continue 23260 kij0gwer=kij0gwer-1 goto 23259 23261 continue gp1jxzuh=1 23265 if(.not.(gp1jxzuh .le. hofjnx2e))goto 23267 uu(gp1jxzuh,1) = wpuarq2m(gp1jxzuh,sedf7mxb) 23266 gp1jxzuh=gp1jxzuh+1 goto 23265 23267 continue endif endif 23239 ayfnwr1v = ayfnwr1v-1 goto 23238 23240 continue return end subroutine ewg7qruh(sjwyig9tto,tlgduey8,wmat, kuzxj1lo,wy1vqfzu,ez *lgm2up,nef, wbkq9zyi,dof,smo,cov, s0, xin,yin,rbne6ouj,win, work1, *work3, dimw, fbd5yktj, ldk, info, yzoe1rsp, sgdub, rpyis2kc, zv2xf *hei, acpios9q,tgiyxdw1,dufozmt7, bmb, ifys6woa, wkmm, iz2nbfjc,kgw *mz4ip,ges1xpkr, hjm2ktyr, beta, fasrkub3, sout, r0oydcxb, ub4xioar *, effect, uwin) implicit logical (a-z) integer kuzxj1lo,wy1vqfzu,ezlgm2up(kuzxj1lo),nef, dimw, fbd5yktj, *ldk, info, yzoe1rsp, acpios9q,tgiyxdw1(*),dufozmt7(*), iz2nbfjc, k *gwmz4ip, ges1xpkr(kgwmz4ip*2) double precision sjwyig9tto(kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqfzu) *, wmat(kuzxj1lo,dimw), wbkq9zyi(kgwmz4ip), dof(kgwmz4ip), smo(kuzx *j1lo,kgwmz4ip), cov(kuzxj1lo,kgwmz4ip) double precision s0(2*kgwmz4ip, 2*kgwmz4ip,2) double precision work1(*), work3(*), sgdub(*), rpyis2kc(*), zv2xfh *ei(acpios9q+4) double precision xin(nef), yin(nef,wy1vqfzu), rbne6ouj(nef,wy1vqfz *u), win(nef,*), bmb(*), ifys6woa(nef,kgwmz4ip), wkmm(wy1vqfzu,wy1v *qfzu,16), hjm2ktyr(wy1vqfzu,kgwmz4ip) double precision beta(2*kgwmz4ip), fasrkub3(2*kgwmz4ip), sout(nef, *kgwmz4ip), r0oydcxb(kgwmz4ip,nef), ub4xioar(kgwmz4ip,nef), effect( *nef*kgwmz4ip), uwin(*) integer dimwin integer ayfnwr1v, yq6lorbx, gp1jxzuh, rutyk8mg, xjc4ywlh, job, qem *j9asg, dvhw1ulq integer oneint double precision xmin, xrange, pvofyg8z oneint = 1 if(iz2nbfjc .eq. 1)then dimwin = dimw else dimwin = kgwmz4ip*(kgwmz4ip+1)/2 endif call qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu) call vsuff9(kuzxj1lo,nef,ezlgm2up, sjwyig9tto,tlgduey8,wmat, xin,y *in,win,uwin,rbne6ouj, wy1vqfzu, dimw, dimwin, tgiyxdw1, dufozmt7, *wkmm, wkmm(1,1,3), hjm2ktyr, kgwmz4ip, iz2nbfjc, oneint, dvhw1ulq) if(dvhw1ulq .ne. 1)then return endif xmin = xin(1) xrange = xin(nef)-xin(1) do23272 ayfnwr1v=1,nef xin(ayfnwr1v) = (xin(ayfnwr1v)-xmin)/xrange 23272 continue 23273 continue ldk = 4*kgwmz4ip fbd5yktj = 0 do23274 yq6lorbx=1,kgwmz4ip if(wbkq9zyi(yq6lorbx) .eq. 0.0d0)then dof(yq6lorbx) = dof(yq6lorbx) + 1.0d0 endif 23274 continue 23275 continue call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip) call vsplin(xin,rbne6ouj,win,nef,zv2xfhei, acpios9q,ldk,kgwmz4ip,d *imwin, tgiyxdw1,dufozmt7, wkmm, wbkq9zyi, info, sout, rpyis2kc, wo *rk3(1), work3(1+acpios9q*kgwmz4ip*ldk), sgdub, cov, yzoe1rsp, bmb, * ifys6woa, dof, work1, fbd5yktj, kuzxj1lo) do23278 yq6lorbx=1,kgwmz4ip dof(yq6lorbx) = -1.0d0 do23280 ayfnwr1v=1,nef dof(yq6lorbx)=dof(yq6lorbx)+ifys6woa(ayfnwr1v,yq6lorbx) 23280 continue 23281 continue 23278 continue 23279 continue if(kgwmz4ip .ge. 
1)then pvofyg8z = 1.0d-7 rutyk8mg = nef*kgwmz4ip xjc4ywlh = 2*kgwmz4ip job = 101 info = 1 call x6kanjdh(xin, work3, nef, kgwmz4ip) call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip) call mux17f(uwin, work3, kgwmz4ip, xjc4ywlh, nef, wkmm(1,1,1), wkm *m(1,1,2), tgiyxdw1, dufozmt7, dimwin, rutyk8mg) do23284 gp1jxzuh=1,xjc4ywlh ges1xpkr(gp1jxzuh) = gp1jxzuh 23284 continue 23285 continue call vqrdca(work3,rutyk8mg,rutyk8mg,xjc4ywlh,fasrkub3,ges1xpkr,wor *k1,qemj9asg,pvofyg8z) call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip) call mux22f(uwin,sout,r0oydcxb,dimwin,tgiyxdw1,dufozmt7,nef,kgwmz4 *ip,wkmm) call vdqrsl(work3,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3,r0oydcxb,wor *k1(1),effect,beta, work1(1),ub4xioar,job,info) call vbksf(uwin,ub4xioar,kgwmz4ip,nef,wkmm,tgiyxdw1,dufozmt7,dimwi *n) if(yzoe1rsp .ne. 0)then call vrinvf9(work3, rutyk8mg, xjc4ywlh, dvhw1ulq, s0(1,1,1), s0(1, *1,2)) if(dvhw1ulq .ne. 1)then return endif do23290 yq6lorbx=1,kgwmz4ip do23292 ayfnwr1v=1,nef cov(ayfnwr1v,yq6lorbx) = cov(ayfnwr1v,yq6lorbx) - s0(yq6lorbx,yq6l *orbx,1) - xin(ayfnwr1v) * (2.0d0 * s0(yq6lorbx,yq6lorbx+kgwmz4ip,1 *) + xin(ayfnwr1v) * s0(yq6lorbx+kgwmz4ip,yq6lorbx+kgwmz4ip,1)) 23292 continue 23293 continue 23290 continue 23291 continue endif else call dsrt0gem(nef, xin, win, sout, ub4xioar, cov, yzoe1rsp) endif do23294 ayfnwr1v=1,nef do23296 yq6lorbx=1,kgwmz4ip sout(ayfnwr1v,yq6lorbx) = sout(ayfnwr1v,yq6lorbx) - ub4xioar(yq6lo *rbx,ayfnwr1v) 23296 continue 23297 continue 23294 continue 23295 continue do23298 yq6lorbx=1,kgwmz4ip call shm8ynte(kuzxj1lo, nef, ezlgm2up, sout(1,yq6lorbx), smo(1,yq6 *lorbx)) 23298 continue 23299 continue return end subroutine vbfa( n,wy1vqfzu,psdvgce3, he7mqnvy,tlgduey8,wmat,wbkq9 *zyi,dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, beta,c *ov,zpcqv3uj, vc6hatuj,fasrkub3, ges1xpkr, xbig, wpuarq2m, hjm2ktyr *, jnxpuym2, hnpt1zym, fzm1ihwj, iz2nbfjc, work1, wk2, wkmm, work3, * sgdub, bmb, ifys6woa, mwk, twk, rpyis2kc, zv2xfhei, resss, nbzjkp *i3, acpios9q, itwk, jwbkl9fp) implicit logical (a-z) integer irhm4cfa, n, wy1vqfzu, psdvgce3(15), ezlgm2up(*),nef(*),wh *ich(*), ges1xpkr(*) integer jnxpuym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*), nbzjkp *i3(*), acpios9q(*), itwk(*), jwbkl9fp(*) double precision he7mqnvy(*),tlgduey8(*),wmat(*),wbkq9zyi(*),dof(* *), ub4xioar(*),kispwgx3(*), m0ibglfx(*), s0(wy1vqfzu), beta(*),cov *(*),zpcqv3uj, vc6hatuj(*),fasrkub3(*) double precision xbig(*), wpuarq2m(*), hjm2ktyr(*), work1(*), wk2( *n,wy1vqfzu,3), wkmm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), bmb *(*), ifys6woa(*), mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), resss integer p,q,yzoe1rsp,niter,gtrlbz3e, rutyk8mg, xjc4ywlh, lyma1kwc, * dimw, dimu, fbd5yktj,ldk integer iter integer xs4wtvlg integer ayfnwr1v, imk5wjxg, qemj9asg irhm4cfa = 0 imk5wjxg = wy1vqfzu*(wy1vqfzu+1)/2 p=psdvgce3(2) q=psdvgce3(3) yzoe1rsp= 0 if(psdvgce3(4) .eq. 1)then yzoe1rsp = 1 endif gtrlbz3e=psdvgce3(6) qemj9asg=psdvgce3(7) rutyk8mg=psdvgce3(9) xjc4ywlh=psdvgce3(10) lyma1kwc=psdvgce3(11) dimw=psdvgce3(12) dimu=psdvgce3(13) fbd5yktj = 0 ldk=psdvgce3(15) xs4wtvlg = 1 if(lyma1kwc .gt. 0)then do23304 ayfnwr1v=1,lyma1kwc work1(ayfnwr1v) = dof(ayfnwr1v) work1(ayfnwr1v+lyma1kwc) = wbkq9zyi(ayfnwr1v) work1(ayfnwr1v+2*lyma1kwc) = dof(ayfnwr1v) 23304 continue 23305 continue endif iter = 0 23306 if(xs4wtvlg .ne. 0)then iter = iter+1 if(iter .gt. 1)then if(lyma1kwc .gt. 0)then do23312 ayfnwr1v=1,lyma1kwc if(work1(ayfnwr1v+lyma1kwc).eq.0.0d0 .and. 
(dabs(work1(ayfnwr1v+2* *lyma1kwc)-dof(ayfnwr1v))/dof(ayfnwr1v).gt.0.05d0))then work1(ayfnwr1v+2*lyma1kwc) = dof(ayfnwr1v) dof(ayfnwr1v)=work1(ayfnwr1v) wbkq9zyi(ayfnwr1v)=0.0d0 else work1(ayfnwr1v+2*lyma1kwc) = dof(ayfnwr1v) endif 23312 continue 23313 continue endif endif call vbfa1(irhm4cfa,n,wy1vqfzu, he7mqnvy,tlgduey8,wmat,wbkq9zyi,do *f, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, beta,cov,zpc *qv3uj, vc6hatuj,fasrkub3, qemj9asg,ges1xpkr, xbig, wpuarq2m, hjm2k *tyr, jnxpuym2, hnpt1zym, fzm1ihwj(1), fzm1ihwj(1 + imk5wjxg), iz2n *bfjc, work1(1+3*lyma1kwc), wkmm, work3, sgdub, bmb, ifys6woa, mwk, * twk, rpyis2kc, zv2xfhei, resss, nbzjkpi3, acpios9q, itwk, jwbkl9f *p, p,q,yzoe1rsp,niter,gtrlbz3e, wk2(1,1,1), wk2(1,1,2), wk2(1,1,3) *, rutyk8mg, xjc4ywlh, lyma1kwc, dimw, dimu, fbd5yktj, ldk) if(irhm4cfa .ne. 0)then call vcall2(xs4wtvlg,w,y,m0ibglfx,beta,wpuarq2m) else xs4wtvlg = 0 endif if(xs4wtvlg .ne. 0)then qemj9asg=0 endif goto 23306 endif 23307 continue psdvgce3(7) = qemj9asg psdvgce3(5) = niter psdvgce3(14) = fbd5yktj return end subroutine vbfa1(irhm4cfa,kuzxj1lo,wy1vqfzu, he7mqnvy,tlgduey8,wma *t,wbkq9zyi,dof, ezlgm2up,nef,which, ub4xioar,kispwgx3,m0ibglfx,s0, * beta,cov,zpcqv3uj, vc6hatuj,fasrkub3, qemj9asg,ges1xpkr, xbig, wp *uarq2m, hjm2ktyr, jnxpuym2, hnpt1zym, tgiyxdw1, dufozmt7, iz2nbfjc *, work1, wkmm, work3, sgdub, bmb, ifys6woa, mwk, twk, rpyis2kc, zv *2xfhei, resss, nbzjkpi3, acpios9q, itwk, jwbkl9fp, p, q, yzoe1rsp, * niter, gtrlbz3e, ghz9vuba, oldmat, wk2, rutyk8mg, xjc4ywlh, lyma1 *kwc, dimw, dimu, fbd5yktj, ldk) implicit logical (a-z) integer qemj9asg integer dufozmt7(*), tgiyxdw1(*) integer p, q, yzoe1rsp, niter, gtrlbz3e, rutyk8mg, xjc4ywlh, lyma1 *kwc, dimw, dimu, fbd5yktj, ldk integer irhm4cfa, kuzxj1lo, wy1vqfzu, ezlgm2up(kuzxj1lo,q),nef(q), *which(q), ges1xpkr(xjc4ywlh) integer jnxpuym2(q), hnpt1zym(q), iz2nbfjc(q), nbzjkpi3(q+1), acpi *os9q(q), itwk(*), jwbkl9fp(q+1) double precision he7mqnvy(kuzxj1lo,p), tlgduey8(kuzxj1lo,wy1vqfzu) *, wmat(kuzxj1lo,dimw), wbkq9zyi(lyma1kwc), dof(lyma1kwc) double precision ub4xioar(wy1vqfzu,kuzxj1lo), kispwgx3(kuzxj1lo,ly *ma1kwc), m0ibglfx(wy1vqfzu,kuzxj1lo), s0(wy1vqfzu), beta(xjc4ywlh) *, cov(kuzxj1lo,lyma1kwc), zpcqv3uj, vc6hatuj(rutyk8mg,xjc4ywlh), f *asrkub3(xjc4ywlh) double precision xbig(rutyk8mg,xjc4ywlh), wpuarq2m(dimu,kuzxj1lo), * hjm2ktyr(wy1vqfzu,lyma1kwc), work1(*), wk2(kuzxj1lo,wy1vqfzu), wk *mm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), bmb(*), ifys6woa(*), * mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), resss double precision ghz9vuba(kuzxj1lo,wy1vqfzu), oldmat(kuzxj1lo,wy1v *qfzu) integer job,info,nefk integer ayfnwr1v, yq6lorbx, gp1jxzuh, wg1xifdy double precision vo4mtexk, rd9beyfk,ratio, deltaf, z4vrscot,pvofyg *8z pvofyg8z = 1.0d-7 job = 101 info = 1 if(q .eq. 0)then gtrlbz3e = 1 endif if(irhm4cfa .ne. 0)then do23324 yq6lorbx=1,xjc4ywlh do23326 ayfnwr1v=1,rutyk8mg vc6hatuj(ayfnwr1v,yq6lorbx)=xbig(ayfnwr1v,yq6lorbx) 23326 continue 23327 continue 23324 continue 23325 continue endif if(qemj9asg.eq.0)then call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu) call mux17f(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, kuzxj1lo, wkmm *(1,1,1), wkmm(1,1,2), tgiyxdw1, dufozmt7, dimu, rutyk8mg) do23330 gp1jxzuh=1,xjc4ywlh ges1xpkr(gp1jxzuh) = gp1jxzuh 23330 continue 23331 continue call vqrdca(vc6hatuj,rutyk8mg,rutyk8mg,xjc4ywlh,fasrkub3,ges1xpkr, *twk,qemj9asg,pvofyg8z) endif do23332 yq6lorbx=1,wy1vqfzu do23334 ayfnwr1v=1,kuzxj1lo m0ibglfx(yq6lorbx,ayfnwr1v)=0.0d0 23334 continue 23335 continue if(q .gt. 
0)then do23338 gp1jxzuh=1,q if(iz2nbfjc(gp1jxzuh).eq.1)then do23342 ayfnwr1v=1,kuzxj1lo m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + kispwg *x3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6lorbx-1) 23342 continue 23343 continue else do23344 wg1xifdy=1,jnxpuym2(gp1jxzuh) do23346 ayfnwr1v=1,kuzxj1lo m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + hjm2kt *yr(yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnp *t1zym(gp1jxzuh)+wg1xifdy-1) 23346 continue 23347 continue 23344 continue 23345 continue endif 23338 continue 23339 continue endif 23332 continue 23333 continue niter = 0 ratio = 1.0d0 23348 if((ratio .gt. zpcqv3uj ) .and. (niter .lt. gtrlbz3e))then niter = niter + 1 deltaf = 0.0d0 do23350 yq6lorbx=1,wy1vqfzu do23352 ayfnwr1v=1,kuzxj1lo ghz9vuba(ayfnwr1v,yq6lorbx)=tlgduey8(ayfnwr1v,yq6lorbx)-m0ibglfx(y *q6lorbx,ayfnwr1v) 23352 continue 23353 continue 23350 continue 23351 continue call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu) call mux22f(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzxj1l *o,wy1vqfzu,wkmm) call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, wk2 *,wk2, beta, wk2,ub4xioar,job,info) resss=0.0d0 do23354 ayfnwr1v=1,kuzxj1lo do23356 yq6lorbx=1,wy1vqfzu vo4mtexk = twk((ayfnwr1v-1)*wy1vqfzu+yq6lorbx) - ub4xioar(yq6lorbx *,ayfnwr1v) resss = resss + vo4mtexk * vo4mtexk 23356 continue 23357 continue 23354 continue 23355 continue call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,dufoz *mt7,dimu) if(q .gt. 0)then do23360 gp1jxzuh=1,q do23362 yq6lorbx=1,wy1vqfzu if(iz2nbfjc(gp1jxzuh).eq.1)then do23366 ayfnwr1v=1,kuzxj1lo oldmat(ayfnwr1v,yq6lorbx)=kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6 *lorbx-1) ghz9vuba(ayfnwr1v,yq6lorbx) = tlgduey8(ayfnwr1v,yq6lorbx) - ub4xio *ar(yq6lorbx,ayfnwr1v) - m0ibglfx(yq6lorbx,ayfnwr1v) + oldmat(ayfnw *r1v,yq6lorbx) 23366 continue 23367 continue else do23368 ayfnwr1v=1,kuzxj1lo oldmat(ayfnwr1v,yq6lorbx)=0.0d0 do23370 wg1xifdy=1,jnxpuym2(gp1jxzuh) oldmat(ayfnwr1v,yq6lorbx)=oldmat(ayfnwr1v,yq6lorbx) + hjm2ktyr(yq6 *lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1zym( *gp1jxzuh)+wg1xifdy-1) 23370 continue 23371 continue ghz9vuba(ayfnwr1v,yq6lorbx) = tlgduey8(ayfnwr1v,yq6lorbx) - ub4xio *ar(yq6lorbx,ayfnwr1v) - m0ibglfx(yq6lorbx,ayfnwr1v) + oldmat(ayfnw *r1v,yq6lorbx) 23368 continue 23369 continue endif 23362 continue 23363 continue nefk = nef(gp1jxzuh) call ewg7qruh(he7mqnvy(1,which(gp1jxzuh)),ghz9vuba,wmat, kuzxj1lo, *wy1vqfzu,ezlgm2up(1,gp1jxzuh),nefk, wbkq9zyi(hnpt1zym(gp1jxzuh)), *dof(hnpt1zym(gp1jxzuh)), kispwgx3(1,hnpt1zym(gp1jxzuh)), cov(1,hnp *t1zym(gp1jxzuh)), s0, mwk(1), mwk(1+nefk), mwk(1+nefk*(wy1vqfzu+1) *), mwk(1+nefk*(2*wy1vqfzu+1)), work1, work3, dimw, fbd5yktj, ldk, *info, yzoe1rsp, sgdub, rpyis2kc(nbzjkpi3(gp1jxzuh)), zv2xfhei(jwbk *l9fp(gp1jxzuh)), acpios9q(gp1jxzuh),tgiyxdw1, dufozmt7, bmb, ifys6 *woa, wkmm, iz2nbfjc(gp1jxzuh),jnxpuym2(gp1jxzuh),itwk, hjm2ktyr(1, *hnpt1zym(gp1jxzuh)), twk(1), twk(1+2*jnxpuym2(gp1jxzuh)), twk(1+4* *jnxpuym2(gp1jxzuh)), twk(1+(4+nefk)*jnxpuym2(gp1jxzuh)), twk(1+(4+ *2*nefk)*jnxpuym2(gp1jxzuh)), twk(1+(4+3*nefk)*jnxpuym2(gp1jxzuh)), * twk(1+(4+4*nefk)*jnxpuym2(gp1jxzuh))) do23372 yq6lorbx=1,wy1vqfzu if(iz2nbfjc(gp1jxzuh).eq.1)then do23376 ayfnwr1v=1,kuzxj1lo m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + kispwg *x3(ayfnwr1v,hnpt1zym(gp1jxzuh)+yq6lorbx-1) 23376 continue 23377 continue else do23378 wg1xifdy=1,jnxpuym2(gp1jxzuh) do23380 ayfnwr1v=1,kuzxj1lo m0ibglfx(yq6lorbx,ayfnwr1v)=m0ibglfx(yq6lorbx,ayfnwr1v) + hjm2ktyr 
*(yq6lorbx,hnpt1zym(gp1jxzuh)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1 *zym(gp1jxzuh)+wg1xifdy-1) 23380 continue 23381 continue 23378 continue 23379 continue endif do23382 ayfnwr1v=1,kuzxj1lo m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) - oldmat *(ayfnwr1v,yq6lorbx) 23382 continue 23383 continue 23372 continue 23373 continue do23384 yq6lorbx=1,wy1vqfzu if(iz2nbfjc(gp1jxzuh) .eq. 1)then deltaf = deltaf + rd9beyfk(kuzxj1lo,oldmat(1,yq6lorbx),kispwgx3(1, *hnpt1zym(gp1jxzuh)+yq6lorbx-1), wmat(1,yq6lorbx)) else do23388 ayfnwr1v=1,kuzxj1lo twk(ayfnwr1v) = 0.0d0 do23390 wg1xifdy=1,jnxpuym2(gp1jxzuh) twk(ayfnwr1v) = twk(ayfnwr1v) + hjm2ktyr(yq6lorbx,hnpt1zym(gp1jxzu *h)+wg1xifdy-1) * kispwgx3(ayfnwr1v,hnpt1zym(gp1jxzuh)+wg1xifdy-1) 23390 continue 23391 continue 23388 continue 23389 continue deltaf = deltaf + rd9beyfk(kuzxj1lo, oldmat(1,yq6lorbx), twk, wmat *(1,yq6lorbx)) endif 23384 continue 23385 continue do23392 yq6lorbx=1,wy1vqfzu do23394 ayfnwr1v=1,kuzxj1lo ghz9vuba(ayfnwr1v,yq6lorbx)=tlgduey8(ayfnwr1v,yq6lorbx)-m0ibglfx(y *q6lorbx,ayfnwr1v) 23394 continue 23395 continue 23392 continue 23393 continue call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu) call mux22f(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzxj1l *o,wy1vqfzu,wkmm) call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, wk2 *,wk2, beta, wk2,ub4xioar,job,info) call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,dufoz *mt7,dimu) 23360 continue 23361 continue endif if(q .gt. 0)then z4vrscot=0.0d0 do23398 yq6lorbx=1,wy1vqfzu do23400 ayfnwr1v=1,kuzxj1lo z4vrscot = z4vrscot + wmat(ayfnwr1v,yq6lorbx) * m0ibglfx(yq6lorbx, *ayfnwr1v)**2 23400 continue 23401 continue 23398 continue 23399 continue if(z4vrscot .gt. 0.0d0)then ratio = dsqrt(deltaf/z4vrscot) else ratio = 0.0d0 endif endif if(niter .eq. 1)then ratio = 1.0d0 endif goto 23348 endif 23349 continue do23406 yq6lorbx=1,xjc4ywlh twk(yq6lorbx)=beta(yq6lorbx) 23406 continue 23407 continue do23408 yq6lorbx=1,xjc4ywlh beta(ges1xpkr(yq6lorbx))=twk(yq6lorbx) 23408 continue 23409 continue do23410 ayfnwr1v=1,kuzxj1lo do23412 yq6lorbx=1,wy1vqfzu m0ibglfx(yq6lorbx,ayfnwr1v) = m0ibglfx(yq6lorbx,ayfnwr1v) + ub4xio *ar(yq6lorbx,ayfnwr1v) 23412 continue 23413 continue 23410 continue 23411 continue if((yzoe1rsp .ne. 0) .and. (q .gt. 0))then do23416 gp1jxzuh=1,q do23418 wg1xifdy=1,jnxpuym2(gp1jxzuh) call shm8ynte(kuzxj1lo,nef(gp1jxzuh),ezlgm2up(1,gp1jxzuh), cov(1,h *npt1zym(gp1jxzuh)+wg1xifdy-1),oldmat) do23420 ayfnwr1v=1,kuzxj1lo cov(ayfnwr1v,hnpt1zym(gp1jxzuh)+wg1xifdy-1) = oldmat(ayfnwr1v,1) 23420 continue 23421 continue 23418 continue 23419 continue 23416 continue 23417 continue endif return end subroutine x6kanjdh(he7mqnvy, xout, kuzxj1lo, wy1vqfzu) implicit logical (a-z) integer kuzxj1lo, wy1vqfzu double precision he7mqnvy(kuzxj1lo), xout(*) integer ayfnwr1v, yq6lorbx, gp1jxzuh, iptr iptr=1 do23422 yq6lorbx=1,wy1vqfzu do23424 ayfnwr1v=1,kuzxj1lo do23426 gp1jxzuh=1,wy1vqfzu if(yq6lorbx .eq. gp1jxzuh)then xout(iptr) = 1.0d0 else xout(iptr) = 0.0d0 endif iptr=iptr+1 23426 continue 23427 continue 23424 continue 23425 continue 23422 continue 23423 continue do23430 yq6lorbx=1,wy1vqfzu do23432 ayfnwr1v=1,kuzxj1lo do23434 gp1jxzuh=1,wy1vqfzu if(yq6lorbx .eq. 
gp1jxzuh)then xout(iptr) = he7mqnvy(ayfnwr1v) else xout(iptr) = 0.0d0 endif iptr=iptr+1 23434 continue 23435 continue 23432 continue 23433 continue 23430 continue 23431 continue return end double precision function rd9beyfk(kuzxj1lo, bhcji9gl, m0ibglfx, p *o8rwsmy) integer kuzxj1lo double precision bhcji9gl(kuzxj1lo), m0ibglfx(kuzxj1lo), po8rwsmy( *kuzxj1lo) integer ayfnwr1v double precision lm9vcjob, rxeqjn0y, work rxeqjn0y = 0.0d0 lm9vcjob = 0.0d0 do23438 ayfnwr1v=1,kuzxj1lo work = bhcji9gl(ayfnwr1v) - m0ibglfx(ayfnwr1v) rxeqjn0y = rxeqjn0y + po8rwsmy(ayfnwr1v)*work*work lm9vcjob = lm9vcjob + po8rwsmy(ayfnwr1v) 23438 continue 23439 continue if(lm9vcjob .gt. 0.0d0)then rd9beyfk=rxeqjn0y/lm9vcjob else rd9beyfk=0.0d0 endif return end subroutine pitmeh0q(kuzxj1lo, bhcji9gl, po8rwsmy, lfu2qhid, lm9vcj *ob) implicit logical (a-z) integer kuzxj1lo double precision bhcji9gl(kuzxj1lo), po8rwsmy(kuzxj1lo), lfu2qhid, * lm9vcjob double precision rxeqjn0y integer ayfnwr1v lm9vcjob = 0.0d0 rxeqjn0y = 0.0d0 do23442 ayfnwr1v=1,kuzxj1lo rxeqjn0y = rxeqjn0y + bhcji9gl(ayfnwr1v) * po8rwsmy(ayfnwr1v) lm9vcjob = lm9vcjob + po8rwsmy(ayfnwr1v) 23442 continue 23443 continue if(lm9vcjob .gt. 0.0d0)then lfu2qhid = rxeqjn0y / lm9vcjob else lfu2qhid = 0.0d0 endif return end subroutine dsrt0gem(kuzxj1lo, x, w, bhcji9gl, ub4xioar, cov, yzoe1 *rsp) implicit logical (a-z) integer kuzxj1lo integer yzoe1rsp double precision x(kuzxj1lo), w(kuzxj1lo), bhcji9gl(kuzxj1lo), ub4 *xioar(kuzxj1lo) double precision cov(kuzxj1lo,*) integer ayfnwr1v double precision pasjmo8g, pygsw6ko, q6zdcwxk, nsum, eck8vubt, int *erc, bzmd6ftv, hofjnx2e, lm9vcjob call pitmeh0q(kuzxj1lo,bhcji9gl,w,pasjmo8g, lm9vcjob) call pitmeh0q(kuzxj1lo,x,w,pygsw6ko, lm9vcjob) nsum = 0.0d0 q6zdcwxk = 0.0d0 do23446 ayfnwr1v=1,kuzxj1lo hofjnx2e = x(ayfnwr1v)-pygsw6ko nsum = nsum + hofjnx2e * (bhcji9gl(ayfnwr1v)-pasjmo8g) * w(ayfnwr1 *v) hofjnx2e = hofjnx2e * hofjnx2e q6zdcwxk = q6zdcwxk + hofjnx2e * w(ayfnwr1v) 23446 continue 23447 continue eck8vubt = nsum/q6zdcwxk interc = pasjmo8g - eck8vubt * pygsw6ko do23448 ayfnwr1v=1,kuzxj1lo ub4xioar(ayfnwr1v) = interc + eck8vubt * x(ayfnwr1v) 23448 continue 23449 continue bzmd6ftv = interc + eck8vubt * x(1) if(yzoe1rsp .ne. 0)then do23452 ayfnwr1v=1,kuzxj1lo hofjnx2e = x(ayfnwr1v)-pygsw6ko if(w(ayfnwr1v) .gt. 0.0d0)then cov(ayfnwr1v,1) = cov(ayfnwr1v,1) - 1.0d0/lm9vcjob - hofjnx2e * ho *fjnx2e / q6zdcwxk else cov(ayfnwr1v,1) = 0.0d0 endif 23452 continue 23453 continue endif return end subroutine shm8ynte(kuzxj1lo, p, ezlgm2up, pygsw6ko, x) implicit logical (a-z) integer kuzxj1lo, p, ezlgm2up(kuzxj1lo) double precision pygsw6ko(p), x(kuzxj1lo) integer ayfnwr1v do23456 ayfnwr1v=1,kuzxj1lo x(ayfnwr1v) = pygsw6ko(ezlgm2up(ayfnwr1v)) 23456 continue 23457 continue return end subroutine vankcghz2l2(x, kuzxj1lo, ankcghz2, rvy1fpli, ukgwt7na) implicit logical (a-z) integer kuzxj1lo, rvy1fpli, ukgwt7na double precision x(kuzxj1lo), ankcghz2(kuzxj1lo) integer ndk, yq6lorbx if(ukgwt7na .eq. 0)then if(kuzxj1lo .le. 
40)then ndk = kuzxj1lo else ndk = 40 + dexp(0.25d0 * dlog(kuzxj1lo-40.0d0)) endif else ndk = rvy1fpli - 6 endif rvy1fpli = ndk + 6 do23462 yq6lorbx = 1,3 ankcghz2(yq6lorbx) = x(1) 23462 continue 23463 continue do23464 yq6lorbx = 1,ndk ankcghz2(yq6lorbx+3) = x( 1 + (yq6lorbx-1)*(kuzxj1lo-1)/(ndk-1) ) 23464 continue 23465 continue do23466 yq6lorbx = 1,3 ankcghz2(ndk+3+yq6lorbx) = x(kuzxj1lo) 23466 continue 23467 continue return end subroutine pankcghz2l2(ankcghz2, kuzxj1lo, zo8wpibx, tol) implicit logical (a-z) integer kuzxj1lo, zo8wpibx(kuzxj1lo) double precision ankcghz2(kuzxj1lo), tol integer ayfnwr1v, cjop5bwm do23468 ayfnwr1v=1,4 zo8wpibx(ayfnwr1v) = 1 23468 continue 23469 continue cjop5bwm = 4 do23470 ayfnwr1v=5,(kuzxj1lo-4) if((ankcghz2(ayfnwr1v) - ankcghz2(cjop5bwm) .ge. tol) .and. (ankcg *hz2(kuzxj1lo) - ankcghz2(ayfnwr1v) .ge. tol))then zo8wpibx(ayfnwr1v) = 1 cjop5bwm = ayfnwr1v else zo8wpibx(ayfnwr1v) = 0 endif 23470 continue 23471 continue do23474 ayfnwr1v=(kuzxj1lo-3),kuzxj1lo zo8wpibx(ayfnwr1v) = 1 23474 continue 23475 continue return end
VGAM/src/specfun3.c0000644000176200001440000000360013565414527013500 0ustar liggesusers #include <math.h> #include <stdio.h> #include <stdlib.h> #include <R.h> #include <Rmath.h> void sf_C_expint(double *x, int *size, double *bzmd6ftv); void sf_C_expexpint(double *x, int *size, double *bzmd6ftv); void sf_C_expint_e1(double *x, int *size, double *bzmd6ftv); void VGAM_C_kend_tau(double *x, double *y, int *f8yswcat, double *bqelz3cy); void F77_NAME(einlib)(double*, double*); void F77_NAME(expeinl)(double*, double*); void F77_NAME(eonenl)(double*, double*); void sf_C_expint(double *x, int *size, double *bzmd6ftv) { int ayfnwr1v; for (ayfnwr1v = 0; ayfnwr1v < *size; ayfnwr1v++) F77_NAME(einlib)(x + ayfnwr1v, bzmd6ftv + ayfnwr1v); } void sf_C_expexpint(double *x, int *size, double *bzmd6ftv) { int ayfnwr1v; for (ayfnwr1v = 0; ayfnwr1v < *size; ayfnwr1v++) F77_NAME(expeinl)(x + ayfnwr1v, bzmd6ftv + ayfnwr1v); } void sf_C_expint_e1(double *x, int *size, double *bzmd6ftv) { int ayfnwr1v; for (ayfnwr1v = 0; ayfnwr1v < *size; ayfnwr1v++) F77_NAME(eonenl)(x + ayfnwr1v, bzmd6ftv + ayfnwr1v); } void VGAM_C_kend_tau(double *x, double *y, int *f8yswcat, double *bqelz3cy) { int ayfnwr1v, yq6lorbx, gp1jxzuh = *f8yswcat ; double q6zdcwxk1, q6zdcwxk2; for (ayfnwr1v = 0; ayfnwr1v < 3; ayfnwr1v++) bqelz3cy[ayfnwr1v] = 0.0; for (ayfnwr1v = 0; ayfnwr1v < gp1jxzuh; ayfnwr1v++) { for (yq6lorbx = ayfnwr1v + 1; yq6lorbx < *f8yswcat; yq6lorbx++) { q6zdcwxk1 = x[ayfnwr1v] - x[yq6lorbx]; q6zdcwxk2 = y[ayfnwr1v] - y[yq6lorbx]; if (q6zdcwxk1 == 0.0 || q6zdcwxk2 == 0.0) { bqelz3cy[1] += 1.0; } else if ((q6zdcwxk1 < 0.0 && q6zdcwxk2 < 0.0) || (q6zdcwxk1 > 0.0 && q6zdcwxk2 > 0.0)) { bqelz3cy[0] += 1.0; } else { bqelz3cy[2] += 1.0; } } } }
VGAM/src/zeta3.c0000644000176200001440000001437613565414527013004 0ustar liggesusers #include <math.h> #include <stdio.h> #include <R.h> void vzetawr(double sjwyig9t[], double *bqelz3cy, int *kpzavbj3, int *f8yswcat); double fvlmz9iyzeta8(double , double kxae8glp[]); double fvlmz9iydzeta8(double , double kxae8glp[]); double fvlmz9iyddzeta8(double , double kxae8glp[]); void vbecoef(double kxae8glp[]); /* vzetawr evaluates the Riemann zeta function (*kpzavbj3 == 0) or its first (== 1) or second (== 2) derivative at each element of sjwyig9t[], via an Euler-Maclaurin-type series whose Bernoulli-number coefficients kxae8glp[] are filled in by vbecoef(); the identifiers are machine-generated, so these roles are inferred from usage. */ void vzetawr(double sjwyig9t[], double *bqelz3cy, int *kpzavbj3, int *f8yswcat) { int ayfnwr1v; double *qnwamo0e1, *qnwamo0e2; double kxae8glp[12]; vbecoef(kxae8glp); qnwamo0e1 = bqelz3cy; qnwamo0e2 = sjwyig9t; if (*kpzavbj3 == 0) { for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { *qnwamo0e1++ = fvlmz9iyzeta8(*qnwamo0e2++, kxae8glp); } } else if (*kpzavbj3 == 1) { for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat;
ayfnwr1v++) { *qnwamo0e1++ = fvlmz9iydzeta8(*qnwamo0e2++, kxae8glp); } } else if (*kpzavbj3 == 2) { for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { *qnwamo0e1++ = fvlmz9iyddzeta8(*qnwamo0e2++, kxae8glp); } } else { Rprintf("Error: *kpzavbj3 must equal 0, 1 or 2 in C function vzetawr\n"); } } double fvlmz9iyzeta8(double ghz9vuba, double kxae8glp[]) { int ayfnwr1v, gp1jxzuh, uw3favmo, nsvdbx3tk, m2svdbx3tk; double q6zdcwxk, xvr7bonh, a2svdbx3tk, fred; ayfnwr1v = 12; gp1jxzuh = 8; a2svdbx3tk = pow((double) ayfnwr1v, (double) 2.0); xvr7bonh = ghz9vuba / 2.000 / a2svdbx3tk; q6zdcwxk = 1.000 / (ghz9vuba - 1.000) + 0.500 / ayfnwr1v + kxae8glp[0] * xvr7bonh; for (uw3favmo = 2; uw3favmo <= gp1jxzuh; uw3favmo++) { m2svdbx3tk = uw3favmo + uw3favmo; xvr7bonh *= (ghz9vuba + m2svdbx3tk - 3.000) * (ghz9vuba + m2svdbx3tk - 2.000) / (m2svdbx3tk - 1.000) / m2svdbx3tk / a2svdbx3tk; q6zdcwxk += xvr7bonh * kxae8glp[uw3favmo-1]; } fred = pow((double) ayfnwr1v, (double) 1.0 - ghz9vuba); q6zdcwxk = 1.000 + q6zdcwxk * fred; for (nsvdbx3tk = 2; nsvdbx3tk < ayfnwr1v; nsvdbx3tk++) { q6zdcwxk += pow((double) nsvdbx3tk, (double) -ghz9vuba); } return q6zdcwxk; } double fvlmz9iydzeta8(double ghz9vuba, double kxae8glp[]) { int ayfnwr1v, gp1jxzuh, uw3favmo, nsvdbx3tk, m2svdbx3tk; double q6zdcwxk, xvr7bonh, dh9mgvze, a2svdbx3tk, ugqvjoe5a, ugqvjoe5n, fred; ayfnwr1v = 12; gp1jxzuh = 8; ugqvjoe5a = log( (double) ayfnwr1v ); a2svdbx3tk = ayfnwr1v * ayfnwr1v; xvr7bonh = ghz9vuba / 2.000 / a2svdbx3tk; dh9mgvze = 1.000 / ghz9vuba - ugqvjoe5a; q6zdcwxk = kxae8glp[0] * xvr7bonh * dh9mgvze; for (uw3favmo = 2; uw3favmo <= gp1jxzuh; uw3favmo++) { m2svdbx3tk = uw3favmo + uw3favmo; xvr7bonh *= (ghz9vuba + m2svdbx3tk - 3.0) * (ghz9vuba + m2svdbx3tk - 2.0) / (m2svdbx3tk - 1.0) / m2svdbx3tk / a2svdbx3tk; dh9mgvze += 1.0 / (ghz9vuba + m2svdbx3tk - 3.0) + 1.0 / (ghz9vuba + m2svdbx3tk - 2.0); q6zdcwxk += kxae8glp[uw3favmo-1] * xvr7bonh * dh9mgvze; } fred = pow((double) ayfnwr1v, (double) 1.0 - ghz9vuba); q6zdcwxk = (q6zdcwxk - 1.000 / pow(ghz9vuba - 1.000, (double) 2.0) - ugqvjoe5a * (1.000 / (ghz9vuba - 1.000) + 0.5000 / ayfnwr1v)) * fred; for (nsvdbx3tk = 2; nsvdbx3tk < ayfnwr1v; nsvdbx3tk++) { ugqvjoe5n = log( (double) nsvdbx3tk ); q6zdcwxk -= ugqvjoe5n / exp(ugqvjoe5n * ghz9vuba); } return q6zdcwxk; } double fvlmz9iyddzeta8(double ghz9vuba, double kxae8glp[]) { int ayfnwr1v, gp1jxzuh, uw3favmo, nsvdbx3tk, m2svdbx3tk; double q6zdcwxk, xvr7bonh, dh9mgvze, hpmwnav2, a2svdbx3tk, ugqvjoe5a, ugqvjoe5n, fred1, fred2; ayfnwr1v = 12; gp1jxzuh = 8; ugqvjoe5a = log( (double) ayfnwr1v ); a2svdbx3tk = ayfnwr1v * ayfnwr1v; xvr7bonh = ghz9vuba / 2.000 / a2svdbx3tk; dh9mgvze = 1.000 / ghz9vuba - ugqvjoe5a; hpmwnav2 = 1.000 / ghz9vuba / ghz9vuba; q6zdcwxk = kxae8glp[0] * xvr7bonh * (pow(dh9mgvze, (double) 2.0) - hpmwnav2); for (uw3favmo = 2; uw3favmo < gp1jxzuh; uw3favmo++) { m2svdbx3tk = uw3favmo + uw3favmo; xvr7bonh *= (ghz9vuba + m2svdbx3tk - 3.000) * (ghz9vuba + m2svdbx3tk - 2.000) / (m2svdbx3tk - 1.0) / m2svdbx3tk / a2svdbx3tk; dh9mgvze += 1.000 / (ghz9vuba + m2svdbx3tk - 3.000) + 1.000 / (ghz9vuba + m2svdbx3tk - 2.000); hpmwnav2 += 1.000 / pow(ghz9vuba + m2svdbx3tk - 3.000, (double) 2.0) + 1.000 / pow(ghz9vuba + m2svdbx3tk - 2.000, (double) 2.0); q6zdcwxk += kxae8glp[uw3favmo-1] * xvr7bonh * (dh9mgvze * dh9mgvze - hpmwnav2); } fred1 = pow((double) ayfnwr1v, (double) 1.0 - ghz9vuba); fred2 = pow(ugqvjoe5a, (double) 2.0) * (1.0 / (ghz9vuba - 1.0) + 0.50 / ayfnwr1v); q6zdcwxk = (q6zdcwxk + 2.0 / pow(ghz9vuba - 1.0, (double) 3.0) + 2.0 * 
ugqvjoe5a / pow(ghz9vuba - 1.0, (double) 2.0) + fred2) * fred1; for (nsvdbx3tk = 2; nsvdbx3tk < ayfnwr1v; nsvdbx3tk++) { ugqvjoe5n = log( (double) nsvdbx3tk ); q6zdcwxk += pow(ugqvjoe5n, (double) 2.0) / exp(ugqvjoe5n * ghz9vuba); } return q6zdcwxk; } /* Bernoulli numbers B2, B4, ..., B24: the coefficients of the Euler-Maclaurin tail used by the three zeta routines above. */ void vbecoef(double kxae8glp[]) { kxae8glp[0] = 1.000 / 6.000; kxae8glp[1] = -1.000 / 30.000; kxae8glp[2] = 1.000 / 42.000; kxae8glp[3] = -1.000 / 30.000; kxae8glp[4] = 5.000 / 66.000; kxae8glp[5] = -691.000 / 2730.000; kxae8glp[6] = 7.000 / 6.000; kxae8glp[7] = -3617.000 / 510.000; kxae8glp[8] = 4386.700 / 79.800; kxae8glp[9] = -1746.1100 / 3.3000; kxae8glp[10] = 8545.1300 / 1.3800; kxae8glp[11] = -2363.6409100 / 0.0273000; } void conmax_Z(double *lamvec, double *nuvec, double *bqelz3cy, int *nlength, int *kpzavbj3, double *qaltf0nz) { double *pq6zdcwxk, denom = 0.0, yq6lorbx, prevterm; int ayfnwr1v; *qaltf0nz = 1.0e-6; if (*kpzavbj3 == 0) { pq6zdcwxk = bqelz3cy; for (ayfnwr1v = 0; ayfnwr1v < *nlength; ayfnwr1v++) { prevterm = 1.0 + *lamvec; denom = 1.0; *pq6zdcwxk = prevterm; yq6lorbx = 2.0; if (*nuvec == 0.0 && *lamvec >= 1.0) { Rprintf("Error: series will not converge. Returning 0.0\n"); *pq6zdcwxk = 0.0; } else { while (prevterm > *qaltf0nz) { denom = denom * pow(yq6lorbx, *lamvec); prevterm = prevterm * *lamvec / denom; *pq6zdcwxk += prevterm; yq6lorbx += 1.0; } } lamvec++; nuvec++; pq6zdcwxk++; } } else if (*kpzavbj3 == 1) { } else if (*kpzavbj3 == 2) { } }
VGAM/src/caqo3.c0000644000176200001440000030314413565414527012766 0ustar liggesusers #include <math.h> #include <stdio.h> #include <stdlib.h> #include <R.h> #include <Rmath.h> void yiumjq3npnm1or(double *objzgdk0, double *lfu2qhid); void yiumjq3npnm1ow(double objzgdk0[], double lfu2qhid[], int *f8yswcat); void yiumjq3nn2howibc2a(double *objzgdk0, double *i9mwnvqt, double *lfu2qhid); void yiumjq3nbewf1pzv9(double *objzgdk0, double *lfu2qhid); void yiumjq3ng2vwexyk9(double *objzgdk0, double *lfu2qhid); void yiumjq3npkc4ejib(double w8znmyce[], double zshtfg8c[], double m0ibglfx[], int *ftnjamu2, int *wy1vqfzu, int *br5ovgcj, int *xlpjcg3s, int *vtsou9pz, int *hj3ftvzu, int *qfx3vhct, int *unhycz0e, double vm4xjosb[]); void yiumjq3nnipyajc1(double m0ibglfx[], double t8hwvalr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *qfx3vhct, int *hj3ftvzu); void yiumjq3nshjlwft5(int *qfx3vhct, double tlgduey8[], double ufgqj9ck[], double t8hwvalr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *kvowz9ht, double m0ibglfx[], double *jxacz5qu, int *hj3ftvzu, double *dn3iasxug, double *vsoihn1r, int *dqk5muto); void yiumjq3nflncwkfq76(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int *br5ovgcj, int *xwdf5ltg, int *qfx3vhct); void yiumjq3nflncwkfq71(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int *xwdf5ltg, int *qfx3vhct, double vm4xjosb[], int *br5ovgcj, int *xlpjcg3s, double kifxa0he[], int *yru9olks, int *unhycz0e); void yiumjq3nflncwkfq72(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int *wy1vqfzu, int *br5ovgcj, int *xwdf5ltg, int *qfx3vhct, int *afpc0kns, int *fmzq7aob, int *eu3oxvyb, int *unhycz0e, double vm4xjosb[]); void yiumjq3nietam6(double tlgduey8[], double m0ibglfx[], double y7sdgtqi[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *qfx3vhct, int *hj3ftvzu, double ufgqj9ck[], int *wr0lbopv); void yiumjq3ndlgpwe0c(double tlgduey8[], double ufgqj9ck[], double m0ibglfx[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double *rsynp1go, double *dn3iasxug, double *uaf2xgqy, int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *hj3ftvzu, int
*qfx3vhct, int *zjkrtol8, int *unhycz0e, double vm4xjosb[]); void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[], double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double w8znmyce[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[]); void cqo_1(double lncwkfq7[], double tlgduey8[], double kifxa0he[], double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double w8znmyce[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[]); void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[], double m0ibglfx[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[], int psdvgce3[], int *qfozcl5b, double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double kispwgx3[], double mbvnaor6[], double hjm2ktyr[], int jnxpuym2[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[]); void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[], double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double w8znmyce[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[], double atujnxb8[], double k7hulceq[], int *eoviz2fb, double kpzavbj3mat[], double *ydcnh9xl); void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[], double m0ibglfx[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[], double atujnxb8[], double k7hulceq[], int *eoviz2fb, double kpzavbj3mat[], double ajul8wkv[], int psdvgce3[], int *qfozcl5b, double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double kispwgx3[], double mbvnaor6[], double hjm2ktyr[], int jnxpuym2[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[]); double fvlmz9iyC_tldz5ion(double xx); void fvlmz9iyC_qpsedg8x(int tgiyxdw1[], int dufozmt7[], int *wy1vqfzu); void fvlmz9iyC_enbin9(double lfu2qhid[], double hdqsx7bk[], double nm0eljqk[], double *n2kersmx, int *f8yswcat, int *dvhw1ulq, int *zy1mchbf, double *ux3nadiw, double *rsynp1go, int *sguwj9ty); void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[], double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double 
kispwgx3[], double m0ibglfx[], double zshtfg8c[], double ui8ysltq[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], double wpuarq2m[], double hjm2ktyr[], int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], // 20130525; lindex added int acpios9q[], int jwbkl9fp[]); void F77_NAME(vqrdca)(double*, int*, int*, int*, double*, int*, double*, int*, double*); void F77_NAME(vdqrsl)(double*, int*, int*, int*, double*, double*, double*, double*, double*, double*, double*, int*, int*); void tyee_C_vdgam1(double*, double*, int*); void tyee_C_vtgam1(double*, double*, int*); void yiumjq3nn2howibc2a(double *objzgdk0, double *i9mwnvqt, double *lfu2qhid) { double pq0hfucn, xd4mybgj; if (1.0e0 - *objzgdk0 >= 1.0e0) { *lfu2qhid = -8.12589e0 / (3.0 * sqrt(*i9mwnvqt)); } else if (1.0e0 - *objzgdk0 <= 0.0e0) { *lfu2qhid = 8.12589e0 / (3.0 * sqrt(*i9mwnvqt)); } else { pq0hfucn = 1.0e0 - *objzgdk0; yiumjq3npnm1or(&pq0hfucn, &xd4mybgj); xd4mybgj /= 3.0e0 * sqrt(*i9mwnvqt); *lfu2qhid = -3.0e0 * log(1.0e0 + xd4mybgj); } } void yiumjq3nbewf1pzv9(double *objzgdk0, double *lfu2qhid) { if (*objzgdk0 <= 2.0e-200) { *lfu2qhid = -460.0e0; } else if (*objzgdk0 <= 1.0e-14) { *lfu2qhid = log( *objzgdk0 ); } else if (1.0e0 - *objzgdk0 <= 0.0e0) { *lfu2qhid = 3.542106e0; } else { *lfu2qhid = log(-log(1.0e0 - *objzgdk0)); } } void yiumjq3ng2vwexyk9(double *objzgdk0, double *lfu2qhid) { if (*objzgdk0 <= 2.0e-200) { *lfu2qhid = -460.0e0; } else if (*objzgdk0 <= 1.0e-14) { *lfu2qhid = log( *objzgdk0 ); } else if (1.0e0 - *objzgdk0 <= 0.0e0) { *lfu2qhid = 34.53958e0; } else { *lfu2qhid = log(*objzgdk0 / (1.0e0 - *objzgdk0)); } } void yiumjq3npkc4ejib(double w8znmyce[], double zshtfg8c[], double m0ibglfx[], int *ftnjamu2, int *wy1vqfzu, int *br5ovgcj, int *xlpjcg3s, int *vtsou9pz, int *hj3ftvzu, int *qfx3vhct, int *unhycz0e, double vm4xjosb[]) { int ayfnwr1v, yq6lorbx, gp1jxzuh, sedf7mxb; double *fpdlcqk9zshtfg8c, *fpdlcqk9w8znmyce, *fpdlcqk9f9piukdx, *fpdlcqk9m0ibglfx, *fpdlcqk9vm4xjosb; if (*vtsou9pz == 1) { if (*qfx3vhct == 3 || *qfx3vhct == 5) { sedf7mxb = 2 * *hj3ftvzu - 1; if (*br5ovgcj != 2 * *ftnjamu2) //Rprinf Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_pkc4ejib\n"); fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = 0.0; fpdlcqk9m0ibglfx += *wy1vqfzu; } fpdlcqk9zshtfg8c = zshtfg8c; for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) { fpdlcqk9w8znmyce = w8znmyce + 0 + (gp1jxzuh-1) * *br5ovgcj; fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c; fpdlcqk9w8znmyce++; fpdlcqk9m0ibglfx += *wy1vqfzu; } fpdlcqk9zshtfg8c++; } sedf7mxb = 2 * *hj3ftvzu; fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = 0.0; fpdlcqk9m0ibglfx += *wy1vqfzu; } fpdlcqk9zshtfg8c = zshtfg8c; for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) { fpdlcqk9w8znmyce = w8znmyce + 1 + (gp1jxzuh-1) * *br5ovgcj; fpdlcqk9m0ibglfx = m0ibglfx + sedf7mxb-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c; fpdlcqk9w8znmyce++; fpdlcqk9m0ibglfx += *wy1vqfzu; } fpdlcqk9zshtfg8c++; } } else { fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1; for (ayfnwr1v = 0; ayfnwr1v < *br5ovgcj; ayfnwr1v++) { *fpdlcqk9m0ibglfx = 0.0; fpdlcqk9m0ibglfx += *wy1vqfzu; } fpdlcqk9zshtfg8c = zshtfg8c; 
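/* Reader note (identifiers in this file are machine-generated; the roles below are inferred from usage, not documented in the source): in this branch of yiumjq3npkc4ejib, one linear predictor per response with *vtsou9pz == 1, the pointer walk that follows is equivalent to the dense update eta[(hj3ftvzu-1) + i * wy1vqfzu] += X[i + k * br5ovgcj] * beta[k] for i = 0,...,br5ovgcj-1 and k = 0,...,xlpjcg3s-1; that is, column hj3ftvzu of the linear-predictor matrix m0ibglfx (stored with stride *wy1vqfzu per observation) is set to w8znmyce %*% zshtfg8c, with w8znmyce acting as the model matrix and zshtfg8c as the coefficient vector. */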
fpdlcqk9w8znmyce = w8znmyce; // + (gp1jxzuh-1) * *br5ovgcj; for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) { fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1; for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce++ * *fpdlcqk9zshtfg8c; fpdlcqk9m0ibglfx += *wy1vqfzu; } fpdlcqk9zshtfg8c++; } } } else { if (*br5ovgcj != *wy1vqfzu * *ftnjamu2) //Rprinf Rprintf("Error: *br5ovgcj != *wy1vqfzu * *ftnjamu2 in C_pkc4ejib\n"); fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9f9piukdx = w8znmyce; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx = 0.0e0; fpdlcqk9zshtfg8c = zshtfg8c; fpdlcqk9w8znmyce = fpdlcqk9f9piukdx++; for (gp1jxzuh = 1; gp1jxzuh <= *xlpjcg3s; gp1jxzuh++) { *fpdlcqk9m0ibglfx += *fpdlcqk9w8znmyce * *fpdlcqk9zshtfg8c++; fpdlcqk9w8znmyce += *br5ovgcj; } fpdlcqk9m0ibglfx++; } } } fpdlcqk9vm4xjosb = vm4xjosb; if (*unhycz0e == 1) { if (*qfx3vhct == 3 || *qfx3vhct == 5) { fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu - 2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++; fpdlcqk9m0ibglfx += *wy1vqfzu; } } else { fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu - 1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++; fpdlcqk9m0ibglfx += *wy1vqfzu; } } } } void yiumjq3nnipyajc1(double m0ibglfx[], double t8hwvalr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *qfx3vhct, int *hj3ftvzu) { int ayfnwr1v, yq6lorbx; double tmpwk, *fpdlcqk9t8hwvalr, *fpdlcqk9m0ibglfx; if (*hj3ftvzu == 0) { fpdlcqk9t8hwvalr = t8hwvalr; fpdlcqk9m0ibglfx = m0ibglfx; if (*qfx3vhct == 1) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { tmpwk = exp(*fpdlcqk9m0ibglfx++); *fpdlcqk9t8hwvalr++ = tmpwk / (1.0 + tmpwk); } } if (*qfx3vhct == 2) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) *fpdlcqk9t8hwvalr++ = exp(*fpdlcqk9m0ibglfx++); } if (*qfx3vhct == 4) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) *fpdlcqk9t8hwvalr++ = 1.0e0 - exp(-exp(*fpdlcqk9m0ibglfx++)); } if (*qfx3vhct == 3 || *qfx3vhct == 5) { if (2 * *afpc0kns != *wy1vqfzu) { //Rprintf Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); } //Rprintf for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { *fpdlcqk9t8hwvalr++ = exp(*fpdlcqk9m0ibglfx++); fpdlcqk9m0ibglfx++; } } if (*qfx3vhct == 8) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) *fpdlcqk9t8hwvalr++ = *fpdlcqk9m0ibglfx++; } } else { fpdlcqk9t8hwvalr = t8hwvalr + *hj3ftvzu-1; fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1; if (*qfx3vhct == 1) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { tmpwk = exp(*fpdlcqk9m0ibglfx); *fpdlcqk9t8hwvalr = tmpwk / (1.0 + tmpwk); fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 2) { for (ayfnwr1v = 1; 
ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9t8hwvalr = exp(*fpdlcqk9m0ibglfx); fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 4) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9t8hwvalr = 1.0e0 - exp(-exp(*fpdlcqk9m0ibglfx)); fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 3 || *qfx3vhct == 5) { fpdlcqk9t8hwvalr = t8hwvalr + *hj3ftvzu-1; fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu-2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9t8hwvalr = exp(*fpdlcqk9m0ibglfx); fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 8) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9t8hwvalr = *fpdlcqk9m0ibglfx; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9m0ibglfx += *wy1vqfzu; } } } } void yiumjq3nshjlwft5(int *qfx3vhct, double tlgduey8[], double ufgqj9ck[], double t8hwvalr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *kvowz9ht, double m0ibglfx[], double *jxacz5qu, int *hj3ftvzu, double *dn3iasxug, double *vsoihn1r, int *dqk5muto) { int ayfnwr1v, yq6lorbx, lbgwvp3q; double txlvcey5, xd4mybgj, uqnkc6zg, hofjnx2e, smmu, afwp5imx, ivqk2ywz, qvd7yktm, hdqsx7bk, anopu9vi, jtnbu2hz, prev_lfu2qhid = 0.0e0, lfu2qhid = 0.0e0, *fpdlcqk9m0ibglfx, *fpdlcqk9t8hwvalr, *fpdlcqk9ufgqj9ck, *fpdlcqk9tlgduey8; if (*hj3ftvzu == 0) { fpdlcqk9tlgduey8 = tlgduey8; if (*qfx3vhct == 1 || *qfx3vhct == 4) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n"); fpdlcqk9tlgduey8 = tlgduey8; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { // yyy fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1; fpdlcqk9ufgqj9ck = ufgqj9ck; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { // bbb ivqk2ywz = *fpdlcqk9tlgduey8 > 0.0 ? *fpdlcqk9tlgduey8*log(*fpdlcqk9tlgduey8) :0.0; if (*fpdlcqk9tlgduey8 < 1.0e0) ivqk2ywz += (1.0e0 - *fpdlcqk9tlgduey8) * log(1.0e0 - *fpdlcqk9tlgduey8); xd4mybgj = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr); if (xd4mybgj < *dn3iasxug) { smmu = *fpdlcqk9t8hwvalr; qvd7yktm = *fpdlcqk9tlgduey8 * ((smmu < *dn3iasxug) ? *vsoihn1r : log(smmu)); afwp5imx = 1.0e0 - smmu; qvd7yktm += (afwp5imx < *dn3iasxug ? *vsoihn1r : log(afwp5imx))* (1.0 - *fpdlcqk9tlgduey8); } else { qvd7yktm = *fpdlcqk9tlgduey8 * log( *fpdlcqk9t8hwvalr) + (1.0 - *fpdlcqk9tlgduey8) * log(1.0 - *fpdlcqk9t8hwvalr); } lfu2qhid += *fpdlcqk9ufgqj9ck++ * (ivqk2ywz - qvd7yktm); fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9tlgduey8++; } // bbb jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid); prev_lfu2qhid = lfu2qhid; } // yyy } if (*qfx3vhct == 2) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n"); fpdlcqk9tlgduey8 = tlgduey8; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1; fpdlcqk9ufgqj9ck = ufgqj9ck; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ? 
*fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8 + *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr) : *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8; lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9tlgduey8++; } jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid); prev_lfu2qhid = lfu2qhid; } } if (*qfx3vhct == 5) { fpdlcqk9tlgduey8 = tlgduey8; if (2 * *afpc0kns != *wy1vqfzu) { //Rprintf Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_nipyajc1\n"); } //Rprintf for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { fpdlcqk9m0ibglfx = m0ibglfx + 2*yq6lorbx-1; fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1; fpdlcqk9ufgqj9ck = ufgqj9ck; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { jtnbu2hz = exp(*fpdlcqk9m0ibglfx); uqnkc6zg = fvlmz9iyC_tldz5ion(jtnbu2hz); xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ? (jtnbu2hz - 1.0e0) * log(*fpdlcqk9tlgduey8) + (log(jtnbu2hz) - *fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr - log(*fpdlcqk9t8hwvalr)) * jtnbu2hz - uqnkc6zg : -1000.0e0; xd4mybgj = -xd4mybgj; lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj; fpdlcqk9m0ibglfx += *wy1vqfzu; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9tlgduey8++; } jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid); prev_lfu2qhid = lfu2qhid; } } if (*qfx3vhct == 3) { if (*dqk5muto == 0) { anopu9vi = 34.0e0; for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] > anopu9vi) { hdqsx7bk = exp(anopu9vi); lbgwvp3q = 1; } else if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] < -anopu9vi) { hdqsx7bk = exp(-anopu9vi); lbgwvp3q = 1; } else { hdqsx7bk = exp(m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu]); lbgwvp3q = 0; } xd4mybgj = (tlgduey8[ayfnwr1v-1+ (yq6lorbx-1)* *ftnjamu2] < 1.0e0) ? 
1.0e0 : tlgduey8[ayfnwr1v-1+ (yq6lorbx-1)* *ftnjamu2]; lfu2qhid += ufgqj9ck[ayfnwr1v-1] * (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] * log(xd4mybgj/t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns]) + (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] + hdqsx7bk) * log((t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns ] + hdqsx7bk) / (hdqsx7bk + tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]))); } jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid); prev_lfu2qhid = lfu2qhid; } } else { anopu9vi = 34.0e0; for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] > anopu9vi) { hdqsx7bk = exp(anopu9vi); lbgwvp3q = 1; } else if (m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu] < -anopu9vi) { hdqsx7bk = exp(-anopu9vi); lbgwvp3q = 1; } else { hdqsx7bk = exp(m0ibglfx[2*yq6lorbx-1 + (ayfnwr1v-1)* *wy1vqfzu]); lbgwvp3q = 0; } if (lbgwvp3q) { uqnkc6zg = hofjnx2e = 0.0e0; } else { uqnkc6zg = fvlmz9iyC_tldz5ion(hdqsx7bk + tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]); hofjnx2e = fvlmz9iyC_tldz5ion(hdqsx7bk); } txlvcey5 = fvlmz9iyC_tldz5ion(1.0e0 + tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2]); xd4mybgj = hdqsx7bk * log(hdqsx7bk / (hdqsx7bk + t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns])) + uqnkc6zg - hofjnx2e - txlvcey5; if (tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] > 0.0e0) { xd4mybgj += tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] * log(t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns] / (hdqsx7bk + t8hwvalr[yq6lorbx-1 + (ayfnwr1v-1) * *afpc0kns])); } lfu2qhid += ufgqj9ck[ayfnwr1v-1] * xd4mybgj; } jxacz5qu[yq6lorbx] = 2.0 * (-0.5 * lfu2qhid + 0.5 * prev_lfu2qhid); prev_lfu2qhid = lfu2qhid; } lfu2qhid *= (-0.5); } } if (*qfx3vhct == 8) { fpdlcqk9tlgduey8 = tlgduey8; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { fpdlcqk9t8hwvalr = t8hwvalr + yq6lorbx-1; fpdlcqk9ufgqj9ck = ufgqj9ck; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { xd4mybgj = *fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr; lfu2qhid += *fpdlcqk9ufgqj9ck++ * pow(xd4mybgj, (double) 2.0); fpdlcqk9t8hwvalr += *afpc0kns; } jxacz5qu[yq6lorbx] = 2.0e0 * (lfu2qhid - prev_lfu2qhid); prev_lfu2qhid = lfu2qhid; } } } else { fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2; fpdlcqk9t8hwvalr = t8hwvalr + *hj3ftvzu-1; fpdlcqk9ufgqj9ck = ufgqj9ck; if (*qfx3vhct == 1 || *qfx3vhct == 4) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { ivqk2ywz = *fpdlcqk9tlgduey8 > 0.0 ? *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8) : 0.0; if (*fpdlcqk9tlgduey8 < 1.0e0) ivqk2ywz += (1.0e0 - *fpdlcqk9tlgduey8) * log(1.0e0 - *fpdlcqk9tlgduey8); xd4mybgj = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr); if (xd4mybgj < *dn3iasxug) { smmu = *fpdlcqk9t8hwvalr; qvd7yktm = *fpdlcqk9tlgduey8 * ((smmu < *dn3iasxug) ? *vsoihn1r : log(smmu)); afwp5imx = 1.0e0 - smmu; qvd7yktm += (afwp5imx < *dn3iasxug ? *vsoihn1r : log(afwp5imx)) * (1.0 - *fpdlcqk9tlgduey8); } else { qvd7yktm = *fpdlcqk9tlgduey8 * log( *fpdlcqk9t8hwvalr) + (1.0 - *fpdlcqk9tlgduey8) * log(1.0e0 - *fpdlcqk9t8hwvalr); } lfu2qhid += *fpdlcqk9ufgqj9ck++ * (ivqk2ywz - qvd7yktm); fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9tlgduey8++; } } if (*qfx3vhct == 2) { if (*afpc0kns != *wy1vqfzu) Rprintf("Error: *afpc0kns != *wy1vqfzu in C_shjlwft5\n"); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { xd4mybgj = *fpdlcqk9tlgduey8 > 0.0e0 ? 
*fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8 + *fpdlcqk9tlgduey8 * log(*fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr) : *fpdlcqk9t8hwvalr - *fpdlcqk9tlgduey8; lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9tlgduey8++; } } if (*qfx3vhct == 5) { fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2; fpdlcqk9t8hwvalr = t8hwvalr + *hj3ftvzu-1; fpdlcqk9ufgqj9ck = ufgqj9ck; fpdlcqk9m0ibglfx = m0ibglfx + 2 * *hj3ftvzu-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { jtnbu2hz = exp(*fpdlcqk9m0ibglfx); uqnkc6zg = fvlmz9iyC_tldz5ion(jtnbu2hz); xd4mybgj = *fpdlcqk9tlgduey8 > 0.0 ? (jtnbu2hz - 1.0e0) * log(*fpdlcqk9tlgduey8) + jtnbu2hz * (log(jtnbu2hz) - *fpdlcqk9tlgduey8 / *fpdlcqk9t8hwvalr - log(*fpdlcqk9t8hwvalr)) - uqnkc6zg : -1000.0e0; xd4mybgj = -xd4mybgj; lfu2qhid += *fpdlcqk9ufgqj9ck++ * xd4mybgj; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9m0ibglfx += *wy1vqfzu; fpdlcqk9tlgduey8++; } } if (*qfx3vhct == 3) { if (*dqk5muto == 0) { anopu9vi = 34.0e0; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { if (m0ibglfx[2 * *hj3ftvzu -1 + (ayfnwr1v-1) * *wy1vqfzu] > anopu9vi) { hdqsx7bk = exp(anopu9vi); lbgwvp3q = 1; } else if (m0ibglfx[2 * *hj3ftvzu -1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) { hdqsx7bk = exp(-anopu9vi); lbgwvp3q = 1; } else { hdqsx7bk = exp(m0ibglfx[2* *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]); lbgwvp3q = 0; } xd4mybgj = (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] < 1.0e0) ? 1.0e0 : tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]; lfu2qhid += ufgqj9ck[ayfnwr1v-1] * (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] * log(xd4mybgj/t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]) + (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk) * log((t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns] + hdqsx7bk) / (hdqsx7bk+tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]))); } } else { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { hdqsx7bk = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]); uqnkc6zg = fvlmz9iyC_tldz5ion(hdqsx7bk + tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]); hofjnx2e = fvlmz9iyC_tldz5ion(hdqsx7bk); txlvcey5 = fvlmz9iyC_tldz5ion(1.0e0 + tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]); xd4mybgj = hdqsx7bk * log(hdqsx7bk / (hdqsx7bk + t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns])) + uqnkc6zg - hofjnx2e - txlvcey5; if (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] > 0.0e0) { xd4mybgj += tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] * log(t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns] / (hdqsx7bk + t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns])); } lfu2qhid += ufgqj9ck[ayfnwr1v-1] * xd4mybgj; } lfu2qhid *= (-0.5e0); } } if (*qfx3vhct == 8) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { lfu2qhid += *fpdlcqk9ufgqj9ck++ * pow(*fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr, (double) 2.0); fpdlcqk9t8hwvalr += *afpc0kns; } } } *jxacz5qu = 2.0e0 * lfu2qhid; } void yiumjq3nflncwkfq76(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int *br5ovgcj, int *xwdf5ltg, int *qfx3vhct) { int ayfnwr1v, hpmwnav2; // sedf7mxb = 1; double *fpdlcqk9w8znmyce, *fpdlcqk9lncwkfq7; fpdlcqk9w8znmyce = w8znmyce; fpdlcqk9lncwkfq7 = lncwkfq7; if (*qfx3vhct == 3 || *qfx3vhct == 5) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = 1.0e0; *fpdlcqk9w8znmyce++ = 0.0e0; } for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = 0.0e0; *fpdlcqk9w8znmyce++ = 1.0e0; } for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { 
*fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++; *fpdlcqk9w8znmyce++ = 0.0e0; } } } else { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = 1.0e0; } if (*br5ovgcj != *ftnjamu2) Rprintf("Error: *br5ovgcj != *ftnjamu2 in C_flncwkfq76\n"); for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++; } } } } void yiumjq3nflncwkfq71(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int *xwdf5ltg, int *qfx3vhct, double vm4xjosb[], int *br5ovgcj, int *xlpjcg3s, double kifxa0he[], int *yru9olks, int *unhycz0e) { int i0spbklx, ayfnwr1v, hpmwnav2, // sedf7mxb = *xwdf5ltg + 1, hyqwtp6i = *xwdf5ltg * (*xwdf5ltg + 1) / 2; double *fpdlcqk9lncwkfq7, *fpdlcqk9lncwkfq71, *fpdlcqk9lncwkfq72, *fpdlcqk9w8znmyce, *fpdlcqk9vm4xjosb, *fpdlcqk9kifxa0he; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7; wkumc9idtgiyxdw1 = Calloc(hyqwtp6i, int); wkumc9iddufozmt7 = Calloc(hyqwtp6i, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, xwdf5ltg); fpdlcqk9w8znmyce = w8znmyce; fpdlcqk9lncwkfq7 = fpdlcqk9lncwkfq71 = fpdlcqk9lncwkfq72 = lncwkfq7; if (*qfx3vhct == 3 || *qfx3vhct == 5) { // ggg if (*br5ovgcj != 2 * *ftnjamu2) //Rprinf Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_flncwkfq71\n"); for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++; *fpdlcqk9w8znmyce++ = 0.0e0; } } if (*unhycz0e == 0) { for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) { fpdlcqk9lncwkfq71 = lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9lncwkfq72 = lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++; *fpdlcqk9w8znmyce++ = 0.0e0; } } } else { fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) *fpdlcqk9vm4xjosb++ = 0.0; fpdlcqk9lncwkfq7 = lncwkfq7; for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0); fpdlcqk9vm4xjosb++; } } fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vm4xjosb *= (-0.50e0); fpdlcqk9vm4xjosb++; } } } else { // ggg and hhh for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7++; } } if (*unhycz0e == 0) { for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) { fpdlcqk9lncwkfq71 = lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9lncwkfq72 = lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++; } } } else { fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) *fpdlcqk9vm4xjosb++ = 0.0; fpdlcqk9lncwkfq7 = lncwkfq7; for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0); fpdlcqk9vm4xjosb++; } } fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vm4xjosb *= (-0.50e0); fpdlcqk9vm4xjosb++; } } } // hhh if (*yru9olks > 0) { if (*qfx3vhct == 3 || *qfx3vhct == 5) { // kkk for (ayfnwr1v = 1; ayfnwr1v <= 
*ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = 1.0e0; *fpdlcqk9w8znmyce++ = 0.0e0; } for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = 0.0e0; *fpdlcqk9w8znmyce++ = 1.0e0; } if (*yru9olks > 1) { fpdlcqk9kifxa0he = kifxa0he; // + (i0spbklx-1) * *ftnjamu2; for (i0spbklx = 2; i0spbklx <= *yru9olks; i0spbklx++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9kifxa0he++; *fpdlcqk9w8znmyce++ = 0.0e0; } } } } else { // kkk and iii fpdlcqk9kifxa0he = kifxa0he; // + (i0spbklx-1) * *ftnjamu2; for (i0spbklx = 1; i0spbklx <= *yru9olks; i0spbklx++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9kifxa0he++; } } } // iii } // if (*yru9olks > 0) Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); } void yiumjq3nflncwkfq72(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int *wy1vqfzu, int *br5ovgcj, int *xwdf5ltg, int *qfx3vhct, int *afpc0kns, int *fmzq7aob, int *eu3oxvyb, int *unhycz0e, double vm4xjosb[]) { int i0spbklx, ayfnwr1v, yq6lorbx, gp1jxzuh, hpmwnav2, sedf7mxb = 0, hyqwtp6i = *xwdf5ltg * (*xwdf5ltg + 1) / 2; double uqnkc6zg, *fpdlcqk9lncwkfq7, *fpdlcqk9lncwkfq71, *fpdlcqk9lncwkfq72, *fpdlcqk9w8znmyce, *fpdlcqk9vm4xjosb; int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7; wkumc9idtgiyxdw1 = Calloc(hyqwtp6i, int); wkumc9iddufozmt7 = Calloc(hyqwtp6i, int); fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, xwdf5ltg); fpdlcqk9w8znmyce = w8znmyce; fpdlcqk9lncwkfq7 = lncwkfq7; for (gp1jxzuh = 1; gp1jxzuh <= *eu3oxvyb; gp1jxzuh++) { for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++) *fpdlcqk9w8znmyce++ = 0.0e0; } fpdlcqk9w8znmyce = w8znmyce; if (*qfx3vhct == 3 || *qfx3vhct == 5) { if (*br5ovgcj != 2 * *ftnjamu2) //Rprinf Rprintf("Error: *br5ovgcj != 2 * *ftnjamu2 in C_flncwkfq72\n"); for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { fpdlcqk9w8znmyce = w8znmyce + sedf7mxb * *br5ovgcj; fpdlcqk9lncwkfq7 = lncwkfq7 + (hpmwnav2-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { *fpdlcqk9w8znmyce = *fpdlcqk9lncwkfq7; fpdlcqk9w8znmyce += 2 + *br5ovgcj; } fpdlcqk9lncwkfq7++; fpdlcqk9w8znmyce -= *afpc0kns * *br5ovgcj; // fixed@20100406 } sedf7mxb += *afpc0kns; } } else { for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { fpdlcqk9w8znmyce = w8znmyce + sedf7mxb * *br5ovgcj; fpdlcqk9lncwkfq7 = lncwkfq7 + (hpmwnav2-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9w8znmyce++ = *fpdlcqk9lncwkfq7; fpdlcqk9w8znmyce += *br5ovgcj; } fpdlcqk9lncwkfq7++; fpdlcqk9w8znmyce -= *wy1vqfzu * *br5ovgcj; // fixed@20100406 } sedf7mxb += *wy1vqfzu; } } if (*fmzq7aob == 0) { if (*qfx3vhct == 3 || *qfx3vhct == 5) { for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) { fpdlcqk9lncwkfq71 = lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9lncwkfq72 = lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9w8znmyce = w8znmyce + sedf7mxb * *br5ovgcj; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++; for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { *fpdlcqk9w8znmyce = uqnkc6zg; fpdlcqk9w8znmyce += 2 + *br5ovgcj; } fpdlcqk9w8znmyce -= *afpc0kns * *br5ovgcj; // fixed@20100406 } sedf7mxb += *afpc0kns; } } else { for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) { fpdlcqk9lncwkfq71 = lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9lncwkfq72 = lncwkfq7 + 
(wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9w8znmyce = w8znmyce + sedf7mxb * *br5ovgcj; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9w8znmyce++ = uqnkc6zg; fpdlcqk9w8znmyce += *br5ovgcj; } fpdlcqk9w8znmyce -= *wy1vqfzu * *br5ovgcj; // fixed@20100406 } sedf7mxb += *wy1vqfzu; } } } else { if (*unhycz0e == 1) { fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) *fpdlcqk9vm4xjosb++ = 0.0; fpdlcqk9lncwkfq7 = lncwkfq7; for (hpmwnav2 = 1; hpmwnav2 <= *xwdf5ltg; hpmwnav2++) { fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vm4xjosb += pow(*fpdlcqk9lncwkfq7++, (double) 2.0); fpdlcqk9vm4xjosb++; } } fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vm4xjosb *= (-0.50e0); fpdlcqk9vm4xjosb++; } } else { if (*qfx3vhct == 3 || *qfx3vhct == 5) { for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) { fpdlcqk9lncwkfq71 = lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9lncwkfq72 = lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9w8znmyce = w8znmyce + (sedf7mxb+i0spbklx-1) * *br5ovgcj; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++; for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { *fpdlcqk9w8znmyce++ = uqnkc6zg; fpdlcqk9w8znmyce++; } } } sedf7mxb += hyqwtp6i; } else { for (i0spbklx = 1; i0spbklx <= hyqwtp6i; i0spbklx++) { fpdlcqk9lncwkfq71 = lncwkfq7 + (wkumc9idtgiyxdw1[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9lncwkfq72 = lncwkfq7 + (wkumc9iddufozmt7[i0spbklx-1]-1) * *ftnjamu2; fpdlcqk9w8znmyce = w8znmyce + (sedf7mxb+i0spbklx-1) * *br5ovgcj; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { uqnkc6zg = *fpdlcqk9lncwkfq71++ * *fpdlcqk9lncwkfq72++; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) *fpdlcqk9w8znmyce++ = uqnkc6zg; } } sedf7mxb += hyqwtp6i; } } } Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7); } void yiumjq3nietam6(double tlgduey8[], double m0ibglfx[], double y7sdgtqi[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *qfx3vhct, int *hj3ftvzu, double ufgqj9ck[], int *wr0lbopv) { int ayfnwr1v; double gyuq8dex, g2vwexykp, qa8ltuhj, vogkfwt8 = 0.0e0, msrdjh5f = 0.0e0, kwvo4ury, cpz4fgkx, tad5vhsu, khl0iysgk, myoffset = 1.0 / 32.0; double *fpdlcqk9tlgduey8, *fpdlcqk9m0ibglfx, *fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2, *fpdlcqk9ufgqj9ck; fpdlcqk9m0ibglfx = fpdlcqk9m0ibglfx1 = fpdlcqk9m0ibglfx2 = &tad5vhsu; gyuq8dex = 1.0; fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2; fpdlcqk9ufgqj9ck = ufgqj9ck; if (*qfx3vhct == 3 || *qfx3vhct == 5) { fpdlcqk9m0ibglfx1 = m0ibglfx + 2 * *hj3ftvzu-1; fpdlcqk9m0ibglfx2 = m0ibglfx + 2 * *hj3ftvzu-2; } else { fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1; } if (*qfx3vhct == 1 || *qfx3vhct == 4 || *qfx3vhct == 3 || *qfx3vhct == 5) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { msrdjh5f += *fpdlcqk9ufgqj9ck; vogkfwt8 += *fpdlcqk9tlgduey8++ * *fpdlcqk9ufgqj9ck++; } gyuq8dex = vogkfwt8 / msrdjh5f; fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2; } if (*qfx3vhct == 1) { yiumjq3ng2vwexyk9(&gyuq8dex, &g2vwexykp); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = g2vwexykp; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 2) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = log(*fpdlcqk9tlgduey8++ + myoffset); fpdlcqk9m0ibglfx += 
*wy1vqfzu; } } if (*qfx3vhct == 4) { yiumjq3nbewf1pzv9(&gyuq8dex, &qa8ltuhj); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = qa8ltuhj; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 5) { if (*wr0lbopv == 1 || *wr0lbopv == 2) { kwvo4ury = *wr0lbopv == 1 ? log(gyuq8dex + myoffset) : log((6.0 / 8.0) * gyuq8dex); cpz4fgkx = log(y7sdgtqi[3 + *afpc0kns + *hj3ftvzu -1] + myoffset); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx2 = kwvo4ury; *fpdlcqk9m0ibglfx1 = cpz4fgkx; fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; } } else { cpz4fgkx = log(y7sdgtqi[3 + *afpc0kns + *hj3ftvzu -1] + myoffset); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx2 = log(*fpdlcqk9tlgduey8++ + myoffset); *fpdlcqk9m0ibglfx1 = cpz4fgkx; fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; } } } if (*qfx3vhct == 3) { if (*wr0lbopv == 1) { kwvo4ury = log(gyuq8dex + myoffset); cpz4fgkx = log(y7sdgtqi[3 + *hj3ftvzu -1] + myoffset); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx2 = kwvo4ury; *fpdlcqk9m0ibglfx1 = cpz4fgkx; fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; } } else if (*wr0lbopv == 2) { kwvo4ury = log(gyuq8dex + myoffset); khl0iysgk = y7sdgtqi[3 + *hj3ftvzu -1]; cpz4fgkx = log(khl0iysgk); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { tad5vhsu = *fpdlcqk9tlgduey8 - gyuq8dex; *fpdlcqk9m0ibglfx2 = (tad5vhsu < 3.0 * gyuq8dex) ? kwvo4ury : log(sqrt(*fpdlcqk9tlgduey8)); *fpdlcqk9m0ibglfx1 = cpz4fgkx; fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; fpdlcqk9tlgduey8++; } } else if (*wr0lbopv == 3) { kwvo4ury = log(gyuq8dex + myoffset); khl0iysgk = y7sdgtqi[3 + *hj3ftvzu -1]; cpz4fgkx = log(khl0iysgk); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { tad5vhsu = *fpdlcqk9tlgduey8 - gyuq8dex; if (tad5vhsu > gyuq8dex) { *fpdlcqk9m0ibglfx2 = log(0.5 * (*fpdlcqk9tlgduey8 + gyuq8dex)); *fpdlcqk9m0ibglfx1 = log(khl0iysgk / (tad5vhsu / gyuq8dex)); } else if (*fpdlcqk9tlgduey8 < (gyuq8dex / 4.0)) { *fpdlcqk9m0ibglfx2 = log(gyuq8dex / 4.0); *fpdlcqk9m0ibglfx1 = cpz4fgkx; } else { *fpdlcqk9m0ibglfx2 = kwvo4ury; *fpdlcqk9m0ibglfx1 = cpz4fgkx; } fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; fpdlcqk9tlgduey8++; } } else { cpz4fgkx = log(y7sdgtqi[3 + *hj3ftvzu - 1]); for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx2 = log(*fpdlcqk9tlgduey8++ + myoffset); *fpdlcqk9m0ibglfx1 = cpz4fgkx; fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; } } } if (*qfx3vhct == 8) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = *fpdlcqk9tlgduey8++; fpdlcqk9m0ibglfx += *wy1vqfzu; } } } void yiumjq3ndlgpwe0c(double tlgduey8[], double ufgqj9ck[], double m0ibglfx[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double *rsynp1go, double *dn3iasxug, double *uaf2xgqy, int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *hj3ftvzu, int *qfx3vhct, int *zjkrtol8, int *unhycz0e, double vm4xjosb[]) { int ayfnwr1v, lbgwvp3q = -7; //qfx3vhct # kvowz9ht double xd4mybgja, xd4mybgjb, xd4mybgjc, anopu9vi; double *fpdlcqk9m0ibglfx, *fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2, *fpdlcqk9t8hwvalr, *fpdlcqk9vm4xjosb, *fpdlcqk9wpuarq2m, *fpdlcqk9ufgqj9ck, *fpdlcqk9rbne6ouj, *fpdlcqk9tlgduey8, *fpdlcqk9ghz9vuba; double hdqsx7bk, dkdeta, dldk, ux3nadiw, ed2ldk2, n2kersmx; double bzmd6ftvmat[1], kkmat[1], nm0eljqk[1]; int dvhw1ulq, sguwj9ty, pqneb2ra = 1; double 
jtnbu2hz, uqnkc6zgd, uqnkc6zgt, dldshape, fvn3iasxug, xk7dnvei; int okobr6tcex; double tmp1; fpdlcqk9m0ibglfx = fpdlcqk9m0ibglfx1 = fpdlcqk9m0ibglfx2 = &xd4mybgja; lbgwvp3q += 7; lbgwvp3q *= lbgwvp3q; n2kersmx = 0.990e0; n2kersmx = 0.995e0; fpdlcqk9m0ibglfx = m0ibglfx + *hj3ftvzu-1; if (*qfx3vhct == 3 || *qfx3vhct == 5) { fpdlcqk9m0ibglfx1 = m0ibglfx + 2 * *hj3ftvzu-1; fpdlcqk9m0ibglfx2 = m0ibglfx + 2 * *hj3ftvzu-2; } fpdlcqk9t8hwvalr = t8hwvalr + *hj3ftvzu-1; fpdlcqk9vm4xjosb = vm4xjosb; fpdlcqk9wpuarq2m = wpuarq2m + *hj3ftvzu-1; fpdlcqk9ufgqj9ck = ufgqj9ck; fpdlcqk9rbne6ouj = rbne6ouj + (*hj3ftvzu-1) * *ftnjamu2; fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2; fpdlcqk9ghz9vuba = ghz9vuba + (*hj3ftvzu-1) * *ftnjamu2; if (*qfx3vhct == 1) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { xd4mybgja = *fpdlcqk9t8hwvalr * (1.0e0 - *fpdlcqk9t8hwvalr); xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck++; if (xd4mybgja < *dn3iasxug) xd4mybgja = *dn3iasxug; if (xd4mybgjb < *dn3iasxug) { xd4mybgjb = *dn3iasxug; *fpdlcqk9wpuarq2m = *uaf2xgqy; } else { *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb); } *fpdlcqk9rbne6ouj++ = xd4mybgjb; *fpdlcqk9ghz9vuba++ = *fpdlcqk9m0ibglfx + (*fpdlcqk9tlgduey8++ - *fpdlcqk9t8hwvalr) / xd4mybgja; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9wpuarq2m += *npjlv3mr; fpdlcqk9m0ibglfx += *wy1vqfzu; } } if (*qfx3vhct == 2) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { xd4mybgja = *fpdlcqk9t8hwvalr; xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck++; if (xd4mybgjb < *dn3iasxug) { xd4mybgjb = *dn3iasxug; *fpdlcqk9wpuarq2m = *uaf2xgqy; } else { *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb); } *fpdlcqk9rbne6ouj = xd4mybgjb; if (*fpdlcqk9tlgduey8 > 0.0e0) { xd4mybgjc = xd4mybgja; if (xd4mybgjc < *dn3iasxug) xd4mybgjc = *dn3iasxug; *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx + (*fpdlcqk9tlgduey8 - xd4mybgjc) / xd4mybgjc; } else { *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx - 1.0e0; } fpdlcqk9m0ibglfx += *wy1vqfzu; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9wpuarq2m += *npjlv3mr; fpdlcqk9rbne6ouj++; fpdlcqk9tlgduey8++; fpdlcqk9ghz9vuba++; } } if (*qfx3vhct == 4) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { if (*fpdlcqk9t8hwvalr < *dn3iasxug || *fpdlcqk9t8hwvalr > 1.0e0 - *dn3iasxug) { xd4mybgja = *dn3iasxug; xd4mybgjb = xd4mybgja * *fpdlcqk9ufgqj9ck; if (xd4mybgjb < *dn3iasxug) { xd4mybgjb = *dn3iasxug; *fpdlcqk9wpuarq2m = *uaf2xgqy; } else { *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb); } *fpdlcqk9rbne6ouj = xd4mybgjb; *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx + (*fpdlcqk9tlgduey8 - *fpdlcqk9t8hwvalr) / xd4mybgja; } else { xd4mybgja = -(1.0e0 - *fpdlcqk9t8hwvalr) * log(1.0e0 - *fpdlcqk9t8hwvalr); if (xd4mybgja < *dn3iasxug) { xd4mybgja = *dn3iasxug; } xd4mybgjb = -xd4mybgja * *fpdlcqk9ufgqj9ck * log(1.0e0 - *fpdlcqk9t8hwvalr) / *fpdlcqk9t8hwvalr; if (xd4mybgjb < *dn3iasxug) { xd4mybgjb = *dn3iasxug; } *fpdlcqk9rbne6ouj = xd4mybgjb; *fpdlcqk9wpuarq2m = sqrt(xd4mybgjb); *fpdlcqk9ghz9vuba = *fpdlcqk9m0ibglfx + (*fpdlcqk9tlgduey8 - *fpdlcqk9t8hwvalr) / xd4mybgja; } fpdlcqk9m0ibglfx += *wy1vqfzu; fpdlcqk9t8hwvalr += *afpc0kns; fpdlcqk9wpuarq2m += *npjlv3mr; fpdlcqk9ufgqj9ck++; fpdlcqk9rbne6ouj++; fpdlcqk9tlgduey8++; fpdlcqk9ghz9vuba++; } } if (*qfx3vhct == 5) { fvn3iasxug = 1.0e-20; anopu9vi = 34.0e0; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] > anopu9vi) { jtnbu2hz = exp(anopu9vi); lbgwvp3q = 1; } else if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) { jtnbu2hz = exp(-anopu9vi); lbgwvp3q = 1; } else { jtnbu2hz = 
exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]); lbgwvp3q = 0; } tyee_C_vdgam1(&jtnbu2hz, &uqnkc6zgd, &okobr6tcex); if (okobr6tcex != 1) { Rprintf("Error 1 in dlgpwe0c okobr6tcex=%d. Ploughing on.\n", okobr6tcex); } xk7dnvei = t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]; if (xk7dnvei < fvn3iasxug) { xk7dnvei = fvn3iasxug; } dldshape = log(tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2]) + log(jtnbu2hz) - log(xk7dnvei) + 1.0e0 - uqnkc6zgd - tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei; tyee_C_vtgam1(&jtnbu2hz, &uqnkc6zgt, &okobr6tcex); if (okobr6tcex != 1) { Rprintf("Error 2 in dlgpwe0c okobr6tcex=%d. Ploughing on.\n", okobr6tcex); } rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = ufgqj9ck[ayfnwr1v-1] * jtnbu2hz; xd4mybgja = jtnbu2hz * uqnkc6zgt - 1.0e0; rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = ufgqj9ck[ayfnwr1v-1] * jtnbu2hz * xd4mybgja; if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] < *dn3iasxug) { rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = *dn3iasxug; wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy; } else { wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2]); } if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] < *dn3iasxug) { rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = *dn3iasxug; wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy; } else { wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2]); } if (xd4mybgja < fvn3iasxug) { xd4mybgja = fvn3iasxug; } ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = m0ibglfx[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *wy1vqfzu] + tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei - 1.0e0; ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] + dldshape / xd4mybgja; } } if (*qfx3vhct == 3) { anopu9vi = 34.0e0; fvn3iasxug = 1.0e-20; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] > anopu9vi) { hdqsx7bk = exp(anopu9vi); lbgwvp3q = 1; } else if (m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] < -anopu9vi) { hdqsx7bk = exp(-anopu9vi); lbgwvp3q = 1; } else { hdqsx7bk = exp(m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu]); lbgwvp3q = 0; } xk7dnvei = t8hwvalr[*hj3ftvzu-1 + (ayfnwr1v-1) * *afpc0kns]; if (xk7dnvei < fvn3iasxug) { xk7dnvei = fvn3iasxug; } tmp1 = tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk; tyee_C_vdgam1(&tmp1, &xd4mybgja, &okobr6tcex); if (okobr6tcex != 1) { Rprintf("error in dlgpwe0c okobr6tcex 3: %3d \n", okobr6tcex); } tyee_C_vdgam1(&hdqsx7bk, &xd4mybgjb, &okobr6tcex); if (okobr6tcex != 1) { Rprintf("error in dlgpwe0c okobr6tcex 4: %3d \n", okobr6tcex); } dldk = xd4mybgja - xd4mybgjb - (tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] + hdqsx7bk) / (xk7dnvei + hdqsx7bk) + 1.0 + log(hdqsx7bk / (xk7dnvei + hdqsx7bk)); dkdeta = hdqsx7bk; kkmat[0] = hdqsx7bk; nm0eljqk[0] = xk7dnvei; sguwj9ty = 5000; fvlmz9iyC_enbin9(bzmd6ftvmat, kkmat, nm0eljqk, &n2kersmx, &pqneb2ra, &dvhw1ulq, &pqneb2ra, &ux3nadiw, rsynp1go, &sguwj9ty); if (dvhw1ulq != 1) { *zjkrtol8 = 5; Rprintf("Error. 
Exiting enbin9; dvhw1ulq is %d\n", dvhw1ulq); return; } ed2ldk2 = -bzmd6ftvmat[0] - 1.0e0 / hdqsx7bk + 1.0e0 / (hdqsx7bk + xk7dnvei); rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = ufgqj9ck[ayfnwr1v-1] * xk7dnvei * hdqsx7bk / (xk7dnvei + hdqsx7bk); rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = ufgqj9ck[ayfnwr1v-1] * hdqsx7bk * (-bzmd6ftvmat[0] * hdqsx7bk - 1.0e0 + hdqsx7bk / (hdqsx7bk + xk7dnvei)); if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] < *dn3iasxug) { rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = *dn3iasxug; wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy; } else wpuarq2m[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *npjlv3mr] = sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2]); if (rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] < *dn3iasxug) { rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = *dn3iasxug; wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = *uaf2xgqy; } else { wpuarq2m[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *npjlv3mr] = sqrt(rbne6ouj[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2]); } ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-2) * *ftnjamu2] = m0ibglfx[2 * *hj3ftvzu-2 + (ayfnwr1v-1) * *wy1vqfzu] + tlgduey8[ayfnwr1v-1 + (*hj3ftvzu-1) * *ftnjamu2] / xk7dnvei - 1.0e0; ghz9vuba[ayfnwr1v-1 + (2 * *hj3ftvzu-1) * *ftnjamu2] = m0ibglfx[2 * *hj3ftvzu-1 + (ayfnwr1v-1) * *wy1vqfzu] + dldk / (dkdeta * ed2ldk2); } } if (*qfx3vhct == 8) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9rbne6ouj = *fpdlcqk9ufgqj9ck++; *fpdlcqk9wpuarq2m = sqrt(*fpdlcqk9rbne6ouj); *fpdlcqk9ghz9vuba++ = *fpdlcqk9tlgduey8++; fpdlcqk9wpuarq2m += *npjlv3mr; fpdlcqk9rbne6ouj++; } } if (*unhycz0e == 1) { fpdlcqk9ghz9vuba = ghz9vuba + ((*qfx3vhct == 3 || *qfx3vhct == 5) ? (2 * *hj3ftvzu-2) : (*hj3ftvzu-1)) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9ghz9vuba -= *fpdlcqk9vm4xjosb++; fpdlcqk9ghz9vuba++; } } } void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[], double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double w8znmyce[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[]) { int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z, yu6izdrc = 0, kcm6jfob, fmzq7aob, xwdf5ltg, kvowz9ht, f7svlajr, qfx3vhct, c5aesxkul, pqneb2ra = 1; int ybnsqgo9, algpft4y, qemj9asg, xlpjcg3s, eu3oxvyb, vtsou9pz, unhycz0e, wr0lbopv; double dn3iasxug, wiptsjx8, bh2vgiay, pvofyg8z = 1.0e-7, uylxqtc7 = 0.0, uaf2xgqy, vsoihn1r, rsynp1go; // rpto5qwb, double *qnwamo0e1, *fpdlcqk9w8znmyce, *fpdlcqk9m0ibglfx, *fpdlcqk9vm4xjosb, *fpdlcqk9vc6hatuj, *fpdlcqk9wpuarq2m, *fpdlcqk9ghz9vuba; double hmayv1xt1 = 10.0, hmayv1xt2 = 0.0; int x1jrewny = 0; double *wkumc9idrpto5qwb, *wkumc9idtwk; wkumc9idrpto5qwb = Calloc(1 + *afpc0kns , double); wkumc9idtwk = Calloc(*wy1vqfzu * *ftnjamu2 * 2, double); xwdf5ltg = xui7hqwl[0]; fmzq7aob = xui7hqwl[1]; xlpjcg3s = xui7hqwl[2]; kvowz9ht = xui7hqwl[3]; f7svlajr = xui7hqwl[4]; qfx3vhct = xui7hqwl[5]; c5aesxkul = xui7hqwl[6]; xui7hqwl[8] = 0; eu3oxvyb = xui7hqwl[10]; vtsou9pz = xui7hqwl[11]; unhycz0e = xui7hqwl[13]; wr0lbopv = xui7hqwl[17]; dn3iasxug = y7sdgtqi[0]; uaf2xgqy = sqrt(dn3iasxug); if (qfx3vhct == 1 || qfx3vhct == 4) vsoihn1r = log(dn3iasxug); bh2vgiay = y7sdgtqi[1]; rsynp1go = y7sdgtqi[2]; hmayv1xt1 -= bh2vgiay; hmayv1xt2 -= rsynp1go; hmayv1xt1 += 
hmayv1xt2; *zjkrtol8 = 1; yiumjq3nflncwkfq72(lncwkfq7, w8znmyce, ftnjamu2, wy1vqfzu, br5ovgcj, &xwdf5ltg, &qfx3vhct, afpc0kns, &fmzq7aob, &eu3oxvyb, &unhycz0e, vm4xjosb); ceqzd1hi653: hmayv1xt2 = 1.0e0; if (f7svlajr == 0) { for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &yq6lorbx, ufgqj9ck, &wr0lbopv); } } else if (f7svlajr == 2) { yiumjq3npkc4ejib(w8znmyce, zshtfg8c, m0ibglfx, ftnjamu2, wy1vqfzu, br5ovgcj, &xlpjcg3s, &vtsou9pz, &yu6izdrc, &qfx3vhct, &unhycz0e, vm4xjosb); } yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &yu6izdrc); if (f7svlajr == 2) { yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, wkumc9idrpto5qwb, &yu6izdrc, &dn3iasxug, &vsoihn1r, &pqneb2ra); } else { wkumc9idrpto5qwb[0] = -1.0e0; } for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) { for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, &yq6lorbx, &qfx3vhct, zjkrtol8, &unhycz0e, vm4xjosb); } fpdlcqk9vc6hatuj = vc6hatuj; fpdlcqk9w8znmyce = w8znmyce; for (yq6lorbx = 1; yq6lorbx <= xlpjcg3s; yq6lorbx++) for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++) *fpdlcqk9vc6hatuj++ = *fpdlcqk9w8znmyce++; if (qfx3vhct == 3 || qfx3vhct == 5) { Rprintf("20100410; Error: this definitely does not work\n"); if (2 * *wy1vqfzu * *ftnjamu2 != *br5ovgcj) Rprintf("Error: 2 * *wy1vqfzu * *ftnjamu2 != *br5ovgcj in C_cqo_2\n"); fpdlcqk9vc6hatuj = vc6hatuj; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) { fpdlcqk9wpuarq2m = wpuarq2m; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m++; fpdlcqk9vc6hatuj++; } } } } else { if (*wy1vqfzu * *ftnjamu2 != *br5ovgcj) Rprintf("Error: *wy1vqfzu * *ftnjamu2 != *br5ovgcj in C_cqo_2\n"); fpdlcqk9vc6hatuj = vc6hatuj; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) { fpdlcqk9wpuarq2m = wpuarq2m; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) { *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m++; fpdlcqk9vc6hatuj++; } } } } for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) ges1xpkr[gp1jxzuh-1] = gp1jxzuh; F77_CALL(vqrdca)(vc6hatuj, br5ovgcj, br5ovgcj, &xlpjcg3s, fasrkub3, ges1xpkr, wkumc9idtwk, &qemj9asg, &pvofyg8z); if (qemj9asg != xlpjcg3s) { *zjkrtol8 = 2; Rprintf("Error in cqo_2: vc6hatuj is not of full rank.\n"); Free(wkumc9idrpto5qwb); Free(wkumc9idtwk); return; } if (*npjlv3mr != *wy1vqfzu) Rprintf("Error: *wy1vqfzu != *npjlv3mr in C_cqo_2\n"); qnwamo0e1 = wkumc9idtwk; fpdlcqk9wpuarq2m = wpuarq2m; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { fpdlcqk9ghz9vuba = ghz9vuba + ayfnwr1v-1; for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *qnwamo0e1++ = *fpdlcqk9wpuarq2m++ * *fpdlcqk9ghz9vuba; fpdlcqk9ghz9vuba += *ftnjamu2; } } ybnsqgo9 = 101; F77_CALL(vdqrsl)(vc6hatuj, br5ovgcj, br5ovgcj, &qemj9asg, fasrkub3, wkumc9idtwk, &uylxqtc7, wkumc9idtwk + *wy1vqfzu * *ftnjamu2, zshtfg8c, &uylxqtc7, m0ibglfx, &ybnsqgo9, &algpft4y); if (*npjlv3mr != *wy1vqfzu) Rprintf("Error: *wy1vqfzu != *npjlv3mr in C_cqo_2\n"); fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9wpuarq2m = wpuarq2m; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { 
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx /= *fpdlcqk9wpuarq2m++; fpdlcqk9m0ibglfx++; } } if (unhycz0e == 1) { if (qfx3vhct == 3 || qfx3vhct == 5) { if (2 * *afpc0kns != *wy1vqfzu) //Rprintf Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_cqo_2\n"); fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *afpc0kns; yq6lorbx++) { *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb; fpdlcqk9m0ibglfx += 2; } fpdlcqk9vm4xjosb++; } } else { fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) { *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb; fpdlcqk9m0ibglfx++; } fpdlcqk9vm4xjosb++; } } } yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &yu6izdrc); yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, tlq9wpes, &yu6izdrc, &dn3iasxug, &vsoihn1r, &pqneb2ra); wiptsjx8 = fabs(*tlq9wpes - *wkumc9idrpto5qwb) / (1.0e0 + fabs(*tlq9wpes)); if (wiptsjx8 < bh2vgiay) { // xxx *zjkrtol8 = 0; xui7hqwl[7] = kcm6jfob; if (qfx3vhct == 3 || qfx3vhct == 5) { yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, tlq9wpes, &yu6izdrc, &dn3iasxug, &vsoihn1r, &yu6izdrc); } x1jrewny = 1; goto ceqzd1hi20097; } else { // xxx and *wkumc9idrpto5qwb = *tlq9wpes; x1jrewny = 0; } } ceqzd1hi20097: hmayv1xt1 = 0.0e0; if (x1jrewny == 1) { Free(wkumc9idrpto5qwb); Free(wkumc9idtwk); return; } if (f7svlajr == 1 || f7svlajr == 2) { f7svlajr = 0; xui7hqwl[8] = 1; goto ceqzd1hi653; } *zjkrtol8 = 3; Free(wkumc9idrpto5qwb); Free(wkumc9idtwk); } void cqo_1(double lncwkfq7[], double tlgduey8[], double kifxa0he[], double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double w8znmyce[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[]) { int ayfnwr1v, hj3ftvzu, yu6izdrc = 0, pqneb2ra = 1, wr0lbopv, kcm6jfob, unhycz0e, xwdf5ltg, kvowz9ht, f7svlajr, qfx3vhct, c5aesxkul, ybnsqgo9, algpft4y, qemj9asg, xlpjcg3s, vtsou9pz, yru9olks; double dn3iasxug, wiptsjx8, pvofyg8z = 1.0e-7, uylxqtc7 = 0.0, bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go, rpto5qwb; double *fpdlcqk9zshtfg8c, *fpdlcqk9w8znmyce, *fpdlcqk9m0ibglfx, *fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2, *fpdlcqk9vm4xjosb, *fpdlcqk9vc6hatuj, *fpdlcqk9twk, *fpdlcqk9wpuarq2m, *fpdlcqk9wpuarq2m1, *fpdlcqk9wpuarq2m2, *fpdlcqk9ghz9vuba1, *fpdlcqk9ghz9vuba2; int gp1jxzuh; double hmayv1xt = 2.0, Totdev = 0.0e0; double *wkumc9idtwk; wkumc9idtwk = Calloc(*br5ovgcj * 3 , double); xwdf5ltg = xui7hqwl[0]; xlpjcg3s = xui7hqwl[2]; kvowz9ht = xui7hqwl[3]; f7svlajr = xui7hqwl[4]; qfx3vhct = xui7hqwl[5]; c5aesxkul = xui7hqwl[6]; xui7hqwl[8] = 0; // twice vtsou9pz = xui7hqwl[11]; zjkrtol8[0] = -1; for (ayfnwr1v = 1; ayfnwr1v <= *afpc0kns; ayfnwr1v++) zjkrtol8[ayfnwr1v] = 1; if (vtsou9pz != 1) { Rprintf("Error: vtsou9pz is not unity in cqo_1!\n"); *zjkrtol8 = 4; Free(wkumc9idtwk); return; } unhycz0e = xui7hqwl[13]; yru9olks = xui7hqwl[15]; wr0lbopv = xui7hqwl[17]; //20120222; correct but unused. 
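// Added reader's note (not original code): a minimal sketch of cqo_1's
// control flow below, assuming the mangled names keep their apparent roles
// (hj3ftvzu = response/species index, c5aesxkul = maximum IRLS iterations,
// bh2vgiay = relative-deviance convergence tolerance):
//
//   for (hj3ftvzu = 1; hj3ftvzu <= *afpc0kns; hj3ftvzu++) {    // each species
//     initialize or restore eta (yiumjq3nietam6 / yiumjq3npkc4ejib);
//     for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) {  // IRLS loop
//       build working response and weights (yiumjq3ndlgpwe0c);
//       weighted least squares via QR (vqrdca + vdqrsl);
//       update eta, fitted values and deviance (yiumjq3nnipyajc1,
//       yiumjq3nshjlwft5);
//       stop when the relative deviance change falls below bh2vgiay;
//     }
//     on non-convergence, retry once from internal starting values.
//   }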
dn3iasxug = y7sdgtqi[0]; uaf2xgqy = sqrt(dn3iasxug); if (qfx3vhct == 1 || qfx3vhct == 4) vsoihn1r = log(dn3iasxug); bh2vgiay = y7sdgtqi[1]; rsynp1go = y7sdgtqi[2]; hmayv1xt -= rsynp1go; hmayv1xt += hmayv1xt; yiumjq3nflncwkfq71(lncwkfq7, w8znmyce, ftnjamu2, &xwdf5ltg, &qfx3vhct, vm4xjosb, br5ovgcj, &xlpjcg3s, kifxa0he, &yru9olks, &unhycz0e); for (hj3ftvzu = 1; hj3ftvzu <= *afpc0kns; hj3ftvzu++) { ceqzd1hi653: hmayv1xt = 1.0e0; if (f7svlajr == 0) { yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu, ufgqj9ck, &wr0lbopv); } else if (f7svlajr == 2) { yiumjq3npkc4ejib(w8znmyce, zshtfg8c + (hj3ftvzu-1) * xlpjcg3s, m0ibglfx, ftnjamu2, wy1vqfzu, br5ovgcj, &xlpjcg3s, &vtsou9pz, &hj3ftvzu, &qfx3vhct, &unhycz0e, vm4xjosb); } yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu); if (f7svlajr == 2) { yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, &rpto5qwb, &hj3ftvzu, &dn3iasxug, &vsoihn1r, &pqneb2ra); } else { rpto5qwb = -1.0e0; } for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) { yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, &hj3ftvzu, &qfx3vhct, zjkrtol8 + hj3ftvzu, &unhycz0e, vm4xjosb); fpdlcqk9vc6hatuj = vc6hatuj; fpdlcqk9w8znmyce = w8znmyce; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) for (ayfnwr1v = 1; ayfnwr1v <= *br5ovgcj; ayfnwr1v++) *fpdlcqk9vc6hatuj++ = *fpdlcqk9w8znmyce++; if (qfx3vhct == 3 || qfx3vhct == 5) { if (2 * *ftnjamu2 != *br5ovgcj) Rprintf("Error: 2 * *ftnjamu2 != *br5ovgcj in C_cqo_1\n"); fpdlcqk9vc6hatuj = vc6hatuj; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) { fpdlcqk9wpuarq2m2 = wpuarq2m + 2*hj3ftvzu -2; fpdlcqk9wpuarq2m1 = wpuarq2m + 2*hj3ftvzu -1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m2; fpdlcqk9vc6hatuj++; *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m1; fpdlcqk9vc6hatuj++; fpdlcqk9wpuarq2m1 += *npjlv3mr; fpdlcqk9wpuarq2m2 += *npjlv3mr; } } } else { if (1 * *ftnjamu2 != *br5ovgcj) Rprintf("Error: 1 * *ftnjamu2 != *br5ovgcj in C_cqo_1\n"); fpdlcqk9vc6hatuj = vc6hatuj; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) { fpdlcqk9wpuarq2m = wpuarq2m + hj3ftvzu -1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9vc6hatuj *= *fpdlcqk9wpuarq2m; fpdlcqk9vc6hatuj++; fpdlcqk9wpuarq2m += *npjlv3mr; } } } for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) ges1xpkr[gp1jxzuh-1] = gp1jxzuh; F77_CALL(vqrdca)(vc6hatuj, br5ovgcj, br5ovgcj, &xlpjcg3s, fasrkub3, ges1xpkr, wkumc9idtwk, &qemj9asg, &pvofyg8z); if (qemj9asg != xlpjcg3s) { Rprintf("Error in cqo_1: vc6hatuj is not of full rank.\n"); *zjkrtol8 = 2; Free(wkumc9idtwk); return; } if (qfx3vhct == 3 || qfx3vhct == 5) { fpdlcqk9ghz9vuba1 = ghz9vuba + (2*hj3ftvzu-1) * *ftnjamu2; fpdlcqk9ghz9vuba2 = ghz9vuba + (2*hj3ftvzu-2) * *ftnjamu2; fpdlcqk9wpuarq2m1 = wpuarq2m + 2*hj3ftvzu-1; fpdlcqk9wpuarq2m2 = wpuarq2m + 2*hj3ftvzu-2; fpdlcqk9twk = wkumc9idtwk; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m2 * *fpdlcqk9ghz9vuba2++; *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m1 * *fpdlcqk9ghz9vuba1++; fpdlcqk9wpuarq2m1 += *npjlv3mr; fpdlcqk9wpuarq2m2 += *npjlv3mr; } } else { fpdlcqk9ghz9vuba1 = ghz9vuba + (hj3ftvzu-1) * *ftnjamu2; fpdlcqk9twk = wkumc9idtwk; fpdlcqk9wpuarq2m = wpuarq2m + hj3ftvzu-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; 
ayfnwr1v++) { *fpdlcqk9twk++ = *fpdlcqk9wpuarq2m * *fpdlcqk9ghz9vuba1++; fpdlcqk9wpuarq2m += *npjlv3mr; } } ybnsqgo9 = 101; F77_CALL(vdqrsl)(vc6hatuj, br5ovgcj, br5ovgcj, &qemj9asg, fasrkub3, wkumc9idtwk, &uylxqtc7, wkumc9idtwk + *br5ovgcj, zshtfg8c + (hj3ftvzu-1) * xlpjcg3s, &uylxqtc7, wkumc9idtwk + 2 * *br5ovgcj, &ybnsqgo9, &algpft4y); fpdlcqk9twk = wkumc9idtwk; fpdlcqk9zshtfg8c = zshtfg8c + (hj3ftvzu-1) * xlpjcg3s; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) { *fpdlcqk9twk++ = *fpdlcqk9zshtfg8c++; } fpdlcqk9twk = wkumc9idtwk; fpdlcqk9zshtfg8c = zshtfg8c + (hj3ftvzu-1) * xlpjcg3s; for (gp1jxzuh = 1; gp1jxzuh <= xlpjcg3s; gp1jxzuh++) { *(fpdlcqk9zshtfg8c + ges1xpkr[gp1jxzuh-1] - 1) = *fpdlcqk9twk++; } if (qfx3vhct == 3 || qfx3vhct == 5) { fpdlcqk9m0ibglfx2 = m0ibglfx + 2 * hj3ftvzu -2; fpdlcqk9m0ibglfx1 = m0ibglfx + 2 * hj3ftvzu -1; fpdlcqk9twk = wkumc9idtwk + 2 * *br5ovgcj; fpdlcqk9wpuarq2m2 = wpuarq2m + 2 * hj3ftvzu -2; fpdlcqk9wpuarq2m1 = wpuarq2m + 2 * hj3ftvzu -1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx2 = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m2; *fpdlcqk9m0ibglfx1 = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m1; fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9m0ibglfx2 += *wy1vqfzu; fpdlcqk9wpuarq2m1 += *npjlv3mr; fpdlcqk9wpuarq2m2 += *npjlv3mr; } if (unhycz0e == 1) { fpdlcqk9m0ibglfx = m0ibglfx + 2*hj3ftvzu-2; fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++; fpdlcqk9m0ibglfx += *wy1vqfzu; } } } else { fpdlcqk9m0ibglfx = m0ibglfx + hj3ftvzu -1; fpdlcqk9twk = wkumc9idtwk + 2 * *br5ovgcj; fpdlcqk9wpuarq2m = wpuarq2m + hj3ftvzu -1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx = *fpdlcqk9twk++ / *fpdlcqk9wpuarq2m; fpdlcqk9m0ibglfx += *wy1vqfzu; fpdlcqk9wpuarq2m += *npjlv3mr; } if (unhycz0e == 1) { fpdlcqk9m0ibglfx = m0ibglfx + hj3ftvzu-1; fpdlcqk9vm4xjosb = vm4xjosb; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9m0ibglfx += *fpdlcqk9vm4xjosb++; fpdlcqk9m0ibglfx += *wy1vqfzu; } } } yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu); yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu, &dn3iasxug, &vsoihn1r, &pqneb2ra); wiptsjx8 = fabs(tlq9wpes[hj3ftvzu] - rpto5qwb) / (1.0e0 + fabs(tlq9wpes[hj3ftvzu])); if (wiptsjx8 < bh2vgiay) { zjkrtol8[hj3ftvzu] = 0; xui7hqwl[7] = kcm6jfob; if (qfx3vhct == 3 || qfx3vhct == 5) { yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu, &dn3iasxug, &vsoihn1r, &yu6izdrc); } Totdev += tlq9wpes[hj3ftvzu]; goto ceqzd1hi1011; } else { rpto5qwb = tlq9wpes[hj3ftvzu]; } } Rprintf("cqo_1; no convergence for Species "); Rprintf("number %3d. Trying internal starting values.\n", hj3ftvzu); if (f7svlajr == 1) { f7svlajr = 0; xui7hqwl[8] = 1; goto ceqzd1hi653; } *zjkrtol8 = 3; zjkrtol8[hj3ftvzu] = 2; Rprintf("cqo_1; no convergence for Species "); Rprintf("number %3d. 
Continuing on with other species.\n", hj3ftvzu); Totdev += tlq9wpes[hj3ftvzu]; ceqzd1hi1011: hmayv1xt = 3.0e0; } if (zjkrtol8[0] == -1) for (ayfnwr1v = 1; ayfnwr1v <= *afpc0kns; ayfnwr1v++) if (zjkrtol8[ayfnwr1v] != 0) zjkrtol8[0] = 1; if (zjkrtol8[0] == -1) zjkrtol8[0] = 0; *tlq9wpes = Totdev; Free(wkumc9idtwk); } void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[], double ufgqj9ck[], double m0ibglfx[], double vm4xjosb[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double w8znmyce[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double *tlq9wpes, double zshtfg8c[], double y7sdgtqi[], double atujnxb8[], double k7hulceq[], int *eoviz2fb, double kpzavbj3mat[], double *ydcnh9xl) { int ayfnwr1v, gp1jxzuh, xvr7bonh, hpmwnav2, idlosrw8, xwdf5ltg = xui7hqwl[ 0], vtsou9pz; int exrkcn5d = xui7hqwl[12]; double fxnhilr3, *fpdlcqk9k7hulceq, *fpdlcqk9kpzavbj3mat, *fpdlcqk9lncwkfq7, *fpdlcqk9yxiwebc5, *fpdlcqk9atujnxb8; double *wkumc9idajul8wkv, *wkumc9iddev0, *wkumc9idyxiwebc5; wkumc9idajul8wkv = Calloc(exrkcn5d , double); wkumc9iddev0 = Calloc(1 + *afpc0kns , double); wkumc9idyxiwebc5 = Calloc(*ftnjamu2 * xwdf5ltg , double); fpdlcqk9kpzavbj3mat = kpzavbj3mat; idlosrw8 = xui7hqwl[ 4]; vtsou9pz = xui7hqwl[11]; fpdlcqk9lncwkfq7 = lncwkfq7; fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5; for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { fxnhilr3 = 0.0e0; fpdlcqk9k7hulceq = k7hulceq + (hpmwnav2-1) * *eoviz2fb; fpdlcqk9atujnxb8 = atujnxb8 + ayfnwr1v-1; for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) { fxnhilr3 += *fpdlcqk9atujnxb8 * *fpdlcqk9k7hulceq++; fpdlcqk9atujnxb8 += *ftnjamu2; } *fpdlcqk9yxiwebc5++ = *fpdlcqk9lncwkfq7++ = fxnhilr3; } } if (vtsou9pz == 1) { cqo_1(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck, m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrkub3, ges1xpkr, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, zjkrtol8, xui7hqwl, wkumc9iddev0, wkumc9idajul8wkv, y7sdgtqi); } else { cqo_2(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck, m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrkub3, ges1xpkr, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, zjkrtol8, xui7hqwl, wkumc9iddev0, wkumc9idajul8wkv, y7sdgtqi); } fpdlcqk9atujnxb8 = atujnxb8; for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9atujnxb8 *= *ydcnh9xl; fpdlcqk9atujnxb8++; } } for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) { for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) { fpdlcqk9lncwkfq7 = lncwkfq7 + (hpmwnav2-1) * *ftnjamu2; fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2; fpdlcqk9atujnxb8 = atujnxb8 + (xvr7bonh-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++ + *fpdlcqk9atujnxb8++; } xui7hqwl[4] = 2; for (gp1jxzuh = 1; gp1jxzuh <= exrkcn5d; gp1jxzuh++) zshtfg8c[gp1jxzuh-1] = wkumc9idajul8wkv[gp1jxzuh-1]; if (vtsou9pz == 1) { cqo_1(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck, m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, fasrkub3, ges1xpkr, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, zjkrtol8, xui7hqwl, tlq9wpes, zshtfg8c, y7sdgtqi); } else { cqo_2(lncwkfq7, tlgduey8, kifxa0he, ufgqj9ck, m0ibglfx, vm4xjosb, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, w8znmyce, vc6hatuj, 
fasrkub3, ges1xpkr, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, zjkrtol8, xui7hqwl, tlq9wpes, zshtfg8c, y7sdgtqi); } if (*zjkrtol8 != 0) { Rprintf("Error in dcqo1: zjkrtol8 = %d\n", *zjkrtol8); Rprintf("Continuing.\n"); } *fpdlcqk9kpzavbj3mat++ = (*tlq9wpes - *wkumc9iddev0) / *ydcnh9xl; } if (xwdf5ltg > 1) { fpdlcqk9lncwkfq7 = lncwkfq7 + (hpmwnav2-1) * *ftnjamu2; fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++; } } Free(wkumc9idajul8wkv); Free(wkumc9iddev0); Free(wkumc9idyxiwebc5); xui7hqwl[4] = idlosrw8; } void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[], double m0ibglfx[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double tlq9wpes[], double zshtfg8c[], double y7sdgtqi[], int psdvgce3[], int *qfozcl5b, double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double kispwgx3[], double mbvnaor6[], double hjm2ktyr[], int jnxpuym2[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[]) { int hj3ftvzu, ehtjigf4, kvowz9ht, yu6izdrc = 0, pqneb2ra = 1, xwdf5ltg = xui7hqwl[0], f7svlajr, qfx3vhct, c5aesxkul, wr0lbopv, vtsou9pz, xlpjcg3s, sedf7mxb, kcm6jfob, lensmo = (xwdf5ltg == 1 ? 2 : 4) * *afpc0kns; double rpto5qwb, dn3iasxug, wiptsjx8, bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go, fjcasv7g[6], ghdetj8v = 0.0; double *fpdlcqk9kispwgx3; int len_1spp_ifys6woa; double hmayv1xt = 0.0, Totdev = 0.0e0; int qes4mujl, ayfnwr1v, kij0gwer, xumj5dnk, lyma1kwc; // = xui7hqwl[10]; double hmayv1xtvm4xjosb[2]; double *fpdlcqk9lxyst1eb, *fpdlcqk9zyodca3j, *fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2, *fpdlcqk9wpuarq2m1, *fpdlcqk9wpuarq2m2; double *wkumc9idui8ysltq, *wkumc9idlxyst1eb, *wkumc9idzyodca3j; double *wkumc9idhdnw2fts, *wkumc9idwbkq9zyi; fjcasv7g[0] = 0.001; fjcasv7g[1] = 0.0; fjcasv7g[2] = -1.5; fjcasv7g[3] = 1.5; fjcasv7g[4] = 1.0e-4; fjcasv7g[5] = 2.0e-8; wkumc9idui8ysltq = Calloc((*ftnjamu2 * *wy1vqfzu) * (*afpc0kns * *wy1vqfzu), double); wkumc9idlxyst1eb = Calloc( *qfozcl5b * *ftnjamu2 , double); wkumc9idzyodca3j = Calloc( *qfozcl5b * *ftnjamu2 , double); wkumc9idhdnw2fts = Calloc(lensmo , double); wkumc9idwbkq9zyi = Calloc(lensmo , double); for (ayfnwr1v = 0; ayfnwr1v < lensmo; ayfnwr1v++) { wkumc9idhdnw2fts[ayfnwr1v] = hdnw2fts[ayfnwr1v]; wkumc9idwbkq9zyi[ayfnwr1v] = wbkq9zyi[ayfnwr1v]; } xlpjcg3s = xui7hqwl[2]; kvowz9ht = xui7hqwl[3]; // # = 1 f7svlajr = xui7hqwl[4]; qfx3vhct = xui7hqwl[5]; c5aesxkul = xui7hqwl[6]; xui7hqwl[8] = 0; lyma1kwc = psdvgce3[10]; // vtsou9pz = xui7hqwl[11]; if (vtsou9pz != 1 || lyma1kwc != xwdf5ltg) { Rprintf("Error: 'vtsou9pz' != 1, or 'lyma1kwc' != 'xwdf5ltg', in vcao6!\n"); *zjkrtol8 = 4; Free(wkumc9idui8ysltq); Free(wkumc9idlxyst1eb); Free(wkumc9idzyodca3j); Free(wkumc9idhdnw2fts); Free(wkumc9idwbkq9zyi); return; } wr0lbopv = xui7hqwl[17]; dn3iasxug = y7sdgtqi[0]; uaf2xgqy = sqrt(dn3iasxug); vsoihn1r = log(dn3iasxug); bh2vgiay = y7sdgtqi[1]; rsynp1go = y7sdgtqi[2]; hmayv1xt += hmayv1xt; hmayv1xt *= hmayv1xt; len_1spp_ifys6woa = lindex[lyma1kwc] - 1; *zjkrtol8 = 1; for (hj3ftvzu = 1; hj3ftvzu <= *afpc0kns; hj3ftvzu++) { ceqzd1hi653: hmayv1xt = 1.0; qes4mujl = (qfx3vhct == 3 || qfx3vhct == 5) ? 
2 * hj3ftvzu - 1 : hj3ftvzu; if (f7svlajr == 0) { yiumjq3nietam6(tlgduey8, m0ibglfx, y7sdgtqi, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu, ufgqj9ck, &wr0lbopv); } else if (f7svlajr != 1) { Rprintf("Failure due to bad input of 'f7svlajr' variable\n"); *zjkrtol8 = 6; Free(wkumc9idui8ysltq); Free(wkumc9idlxyst1eb); Free(wkumc9idzyodca3j); Free(wkumc9idhdnw2fts); Free(wkumc9idwbkq9zyi); return; } yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu); if (f7svlajr == 2) { yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, &rpto5qwb, &hj3ftvzu, &dn3iasxug, &vsoihn1r, &pqneb2ra); } else { rpto5qwb = -1.0e0; } for (kcm6jfob = 1; kcm6jfob <= c5aesxkul; kcm6jfob++) { yiumjq3nflncwkfq76(lncwkfq7, vc6hatuj, ftnjamu2, br5ovgcj, &xwdf5ltg, &qfx3vhct); psdvgce3[6] = 0; yiumjq3ndlgpwe0c(tlgduey8, ufgqj9ck, m0ibglfx, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, &rsynp1go, &dn3iasxug, &uaf2xgqy, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, &hj3ftvzu, &qfx3vhct, zjkrtol8, &yu6izdrc, hmayv1xtvm4xjosb); fpdlcqk9lxyst1eb = wkumc9idlxyst1eb; fpdlcqk9zyodca3j = wkumc9idzyodca3j; fpdlcqk9m0ibglfx1 = m0ibglfx + qes4mujl-1; fpdlcqk9wpuarq2m1 = wpuarq2m + qes4mujl-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { fpdlcqk9m0ibglfx2 = fpdlcqk9m0ibglfx1; fpdlcqk9wpuarq2m2 = fpdlcqk9wpuarq2m1; for (kij0gwer = 1; kij0gwer <= *qfozcl5b; kij0gwer++) { *fpdlcqk9lxyst1eb++ = *fpdlcqk9m0ibglfx2++; *fpdlcqk9zyodca3j++ = *fpdlcqk9wpuarq2m2++; } fpdlcqk9m0ibglfx1 += *wy1vqfzu; fpdlcqk9wpuarq2m1 += *npjlv3mr; } sedf7mxb = 0; // 20100416 a stop gap. Used for xwdf5ltg==2 only i think. ehtjigf4 = xwdf5ltg * (hj3ftvzu-1); if (kcm6jfob == 1) { for (kij0gwer = 1; kij0gwer <= lyma1kwc; kij0gwer++) { fpdlcqk9kispwgx3 = kispwgx3 + (ehtjigf4 + hnpt1zym[kij0gwer-1]-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) *fpdlcqk9kispwgx3++ = 0.0e0; } } else { wbkq9zyi[ ehtjigf4 + hnpt1zym[0]-1] = wkumc9idwbkq9zyi[ ehtjigf4 + hnpt1zym[0]-1]; hdnw2fts[ ehtjigf4 + hnpt1zym[0]-1] = wkumc9idhdnw2fts[ ehtjigf4 + hnpt1zym[0]-1]; if (xwdf5ltg == 2) { wbkq9zyi[ ehtjigf4 + hnpt1zym[1]-1] = wkumc9idwbkq9zyi[ ehtjigf4 + hnpt1zym[1]-1]; // wkumc9idr3eoxkzp; hdnw2fts[sedf7mxb + ehtjigf4 + hnpt1zym[1]-1] = wkumc9idhdnw2fts[sedf7mxb + ehtjigf4 + hnpt1zym[1]-1]; // wkumc9idwld4qctn; } } Yee_vbfa(psdvgce3, fjcasv7g, mbvnaor6, ghz9vuba + (qes4mujl-1) * *ftnjamu2, rbne6ouj + (qes4mujl-1) * *ftnjamu2, hdnw2fts + sedf7mxb + ehtjigf4 + hnpt1zym[0] - 1, lamvec + ehtjigf4 + hnpt1zym[0] - 1, wbkq9zyi + ehtjigf4 + hnpt1zym[0] - 1, ezlgm2up, lqsahu0r, which, kispwgx3 + (ehtjigf4 + *hnpt1zym - 1) * *ftnjamu2, wkumc9idlxyst1eb, zshtfg8c + (hj3ftvzu - 1) * xlpjcg3s, wkumc9idui8ysltq, vc6hatuj, fasrkub3, ges1xpkr, wkumc9idzyodca3j, hjm2ktyr, jnxpuym2, hnpt1zym, iz2nbfjc, ifys6woa + ehtjigf4 * len_1spp_ifys6woa, rpyis2kc + (hj3ftvzu-1) * (nbzjkpi3[xwdf5ltg] - 1), gkdx5jals, nbzjkpi3, lindex, // 20130525; lindex added acpios9q, jwbkl9fp); y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v; xumj5dnk = psdvgce3[13]; if (xumj5dnk != 0) { Rprintf("vcao6: Error... 
exiting; error code = %d\n", xumj5dnk); *zjkrtol8 = 8; Free(wkumc9idui8ysltq); Free(wkumc9idlxyst1eb); Free(wkumc9idzyodca3j); Free(wkumc9idhdnw2fts); Free(wkumc9idwbkq9zyi); return; } fpdlcqk9lxyst1eb = wkumc9idlxyst1eb; fpdlcqk9m0ibglfx1 = m0ibglfx + qes4mujl-1; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { fpdlcqk9m0ibglfx2 = fpdlcqk9m0ibglfx1; for (kij0gwer = 1; kij0gwer <= *qfozcl5b; kij0gwer++) { *fpdlcqk9m0ibglfx2++ = *fpdlcqk9lxyst1eb++; } fpdlcqk9m0ibglfx1 += *wy1vqfzu; } yiumjq3nnipyajc1(m0ibglfx, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &qfx3vhct, &hj3ftvzu); yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu, &dn3iasxug, &vsoihn1r, &pqneb2ra); wiptsjx8 = fabs(tlq9wpes[hj3ftvzu] - rpto5qwb) / (1.0e0 + fabs(tlq9wpes[hj3ftvzu])); if (wiptsjx8 < bh2vgiay) { *zjkrtol8 = 0; xui7hqwl[7] = kcm6jfob; if (qfx3vhct == 3 || qfx3vhct == 5) { yiumjq3nshjlwft5(&qfx3vhct, tlgduey8, ufgqj9ck, t8hwvalr, ftnjamu2, wy1vqfzu, afpc0kns, &kvowz9ht, m0ibglfx, tlq9wpes + hj3ftvzu, &hj3ftvzu, &dn3iasxug, &vsoihn1r, &yu6izdrc); } Totdev += tlq9wpes[hj3ftvzu]; goto ceqzd1hi1011; } else { rpto5qwb = tlq9wpes[hj3ftvzu]; } } if (f7svlajr == 1) { f7svlajr = 0; xui7hqwl[8] = 1; goto ceqzd1hi653; } *zjkrtol8 = 3; Totdev += tlq9wpes[hj3ftvzu]; ceqzd1hi1011: hmayv1xt = 2.0e0; } *tlq9wpes = Totdev; Free(wkumc9idui8ysltq); Free(wkumc9idlxyst1eb); Free(wkumc9idzyodca3j); Free(wkumc9idhdnw2fts); Free(wkumc9idwbkq9zyi); } void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[], double m0ibglfx[], double t8hwvalr[], double ghz9vuba[], double rbne6ouj[], double wpuarq2m[], double vc6hatuj[], double fasrkub3[], int ges1xpkr[], int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr, int *zjkrtol8, int xui7hqwl[], double tlq9wpes[], double zshtfg8c[], double y7sdgtqi[], double atujnxb8[], double k7hulceq[], int *eoviz2fb, double kpzavbj3mat[], double ajul8wkv[], int psdvgce3[], int *qfozcl5b, double hdnw2fts[], double lamvec[], double wbkq9zyi[], int ezlgm2up[], int lqsahu0r[], int which[], double kispwgx3[], double mbvnaor6[], double hjm2ktyr[], int jnxpuym2[], int hnpt1zym[], int iz2nbfjc[], double ifys6woa[], double rpyis2kc[], double gkdx5jals[], int nbzjkpi3[], int lindex[], int acpios9q[], int jwbkl9fp[]) { int ayfnwr1v, xvr7bonh, hpmwnav2, idlosrw8, xwdf5ltg = xui7hqwl[ 0], vtsou9pz; double fxnhilr3; double ghdetj8v = 0.0e0, ydcnh9xl = y7sdgtqi[3 + *afpc0kns + *afpc0kns + 3 -1]; double *fpdlcqk9k7hulceq, *fpdlcqk9kpzavbj3mat, *fpdlcqk9lncwkfq7, *fpdlcqk9yxiwebc5, *fpdlcqk9atujnxb8; double *wkumc9idyxiwebc5; double *wkumc9idlxyst1eb, *wkumc9idzyodca3j; double *wkumc9iddev0; wkumc9idyxiwebc5 = Calloc(*ftnjamu2 * xwdf5ltg , double); fpdlcqk9kpzavbj3mat = kpzavbj3mat; wkumc9iddev0 = Calloc(1 + *afpc0kns , double); wkumc9idlxyst1eb = Calloc( *qfozcl5b * *ftnjamu2 , double); wkumc9idzyodca3j = Calloc( *qfozcl5b * *ftnjamu2 , double); idlosrw8 = xui7hqwl[ 4]; vtsou9pz = xui7hqwl[11]; fpdlcqk9lncwkfq7 = lncwkfq7; fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5; for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { fxnhilr3 = 0.0e0; fpdlcqk9k7hulceq = k7hulceq + (hpmwnav2-1) * *eoviz2fb; fpdlcqk9atujnxb8 = atujnxb8 + ayfnwr1v-1; for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) { fxnhilr3 += *fpdlcqk9atujnxb8 * *fpdlcqk9k7hulceq++; fpdlcqk9atujnxb8 += *ftnjamu2; } *fpdlcqk9yxiwebc5++ = *fpdlcqk9lncwkfq7++ = fxnhilr3; } } if (vtsou9pz == 1) { 
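// Added reader's note (not original code): vdcao6 appears to compute
// numerical derivatives of the CAO deviance with respect to the
// latent-variable coefficients.  It forms the latent variables above as
// the matrix product lncwkfq7 = atujnxb8 times k7hulceq, fits once here
// for a baseline deviance (wkumc9iddev0), and then, further below, refits
// after perturbing the latent variables along each predictor column of
// atujnxb8 scaled by the step ydcnh9xl, storing the forward differences
//   (*tlq9wpes - *wkumc9iddev0) / ydcnh9xl
// in kpzavbj3mat.  This reading is inferred from the code, not from any
// original documentation.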
vcao6(lncwkfq7, tlgduey8, ufgqj9ck, m0ibglfx, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, zjkrtol8, xui7hqwl, wkumc9iddev0, ajul8wkv, y7sdgtqi, psdvgce3, qfozcl5b, hdnw2fts, lamvec, wbkq9zyi, ezlgm2up, lqsahu0r, which, kispwgx3, mbvnaor6, hjm2ktyr, jnxpuym2, hnpt1zym, iz2nbfjc, ifys6woa, rpyis2kc, gkdx5jals, nbzjkpi3, lindex, acpios9q, jwbkl9fp); y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v; } fpdlcqk9atujnxb8 = atujnxb8; for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9atujnxb8 *= ydcnh9xl; fpdlcqk9atujnxb8++; } } for (hpmwnav2 = 1; hpmwnav2 <= xwdf5ltg; hpmwnav2++) { fpdlcqk9atujnxb8 = atujnxb8; // + (xvr7bonh-1) * *ftnjamu2; for (xvr7bonh = 1; xvr7bonh <= *eoviz2fb; xvr7bonh++) { for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) { *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++ + *fpdlcqk9atujnxb8++; } xui7hqwl[4] = 0; if (vtsou9pz == 1) { vcao6(lncwkfq7, tlgduey8, ufgqj9ck, m0ibglfx, t8hwvalr, ghz9vuba, rbne6ouj, wpuarq2m, vc6hatuj, fasrkub3, ges1xpkr, ftnjamu2, wy1vqfzu, afpc0kns, br5ovgcj, npjlv3mr, zjkrtol8, xui7hqwl, tlq9wpes, zshtfg8c, y7sdgtqi, psdvgce3, qfozcl5b, hdnw2fts, lamvec, wbkq9zyi, ezlgm2up, lqsahu0r, which, kispwgx3, mbvnaor6, hjm2ktyr, jnxpuym2, hnpt1zym, iz2nbfjc, ifys6woa, rpyis2kc, gkdx5jals, nbzjkpi3, lindex, acpios9q, jwbkl9fp); y7sdgtqi[3 + *afpc0kns + *afpc0kns] = ghdetj8v; } if (*zjkrtol8 != 0) { Rprintf("Warning: failured to converge in vdcao6. \n"); Rprintf("Continuing.\n"); } *fpdlcqk9kpzavbj3mat++ = (*tlq9wpes - *wkumc9iddev0) / ydcnh9xl; } if (xwdf5ltg > 1) { fpdlcqk9lncwkfq7 = lncwkfq7 + (hpmwnav2-1) * *ftnjamu2; fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5 + (hpmwnav2-1) * *ftnjamu2; for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) *fpdlcqk9lncwkfq7++ = *fpdlcqk9yxiwebc5++; } } Free(wkumc9idyxiwebc5); Free(wkumc9iddev0 ); Free(wkumc9idlxyst1eb); Free(wkumc9idzyodca3j); xui7hqwl[4] = idlosrw8; } void yiumjq3npnm1or(double *objzgdk0, double *lfu2qhid) { int sn; double R1, R2, y, y2, y3, y4, y5, y6, y7; double erf, erfc, z, z2, z3, z4; double SQRT2 = 1.414213562373095049e0, SQRTPI = 1.772453850905516027e0, ULIMIT = 20.0e0, P10 = 242.66795523053175e0, P11 = 21.979261618294152e0, P12 = 6.9963834886191355e0, P13 = -.035609843701815385e0, Q10 = 215.05887586986120e0, Q11 = 91.164905404514901e0, Q12 = 15.082797630407787e0, Q13 = 1.0e0, P20 = 300.4592610201616005e0, P21 = 451.9189537118729422e0, P22 = 339.3208167343436870e0, P23 = 152.9892850469404039e0, P24 = 43.16222722205673530e0, P25 = 7.211758250883093659e0, P26 = .5641955174789739711e0, P27 = -.0000001368648573827167067e0, Q20 = 300.4592609569832933e0, Q21 = 790.9509253278980272e0, Q22 = 931.3540948506096211e0, Q23 = 638.9802644656311665e0, Q24 = 277.5854447439876434e0, Q25 = 77.00015293522947295e0, Q26 = 12.78272731962942351e0, Q27 = 1.0e0, P30 = -.00299610707703542174e0, P31 = -.0494730910623250734e0, P32 = -.226956593539686930e0, P33 = -.278661308609647788e0, P34 = -.0223192459734184686e0, Q30 = .0106209230528467918e0, Q31 = .191308926107829841e0, Q32 = 1.05167510706793207e0, Q33 = 1.98733201817135256e0, Q34 = 1.0e0; if (*objzgdk0 < -ULIMIT) { *lfu2qhid = 2.753624e-89; return; } if (*objzgdk0 > ULIMIT) { *lfu2qhid = 1.0e0; return; } y = *objzgdk0 / SQRT2; if (y < 0.0e0) { y = -y; sn = -1; } else { sn = 1; } y2 = y * y; y4 = y2 * y2; y6 = y4 * y2; if (y < 0.46875e0) { R1 = P10 + P11 * y2 + P12 * y4 + P13 * y6; R2 = Q10 + Q11 * y2 + Q12 * y4 + Q13 * y6; erf = y * 
R1 / R2; *lfu2qhid = (sn == 1) ? 0.5e0 + 0.5 * erf : 0.5e0 - 0.5 * erf; } else if (y < 4.0e0) { y3 = y2 * y; y5 = y4 * y; y7 = y6 * y; R1 = P20 + P21 * y + P22 * y2 + P23 * y3 + P24 * y4 + P25 * y5 + P26 * y6 + P27 * y7; R2 = Q20 + Q21 * y + Q22 * y2 + Q23 * y3 + Q24 * y4 + Q25 * y5 + Q26 * y6 + Q27 * y7; erfc = exp(-y2) * R1 / R2; *lfu2qhid = (sn == 1) ? 1.0 - 0.5 * erfc : 0.5 * erfc; } else { z = y4; z2 = z * z; z3 = z2 * z; z4 = z2 * z2; R1 = P30 + P31 * z + P32 * z2 + P33 * z3 + P34 * z4; R2 = Q30 + Q31 * z + Q32 * z2 + Q33 * z3 + Q34 * z4; erfc = (exp(-y2)/y) * (1.0 / SQRTPI + R1 / (R2 * y2)); *lfu2qhid = (sn == 1) ? 1.0 - 0.5 * erfc : 0.5 * erfc; } } void yiumjq3npnm1ow(double objzgdk0[], double lfu2qhid[], int *f8yswcat) { int ayfnwr1v; for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) { yiumjq3npnm1or(objzgdk0++, lfu2qhid++); } } VGAM/src/vlinpack2.f0000644000176200001440000001717513565414527013662 0ustar liggesusersc This file contains modified code from Hastie and Tibshirani's c GAMFIT code, as well as a rational cholesky function or two. c All code here derives from linpack c T.Yee 7/10/99 c This function was formerly real function dnrm2, but now converted c to double precision c Nb. changed "float(n)" to "dfloat(n)" double precision function vdnrm2 ( n, dx,ldx, incx) c c added by tyee 23/9/00: implicit double precision (a-h,o-z) implicit integer (i-n) c integer next double precision dx(ldx), cutlo, cuthi, hitest, sum double precision xmax,zero,one data zero, one /0.0d0, 1.0d0/ c c euclidean norm of the n-vector stored in dx() with storage c increment incx . c if n .le. 0 return with result = 0. c if n .ge. 1 then incx must be .ge. 1 c c c.l.lawson, 1978 jan 08 c c four phase method using two built-in constants that are c hopefully applicable to all machines. c cutlo = maximum of dsqrt(u/eps) over all known machines. c cuthi = minimum of dsqrt(v) over all known machines. c where c eps = smallest no. such that eps + 1. .gt. 1. c u = smallest positive no. (underflow limit) c v = largest no. (overflow limit) c c brief outline of algorithm.. c c phase 1 scans zero components. c move to phase 2 when a component is nonzero and .le. cutlo c move to phase 3 when a component is .gt. cutlo c move to phase 4 when a component is .ge. cuthi/m c where m = n for x() double precision and m = 2*n for complex. c c values for cutlo and cuthi.. c from the environmental parameters listed in the imsl converter c document the limiting values are as follows.. c cutlo, s.p. u/eps = 2**(-102) for honeywell. close seconds are c univac and dec at 2**(-103) c thus cutlo = 2**(-51) = 4.44089e-16 c cuthi, s.p. v = 2**127 for univac, honeywell, and dec. c thus cuthi = 2**(63.5) = 1.30438e19 c cutlo, d.p. u/eps = 2**(-67) for honeywell and dec. c thus cutlo = 2**(-33.5) = 8.23181e-11 c cuthi, d.p. same as s.p. cuthi = 1.30438e19 c data cutlo, cuthi / 8.232e-11, 1.304e19 / c data cutlo, cuthi / 4.441e-16, 1.304e19 / data cutlo, cuthi / 8.232e-11, 1.304e19 / c if(n .gt. 0) go to 10 vdnrm2 = zero go to 300 c 10 next = 30 sum = zero nn = n * incx c begin main loop i = 1 c 20 go to next,(30, 50, 70, 110) 20 if(next .eq. 30) go to 30 if(next .eq. 50) go to 50 if(next .eq. 70) go to 70 if(next .eq. 110) go to 110 c An error!!! vdnrm2 = 0.0d0 return 30 if( dabs(dx(i)) .gt. cutlo) go to 85 next = 50 xmax = zero c c phase 1. sum is zero c 50 if( dx(i) .eq. zero) go to 200 if( dabs(dx(i)) .gt. cutlo) go to 85 c c prepare for phase 2. next = 70 go to 105 c c prepare for phase 4. 
c 100 i = j next = 110 sum = (sum / dx(i)) / dx(i) 105 xmax = dabs(dx(i)) go to 115 c c phase 2. sum is small. c scale to avoid destructive underflow. c 70 if( dabs(dx(i)) .gt. cutlo ) go to 75 c c common code for phases 2 and 4. c in phase 4 sum is large. scale to avoid overflow. c 110 if( dabs(dx(i)) .le. xmax ) go to 115 c 11/4/01: replacing "**2.0d0" by "**2" (three times in this file) sum = one + sum * (xmax / dx(i))**2 xmax = dabs(dx(i)) go to 200 c 115 sum = sum + (dx(i)/xmax)**2 go to 200 c c c prepare for phase 3. c 75 sum = (sum * xmax) * xmax c c c for real or d.p. set hitest = cuthi/n c for complex set hitest = cuthi/(2*n) c 85 hitest = cuthi / dfloat( n ) c c phase 3. sum is mid-range. no scaling. c do 95 j =i,nn,incx if(dabs(dx(j)) .ge. hitest) go to 100 sum = sum + dx(j)**2 95 continue vdnrm2 = dsqrt( sum ) go to 300 c 200 continue i = i + incx if ( i .le. nn ) go to 20 c c end of main loop. c c compute square root and adjust for scaling. c vdnrm2 = xmax * dsqrt(sum) 300 continue return end c ============================================================== c This is modified linpack Fortran code c Changes marked with yyy c 23/9/99 c Works subroutine vdpbfa7(abd,lda,n,m,info,d) integer lda,n,m,info double precision abd(lda,*), d(n) c c c c 20130419: orig.: c double precision abd(lda,1), d(n) c c c c vdpbfa7 is dpbfa8 but uses Rational Cholesky instead of ordinary c Cholesky c c abd = t(u) d u where u is unit upper triangular and d is diagonal c the diagonal of d is stored where the 1's of the u would be stored c c See dpbfa8 for more information c d(1:n) is assigned the values of diag(d), and abd(m+1,) <- 1 c c Improvement yet to do: c delete d and put its contents into abd(m+1,) (intrinsic 1's) c c internal variables c c double precision ddot8 double precision s,t integer ik,j,jk,k,mu, i,row c begin block with ...exits to 40 c c c yyy d(1) = abd(m+1,1) c do 30 j = 1, n c print *, "j = ", j info = j s = 0.0d0 ik = m + 1 jk = max0(j-m,1) mu = max0(m+2-j,1) if (m .lt. mu) go to 20 do 10 k = mu, m c print *, " k = ", k c t = abd(k,j) - ddot8(k-mu,abd(ik,jk),1,abd(mu,j),1) c t = abd(k,j) do 1 i = 1,k-mu row = mu-2+i+j-m t = t - d(row)*abd(ik-1+i,jk)*abd(mu-1+i,j) c print *, " row = ", row 1 continue c c yyy c t = t/abd(m+1,jk) row = mu-2+(k-mu+1)+j-m c print *, " row = ", row t = t/d(row) c abd(k,j) = t c c yyy c print *, " index = ", mu-1+i+j-m s = s + t*t*d(row) c ik = ik - 1 jk = jk + 1 10 continue 20 continue s = abd(m+1,j) - s c c ......exit if (s .le. 
0.0d0) go to 40 c c yyy c abd(m+1,j) = dsqrt(s) abd(m+1,j) = 1d0 d(j) = s c 30 continue info = 0 40 continue return end subroutine vdpbsl7(abd,lda,n,m,b,d) integer lda,n,m double precision abd(lda,*),b(*),d(*) c c c c 20130419: orig: c double precision abd(lda,1),b(1),d(1) c c c c vdpbsl7 is dpbsl8 but uses Rational Cholesky instead of ordinary c Cholesky c c See dpbsl8 for more information c c Improvement yet to do: c delete d and put its contents into abd(m+1,) (intrinsic 1's) c c internal variables c double precision ddot8,t integer k,kb,la,lb,lm c c solve trans(r)*y = b c do 10 k = 1, n lm = min0(k-1,m) la = m + 1 - lm lb = k - lm t = ddot8(lm,abd(la,k),1,b(lb),1) c c yyy c b(k) = (b(k) - t)/abd(m+1,k) b(k) = b(k) - t c 10 continue c c c yyy do 15 k = 1, n b(k) = b(k)/d(k) 15 continue c c c solve r*x = y c do 20 kb = 1, n k = n + 1 - kb lm = min0(k-1,m) la = m + 1 - lm lb = k - lm c c yyy c b(k) = b(k)/abd(m+1,k) c t = -b(k) call daxpy8(lm,t,abd(la,k),1,b(lb),1) 20 continue return end VGAM/src/vdigami.f0000644000176200001440000000763013565414527013404 0ustar liggesusers SUBROUTINE vdigami(D, X, P, GPLOG, GP1LOG, PSIP, PSIP1, PSIDP, * PSIDP1, IFAULT, TMAX) C C ALGORITHM AS 187 APPL. STATIST. (1982) VOL.31, NO.3 C C Computes derivatives of the incomplete gamma integral for positive C parameters, X, P, using a series expansion if P > X or X <= 1, and C a continued fraction expansion otherwise. C C Calculation of D(4) in line 60 corrected 5 October 1993. C C N.B. The user must input values of the incomplete gamma, digamma C and trigamma functions. These can be obtained using AS 239 C (or 32), AS 103 and AS 121 respectively. C C C C C 20130214; adapted by T. W. Yee to handle DOUBLE PRECISION arguments. C And declarations of *all* variables. C And a wrapper function written to call this subroutine. C TMAX is now input. C Seems to work but more testing is required. C C 20141108; A, C, CP, CPP, DSP, DSPP, DFP, DFPP, F, S, TMAXP etc. now C declared, by T. W. Yee. C ABS() changed to DABS() too. C C DOUBLE PRECISION X, P, GPLOG, GP1LOG, PSIP, PSIP1, PSIDP, PSIDP1 DOUBLE PRECISION TMAX INTEGER IFAULT C DOUBLE PRECISION A, AN, B, C, CP, CPC, CPP, DSP, DSPP, DFP, DFPP DOUBLE PRECISION F, PM1, S, S0, XLOG, TERM, TMAXP C C C C C INTEGER I, I2 DOUBLE PRECISION PN(6), D(6), DP(6), DPP(6), ZERO, ONE, TWO C DATA TMAX/100.0/ DATA E, OFLO, VSMALL/1.D-6, 1.D30, 1.D-30/ DATA ZERO/0.0/, ONE/1.0/, TWO/2.0/ C IFAULT = 0 C C Derivatives with respect to X C PM1 = P - ONE XLOG = DLOG(X) D(1) = DEXP(-GPLOG + PM1*XLOG - X) D(2) = D(1) * (PM1/X - ONE) D(5) = D(1) * (XLOG - PSIP) C C Derivatives with respect to P C IF (X .GT. ONE .AND. X .GE. P) GO TO 30 C C Series expansion C F = DEXP(P*XLOG - GP1LOG - X) DFP = F * (XLOG - PSIP1) DFPP = DFP*DFP/F - F*PSIDP1 C TMAXP = TMAX + P C = ONE S = ONE CP = ZERO CPP = ZERO DSP = ZERO DSPP = ZERO A = P 1 A = A + ONE CPC = CP / C CP = CPC - ONE/A CPP = CPP/C - CPC*CPC + ONE/A**2 C = C*X/A CP = CP*C CPP = CPP*C + CP*CP/C S = S + C DSP = DSP + CP DSPP = DSPP + CPP IF (A .GT. TMAXP) GO TO 1001 IF (C .GT. 
E*S) GO TO 1 D(6) = S*F D(3) = S*DFP + F*DSP D(4) = S*DFPP + TWO*DFP*DSP + F*DSPP RETURN C C Continued fraction expansion C 30 F = DEXP(P*XLOG - GPLOG - X) DFP = F * (XLOG - PSIP) DFPP = DFP*DFP/F - F*PSIDP C A = PM1 B = X + ONE - A TERM = ZERO PN(1) = ONE PN(2) = X PN(3) = X + ONE PN(4) = X * B S0 = PN(3) / PN(4) DO 31 I = 1, 4 DP(I) = ZERO DPP(I) = ZERO 31 CONTINUE DP(4) = -X C 32 A = A - ONE B = B + TWO TERM = TERM + ONE AN = A*TERM PN(5) = B*PN(3) + AN*PN(1) PN(6) = B*PN(4) + AN*PN(2) DP(5) = B*DP(3) - PN(3) + AN*DP(1) + PN(1)*TERM DP(6) = B*DP(4) - PN(4) + AN*DP(2) + PN(2)*TERM DPP(5) = B*DPP(3) + AN*DPP(1) + TWO*(TERM*DP(1) - DP(3)) DPP(6) = B*DPP(4) + AN*DPP(2) + TWO*(TERM*DP(2) - DP(4)) C IF (DABS(PN(6)) .LT. VSMALL) GO TO 35 S = PN(5) / PN(6) C = DABS(S - S0) IF (C*P .GT. E) GO TO 34 IF (C .LE. E*S) GO TO 42 C 34 S0 = S 35 DO 36 I = 1, 4 I2 = I + 2 DP(I) = DP(I2) DPP(I) = DPP(I2) PN(I) = PN(I2) 36 CONTINUE C IF (TERM .GT. TMAX) GO TO 1001 IF (DABS(PN(5)) .LT. OFLO) GO TO 32 DO 41 I = 1, 4 DP(I) = DP(I) / OFLO DPP(I) = DPP(I) / OFLO PN(I) = PN(I) / OFLO 41 CONTINUE GO TO 32 C 42 D(6) = ONE - F*S DSP = (DP(5) - S*DP(6)) / PN(6) DSPP = (DPP(5) - S*DPP(6) - TWO*DSP*DP(6)) / PN(6) D(3) = -F*DSP - S*DFP D(4) = -F*DSPP - TWO*DSP*DFP - S*DFPP RETURN C C Set fault indicator C 1001 IFAULT = 1 RETURN END VGAM/src/gautr.c0000644000176200001440000001502713565414527013102 0ustar liggesusers#include "math.h" /* Frequently used numerical constants: */ #define OneUponSqrt2Pi .39894228040143267794 #define twopi 6.283195307179587 #define LnSqrt2Pi -0.9189385332046727417803296 #define SQRT2 1.414213562373095049 #define SQRTPI 1.772453850905516027 /* --------------------------------------------------------------------------- UNIVARIATE NORMAL PROBABILITY ---------------------------------------------------------------------------*/ #define UPPERLIMIT 20.0 /* I won't return either of univariate normal density or probability when x < -UPPERLIMIT or x > UPPERLIMIT. 
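   Added note (not in the original comment, offered as a reading of the
   code): the coefficient sets P1./Q1., P2./Q2. and P3./Q3. defined below
   appear to be Cody-style rational approximations to erf and erfc,
   applied on the three ranges y < 0.46875, 0.46875 <= y < 4.0, and
   y >= 4.0, where y = |x|/sqrt(2); pnorm1() then recovers the normal
   CDF from erf or erfc according to the sign of x.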
*/ #define P10 242.66795523053175 #define P11 21.979261618294152 #define P12 6.9963834886191355 #define P13 -.035609843701815385 #define Q10 215.05887586986120 #define Q11 91.164905404514901 #define Q12 15.082797630407787 #define Q13 1.0 #define P20 300.4592610201616005 #define P21 451.9189537118729422 #define P22 339.3208167343436870 #define P23 152.9892850469404039 #define P24 43.16222722205673530 #define P25 7.211758250883093659 #define P26 .5641955174789739711 #define P27 -.0000001368648573827167067 #define Q20 300.4592609569832933 #define Q21 790.9509253278980272 #define Q22 931.3540948506096211 #define Q23 638.9802644656311665 #define Q24 277.5854447439876434 #define Q25 77.00015293522947295 #define Q26 12.78272731962942351 #define Q27 1.0 #define P30 -.00299610707703542174 #define P31 -.0494730910623250734 #define P32 -.226956593539686930 #define P33 -.278661308609647788 #define P34 -.0223192459734184686 #define Q30 .0106209230528467918 #define Q31 .191308926107829841 #define Q32 1.05167510706793207 #define Q33 1.98733201817135256 #define Q34 1.0 double pnorm1(double x) { int sn; double R1, R2, y, y2, y3, y4, y5, y6, y7; double erf, erfc, z, z2, z3, z4; double phi; if (x < -UPPERLIMIT) return 0.0; if (x > UPPERLIMIT) return 1.0; y = x / SQRT2; if (y < 0) { y = -y; sn = -1; } else sn = 1; y2 = y * y; y4 = y2 * y2; y6 = y4 * y2; if(y < 0.46875) { R1 = P10 + P11 * y2 + P12 * y4 + P13 * y6; R2 = Q10 + Q11 * y2 + Q12 * y4 + Q13 * y6; erf = y * R1 / R2; if (sn == 1) phi = 0.5 + 0.5*erf; else phi = 0.5 - 0.5*erf; } else if (y < 4.0) { y3 = y2 * y; y5 = y4 * y; y7 = y6 * y; R1 = P20 + P21 * y + P22 * y2 + P23 * y3 + P24 * y4 + P25 * y5 + P26 * y6 + P27 * y7; R2 = Q20 + Q21 * y + Q22 * y2 + Q23 * y3 + Q24 * y4 + Q25 * y5 + Q26 * y6 + Q27 * y7; erfc = exp(-y2) * R1 / R2; if (sn == 1) phi = 1.0 - 0.5*erfc; else phi = 0.5*erfc; } else { z = y4; z2 = z * z; z3 = z2 * z; z4 = z2 * z2; R1 = P30 + P31 * z + P32 * z2 + P33 * z3 + P34 * z4; R2 = Q30 + Q31 * z + Q32 * z2 + Q33 * z3 + Q34 * z4; erfc = (exp(-y2)/y) * (1.0 / SQRTPI + R1 / (R2 * y2)); if (sn == 1) phi = 1.0 - 0.5*erfc; else phi = 0.5*erfc; } return phi; } /* --------------------------------------------------------------------------- UNIVARIATE NORMAL DENSITY ---------------------------------------------------------------------------*/ double dnorm1(double x) { if (x < -UPPERLIMIT) return 0.0; if (x > UPPERLIMIT) return 0.0; return OneUponSqrt2Pi * exp(-0.5 * x * x); } /* --------------------------------------------------------------------------- LN OF UNIVARIATE NORMAL DENSITY ---------------------------------------------------------------------------*/ double lndnorm1(double x) { return LnSqrt2Pi - (0.5*x*x); } /*--------------------------------------------------------------------------- BIVARIATE NORMAL PROBABILITY ---------------------------------------------------------------------------*/ #define con (twopi / 2.0) * 10.0e-10 double bivnor(double ah, double ak, double r) { /* based on alg 462 comm. acm oct 73 gives the probability that a bivariate normal exceeds (ah,ak). gh and gk are .5 times the right tail areas of ah, ak under a n(0,1) Tranlated from FORTRAN to ratfor using struct; from ratfor to C by hand. 
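   Added note (not in the original comment): with ah, ak and r as above,
   bivnor returns the upper-tail probability P(X > ah, Y > ak) for a
   standard bivariate normal with correlation r.  Because (-X, -Y) has
   the same correlation r, the joint CDF follows by symmetry:

       P(X <= h, Y <= k; r) = bivnor(-h, -k, r)

   which is the usual way such upper-tail routines are wrapped.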
*/ double a2, ap, b, cn, conex, ex, g2, gh, gk, gw, h2, h4, rr, s1, s2, sgn, sn, sp, sqr, t, temp, w2, wh, wk; int is; temp = -ah; gh = pnorm1(temp); gh = gh / 2.0; temp = -ak; gk = pnorm1(temp); gk = gk / 2.0; b = 0; if (r==0) b = 4*gh*gk; else { rr = 1-r*r; if (rr<0) return 0; /* zz; 29/6/02; was originally return; not sure */ if (rr!=0) { sqr = sqrt(rr); if (ah!=0) { b = gh; if (ah*ak<0) b = b-.5; else if (ah*ak==0) goto label10; } else if (ak==0) { b = atan(r/sqr)/twopi+.25; goto label50; } b = b+gk; if (ah==0) goto label20; label10: wh = -ah; wk = (ak/ah-r)/sqr; gw = 2*gh; is = -1; goto label30; label20: do { wh = -ak; wk = (ah/ak-r)/sqr; gw = 2*gk; is = 1; label30: sgn = -1; t = 0; if (wk!=0) { if (fabs(wk)>=1) { /* this brace added 28/6/02 by tyee */ if (fabs(wk)==1) { t = wk*gw*(1-gw)/2; goto label40; } else { sgn = -sgn; wh = wh*wk; g2 = pnorm1(wh); wk = 1/wk; if (wk<0) b = b+.5; b = b-(gw+g2)/2+gw*g2; } } h2 = wh*wh; a2 = wk*wk; h4 = h2*.5; ex = 0; if (h4<150.0) ex = exp(-h4); w2 = h4*ex; ap = 1; s2 = ap-ex; sp = ap; s1 = 0; sn = s1; conex = fabs(con/wk); do { cn = ap*s2/(sn+sp); s1 = s1+cn; if (fabs(cn)<=conex) break; sn = sp; sp = sp+1; s2 = s2-w2; w2 = w2*h4/sp; ap = -ap*a2; } while (1); t = (atan(wk)-wk*s1)/twopi; label40: b = b+sgn*t; } if (is>=0) break; } while(ak!=0); } else if (r>=0) if (ah>=ak) b = 2*gh; else b = 2*gk; else if (ah+ak<0) b = 2*(gh+gk)-1; } label50: if (b<0) b = 0; if (b>1) b = 1; return(b); } /* in the following function size measures the dimension of x singler == 1 if r is a scalar; otherwise r is same size as x & y */ /* This is called by S */ void pnorm2ccc(double *x, double *y, double *r, int *size, int *singler, double *ans) { int i; if(*singler == 1) { for(i = 0; i < *size; i++) ans[i] = bivnor(x[i], y[i], *r); } else { for(i = 0; i < *size; i++) ans[i] = bivnor(x[i], y[i], r[i]); } } /* main() { int i; double x,y,r; x = 0.0; y = 0.0; for(i = -9; i<=9; i++) { r = i / 10.0; Rprintf("%10.2f %10.6f \n",r,bivnor(x,y,r)); } } */ VGAM/src/rgam.f0000644000176200001440000004770113565414527012715 0ustar liggesusersC Output from Public domain Ratfor, version 1.01 subroutine dnaoqj0l(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz *2,coef,sz,ifys6woa, qcpiaj7f,wbkq9zyi,parms, scrtch, gp0xjetb,l3zp *bstu,e5knafcg,wep0oibc,fbd5yktj) implicit logical (a-z) integer kuzxj1lo, nk, gp0xjetb, l3zpbstu(3), e5knafcg, wep0oibc, f *bd5yktj double precision penalt, pjb6wfoq, xs(kuzxj1lo), ys(kuzxj1lo), ws( *kuzxj1lo), ankcghz2(nk+4), coef(nk), sz(kuzxj1lo), ifys6woa(kuzxj1 *lo), qcpiaj7f, wbkq9zyi, parms(3), scrtch(*) call hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz2,coef *,sz,ifys6woa, qcpiaj7f,l3zpbstu(1),wbkq9zyi,l3zpbstu(2), l3zpbstu( *3), parms(1),parms(2),parms(3), gp0xjetb, scrtch(1), scrtch(nk+1), *scrtch(2*nk+1),scrtch(3*nk+1),scrtch(4*nk+1), scrtch(5*nk+1),scrtc *h(6*nk+1),scrtch(7*nk+1),scrtch(8*nk+1), scrtch(9*nk+1),scrtch(9*n *k+e5knafcg*nk+1),scrtch(9*nk+2*e5knafcg*nk+1), e5knafcg,wep0oibc,f *bd5yktj) return end subroutine hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz *2,coef,sz,ifys6woa, qcpiaj7f,icrit,i9mwnvqt,ispar, c5aesxku, mynl7 *uaq,zustx4fw,tol, gp0xjetb, xwy, zvau2lct,f6lsuzax,fvh2rwtc,dcfir2 *no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp8wa,plj0trq *x, e5knafcg,wep0oibc,fbd5yktj) implicit logical (a-z) integer kuzxj1lo,nk, icrit,ispar, gp0xjetb, e5knafcg,wep0oibc,fbd5 *yktj integer c5aesxku double precision penalt,pjb6wfoq,xs(kuzxj1lo),ys(kuzxj1lo),ws(kuzx *j1lo), ankcghz2(nk+4), 
coef(nk),sz(kuzxj1lo),ifys6woa(kuzxj1lo), q *cpiaj7f,i9mwnvqt,mynl7uaq,zustx4fw,tol, xwy(nk), zvau2lct(nk),f6ls *uzax(nk),fvh2rwtc(nk),dcfir2no(nk), xecbg0pf(nk),z4grbpiq(nk),d7gl *zhbj(nk),v2eydbxs(nk), buhyalv4(e5knafcg,nk),fulcp8wa(e5knafcg,nk) *,plj0trqx(wep0oibc,nk) double precision t1,t2,ratio, a,b,c,d,e,qaltf0nz,xm,p,q,r,tol1,tol *2,u,v,w, fu,fv,fw,fx,x, ax,bx integer ayfnwr1v, viter double precision yjpnro8d, hmayv1xt yjpnro8d = 8.0d88 hmayv1xt = 0.0d0 d = 0.5d0 u = 0.5d0 ratio = 0.5d0 ayfnwr1v = 1 23000 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23002 if(ws(ayfnwr1v).gt.0.0d0)then ws(ayfnwr1v) = dsqrt(ws(ayfnwr1v)) endif 23001 ayfnwr1v = ayfnwr1v+1 goto 23000 23002 continue if(gp0xjetb .eq. 0)then call zosq7hub(xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs,ankcghz2,nk) call gt9iulbf(xs,ys,ws,ankcghz2, kuzxj1lo,nk, xwy,zvau2lct,f6lsuza *x,fvh2rwtc,dcfir2no) t1 = 0.0d0 t2 = 0.0d0 do23007 ayfnwr1v = 3,nk-3 t1 = t1 + zvau2lct(ayfnwr1v) 23007 continue 23008 continue do23009 ayfnwr1v = 3,nk-3 t2 = t2 + xecbg0pf(ayfnwr1v) 23009 continue 23010 continue ratio = t1/t2 gp0xjetb = 1 endif if(ispar .eq. 1)then call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, ankcghz *2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,fvh2 *rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp *8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj) return endif ax = mynl7uaq bx = zustx4fw c = 0.381966011250105097d0 qaltf0nz = 2.0d-5 viter = 0 a = ax b = bx v = a + c*(b - a) w = v x = v e = 0.0d0 i9mwnvqt = ratio * dexp((-2.0d0 + x*6.0d0) * dlog(16.0d0)) call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, ankcghz *2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,fvh2 *rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp *8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj) fx = qcpiaj7f fv = fx fw = fx 23013 if(fbd5yktj .eq. 0)then viter = viter + 1 xm = 0.5d0*(a + b) tol1 = qaltf0nz*dabs(x) + tol/3.0d0 tol2 = 2.0d0*tol1 if((dabs(x - xm) .le. (tol2 - 0.5d0*(b - a))) .or. (viter .gt. c5a *esxku))then go to 90 endif if((dabs(e) .le. tol1) .or. (fx .ge. yjpnro8d) .or. (fv .ge. yjpnr *o8d) .or. (fw .ge. yjpnro8d))then go to 40 endif r = (x - w)*(fx - fv) q = (x - v)*(fx - fw) p = (x - v)*q - (x - w)*r q = 2.0d0 * (q - r) if(q .gt. 0.0d0)then p = -p endif q = dabs(q) r = e e = d if((dabs(p) .ge. dabs(0.5d0*q*r)) .or. (q .eq. 0.0d0))then go to 40 endif if((p .le. q*(a - x)) .or. (p .ge. q*(b - x)))then go to 40 endif d = p/q u = x + d if((u - a) .lt. tol2)then d = dsign(tol1, xm - x) endif if((b - u) .lt. tol2)then d = dsign(tol1, xm - x) endif go to 50 40 if(x .ge. xm)then e = a - x else e = b - x endif d = c*e 50 if(dabs(d) .ge. tol1)then u = x + d else u = x + dsign(tol1, d) endif i9mwnvqt = ratio * dexp((-2.0d0 + u*6.0) * dlog(16.0d0)) call wmhctl9x(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk,icrit, ankcghz *2,coef,sz,ifys6woa,qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax,fvh2 *rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,fulcp *8wa,plj0trqx,e5knafcg,wep0oibc,fbd5yktj) fu = qcpiaj7f if(fu .gt. yjpnro8d)then fu = 2.0d0 * yjpnro8d endif if(fu .le. fx)then if(u .ge. x)then a = x else b = x endif v = w fv = fw w = x fw = fx x = u fx = fu else if(u .lt. x)then a = u else b = u endif if((fu .le. fw) .or. (w .eq. x))then v = w fv = fw w = u fw = fu else if((fu .le. fv) .or. (v .eq. x) .or. (v .eq. 
w))then v = u fv = fu endif endif endif goto 23013 endif 23014 continue 90 hmayv1xt = 0.0d0 i9mwnvqt = ratio * dexp((-2.0d0 + x*6.0d0) * dlog(16.0d0)) qcpiaj7f = fx return return end subroutine zosq7hub(xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs,tb,nb) implicit logical (a-z) integer nb double precision xecbg0pf(nb),z4grbpiq(nb),d7glzhbj(nb),v2eydbxs(n *b),tb(nb+4) integer dqlr5bse,ilo,pqzfxw4i, three3, ifour4, nbp1 integer ayfnwr1v,iii,yq6lorbx integer i2svdbx3tk double precision g9fvdrbw(4,3),work(16),yw1(4),yw2(4), wpt double precision othird othird = 1.0d0 / 3.0d0 three3 = 3 ifour4 = 4 nbp1 = nb + 1 do23045 ayfnwr1v = 1,nb xecbg0pf(ayfnwr1v) = 0.0d0 z4grbpiq(ayfnwr1v) = 0.0d0 d7glzhbj(ayfnwr1v) = 0.0d0 v2eydbxs(ayfnwr1v) = 0.0d0 23045 continue 23046 continue ilo = 1 do23047 ayfnwr1v = 1,nb call vinterv(tb(1), nbp1 ,tb(ayfnwr1v),dqlr5bse,pqzfxw4i) call vbsplvd(tb,ifour4,tb(ayfnwr1v),dqlr5bse,work,g9fvdrbw,three3) do23049 iii = 1,4 yw1(iii) = g9fvdrbw(iii,3) 23049 continue 23050 continue call vbsplvd(tb,ifour4,tb(ayfnwr1v+1),dqlr5bse,work,g9fvdrbw,three *3) do23051 iii = 1,4 yw2(iii) = g9fvdrbw(iii,3) - yw1(iii) 23051 continue 23052 continue wpt = tb(ayfnwr1v+1) - tb(ayfnwr1v) if(dqlr5bse .ge. 4)then do23055 iii = 1,4 yq6lorbx = iii i2svdbx3tk = dqlr5bse-4+iii xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt * (yw1(iii)*yw1( *yq6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 * + yw2(iii)*yw2(yq6lorbx)*othird) yq6lorbx = iii+1 if(yq6lorbx .le. 4)then z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) endif yq6lorbx = iii+2 if(yq6lorbx .le. 4)then d7glzhbj(i2svdbx3tk) = d7glzhbj(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) endif yq6lorbx = iii+3 if(yq6lorbx .le. 4)then v2eydbxs(i2svdbx3tk) = v2eydbxs(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) endif 23055 continue 23056 continue else if(dqlr5bse .eq. 3)then do23065 iii = 1,3 yq6lorbx = iii i2svdbx3tk = dqlr5bse-3+iii xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) yq6lorbx = iii+1 if(yq6lorbx .le. 3)then z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) endif yq6lorbx = iii+2 if(yq6lorbx .le. 3)then d7glzhbj(i2svdbx3tk) = d7glzhbj(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) endif 23065 continue 23066 continue else if(dqlr5bse .eq. 2)then do23073 iii = 1,2 yq6lorbx = iii i2svdbx3tk = dqlr5bse-2+iii xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) yq6lorbx = iii+1 if(yq6lorbx .le. 2)then z4grbpiq(i2svdbx3tk) = z4grbpiq(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) endif 23073 continue 23074 continue else if(dqlr5bse .eq. 
1)then do23079 iii = 1,1 yq6lorbx = iii i2svdbx3tk = dqlr5bse-1+iii xecbg0pf(i2svdbx3tk) = xecbg0pf(i2svdbx3tk) + wpt* (yw1(iii)*yw1(y *q6lorbx) + (yw2(iii)*yw1(yq6lorbx) + yw2(yq6lorbx)*yw1(iii))*0.50 *+ yw2(iii)*yw2(yq6lorbx)*othird) 23079 continue 23080 continue endif endif endif endif 23047 continue 23048 continue return end subroutine vmnweiy2(buhyalv4,fulcp8wa,plj0trqx, e5knafcg,nk,wep0oi *bc,iflag) implicit logical (a-z) integer e5knafcg,nk,wep0oibc,iflag double precision buhyalv4(e5knafcg,nk), fulcp8wa(e5knafcg,nk), plj *0trqx(wep0oibc,nk) integer ayfnwr1v, yq6lorbx, gp1jxzuh double precision wjm3(3),wjm2(2),wjm1(1),c0,c1,c2,c3 double precision pcsuow9k, qdbgu6oi, upwkh5xz, rul5fnyd, ueydbrg6, * plce2srm, k3yvomnh, bfdjhu7l, ctfvwdu0 c1 = 0.0d0 c2 = 0.0d0 c3 = 0.0d0 wjm3(1) = 0.0d0 wjm3(2) = 0.0d0 wjm3(3) = 0.0d0 wjm2(1) = 0.0d0 wjm2(2) = 0.0d0 wjm1(1) = 0.0d0 do23081 ayfnwr1v = 1,nk yq6lorbx = nk-ayfnwr1v+1 c0 = 1.0d0 / buhyalv4(4,yq6lorbx) if(yq6lorbx .le. (nk-3))then c1 = buhyalv4(1,yq6lorbx+3)*c0 c2 = buhyalv4(2,yq6lorbx+2)*c0 c3 = buhyalv4(3,yq6lorbx+1)*c0 else if(yq6lorbx .eq. (nk-2))then c1 = 0.0d0 c2 = buhyalv4(2,yq6lorbx+2)*c0 c3 = buhyalv4(3,yq6lorbx+1)*c0 else if(yq6lorbx .eq. (nk-1))then c1 = 0.0d0 c2 = 0.0d0 c3 = buhyalv4(3,yq6lorbx+1)*c0 else if(yq6lorbx .eq. nk)then c1 = 0.0d0 c2 = 0.0d0 c3 = 0.0d0 endif endif endif endif pcsuow9k = c1*wjm3(1) qdbgu6oi = c2*wjm3(2) upwkh5xz = c3*wjm3(3) rul5fnyd = c1*wjm3(2) ueydbrg6 = c2*wjm2(1) plce2srm = c3*wjm2(2) k3yvomnh = c1*wjm3(3) bfdjhu7l = c2*wjm2(2) ctfvwdu0 = c3*wjm1(1) fulcp8wa(1,yq6lorbx) = 0.0d0 - (pcsuow9k+qdbgu6oi+upwkh5xz) fulcp8wa(2,yq6lorbx) = 0.0d0 - (rul5fnyd+ueydbrg6+plce2srm) fulcp8wa(3,yq6lorbx) = 0.0d0 - (k3yvomnh+bfdjhu7l+ctfvwdu0) fulcp8wa(4,yq6lorbx) = c0**2 + c1*(pcsuow9k + 2.0d0*(qdbgu6oi + up *wkh5xz)) + c2*(ueydbrg6 + 2.0d0* plce2srm) + c3*ctfvwdu0 wjm3(1) = wjm2(1) wjm3(2) = wjm2(2) wjm3(3) = fulcp8wa(2,yq6lorbx) wjm2(1) = wjm1(1) wjm2(2) = fulcp8wa(3,yq6lorbx) wjm1(1) = fulcp8wa(4,yq6lorbx) 23081 continue 23082 continue if(iflag .eq. 0)then return endif do23093 ayfnwr1v = 1,nk yq6lorbx = nk-ayfnwr1v+1 gp1jxzuh = 1 23095 if(.not.(gp1jxzuh .le. 4 .and. yq6lorbx+gp1jxzuh-1 .le. nk))goto 2 *3097 plj0trqx(yq6lorbx,yq6lorbx+gp1jxzuh-1) = fulcp8wa(5-gp1jxzuh,yq6lo *rbx) 23096 gp1jxzuh = gp1jxzuh+1 goto 23095 23097 continue 23093 continue 23094 continue do23098 ayfnwr1v = 1,nk yq6lorbx = nk-ayfnwr1v+1 gp1jxzuh = yq6lorbx-4 23100 if(.not.(gp1jxzuh .ge. 
1))goto 23102 c0 = 1.0 / buhyalv4(4,gp1jxzuh) c1 = buhyalv4(1,gp1jxzuh+3)*c0 c2 = buhyalv4(2,gp1jxzuh+2)*c0 c3 = buhyalv4(3,gp1jxzuh+1)*c0 plj0trqx(gp1jxzuh,yq6lorbx) = 0.0d0- ( c1*plj0trqx(gp1jxzuh+3,yq6l *orbx) + c2*plj0trqx(gp1jxzuh+2,yq6lorbx) + c3*plj0trqx(gp1jxzuh+1, *yq6lorbx) ) 23101 gp1jxzuh = gp1jxzuh-1 goto 23100 23102 continue 23098 continue 23099 continue return end subroutine wmhctl9x(penalt,pjb6wfoq,x,y,w, kuzxj1lo,nk,icrit, ankc *ghz2,coef,sz,ifys6woa, qcpiaj7f, i9mwnvqt, xwy, zvau2lct,f6lsuzax, *fvh2rwtc,dcfir2no, xecbg0pf,z4grbpiq,d7glzhbj,v2eydbxs, buhyalv4,f *ulcp8wa,plj0trqx, e5knafcg,wep0oibc,info) implicit logical (a-z) integer kuzxj1lo,nk,icrit, e5knafcg,wep0oibc,info double precision penalt,pjb6wfoq,x(kuzxj1lo),y(kuzxj1lo),w(kuzxj1l *o) double precision ankcghz2(nk+4), coef(nk),sz(kuzxj1lo),ifys6woa(ku *zxj1lo), qcpiaj7f, i9mwnvqt, xwy(nk) double precision zvau2lct(nk),f6lsuzax(nk),fvh2rwtc(nk),dcfir2no(n *k) double precision xecbg0pf(nk),z4grbpiq(nk),d7glzhbj(nk),v2eydbxs(n *k), buhyalv4(e5knafcg,nk),fulcp8wa(e5knafcg,nk),plj0trqx(wep0oibc, *nk) double precision resss, work(16), b0,b1,b2,b3,qaltf0nz, g9fvdrbw(4 *,1), xv,eqdf double precision qtce8hzo double precision rxeqjn0y integer izero0, three3, ilo, pqzfxw4i, yq6lorbx, ayfnwr1v integer icoef, dqlr5bse, ifour4, hbsl0gto, nkp1 ilo = 1 qaltf0nz = 0.1d-10 izero0 = 0 three3 = 3 ifour4 = 4 hbsl0gto = 1 nkp1 = nk + 1 do23103 ayfnwr1v = 1,nk coef(ayfnwr1v) = xwy(ayfnwr1v) 23103 continue 23104 continue do23105 ayfnwr1v = 1,nk buhyalv4(4,ayfnwr1v) = zvau2lct(ayfnwr1v)+i9mwnvqt*xecbg0pf(ayfnwr *1v) 23105 continue 23106 continue do23107 ayfnwr1v = 1,(nk-1) buhyalv4(3,ayfnwr1v+1) = f6lsuzax(ayfnwr1v)+i9mwnvqt*z4grbpiq(ayfn *wr1v) 23107 continue 23108 continue do23109 ayfnwr1v = 1,(nk-2) buhyalv4(2,ayfnwr1v+2) = fvh2rwtc(ayfnwr1v)+i9mwnvqt*d7glzhbj(ayfn *wr1v) 23109 continue 23110 continue do23111 ayfnwr1v = 1,(nk-3) buhyalv4(1,ayfnwr1v+3) = dcfir2no(ayfnwr1v)+i9mwnvqt*v2eydbxs(ayfn *wr1v) 23111 continue 23112 continue call dpbfa8(buhyalv4,e5knafcg,nk,three3,info) if(info .ne. 0)then return endif call dpbsl8(buhyalv4,e5knafcg,nk,three3,coef) icoef = 1 do23115 ayfnwr1v = 1,kuzxj1lo xv = x(ayfnwr1v) call wbvalue(ankcghz2,coef, nk,ifour4,xv,izero0, sz(ayfnwr1v)) 23115 continue 23116 continue if(icrit .eq. 0)then return endif call vmnweiy2(buhyalv4,fulcp8wa,plj0trqx, e5knafcg,nk,wep0oibc,ize *ro0) do23119 ayfnwr1v = 1,kuzxj1lo xv = x(ayfnwr1v) call vinterv(ankcghz2(1), nkp1 ,xv,dqlr5bse,pqzfxw4i) if(pqzfxw4i .eq. -1)then dqlr5bse = 4 xv = ankcghz2(4) + qaltf0nz endif if(pqzfxw4i .eq. 1)then dqlr5bse = nk xv = ankcghz2(nk+1) - qaltf0nz endif yq6lorbx = dqlr5bse-3 call vbsplvd(ankcghz2,ifour4,xv,dqlr5bse,work,g9fvdrbw,hbsl0gto) b0 = g9fvdrbw(1,1) b1 = g9fvdrbw(2,1) b2 = g9fvdrbw(3,1) b3 = g9fvdrbw(4,1) qtce8hzo = (b0 *(fulcp8wa(4,yq6lorbx)*b0 + 2.0d0*(fulcp8wa(3,yq6lo *rbx)*b1 + fulcp8wa(2,yq6lorbx)*b2 + fulcp8wa(1,yq6lorbx)*b3)) + b1 * *(fulcp8wa(4,yq6lorbx+1)*b1 + 2.0d0*(fulcp8wa(3,yq6lorbx+1)*b2 + *fulcp8wa(2,yq6lorbx+1)*b3)) + b2 *(fulcp8wa(4,yq6lorbx+2)*b2 + 2.0 *d0* fulcp8wa(3,yq6lorbx+2)*b3 )+ b3**2* fulcp8wa(4,yq6lorbx+3)) * *w(ayfnwr1v)**2 ifys6woa(ayfnwr1v) = qtce8hzo 23119 continue 23120 continue if(icrit .eq. 
1)then resss = 0.0d0 eqdf = 0.0d0 rxeqjn0y = 0.0d0 do23127 ayfnwr1v = 1,kuzxj1lo resss = resss + ((y(ayfnwr1v)-sz(ayfnwr1v))*w(ayfnwr1v))**2 eqdf = eqdf + ifys6woa(ayfnwr1v) rxeqjn0y = rxeqjn0y + w(ayfnwr1v)*w(ayfnwr1v) 23127 continue 23128 continue qcpiaj7f = (resss/rxeqjn0y)/((1.0d0-(pjb6wfoq+penalt*eqdf)/rxeqjn0 *y)**2) else if(icrit .eq. 2)then qcpiaj7f = 0.0d0 rxeqjn0y = 0.0d0 do23131 ayfnwr1v = 1,kuzxj1lo qcpiaj7f = qcpiaj7f + (((y(ayfnwr1v)-sz(ayfnwr1v))*w(ayfnwr1v))/(1 *.0d0-ifys6woa(ayfnwr1v)))**2 rxeqjn0y = rxeqjn0y + w(ayfnwr1v)*w(ayfnwr1v) 23131 continue 23132 continue qcpiaj7f = qcpiaj7f / rxeqjn0y else qcpiaj7f = 0.0d0 do23133 ayfnwr1v = 1,kuzxj1lo qcpiaj7f = qcpiaj7f+ifys6woa(ayfnwr1v) 23133 continue 23134 continue qcpiaj7f = 3.0d0 + (pjb6wfoq-qcpiaj7f)**2 endif endif return end subroutine gt9iulbf(he7mqnvy,ghz9vuba,w,gkdx5jal, rvy1fpli,kuzxj1l *o, bhcji9glto,zvau2lct,f6lsuzax,fvh2rwtc,dcfir2no) implicit logical (a-z) integer rvy1fpli,kuzxj1lo double precision he7mqnvy(rvy1fpli),ghz9vuba(rvy1fpli),w(rvy1fpli) *,gkdx5jal(kuzxj1lo+4), bhcji9glto(kuzxj1lo), zvau2lct(kuzxj1lo),f6 *lsuzax(kuzxj1lo),fvh2rwtc(kuzxj1lo),dcfir2no(kuzxj1lo) double precision qaltf0nz,g9fvdrbw(4,1),work(16) double precision w2svdbx3tk, wv2svdbx3tk integer yq6lorbx,ayfnwr1v,ilo,dqlr5bse,pqzfxw4i, nhnpt1zym1 integer ifour4, hbsl0gto hbsl0gto = 1 ifour4 = 4 nhnpt1zym1 = kuzxj1lo + 1 do23135 ayfnwr1v = 1,kuzxj1lo bhcji9glto(ayfnwr1v) = 0.0d0 zvau2lct(ayfnwr1v) = 0.0d0 f6lsuzax(ayfnwr1v) = 0.0d0 fvh2rwtc(ayfnwr1v) = 0.0d0 dcfir2no(ayfnwr1v) = 0.0d0 23135 continue 23136 continue ilo = 1 qaltf0nz = 0.1d-9 do23137 ayfnwr1v = 1,rvy1fpli call vinterv(gkdx5jal(1), nhnpt1zym1 ,he7mqnvy(ayfnwr1v),dqlr5bse, *pqzfxw4i) if(pqzfxw4i .eq. 1)then if(he7mqnvy(ayfnwr1v) .le. (gkdx5jal(dqlr5bse)+qaltf0nz))then dqlr5bse = dqlr5bse-1 else return endif endif call vbsplvd(gkdx5jal,ifour4,he7mqnvy(ayfnwr1v),dqlr5bse,work,g9fv *drbw,hbsl0gto) yq6lorbx = dqlr5bse-4+1 w2svdbx3tk = w(ayfnwr1v)**2 wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(1,1) bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba *(ayfnwr1v) zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(1,1 *) f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(2,1 *) fvh2rwtc(yq6lorbx) = fvh2rwtc(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,1 *) dcfir2no(yq6lorbx) = dcfir2no(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1 *) yq6lorbx = dqlr5bse-4+2 wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(2,1) bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba *(ayfnwr1v) zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(2,1 *) f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,1 *) fvh2rwtc(yq6lorbx) = fvh2rwtc(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1 *) yq6lorbx = dqlr5bse-4+3 wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(3,1) bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba *(ayfnwr1v) zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(3,1 *) f6lsuzax(yq6lorbx) = f6lsuzax(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1 *) yq6lorbx = dqlr5bse wv2svdbx3tk = w2svdbx3tk * g9fvdrbw(4,1) bhcji9glto(yq6lorbx) = bhcji9glto(yq6lorbx) + wv2svdbx3tk*ghz9vuba *(ayfnwr1v) zvau2lct(yq6lorbx) = zvau2lct(yq6lorbx) + wv2svdbx3tk*g9fvdrbw(4,1 *) 23137 continue 23138 continue return end VGAM/src/veigen.f0000644000176200001440000005255413565414527013246 0ustar liggesusers subroutine veigenf(M, n, x, vals, ov, vec, junk1, junk2, * wk, rowi, coli, dimmv, ec) implicit logical (a-z) integer M, n, ov, ec, i, k, dimmv, MM2, * rowi(M*(M+1)/2), coli(M*(M+1)/2), full 
double precision x(dimmv, n), vals(M, n), vec(M,M,n), junk1(M), * junk2(M), wk(M,M) MM2 = M*(M+1)/2 if(dimmv.eq.MM2) then full = 1 else full = 0 end if do 300 i=1,n do 600 k=1,dimmv wk(rowi(k), coli(k)) = x(k,i) wk(coli(k), rowi(k)) = wk(rowi(k), coli(k)) 600 continue if(full.eq.0) then do 500 k=dimmv+1,MM2 wk(rowi(k), coli(k)) = 0.0d0 wk(coli(k), rowi(k)) = 0.0d0 500 continue end if c call vrs818(M, M, wk, vals(1,i), ov, vec(1,1,i), junk1, * junk2, ec) if(ec.ne.0) goto 200 300 continue c 200 return end SUBROUTINE VRS818(NM,N,A,W,MATZ,Z,FV1,FV2,IERR) C INTEGER N,NM,IERR,MATZ DOUBLE PRECISION A(NM,N),W(N),Z(NM,N),FV1(N),FV2(N) C C THIS SUBROUTINE CALLS THE RECOMMENDED SEQUENCE OF C SUBROUTINES FROM THE EIGENSYSTEM SUBROUTINE PACKAGE (EISPACK) C TO FIND THE EIGENVALUES AND EIGENVECTORS (IF DESIRED) C OF A REAL SYMMETRIC MATRIX. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF THE TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX A. C C A CONTAINS THE REAL SYMMETRIC MATRIX. C C MATZ IS AN INTEGER VARIABLE SET EQUAL TO ZERO IF C ONLY EIGENVALUES ARE DESIRED. OTHERWISE IT IS SET TO C ANY NON-ZERO INTEGER FOR BOTH EIGENVALUES AND EIGENVECTORS. C C ON OUTPUT C C W CONTAINS THE EIGENVALUES IN ASCENDING ORDER. C C Z CONTAINS THE EIGENVECTORS IF MATZ IS NOT ZERO. C C IERR IS AN INTEGER OUTPUT VARIABLE SET EQUAL TO AN ERROR C COMPLETION CODE DESCRIBED IN THE DOCUMENTATION FOR TQLRAT C AND TQL2. THE NORMAL COMPLETION CODE IS ZERO. C C FV1 AND FV2 ARE TEMPORARY STORAGE ARRAYS. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C IF (N .LE. NM) GO TO 10 IERR = 10 * N GO TO 50 C 10 IF (MATZ .NE. 0) GO TO 20 C .......... FIND EIGENVALUES ONLY .......... CALL VTRED1(NM,N,A,W,FV1,FV2) CALL TQLRA9(N,W,FV2,IERR) GO TO 50 C .......... FIND BOTH EIGENVALUES AND EIGENVECTORS .......... 20 CALL VTRED2(NM,N,A,W,FV1,Z) CALL VTQL21(NM,N,W,FV1,Z,IERR) 50 RETURN END SUBROUTINE VTQL21(NM,N,D,E,Z,IERR) C INTEGER I,J,K,L,M,N,II,L1,L2,NM,MML,IERR DOUBLE PRECISION D(N),E(N),Z(NM,N) DOUBLE PRECISION C,C2,C3,DL1,EL1,F,G,H,P,R,S,S2,TST1,TST2,PYTHA9 C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL2, C NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND C WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). C C THIS SUBROUTINE FINDS THE EIGENVALUES AND EIGENVECTORS C OF A SYMMETRIC TRIDIAGONAL MATRIX BY THE QL METHOD. C THE EIGENVECTORS OF A FULL SYMMETRIC MATRIX CAN ALSO C BE FOUND IF TRED2 HAS BEEN USED TO REDUCE THIS C FULL MATRIX TO TRIDIAGONAL FORM. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX C IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. C C Z CONTAINS THE TRANSFORMATION MATRIX PRODUCED IN THE C REDUCTION BY TRED2, IF PERFORMED. IF THE EIGENVECTORS C OF THE TRIDIAGONAL MATRIX ARE DESIRED, Z MUST CONTAIN C THE IDENTITY MATRIX. C C ON OUTPUT C C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT BUT C UNORDERED FOR INDICES 1,2,...,IERR-1. C C E HAS BEEN DESTROYED. 
C C Z CONTAINS ORTHONORMAL EIGENVECTORS OF THE SYMMETRIC C TRIDIAGONAL (OR FULL) MATRIX. IF AN ERROR EXIT IS MADE, C Z CONTAINS THE EIGENVECTORS ASSOCIATED WITH THE STORED C EIGENVALUES. C C IERR IS SET TO C ZERO FOR NORMAL RETURN, C J IF THE J-TH EIGENVALUE HAS NOT BEEN C DETERMINED AFTER 30 ITERATIONS. C C CALLS PYTHA9 FOR DSQRT(A*A + B*B) . C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ c c unnecessary initialization of C3 and S2 to keep g77 -Wall happy c C3 = 0.0D0 S2 = 0.0D0 C IERR = 0 IF (N .EQ. 1) GO TO 1001 C DO 100 I = 2, N E(I-1) = E(I) 100 CONTINUE C F = 0.0D0 TST1 = 0.0D0 E(N) = 0.0D0 C DO 240 L = 1, N J = 0 H = DABS(D(L)) + DABS(E(L)) IF (TST1 .LT. H) TST1 = H C .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... DO 110 M = L, N TST2 = TST1 + DABS(E(M)) IF (TST2 .EQ. TST1) GO TO 120 C .......... E(N) IS ALWAYS ZERO, SO THERE IS NO EXIT C THROUGH THE BOTTOM OF THE LOOP .......... 110 CONTINUE C 120 IF (M .EQ. L) GO TO 220 130 IF (J .EQ. 30) GO TO 1000 J = J + 1 C .......... FORM SHIFT .......... L1 = L + 1 L2 = L1 + 1 G = D(L) P = (D(L1) - G) / (2.0D0 * E(L)) R = PYTHA9(P,1.0D0) D(L) = E(L) / (P + DSIGN(R,P)) D(L1) = E(L) * (P + DSIGN(R,P)) DL1 = D(L1) H = G - D(L) IF (L2 .GT. N) GO TO 145 C DO 140 I = L2, N D(I) = D(I) - H 140 CONTINUE C 145 F = F + H C .......... QL TRANSFORMATION .......... P = D(M) C = 1.0D0 C2 = C EL1 = E(L1) S = 0.0D0 MML = M - L C .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... DO 200 II = 1, MML C3 = C2 C2 = C S2 = S I = M - II G = C * E(I) H = C * P R = PYTHA9(P,E(I)) E(I+1) = S * R S = E(I) / R C = P / R P = C * D(I) - S * G D(I+1) = H + S * (C * G + S * D(I)) C .......... FORM VECTOR .......... DO 180 K = 1, N H = Z(K,I+1) Z(K,I+1) = S * Z(K,I) + C * H Z(K,I) = C * Z(K,I) - S * H 180 CONTINUE C 200 CONTINUE C P = -S * S2 * C3 * EL1 * E(L) / DL1 E(L) = S * P D(L) = C * P TST2 = TST1 + DABS(E(L)) IF (TST2 .GT. TST1) GO TO 130 220 D(L) = D(L) + F 240 CONTINUE C .......... ORDER EIGENVALUES AND EIGENVECTORS .......... DO 300 II = 2, N I = II - 1 K = I P = D(I) C DO 260 J = II, N IF (D(J) .GE. P) GO TO 260 K = J P = D(J) 260 CONTINUE C IF (K .EQ. I) GO TO 300 D(K) = D(I) D(I) = P C DO 280 J = 1, N P = Z(J,I) Z(J,I) = Z(J,K) Z(J,K) = P 280 CONTINUE C 300 CONTINUE C GO TO 1001 C .......... SET ERROR -- NO CONVERGENCE TO AN C EIGENVALUE AFTER 30 ITERATIONS .......... 1000 IERR = L 1001 RETURN END SUBROUTINE TQLRA9(N,D,E2,IERR) C INTEGER I,J,L,M,N,II,L1,MML,IERR DOUBLE PRECISION D(N),E2(N) DOUBLE PRECISION B,C,F,G,H,P,R,S,T,EPSLO9,PYTHA9 C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQLRAT, C ALGORITHM 464, COMM. ACM 16, 689(1973) BY REINSCH. C C THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC C TRIDIAGONAL MATRIX BY THE RATIONAL QL METHOD. C C ON INPUT C C N IS THE ORDER OF THE MATRIX. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. C C E2 CONTAINS THE SQUARES OF THE SUBDIAGONAL ELEMENTS OF THE C INPUT MATRIX IN ITS LAST N-1 POSITIONS. E2(1) IS ARBITRARY. C C ON OUTPUT C C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND C ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE C THE SMALLEST EIGENVALUES. C C E2 HAS BEEN DESTROYED. C C IERR IS SET TO C ZERO FOR NORMAL RETURN, C J IF THE J-TH EIGENVALUE HAS NOT BEEN C DETERMINED AFTER 30 ITERATIONS. 
C C CALLS PYTHA9 FOR DSQRT(A*A + B*B) . C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ c c unnecessary initialization of B and C to keep g77 -Wall happy c B = 0.0D0 C = 0.0D0 C IERR = 0 IF (N .EQ. 1) GO TO 1001 C DO 100 I = 2, N E2(I-1) = E2(I) 100 CONTINUE C F = 0.0D0 T = 0.0D0 E2(N) = 0.0D0 C DO 290 L = 1, N J = 0 H = DABS(D(L)) + DSQRT(E2(L)) IF (T .GT. H) GO TO 105 T = H B = EPSLO9(T) C = B * B C .......... LOOK FOR SMALL SQUARED SUB-DIAGONAL ELEMENT .......... 105 DO 110 M = L, N IF (E2(M) .LE. C) GO TO 120 C .......... E2(N) IS ALWAYS ZERO, SO THERE IS NO EXIT C THROUGH THE BOTTOM OF THE LOOP .......... 110 CONTINUE C 120 IF (M .EQ. L) GO TO 210 130 IF (J .EQ. 30) GO TO 1000 J = J + 1 C .......... FORM SHIFT .......... L1 = L + 1 S = DSQRT(E2(L)) G = D(L) P = (D(L1) - G) / (2.0D0 * S) R = PYTHA9(P,1.0D0) D(L) = S / (P + DSIGN(R,P)) H = G - D(L) C DO 140 I = L1, N D(I) = D(I) - H 140 CONTINUE C F = F + H C .......... RATIONAL QL TRANSFORMATION .......... G = D(M) IF (G .EQ. 0.0D0) G = B H = G S = 0.0D0 MML = M - L C .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... DO 200 II = 1, MML I = M - II P = G * H R = P + E2(I) E2(I+1) = S * R S = E2(I) / R D(I+1) = H + S * (H + D(I)) G = D(I) - E2(I) / G IF (G .EQ. 0.0D0) G = B H = G * P / R 200 CONTINUE C E2(L) = S * G D(L) = H C .......... GUARD AGAINST UNDERFLOW IN CONVERGENCE TEST .......... IF (H .EQ. 0.0D0) GO TO 210 IF (DABS(E2(L)) .LE. DABS(C/H)) GO TO 210 E2(L) = H * E2(L) IF (E2(L) .NE. 0.0D0) GO TO 130 210 P = D(L) + F C .......... ORDER EIGENVALUES .......... IF (L .EQ. 1) GO TO 250 C .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... DO 230 II = 2, L I = L + 2 - II IF (P .GE. D(I-1)) GO TO 270 D(I) = D(I-1) 230 CONTINUE C 250 I = 1 270 D(I) = P 290 CONTINUE C GO TO 1001 C .......... SET ERROR -- NO CONVERGENCE TO AN C EIGENVALUE AFTER 30 ITERATIONS .......... 1000 IERR = L 1001 RETURN END SUBROUTINE VTRED1(NM,N,A,D,E,E2) C INTEGER I,J,K,L,N,II,NM,JP1 DOUBLE PRECISION A(NM,N),D(N),E(N),E2(N) DOUBLE PRECISION F,G,H,SCALE C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED1, C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971). C C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX C TO A SYMMETRIC TRIDIAGONAL MATRIX USING C ORTHOGONAL SIMILARITY TRANSFORMATIONS. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED. C C ON OUTPUT C C A CONTAINS INFORMATION ABOUT THE ORTHOGONAL TRANS- C FORMATIONS USED IN THE REDUCTION IN ITS STRICT LOWER C TRIANGLE. THE FULL UPPER TRIANGLE OF A IS UNALTERED. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO. C C E2 CONTAINS THE SQUARES OF THE CORRESPONDING ELEMENTS OF E. C E2 MAY COINCIDE WITH E IF THE SQUARES ARE NOT NEEDED. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. 
C C ------------------------------------------------------------------ C DO 100 I = 1, N D(I) = A(N,I) A(N,I) = A(I,I) 100 CONTINUE C .......... FOR I=N STEP -1 UNTIL 1 DO -- .......... DO 300 II = 1, N I = N + 1 - II L = I - 1 H = 0.0D0 SCALE = 0.0D0 IF (L .LT. 1) GO TO 130 C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) .......... DO 120 K = 1, L SCALE = SCALE + DABS(D(K)) 120 CONTINUE C IF (SCALE .NE. 0.0D0) GO TO 140 C DO 125 J = 1, L D(J) = A(L,J) A(L,J) = A(I,J) A(I,J) = 0.0D0 125 CONTINUE C 130 E(I) = 0.0D0 E2(I) = 0.0D0 GO TO 300 C 140 DO 150 K = 1, L D(K) = D(K) / SCALE H = H + D(K) * D(K) 150 CONTINUE C E2(I) = SCALE * SCALE * H F = D(L) G = -DSIGN(DSQRT(H),F) E(I) = SCALE * G H = H - F * G D(L) = F - G IF (L .EQ. 1) GO TO 285 C .......... FORM A*U .......... DO 170 J = 1, L E(J) = 0.0D0 170 CONTINUE C DO 240 J = 1, L F = D(J) G = E(J) + A(J,J) * F JP1 = J + 1 IF (L .LT. JP1) GO TO 220 C DO 200 K = JP1, L G = G + A(K,J) * D(K) E(K) = E(K) + A(K,J) * F 200 CONTINUE C 220 E(J) = G 240 CONTINUE C .......... FORM P .......... F = 0.0D0 C DO 245 J = 1, L E(J) = E(J) / H F = F + E(J) * D(J) 245 CONTINUE C H = F / (H + H) C .......... FORM Q .......... DO 250 J = 1, L E(J) = E(J) - H * D(J) 250 CONTINUE C .......... FORM REDUCED A .......... DO 280 J = 1, L F = D(J) G = E(J) C DO 260 K = J, L A(K,J) = A(K,J) - F * E(K) - G * D(K) 260 CONTINUE C 280 CONTINUE C 285 DO 290 J = 1, L F = D(J) D(J) = A(L,J) A(L,J) = A(I,J) A(I,J) = F * SCALE 290 CONTINUE C 300 CONTINUE C RETURN END SUBROUTINE VTRED2(NM,N,A,D,E,Z) C INTEGER I,J,K,L,N,II,NM,JP1 DOUBLE PRECISION A(NM,N),D(N),E(N),Z(NM,N) DOUBLE PRECISION F,G,H,HH,SCALE C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED2, C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971). C C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX TO A C SYMMETRIC TRIDIAGONAL MATRIX USING AND ACCUMULATING C ORTHOGONAL SIMILARITY TRANSFORMATIONS. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED. C C ON OUTPUT C C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO. C C Z CONTAINS THE ORTHOGONAL TRANSFORMATION MATRIX C PRODUCED IN THE REDUCTION. C C A AND Z MAY COINCIDE. IF DISTINCT, A IS UNALTERED. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C DO 100 I = 1, N C DO 80 J = I, N Z(J,I) = A(J,I) 80 CONTINUE C D(I) = A(N,I) 100 CONTINUE C IF (N .EQ. 1) GO TO 510 C .......... FOR I=N STEP -1 UNTIL 2 DO -- .......... DO 300 II = 2, N I = N + 2 - II L = I - 1 H = 0.0D0 SCALE = 0.0D0 IF (L .LT. 2) GO TO 130 C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) .......... DO 120 K = 1, L SCALE = SCALE + DABS(D(K)) 120 CONTINUE C IF (SCALE .NE. 0.0D0) GO TO 140 130 E(I) = D(L) C DO 135 J = 1, L D(J) = Z(L,J) Z(I,J) = 0.0D0 Z(J,I) = 0.0D0 135 CONTINUE C GO TO 290 C 140 DO 150 K = 1, L D(K) = D(K) / SCALE H = H + D(K) * D(K) 150 CONTINUE C F = D(L) G = -DSIGN(DSQRT(H),F) E(I) = SCALE * G H = H - F * G D(L) = F - G C .......... 
FORM A*U .......... DO 170 J = 1, L E(J) = 0.0D0 170 CONTINUE C DO 240 J = 1, L F = D(J) Z(J,I) = F G = E(J) + Z(J,J) * F JP1 = J + 1 IF (L .LT. JP1) GO TO 220 C DO 200 K = JP1, L G = G + Z(K,J) * D(K) E(K) = E(K) + Z(K,J) * F 200 CONTINUE C 220 E(J) = G 240 CONTINUE C .......... FORM P .......... F = 0.0D0 C DO 245 J = 1, L E(J) = E(J) / H F = F + E(J) * D(J) 245 CONTINUE C HH = F / (H + H) C .......... FORM Q .......... DO 250 J = 1, L E(J) = E(J) - HH * D(J) 250 CONTINUE C .......... FORM REDUCED A .......... DO 280 J = 1, L F = D(J) G = E(J) C DO 260 K = J, L Z(K,J) = Z(K,J) - F * E(K) - G * D(K) 260 CONTINUE C D(J) = Z(L,J) Z(I,J) = 0.0D0 280 CONTINUE C 290 D(I) = H 300 CONTINUE C .......... ACCUMULATION OF TRANSFORMATION MATRICES .......... DO 500 I = 2, N L = I - 1 Z(N,L) = Z(L,L) Z(L,L) = 1.0D0 H = D(I) IF (H .EQ. 0.0D0) GO TO 380 C DO 330 K = 1, L D(K) = Z(K,I) / H 330 CONTINUE C DO 3600 J = 1, L c 20161111; originally was: c DO 360 J = 1, L G = 0.0D0 C DO 340 K = 1, L G = G + Z(K,I) * Z(K,J) 340 CONTINUE C DO 360 K = 1, L Z(K,J) = Z(K,J) - G * D(K) 360 CONTINUE 3600 CONTINUE C 380 DO 400 K = 1, L Z(K,I) = 0.0D0 400 CONTINUE C 500 CONTINUE C 510 DO 520 I = 1, N D(I) = Z(N,I) Z(N,I) = 0.0D0 520 CONTINUE C Z(N,N) = 1.0D0 E(1) = 0.0D0 RETURN END DOUBLE PRECISION FUNCTION EPSLO9(X) DOUBLE PRECISION X C C ESTIMATE UNIT ROUNDOFF IN QUANTITIES OF SIZE X. C DOUBLE PRECISION A,B,C,EPS C C THIS PROGRAM SHOULD FUNCTION PROPERLY ON ALL SYSTEMS C SATISFYING THE FOLLOWING TWO ASSUMPTIONS, C 1. THE BASE USED IN REPRESENTING FLOATING POINT C NUMBERS IS NOT A POWER OF THREE. C 2. THE QUANTITY A IN STATEMENT 10 IS REPRESENTED TO C THE ACCURACY USED IN FLOATING POINT VARIABLES C THAT ARE STORED IN MEMORY. C THE STATEMENT NUMBER 10 AND THE GO TO 10 ARE INTENDED TO C FORCE OPTIMIZING COMPILERS TO GENERATE CODE SATISFYING C ASSUMPTION 2. C UNDER THESE ASSUMPTIONS, IT SHOULD BE TRUE THAT, C A IS NOT EXACTLY EQUAL TO FOUR-THIRDS, C B HAS A ZERO FOR ITS LAST BIT OR DIGIT, C C IS NOT EXACTLY EQUAL TO ONE, C EPS MEASURES THE SEPARATION OF 1.0 FROM C THE NEXT LARGER FLOATING POINT NUMBER. C THE DEVELOPERS OF EISPACK WOULD APPRECIATE BEING INFORMED C ABOUT ANY SYSTEMS WHERE THESE ASSUMPTIONS DO NOT HOLD. C C THIS VERSION DATED 4/6/83. C A = 4.0D0/3.0D0 10 B = A - 1.0D0 C = B + B + B EPS = DABS(C-1.0D0) IF (EPS .EQ. 0.0D0) GO TO 10 EPSLO9 = EPS*DABS(X) RETURN END DOUBLE PRECISION FUNCTION PYTHA9(A,B) DOUBLE PRECISION A,B C C FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW C DOUBLE PRECISION P,R,S,T,U P = DMAX1(DABS(A),DABS(B)) IF (P .EQ. 0.0D0) GO TO 20 R = (DMIN1(DABS(A),DABS(B))/P)**2 10 CONTINUE T = 4.0D0 + R IF (T .EQ. 
4.0D0) GO TO 20
      S = R/T
      U = 1.0D0 + 2.0D0*S
      P = U*P
      R = (S/U)**2 * R
      GO TO 10
   20 PYTHA9 = P
      RETURN
      END
VGAM/vignettes/0000755000176200001440000000000013565414646013032 5ustar liggesusersVGAM/vignettes/categoricalVGAM.Rnw0000644000176200001440000023377113565414545016465 0ustar liggesusers\documentclass[article,shortnames,nojss]{jss}
\usepackage{thumbpdf}
%% need no \usepackage{Sweave.sty}
\SweaveOpts{engine=R,eps=FALSE}

%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
%\VignetteDepends{VGAM}
%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
%\VignettePackage{VGAM}

%% new commands
\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
\newcommand{\pr}{\mbox{$P$}}
\newcommand{\logit}{\mbox{\rm logit}}
\newcommand{\bzero}{{\bf 0}}
\newcommand{\bone}{{\bf 1}}
\newcommand{\bid}{\mbox{\boldmath $d$}}
\newcommand{\bie}{\mbox{\boldmath $e$}}
\newcommand{\bif}{\mbox{\boldmath $f$}}
\newcommand{\bix}{\mbox{\boldmath $x$}}
\newcommand{\biy}{\mbox{\boldmath $y$}}
\newcommand{\biz}{\mbox{\boldmath $z$}}
\newcommand{\biY}{\mbox{\boldmath $Y$}}
\newcommand{\bA}{\mbox{\rm \bf A}}
\newcommand{\bB}{\mbox{\rm \bf B}}
\newcommand{\bC}{\mbox{\rm \bf C}}
\newcommand{\bH}{\mbox{\rm \bf H}}
\newcommand{\bI}{\mbox{\rm \bf I}}
\newcommand{\bX}{\mbox{\rm \bf X}}
\newcommand{\bW}{\mbox{\rm \bf W}}
\newcommand{\bY}{\mbox{\rm \bf Y}}
\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
\newcommand{\diag}{ \mbox{\rm diag} }
\newcommand{\Var}{ \mbox{\rm Var} }
\newcommand{\R}{{\textsf{R}}}
\newcommand{\VGAM}{\pkg{VGAM}}

\author{Thomas W. Yee\\University of Auckland}
\Plainauthor{Thomas W. Yee}

\title{The \pkg{VGAM} Package for Categorical Data Analysis}
\Plaintitle{The VGAM Package for Categorical Data Analysis}

\Abstract{
  Classical categorical regression models such as the multinomial logit
  and proportional odds models are shown to be readily handled by the
  vector generalized linear and additive model (VGLM/VGAM) framework.
  Additionally, there are natural extensions, such as reduced-rank
  VGLMs for dimension reduction, and allowing covariates that have
  values specific to each linear/additive predictor, e.g., for consumer
  choice modeling. This article describes some of the framework behind
  the \pkg{VGAM} \R{} package, its usage and implementation details.
}
\Keywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, \VGAM{} \R{} package}
\Plainkeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}

\Address{
  Thomas W.
Yee \\
  Department of Statistics \\
  University of Auckland, Private Bag 92019 \\
  Auckland Mail Centre \\
  Auckland 1142, New Zealand \\
  E-mail: \email{t.yee@auckland.ac.nz}\\
  URL: \url{http://www.stat.auckland.ac.nz/~yee/}
}

\begin{document}

<<>>=
library("VGAM")
library("VGAMdata")
ps.options(pointsize = 12)
options(width = 72, digits = 4)
options(SweaveHooks = list(fig = function() par(las = 1)))
options(prompt = "R> ", continue = "+")
@

% ----------------------------------------------------------------------
\section{Introduction}
\label{sec:jsscat.intoduction}

This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
based on \cite{Yee:2010}.
Any subsequent features (especially non-backward compatible ones)
will appear here.

The subject of CDA is concerned with analyses where the response is
categorical regardless of whether the explanatory variables are
continuous or categorical. It is a very frequent form of data.
Over the years several CDA regression models for polytomous responses
have become popular, e.g., those in Table \ref{tab:cat.quantities}.
Not surprisingly, the models are interrelated: their foundation is the
multinomial distribution and consequently they share similar and
overlapping properties which modellers should know and exploit.
Unfortunately, software has been slow to reflect their commonality and
this makes analyses unnecessarily difficult for the practitioner on
several fronts, e.g., using different functions/procedures to fit
different models, which does not aid the understanding of their
connections.

This historical misfortune can be seen by considering \R{} functions
for CDA. From the Comprehensive \proglang{R} Archive Network
(CRAN, \url{http://CRAN.R-project.org/}) there is \texttt{polr()}
\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
model and \texttt{multinom()}
\citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
logit model. However, both of these can be considered `one-off'
modeling functions rather than providing a unified offering for CDA.
The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2016}
has greater functionality: it can fit the proportional odds model
(and the forward continuation ratio model upon preprocessing).
Neither \texttt{polr()} nor \texttt{lrm()} appears able to fit the
nonproportional odds model. There are non-CRAN packages too, such as
the modeling function \texttt{nordr()}
\citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit the proportional
odds, continuation ratio and adjacent categories models; however it
calls \texttt{nlm()} and the user must supply starting values.
In general these \R{} \citep{R} modeling functions are not modular,
often require preprocessing, and sometimes are not self-starting.
The implementations can be perceived as scattered and piecemeal in
nature. Consequently, if the practitioner wishes to fit the models of
Table \ref{tab:cat.quantities} then there is a need to master several
modeling functions from several packages, each having a different
syntax, etc. This is a hindrance to efficient CDA.

\begin{table}[tt]
\centering
\begin{tabular}{|c|c|l|}
\hline
Quantity & Notation &
%Range of $j$ &
\VGAM{} family function \\
\hline
%
$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
%$1,\ldots,M$ &
\texttt{acat()} \\
%
$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
%$2,\ldots,M+1$ &
\texttt{acat(reverse = TRUE)} \\
%
$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
%$1,\ldots,M$ &
\texttt{cratio()} \\
%
% $\pr(Y 0$, where we may take $\eta_1 = \xi$ and $\eta_2 = \log\,\sigma$.
In general,
$\eta_{j}=g_{j}(\theta_{j})$ for some parameter link function
$g_{j}$ and parameter $\theta_{j}$. For example, the adjacent
categories models in Table \ref{tab:cat.quantities} are ratios of two
probabilities, therefore a log link of $\zeta_{j}^{R}$ or $\zeta_{j}$
is the default. In \VGAM{}, there are currently over a dozen links to
choose from, of which any can be assigned to any parameter, ensuring
maximum flexibility. Table \ref{tab:jsscat.links} lists some of them.

\begin{table}[tt]
\centering
%\ ~~~ \par
\begin{tabular}{|l|l|l|l|}
\hline
\qquad \qquad $\boldeta$ & Model & Modeling & Reference \\
 & & function & \\
%-------------------------------------------------------------
\hline \hline
%-------------------------------------------------------------
&&&\\[-1.1ex]
$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
VGLM & \texttt{vglm()} &
\cite{yee:hast:2003} \\[1.6ex]
%Yee \& Hastie (2003) \\[1.6ex]
%-------------------------------------------------------------
\hline
&&&\\[-1.1ex]
$\bB_1^{\top} \bix_{1} +
 \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
VGAM & \texttt{vgam()} &
\cite{yee:wild:1996} \\[2.2ex]
%Yee \& Wild (1996) \\[2.2ex]
%-------------------------------------------------------------
\hline
&&&\\[-1.1ex]
$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
RR-VGLM & \texttt{rrvglm()} &
\cite{yee:hast:2003} \\[1.8ex]
%Yee \& Hastie (2003) \\[1.8ex]
%-------------------------------------------------------------
\hline
&&&\\[-1.1ex]
See \cite{yee:hast:2003} & Goodman's RC & \texttt{grc()} &
%\cite{yee:hast:2003} \\[1.8ex]
\cite{good:1981} \\[1.8ex]
%-------------------------------------------------------------
\hline
\end{tabular}
\caption{
Some of the models handled by the package \VGAM{} and its framework.
The vector of latent variables $\bnu = \bC^{\top} \bix_2$
where $\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
\label{tab:rrvglam.jss.subset}
}
%\medskip
\end{table}

VGLMs are estimated using iteratively reweighted least squares (IRLS)
which is particularly suitable for categorical models
\citep{gree:1984}. All models in this article have a log-likelihood
\begin{equation}
\ell = \sum_{i=1}^n \, w_i \, \ell_i
\label{eq:log-likelihood.VGAM}
\end{equation}
where the $w_i$ are known positive prior weights. Let $\bix_i$ denote
the explanatory vector for the $i$th observation, for $i=1,\dots,n$.
Then one can write
\begin{eqnarray}
\boldeta_i &=& \boldeta(\bix_i) =
\left( \begin{array}{c}
\eta_1(\bix_i) \\
\vdots \\
\eta_M(\bix_i)
\end{array} \right) =
\bB^{\top} \bix_i =
\left( \begin{array}{c}
\bbeta_1^{\top} \bix_i \\
\vdots \\
\bbeta_M^{\top} \bix_i
\end{array} \right)
\nonumber
\\
&=&
\left( \begin{array}{ccc}
\beta_{(1)1} & \cdots & \beta_{(1)p} \\
\vdots & & \vdots \\
\beta_{(M)1} & \cdots & \beta_{(M)p} \\
\end{array} \right)
\bix_i =
\left( \bbeta_{(1)} \; \cdots \; \bbeta_{(p)} \right) \bix_i .
\label{eq:lin.pred}
\end{eqnarray}
In IRLS, an adjusted dependent vector
$\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$ is regressed upon a large
(VLM) model matrix, with
$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
The working weights $\bW_i$ here are
$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
(which, under regularity conditions, is equal to
$-w_i \, E[ \partial^2 \ell_i /
(\partial \boldeta_i \, \partial \boldeta_i^{\top})]$),
giving rise to the Fisher scoring algorithm.
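To make the IRLS update concrete, the following is a minimal
self-contained sketch (not the actual \VGAM{} implementation) of
Fisher scoring for the simplest special case $M=1$ with a logit link,
i.e., ordinary logistic regression; the simulated \texttt{X} and
\texttt{y} are hypothetical, and for general $M$ the scalar weights
below become the $M \times M$ matrices $\bW_i$ with the regression
carried out on the large (VLM) model matrix described next:

<<irls-sketch, eval=FALSE>>=
# Fisher scoring/IRLS sketch for the special case M = 1 (logistic
# regression only); illustrative, not how vglm() is coded internally.
irls.logit <- function(X, y, niter = 10) {
  beta <- rep(0, ncol(X))
  for (iter in seq_len(niter)) {
    eta <- drop(X %*% beta)    # current linear predictor
    mu  <- plogis(eta)         # fitted probabilities
    W   <- mu * (1 - mu)       # working weights (scalars when M = 1)
    z   <- eta + (y - mu) / W  # adjusted dependent variable
    beta <- drop(solve(crossprod(X, W * X), crossprod(X, W * z)))
  }
  beta
}
set.seed(123)
X <- cbind(1, rnorm(200))
y <- rbinom(200, size = 1, prob = plogis(1 - 2 * X[, 2]))
cbind(IRLS = irls.logit(X, y),
      glm  = coef(glm(y ~ X[, 2], family = binomial)))
@

The two printed columns should agree, since \texttt{glm()} uses the
same algorithm for this special case.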
Let
$\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$ (LM)
model matrix obtained from the \texttt{formula} argument of
\texttt{vglm()}. Given $\biz_i$, $\bW_i$ and $\bX{}$ at the current
IRLS iteration, a weighted multivariate regression is performed. To do
this, a \textit{vector linear model} (VLM) model matrix
$\bX_{\sVLM}$ is formed from $\bX{}$ and $\bH_k$ (see Section
\ref{sec:wffc.appendixa.vgams}). This has $nM$ rows, and if there are
no constraints then $Mp$ columns. Then
$\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
upon $\bX_{\sVLM}$ with variance-covariance matrix
$\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$. This system of linear equations
is converted to one large WLS fit by premultiplication of the output
of a Cholesky decomposition of the $\bW_i$.

Fisher scoring usually has good numerical stability because the
$\bW_i$ are positive-definite over a larger region of parameter space
than is the case for Newton-Raphson. For the categorical models in
this article the expected information matrices are simpler than the
observed information matrices, and are easily derived, therefore all
the families in Table \ref{tab:cat.quantities} implement Fisher
scoring.

\subsection{VGAMs and constraint matrices}
\label{sec:wffc.appendixa.vgams}

VGAMs provide additive-model extensions to VGLMs, that is,
(\ref{gammod2}) is generalized to
\begin{equation}
\eta_j(\bix) = \beta_{(j)1} + \sum_{k=2}^p \; f_{(j)k}(x_k), \qquad
j = 1,\ldots, M,
\label{addmod}
\end{equation}
a sum of smooth functions of the individual covariates, just as with
ordinary GAMs \citep{hast:tibs:1990}. The
$\bif_k = (f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered
for uniqueness, and are estimated simultaneously using
\textit{vector smoothers}. VGAMs are thus a visual data-driven method
that is well suited to exploring data, and they retain the simplicity
of interpretation that GAMs possess.

An important concept, especially for CDA, is the idea of
`constraints-on-the-functions'. In practice we often wish to constrain
the effect of a covariate to be the same for some of the $\eta_j$ and
to have no effect for others. We shall see below that this constraints
idea is important for several categorical models because of a popular
parallelism assumption. As a specific example, for VGAMs we may wish
to take
\begin{eqnarray*}
\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
\end{eqnarray*}
so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$. For VGAMs,
we can represent these models using
\begin{eqnarray}
\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
\label{eqn:constraints.VGAM}
\end{eqnarray}
where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
vector of unknown intercepts. With no constraints at all,
$\bH_1 = \bH_2 = \cdots = \bH_p = \bI_M$ and
$\bbeta_{(1)}^* = \bbeta_{(1)}$. Like the $\bif_k$, the $\bif_k^*$ are
centered for uniqueness. For VGLMs, the $\bif_k$ are linear so that
\begin{eqnarray}
{\bB}^{\top} &=&
\left( \bH_1 \bbeta_{(1)}^* \; \Bigg| \; \bH_2 \bbeta_{(2)}^* \;
\Bigg| \; \cdots \; \Bigg| \; \bH_p \bbeta_{(p)}^* \right)
\label{eqn:lin.coefs4}
\end{eqnarray}
for some vectors $\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
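As a concrete illustration of the $\bH_k$ (a sketch only;
\texttt{pneumo} is the pneumoconiosis data set shipped with \VGAM{}
and analyzed later in this article), the parallelism assumption of the
proportional odds model corresponds to $\bH_1 = \bI_M$ and
$\bH_k = \bone_M$ for $k > 1$, and the fitted constraint matrices can
be inspected with \texttt{constraints()}:

<<constraints-sketch, eval=FALSE>>=
# Proportional odds model: parallelism for all terms but the intercept.
pneumo <- transform(pneumo, let = log(exposure.time))
fit.po <- vglm(cbind(normal, mild, severe) ~ let,
               family = cumulative(parallel = TRUE, reverse = TRUE),
               data = pneumo)
constraints(fit.po)  # H_1 = I_2, and H_2 = (1, 1)^T for 'let'
@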
The $\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$
using Kronecker product operations. For example, with trivial
constraints, $\bX_{\sVLM} = \bX \otimes \bI_M$. More generally,
\begin{eqnarray}
\bX_{\sVLM} &=&
\left( \left( \bX \, \bie_{1} \right) \otimes \bH_1 \; \Bigg| \;
\left( \bX \, \bie_{2} \right) \otimes \bH_2 \; \Bigg| \; \cdots \;
\Bigg| \; \left( \bX \, \bie_{p} \right) \otimes \bH_p \right)
\label{eqn:X_vlm_Hk}
\end{eqnarray}
($\bie_{k}$ is a vector of zeros except for a one in the $k$th
position) so that $\bX_{\sVLM}$ is $(nM) \times p^*$ where
$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
of columns of all the constraint matrices. Note that $\bX_{\sVLM}$ and
\bX{} can be obtained by
\texttt{model.matrix(vglmObject, type = "vlm")} and
\texttt{model.matrix(vglmObject, type = "lm")} respectively.
Equation (\ref{eqn:lin.coefs4}) focusses on the rows of \bB{} whereas
(\ref{eq:lin.pred}) focusses on the columns.

VGAMs are estimated by applying a modified vector backfitting
algorithm \citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.

\subsection{Vector splines and penalized likelihood}
\label{sec:ex.vspline}

If (\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
natural extension of the cubic smoothing spline to vector responses)
then it can be shown that the resulting solution maximizes a penalized
likelihood; some details are sketched in \cite{yee:step:2007}. In
fact, knot selection for vector splines follows the same idea as
O-splines \citep[see][]{wand:orme:2008} in order to lower the
computational cost.

The usage of \texttt{vgam()} with smoothing is very similar to
\texttt{gam()} \citep{gam:pack:2009}, e.g., to fit a nonparametric
proportional odds model \citep[cf. p.179 of][]{mccu:neld:1989} to the
pneumoconiosis data one could try <
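In passing, the Kronecker construction (\ref{eqn:X_vlm_Hk}) can be
checked numerically. This is a sketch only, under the assumption that
the fitted model uses trivial constraints, as the default of
\texttt{multinomial()} does, so that
$\bX_{\sVLM} = \bX \otimes \bI_M$ with $M = 2$ here:

<<xvlm-sketch, eval=FALSE>>=
# With trivial constraints, X_VLM is the Kronecker product X (x) I_M.
pneumo <- transform(pneumo, let = log(exposure.time))
fit.mn <- vglm(cbind(normal, mild, severe) ~ let,
               family = multinomial, data = pneumo)
X.lm  <- model.matrix(fit.mn, type = "lm")   # n  x p
X.vlm <- model.matrix(fit.mn, type = "vlm")  # nM x (Mp)
all.equal(unname(X.vlm), kronecker(unname(X.lm), diag(2)))
@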