rmutil/0000755000176200001440000000000014326404743011575 5ustar liggesusersrmutil/NAMESPACE0000755000176200001440000001257713437240363013031 0ustar liggesusersuseDynLib("rmutil",.registration=TRUE) importFrom("graphics", "lines", "par", "plot", "points") importFrom("stats", "as.formula", "deriv", "dgamma", "dnorm", "dweibull", "fitted", "model.frame", "model.matrix", "model.response", "nobs", "pgamma", "pnorm", "pweibull", "qgamma", "qnbinom", "qnorm", "qweibull", "residuals", "runif", "terms", "uniroot", "weights") importFrom("utils", "read.table") S3method(as.data.frame, repeated) S3method(as.data.frame, response) S3method(as.data.frame, tccov ) S3method(as.data.frame, tvcov ) S3method(as.matrix, repeated) S3method(as.matrix, response) S3method(as.matrix, tccov ) S3method(as.matrix, tvcov ) S3method(coef, gnlm ) S3method(covariates, formulafn) S3method(covariates, repeated) S3method(covariates, tccov) S3method(covariates, tvcov) S3method(covind, default) S3method(delta, repeated) S3method(delta, response) S3method(description, default) S3method(description, repeated) S3method(deviance, gnlm) S3method(df.residual, gnlm) S3method(finterp, data.frame) S3method(finterp, default) S3method(finterp, repeated) S3method(finterp, tccov) S3method(finterp, tvcov) S3method(fnenvir, data.frame) S3method(fnenvir, default) S3method(fnenvir, repeated) S3method(fnenvir, tccov) S3method(fnenvir, tvcov) S3method(formula, formulafn) S3method(formula, repeated) S3method(formula, tccov) S3method(iprofile, default) S3method(model, formulafn) S3method(mprofile, default) S3method(names, repeated) S3method(names, response) S3method(names, tccov) S3method(names, tvcov) S3method(nesting, repeated) S3method(nesting, response) S3method(nobs, data.frame) S3method(nobs, default) S3method(nobs, response) S3method(nobs, tvcov) S3method(parameters, formulafn) S3method(plot, iprofile) S3method(plot, mprofile) S3method(plot, repeated) S3method(plot, residuals) S3method(plot, response) S3method(print, fmobj) S3method(print, formulafn) S3method(print, gnlm) S3method(print, repeated) S3method(print, response) S3method(print, tccov) S3method(print, tvcov) S3method(response, repeated) S3method(response, response) S3method(resptype, repeated) S3method(resptype, response) S3method(times, default) S3method(times, response) S3method(transform, repeated) S3method(transform, response) S3method(transform, tccov) S3method(transform, tvcov) S3method(units, default) S3method(units, repeated) S3method(vcov, gnlm) S3method(weights, gnlm) S3method(weights, repeated) S3method(weights, response) export(as.data.frame, as.data.frame.response,as.data.frame.tccov,as.data.frame.tvcov,as.data.frame.repeated, as.matrix.response,as.matrix.tccov,as.matrix.tvcov,as.matrix.repeated, capply,contr.mean, covariates,covariates.formulafn,covariates.tccov,covariates.tvcov,covariates.repeated, covind,covind.default, dbetabinom,dboxcox,dburr,dconsul,ddoublebinom,ddoublepois,dgammacount,dgextval,dggamma,dginvgauss,dglogis,dgweibull,dhjorth,dinvgauss,dlaplace,dlevy,dmultbinom,dmultpois,dpareto,dpowexp,dpvfpois,dsimplex,dskewlaplace,dtwosidedpower, delta,delta.response,delta.repeated, description,description.default,description.repeated, dftorep, finterp,finterp.data.frame,finterp.default,finterp.repeated,finterp.tccov,finterp.tvcov, fmobj,print.fmobj,print.formulafn, fnenvir,fnenvir.default,fnenvir.data.frame,fnenvir.tccov,fnenvir.tvcov,fnenvir.repeated, formula.formulafn,formula.tccov,formula.repeated, gauss.hermite,gettvc, 
weights.gnlm,df.residual.gnlm,deviance.gnlm,coef.gnlm,vcov.gnlm,print.gnlm, int,int2, iprofile,iprofile.default,plot.iprofile, lin.diff.eqn,lvna,mexp, model, model.formulafn,covariates.formulafn, mprofile,mprofile.default,plot.mprofile, names.response,names.tccov,names.tvcov,names.repeated, nesting,nesting.response,nesting.repeated, nobs,nobs.default,nobs.response,nobs.tvcov,nobs.data.frame, parameters,parameters.formulafn, pbetabinom,pboxcox,pburr,pconsul,pdoublebinom,pdoublepois,pgammacount,pgextval,pggamma,pginvgauss,pglogis,pgweibull,phjorth,pinvgauss,plaplace,plevy,pmultbinom,pmultpois,ppareto,ppowexp,ppvfpois,psimplex,pskewlaplace,ptwosidedpower, plot.residuals,plot.response,plot.repeated, print.response,print.tccov,print.tvcov,print.repeated, qbetabinom,qboxcox,qburr,qconsul,qdoublebinom,qdoublepois,qgammacount,qgextval,qggamma,qginvgauss,qglogis,qgweibull,qhjorth,qinvgauss,qlaplace,qlevy,qmultbinom,qmultpois,qpareto,qpowexp,qpvfpois,qsimplex,qskewlaplace,qtwosidedpower, rbetabinom,rboxcox,rburr,rconsul,rdoublebinom,rdoublepois,rgammacount,rgextval,rggamma,rginvgauss,rglogis,rgweibull,rhjorth,rinvgauss,rlaplace,rlevy,rmultbinom,rmultpois,rpareto,rpowexp,rpvfpois,rsimplex,rskewlaplace,rtwosidedpower, read.list,read.rep,read.surv, response,response.response,response.repeated, resptype,resptype.response,resptype.repeated, restovec,rmna,runge.kutta,tcctomat, times,times.default,times.response, transform.response,transform.repeated,transform.tccov,transform.tvcov, tvctomat,wr, units,units.default,units.repeated, weights.response,weights.repeated) rmutil/man/0000755000176200001440000000000014175120022012333 5ustar liggesusersrmutil/man/restovec.Rd0000755000176200001440000003052613425057453014503 0ustar liggesusers\name{restovec} \title{Create a response Object} \alias{restovec} \usage{ restovec(response=NULL, times=NULL, nest=NULL, coordinates=NULL, censor=NULL, totals=NULL, weights=NULL, delta=NULL, type=NULL, names=NULL, units=NULL, oldresponse=NULL, description=NULL) } \description{ \code{restovec} can produce an object of class, \code{response}, from a vector of (1) independent univariate responses or (2) a single time series. It can produce such an object from repeated measurements in the form of (1) a list of vectors of event histories, (2) a list of two or more column matrices with times, response values, and and other information or (3) a matrix or dataframe of response values. The first two are for unbalanced data and the third for balanced data. Multivariate responses can be supplied as (1) a three-dimensional array of balanced repeated measurements, (2) lists of matrices for unbalanced repeated measurements, or (3) a matrix with either (a) several time series or (b) single observations per individual on several variables. In formula and functions, the key words, \code{times} can be used to refer to the response times from the data object as a covariate, \code{individuals} to the index for individuals as a factor covariate, and \code{nesting} the index for nesting as a factor covariate. The latter two only work for W&R notation. NAs can be detected with \code{\link[rmutil]{lvna}} or removed with \code{\link[rmutil]{rmna}} (where necessary, in coordination with the appropriate covariates) to create a \code{repeated} object. \code{response} objects can be printed and plotted. 
Methods are available for extracting the response, the numbers of observations per individual, the times, the weights, the units of measurement/Jacobian, and the nesting variable: \code{\link[rmutil]{response}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{weights}}, \code{\link[rmutil]{delta}}, and \code{\link[rmutil]{nesting}}. The response and or the times may be \link[rmutil]{transform}ed using \code{transform(z, newy=fcn1(y), times=fcn2(times))} where \code{fcn1} and \code{fcn2} are transformations and \code{y} is the name of a response variable. When the response is transformed, the Jacobian is automatically calculated. Note that, if the unit of precision/Jacobian (\code{\link[rmutil]{delta}}) is available in the \code{response} object, this is automatically included in the calculation of the likelihood function in all library model functions. } \arguments{ \item{response}{For (1) independent univariate responses with one observation per individual or (2) a single time series, one vector may be supplied (in the latter case, the times must be given even if equally spaced). Univariate repeated measurements responses can be given (1) if balanced, as a matrix or dataframe of response values with dimensions: number of individuals by number of responses/individual, (2) a list of vectors of event histories, or (3) a list of one or more column matrices, for each individual, with response values in the first column and times in the second (if there are no times, set \code{times} to FALSE), possibly followed by columns with nesting categories, binomial totals, censoring indicators, and/or units of measurement. Multivariate responses can be supplied as (1) a three-dimensional array of balanced repeated measurements with dimensions: number of individuals by number of responses/individual by number of variables, (2) a list of matrices for unbalanced repeated measurements each with dimensions: number of responses on that individual by number of variables, plus a column for times if available (otherwise set \code{times} to FALSE), or (3) a matrix with either (a) several time series, having dimensions: length of time series by by number of times series, or (b) single observations per individual on several variables with dimensions: number of individuals by number of variables. In all but case (1), \code{type} must be a character vector with length equal to the number of responses. In case (2), where applicable, \code{censor}, \code{totals}, and \code{delta} must be supplied as lists of matrices of the same size as for \code{response}, and \code{nest} and \code{weights} as lists of vectors of lengths equal to the number of observations on each individual.} \item{times}{When \code{response} is a matrix or multivariate array, these can be (1) a vector when the times are the same for all individuals, possibly unequally-spaced, or (2) a matrix with dimensions: number of individuals by number of responses/individual. Not necessary if times are equally spaced, except if a vector containing a single time series is supplied (if not given in this case, it takes the responses to be independent, not a time series). For clustered data with no time ordering, set to FALSE.} \item{nest}{This is the second level of nesting, with the individual being the first level. Values for an individual must be consecutive increasing integers with all responses in the same cluster grouped together. 
For example, with three clusters of four observations each, the code would be 1,1,1,1,2,2,2,2,3,3,3,3. When \code{response} is a matrix or multivariate array, this can be a vector of length equal to the number of responses/individual indicating which responses belong to which nesting category. If \code{response} is a multivariate list, this must also be a list. When \code{response} is a univariate list of unbalanced repeated measurements, the nesting indicator may instead be included in that list but must respect the same ordering as described above.} \item{coordinates}{When \code{response} is a vector, a two-column matrix giving the coordinates for spatial data.} \item{censor}{When \code{response} is a matrix, this can be (1) a vector of the same length as the number of individuals, containing a binary indicator, with a one indicating that the last time period in the series terminated with an event and zero that it was censored, or (2) a matrix of the same size as \code{response}. When \code{response} is a multivariate array, this can be (1) a matrix with dimensions: number of individuals by number of responses, or (2) an array of the same size as \code{response}. In the first case, for each column corresponding to a duration response, it should contain a binary indicator, with a one indicating that the last time period in the series terminated with an event and zero that it was censored, and NAs in columns not containing durations. In the second case, layers not corresponding to duration responses should contain NAs. If \code{response} is a multivariate list, this must also be a list. For event history data, even with no censoring, an appropriate vector of ones must be supplied. When \code{response} is a univariate list of unbalanced repeated measurements, the censoring indicator may instead be included in that list.} \item{totals}{If the response is a matrix of binomial counts, this can be (1) a corresponding vector (one total per individual) or (2) a matrix of totals. When \code{response} is a multivariate array, this can be (1) a matrix with dimensions: number of individuals by number of responses if all binomial responses for an individual have the same total, or (2) an array of the same size as \code{response}. In the first case, for each column corresponding to a binomial response, it should contain the corresponding totals, with NAs in columns not containing binomial. In the second case, layers not corresponding to binomial responses should contain NAs. If \code{response} is a multivariate list, this must also be a list. When \code{response} is a univariate list of unbalanced repeated measurements, the totals may instead be included in that list.} \item{weights}{A vector, matrix, array, or list of vectors of frequencies or weights, with one value per \code{response}. In other words, a multivariate response has only one corresponding weight value.} \item{delta}{For continuous measurements, the unit of precision (if not equal to unity) for each response: a scalar, vector, matrix, array, or list of the same dimensions as \code{response}. For example, if responses have two decimal places (\code{12.34}), \code{delta=0.01}. If the response has been transformed, this should be multiplied by the numerical values of the Jacobian. When the \code{transform} method is applied to the \code{response} object, this is automatically updated.} \item{type}{The type(s) of observations: nominal, ordinal, discrete, duration, continuous, or unknown. 
If not specified otherwise, those responses with \code{delta} and no \code{censor} are assumed to be \code{continuous}, those with a \code{censor} indicator are assumed to be \code{duration}, those with \code{totals} are assumed to be \code{nominal}, and all others \code{unknown}.} \item{names}{Optional name(s) of the response variable(s).} \item{units}{Optional character vector giving units of measurement of response(s).} \item{oldresponse}{An existing \code{response} object to which the new data are to be added.} \item{description}{An optional named list of character vectors with names of some or all response variables containing their descriptions.} } \value{ Returns an object of class, \code{response}, containing a vector with the responses (\code{z$y}), a corresponding vector of times (\code{z$times}) if applicable, a vector giving the number of observations per individual (\code{z$nobs}, set to a scalar 1 if observations are independent), type (\code{z$type}), and possibly binomial totals (\code{z$n}), nesting (clustering, \code{z$nest}), censoring (\code{z$censor}), weights (\code{z$wt}), unit of precision/Jacobian (\code{z$delta}), units of measurement (\code{z$units}), and description (\code{z$description}) information. } \seealso{ \code{\link[rmutil]{DataMethods}}, \code{\link[rmutil]{covind}}, \code{\link[rmutil]{delta}}, \code{\link[rmutil]{description}}, \code{\link[rmutil]{lvna}}, \code{\link[rmutil]{names}}, \code{\link[rmutil]{nesting}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{read.list}}, \code{\link[rmutil]{read.surv}}, \code{\link[rmutil]{response}}, \code{\link[rmutil]{resptype}}, \code{\link[rmutil]{rmna}}, \code{\link[rmutil]{tcctomat}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{transform}}, \code{\link[rmutil]{tvctomat}}, \code{\link[rmutil]{units}}, \code{\link[rmutil]{weights}} } \author{J.K.
Lindsey} \examples{ # #continuous response y <- matrix(rnorm(20),ncol=5) # times assumed to be 1:5 restovec(y, units="m") #unequally-spaced times tt <- c(1,3,6,10,15) print(resp <- restovec(y, times=tt, units="m", description=list(y="Response measured in metres"))) response(resp) response(resp, nind=2:3) response(transform(resp, y=1/y)) transform(resp, y=1/y, units="1/m") units(resp) description(resp) times(resp) times(transform(resp, times=times-6)) nobs(resp) weights(resp) nesting(resp) # because individuals are the only nesting, this is the same as covind(resp) # # binomial response y <- matrix(rpois(20,5),ncol=5) # responses summarized as relative frequencies print(respb <- restovec(y, totals=y+matrix(rpois(20,5),ncol=5), times=tt)) response(respb) # # censored data y <- matrix(rweibull(20,2,5),ncol=5) print(respc <- restovec(y, censor=matrix(rbinom(20,1,0.9),ncol=5), times=tt)) # if there is no censoring, censor indicator is not printed response(respc) # nesting clustered within individuals nest <- c(1,1,2,2,2) print(respn <- restovec(y, censor=matrix(rbinom(20,1,0.9),ncol=5), times=tt,nest=nest)) response(respn) times(respn) nesting(respn) # # multivariate response restovec(y, censor=matrix(rbinom(20,1,0.9),ncol=5), units=c("m","days","l","cm","mon"), type=c("continuous","duration","continuous","continuous","duration"), description=list(y1="First continuous variable", y2="First duration variable",y3="Second continuous variable", y4="Third continuous variable",y5="Second duration variable")) restovec(y, censor=matrix(rbinom(20,1,0.9),ncol=5), names=c("a","b","c","d","e"), units=c("m","days","l","cm","mon"), type=c("continuous","duration","continuous","continuous","duration"), description=list(a="First continuous variable", b="First duration variable",c="Second continuous variable", d="Third continuous variable",e="Second duration variable")) } \keyword{manip} rmutil/man/pkpd.Rd \name{pkpd} \title{Pharmacokinetic Compartment Models} \usage{ mu1.0o1c(p, times, dose=1, end=0.5) mu1.1o1c(p, times, dose=1) mu1.1o2c(p, times, dose=1) mu1.1o2cl(p, times, dose=1) mu1.1o2cc(p, times, dose=1) mu2.0o1c(p, times, dose=1, ind, end=0.5) mu2.0o2c1(p, times, dose=1, ind, end=0.5) mu2.0o2c2(p, times, dose=1, ind, end=0.5) mu2.1o1c(p, times, dose=1, ind) mu2.0o1cfp(p, times, dose=1, ind, end=0.5) mu2.0o2c1fp(p, times, dose=1, ind, end=0.5) mu2.0o2c2fp(p, times, dose=1, ind, end=0.5) mu2.1o1cfp(p, times, dose=1, ind) } \alias{pkpd} \alias{mu1.0o1c} \alias{mu1.1o1c} \alias{mu1.1o2c} \alias{mu1.1o2cl} \alias{mu1.1o2cc} \alias{mu2.0o1c} \alias{mu2.0o2c1} \alias{mu2.0o2c2} \alias{mu2.1o1c} \alias{mu2.0o1cfp} \alias{mu2.0o2c1fp} \alias{mu2.0o2c2fp} \alias{mu2.1o1cfp} \description{ Mean functions for use in fitting pharmacokinetic compartment models.
\code{mu1.0o1c}: open zero-order one-compartment model \code{mu1.1o1c}: open first-order one-compartment model \code{mu1.1o2c}: open first-order two-compartment model (ordered) \code{mu1.1o2cl}: open first-order two-compartment model (ordered, absorption and transfer equal) \code{mu1.1o2cc}: open first-order two-compartment model (circular) Simultaneous models for parent drug and metabolite: \code{mu2.0o1c}: zero-order one-compartment model \code{mu2.0o2c1}: zero-order model with two compartments for the parent and one for the metabolite \code{mu2.0o2c2}: zero-order two-compartment model for both parent and metabolite \code{mu2.1o1c}: first-order one-compartment model \code{mu2.0o1cfp}: zero-order one-compartment first-pass model \code{mu2.0o2c1fp}: zero-order first-pass model with two compartments for the parent and one for the metabolite \code{mu2.0o2c2fp}: zero-order two-compartment model for both parent and metabolite with first-pass \code{mu2.1o1cfp}: first-order one-compartment first-pass model } \arguments{ \item{p}{Vector of parameters. See the source file for details.} \item{times}{Vector of times.} \item{dose}{Vector of dose levels.} \item{ind}{Indicator whether parent drug or metabolite.} \item{end}{Time infusion ends.} } \value{ The profile of mean concentrations for the given times and doses is returned. } \author{J.K. Lindsey} \examples{ \dontrun{ library(repeated) times <- rep(1:20,2) dose <- c(rep(2,20),rep(5,20)) # set up a mean function for gar based on mu1.1o1c: mu <- function(p) { ka <- exp(p[2]) ke <- exp(p[3]) exp(p[2]-p[1])/(ka-ke)*(exp(-ke*times)-exp(-ka*times))} conc <- matrix(rgamma(40,2,scale=mu(log(c(1,0.3,0.2)))/2),ncol=20,byrow=TRUE) conc[,2:20] <- conc[,2:20]+0.5*(conc[,1:19]-matrix(mu(log(c(1,0.3,0.2))), ncol=20,byrow=TRUE)[,1:19]) conc <- ifelse(conc>0,conc,0.01) gar(conc, dist="gamma", times=1:20, mu=mu, preg=log(c(1,0.4,0.1)), pdepend=0.1, pshape=1) # changing variance shape <- mu gar(conc, dist="gamma", times=1:20, mu=mu, preg=log(c(0.5,0.4,0.1)), pdep=0.1, shape=shape, pshape=log(c(0.5,0.4,0.1))) } } \keyword{models} rmutil/man/read.rep.Rd \name{read.rep} \title{Read a Rectangular Data Set from a File to Create a repeated Object} \alias{read.rep} \usage{ read.rep(file, header=TRUE, skip=0, sep = "", na.strings="NA", response, id=NULL, times=NULL, censor=NULL, totals=NULL, weights=NULL, nest=NULL, delta=NULL, coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=TRUE) } \description{ \code{read.rep} forms an object of class, \code{repeated}, from data read from a file with the option of removing any observations where response and covariate values have NAs. For repeated measurements, observations on the same individual must be together in the file. A number of validity checks are performed on the data. Such objects can be printed and plotted. Methods are available for extracting the response, the numbers of observations per individual, the times, the weights, the units of measurement/Jacobian, the nesting variable, the covariates, and their names: \code{\link[rmutil]{response}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{weights}}, \code{\link[rmutil]{delta}}, \code{\link[rmutil]{nesting}}, \code{\link[rmutil]{covariates}}, and \code{\link[rmutil]{names}}.
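For example, with a rectangular file laid out with one row per observation, a minimal sketch of the call is shown below (the file name and column names are hypothetical, mirroring the call in the examples section):
\preformatted{
## id  tt  y1   y2  x2
## 1   1   4.2  0   1
## 1   2   3.9  1   1
## 2   1   5.1  0   0
z <- read.rep("test.dat", response=c("y1","y2"), times="tt", id="id", ccov="x2")
}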
} \arguments{ \item{file}{A file name from which to read the data with variables as columns and observations as rows.} \item{header}{A logical value indicating whether the file contains the names of the variables as the line before the first row of data.} \item{skip}{The number of lines of the file to skip before beginning to read data.} \item{sep}{The field separator character. Values on each line of the file are separated by this character.} \item{na.strings}{A vector of strings defining what values are to be assigned NA.} \item{response}{A character vector giving the column name(s) of the dataframe for the response variable(s).} \item{id}{A character vector giving the column name of the dataframe for the identification numbers of the individuals. If the numbers are not consecutive integers, a warning is given. If NULL, one observation per individual is assumed if \code{times} is also NULL; otherwise, a time series is assumed.} \item{times}{An optional character vector giving the column name of the dataframe for the times vector.} \item{censor}{An optional character vector giving the column name(s) of the dataframe for the censor indicator(s). This must be the same length as \code{response}. Responses without a censor indicator can have a column either of all NAs or all 1s.} \item{totals}{An optional character vector giving the column name(s) of the dataframe for the totals for binomial data. This must be the same length as \code{response}. Responses without totals can have a column of all NAs.} \item{weights}{An optional character vector giving the column name of the dataframe for the weights vector.} \item{nest}{An optional character vector giving the column name of the dataframe for the nesting vector within individuals. This is the second level of nesting for repeated measurements, with the individual being the first level. Values for an individual must be consecutive increasing integers.} \item{delta}{An optional character vector giving the column name(s) of the dataframe for the units of measurement/Jacobian(s) of the response(s). This must be the same length as \code{response}. Responses without units of measurement/Jacobian can have a column of all NAs. If all response variables have the same unit of measurement, this can be that one number. If each response variable has the same unit of measurement for all its values, this can be a numeric vector with length equal to the number of response variables.} \item{coordinates}{An optional character vector giving the two or three column name(s) of the dataframe for the spatial coordinates.} \item{type}{An optional character vector giving the types of response variables: nominal, ordinal, discrete, duration, continuous, multivariate, or unknown.} \item{ccov}{An optional character vector giving the column names of the dataframe for the time-constant or inter-individual covariates. For repeated measurements, if the value is not constant for all observations on an individual, an error is produced.} \item{tvcov}{An optional character vector giving the column names of the dataframe for the time-varying or intra-individual covariates.} \item{na.rm}{If TRUE, observations with NAs in any variables selected are removed in the object returned.
Otherwise, the corresponding indicator variable is returned in a slot in the object.} } \value{ Returns an object of class, \code{repeated}, containing a list of the response object (\code{z$response}, so that, for example, the response vector is \code{z$response$y}; see \code{\link[rmutil]{restovec}}), and possibly the two classes of covariate objects (\code{z$ccov} and \code{z$tvcov}; see \code{\link[rmutil]{tcctomat}} and \code{\link[rmutil]{tvctomat}}). } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{dftorep}}, \code{\link[rmutil]{lvna}}, \code{\link[rmutil]{read.list}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{rmna}}, \code{\link[rmutil]{tcctomat}}, \code{\link[rmutil]{tvctomat}} } \examples{ \dontrun{read.rep("test.dat", resp=c("y1","y2"), times="tt", id="id",} \dontrun{ totals=c("tot1","tot2"), tvcov="x",ccov="x2")} } \keyword{file} rmutil/man/Consul.Rd0000755000176200001440000000234113425057453014106 0ustar liggesusers\name{Consul} \title{Consul Distribution} \usage{ dconsul(y, m, s, log=FALSE) pconsul(q, m, s) qconsul(p, m, s) rconsul(n, m, s) } \alias{dconsul} \alias{pconsul} \alias{qconsul} \alias{rconsul} \description{ These functions provide information about the Consul distribution with parameters \code{m} and \code{s}: density, cumulative distribution, quantiles, and random generation. The Consul distribution with \code{mu} \eqn{= m} has density \deqn{p(y) = \mu \exp(-(\mu+y(\lambda-1))/\lambda) (\mu+y(\lambda-1))^(y-1)/(\lambda^y y!)% }{p(y) = m exp(-(m+y(s-1))/s) (m+y(s-1))^(y-1)/(s^y y!)} for \eqn{y = 0, \ldots}. } \arguments{ \item{y}{vector of counts} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of means} \item{s}{vector of overdispersion parameters} \item{log}{if TRUE, log probabilities are supplied.} } \seealso{ \code{\link{dpois}} for the Poisson, \code{\link[rmutil]{ddoublepois}} for the double Poisson, \code{\link[rmutil]{dmultpois}} for the multiplicative Poisson, and \code{\link[rmutil]{dpvfpois}} for the power variance function Poisson. } \examples{ dconsul(5,10,0.9) pconsul(5,10,0.9) qconsul(0.08,10,0.9) rconsul(10,10,0.9) } \keyword{distribution} rmutil/man/Laplace.Rd0000755000176200001440000000232113425057453014202 0ustar liggesusers\name{Laplace} \title{Laplace Distribution} \usage{ dlaplace(y, m=0, s=1, log=FALSE) plaplace(q, m=0, s=1) qlaplace(p, m=0, s=1) rlaplace(n=1, m=0, s=1) } \alias{dlaplace} \alias{plaplace} \alias{qlaplace} \alias{rlaplace} \description{ These functions provide information about the Laplace distribution with location parameter equal to \code{m} and dispersion equal to \code{s}: density, cumulative distribution, quantiles, log hazard, and random generation. The Laplace distribution has density \deqn{ f(y) = \frac{\exp(-abs(y-\mu)/\sigma)}{(2\sigma)}}{ f(y) = exp(-abs(y-m)/s)/(2*s)} where \eqn{\mu}{m} is the location parameter of the distribution and \eqn{\sigma}{s} is the dispersion. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dexp}} for the exponential distribution and \code{\link{dcauchy}} for the Cauchy distribution. 
} \examples{ dlaplace(5, 2, 1) plaplace(5, 2, 1) qlaplace(0.95, 2, 1) rlaplace(10, 2, 1) } \keyword{distribution} rmutil/man/GammaCount.Rd0000755000176200001440000000275113425057453014703 0ustar liggesusers\name{Gamma Count} \title{Gamma Count Distribution} \usage{ dgammacount(y, m, s, log=FALSE) pgammacount(q, m, s) qgammacount(p, m, s) rgammacount(n, m, s) } \alias{dgammacount} \alias{pgammacount} \alias{qgammacount} \alias{rgammacount} \description{ These functions provide information about the gamma count distribution with parameters \code{m} and \code{s}: density, cumulative distribution, quantiles, and random generation. The gamma count distribution with \code{prob} \eqn{= m} has density \deqn{p(y) = pgamma(\mu \sigma,y \sigma,1)-pgamma(\mu \sigma,(y+1) \sigma,1) }{p(y) = pgamma(m s, y s, 1)-pgamma(m s, (y+1) s, 1)} for \eqn{y = 0, \ldots, n} where \eqn{pgamma(\mu \sigma,0,1)=1}{pgamma(m s, 0, 1)=1}. } \arguments{ \item{y}{vector of frequencies} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of probabilities} \item{s}{vector of overdispersion parameters} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dpois}} for the Poisson, \code{\link[rmutil]{dconsul}} for the Consul generalized Poisson, \code{\link[rmutil]{ddoublepois}} for the double Poisson, \code{\link[rmutil]{dmultpois}} for the multiplicative Poisson distributions, and \code{\link{dnbinom}} for the negative binomial distribution. } \examples{ dgammacount(5,10,0.9) pgammacount(5,10,0.9) qgammacount(0.08,10,0.9) rgammacount(10,10,0.9) } \keyword{distribution} rmutil/man/gauss.hermite.Rd0000755000176200001440000000107713425057453015426 0ustar liggesusers\name{gauss.hermite} \title{Calculate Gauss-Hermite Quadrature Points} \alias{gauss.hermite} \usage{ gauss.hermite(points, iterlim=10) } \description{ \code{gauss.hermite} calculates the Gauss-Hermite quadrature values for a specified number of points. } \arguments{ \item{points}{The number of points.} \item{iterlim}{Maximum number of iterations in Newton-Raphson.} } \value{ \code{gauss.hermite} returns a two-column matrix containing the points and their corresponding weights. } \author{J.K. Lindsey} \examples{ gauss.hermite(10) } \keyword{math} rmutil/man/mprofile.Rd0000755000176200001440000000527013425057453014464 0ustar liggesusers\name{mprofile} \title{Produce Marginal Time Profiles for Plotting} \usage{ \method{plot}{mprofile}(x, nind=1, intensity=FALSE, add=FALSE, ylim=range(z$pred, na.rm = TRUE), lty=NULL, ylab=NULL, xlab=NULL, ...) } \alias{mprofile} \alias{mprofile.default} \alias{plot.mprofile} \description{ \code{mprofile} is used for plotting marginal profiles over time for models obtained from dynamic models, for given fixed values of covariates. These are either obtained from those supplied by the model, if available, or from a function supplied by the user. See \code{\link[rmutil]{iprofile}} for plotting individual profiles from recursive fitted values. } \arguments{ \item{x}{An object of class \code{mprofile}, e.g. 
\code{x = mprofile(z, times=NULL, mu=NULL, ccov, plotse=TRUE)}, where \code{z}An object of class \code{recursive}, from \code{carma}, \code{elliptic}, \code{gar}, \code{kalcount}, \code{kalseries}, \code{kalsurv}, or \code{nbkal}; \code{times} is a vector of time points at which profiles are to be plotted; \code{mu} is the location regression as a function of the parameters and the times for the desired covariate values; \code{ccov} is covariate values for the profiles (\code{carma} only); and \code{plotse} when TRUE plots standard errors (\code{carma} only).} \item{nind}{Observation number(s) of individual(s) to be plotted. (Not used if \code{mu} is supplied.)} \item{intensity}{If TRUE, the intensity is plotted instead of the time between events. Only for models produced by \code{kalsurv}.} \item{add}{If TRUE, add contour to previous plot instead of creating a new one.} %\item{others}{Plotting control options.} \item{lty,ylim,xlab,ylab}{ See base plot.} \item{...}{Arguments passed to other functions.} } \value{ \code{mprofile} returns information ready for plotting by \code{plot.mprofile}. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{iprofile}}, \code{\link[rmutil]{plot.residuals}}. } \examples{ \dontrun{ ## try after you get the repeated package library(repeated) times <- rep(1:20,2) dose <- c(rep(2,20),rep(5,20)) mu <- function(p) exp(p[1]-p[3])*(dose/(exp(p[1])-exp(p[2]))* (exp(-exp(p[2])*times)-exp(-exp(p[1])*times))) shape <- function(p) exp(p[1]-p[2])*times*dose*exp(-exp(p[1])*times) conc <- matrix(rgamma(40,1,scale=mu(log(c(1,0.3,0.2)))),ncol=20,byrow=TRUE) conc[,2:20] <- conc[,2:20]+0.5*(conc[,1:19]-matrix(mu(log(c(1,0.3,0.2))), ncol=20,byrow=TRUE)[,1:19]) conc <- ifelse(conc>0,conc,0.01) z <- gar(conc, dist="gamma", times=1:20, mu=mu, shape=shape, preg=log(c(1,0.4,0.1)), pdepend=0.5, pshape=log(c(1,0.2))) # plot individual profiles and the average profile plot(iprofile(z), nind=1:2, pch=c(1,20), lty=3:4) plot(mprofile(z), nind=1:2, lty=1:2, add=TRUE) } } \keyword{hplot} rmutil/man/tcctomat.Rd0000755000176200001440000000602313425057453014462 0ustar liggesusers\name{tcctomat} \title{Create a Time-constant, Inter-individual Covariate (tccov) Object} \alias{tcctomat} \usage{ tcctomat(ccov, names=NULL, units=NULL, oldccov=NULL, dataframe=TRUE, description=NULL) } \description{ \code{tcctomat} creates an object of class, \code{tccov}, from a vector or matrix containing time-constant or inter-individual baseline covariates or a model formula. It can also combine two such objects. Such objects can be printed. Methods are available for extracting the covariates, their names, and the formula: \code{\link[rmutil]{covariates}}, \code{\link[rmutil]{names}}, and \code{\link[rmutil]{formula}}. The method, \code{\link[rmutil]{transform}}, can transform variables in place or by adding new variables to the object. To obtain the indexing to expand time-constant or inter-individual covariates to the size of a repeated measurements response, use \code{\link[rmutil]{covind}}. } \arguments{ \item{ccov}{A vector, matrix, or dataframe containing time-constant or inter-individual baseline covariates with one row per individual, a model formula using vectors of the same size, or an object of class, \code{tccov}. 
In the first two cases, the variables may be factors; if \code{dataframe=FALSE}, these are transformed to indicator variables.} \item{units}{Optional character vector specifying units of measurements of covariates.} \item{names}{The names of the covariates (if the matrix does not have column names).} \item{oldccov}{An object of class, \code{tccov}, to which \code{ccov} is to be added.} \item{dataframe}{If TRUE and factor variables are present, the covariates are stored as a dataframe; if FALSE, they are expanded to indicator variables. If no factor variables are present, covariates are always stored as a matrix.} \item{description}{An optional named list of character vectors with names of some or all covariates containing their descriptions.} } \value{ Returns an object of class, \code{tccov}, containing one matrix or dataframe for the covariates (\code{z$ccov}) with one row per individual and possibly the model formula (\code{z$linear}). } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{DataMethods}}, \code{\link[rmutil]{covariates}}, \code{\link[rmutil]{description}}, \code{\link[rmutil]{formula}}, \code{\link[rmutil]{lvna}}, \code{\link[rmutil]{names}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{rmna}}, \code{\link[rmutil]{transform}}, \code{\link[rmutil]{tvctomat}}, \code{\link[rmutil]{units}} } \examples{ x1 <- gl(4,1) print(tcc1 <- tcctomat(~x1)) covariates(tcc1) covariates(tcc1, name="x12") tcctomat(x1) tcctomat(x1, dataframe=FALSE) x2 <- c(0,0,1,1) print(tcc2 <- tcctomat(~x2, units="days")) covariates(tcc2) print(tcc3 <- tcctomat(~x1+x2)) covariates(tcc3) covariates(tcc3, names=c("x12","x2")) formula(tcc3) names(tcc3) print(tcc4 <- tcctomat(data.frame(x1,x2), units=c(NA,"days"))) covariates(tcc4) print(tcc5 <- tcctomat(data.frame(x1,x2), dataframe=FALSE, units=c(NA,"days"))) covariates(tcc5) } \keyword{manip} rmutil/man/Burr.Rd0000755000176200001440000000231613425057453013557 0ustar liggesusers\name{Burr} \title{Burr Distribution} \usage{ dburr(y, m, s, f, log=FALSE) pburr(q, m, s, f) qburr(p, m, s, f) rburr(n, m, s, f) } \alias{dburr} \alias{pburr} \alias{qburr} \alias{rburr} \description{ These functions provide information about the Burr distribution with location parameter equal to \code{m}, dispersion equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The Burr distribution has density \deqn{ f(y) = \frac{\nu \sigma (y / \mu)^{\sigma-1}} {\mu (1+(y/\mu)^\sigma)^{\nu+1}}}{ f(y) = f s (y/m)^(s-1)/(m (1+(y/m)^s)^(f+1))} where \eqn{\mu}{m} is the location parameter of the distribution, \eqn{\sigma}{s} is the dispersion, and \eqn{\nu}{f} is the family parameter. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. 
Lindsey} \examples{ dburr(2, 5, 1, 2) pburr(2, 5, 1, 2) qburr(0.3, 5, 1, 2) rburr(10, 5, 1, 2) } \keyword{distribution} rmutil/man/PowerExp.Rd0000755000176200001440000000303013425057453014410 0ustar liggesusers\name{PowerExponential} \title{Power Exponential Distribution} \usage{ dpowexp(y, m=0, s=1, f=1, log=FALSE) ppowexp(q, m=0, s=1, f=1) qpowexp(p, m=0, s=1, f=1) rpowexp(n, m=0, s=1, f=1) } \alias{dpowexp} \alias{ppowexp} \alias{qpowexp} \alias{rpowexp} \description{ These functions provide information about the power exponential distribution with mean parameter equal to \code{m}, dispersion equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The power exponential distribution has density \deqn{ f(y) = \frac{\exp(-(abs{y-\mu}/\sqrt{\sigma})^{2 \nu}/2)}{ \sqrt{\sigma} Gamma(1+1/(2 \nu)) 2^{1+1/(2 \nu)}}}{ f(y) = exp(-(abs(y-m)/sqrt(s))^(2 f)/2)/ (sqrt(s) Gamma(1+1/(2 f)) 2^(1+1/(2 f)))} where \eqn{\mu}{m} is the mean of the distribution, \eqn{\sigma}{s} is the dispersion, and \eqn{\nu}{f} is the family parameter. \eqn{\nu=1}{f=1} yields a normal distribution, \eqn{\nu=0.5}{f=0.5} a Laplace distribution, and \eqn{\nu=\infty}{f=Inf} a uniform distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of means.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \examples{ dpowexp(5, 5, 1, 2) ppowexp(5, 5, 1, 2) qpowexp(0.5, 5, 1, 2) rpowexp(10, 5, 1, 2) } \keyword{distribution} rmutil/man/read.surv.Rd0000755000176200001440000000242213425057453014554 0ustar liggesusers\name{read.surv} \title{Read a List of Matrices from a File for Repeated Times to Events} \alias{read.surv} \usage{ read.surv(file="", skip=0, nlines=1, cumulative=TRUE, all=TRUE) } \description{ \code{read.surv} reads sets of lines of data from a file. Each set may contain a series of duration times followed by a censor indicator for the last value (\code{all=FALSE}) or a series of pairs of times followed by their censor indicators (\code{all=TRUE}). } \arguments{ \item{file}{Name of the file to read} \item{skip}{Number of lines to skip at the beginning of the file} \item{nlines}{Number of lines in each series of duration times} \item{cumulative}{If TRUE, the times are cumulative and differences are taken to obtain times between events. Otherwise, the times are used unchanged.} \item{all}{If TRUE, all times have accompanying censor indicators; otherwise, only the last one does.} } \value{ A list containing a list of vectors with the series of times and a vector of censor indicators for the last time of each series is returned. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{lvna}}, \code{\link[rmutil]{read.list}}, \code{\link[rmutil]{read.rep}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{rmna}} } \examples{ \dontrun{y <- read.surv("test.dat")} } \keyword{file} rmutil/man/mexp.Rd0000755000176200001440000000132713425057453013617 0ustar liggesusers\name{mexp} \title{Matrix Exponentiation} \alias{mexp} \usage{ mexp(x, t=1, type="spectral decomposition", n=20, k=3) } \description{ \code{mexp} calculates \code{exp(t*x)} for the square matrix, \code{x}, by spectral decomposition or series expansion. 
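The series option corresponds to a truncated Taylor expansion of a scaled matrix. A minimal R sketch of that idea (assuming the divisor \code{k} scales the matrix as \code{x/2^k} before repeated squaring; the internal algorithm may differ in detail):
\preformatted{
mexp.series <- function(x, t=1, n=20, k=3) {
	xs <- t * x / 2^k
	res <- term <- diag(nrow(x))
	for(i in 1:n) {
		term <- term \%*\% xs / i   # accumulate Taylor terms (t x / 2^k)^i / i!
		res <- res + term
	}
	for(i in seq_len(k)) res <- res \%*\% res   # square k times to undo the scaling
	res
}
mexp.series(matrix(c(1,2,3,4), nrow=2))   # compare with mexp(matrix(c(1,2,3,4), nrow=2))
}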
} \arguments{ \item{x}{A square matrix.} \item{t}{Constant multiplying the matrix.} \item{type}{Algorithm used: spectral decomposition or series approximation.} \item{n}{Number of terms in the series expansion.} \item{k}{Constant divisor to avoid over- or underflow (series approximation only).} } \value{ \code{mexp} returns the exponential of a matrix. } \author{J.K. Lindsey} \examples{ x <- matrix(c(1,2,3,4),nrow=2) mexp(x) } \keyword{array} rmutil/man/plot.residuals.Rd0000755000176200001440000000400413425057453015611 0ustar liggesusers\name{plot.residuals} \title{Plot Residuals} \alias{plot.residuals} \usage{ \method{plot}{residuals}(x, X=NULL, subset=NULL, ccov=NULL, nind=NULL, recursive=TRUE, pch=20, ylab="Residual", xlab=NULL, main=NULL, ...) } \description{ \code{plot.residuals} is used for plotting residuals from models obtained from dynamic models for given subsets of the data. } \arguments{ \item{x}{An object of class recursive, from \code{carma}, \code{gar}, \code{kalcount}, \code{kalseries}, \code{kalsurv}, or \code{nbkal}.} \item{X}{Vector of of values for the x-axis. If missing, time is used. It can also be specified by the strings "response" or "fitted".} \item{subset}{A logical vector defining which observations are to be used.} \item{ccov}{If the name of a time-constant covariate is supplied, separate plots are made for each distinct value of that covariate.} \item{nind}{Observation number(s) of individual(s) to be plotted.} \item{recursive}{If TRUE, plot recursive residuals, otherwise ordinary residuals.} \item{pch,ylab,xlab,main,...}{Plotting control options.} } \author{J.K. Lindsey} \seealso{ \code{carma}, \code{gar}, \code{kalcount}, \code{kalseries}, \code{kalsurv}, \code{nbkal} \code{\link[rmutil]{plot.iprofile}}, \code{\link[rmutil]{plot.mprofile}}.} \examples{ \dontrun{ library(repeated) times <- rep(1:20,2) dose <- c(rep(2,20),rep(5,20)) mu <- function(p) exp(p[1]-p[3])*(dose/(exp(p[1])-exp(p[2]))* (exp(-exp(p[2])*times)-exp(-exp(p[1])*times))) shape <- function(p) exp(p[1]-p[2])*times*dose*exp(-exp(p[1])*times) conc <- matrix(rgamma(40,2,scale=mu(log(c(1,0.3,0.2)))/2),ncol=20,byrow=TRUE) conc[,2:20] <- conc[,2:20]+0.5*(conc[,1:19]-matrix(mu(log(c(1,0.3,0.2))), ncol=20,byrow=TRUE)[,1:19]) conc <- ifelse(conc>0,conc,0.01) z <- gar(conc, dist="gamma", times=1:20, mu=mu, shape=shape, preg=log(c(1,0.4,0.1)), pdepend=0.1, pshape=log(c(1,0.2))) plot.residuals(z, subset=1:20, main="Dose 1") plot.residuals(z, x="fitted", subset=1:20, main="Dose 1") plot.residuals(z, x="response", subset=1:20, main="Dose 1") } } \keyword{hplot} rmutil/man/InvGauss.Rd0000755000176200001440000000236513425057453014410 0ustar liggesusers\name{Inverse Gaussian} \title{Inverse Gaussian Distribution} \usage{ dinvgauss(y, m, s, log=FALSE) pinvgauss(q, m, s) qinvgauss(p, m, s) rinvgauss(n, m, s) } \alias{dinvgauss} \alias{pinvgauss} \alias{qinvgauss} \alias{rinvgauss} \description{ These functions provide information about the inverse Gaussian distribution with mean equal to \code{m} and dispersion equal to \code{s}: density, cumulative distribution, quantiles, log hazard, and random generation. The inverse Gaussian distribution has density \deqn{ f(y) = \frac{1}{\sqrt{2\pi\sigma y^3}} e^{-(y-\mu)^2/(2 y \sigma m^2)}}{ f(y) = 1/sqrt(2 pi s y^3) e^-((y - m)^2/(2 y s m^2))} where \eqn{\mu}{m} is the mean of the distribution and \eqn{\sigma}{s} is the dispersion. 
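As a quick check, the density can be evaluated directly from the formula above (a sketch for illustration only, not the package's internal code):
\preformatted{
dinvgauss.direct <- function(y, m, s)
	exp(-(y - m)^2/(2 * y * s * m^2))/sqrt(2 * pi * s * y^3)
dinvgauss.direct(5, 5, 1)   # should agree with dinvgauss(5, 5, 1)
}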
} \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of means.} \item{s}{vector of dispersion parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dnorm}} for the normal distribution and \code{\link{dlnorm}} for the \emph{Log}normal distribution. } \examples{ dinvgauss(5, 5, 1) pinvgauss(5, 5, 1) qinvgauss(0.8, 5, 1) rinvgauss(10, 5, 1) } \keyword{distribution} rmutil/man/Pareto.Rd0000755000176200001440000000243313425057453014077 0ustar liggesusers\name{Pareto} \title{Pareto Distribution} \usage{ dpareto(y, m, s, log=FALSE) ppareto(q, m, s) qpareto(p, m, s) rpareto(n, m, s) } \alias{dpareto} \alias{ppareto} \alias{qpareto} \alias{rpareto} \description{ These functions provide information about the Pareto distribution with location parameter equal to \code{m} and dispersion equal to \code{s}: density, cumulative distribution, quantiles, log hazard, and random generation. The Pareto distribution has density \deqn{ f(y) = \frac{\sigma }{\mu (\sigma-1)(1 + y/(\mu (\sigma-1)))^{\sigma+1}}}{ f(y) = s (1 + y/(m (s-1)))^(-s-1)/(m (s-1))} where \eqn{\mu}{m} is the mean parameter of the distribution and \eqn{\sigma}{s} is the dispersion. This distribution can be obtained as a mixture distribution from the exponential distribution using a gamma mixing distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dexp}} for the exponential distribution. } \examples{ dpareto(5, 2, 2) ppareto(5, 2, 2) qpareto(0.9, 2, 2) rpareto(10, 2, 2) } \keyword{distribution} rmutil/man/lvna.Rd0000755000176200001440000000765213425057453013615 0ustar liggesusers\name{lvna} \title{Create a repeated Object, Leaving NAs} \alias{lvna} \usage{ lvna(response, ccov=NULL, tvcov=NULL) } \description{ \code{lvna} forms an object of class, \code{repeated}, from a response object and possibly time-varying or intra-individual covariate (\code{tvcov}), and time-constant or inter-individual covariate (\code{tccov}) objects. If there are NAs in any variables, it also creates a logical vector indicating which observations have NAs either in the response or the covariate values. Subjects must be in the same order in all (three) objects to be combined. Such objects can be printed and plotted. Methods are available for extracting the response, the numbers of observations per individual, the times, the weights, the units of measurement/Jacobian, the nesting variable, the covariates, and their names: \code{\link[rmutil]{response}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{weights}}, \code{\link[rmutil]{delta}}, \code{\link[rmutil]{nesting}}, \code{\link[rmutil]{covariates}}, and \code{\link[rmutil]{names}}. 
} \arguments{ \item{response}{An object of class, \code{response} (created by \code{\link[rmutil]{restovec}}), containing the response variable information.} \item{ccov}{An object of class, \code{tccov} (created by \code{\link[rmutil]{tcctomat}}), containing the time-constant or inter-individual covariate information.} \item{tvcov}{An object of class, \code{tvcov} (created by \code{\link[rmutil]{tvctomat}}), containing the time-varying or intra-individual covariate information.} } \value{ Returns an object of class, \code{repeated}, containing a list of the response object (\code{z$response}, so that, for example, the response vector is \code{z$response$y}; see \code{\link[rmutil]{restovec}}), possibly the two classes of covariate objects (\code{z$ccov} and \code{z$tvcov}; see \code{\link[rmutil]{tcctomat}} and \code{\link[rmutil]{tvctomat}}), and a logical vector (\code{z$NAs}) indicating which observations have an NA in the response or some covariate. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{DataMethods}}, \code{\link[rmutil]{covariates}}, \code{\link[rmutil]{covind}}, \code{\link[rmutil]{delta}}, \code{\link[rmutil]{dftorep}}, \code{\link[rmutil]{names}}, \code{\link[rmutil]{nesting}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{read.list}}, \code{\link[rmutil]{read.surv}}, \code{\link[rmutil]{response}}, \code{\link[rmutil]{resptype}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{rmna}}, \code{\link[rmutil]{tcctomat}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{transform}}, \code{\link[rmutil]{tvctomat}}, \code{\link[rmutil]{units}}, \code{\link[rmutil]{weights}} } \examples{ y <- matrix(rnorm(20),ncol=5) y[2,3] <- NA tt <- c(1,3,6,10,15) print(resp <- restovec(y,times=tt)) x <- c(0,0,1,1) tcc <- tcctomat(x) z <- matrix(rpois(20,5),ncol=5) tvc <- tvctomat(z) print(reps <- lvna(resp, tvcov=tvc, ccov=tcc)) response(reps) response(reps, nind=2:3) times(reps) nobs(reps) weights(reps) covariates(reps) covariates(reps,names="x") covariates(reps,names="z") names(reps) nesting(reps) # because individuals are the only nesting, this is the same as covind(reps) # binomial y <- matrix(rpois(20,5),ncol=5) y[2,3] <- NA print(respb <- restovec(y,totals=y+matrix(rpois(20,5),ncol=5),times=tt)) print(repsb <- lvna(respb, tvcov=tvc, ccov=tcc)) response(repsb) # censored data y <- matrix(rweibull(20,2,5),ncol=5) print(respc <- restovec(y,censor=matrix(rbinom(20,1,0.9),ncol=5),times=tt)) print(repsc <- lvna(respc, tvcov=tvc, ccov=tcc)) # if there is no censoring, censor indicator is not printed response(repsc) # nesting clustered within individuals nest <- c(1,1,2,2,2) print(respn <- restovec(y,censor=matrix(rbinom(20,1,0.9),ncol=5), times=tt,nest=nest)) print(repsn <- lvna(respn, tvcov=tvc, ccov=tcc)) response(respn) times(respn) nesting(respn) } \keyword{manip} rmutil/man/GGamma.Rd0000755000176200001440000000321513425057453013775 0ustar liggesusers\name{Generalized Gamma} \title{Generalized Gamma Distribution} \usage{ dggamma(y, s, m, f, log=FALSE) pggamma(q, s, m, f) qggamma(p, s, m, f) rggamma(n, s, m, f) } \alias{dggamma} \alias{pggamma} \alias{qggamma} \alias{rggamma} \description{ These functions provide information about the generalized gamma distribution with scale parameter equal to \code{m}, shape equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. 
The generalized gamma distribution has density \deqn{ f(y) = \frac{\nu y^{\nu-1}} {(\mu/\sigma)^{\nu\sigma} Gamma(\sigma)} y^{\nu(\sigma-1)} \exp(-(y \sigma/\mu)^\nu)}{ f(y) = fy^(f-1)/((m/s)^(fs) Gamma(s)) y^(f(s-1)) exp(-(y s/m)^f)} where \eqn{\mu}{m} is the scale parameter of the distribution, \eqn{\sigma}{s} is the shape, and \eqn{\nu}{f} is the family parameter. \eqn{\nu=1}{f=1} yields a gamma distribution, \eqn{\sigma=1}{s=1} a Weibull distribution, and \eqn{\sigma=\infty}{s=infinity} a log normal distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dgamma}} for the gamma distribution, \code{\link{dweibull}} for the Weibull distribution, \code{\link{dlnorm}} for the log normal distribution. } \examples{ dggamma(2, 5, 4, 2) pggamma(2, 5, 4, 2) qggamma(0.75, 5, 4, 2) rggamma(10, 5, 4, 2) } \keyword{distribution} rmutil/man/mpow.Rd0000755000176200001440000000071713425057453013632 0ustar liggesusers\name{mpower} \title{Power of a Matrix} \alias{\%^\%} \usage{ x\%^\%p } \description{ \code{\%^\%} calculates \code{x^p} for the square matrix, \code{x}, by spectral decomposition. } \arguments{ \item{x}{A square matrix.} \item{p}{The power to which the matrix is to be raised.} } \value{ \code{\%^\%} returns the power of a matrix. } \author{J.K. Lindsey} \examples{ \dontrun{ x <- matrix(c(0.4,0.6,0.6,0.4),nrow=2) x\%^\%2 x\%^\%10 x\%^\%20 } } \keyword{array} rmutil/man/FormulaMethods.Rd0000755000176200001440000000324413425057453015577 0ustar liggesusers\name{FormulaMethods} \title{Methods for formulafn Functions} \usage{ \method{covariates}{formulafn}(z, ...) \method{formula}{formulafn}(x, ...) model(z, ...) parameters(z, ...) \method{print}{formulafn}(x, ...) } \alias{covariates.formulafn} \alias{formula.formulafn} \alias{model} \alias{model.formulafn} \alias{parameters} \alias{parameters.formulafn} \alias{print.formulafn} \description{ Methods for accessing the contents of a function created from formula produced by \code{\link[rmutil]{finterp}} or a function modified by \code{\link[rmutil]{fnenvir}}. \code{covariates}: extract the names of the covariates. \code{formula}: extract the formula used to produce the function (\code{\link[rmutil]{finterp}} only). \code{model}: extract the model function or model matrix if W&R notation was used. \code{parameters}: extract the names of the parameters. } \arguments{ \item{x,z}{A function of class, \code{formulafn}.} \item{...}{Arguments to other functions.} } \value{ These methods extract information about functions of class, \code{formulafn}, created by \code{\link[rmutil]{finterp}} or \code{\link[rmutil]{fnenvir}}. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{finterp}}, \code{\link[rmutil]{fnenvir}}. 
} \examples{ x1 <- rpois(20,2) x2 <- rnorm(20) # # Wilkinson and Rogers formula with three parameters fn1 <- finterp(~x1+x2) fn1 covariates(fn1) formula(fn1) model(fn1) parameters(fn1) # # nonlinear formula with unknowns fn2 <- finterp(~exp(b0+b1*x1+b2*x2)) fn2 covariates(fn2) formula(fn2) model(fn2) parameters(fn2) # # function transformed by fnenvir fn3 <- fnenvir(function(p) p[1]+p[2]*x1) covariates(fn3) formula(fn3) model(fn3) parameters(fn3) } \keyword{manip} rmutil/man/GWeibull.Rd0000755000176200001440000000335113425057453014357 0ustar liggesusers\name{Generalized Weibull} \title{Generalized Weibull Distribution} \usage{ dgweibull(y, s, m, f, log=FALSE) pgweibull(q, s, m, f) qgweibull(p, s, m, f) rgweibull(n, s, m, f) } \alias{dgweibull} \alias{pgweibull} \alias{qgweibull} \alias{rgweibull} \description{ These functions provide information about the generalized Weibull distribution, also called the exponentiated Weibull, with scale parameter equal to \code{m}, shape equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The generalized Weibull distribution has density \deqn{ f(y) = \frac{\sigma \nu y^{\sigma-1} (1-\exp(-(y/\mu)^\sigma))^{\nu-1} \exp(-(y/\mu)^\sigma)}{\mu^\sigma}}{ f(y) = s f y^(s-1) (1-exp(-(y/m)^s))^(f-1) exp(-(y/m)^s)/m^s} where \eqn{\mu}{m} is the scale parameter of the distribution, \eqn{\sigma}{s} is the shape, and \eqn{\nu}{f} is the family parameter. \eqn{\nu=1}{f=1} gives a Weibull distribution, for \eqn{\sigma=1}{s=1}, \eqn{\nu<0}{f<0} a generalized F distribution, and for \eqn{\sigma>0}{s>0}, \eqn{\nu\leq0}{f<=0} a Burr type XII distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dweibull}} for the Weibull distribution, \code{\link{df}} for the F distribution, \code{\link[rmutil]{dburr}} for the Burr distribution. } \examples{ dgweibull(5, 1, 3, 2) pgweibull(5, 1, 3, 2) qgweibull(0.65, 1, 3, 2) rgweibull(10, 1, 3, 2) } \keyword{distribution} rmutil/man/BetaBinom.Rd0000755000176200001440000000523313667525730014514 0ustar liggesusers\name{Beta Binomial} \title{Beta Binomial Distribution} \usage{ dbetabinom(y, size, m, s, log=FALSE) pbetabinom(q, size, m, s) qbetabinom(p, size, m, s) rbetabinom(n, size, m, s) } \alias{dbetabinom} \alias{pbetabinom} \alias{qbetabinom} \alias{rbetabinom} \description{ These functions provide information about the beta binomial distribution with parameters \code{m} and \code{s}: density, cumulative distribution, quantiles, and random generation. Compared to the parameterization of `VGAM::pbetabinom.ab`, \code{m = alpha/(alpha+beta)} and \code{s = (alpha+beta)}. See examples. 
The beta binomial distribution with total \eqn{= n} and \code{prob} \eqn{= m} has density \deqn{p(y) = \frac{B(y+\sigma \mu, n-y+\sigma*(1-\mu)) {n \choose y} }{B(s m,s (1-m))}% }{p(y) = B(y+s m,n-y+s (1-m)) Choose(n,y) / B(s m,s (1-m))} for \eqn{y = 0, \ldots, n} where \eqn{B()} is the beta function.} \arguments{ \item{y}{vector of frequencies} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{size}{vector of totals} \item{m}{vector of probabilities of success; Compared to the parameterization of `VGAM::pbetabinom.ab`, \code{m = alpha/(alpha+beta)} where \code{shape1=alpha} and \code{shape2=beta}. See examples.} \item{s}{vector of overdispersion parameters; Compared to the parameterization of `VGAM::pbetabinom.ab`, \code{s = (alpha+beta)} where \code{shape1=alpha} and \code{shape2=beta}. See examples.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dbinom}} for the binomial, \code{\link[rmutil]{ddoublebinom}} for the double binomial, and \code{\link[rmutil]{dmultbinom}} for the multiplicative binomial distribution. } \examples{ # compute P(45 < y < 55) for y beta binomial(100,0.5,1.1) sum(dbetabinom(46:54, 100, 0.5, 1.1)) pbetabinom(54,100,0.5,1.1)-pbetabinom(45,100,0.5,1.1) pbetabinom(2,10,0.5,1.1) qbetabinom(0.33,10,0.5,1.1) rbetabinom(10,10,0.5,1.1) ## compare to VGAM \dontrun{ # The beta binomial distribution with total = n and prob = m has density # # p(y) = B(y+s m,n-y+s (1-m)) Choose(n,y) / B(s m,s (1-m)) # # for y = 0, …, n where B() is the beta function. ## in `rmutil` from the .Rd file (excerpt above), the "alpha" is s*m ## in `rmutil` from the .Rd file (excerpt above), the "beta" is s*(1-m) ## in `VGAM`, rho is 1/(1+alpha+beta) qq = 2.2 zz = 100 alpha = 1.1 beta = 2 VGAM::pbetabinom.ab(q=qq, size=zz, shape1=alpha, shape2=beta) ## for VGAM `rho` rr = 1/(1+alpha+beta) VGAM::pbetabinom (q=qq, size=zz, prob=mm, rho = rr) ## for rmutil `m` and `s`: mm = alpha / (alpha+beta) ss = (alpha+beta) rmutil::pbetabinom(q=qq, size=zz, m=mm, s=ss ) } } \keyword{distribution} rmutil/man/PvfPoisson.Rd0000755000176200001440000000442513425057453014756 0ustar liggesusers\name{PvfPoisson} \title{Power Variance Function Poisson Distribution} \usage{ dpvfpois(y, m, s, f, log=FALSE) ppvfpois(q, m, s, f) qpvfpois(p, m, s, f) rpvfpois(n, m, s, f) } \alias{dpvfpois} \alias{ppvfpois} \alias{qpvfpois} \alias{rpvfpois} \description{ These functions provide information about the overdispersed power variance function Poisson distribution with parameters \code{m}, \code{s}, and \code{f}: density, cumulative distribution, quantiles, and random generation. This function is obtained from a Poisson distribution as a mixture with a power variance distribution. In the limit, for \code{f=0}, the mixing distribution is gamma so that it is a negative binomial distribution. For \code{f=0.5}, the mixing distribution is inverse Gaussian. For \code{f<0}, the mixing distribution is a compound distribution of the sum of a Poisson number of gamma distributions. For \code{f=1}, it is undefined. The power variance function Poisson distribution with \code{m} \eqn{= \mu}, the mean, \code{s} \eqn{= \theta}, and \code{f} \eqn{= \alpha} has density \deqn{p(y) = {\exp(-\mu((\theta+1)^\alpha/\theta^\alpha-\theta)/\alpha)\over y!} \sum_{i=1}^y c_{yi}(\alpha)\mu^i(\theta+1)^{i\alpha-y}/\theta^{i(\alpha-1)} }{p(y) = (exp(-m((s+1)^f/s^f-s)/f) / y!) 
sum_{i=1}^y c_{yi}(f) m^i (s+1)^{if-y} / s^{i(f-1)}} for \eqn{y = 0, \ldots}, where \code{c_{yi}(f)} are coefficients obtained by recursion. } \arguments{ \item{y}{vector of counts} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{scalar or vector of means} \item{s}{scalar or vector of overdispersion parameters} \item{f}{scalar or vector of family parameters, all < 1} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dpois}} for the Poisson, \code{\link[rmutil]{ddoublepois}} for the double Poisson, \code{\link[rmutil]{dmultpois}} for the multiplicative Poisson, \code{\link[rmutil]{dconsul}} for the Consul generalized Poisson, \code{\link[rmutil]{dgammacount}} for the gamma count, and \code{\link{dnbinom}} for the negative binomial distribution. } \examples{ dpvfpois(5,10,0.9,0.5) ppvfpois(5,10,0.9,0.5) qpvfpois(0.85,10,0.9,0.5) rpvfpois(10,10,0.9,0.5) } \keyword{distribution} rmutil/man/GInvGauss.Rd0000755000176200001440000000326413425057453014516 0ustar liggesusers\name{Generalized Inverse Gaussian} \title{Generalized Inverse Gaussian Distribution} \usage{ dginvgauss(y, m, s, f, log=FALSE) pginvgauss(q, m, s, f) qginvgauss(p, m, s, f) rginvgauss(n, m, s, f) } \alias{dginvgauss} \alias{pginvgauss} \alias{qginvgauss} \alias{rginvgauss} \description{ These functions provide information about the generalized inverse Gaussian distribution with mean equal to \code{m}, dispersion equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The generalized inverse Gaussian distribution has density \deqn{ f(y) = \frac{y^{\nu-1}}{2 \mu^\nu K(1/(\sigma \mu),abs(\nu))} \exp(-(1/y+y/\mu^2)/(2*\sigma))}{ f(y) = y^(f-1)/(2 m^f K(1/(s m),abs(f))) exp(-(1/y+y/m^2)/(2*s))} where \eqn{\mu}{m} is the mean of the distribution, \eqn{\sigma}{s} the dispersion, \eqn{\nu}{f} is the family parameter, and \eqn{K()}{K()} is the fractional Bessel function of the third kind. \eqn{\nu=-1/2}{f=-1/2} yields an inverse Gaussian distribution, \eqn{\sigma=\infty}{s=infinity}, \eqn{\nu>0}{f>0} a gamma distribution, and \eqn{\nu=0}{f=0} a hyperbola distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of means.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{dinvgauss}} for the inverse Gaussian distribution. } \examples{ dginvgauss(10, 3, 1, 1) pginvgauss(10, 3, 1, 1) qginvgauss(0.4, 3, 1, 1) rginvgauss(10, 3, 1, 1) } \keyword{distribution} rmutil/man/rmna.Rd0000755000176200001440000000743513425057453013611 0ustar liggesusers\name{rmna} \title{Create a repeated Object, Removing NAs} \alias{rmna} \usage{ rmna(response, ccov=NULL, tvcov=NULL)} \description{ \code{rmna} forms an object of class, \code{repeated}, from a \code{response} object and possibly time-varying or intra-individual covariate (\code{tvcov}), and time-constant or inter-individual covariate (\code{tccov}) objects, removing any observations where response and covariate values have NAs. Subjects must be in the same order in all (three) objects to be combined. Such objects can be printed and plotted. 
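For example (a minimal added sketch; the behaviour assumed here is the NA removal described above):

y <- matrix(rnorm(20), ncol=5)
y[2,2] <- NA
resp <- restovec(y, times=c(1,3,6,10,15))
reps <- rmna(resp)
nobs(reps)   # individual 2 should now have one fewer observation than the others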
Methods are available for extracting the response, the numbers of observations per individual, the times, the weights, the units of measurement/Jacobian, the nesting variable, the covariates, and their names: \code{\link[rmutil]{response}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{weights}}, \code{\link[rmutil]{delta}}, \code{\link[rmutil]{nesting}}, \code{\link[rmutil]{covariates}}, and \code{\link[rmutil]{names}}. } \arguments{ \item{response}{An object of class, \code{response} (created by \code{\link[rmutil]{restovec}}), containing the response variable information.} \item{ccov}{An object of class, \code{tccov} (created by \code{\link[rmutil]{tcctomat}}), containing the time-constant or inter-individual covariate information.} \item{tvcov}{An object of class, \code{tvcov} (created by \code{\link[rmutil]{tvctomat}}), containing the time-varying or intra-individual covariate information.} } \value{ Returns an object of class, \code{repeated}, containing a list of the response object (\code{z$response}, so that, for example, the response vector is \code{z$response$y}; see \code{\link[rmutil]{restovec}}), and possibly the two classes of covariate objects (\code{z$ccov} and \code{z$tvcov}; see \code{\link[rmutil]{tcctomat}} and \code{\link[rmutil]{tvctomat}}). } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{DataMethods}}, \code{\link[rmutil]{covariates}}, \code{\link[rmutil]{covind}}, \code{\link[rmutil]{delta}}, \code{\link[rmutil]{dftorep}}, \code{\link[rmutil]{lvna}}, \code{\link[rmutil]{names}}, \code{\link[rmutil]{nesting}}, \code{\link[rmutil]{nobs}}, \code{\link[rmutil]{read.list}}, \code{\link[rmutil]{read.surv}}, \code{\link[rmutil]{response}}, \code{\link[rmutil]{resptype}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{tcctomat}}, \code{\link[rmutil]{times}}, \code{\link[rmutil]{transform}}, \code{\link[rmutil]{tvctomat}}, \code{\link[rmutil]{units}}, \code{\link[rmutil]{weights}} } \examples{ y <- matrix(rnorm(20),ncol=5) tt <- c(1,3,6,10,15) print(resp <- restovec(y,times=tt)) x <- c(0,0,1,1) tcc <- tcctomat(x) z <- matrix(rpois(20,5),ncol=5) tvc <- tvctomat(z) print(reps <- rmna(resp, tvcov=tvc, ccov=tcc)) response(reps) response(reps, nind=2:3) times(reps) nobs(reps) weights(reps) covariates(reps) covariates(reps,names="x") covariates(reps,names="z") names(reps) nesting(reps) # because individuals are the only nesting, this is the same as covind(reps) # # use in glm rm(y,x,z) glm(y~x+z,data=as.data.frame(reps)) # # binomial y <- matrix(rpois(20,5),ncol=5) print(respb <- restovec(y,totals=y+matrix(rpois(20,5),ncol=5),times=tt)) print(repsb <- rmna(respb, tvcov=tvc, ccov=tcc)) response(repsb) # # censored data y <- matrix(rweibull(20,2,5),ncol=5) print(respc <- restovec(y,censor=matrix(rbinom(20,1,0.9),ncol=5),times=tt)) print(repsc <- rmna(respc, tvcov=tvc, ccov=tcc)) # if there is no censoring, censor indicator is not printed response(repsc) # # nesting clustered within individuals nest <- c(1,1,2,2,2) print(respn <- restovec(y,censor=matrix(rbinom(20,1,0.9),ncol=5), times=tt,nest=nest)) print(repsn <- rmna(respn, tvcov=tvc, ccov=tcc)) response(respn) times(respn) nesting(respn) } \keyword{manip} rmutil/man/capply.Rd0000644000176200001440000000057513425057453014137 0ustar liggesusers\name{capply} \alias{capply} \title{A Fast Simplified Version of \code{tapply}} \description{ a fast simplified version of \code{tapply} } \usage{ capply(x, index, fcn=sum) } \arguments{ \item{x}{x} \item{index}{index} \item{fcn}{default sum} } 
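A short usage sketch (added here because the file has no examples section; it assumes only the behaviour described under Value below):

x <- c(1, 2, 3, 4, 5, 6)
index <- c(1, 1, 2, 2, 3, 3)
capply(x, index)            # group sums: 3 7 11
capply(x, index, fcn=mean)  # group means: 1.5 3.5 5.5
tapply(x, index, sum)       # comparable result, but returned as a named array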
\details{ a fast simplified version of tapply } \value{ Returns \code{ans} where \code{for(i in split(x,index))ans <- c(ans,fcn(i))}. } rmutil/man/wr.Rd0000755000176200001440000000152714326267501013276 0ustar liggesusers\name{wr} \title{Find the Response Vector and Design Matrix for a W&R Model Formula} \alias{wr} \usage{ wr(formula, data=NULL, expand=TRUE) } \description{ \code{wr} gives the response vector and design matrix for a formula in Wilkinson and Rogers notation. } \arguments{ \item{formula}{A model formula.} \item{data}{A data object or environment.} \item{expand}{If FALSE, the covariates are read from the \code{tccov} object without expanding to the length of the response variable.} } \value{ \code{wr} returns a list containing the response vector (\code{z$response}), if included in the formula, and the design matrix (\code{z$design}) from the data object or environment supplied or from the global environment for the formula supplied. } \author{J.K. Lindsey} \examples{ y <- rnorm(20) x <- gl(4,5) z <- rpois(20,2) wr(y~x+z) } \keyword{programming} rmutil/man/runge.kutta.Rd0000755000176200001440000000146313425057453015116 0ustar liggesusers\name{runge.kutta} \title{Runge-Kutta Method for Solving Differential Equations} \alias{runge.kutta} \usage{ runge.kutta(f, initial, x) } \description{ \code{runge.kutta} numerically solves a differential equation by the fourth-order Runge-Kutta method. } \arguments{ \item{f}{A function \code{dy/dx=func(y,x)}.} \item{initial}{The initial value of \code{y}.} \item{x}{A vector of values of \code{x} for which the values or \code{y} are required.} } \value{ A vector of values of \code{y} as solution of the function \code{f} corresponding to the values in \code{x}. } \author{J.K. Lindsey} \examples{ fn <- function(y,x) (x*y-y^2)/x^2 soln <- runge.kutta(fn,2,seq(1,3,by=1/128)) ## exact solution exact <- seq(1,3,by=1/128)/(0.5+log(seq(1,3,by=1/128))) rbind(soln, exact) } \keyword{math} rmutil/man/GExtVal.Rd0000755000176200001440000000322313425057453014155 0ustar liggesusers\name{Generalized Extreme Value} \title{Generalized Extreme Value Distribution} \usage{ dgextval(y, s, m, f, log=FALSE) pgextval(q, s, m, f) qgextval(p, s, m, f) rgextval(n, s, m, f) } \alias{dgextval} \alias{pgextval} \alias{qgextval} \alias{rgextval} \description{ These functions provide information about the generalized extreme value distribution with location parameter equal to \code{m}, dispersion equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The generalized extreme value distribution has density \deqn{ f(y) = y^{\nu-1} \exp(y^\nu/\nu) \frac{\sigma}{\mu} \frac{\exp(y^\nu/\nu)}{\mu^{\sigma-1}/(1-I(\nu>0)+sign(\nu) exp(-\mu^-\sigma))}\exp(-(\exp(y^\nu\nu)/\mu)^\sigma)}{ f(y) = y^(f-1) exp(y^f/f) (s/m) (exp(y^f/f)/m)^(s-1) exp(-(exp(y^f/f)/m)^s)/(1-I(f>0)+sign(f) exp(-m^-s))} where \eqn{\mu}{m} is the location parameter of the distribution, \eqn{\sigma}{s} is the dispersion, \eqn{\nu}{f} is the family parameter, \eqn{I()} is the indicator function, and \eqn{y>0}. \eqn{\nu=1}{f=1} a truncated extreme value distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. 
Lindsey} \seealso{ \code{\link{dweibull}} for the Weibull distribution. } \examples{ dgextval(1, 2, 1, 2) pgextval(1, 2, 1, 2) qgextval(0.82, 2, 1, 2) rgextval(10, 2, 1, 2) } \keyword{distribution} rmutil/man/int.Rd0000755000176200001440000000424614175120022013425 0ustar liggesusers\name{int} \title{Vectorized Numerical Integration} \alias{int} \usage{ int(f, a=-Inf, b=Inf, type="Romberg", eps=0.0001, max=NULL, d=NULL, p=0) } \description{ \code{int} performs numerical integration of a given function using either Romberg integration or algorithm 614 of the collected algorithms from ACM. Only the former is vectorized. The latter uses formulae optimal in certain Hardy spaces h(p,d). Functions may have singularities at one or both end-points of the interval (a,b). } \arguments{ \item{f}{The function (of one variable) to integrate, returning either a scalar or a vector.} \item{a}{A scalar or vector (only Romberg) giving the lower bound(s). A vector cannot contain both -Inf and finite values.} \item{b}{A scalar or vector (only Romberg) giving the upper bound(s). A vector cannot contain both Inf and finite values.} \item{type}{The algorithm to be used, by default Romberg integration. Otherwise, it uses the TOMS614 algorithm.} \item{eps}{Precision.} \item{max}{For Romberg, the maximum number of steps, by default set to 16. For TOMS614, the maximum number of function evaluations, by default set to 100.} \item{d}{For Romberg, the number of extrapolation points so that 2d is the order of integration, by default set to 5; d=2 is Simpson's rule. For TOMS614, heuristic termination = any real number; deterministic termination = a number in the range 0 < d < pi/2 by default, set to 1.} \item{p}{For TOMS614, p = 0: heuristic termination, p = 1: deterministic termination with the infinity norm, p > 1: deterministic termination with the p-th norm.} } \value{ The vector of values of the integrals of the function supplied. } \author{J.K. Lindsey} \references{ ACM algorithm 614 appeared in ACM-Trans. Math. Software, Vol.10, No. 2, Jun., 1984, p. 152-160. See also Sikorski,K., Optimal quadrature algorithms in HP spaces, Num. Math., 39, 405-410 (1982). } \examples{ f <- function(x) sin(x)+cos(x)-x^2 int(f, a=0, b=2) int(f, a=0, b=2, type="TOMS614") # f <- function(x) exp(-(x-2)^2/2)/sqrt(2*pi) int(f, a=0:3) int(f, a=0:3, d=2) 1-pnorm(0:3, 2) # f <- function(x) dnorm(x) int(f, a=-Inf, b=qnorm(0.975)) int(f, a=-Inf, b=qnorm(0.975), type="TOMS614", max=1e2) } \keyword{math} rmutil/man/MultBinom.Rd0000755000176200001440000000315713425057453014557 0ustar liggesusers\name{Multiplicative Binomial} \title{Multiplicative Binomial Distribution} \usage{ dmultbinom(y, size, m, s, log=FALSE) pmultbinom(q, size, m, s) qmultbinom(p, size, m, s) rmultbinom(n, size, m, s) } \alias{dmultbinom} \alias{pmultbinom} \alias{qmultbinom} \alias{rmultbinom} \description{ These functions provide information about the multiplicative binomial distribution with parameters \code{m} and \code{s}: density, cumulative distribution, quantiles, and random generation. The multiplicative binomial distribution with total \eqn{= n} and \code{prob} \eqn{= m} has density \deqn{p(y) = c({n}, {m}, {s}){n \choose y} {m}^{y} {(1-m)}^{n-y} {s}^{(y(n-y))} }{p(y) = c(n,m,s) Choose(n,y) m^y (1-m)^(n-y) s^(y(n-y))} for \eqn{y = 0, \ldots, n}, where c(.) is a normalizing constant. 
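One consequence worth noting (an added check, not in the original text): when \code{s=1} the factor \code{s^(y(n-y))} is 1 and the normalizing constant should also be 1, so the distribution reduces to the ordinary binomial, e.g.

dmultbinom(0:3, 3, 0.5, 1)
dbinom(0:3, 3, 0.5)   # expected to match when s = 1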
} \arguments{ \item{y}{vector of frequencies} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{size}{vector of totals} \item{m}{vector of probabilities of success} \item{s}{vector of overdispersion parameters} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dbinom}} for the binomial, \code{\link[rmutil]{ddoublebinom}} for the double binomial, and \code{\link[rmutil]{dbetabinom}} for the beta binomial distribution. } \examples{ # compute P(45 < y < 55) for y multiplicative binomial(100,0.5,1.1) sum(dmultbinom(46:54, 100, 0.5, 1.1)) pmultbinom(54, 100, 0.5, 1.1)-pmultbinom(45, 100, 0.5, 1.1) pmultbinom(2,10,0.5,1.1) qmultbinom(0.025,10,0.5,1.1) rmultbinom(10,10,0.5,1.1) } \keyword{distribution} rmutil/man/Simplex.Rd0000755000176200001440000000261713425057453014272 0ustar liggesusers\name{Simplex} \title{Simplex Distribution} \usage{ dsimplex(y, m, s, log=FALSE) psimplex(q, m, s) qsimplex(p, m, s) rsimplex(n, m, s) } \alias{dsimplex} \alias{psimplex} \alias{qsimplex} \alias{rsimplex} \description{ These functions provide information about the simplex distribution with location parameter equal to \code{m} and shape equal to \code{s}: density, cumulative distribution, quantiles, and random generation. The simplex distribution has density \deqn{ f(y) = \frac{1}{\sqrt(2\pi\sigma(y(1-y))^3)} \exp(-((y-\mu)/(\mu(1-\mu)))^2/(2y(1-y)\sigma))}{ f(y) = exp(-((y-m)/(m(1-m)))^2/(2y(1-y)s))/sqrt(2 pi s(y(1-y))^3)} where \eqn{\mu}{m} is the location parameter of the distribution and \eqn{\sigma}{s} is the shape, and \eqn{0 1}. } \arguments{ \item{y}{vector of counts} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{scalar or vector of means} \item{s}{scalar or vector of overdispersion parameters, all of which must lie in (0,1). } \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dpois}} for the Poisson, \code{\link[rmutil]{ddoublepois}} for the double Poisson, \code{\link[rmutil]{dpvfpois}} for the power variance function Poisson, \code{\link[rmutil]{dconsul}} for the Consul generalized Poisson, \code{\link[rmutil]{dgammacount}} for the gamma count, and \code{\link{dnbinom}} for the negative binomial distribution. } \examples{ dmultpois(5,10,0.9) pmultpois(5,10,0.9) qmultpois(0.85,10,0.9) rmultpois(10,10,0.9) } \keyword{distribution} rmutil/man/TwoSidedPower.Rd0000755000176200001440000000337313425057453015410 0ustar liggesusers\name{Two-Sided Power} \title{Two-Sided Power Distribution} \usage{ dtwosidedpower(y, m, s=2, log=FALSE) ptwosidedpower(q, m, s=2) qtwosidedpower(p, m, s=2) rtwosidedpower(n, m, s=2) } \alias{dtwosidedpower} \alias{ptwosidedpower} \alias{qtwosidedpower} \alias{rtwosidedpower} \description{ These functions provide information about the two-sided power distribution with location parameter equal to \code{m} and shape equal to \code{s}: density, cumulative distribution, quantiles, and random generation. 
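A convenient special case (an added note, assuming the density given below): \code{s=2} gives the triangular distribution on (0,1) with mode \code{m}, for example

dtwosidedpower(0.3, 0.5, 2)   # 2*(0.3/0.5) = 1.2
dtwosidedpower(0.8, 0.5, 2)   # 2*((1-0.8)/(1-0.5)) = 0.8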
The two-sided power distribution has density \deqn{ f(y) = s(\frac{y}{m})^{s-1}, y<=m}{ f(y) = s(y/m)^(s-1), y<=m} \deqn{ f(y) =s(\frac{1-y}{1-m})^{s-1}, y>=m}{ f(y) = s((1-y)/(1-m))^(s-1), y>=m} where \eqn{\mu}{m} is the location parameter of the distribution and \eqn{\sigma}{s} is the shape, and \eqn{0=m} and else \deqn{ f(y) = \frac{\nu\exp((y-\mu)/(\nu\sigma))}{(1+\nu^2)\sigma}}{ f(y) = f*exp((y-m)/(f*s))/((1+f^2)*s)} where \eqn{\mu}{m} is the location parameter of the distribution, \eqn{\sigma}{s} is the dispersion, and \eqn{\nu}{f} is the skew. The mean is given by \eqn{\mu+\frac{\sigma(1-\nu^2)}{\sqrt{2}\nu}}{m + (s * (1 - f^2)) / (sqrt(2) * f)} and the variance by \eqn{\frac{\sigma^2(1+\nu^4)}{2\nu^2}}{(s^2 * (1 + f^4)) / (2 * f^2)}. Note that this parametrization of the skew (family) parameter is different than that used for the multivariate skew Laplace distribution in \code{elliptic}. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of skew parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dexp}} for the exponential distribution, \code{\link{dcauchy}} for the Cauchy distribution, and \code{\link[rmutil]{dlaplace}} for the Laplace distribution. } \examples{ dskewlaplace(5, 2, 1, 0.5) pskewlaplace(5, 2, 1, 0.5) qskewlaplace(0.95, 2, 1, 0.5) rskewlaplace(10, 2, 1, 0.5) } \keyword{distribution} rmutil/man/BoxCox.Rd0000755000176200001440000000303213425057453014043 0ustar liggesusers\name{Box-Cox} \title{Box-Cox Distribution} \usage{ dboxcox(y, m, s=1, f=1, log=FALSE) pboxcox(q, m, s=1, f=1) qboxcox(p, m, s=1, f=1) rboxcox(n, m, s=1, f=1) } \alias{dboxcox} \alias{pboxcox} \alias{qboxcox} \alias{rboxcox} \description{ These functions provide information about the Box-Cox distribution with location parameter equal to \code{m}, dispersion equal to \code{s}, and power transformation equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The Box-Cox distribution has density \deqn{ f(y) = \frac{1}{\sqrt{2 \pi \sigma^2}} \exp(-((y^\nu/\nu-\mu)^2/(2 \sigma^2)))/ (1-I(\nu<0)-sign(\nu)*pnorm(0,\mu,sqrt(\sigma)))}{ f(y) = 1/sqrt(2 pi s^2) exp(-((y^f/f - mu)^2/(2 s^2)))/ (1-I(f<0)-sign(f)*pnorm(0,m,sqrt(s)))} where \eqn{\mu}{m} is the location parameter of the distribution, \eqn{\sigma}{s} is the dispersion, \eqn{\nu}{f} is the family parameter, \eqn{I()} is the indicator function, and \eqn{y>0}. \eqn{\nu=1}{f=1} gives a truncated normal distribution. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of power parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dnorm}} for the normal or Gaussian distribution. 
} \examples{ dboxcox(2, 5, 5, 2) pboxcox(2, 5, 5, 2) qboxcox(0.1, 5, 5, 2) rboxcox(10, 5, 5, 2) } \keyword{distribution} rmutil/man/Hjorth.Rd0000755000176200001440000000241213425057453014100 0ustar liggesusers\name{Hjorth} \title{Hjorth Distribution} \usage{ dhjorth(y, m, s, f, log=FALSE) phjorth(q, m, s, f) qhjorth(p, m, s, f) rhjorth(n, m, s, f) } \alias{dhjorth} \alias{phjorth} \alias{qhjorth} \alias{rhjorth} \description{ These functions provide information about the Hjorth distribution with location parameter equal to \code{m}, dispersion equal to \code{s}, and family parameter equal to \code{f}: density, cumulative distribution, quantiles, log hazard, and random generation. The Hjorth distribution has density \deqn{ f(y) = (1+\sigma y)^{-\nu/\sigma} \exp(-(y/\mu)^2/2) (\frac{y}{\mu^2}+\frac{\nu}{1+\sigma y})}{ f(y) = (1+s y)^(-f/s) exp(-(y/m)^2/2) (y/m^2+f/(1+s y))} where \eqn{\mu}{m} is the location parameter of the distribution, \eqn{\sigma}{s} is the dispersion, and \eqn{\nu}{f} is the family parameter. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{f}{vector of family parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \examples{ dhjorth(5, 5, 5, 2) phjorth(5, 5, 5, 2) qhjorth(0.8, 5, 5, 2) rhjorth(10, 5, 5, 2) } \keyword{distribution} rmutil/man/finterp.Rd0000755000176200001440000002020313425057453014307 0ustar liggesusers\name{finterp} \title{Formula Interpreter} \alias{finterp} \alias{finterp.data.frame} \alias{finterp.default} \alias{finterp.repeated} \alias{finterp.tccov} \alias{finterp.tvcov} \usage{ finterp(.z, ...) \method{finterp}{default}(.z, .envir=parent.frame(), .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, .expand=TRUE, .intercept=TRUE, .old=NULL, .response=FALSE, ...) } \description{ \code{finterp} translates a model formula into a function of the unknown parameters or of a vector of them. Such language formulae can either be in Wilkinson and Rogers notation or be expressions containing both known (existing) covariates and unknown (not existing) parameters. In the latter, factor variables cannot be used and parameters must be scalars. The covariates in the formula are sought in the environment or in the data object provided. If the data object has class, \code{repeated} or \code{response}, then the key words, \code{times} will use the response times from the data object as a covariate, \code{individuals} will use the index for individuals as a factor covariate, and \code{nesting} the index for nesting as a factor covariate. The latter two only work for W&R notation. Note that, in parameter displays, formulae in Wilkinson and Rogers notation use variable names whereas those with unknowns use the names of these parameters, as given in the formulae, and that the meaning of operators (*, /, :, etc.) is different in the two cases. } \arguments{ \item{.z}{A model formula beginning with ~, either in Wilkinson and Rogers notation or containing unknown parameters. If it contains unknown parameters, it can have several lines so that, for example, local variables can be assigned temporary values. 
In this case, enclose the formula in curly brackets.} \item{.envir}{The environment in which the formula is to be interpreted or a data object of class, \code{repeated}, \code{tccov}, or \code{tvcov}.} \item{.formula}{If TRUE and the formula is in Wilkinson and Rogers notation, just returns the formula.} \item{.vector}{If FALSE and the formula contains unknown parameters, the function returned has them as separate arguments. If TRUE, it has one argument, the unknowns as a vector, unless certain parameter names are specified in \code{.args}. Always TRUE if \code{.envir} is a data object.} \item{.args}{If \code{.vector} is TRUE, names of parameters that are to be function arguments and not included in the vector.} \item{.start}{The starting index value of the parameter vector in the function returned when \code{.vector} is TRUE.} \item{.name}{Character string giving the name of the data object specified by \code{.envir}. Ignored unless the latter is such an object and only necessary when \code{finterp} is called within other functions.} \item{.expand}{If TRUE, expand functions with only time-constant covariates to return one value per observation instead of one value per individual. Ignored unless \code{.envir} is an object of class, \code{repeated}.} \item{.intercept}{If W&R notation is supplied and \code{.intercept=F}, a model function without intercept is returned.} \item{.old}{The name of an existing object of class \code{formulafn} which has common parameters with the one being created, or a list of such objects. Only used if \code{.vector}=TRUE. The value of \code{.start} should ensure that there is no conflict in indexing the vector.} \item{.response}{If TRUE, any response variable can be used in the function. If FALSE, checks are made that the response is not also used as a covariate.} \item{...}{Arguments passed to other functions.} } \value{ A function, of class \code{formulafn}, of the unknown parameters or of a vector of them is returned. Its attributes give the formula supplied, the model function produced, the covariate names, the parameter names, and the range of values of the index of the parameter vector. If \code{formula} is TRUE and a Wilkinson and Rogers formula was supplied, it is simply returned instead of creating a function. } \author{J.K. 
Lindsey} \seealso{ \code{\link[rmutil]{FormulaMethods}}, \code{\link[rmutil]{covariates}}, \code{\link[rmutil]{fnenvir}}, \code{\link[rmutil]{formula}}, \code{\link[rmutil]{model}}, \code{\link[rmutil]{parameters}} } \examples{ x1 <- rpois(20,2) x2 <- rnorm(20) # # Wilkinson and Rogers formula with three parameters fn1 <- finterp(~x1+x2) fn1 fn1(rep(2,3)) # the same formula with unknowns fn2 <- finterp(~b0+b1*x1+b2*x2) fn2 fn2(rep(2,3)) # # nonlinear formulae with unknowns # log link fn2a <- finterp(~exp(b0+b1*x1+b2*x2)) fn2a fn2a(rep(0.2,3)) # parameters common to two functions fn2b <- finterp(~c0+c1*exp(b0+b1*x1+b2*x2), .old=fn2a, .start=4) fn2b # function returned also depends on values of another function fn2c <- finterp(~fn2+c1*exp(b0+b1*x1+b2*x2), .old=fn2a, .start=4, .args="fn2") fn2c args(fn2c) fn2c(rep(0.2,4),fn2(rep(2,3))) # # compartment model times <- 1:20 # exp() parameters to ensure that they are positive fn3 <- finterp(~exp(absorption-volume)/(exp(absorption)- exp(elimination))*(exp(-exp(elimination)*times)- exp(-exp(absorption)*times))) fn3 fn3(log(c(0.3,3,0.2))) # a more efficient way # (note that parameters do not appear in the same order) form <- ~{ ka <- exp(absorption) ke <- exp(elimination) ka*exp(-volume)/(ka-ke)*(exp(-ke*times)-exp(-ka*times))} fn3a <- finterp(form) fn3a(log(c(0.3,0.2,3))) # # Poisson density y <- rpois(20,5) fn4 <- finterp(~mu^y*exp(-mu)/gamma(y+1)) fn4 fn4(5) dpois(y,5) # # Poisson likelihood # mean parameter fn5 <- finterp(~-y*log(mu)+mu+lgamma(y+1),.vector=FALSE) fn5 likefn1 <- function(p) sum(fn5(mu=p)) nlm(likefn1,p=1) mean(y) # canonical parameter fn5a <- finterp(~-y*theta+exp(theta)+lgamma(y+1),.vector=FALSE) fn5a likefn1a <- function(p) sum(fn5a(theta=p)) nlm(likefn1a,p=1) # # likelihood for Poisson log linear regression y <- rpois(20,fn2a(c(0.2,1,0.4))) nlm(likefn1,p=1) mean(y) likefn2 <- function(p) sum(fn5(mu=fn2a(p))) nlm(likefn2,p=c(1,0,0)) # or likefn2a <- function(p) sum(fn5a(theta=fn2(p))) nlm(likefn2a,p=c(1,0,0)) # # likelihood for Poisson nonlinear regression y <- rpois(20,fn3(log(c(3,0.3,0.2)))) nlm(likefn1,p=1) mean(y) likefn3 <- function(p) sum(fn5(mu=fn3(p))) nlm(likefn3,p=log(c(1,0.4,0.1))) # # envir as data objects y <- matrix(rnorm(20),ncol=5) y[3,3] <- y[2,2] <- NA x1 <- 1:4 x2 <- c("a","b","c","d") resp <- restovec(y) xx <- tcctomat(x1) xx2 <- tcctomat(data.frame(x1,x2)) z1 <- matrix(rnorm(20),ncol=5) z2 <- matrix(rnorm(20),ncol=5) z3 <- matrix(rnorm(20),ncol=5) zz <- tvctomat(z1) zz <- tvctomat(z2,old=zz) reps <- rmna(resp, ccov=xx, tvcov=zz) reps2 <- rmna(resp, ccov=xx2, tvcov=zz) rm(y, x1, x2 , z1, z2) # # repeated objects # # time-constant covariates # Wilkinson and Rogers notation form1 <- ~x1 print(fn1 <- finterp(form1, .envir=reps)) fn1(2:3) print(fn1a <- finterp(form1, .envir=xx)) fn1a(2:3) form1b <- ~x1+x2 print(fn1b <- finterp(form1b, .envir=reps2)) fn1b(2:6) print(fn1c <- finterp(form1b, .envir=xx2)) fn1c(2:6) # with unknown parameters form2 <- ~a+b*x1 print(fn2 <- finterp(form2, .envir=reps)) fn2(2:3) print(fn2a <- finterp(form2, .envir=xx)) fn2a(2:3) # # time-varying covariates # Wilkinson and Rogers notation form3 <- ~z1+z2 print(fn3 <- finterp(form3, .envir=reps)) fn3(2:4) print(fn3a <- finterp(form3, .envir=zz)) fn3a(2:4) # with unknown parameters form4 <- ~a+b*z1+c*z2 print(fn4 <- finterp(form4, .envir=reps)) fn4(2:4) print(fn4a <- finterp(form4, .envir=zz)) fn4a(2:4) # # note: lengths of x1 and z2 differ # Wilkinson and Rogers notation form5 <- ~x1+z2 print(fn5 <- finterp(form5, .envir=reps)) fn5(2:4) # 
with unknown parameters form6 <- ~a+b*x1+c*z2 print(fn6 <- finterp(form6, .envir=reps)) fn6(2:4) # # with times # Wilkinson and Rogers notation form7 <- ~x1+z2+times print(fn7 <- finterp(form7, .envir=reps)) fn7(2:5) form7a <- ~x1+x2+z2+times print(fn7a <- finterp(form7a, .envir=reps2)) fn7a(2:8) # with unknown parameters form8 <- ~a+b*x1+c*z2+e*times print(fn8 <- finterp(form8, .envir=reps)) fn8(2:5) # # with a variable not in the data object form9 <- ~a+b*z1+c*z2+e*z3 print(fn9 <- finterp(form9, .envir=reps)) fn9(2:5) # z3 assumed to be an unknown parameter: fn9(2:6) # # multiline formula form10 <- ~{ tmp <- exp(b) a+tmp*z1+c*z2+d*times} print(fn10 <- finterp(form10, .envir=reps)) fn10(2:5) } \keyword{programming} rmutil/man/fmobj.Rd0000755000176200001440000000221413425057453013737 0ustar liggesusers\name{fmobj} \title{Object Finder} \alias{fmobj} \usage{ fmobj(z, envir=parent.frame()) } \description{ \code{fmobj} inspects a formula and returns a list containing the objects referred to, with indicators as to which are unknown parameters, covariates, factor variables, and functions. } \arguments{ \item{z}{A model formula beginning with ~, either in Wilkinson and Rogers notation or containing unknown parameters.} \item{envir}{The environment in which the formula is to be interpreted.} } \value{ A list, of class \code{fmobj}, containing a character vector (\code{objects}) with the names of the objects used in a formula, and logical vectors indicating which are unknown parameters (\code{parameters}), covariates (\code{covariates}), factor variables (\code{factors}), and functions (\code{functions}). } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{finterp}} } \examples{ x1 <- rpois(20,2) x2 <- rnorm(20) x3 <- gl(2,10) # # W&R formula fmobj(~x1+x2+x3) # # formula with unknowns fmobj(~b0+b1*x1+b2*x2) # # nonlinear formulae with unknowns # log link fmobj(~exp(b0+b1*x1+b2*x2)) } \keyword{programming} rmutil/man/gettvc.Rd0000755000176200001440000000600713425057453014142 0ustar liggesusers\name{gettvc} \title{Find the Most Recent Value of a Time-varying Covariate Before Each Observed Response} \alias{gettvc} \usage{ gettvc(response, times=NULL, tvcov=NULL, tvctimes=NULL, oldtvcov=NULL, ties=TRUE) } \description{ \code{gettvc} finds the most recent value of a time-varying covariate before each observed response and possibly adds them to a list of other time-varying covariates. It compares the times of response observations with those of time-varying covariates to find the most recent observed time-varying covariate for each response. These are either placed in a new object of class, \code{tvcov}, added to an already existing list of matrices containing other time-varying covariates and a new object of class, \code{tvcov}, created, or added to an existing object of class, \code{tvcov}. If there are response observation times before the first covariate time, the covariate for these times is set to zero. } \arguments{ \item{response}{A list of two column matrices with response values and times for each individual, one matrix or dataframe of response values, or an object of class, \code{response} (created by \code{\link[rmutil]{restovec}}).} \item{times}{When \code{response} is a matrix, a vector of possibly unequally spaced times for the response, when they are the same for all individuals or a matrix of times. 
Not necessary if equally spaced.} \item{tvcov}{A list of two column matrices with time-varying covariate values and corresponding times for each individual or one matrix or dataframe of such covariate values. Times need not be the same as for responses.} \item{tvctimes}{When the time-varying covariate is a matrix, a vector of possibly unequally spaced times for the covariate, when they are the same for all individuals or a matrix of times. Not necessary if equally spaced.} \item{oldtvcov}{A list of matrices with time-varying covariate values, observed at the event times in \code{response}, for each individual, or an object of class, \code{tvcov}. If not provided, a new object is created.} \item{ties}{If TRUE, when the response and covariate times are identical, the response depends on that new value (as in observational studies); if FALSE, only the next response depends on that value (for example, if the covariate is a new treatment just applied at that time).} } \value{ An object of class, \code{tvcov}, is returned containing the new time-varying covariate and, possibly, those in \code{oldtvcov}. } \author{J.K. Lindsey and D.F. Heitjan} \seealso{ \code{\link[rmutil]{read.list}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{tvctomat}}. } \examples{ \dontrun{ y <- matrix(rnorm(20), ncol=5) resp <- restovec(y, times=c(1,3,6,10,15)) z <- matrix(rpois(20,5),ncol=5) z # create a new time-varying covariate object for the response newtvc <- gettvc(resp, tvcov=z, tvctimes=c(1,2,5,12,14)) covariates(newtvc) # add another time-varying covariate to the object z2 <- matrix(rpois(20,5),ncol=5) z2 newtvc2 <- gettvc(resp, tvcov=z2, tvctimes=c(0,4,5,12,16), oldtvc=newtvc) covariates(newtvc2) } } \keyword{manip} rmutil/man/Levy.Rd0000755000176200001440000000243213425057453013563 0ustar liggesusers\name{Levy} \title{Levy Distribution} \usage{ dlevy(y, m=0, s=1, log=FALSE) plevy(q, m=0, s=1) qlevy(p, m=0, s=1) rlevy(n, m=0, s=1) } \alias{dlevy} \alias{plevy} \alias{qlevy} \alias{rlevy} \description{ These functions provide information about the Levy distribution with location parameter equal to \code{m} and dispersion equal to \code{s}: density, cumulative distribution, quantiles, and random generation. The Levy distribution has density \deqn{ f(y) = \sqrt{\frac{\sigma}{2 \pi (y-\mu)^3}} \exp(-\sigma/(2 (y-\mu)))}{ f(y) = sqrt(s/(2 pi (y-m)^3)) exp(-s/(2 (y-m)))} where \eqn{\mu}{m} is the location parameter of the distribution and \eqn{\sigma}{s} is the dispersion, and \eqn{y>\mu}{y>m}. } \arguments{ \item{y}{vector of responses.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{m}{vector of location parameters.} \item{s}{vector of dispersion parameters.} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dnorm}} for the normal distribution and \code{\link{dcauchy}} for the Cauchy distribution, two other stable distributions. 
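As an added consistency check (a sketch, not part of the original file): the density above is the standard Levy density with scale \code{s}, whose distribution function has the closed form \code{2*pnorm(-sqrt(s/(y-m)))}, so for example

plevy(5, 2, 1)
2*pnorm(-sqrt(1/(5-2)))   # should agree with the line above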
} \examples{ dlevy(5, 2, 1) plevy(5, 2, 1) qlevy(0.6, 2, 1) rlevy(10, 2, 1) } \keyword{distribution} rmutil/man/DoubleBinom.Rd0000755000176200001440000000327313425057453015047 0ustar liggesusers\name{Double Binomial} \title{Double Binomial Distribution} \usage{ ddoublebinom(y, size, m, s, log=FALSE) pdoublebinom(q, size, m, s) qdoublebinom(p, size, m, s) rdoublebinom(n, size, m, s) } \alias{ddoublebinom} \alias{pdoublebinom} \alias{qdoublebinom} \alias{rdoublebinom} \description{ These functions provide information about the double binomial distribution with parameters \code{m} and \code{s}: density, cumulative distribution, quantiles, and random generation. The double binomial distribution with total \eqn{= n} and \code{prob} \eqn{= m} has density \deqn{p(y) = c({n}, {m}, {s}){n \choose y} {n}^{{n}{s}} ({m}/{y})^({y}{s}) {((1-m)/(n-y))}^(({n-y})s y) {y}^{y} {(n-y)}^{(n-y)})% }{p(y) = c(n,m,s) Choose(n,y) n^(n s) (m/y)^(y s) ((1-m)/(n-y))^(s(n-y)) y^y (n-y)^(n-y)} for \eqn{y = 0, \ldots, n}, where c(.) is a normalizing constant. } \arguments{ \item{y}{vector of frequencies} \item{q}{vector of quantiles} \item{p}{vector of probabilities} \item{n}{number of values to generate} \item{size}{vector of totals} \item{m}{vector of probabilities of success} \item{s}{vector of overdispersion parameters} \item{log}{if TRUE, log probabilities are supplied.} } \author{J.K. Lindsey} \seealso{ \code{\link{dbinom}} for the binomial, \code{\link[rmutil]{dmultbinom}} for the multiplicative binomial, and \code{\link[rmutil]{dbetabinom}} for the beta binomial distribution. } \examples{ # compute P(45 < y < 55) for y double binomial(100,0.5,1.1) sum(ddoublebinom(46:54, 100, 0.5, 1.1)) pdoublebinom(54, 100, 0.5, 1.1)-pdoublebinom(45, 100, 0.5, 1.1) pdoublebinom(2,10,0.5,1.1) qdoublebinom(0.05,10,0.5,1.1) rdoublebinom(10,10,0.5,1.1) } \keyword{distribution} rmutil/man/fnenvir.Rd0000755000176200001440000000755513425057453014326 0ustar liggesusers\name{fnenvir} \title{Check Covariates and Parameters of a Function} \alias{fnenvir} \alias{fnenvir.data.frame} \alias{fnenvir.default} \alias{fnenvir.repeated} \alias{fnenvir.tccov} \alias{fnenvir.tvcov} \usage{ fnenvir(.z, ...) \method{fnenvir}{default}(.z, .envir=parent.frame(), .name=NULL, .expand=TRUE, .response=FALSE, ...) } \description{ \code{fnenvir} finds the covariates and parameters in a function and can modify it so that the covariates used in it are found in the data object specified by \code{.envir}. If the data object has class, \code{repeated}, the key word \code{times} in a function will use the response times from the data object as a covariate. } \arguments{ \item{.z}{A function.} \item{.envir}{The environment or data object of class, \code{repeated}, \code{tccov}, or \code{tvcov}, in which the function is to be interpreted.} \item{.name}{Character string giving the name of the data object specified by \code{.envir}. Ignored unless the latter is such an object and only necessary when \code{fnenvir} is called within other functions.} \item{.expand}{If TRUE, expand functions with only time-constant covariates to return one value per observation instead of one value per individual. Ignored unless \code{.envir} is an object of class, \code{repeated}.} \item{.response}{If TRUE, any response variable can be used in the function. 
If FALSE, checks are made that the response is not also used as a covariate.} \item{...}{Arguments passed to other functions.} } \value{ The (modified) function, of class \code{formulafn}, is returned with its attributes giving the (new) model function, the covariate names, and the parameter names.} \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{FormulaMethods}},\code{\link[rmutil]{covariates}}, \code{\link[rmutil]{finterp}}, \code{\link[rmutil]{model}}, \code{\link[rmutil]{parameters}} } \examples{ fn <- function(p) a+b*x fnenvir(fn) fn <- function(p) a+p*x fnenvir(fn) x <- 1:4 fnenvir(fn) fn <- function(p) p[1]+exp(p[2]*x) fnenvir(fn) # y <- matrix(rnorm(20),ncol=5) y[3,3] <- y[2,2] <- NA resp <- restovec(y) xx <- tcctomat(x) z1 <- matrix(rnorm(20),ncol=5) z2 <- matrix(rnorm(20),ncol=5) z3 <- matrix(rnorm(20),ncol=5) zz <- tvctomat(z1) zz <- tvctomat(z2,old=zz) reps <- rmna(resp, ccov=xx, tvcov=zz) rm(y, x, z1, z2) # # repeated objects func1 <- function(p) p[1]+p[2]*x+p[3]*z2 print(fn1 <- fnenvir(func1, .envir=reps)) fn1(2:4) # # time-constant covariates func2 <- function(p) p[1]+p[2]*x print(fn2 <- fnenvir(func2, .envir=reps)) fn2(2:3) print(fn2a <- fnenvir(func2, .envir=xx)) fn2a(2:3) # # time-varying covariates func3 <- function(p) p[1]+p[2]*z1+p[3]*z2 print(fn3 <- fnenvir(func3, .envir=reps)) fn3(2:4) print(fn3a <- fnenvir(func3, .envir=zz)) fn3a(2:4) # including times func3b <- function(p) p[1]+p[2]*z1+p[3]*z2+p[4]*times print(fn3b <- fnenvir(func3b, .envir=reps)) fn3b(2:5) # # with typing error and a variable not in the data object func4 <- function(p) p[1]+p2[2]*z1+p[3]*z2+p[4]*z3 print(fn4 <- fnenvir(func4, .envir=reps)) # # first-order one-compartment model # data objects for formulae dose <- c(2,5) dd <- tcctomat(dose) times <- matrix(rep(1:20,2), nrow=2, byrow=TRUE) tt <- tvctomat(times) # vector covariates for functions dose <- c(rep(2,20),rep(5,20)) times <- rep(1:20,2) # functions mu <- function(p) { absorption <- exp(p[1]) elimination <- exp(p[2]) absorption*exp(-p[3])*dose/(absorption-elimination)* (exp(-elimination*times)-exp(-absorption*times))} shape <- function(p) exp(p[1]-p[2])*times*dose*exp(-exp(p[1])*times) # response conc <- matrix(rgamma(40,shape(log(c(0.1,0.4))), scale=mu(log(c(1,0.3,0.2))))/shape(log(c(0.1,0.4))),ncol=20,byrow=TRUE) conc[,2:20] <- conc[,2:20]+0.5*(conc[,1:19]-matrix(mu(log(c(1,0.3,0.2))), ncol=20,byrow=TRUE)[,1:19]) conc <- restovec(ifelse(conc>0,conc,0.01)) reps <- rmna(conc, ccov=dd, tvcov=tt) # print(fn5 <- fnenvir(mu,.envir=reps)) fn5(c(0,-1.2,-1.6)) } \keyword{programming} rmutil/man/iprofile.Rd0000755000176200001440000000463413425057453014463 0ustar liggesusers\name{iprofile} \title{Produce Individual Time Profiles for Plotting} \usage{ \method{plot}{iprofile}(x, nind=1, observed=TRUE, intensity=FALSE, add=FALSE, lty=NULL, pch=NULL, ylab=NULL, xlab=NULL, main=NULL, ylim=NULL, xlim=NULL, ...) } \alias{iprofile} \alias{iprofile.default} \alias{plot.iprofile} \description{ \code{iprofile} is used for plotting individual profiles over time for objects obtained from dynamic models. It produces output for plotting recursive fitted values for individual time profiles from such models. See \code{\link[rmutil]{mprofile}} for plotting marginal profiles. } \arguments{ \item{x}{An object of class \code{iprofile}, e.g. \code{x = iprofile(z, plotsd=FALSE)}, where \code{z} is an object of class \code{recursive}, from \code{carma}, \code{elliptic}, \code{gar}, \code{kalcount}, \code{kalseries}, \code{kalsurv}, or \code{nbkal}. 
If \code{plotsd} is If TRUE, plots standard deviations around profile (\code{carma} and \code{elliptic} only).} \item{nind}{Observation number(s) of individual(s) to be plotted.} \item{observed}{If TRUE, plots observed responses.} \item{intensity}{If z has class, \code{kalsurv}, and this is TRUE, the intensity is plotted instead of the time between events.} \item{add}{If TRUE, the graph is added to an existing plot.} %\item{others}{Plotting control options.} \item{lty,pch,main,ylim,xlim,xlab,ylab}{ See base plot.} \item{...}{Arguments passed to other functions.} } \value{ \code{iprofile} returns information ready for plotting by \code{plot.iprofile}. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{mprofile}} \code{\link[rmutil]{plot.residuals}}. } \examples{ \dontrun{ ## try this after you have repeated package installed library(repeated) times <- rep(1:20,2) dose <- c(rep(2,20),rep(5,20)) mu <- function(p) exp(p[1]-p[3])*(dose/(exp(p[1])-exp(p[2]))* (exp(-exp(p[2])*times)-exp(-exp(p[1])*times))) shape <- function(p) exp(p[1]-p[2])*times*dose*exp(-exp(p[1])*times) conc <- matrix(rgamma(40,1,scale=mu(log(c(1,0.3,0.2)))),ncol=20,byrow=TRUE) conc[,2:20] <- conc[,2:20]+0.5*(conc[,1:19]-matrix(mu(log(c(1,0.3,0.2))), ncol=20,byrow=TRUE)[,1:19]) conc <- ifelse(conc>0,conc,0.01) z <- gar(conc, dist="gamma", times=1:20, mu=mu, shape=shape, preg=log(c(1,0.4,0.1)), pdepend=0.5, pshape=log(c(1,0.2))) # plot individual profiles and the average profile plot(iprofile(z), nind=1:2, pch=c(1,20), lty=3:4) plot(mprofile(z), nind=1:2, lty=1:2, add=TRUE) } } \keyword{hplot} rmutil/man/read.list.Rd0000755000176200001440000000176513425057453014541 0ustar liggesusers\name{read.list} \title{Read a List of Matrices from a File for Unbalanced Repeated Measurements} \alias{read.list} \usage{ read.list(file="", skip=0, nlines=2, order=NULL) } \description{ \code{read.list} reads sets of lines of data from a file and creates a list of matrices. Different sets of lines may be have different lengths. } \arguments{ \item{file}{Name of the file to read} \item{skip}{Number of lines to skip at the beginning of the file} \item{nlines}{Number of lines per matrix} \item{order}{Order in which the lines are to be used as columns of the matrix. If NULL, they are placed in the order read.} } \value{ The list of matrices, each with \code{nlines} columns, is returned. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{lvna}}, \code{\link[rmutil]{read.rep}}, \code{\link[rmutil]{read.surv}}, \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{rmna}}, \code{\link[rmutil]{tvctomat}} } \examples{ \dontrun{y <- read.list("test.dat")} } \keyword{file} rmutil/man/DataMethods.Rd0000755000176200001440000002523113437243523015041 0ustar liggesusers\name{DataMethods} \title{Methods for response, tccov, tvcov, and repeated Data Objects} \usage{ as.data.frame(x, ...) as.matrix(x, ...) covariates(z, ...) covind(z, ...) delta(z, ...) \method{formula}{tccov}(x, ...) \method{formula}{repeated}(x, ...) \method{names}{tccov}(x, ...) \method{names}{repeated}(x, ...) nesting(z, ...) nobs(z, ...) \method{plot}{response}(x, name=NULL, nind=NULL, nest=1, ccov=NULL, add=FALSE, lty=NULL, pch=NULL, main=NULL, ylim=NULL, xlim=NULL, xlab=NULL, ylab=NULL, ...) \method{plot}{repeated}(x, name=NULL, nind=NULL, nest=1, ccov=NULL, add=FALSE, lty=NULL, pch=NULL, main=NULL, ylim=NULL, xlim=NULL, xlab=NULL, ylab=NULL, ...) \method{print}{tccov}(x, ...) \method{print}{repeated}(x, nindmax=50, ...) response(z, ...) resptype(z, ...) times(z, ...) 
\method{transform}{response}(`_data`, times=NULL, units=NULL, ...) \method{transform}{repeated}(`_data`, times=NULL, ...) units(x, ...) \method{weights}{gnlm}(object, ...) \method{weights}{repeated}(object, nind=NULL, ...) \method{weights}{response}(object, nind=NULL, ...) } \alias{DataMethods} \alias{as.data.frame} \alias{as.data.frame.repeated} \alias{as.data.frame.response} \alias{as.data.frame.tccov} \alias{as.data.frame.tvcov} \alias{as.matrix} \alias{as.matrix.repeated} \alias{as.matrix.response} \alias{as.matrix.tccov} \alias{as.matrix.tvcov} \alias{coef.gnlm} \alias{covariates} \alias{covariates.tccov} \alias{covariates.tvcov} \alias{covariates.repeated} \alias{covind} \alias{covind.default} \alias{delta} \alias{delta.response} \alias{delta.repeated} \alias{description} \alias{description.default} \alias{description.repeated} \alias{deviance.gnlm} \alias{df.residual.gnlm} \alias{formula} \alias{formula.tccov} \alias{formula.repeated} \alias{names} \alias{names.response} \alias{names.tccov} \alias{names.tvcov} \alias{names.repeated} \alias{nesting} \alias{nesting.response} \alias{nesting.repeated} \alias{nobs} \alias{nobs.default} \alias{nobs.response} \alias{nobs.tvcov} \alias{nobs.data.frame} \alias{plot.response} \alias{plot.repeated} \alias{print.response} \alias{print.tccov} \alias{print.tvcov} \alias{print.repeated} \alias{print.fmobj} \alias{print.gnlm} \alias{response} \alias{response.response} \alias{response.repeated} \alias{resptype} \alias{resptype.response} \alias{resptype.repeated} \alias{times} \alias{times.default} \alias{times.response} \alias{transform} \alias{transform.response} \alias{transform.tccov} \alias{transform.tvcov} \alias{transform.repeated} \alias{units} \alias{units.default} \alias{units.repeated} \alias{vcov.gnlm} \alias{weights} \alias{weights.response} \alias{weights.repeated} \alias{weights.gnlm} \description{ Objects of class, \code{response}, contain response values, and possibly the corresponding times, binomial totals, nesting categories, censor indicators, and/or units of precision/Jacobian. Objects of class, \code{tccov}, contain time-constant or inter-individual, baseline covariates. Objects of class, \code{tvcov}, contain time-varying or intra-individual covariates. Objects of class, \code{repeated}, contain a \code{response} object and possibly \code{tccov} and \code{tvcov} objects. In formula and functions, the key words, \code{times} can be used to refer to the response times from the data object as a covariate, \code{individuals} to the index for individuals as a factor covariate, and \code{nesting} the index for nesting as a factor covariate. The latter two only work for W&R notation. The following methods are available for accessing the contents of such data objects. \code{as.data.frame}: places all of the variables in the data object in one dataframe, extending time-constant covariates to the length of the others unless the object has class, \code{tccov}. Binomial and censored response variables have two columns, respectively `yes' and `no' and response and censoring indicator, with the name given to the response. \code{as.matrix}: places all of the variables in the data object in one matrix, extending time-constant covariates to the length of the others unless the object has class, \code{tccov}. If any covariates are factor variables (instead of the corresponding sets of indicator variables), the matrix will be character instead of numeric. 
\code{covariates}: extracts covariate matrices from a data object (for formulae and functions, possibly for selected individuals. See \code{\link[rmutil]{covariates.formulafn}}). \code{covind}: gives the indexing of the response by individual (that is, the nesting indicator for observations within individuals). It can be used to expand time-constant covariates to the size of the repeated measurements response. \code{delta}: extracts the units of measurement vector and Jacobian of any transformation of the response, possibly for selected individuals. Note that, if the unit of measurement/Jacobian is available in the \code{response} object, this is automatically included in the calculation of the likelihood function in all library model functions. \code{units}: prints the variable names and their description and returns the latter. \code{formula}: gives the formula used to create the time-constant covariate matrix of a data object (for formulae and functions, see \code{\link[rmutil]{formula.formulafn}}). \code{names}: extracts the names of the response and/or covariates. \code{nesting}: gives the coding variable(s) for individuals (same as \code{covind}) and also for nesting within individuals if available, possibly for selected individuals. \code{nobs}: gives the number of observations per individual. \code{plot}: plots the variables in the data object in various ways. For \code{repeated} objects, \code{name} can be a response or a time-varying covariate. \code{print}: prints summary information about the variables in a data object. \code{response}: extracts the response vector, possibly for selected individuals. If there are censored observations, this is a two-column matrix, with the censor indicator in the second column. For binomial data, it is a two-column matrix with "positive" (y) and "negative" (totals-y) frequencies. \code{resptype}: extracts the type of each response. \code{times}: extracts the times vector, possibly for selected individuals. \code{transform}: transforms variables. For example, \code{transform(z, y=fcn1(y), times=fcn2(times))} where \code{fcn1} and \code{fcn2} are transformation functions. When the response is transformed, the Jacobian is automatically calculated. New response variables and covariates can be created in this way, if the left hand side is a new name (\code{ynew=fcn3(y)}), as well as replacing an old variable with the transformed one. If the transformation reverses the order of the responses, use its negative to keep the ordering and have a positive Jacobian; for example, \code{ry=-1/y}. For \code{repeated} objects, only the response and the times can be transformed. \code{units}: prints the variable names and their units of measurement and returns the latter. \code{weights}: extracts the weight vector, possibly for selected individuals. } \arguments{ \item{x,z}{A \code{response}, \code{tccov}, \code{tvcov}, or \code{repeated} data object. For \code{covind} and \code{nobs}, this may also be a model.} %\item{y}{The function, when the response is to be transformed.} \item{times}{The function, when the times are to be transformed.} \item{names}{The names of the response variable(s) or covariate(s).} \item{nind}{The numbers of individuals to be used. (For plotting, cannot be used simultaneously with \code{ccov}.)} %\item{expand}{For intra-class (time-constant) covariates, if TRUE, %expand them to give one value per observation rather than one per %individual. 
Only works with \code{repeated} objects when all %individuals are requested (\code{nind=NULL}).} \item{ccov}{For plotting: If a vector of values for the time-constant covariates is supplied, only individuals having that set of values will have profiles plotted. These values must be in the order in which the covariates appear when the data object is printed. For factor variables, the codes must be given. If the name of a covariate is supplied, a set of graphs is plotted, one for each covariate value, showing profiles of all individuals having that value. (The covariate can have a maximum of six values.) Cannot be used simultaneously with \code{nind}.} \item{nest}{For plotting: nesting category to plot.} \item{add}{For plotting: add to previous plot.} \item{nindmax}{For printing a \code{response}, \code{tvcov}, or \code{repeated} object, if the number of individuals is greater than \code{nindmax}, the range of numbers of observations per individual is printed instead of the vector of numbers.} \item{name,lty,pch,main,ylim,xlim,xlab,ylab}{ See base plot.} \item{_data,units,object}{ TBD.} \item{...}{ Arguments to other methods} } \value{ These methods extract information stored in \code{response}, \code{tccov}, \code{tvcov}, and \code{repeated} data objects created respectively by \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{tcctomat}}, \code{\link[rmutil]{tvctomat}}, and \code{\link[rmutil]{rmna}}. Note that if a vector of binomial totals or a censoring indicator is present, this is extract as the second column of the matrix produced by the \code{response} method. } \author{J.K. Lindsey} \seealso{ \code{\link[rmutil]{restovec}}, \code{\link[rmutil]{rmna}}, \code{\link[rmutil]{tcctomat}}, \code{\link[rmutil]{tvctomat}}. } \examples{ # set up some data and create the objects # y <- matrix(rnorm(20),ncol=5) tt <- c(1,3,6,10,15) print(resp <- restovec(y, times=tt, units="m", type="duration")) x <- c(0,0,1,1) x2 <- as.factor(c("a","b","a","b")) tcc <- tcctomat(data.frame(x=x,x2=x2)) z <- matrix(rpois(20,5),ncol=5) tvc <- tvctomat(z) print(reps <- rmna(resp, tvcov=tvc, ccov=tcc)) # plot(resp) plot(reps) plot(reps, nind=1:2) plot(reps, ccov=c(0,1)) plot(reps, ccov="x2") plot(reps, name="z", nind=3:4, pch=1:2) plot(reps, name="z", ccov="x2") # response(resp) response(transform(resp, y=1/y)) response(reps) response(reps, nind=2:3) response(transform(reps,y=1/y)) # times(resp) times(transform(resp,times=times-6)) times(reps) # delta(resp) delta(reps) delta(transform(reps,y=1/y)) delta(transform(reps,y=1/y), nind=3) # nobs(resp) nobs(tvc) nobs(reps) # units(resp) units(reps) # resptype(resp) resptype(reps) # weights(resp) weights(reps) # covariates(tcc) covariates(tcc, nind=2:3) covariates(tvc) covariates(tvc, nind=3) covariates(reps) covariates(reps, nind=3) covariates(reps,names="x") covariates(reps,names="z") # names(tcc) names(tvc) names(reps) # nesting(resp) nesting(reps) # # because individuals are the only nesting, this is the same as covind(resp) covind(reps) # as.data.frame(resp) as.data.frame(tcc) as.data.frame(tvc) as.data.frame(reps) # # use in glm rm(y,x,z) glm(y~x+z, data=as.data.frame(reps)) } \keyword{manip} rmutil/DESCRIPTION0000644000176200001440000000316714326404743013312 0ustar liggesusersPackage: rmutil Version: 1.1.10 Title: Utilities for Nonlinear Regression and Repeated Measurements Models Authors@R: c( person("Bruce", "Swihart", email="bruce.swihart@gmail.com", role=c("cre", "aut")), person("Jim" , "Lindsey", email="jlindsey@gen.unimaas.nl", role="aut", comment="Jim 
created this package, Bruce is maintaining the CRAN version"), person("K.", "Sikorski", role=c("ctb", "cph"), comment="Wrote TOMS614/INTHP, https://calgo.acm.org/"), person("F.", "Stenger", role=c("ctb", "cph"), comment="Wrote TOMS614/INTHP, https://calgo.acm.org/"), person("J.", "Schwing", role=c("ctb", "cph"), comment="Wrote TOMS614/INTHP, https://calgo.acm.org/") ) Depends: R (>= 1.4) Description: A toolkit of functions for nonlinear regression and repeated measurements not to be used by itself but called by other Lindsey packages such as 'gnlm', 'stable', 'growth', 'repeated', and 'event' (available at ). License: GPL (>= 2) URL: https://www.commanster.eu/rcode.html BugReports: https://github.com/swihart/rmutil/issues Encoding: UTF-8 LazyLoad: true NeedsCompilation: yes Packaged: 2022-10-26 22:48:02 UTC; swihartbj Author: Bruce Swihart [cre, aut], Jim Lindsey [aut] (Jim created this package, Bruce is maintaining the CRAN version), K. Sikorski [ctb, cph] (Wrote TOMS614/INTHP, https://calgo.acm.org/), F. Stenger [ctb, cph] (Wrote TOMS614/INTHP, https://calgo.acm.org/), J. Schwing [ctb, cph] (Wrote TOMS614/INTHP, https://calgo.acm.org/) Maintainer: Bruce Swihart Repository: CRAN Date/Publication: 2022-10-27 04:32:35 UTC rmutil/src/0000755000176200001440000000000014326334442012362 5ustar liggesusersrmutil/src/romberg_sexp.c0000644000176200001440000001504414176766227015243 0ustar liggesusers#define R_NO_REMAP #include #include #include #include /* polynomial interpolation algorithm for function with values fx evaluated at x */ void interp_sexp(double x[], double fx[], int pts, double tab1[], double tab2[], double *f, double *df, int *err) { int i,j,ni=0; double diff1,diff2,tmp1,tmp2,lim1,lim2; *err=0; /* create an initial table of values from those supplied */ tmp1=fabs(x[0]); for(i=0;i .Call requires this*/ Rf_protect(call2=Rf_lang2(fcn,state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(x=Rf_coerceVector(result2,REALSXP));/*.C -> .Call requires this*/ for(k=0;k REAL(x)[k]*/ Rf_unprotect(4); return; }else{ /* several trapezoids */ for(i=1,j=1;j .Call requires this*/ Rf_protect(call2=Rf_lang2(fcn,state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(x=Rf_coerceVector(result2,REALSXP));/*.C -> .Call requires this*/ for(k=0;k REAL(x)[k]*/ zz[k]+=pnt2[k];} Rf_unprotect(3); for(k=0;k .Call requires this*/ Rf_protect(call2=Rf_lang2(fcn,state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(x=Rf_coerceVector(result2,REALSXP));/*.C -> .Call requires this*/ for(k=0;k REAL(x)[k]*/ zz[k]+=pnt1[k];} Rf_unprotect(3); } Rf_unprotect(1); /* calculate total area */ for(k=0;k=*PTS?1:0); /*repeatedly call polynomial interpolation routine*/ for(i=0;i<*LEN;i++){ fx[j+i**MAX]=sum[i]; if(j1>=*PTS){ interp_sexp(&x[j1-*PTS+i**MAX],&fx[j1-*PTS+i**MAX],*PTS,tab1,tab2,&sumlen[i],&errsum,ERR); if(*ERR)Rf_error("*ERR is now 2 -- Line 177 -- Division by zero in romberg integration C code"); /* check convergence */ if(fabs(errsum)>*EPS*fabs(sumlen[i]))finish=0;} /* decrease step size */ x[j1+i**MAX]=x[j+i**MAX]/9.0; fx[j1+i**MAX]=fx[j+i**MAX];} if(finish){ for(i=0;i<*LEN;i++) REAL(ans)[i] = sumlen[i]; Rf_unprotect(1); return ans;}} *ERR=3; if(*ERR)Rf_error("*ERR is now 3 -- Line 191 -- No convergence in romberg integration C code"); for(i=0;i<*LEN;i++) REAL(ans)[i] = sumlen[i]; Rf_unprotect(1); return ans;} // 
SEXP call, result; // /* try a state2 def and put in c(1,2,3,9) and see if you can get it*/ // SEXP abcd; // abcd = PROTECT(Rf_allocVector(REALSXP, 4)); // REAL(abcd)[0] = 1; // REAL(abcd)[1] = 2; // REAL(abcd)[2] = 2.5; // REAL(abcd)[3] = 3.11; // /*...*/ // // PROTECT(call = Rf_lang2(fcn,abcd)); // PROTECT(result = Rf_eval(call,envir)); // SEXP foo; // PROTECT(foo = Rf_coerceVector(result, REALSXP)); // int len2 = LENGTH(foo); // for (int i = 0; i < len2; i++) // if (! R_finite(REAL(foo)[i])) // Rf_error("function returned vector with non-finite element"); // UNPROTECT(4); // return foo; //} rmutil/src/rmutil_init.c0000644000176200001440000000633214176034647015100 0ustar liggesusers#include #include // for NULL #include #include /* FIXME: Check these declarations against the C/Fortran source code. */ /* .C calls */ extern void ddb(void *, void *, void *, void *, void *, void *, void *); extern void ddp(void *, void *, void *, void *, void *, void *, void *); extern void dmb(void *, void *, void *, void *, void *, void *, void *); extern void dmp(void *, void *, void *, void *, void *, void *, void *); extern void dpvfp(void *, void *, void *, void *, void *, void *, void *); //extern void inthp(void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pdb(void *, void *, void *, void *, void *, void *); extern void pdp(void *, void *, void *, void *, void *, void *); extern void pginvgauss_c(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pmb(void *, void *, void *, void *, void *, void *); extern void pmp(void *, void *, void *, void *, void *, void *); extern void ppowexp_c(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void ppvfp(void *, void *, void *, void *, void *, void *); extern void psimplex_c(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); //extern void romberg(void *, void *, void *, void *, void *, void *, void *, void *, void *); /* .Fortran calls */ extern void F77_NAME(gettvc_f)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); static const R_CMethodDef CEntries[] = { {"ddb", (DL_FUNC) &ddb, 7}, {"ddp", (DL_FUNC) &ddp, 7}, {"dmb", (DL_FUNC) &dmb, 7}, {"dmp", (DL_FUNC) &dmp, 7}, {"dpvfp", (DL_FUNC) &dpvfp, 7}, // {"inthp", (DL_FUNC) &inthp, 9}, {"pdb", (DL_FUNC) &pdb, 6}, {"pdp", (DL_FUNC) &pdp, 6}, {"pginvgauss_c", (DL_FUNC) &pginvgauss_c, 10}, {"pmb", (DL_FUNC) &pmb, 6}, {"pmp", (DL_FUNC) &pmp, 6}, {"ppowexp_c", (DL_FUNC) &ppowexp_c, 10}, {"ppvfp", (DL_FUNC) &ppvfp, 6}, {"psimplex_c", (DL_FUNC) &psimplex_c, 10}, // {"romberg", (DL_FUNC) &romberg, 9}, {NULL, NULL, 0} }; /* .Call() */ //extern void romberg_sexp(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); //extern void inthp_sexp(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); SEXP romberg_sexp(SEXP fcn, SEXP a, SEXP b, SEXP len, SEXP eps, SEXP pts, SEXP max, SEXP err, SEXP envir); SEXP inthp_sexp(SEXP a, SEXP b, SEXP d__, SEXP f, SEXP m, SEXP p, SEXP eps, SEXP inf, SEXP envir); static const R_CallMethodDef callMethods[] = { {"romberg_sexp", (DL_FUNC) &romberg_sexp, 9}, {"inthp_sexp", (DL_FUNC) &inthp_sexp, 9}, {NULL, NULL, 0} }; static const R_FortranMethodDef FortranEntries[] = { {"gettvc_f", (DL_FUNC) &F77_NAME(gettvc_f), 15}, {NULL, NULL, 0} }; void R_init_rmutil(DllInfo *dll) { R_registerRoutines(dll, CEntries, callMethods, 
FortranEntries, NULL); R_useDynamicSymbols(dll, FALSE); } rmutil/src/toms614_sexp.c0000755000176200001440000006505414176032027015014 0ustar liggesusers#define R_NO_REMAP #include #include #include #include #include "f2c.h" #include "R_ext/RS.h" /* Table of constant values */ static doublereal c_b8 = 4.; static doublereal c_b12 = 0.; /* ALGORITHM 614 COLLECTED ALGORITHMS FROM ACM. */ /* ALGORITHM APPEARED IN ACM-TRANS. MATH. SOFTWARE, VOL.10, NO. 2, */ /* JUN., 1984, P. 152-160. */ /*< SUBROUTINE INTHP(A, B, D, F, M, P, EPS, INF, QUADR) >*/ /* Subroutine */ SEXP inthp_sexp(SEXP a, SEXP b, SEXP d__, SEXP f, SEXP m, SEXP p, SEXP eps, SEXP inf, SEXP envir) { double *A = REAL(a); double *B = REAL(b); double *P = REAL(p); double *EPS = REAL(eps); double *D__ = REAL(d__); int *M = INTEGER(m); int *INF = INTEGER(inf); /* System generated locals */ doublereal d__1; /* Builtin functions */ double atan(doublereal), sqrt(doublereal), exp(doublereal), R_pow( doublereal *, doublereal *), log(doublereal); /* Local variables */ static doublereal alfa, exph, exph0, c__, h__; static integer i__, k, l, n; static doublereal s, t, u, v, w, c0, e1, h0, h1; static integer i1, l1, m1, m2, n1; static doublereal s1, v0, v1, v2, w1, w2, w3, w4, ba, pi, sr, sq2, cor, sum; static logical inf1, inf2; static doublereal eps3, sum1, sum2; //static double *tmp; // static char *mode[1], *ss[1]; // static long length[1]; // static void *args[1]; static double zz[1]; double quadr[1]; SEXP ans; Rf_protect(ans = Rf_allocVector(REALSXP, 1)); /* THIS SUBROUTINE COMPUTES INTEGRAL OF FUNCTIONS WHICH */ /* MAY HAVE SINGULARITIES AT ONE OR BOTH END-POINTS OF AN */ /* INTERVAL (A,B), SEE [1, 2]. IT CONTAINS FOUR DIFFERENT */ /* QUADRATURE ROUTINES: ONE OVER A FINITE INTERVAL (A,B), */ /* TWO OVER (A,+INFINITY), AND ONE OVER (-INFINITY,+INFINITY). */ /* OF THE TWO FORMULAS OVER (A,+INFINITY), THE FIRST (INF=2 */ /* BELOW) IS MORE SUITED TO NON-OSCILLATORY INTEGRANDS, WHILE */ /* THE SECOND (INF=3) IS MORE SUITED TO OSCILLATORY INTEGRANDS. */ /* THE USER SUPPLIES THE INTEGRAND FUNCTION, HE SPECIFIES THE */ /* INTERVAL, AS WELL AS THE RELATIVE ERROR TO WHICH THE INTE- */ /* GRAL IS TO BE EVALUATED. */ /* THE FORMULAS ARE OPTIMAL IN CERTAIN HARDY SPACES H(P,DD), */ /* SEE [1, 2]. HERE DD IS AN OPEN DOMAIN IN THE COMPLEX PLANE, */ /* A AND B BELONG TO THE BOUNDARY OF DD AND H(P,DD), P.GT.1, IS */ /* THE SET OF ALL ANALYTIC FUNCTONS IN DD WHOSE P-TH NORM DEFI- */ /* NED AS IN [2] IS FINITE. */ /* IF THE USER IS UNABLE TO SPECIFY THE PARAMETERS P AND D */ /* OF THE SPACE H(P,DD) TO WHICH HIS INTEGRAND BELONGS, THE */ /* ALGORITHM TERMINATES ACCORDING TO A HEURISTIC CRITERION, SEE */ /* [2] AND COMMENTS TO EPS. */ /* IF THE USER CAN SPECIFY THE PARAMETERS P AND D OF THE */ /* SPACE H(P,DD) TO WHICH HIS INTEGRAND BELONGS, THE ALGORITHM */ /* TERMINATES WITH AN ANSWER HAVING A GUARANTEED ACCURACY ( DE- */ /* TEMINISTIC CRITERION, SEE [1, 2] AND COMMENTS TO EPS). */ /* INPUT PARAMETERS */ /* A = LOWER LIMIT OF INTEGRATION (SEE COMMENTS TO INF). */ /* B = UPPER LIMIT OF INTEGRATION (SEE COMMENTS TO INF). */ /* D = A PARAMETER OF THE CLASS H(P,DD) (SEE COMMENTS TO */ /* INF). */ /* USER SETS D: */ /* HEURISTIC TERMINATION */ /* = ANY REAL NUMBER */ /* DETERMINISTIC TERMINATION */ /* = A NUMBER IN THE RANGE 0.LT.D.LE.PI/2. */ /* F = A NAME OF AN EXTERNAL INTEGRAND FUNCTION TO BE */ /* SUPPLIED BY THE USER. F(X) COMPUTES THE VALUE OF */ /* A FUNCTION F AT A POINT X. THE STATEMENT */ /* ...EXTERNAL F... MUST APPEAR IN THE MAIN PROGRAM. 
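             (Note on this C port: here F is not a Fortran EXTERNAL but an
             R function supplied as a SEXP.  Each evaluation wraps the
             abscissa in a length-one REALSXP, builds a call with Rf_lang2,
             evaluates it with Rf_eval in the environment passed as envir,
             and coerces the result to double with Rf_coerceVector; see
             the protected blocks in the code below.)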
*/ /* M = MAXIMAL NUMBER OF FUNCTION EVALUATIONS ALLOWED IN */ /* THE COMPUTATIONS, M.GE.3.( ALTERED ON EXIT ). */ /* P = 0, 1, .GT.1 A PARAMETER OF THE CLASS H(P,DD). */ /* USER SETS P: */ /* = 0 - HEURISTIC TERMINATION. */ /* = 1 - DETERMINISTIC TERMINATION WITH THE INFINITY */ /* NORM. */ /* .GT.1 -DETERMINISTIC TERMINATION WITH THE P-TH NORM. */ /* EPS = A REAL NUMBER - THE RELATIVE ERROR BOUND - SEE */ /* REMARKS BELOW. ( ALTERED ON EXIT ). */ /* INF = 1, 2, 3, 4 - INFORMATION PARAMETER. ( ALTERED ON EXIT ). */ /* = 1 SIGNIFIES AN INFINITE INTERVAL (A,B)=REAL LINE, */ /* A AND B ANY NUMBERS. */ /* DETERMINISTIC TERMINATION - */ /* DD=STRIP(Z:ABS(IM(Z)).LT.D). */ /* = 2 SIGNIFIES A SEMI-INFINITE INTERVAL (A, +INFINITY) */ /* USER SUPPLIES A, B ANY NUMBER. */ /* QUADRATURE SUITED TO NON-OSCILLATORY INTEGRANDS. */ /* DETERMINISTIC TERMINATION - */ /* DD=SECTOR(Z:ABS(ARG(Z-A)).LT.D). */ /* = 3 SIGNIFIES A SEMI INFINITE INTERVAL (A,+INFINITY) */ /* USER SUPPLIES A, B ANY NUMBER. */ /* QUADRATURE SUITED TO OSCILLATORY INTEGRANDS. */ /* DETERMINISTIC TERMINATION - */ /* DD=REGION(Z:ABS(ARG(SINH(Z-A))).LT.D). */ /* = 4 SIGNIFIES A FINITE INTERVAL (A,B). */ /* USER SUPPLIES A AND B. */ /* DETERMINISTIC TERMINATION - */ /* DD=LENS REGION(Z:ABS(ARG((Z-A)/(B-Z))).LT.D). */ /* OUTPUT PARAMETERS */ /* M = THE NUMBER OF FUNCTION EVALUATIONS USED IN THE */ /* QUADRATURE. */ /* EPS = THE RELATIVE ERROR BOUND (SEE REMARKS BELOW). */ /* DETERMINISTIC TERMINATION */ /* = THE RELATIVE ERROR REXA BOUND, I.E., */ /* REXA(F,M(OUTPUT)) .LE. EPS. */ /* HEURISTIC TERMINATION */ /* = MAX(EPS(INPUT),MACHEP). */ /* INF = 0, 1 - DETERMINISTIC TERMINATION */ /* = 0 COMPUTED QUADRATURE QCOM(F,M(EPS)), SEE REMARKS */ /* BELOW. */ /* = 1 COMPUTED QUADRATURE QCOM(F,M1), SEE REMARKS */ /* BELOW. */ /* INF = 2, 3, 4 - HEURISTIC TERMINATION. */ /* = 2 INTEGRATION COMPLETED WITH EPS=MAX(EPS(INPUT), */ /* MACHEP). WE CAN EXPECT THE RELATIVE ERROR */ /* REXA TO BE OF THE ORDER OF EPS (FOR SOME P.GE.1). */ /* = 3 INTEGRATION NOT COMPLETED. ATTEMPT TO EXCEED THE */ /* MAXIMAL ALLOWED NUMBER OF FUNCTION EVALUATIONS M. */ /* TRUNCATION CONDITIONS (SEE [2]) SATISFIED. QUADR */ /* SET TO BE EQUAL TO THE LAST TRAPEZOIDAL APPRO- */ /* XIMATION. IT IS LIKELY THAT QUADR APPROXIMATES THE */ /* INTEGRAL IF M IS LARGE. */ /* = 4 INTEGRATION NOT COMPLETED. ATTEMPT TO EXCEED THE */ /* MAXIMAL ALLOWED NUMBER OF FUNCTION EVALUATIONS M. */ /* TRUNCATION CONDITIONS (SEE [2]) NOT SATISFIED. */ /* QUADR SET TO BE EQUAL TO THE COMPUTED TRAPEZOIDAL */ /* APPROXIMATION. IT IS UNLIKELY THAT QUADR APPROXIMATES */ /* THE INTEGRAL. */ /* INF = 10, 11, 12, 13 - INCORRECT INPUT */ /* = 10 M.LT.3. */ /* = 11 P DOES NOT SATISFY P=0, P=1 OR P.GT.1 OR IN THE */ /* CASE OF DETERMINISTIC TERMINATION D DOES NOT */ /* SATISFY 0.LT.D.LE.PI/2. */ /* = 12 A.GE.B IN CASE OF A FINITE INTERVAL. */ /* = 13 INF NOT EQUAL TO 1, 2, 3, OR 4. */ /* QUADR = THE COMPUTED VALUE OF QUADRATURE. */ /* REMARKS: */ /* LET QEXA(F,M) ( QCOM(F,M) ) BE THE EXACT (COMPUTED) */ /* VALUE OF THE QUADRATURE WITH M FUNCTION EVALUATIONS, */ /* AND LET REXA(F,M) ( RCOM(F,M) ) BE THE RELATIVE ERROR */ /* OF QEXA (QCOM) ,I.E., */ /* REXA(F,M)=ABS(INTEGRAL(F)-QEXA(F,M))/NORM(F), */ /* RCOM(F,M)=ABS(INTEGRAL(F)-QCOM(F,M))/NORM(F), */ /* WITH THE NOTATION 0/0=0. */ /* DUE TO THE ROUNDOFF ONE CANNOT EXPECT THE ERROR */ /* RCOM TO BE LESS THAN THE RELATIVE MACHINE PRECISION */ /* MACHEP. THEREFORE THE INPUT VALUE OF EPS IS CHANGED */ /* ACCORDING TO THE FORMULA */ /* EPS=MAX(EPS,MACHEP). 
*/ /* DETERMINISTIC TERMINATON CASE */ /* THE NUMBER OF FUNCTON EVALUATIONS M(EPS) IS COMPUTED */ /* SO THAT THE ERROR REXA IS NO GREATER THAN EPS,I.E., */ /* (*) REXA(F,M(EPS)) .LE. EPS . */ /* IF M(EPS).LE.M THEN THE QUADRATURE QCOM(F,M(EPS)) IS COM- */ /* PUTED. OTHERWISE, WHICH MEANS THAT EPS IS TOO SMALL WITH */ /* RESPECT TO M, THE QUADRATURE QCOM(F,M1) IS COMPUTED, WHERE */ /* M1=2*INT((M-1)/2)+1. IN THIS CASE EPS IS CHANGED TO THE */ /* SMALLEST NUMBER FOR WHICH THE ESTIMATE (*) HOLDS WITH */ /* M(EPS)=M1 FUNCTION EVALUATIONS. */ /* HEURISTIC TERMINATION CASE */ /* WE CAN EXPECT THE RELATIVE ERROR REXA TO BE OF THE */ /* ORDER OF EPS, SEE [2]. IF EPS IS TOO SMALL WITH RESPECT */ /* TO M THEN THE QUADRATURE QCOM(F,M) IS COMPUTED. */ /* ROUNDOFF ERRORS */ /* IN BOTH DETERMINISTIC AND HEURISTIC CASES THE ROUND- */ /* OFF ERROR */ /* ROFF=ABS(QEXA(F,M)-QCOM(F,M)) */ /* CAN BE ESTIMATED BY */ /* (**) ROFF .LE. 3*C1*R*MACHEP, */ /* WHERE R=QCOM(ABS(F),M)+(1+2*C2)/3*SUM(W(I),I=1,2,...M) */ /* AND C1 IS OF THE ORDER OF UNITY, C1=1/(1-3*MACHEP), W(I) */ /* ARE THE WEIGHTS OF THE QUADRATURE, SEE [2], AND C2 IS */ /* A CONSTANT ESTIMATING THE ACCURACY OF COMPUTING FUNCTION */ /* VALUES, I.E., */ /* ABS(EXACT(F(X))-COMPUTED(F(X))).LE.C2*MACHEP. */ /* IF THE INTEGRAND VALUES ARE COMPUTED INACCURATELY, I.E., */ /* C2 IS LARGE, THEN THE ESTIMATE (**) IS LARGE AND ONE CAN */ /* EXPECT THE ACTUAL ERROR ROFF TO BE LARGE. NUMERICAL TESTS */ /* INDICATE THAT THIS HAPPENS ESPECIALLY WHEN THE INTEGRAND */ /* IS EVALUATED INACCURATELY NEAR A SINGULARITY. THE WAYS OF */ /* CIRCUMVENTING SUCH PITFALLS ARE EXPLAINED IN [2]. */ /* REFERENCES: */ /* [1] SIKORSKI,K., OPTIMAL QUADRATURE ALGORITHMS IN HP */ /* SPACES, NUM. MATH., 39, 405-410 (1982). */ /* [2] SIKORSKI,K., STENGER,F., OPTIMAL QUADRATURES IN */ /* HP SPACES, ACM TOMS. */ /* modified by J.K. Lindsey for R September 1998 */ /*< implicit none >*/ /*< INTEGER I, I1, INF, K, L, L1, M, M1, M2, N, N1 >*/ /*< >*/ /*< >*/ /*< double precision V2, W, W1, W2, W3, W4 >*/ /*< LOGICAL INF1, INF2 >*/ SEXP call2, result2, state2, tmp; // mode[0] = "double"; // length[0] = 1; // args[0] = (void *)(zz); /*< PI = 4.*ATAN(1.0) >*/ pi = atan(1.f) * 4.f; /* CHECK THE INPUT DATA */ /*< >*/ if (*INF != 1 && *INF != 2 && *INF != 3 && *INF != 4) { goto L300; } /*< IF (M.LT.3) GO TO 270 >*/ if (*M < 3) { goto L270; } /*< IF (P.LT.1. .AND. P.NE.0.) GO TO 280 >*/ if (*P < 1.f && *P != 0.f) { goto L280; } /*< IF (P.GE.1. .AND. (D.LE.0. .OR. D.GT.PI/2.)) GO TO 280 >*/ if (*P >= 1.f && (*D__ <= 0.f || *D__ > pi / 2.f)) { goto L280; } /*< IF (INF.EQ.4 .AND. A.GE.B) GO TO 290 >*/ if (*INF == 4 && *A >= *B) { goto L290; } /*< SQ2 = SQRT(2.0) >*/ sq2 = sqrt(2.f); /*< I1 = INF - 2 >*/ i1 = *INF - 2; /*< BA = B - A >*/ ba = *B - *A; /*< N1 = 0 >*/ n1 = 0; /* COMPUTE THE RELATIVE MACHINE PRECISION AND CHECK */ /* THE VALUE OF EPS. CAUTION...THIS LOOP MAY NOT WORK ON A */ /* MACHINE THAT HAS AN ACCURATED ARITHMETIC PROCESS COMPARED */ /* TO THE STORAGE PRECISION. THE VALUE OF U MAY NEED TO BE */ /* SIMPLY DEFINED AS THE RELATIVE ACCURACY OF STORAGE PRECISION. */ /*< U = 1. >*/ u = 1.f; /*< 10 U = U/10. >*/ L10: u /= 10.f; /*< T = 1. + U >*/ t = u + 1.f; /*< IF (1..NE.T) GO TO 10 >*/ if (1.f != t) { goto L10; } /*< U = U*10. >*/ u *= 10.f; /*< IF (EPS.LT.U) EPS = U >*/ if (*EPS < u) { *EPS = u; } /*< IF (P.EQ.0.) GO TO 40 >*/ if (*P == 0.f) { goto L40; } /* SET UP DATA FOR THE DETERMINISTIC TERMINATION */ /*< IF (P.EQ.1.) ALFA = 1. >*/ if (*P == 1.f) { alfa = 1.f; } /*< IF (P.GT.1.) 
ALFA = (P-1.)/P >*/ if (*P > 1.f) { alfa = (*P - 1.f) / *P; } /*< C = 2.*PI/(1.-1./EXP(PI*SQRT(ALFA))) + 4.**ALFA/ALFA >*/ c__ = pi * 2.f / (1.f - 1.f / exp(pi * sqrt(alfa))) + R_pow(&c_b8, &alfa) / alfa; /*< W = dLOG(C/EPS) >*/ w = log(c__ / *EPS); /*< W1 = 1./(PI*PI*ALFA)*W*W >*/ w1 = 1.f / (pi * pi * alfa) * w * w; /*< N = INT(W1) >*/ n = (integer) w1; /*< IF (W1.GT.FLOAT(N)) N = N + 1 >*/ if (w1 > (real) n) { ++n; } /*< IF (W1.EQ.0.) N = 1 >*/ if (w1 == 0.f) { n = 1; } /*< N1 = 2*N + 1 >*/ n1 = (n << 1) + 1; /*< SR = SQRT(ALFA*FLOAT(N)) >*/ sr = sqrt(alfa * (real) n); /*< IF (N1.LE.M) GO TO 20 >*/ if (n1 <= *M) { goto L20; } /* EPS TOO SMALL WITH RESPECT TO M. COMPUTE THE NEW EPS */ /* GUARANTEED BY THE VALUE OF M. */ /*< N1 = 1 >*/ n1 = 1; /*< N = INT(FLOAT((M-1)/2)) >*/ n = (integer) ((real) ((*M - 1) / 2)); /*< SR = SQRT(ALFA*FLOAT(N)) >*/ sr = sqrt(alfa * (real) n); /*< M = 2*N + 1 >*/ *M = (n << 1) + 1; /*< EPS = C/EXP(PI*SR) >*/ *EPS = c__ / exp(pi * sr); /*< GO TO 30 >*/ goto L30; /*< 20 M = N1 >*/ L20: *M = n1; /*< N1 = 0 >*/ n1 = 0; /*< 30 H = 2.*D/SR >*/ L30: h__ = *D__ * 2.f / sr; /*< SUM2 = 0. >*/ sum2 = 0.f; /*< L1 = N >*/ l1 = n; /*< K = N >*/ k = n; /*< INF1 = .FALSE. >*/ inf1 = FALSE_; /*< INF2 = .FALSE. >*/ inf2 = FALSE_; /*< H0 = H >*/ h0 = h__; /*< GO TO 50 >*/ goto L50; /* SET UP DATA FOR THE HEURISTIC TERMINATION */ /*< 40 H = 1. >*/ L40: h__ = 1.f; /*< H0 = 1. >*/ h0 = 1.f; /*< EPS3 = EPS/3. >*/ eps3 = *EPS / 3.f; /*< SR = SQRT(EPS) >*/ sr = sqrt(*EPS); /*< V1 = EPS*10. >*/ v1 = *EPS * 10.f; /*< V2 = V1 >*/ v2 = v1; /*< M1 = M - 1 >*/ m1 = *M - 1; /*< N = INT(FLOAT(M1/2)) >*/ n = (integer) ((real) (m1 / 2)); /*< M2 = N >*/ m2 = n; /*< L1 = 0 >*/ l1 = 0; /*< INF1 = .TRUE. >*/ inf1 = TRUE_; /*< INF2 = .FALSE. >*/ inf2 = FALSE_; /* INITIALIZE THE QUADRATURE */ /*< 50 I = 0 >*/ L50: i__ = 0; /*< IF (INF.EQ.1) SUM = F(0.d0) >*/ if (*INF == 1) { /* sum = (*f)(&c_b12);*/ zz[0]=c_b12; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ sum=REAL(tmp)[0]; Rf_unprotect(4); } /*< IF (INF.EQ.2) SUM = F(A+1.) 
>*/ if (*INF == 2) { d__1 = *A + 1.f; /* sum = (*f)(&d__1);*/ zz[0]=d__1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ sum=REAL(tmp)[0]; Rf_unprotect(4); } /*< IF (INF.EQ.3) SUM = F(A+dLOG(1.+SQ2))/SQ2 >*/ if (*INF == 3) { d__1 = *A + log(sq2 + 1.f); /* sum = (*f)(&d__1) / sq2;*/ zz[0]=d__1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ sum=REAL(tmp)[0]/sq2; Rf_unprotect(4); } /*< IF (INF.EQ.4) SUM = F((A+B)/2.)/4.*BA >*/ if (*INF == 4) { d__1 = (*A + *B) / 2.f; /* sum = (*f)(&d__1) / 4.f * ba;*/ zz[0]=d__1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ sum=REAL(tmp)[0] / 4.f * ba; Rf_unprotect(4); } /* COMPUTE WEIGHTS, NODES AND FUNCTION VALUES */ /*< 60 EXPH = EXP(H) >*/ L60: exph = exp(h__); /*< EXPH0 = EXP(H0) >*/ exph0 = exp(h0); /*< H1 = H0 >*/ h1 = h0; /*< E1 = EXPH0 >*/ e1 = exph0; /*< U = 0. >*/ u = 0.f; /*< COR = 0. >*/ cor = 0.f; /*< 70 IF (I1) 80, 90, 100 >*/ L70: if (i1 < 0) { goto L80; } else if (i1 == 0) { goto L90; } else { goto L100; } /*< 80 V = F(H1) >*/ L80: /* v = (*f)(&h1);*/ zz[0]=h1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ v=REAL(tmp)[0]; Rf_unprotect(4); /*< H1 = H1 + H >*/ h1 += h__; /*< GO TO 150 >*/ goto L150; /*< 90 V = E1*F(A+E1) >*/ L90: d__1 = *A + e1; /* v = e1 * (*f)(&d__1);*/ zz[0]=d__1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ v=e1 * REAL(tmp)[0]; Rf_unprotect(4); /*< E1 = E1*EXPH >*/ e1 *= exph; /*< GO TO 150 >*/ goto L150; /*< 100 IF (INF.EQ.4) GO TO 140 >*/ L100: if (*INF == 4) { goto L140; } /*< W1 = SQRT(E1+1./E1) >*/ w1 = sqrt(e1 + 1.f / e1); /*< W2 = SQRT(E1) >*/ w2 = sqrt(e1); /*< IF (E1.LT.0.1) GO TO 110 >*/ if (e1 < .1f) { goto L110; } /*< S = dLOG(E1+W1*W2) >*/ s = log(e1 + w1 * w2); /*< GO TO 130 >*/ goto L130; /*< 110 W3 = E1 >*/ L110: w3 = e1; /*< W4 = E1*E1 >*/ w4 = e1 * e1; /*< C0 = 1. >*/ c0 = 1.f; /*< S = E1 >*/ s = e1; /*< S1 = E1 >*/ s1 = e1; /*< T = 0. >*/ t = 0.f; /*< 120 C0 = -C0*(0.5+T)*(2.*T+1.)/(2.*T+3.)/(T+1.) 
>*/ L120: c0 = -c0 * (t + .5f) * (t * 2.f + 1.f) / (t * 2.f + 3.f) / (t + 1.f); /*< T = T + 1. >*/ t += 1.f; /*< W3 = W3*W4 >*/ w3 *= w4; /*< S = S + C0*W3 >*/ s += c0 * w3; /*< IF (S.EQ.S1) GO TO 130 >*/ if (s == s1) { goto L130; } /*< S1 = S >*/ s1 = s; /*< GO TO 120 >*/ goto L120; /*< 130 V = W2/W1*F(A+S) >*/ L130: d__1 = *A + s; /* v = w2 / w1 * (*f)(&d__1);*/ zz[0]=d__1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ v = w2 / w1 * REAL(tmp)[0]; Rf_unprotect(4); /*< E1 = E1*EXPH >*/ e1 *= exph; /*< GO TO 150 >*/ goto L150; /*< 140 W1 = E1 + 1. >*/ L140: w1 = e1 + 1.f; /*< V = E1/W1/W1*F((A+B*E1)/W1)*BA >*/ d__1 = (*A + *B * e1) / w1; /* v = e1 / w1 / w1 * (*f)(&d__1) * ba;*/ zz[0]=d__1; state2 = Rf_protect(Rf_allocVector(REALSXP, 1)); /*.C -> .Call requires this*/ REAL(state2)[0] = zz[0]; /*.C -> .Call requires this*/ Rf_protect(call2=Rf_lang2(f, state2)); /*.C -> .Call requires this*/ Rf_protect(result2=Rf_eval(call2, envir)); /*.C -> .Call requires this*/ Rf_protect(tmp=Rf_coerceVector(result2, REALSXP));/*.C -> .Call requires this*/ v = e1 / w1 / w1 * REAL(tmp)[0] * ba; Rf_unprotect(4); /*< E1 = E1*EXPH >*/ e1 *= exph; /* SUMMATION ALGORITHM */ /*< 150 I = I + 1 >*/ L150: ++i__; /*< SUM1 = U + V >*/ sum1 = u + v; /*< IF (ABS(U).LT.ABS(V)) GO TO 160 >*/ if (abs(u) < abs(v)) { goto L160; } /*< COR = V - (SUM1-U) + COR >*/ cor = v - (sum1 - u) + cor; /*< GO TO 170 >*/ goto L170; /*< 160 COR = U - (SUM1-V) + COR >*/ L160: cor = u - (sum1 - v) + cor; /*< 170 U = SUM1 >*/ L170: u = sum1; /*< IF (I.LT.L1) GO TO 70 >*/ if (i__ < l1) { goto L70; } /* SWITCH TO CHECK TRUNCATION CONDITION ( HEURISTIC */ /* TERMINATION) */ /*< IF (INF1) GO TO 190 >*/ if (inf1) { goto L190; } /* SWITCH TO COMPUTE THE MIDORDINATE APPROXIMATION */ /* ( HEURISTIC TERMINATION ) OR TO STOP ( DETERMINIS- */ /* TIC TERMINATION) */ /*< IF (INF2) GO TO 210 >*/ if (inf2) { goto L210; } /* SET UP PARAMETERS TO CONTINUE SUMMATION */ /*< L1 = K >*/ l1 = k; /*< 180 INF2 = .TRUE. >*/ L180: inf2 = TRUE_; /*< I = 0. >*/ i__ = 0.f; /*< EXPH = 1./EXPH >*/ exph = 1.f / exph; /*< H0 = -H0 >*/ h0 = -h0; /*< E1 = 1./EXPH0 >*/ e1 = 1.f / exph0; /*< H1 = H0 >*/ h1 = h0; /*< H = -H >*/ h__ = -h__; /*< GO TO 70 >*/ goto L70; /* TRUNCATION CONDITION */ /*< 190 V0 = V1 >*/ L190: v0 = v1; /*< V1 = V2 >*/ v1 = v2; /*< V2 = ABS(V) >*/ v2 = abs(v); /*< IF (V0+V1+V2.LE.EPS3) GO TO 200 >*/ if (v0 + v1 + v2 <= eps3) { goto L200; } /*< IF (I.LT.M2) GO TO 70 >*/ if (i__ < m2) { goto L70; } /*< N1 = 5 >*/ n1 = 5; /*< 200 IF (INF2) K = I >*/ L200: if (inf2) { k = i__; } /*< IF (.NOT.INF2) L = I >*/ if (! inf2) { l = i__; } /*< V1 = 10.*EPS >*/ v1 = *EPS * 10.f; /*< V2 = V1 >*/ v2 = v1; /*< M2 = M1 - L >*/ m2 = m1 - l; /*< IF (.NOT.INF2) GO TO 180 >*/ if (! inf2) { goto L180; } /* N1=5 - TRUNCATION CONDITION NOT SATISFIED */ /*< IF (N1.EQ.5) GO TO 260 >*/ if (n1 == 5) { goto L260; } /* TRUNCATION CONDITION SATISFIED, SUM2=TRAPEZOIDAL */ /* APPROXIMATION */ /*< SUM2 = SUM1 + COR + SUM >*/ sum2 = sum1 + cor + sum; /*< M2 = 2*(K+L) >*/ m2 = (k + l) << 1; /* CHECK THE NUMBER OF FUNCTION EVALUATIONS */ /*< IF (M2.GT.M1) GO TO 240 >*/ if (m2 > m1) { goto L240; } /* INITIALIZE ITERATION */ /*< INF1 = .FALSE. >*/ inf1 = FALSE_; /*< INF2 = .FALSE. 
>*/ inf2 = FALSE_; /*< L1 = L >*/ l1 = l; /*< I = 0 >*/ i__ = 0; /*< H = -H >*/ h__ = -h__; /*< H0 = H/2. >*/ h0 = h__ / 2.f; /*< GO TO 60 >*/ goto L60; /* P.GE.1 = DETERMINISTIC TERMINATION */ /*< 210 IF (P.GE.1.) GO TO 220 >*/ L210: if (*P >= 1.f) { goto L220; } /* COMPUTE THE MIDORDINATE APPROXIMATION SUM1 */ /*< H = -H >*/ h__ = -h__; /*< SUM1 = (SUM1+COR)*H >*/ sum1 = (sum1 + cor) * h__; /*< W1 = (SUM1+SUM2)/2. >*/ w1 = (sum1 + sum2) / 2.f; /* TERMINATION CONDITION */ /*< IF (ABS(SUM1-SUM2).LE.SR) GO TO 230 >*/ if ((d__1 = sum1 - sum2, abs(d__1)) <= sr) { goto L230; } /* SET UP DATA FOR THE NEXT ITERATION */ /*< M2 = 2*M2 >*/ m2 <<= 1; /*< IF (M2.GT.M1) GO TO 250 >*/ if (m2 > m1) { goto L250; } /*< I = 0 >*/ i__ = 0; /*< K = 2*K >*/ k <<= 1; /*< L = 2*L >*/ l <<= 1; /*< L1 = L >*/ l1 = l; /*< H = H/2. >*/ h__ /= 2.f; /*< H0 = H/2. >*/ h0 = h__ / 2.f; /*< SUM2 = W1 >*/ sum2 = w1; /*< INF2 = .FALSE. >*/ inf2 = FALSE_; /*< GO TO 60 >*/ goto L60; /* FINAL RESULTS */ Rf_protect(ans = Rf_allocVector(REALSXP, 1)); /*< 220 QUADR = -H*(SUM1+COR+SUM) >*/ L220: *quadr = -h__ * (sum1 + cor + sum); /*< INF = N1 >*/ *INF = n1; /*< RETURN >*/ REAL(ans)[0] = quadr[0]; Rf_unprotect(1); return ans;// return 0; /*< 230 QUADR = W1 >*/ L230: *quadr = w1; /*< INF = 2 >*/ *INF = 2; /*< M = M2 + 1 >*/ *M = m2 + 1; /*< RETURN >*/ REAL(ans)[0] = quadr[0]; Rf_unprotect(1); return ans;// return 0; /*< 240 QUADR = SUM2 >*/ L240: *quadr = sum2; /*< INF = 3 >*/ *INF = 3; /*< M = K + L + 1 >*/ *M = k + l + 1; /*< RETURN >*/ REAL(ans)[0] = quadr[0]; Rf_unprotect(1); return ans;// return 0; /*< 250 QUADR = W1 >*/ L250: *quadr = w1; /*< INF = 3 >*/ *INF = 3; /*< M = M2/2 + 1 >*/ *M = m2 / 2 + 1; /*< RETURN >*/ REAL(ans)[0] = quadr[0]; Rf_unprotect(1); Rf_error("INF in toms614_sexp.c is 3 -- integration incomplete - try larger max"); return ans;// return 0; /*< 260 QUADR = U + COR + SUM >*/ L260: *quadr = u + cor + sum; /*< INF = 4 >*/ *INF = 4; /*< M = K + L + 1 >*/ *M = k + l + 1; /*< RETURN >*/ REAL(ans)[0] = quadr[0]; Rf_unprotect(1); Rf_error("INF in toms614_sexp.c is 4 -- integration incomplete - try larger max"); return ans;// return 0; /*< 270 INF = 10 >*/ L270: *INF = 10; /*< RETURN >*/ //REAL(ans)[0] = quadr[0]; Rf_unprotect(1); Rf_error("INF in toms614_sexp.c is 10 -- incorrect arguments"); return ans;// return 0; /*< 280 INF = 11 >*/ L280: *INF = 11; /*< RETURN >*/ //REAL(ans)[0] = quadr[0]; Rf_unprotect(1); Rf_error("INF in toms614_sexp.c is 11 -- incorrect arguments"); return ans;// return 0; /*< 290 INF = 12 >*/ L290: *INF = 12; /*< RETURN >*/ //REAL(ans)[0] = quadr[0]; Rf_unprotect(1); Rf_error("INF in toms614_sexp.c is 12 -- incorrect arguments"); return ans;// return 0; /*< 300 INF = 13 >*/ L300: *INF = 13; /*< RETURN >*/ //REAL(ans)[0] = quadr[0]; Rf_unprotect(1); Rf_error("INF in toms614_sexp.c is 13 -- incorrect arguments"); return ans;// return 0; /*< END >*/ } /* inthp_ */ rmutil/src/cutil.c0000755000176200001440000000312313425057453013653 0ustar liggesusers/* * rmutil : A Library of Special Functions for Repeated Measurements * Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * SYNOPSIS * * void F77_CALL(flgamma)(double *x,double *y) * void F77_CALL(flbeta)(double *a,double*b,double *y) * void F77_CALL(fbesselk)(double *x,double *alpha,double *y) * * DESCRIPTION * * This function allows a Fortran program to call the lgamma, lbeta, * and bessel_k functions written in C. * */ #include #include "R.h" #include "Rmath.h" #include "R_ext/RS.h" extern double lgammafn(double x); void F77_CALL(flgamma)(double *x,double *y){ *y=lgammafn(*x);} extern double lbeta(double a, double b); void F77_CALL(flbeta)(double *a,double *b,double *y){ *y=lbeta(*a, *b);} extern double bessel_k(double x, double alpha, double expo); void F77_CALL(fbesselk)(double *x,double *alpha, double *y){ *y=bessel_k(*x, *alpha, 1.0);} rmutil/src/f2c.h0000755000176200001440000001075114326276276013226 0ustar liggesusers/* f2c.h -- Standard Fortran to C header file */ /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ #ifndef F2C_INCLUDE #define F2C_INCLUDE /* I have redefined "long int" definitions to "int" */ /* This is to fix wordsize problem on 64 bit machines */ /* like the DEC alpha. - Ross Ihaka */ typedef int integer; typedef char *address; typedef short int shortint; typedef float real; typedef double doublereal; typedef struct { real r, i; } complex; typedef struct { doublereal r, i; } doublecomplex; typedef int logical; typedef short int shortlogical; typedef char logical1; typedef char integer1; /* typedef long long longint; */ /* system-dependent */ #define TRUE_ (1) #define FALSE_ (0) /* Extern is for use with -E */ #ifndef Extern #define Extern extern #endif /* I/O stuff */ #ifdef f2c_i2 /* for -i2 */ typedef short flag; typedef short ftnlen; typedef short ftnint; #else typedef long flag; typedef long ftnlen; typedef long ftnint; #endif /*external read, write*/ typedef struct { flag cierr; ftnint ciunit; flag ciend; char *cifmt; ftnint cirec; } cilist; /*internal read, write*/ typedef struct { flag icierr; char *iciunit; flag iciend; char *icifmt; ftnint icirlen; ftnint icirnum; } icilist; /*open*/ typedef struct { flag oerr; ftnint ounit; char *ofnm; ftnlen ofnmlen; char *osta; char *oacc; char *ofm; ftnint orl; char *oblnk; } olist; /*close*/ typedef struct { flag cerr; ftnint cunit; char *csta; } cllist; /*rewind, backspace, endfile*/ typedef struct { flag aerr; ftnint aunit; } alist; /* inquire */ typedef struct { flag inerr; ftnint inunit; char *infile; ftnlen infilen; ftnint *inex; /*parameters in standard's order*/ ftnint *inopen; ftnint *innum; ftnint *innamed; char *inname; ftnlen innamlen; char *inacc; ftnlen inacclen; char *inseq; ftnlen inseqlen; char *indir; ftnlen indirlen; char *infmt; ftnlen infmtlen; char *inform; ftnint informlen; char *inunf; ftnlen inunflen; ftnint *inrecl; ftnint *innrec; char *inblank; ftnlen inblanklen; } inlist; #define VOID void union Multitype { /* for multiple entry points */ integer1 g; shortint h; integer i; /* longint j; */ real r; doublereal d; complex c; doublecomplex z; }; typedef union Multitype Multitype; typedef long 
Long; /* No longer used; formerly in Namelist */ struct Vardesc { /* for Namelist */ char *name; char *addr; ftnlen *dims; int type; }; typedef struct Vardesc Vardesc; struct Namelist { char *name; Vardesc **vars; int nvars; }; typedef struct Namelist Namelist; /* a whole bunch of stuff to keep watcom's C compiler happy */ #ifdef min #undef min #endif #ifdef max #undef max #endif #define abs(x) ((x) >= 0 ? (x) : -(x)) #define dabs(x) (doublereal)abs(x) #define min(a,b) ((a) <= (b) ? (a) : (b)) #define max(a,b) ((a) >= (b) ? (a) : (b)) #define dmin(a,b) (doublereal)min(a,b) #define dmax(a,b) (doublereal)max(a,b) /* procedure parameter types for -A and -C++ */ #define F2C_proc_par_types 1 #ifdef __cplusplus typedef int /* Unknown procedure type */ (*U_fp)(...); typedef shortint (*J_fp)(...); typedef integer (*I_fp)(...); typedef real (*R_fp)(...); typedef doublereal (*D_fp)(...), (*E_fp)(...); typedef /* Complex */ VOID (*C_fp)(...); typedef /* Double Complex */ VOID (*Z_fp)(...); typedef logical (*L_fp)(...); typedef shortlogical (*K_fp)(...); typedef /* Character */ VOID (*H_fp)(...); typedef /* Subroutine */ int (*S_fp)(...); #else typedef int /* Unknown procedure type */ (*U_fp)(void); typedef shortint (*J_fp)(void); typedef integer (*I_fp)(void); typedef real (*R_fp)(void); typedef doublereal (*D_fp)(void), (*E_fp)(void); typedef /* Complex */ VOID (*C_fp)(void); typedef /* Double Complex */ VOID (*Z_fp)(void); typedef logical (*L_fp)(void); typedef shortlogical (*K_fp)(void); typedef /* Character */ VOID (*H_fp)(void); typedef /* Subroutine */ int (*S_fp)(void); #endif /* E_fp is for real functions when -R is not specified */ typedef VOID C_f; /* complex function */ typedef VOID H_f; /* character function */ typedef VOID Z_f; /* double complex function */ typedef doublereal E_f; /* real function with -R not specified */ /* undef any lower-case symbols that your C compiler predefines, e.g.: */ #ifndef Skip_f2c_Undefs #undef cray #undef gcos #undef mc68010 #undef mc68020 #undef mips #undef pdp11 #undef sgi #undef sparc #undef sun #undef sun2 #undef sun3 #undef sun4 #undef u370 #undef u3b #undef u3b2 #undef u3b5 #undef unix #undef vax #endif #endif rmutil/src/gettvc.f0000755000176200001440000000757213425057453014046 0ustar liggesusersc c rmutil : A Library of Special Functions for Repeated Measurements c Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey c c This program is free software; you can redistribute it and/or modify c it under the terms of the GNU General Public License as published by c the Free Software Foundation; either version 2 of the License, or c (at your option) any later version. c c This program is distributed in the hope that it will be useful, c but WITHOUT ANY WARRANTY; without even the implied warranty of c MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the c GNU General Public License for more details. c c You should have received a copy of the GNU General Public License c along with this program; if not, write to the Free Software c Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. c c SYNOPSIS c c subroutine gettvc_f(x,y,xtvc,tvcov,nobs,nind,nknt,ties, c + xu,ndelta,tvcov2,nu,wu,nld,tvcov3) c c DESCRIPTION c c Function to find the most recent value of a time-varying c covariate not recorded at the same time as the response. c c subroutine gettvc_f(x,y,xtvc,tvcov,nobs,nind,nknt,ties, + xu,ndelta,tvcov2,nu,wu,nld,tvcov3) c c Merge and sort x and xtvc, putting the result, of length nu, c in xu. 
Identify members of xtvc with 1 in the binary ndelta, c and determine the relevant covariate values for those members, c putting that data in tvcov2. c Daniel F. Heitjan, 27 June 1990 c Jim Lindsey, revised 6 October, 1991 c implicit none integer i,j,n,indx,indu,indk,nm,nm1,nld,nind integer nobs(nind),nknt(nind),nu(nind) logical ndelta(nind,2*nld),ldone,ties double precision x(1),y(1),xtvc(1),wu(1),tvcov(1), + tvcov3(1),xu(nind,2*nld),tvcov2(nind,2*nld), + recx,reck nm=0 nm1=0 do 1 n=1,nind do 9 i=1,nobs(n) xu(n,i)=x(nm+i) 9 continue do 19 i=1,nld*2 ndelta(n,i)=.false. tvcov2(n,i)=-1e30 19 continue indx=1 indk=1 indu=1 recx=x(nm+indx) reck=xtvc(nm1+indk) ldone=.false. 10 continue if(.not.ldone)then if(recx.lt.reck)then xu(n,indu)=recx indx=indx+1 else xu(n,indu)=reck ndelta(n,indu)=.true. tvcov2(n,indu)=tvcov(nm1+indk) if(reck.ne.recx)indk=indk+1 if(reck.eq.recx)indx=indx+1 endif indu=indu+1 if(indx.le.nobs(n))then recx=x(nm+indx) else recx=1e30 endif if(indk.le.nknt(n))then reck=xtvc(nm1+indk) else reck=1e30 endif if((recx.ge.1e30).and.(reck.ge.1e30)) ldone=.true. go to 10 endif nu(n)=indu-1 nm=nm+nobs(n) nm1=nm1+nknt(n) 1 continue nm=0 nm1=0 do 18 n=1,nind c c find time-varying covariate values c if((x(nm+1).ge.xtvc(nm1+1).and.ties).or. + (x(nm+1).gt.xtvc(nm1+1).and..not.ties))then reck=tvcov2(n,1) else reck=0 endif do 49 i=1,nu(n) if(i.gt.1.and.ndelta(n,i-1))reck=tvcov2(n,i-1) wu(i)=reck 49 continue c c pick the components of wu that correspond to the sampled c x times, and place them in tvcov c do 2 j=1,nobs(n) do 29 i=j,nu(n) if(x(nm+j).eq.xu(n,i))then if(x(nm+j).eq.xu(n,i+1).and.ties)then tvcov3(nm+j)=wu(i+1) else tvcov3(nm+j)=wu(i) endif goto 2 endif 29 continue 2 continue nm1=nm1+nknt(n) nm=nm+nobs(n) 18 continue return end rmutil/src/dist.c0000755000176200001440000002733014326331143013474 0ustar liggesusers/* * rmutil : A Library of Special Functions for Repeated Measurements * Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
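 *
 * Editorial note: the routines in this file are registered as .C entry
 * points in src/rmutil_init.c (ddb, ddp, dmb, dmp, dpvfp, pdb, pdp,
 * pginvgauss_c, pmb, pmp, ppowexp_c, ppvfp, psimplex_c) and are
 * presumably reached from the package's R-level density and probability
 * functions rather than being called directly by users.
 *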
* * SYNOPSIS * * void pdp(int q[], int *my, double m[], double s[], int *nn, double res[]) * void ddp(int y[], int *my, double m[], double s[], int *nn, * double wt[], double res[]) * void pmp(int q[], int *my, double m[], double s[], int *nn, double res[]) * void dmp(int y[], int *my, double m[], double s[], int *nn, * double wt[], double res[]) * void ppvfp(int q[], double m[], double s[], double f[], int *nn, * double res[]) * void dpvfp(int y[], double m[], double s[], double f[], int *nn, * double wt[], double res[]) * void pdb(int q[], int n[], double m[], double s[], int *nn, double res[]) * void ddb(int y[], int n[], double m[], double s[], int *nn, * double wt[], double res[]) * void pmb(int q[], int n[], double m[], double s[], int *nn, double res[]) * void dmb(int y[], int n[], double m[], double s[], int *nn, * double wt[], double res[]) * * void psimplex_c(double y[], double m[], double s[], double f[], int *len, * double *eps, int *pts, int *max, int *err, double res[]) * void pginvgauss_c(double y[], double m[], double s[], double f[], int *len, * double *eps, int *pts, int *max, int *err, double res[]) * void ppowexp_c(double y[], double m[], double s[], double f[], int *len, * double *eps, int *pts, int *max, int *err, double res[]) * * DESCRIPTION * * Functions to compute the probability and cumulative probability * functions of the following overdispersed discrete distributions: * double Poisson, multiplicative Poisson, double binomial, and * multiplicative binomial, * and the cumulative probability functions for the following * continuous distributions: * Levy, generalized inverse Gaussian, and power exponential. * */ #include #include #include "dist.h" #include "R.h" #include "Rmath.h" /* double Poisson */ static double dpnc(int my, double m, double s){ int i; double r; r=exp(-s*m); for(i=1;i<=my;i++) r+=exp(i*(1-s)*log((double)i)+i*s*log(m)+i*(s-1)-s*m-lgammafn(i+1.)); return(r);} void pdp(int q[], int *my, double m[], double s[], int *nn, double res[]){ int i; for(i=0;i<*nn;i++) res[i]=dpnc(q[i],m[i],s[i])/dpnc(*my,m[i],s[i]);} void ddp(int y[], int *my, double m[], double s[], int *nn, double wt[], double res[]){ int i,y1; for(i=0;i<*nn;i++){ if(wt[i]>0){ y1=y[i]>0?y[i]:1; res[i]=wt[i]*(-s[i]*m[i]+y[i]*s[i]*(1+log(m[i]/y1))+y[i]*log((double)y1)-y[i]-lgammafn(y[i]+1.)-log(dpnc(*my,m[i],s[i])));} else res[i]=0;}} /* multiplicative Poisson */ static double mpnc(int my, double m, double s){ int i; double r; r=0.; for(i=0;i<=my;i++)r+=exp(i*log(m)+i*i*s-m-lgammafn(i+1.)); return(r);} void pmp(int q[], int *my, double m[], double s[], int *nn, double res[]){ int i; double ss; for(i=0;i<*nn;i++){ ss=log(s[i]); res[i]=mpnc(q[i],m[i],ss)/mpnc(*my,m[i],ss);}} void dmp(int y[], int *my, double m[], double s[], int *nn, double wt[], double res[]){ int i; double ss; for(i=0;i<*nn;i++){ if(wt[i]>0){ ss=log(s[i]); res[i]=wt[i]*(-m[i]+y[i]*y[i]*ss+y[i]*log(m[i])-lgammafn(y[i]+1)-log(mpnc(*my,m[i],ss)));} else res[i]=0;}} /* power variance function Poisson */ static double pvfc(int y, double m, double s, double f){ int i,j; double r,*c,tmp1,tmp2,tmp3,tmp4; c=(double*)R_alloc((size_t)(y*y),sizeof(double)); tmp1=gammafn(1.-f); tmp2=log(m); tmp3=log(s+1.); tmp4=log(s); for(i=0;i0){ c[i*y]=gammafn(i+1-f)/tmp1; if(i>1) for(j=1;j0){ if(f[i]==0.)res[i]=dnbinom(y[i],m[i]*s[i],s[i]/(1+s[i]),0); else { res[i]=wt[i]*exp(-m[i]*((s[i]+1.)*pow((s[i]+1.)/s[i],f[i]-1.)-s[i])/f[i]); if(y[i]>0)res[i]*=pvfc(y[i],m[i],s[i],f[i]); if(y[i]>1)res[i]/=gammafn(y[i]+1);}} else res[i]=0;}} void ppvfp(int q[], 
double m[], double s[], double f[], int *nn, double res[]){ int i,j; static int k=1; static double wt=1; double tmp; for(i=0;i<*nn;i++){ if(f[i]==0.)res[i]=pnbinom(q[i],m[i]*s[i],s[i]/(1+s[i]),1,0); else { res[i]=0.; for(j=0;j0?y*(s-1)*log((double)y):0) -(y0){ y2=n[i]-y[i]; yy1=y[i]>0?y[i]:1; yy2=y2>0?y2:1; res[i]=wt[i]*(lchoose((double)n[i],(double)y[i])+ (s[i]-1)*n[i]*log((double)n[i])+s[i]*y[i]*log(m[i]) +s[i]*y2*log(1.-m[i])-(s[i]-1)*y[i]*log((double)yy1) -(s[i]-1)*y2*log((double)yy2)-log(dbnc(n[i],n[i],m[i],s[i])));} else res[i]=0;}} /* multiplicative binomial */ static double mbnc(int yy, int n, double m, double s){ int y; double r; r=0.; for(y=0;y<=yy;y++)r+=exp(lchoose((double)n,(double)y)+(n-y)*log(1.-m)+y*(log(m)+(n-y)*y*s)); return(r);} void pmb(int q[], int n[], double m[], double s[], int *nn, double res[]){ int i; double ss; for(i=0;i<*nn;i++){ ss=log(s[i]); res[i]=mbnc(q[i],n[i],m[i],ss)/mbnc(n[i],n[i],m[i],ss);}} void dmb(int y[], int n[], double m[], double s[], int *nn, double wt[], double res[]){ int i; double ss; for(i=0;i<*nn;i++){ if(wt[i]>0){ ss=log(s[i]); res[i]=wt[i]*(lchoose((double)(n[i]),(double)y[i])+y[i]*log(m[i]) +(n[i]-y[i])*(log(1.-m[i]) +(n[i]-y[i])*y[i]*ss)-log(mbnc(n[i],n[i],m[i],ss)));} else res[i]=0;}} /* romberg integration routines */ static void interp(double x[], double fx[], int pts, double tab1[], double tab2[], double *f, double *df, int *err) { int i,j,ni=0; double diff1,diff2,tmp1,tmp2,lim1,lim2; *err=0; tmp1=fabs(x[0]); for(i=0;i=pts?1:0); for(i=0;i=pts){ interp(&x[j1-pts+i*max],&fx[j1-pts+i*max],pts,tab1,tab2,&sumlen[i],&errsum,err); if(*err)return; if(fabs(errsum)>eps*fabs(sumlen[i]))finish=0;} x[j1+i*max]=x[j+i*max]/9.0; fx[j1+i*max]=fx[j+i*max];} if(finish)return;} *err=3; return;} /* simplex distribution */ static void dsimplex(double y[], double m[], double s[], double f[], int len, double res[]){ int i; for(i=0;ilength(nobs(z)))stop("no such individual") if(inherits(z,"kalsurv")){ # # event history data may have ties # for(i in 1:length(z$response$y))if(z$response$y[i]==0) z$response$y[i] <- z$response$y[i-1] if(is.null(xlab))xlab <- "Chronological time" if(intensity){ # modify predicted values, if intensity wanted z$pred <- 1/z$pred if(is.null(ylab))ylab <- "Mean intensity"} else if(is.null(ylab))ylab <- "Time between events"} else { if(is.null(xlab))xlab <- "Time" if(is.null(ylab))ylab <- "Fitted value"} if(length(z$ptimes)==length(z$response$times)){ # # use fitted values from model # ns <- length(nind) ii <- covind(z$response) if(!is.null(lty)){ if(length(lty)!=1&&length(lty)!=ns) stop("lty must have a value for each observation") else if(length(lty)==1)lty <- rep(lty,ns)} first <- !add j <- 0 lt <- 0 for(i in nind){ # plot separately for each individual chosen if(is.null(z$response$nest)) kk <- nest <- 1 else { nest <- unique(z$response$nest) kk <- z$response$nest} for(k in nest){ # if nesting, start lines over in each cluster j <- j+1 if(is.null(lty)) lt <- (lt%%6)+1 else lt <- lty[j] if(first){ plot(z$ptimes[ii==i&kk==k],z$pred[ii==i&kk==k],type="l",ylim=ylim,lty=lt,xlab=xlab,ylab=ylab,...) first <- FALSE} else lines(z$ptimes[ii==i&kk==k],z$pred[ii==i&kk==k],lty=lt)}}} else { # # a function was supplied to calculate predicted values # if(is.null(ylim))ylim <- c(min(z$pred),max(z$pred)) if(is.null(lty))lty <- 1 if(!add)plot(z$ptimes, z$pred, type="l", ylim=ylim, lty=lty, xlab=xlab, ylab=ylab, ...) 
else lines(z$ptimes, z$pred, lty=lty)} if(!is.null(z$pse)){ # # plot standard errors if available (carma only) # lines(z$ptimes, z$pse[,1], lty=3) lines(z$ptimes, z$pse[,2], lty=3)}} ### functions to create information for plotting individual profiles ### from dynamic models ### iprofile <- function(z, ...) UseMethod("iprofile") iprofile.default <- function(z, plotsd=FALSE, ...){ if(is.null(z$response$times)&&!inherits(z,"kalsurv"))stop("No times available") if(!inherits(z,"recursive")) stop("The object must have class, recursive") else if(is.null(z$rpred))stop("Individual profiles not available") # # if response is transformed, transform predicted values # and standard deviations # if(!is.null(z$transform)){ if(z$transform=="exp"){ if(plotsd){ sd1 <- log(z$rpred+2*z$sdr) sd2 <- log(z$rpred-2*z$sdr)} z$rpred <- log(z$rpred)} else if(z$transform=="square"){ if(plotsd){ sd1 <- sqrt(z$rpred+2*z$sdr) sd2 <- sqrt(z$rpred-2*z$sdr)} z$rpred <- sqrt(z$rpred)} else if(z$transform=="sqrt"){ if(plotsd){ sd1 <- (z$rpred+2*z$sdr)^2 sd2 <- (z$rpred-2*z$sdr)^2} z$rpred <- z$rpred^2} else if(z$transform=="log"){ if(plotsd){ sd1 <- exp(z$rpred+2*z$sdr) sd2 <- exp(z$rpred-2*z$sdr)} z$rpred <- exp(z$rpred)} else { sd1 <- z$rpred+2*z$sdr sd2 <- z$rpred-2*z$sdr} if(plotsd)z$psd <- cbind(sd1,sd2)} if(inherits(z,"kalsurv")){ z$response$times <- vector("double",length(z$response$y)) tmp1 <- 1 for(i in 1:length(nobs(z))){ tmp2 <- sum(nobs(z)[1:i]) z$response$times[tmp1:tmp2] <- cumsum(z$response$y[tmp1:tmp2]) tmp1 <- tmp2+1}} class(z) <- c("iprofile",class(z)) invisible(z)} ### function to plot individual profiles ### plot.iprofile <- function(x, nind=1, observed=TRUE, intensity=FALSE, add=FALSE, lty=NULL, pch=NULL, ylab=NULL, xlab=NULL, main=NULL, ylim=NULL, xlim=NULL, ...){ z <- x; rm(x) if(max(nind)>length(nobs(z)))stop("no such individual") if(inherits(z,"kalsurv")){ for(i in 1:length(z$response$y))if(z$response$y[i]==0) z$response$y[i] <- z$response$y[i-1] if(is.null(xlab))xlab <- "Chronological time" if(intensity){ z$rpred <- 1/z$rpred z$response$y <- 1/z$response$y if(is.null(ylab))ylab <- "Mean intensity"} else if(is.null(ylab))ylab <- "Time between events"} else { if(is.null(xlab))xlab <- "Time" if(is.null(ylab))ylab <- "Recursive fitted value"} if(is.null(ylim)&&!is.null(z$sdr)&&z$transform=="identity") ylim <- c(min(z$rpred-3*z$sdr,na.rm=TRUE),max(z$rpred+3*z$sdr,na.rm=TRUE)) ns <- length(nind) ii <- covind(z$response) pc <- -1 lt <- 0 first <- !add # # set up plotting controls # if(is.null(main)) main <- ifelse(ns==1,paste("Individual ",nind),"") if(is.null(ylim))ylim <- c(min(c(z$rpred,z$response$y),na.rm=TRUE), max(c(z$rpred,z$response$y),na.rm=TRUE)) if(is.null(xlim))xlim <- c(min(z$resp$times),max(z$resp$times)) if(!is.null(lty)){ if(length(lty)!=1&&length(lty)!=ns) stop("lty must have a value for each observation") else if(length(lty)==1)lty <- rep(lty,ns)} if(!is.null(pch)){ if(length(pch)!=1&&length(pch)!=ns) stop("pch must have a value for each observation") else if(length(pch)==1)pch <- rep(pch,ns)} na <- !is.na(z$rpred) j <- 0 # # plot separately for each individual chosen # if(is.null(z$response$nest)) kk <- nest <- 1 else { nest <- unique(z$response$nest) kk <- z$response$nest} for(i in nind){ for(k in nest){ # if nesting, start lines over in each cluster j <- j+1 if(is.null(pch))pc <- (pc+1)%%6 else pc <- pch[j] if(is.null(lty)) lt <- (lt%%6)+1 else lt <- lty[j] if(first){ 
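# first selected profile (with add=FALSE): open a new plot;
# subsequent profiles in the loop are added with lines() below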
plot(z$resp$times[ii==i&kk==k&na],z$rpred[ii==i&kk==k&na],type="l",lty=lt,ylab=ylab,xlab=xlab,main=main,ylim=ylim,xlim=xlim,...) first <- FALSE} else lines(z$resp$times[ii==i&kk==k&na],z$rpred[ii==i&kk==k&na],lty=lt) # if required, plot data points if(observed)points(z$resp$times[ii==i&kk==k],z$resp$y[ii==i&kk==k],pch=pc) # if required, plot standard deviations if(!is.null(z$psd)){ lines(z$resp$times[ii==i&kk==k&na],z$psd[ii==i&kk==k&na,1],lty=3) lines(z$resp$times[ii==i&kk==k&na],z$psd[ii==i&kk==k&na,2],lty=3)}}}} ### function to plot residuals ### plot.residuals <- function(x, X=NULL, subset=NULL, ccov=NULL, nind=NULL, recursive=TRUE, pch=20, ylab="Residual", xlab=NULL, main=NULL, ...){ z <- x; rm(x) x <- X; rm(X) na <- TRUE # # check if model produced by a function in one of my libraries # resp <- !is.null(z$response$y) if(!resp){ nind <- ccov <- NULL recursive <- FALSE} if(is.character(x))x <- match.arg(x,c("response","fitted")) # # find residuals to be plotted # if(resp){ n <- length(z$response$y) res <- if(inherits(z,"recursive")) residuals(z, recursive=recursive) else residuals(z) # handle ties in event histories if(inherits(z,"kalsurv"))for(i in 1:length(z$response$y)) if(z$response$y[i]==0)z$response$y[i] <- z$response$y[i-1]} else { res <- residuals(z) n <- length(res)} # # indicator of subset to be plotted # if(is.null(subset))subset <- rep(TRUE,n) else { tmp <- rep(FALSE,n) tmp[subset] <- TRUE subset <- tmp} # # find individuals and subset to be plotted # if(resp&&!is.null(nind)){ if(is.null(subset))subset <- rep(FALSE,n) for(i in nind)subset <- subset&covind(z)==i} # # find values for x axis # if(is.null(x)){ # plot by times if possible x <- z$response$times if(is.null(x))stop("x must be specified") if(is.null(xlab))xlab <- "Time"} else if(is.numeric(x)){ # plot against vector supplied if possible if(length(x)!=n) stop("x variable must have same length as residuals") if(is.null(xlab))xlab <- paste(deparse(substitute(x)))} else if(x=="response"){ # plot against response if possible x <- if(resp)z$response$y else z$y if(is.null(x))stop("response variable not found") if(is.null(xlab))xlab <- "Response"} else if(x=="fitted"){ # plot against fitted values if possible x <- if(resp)fitted(z, recursive=recursive) else fitted(z) na <- !is.na(x) if(is.null(xlab))xlab <- "Fitted values"} # # plot # if(is.null(ccov)) plot(x[subset&na], res[subset&na], pch=pch, ylab=ylab, xlab=xlab, main=main, ...) else if(length(ccov)>1)stop("Only one covariate can be specified") else { mat <- match(ccov,colnames(z$ccov$ccov)) if(is.na(mat))stop("covariate not found") un <- unique(z$ccov$ccov[,mat]) tmp <- par()$mfg[3:4] ss <- ceiling(sqrt(length(un))) if(length(un)==ss*(ss-1))ss1 <- ss-1 else ss1 <- ss par(mfrow=c(ss,ss1)) for(i in un){ ind <- 1:sum(nobs(z)) ind[rep(z$ccov$ccov[,mat],nobs(z))!=i] <- NA main <- paste("Covariate ",ccov,"=",i) plot(x[subset&ind&na],res[subset&ind&na], pch=pch,ylab=ylab,xlab=xlab,main=main,...)} par(mfrow=tmp)}} rmutil/R/diffeqn.r0000755000176200001440000000401613425057453013602 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # runge.kutta(f, initial, x) # lin.diff.eqn(A, initial, t=1) # # DESCRIPTION # # Functions for solving differential equations ### Runge-Kutta method for solving a differential equation ### runge.kutta <- function(f, initial, x){ if(!is.function(f))stop("f must be a function") if(!is.numeric(initial)||length(initial)!=1)stop("initial must be a scalar") if(!is.vector(x,mode="numeric"))stop("x must be a numeric vector") y <- initial for(i in 1:(length(x)-1)){ stepsize <- x[i+1]-x[i] f1 <- stepsize*f(y[i],x[i]) f2 <- stepsize*f(y[i]+f1/2,x[i]+stepsize/2) f3 <- stepsize*f(y[i]+f2/2,x[i]+stepsize/2) f4 <- stepsize*f(y[i]+f3,x[i]+stepsize) y <- c(y,y[i]+(f1+2*f2+2*f3+f4)/6)} y } ### autonomous linear differential equations ### lin.diff.eqn <- function(A, initial, t=1){ if(!is.matrix(A)||dim(A)[1]!=dim(A)[2])stop("A must be a square matrix") if(!is.vector(initial,mode="numeric")||length(initial)!=dim(A)[1]) stop("initial must be a numeric vector with length equal to the dimensions of A") if(!is.vector(t,mode="numeric"))stop("t must be a numeric scalar or vector") z <- NULL for(i in 1:length(t))z <- cbind(z,mexp(A,t=t[i])%*%initial) t(z) } rmutil/R/readrm.r0000755000176200001440000003067613425057453013453 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
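#
# EXAMPLE (editorial sketch, not part of the original source)
#
# A minimal, hypothetical call to read.rep; the file name "cows.txt" and
# the column names used below are assumptions chosen only to illustrate
# the arguments, not data shipped with the package:
#
# cows <- read.rep("cows.txt", header=TRUE, response="conc", id="cow",
#         times="time", ccov="dose", type="continuous")
# print(cows)
# plot(cows, nind=1:4)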
# # SYNOPSIS # # read.list(file="", skip=0, nlines=2, order=NULL) # read.surv(file="", skip=0, nlines=1, cumulative=T, all=T) # read.rep(file, header=TRUE, skip=0, col.names=NULL, # response, id=NULL, times=NULL, censor=NULL, # totals=NULL, weights=NULL, nest=NULL, delta=NULL, # coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=T) # # DESCRIPTION # # Utility functions for reading repeated measurements data ### read unbalanced repeated measurements from a file into a list ### read.list <- function(file="", skip=0, nlines=2, order=NULL){ # # check if order of columns is to be changed if(!is.null(order)){ if(length(order)!=nlines)stop("order must have length",nlines,"\n") else if(any(range(order)!=c(1,nlines))) stop("order must have values in (",c(1,nlines),")\n")} # # scan in the data until EOF # continue <- TRUE result <- list() while(continue){ x <- scan(file,skip=skip,nlines=nlines,quiet=TRUE) skip <- skip+nlines if(length(x)==0)continue <- FALSE else { tmp <- matrix(x,ncol=nlines) if(!is.null(order))tmp <- tmp[,order] result <- c(result,list(tmp))}} invisible(result)} ### read unbalanced event history data from a file into a list ### read.surv <- function(file="", skip=0, nlines=1, cumulative=TRUE, all=TRUE){ # # scan in the data until EOF # continue <- TRUE result <- list() censor <- NULL while(continue){ x <- scan(file,skip=skip,nlines=nlines,quiet=TRUE) skip <- skip+nlines if(length(x)==0)continue <- FALSE else { # find response times (if all==TRUE, times and censor # indicator alternate on the line if(all)mm <- matrix(x,ncol=2,byrow=TRUE)[,1] else mm <- x[1:(length(x)-1)] # if cumulative times, find times between events if(cumulative)mm <- c(mm[1],diff(mm)) result <- c(result,list(mm)) # create vector of censor indicators for last time of # each individual only censor <- c(censor,x[length(x)])}} invisible(list(result,censor))} ### read a rectangular data set from a file and create a repeated data object ### read.rep <- function(file, header=TRUE, skip=0, sep = "", na.strings="NA", response, id=NULL, times=NULL, censor=NULL, totals=NULL, weights=NULL, nest=NULL, delta=NULL, coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=TRUE){ if(missing(response)||!is.character(response)) stop("name(s) of response variables must be supplied") if(missing(file)||!is.character(file)) stop("a file name must be supplied") dataframe <- read.table(file,header=header,skip=skip, na.strings=na.strings,sep=sep) # # find response information and construct object # cn <- colnames(dataframe) nc <- match(response,cn) if(any(is.na(nc)))stop(paste("response",response[is.na(nc)],"not found")) if(!is.numeric(z <- as.matrix(dataframe[,nc,drop=FALSE]))) stop("response must be numeric") z <- list(response=list(y=z,nobs=NULL,times=NULL,nest=NULL,coordinates=NULL, censor=NULL,n=NULL,wt=NULL,delta=NULL,units=NULL,type=NULL), ccov=NULL,tvcov=NULL) class(z) <- "repeated" class(z$response) <- "response" tobs <- dim(z$response$y)[1] nrcol <- dim(z$response$y)[2] if(is.null(type))z$response$type <- rep("unknown",nrcol) else if(length(type)!=nrcol)stop("a type must be supplied for each response") else { for(i in 1:length(type)) z$response$type[i] <- match.arg(type[i], c("nominal","ordinal","discrete","duration","continuous","unknown")) if(any(is.na(z$response$type))) z$response$type[is.na(z$response$type)] <- "unknown"} rna <- rep(TRUE,tobs) for(i in 1:nrcol)rna <- rna&!is.na(z$response$y[,i]) if(is.null(id)) z$response$nobs <- if(is.null(times)) rep(1,tobs) else tobs else { if(!is.character(id)||length(id)>1) 
stop("id must be the name of one variable") nc <- match(id,cn) if(is.na(nc))stop("id not found") id <- as.vector(dataframe[,nc]) if(is.character(id)||is.factor(id))id <- as.numeric(as.factor(id)) else if(any(diff(id)!=0&diff(id)!=1,na.rm=TRUE)) warning("id not consecutively numbered") nobs <- table(id) z$response$nobs <- as.vector(nobs[match(unique(id),names(nobs))])} if(any(z$response$nobs!=1)&&length(z$response$nobs)>1)for(i in unique(id)){ if(any(diff((1:tobs)[id==i])>1,na.rm=TRUE)) stop(paste("observations for individual",i,"not together in table"))} if(!is.null(nest)){ if(all(z$response$nobs==1)) stop("these are not repeated measurements - nest not valid") if(!is.character(nest)||length(nest)>1) stop("nest must be the name of one variable") nc <- match(nest,cn) if(is.na(nc))stop("nest not found") z$response$nest <- as.vector(dataframe[,nc]) if(is.character(z$response$nest)) z$response$nest <- as.numeric(as.factor(z$response$nest)) else if(!is.numeric(z$response$nest))stop("nest must be numeric") rna <- rna&!is.na(z$response$nest)} if(!is.null(times)){ if(!is.character(times)||length(times)>1) stop("times must be the name of one variable") nc <- match(times,cn) if(is.na(nc))stop("times not found") z$response$times <- as.vector(dataframe[,nc]) if(!is.numeric(z$response$times))stop("times must be numeric") rna <- rna&!is.na(z$response$times)} if(!is.null(times)||!is.null(nest))for(i in unique(id)){ if(!is.null(times)&&any(diff(z$response$times[id==i])<0,na.rm=TRUE)) stop(paste("negative time step for individual",i)) if(!is.null(nest)&&any(diff(z$response$nest[id==i])!=0& diff(z$response$nest[id==i])!=1,na.rm=TRUE)) stop(paste("nest for individual",i,"not consecutively numbered"))} if(!is.null(censor)){ if(!is.character(censor)||length(censor)!=nrcol) stop("censor must have one name per response variable") nc <- match(censor,cn) if(any(is.na(nc)))stop("censor",censor[is.na(nc)],"not found") z$response$censor <- as.matrix(dataframe[,nc,drop=FALSE]) if(!is.numeric(z$response$censor))stop("censor must be numeric") if(any(z$response$censor!=1&z$response$censor!=0& z$response$censor!=-1,na.rm=TRUE)) stop("censor can only have values, -1, 0, 1") for(i in 1:nrcol)if(!all(is.na(z$response$censor[,i]))){ rna <- rna&!is.na(z$response$censor[,i]) if(z$response$type[i]=="unknown") z$response$type[i] <- "duration"}} if(!is.null(totals)){ if(!is.character(totals)||length(totals)!=nrcol) stop("totals must have one name per response variable") nc <- match(totals,cn) if(any(is.na(nc)))stop("totals",totals[is.na(nc)],"not found") z$response$n <- as.matrix(dataframe[,nc,drop=FALSE]) if(!is.numeric(z$response$n))stop("totals must be numeric") if(any(z$response$y<0|z$response$n1) stop("weights must be the name of one variable") nc <- match(weights,cn) if(is.na(nc))stop("weights not found") z$response$wt <- as.vector(dataframe[,nc]) if(!is.numeric(z$response$wt))stop("weights must be numeric") rna <- rna&!is.na(z$response$wt)} if(!is.null(coordinates)){ if(!is.character(coordinates)||(length(coordinates)!=2&& length(coordinates)!=3)) stop("coordinates must be the name of 2 or 3 variables") nc <- match(coordinates,cn) if(any(is.na(nc))) stop("coordinates",coordinates[is.na(nc)],"not found") z$response$coordinates <- as.matrix(dataframe[,nc,drop=FALSE]) if(!is.numeric(z$response$coordinates)) stop("coordinates must be numeric") for(i in 1:length(coordinates)) rna <- rna&!is.na(z$response$coordinates[,i])} # # find time-varying covariates # if(!is.null(tvcov)){ if(all(z$response$nobs==1)) stop("these are not 
repeated measurements - tvcov not valid") z$tvcov <- list(tvcov=NULL,nobs=z$response$nobs) class(z$tvcov) <- "tvcov" nc <- match(tvcov,cn) if(any(is.na(nc)))stop("tvcov",tvcov[is.na(nc)],"not found") z$tvcov$tvcov <- dataframe[,nc,drop=FALSE] for(i in 1:length(tvcov))rna <- rna&!is.na(z$tvcov$tvcov[,i]) # if no factor variables present, return a matrix anyway fac <- FALSE for(i in 1:dim(z$tvcov$tvcov)[2]) if(!is.vector(z$tvcov$tvcov[,i],mode="numeric")){ fac <- TRUE break} if(!fac)z$tvcov$tvcov <- as.matrix(z$tvcov$tvcov)} # # find time-constant covariates # if(!is.null(ccov)){ z$ccov <- list(ccov=NULL) class(z$ccov) <- "tccov" nc <- match(ccov,cn) if(any(is.na(nc)))stop("ccov",ccov[is.na(nc)],"not found") z$ccov$ccov <- dataframe[,nc,drop=FALSE] for(i in unique(id))for(j in 1:length(ccov)) if(sum(!is.na(unique(z$ccov$ccov[id==i,j])))>1) stop(paste("ccov",ccov[j],"for individual",i,"not constant")) for(i in 1:length(ccov))rna <- rna&!is.na(z$ccov$ccov[,i]) j <- c(0,cumsum(z$response$nobs)[-length(z$response$nobs)])+1 z$ccov$ccov <- z$ccov$ccov[j,,drop=FALSE] # if no factor variables present, return a matrix anyway fac <- FALSE for(i in 1:dim(z$ccov$ccov)[2]) if(!is.vector(z$ccov$ccov[,i],mode="numeric")){ fac <- TRUE break} if(!fac)z$ccov$ccov <- as.matrix(z$ccov$ccov)} # # remove NAs # if(na.rm&&any(!rna)){ # remove NAs from variables associated with response z$response$y <- z$response$y[rna,,drop=FALSE] if(!is.null(z$response$times))z$response$times <- z$response$times[rna] if(!is.null(z$response$nest))z$response$nest <- z$response$nest[rna] if(!is.null(z$response$coordinates)) z$response$coordinates <- z$response$coordinates[rna,] if(!is.null(z$response$n))z$response$n <- z$response$n[rna,,drop=FALSE] if(!is.null(z$response$censor)){ z$response$censor <- z$response$censor[rna,,drop=FALSE] if(all(z$response$censor==1))z$response$censor <- NULL} if(!is.null(z$response$delta)&&length(z$response$delta)>1) z$response$delta <- z$response$delta[rna,,drop=FALSE] if(!is.null(z$tvcov))z$tvcov$tvcov <- z$tvcov$tvcov[rna,,drop=FALSE] # correct nobs tmp <- NULL j <- c(0,cumsum(z$response$nobs)) for(i in 1:length(z$response$nobs)){ tmp <- c(tmp,sum(rna[(j[i]+1):j[i+1]])) if(tmp[i]==0) warning(paste("Individual",i,"has no observations"))} z$response$nobs <- tmp[tmp>0] # remove NAs from ccov if(!is.null(z$ccov)){ z$ccov$ccov <- z$ccov$ccov[tmp>0,,drop=FALSE] for(i in 1: dim(z$ccov$ccov)[2]) if(length(unique(z$ccov$ccov[,i]))==1) warning(paste("covariate",colnames(z$ccov$ccov)[i],"has only one value\n"))} # remove NAs from tvcov if(!is.null(z$tvcov)){ z$tvcov$nobs <- z$response$nobs for(i in 1: dim(z$tvcov$tvcov)[2]) if(length(unique(z$tvcov$tvcov[,i]))==1) warning(paste("covariate",colnames(z$tvcov$tvcov)[i],"has only one value\n"))}} if(!na.rm&&any(!rna))z$NAs <- !rna # # if independent observations, reset nobs # if(all(z$response$nobs==1))z$response$nobs <- 1 z} rmutil/R/pkpd.r0000755000176200001440000004015013425057453013123 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # mu1.0o1c(p, times, dose=1, end=0.5) # mu1.1o1c(p, times, dose=1) # mu1.1o2c(p, times, dose=1) # mu1.1o2cl(p, times, dose=1) # mu1.1o2cc(p, times, dose=1) # mu2.0o1c(p, times, dose=1, ind, end=0.5) # mu2.0o2c1(p, times, dose=1, ind, end=0.5) # mu2.0o2c2(p, times, dose=1, ind, end=0.5) # mu2.1o1c(p, times, dose=1, ind) # mu2.0o1cfp(p, times, dose=1, ind, end=0.5) # mu2.0o2c1fp(p, times, dose=1, ind, end=0.5) # mu2.0o2c2fp(p, times, dose=1, ind, end=0.5) # mu2.1o1cfp(p, times, dose=1, ind) # # DESCRIPTION # # Functions giving nonlinear regressions models for various PKPD # compartment models ### standard pharmacokinetic compartment models ### ### open zero-order one-compartment model # p[1]: log volume (V) # p[2]: log elimination rate (ke) # end: time when infusion stops mu1.0o1c <- function(p, times, dose=1, end=0.5) { ke <- exp(p[2]) dose/(exp(p[1])*ke)*((1-exp(-ke*times))*(times<=end)+ (1-exp(-ke*end))*exp(-ke*(times-end))*(times>end))} ### ### open first-order one-compartment model # p[1]: log volume (V) # p[2]: log absorption rate (ka) # p[3]: log elimination rate (ke) mu1.1o1c <- function(p, times, dose=1) { ka <- exp(p[2]) ke <- exp(p[3]) exp(p[2]-p[1])*dose/(ka-ke)*(exp(-ke*times)-exp(-ka*times))} ### ### open first-order two-compartment model (ordered) # p[1]: log volume (V) # p[2]: log absorption rate (ka) # p[3]: log elimination rate (ke) # p[4]: log transfer rate between compartments (k12) mu1.1o2c <- function(p, times, dose=1) { ka <- exp(p[2]) ke <- exp(p[3]) k12 <- exp(p[4]) ka*k12*exp(-p[1])*dose/(k12-ka)*((exp(-ka*times)-exp(-ke*times))/ (ke-ka)-(exp(-k12*times)-exp(-ke*times))/(ke-k12))} ### ### open first-order two-compartment model (ordered, absorption and transfer equal) # p[1]: log volume (V) # p[2]: log absorption rate (ka) # p[3]: log elimination rate (ke) mu1.1o2cl <- function(p, times, dose=1) { ka <- exp(p[2]) ke <- exp(p[3]) ka^2*exp(-p[1])*dose/(ka-ke)*((exp(-ka*times)-exp(-ke*times))/(ke-ka) -times*exp(-ka*times))} ### ### open first-order two-compartment model (circular) # p[1]: log volume (V) # p[2]: log absorption rate (ka) # p[3]: log elimination rate (ke) # p[4]: log rate to second compartment (k12) # p[5]: log rate from second compartment (k21) mu1.1o2cc <- function(p, times, dose=1) { ka <- exp(p[2]) ke <- exp(p[3]) k12 <- exp(p[4]) k21 <- exp(p[5]) beta <- 0.5*(k12+k21+ke-sqrt((k12+k21+ke)^2-4*k21*ke)) alpha <- (k21*ke)/beta exp(p[2]-p[1])*dose*((k21-alpha)*exp(-alpha*times)/ ((ka-alpha)*(beta-alpha))+(k21-beta)*exp(-beta*times)/ ((ka-beta)*(alpha-beta))+(k21-ka)*exp(-ka*times)/ ((beta-ka)*(beta-ka)))} ### ### simultaneous models for parent drug and metabolite ### ### zero-order one-compartment model # p[1]: log parent drug volume (Vp) # p[2]: log parent drug direct elimination rate (kep) # p[3]: log transformation rate from parent to metabolite (kpm) # p[4]: log metabolite elimination rate (kem) # p[5]: log metabolite volume (Vm) # ind: indicator vector: 1 for parent, 0 for metabolite # end: time when infusion stops mu2.0o1c <- function(p, times, dose=1, ind, end=0.5) { Vp <- exp(p[1]) kpm <- exp(p[3]) kp <- exp(p[2])+kpm kem <- exp(p[4]) Vm <- exp(p[5]) kemp <- kem-kp tmp1 <- exp(-kp*times) tmp2 <- kpm/(kp*kem*Vm) g1 <- exp(-kp*end) g2 <- exp(-kem*end) cend <- (1-g1)/(Vp*kp) cexp <- 
exp(-kp*(times-end))*(times>end) cmend <- tmp2*(1+kp/kemp*g2-kem/kemp*g1) tmp3 <- cend*kpm*Vp/(kemp*Vm) dose*(ind*((1-tmp1)/(Vp*kp)*(times<=end)+cend*cexp)+ (1-ind)*(tmp2*(1+kp/kemp*exp(-kem*times)-kem/kemp* tmp1)*(times<=end)+(g2/g1*cexp*tmp3+ (cmend-tmp3)*exp(-kem*(times-end))/g2*(times>end))))} ### ### zero-order two-compartment for parent, one-compartment for ### metabolite, model # p[1]: log parent drug volume (Vp) # p[2]: log parent drug direct elimination rate (kep) # p[3]: log parent drug rate to second compartment (k12) # p[4]: log parent drug rate from second compartment (k21) # p[5]: log transformation rate from parent to metabolite (kpm) # p[6]: log metabolite elimination rate (kem) # ind: indicator vector: 1 for parent, 0 for metabolite # end: time when infusion stops mu2.0o2c1 <- function(p, times, dose=1, ind, end=0.5) { Vp <- exp(p[1]) kp12 <- exp(p[3]) kp21 <- exp(p[4]) kpm <- exp(p[5]) kp <- exp(p[2])+kpm kem <- exp(p[6]) tmp1 <- sqrt((kp+kp12+kp21)^2-4*kp21*kp) lamp1 <- 0.5*(kp+kp12+kp21+tmp1) lamp2 <- 0.5*(kp+kp12+kp21-tmp1) tmp10 <- exp(-kem*times) tmp13 <- exp(-kem*end) tmp2 <- (1-tmp10)/kem tmp3 <- (1-tmp13)/kem tmp4 <- lamp1-kp21 tmp5 <- lamp2-kp21 tmp6 <- exp(-lamp1*times) tmp7 <- exp(-lamp2*times) tmp8 <- exp(-lamp1*end) tmp9 <- exp(-lamp2*end) tmp11 <- tmp6/tmp8 tmp12 <- tmp7/tmp9 tmp14 <- tmp10/tmp13 dose/(Vp*tmp1)*(ind*((tmp4*(1-tmp6)/lamp1-tmp5*(1-tmp7)/lamp2)* (times<=end)+(tmp4*(1-tmp8)*tmp11/lamp1-tmp5*(1-tmp9)* tmp12/lamp2)*(times>end))+(1-ind)*kpm*((tmp4*(tmp2- (tmp6-tmp10)/(kem-lamp1))/lamp1-tmp5*(tmp2-(tmp7-tmp10)/ (kem-lamp2))/lamp2)*(times<=end)+((tmp4*(tmp3-(tmp8- tmp13)/(kem-lamp1))/lamp1-tmp5*(tmp3-(tmp9-tmp13)/ (kem-lamp2))/lamp2)*tmp14+tmp4*(1-tmp8)* (tmp14-tmp11)/(lamp1*(lamp1-kem))-tmp5*(1-tmp9)* (tmp14-tmp12)/(lamp2*(lamp2-kem)))*(times>end)))} ### ### zero-order two-compartment model for both parent and metabolite # p[1]: log parent drug volume (Vp) # p[2]: log parent drug direct elimination rate (kep) # p[3]: log parent drug rate to second compartment (kp12) # p[4]: log parent drug rate from second compartment (kp21) # p[5]: log transformation rate from parent to metabolite (kpm) # p[6]: log metabolite elimination rate (kem) # p[7]: log metabolite drug rate to second compartment (km12) # p[8]: log metabolite drug rate from second compartment (km21) # ind: indicator vector: 1 for parent, 0 for metabolite # end: time when infusion stops mu2.0o2c2 <- function(p, times, dose=1, ind, end=0.5) { Vp <- exp(p[1]) kp12 <- exp(p[3]) kp21 <- exp(p[4]) kpm <- exp(p[5]) kp <- exp(p[2])+kpm kem <- exp(p[6]) km12 <- exp(p[7]) km21 <- exp(p[8]) tmp1 <- sqrt((kp+kp12+kp21)^2-4*kp21*kp) lamp1 <- 0.5*(kp+kp12+kp21+tmp1) lamp2 <- 0.5*(kp+kp12+kp21-tmp1) tmp2 <- lamp1-kp21 tmp3 <- lamp2-kp21 tmp4 <- exp(-lamp1*times) tmp5 <- exp(-lamp2*times) tmp6 <- exp(-lamp1*end) tmp7 <- exp(-lamp2*end) tmp8 <- tmp4/tmp6 tmp9 <- tmp5/tmp7 tmp10 <- sqrt((kem+km12+km21)^2-4*km21*kem) lamm1 <- 0.5*(kem+km12+km21+tmp10) lamm2 <- 0.5*(kem+km12+km21-tmp10) tmp11 <- kem-lamm1 tmp12 <- kem-lamm2 tmp13 <- lamp1-lamm2 tmp14 <- lamp1-lamm1 tmp15 <- exp(-lamm1*times) tmp16 <- exp(-lamm2*times) tmp17 <- lamp1-km21 tmp18 <- lamp2-km21 tmp19 <- lamp2-lamm1 tmp20 <- lamp2-lamm2 tmp21 <- exp(-lamm1*end) tmp22 <- exp(-lamm2*end) tmp23 <- km21-lamp1 tmp24 <- km21-lamp2 tmp25 <- tmp15/tmp21 tmp26 <- tmp16/tmp22 dose/Vp*(ind*((tmp2*(1-tmp4)/(lamp1*tmp1)-tmp3*(1-tmp5)/ (lamp2*tmp1))*(times<=end)+(tmp2*(1-tmp6)*tmp8/(lamp1*tmp1)- tmp3*(1-tmp7)* tmp9/(lamp2*tmp1))*(times>end))+(1-ind)*kpm* 
((tmp2/tmp1*((tmp11*tmp16/tmp13-tmp12*tmp15/tmp14)/ (kem*tmp10)+tmp17*tmp4/(lamp1*tmp14*tmp13)+1/(kem*lamp1)) -tmp3/tmp1*((tmp12*tmp15/tmp19-tmp11*tmp16/tmp20)/ (kem*(-tmp10))+tmp18*tmp5/(lamp2*tmp19*tmp20)+1/(kem*lamp2)))* (times<=end)+((-tmp2/(tmp1*tmp10)*(tmp12*tmp21/(kem*tmp14)+ lamm2/(kem*lamp1)+tmp23/(lamp1*tmp14))+tmp3/(tmp1*tmp10)* (tmp12*tmp21/(kem*tmp19)+lamm2/(kem*lamp2)+tmp24/ (lamp2*tmp19)))*tmp25+(tmp2/(tmp1*tmp10)*(tmp11*tmp22/ (kem*tmp13)+lamm1/(kem*lamp1)+tmp23/(lamp1*tmp13))- tmp3/(tmp1*tmp10)*( tmp11*tmp22/(kem*tmp20)+lamm1/(kem*lamp2)+ tmp24/(lamp2*tmp20)))*tmp26+tmp2*tmp23*(1-tmp6)*tmp8/ (lamp1*tmp1*tmp14*tmp13)-tmp3*tmp24*(1-tmp7)*tmp9/ (lamp2*tmp1*tmp19*tmp20))*(times>end)))} ### ### first-order one-compartment model # p[1]: log volume (V) # p[2]: log parent drug absorption rate (kap) # p[3]: log parent drug direct elimination rate (kep) # p[4]: log transformation rate from parent to metabolite (kpm) # p[5]: log metabolite elimination rate (kem) # ind: indicator vector: 1 for parent, 0 for metabolite mu2.1o1c <- function(p, times, dose=1, ind) { kap <- exp(p[2]) kep <- exp(p[3]) kem <- exp(p[5]) kap*exp(p[1])*dose/(kap-kep)*(ind*(exp(-kep*times)-exp(-kap*times))+ (1-ind)*exp(p[4])*(exp(-kap*times)/(kap-kem)-exp(-kep*times)/(kep-kem)+ (1/(kep-kem)-1/(kap-kem))*exp(-kem*times)))} ### ### zero-order one-compartment first-pass model # p[1]: log parent drug volume (Vp) # p[2]: log parent drug direct elimination rate (kep) # p[3]: log transformation rate from parent to metabolite (kpm) # p[4]: log metabolite elimination rate (kem) # p[5]: log metabolite volume (Vm) # p[7]: logit of proportion going to first pass (lpfp) # ind: indicator vector: 1 for parent, 0 for metabolite # end: time when infusion stops mu2.0o1cfp <- function(p, times, dose=1, ind, end=0.5) { Vp <- exp(p[1]) kpm <- exp(p[3]) kp <- exp(p[2])+kpm kem <- exp(p[4]) Vm <- exp(p[5]) kemp <- kem-kp tmp1 <- exp(-kp*times) tmp2 <- exp(-kem*times) tmp3 <- kpm/(kp*kem*Vm) g1 <- exp(-kp*end) g2 <- exp(-kem*end) cend <- (1-g1)/(Vp*kp) cexp <- exp(-kp*(times-end))*(times>end) cmend <- tmp3*(1+kp/kemp*g2-kem/kemp*g1) tmp4 <- cend*kpm*Vp/(kemp*Vm) lpfp <- exp(p[6]) lpfp <- lpfp/(1+lpfp) dose*(ind*((1-tmp1)/(Vp*kp)*(times<=end)+cend*cexp)*lpfp+ (1-ind)*(((1-tmp2)/(Vm*kem)*(times<=0.5)+(1-g2)/(Vm*kem)* exp(-kem*(times-0.5))*(times>0.5))*(1-lpfp)+(tmp3* (1+kp/kemp*tmp2-kem/kemp*tmp1)*(times<=end)+(g2/g1*cexp*tmp4+ (cmend-tmp4)*exp(-kem*(times-end))/g2*(times>end)))*lpfp))} ### ### zero-order two-compartment for parent, one-compartment for metabolite, ### first-pass model # p[1]: log parent drug volume (Vp) # p[2]: log parent drug direct elimination rate (kep) # p[3]: log parent drug rate to second compartment (k12) # p[4]: log parent drug rate from second compartment (k21) # p[5]: log transformation rate from parent to metabolite (kpm) # p[6]: log metabolite elimination rate (kem) # p[7]: logit of proportion going to first pass (lpfp) # ind: indicator vector: 1 for parent, 0 for metabolite # end: time when infusion stops mu2.0o2c1fp <- function(p, times, dose=1, ind, end=0.5) { Vp <- exp(p[1]) kp12 <- exp(p[3]) kp21 <- exp(p[4]) kpm <- exp(p[5]) kp <- exp(p[2])+kpm kem <- exp(p[6]) tmp1 <- sqrt((kp+kp12+kp21)^2-4*kp21*kp) lamp1 <- 0.5*(kp+kp12+kp21+tmp1) lamp2 <- 0.5*(kp+kp12+kp21-tmp1) tmp10 <- exp(-kem*times) tmp13 <- exp(-kem*end) tmp2 <- (1-tmp10)/kem tmp3 <- (1-tmp13)/kem tmp4 <- lamp1-kp21 tmp5 <- lamp2-kp21 tmp6 <- exp(-lamp1*times) tmp7 <- exp(-lamp2*times) tmp8 <- exp(-lamp1*end) tmp9 <- exp(-lamp2*end) tmp11 <- 
tmp6/tmp8 tmp12 <- tmp7/tmp9 tmp14 <- tmp10/tmp13 lpfp <- exp(p[7]) lpfp <- lpfp/(1+lpfp) dose/(Vp*tmp1)*(ind*((tmp4*(1-tmp6)/lamp1-tmp5*(1-tmp7)/lamp2)* (times<=end)+(tmp4*(1-tmp8)*tmp11/lamp1-tmp5*(1-tmp9)* tmp12/lamp2)*(times>end))*lpfp+(1-ind)*(((1-exp(-kem*times))/ kem*(times<=end)+(1-exp(-kem*end))/kem*exp(-kem*(times-end))* (times>end))*tmp1*(1-lpfp)+(kpm*((tmp4*(tmp2-(tmp6-tmp10)/ (kem-lamp1))/lamp1-tmp5*(tmp2-(tmp7-tmp10)/(kem-lamp2))/ lamp2)*(times<=end)+((tmp4*(tmp3-(tmp8- tmp13)/(kem-lamp1))/lamp1-tmp5*(tmp3-(tmp9-tmp13)/ (kem-lamp2))/lamp2)*tmp14+tmp4*(1-tmp8)* (tmp14-tmp11)/(lamp1*(lamp1-kem))-tmp5*(1-tmp9)* (tmp14-tmp12)/(lamp2*(lamp2-kem)))*(times>end)))*lpfp))} ### ### zero-order two-compartment model for both parent and metabolite ### first-pass model # p[1]: log parent drug volume (Vp) # p[2]: log parent drug direct elimination rate (kep) # p[3]: log parent drug rate to second compartment (kp12) # p[4]: log parent drug rate from second compartment (kp21) # p[5]: log transformation rate from parent to metabolite (kpm) # p[6]: log metabolite elimination rate (kem) # p[7]: log metabolite drug rate to second compartment (km12) # p[8]: log metabolite drug rate from second compartment (km21) # p[9]: logit of proportion going to first pass (lpfp) # ind: indicator vector: 1 for parent, 0 for metabolite # end: time when infusion stops mu2.0o2c2fp <- function(p, times, dose=1, ind, end=0.5) { Vp <- exp(p[1]) kp12 <- exp(p[3]) kp21 <- exp(p[4]) kpm <- exp(p[5]) kp <- exp(p[2])+kpm kem <- exp(p[6]) km12 <- exp(p[7]) km21 <- exp(p[8]) tmp1 <- sqrt((kp+kp12+kp21)^2-4*kp21*kp) lamp1 <- 0.5*(kp+kp12+kp21+tmp1) lamp2 <- 0.5*(kp+kp12+kp21-tmp1) tmp2 <- lamp1-kp21 tmp3 <- lamp2-kp21 tmp4 <- exp(-lamp1*times) tmp5 <- exp(-lamp2*times) tmp6 <- exp(-lamp1*end) tmp7 <- exp(-lamp2*end) tmp8 <- tmp4/tmp6 tmp9 <- tmp5/tmp7 tmp10 <- sqrt((kem+km12+km21)^2-4*km21*kem) lamm1 <- 0.5*(kem+km12+km21+tmp10) lamm2 <- 0.5*(kem+km12+km21-tmp10) tmp11 <- kem-lamm1 tmp12 <- kem-lamm2 tmp13 <- lamp1-lamm2 tmp14 <- lamp1-lamm1 tmp15 <- exp(-lamm1*times) tmp16 <- exp(-lamm2*times) tmp17 <- lamp1-km21 tmp18 <- lamp2-km21 tmp19 <- lamp2-lamm1 tmp20 <- lamp2-lamm2 tmp21 <- exp(-lamm1*end) tmp22 <- exp(-lamm2*end) tmp23 <- km21-lamp1 tmp24 <- km21-lamp2 tmp25 <- tmp15/tmp21 tmp26 <- tmp16/tmp22 lpfp <- exp(p[9]) lpfp <- lpfp/(1+lpfp) dose/Vp*(ind*((tmp2*(1-tmp4)/(lamp1*tmp1)-tmp3*(1-tmp5)/(lamp2*tmp1))* (times<=end)+(tmp2*(1-tmp6)*tmp8/(lamp1*tmp1)-tmp3*(1-tmp7)* tmp9/(lamp2*tmp1))*(times>end))*lpfp+(1-ind)* (((1-exp(-kem*times))/kem*(times<=end)+(1-exp(-kem*end))/ kem*exp(-kem*(times-end))*(times>end))*(1-lpfp)+ (kpm*((tmp2/tmp1*((tmp11*tmp16/tmp13-tmp12*tmp15/tmp14)/ (kem*tmp10)+tmp17*tmp4/(lamp1*tmp14*tmp13)+1/(kem*lamp1))- tmp3/tmp1*((tmp12*tmp15/tmp19-tmp11*tmp16/tmp20)/ (kem*(-tmp10))+tmp18*tmp5/(lamp2*tmp19*tmp20)+1/(kem*lamp2)))* (times<=end)+((-tmp2/(tmp1*tmp10)*(tmp12*tmp21/(kem*tmp14)+ lamm2/(kem*lamp1)+tmp23/(lamp1*tmp14))+tmp3/(tmp1*tmp10)* (tmp12*tmp21/(kem*tmp19)+lamm2/(kem*lamp2)+tmp24/ (lamp2*tmp19)))*tmp25+(+tmp2/(tmp1*tmp10)*(tmp11*tmp22/ (kem*tmp13)+lamm1/(kem*lamp1)+tmp23/(lamp1*tmp13))-tmp3/ (tmp1*tmp10)*(tmp11*tmp22/(kem*tmp20)+lamm1/(kem*lamp2)+ tmp24/(lamp2*tmp20)))*tmp26+tmp2*tmp23*(1-tmp6)*tmp8/ (lamp1*tmp1*tmp14*tmp13)-tmp3*tmp24*(1-tmp7)*tmp9/ (lamp2*tmp1*tmp19*tmp20))*(times>end)))*lpfp))} ### ### first-order one-compartment first-pass model # p[1]: log volume (V) # p[2]: log parent drug absorption rate (kap) # p[3]: log parent drug direct elimination rate (ked) # (kep: total parent 
drug elimination) # p[4]: log transformation rate from parent to metabolite (kpm) # p[5]: log metabolite first-pass formation rate (kmfp) # p[6]: log metabolite elimination rate (kem) # p[7]: logit of proportion going to first pass (lpfp) # ind: indicator vector: 1 for parent, 0 for metabolite mu2.1o1cfp <- function(p, times, dose=1, ind) { kap <- exp(p[2]) kf <- exp(p[4]) kep <- exp(p[3])+kf kmfp <- exp(p[5]) kem <- exp(p[6]) lpfp <- exp(p[7]) lpfp <- lpfp/(1+lpfp) exp(-p[1])*dose*(ind*(exp(-kep*times)-exp(-kap*times))/ (kap-kep)*lpfp+(1-ind)*((exp(-kmfp*times)-exp(-kap*times))/ (kap-kmfp)*(1-lpfp)+kf*(exp(-kap*times)/(kap-kem)- exp(-kep*times)/(kep-kem)+(1/(kep-kem)-1/(kap-kem))* exp(-kem*times))/(kap-kep)*lpfp))} rmutil/R/finterp.r0000755000176200001440000015754713453203726013654 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # fmobj(z, envir=parent.frame())) # finterp(.z, .envir=parent.frame(), # .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, # .expand=TRUE, .intercept=TRUE, .old=NULL){ # fnenvir(.z, .envir=parent.frame(), .name=NULL, .expand=TRUE) # # DESCRIPTION # # Functions to translate a model formula with unknown parameters # into a function and to modify a function to read from a data object. 
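### example use of the formula interpreters in this file (an
### illustrative sketch only: the covariate x and the parameters b0
### and b1 are invented names, and the block is wrapped in if(FALSE)
### so it is never evaluated when the file is sourced)
###
if(FALSE){
	x <- 1:10
	## a formula with unknowns: b0 and b1 become .p[1] and .p[2]
	fn <- finterp(~b0*exp(-b1*x))
	attr(fn,"parameters")
	fn(c(2,0.1))              # the same as 2*exp(-0.1*x)
	## dissect the same formula without building a function
	fm <- fmobj(~b0*exp(-b1*x))
	fm$objects[fm$parameters] # names treated as unknown parameters
	## a W&R formula with only known covariates gives a linear
	## predictor evaluated from a stored model matrix
	fn2 <- finterp(~x)
	attr(fn2,"model")         # "(Intercept)" "x"
	fn2(c(1,0.5))}            # intercept 1 plus slope 0.5 times x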
### ### function to find objects specified in a formula, returning ###indicators of which are parameters, covariates, factors, and functions ### fmobj <- function(z, envir=parent.frame()){ if(!inherits(z,"formula"))return(NULL) # # transform formula to one character string # local <- fac <- cov <- ch <- NULL ch1 <- deparse(z[[length(z)]]) for(j in 1:length(ch1))ch <- paste(ch,ch1[j],collapse=" ",sep="\n") # # put extra spaces in character string so that substitutions can be made # ch <- gsub("\n"," \n ",gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [ ",ch)))) ch <- gsub("/"," / ",gsub(","," ,",gsub("\\("," ( ",gsub(":"," : ",ch)))) ch <- paste(" ",gsub(" -"," - ",gsub(" \\+"," + ",ch))," ",sep="") # # find names of variables and parameters # removing names of functions that are arguments of other functions # except times and response # mem <- all.vars(z) fcn <- all.names(z) fcn <- fcn[!(fcn%in%mem)] if(length(mem)>0){ tmp <- vector(length=length(mem)) for(i in 1:length(mem)) tmp[i] <- exists(mem[i],envir=envir)&&is.function(eval(parse(text=mem[i]),envir=envir))&&(length(grep(paste(mem[i],","),ch))>0||length(grep(paste(",",mem[i]),ch))>0||(length(grep(paste("=",mem[i]),ch))>0&&length(grep(paste(">=",mem[i]),ch))==0&&length(grep(paste("<=",mem[i]),ch))==0)||length(grep(paste("\\(",mem[i],"\\)"),ch))>0)&&mem[i]!="times"&&mem[i]!="response" fcn <- unique(c(fcn,mem[tmp])) mem <- mem[!tmp]} # # Create indicators for numeric covariates (cov) and for factor # variables (fac) that exist in the given environment. # Everything else in mem is an unknown parameter. # if(length(mem)>0){ for(j in 1:length(mem)){ local <- c(local,length(grep(paste(mem[j],"<-"),ch))>0) cov <- c(cov,exists(mem[j],envir=envir)&& is.numeric(eval(parse(text=mem[j]),envir=envir))) fac <- c(fac,exists(mem[j],envir=envir)&&!cov[j]&& is.factor(eval(parse(text=mem[j]),envir=envir)))} zz <- list(formula=ch,objects=mem,functions=fcn, parameters=!cov&!fac&!local,covariates=cov,factors=fac, local=local)} else zz <- list(formula=ch,objects=mem,functions=fcn) class(zz) <- "fmobj" zz} ### ### functions to translate model formulae into functions, ### perhaps transforming them to read from a data object ### finterp <- function(.z, ...) 
UseMethod("finterp") ### default method ### finterp.default <- function(.z, .envir=parent.frame(), .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, .expand=TRUE, .intercept=TRUE, .old=NULL, .response=FALSE, ...){ if(!inherits(.z,"formula"))return(NULL) # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} # # call appropriate special method if it exists # if(!is.environment(.envir)){ if(is.null(.name)).name <- paste(deparse(substitute(.envir))) if(inherits(.envir,"repeated")) return(finterp.repeated(.z,.envir,.formula,.vector,.args,.start,.name,.expand,.intercept,.old,.response)) if(inherits(.envir,"tccov")) return(finterp.tccov(.z,.envir,.formula,.vector,.args,.start,.name,.expand,.intercept,.old)) if(inherits(.envir,"tvcov")) return(finterp.tvcov(.z,.envir,.formula,.vector,.args,.start,.name,.expand,.intercept,.old)) if(inherits(.envir,"data.frame")) return(finterp.data.frame(.z,.envir,.formula,.vector,.args,.start,.name,.expand,.intercept,.old))} # # check for common parameters # .pars <- .range <- NULL if(!is.null(.old)){ if(!is.list(.old)).old <- list(.old) for(.j in .old){ if(!inherits(.j,"formulafn")) stop("objects in .old must have class, formulafn") .pars <- c(.pars,attr(.j,"parameters")) .range <- c(.range,attr(.j,"range")[1]:attr(.j,"range")[2])} if(.start<=max(.range)) warning("possible conflict in vector indexing - check .start")} if(!is.null(.args)&&!is.character(.args)) stop(".args must be a character string") # # create indicators for covariates that exist in the given environment (.ex) # and for factor variables (.fac), and a vector of unique parameter names (.un) # .zz <- fmobj(.z) .ch <- .zz$formula .mem <- .zz$objects .fcn <- .zz$functions if("$"%in%.fcn)stop("sublists not allowed (attach dataframes and use variable names)") .ex <- .zz$covariates .fac <- .zz$factors .local <- .zz$local rm(.zz) if(length(.mem)>0){ .un <- unique(.mem[!.ex&!.fac&!.local]) if(length(unique(.mem[.ex|.fac|.local]))==0&&length(.un)==0) warning("finterp.default: no variables found")} # # handle W&R formulae # if(length(.mem)==0||all(.ex|.fac|.local)){ if(.formula)return(.z) else { # create model matrix and return as attribute of function if(any("offset"%in%.fcn))stop("offset not allowed") .mt <- terms(.z) if(is.numeric(.mt[[2]])){ .dm <- matrix(1) colnames(.dm) <- "(Intercept)"} else { .dm <- model.matrix(.mt,model.frame(.mt,.envir,na.action=NULL)) if(!.intercept).dm <- .dm[,-1,drop=FALSE]} .fna <- function(.p) as.vector(.dm%*% .p[attr(.fna,"range")[1]:attr(.fna,"range")[2]]) attributes(.fna) <- list(formula=.z,model=colnames(.dm), covariates=if(length(.mem)>0) unique(.mem[.ex|.fac]) else NULL, parameters=paste("p[",1:dim(.dm)[2],"]",sep=""), range=c(.start,.start+dim(.dm)[2]-1), class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fna"&.obj!=".dm"]) rm(.obj) return(.fna)}} # # create function for formulae with unknowns making sure there are no # factor variables present # if(!is.null(.fac)&&any(.fac))stop(paste("covariates in formulae with unknowns must not be factors\ncheck",.mem[.fac])) .fna <- function(.p) eval(attr(.fna,"model")) if(.vector){ # # return a function with all parameters as one vector # # check if some parameters are not to be put in the vector if(!is.null(.args)){ .tmp <- match(.args,.un) if(all(!is.na(.tmp))).un <- .un[-.tmp] .par <- "alist(.p=" for(.j in 1:length(.args)){ .par <- paste(.par,",",collapse="") .par <- 
paste(.par,.args[.j],"=",collapse="")} .par <- paste(.par,")",collapse="") formals(.fna) <- eval(parse(text=.par))} # check if some parameters are common to another formula if(!is.null(.old)){ .j <- match(.pars,.un) .j <- .j[!is.na(.j)] .un <- .un[-.j] .pars <- .pars[!is.na(.j)] .range <- .range[!is.na(.j)] for(.j in 1:length(.pars)) .ch <- gsub(paste(" ",.pars[.j]," ",sep=""), paste(" .p[",.range[.j],"] ",sep=""),.ch)} if(length(.un)>0)for(.j in 1:length(.un)) .ch <- gsub(paste(" ",.un[.j]," ",sep=""), paste(" .p[",.start+.j-1,"] ",sep=""),.ch)} else { # # return a function with parameters having their original names # .par <- "alist(" for(.j in 1:length(.un)){ if(.j>1).par <- paste(.par,",",collapse="") .par <- paste(.par,.un[.j],"=",collapse="")} # bug reported in 0.63.0 # if(length(.un)==1).par <- paste(.par,",...=",collapse="") .par <- paste(.par,")",collapse="") formals(.fna) <- eval(parse(text=.par))} attributes(.fna) <- list(formula=.z,model=parse(text=.ch),parameters=.un, common=.pars,covariates=unique(.mem[.ex]), range=c(.start,.start+length(.un)-1),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fna"]) rm(.obj) return(.fna)} ### method for repeated objects ### finterp.repeated <- function(.z, .envir=NULL, .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, .expand=TRUE, .intercept=TRUE, .old=NULL, .response=FALSE, ...){ if(!inherits(.z,"formula"))return(NULL) # # check for common parameters # .pars <- .range <- NULL if(!is.null(.old)){ if(!is.list(.old)).old <- list(.old) for(.j in .old){ if(!inherits(.j,"formulafn")) stop("objects in .old must have class, formulafn") .pars <- c(.pars,attr(.j,"parameters")) .range <- c(.range,attr(.j,"range")[1]:attr(.j,"range")[2])} if(.start<=max(.range)) warning("possible conflict in vector indexing - check .start")} if(!is.null(.args)&&!is.character(.args)) stop(".args must be a character string") # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} if(is.null(.envir)||!inherits(.envir,"repeated"))stop("envir must be an object of class, repeated") .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # dissect formula # .ex1 <- .ex2 <- .ex3 <- .ex4 <- .ex5 <- .ex6 <- NULL .zz <- fmobj(.z) .ch <- .zz$formula .mem <- .zz$objects .fcn <- .zz$functions .local <- .zz$local rm(.zz) # # create indicators for variables that exist in the repeated object (.ex) and # a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,colnames(.envir$ccov$ccov)) .ex2 <- match(.mem,colnames(.envir$tvcov$tvcov)) .ex3 <- match(.mem,"times") .ex4 <- match(.mem,"individuals") if(any(!is.na(.ex4))&&length(nobs(.envir))==1) stop("these are not repeated measurements") .ex5 <- match(.mem,"nesting") if(any(!is.na(.ex5))&&is.null(.envir$response$nest)) stop("no nesting variable available") .ex6 <- match(.mem,colnames(.envir$response$y)) if(any(!is.na(.ex2))&&!.expand) stop("time-varying covariates present - time-constant ones must be expanded") .un <- unique(.mem[is.na(.ex1)&is.na(.ex2)&is.na(.ex3)&is.na(.ex4)&is.na(.ex5)&is.na(.ex6)&!.local]) if(length(unique(.mem[!is.na(.ex1)|!is.na(.ex2)|!is.na(.ex3)|!is.na(.ex4)|!is.na(.ex5)|!is.na(.ex6)]))==0&&length(.un)==0) warning("finterp.repeated: no variables found")} # # replace time-constant covariate names by location in data object, # expanding to the proper length, if required # if(.expand).i <- covind(.envir) .ex1a <- if(is.null(.ex1)) NULL else 
.ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) .ch <- if(.expand)gsub(paste(" ",colnames(.envir$ccov$ccov)[.ex1a[.j]], " ",sep=""),paste(" ",.ndata,"$ccov$ccov[,", .ex1a[.j],"][.i] ",sep=""),.ch) else gsub(paste(" ",colnames(.envir$ccov$ccov)[.ex1a[.j]], " ",sep=""),paste(" ",.ndata,"$ccov$ccov[,", .ex1a[.j],"] ",sep=""),.ch) # # replace time-varying covariate names by location in data object # .ex2a <- if(is.null(.ex2)) NULL else .ex2[!is.na(.ex2)] if(length(.ex2a)>0)for(.j in 1:length(.ex2a)) .ch <- gsub(paste(" ",colnames(.envir$tvcov$tvcov)[.ex2a[.j]], " ",sep=""),paste(" ",.ndata,"$tvcov$tvcov[,", .ex2a[.j],"] ",sep=""),.ch) # # replace special name, times, by location in data object # .ex3a <- if(is.null(.ex3)) NULL else .ex3[!is.na(.ex3)] if(length(.ex3a)>0) .ch <- gsub(" times ",paste(" ",.ndata,"$response$times ",sep=""),.ch) # # replace special name, individuals, by location in data object # .ex4a <- if(is.null(.ex4)) NULL else .ex4[!is.na(.ex4)] if(length(.ex4a)>0) .ch <- gsub(" individuals ",paste(" as.factor(covind(",.ndata,")) ",sep=""),.ch) # # replace special name, nesting, by location in data object # .ex5a <- if(is.null(.ex5)) NULL else .ex5[!is.na(.ex5)] if(length(.ex5a)>0) .ch <- gsub(" nesting ",paste(" as.factor(",.ndata,"$response$nest) ",sep=""),.ch) # # replace response variable names by location in data object # .ex6a <- if(is.null(.ex6)) NULL else .ex6[!is.na(.ex6)] if(length(.ex6a)>0){ if(dim(.envir$response$y)[2]==1&&!.response) stop(paste(colnames(.envir$response$y)[.ex6a]," is the response and cannot be a covariate")) for(.j in 1:length(.ex6a)){ if(!is.null(.envir$response$n)&&!all(is.na(.envir$response$n[.ex6a[.j]]))) stop(paste(colnames(.envir$response$y)[.ex6a[.j]]," is binomial and cannot be a covariate")) if(!is.null(.envir$response$censor)&&!all(is.na(.envir$response$censor[.ex6a[.j]]))&&!all(.envir$response$censor[.ex6a[.j]]==1)) stop(paste(colnames(.envir$response$y)[.ex6a[.j]]," is censored and cannot be a covariate")) .ch <- gsub(paste(" ",colnames(.envir$response$y)[.ex6a[.j]], " ",sep=""),paste(" ",.ndata,"$response$y[,", .ex6a[.j],"] ",sep=""),.ch)}} # # handle W&R formulae # if((is.null(.ex1)&&is.null(.ex2)&&is.null(.ex3)&&is.null(.ex4)&&is.null(.ex5)&&is.null(.ex6))||all(!is.na(.ex1)|!is.na(.ex2)|!is.na(.ex3)|!is.na(.ex4)|!is.na(.ex5)|!is.na(.ex6))){ if(.formula)return(.z) else { # create model matrix, change column names, and return as # attribute of function if(any("offset"%in%.fcn))stop("offset not allowed") .ch <- as.formula(paste("~",.ch)) .mt <- terms(.ch) if(is.numeric(.mt[[2]])){ if(!.intercept)return(NULL) .n <- if(.expand||is.null(.envir$ccov$ccov)) dim(.envir$response$y)[1] else dim(.envir$ccov$ccov)[1] .dm <- matrix(1) colnames(.dm) <- "(Intercept)" .fna <- function(.p) rep(.p[attr(.fna,"range")[1]],.n)} else { .dm <- model.matrix(.mt,model.frame(.mt,na.action=NULL)) if(!.intercept).dm <- .dm[,-1,drop=FALSE] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) colnames(.dm) <- gsub(paste(.ndata,"\\$ccov\\$ccov\\[, ",.ex1a[.j],"\\]",sep=""),paste(colnames(.envir$ccov$ccov)[.ex1a[.j]],sep=""),colnames(.dm)) if(length(.ex2a)>0)for(.j in 1:length(.ex2a)) colnames(.dm) <- gsub(paste(.ndata,"\\$tvcov\\$tvcov\\[, ",.ex2a[.j],"\\]",sep=""),paste(colnames(.envir$tvcov$tvcov)[.ex2a[.j]],sep=""),colnames(.dm)) if(length(.ex3a)>0)colnames(.dm) <- gsub(paste(.ndata,"\\$response\\$times",sep=""),"times",colnames(.dm)) if(length(.ex4a)>0)colnames(.dm) <- 
gsub(paste("as.factor\\(covind\\(",.ndata,"\\)\\)",sep=""),"individuals",colnames(.dm)) if(length(.ex5a)>0)colnames(.dm) <- gsub(paste("as.factor\\(",.ndata,"\\$response\\$nest\\)",sep=""),"nesting",colnames(.dm)) if(length(.ex6a)>0)for(.j in 1:length(.ex6a)) colnames(.dm) <- gsub(paste(.ndata,"\\$response\\$y\\[, ",.ex6a[.j],"\\]",sep=""),paste(colnames(.envir$response$y)[.ex6a[.j]],sep=""),colnames(.dm)) .fna <- function(.p) as.vector(.dm%*% .p[attr(.fna,"range")[1]:attr(.fna,"range")[2]])} attributes(.fna) <- list(formula=.z,model=colnames(.dm), covariates=if(length(.mem)>0) unique(.mem[(!is.na(.ex1)|!is.na(.ex2)|!is.na(.ex3)|!is.na(.ex4)|!is.na(.ex5)|!is.na(.ex6))]) else NULL, parameters=paste("p[",1:dim(.dm)[2],"]",sep=""), range=c(.start,.start+dim(.dm)[2]-1), class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".i"&.obj!=".fna"&.obj!=".dm"&.obj!=".n"]) rm(.obj) return(.fna)}} # # make sure there are no factor variables present # if(length(.ex1a)>0)for(.j in 1:length(.ex1a))if(is.factor(.envir$ccov$ccov[,.ex1a[.j]]))stop(paste(colnames(.envir$ccov$ccov)[.ex1a[.j]],"is a factor variable")) if(length(.ex2a)>0)for(.j in 1:length(.ex2a))if(is.factor(.envir$tvcov$tvcov[,.ex2a[.j]]))stop(paste(colnames(.envir$tvcov$tvcov)[.ex2a[.j]],"is a factor variable")) if(length(.ex4a)>0)stop("index for individuals cannot be used in formulae with unknowns") if(length(.ex5a)>0)stop("index for nesting cannot be used in formulae with unknowns") # # create and return the function # .fna <- function(.p) eval(attr(.fna,"model")) # # check if some parameters are not to be put in the vector # if(!is.null(.args)){ .tmp <- match(.args,.un) if(all(!is.na(.tmp))).un <- .un[-.tmp] .par <- "alist(.p=" for(.j in 1:length(.args)){ .par <- paste(.par,",",collapse="") .par <- paste(.par,.args[.j],"=",collapse="")} .par <- paste(.par,")",collapse="") formals(.fna) <- eval(parse(text=.par))} # # check if some parameters are common to another formula # if(!is.null(.old)){ .j <- match(.pars,.un) .j <- .j[!is.na(.j)] .un <- .un[-.j] .pars <- .pars[!is.na(.j)] .range <- .range[!is.na(.j)] for(.j in 1:length(.pars)).ch <- gsub(paste(" ",.pars[.j]," ",sep=""), paste(" .p[",.range[.j],"] ",sep=""),.ch)} if(length(.un)>0)for(.j in 1:length(.un)) .ch <- gsub(paste(" ",.un[.j]," ",sep=""), paste(" .p[",.start+.j-1,"] ",sep=""),.ch) attributes(.fna) <- list(formula=.z,model=parse(text=.ch),parameters=.un, covariates=unique(.mem[(!is.na(.ex1)|!is.na(.ex2)|!is.na(.ex3)|!is.na(.ex4)|!is.na(.ex5)|!is.na(.ex6))]), common=.pars,range=c(.start,.start+length(.un)-1),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".i"&.obj!=".fna"]) rm(.obj) return(.fna)} ### method for tccov objects ### finterp.tccov <- function(.z, .envir=NULL, .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, .expand=NULL, .intercept=TRUE, .old=NULL, ...){ if(!inherits(.z,"formula"))return(NULL) # # check for common parameters # .pars <- .range <- NULL if(!is.null(.old)){ if(!is.list(.old)).old <- list(.old) for(.j in .old){ if(!inherits(.j,"formulafn")) stop("objects in .old must have class, formulafn") .pars <- c(.pars,attr(.j,"parameters")) .range <- c(.range,attr(.j,"range")[1]:attr(.j,"range")[2])} if(.start<=max(.range)) warning("possible conflict in vector indexing - check .start")} if(!is.null(.args)&&!is.character(.args)) stop(".args must be a character string") # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} 
if(is.null(.envir)||(!inherits(.envir,"repeated")&&!inherits(.envir,"tccov"))) stop("envir must be an object of class, repeated or tccov") .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # create appropriate names for search in data object # if(inherits(.envir,"repeated")){ .cn <- colnames(.envir$ccov$ccov) .cc <- "$ccov" .cc2 <- "\\$ccov"} else { .cn <- colnames(.envir$ccov) .cc2 <- .cc <- ""} # # dissect formula # .ex1 <- NULL .zz <- fmobj(.z) .ch <- .zz$formula .mem <- .zz$objects .fcn <- .zz$functions .local <- .zz$local rm(.zz) # # create indicator for variables that exist in the tccov object (.ex1) # and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,.cn) .un <- unique(.mem[is.na(.ex1)&!.local]) if(length(unique(.mem[!is.na(.ex1)]))==0&&length(.un)==0) warning("finterp.tccov: no variables found")} # # replace time-constant covariate names by location in data object, # expanding to the proper length, if required # .ex1a <- if(is.null(.ex1)) NULL else .ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) .ch <- gsub(paste(" ",.cn[.ex1a[.j]]," ",sep=""), paste(" ",.ndata,.cc,"$ccov[,", .ex1a[.j],"] ",sep=""),.ch) # # handle W&R formulae # if(is.null(.ex1)||all(!is.na(.ex1))){ if(.formula)return(.z) else { # create model matrix, change column names, and return as # attribute of function if(any("offset"%in%.fcn))stop("offset not allowed") .ch <- as.formula(paste("~",.ch)) .mt <- terms(.ch) if(is.numeric(.mt[[2]])){ if(!.intercept)return(NULL) .n <- dim(.envir$ccov)[1] .dm <- matrix(1) colnames(.dm) <- "(Intercept)" .fna <- function(.p) rep(.p[attr(.fna,"range")[1]],.n)} else { .dm <- model.matrix(.mt,model.frame(.mt,na.action=NULL)) if(!.intercept).dm <- .dm[,-1,drop=FALSE] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) colnames(.dm) <- gsub(paste(.ndata,.cc2,"\\$ccov\\[, ",.ex1a[.j],"\\]",sep=""),paste(.cn[.ex1a[.j]],sep=""),colnames(.dm)) .fna <- function(.p) as.vector(.dm%*% .p[attr(.fna,"range")[1]:attr(.fna,"range")[2]])} attributes(.fna) <- list(formula=.z,model=colnames(.dm), covariates=if(length(.mem)>0) unique(.mem[!is.na(.ex1)]) else NULL, parameters=paste("p[",1:dim(.dm)[2],"]",sep=""), range=c(.start,.start+dim(.dm)[2]-1), class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".i"&.obj!=".fna"&.obj!=".dm"&.obj!=".n"]) rm(.obj) return(.fna)}} # # create function for formulae with unknowns making sure there are no # factor variables present # if(inherits(.envir,"repeated")){ if(length(.ex1a)>0)for(.j in 1:length(.ex1a))if(is.factor(.envir$ccov$ccov[,.ex1a[.j]]))stop(paste(colnames(.envir$ccov$ccov)[.ex1a[.j]],"is a factor variable"))} else { if(length(.ex1a)>0)for(.j in 1:length(.ex1a))if(is.factor(.envir$ccov[,.ex1a[.j]]))stop(paste(colnames(.envir$ccov)[.ex1a[.j]],"is a factor variable"))} # # create and return the function # .fna <- function(.p) eval(attr(.fna,"model")) # # check if some parameters are not to be put in the vector # if(!is.null(.args)){ .tmp <- match(.args,.un) if(all(!is.na(.tmp))).un <- .un[-.tmp] .par <- "alist(.p=" for(.j in 1:length(.args)){ .par <- paste(.par,",",collapse="") .par <- paste(.par,.args[.j],"=",collapse="")} .par <- paste(.par,")",collapse="") formals(.fna) <- eval(parse(text=.par))} # # check if some parameters are common to another formula # if(!is.null(.old)){ .j <- match(.pars,.un) .j <- .j[!is.na(.j)] .un <- .un[-.j] .pars <- .pars[!is.na(.j)] .range <- .range[!is.na(.j)] for(.j in 1:length(.pars)).ch <- gsub(paste(" ",.pars[.j]," ",sep=""), paste(" 
.p[",.range[.j],"] ",sep=""),.ch)} if(length(.un)>0)for(.j in 1:length(.un)) .ch <- gsub(paste(" ",.un[.j]," ",sep=""), paste(" .p[",.start+.j-1,"] ",sep=""),.ch) attributes(.fna) <- list(formula=.z,model=parse(text=.ch),parameters=.un, common=.pars,covariates=unique(.mem[!is.na(.ex1)]), range=c(.start,.start+length(.un)-1),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fna"]) rm(.obj) return(.fna)} ### method for tvcov objects ### finterp.tvcov <- function(.z, .envir=NULL, .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, .expand=NULL, .intercept=TRUE, .old=NULL, ...){ if(!inherits(.z,"formula"))return(NULL) # # check for common parameters # .pars <- .range <- NULL if(!is.null(.old)){ if(!is.list(.old)).old <- list(.old) for(.j in .old){ if(!inherits(.j,"formulafn")) stop("objects in .old must have class, formulafn") .pars <- c(.pars,attr(.j,"parameters")) .range <- c(.range,attr(.j,"range")[1]:attr(.j,"range")[2])} if(.start<=max(.range)) warning("possible conflict in vector indexing - check .start")} if(!is.null(.args)&&!is.character(.args)) stop(".args must be a character string") # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} if(is.null(.envir)||(!inherits(.envir,"repeated")&&!inherits(.envir,"tvcov"))) stop("envir must be an object of class, repeated or tvcov") .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # create appropriate names for search in data object # if(inherits(.envir,"repeated")){ .cn <- colnames(.envir$tvcov$tvcov) .cc <- "$tvcov" .cc2 <- "\\$tvcov"} else { .cn <- colnames(.envir$tvcov) .cc2 <- .cc <- ""} # # dissect formula # .ex1 <- NULL .zz <- fmobj(.z) .ch <- .zz$formula .mem <- .zz$objects .fcn <- .zz$functions .local <- .zz$local rm(.zz) # # create indicator for variables that exist in the tvcov object (.ex1) # and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,.cn) .un <- unique(.mem[is.na(.ex1)&!.local]) if(length(unique(.mem[!is.na(.ex1)]))==0&&length(.un)==0) warning("finterp.tvcov: no variables found")} # # replace time-varying covariate names by location in data object # .ex1a <- if(is.null(.ex1)) NULL else .ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) .ch <- gsub(paste(" ",.cn[.ex1a[.j]]," ",sep=""), paste(" ",.ndata,.cc,"$tvcov[,", .ex1a[.j],"] ",sep=""),.ch) # # handle W&R formulae # if(is.null(.ex1)||all(!is.na(.ex1))){ if(.formula)return(.z) else { # create model matrix, change column names, and return as # attribute of function if(any("offset"%in%.fcn))stop("offset not allowed") .ch <- as.formula(paste("~",.ch)) .mt <- terms(.ch) if(is.numeric(.mt[[2]])){ if(!.intercept)return(NULL) .n <- dim(.envir$tvcov)[1] .dm <- matrix(1) colnames(.dm) <- "(Intercept)" .fna <- function(.p) rep(.p[attr(.fna,"range")[1]],.n)} else { .dm <- model.matrix(.mt,model.frame(.mt,na.action=NULL)) if(!.intercept).dm <- .dm[,-1,drop=FALSE] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) colnames(.dm) <- gsub(paste(.ndata,.cc2,"\\$ccov\\[, ",.ex1a[.j],"\\]",sep=""),paste(.cn[.ex1a[.j]],sep=""),colnames(.dm)) .fna <- function(.p) as.vector(.dm%*% .p[attr(.fna,"range")[1]:attr(.fna,"range")[2]])} attributes(.fna) <- list(formula=.z,model=colnames(.dm), covariates=if(length(.mem)>0) unique(.mem[!is.na(.ex1)]) else NULL, parameters=paste("p[",1:dim(.dm)[2],"]",sep=""), range=c(.start,.start+dim(.dm)[2]-1), class="formulafn") .obj <- ls(all.names=TRUE) 
rm(list=.obj[.obj!=".fna"&.obj!=".dm"&.obj!=".n"]) rm(.obj) return(.fna)}} # # create function for formulae with unknowns making sure there are no # factor variables present # if(inherits(.envir,"repeated")){ if(length(.ex1a)>0)for(.j in 1:length(.ex1a))if(is.factor(.envir$tvcov$tvcov[,.ex1a[.j]]))stop(paste(colnames(.envir$tvcov$tvcov)[.ex1a[.j]],"is a factor variable"))} else { if(length(.ex1a)>0)for(.j in 1:length(.ex1a))if(is.factor(.envir$tvcov[,.ex1a[.j]]))stop(paste(colnames(.envir$tvcov)[.ex1a[.j]],"is a factor variable"))} # # create and return the function # .fna <- function(.p) eval(attr(.fna,"model")) # # check if some parameters are not to be put in the vector # if(!is.null(.args)){ .tmp <- match(.args,.un) if(all(!is.na(.tmp))).un <- .un[-.tmp] .par <- "alist(.p=" for(.j in 1:length(.args)){ .par <- paste(.par,",",collapse="") .par <- paste(.par,.args[.j],"=",collapse="")} .par <- paste(.par,")",collapse="") formals(.fna) <- eval(parse(text=.par))} # # check if some parameters are common to another formula # if(!is.null(.old)){ .j <- match(.pars,.un) .j <- .j[!is.na(.j)] .un <- .un[-.j] .pars <- .pars[!is.na(.j)] .range <- .range[!is.na(.j)] for(.j in 1:length(.pars)).ch <- gsub(paste(" ",.pars[.j]," ",sep=""), paste(" .p[",.range[.j],"] ",sep=""),.ch)} if(length(.un)>0)for(.j in 1:length(.un)) .ch <- gsub(paste(" ",.un[.j]," ",sep=""), paste(" .p[",.start+.j-1,"] ",sep=""),.ch) attributes(.fna) <- list(formula=.z,model=parse(text=.ch),parameters=.un, common=.pars,covariates=unique(.mem[!is.na(.ex1)]), range=c(.start,.start+length(.un)-1),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fna"]) rm(.obj) return(.fna)} ### method for dataframes ### finterp.data.frame <- function(.z, .envir=NULL, .formula=FALSE, .vector=TRUE, .args=NULL, .start=1, .name=NULL, .expand=NULL, .intercept=TRUE, .old=NULL, ...){ if(!inherits(.z,"formula"))return(NULL) # # check for common parameters # .pars <- .range <- NULL if(!is.null(.old)){ if(!is.list(.old)).old <- list(.old) for(.j in .old){ if(!inherits(.j,"formulafn")) stop("objects in .old must have class, formulafn") .pars <- c(.pars,attr(.j,"parameters")) .range <- c(.range,attr(.j,"range")[1]:attr(.j,"range")[2])} if(.start<=max(.range)) warning("possible conflict in vector indexing - check .start")} if(!is.null(.args)&&!is.character(.args)) stop(".args must be a character string") # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # create appropriate names for search in data object # .cn <- colnames(.envir) # # dissect formula # .ex1 <- NULL .zz <- fmobj(.z) .ch <- .zz$formula .mem <- .zz$objects .fcn <- .zz$functions .local <- .zz$local rm(.zz) # # create indicator for variables that exist in the dataframe (.ex1) # and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,.cn) .un <- unique(.mem[is.na(.ex1)&!.local]) if(length(unique(.mem[!is.na(.ex1)]))==0&&length(.un)==0) warning("finterp.data.frame: no variables found")} # # replace covariate names by location in data object # .ex1a <- if(is.null(.ex1)) NULL else .ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)) .ch <- gsub(paste(" ",.cn[.ex1a[.j]]," ",sep=""), paste(" ",.ndata,"$",.cn[.ex1a[.j]],sep=""),.ch) # # handle W&R formulae # if(is.null(.ex1)||all(!is.na(.ex1))){ if(.formula)return(.z) else { # create model matrix, change column names, 
and return as # attribute of function if(any("offset"%in%.fcn))stop("offset not allowed") .ch <- as.formula(paste("~",.ch)) .mt <- terms(.ch) if(is.numeric(.mt[[2]])){ if(!.intercept)return(NULL) .n <- dim(.envir)[1] .dm <- matrix(1) colnames(.dm) <- "(Intercept)" .fna <- function(.p) rep(.p[attr(.fna,"range")[1]],.n)} else { .dm <- model.matrix(.mt,model.frame(.mt,data=.envir,na.action=NULL)) if(!.intercept).dm <- .dm[,-1,drop=FALSE] .fna <- function(.p) as.vector(.dm%*% .p[attr(.fna,"range")[1]:attr(.fna,"range")[2]])} attributes(.fna) <- list(formula=.z,model=colnames(.dm), covariates=if(length(.mem)>0) unique(.mem[!is.na(.ex1)]) else NULL, parameters=paste("p[",1:dim(.dm)[2],"]",sep=""), range=c(.start,.start+dim(.dm)[2]-1), class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".i"&.obj!=".fna"&.obj!=".dm"&.obj!=".n"]) rm(.obj) return(.fna)}} # # create function for formulae with unknowns making sure there are no # factor variables present # if(length(.ex1a)>0)for(.j in 1:length(.ex1a))if(is.factor(.envir[,.ex1a[.j]])) stop(paste(colnames(.envir)[.ex1a[.j]],"is a factor variable")) # # create and return the function # .fna <- function(.p) eval(attr(.fna,"model")) # # check if some parameters are not to be put in the vector # if(!is.null(.args)){ .tmp <- match(.args,.un) if(all(!is.na(.tmp))).un <- .un[-.tmp] .par <- "alist(.p=" for(.j in 1:length(.args)){ .par <- paste(.par,",",collapse="") .par <- paste(.par,.args[.j],"=",collapse="")} .par <- paste(.par,")",collapse="") formals(.fna) <- eval(parse(text=.par))} # # check if some parameters are common to another formula # if(!is.null(.old)){ .j <- match(.pars,.un) .j <- .j[!is.na(.j)] .un <- .un[-.j] .pars <- .pars[!is.na(.j)] .range <- .range[!is.na(.j)] for(.j in 1:length(.pars)).ch <- gsub(paste(" ",.pars[.j]," ",sep=""), paste(" .p[",.range[.j],"] ",sep=""),.ch)} if(length(.un)>0)for(.j in 1:length(.un)) .ch <- gsub(paste(" ",.un[.j]," ",sep=""), paste(" .p[",.start+.j-1,"] ",sep=""),.ch) attributes(.fna) <- list(formula=.z,model=parse(text=.ch),parameters=.un, common=.pars,covariates=unique(.mem[!is.na(.ex1)]), range=c(.start,.start+length(.un)-1),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fna"]) rm(.obj) return(.fna)} ### functions to find the variables and parameters in functions, ### perhaps transforming them to read from a data object ### fnenvir <- function(.z, ...) 
UseMethod("fnenvir") ### default method ### fnenvir.default <- function(.z, .envir=parent.frame(), .name=NULL, .expand=TRUE, .response=FALSE, ...){ if(!is.function(.z))return(NULL) # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} # # call appropriate special method if it exists # if(!is.environment(.envir)){ if(is.null(.name)).name <- paste(deparse(substitute(.envir))) if(inherits(.envir,"repeated"))return(fnenvir.repeated(.z,.envir,.name=.name,.expand,.response)) if(inherits(.envir,"tccov"))return(fnenvir.tccov(.z,.envir,.name=.name,.expand)) if(inherits(.envir,"tvcov"))return(fnenvir.tvcov(.z,.envir,.name=.name,.expand)) if(inherits(.envir,"data.frame"))return(fnenvir.data.frame(.z,.envir,.name=.name,.expand))} # # transform function to a character string # .ch1 <- deparse(.z,width.cutoff=500) .ch2 <- .ch1[1] .ch1 <- .ch1[-1] # # find arguments of function # .mem2 <- strsplit(gsub("[(),]"," ",.ch2)," ")[[1]] if(length(.mem2)>0).mem2 <- .mem2[.mem2!=""] if(length(.mem2)>1).mem2 <- .mem2[2:length(.mem2)] else .mem2 <- NULL # # find body of function # .fcn <- .ex <- .ch <- NULL for(.j in 1:length(.ch1)).ch <- paste(.ch,.ch1[.j],collapse=" ") # # remove punctuation from function body # #.mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"\\\=-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] .mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"=\\-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] if(length(.mem)>0).mem <- .mem[.mem!=""] # # create indicators for variables that exist in the given environment (.ex) # and for functions (.fcn) and a vector of unique parameter names (.un) # if(length(.mem)>0){ for(.j in 1:length(.mem)){ .ex <- c(.ex,exists(.mem[.j],envir=.envir)) .fcn <- c(.fcn,if(exists(.mem[.j])){ if(.mem[.j]=="function"||.mem[.j]=="if"|| .mem[.j]=="else"||.mem[.j]=="for"|| .mem[.j]=="while"||.mem[.j]=="repeat") TRUE else is.function(eval(parse(text=.mem[.j])))} else FALSE)} for(.j in 1:length(.mem)){ if(!.fcn[.j]&&.ex[.j]&&is.factor(eval(parse(text=.mem[.j]),envir=.envir)))stop(paste(.mem[.j],"is a factor variable"))} .un <- unique(.mem[!.ex]) if(length(unique(.mem[.ex&!.fcn]))==0&&length(.un)==0) warning("fnenvir.default: no variables found")} # # put extra spaces in character string so that substitutions can be made # .ch <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch))) .ch <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",gsub("\\+","+ ",.ch)))) .ch <- paste(" ",gsub("\\("," ( ",gsub(":"," : ",.ch))," ",sep="") .ch2 <- strsplit(.ch," ")[[1]] # # find arguments in body with (.un) and without (.un0) subscripts # .un <- .un0 <- .un1 <- NULL if(length(.mem2)>0)for(.j in 1:length(.mem2)){ .ex1a <- NULL for(.k in 1:length(.ch2))if(.mem2[.j]==.ch2[.k]){ if(.k0){ .ex1a <- c(.ex1a,paste(.ch2[.k],.ch2[.k+1],sep="")) .un1 <- c(.un1,.ch2[.k])} else .un0 <- c(.un0,.ch2[.k])} if(!is.null(.ex1a)){ .ex1a <- unique(.ex1a) .o <- gsub("(^[[:alnum:]]\\[)|(\\])","",.ex1a) .un <- if(length(grep("[[:alpha:]]",.o))>0)c(.un,.ex1a) else c(.un,.ex1a[order(as.numeric(.o))])}} # # find unique arguments without subscripts and add to .un # if(length(.un0)>0){ if(length(.un1)>0){ .tmp <- NULL for(.k in 1:length(.un1)) if(length(grep(.un1[.k],.un0))>0) .tmp <- c(.tmp,grep(.un1[.k],.un0)) .un <- c(.un,unique(if(!is.null(.tmp)).un0[-.tmp]else .un0))} else .un <- c(.un,unique(.un0))} # # create the new function that evaluates its 
model attribute # .fnb <- eval(parse(text=paste("function(",paste(.mem2,collapse=","),")",paste("eval(attr(.fnb,\"model\"))")))) .ex <- if(length(.fcn)>0&&!is.null(.ex)).ex&!.fcn else NULL attributes(.fnb) <- list(model=parse(text=.ch1),parameters=.un, covariates=unique(.mem[.ex]),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fnb"]) rm(.obj) return(.fnb)} ### method for repeated objects ### fnenvir.repeated <- function(.z, .envir=NULL, .name=NULL, .expand=TRUE, .response=FALSE, ...){ if(!is.function(.z))return(NULL) # # find the appropriate environment if its name is supplied # if(is.name(.envir)){ if(is.null(.name)).name <- as.character(.envir) .envir <- eval(.envir)} if(is.null(.envir)||!inherits(.envir,"repeated"))stop("envir must be an object of class, repeated") .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # transform function to a character string # .ch1 <- deparse(.z,width.cutoff=500) .ch2 <- .ch1[1] .ch1 <- .ch1[-1] # # find arguments of function # .mem2 <- strsplit(gsub("[(),]"," ",.ch2)," ")[[1]] if(length(.mem2)>0).mem2 <- .mem2[.mem2!=""] if(length(.mem2)>1).mem2 <- .mem2[2:length(.mem2)] else .mem2 <- NULL # # find body of function # .fcn <- .ex1 <- .ex2 <- .ex3 <- .ex4 <- .ch <- NULL for(.j in 1:length(.ch1)).ch <- paste(.ch,.ch1[.j],collapse=" ") # # remove punctuation from function body # #.mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"\\\=-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] .mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"=\\-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] if(length(.mem)>0).mem <- .mem[.mem!=""] # # create indicators for variables that exist in the repeated environment (.ex) # and for functions (.fcn) and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,colnames(.envir$ccov$ccov)) .ex2 <- match(.mem,colnames(.envir$tvcov$tvcov)) .ex3 <- match(.mem,"times") .ex4 <- match(.mem,colnames(.envir$response$y)) if(any(!is.na(.ex2))&&!.expand)stop("time-varying covariates present - time-constant ones must be expanded") for(.j in 1:length(.mem)){ .fcn <- c(.fcn,if(exists(.mem[.j])){ if(.mem[.j]=="function"||.mem[.j]=="if"|| .mem[.j]=="else"||.mem[.j]=="for"|| .mem[.j]=="while"||.mem[.j]=="repeat") TRUE else is.function(eval(parse(text=.mem[.j])))&&is.na(.ex1[.j])&&is.na(.ex2[.j])&&is.na(.ex3[.j])&&is.na(.ex4[.j])} else FALSE)} .un <- unique(.mem[is.na(.ex1)&is.na(.ex2)&is.na(.ex3)&is.na(.ex4)&!.fcn]) if(length(unique(.mem[(!is.na(.ex1)|!is.na(.ex2)|!is.na(.ex3)|!is.na(.ex4))&!.fcn]))==0&&length(.un)==0) warning("fnenvir.repeated: no variables found")} # # put extra spaces in character string so that substitutions can be made # for(.j in 1:length(.ch1)){ .ch1[.j] <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch1[.j]))) .ch1[.j] <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",.ch1[.j]))) .ch1[.j] <- paste(" ",gsub("\\("," ( ",.ch1[.j])," ",sep="")} # # replace time-constant covariate names by location in data object, # expanding to the proper length, if required # if(.expand).i <- covind(.envir) .ex1a <- .ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)){ if(is.factor(.envir$ccov$ccov[,.ex1a[.j]]))stop(paste(colnames(.envir$ccov$ccov)[.ex1a[.j]],"is a factor variable")) for(.k in 1:length(.ch1)) .ch1[.k] <- if(.expand)gsub(paste(" ",colnames(.envir$ccov$ccov)[.ex1a[.j]], " ",sep=""),paste(" ",.ndata,"$ccov$ccov[,", .ex1a[.j],"][.i] ",sep=""),.ch1[.k]) else gsub(paste(" 
",colnames(.envir$ccov$ccov)[.ex1a[.j]], " ",sep=""),paste(" ",.ndata,"$ccov$ccov[,", .ex1a[.j],"] ",sep=""),.ch1[.k])} # # replace time-varying covariate names by location in data object # .ex2a <- .ex2[!is.na(.ex2)] if(length(.ex2a)>0)for(.j in 1:length(.ex2a)){ if(is.factor(.envir$tvcov$tvcov[,.ex2a[.j]]))stop(paste(colnames(.envir$tvcov$tvcov)[.ex2a[.j]],"is a factor variable")) for(.k in 1:length(.ch1)) .ch1[.k] <- gsub(paste(" ",colnames(.envir$tvcov$tvcov)[.ex2a[.j]], " ",sep=""),paste(" ",.ndata,"$tvcov$tvcov[,", .ex2a[.j],"] ",sep=""),.ch1[.k])} # # replace special name, times, by location in data object # .ex3a <- if(is.null(.ex3)) NULL else .ex3[!is.na(.ex3)] if(length(.ex3a)>0)for(.k in 1:length(.ch1)) .ch1[.k] <- gsub(" times ",paste(" ",.ndata,"$response$times ",sep=""),.ch1[.k]) # # replace response by location in data object # .ex4a <- .ex4[!is.na(.ex4)] if(length(.ex4a)>0){ if(dim(.envir$response$y)[2]==1&&!.response) stop(paste(colnames(.envir$response$y)[.ex4a]," is the response and cannot be a covariate")) for(.k in 1:length(.ch1)) .ch1[.k] <- gsub(paste(" ",colnames(.envir$response$y)[.ex4a[.j]], " ",sep=""),paste(" ",.ndata,"$response$y[,", .ex4a[.j],"] ",sep=""),.ch1[.k])} # # put extra spaces in character string so that substitutions can be made # .ch <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch))) .ch <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",gsub("\\+","+ ",.ch)))) .ch <- paste(" ",gsub("\\("," ( ",gsub(":"," : ",.ch))," ",sep="") .ch2 <- strsplit(.ch," ")[[1]] # # find arguments in body with (.un) and without (.un0) subscripts # .un <- .un0 <- .un1 <- NULL if(length(.mem2)>0)for(.j in 1:length(.mem2)){ .ex1a <- NULL for(.k in 1:length(.ch2))if(.mem2[.j]==.ch2[.k]){ if(.k0){ .ex1a <- c(.ex1a,paste(.ch2[.k],.ch2[.k+1],sep="")) .un1 <- c(.un1,.ch2[.k])} else .un0 <- c(.un0,.ch2[.k])} if(!is.null(.ex1a)){ .ex1a <- unique(.ex1a) .o <- gsub("(^[[:alnum:]]\\[)|(\\])","",.ex1a) .un <- if(length(grep("[[:alpha:]]",.o))>0)c(.un,.ex1a) else c(.un,.ex1a[order(as.numeric(.o))])}} # # find unique arguments without subscripts and add to .un # if(length(.un0)>0){ if(length(.un1)>0){ .tmp <- NULL for(.k in 1:length(.un1)) if(length(grep(.un1[.k],.un0))>0) .tmp <- c(.tmp,grep(.un1[.k],.un0)) .un <- c(.un,unique(if(!is.null(.tmp)).un0[-.tmp]else .un0))} else .un <- c(.un,unique(.un0))} # # create the new function that evaluates its model attribute # .fnb <- eval(parse(text=paste("function(",paste(.mem2,collapse=","),")",paste("eval(attr(.fnb,\"model\"))")))) .ex1 <- if(!is.null(.ex1)&&!is.null(.ex2)&&!is.null(.ex3)&&length(.fcn)>0) (!is.na(.ex1)|!is.na(.ex2)|!is.na(.ex3))&!.fcn else NULL attributes(.fnb) <- list(model=parse(text=.ch1),parameters=.un, covariates=unique(.mem[.ex1]),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".i"&.obj!=".fnb"]) rm(.obj) return(.fnb)} ### method for tccov objects ### fnenvir.tccov <- function(.z, .envir=NULL, .name=NULL, .expand=TRUE, ...){ if(!is.function(.z))return(NULL) # # find the appropriate environment if its name is supplied # if(is.null(.envir)||(!inherits(.envir,"repeated")&&!inherits(.envir,"tccov")))stop("envir must be an object of class, repeated or tccov") .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # transform function to a character string # .ch1 <- deparse(.z,width.cutoff=500) .ch2 <- .ch1[1] .ch1 <- .ch1[-1] # # find arguments of function # .mem2 <- strsplit(gsub("[(),]"," ",.ch2)," ")[[1]] if(length(.mem2)>0).mem2 <- .mem2[.mem2!=""] if(length(.mem2)>1).mem2 <- 
.mem2[2:length(.mem2)] else .mem2 <- NULL # # find body of function # .fcn <- .ex1 <- .ch <- NULL for(.j in 1:length(.ch1)).ch <- paste(.ch,.ch1[.j],collapse=" ") # # remove punctuation from function body # #.mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"\\\=-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] .mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"=\\-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] if(length(.mem)>0).mem <- .mem[.mem!=""] # # create appropriate names for search in data object # if(inherits(.envir,"repeated")){ .cn <- colnames(.envir$ccov$ccov) .cc <- "$ccov"} else { .cn <- colnames(.envir$ccov) .cc <- ""} # # create indicators for variables that exist in the tccov environment (.ex) # and for functions (.fcn) and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,.cn) for(.j in 1:length(.mem)){ .fcn <- c(.fcn,if(exists(.mem[.j])){ if(.mem[.j]=="function"||.mem[.j]=="if"|| .mem[.j]=="else"||.mem[.j]=="for"|| .mem[.j]=="while"||.mem[.j]=="repeat") TRUE else is.function(eval(parse(text=.mem[.j])))&&is.na(.ex1[.j])} else FALSE)} .un <- unique(.mem[is.na(.ex1)&!.fcn]) if(length(unique(.mem[!is.na(.ex1)&!.fcn]))==0&&length(.un)==0) warning("fnenvir.tccov: no variables found")} # # put extra spaces in character string so that substitutions can be made # for(.j in 1:length(.ch1)){ .ch1[.j] <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch1[.j]))) .ch1[.j] <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",.ch1[.j]))) .ch1[.j] <- paste(" ",gsub("\\("," ( ",.ch1[.j])," ",sep="")} # # replace time-constant covariate names by location in data object, # expanding to the proper length, if required # .ex1a <- .ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)){ if(inherits(.envir,"repeated")){ if(is.factor(.envir$ccov$ccov[,.ex1a[.j]]))stop(paste(colnames(.envir$ccov$ccov)[.ex1a[.j]],"is a factor variable"))} else if(is.factor(.envir$ccov[,.ex1a[.j]]))stop(paste(colnames(.envir$ccov)[.ex1a[.j]],"is a factor variable")) for(.k in 1:length(.ch1)).ch1[.k] <- gsub(paste(" ",.cn[.ex1a[.j]], " ",sep=""),paste(" ",.ndata,.cc,"$ccov[,", .ex1a[.j],"] ",sep=""),.ch1[.k])} # # put extra spaces in character string so that substitutions can be made # .ch <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch))) .ch <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",gsub("\\+","+ ",.ch)))) .ch <- paste(" ",gsub("\\("," ( ",gsub(":"," : ",.ch))," ",sep="") .ch2 <- strsplit(.ch," ")[[1]] # # find arguments in body with (.un) and without (.un0) subscripts # .un <- .un0 <- .un1 <- NULL if(length(.mem2)>0)for(.j in 1:length(.mem2)){ .ex1a <- NULL for(.k in 1:length(.ch2))if(.mem2[.j]==.ch2[.k]){ if(.k0){ .ex1a <- c(.ex1a,paste(.ch2[.k],.ch2[.k+1],sep="")) .un1 <- c(.un1,.ch2[.k])} else .un0 <- c(.un0,.ch2[.k])} if(!is.null(.ex1a)){ .ex1a <- unique(.ex1a) .o <- gsub("(^[[:alnum:]]\\[)|(\\])","",.ex1a) .un <- if(length(grep("[[:alpha:]]",.o))>0)c(.un,.ex1a) else c(.un,.ex1a[order(as.numeric(.o))])}} # # find unique arguments without subscripts and add to .un # if(length(.un0)>0){ if(length(.un1)>0){ .tmp <- NULL for(.k in 1:length(.un1)) if(length(grep(.un1[.k],.un0))>0) .tmp <- c(.tmp,grep(.un1[.k],.un0)) .un <- c(.un,unique(if(!is.null(.tmp)).un0[-.tmp]else .un0))} else .un <- c(.un,unique(.un0))} # # create the new function that evaluates its model attribute # .fnb <- eval(parse(text=paste("function(",paste(.mem2,collapse=","),")",paste("eval(attr(.fnb,\"model\"))")))) .ex1 <- 
if(!is.null(.ex1)&&length(.fcn)>0)!is.na(.ex1)&!.fcn else NULL attributes(.fnb) <- list(model=parse(text=.ch1),parameters=.un, covariates=unique(.mem[.ex1]),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fnb"]) rm(.obj) return(.fnb)} ### method for tvcov objects ### fnenvir.tvcov <- function(.z, .envir=NULL, .name=NULL, .expand=TRUE, ...){ if(!is.function(.z))return(NULL) # # find the appropriate environment if its name is supplied # if(is.null(.envir)||(!inherits(.envir,"repeated")&&!inherits(.envir,"tvcov")))stop("envir must be an object of class, repeated or tvcov") .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # transform function to a character string # .ch1 <- deparse(.z,width.cutoff=500) .ch2 <- .ch1[1] .ch1 <- .ch1[-1] # # find arguments of function # .mem2 <- strsplit(gsub("[(),]"," ",.ch2)," ")[[1]] if(length(.mem2)>0).mem2 <- .mem2[.mem2!=""] if(length(.mem2)>1).mem2 <- .mem2[2:length(.mem2)] else .mem2 <- NULL # # find body of function # .fcn <- .ex2 <- .ch <- NULL for(.j in 1:length(.ch1)).ch <- paste(.ch,.ch1[.j],collapse=" ") # # remove punctuation from function body # #.mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"\\\=-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] .mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"=\\-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] if(length(.mem)>0).mem <- .mem[.mem!=""] # # create appropriate names for search in data object # if(inherits(.envir,"repeated")){ .cn <- colnames(.envir$tvcov$tvcov) .cc <- "$tvcov"} else { .cn <- colnames(.envir$tvcov) .cc <- ""} # # create indicators for variables that exist in the tccov environment (.ex) # and for functions (.fcn) and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex2 <- match(.mem,.cn) for(.j in 1:length(.mem)){ .fcn <- c(.fcn,if(exists(.mem[.j])){ if(.mem[.j]=="function"||.mem[.j]=="if"|| .mem[.j]=="else"||.mem[.j]=="for"|| .mem[.j]=="while"||.mem[.j]=="repeat") TRUE #else is.function(eval(parse(text=.mem[.j])))&&is.na(.ex1[.j])} # bruce edit else is.function(eval(parse(text=.mem[.j])))&&is.na(.ex2[.j])} else FALSE)} .un <- unique(.mem[is.na(.ex2)&!.fcn]) if(length(unique(.mem[!is.na(.ex2)&!.fcn]))==0&&length(.un)==0) warning("fnenvir.tvcov: no variables found")} # # put extra spaces in character string so that substitutions can be made # for(.j in 1:length(.ch1)){ .ch1[.j] <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch1[.j]))) .ch1[.j] <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",.ch1[.j]))) .ch1[.j] <- paste(" ",gsub("\\("," ( ",.ch1[.j])," ",sep="")} # # replace time-varying covariate names by location in data object, # expanding to the proper length, if required # .ex2a <- .ex2[!is.na(.ex2)] if(length(.ex2a)>0)for(.j in 1:length(.ex2a)){ if(inherits(.envir,"repeated")){ if(is.factor(.envir$tvcov$tvcov[,.ex2a[.j]]))stop(paste(colnames(.envir$tvcov$tvcov)[.ex2a[.j]],"is a factor variable"))} else if(is.factor(.envir$tvcov[,.ex2a[.j]]))stop(paste(colnames(.envir$tvcov)[.ex2a[.j]],"is a factor variable")) for(.k in 1:length(.ch1)).ch1[.k] <- gsub(paste(" ",.cn[.ex2a[.j]], " ",sep=""),paste(" ",.ndata,.cc,"$tvcov[,", .ex2a[.j],"] ",sep=""),.ch1[.k])} # # put extra spaces in character string so that substitutions can be made # .ch <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch))) .ch <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",gsub("\\+","+ ",.ch)))) .ch <- paste(" ",gsub("\\("," ( ",gsub(":"," : ",.ch))," ",sep="") .ch2 <- 
strsplit(.ch," ")[[1]] # # find arguments in body with (.un) and without (.un0) subscripts # .un <- .un0 <- .un1 <- NULL if(length(.mem2)>0)for(.j in 1:length(.mem2)){ .ex1a <- NULL for(.k in 1:length(.ch2))if(.mem2[.j]==.ch2[.k]){ if(.k0){ .ex1a <- c(.ex1a,paste(.ch2[.k],.ch2[.k+1],sep="")) .un1 <- c(.un1,.ch2[.k])} else .un0 <- c(.un0,.ch2[.k])} if(!is.null(.ex1a)){ .ex1a <- unique(.ex1a) .o <- gsub("(^[[:alnum:]]\\[)|(\\])","",.ex1a) .un <- if(length(grep("[[:alpha:]]",.o))>0)c(.un,.ex1a) else c(.un,.ex1a[order(as.numeric(.o))])}} # # find unique arguments without subscripts and add to .un # if(length(.un0)>0){ if(length(.un1)>0){ .tmp <- NULL for(.k in 1:length(.un1)) if(length(grep(.un1[.k],.un0))>0) .tmp <- c(.tmp,grep(.un1[.k],.un0)) .un <- c(.un,unique(if(!is.null(.tmp)).un0[-.tmp]else .un0))} else .un <- c(.un,unique(.un0))} # # create the new function that evaluates its model attribute # .fnb <- eval(parse(text=paste("function(",paste(.mem2,collapse=","),")",paste("eval(attr(.fnb,\"model\"))")))) .ex2 <- if(!is.null(.ex2)&&length(.fcn)>0)!is.na(.ex2)&!.fcn else NULL attributes(.fnb) <- list(model=parse(text=.ch1),parameters=.un, covariates=unique(.mem[.ex2]),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fnb"]) rm(.obj) return(.fnb)} ### method for dataframes ### fnenvir.data.frame <- function(.z, .envir=NULL, .name=NULL, .expand=TRUE, ...){ if(!is.function(.z))return(NULL) # # find the appropriate environment if its name is supplied # .ndata <- if(is.null(.name))paste(deparse(substitute(.envir))) else .name # # transform function to a character string # .ch1 <- deparse(.z,width.cutoff=500) .ch2 <- .ch1[1] .ch1 <- .ch1[-1] # # find arguments of function # .mem2 <- strsplit(gsub("[(),]"," ",.ch2)," ")[[1]] if(length(.mem2)>0).mem2 <- .mem2[.mem2!=""] if(length(.mem2)>1).mem2 <- .mem2[2:length(.mem2)] else .mem2 <- NULL # # find body of function # .fcn <- .ex1 <- .ch <- NULL for(.j in 1:length(.ch1)).ch <- paste(.ch,.ch1[.j],collapse=" ") # # remove punctuation from function body # #.mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"\\\=-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] .mem <- strsplit(gsub("(\\[(0|1|2|3|4|5|6|7|8|9|:|,)+\\])|([][+*/^():!<>%&|~,{}\"=\\-])|( [0-9]+)|(\\.[0-9]+)|(^[0-9]+)"," ",.ch)," ")[[1]] if(length(.mem)>0).mem <- .mem[.mem!=""] # # create appropriate names for search in data object # .cn <- colnames(.envir) # # create indicators for variables that exist in the dataframe (.ex) # and for functions (.fcn) and a vector of unique parameter names (.un) # if(length(.mem)>0){ .ex1 <- match(.mem,.cn) for(.j in 1:length(.mem)){ .fcn <- c(.fcn,if(exists(.mem[.j])){ if(.mem[.j]=="function"||.mem[.j]=="if"|| .mem[.j]=="else"||.mem[.j]=="for"|| .mem[.j]=="while"||.mem[.j]=="repeat") TRUE else is.function(eval(parse(text=.mem[.j])))&&is.na(.ex1[.j])} else FALSE)} .un <- unique(.mem[is.na(.ex1)&!.fcn]) if(length(unique(.mem[!is.na(.ex1)&!.fcn]))==0&&length(.un)==0) warning("fnenvir.data.frame: no variables found")} # # put extra spaces in character string so that substitutions can be made # for(.j in 1:length(.ch1)){ .ch1[.j] <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch1[.j]))) .ch1[.j] <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",.ch1[.j]))) .ch1[.j] <- paste(" ",gsub("\\("," ( ",.ch1[.j])," ",sep="")} # # replace covariate names by location in data object # .ex1a <- .ex1[!is.na(.ex1)] if(length(.ex1a)>0)for(.j in 1:length(.ex1a)){ 
if(is.factor(.envir[,.ex1a[.j]]))stop(paste(colnames(.envir)[.ex1a[.j]],"is a factor variable")) for(.k in 1:length(.ch1)).ch1[.k] <- gsub(paste(" ",.cn[.ex1a[.j]], " ",sep=""),paste(" ",.ndata,"$",.cn[.ex1a[.j]], sep=""),.ch1[.k])} # # put extra spaces in character string so that substitutions can be made # .ch <- gsub("\\^"," ^ ",gsub("\\)"," )",gsub("\\["," [",.ch))) .ch <- gsub("-","- ",gsub("/"," / ",gsub(","," ,",gsub("\\+","+ ",.ch)))) .ch <- paste(" ",gsub("\\("," ( ",gsub(":"," : ",.ch))," ",sep="") .ch2 <- strsplit(.ch," ")[[1]] # # find arguments in body with (.un) and without (.un0) subscripts # .un <- .un0 <- .un1 <- NULL if(length(.mem2)>0)for(.j in 1:length(.mem2)){ .ex1a <- NULL for(.k in 1:length(.ch2))if(.mem2[.j]==.ch2[.k]){ if(.k0){ .ex1a <- c(.ex1a,paste(.ch2[.k],.ch2[.k+1],sep="")) .un1 <- c(.un1,.ch2[.k])} else .un0 <- c(.un0,.ch2[.k])} if(!is.null(.ex1a)){ .ex1a <- unique(.ex1a) .o <- gsub("(^[[:alnum:]]\\[)|(\\])","",.ex1a) .un <- if(length(grep("[[:alpha:]]",.o))>0)c(.un,.ex1a) else c(.un,.ex1a[order(as.numeric(.o))])}} # # find unique arguments without subscripts and add to .un # if(length(.un0)>0){ if(length(.un1)>0){ .tmp <- NULL for(.k in 1:length(.un1)) if(length(grep(.un1[.k],.un0))>0) .tmp <- c(.tmp,grep(.un1[.k],.un0)) .un <- c(.un,unique(if(!is.null(.tmp)).un0[-.tmp]else .un0))} else .un <- c(.un,unique(.un0))} # # create the new function that evaluates its model attribute # .fnb <- eval(parse(text=paste("function(",paste(.mem2,collapse=","),")",paste("eval(attr(.fnb,\"model\"))")))) .ex1 <- if(!is.null(.ex1)&&length(.fcn)>0)!is.na(.ex1)&!.fcn else NULL attributes(.fnb) <- list(model=parse(text=.ch1),parameters=.un, covariates=unique(.mem[.ex1]),class="formulafn") .obj <- ls(all.names=TRUE) rm(list=.obj[.obj!=".fnb"]) rm(.obj) return(.fnb)} ### print methods ### print.formulafn <- function(x, ...){ z <- x; rm(x) if(!is.null(attr(z,"formula"))){ cat("\nformula:\n") print.default(unclass(attr(z,"formula")))} if(!is.character(attr(z,"model"))){ model <- deparse(attr(z,"model")) model[1] <- sub("expression\\(","",model[1]) model[length(model)] <- sub("\\)$","",model[length(model)]) cat("\nmodel function:\n") cat(model,sep="\n")} if(length(attr(z,"covariates"))>0){ cat(paste("\ncovariates:\n")) for(i in 1:length(attr(z,"covariates"))) cat(attr(z,"covariates")[i]," ") cat("\n")} if(length(attr(z,"parameters"))>0){ if(length(attr(z,"common"))>0)cat(paste("\nnew parameters:\n")) else cat(paste("\nparameters:\n")) for(i in 1:length(attr(z,"parameters"))) cat(attr(z,"parameters")[i]," ") cat("\n")} if(length(attr(z,"common"))>0){ cat(paste("\ncommon parameters:\n")) for(i in 1:length(attr(z,"common"))) cat(attr(z,"common")[i]," ") cat("\n")} cat("\n")} print.fmobj <- function(x, ...){ z <- x; rm(x) if(any(z$parameters)){ tmp <- unique(z$objects[z$parameters]) cat(paste("\nparameters:\n")) for(i in 1:length(tmp)) cat(tmp[i]," ") cat("\n")} if(any(z$covariates)){ tmp <- unique(z$objects[z$covariates]) cat(paste("\ncovariates:\n")) for(i in 1:length(tmp)) cat(tmp[i]," ") cat("\n")} if(any(z$factors)){ tmp <- unique(z$objects[z$factors]) cat(paste("\nfactors:\n")) for(i in 1:length(tmp)) cat(tmp[i]," ") cat("\n")} if(!is.null(z$functions)){ cat(paste("\nfunctions:\n")) for(i in 1:length(z$functions)) cat(z$functions[i]," ") cat("\n")} cat("\n") } ### miscellaneous methods ### ### extract the model ### model <- function(z, ...) UseMethod("model") model.formulafn <- function(z, ...) 
attr(z,"model") ### extract the original formula ### formula.formulafn <- function(x, ...) attr(x,"formula") ### extract the covariate names ### covariates.formulafn <- function(z, ...) attr(z,"covariates") ### extract the parameter names ### parameters <- function(z, ...) UseMethod("parameters") parameters.formulafn <- function(z, ...) attr(z,"parameters") rmutil/R/dist.r0000755000176200001440000013147513437235203013135 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # pinvgauss(q, m, s) # dinvgauss(y, m, s, log=FALSE) # qinvgauss(p, m, s) # rinvgauss(n, m, s) # plaplace(q, m=0, s=1) # dlaplace(y, m=0, s=1) # qlaplace(p, m=0, s=1) # rlaplace(n, m=0, s=1) # plevy(q, m=0, s=1) # dlevy(y, m=0, s=1) # qlevy(p, m=0, s=1) # rlevy(n, m=0, s=1) # ppareto(q, m, s) # dpareto(y, m, s) # qpareto(p, m, s) # rpareto(n, m, s) # psimplex(q, m, s) # dsimplex(y, m, s) # qsimplex(p, m, s) # rsimplex(n, m, s) # ptwosidedpower(q, m, s=2) # dtwosidedpower(y, m, s=2) # qtwosidedpower(p, m, s=2) # rtwosidedpower(n, m, s=2) # # pboxcox(q, m=0, s=1, f=1) # dboxcox(y, m=0, s=1, f=1) # qboxcox(p, m=0, s=1, f=1) # rboxcox(n, m=0, s=1, f=1) # pburr(q, m, s, f) # dburr(y, m, s, f) # qburr(p, m, s, f) # rburr(n, m, s, f) # pgextval(q, s, m, f) # dgextval(y, s, m, f) # qgextval(p, s, m, f) # rgextval(n, s, m, f) # pggamma(q, s, m, f) # dggamma(y, s, m, f) # qggamma(p, s, m, f) # rggamma(n, s, m, f) # pginvgauss(q, m, s, f) # dginvgauss(y, m, s, f) # qginvgauss(p, m, s, f) # rginvgauss(n, m, s, f) # pglogis(q, m=0, s=1, f=1) # dglogis(y, m=0, s=1, f=1) # qglogis(p, m=0, s=1, f=1) # rglogis(n, m=0, s=1, f=1) # pgweibull(q, s, m, f) # dgweibull(y, s, m, f) # qgweibull(p, s, m, f) # rgweibull(n, s, m, f) # phjorth(q, m, s, f) # dhjorth(y, m, s, f) # qhjorth(p, m, s, f) # rhjorth(n, m, s, f) # ppowexp(q, m=0, s=1, f=1) # dpowexp(y, m=0, s=1, f=1) # qpowexp(p, m=0, s=1, f=1) # rpowexp(n, m=0, s=1, f=1) # pskewlaplace(q, m=0, s=1, f=1) # dskewlaplace(y, m=0, s=1, f=1) # qskewlaplace(p, m=0, s=1, f=1) # rskewlaplace(n, m=0, s=1, f=1) # # pbetabinom(q, size, m, s) # dbetabinom(y, size, m, s) # qbetabinom(p, size, m, s) # rbetabinom(n, size, m, s) # pdoublebinom(q, size, m, s) # ddoublebinom(y, size, m, s) # qdoublebinom(p, size, m, s) # rdoublebinom(n, size, m, s) # pmultbinom(q, size, m, s) # dmultbinom(y, size, m, s) # qmultbinom(p, size, m, s) # rmultbinom(n, size, m, s) # pdoublepois(q, m, s) # ddoublepois(y, m, s) # qdoublepois(p, m, s) # rdoublepois(n, m, s) # pmultpois(q, m, s) # dmultpois(y, m, s) # qmultpois(p, m, s) # rmultpois(n, m, s) # ppvfpois(q, m, s, f) # dpvfpois(y, m, s, f) # qpvfpois(p, m, s, f) # rpvfpois(n, m, s, f) # pgammacount(q, m, s) # dgammacount(y, m, s) # qgammacount(p, m, s) # rgammacount(n, m, s) # pconsul(q, m, s) # 
dconsul(y, m, s) # qconsul(p, m, s) # rconsul(n, m, s) # # DESCRIPTION # # Functions to compute the probability and cumulative probability # functions for # continuous two parameter distributions: # inverse Gaussian, Laplace, Levy, Pareto, simplex # continuous three parameter distributions: # Box-Cox, Burr, generalized extreme value, generalized gamma, generalized # inverse Gaussian, generalized logistic, generalized Weibull, Hjorth, # power exponential, skew Laplace # discrete two parameter distributions: # double Poisson, multiplicative Poisson, double binomial, Consul, # multiplicative binomial, beta binomial ### continuous two-parameter distributions ### ### inverse Gaussian distribution ### pinvgauss <- function(q, m, s){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") t <- q/m v <- sqrt(q*s) pnorm((t-1)/v)+exp(2/(m*s))*pnorm(-(t+1)/v)} dinvgauss <- function(y, m, s, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") tmp <- -(y-m)^2/(2*y*s*m^2)-(log(2*pi*s)+3*log(y))/2 if(!log)tmp <- exp(tmp) tmp} qinvgauss <- function(p, m, s){ h <- function(y){ t <- y/m[i] v <- sqrt(y*s[i]) pnorm((t-1)/v)+exp(2/(m[i]*s[i]))*pnorm(-(t+1)/v)-p[i]} if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(.Machine$double.xmin,20) while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root} tmp} rinvgauss <- function(n=1, m, s) qinvgauss(runif(n),m=m,s=s) ### Laplace distribution ### plaplace <- function(q, m=0, s=1){ if(any(s<=0))stop("s must be positive") u <- (q-m)/s t <- exp(-abs(u))/2 ifelse(u<0,t,1-t)} dlaplace <- function(y, m=0, s=1, log=FALSE){ if(any(s<=0))stop("s must be positive") tmp <- -abs(y-m)/s-log(2*s) if(!log)tmp <- exp(tmp) tmp} qlaplace <- function(p, m=0, s=1){ h <- function(y){ #u <- (y-m[i])/s[i] #t <- exp(-abs(u))/2 #ifelse(u<0,t,1-t)-p[i] ## bruce edit: u <- (y-m)/s t <- exp(-abs(u))/2 ifelse(u<0,t,1-t)-p } if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(s<=0))stop("s must be positive") ifelse(p<0.5,s*log(2*p)+m,-s*log(2*(1-p))+m)} rlaplace <- function(n = 1, m = 0, s = 1){ if(any(s<=0))stop("s must be positive") q <- runif(n) ifelse(q<0.5,s*log(2*q)+m,-s*log(2*(1-q))+m)} ### Levy distribution ### plevy <- function(q, m=0, s=1){ if(any(q<=m))stop("some y <= m") if(any(s<=0))stop("s must be positive") 2*(1-pnorm(1/sqrt((q-m)/s)))} dlevy <- function(y, m=0, s=1, log=FALSE){ if(any(y<=m))stop("some y <= m") if(any(s<=0))stop("s must be positive") #sqrt(s/(2*pi*(y-m)^3))*exp(-s/(2*(y-m)))} tmp <- log(s/(2*pi))/2-3*log(y-m)/2-s/(2*(y-m)) if(!log)tmp <- exp(tmp) tmp} qlevy <- function(p, m=0, s=1){ if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(s<0))stop("s must be positive") s/qnorm(1-p/2)^2+m} rlevy <- function(n=1, m=0, s=1) { if(any(s<0))stop("s must be positive") s/qnorm(1-runif(n)/2)^2+m} ### Pareto distribution ### ppareto <- function(q, m, s){ if(any(q<0))stop("q must be >= 0") if(any(m<=0))stop("m must be 
positive") if(any(s<=1))stop("s must be > 1") 1-(1+q/(m*(s-1)))^-s} dpareto <- function(y, m, s, log=FALSE){ if(any(y<0))stop("y must be >= 0") if(any(m<=0))stop("m must be positive") if(any(s<=1))stop("s must be > 1") m <- m*(s-1) tmp <- log(s)-(s+1)*log(1+y/m)-log(m) if(!log)tmp <- exp(tmp) tmp} qpareto <- function(p, m, s){ if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<=1))stop("s must be >1") ((1-p)^(-1/s)-1)*m*(s-1)} rpareto <- function(n=1, m, s){ if(any(m<=0))stop("m must be positive") if(any(s<=1))stop("s must be >1") ((1-runif(n))^(-1/s)-1)*m*(s-1)} ### simplex distribution ### psimplex <- function(q, m, s){ if(any(q<=0)||any(q>=1))stop("q must contain values between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must contain values between 0 and 1") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)!=1)stop("m has incorrect length") else m <- rep(m,len)} if(length(s)!=len){ if(length(s)!=1)stop("s has incorrect length") else s <- rep(s,len)} z <- .C("psimplex_c", as.double(q), as.double(m), as.double(s), as.double(1), len=as.integer(len), eps=as.double(1.0e-6), pts=as.integer(5), max=as.integer(16), err=integer(1), res=double(len), ## DUP=FALSE, PACKAGE="rmutil") if(z$err==1)warning("Unable to allocate memory for integration") if(z$err==2)warning("Division by zero in integration") else if(z$err==3)warning("No convergence in integration") z$res} dsimplex <- function(y, m, s, log=FALSE){ if(any(y<=0)||any(y>=1))stop("y must contain values between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must contain values between 0 and 1") if(any(s<=0))stop("s must be positive") tmp <- -((y-m)/(m*(1-m)))^2/(2*y*(1-y)*s)-(log(2*pi*s)+3*(log(y)+log(1-y)))/2 if(!log)tmp <- exp(tmp) tmp} qsimplex <- function(p, m, s){ h <- function(y).C("psimplex_c", as.double(y), as.double(m[i]), as.double(s[i]), as.double(1), len=as.integer(1), eps=as.double(1.0e-6), pts=as.integer(5), max=as.integer(16), err=integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must contain values between 0 and 1") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0.000001,0.999999) while(h(interval[1])*h(interval[2])>0) interval <- c(interval[1]/10,1-interval[1]/10) tmp[i] <- uniroot(h,interval)$root} tmp} rsimplex <- function(n=1, m, s) qsimplex(runif(n),m=m,s=s) ### two-sided power distribution ### ptwosidedpower <- function(q, m, s=2){ if(any(q<=0)||any(q>=1))stop("q must contain values between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must contain values between 0 and 1") if(any(s<=0))stop("s must be positive") ifelse(q=1))stop("y must contain values between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must contain values between 0 and 1") if(any(s<=0))stop("s must be positive") tmp <- log(s)+(s-1)*ifelse(y1))stop("p must lie between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must contain values between 0 and 1") if(any(s<0))stop("s must be positive") ifelse(p 0 ### pboxcox <- 
function(q, m, s=1, f=1){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") norm <- sign(f)*pnorm(0,m,sqrt(s)) (pnorm(q^f/f,m,sqrt(s))-(f>0)*norm)/(1-(f<0)-norm)} dboxcox <- function(y, m, s=1, f=1, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") norm <- sign(f)*pnorm(0,m,sqrt(s)) tmp <- (f-1)*log(y)+dnorm(y^f/f,m,sqrt(s),log=TRUE)-log(1-(f<0)-norm) if(!log)tmp <- exp(tmp) tmp} qboxcox <- function(p, m, s=1, f=1){ h <- function(y){ norm <- sign(f[i])*pnorm(0,m[i],sqrt(s[i])) (pnorm(y^f[i]/f[i],m[i],sqrt(s[i]))-(f[i]>0)*norm)/ (1-(f[i]<0)-norm)-p[i]} if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s),length(f)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} if(length(f)!=len){ if(length(f)==1)f <- rep(f,len) else stop("length of f incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(.Machine$double.xmin,20) while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root} tmp} rboxcox <- function(n=1, m, s=1, f=1) qboxcox(runif(n),m=m,s=s,f=f) ### Burr distribution ### pburr <- function(q, m, s, f){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") 1-(1+(q/m)^s)^-f} dburr <- function(y, m, s, f, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") y1 <- y/m tmp <- log(f*s)+(s-1)*log(y1)-log(m)-(f+1)*log(1+y1^s) if(!log)tmp <- exp(tmp) tmp} qburr <- function(p, m, s, f){ if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") ((1-p)^(-1/f)-1)^(1/s)*m} rburr <- function(n=1, m, s, f){ if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") ((1-runif(n))^(-1/f)-1)^(1/s)*m} ### generalized extreme value distribution ### normed to make it a real distribution with y > 0 ### pgextval <- function(q, s, m, f){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") norm <- sign(f)*exp(-m^-s) ind <- f>0 (pweibull(exp(q^f/f),s,m)-ind+ind*norm)/(1-ind+norm)} dgextval <- function(y, s, m, f, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") norm <- sign(f)*exp(-m^-s) y1 <- exp(y^f/f) tmp <- (f-1)*log(y)+log(y1)+dweibull(y1,s,m,log=TRUE)-log(1-(f>0)+norm) if(!log)tmp <- exp(tmp) tmp} qgextval <- function(p, s, m, f){ if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") norm <- sign(f)*exp(-m^-s) ind <- f>0 (f*log(qweibull(p*(1-ind+norm)+ind-ind*norm,s,m)))^(1/f)} rgextval <- function(n=1, s, m, f){ if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") norm <- sign(f)*exp(-m^-s) ind <- f>0 
(f*log(qweibull(runif(n)*(1-ind+norm)+ind-ind*norm,s,m)))^(1/f)} ### generalized gamma distribution ### pggamma <- function(q, s, m, f){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") pgamma(q^f,s,scale=(m/s)^f)} dggamma <- function(y, s, m, f, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") tmp <- log(f)+(f-1)*log(y)+dgamma(y^f,s,scale=(m/s)^f,log=TRUE) if(!log)tmp <- exp(tmp) tmp} qggamma <- function(p, s, m, f) { if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") qgamma(p,s,scale=(m/s)^f)^(1/f)} rggamma <- function(n=1, s, m, f){ if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") qgamma(runif(n),s,scale=(m/s)^f)^(1/f)} ### generalized inverse Gaussian distribution ### pginvgauss <- function(q, m, s, f){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)!=1)stop("m has incorrect length") else m <- rep(m,len)} if(length(s)!=len){ if(length(s)!=1)stop("s has incorrect length") else s <- rep(s,len)} if(length(f)!=len){ if(length(f)!=1)stop("f has incorrect length") else f <- rep(f,len)} z <- .C("pginvgauss_c", as.double(q), as.double(m), as.double(s), as.double(f), len=as.integer(len), eps=as.double(1.0e-6), pts=as.integer(5), max=as.integer(16), err=integer(1), res=double(len), ## DUP=FALSE, PACKAGE="rmutil") if(z$err==1)warning("Unable to allocate memory for integration") if(z$err==2)warning("Division by zero in integration") else if(z$err==3)warning("No convergence in integration") z$res} dginvgauss <- function(y, m, s, f, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") tmp <- (f-1)*log(y)-(1/y+y/m^2)/(2*s)-f*log(m)-log(2*besselK(1/(s*m),abs(f))) if(!log)tmp <- exp(tmp) tmp} qginvgauss <- function(p, m, s, f){ h <- function(y).C("pginvgauss_c", as.double(y), as.double(m[i]), as.double(s[i]), as.double(f[i]), len=as.integer(1), eps=as.double(1.0e-6), pts=as.integer(5), max=as.integer(16), err=integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") len <- max(length(p),length(m),length(s),length(f)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} if(length(f)!=len){ if(length(f)==1)f <- rep(f,len) else stop("length of f incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(.Machine$double.xmin,20) while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root} tmp} rginvgauss <- function(n=1, m, s, f) qginvgauss(runif(n),m=m,s=s,f=f) ### generalized logistic distribution ### pglogis <- function(q, m=0, s=1, f=1){ if(any(s<=0))stop("s must be positive") 
if(any(f<=0))stop("f must be positive") (1+exp(-sqrt(3)*(q-m)/(s*pi)))^-f} dglogis <- function(y, m=0, s=1, f=1, log=FALSE) { if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") y1 <- exp(-sqrt(3)*(y-m)/(s*pi)) tmp <- log(3)/2+log(f*y1)-log(pi*s)-(f+1)*log(1+y1) if(!log)tmp <- exp(tmp) tmp} qglogis <- function(p, m=0, s=1, f=1){ if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") -log(p^(-1/f)-1)*s*pi/sqrt(3)+m} rglogis <- function(n=1, m=0, s=1, f=1){ if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") -log(runif(n)^(-1/f)-1)*s*pi/sqrt(3)+m} ### generalized Weibull distribution ### pgweibull <- function(q, s, m, f){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") (1-exp(-(q/m)^s))^f} dgweibull <- function(y, s, m, f, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") y1 <- exp(-(y/m)^s) tmp <- log(s*f)+(s-1)*log(y)+(f-1)*log(1-y1)+log(y1)-s*log(m) if(!log)tmp <- exp(tmp) tmp} qgweibull <- function(p, s, m, f){ if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") m*(-log(1-p^(1/f)))^(1/s)} rgweibull <- function(n=1, s, m, f){ if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") m*(-log(1-runif(n)^(1/f)))^(1/s)} ### Hjorth distribution ### phjorth <- function(q, m, s, f){ if(any(q<=0))stop("q must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") 1-(1+s*q)^(-f/s)*exp(-(q/m)^2/2)} dhjorth <- function(y, m, s, f, log=FALSE){ if(any(y<=0))stop("y must contain positive values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") tmp <- -(f/s)*log(1+s*y)-(y/m)^2/2+log(y/m^2+f/(1+s*y)) if(!log)tmp <- exp(tmp) tmp} qhjorth <- function(p, m, s, f){ h <- function(y) 1-(1+s[i]*y)^(-f[i]/s[i])*exp(-(y/m[i])^2/2)-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s),length(f)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} if(length(f)!=len){ if(length(f)==1)f <- rep(f,len) else stop("length of f incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(.Machine$double.xmin,20) while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root} tmp} rhjorth <- function(n=1, m, s, f) qhjorth(runif(n),m=m,s=s,f=f) ### power exponential distribution ### ppowexp <- function(q, m=0, s=1, f=1){ if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)!=1)stop("m has incorrect length") else m <- rep(m,len)} if(length(s)!=len){ if(length(s)!=1)stop("s has incorrect length") else s <- rep(s,len)} 
if(length(f)!=len){ if(length(f)!=1)stop("f has incorrect length") else f <- rep(f,len)} z <- .C("ppowexp_c", as.double(q), as.double(m), as.double(s), as.double(f), len=as.integer(len), eps=as.double(1.0e-6), pts=as.integer(5), max=as.integer(16), err=integer(1), res=double(len), ## DUP=FALSE, PACKAGE="rmutil") if(z$err==1)warning("Unable to allocate memory for integration") if(z$err==2)warning("Division by zero in integration") else if(z$err==3)warning("No convergence in integration") ifelse(q-m>0,0.5+z$res,0.5-z$res)} dpowexp <- function(y, m=0, s=1, f=1, log=FALSE){ if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") s <- sqrt(s) b <- 1+1/(2*f) tmp <- -(abs(y-m)/s)^(2*f)/2-log(s)-lgamma(b)-b*log(2) if(!log)tmp <- exp(tmp) tmp} qpowexp <- function(p, m=0, s=1, f=1){ h <- function(y) { z <- .C("ppowexp_c", as.double(y), as.double(m[i]), as.double(s[i]), as.double(f[i]), len=as.integer(1), eps=as.double(1.0e-6), pts=as.integer(5), max=as.integer(16), err=integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res if(y-m[i]>0) 0.5+z-p[i] else 0.5-z-p[i]} if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(s<0))stop("s must be positive") if(any(f<=0))stop("f must be positive") len <- max(length(p),length(m),length(s),length(f)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} if(length(f)!=len){ if(length(f)==1)f <- rep(f,len) else stop("length of f incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- m[i]+s[i]*c(-2,2) while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root} tmp} rpowexp <- function(n=1, m=0, s=1, f=1) qpowexp(runif(n),m=m,s=s,f=f) ### skew Laplace distribution ### pskewlaplace <- function(q, m=0, s=1, f=1){ if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") u <- (q-m)/s ifelse(u>0,1-exp(-f*abs(u))/(1+f^2),f^2*exp(-abs(u)/f)/(1+f^2))} dskewlaplace <- function(y, m=0, s=1, f=1, log=FALSE){ if(any(s<=0))stop("s must be positive") if(any(f<=0))stop("f must be positive") tmp <- log(f)+ifelse(y>m,-f*(y-m),(y-m)/f)/s-log((1+f^2)*s) if(!log)tmp <- exp(tmp) tmp} qskewlaplace <- function(p, m=0, s=1, f=1){ if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(s<0))stop("s must be positive") if(any(f<=0))stop("f must be positive") ifelse(p<0.5,f*s*log((1+f^2)*p/f^2)+m,-s*log((1+f^2)*(1-p))/f+m)} rskewlaplace <- function(n=1, m=0, s=1, f=1){ if(any(s<0))stop("s must be positive") if(any(f<=0))stop("f must be positive") q <- runif(n) ifelse(q<0.5,f*s*log((1+f^2)*q/f^2)+m,-s*log((1+f^2)*(1-q))/f+m)} ### discrete (overdispersed) two-parameter distributions ### ### beta-binomial distribution ### pbetabinom <- function(q, size, m, s){ ##BEGIN from @hennerw in issue #5 # if (any(q > size)){ # # Updating to correctly deal with this siduation # # stop("q must be <= size") # if(all(q>size)) return(rep(1,len)) # Val <- q<= size # out <- rep(1,len) # out[Val] <- pbetabinom_c(q[Val],size[Val],m[Val],s[Val]) # return(out) # } # if (any(q < 0)){ # # stop("q must contain non-negative values") # if(all(q < 0)) return(rep(0,len)) # Val <- q>=0 # out <- rep(0,len) # out[Val] <- pbetabinom_c(q[Val],size[Val],m[Val],s[Val]) # return(out) # } ##END from @hennerw in issue #5 if(any(q<0)){message("Negative values of q detected. 
`pbetabinom` returns 0 for such values.")}#; q[q<0] <- 0} if(any(size<0))stop("size must contain non-negative values") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s),length(size)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(size)!=len){ if(length(size)==1)size <- rep(size,len) else stop("size must be the same length as q")} if(any(q>size)){message("Elements of q that were greater than size detected. `pbetabinom` returns 1 for such values.")}#; q[q>size] <- size[q>size]} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("m and q must have the same length")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("s and q must have the same length")} t <- s*m u <- s*(1-m) res <- vector("numeric",length(q)) for(i in 1:length(q)){ if(q[i] < 0){ res[i] <- 0 } else if(q[i]>size[i]){ res[i] <- 1 }else{ qq <- 0:q[i] res[i] <- sum(exp(lbeta(qq+t[i],size[i]-qq+u[i])- lbeta(t[i],u[i])+lchoose(size[i],qq))) } } res} dbetabinom <- function(y, size, m, s, log=FALSE){ if(any(y<0)){message("Negative values of y replaced with -1 to yield 0 value density"); y[y<0]<- -1} if(any(size<0))stop("size must contain non-negative values") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") ly <- max(length(y),length(m),length(s),length(size)) if(length(y)!=ly){ if(length(y)==1)y <- rep(y,ly) else stop("length of y incorrect")} if(length(size)!=ly){ if(length(size)==1)size <- rep(size,ly) else stop("size must be the same length as y")} if(any(y>size)){message("Elements of y exist that are greater than size");} if(length(m)!=ly){ if(length(m)==1)m <- rep(m,ly) else stop("m and y must have the same length")} if(length(s)!=ly){ if(length(s)==1)s <- rep(s,ly) else stop("s and y must have the same length")} t <- s*m u <- s*(1-m) tmp <- lbeta(y+t,size-y+u)-lbeta(t,u)+lchoose(size,y) if(!log)tmp <- exp(tmp) tmp} qbetabinom <- function(p, size, m, s){ h <- function(y){ t <- s[i]*m[i] u <- s[i]*(1-m[i]) pp <- 0:y sum(exp(lbeta(pp+t,size[i]-pp+u)- lbeta(t,u)+lchoose(size[i],pp)))-p[i]} if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s),length(size)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(size)!=len){ if(length(size)==1)size <- rep(size,len) else stop("length of size incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,size[i]) tmp[i] <- if(h(interval[1])*h(interval[2])>0)0 else uniroot(h,interval)$root} round(tmp)} rbetabinom <- function(n=1, size, m, s) qbetabinom(runif(n),size=size,m=m,s=s) ### double binomial distribution ### pdoublebinom <- function(q, size, m, s){ if(any(q<0))stop("q must contain non-negative values") if(any(size<0))stop("n must contain non-negative values") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s),length(size)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(size)!=len){ if(length(size)==1)size <- rep(size,len) else stop("size must be the same length as 
q")} if(any(q>size))stop("q must be <= size") if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("m and q must have the same length")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("s and q must have the same length")} .C("pdb", as.integer(q), as.integer(size), as.double(m), as.double(s), as.integer(length(q)), res=double(length(q)), ## DUP=FALSE, PACKAGE="rmutil")$res} ddoublebinom <- function(y, size, m, s, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(size<0))stop("size must contain non-negative values") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") ly <- max(length(y),length(m),length(s),length(size)) if(length(y)!=ly){ if(length(y)==1)y <- rep(y,ly) else stop("length of y incorrect")} if(length(size)!=ly){ if(length(size)==1)size <- rep(size,ly) else stop("size must be the same length as y")} if(any(y>size))stop("y must be <= size") if(length(m)!=ly){ if(length(m)==1)m <- rep(m,ly) else stop("m and y must have the same length")} if(length(s)!=ly){ if(length(s)==1)s <- rep(s,ly) else stop("s and y must have the same length")} tmp <- .C("ddb", as.integer(y), as.integer(size), as.double(m), as.double(s), as.integer(length(y)), as.double(rep(1,length(y))), res=double(length(y)), ## DUP=FALSE, PACKAGE="rmutil")$res if(!log)tmp <- exp(tmp) tmp} qdoublebinom <- function(p, size, m, s){ h <- function(y) .C("pdb", as.integer(y), as.integer(size[i]), as.double(m[i]), as.double(s[i]), as.integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") len <- max(length(p),length(m),length(s),length(size)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(size)!=len){ if(length(size)==1)size <- rep(size,len) else stop("length of size incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,size[i]) tmp[i] <- if(h(interval[1])*h(interval[2])>0)0 else uniroot(h,interval)$root} round(tmp)} rdoublebinom <- function(n=1, size, m, s) qdoublebinom(runif(n),size=size,m=m,s=s) ### multiplicative binomial distribution ### pmultbinom <- function(q, size, m, s){ if(any(q<0))stop("q must contain non-negative values") if(any(size<0))stop("size must contain non-negative values") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s),length(size)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(size)!=len){ if(length(size)==1)size <- rep(size,len) else stop("size must be the same length as q")} if(any(q>size))stop("q must be <= size") if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("m and q must have the same length")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("s and q must have the same length")} .C("pmb", as.integer(q), as.integer(size), as.double(m), as.double(s), as.integer(length(q)), res=double(length(q)), ## DUP=FALSE, PACKAGE="rmutil")$res} dmultbinom <- function(y, size, m, s, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(size<0))stop("size must contain non-negative values") if(any(m<=0)||any(m>=1))stop("m must 
lie between 0 and 1") if(any(s<=0))stop("s must be positive") ly <- max(length(y),length(m),length(s),length(size)) if(length(y)!=ly){ if(length(y)==1)y <- rep(y,ly) else stop("length of y incorrect")} if(length(size)!=ly){ if(length(size)==1)size <- rep(size,ly) else stop("size must be the same length as y")} if(any(y>size))stop("y must be <= size") if(length(m)!=ly){ if(length(m)==1)m <- rep(m,ly) else stop("m and y must have the same length")} if(length(s)!=ly){ if(length(s)==1)s <- rep(s,ly) else stop("s and y must have the same length")} tmp <- .C("dmb", as.integer(y), as.integer(size), as.double(m), as.double(s), as.integer(length(y)), as.double(rep(1,length(y))), res=double(length(y)), ## DUP=FALSE, PACKAGE="rmutil")$res if(!log)tmp <- exp(tmp) tmp} qmultbinom <- function(p, size, m, s){ h <- function(y).C("pmb", as.integer(y), as.integer(size[i]), as.double(m[i]), as.double(s[i]), as.integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0)||any(m>=1))stop("m must lie between 0 and 1") if(any(s<=0))stop("s must be positive") len <- max(length(p),length(m),length(s),length(size)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(size)!=len){ if(length(size)==1)size <- rep(size,len) else stop("length of size incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,size[i]) tmp[i] <- if(h(interval[1])*h(interval[2])>0)0 else uniroot(h,interval)$root} round(tmp)} rmultbinom <- function(n=1, size, m, s) qmultbinom(runif(n),size=size,m=m,s=s) ### double Poisson distribution ### pdoublepois <- function(q, m, s){ if(any(q<0))stop("q must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("m and q must have the same length")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("s and q must have the same length")} .C("pdp", as.integer(q), as.integer(3*max(c(q,100))), as.double(m), as.double(s), as.integer(length(q)), res=double(length(q)), ## DUP=FALSE, PACKAGE="rmutil")$res} ddoublepois <- function(y, m, s, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") ly <- max(length(y),length(m),length(s)) if(length(y)!=ly){ if(length(y)==1)y <- rep(y,ly) else stop("length of y incorrect")} if(length(m)!=ly){ if(length(m)==1)m <- rep(m,ly) else stop("m and y must have the same length")} if(length(s)!=ly){ if(length(s)==1)s <- rep(s,ly) else stop("s and y must have the same length")} tmp <- .C("ddp", as.integer(y), as.integer(3*max(c(y,100))), as.double(m), as.double(s), as.integer(length(y)), as.double(rep(1,length(y))), res=double(length(y)), ## DUP=FALSE, PACKAGE="rmutil")$res if(!log)tmp <- exp(tmp) tmp} qdoublepois <- function(p, m, s){ h <- function(y).C("pdp", as.integer(y), as.integer(3*max(c(y,100))), as.double(m[i]), as.double(s[i]), as.integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<0))stop("m must be positive") if(any(s<0))stop("s must be 
positive") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,20) if(h(interval[1])*h(interval[2])>0&&h(interval[1])>0)tmp[i] <- 0 else { while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root}} round(tmp)} rdoublepois <- function(n=1, m, s) qdoublepois(runif(n),m=m,s=s) ### multiplicative Poisson distribution ### pmultpois <- function(q, m, s){ if(any(q<0))stop("q must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0|s>1))stop("s must be positive and <= 1") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("m and q must have the same length")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("s and q must have the same length")} .C("pmp", as.integer(q), as.integer(3*max(c(q,100))), as.double(m), as.double(s), as.integer(length(q)), res=double(length(q)), ## DUP=FALSE, PACKAGE="rmutil")$res} dmultpois <- function(y, m, s, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0|s>1))stop("s must be positive and <= 1") ly <- max(length(y),length(m),length(s)) if(length(y)!=ly){ if(length(y)==1)y <- rep(y,ly) else stop("length of y incorrect")} if(length(m)!=ly){ if(length(m)==1)m <- rep(m,ly) else stop("m and y must have the same length")} if(length(s)!=ly){ if(length(s)==1)s <- rep(s,ly) else stop("s and y must have the same length")} tmp <- .C("dmp", as.integer(y), as.integer(3*max(c(y,100))), as.double(m), as.double(s), as.integer(length(y)), as.double(rep(1,length(y))), res=double(length(y)), ## DUP=FALSE, PACKAGE="rmutil")$res if(!log)tmp <- exp(tmp) tmp} qmultpois <- function(p, m, s){ h <- function(y).C("pmp", as.integer(y), as.integer(3*max(c(y,100))), as.double(m[i]), as.double(s[i]), as.integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<0))stop("m must be positive") if(any(s<=0|s>1))stop("s must be positive and <= 1") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,20) if(h(interval[1])*h(interval[2])>0&&h(interval[1])>0)tmp[i] <- 0 else { while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root}} round(tmp)} rmultpois <- function(n=1, m, s) qmultpois(runif(n),m=m,s=s) ### power variance function Poisson distribution ### ppvfpois <- function(q, m, s, f){ if(any(q<0))stop("q must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("m and q must have the same length")} if(length(s)!=len){ 
if(length(s)==1)s <- rep(s,len) else stop("s and q must have the same length")} if(length(f)!=len){ if(length(f)==1)f <- rep(f,len) else stop("f and q must have the same length")} .C("ppvfp", as.integer(q), as.double(m), as.double(s), as.double(f), as.integer(length(q)), res=double(length(q)), ## DUP=FALSE, PACKAGE="rmutil")$res} dpvfpois <- function(y, m, s, f, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") if(any(f>=1))stop("f must be < 1") ly <- max(length(y),length(m),length(s)) if(length(y)!=ly){ if(length(y)==1)y <- rep(y,ly) else stop("length of y incorrect")} if(length(m)!=ly){ if(length(m)==1)m <- rep(m,ly) else stop("m and y must have the same length")} if(length(s)!=ly){ if(length(s)==1)s <- rep(s,ly) else stop("s and y must have the same length")} if(length(f)!=ly){ if(length(f)==1)f <- rep(f,ly) else stop("f and y must have the same length")} tmp <- log(.C("dpvfp", as.integer(y), as.double(m), as.double(s), as.double(f), as.integer(length(y)), as.double(rep(1,length(y))), res=double(length(y)), ## DUP=FALSE, PACKAGE="rmutil")$res) if(!log)tmp <- exp(tmp) tmp} qpvfpois <- function(p, m, s, f){ h <- function(y).C("ppvfp", as.integer(y), as.double(m[i]), as.double(s[i]), as.double(f[i]), as.integer(1), res=double(1), ## DUP=FALSE, PACKAGE="rmutil")$res-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<0))stop("m must be positive") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} if(length(f)!=len){ if(length(f)==1)f <- rep(f,len) else stop("f and p must have the same length")} tmp <- vector(mode="numeric",len) for (i in 1:len){ if(f[i]==0)tmp[i] <- qnbinom(p[i],s[i],m[i]/(m[i]+s[i])) else { interval <- c(0,20) if(h(interval[1])*h(interval[2])>0&&h(interval[1])>0) tmp[i] <- 0 else { while(h(interval[1])*h(interval[2])>0) interval <- 2*interval tmp[i] <- uniroot(h,interval)$root}}} round(tmp)} rpvfpois <- function(n=1, m, s, f) qpvfpois(runif(n),m=m,s=s,f=f) ### gamma count distribution ### pgammacount <- function(q, m, s){ if(any(q<0))stop("q must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") 1-pgamma(m*s,(q+1)*s,1)} dgammacount <- function(y, m, s, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") tmp <- ifelse(y==0,pgamma(m*s,(y+1)*s,1,log.p=TRUE,lower.tail=FALSE), log(pgamma(m*s,y*s+(y==0),1)-pgamma(m*s,(y+1)*s,1))) if(!log)tmp <- exp(tmp) tmp} qgammacount <- function(p, m, s){ h <- function(y) 1-pgamma(m[i]*s[i],(y+1)*s[i],1)-p[i] if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,20) if(h(interval[1])*h(interval[2])>0&&h(interval[1])>0)tmp[i] <- 0 else { 
while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root}} round(tmp)} rgammacount <- function(n=1, m, s) qgammacount(runif(n),m=m,s=s) ### Consul generalized Poisson distribution ### pconsul <- function(q, m, s){ if(any(q<0))stop("q must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") len <- max(length(q),length(m),length(s)) if(length(q)!=len){ if(length(q)==1)q <- rep(q,len) else stop("length of q incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(any(s<0))stop("s must be positive") if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} res <- vector("numeric",length(q)) for(i in 1:length(q)){ qq <- 0:q[i] res[i] <- sum(m[i]*exp(-(m[i]+qq*(s[i]-1))/s[i])* (m[i]+qq*(s[i]-1))^(qq-1)/(s[i]^qq*gamma(qq+1)))} res} dconsul <- function(y, m, s, log=FALSE){ if(any(y<0))stop("y must contain non-negative values") if(any(m<=0))stop("m must be positive") if(any(s<=0))stop("s must be positive") tmp <- log(m)-(m+y*(s-1))/s+(y-1)*log(m+y*(s-1))-y*log(s)-lgamma(y+1) if(!log)tmp <- exp(tmp) tmp} qconsul <- function(p, m, s){ h <- function(y) { pp <- 0:y sum(m[i]*exp(-(m[i]+pp*(s[i]-1))/s[i])*(m[i]+pp*(s[i]-1))^(pp-1)/ (s[i]^pp*gamma(pp+1)))-p[i]} if(any(p<0|p>1))stop("p must lie between 0 and 1") if(any(m<=0))stop("m must be positive") if(any(s<0))stop("s must be positive") len <- max(length(p),length(m),length(s)) if(length(p)!=len){ if(length(p)==1)p <- rep(p,len) else stop("length of p incorrect")} if(length(m)!=len){ if(length(m)==1)m <- rep(m,len) else stop("length of m incorrect")} if(length(s)!=len){ if(length(s)==1)s <- rep(s,len) else stop("length of s incorrect")} tmp <- vector(mode="numeric",len) for (i in 1:len){ interval <- c(0,20) if(h(interval[1])*h(interval[2])>0&&h(interval[1])>0)tmp[i] <- 0 else { while(h(interval[1])*h(interval[2])>0)interval <- 2*interval tmp[i] <- uniroot(h,interval)$root}} round(tmp)} rconsul <- function(n=1, m, s) qconsul(runif(n),m=m,s=s) rmutil/R/util.r0000755000176200001440000000565213425057453013152 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
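### Illustrative check of the count distributions defined above, using the Consul
### generalized Poisson functions as an example. This is a sketch, not part of the
### package: the values of m and s are arbitrary and the block is wrapped in
### if(FALSE) so that it is never run when the file is sourced.
if(FALSE){
	dconsul(0:5, m=4, s=1.5)            # probabilities at 0,...,5
	pconsul(5, m=4, s=1.5)              # matches sum(dconsul(0:5, m=4, s=1.5))
	qconsul(0.5, m=4, s=1.5)            # median, by numerical inversion of pconsul
	set.seed(1)
	mean(rconsul(500, m=4, s=1.5))      # should be close to m
}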
# # SYNOPSIS # # wr(formula, data=NULL, expand=T)) # det(x) # capply(x, index, fcn=sum) # mexp(x, t=1, n=20, k=3) # contr.mean(n, contrasts=TRUE) # # DESCRIPTION # # Utility functions for repeated measurements ### function to find the response vector and design matrix from a W&R formula ### wr <- function(formula, data=NULL, expand=TRUE){ if(is.null(data))data <- parent.frame() else if(!is.data.frame(data)&&!is.environment(data)) data <- if(expand||inherits(data,"tccov"))as.data.frame(data) else as.data.frame(data$ccov) mt <- terms(formula) mf <- model.frame(mt,data=data,na.action=NULL) list(response=model.response(mf,"numeric"), design=model.matrix(mt,mf))} ### a fast simplified version of tapply ### capply <- function(x, index, fcn=sum){ ans <- NULL for(i in split(x,index))ans <- c(ans,fcn(i)) ans} ### matrix exponentiation ### mexp <- function(x, t=1, type="spectral decomposition", n=20, k=3){ if(!is.matrix(x))stop("x must be a matrix") if(dim(x)[1]!=dim(x)[2])stop("x must be a square matrix") type <- match.arg(type,c("spectral decomposition","series approximation")) if(type=="spectral decomposition"){ z <- eigen(t*x,symmetric=FALSE) p <- z$vectors%*%diag(exp(z$values))%*%solve(z$vectors)} else { xx <- x*t/2^k p <- diag(dim(x)[2]) q <- p for(r in 1:n){ q <- xx%*%q/r p <- p+q} for(i in 1:k) p <- p%*%p} p} ### matrix power ### "%^%" <- function(x, p){ if(!is.matrix(x))stop("x must be a matrix") if(dim(x)[1]!=dim(x)[2])stop("x must be a square matrix") z <- eigen(x,symmetric=FALSE) z$vectors%*%diag(z$values^p)%*%solve(z$vectors)} # A function to provide correct constraints about the mean # (correcting contr.sum) contr.mean <- function(n, contrasts=TRUE){ if(length(n) <= 1){ if(is.numeric(n)&&length(n)==1&&n>1)levels <- 1:n else stop("Not enough degrees of freedom to define contrasts")} else levels <- n lenglev <- length(levels) if(contrasts){ cont <- array(0,c(lenglev,lenglev-1), list(levels,levels[1:(lenglev-1)])) cont[col(cont)==row(cont)] <- 1 cont[lenglev,] <- -1} else { cont <- array(0,c(lenglev,lenglev),list(levels,levels)) cont[col(cont) == row(cont)] <- 1} cont} rmutil/R/ghermite.r0000755000176200001440000000335413425057453013776 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
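### Illustrative use of the utility functions defined in util.r above. A minimal
### sketch with arbitrary values, wrapped in if(FALSE) so that it is not executed
### when the package code is sourced.
if(FALSE){
	x <- matrix(c(-2,1,1,-2), nrow=2)
	mexp(x, t=0.5)                       # matrix exponential of 0.5*x (spectral method)
	x %^% 2                              # matrix power: the same as x %*% x
	capply(1:6, rep(1:3, each=2))        # groupwise sums: 3 7 11
	contr.mean(3)                        # constraints about the mean (cf. contr.sum)
	wr(y ~ x1, data=data.frame(y=rnorm(4), x1=gl(2,2)))   # response vector and design matrix
}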
# # SYNOPSIS # # gauss.hermite(points, iterlim=10) # # DESCRIPTION # # Function to compute points and weights for Gauss-Hermite integration gauss.hermite <- function(points, iterlim=10){ x <- w <- rep(0,points) m <- (points+1)/2 for(i in 1:m){ z <- if(i==1)sqrt(2*points+1)-2*(2*points+1)^(-1/6) else if(i==2)z-sqrt(points)/z else if(i==3||i==4)1.9*z-0.9*x[i-2] else 2.0*z-x[i-2] for(j in 1:iterlim){ z1 <- z p <- hermite(points,z) z <- z1-p[1]/p[2] if(abs(z-z1)<=1e-15)break} if(j==iterlim)warning("iteration limit exceeded") x[points+1-i] <- -(x[i] <- z) w[i] <- w[points+1-i] <- 2/p[2]^2} r <- cbind(x*sqrt(2),w/sum(w)) colnames(r) <- c("Points","Weights") r} # orthonormal Hermite polynomials hermite <- function(points, z){ p1 <- 1/pi^0.4 p2 <- 0 for(j in 1:points){ p3 <- p2 p2 <- p1 p1 <- z*sqrt(2.0/j)*p2-sqrt((j-1)/j)*p3} pp <- sqrt(2*points)*p2 c(p1,pp)} rmutil/R/contrast.r0000755000176200001440000000272713425057453014032 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # contr.mean(n, contrasts=TRUE) # # DESCRIPTION # # A function to provide correct constraints about the mean # (correcting contr.sum) contr.mean <- function(n, contrasts=TRUE){ if(length(n) <= 1){ if(is.numeric(n)&&length(n)==1&&n>1)levels <- 1:n else stop("Not enough degrees of freedom to define contrasts")} else levels <- n lenglev <- length(levels) if(contrasts){ cont <- array(0,c(lenglev,lenglev-1), list(levels,levels[1:(lenglev-1)])) cont[col(cont)==row(cont)] <- 1 cont[lenglev,] <- -1} else { cont <- array(0,c(lenglev,lenglev),list(levels,levels)) cont[col(cont) == row(cont)] <- 1} cont} rmutil/R/printrm.r0000755000176200001440000003024413425057453013663 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # DESCRIPTION # # Utility functions for printing nonlinear model results ### standard methods for gnlm models ### weights.gnlm <- function(object, ...) object$prior.weights df.residual.gnlm <- function(object, ...) object$df deviance.gnlm <- function(object, ...) 
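# deviance is taken as twice the minus log likelihood (maxlike holds -log L)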
2*object$maxlike coef.gnlm <- function(object, ...){ z <- object; rm(object) gnlmm <- !is.null(z$points) mix <- z$npm>0||!is.null(z$mix) npl <- z$npl-gnlmm gnlmix <- z$npm>0&&is.null(z$mix) ## bruce added this line if(npl>0){ if(z$distribution=="own"){ cname <- if(is.character(attr(z$likefn,"model"))) attr(z$likefn,"model") else if(length(grep("linear",attr(z$likefn,"parameters")))>0) attr(z$likefn,"parameters")[grep("\\[",attr(z$likefn,"parameters"))] else attr(z$likefn,"parameters")} else { cname <- if(is.character(attr(z$mu,"model")))attr(z$mu,"model") else if(length(grep("linear",attr(z$mu,"parameters")))>0) attr(z$mu,"parameters")[grep("\\[",attr(z$mu,"parameters"))] else attr(z$mu,"parameters")} if(!is.null(z$linmodel[[1]]))cname <- c(cname,z$linmodel[[1]])} if(gnlmm)cname <- c(cname," ") if(z$npm>0&&!is.null(z$mix)){ cname <- c(cname,if(is.character(attr(z$mix,"model"))) attr(z$mix,"model") else if(length(grep("linear",attr(z$mix,"parameters")))>0) attr(z$mix,"parameters")[grep("\\[",attr(z$mix,"parameters"))] else attr(z$mix,"parameters")) if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]])} if(z$common||z$nps>0){ if(!is.null(z$shape))cname <- c(cname,if(mix&&!gnlmix)" " else if(is.character(attr(z$shape,"model"))) attr(z$shape,"model") else if(length(grep("linear",attr(z$shape,"parameters")))>0|| length(grep("mu",attr(z$shape,"parameters")))>0) attr(z$shape,"parameters")[grep("\\[",attr(z$shape,"parameters"))] else attr(z$shape,"parameters")) if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]])} if(z$npf>0||!is.null(z$family)){ cname <- c(cname,if(is.character(attr(z$family,"model"))) attr(z$family,"model") else if(length(grep("linear",attr(z$family,"parameters")))>0) attr(z$family,"parameters")[grep("\\[",attr(z$family,"parameters"))] else attr(z$family,"parameters")) if(!is.null(z$linmodel[[3]]))cname <- c(cname,z$linmodel[[3]])} if(z$common)cname <- unique(cname) if(mix)cname <- c(cname," ") coef <- z$coef names(coef) <- cname coef} vcov.gnlm <- function(object, ...){ z <- object; rm(object) gnlmm <- !is.null(z$points) mix <- z$npm>0||!is.null(z$mix) npl <- z$npl-gnlmm gnlmix <- z$npm>0&&is.null(z$mix) ## bruce added this line if(npl>0){ if(z$distribution=="own"){ cname <- if(is.character(attr(z$likefn,"model"))) attr(z$likefn,"model") else if(length(grep("linear",attr(z$likefn,"parameters")))>0) attr(z$likefn,"parameters")[grep("\\[",attr(z$likefn,"parameters"))] else attr(z$likefn,"parameters")} else { cname <- if(is.character(attr(z$mu,"model")))attr(z$mu,"model") else if(length(grep("linear",attr(z$mu,"parameters")))>0) attr(z$mu,"parameters")[grep("\\[",attr(z$mu,"parameters"))] else attr(z$mu,"parameters")} if(!is.null(z$linmodel[[1]]))cname <- c(cname,z$linmodel[[1]])} if(gnlmm)cname <- c(cname," ") if(z$npm>0&&!is.null(z$mix)){ cname <- c(cname,if(is.character(attr(z$mix,"model"))) attr(z$mix,"model") else if(length(grep("linear",attr(z$mix,"parameters")))>0) attr(z$mix,"parameters")[grep("\\[",attr(z$mix,"parameters"))] else attr(z$mix,"parameters")) if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]])} if(z$common||z$nps>0){ if(!is.null(z$shape))cname <- c(cname,if(mix&&!gnlmix)" " else if(is.character(attr(z$shape,"model"))) attr(z$shape,"model") else if(length(grep("linear",attr(z$shape,"parameters")))>0|| length(grep("mu",attr(z$shape,"parameters")))>0) attr(z$shape,"parameters")[grep("\\[",attr(z$shape,"parameters"))] else attr(z$shape,"parameters")) if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]])} 
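# append names for any family-function parameters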
if(z$npf>0||!is.null(z$family)){ cname <- c(cname,if(is.character(attr(z$family,"model"))) attr(z$family,"model") else if(length(grep("linear",attr(z$family,"parameters")))>0) attr(z$family,"parameters")[grep("\\[",attr(z$family,"parameters"))] else attr(z$family,"parameters")) if(!is.null(z$linmodel[[3]]))cname <- c(cname,z$linmodel[[3]])} if(z$common)cname <- unique(cname) if(mix)cname <- c(cname," ") vcov <- z$cov rownames(vcov) <- cname colnames(vcov) <- cname vcov} ### print function for bnlr, gnlr, gnlr3, gnlmm, gnlmm3, and fmr ### print.gnlm <- function(x,digits=max(4,.Options$digits-3),correlation=TRUE, ...) { z <- x; rm(x) sht <- z$nps>0||!is.null(z$shape) mix <- z$npm>0||!is.null(z$mix) gnlmm <- !is.null(z$points) gnlmix <- z$npm>0&&is.null(z$mix) censor <- if(mix&&!gnlmix)!is.null(z$censor) else z$censor npl <- z$npl-gnlmm np1 <- z$npl+1 np1a <- z$npl+z$npm*(!gnlmix)+1 np2 <- z$npl+z$npm*(!gnlmix)+z$nps np3 <- np2+1 np <- z$npl+z$npm+z$nps+z$npf cat("\nCall:",deparse(z$call),sep="\n") cat("\n") if(z$code>2)cat("Warning: no convergence - error",z$code,"\n\n") if(mix&&censor&&!is.logical(z$censor))cat(z$censor,"") if(censor)cat("censored ") if(z$npf>0&&(z$dist=="inverse Gauss"||z$dist=="logistic"||z$dist=="gamma"|| z$dist=="Weibull"||z$dist=="extreme value"))cat("generalized ") if(!is.null(z$dist))cat(z$dist,"distribution\n\n") else if(!is.null(z$link))cat(z$link,"link\n\n") if(!is.null(z$mixture))cat(z$mixture,"mixing distribution\n\n") if(gnlmm){ cat(" with normal mixing distribution on",z$scale,"scale\n") cat(" (",z$points," point Gauss-Hermite integration)\n\n",sep="")} cat("Response:",z$respname,"\n\n") cat("Log likelihood function:\n") if(z$distribution=="own"){ if(!is.null(attr(z$likefn,"formula"))) cat(deparse(attr(z$likefn,"formula")),sep="\n") else if(!is.null(attr(z$likefn,"model"))){ t <- deparse(attr(z$likefn,"model")) t[1] <- sub("expression\\(","",t[1]) t[length(t)] <- sub("\\)$","",t[length(t)]) cat(t,sep="\n")}} else { t <- deparse(z$likefn) cat(t[2:length(t)],"",sep="\n")} if((z$npl>0||!is.null(z$mu))&&z$distribution!="own"){ cat("Location function:\n") if(!is.null(attr(z$mu,"formula"))) cat(deparse(attr(z$mu,"formula")),sep="\n") else if(!is.null(attr(z$mu,"model"))){ t <- deparse(attr(z$mu,"model")) t[1] <- sub("expression\\(","",t[1]) t[length(t)] <- sub("\\)$","",t[length(t)]) cat(t,sep="\n")} if(!is.null(z$linear[[1]])){ cat("Linear part:\n") print(z$linear[[1]])}} if(mix){ if(!is.null(z$mix))cat("\nMixture function:\n") if(!is.null(attr(z$mix,"formula"))) cat(deparse(attr(z$mix,"formula")),sep="\n") else if(!is.null(attr(z$mix,"model"))){ t <- deparse(attr(z$mix,"model")) t[1] <- sub("expression\\(","",t[1]) t[length(t)] <- sub("\\)$","",t[length(t)]) cat(t,sep="\n")} if(!is.null(z$linear[[2]])){ cat("Linear part:\n") print(z$linear[[2]])}} if(sht){ if(!mix){ cat("\nLog shape function:\n") if(!is.null(attr(z$shape,"formula"))) cat(deparse(attr(z$shape,"formula")),sep="\n") else if(!is.null(attr(z$shape,"model"))){ t <- deparse(attr(z$shape,"model")) t[1] <- sub("expression\\(","",t[1]) t[length(t)] <- sub("\\)$","",t[length(t)]) cat(t,sep="\n")} if(!is.null(z$linear[[2]])){ cat("Linear part:\n") print(z$linear[[2]])}} if(!is.null(z$family)){ cat("\n(Log) family function:\n") if(!is.null(attr(z$family,"formula"))) cat(deparse(attr(z$family,"formula")),sep="\n") else if(!is.null(attr(z$family,"model"))){ t <- deparse(attr(z$family,"model")) t[1] <- sub("expression\\(","",t[1]) t[length(t)] <- sub("\\)$","",t[length(t)]) cat(t,sep="\n")} 
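# print the linear part of the family function, if any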
if(!is.null(z$linear[[3]])){ cat("Linear part:\n") print(z$linear[[3]])}}} cat("\n-Log likelihood ",z$maxlike,"\n") cat("Degrees of freedom",z$df,"\n") cat("AIC ",z$aic,"\n") cat("Iterations ",z$iterations,"\n\n") if(npl>0){ if(z$common)cat("Common parameters:\n") else if(z$distribution=="own")cat("Model parameters:\n") else cat("Location parameters:\n") if(z$distribution=="own"){ cname <- if(is.character(attr(z$likefn,"model"))) attr(z$likefn,"model") else if(length(grep("linear",attr(z$likefn,"parameters")))>0) attr(z$likefn,"parameters")[grep("\\[",attr(z$likefn,"parameters"))] else attr(z$likefn,"parameters")} else { cname <- if(is.character(attr(z$mu,"model")))attr(z$mu,"model") else if(length(grep("linear",attr(z$mu,"parameters")))>0) attr(z$mu,"parameters")[grep("\\[",attr(z$mu,"parameters"))] else attr(z$mu,"parameters")} if(!is.null(z$linmodel[[1]]))cname <- c(cname,z$linmodel[[1]]) coef.table <- cbind(z$coefficients[1:npl],z$se[1:npl]) if(!z$common){ dimnames(coef.table) <- list(cname, c("estimate", "se")) print.default(coef.table,digits=digits,print.gap=2) cname <- coef.table <- NULL}} if(z$npm>0&&!is.null(z$mix)){ if(!z$common)cat("\nMixture parameters:\n") cname <- c(cname,if(is.character(attr(z$mix,"model"))) attr(z$mix,"model") else if(length(grep("linear",attr(z$mix,"parameters")))>0) attr(z$mix,"parameters")[grep("\\[",attr(z$mix,"parameters"))] else attr(z$mix,"parameters")) if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]]) if(!z$common)coef.table <- cbind(z$coefficients[np1:(np-sht)], z$se[np1:(np-sht)]) dimnames(coef.table) <- list(cname, c("estimate", "se")) print.default(coef.table,digits=digits,print.gap=2) cname <- coef.table <- NULL} if(z$common||z$nps>0){ if(!is.null(z$shape))cname <- c(cname,if(mix&&!gnlmix)" " else if(is.character(attr(z$shape,"model"))) attr(z$shape,"model") else if(length(grep("linear",attr(z$shape,"parameters")))>0|| length(grep("mu",attr(z$shape,"parameters")))>0) attr(z$shape,"parameters")[grep("\\[",attr(z$shape,"parameters"))] else attr(z$shape,"parameters")) if(!is.null(z$linmodel[[2]]))cname <- c(cname,z$linmodel[[2]]) if(!z$common)coef.table <- cbind(z$coefficients[np1a:np2], z$se[np1a:np2]) if(z$common&&is.null(z$family)){ dimnames(coef.table) <- list(unique(cname),c("estimate","se")) print.default(coef.table,digits=digits,print.gap=2)} if(is.null(z$shape)&&z$nps==1){ coef.table <- cbind(z$coefficients[np2],z$se[np2]) cname <- " "}} if(gnlmm){ cat("\nMixing standard deviation:\n") coef.table2 <- cbind(z$coefficients[z$npl],z$se[z$npl]) dimnames(coef.table2) <- list(" ", c("estimate", "se")) print.default(coef.table2,digits=digits,print.gap=2)} if(z$nps>0&&(!z$common||mix)){ cat("\nShape parameters:\n") dimnames(coef.table) <- list(cname,c("estimate","se")) print.default(coef.table,digits=digits,print.gap=2) cname <- coef.table <- NULL} if(z$npf>0||!is.null(z$family)){ if(!z$common)cat("\nFamily parameters:\n") cname <- c(cname,if(is.character(attr(z$family,"model"))) attr(z$family,"model") else if(length(grep("linear",attr(z$family,"parameters")))>0) attr(z$family,"parameters")[grep("\\[",attr(z$family,"parameters"))] else attr(z$family,"parameters")) if(!is.null(z$linmodel[[3]]))cname <- c(cname,z$linmodel[[3]]) if(z$common){ dimnames(coef.table) <- list(unique(cname),c("estimate","se")) print.default(coef.table,digits=digits,print.gap=2)} else { coef.table <- cbind(z$coefficients[np3:np],z$se[np3:np]) dimnames(coef.table) <- list(cname,c("estimate","se")) print.default(coef.table,digits=digits,print.gap=2)}} 
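# mixing dispersion parameter: models with npm>0 but no mixture function (gnlmix)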
if(z$npm>0&&is.null(z$mix)){ cat("\nMixing dispersion parameter:\n") coef.table <- cbind(z$coefficients[np],z$se[np]) dimnames(coef.table) <- list(" ", c("estimate", "se")) print.default(coef.table,digits=digits,print.gap=2)} if(np>1&&correlation){ cat("\nCorrelations:\n") dimnames(z$corr) <- list(seq(1,np),seq(1,np)) print.default(z$corr,digits=digits)} invisible(z)} rmutil/R/int.r0000755000176200001440000002164114175120022012745 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # int(f, a=-Inf, b=Inf, type="Romberg", eps=0.0001, # max=NULL, d=NULL, p=0) # # DESCRIPTION # # A function to perform vectorized Romberg integration # Now using LazyLoad: true in DESCRIPTION # http://stackoverflow.com/a/4369551/2727349 #.First.lib <- function(lib, pkg) # library.dynam("rmutil", pkg, lib) ### ### vectorized one-dimensional integration ### int <- function(f, a=-Inf, b=Inf, type="Romberg", eps=0.0001, max=NULL, d=NULL, p=0){ # # function to call the C code # int1 <- function(ff, aa, bb){ envir2 <- environment(fun=ff) z <- .Call("romberg_sexp", ff, as.double(aa), as.double(bb), len=as.integer(len), eps=as.double(eps), pts=as.integer(d), max=as.integer(max), err=integer(1), envir2, PACKAGE="rmutil") z } # # check algorithm to be used and initialize parameters # type <- match.arg(type,c("Romberg","TOMS614")) if(is.null(max))max <- if(type=="Romberg") 16 else 100 if(is.null(d))d <- if(type=="Romberg") 5 else 1 # # check function and integration limits # if(length(formals(f))!=1)stop("f must have one argument") if(any(a==Inf))stop("lower bound cannot be Inf") if(any(b==-Inf))stop("upper bound cannot be -Inf") if((length(a)>1&&any(a==-Inf)&&all(a!=-Inf))||(length(b)>1&&any(b==Inf)&&all(b!=Inf)))stop("int cannot be vectorized with some limits infinite") # # determine length of vector to be integrated # if(all(a!=-Inf)){ if(all(b!=Inf)){ if(any(a>=b))stop("some a>=b") len <- length(f((a+b)/2))} else len <- length(f(a+1))} else if(all(b!=Inf))len <- length(f(b-1)) else len <- length(f(0)) if(len>1&&type!="Romberg")stop("vector functions only allowed with Romberg") # # if a vector and there are infinite limits, check that all limits are infinite # if(all(a!=-Inf)&&length(a)!=len){ if(length(a)!=1)stop("a has incorrect length") else a <- rep(a,len)} if(all(b!=Inf)&&length(b)!=len){ if(length(b)!=1)stop("b has incorrect length") else b <- rep(b,len)} if(type=="Romberg"){ # invert function for infinite limits ff <- function(x) f(1/x)/(x*x) if(all(b==Inf)){ if(all(a==-Inf)) # both limits infinite z <- int1(ff,rep(-1,len),rep(0,len))+ int1(f,rep(-1,len),rep(1,len))+ int1(ff,rep(0,len),rep(1,len)) else { # only upper limit infinite, cut in 2 pieces about 0 if(any(a>0)){ if(any(a<=0))a1 <- ifelse(a>0,a,1) else a1 <- a z1 <- 
int1(ff,rep(0,len), 1/a1)} else z1 <- rep(0,len) if(any(a<=0)){ if(any(a>0))a1 <- ifelse(a<=0,a,0) else a1 <- a z2 <- int1(f,a1,rep(1,len))+ int1(ff,rep(0,len),rep(1,len))} else z2 <- rep(0,len) z <- z1*(a>0)+z2*(a<=0)}} else if(all(a==-Inf)){ # only lower limit infinite, cut in 2 pieces about 0 if(any(b<0)){ if(any(b>=0))b1 <- ifelse(b<0,b,1) else b1 <- b z1 <- int1(ff, 1/b1,rep(0,len))} else z1 <- rep(0,len) if(any(b>=0)){ if(any(b<0))b1 <- ifelse(b>=0,b,0) else b1 <- b z2 <- int1(f,rep(-1,len), b1)+ int1(ff,rep(-1,len),rep(0,len))} else z2 <- rep(0,len) z <- z1*(b<0)+z2*(b>=0)} else z <- int1(f, a, b) z} else { # # TOMS614 # envir2 <- environment(fun=f) ## Bruce: .C -> .Call edit left <- a==-Inf&&b!=Inf if(all(b==Inf)){ if(all(a==-Inf)){ # both limits infinite inf <- 1 a <- b <- 1} else { # only upper limit is infinite inf <- 2 b <- 1}} else if(all(a==-Inf)){ # only lower limit is infinite a <- 1 inf <- 1} else inf <- 4 if(left){ # lower limit infinite, upper limit numeric: calculate for # whole real line first z2 <- .Call("inthp_sexp", a=as.double(b), b=as.double(b), d=as.double(d), f=f, m=as.integer(max), p=as.double(p), eps=as.double(eps), inf=as.integer(2), envir2, ## DUP=FALSE, PACKAGE="rmutil") #.Call edit##if(z2$inf==3||z2$inf==4)warning(paste("error",z2$inf,"- integration incomplete - try larger max")) #.Call edit##else if(z2$inf>4)stop(paste("error",z2$inf,"- incorrect arguments")) } # integrate either for both limits finite or with upper limit infinite z1 <- .Call("inthp_sexp", a=as.double(a), b=as.double(b), d=as.double(d), f=f, m=as.integer(max), p=as.double(p), eps=as.double(eps), inf=as.integer(inf), envir2, ## DUP=FALSE, PACKAGE="rmutil") ## Bruce edits next two lines; just won't check. #.Call edit##if(z1$inf==3||z1$inf==4)warning(paste("error",z1$inf,"- integration incomplete - try larger max")) #.Call edit##else if(z1$inf>4)stop(paste("error",z1$inf,"- incorrect arguments")) # if lower limit infinite, upper limit numeric, subtract upper # part from that for whole real line ## Bruce comments; then include rewritten lines below #.Call edit##if(left)z1$quadr <- z1$quadr-z2$quadr #.Call edit##z1$quadr # z1, z2 now contain the quadr object directly; we replaced with envir if(left)z1 <- z1-z2 z1 } } ### ### vectorized two-dimensional integration ### int2 <- function(f, a=c(-Inf,-Inf), b=c(Inf,Inf), eps=1.0e-6, max=16, d=5){ # # function adapted from Gentleman and Ihaka (2000) # Jr Comp Graph Stat 9, 491-508 # g <- function(y){ fx <- function(x) f(x,y) romberg(fx,a[,2],b[,2])} # # function to call the C code # int1 <- function(ff, aa, bb){ envir2 <- environment(fun=ff) z <- .Call("romberg_sexp", ff, as.double(aa), as.double(bb), len=as.integer(len), eps=as.double(eps), pts=as.integer(d), max=as.integer(max), err=integer(1), envir2, PACKAGE="rmutil") z } # # function for Romberg integration # romberg <- function(f, a=-Inf, b=Inf){ # invert function for infinite limits ff <- function(x) f(1/x)/(x*x) if(all(b==Inf)){ if(all(a==-Inf)) # both limits infinite z <- int1(ff,rep(-1,len),rep(0,len))+ int1(f,rep(-1,len),rep(1,len))+ int1(ff,rep(0,len),rep(1,len)) else { # only upper limit infinite, cut in 2 pieces about 0 if(any(a>0)){ if(any(a<=0))a1 <- ifelse(a>0,a,1) else a1 <- a z1 <- int1(ff,rep(0,len), 1/a1)} else z1 <- rep(0,len) if(any(a<=0)){ if(any(a>0))a1 <- ifelse(a<=0,a,0) else a1 <- a z2 <- int1(f,a1,rep(1,len))+ int1(ff,rep(0,len),rep(1,len))} else z2 <- rep(0,len) z <- z1*(a>0)+z2*(a<=0)}} else if(all(a==-Inf)){ # only lower limit infinite, cut in 2 pieces about 0 
if(any(b<0)){ if(any(b>=0))b1 <- ifelse(b<0,b,1) else b1 <- b z1 <- int1(ff, 1/b1,rep(0,len))} else z1 <- rep(0,len) if(any(b>=0)){ if(any(b<0))b1 <- ifelse(b>=0,b,0) else b1 <- b z2 <- int1(f,rep(-1,len), b1)+ int1(ff,rep(-1,len),rep(0,len))} else z2 <- rep(0,len) z <- z1*(b<0)+z2*(b>=0)} else z <- int1(f, a, b) z} # # check dimensions of limits # if(is.vector(a,mode="numeric")&&length(a)==2)a <- matrix(a,ncol=2) else if(is.matrix(a)){ if(dim(a)[2]!=2)stop("a must be 2-column matrix")} else stop("a must be a 2-element vector or a 2-column matrix") if(is.vector(b,mode="numeric")&&length(b)==2)b <- matrix(b,ncol=2) else if(is.matrix(b)){ if(dim(b)[2]!=2)stop("b must be 2-column matrix")} else stop("b must be a 2-element vector or a 2-column matrix") if((dim(a)[1]>1&&((any(a[,1]==-Inf)&&all(a[,1]!=-Inf))||(any(a[,2]==-Inf)&&all(a[,1]!=-Inf))))||(dim(b)[1]>1&&((any(b[,1]==Inf)&&all(b[,1]!=Inf))||(any(b[,2]==Inf)&&all(b[,1]!=Inf)))))stop("int2 cannot have only some limits infinite") if(length(formals(f))!=2)stop("f must have two arguments") # # determine length of vectors to be integrated # if(all(a!=-Inf)){ if(all(b!=Inf)){ if(any(a[,1]>=b[,1])||any(a[,2]>=b[,2]))stop("some a>=b") len <- length(f((a[,1]+b[,1])/2,(a[,2]+b[,2])/2))} else len <- length(f(a[,1]+1,a[,2]+1))} else if(all(b!=Inf))len <- length(f(b[,1]-1,b[,2]-1)) else len <- length(f(0,0)) # # if a matrix and there are infinite limits, check that all limits are infinite # if(any(a!=-Inf)&&dim(a)[1]!=len){ if(dim(a)[1]!=1)stop("a has incorrect size") else a <- matrix(rep(a,len),ncol=2,byrow=TRUE)} if(any(b!=Inf)&&dim(b)[1]!=len){ if(dim(b)[1]!=1)stop("b has incorrect size") else b <- matrix(rep(b,len),ncol=2,byrow=TRUE)} # # integrate # romberg(g,a[,1],b[,1])} rmutil/R/gettvc.r0000755000176200001440000000720713425057453013467 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # # SYNOPSIS # # gettvc(response, times=NULL, tvcov=NULL, tvctimes=NULL, # oldtvcov=NULL, ties=TRUE) # # DESCRIPTION # # Function to find the most recent value of a time-varying # covariate not recorded at the same time as the response. 
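### Illustrative use of the vectorized integration routines int() and int2() defined
### in int.r above. A minimal sketch with arbitrary integrands, wrapped in if(FALSE)
### so that it is not executed when the package code is sourced.
if(FALSE){
	int(function(x) dnorm(x))                 # about 1 over (-Inf, Inf)
	int(function(x) exp(-x), a=0, b=Inf)      # about 1
	int(function(x) x^2, a=0, b=1)            # about 1/3
	int2(function(x, y) exp(-x^2 - y^2))      # about pi over the whole plane
}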
gettvc <- function(response, times=NULL, tvcov=NULL, tvctimes=NULL, oldtvcov=NULL, ties=TRUE){ # # if necessary, make into response object and remove NAs # if(inherits(response,"response"))izr <- response else izr <- restovec(response,times) if(any(is.na(izr$y))){ isna <- TRUE nna <- length(izr$y) tmp <- NULL rna <- !is.na(izr$y) irna <- (1:nna)*rna j <- c(0,cumsum(nobs(izr))) for(i in 1:length(nobs(izr)))tmp <- c(tmp,sum(rna[(j[i]+1):j[i+1]])) zr <- list() zr$nobs <- tmp zr$times <- izr$times[rna] zr$y <- izr$y[rna] class(zr) <- "response"} else { isna <- FALSE zr <- izr} # # remove NAs from time-varying covariate # zt <- restovec(tvcov,tvctimes) if(any(is.na(zt$y))){ tmp <- NULL rna <- !is.na(zt$y) j <- c(0,cumsum(nobs(zt))) for(i in 1:length(nobs(zt)))tmp <- c(tmp,sum(rna[(j[i]+1):j[i+1]])) zt$nobs <- tmp zt$times <- zt$times[rna] zt$y <- zt$y[rna]} if(length(nobs(izr))!=length(nobs(zt))) stop("response and covariate do not have the same number of individuals") # # obtain new aligned times # nind <- length(nobs(zr)) nld <- max(c(nobs(zr),nobs(zt))) z2 <- .Fortran("gettvc_f", x=as.double(zr$times), y=as.double(zr$y), xtvc=as.double(zt$times), tvcov=as.double(zt$y), nobs=as.integer(nobs(zr)), nind=as.integer(nind), nknt=as.integer(nobs(zt)), ties=as.logical(ties), xu=matrix(0,nrow=nind,ncol=2*nld), ndelta=logical(2*nld*nind), tvcov2=matrix(0,nrow=nind,ncol=2*nld), nu=integer(nind), wu=double(2*nld), nld=as.integer(nld), tvcov3=double(length(zr$y)), ## DUP=FALSE, PACKAGE="rmutil") if(isna){ tvcov3 <- rep(NA,nna) tvcov3[irna] <- z2$tvcov3} else tvcov3 <- z2$tvcov3 rm(z2) # # check if new covariate or to be combined with others # new <- missing(oldtvcov) if(!new&!is.list(oldtvcov)){ warning("oldtvcov must form a list - ignored") new <- TRUE} cname <- paste(deparse(substitute(tvcov))) if(new)oldtvcov <- vector(mode="list",nind) else if(!inherits(oldtvcov,"tvcov")){ if(length(oldtvcov)!=nind) stop(paste("Previous time-varying covariate list must have length",nind)) else if(!is.null(colnames(oldtvcov[[1]]))) cname <- c(colnames(oldtvcov[[1]]),cname) else cname <- NULL} else if(inherits(oldtvcov,"tvcov"))cname <- c(colnames(oldtvcov$tvcov),cname) if(inherits(oldtvcov,"tvcov")){ # # combine tvcov objects # oldtvcov$tvcov <- cbind(oldtvcov$tvcov,tvcov3) colnames(oldtvcov$tvcov) <- cname} else { # # create a new tvcov object # nm <- 0 for(i in 1:nind){ oldtvcov[[i]] <- cbind(oldtvcov[[i]],tvcov3[(nm+1): (nm+nobs(izr)[i])]) nm <- nm+nobs(izr)[i] if(!is.null(cname))colnames(oldtvcov[[i]]) <- cname} oldtvcov <- tvctomat(oldtvcov)} invisible(oldtvcov)} rmutil/R/objectrm.r0000755000176200001440000026214213651027443013776 0ustar liggesusers# # rmutil : A Library of Special Functions for Repeated Measurements # Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public Licence as published by # the Free Software Foundation; either version 2 of the Licence, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public Licence for more details. # # You should have received a copy of the GNU General Public Licence # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
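### Illustrative use of gettvc() from gettvc.r above: align a time-varying covariate
### recorded at its own times with the response times. A sketch with made-up data,
### wrapped in if(FALSE) so that it is not executed when the package code is sourced.
if(FALSE){
	y <- matrix(rnorm(20), ncol=5)           # 4 individuals observed at times 1,...,5
	resp <- restovec(y, times=1:5)
	tv <- matrix(rnorm(12), ncol=3)          # covariate recorded at three other times
	tvc <- gettvc(resp, tvcov=tv, tvctimes=c(0.5, 2.5, 4.5))
	tvc                                      # a tvcov object holding the aligned covariate
}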
# # SYNOPSIS # # restovec(response, times=NULL, nest=NULL, coordinates=NULL, # censor=NULL, totals=NULL, weights=NULL, delta=NULL, type=NULL, # names=NULL, units=NULL, oldresponse=NULL, description=NULL) # tcctomat(ccov, names=NULL, units=NULL, oldtccov=NULL, dataframe=TRUE, # description=NULL) # tvctomat(tvcov, names=NULL, units=NULL, interaction=NULL, ccov=NULL, # oldtvcov=NULL, dataframe=TRUE, description=NULL) # rmna(response, tvcov=NULL, ccov=NULL) # lvna(response, tvcov=NULL, ccov=NULL) # dftorep(dataframe, response, id=NULL, times=NULL, censor=NULL, # totals=NULL, weights=NULL, nest=NULL, delta=NULL, # coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=TRUE) # as.data.frame(z) # as.matrix(z) # covariates(z, nind=NULL, names=NULL) # covind(z) # delta(z, nind=NULL, names=NULL) # formula(z) # names(z) # nesting(z, nind=NULL) # nobs(z) # plot.response(z, nind=NULL, name=NULL, nest=1, ccov=NULL, add=FALSE, # lty=NULL, pch=NULL, main=NULL, ylim=NULL, xlim=NULL, xlab=NULL, # ylab=NULL, ...) # plot.repeated(z, name=NULL, nind=NULL, ccov=NULL, add=FALSE, lty=NULL, # main=NULL, ylim=NULL, xlim=NULL, xlab=NULL, ylab=NULL, ...) # print(z) # response(z, nind=NULL, names=NULL) # times(z, nind=NULL) # transform(z, ...) # weights(z, nind=NULL) # # DESCRIPTION # # Utility functions for converting repeated measurements data to R objects ### function to create a response object ### restovec <- function(response=NULL, times=NULL, nest=NULL, coordinates=NULL, censor=NULL, totals=NULL, weights=NULL, delta=NULL, type=NULL, names=NULL, units=NULL, oldresponse=NULL, description=NULL){ if(is.null(response))stop("A response must be supplied") # # check type # mvr <- length(names)>1 if(!is.null(type)){ tmp <- NULL for(i in 1:length(type)) tmp <- c(tmp,match.arg(type[i], c("nominal","ordinal","discrete","duration","continuous","unknown"))) type <- tmp if(!mvr)mvr <- length(type)>1} # # initial values # nind <- 0 tnest <- nobs <- y <- NULL # # check if times are required # ttime <- !is.logical(times)||times if((ttime&&is.logical(times))||!ttime)times <- NULL # # handle various forms of response # if(is.null(names))names <- paste(deparse(substitute(response))) if(is.vector(response,mode="numeric")){ # # numerical vector supplied: either univariate independent # observations or one time series # y <- response # check if independent observations or one time series nobs <- if(is.null(times)) 1 else length(response) # check times if(is.vector(times,mode="numeric")){ if(length(times)!=length(y)) stop("times must be the same length as the response")} else if(!is.null(times))stop("times must be a vector") # check censor indicator if(is.vector(censor,mode="numeric")){ if(length(censor)!=length(y)){ if(length(censor)==1) censor <- c(rep(1,length(y)-1),censor) else stop("censor must be the same length as the response")}} else if(!is.null(censor))stop("censor must be a scalar or vector") # check coordinates if(!is.null(coordinates)&&(!is.matrix(coordinates)||(is.matrix(coordinates)&&length(dim(coordinates))!=2&&dim(coordinates)[2]!=2&&dim(coordinates)[1]!=length(y)))) stop("coordinates must be a matrix with two columns and the same number of rows as the length of response") # check totals if(is.vector(totals,mode="numeric")){ if(length(totals)!=length(y)){ if(length(totals)==1)totals <- rep(totals,length(y)) else stop("totals must be the same length as the response")}} else if(!is.null(totals)) stop("totals must be a vector") # check weights if(is.vector(weights,mode="numeric")){ if(length(weights)!=length(y)) 
stop("weights must be the same length as the response")} else if(!is.null(weights))stop("weights must be a vector") # check delta if(is.vector(delta,mode="numeric")){ if(length(delta)!=length(y)){ if(length(delta)==1)delta <- rep(delta,length(y)) else stop("delta must be the same length as the response")}} else if(!is.null(delta))stop("delta must be a scalar or vector")} else if(is.array(response)&&length(dim(response))==3){ # # one multivariate 3-dim array of balanced repeated measurements supplied # nobs <- rep(dim(response)[2],dim(response)[1]) if(length(names)==1)names <- paste(names,1:dim(response)[3],sep="") # check times if(is.null(times)){ if(is.null(censor)&&ttime) times <- as.double(rep(1:dim(response)[2],dim(response)[1]))} else if(is.matrix(times)){ if(dim(times)[2]!=dim(response)[2]||dim(times)[1]!=dim(response)[1]) stop("times matrix must be the same size as first two dimensions of array of responses") # time steps can be 0 but not negative for(i in 1:dim(response)[1]) if(any(diff(times[i,])<0,na.rm=TRUE)) stop(paste("negative time step for individual ",i)) times <- as.vector(t(times))} else if(is.vector(times,mode="numeric")) { if(is.null(nest)&&any(diff(times)<0,na.rm=TRUE)) stop("times must be increasing") if(length(times)!=dim(response)[2]) stop("number of times must equal number of response columns") times <- rep(times,dim(response)[1])} else stop("times must be a vector or matrix") # check weights if(is.matrix(weights)){ if(dim(weights)[1]!=dim(response)[1]||dim(weights)[2]!=dim(response)[2]) stop("weights matrix must be the same size as first two dimensions of array of responses") else weights <- as.vector(t(weights))} else if(is.vector(weights,mode="numeric")){ if(length(weights)!=dim(response)[1]) stop("weights vector must have same length as number of individuals") else weights <- rep(weights,rep(dim(response)[2],dim(response)[1]))} else if(!is.null(weights))stop("weights must be a vector or matrix") # check nesting if(is.matrix(nest)){ if(dim(weights)[1]!=dim(response)[1]||dim(weights)[2]!=dim(response)[2]) stop("nest matrix must be the same size as first two dimensions of array of responses") for(i in 1:dim(nest)[1]) if(any(diff(nest[i,])!=0&diff(nest[i,])!=1,na.rm=TRUE)) stop("nest categories must be consecutive increasing integers") tnest <- as.vector(t(nest))} else if(is.vector(nest,mode="numeric")){ if(length(nest)!=dim(response)[2]) stop("nest vector must have same length as number of individuals") if(any(diff(nest)!=0&diff(nest)!=1,na.rm=TRUE)) stop("nest categories must be consecutive increasing integers") tnest <- rep(nest,dim(response)[1])} else if(!is.null(nest))stop("nest must be a vector or matrix") # check censor indicator if(is.array(censor)&&length(dim(censor))==3){ if(any(dim(censor)!=dim(response))) stop("censor array must be the same size as array of responses") tmp <- NULL for(i in 1:dim(response)[3]) tmp <- cbind(tmp,as.vector(t(censor[,,i]))) censor <- tmp rm(tmp)} else if(is.matrix(censor)){ # if a matrix, corresponds to last observation of each subject if(dim(censor)[1]!=dim(response)[1]||dim(censor)[2]!=dim(response)[3]) stop("censor matrix must have dimensions, number of individuals by number of variables") tmp <- array(1,dim(response)) for(i in 1:dim(response)[3]) tmp[,dim(tmp)[2],i] <- censor[,i] censor <- NULL for(i in 1:dim(response)[3]) censor <- cbind(censor,as.vector(t(tmp[,,i]))) rm(tmp)} else if(!is.null(censor))stop("censor must be a matrix or array") # check totals if(is.array(totals)&&length(dim(totals))==3){ 
if(any(dim(totals)!=dim(response))) stop("totals array must be the same size as array of responses") tmp <- NULL for(i in 1:dim(response)[3]) tmp <- cbind(tmp,as.vector(t(totals[,,i]))) totals <- tmp rm(tmp)} else if(is.matrix(totals)){ if(dim(totals)[1]!=dim(response)[1]||dim(totals)[2]!=dim(response)[3]) stop("totals matrix must have dimensions, number of individuals by number of variables") for(i in 1:dim(response)[3]) tmp <- NULL for(i in 1:dim(response)[3]) tmp <- cbind(tmp,rep(totals[,i],rep(dim(response)[2],dim(response)[1]))) totals <- tmp rm(tmp)} else if(!is.null(totals))stop("totals must be a matrix or array") # check delta if(is.array(delta)&&length(dim(delta))==3){ if(any(dim(delta)!=dim(response))) stop("delta array must be the same size as array of responses") tmp <- NULL for(i in 1:dim(response)[3]) tmp <- cbind(tmp,as.vector(t(delta[,,i]))) delta <- tmp rm(tmp)} else if(is.matrix(delta)){ if(dim(delta)[1]!=dim(response)[1]||dim(delta)[2]!=dim(response)[3]) stop("delta matrix must have dimensions, number of individuals by number of variables") for(i in 1:dim(response)[3]) tmp <- NULL for(i in 1:dim(response)[3]) tmp <- cbind(tmp,rep(delta[,i],rep(dim(response)[2],dim(response)[1]))) delta <- tmp rm(tmp)} else if(!is.null(delta))stop("delta must be a matrix or array") y <- NULL for(i in 1:dim(response)[3])y <- cbind(y,as.vector(t(response[,,i]))) mvr <- TRUE} else if(mvr&&(is.matrix(response)||is.data.frame(response))){ # # multivariate matrix or dataframe of independent observations or time # series supplied # y <- as.matrix(response) # check if independent observations or one time series nobs <- if(is.null(times)) 1 else dim(response)[1] # check times if(is.vector(times,mode="numeric")){ if(length(times)!=dim(y)[1]) stop("times must be the same length as the number of responses/variable")} else if(!is.null(times))stop("times must be a vector") # check weights if(is.vector(weights,mode="numeric")){ if(length(weights)!=dim(y)[1]) stop("weights must be the same length as the number of responses/variable")} else if(!is.null(weights))stop("weights must be a vector") # check censor indicator if(is.vector(censor,mode="numeric")){ if(is.null(times))stop("censor must be a matrix") if(length(censor)!=dim(y)[2]) stop("censor must be the same length as the number of variables") censor <- rbind(matrix(1,ncol=dim(y)[2],nrow=dim(y)[1]-1),censor)} else if(is.matrix(censor)){ if(any(dim(censor)!=dim(y)))stop("censor and response must have the same dimensions")} else if(!is.null(censor))stop("censor must be a vector or matrix") # check totals if(is.vector(totals,mode="numeric")){ if(length(totals)!=dim(y)[2]) stop("totals must be the same length as the number of variables") totals <- matrix(rep(totals,dim(y)[1]),ncol=dim(y)[2],byrow=TRUE)} else if(is.matrix(totals)){ if(any(dim(totals)!=dim(y)))stop("totals and response must have the same dimensions")} else if(!is.null(totals)) stop("totals must be a vector or matrix") # check delta if(is.vector(delta,mode="numeric")){ if(length(delta)!=dim(y)[2]){ if(length(delta)==1) delta <- matrix(delta,nrow=dim(y)[1],ncol=dim(y)[2]) else if(length(delta)==dim(y)[2]) delta <- matrix(rep(delta,dim(y)[1]),ncol=dim(y)[2],byrow=TRUE) else stop("delta must be the same length as the number of variables")}} else if(is.matrix(delta)){ if(any(dim(delta)!=dim(y)))stop("delta and response must have the same dimensions")} else if(!is.null(delta))stop("delta must be a scalar, vector, or matrix")} else if(is.matrix(response)||is.data.frame(response)){ # # one matrix 
or dataframe with independent observations or several time # series supplied # # transform to a matrix if(is.data.frame(response))response <- as.matrix(response) if(is.data.frame(totals))totals <- as.matrix(totals) if(is.data.frame(delta))delta <- as.matrix(delta) # create vector of observations/individual nobs <- rep(dim(response)[2],dim(response)[1]) # check times if(is.null(times)){ if(is.null(censor)&&ttime) times <- as.double(rep(1:dim(response)[2],dim(response)[1]))} else if(is.matrix(times)){ if(any(dim(times)!=dim(response))) stop("times matrix must have the same dimensions as response") # time steps can be 0 but not negative for(i in 1:dim(response)[1]) if(any(diff(times[i,])<0,na.rm=TRUE)) stop(paste("negative time step for individual ",i)) times <- as.vector(t(times))} else if(is.vector(times,mode="numeric")) { if(is.null(nest)&&any(diff(times)<0,na.rm=TRUE)) stop("times must be increasing") if(length(times)!=dim(response)[2]) stop("number of times must equal number of response columns") times <- rep(times,dim(response)[1])} else stop("times must be a vector or matrix") # check weights if(is.matrix(weights)){ if(any(dim(weights)!=dim(response))) stop("weights matrix must have the same dimensions as response") else weights <- as.vector(t(weights))} else if(is.vector(weights,mode="numeric")){ if(length(weights)!=dim(response)[1]) stop("weights vector must have same length as number of individuals") else weights <- rep(weights,rep(dim(response)[2],dim(response)[1]))} else if(!is.null(weights))stop("weights must be a vector or matrix") # check nesting if(is.matrix(nest)){ if(any(dim(nest)!=dim(response))) stop("nest matrix must have the same dimensions as response") for(i in 1:dim(nest)[1]) if(any(diff(nest[i,])!=0&diff(nest[i,])!=1,na.rm=TRUE)) stop("nest categories must be consecutive increasing integers") tnest <- as.vector(t(nest))} else if(is.vector(nest,mode="numeric")){ if(length(nest)!=dim(response)[2]) stop("nest vector must have same length as number of individuals") if(any(diff(nest)!=0&diff(nest)!=1,na.rm=TRUE)) stop("nest categories must be consecutive increasing integers") tnest <- rep(nest,dim(response)[1])} else if(!is.null(nest))stop("nest must be a vector or matrix") # check censor indicator if(is.matrix(censor)){ if(any(dim(censor)!=dim(response))) stop("censor matrix must have the same dimensions as response") censor <- as.vector(t(censor))} else if(is.vector(censor,mode="numeric")){ # if a vector, corresponds to last observation of each subject if(length(censor)!=dim(response)[1]) stop("censor must be the same length as the number of individuals") else { tmp <- matrix(1,nrow=dim(response)[1],ncol=dim(response)[2]) tmp[,dim(tmp)[2]] <- censor censor <- tmp}} else if(!is.null(censor))stop("censor must be a vector or matrix") # check totals if(is.matrix(totals)){ if(any(dim(totals)!=dim(response))) stop("totals matrix must have the same dimensions as response") totals <- as.vector(t(totals))} else if(is.vector(totals,mode="numeric")){ if(length(totals)!=dim(response)[1]) stop("totals vector must have same length as number of individuals") else totals <- rep(totals,rep(dim(response)[2],dim(response)[1]))} else if(!is.null(totals))stop("totals must be a vector or matrix") # check delta if(is.matrix(delta)){ if(any(dim(delta)!=dim(response))) stop("delta matrix must have the same dimensions as response") delta <- as.vector(t(delta))} else if(is.vector(delta,mode="numeric")){ if(length(delta)>1){ if(length(delta)!=dim(response)[2]) stop("delta vector must have same 
length as number of individuals") else delta <- rep(delta,dim(response)[1])}} else if(!is.null(delta))stop("delta must be a vector or matrix") y <- as.vector(t(response))} else if(mvr&&is.list(response)){ # # a list of unbalanced multivariate repeated measurements supplied # # obtain responses and possibly times y <- times <- nobs <- NULL nind <- 0 nmv <- dim(as.matrix(response[[1]]))[2] for(i in response){ nind <- nind+1 if(!is.matrix(i))stop("response must be a list of matrices") nobs <- c(nobs,dim(i)[1]) if(dim(i)[2]!=nmv) stop("all matrices must have the same number of columns") if(ttime){ y <- rbind(y,i[,1:(nmv-1)]) if(is.null(nest)&&any(diff(i[,nmv])<0,na.rm=TRUE)) stop(paste("negative time step for individual ",nind)) times <- c(times,i[,nmv])} else y <- rbind(y,i)} if(ttime)nmv <- nmv-1 if(!is.null(nest)){ # obtain nesting nind <- 0 nes <- NULL if(!is.list(nest))stop("nest must be a list") for(i in nest){ if(!is.vector(i,mode="numeric")&&!(is.matrix(i)&&dim(i)[2]==1)) stop("nest must be a list of vectors") if(any(diff(i)!=0&diff(i)!=1,na.rm=TRUE)) stop("nest categories must be consecutive increasing integers") nind <- nind+1 if(length(i)!=nobs[nind]) stop(paste("nest for individual",nind,"should have length",nobs[nind])) nes <- c(nes,i)} tnest <- nes rm(nes)} if(!is.null(weights)){ # obtain weights nind <- 0 wt <- NULL if(!is.list(weights))stop("weights must be a list") for(i in weights){ if(!is.vector(i,mode="numeric")&&!is.vector(i,mode="logical")&&!(is.matrix(i)&&dim(i)[2]==1)) stop("weights must be a list of vectors") nind <- nind+1 if(length(i)!=nobs[nind]) stop(paste("weights for individual",nind,"should have length",nobs[nind])) wt <- c(wt,i)} weights <- wt rm(wt)} if(!is.null(censor)){ # obtain censoring nind <- 0 cen <- NULL if(!is.list(censor))stop("censor must be a list") for(i in censor){ if(!is.matrix(i)) stop("censor must be a list of matrices") nind <- nind+1 if(any(dim(i)!=c(nobs[nind],nmv))) stop(paste("censor for individual",nind,"should be",nobs[nind],"x",nmv,"matrix")) cen <- rbind(cen,i)} censor <- cen rm(cen)} if(!is.null(totals)){ # obtain totals nind <- 0 tot <- NULL if(!is.list(totals))stop("totals must be a list") for(i in totals){ if(!is.matrix(i)) stop("totals must be a list of matrices") nind <- nind+1 if(any(dim(i)!=c(nobs[nind],nmv))) stop(paste("totals for individual",nind,"should be",nobs[nind],"x",nmv,"matrix")) tot <- rbind(tot,i)} totals <- tot rm(tot)} if(!is.null(delta)){ # obtain delta nind <- 0 del <- NULL if(!is.list(delta))stop("delta must be a list") for(i in delta){ if(!is.matrix(i)) stop("delta must be a list of matrices") nind <- nind+1 if(any(dim(i)!=c(nobs[nind],nmv))) stop(paste("delta for individual",nind,"should be",nobs[nind],"x",nmv,"matrix")) del <- rbind(del,i)} delta <- del rm(del)}} else if(is.list(response)){ # # a list of unbalanced repeated measurements supplied # # check if nest, delta, and/or totals are supplied # separately delv <- !is.null(delta) totv <- !is.null(totals) nestv <- !is.null(nest) if(is.null(censor)){ # initialize times <- NULL tot <- del <- cen <- nes <- 0 ncols <- dim(as.matrix(response[[1]]))[2] nc <- ttime+1 if(ncolsnc)for(j in response){ # find columns for censor, totals, delta, nest j <- as.matrix(j) for(k in (nc+1):ncols){ if(any(j[,k]<=0,na.rm=TRUE))cen <- k else if(any(j[,k]>1,na.rm=TRUE)&&all(j[,k]==trunc(j[,k]),na.rm=TRUE)){ if(all(j[,1]>=0,na.rm=TRUE)&&all(j[,1]==trunc(j[,1]),na.rm=TRUE)&&all(j[,k]>=j[,1],na.rm=TRUE)&&any(diff(j[,k])<0,na.rm=TRUE)&&!totv)tot <- k else 
if(all(diff(j[,k])==0|diff(j[,k])==1,na.rm=TRUE)&&!nestv)nes <- k else stop(paste("column",k,"of unknown type"))} else if(!delv&&all(j[,k]>0,na.rm=TRUE))del <- k} if((((ncols==2&&!ttime)||ncols==3)&&(nes>0||cen>0||del>0||tot>0))||(((ncols==3&&!ttime)||ncols==4)&&((nes>0&&cen>0)||(nes>0&&del>0)||(nes>0&&tot>0)||(cen>0&&del>0)))||(((ncols>=4&&!ttime)||ncols>=5)&&((nes>0&&cen>0&&del>0)||(nes>0&&tot>0))))break} for(i in response){ # obtain responses and times i <- as.matrix(i) nind <- nind+1 if(dim(i)[2]!=ncols) stop(paste("Individual ",nind,"does not have a",ncols,"column matrix")) if(!nestv&&nes==0&&ttime&&any(diff(i[,2])<0,na.rm=TRUE)) stop(paste("negative time step for individual ",nind)) nobs <- c(nobs,dim(i)[1]) y <- c(y,i[,1]) if(ttime)times <- c(times,i[,2]) # obtain censor, totals, delta, nest # from their columns if(nes>0){ if(any(diff(i[,nes])!=0&diff(i[,nes])!=1,na.rm=TRUE)) stop("nest categories for individual ",nind,"are not consecutive increasing integers") tnest <- c(tnest,i[,nes])} if(!totv&&tot>0)totals <- c(totals,i[,tot]) if(cen>0)censor <- c(censor,i[,cen]) if(!delv&&del>0)delta <- c(delta,i[,del])}} else if(is.vector(censor,mode="numeric")){ # initialize (cannot be totals and censor available) del <- nes <- 0 ncols <- dim(as.matrix(response[[1]]))[2] if(ncols>1){ for(j in response){ # find columns for delta, nest j <- as.matrix(j) for(k in 2:ncols){ if(is.null(censor)&&any(j[,k]<=0,na.rm=TRUE))cen <- k else if(any(j[,k]>1,na.rm=TRUE)&&all(j[,k]==trunc(j[,k]),na.rm=TRUE))nes <- k else if(is.null(delta)&&all(j[,k]>0,na.rm=TRUE))del <- k} if((ncols==3&&(nes>0||cen>0||del>0))||(ncols==4&&((nes>0&&cen>0)||(nes>0&&del>0)||(cen>0&&del>0)))||(ncols>=5&&((nes>0&&cen>0&&del>0))))break} tmp <- NULL j <- 0 for(i in response){ # obtain responses i <- as.matrix(i) nind <- nind+1 if(dim(i)[2]!=ncols)stop(paste("Individual ",nind,"does not have a",ncols,"column matrix")) nobs <- c(nobs,dim(i)[1]) y <- c(y,i[,1]) # construct censor tmp <- c(tmp,rep(1,dim(i)[1]-1),censor[j <- j+1]) # obtain delta, nest from their columns if(nes>0){ if(any(diff(i[,nes])!=0&diff(i[,nes])!=1,na.rm=TRUE)) stop("nest categories for individual ",nind,"are not consecutive increasing integers") tnest <- c(tnest,i[,nes])} if(del>0)delta <- c(delta,i[,del])} censor <- tmp} else { tmp <- NULL j <- 0 for(i in response){ # obtain responses nind <- nind+1 if(!is.vector(i,mode="numeric")&&!(is.matrix(i)&&dim(i)[2]==1)) stop(paste("Individual ",nind,"does not have a vector or one column matrix")) # construct censor tmp <- c(tmp,rep(1,length(i)-1),censor[j <- j+1]) y <- c(y,i) nobs <- c(nobs,length(i))} censor <- tmp}} else stop("If response is a list, censor must be a vector") if(nestv){ # obtain nesting nind <- 0 nes <- NULL if(!is.list(nest))stop("nest must be a list") for(i in nest){ if(!is.vector(i,mode="numeric")&&!(is.matrix(i)&&dim(i)[2]==1)) stop("nest must be a list of vectors") if(any(diff(i)!=0&diff(i)!=1,na.rm=TRUE)) stop("nest categories must be consecutive increasing integers") nind <- nind+1 if(length(i)!=nobs[nind]) stop(paste("nest for individual",nind,"should have length",nobs[nind])) nes <- c(nes,i)} tnest <- nes rm(nes)} if(totv){ # obtain totals nind <- 0 tot <- NULL if(!is.list(totals))stop("totals must be a list") for(i in totals){ if(!is.vector(i,mode="numeric")&&!(is.matrix(i)&&dim(i)[2]==1)) stop("totals must be a list of vectors") nind <- nind+1 if(length(i)!=nobs[nind]) stop(paste("totals for individual",nind,"should have length",nobs[nind])) tot <- rbind(tot,i)} totals <- tot rm(tot)} 
if(delv){ # obtain delta if(is.vector(delta)&&length(delta)==1) delta <- rep(delta,length(y)) else { nind <- 0 del <- NULL if(!is.list(delta))stop("delta must be a list") for(i in delta){ if(!is.vector(i,mode="numeric")&&!(is.matrix(i)&&dim(i)[2]==1)) stop("delta must be a list of vectors") nind <- nind+1 if(length(i)!=nobs[nind]) stop(paste("delta for individual",nind,"should have length",nobs[nind])) del <- c(del,i)} delta <- del rm(del)}} if(!is.null(weights)){ # obtain weights nind <- 0 wt <- NULL if(!is.list(weights))stop("weights must be a list") for(i in weights){ if(!is.vector(i,mode="numeric")&&!is.vector(i,mode="logical")&&!(is.matrix(i)&&dim(i)[2]==1)) stop("weights must be a list of vectors") nind <- nind+1 if(length(i)!=nobs[nind]) stop(paste("weights for individual",nind,"should have length",nobs[nind])) wt <- c(wt,i)} weights <- wt rm(wt)} # check that totals, delta, and weights are now # correct if supplied separately if(totv){ if(length(totals)==1)totals <- rep(totals,length(y)) else if(length(totals)==length(nobs)) totals <- totals[rep(1:length(nobs),nobs)] else if(length(totals)!=length(y)) stop("totals must have one value per response")} if(delv&&length(delta)>1&&length(delta)!=length(y)) stop("delta must have one value per response") if(!is.null(weights)&&length(y)!=length(weights))stop("weights must have one value per response")} else stop("Responses must be supplied as a vector, matrix, dataframe, array, or list of matrices") # # make sure that the response is a matrix # if(!is.numeric(y))stop("response must be numeric") if(is.matrix(y)){ if(is.null(colnames(y)))colnames(y) <- if(length(names)==1) paste(names,1:dim(y)[2],sep="") else if(length(names)==dim(y)[2]) names else stop("incorrect number of names")} else { y <- matrix(y,ncol=1) colnames(y) <- if(length(names)==1)names else "y"} rownames(y) <- 1:dim(y)[1] if(!is.null(units)){ if(!is.character(units))stop("units must be a character vector") if(length(units)!=dim(y)[2]) stop("units must be given for all responses")} # # check that type has the right length # if(is.null(type))type <- rep("unknown",dim(y)[2]) else { if(length(type)!=dim(y)[2]) stop("a type must be supplied for each response") if(length(type)==1)type <- rep(type,dim(y)[2]) if(any(is.na(type)))type[is.na(type)] <- "unknown"} if(!is.null(censor)){ # # check that censor has correct values # if(!is.numeric(censor))stop("censor must be numeric") if(any(censor!=-1&censor!=0&censor!=1,na.rm=TRUE)) stop("censor must only contain -1, 0, and 1") if(!is.null(censor)&&!is.matrix(censor))censor <- matrix(censor,ncol=1) # construct (cumulative) times from responses if univariate if(!mvr&&length(nobs)>1&&is.null(times)&&type=="duration"){ j <- 1 na <- is.na(y[,1]) y[na,1] <- 0 for(i in 1:length(nobs)){ times <- c(times,cumsum(y[j:(j+nobs[i]-1),1])) j <- j+nobs[i]} y[na,1] <- NA} for(i in 1:dim(y)[2]) if(type[i]=="unknown"&&any(!is.na(censor[,i]))) type[i] <- "duration" # remove censor if unnecessary if(all(censor==1,na.rm=TRUE))censor <- NULL} if(!is.null(totals)){ # # check that totals has correct values # if(!is.numeric(totals))stop("totals must be numeric") if(!is.matrix(totals))totals <- matrix(totals,ncol=1) for(i in 1:dim(y)[2])if(any(!is.na(totals[,i]))){ if(any(y[,i]<0,na.rm=TRUE)) stop("all responses must be non-negative for binomial data") if(any(totals[,i]= to responses") if(type[i]=="unknown"&&any(!is.na(totals[,i]))) type[i] <- "nominal"}} # # check that nest, delta, weights have correct values # if(!is.null(tnest)){ if(!is.numeric(tnest))stop("tnest 
must be numeric") if((any(tnest<1,na.rm=TRUE)||any(tnest!=trunc(tnest),na.rm=TRUE))) stop("nest must contain integers starting at 1")} if(!is.null(delta)){ if(!is.numeric(delta))stop("delta must be numeric") if(any(delta<=0,na.rm=TRUE))stop("delta must be strictly positive") if(!is.matrix(delta))delta <- matrix(delta,ncol=1) for(i in 1:dim(y)[2]) if(type[i]=="unknown"&&any(!is.na(delta[,i]))) type[i] <- "continuous"} if(!is.null(weights)){ if(!is.numeric(weights)&&!is.logical(weights))stop("weights must be numeric or logical") if(any(weights<0,na.rm=TRUE))stop("weights must be non-negative")} # # check that ordinal data have correct values # if(!is.na(match("ordinal",type))){ for(i in 1:dim(y)[2])if(!is.na(match("ordinal",type[i]))){ if(min(y[,i],na.rm=TRUE)!=0) stop("ordinal responses must be indexed from zero") if(any(as.integer(y[!is.na(y[,i]),i])!=y[!is.na(y[,i]),i])) stop("ordinal responses must be integers")}} # # combine with old object, if provided # if(!is.null(oldresponse)){ if(any(nobs!=oldresponse$nobs)) stop("old and new objects do not have the same number of observations per individual") if(dim(y)[1]!=dim(oldresponse$y)[1]) stop("old and new objects do not have the same number of observations") if((!is.null(times)&&(is.null(oldresponse$times)||any(times!=oldresponse$times,na.rm=TRUE)))||(is.null(times)&&!is.null(oldresponse$times))) stop("old and new objects do not have the same times") if((!is.null(tnest)&&(is.null(oldresponse$tnest)||any(tnest!=oldresponse$tnest,na.rm=TRUE)))||(is.null(tnest)&&!is.null(oldresponse$tnest))) stop("old and new objects do not have the same nesting") if((!is.null(weights)&&(is.null(oldresponse$wt)||any(weights!=oldresponse$wt,na.rm=TRUE)))||(is.null(weights)&&!is.null(oldresponse$wt))) stop("old and new objects do not have the same weights") y <- cbind(oldresponse$y,y) if(!is.null(censor)){ if(is.null(oldresponse$censor)) censor <- cbind(matrix(NA,nrow=dim(oldresponse$y)[1],ncol=dim(oldresponse$y)[2]),censor) else censor <- cbind(oldresponse$censor,censor)} else if(!is.null(oldresponse$censor)) censor <- cbind(oldresponse$censor,matrix(NA,nrow=dim(y)[1],ncol=dim(y)[2])) if(!is.null(totals)){ if(is.null(oldresponse$n)) totals <- cbind(matrix(NA,nrow=dim(oldresponse$y)[1],ncol=dim(oldresponse$y)[2]),totals) else totals <- cbind(oldresponse$n,totals)} else if(!is.null(oldresponse$n)) totals <- cbind(oldresponse$n,matrix(NA,nrow=dim(y)[1],ncol=dim(y)[2])) if(!is.null(delta)){ if(is.null(oldresponse$delta)) delta <- cbind(matrix(1,nrow=dim(oldresponse$y)[1],ncol=dim(oldresponse$y)[2]),delta) else delta <- cbind(oldresponse$delta,delta)} else if(!is.null(oldresponse$delta)) delta <- cbind(oldresponse$delta,matrix(1,nrow=dim(y)[1],ncol=dim(y)[2])) if(!is.null(units)){ if(is.null(oldresponse$units)) units <- c(rep(NA,dim(oldresponse$y)[2]),units) else units <- c(oldresponse$units,units)} else if(!is.null(oldresponse$units)) units <- c(oldresponse$units,rep(NA,dim(y)[2])) type <- c(oldresponse$type,type)} # # put names on matrices # if(!is.null(totals)&&is.null(colnames(totals)))colnames(totals) <- colnames(y) if(!is.null(censor)&&is.null(colnames(censor)))colnames(censor) <- colnames(y) if(!is.null(delta)&&is.null(colnames(delta)))colnames(delta) <- colnames(y) if(!is.null(units))names(units) <- colnames(y) names(type) <- colnames(y) # # check for variable descriptions # if(!is.null(description)){ if(!is.list(description))stop("description must be a list") if(!all(tmp <- names(description)%in%colnames(y))) 
stop(paste("variable(s)",names(description)[!tmp],"not found")) for(i in description)if(!is.character(i)) stop("description list must contain character vectors") if(!is.null(oldresponse)) description <- c(oldresponse$description,description)} z <- list(y=y, nobs=nobs, times=times, nest=tnest, censor=censor, n=totals, coordinates=coordinates, wt=weights, delta=delta, units=units, type=type, description=description) class(z) <- "response" z} ### function to create a time-constant covariate (tccov) object ### tcctomat <- function(ccov, names=NULL, units=NULL, oldccov=NULL, dataframe=TRUE, description=NULL){ if(inherits(ccov,"tccov")&&inherits(oldccov,"tccov")){ # # check for compatibility # if(dim(ccov$ccov)[1]!=dim(oldccov$ccov)[1]) stop("incompatible tccov objects") if(!is.null(oldccov$units)||!is.null(ccov$units)){ if(is.null(oldccov$units)) oldccov$units <- rep(NA,dim(oldccov$ccov)[2]) if(is.null(ccov$units))ccov$units <- rep(NA,dim(ccov$ccov)[2]) oldccov$units <- c(oldccov$units,ccov$units)} oldccov$ccov <- cbind(oldccov$ccov,ccov$ccov) oldccov$linear <- NULL return(oldccov)} linear <- NULL if(is.language(ccov)){ # # if provided as a formula, transform to a matrix # linear <- ccov mt <- terms(ccov) mf <- model.frame(mt,parent.frame(),na.action=NULL) ccov <- model.matrix(mt,mf)[,-1,drop=FALSE]} else if(is.factor(ccov)||is.vector(ccov,mode="character")){ # # if provided as factor variables, make a dataframe # if(is.null(names))names <- paste(deparse(substitute(ccov))) ccov <- data.frame(ccov) colnames(ccov) <- names} if(is.vector(ccov,mode="numeric")){ # # if a vector, get a name # if(is.null(names))names <- paste(deparse(substitute(ccov))) ccov <- matrix(ccov,ncol=1)} else if(is.data.frame(ccov)){ if(!dataframe){ # if a dataframe supplied, but should not be stored as one rm(names) units2 <- mt <- tmp3 <- tmp <- NULL j <- 0 for(i in ccov){ j <- j+1 if(is.vector(i,mode="numeric")){ # handle numeric vectors tmp2 <- as.matrix(i) units2 <- c(units2,units[j]) colnames(tmp2) <- names(ccov)[j]} else { # handle factor variables mt <- terms(~i) tmp2 <- model.matrix(mt,model.frame(mt,na.action=NULL))[,-1,drop=FALSE] units2 <- c(units2,rep(units[j],length(levels(i)[-1]))) tmp3 <- dimnames(get(getOption("contrasts")[[if(is.ordered(i))2 else 1]])(levels(i),contrasts =TRUE))[[2]] if(is.null(tmp3))tmp3 <- 1:(length(levels(i))-1) colnames(tmp2) <- paste(names(ccov)[j],tmp3,sep="")} # colnames(tmp2) <- paste(names(ccov)[j],levels(i)[-1],sep="")} tmp <- cbind(tmp,tmp2)} units <- units2 ccov <- tmp rm(tmp,tmp2,tmp3,mt)}} else if(!is.matrix(ccov)) stop("Inter-unit (time-constant) covariates must be a vector, matrix, dataframe, or model formula") if(is.null(colnames(ccov))){ # # create names if not available # if(is.null(names))names <- paste(deparse(substitute(ccov))) if(length(names)==1&&dim(ccov)[2]>1) names <- paste(names,1:dim(ccov)[2],sep="") colnames(ccov) <- names} # # check units # if(!is.null(units)){ if(!is.character(units))stop("units must be a character vector") if(length(units)!=dim(ccov)[2]) stop("units must be supplied for all covariates")} # # check for variable descriptions # if(!is.null(description)){ if(!is.list(description))stop("description must be a list") if(!all(tmp <- names(description)%in%colnames(ccov))) stop(paste("variable(s)",names(description)[!tmp],"not found")) for(i in description)if(!is.character(i)) stop("description list must contain character vectors")} if(!is.null(oldccov)){ # # combine new data with old data when available # if(!inherits(oldccov,"tccov")) stop("oldccov 
must have class, tccov") oldccov$description <- c(oldccov$description,description) if(!is.null(oldccov$units)||!is.null(units)){ if(is.null(oldccov$units)) oldccov$units <- rep(NA,dim(oldccov$ccov)[2]) if(is.null(units))units <- rep(NA,dim(ccov)[2]) oldccov$units <- c(oldccov$units,units)} else if(dim(oldccov$ccov)[1]==dim(ccov)[1]){ if(dataframe)oldccov$ccov <- data.frame(oldccov$ccov,ccov) else oldccov$ccov <- cbind(oldccov$ccov,ccov)} else stop("old and new covariates do not have the same number of individuals")} else { if(dataframe)ccov <- as.data.frame(ccov) oldccov <- list(ccov=ccov,linear=linear,units=units,description=description) class(oldccov) <- "tccov"} if(!is.null(oldccov$units))names(oldccov$units) <- colnames(oldccov$ccov) if(is.data.frame(oldccov$ccov)){ # # if no factor variables, store as a matrix anyway # fac <- FALSE for(i in 1:dim(oldccov$ccov)[2])if(!is.vector(oldccov$ccov[,i],mode="numeric")){ fac <- TRUE break} if(!fac)oldccov$ccov <- as.matrix(oldccov$ccov)} oldccov} ### function to create a time-varying covariate (tvcov) object ### tvctomat <- function(tvcov, names=NULL, units=NULL, interaction=NULL, ccov=NULL, oldtvcov=NULL, dataframe=TRUE, description=NULL){ # # check for compatibility # if(inherits(tvcov,"tvcov")&&inherits(oldtvcov,"tvcov")){ if(length(tvcov$nobs)!=length(oldtvcov$nobs)|| any(tvcov$nobs!=oldtvcov$nobs)) stop("incompatible tvcov objects") if(!is.null(oldtvcov$units)||!is.null(tvcov$units)){ if(is.null(oldtvcov$units)) oldtvcov$units <- rep(NA,dim(oldtvcov$tvcov)[2]) if(is.null(tvcov$units)) tvcov$units <- rep(NA,dim(tvcov$tvcov)[2]) oldtvcov$units <- c(oldtvcov$units,tvcov$units)} oldtvcov$tvcov <- cbind(oldtvcov$tvcov,tvcov$tvcov) return(oldtvcov)} nbs <- tvcv <- NULL if(is.data.frame(tvcov)){ if(is.null(names))names <- paste(deparse(substitute(tvcov))) if(length(names)!=1)stop("too many names") if(dataframe){ # make new one-column dataframe nbs <- rep(dim(tvcov)[2],dim(tvcov)[1]) tvcv <- data.frame(as.character(as.vector(t(as.matrix(tvcov)))),stringsAsFactors=TRUE)##tvcv <- as.data.frame(as.vector(t(as.matrix(tvcov)))) colnames(tvcv) <- names} # if factors, as.matrix transforms to character for next step else tvcov <- as.matrix(tvcov)} if(is.matrix(tvcov)&&is.character(tvcov)){ nbs <- rep(dim(tvcov)[2],dim(tvcov)[1]) if(is.null(names))names <- paste(deparse(substitute(tvcov))) if(length(names)!=1)stop("too many names") tvcv <- as.factor(as.vector(t(tvcov))) if(dataframe){ # make new one-column dataframe tvcv <- as.data.frame(as.vector(t(as.matrix(tvcov)))) colnames(tvcv) <- names} else { # make indicator matrix from factor mt <- terms(~tvcv) tmp3 <- dimnames(get(getOption("contrasts")[[if(is.ordered(tvcv))2 else 1]])(levels(tvcv),contrasts =TRUE))[[2]] if(is.null(tmp3))tmp3 <- 1:(length(levels(tvcv))-1) names <- paste(names,tmp3,sep="") # names <- paste(names,levels(tvcv)[-1],sep="") tvcv <- model.matrix(mt,model.frame(mt,na.action=NULL))[,-1,drop=FALSE] colnames(tvcv) <- names if(!is.null(units))units <- rep(units,dim(tvcv)[2])}} else if(is.matrix(tvcov)){ # # transform to a vector # nbs <- rep(dim(tvcov)[2],dim(tvcov)[1]) tvcv <- matrix(as.vector(t(tvcov)),ncol=1) if(!is.null(names)&&length(names)!=1)stop("too many names") colnames(tvcv) <- if(!is.null(names)) names else paste(deparse(substitute(tvcov)))} else if(is.list(tvcov)&&!is.data.frame(tvcov)){ if(inherits(tvcov,"tvcov")){ nbs <- tvcov$nobs tvcv <- tvcov$tvcov} else { ncols <- dim(as.data.frame(tvcov[[1]]))[2] # create names if(is.null(names)){ if(is.null(colnames(tvcov[[1]]))){ 
names <- if(is.matrix(tvcov[[1]]))paste(paste(deparse(substitute(tvcov))),1:ncols,sep="") else paste(deparse(substitute(tvcov)))} else names <- colnames(tvcov[[1]])} # concatenate list elements into a matrix ff <- TRUE for(i in tvcov){ # create one big dataframe i <- as.data.frame(i) if(dim(i)[2]!=ncols) stop("all elements of the list must have the same number of columns") nbs <- c(nbs,dim(i)[1]) if(ff){ tvcv <- i ff <- FALSE} else tvcv <- rbind(tvcv,i)} if(is.character(tvcv)||!dataframe){ # if necessary, transform to indicator matrix tmp <- tmp2 <- mt <- NULL for(i in 1:ncols){ if(is.numeric(tvcv[,i])){ tmp <- cbind(tmp,tvcv[,i]) tmp2 <- c(tmp2,names[i])} else { mt <- terms(~tvcv[,i]) tmp <- cbind(tmp,model.matrix(mt,model.frame(mt,na.action=NULL))[,-1,drop=FALSE]) tmp3 <- dimnames(get(getOption("contrasts")[[if(is.ordered(tvcv[,i]))2 else 1]])(levels(tvcv[,i]),contrasts =TRUE))[[2]] if(is.null(tmp3))tmp3 <- 1:(length(levels(tvcv[,i]))-1) tmp2 <- c(tmp2,paste(names[i],tmp3,sep=""))}} # tmp2 <- c(tmp2,paste(names[i],levels(as.factor(tvcv[,i]))[-1],sep=""))}} tvcv <- tmp names <- tmp2 ncols <- length(names) rm(tmp,tmp2,mt)} # create colnames if necessary if(is.null(colnames(tvcov[[1]]))){ if(is.null(names))names <- paste(deparse(substitute(tvcov)))} else if(length(names)!=ncols)names <- colnames(tvcov[[1]]) if((length(names)==1&&ncols>1)) names <- paste(names,1:ncols,sep="") if(length(names)!=ncols) stop(paste(ncols,"variable names required")) colnames(tvcv) <- names}} else if(!is.data.frame(tvcov)) stop("Intra-unit (time-varying) covariates must be a matrix, dataframe, or list") if(!is.null(interaction)){ # # if necessary, create interactions # name <- colnames(tvcv) units <- tvcov$units if(is.character(interaction)){ # if names supplied, find corresponding columns mat <- match(interaction,name) if(any(is.na(mat))) stop(paste("Intra-unit (time-varying) covariate(s)",ccov[is.na(mat)],"not found")) interaction <- mat} if(is.vector(interaction,mode="numeric")){ if(length(interaction)>length(name)) stop("too many interactions") if(!is.data.frame(tvcv))colnames(tvcv) <- NULL if(!is.null(ccov)){ # if interactions with time-constant covariates if(inherits(ccov,"tccov")){ ## bruce swihart edit: ## switch the next two lines to avoid ## R CMD Check error ## in examples of tvctomat.Rd units2 <- ccov$units ccov <- ccov$ccov if(!is.null(units)&&is.null(units2)) units2 <- rep("NA",dim(ccov)[2]) if(is.null(units)&&!is.null(units2)) units <- rep("NA",dim(tvcov$tvcov)[2])} if(!is.matrix(ccov)&&!is.data.frame(ccov)){ # if a vector, transform to matrix tmp <- paste(deparse(substitute(ccov))) ccov <- matrix(ccov) colnames(ccov) <- tmp} if(dim(ccov)[1]!=length(nbs)) stop("ccov does not have one observation per individual") # find desired covariates if(is.null(names))names <- colnames(ccov) mat <- match(names,colnames(ccov)) if(any(is.na(mat)))stop(paste("covariates",names[is.na(mat)],"not found")) oldtvcov <- tvcv if(!is.data.frame(oldtvcov))colnames(oldtvcov) <- name if(!is.data.frame(oldtvcov)&&!is.data.frame(ccov)){ # calculate interactions for ordinary matrices for(i in 1:length(interaction))for(j in 1:length(mat)){ oldtvcov <- cbind(oldtvcov,tvcv[,interaction[i]]*rep(ccov[,mat[j]],nbs)) name <- c(name,paste(name[interaction[i]],".",names[j],sep="")) if(!is.null(units))units <- c(units,paste(units[interaction[i]],".",units2[j],sep=""))}} else { # calculate interactions when expansion required for(i in 1:length(interaction)) for(j in 1:length(mat)){ mt <- terms(~tvcv[,interaction[i]]:rep(ccov[,mat[j]],nbs)) 
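## model.matrix() applied to the pure interaction formula above generates
## the indicator columns; the intercept is dropped immediately and, when
## both variables are factors, the columns involving the reference levels
## are removed further down so that only the interaction columns are kept.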
tmp <- model.matrix(mt,model.frame(mt,na.action=NULL))[,-1,drop=FALSE] if(!is.vector(tvcv[,interaction[i]],mode="numeric")){ if(!is.vector(ccov[,mat[j]],mode="numeric")){ nam <- NULL tmp2 <- paste(name[interaction[i]],levels(tvcv[,interaction[i]])[-1],".",sep="") for(k in 1:length(levels(ccov[,mat[j]])[-1])) nam <- c(nam,paste(tmp2,names[j],levels(ccov[,mat[j]])[-1][k],sep="")) tmp <- tmp[,-c(1:length(levels(tvcv[,interaction[i]])),seq(1,length(levels(tvcv[,interaction[i]]))*length(levels(ccov[,mat[j]])),by=length(levels(tvcv[,interaction[i]])))),drop=FALSE]} else { nam <- paste(paste(name[interaction[i]],levels(tvcv[,interaction[i]])[-1],sep=""),".",names[j],sep="") tmp <- tmp[,-1,drop=FALSE]} if(!is.null(units))units <- c(units,rep(units2[j],length(levels(tvcv[,interaction[i]])[-1])))} else { if(!is.vector(ccov[,mat[j]],mode="numeric")){ nam <- paste(name[interaction[i]],".",paste(names[j],levels(ccov[,mat[j]])[-1],sep=""),sep="") tmp <- tmp[,-1,drop=FALSE] if(!is.null(units))units <- c(units,rep(units[interaction[i]],length(levels(ccov[,mat[j]])[-1])))} else { nam <- paste(name[interaction[i]],".",names[j],sep="") if(!is.null(units))units <- c(units,paste(units[interaction[i]],".",units2[j],sep=""))}} colnames(tmp) <- nam name <- c(name,nam) oldtvcov <- cbind(oldtvcov,tmp)}} if(!is.data.frame(oldtvcov))colnames(oldtvcov) <- name oldtvcov <- list(tvcov=oldtvcov,nobs=nbs,units=units)} else if(length(interaction)==2){ # one pair of interactions if(is.data.frame(tvcv)){ # expand dataframe mt <- terms(~tvcv[,interaction[1]]:tvcv[,interaction[2]]) tmp <- model.matrix(mt,model.frame(mt,na.action=NULL))[,-1,drop=FALSE] if(!is.vector(tvcv[,interaction[1]],mode="numeric")){ if(!is.vector(tvcv[,interaction[2]],mode="numeric")){ names <- NULL tmp2 <- paste(name[interaction[1]],levels(tvcv[,interaction[1]])[-1],".",sep="") for(i in 1:length(levels(tvcv[,interaction[2]])[-1])) names <- c(names,paste(tmp2,name[interaction[2]],levels(tvcv[,interaction[2]])[-1][i],sep="")) tmp <- tmp[,-c(1:length(levels(tvcv[,interaction[1]])),seq(1,length(levels(tvcv[,interaction[1]]))*length(levels(tvcv[,interaction[2]])),by=length(levels(tvcv[,interaction[1]])))),drop=FALSE]} else { names <- paste(paste(name[interaction[1]],levels(tvcv[,interaction[1]])[-1],sep=""),".",name[interaction[2]],sep="") tmp <- tmp[,-1,drop=FALSE] if(!is.null(units))units <- c(units,paste(rep(units[interaction[1]],length(levels(tvcv[,interaction[1]])[-1])),".",units[interaction[2]],sep=""))}} else { if(!is.vector(tvcv[,interaction[2]],mode="numeric")){ names <- paste(name[interaction[1]],".",paste(name[interaction[2]],levels(tvcv[,interaction[2]])[-1],sep=""),sep="") tmp <- tmp[,-1,drop=FALSE] if(!is.null(units))units <- c(units,paste(units[interaction[1]],".",rep(units[interaction[2]],length(levels(tvcv[,interaction[2]])[-1])),sep=""))} else { names <- paste(name[interaction[1]],".",name[interaction[2]],sep="") if(!is.null(units))units <- c(units,paste(units[interaction[1]],".",units[interaction[2]],sep=""))}} colnames(tmp) <- names oldtvcov <- list(tvcov=cbind(tvcv,tmp),nobs=nbs,units=units)} else { units <- if(is.null(tvcov$units))NULL else c(tvcov$units,paste(tvcov$units[interaction[1]],".",tvcov$units[interaction[2]],sep="")) oldtvcov <- list(tvcov=cbind(tvcv,tvcv[,interaction[1]]*tvcv[,interaction[2]]),nobs=nbs,units=units)} if(!is.data.frame(oldtvcov$tvcov)) colnames(oldtvcov$tvcov) <- c(name,paste(name[interaction[1]],".",name[interaction[2]],sep=""))} else stop("interaction must be a vector containing column numbers or variable 
names") class(oldtvcov) <- "tvcov"}} else if(!is.null(oldtvcov)){ # # check for variable descriptions # if(!is.null(description)){ if(!is.list(description))stop("description must be a list") if(!all(tmp <- names(description)%in%colnames(tvcv))) stop(paste("variable(s)",names(description)[!tmp],"not found")) for(i in description)if(!is.character(i)) stop("description list must contain character vectors")} # # if old tvcov, combine with new one # if(!inherits(oldtvcov,"tvcov")) stop("oldtvcov must have class, tvcov") oldtvcov$description <- c(oldtvcov$description,description) if(!is.null(oldtvcov$units)||!is.null(units)){ if(is.null(oldtvcov$units)) oldtvcov$units <- rep(NA,dim(oldtvcov$tvcov)[2]) if(is.null(units))units <- rep(NA,dim(tvcv)[2]) oldtvcov$units <- c(oldtvcov$units,units)} if((dim(oldtvcov$tvcov)[1]==dim(tvcv)[1])&&all(oldtvcov$nobs==nbs)){ if(dataframe)oldtvcov$tvcov <- data.frame(oldtvcov$tvcov,tvcv,stringsAsFactors = TRUE) else oldtvcov$tvcov <- cbind(oldtvcov$tvcov,tvcv)} else stop("old and new covariates do not have the same numbers of observations")} else { # check units if(!is.null(units)){ if(!is.character(units)) stop("units must be a character vector") if(length(units)!=dim(tvcv)[2]) stop("units must be supplied for all covariates")} if(dataframe)tvcv <- as.data.frame(tvcv) oldtvcov <- list(tvcov=tvcv,nobs=nbs,units=units,description=description) class(oldtvcov) <- "tvcov"} if(!is.null(oldtvcov$units))names(oldtvcov$units) <- colnames(oldtvcov$tvcov) # # if no factor variables present, return a matrix anyway # if(is.data.frame(oldtvcov$tvcov)){ fac <- FALSE for(i in 1:dim(oldtvcov$tvcov)[2])if(!is.vector(oldtvcov$tvcov[,i],mode="numeric")){ fac <- TRUE break} if(!fac)oldtvcov$tvcov <- as.matrix(oldtvcov$tvcov)} oldtvcov} ### functions to create a repeated object ### ### method to create repeated object removing NAs ### rmna <- function(response, ccov=NULL, tvcov=NULL){ # # if necessary, convert response # if(!inherits(response,"response"))response <- restovec(response) # # if necessary, expand nobs # if(length(response$nobs)==1&&response$nobs==1) response$nobs <- rep(1,length(response$y)) # # if necessary, convert ccov # if(!is.null(ccov)){ if(!inherits(ccov,"tccov"))ccov <- tcctomat(ccov) if(length(response$nobs)!=dim(ccov$ccov)[1]) stop("Numbers of individuals for response and for inter-unit (time-constant) covariates do not agree.")} # # if necessary, convert tvcov # if(!is.null(tvcov)){ if(!inherits(tvcov,"tvcov"))tvcov <- tvctomat(tvcov) if(length(response$nobs)!=length(tvcov$nobs)|| any(response$nobs!=tvcov$nobs)) stop("Numbers of observations for response and intra-unit (time-varying) covariates do not agree.")} # # create NA indicators # rna <- rep(TRUE,dim(response$y)[1]) for(i in 1:dim(response$y)[2])rna <- rna&!is.na(response$y[,i]) if(!is.null(response$times))rna <- rna&!is.na(response$times) if(!is.null(response$nest))rna <- rna&!is.na(response$nest) if(!is.null(response$wt))rna <- rna&!is.na(response$wt) if(!is.null(response$coordinates)) rna <- rna&!is.na(response$coordinates[,1])&!is.na(response$coordinates[,2]) if(!is.null(response$n)){ for(i in 1:dim(response$y)[2])if(any(!is.na(response$n[,i]))) rna <- rna&!is.na(response$n[,i])} for(i in 1:length(response$nobs)) if(!is.null(ccov)&&any(is.na(ccov$ccov[i,]))) rna[covind(response)==i] <- FALSE if(!is.null(tvcov)) for(i in 1:dim(tvcov$tvcov)[2])rna <- rna&!is.na(tvcov$tvcov[,i]) # # remove NAs # if(any(!rna)){ # remove NAs from variables associated with response response$y <- 
response$y[rna,,drop=FALSE] if(!is.null(response$times))response$times <- response$times[rna] if(!is.null(response$nest))response$nest <- response$nest[rna] if(!is.null(response$wt))response$wt <- response$wt[rna] if(!is.null(response$coordinates)) response$coordinates <- response$coordinates[rna,] if(!is.null(response$n))response$n <- response$n[rna,,drop=FALSE] if(!is.null(response$censor)){ response$censor <- response$censor[rna,,drop=FALSE] if(all(response$censor==1))response$censor <- NULL} if(!is.null(response$delta)&&length(response$delta)>1) response$delta <- response$delta[rna,,drop=FALSE] if(!is.null(tvcov))tvcov$tvcov <- tvcov$tvcov[rna,,drop=FALSE] # correct nobs tmp <- NULL j <- c(0,cumsum(response$nobs)) for(i in 1:length(response$nobs)){ tmp <- c(tmp,sum(rna[(j[i]+1):j[i+1]])) if(tmp[i]==0) warning(paste("Individual",i,"has no observations"))} response$nobs <- tmp[tmp>0] # remove NAs from ccov if(!is.null(ccov)){ ccov$ccov <- ccov$ccov[tmp>0,,drop=FALSE] for(i in 1: dim(ccov$ccov)[2]) if(length(unique(ccov$ccov[,i]))==1) warning(paste("covariate",colnames(ccov$ccov)[i],"has only one value\n"))} # remove NAs from tvcov if(!is.null(tvcov)){ tvcov$nobs <- response$nobs for(i in 1: dim(tvcov$tvcov)[2]) if(length(unique(tvcov$tvcov[,i]))==1) warning(paste("covariate",colnames(tvcov$tvcov)[i],"has only one value\n"))}} # # if independent observations, reset nobs # if(all(response$nobs==1))response$nobs <- 1 z <- list(response=response,tvcov=tvcov,ccov=ccov) class(z) <- "repeated" z} ### method to create repeated object leaving NAs ### lvna <- function(response, ccov=NULL, tvcov=NULL){ # # if necessary, convert response # if(!inherits(response,"response"))response <- restovec(response) # # if necessary, expand nobs # if(length(response$nobs)==1&&response$nobs==1) response$nobs <- rep(1,length(response$y)) # # if necessary, convert ccov # if(!is.null(ccov)){ if(!inherits(ccov,"tccov"))ccov <- tcctomat(ccov) if(length(response$nobs)!=dim(ccov$ccov)[1]) stop("Numbers of individuals for response and for intra-unit (time-constant) covariates do not agree.")} # # if necessary, convert tvcov # if(!is.null(tvcov)){ if(!inherits(tvcov,"tvcov"))tvcov <- tvctomat(tvcov) if(any(response$nobs!=tvcov$nobs)) stop("Numbers of observations for response and intra-unit (time-varying) covariates do not agree.")} # # create NA indicators # rna <- rep(TRUE,dim(response$y)[1]) for(i in 1:dim(response$y)[2])rna <- rna&!is.na(response$y[,i]) if(!is.null(response$times))rna <- rna&!is.na(response$times) if(!is.null(response$nest))rna <- rna&!is.na(response$nest) if(!is.null(response$coordinates)) rna <- rna&!is.na(response$coordinates[,1])&!is.na(response$coordinates[,2]) if(!is.null(response$n)){ for(i in 1:dim(response$y)[2])if(any(!is.na(response$n[,i]))) rna <- rna&!is.na(response$n[,i])} for(i in 1:length(response$nobs)) if(!is.null(ccov)&&any(is.na(ccov$ccov[i,])))rna[covind(response)==i] <- FALSE if(!is.null(tvcov)) for(i in 1:dim(tvcov$tvcov)[2])rna <- rna&!is.na(tvcov$tvcov[,i]) # # if independent observations, reset nobs # if(all(response$nobs==1))response$nobs <- 1 z <- list(response=response,tvcov=tvcov,ccov=ccov, NAs=if(any(!rna))!rna else NULL) class(z) <- "repeated" z} ### method to create repeated object from a dataframe ### dftorep <- function(dataframe, response, id=NULL, times=NULL, censor=NULL, totals=NULL, weights=NULL, nest=NULL, delta=NULL, coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=TRUE){ if(missing(dataframe)||!is.data.frame(dataframe)) stop("a dataframe must be 
supplied") if(missing(response)||!is.character(response)) stop("name(s) of response variables must be supplied") # # find response information and construct object # cn <- colnames(dataframe) nc <- match(response,cn) if(any(is.na(nc)))stop(paste("response",response[is.na(nc)],"not found")) tot <- matrix(NA,ncol=nc,nrow=dim(dataframe)[1]) for(i in nc)if(is.matrix(dataframe[[i]])&&dim(dataframe[[i]])[2]==2){ tot[,i] <- dataframe[[i]][,1]+dataframe[[i]][,2] dataframe[[i]] <- dataframe[[i]][,1]} if(all(is.na(tot)))tot <- NULL if(!is.numeric(z <- as.matrix(dataframe[,nc,drop=FALSE]))) stop("response must be numeric") z <- list(response=list(y=z,nobs=NULL,times=NULL,nest=NULL,coordinates=NULL, censor=NULL,n=tot,wt=NULL,delta=NULL,units=NULL,type=NULL), ccov=NULL,tvcov=NULL) class(z) <- "repeated" class(z$response) <- "response" tobs <- dim(z$response$y)[1] nrcol <- dim(z$response$y)[2] if(is.null(type))z$response$type <- rep("unknown",nrcol) else if(length(type)!=nrcol)stop("a type must be supplied for each response") else { for(i in 1:length(type)) z$response$type[i] <- match.arg(type[i], c("nominal","ordinal","discrete","duration","continuous","unknown")) if(any(is.na(z$response$type))) z$response$type[is.na(z$response$type)] <- "unknown"} rna <- rep(TRUE,tobs) for(i in 1:nrcol)rna <- rna&!is.na(z$response$y[,i]) if(is.null(id)) z$response$nobs <- if(is.null(times)) rep(1,tobs) else tobs else { if(!is.character(id)||length(id)>1) stop("id must be the name of one variable") nc <- match(id,cn) if(is.na(nc))stop("id not found") id <- as.vector(dataframe[,nc]) if(is.character(id)||is.factor(id))id <- as.numeric(as.factor(id)) else if(any(diff(id)!=0&diff(id)!=1,na.rm=TRUE)) warning("id not consecutively numbered") nobs <- table(id) z$response$nobs <- as.vector(nobs[match(unique(id),names(nobs))])} if(any(z$response$nobs!=1)&&length(z$response$nobs)>1)for(i in unique(id)){ if(any(diff((1:tobs)[id==i])>1,na.rm=TRUE)) stop(paste("observations for individual",i,"not together in table"))} if(!is.null(nest)){ if(all(z$response$nobs==1)) stop("these are not repeated measurements - nest not valid") if(!is.character(nest)||length(nest)>1) stop("nest must be the name of one variable") nc <- match(nest,cn) if(is.na(nc))stop("nest not found") z$response$nest <- as.vector(dataframe[,nc]) if(is.character(z$response$nest)) z$response$nest <- as.numeric(as.factor(z$response$nest)) else if(!is.numeric(z$response$nest))stop("nest must be numeric") rna <- rna&!is.na(z$response$nest)} if(!is.null(times)){ if(!is.character(times)||length(times)>1) stop("times must be the name of one variable") nc <- match(times,cn) if(is.na(nc))stop("times not found") z$response$times <- as.vector(dataframe[,nc]) if(!is.numeric(z$response$times))stop("times must be numeric") rna <- rna&!is.na(z$response$times)} if(!is.null(times))for(i in unique(id)) if(!is.null(nest))for(j in unique(z$response$nest)){ if(any(diff(z$response$times[id==i&z$response$nest==j])<0,na.rm=TRUE)) stop(paste("negative time step for individual",i))} else if(any(diff(z$response$times[id==i])<0,na.rm=TRUE)) stop(paste("negative time step for individual",i)) if(!is.null(nest))for(i in unique(id)) if(any(diff(z$response$nest[id==i])!=0& diff(z$response$nest[id==i])!=1,na.rm=TRUE)) stop(paste("nest for individual",i,"not consecutively numbered")) if(!is.null(censor)){ if(!is.character(censor)||length(censor)!=nrcol) stop("censor must have one name per response variable") nc <- match(censor,cn) if(any(is.na(nc)))stop("censor",censor[is.na(nc)],"not found") 
z$response$censor <- as.matrix(dataframe[,nc,drop=FALSE]) if(!is.numeric(z$response$censor))stop("censor must be numeric") if(any(z$response$censor!=1&z$response$censor!=0& z$response$censor!=-1,na.rm=TRUE)) stop("censor indicator can only have values, -1, 0, 1") for(i in 1:nrcol)if(!all(is.na(z$response$censor[,i]))){ rna <- rna&!is.na(z$response$censor[,i]) if(z$response$type[i]=="unknown") z$response$type[i] <- "duration"}} if(!is.null(totals)){ if(!is.character(totals)||length(totals)!=nrcol) stop("totals must have one name per response variable") nc <- match(totals,cn) if(any(is.na(nc)))stop("totals",totals[is.na(nc)],"not found") z$response$n <- as.matrix(dataframe[,nc,drop=FALSE]) if(!is.numeric(z$response$n))stop("totals must be numeric") if(any(z$response$y<0|z$response$n<z$response$y,na.rm=TRUE)) stop("all totals must be >= to responses") for(i in 1:nrcol)if(!all(is.na(z$response$n[,i]))){ rna <- rna&!is.na(z$response$n[,i]) if(z$response$type[i]=="unknown") z$response$type[i] <- "nominal"}} if(!is.null(weights)){ if(!is.character(weights)||length(weights)>1) stop("weights must be the name of one variable") nc <- match(weights,cn) if(is.na(nc))stop("weights not found") z$response$wt <- as.vector(dataframe[,nc]) if(!is.numeric(z$response$wt))stop("weights must be numeric") rna <- rna&!is.na(z$response$wt)} if(!is.null(coordinates)){ if(!is.character(coordinates)||(length(coordinates)!=2&& length(coordinates)!=3)) stop("coordinates must be the name of 2 or 3 variables") nc <- match(coordinates,cn) if(any(is.na(nc))) stop("coordinates",coordinates[is.na(nc)],"not found") z$response$coordinates <- as.matrix(dataframe[,nc,drop=FALSE]) if(!is.numeric(z$response$coordinates)) stop("coordinates must be numeric") for(i in 1:length(coordinates)) rna <- rna&!is.na(z$response$coordinates[,i])} # # find time-varying covariates # if(!is.null(tvcov)){ if(all(z$response$nobs==1)) stop("these are not repeated measurements - tvcov not valid") z$tvcov <- list(tvcov=NULL,nobs=z$response$nobs) class(z$tvcov) <- "tvcov" nc <- match(tvcov,cn) if(any(is.na(nc)))stop("tvcov",tvcov[is.na(nc)],"not found") z$tvcov$tvcov <- dataframe[,nc,drop=FALSE] for(i in 1:length(tvcov))rna <- rna&!is.na(z$tvcov$tvcov[,i]) # if no factor variables present, return a matrix anyway fac <- FALSE for(i in 1:dim(z$tvcov$tvcov)[2]) if(!is.vector(z$tvcov$tvcov[,i],mode="numeric")){ fac <- TRUE break} if(!fac)z$tvcov$tvcov <- as.matrix(z$tvcov$tvcov)} # # find time-constant covariates # if(!is.null(ccov)){ z$ccov <- list(ccov=NULL) class(z$ccov) <- "tccov" nc <- match(ccov,cn) if(any(is.na(nc)))stop("ccov",ccov[is.na(nc)],"not found") z$ccov$ccov <- dataframe[,nc,drop=FALSE] for(i in unique(id))for(j in 1:length(ccov)) if(sum(!is.na(unique(z$ccov$ccov[id==i,j])))>1) stop(paste("ccov",ccov[j],"for individual",i,"not constant")) for(i in 1:length(ccov))rna <- rna&!is.na(z$ccov$ccov[,i]) j <- c(0,cumsum(z$response$nobs)[-length(z$response$nobs)])+1 z$ccov$ccov <- z$ccov$ccov[j,,drop=FALSE] # if no factor variables present, return a matrix anyway fac <- FALSE for(i in 1:dim(z$ccov$ccov)[2]) if(!is.vector(z$ccov$ccov[,i],mode="numeric")){ fac <- TRUE break} if(!fac)z$ccov$ccov <- as.matrix(z$ccov$ccov)} # # remove NAs # if(na.rm&&any(!rna)){ # remove NAs from variables associated with response z$response$y <- z$response$y[rna,,drop=FALSE] if(!is.null(z$response$times))z$response$times <- z$response$times[rna] if(!is.null(z$response$nest))z$response$nest <- z$response$nest[rna] if(!is.null(z$response$coordinates)) z$response$coordinates <- z$response$coordinates[rna,] if(!is.null(z$response$n))z$response$n <- z$response$n[rna,,drop=FALSE] if(!is.null(z$response$censor)){ z$response$censor <- z$response$censor[rna,,drop=FALSE] if(all(z$response$censor==1))z$response$censor <- NULL} 
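## Rows flagged in 'rna' (an NA in the response, times, nesting, censoring,
## weights, coordinates, or covariates) are dropped from every component so
## that all pieces stay aligned; the per-individual counts in nobs are then
## recomputed from the retained rows below.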
if(!is.null(z$response$delta)&&length(z$response$delta)>1) z$response$delta <- z$response$delta[rna,,drop=FALSE] if(!is.null(z$tvcov))z$tvcov$tvcov <- z$tvcov$tvcov[rna,,drop=FALSE] # correct nobs tmp <- NULL j <- c(0,cumsum(z$response$nobs)) for(i in 1:length(z$response$nobs)){ tmp <- c(tmp,sum(rna[(j[i]+1):j[i+1]])) if(tmp[i]==0) warning(paste("Individual",i,"has no observations"))} z$response$nobs <- tmp[tmp>0] # remove NAs from ccov if(!is.null(z$ccov)){ z$ccov$ccov <- z$ccov$ccov[tmp>0,,drop=FALSE] for(i in 1: dim(z$ccov$ccov)[2]) if(length(unique(z$ccov$ccov[,i]))==1) warning(paste("covariate",colnames(z$ccov$ccov)[i],"has only one value\n"))} # remove NAs from tvcov if(!is.null(z$tvcov)){ z$tvcov$nobs <- z$response$nobs for(i in 1: dim(z$tvcov$tvcov)[2]) if(length(unique(z$tvcov$tvcov[,i]))==1) warning(paste("covariate",colnames(z$tvcov$tvcov)[i],"has only one value\n"))}} if(!na.rm&&any(!rna))z$NAs <- !rna # # if independent observations, reset nobs # if(all(z$response$nobs==1))z$response$nobs <- 1 z} ### methods for objects created by these functions ### print methods ### print.response <- function(x, nindmax=50, ...){ z <- x; rm(x) nobs <- nobs(z) nind <- length(nobs) cn <- colnames(z$y) tmp <- rbind(colnames(z$y),z$type) rntmp <- c(if(length(cn)>1)"Names:" else "Name: ","Type:") if(!is.null(z$units)){ tmp <- rbind(tmp,z$units) rntmp <- c(rntmp,"Units:")} rownames(tmp) <- rntmp colnames(tmp) <- rep("",length(cn)) print(tmp,quote=FALSE) if(any(nobs>1))cat("Number of individuals: ",nind,"\n") cat("Total number of observations: ",dim(z$y)[1],"\n") if(nind>1&&any(nobs>1)){ if(all(diff(nobs)==0)) cat("Number of observations per individual:",nobs[1],"\n") else { if(nind>nindmax)cat("Range of observations per individual: ", range(nobs),"\n") else cat("Number of observations per individual:\n", nobs,"\n")}} av <- rg <- NULL for(i in 1:dim(z$y)[2]){ y <- if(is.null(z$n)||all(is.na(z$n[,i])))z$y[,i] else z$y[,i]/z$n[,i] av <- c(av,if((z$type[i]=="nominal"&&(is.null(z$n)||all(is.na(z$n[,i])))) ||z$type[i]=="ordinal")NA else mean(y,na.rm=TRUE)) rg <- c(rg,range(y,na.rm=TRUE))} names(av) <- cn rg <- matrix(rg,nrow=2) dimnames(rg) <- list(c("lower","upper"),cn) if(!all(is.na(av))){ cat("Mean response:") if(dim(z$y)[2]>1){ cat("\n") print(av) cat("\n")} else cat(" ",av,"\n")} cat("Range of responses:") if(dim(z$y)[2]>1){ cat("\n") print(rg) cat("\n")} else cat(" ",rg,"\n") if(any(is.na(z$y))) cat("Number of NAs: ",sum(is.na(z$y)),"\n") if(!is.null(z$wt)) cat("Number of positive weights: ",sum(z$wt>0),"\n") if(!is.null(z$times)){ cat("Mean time: ",mean(z$times,na.rm=TRUE),"\n") cat("Range of times: ",range(z$times,na.rm=TRUE),"\n") if(nind>1&&is.null(z$nest)){ mn <- if(any(z$times<0,na.rm=TRUE))z$times[cumsum(c(1,nobs[1:(nind-1)]))] else 0 cat("Mean total time: ",mean(z$times[cumsum(nobs)]-mn,na.rm=TRUE),"\n") cat("Range of total times: ",range(z$times[cumsum(nobs)]-mn,na.rm=TRUE),"\n")}} if(!is.null(z$nest)) cat("Maximum number of clusters: ",max(z$nest),"\n") if(!is.null(z$censor)){ cens0 <- cens1 <- n0 <- n1 <- NULL for(i in 1:dim(z$y)[2]){ if(sum(z$censor[,i]==0,na.rm=TRUE)>0){ cens0 <- c(cens0,sum(z$censor[,i]==0,na.rm=TRUE)) n0 <- c(n0,cn[i])} if(sum(z$censor[,i]==-1,na.rm=TRUE)>0){ cens1 <- c(cens1,sum(z$censor[,i]==-1,na.rm=TRUE)) n1 <- c(n1,cn[i])}} names(cens0) <- n0 names(cens1) <- n1 if(!is.null(cens0)){ cat("Number of right-censored observations:") if(length(cens0)>1){ cat("\n") print(cens0) cat("\n")} else cat(" ",cens0," (",n0,")\n",sep="")} if(!is.null(cens1)){ cat("Number of 
left-censored observations:") if(length(cens1)>1){ cat("\n") print(cens1) cat("\n")} else cat(" ",cens1," (",n1,")\n",sep="")}} if(!is.null(z$delta)&&length(z$delta)==1) cat("Unit of measurement: ",z$delta,"\n") if(!is.null(z$description))for(i in 1:length(z$description)){ cat(names(z$description)[i],": ",sep="") cat(z$description[[i]],"\n")}} print.tccov <- function(x, ...){ z <- x; rm(x) if(is.function(z))print.default(unclass(z)) else { tmp <- matrix(colnames(z$ccov),nrow=1) rn <- "Names:" if(!is.null(z$units)){ tmp <- rbind(tmp,z$units) rn <- c(rn,"Units:")} dimnames(tmp) <- list(rn,rep("",dim(z$ccov)[2])) print(tmp,quote=FALSE) cat("Number of individuals: ",dim(z$ccov)[1],"\n") if(!is.null(z$description))for(i in 1:length(z$description)){ cat(names(z$description)[i],": ",sep="") cat(z$description[[i]],"\n")}}} print.tvcov <- function(x, nindmax=50, ...){ z <- x; rm(x) if(is.function(z))print.default(unclass(z)) else { tmp <- matrix(colnames(z$tvcov),nrow=1) rn <- "Names:" if(!is.null(z$units)){ tmp <- rbind(tmp,z$units) rn <- c(rn,"Units:")} dimnames(tmp) <- list(rn,rep("",dim(z$tvcov)[2])) print(tmp,quote=FALSE) cat("Number of individuals: ",length(nobs(z)),"\n") cat("Number of observations: ",sum(nobs(z)),"\n") if(length(nobs(z))>nindmax)cat("Range of observations per individual:", range(nobs(z)),"\n") else cat("Number of observations per individual:\n", nobs(z),"\n") if(!is.null(z$description))for(i in 1:length(z$description)){ cat(names(z$description)[i],": ",sep="") cat(z$description[[i]],"\n")}}} print.repeated <- function(x, nindmax=50, ...){ z <- x; rm(x) if(is.function(z))print.default(unclass(z)) else { cat("\nResponse variable:\n") print.response(z$response,nindmax=nindmax) if(!is.null(z$ccov)){ cat("\nInter-unit (time-constant) covariates:\n") print.tccov(z$ccov)} if(!is.null(z$tvcov)){ cat("\nIntra-unit (time-varying) covariates:\n") print.tvcov(z$tvcov,nindmax=nindmax)}}} ### plot methods ### plot.response <- function(x, name=NULL, nind=NULL, nest=1, ccov=NULL, add=FALSE, lty=NULL, pch=NULL, main=NULL, ylim=NULL, xlim=NULL, xlab=NULL, ylab=NULL, ...){ if(is.null(name)){ if(dim(x$y)[2]>1)stop("please specify which variable to plot") name <- colnames(x$y)} else if(length(name)>1)stop("only one variable can be plotted") col <- match(name,colnames(x$y)) if(is.na(col))stop(paste(name,"not found")) if(x$type[col]=="ordinal"){ # special case: ordinal response #return(plot.ordinal(z=x,ccov=ccov,main=main,xlab=xlab,ylab=ylab, #xlim=xlim,ylim=ylim,lty=lty,add=add,...)) stop(paste("email Bruce to email Jim for plot.ordinal() implementation")) } if(is.null(ylab))ylab <- name if(is.null(x$times)){ # # when no times, set up for index plot # if(all(nobs(x)==1)){ x$times <- 1:length(nobs(x)) # set to value different from 1 so not time series x$nobs <- 5} else x$times <- sequence(nobs(x)) if(is.null(xlab)) xlab <- "Index number"} else if(is.null(xlab)) xlab <- "Time" if(is.null(ylim))ylim <- range(x$y[,col],na.rm=TRUE) if(is.null(xlim))xlim <- range(x$times,na.rm=TRUE) tnest <- if(!is.null(x$nest)) x$nest else 1 # # initialize # nm <- covind(x) j <- 1 lt <- 0 # # if no individuals chosen, plot them all # if(is.null(nind))nind <- 1:length(nobs(x)) # # if binomial, plot proportions # y <- if(is.null(x$n)||all(is.na(x$n[,col])))x$y[,col] else x$y[,col]/x$n[,col] if(!is.null(lty)){ # # set up line types # if(length(lty)==1)lty <- rep(lty,length(nind)) else if(length(lty)!=length(nind)) stop("lty must have one value for each item in nind")} if(!is.null(pch)){ # # set up symbol types # 
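## pch is recycled to one value per selected individual, mirroring the
## handling of lty above, so a single plotting symbol can be given for all
## of the curves.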
if(length(pch)==1)pch <- rep(pch,length(nind)) else if(length(pch)!=length(nind)) stop("pch must have one value for each item in nind")} for(i in 1:length(nobs(x)))if(any(i==nind))for(k in nest){ lt <- if(is.null(lty))lt%%6+1 else lty[j] if(!add&&j==1)plot(x$times[nm==i&k==tnest],y[nm==i&k==tnest],lty=lt, type="l",ylim=ylim,xlim=xlim,main=main,ylab=ylab,xlab=xlab,...) else lines(x$times[nm==i&k==tnest],y[nm==i&k==tnest],lty=lt) if(!is.null(pch))points(x$times[nm==i&k==tnest], y[nm==i&k==tnest],pch=pch[j]) j <- j+1}} plot.repeated <- function(x, name=NULL, nind=NULL, nest=1, ccov=NULL, add=FALSE, lty=NULL, pch=NULL, main=NULL, ylim=NULL, xlim=NULL, xlab=NULL, ylab=NULL, ...){ if(is.null(name)){ if(dim(x$response$y)[2]>1)stop("please specify which variable to plot") name <- colnames(x$response$y)} else if(length(name)>1)stop("only one variable name can be supplied") col <- match(name,colnames(x$response$y)) if(is.na(col)){ col <- match(name,colnames(x$tvcov$tvcov)) if(is.na(col))stop(paste(name,"not found")) variable <- "tvc"} else variable <- "response" # # check individuals to plot # if no individuals chosen, plot them all # uncov <- NULL if(!is.null(nind)&&!is.null(ccov)) stop("only one of nind and ccov can be specified") if(is.null(nind)&&is.null(ccov))nind <- 1:length(nobs(x)) else if(!is.null(ccov)&&x$response$type[col]!="ordinal"){ if(is.numeric(ccov)){ if(length(ccov)!=dim(x$ccov$ccov)[2]) stop("a covariate value must be given for each covariate") tccov <- x$ccov$ccov if(is.data.frame(tccov))for(i in 1:length(ccov)){ if(is.factor(tccov[[i]])) tccov[[i]] <- as.numeric(tccov[[i]])} tccov <- as.matrix(tccov) nind <- NULL for(i in 1:length(nobs(x))) if(all(ccov==tccov[i,]))nind <- c(nind,i)} else if(is.character(ccov)){ if(is.null(x$ccov$ccov))stop("no covariates found") if(length(ccov)>1) stop("only one variable name can be given in ccov") col2 <- match(ccov,colnames(x$ccov$ccov)) if(is.na(col2))stop(paste("covariate",ccov,"not found")) uncov <- unique(x$ccov$ccov[,col2]) if(is.factor(uncov))uncov <- as.character(uncov) if(length(uncov)>6) stop(paste(ccov,"has too many distinct values to plot")) if(length(uncov)==1)uncov <- NULL} else stop("ccov must be a vector of covariate values or a covariate name")} if(variable=="response"){ # special case: ordinal response if(x$response$type[col]=="ordinal") #plot.ordinal(z=x,ccov=ccov,main=main,xlab=xlab,ylab=ylab, # xlim=xlim,ylim=ylim,lty=lty,add=add,...) stop(paste("email Bruce to email Jim for plot.ordinal() implementation")) else { if(is.null(uncov))plot.response(x$response,name=name,nind=nind, nest=nest,add=add,lty=lty,pch=pch,main=main, ylim=ylim,xlim=xlim,xlab=xlab,ylab=ylab,...) else { mfrow <- if(length(uncov)==2)c(2,1) else if(length(uncov)<5)c(2,2) else c(3,2) oldpar <- par(mfrow=mfrow,no.readonly=TRUE) for(i in uncov){ nind <- NULL main <- paste(ccov,"=",i) for(j in 1:length(nobs(x))) if(i==x$ccov$ccov[j,col2]) nind <- c(nind,j) plot.response(x$response,name=name,nind=nind, nest=nest,add=add,lty=lty,pch=pch,main=main, ylim=ylim,xlim=xlim,xlab=xlab,ylab=ylab,...)} par(oldpar)}}} else if(variable=="tvc"){ # set up covariate as a "response" if(is.null(ylab))ylab <- colnames(x$tvcov$tvcov)[col] zz <- list(times=x$response$times,y=x$tvcov$tvcov[,col,drop=FALSE], nobs=x$tvcov$nobs,n=NULL,type="unknown") class(zz) <- "response" if(is.null(uncov))plot.response(zz,name=name,nind=nind,nest=nest, add=add,lty=lty,pch=pch,main=main,ylim=ylim,xlim=xlim, xlab=xlab,ylab=ylab,...) 
else { mfrow <- if(length(uncov)==2)c(2,1) else if(length(uncov)<5)c(2,2) else c(3,2) oldpar <- par(mfrow=mfrow,no.readonly=TRUE) for(i in uncov){ nind <- NULL main <- paste(ccov,"=",i) for(j in 1:length(nobs(x))) if(i==x$ccov$ccov[j,col2]) nind <- c(nind,j) plot.response(zz,name=name,nind=nind,nest=nest,add=add, lty=lty,pch=pch,main=main,ylim=ylim,xlim=xlim, xlab=xlab,ylab=ylab,...)} par(oldpar)}}} ### methods to find the response variable ### response <- function(z, ...) UseMethod("response") response.response <- function(z, nind=NULL, names=NULL, ...){ if(is.null(nind))nind <- 1:dim(z$y)[1] else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") if(is.null(names))col <- 1:dim(z$y)[2] else { col <- match(names,colnames(z$y)) if(any(is.na(col)))stop(paste(names[is.na(col)],"not found"))} cn <- tmp <- NULL for(i in col){ if(!is.null(z$n)&&!any(is.na(z$n[,i]))){ # binomial response tmp <- cbind(tmp,z$y[,i],z$n[,i]-z$y[,i]) cn <- c(cn,colnames(z$y)[i],paste("n-",colnames(z$y)[i],sep=""))} else if(!is.null(z$censor)&&!any(is.na(z$censor[,i]))){ # censored response with indicator tmp <- cbind(tmp,z$y[,i],z$censor[,i]) cn <- c(cn,colnames(z$y)[i],paste(colnames(z$y)[i],".cens",sep=""))} else { tmp <- cbind(tmp,z$y[,i]) cn <- c(cn,colnames(z$y)[i])}} colnames(tmp) <- cn tmp[nind,]} response.repeated <- function(z, nind=NULL, names=NULL, ...){ if(is.null(nind))nind <- 1:dim(z$response$y)[1] else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") if(is.null(names))col <- 1:dim(z$response$y)[2] else { col <- match(names,colnames(z$response$y)) if(any(is.na(col)))stop(paste(names[is.na(col)],"not found"))} cn <- tmp <- NULL for(i in col){ if(!is.null(z$response$n)&&!any(is.na(z$response$n[,i]))){ # binomial response tmp <- cbind(tmp,z$response$y[,i],z$response$n[,i]-z$response$y[,i]) cn <- c(cn,colnames(z$response$y)[i],paste("n-",colnames(z$response$y)[i],sep=""))} else if(!is.null(z$response$censor)&&!any(is.na(z$response$censor[,i]))){ # censored response with indicator tmp <- cbind(tmp,z$response$y[,i],z$response$censor[,i]) cn <- c(cn,colnames(z$response$y)[i],paste(colnames(z$response$y)[i],".cens",sep=""))} else { tmp <- cbind(tmp,z$response$y[,i]) cn <- c(cn,colnames(z$response$y)[i])}} colnames(tmp) <- cn tmp[nind,]} ### methods for indexing of time-constant covariates for individuals ### covind <- function(z, ...) UseMethod("covind") covind.default <- function(z, ...) rep(1:length(nobs(z)),nobs(z)) ### methods to find numbers of observations/individual ### nobs <- function(z, ...) UseMethod("nobs") nobs.default <- function(z, ...) { if(is.null(z$response)||is.null(z$response$nobs))return(NULL) if(length(z$response$nobs)>1||z$response$nobs>1)z$response$nobs else rep(1,dim(z$response$y)[1])} nobs.response <- function(z, ...) { if(length(z$nobs)>1||z$nobs>1)z$nobs else rep(1,length(z$y))} nobs.tvcov <- function(z, ...) z$nobs nobs.data.frame <- function(z, ...) rep(1,dim(z)[1]) ### methods to find times ### times <- function(z, ...) 
UseMethod("times") times.default <- function(z, nind=NULL, ...){ if(is.null(nind)||is.null(z$response$times))return(z$response$times) else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") z$response$times[nind]} times.response <- function(z, nind=NULL, ...){ if(is.null(nind)||is.null(z$times))return(z$times) else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") z$times[nind]} ### methods to find unit of measurement/Jacobian ### delta <- function(z, ...) UseMethod("delta") delta.response <- function(z, nind=NULL, names=NULL, ...){ # # find individuals # if(is.null(z$delta))return(NULL) if(is.null(nind))nind <- 1:dim(z$y)[1] else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") # # find variables # col <- if(is.null(names))1:dim(z$y)[2] else match(names,colnames(z$y)) if(any(is.na(col)))stop(paste(names[is.na(col)],"not found")) z$delta[nind,col]} delta.repeated <- function(z, nind=NULL, names=NULL, ...){ # # find individuals # if(is.null(z$response$delta))return(NULL) if(is.null(nind))nind <- 1:dim(z$response$y)[1] else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") # # find variables # col <- if(is.null(names))1:dim(z$response$y)[2] else match(names,colnames(z$response$y)) print(col) if(any(is.na(col)))stop(paste(names[is.na(col)],"not found")) z$response$delta[nind,col]} ### methods to find weights ### #weights <- function(object, ...) UseMethod("weights") weights.response <- function(object, nind=NULL, ...){ if(is.null(nind)||is.null(object$wt))return(object$wt) else if(length(nind)>length(nobs(object))||any(nind>length(nobs(object)))) stop("Individual not found") else nind <- !is.na(match(covind(object),nind)) if(all(!nind))stop("No such individuals") object$wt[nind]} weights.repeated <- function(object, nind=NULL, ...){ if(is.null(nind)||is.null(object$response$wt))return(object$response$wt) else if(length(nind)>length(nobs(object))||any(nind>length(nobs(object)))) stop("Individual not found") else nind <- !is.na(match(covind(object),nind)) if(all(!nind))stop("No such individuals") object$response$wt[nind]} ### methods to find nesting indicators ### nesting <- function(z, ...) 
UseMethod("nesting") nesting.response <- function(z, nind=NULL, ...){ if(is.null(nind))nind <- 1:length(z$y) else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") # # if nest is NULL, return individual index otherwise both # if(length(nobs(z))==1||all(nobs(z)==1))return(NULL) else if(is.null(z$nest))return(covind(z)[nind]) else { z <- cbind(covind(z),z$nest) colnames(z) <- c("Individual","Cluster") return(z)[nind,,drop=FALSE]}} nesting.repeated <- function(z, nind=NULL, ...){ if(is.null(nind))nind <- 1:dim(z$response$y)[1] else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") # # if nest is NULL, return individual index otherwise both # if(length(nobs(z))==1||all(nobs(z)==1))return(NULL) else if(is.null(z$response$nest))return(covind(z)[nind]) else { z <- cbind(covind(z),z$response$nest) colnames(z) <- c("Individual","Cluster") return(z[nind,,drop=FALSE])}} ### methods to find covariates ### covariates <- function(z, ...) UseMethod("covariates") covariates.tccov <- function(z, nind=NULL, names=NULL, expand=FALSE, ...){ if(is.null(nind))nind <- 1:dim(z$ccov)[1] else if(length(nind)>dim(z$ccov)[1]||any(nind>dim(z$ccov)[1])||any(nind<1)) stop("Individual not found") if(is.null(names))return(z$ccov[nind,]) else { col <- match(names,colnames(z$ccov)) if(any(is.na(col))) stop(paste("covariate(s)",names[is.na(col)],"not found")) return(z$ccov[nind,col])}} covariates.tvcov <- function(z, nind=NULL, names=NULL, expand=FALSE, ...){ if(is.null(nind))nind <- 1:dim(z$tvcov)[1] else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nind <- !is.na(match(covind(z),nind)) if(all(!nind))stop("No such individuals") if(is.null(names))return(z$tvcov[nind,]) else { col <- match(names,colnames(z$tvcov)) if(any(is.na(col))) stop(paste("covariate(s)",names[is.na(col)],"not found")) return(z$tvcov[nind,col])}} covariates.repeated <- function(z, nind=NULL, names=NULL, expand=FALSE, ...){ if(expand&&!is.null(nind))stop("can only expand for all individuals") ind <- covind(z) if(is.null(nind)){ nindv <- 1:dim(z$response$y)[1] nind <- if(expand)ind else 1:length(nobs(z))} else if(length(nind)>length(nobs(z))||any(nind>length(nobs(z)))) stop("Individual not found") else nindv <- !is.na(match(ind,nind)) if(all(!nindv))stop("No such individuals") if(is.null(names)){ if(is.null(z$tvcov$tvcov))return(z$ccov$ccov[nind,]) else if(is.null(z$ccov$ccov))return(z$tvcov$tvcov[nindv,]) else { if(expand)return(cbind(z$ccov$ccov[nind,], z$tvcov$tvcov[nindv,])) else return(list(ccov=z$ccov$ccov[nind,], tvcov=z$tvcov$tvcov[nindv,]))}} else { mat1 <- match(names,colnames(z$ccov$ccov)) mat2 <- match(names,colnames(z$tvcov$tvcov)) if(any(is.na(mat1)&is.na(mat2))) stop(paste("covariate(s)",names[is.na(mat1)&is.na(mat2)],"not found")) if(all(is.na(mat1)))return(z$tvcov$tvcov[nindv,mat2]) else if(all(is.na(mat2)))return(z$ccov$ccov[nind,mat1]) else { if(expand)return(cbind(z$ccov$ccov[nind,mat1], z$tvcov$tvcov[nindv,mat2])) else return(list(ccov=z$ccov$ccov[nind,mat1], tvcov=z$tvcov$tvcov[nindv,mat2]))}}} ### methods to find names ### names.response <- function(x, ...) colnames(x$y) names.tccov <- function(x, ...) colnames(x$ccov) names.tvcov <- function(x, ...) colnames(x$tvcov) names.repeated <- function(x, ...) 
list(response=colnames(x$response$y),ccov=colnames(x$ccov$ccov), tvcov=colnames(x$tvcov$tvcov)) ### methods to find units of measurements ### units <- function(x, ...) UseMethod("units") units.default <- function(x, ...) x$units units.repeated <- function(x, ...) list(response=units(x$response),ccov=units(x$ccov), tvcov=units(x$tvcov)) ### methods to find description of variables ### description <- function(z, ...) UseMethod("description") description.default <- function(z, ...) z$description description.repeated <- function(z, ...) list(response=description(z$response),ccov=description(z$ccov), tvcov=description(z$tvcov)) ### methods to find response type(s) ### resptype <- function(z, ...) UseMethod("resptype") resptype.response <- function(z, ...) z$type resptype.repeated <- function(z, ...) z$response$type ### methods to find formula used in tccov ### formula.tccov <- function(x, ...) x$linear formula.repeated <- function(x, ...) x$ccov$linear ### methods to transform response, times, or covariates ### transform.response <- function(`_data`, times=NULL, units=NULL, ...){ z <- `_data`; rm(`_data`) if(is.call(substitute(times)))times <- substitute(times) tran <- as.list(substitute(list(...)))[-1] if(!is.null(tran)){ # # transform response # if(is.null(z$delta)) z$delta <- matrix(1,nrow=dim(z$y)[1],ncol=dim(z$y)[2]) else if(length(delta)==1) z$delta <- matrix(z$delta,nrow=dim(z$y)[1],ncol=dim(z$y)[2]) cn <- colnames(z$y) cn2 <- names(tran) col <- match(cn2,cn) col2 <- NULL if(!is.null(units)){ if(length(units)!=length(tran)) stop(paste(length(tran),"units required")) if(!is.character(units)) stop("units must be a character vector")} for(i in tran){ tmp <- NULL for(j in 1:length(cn)){ if(length(grep(cn[j],as.character(i)))>0){ tmp <- j break}} if(is.null(tmp))stop("variable to transform not found") col2 <- c(col2,tmp)} j <- 0 for(i in tran){ j <- j+1 if(z$type[col2[j]]!="continuous"&&z$type[col2[j]]!="duration"&&z$type[col2[j]]!="unknown") stop(paste("transformations do not make sense with",z$type[col2[j]],"responses")) # transform tran <- eval(deriv(i,cn[col2[j]]),as.data.frame(z$y),NULL) # calculate Jacobian jacob <- as.vector(abs(attr(tran,"gradient"))) if(any(is.na(jacob)&!is.na(tran))) stop("NAs in jacobian - invalid transformation") if(any(abs(jacob)==Inf,na.rm=TRUE)) stop("infinite jacobian - invalid transformation") if(any(jacob<=0,na.rm=TRUE)) stop("nonpositive value in jacobian - invalid transformation") # store in the response object, updating delta, units, type if(is.na(col[j])){ if(any(is.na(tran)&!is.na(z$y[,col2[j]]),na.rm=TRUE)) stop("NAs created by transformation") tran <- as.matrix(tran) colnames(tran) <- cn2[j] z$y <- cbind(z$y,tran) z$delta <- if(all(is.na(z$delta[,col2[j]]))) cbind(z$delta,jacob) else cbind(z$delta,z$delta[,col2[j]]*jacob) colnames(z$delta) <- colnames(z$y) if(!is.null(z$n)){ z$n <- cbind(z$n,rep(NA,dim(z$n)[1])) colnames(z$n) <- colnames(z$y)} if(!is.null(z$censor)){ z$censor <- cbind(z$censor,z$censor[,col2[j]]) colnames(z$censor) <- colnames(z$y)} z$type <- c(z$type,z$type[col2[j]]) names(z$type) <- colnames(z$y) if(!is.null(z$units)){ z$units <- c(z$units,if(is.null(units))NA else units[j]) names(z$units) <- colnames(z$y)}} else { if(any(is.na(tran)&!is.na(z$y[,col[j]]),na.rm=TRUE)) stop("NAs created by transformation") z$y[,col[j]] <- as.vector(tran) z$delta[,col[j]] <- if(all(is.na(z$delta[,col[j]]))) jacob else z$delta[,col[j]]*jacob if(!is.null(z$units)) z$units[col[j]] <- if(is.null(units))NA else units[j]}}} 
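## Usage sketch (with a hypothetical response object 'resp' containing a
## variable 'y'): transformations are passed as named expressions through
## transform(), e.g.
##   transform(resp, ly = log(y), units = "log units")
## which adds a new column 'ly' (or replaces 'y' if that name is reused) and
## accumulates the Jacobian of the transformation in delta so that the unit
## of measurement stays consistent.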
if(!is.null(z$units)&&all(is.na(z$units)))z$units <- NULL if(!is.null(times)){ # # transform times # z$times <- eval(times,z,NULL) for(i in 1:length(nobs(z)))if(any(diff(z$times[covind(z)==i])<0)) stop("transformation produces negative time steps")} z} transform.repeated <- function(`_data`, times=NULL, ...){ z <- `_data`; rm(`_data`) if(is.call(substitute(times)))times <- substitute(times) z$response <- transform.response(z$response,times,...) z} transform.tccov <- function(`_data`, ...){ z <- `_data`; rm(`_data`) isf <- is.data.frame(z$ccov) if(!isf)z$ccov <- as.data.frame(z$ccov) # # find transformations # e <- eval(substitute(list(...)),z$ccov,NULL) tags <- names(e) for(i in 1:length(e))if(all(is.na(e[[i]]))) stop(paste(tags[i],"defines an invalid tranformation\n or attempts to transform a factor variable")) # # find covariates to transform # inx <- match(tags,colnames(z$ccov)) matched <- !is.na(inx) if(any(matched))z$ccov[inx[matched]] <- e[matched] if(!all(matched))z$ccov <- data.frame(z$ccov,e[!matched]) if(!isf)z$ccov <- as.matrix(z$ccov) z} transform.tvcov <- function(`_data`, ...){ z <- `_data`; rm(`_data`) isf <- is.data.frame(z$tvcov) if(!isf)z$tvcov <- as.data.frame(z$tvcov) # # find transformations # e <- eval(substitute(list(...)),z$tvcov,NULL) tags <- names(e) for(i in 1:length(e))if(all(is.na(e[[i]]))) stop(paste(tags[i],"defines an invalid tranformation\n or attempts to transform a factor variable")) # # find covariates to transform # inx <- match(tags,colnames(z$tvcov)) matched <- !is.na(inx) if(any(matched))z$tvcov[inx[matched]] <- e[matched] if(!all(matched))z$tvcov <- data.frame(z$tvcov,e[!matched]) if(!isf)z$tvcov <- as.matrix(z$tvcov) z} ### as.data.frame methods ### as.data.frame <- function(x, ...) UseMethod("as.data.frame") as.data.frame.response <- function(x,row.names=NULL,optional=FALSE, ...){ tmp <- data.frame(x$y) if(!is.null(x$n))for(i in 1:dim(x$y)[2])if(any(!is.na(x$n[,i]))) tmp[[i]] <- I(cbind(x$y[,i],x$n[,i]-x$y[,i])) if(!is.null(x$censor))for(i in 1:dim(x$y)[2]) if(any(!is.na(x$censor[,i]))) tmp[[i]] <- I(cbind(x$y[,i],x$censor[,i])) cn <- colnames(x$y) if(length(x$nobs)!=1){ cn <- c(cn,"individuals") tmp <- data.frame(tmp,as.factor(rep(1:length(x$nobs),x$nobs)))} if(!is.null(x$nest)){ cn <- c(cn,"nesting") tmp <- data.frame(tmp,as.factor(x$nest))} if(!is.null(x$times)){ cn <- c(cn,"times") tmp <- data.frame(tmp,x$times)} colnames(tmp) <- cn data.frame(tmp)} as.data.frame.tccov <- function(x,row.names=NULL,optional=FALSE, ...) as.data.frame(x$ccov) as.data.frame.tvcov <- function(x,row.names=NULL,optional=FALSE, ...) 
as.data.frame(x$tvcov) as.data.frame.repeated <- function(x,row.names=NULL,optional=FALSE, ...){ tmp <- data.frame(x$response$y) if(!is.null(x$response$n))for(i in 1:dim(x$response$y)[2]) if(any(!is.na(x$response$n[,i]))) tmp[[i]] <- I(cbind(x$response$y[,i],x$response$n[,i]-x$response$y[,i])) if(!is.null(x$response$censor))for(i in 1:dim(x$response$y)[2]) if(any(!is.na(x$response$censor[,i]))) tmp[[i]] <- I(cbind(x$response$y[,i],x$response$censor[,i])) cn <- colnames(x$response$y) if(!(length(x$response$nobs)==1)){ cn <- c(cn,"individuals") tmp <- data.frame(tmp,as.factor(rep(1:length(x$response$nobs), x$response$nobs)))} if(!is.null(x$response$nest)){ cn <- c(cn,"nesting") tmp <- data.frame(tmp,as.factor(x$response$nest))} if(!is.null(x$response$times)){ cn <- c(cn,"times") tmp <- data.frame(tmp,x$response$times)} tmp <- data.frame(tmp) colnames(tmp) <- cn if(!is.null(x$ccov$ccov)) tmp <- data.frame(tmp,x$ccov$ccov[covind(x),,drop=FALSE]) if(!is.null(x$tvcov$tvcov)) tmp <- data.frame(tmp,x$tvcov$tvcov) tmp} ### as.matrix methods ### as.matrix <- function(x, ...) UseMethod("as.matrix") as.matrix.response <- function(x, ...){ tmp <- x$y cn <- colnames(x$y) if(!is.null(x$times)){ cn <- c(cn,"times") tmp <- cbind(tmp,x$times)} colnames(tmp) <- cn tmp} as.matrix.tccov <- function(x, ...) as.matrix(x$ccov) as.matrix.tvcov <- function(x, ...) as.matrix(x$tvcov) as.matrix.repeated <- function(x, ...){ tmp <- x$response$y cn <- colnames(x$response$y) if(!is.null(x$response$times)){ cn <- c(cn,"times") tmp <- cbind(tmp,x$response$times)} colnames(tmp) <- cn if(!is.null(x$ccov$ccov)) tmp <- cbind(tmp,x$ccov$ccov[covind(x),,drop=FALSE]) if(!is.null(x$tvcov$tvcov)) tmp <- cbind(tmp,x$tvcov$tvcov) tmp} rmutil/NEWS.md0000644000176200001440000014462414326276543012713 0ustar liggesusers------------------------------------------------------------------------------ version 1.1.10 ------------------------------------------------------------------------------ * fix clang15 warnings * fix latex escape note * fix version info from new.md note ------------------------------------------------------------------------------ version 1.1.9 ------------------------------------------------------------------------------ * reset license from GPL-2 to the original GPL (>=2) ------------------------------------------------------------------------------ version 1.1.8 ------------------------------------------------------------------------------ * fixed typo in C code causing i386 error in rmutil::int() ------------------------------------------------------------------------------ version 1.1.7 ------------------------------------------------------------------------------ * added Author information of TOMS614/INTHP * fixed void / SEXP registration issue (LTO / valgrind additional issues) * fixed 0-sized array warning * Fixed call_R Issue https://github.com/swihart/rmutil/issues/11 * Updated links to https: where appropriate ------------------------------------------------------------------------------ version 1.1.6 ------------------------------------------------------------------------------ * Fixed call_R Issue https://github.com/swihart/rmutil/issues/11 * Updated links to https: where appropriate ------------------------------------------------------------------------------ version 1.1.5 ------------------------------------------------------------------------------ * Fixed Issue https://github.com/swihart/rmutil/issues/10 -- updated BetaBinomial help documentation to relate `m` and `s` to the standard/traditional 
`alpha` and `beta` parameterization. Thanks to @sjenkins20 on github for the suggestion! * Fixed Issue https://github.com/swihart/rmutil/issues/9 -- Condition has length > 1 and only the first element will be used warning in `int`. Thank you @ellessenne on github for chasing this one down. ------------------------------------------------------------------------------ version 1.1.4 ------------------------------------------------------------------------------ * In response to K. Hornik email regarding the stringsAsFactors=FALSE default in upcoming R versions, I did the following 3 edits: * add `stringsAsFactors=TRUE` to the v <- data.frame() call in the example of `tvctomat.Rd` * add `stringsAsFactors=TRUE` to the data.frame() call in the function `tvctomat()` (around line 1160 in `objectrm.r`): (`oldtvcov$tvcov <- data.frame(oldtvcov$tvcov,tvcv, stringsAsFactors = TRUE)` * turn `tvcv <- as.data.frame(as.vector(t(as.matrix(tvcov))))` into `tvcv <- data.frame(as.character(as.vector(t(as.matrix(tvcov)))),stringsAsFactors=TRUE)` in the function `tvctomat()` (around line 937 of `objectrm.r`) * Issue https://github.com/swihart/rmutil/issues/8. ------------------------------------------------------------------------------ version 1.1.3 ------------------------------------------------------------------------------ * Implemented better fixes for https://github.com/swihart/rmutil/issues/5 (Thanks @hennerw!) where values below the support will give 0 and values above support give 1 for pbetabinom and dbetabinom as the last fix in 1.1.2 could give erroneous results. Let me know if other functions need this functionality -- I'd be happy to update. * Removed local generic `weights` as per personal communication from K Hornik ------------------------------------------------------------------------------ version 1.1.2 ------------------------------------------------------------------------------ * Implemented https://github.com/swihart/rmutil/issues/5 (Thanks @hennerw!) where values below the support will give 0 and values above support give 1 for pbetabinom and dbetabinom. Let me know if other functions need this functionality -- I'd be happy to update. ------------------------------------------------------------------------------ version 1.1.1 ------------------------------------------------------------------------------ * fixed an `_R_CHECK_LENGTH_1_CONDITION_=true` error * fixed `_R_S3_METHOD_LOOKUP_BASEENV_AFTER_GLOBALENV_=true` errors. * changed Example in DataMethods.Rd to `glm(y~x+z, data=as.data.frame(reps))` ------------------------------------------------------------------------------ version 1.1.0 ------------------------------------------------------------------------------ Major changes Passed CRAN checks and is back on CRAN. Please see github page https://github.com/swihart/rmutil to see the changes required to pass CRAN checks. I'll try to document the changes henceforth here and at https://github.com/swihart/rmutil/issues. 
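The support-boundary behaviour noted for versions 1.1.2/1.1.3 above can be checked directly. The following is only a minimal illustrative sketch: it assumes the current CRAN build of rmutil and takes `m = alpha/(alpha+beta)`, `s = alpha+beta` as the mapping to the traditional parameterization mentioned under 1.1.5.

```r
library(rmutil)

alpha <- 2; beta <- 6
m <- alpha/(alpha + beta)   # mean parameter in the rmutil parameterization
s <- alpha + beta           # overdispersion parameter

## support is 0,...,size; per the 1.1.2/1.1.3 entries above,
## pbetabinom() gives 0 below the support and 1 above it
pbetabinom(-1, 10, m, s)    # 0
pbetabinom(15, 10, m, s)    # 1
dbetabinom(3, 10, m, s)     # ordinary density value inside the support
```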
------------------------------------------------------------------------------ version 1.0 ------------------------------------------------------------------------------ 30.11.10 (growth) elliptic: twins option added for model using covfn with covariance matrix diagonal constant 28.11.10 elliptic: added an error check when covfn used 15.2.10 (rmutil) changed s<0 to s<=0 in qlaplace & rlaplace (thanks to Peter Ehlers) 18.11.09 (repeated) removed a redundant line in gausscop.r that now produced an error (thanks to Patrick Lindsey) 7.4.09 removed extra } in biv.binom.Rd (thanks to Christopher Marcum) 20.10.08 discrete q functions: changed trunc to round (thanks to Frederic Gosselin) 3.7.08 (gnlm) fit.dist: corrected check for negative values with Laplace, Cauchy, and Student t plus error in counts (f -> ni) for Laplace (thanks to Michael Anyadike-Danes) 24.10.07 fnenvir: changed way "=" is handled in gsub() because of error since R2.5.0 8.10.07 (event, gnlm, growth, repeated) changed typsiz to typsize in nlm() throughout 11.7.07 romberg.c: added missing R.h (thanks to Olivia Lau) 8.2.07 print out name of response variable in elliptic, bnlr, gnlr, gnlr3, gnlmm, gnlmm3, and fmr (thanks to Patrick Lindsey) qsimplex: corrected search interval updates 27.9.06 qhjorth, qinvgauss, qginvgauss, qboxcox: changed lower limit of search from 0.001 to .Machine$double.xmin (thanks to Franco Mendolia) 8.12.05 (rmutil, repeated, event) minor modifications to Fortran for F95 compatibility (thanks to Jan de Leeuw) 22.11.05 finterp, objectrm: added na.action=NULL in calls to model.frame (default is na.omit !!!!!) (thanks to Patrick Lindsey) 30.9.05 elliptic: corrected calculation of number of parameters for builtin logistic function (thanks to Tom Van Dooren) 1.8.05 qbetabinom: changed trunc() to round() (thanks to Elias Krainski) mprofile, iprofile: added check that times are available (thanks to Patrick Lindsey) 6.7.05 ksurvb, ksurvg, kcountb: added break; after default: in C code to satisfy some compilers (thanks to Patrick Lindsey) 30.6.05 finterp: correction for change in functioning of match() gnlm family: added coef.gnlm() and vcov.gnlm() (thanks to Bendix Carstensen) 25.4.05 (stable) rstable: eliminate production of NAs (thanks to Zhu Wang) 26.1.05 finterp: fixed a bug when >= or <= is used in a formula (thanks to Eliot McIntire) 1.11.04 gnlmm3: generalized nonlinear mixed models for three-parameter distributions 28.9.04 catmiss: removed codes() from example in help (thanks to Kjetil Brinchmann) 21.9.04 finterp: fixed if test to remove occasional warning (thanks to Ken Knoblauch) 17.9.04 gnlmix: removed erroneous printing that distribution is censored for binomial (thanks to Ken Knoblauch) 28.7.04 gnlmix, hnlmix: fixed printing of results when nonlinear function contains a linear part (thanks to Ken Knoblauch) 2.7.04 tvctomat: fixed warning message on removing tm (thanks to Patrick Lindsey) 1.6.04 glmm: changed print.summary.glmm to work under R1.9 (thanks to Spencer Graves) 5.4.04 fnenvir: fixed obscure error when linear used in a function (thanks to Ken Knoblauch) help: corrected truncation of usage formula for certain functions (thanks to Patrick Lindsey) 9.1.04 fitdist: fixed typo that stopped geometric distribution from working 6.1.04 ordglm: changed tapply to capply because of change to former (thanks to Andrew Criswell) 9.12.03 corgram: start abscissa at 0 for PACF fnenvir: fixed grep for checking redundant parameters bnlr, fmr, gnlmm, gnlr, gnlr3, int, nlr, nordr, read.list, stable.mode: fixed 
if() on vector 14.11.03 readdna, survkit, glmm, gnlmm, objectrm, readrm: removed obsolete codes() function (thanks to Ken Knoblauch) carma: give error when ccov=~1 used (thanks to Patrick Lindsey) 21.8.03 elliptic: corrected print function when the dispersion function depends on the location function (thanks to Gabrielle Kelly) 31.7.03 hnlmix: corrected options for dnbinom (thanks to Jagat Sheth) 30.6.03 dftorep: corrected check for ordered times to allow for two levels of nesting 25.5.03 ordglm: added a data argument (thanks to Kosuke Imai) 13.5.03 ordglm: corrected order of printing of standard errors (thanks to Kosuke Imai) 25.4.03 gnlr, gnlr3, gnlmm, fmr, nordr: changed test for environment because the value returned by parent.frame() has a class in R1.7 22.4.03 cphidden: a function to locate a changepoint in continuous time using a two-state hidden Markov model 9.4.03 biv.binom: corrected degrees of freedom printed (thanks to Goran Brostrom) 12.2.03 restovec: fixed handling of delta when the response is a list 16.1.03 kalsurv: fixed typo in print.kalsurv (thanks to Anthony Gichangi) 4.12.02 int: changed eps 2.12.02 fit.dist: added Laplace distribution 1.12.02 glmm: added error message if (codes of) nesting variable not consecutively numbered (thanks to Renaud Lancelot) 27.11.02 fit.dist: changed Weibull parametrisation so that mu refers to y and not to y^alpha 22.11.02 fit.dist: added Cauchy and Student t distributions use (log) density functions instead of writing formulae 18.11.02 fit.dist: added beta-binomial distribution 16.11.02 fit.dist: corrected error in calculation of log likelihood when censor=T 14.11.02 fit.dist: corrected error in calculation of fitted values for zeta distribution 31.10.02 int2: added default limits (thanks to Patrick Lindsey) 8.9.02 (repeated) gar: corrected recursive fitted values when binomial (thanks to Patrick Lindsey) 4.9.02 gausscop: exponential distribution now works (thanks to Patrick Lindsey) 30.8.02 restovec: modified checks for nesting in lists and allow nesting to be supplied separately when a list is given (thanks to Patrick Lindsey) gausscop: for positive-valued distributions, returned predicted values without transforming by log link function (thanks to Patrick Lindsey) 18.7.02 ehr: addition checks of data supplied to this suite of functions rs3: fixed typo marg.hom: added checks on data 16.7.02 chidden.r, hidden.r: corrected negative binomial check so that 0 responses are allowed (thanks to Ben Cooper) 10.7.02 modified man pages for changed arguments to rgamma function rmutil: created dist.h file 11.6.02 hnlmix: corrected AIC for penalty constraint (was too large by one) changed calculation of multiplicative random effects 23.5.02 rmutil: added [pdqr]twosidedpower distribution added log option to all density (d) functions gar, gnlr, gnlmix, gnlmm, hnlmix: added two-sided power distribution gnlr: user-supplied likelihood function works again (thanks to Martin Liermann) finterp, fnenvir: added option to allow any response to be a covariate 9.5.02 hnlmix: recursive fitted values available ordglm: fixed error that PearsRes not defined when individual data are supplied (thanks to Kamal Desai) 6.5.02 gnlmix, hnlmix: added inverse gamma mixture distribution gnlmix: handles censored data gnlmm: finds nesting variable when repeated environment is specified 5.5.02 finterp: modified so that as.factor(times) works 30.4.02 hnlmix: nonlinear random effects models using a modified Lee and Nelder h-likelihood gnlr: modified check on location parameters 
for Levy distribution added check that double Poisson, multiplicative Poisson, gamma count, and logarithmic data are not censored ------------------------------------------------------------------------------ version 0.9 ------------------------------------------------------------------------------ 28.4.02 gnlmix: corrected typo in negative binomial distribution 23.4.02 carma, chidden, elliptic, hidden, kalseries: give error if censored data supplied (thanks to Troels Ring) 22.4.02 elliptic: when two levels of nesting, calculate correctly first recursive fitted value in each cluster (was plotted correctly using iprofile) plus corresponding simplification of plot.iprofile (thanks to Troels Ring) 17.4.02 (all libraries) gnlmix: corrected typo in inverse Gauss mixing distribution print model methods: added option not to print correlations 15.3.02 restovec, tcctomat, tvctomat: added optional description slot to response, tccov, and tvcov objects 13.3.02 glmm: convert repeated object to dataframe if supplied as data tcctomat, tvctomat: corrected to detect contrast options when dataframe=F 12.3.02 tvctomat: corrected problem for list of factors when dataframe=F (thanks to Patrick Lindsey) finterp.default: give error if members of dataframe named using $ (thanks to Christof Bigler) 28.2.02 chidden, hidden: added check for correct number of initial estimates when list of functions supplied (thanks to Patrick Lindsey) 22.2.02 corgram: added option for PACF 19.2.02 fmr: modified some discrete distributions to avoid overflow with large counts 17.2.02 elliptic: added as.double for y in call to C code because of change in read.table 12.2.02 finterp: give error if offset used in W&R formula 31.1.02 %^%: power of a matrix elliptic: corrected problem when common parameters in mean and variance functions 20.1.02 plot.repeated: added selection of profiles to be plotted by using ccov 14.1.02 gar: added absolute value arch (names of others changed: additive -> square, multiplicative -> exponential) volatility method for extracting values of nonconstant dispersion parameter Makefiles: removed . 
for SHLIB_EXT for R1.4.0 dist.c, kcountb.c, romberg.c, stable.c: changed malloc to R_alloc 10.1.02 (dhpqr)ggamma, fmr, gausscop, gnlmix, gnlmm, gnlr, gnlr3, hgamma: changed argument of (dpqr)gamma for compatibility with R1.4.0 modified help to work with R1.4.0 18.12.01 contr.mean: provides correct labels on mean constraints (corrects contr.sum) 4.12.01 chidden, hidden: corrected printing out family parameter with AR when there is not one 28.11.01 qstable: corrected bug when tail<1 and skew=1 (thanks to Alec Stephenson) 23.11.01 corgram: handles NAs in the series 19.11.01 cprocess: fixed error in checking for list of events (thanks to Troels Ring) stablereg: changed alpha to allow parameter to be (0,2) instead of (1,2) 18.11.01 chidden: added time-discretized Poisson process 17.11.01 chidden, hidden: added Student t distribution changed Cauchy shape parameter to scale instead of scale^2 15.11.01 gar: added Student t distribution added ARCH models elliptic: when AR, take log of determinant returned by dpodi (thanks to Gabrielle Kelly) 13.11.01 elliptic: when series of independent observations, calculate covariance determinant as sum of log variances instead of log of product (thanks to Gabrielle Kelly) 8.11.01 cmcre: corrected problems when a covariate is used (thanks to Anthony Gichangi) 6.11.01 print.response: do not print mean if nominal (but not binary) or ordinal (thanks to Patrick Lindsey) 25.10.01 hidden.r: corrected check for fixed zeros in transition matrix relaxed check for rows of transition matrix summing to one chidden.r: relaxed check for rows of transition matrix summing to zero (all thanks to Patrick Lindsey) 24.10.01 restovec: weights can be logical 14.10.01 gar: fixed output printing when shape is a function of location parameter use dnbinom function changed negative binomial shape parameter to be same as in gnlr 10.10.01 carma, chidden, gar, hidden, kalcount, kalseries: check for two levels of nesting when serial dependence fitted 9.10.01 kalseries: corrected error when torder used with tvcov 8.10.01 hidden, chidden: added observed AR(1) gnlr, gnlmm, gnlmix: changed parametrization of the shape parameter for the beta distribution (thanks to Goran Arnoldsson) binnest: duplicate variables in Fortran call model functions using envir: check that response specified is one in envir when only one present 3.10.01 plevy, qlevy: use pnorm and qnorm instead of integrating 26.9.01 elliptic: added second form of asymmetric multivariate Laplace distribution with constant asymmetry parameter 25.9.01 elliptic: added asymmetric multivariate Laplace distribution 24.9.01 carma.r: removed unnecessary check that envir is a repeated object (thanks to Troels Ring) 11.9.01 fit.dist: added checks that grouped frequency data are supplied 10.9.01 kalsurv: corrected output errors when environment is supplied gar: use log option in dbinom, dpois kalcount: set first recursive prediction in series to marginal prediction 6.9.01 gar: added loglog link for binomial data (corrected cloglog which was, in fact, loglog) 20.8.01 gnlmix: set undefined sh3 to NULL for one parameter distributions 1.8.01 chidden, gar, gnlr3, hidden: added skew Laplace distribution 27.7.01 corgram: improved presentation of correlogram 25.7.01 d,h,p,q,rskewlaplace: probability functions for the skew Laplace distribution 24.7.01 autointensity.r: plots autointensity function of a point process 12.7.01 plot.repeated: fixed error of unknown type when plotted time-varying covariate (thanks to Patrick Lindsey) carma: clearer error message 
when incorrect environment supplied 10.7.01 carma: will handle data objects with (one of) multivariate responses chidden, hidden: handle Jacobian correctly with (one of) multivariate responses 6.7.01 cprocess.r: recognizes data objects for events and not just for times 5.7.01 f2c.h included in library for toms614.c (missing in R1.3.0) 27.6.01 iprofile, mprofile: corrected links to other libraries for html help plot.cum.pergram: corrected confidence interval pergram: changed calculation of length for odd-lengthed series 22.6.01 gar.r: check that times are supplied and, if not, create if possible 19.6.01 fmr.r, gnlmm.r, gnlr.r, gnlr3.r: linear can be ~1 if mu not supplied 14.6.01 marg.hom.r: modified to handle factor variables with non-numeric levels 8.6.01 ordglm.r: corrected fitted values when weighted observations (thanks to Troels Ring) 31.5.01 elliptic: changed check on initial variance function estimates 16.5.01 print.response, print.tvcov, print.repeated: added option to print range of numbers of observations per individual instead of vector of numbers (thanks to Markus Jantti) dmultpois, etc: added additional check on dependence parameter 9.5.01 gar.r: corrected printout for generalized gamma parameter 26.4.01 changed F and T to FALSE and TRUE throughout read.rep: removed col.names option because of changes in read.table 22.4.01 glmm: corrected typo when dataframe used with no offset 20.4.01 finterp: detects functions when given as arguments of other functions 19.4.01 finterp: formulae can be written on several lines, as several instructions (e.g. to assign temporary variables) 11.4.01 dburr, pburr, qburr, hidden, chidden, gnlr3: changed parametrization of Burr distribution (thanks to Patrick Lindsey) 28.3.01 chidden, hidden: corrected vector length problem in check for ordered intercepts in ordinal models (thanks to Niko Speybroeck) several p and d functions: changed check to y>=0 (thanks to Patrick Lindsey) 22.3.01 glmm: works again with weights and/or offset (thanks to Luc Duchateau) gnlmix: changed to log dispersion for mixing distribution 21.3.01 int.c: corrected memory allocation problem GExtVal: changed man page to agree with functions (both thanks to Patrick Lindsey) 20.3.01 use log option in d and p functions for h functions 14.3.01 chidden, hidden: added further checks on ordering of intercepts for ordinal data 13.3.01 gnlmix: changed dispersion parameter for normal mixing distribution from standard deviation to variance delta: returns a vector instead of a matrix if only for one variable 11.3.01 gnlmix: correction to work with unbalanced data 9.3.01 gar, gnlr3: added power variance function Poisson distribution 8.3.01 covariates: added expand option dpvfpois, ppvfpois, qpvfpois, rpvfpois: functions for the overdispersed power variance function Poisson distribution kalcount: corrected for power variance function 7.3.01 plot.response: corrected indexing problem 1.3.01 kalcount, kalseries, kalsurv: removed constraints on family parameter 27.2.01 chidden, fmr, gar, gausscop, gnlmix, gnlmm, gnlr, gnlr3, hidden, kalseries, kalsurv, nlr: relaxed type checks for continuous and duration data 26.2.01 kalcount, kalseries, kalsurv: added two-parameter power variance family mixture including gamma and inverse Gauss mixtures (family) for serial dependence 23.2.01 response: if response is univariate, returns a vector instead of a matrix covariates: if only one covariate requested, return as a vector chidden, hidden: improved checks for ordered intercepts with ordinal response improved 
calculation of ordinal probabilities 22.2.01 plot(iprofile()): works for models from kalsurv (thanks to Jacob Bowers) 19.2.01 chidden.r, hidden.r: corrected error in calculating individual profiles when tvmu used (thanks to Jacob Bowers) ordinal data can be used with multinomial option (thanks to Patrick Lindsey) work with ordinal data with a list of different formulae (thanks to Niko Speybroeck) 31.1.01 glmm.r: works if response is a one-column matrix instead of a vector (thanks to Luc Duchateau) restovec: corrected manual so that arguments section appears (thanks to Patrick Lindsey) 30.1.01 finterp, fnenvir: further correction to handle decimal numbers (including scientific notation) correctly finterp: replaced gsub by all.vars 25.1.01 name of response can be a character string when environment is supplied (thanks to Patrick Lindsey) hidden, chidden: added description of pintercept to man page delta: works properly when name is supplied plot functions: use 6 different line types instead of 4 gausscop: corrected mean for gamma margin check that only one initial estimate when no dispersion function 18.1.01 transform.response: works when units is NULL hidden, chidden: reversed order of categories for proportional odds and continuation ratio replaced dqrcf with dqrsl nordr: bug fix to work with data objects 8.1.01 cutil.c, romberg.c, toms614.c: changed include for call_R (thanks to Dennis Murphy) 7.1.01 model fitting functions check for correct type of response dftorep, read.rep: modified to handle new "types" of responses dftorep: now handles two column binomial response ehr.r: rewrote to conform to other functions 4.1.01 restovec: option to add responses to an old response object and types of responses returned in list instead of as a class resptype: new method to return types of response variable(s) finterp.repeated: check that binomial and censored responses are not used as covariates 21.12.00 gar.r: corrected error in printing three-parameter distributions 19.12.00 finterp, fnenvir: methods for dataframes gnlm functions: environment can be a dataframe 18.12.00 changed check for existence of response when environment supplied bnlr, fmr, gnlmm, gnlr, gnlr3, nordr: fixed calculation of n for null function 17.12.00 various changes because of new R1.2.0 handling of formulae finterp: check for + or - before ( (change to R1.2.0) elliptic: removed check on tvcov, so can accept times and individuals 15.12.00 bnlr, fmr, gnlr, gnlr3, gnlmm, nordr: nonlinear formula need not contain covariates changes to cutil.c and coxre for R1.2.0 14.12.00 restovec, tcctomat, tvctomat: added slot for units of measurement dftorep, read.rep: id can be a factor variable carma, elliptic, gar, gausscop, kalcount, kalseries, kalsurv: test if name of response exists when envir contains multivariate responses stable: corrected man pages 3.12.00 gnlmix: generalized nonlinear models with one random parameter having arbitrary mixing distribution 30.11.00 fitdist.r: calculate log probabilities and likelihoods to avoid underflow (thanks to Goran Brostrom) int2: vectorized two-dimensional Romberg integration 28.11.00 added d, p, q, and r functions for Consul generalized Poisson distribution added PACKAGE option to .C and .Fortran calls bnlr.r: added cloglog and loglog links changed class of gnlr-type functions from gnlr to gnlm gar.c: corrected calculation of censored Burr, Pareto, power exponential distributions (thanks to Patrick Lindsey) 24.11.00 q and r functions: improved calculations and checks 23.11.00 bnlr, fmr, gnlmm, 
gnlr, gnlr3, nordr: nonlinear formulae can have a linear part qgweibull, qggamma, qgextval: corrected arguments to functions and docs (thanks to Patrick Lindsey) 21.11.00 fmobj: find objects referred to in a formula elliptic.r, fmr.r, gar.r, gausscop.r, gnlmm.r, gnlr.r, gnlr3.r: models with common parameters in several regression functions can be specified using formulae, not just functions elliptic.r, gar.r, gausscop.r, gnlmm.r, gnlr.r: models with shape as a function of location can be specified using formulae, not just functions 20.11.00 finterp.r: formulae can have common parameters and/or depend on functions 16.11.00 hidden, chidden: added recursive predicted values added q and r functions for distributions in rmutil 14.11.00 kalseries: corrected error in inverse Gaussian distribution (thanks to Patrick Lindsey) bnlr.r: added stable and mixture links gnlr, gnlmm, gar: added beta and simplex distributions rmutil: added psimplex and dsimplex 9.11.00 improved checking for multivariate response and choosing one response when several present in a data object 6.11.00 fmr.r, printrm.r: corrected so that works with common parameters (thanks to Laura Thompson) 29.10.00 gnlr3.r: corrected typo in normal and inverse Gauss distributions 19.10.00 gausscop: multivariate Gaussian copula with arbitrary marginals elliptic.r: several typing errors corrected ------------------------------------------------------------------------------ version 0.8 ------------------------------------------------------------------------------ 17.10.00 carma, elliptic, kalseries: handles NULL delta correctly with multivariate response in repeated object restovec: gives names correctly to multivariate 3-dim array covariates.repeated: calculates number of observations correctly when multivariate response 15.10.00 qstable: corrected bug due to change in uniroot (thanks to Gabrielle Kelly) dstable, pstable, qstable, rstable: added checks that parameter values are correct 13.9.00 (growth, repeated, rmutil) restovec: check for NAs in ordinal responses (thanks to Patrick Lindsey) elliptic, kalseries: check that torder is not larger than number of time points (thanks to Patrick Lindsey) elliptic: corrected undefined n (thanks to Patrick Lindsey) 12.9.00 kalseries: constant shape parameter for (log) logistic, Cauchy, and Laplace distributions previously had square root transform kalseries: added inverse Gauss distribution 7.9.00 restovec: corrected error (change in R) when censor is all ones (thanks to Troels Ring) 17.8.00 removed provide() rmutil: removed det() 14.8.00 rmna: corrected typo in man page 17.7.00 nordr.r: corrected minor bugs for weights and data object handling (thanks to Patrick Lindsey) 5.7.00 as.data.frame.x: added options from default (thanks to Patrick Lindsey) rmna: removes NAs in weights (thanks to Patrick Lindsey) restovec: handle correctly option, times=T (thanks to Patrick Lindsey) covariates.repeated: handle correctly non-repeated observations (thanks to Patrick Lindsey) 21.6.00 plotrm.r: plot.residuals corrected so ccov works (thanks to Patrick Lindsey) 14.6.00 carma.r: correction for ccov as one-column matrix (thanks to Patrick Lindsey) 7.6.00 elliptic.f: fixed crash with more than one covariate in tvcov (thanks to Bruno Genicot) 1.6.00 elliptic.r: corrected check to allow varfn="identity" or "square" 30.5.00 bnlr.r: binomial regression with various links 22.5.00 fnenvir.r: can handle functions without parameters (thanks to Troels Ring) 11.5.00 fit.dist: corrected exact fit for negative binomial and 
added default options for main and xlab 6.4.00 runge.kutta, lin.diff.eqn: functions to solve differential equations 5.4.00 gar.r: handles censored data correctly when a data object contains more than one response 29.3.00 runge.kutta.r: solution of differential equations 20.3.00 nlr: corrected undefined mu1 17.3.00 print.response: check for NAs in times 15.3.00 glmm: obtain nest vector from dataframe if supplied 14.3.00 nordr, ordglm: clearer error message if the response is not a numeric vector with integral values starting at 0 (thanks to Troels Ring) 15.2.00 ordglm: corrected bug when more than three categories 12.2.00 (repeated, event) kalcount, kalseries, kalsurv: autoregression with frailty dependence 9.2.00 kcountb.c, kserieb.c, ksurvb.c, ksurvg.c: changed -log(1-pfn()) to -pfn(,,,0,1) and removed inthaz.c 8.2.00 all libraries: corrected C code for R0.99 kalcount: corrected error in recursive predicted values for gamma intensity 1.2.00 restovec: corrected handling of weights when response is a list kalsurv.r: corrected plotting of profiles for logged distributions cutil.c: changed Fortran.h to Rconfig.h and moved to rmutil cgamma.c: replaced by cutil.c inthaz.c: changed finite() to R_FINITE() 27.1.00 gar: three-parameter distributions work with constant dispersion parameter kalcount, kalseries, kalsurv: if mu contains time-varying covariates, initial estimates must be in ptvc 24.1.00 finterp, fnenvir: changed name of .fn to avoid conflicts most model functions: check that supplied function was not produced by finterp already 20.1.00 as.data.frame: puts binomial and censored responses as two-column matrices gnlr, fmr, gnlmm, gar: binary response need only be one column for binomial 17.1.00 finterp, fnenvir: handle decimal numbers correctly most model functions: print out regression function correctly when envir supplied 16.1.00 gar: added Consul generalized Poisson distribution transform: check for nonpositive and infinite values in Jacobian carma, elliptic, gar, kalseries: sqrt transformation checks for zero response values 14.1.00 most model functions: check for NAs in data objects (because of lvna) gnlr, gnlr3, fmr, gnlmm: possible to fit a model without optimizing any parameters in the regressions if they are functions restovec, dftorep, read.rep: add additional validity checks for responses times.default: replaces times.repeated 11.1.00 hidden, chidden: handle multinomial count data nlr: modified to handle data objects correctly most model functions: changed way of detecting multivariate responses finterp: correct error for length of response when multivariate 10.1.00 gettvc: works correctly for first observation when ties=FALSE (thanks to Patrick Lindsey) finterp: can find response variables of formula in repeated objects for most model functions, one of multiple responses in a repeated data object can be selected for the model 9.1.00 restovec: handles multivariate matrices, arrays, and lists dftorep: transform a dataframe to a repeated data object read.rep: read a rectangular data set from a file and create a repeated data object directly 7.1.00 logitord.f: reformatted to remove tabs for HP compilers (thanks to Osman Buyukisk) restovec: responses can have more than one type class ------------------------------------------------------------------------------ version 0.7 ------------------------------------------------------------------------------ 3.1.2000 residuals.elliptic: corrected error in calculation of raw residuals 31.12.99 objectrm.r: can select certain individuals with 
methods, covariates, delta, nesting, times, weights transform: handles NAs in response correctly 28.12.99 restovec: added name of response variable to list returned objectrm.r: added as.data.frame and as.matrix methods for data objects wr: works with my data objects nordr: changed sign of coefficients for continuation ratio and adjacent categories models so comparable with proportional odds 27.12.99 finterp: with W&R notation, design matrix no longer returned as attribute when ~1 and .envir supplied, returns a function yielding a vector of correct length 26.12.99 fit.dist: corrected exact fit of negative binomial gnlr, gnlr3, fmr, gnlmm: improved speed nordr, ordglm: ordinal categories numbered from 0 instead of 1 hidden, chidden: multinomial categories numbered from 0 instead of 1 handles ordinal models, thanks to Patrick Lindsey gettvc: now handles NAs in response variable 25.12.99 improved documentation for methods to access data objects and functions int: call C instead of Fortran for TOMS614 23.12.99 restovec: added additional checks that correct data are supplied (thanks to Troels Ring) mprofile.carma: corrected bug when no covariates carma: corrected bug when delta is a scalar carma, elliptic: added checks for incorrect formulae 21.12.99 hidden, chidden: improved printout and corrected error in checking number of parameters in lists of formulae 20.12.99 lvna: creates a repeated object leaving NAs in hidden, chidden: interactions between time-constant and time-varying covariates allowed 17.12.99 hidden, chidden: improved printout 16.12.99 tvctomat: handles lists of factor variables correctly restovec: value returned has class of type of response as well as "response" added checks hidden, chidden: can also use formulae if multinomial 12.12.99 hidden, chidden: can use formulae if not multinomial 7.12.99 cmcre: corrected memory leak 6.12.99 cmcre: continuous-time two-state Markov process with random effects 5.12.99 coxre: corrected several errors 1.12.99 stable: fixed plot arguments in help examples 29.11.99 finterp: fixed bug when multiple ('s or ^ before ( when detecting function names nobs: use method instead of direct access in all functions provide default method covind: provide default method 25.11.99 collapse: changed name to capply because of conflict in nlme 23.11.99 profile: changed to mprofile because of conflict in R0.90 22.11.99 finterp: properly distinguishes unknown parameters from functions finterp and fnenvir: when no variables found, changed stop to warning nobs: corrected for independent observations when length is one 18.11.99 stablereg: corrected bug when some parameters are not optimized check for NAs in the hessian 17.11.99 plot.repeated, plot.response: added special call for ordinal responses corrected plot for independent observations (thanks to Patrick Lindsey) 14.11.99 removed unneeded aliases in man pages added aliases to plot.profile and plot.iprofile 11.11.99 added check for Inf (as well as NAs) in hessian to all functions using nlm kalseries.r: added error message if times not available for Markov dependence changed rep(1,n) to rep(1,nind) when mu function returns scalar stable.r: moved call to C code into likelihood function for speed int.r: limits can be specified as -Inf, Inf 4.11.99 kalcount.r, kalseries.r, kalsurv.r: with time-varying covariates in a function or formula, initial estimates can be in preg or ptvc and changed length(resp$response$y) to n for speed 31.10.99 gar.r: fixed undefined npt3 for autoregression parameter finterp.r: fixed bug for 
: in W&R formulae kalcount.r, kalseries.r, kalsurv.r: added error message when time-varying covariates 22.10.99 covind: changed so that it works with carma, elliptic, gar, hidden, kalcount, kalseries, and kalsurv objects 18.10.99 gar.r: corrected printing of parameter values for three-parameter distributions gar.c: corrected calculation of lambda in three-parameter distributions 17.10.99 gar.r: corrected fitted values (due to changes on 12.10.99) 14.10.99 gar.r: corrected undefined variable, tm (due to changes on 12.10.99) 12.10.99 stable: stablereg with nonlinear regression replaces stableglm finterp, fnenvir: check for factor variables instead of not being a numerical or logical vector gar: allow autoregression parameter to depend on covariates dist.c, kcountb.c, kserieb.c, ksurvb.c, ksurvg,c, stable.c: added #include "Rconfig.h" 4.10.99 ordglm.r: added deviance and corrected for zeros in table nordr.r: corrected typo potthoff.r: corrected erroneous calculation of standard errors (thanks to Tim Auton) 1.10.99 finterp, fnenvir: fixed conflict of names by beginning all names with a dot (thanks to Patrick Lindsey) elliptic.r: changed option and title from elliptic to power exponential 30.9.99 ordglm.r: generalized linear ordinal regression ------------------------------------------------------------------------------ version 0.6 ------------------------------------------------------------------------------ 21.9.99 pkpd.r: changed mu2.1o1cfp to ensure ked>0 20.9.99 resid.f: correction to work with MS-Windows 7.9.99 binnest.r, survkit.r: changed NULLs for Fortran to work with R0.65 6.9.99 ehr.r, kalsurv.r, fmr.r, gnlr.r, gnlr3.r, nlr.r, nordr.r, elliptic.r, gar.r, gnlmm.r, kalcount.r, kalseries.r: changed attributes to work with R0.65 finterp, fnenvir: variables can be logical as well as numeric 3.9.99 Makefiles: moved $(FLIBS) to end of line 14.8.99 print.gnlr: corrected errors in printing fmr, gnlr3, and gnlmm output fnenvir.tvcov: corrected error for undefined ex1a (-> ex2a) Pareto, gnlmm, hstudent, kalcount, kalseries, pkpd, read.list, read.surv, tvctomat: corrected examples 18.7.99 hidden.r, chidden.r: corrected one error message added printout of degrees of freedom 14.7.99 binnest.f: modified comments to compile with standard Fortran (thanks to Martin Maechler) ------------------------------------------------------------------------------ version 0.5 ------------------------------------------------------------------------------ 29.6.99 plot.response: remove NAs when calculating default ylim 28.6.99 gnlr.r, gnlr3.r, fmr.r, nordr.r, gnlmm.r: check if user gives a nonlinear formula in linear argument and correctly handle it 27.6.99 finterp: corrected error message when non-numeric vector supplied restovec: corrected printing of total times when negative times present added transform methods for response, tccov, and tvcov objects 24.6.99 gar.r: corrected error in printing shape functions 22.6.99 binnest.f: modified to compile with g77 8.6.99 binnest: binary random effects model with two levels of nesting 7.6.99 restovec: added an additional check for nest variable in lists 6.6.99 logitord.f: corrected bug in calculation of Hessian (and s.e.) 
1.6.99 elliptic: added multivariate Student t distribution 11.5.99 finterp.r: functions allowed in W&R formulae carma.r, elliptic.r, kalseries.r, kalcount.r, kalsurv.r: allow factor variables finterp: can locate and use indices for individuals and nesting as factor covariates 10.5.99 tcctomat.r, tvctomat.r: allow factor variables finterp, fnenvir: changed to check for factor variables 6.5.99 elliptic.r: allow variance to be a function of the mean function 4.5.99 gar.c: changed normal distribution shape parameter from sd to variance 3.5.99 profile and iprofile: fixed to plot correctly with nesting 1.5.99 tcctomat, tvctomat: allow dataframes 28.4.99 tvctomat: time-varying covariates can be factors elliptic.r, gnlr.r, gnlr3.r, fmr.r, gnlmm.r, gar.r: location and shape functions can have common parameters 26.4.99 restovec: weights allowed for lists finterp, fnenvir: can find the times when envir is a repeated object gar.r: allow shape to be a function of the location function 23.4.99 gnlr.r, gnlr3.r, fmr.r, nordr.r, nlr.r, elliptic.r, gnlmm.r, gar.r, kalseries.r, kalcount.r, kalsurv.r, ehr.r: do not require envir if response has class, repeated corrected bugs in restovec and plot.response (Lorenz Gygax) 22.4.99 generalized plot.residuals tvctomat: allow calculation of more than one interaction with time-constant covariates at a time finterp and fnenvir: allow variables in environment to have same name as a function 21.4.99 correction of 18.1.99 by Brian Ripley wrong: dist.c put back in gnlm 20.4.99 ksurvb.c: corrected bug when time-varying covariates elliptic.r: added option to ccov and tvcov to give covariate names when response has class, repeated carma.r: added option to ccov to give covariate names when response has class, repeated 19.4.99 changed plot.profile to profile and plot.iprofile to iprofile 18.4.99 elliptic.r: added recursive predicted values when AR(1) and/or random effect 16.4.99 gnlr.r, gnlr3.r, fmr.r, nordr.r, gnlmm.r, ehr.r: changed order of parameters when function with linear part 15.9.99 ehr: corrected two errors when lambda function with linear part nordr.r: n changed to nrows 13.4.99 carma.r: corrected predicted values when response is transformed gar.r, kalseries.r: changed handling of transformed responses ------------------------------------------------------------------------------ version 0.4 ------------------------------------------------------------------------------ 12.4.99 added dependency on rmutil to DESCRIPTION 11.4.99 elliptic.f: corrected handling of dose for PKPD model when time-varying covariates are present 6.4.99 elliptic.r, gnlmm.r, gar.r, kalseries.r, kalcount.r, kalsurv.r, ehr.r, nordr.r, nlr.r: modified to use fnenvir 5.4.99 gnlr.r, gnlr3.r, fmr.r: modified to use fnenvir 4.4.99 fnenvir: checks functions for covariates and parameters and modifies them to read from data objects 1.4.99 elliptic.r: modified to use model formulae with unknowns finterp.r: added data objects as environment tvctomat, tcctomat: can combine two data objects 31.3.99 gar.r: modified to use model formulae with unknowns 30.3.99 rmna: check if a covariate only has one value after NA removal fixed examples docs so that they work 29.3.99 kalcount.r, kalsurv.r, ehr.r: modified to use model formulae with unknowns 28.3.99 gnlmm.r, kalseries: modified to use model formulae with unknowns restovec: added coordinates to response class for spatial data 26.3.99 gnlr.r, gnlr3.r, fmr.r, nordr.r, nlr.r: modified to use model formulae with unknowns 24.3.99 changed language check to inherits 
formula in all functions added methods for extracting elements from data objects finterp.r: transforms model formulae with unknowns into functions 22.3.99 restovec: times no longer required for clustered data type attribute added carma.r, elliptic.r, kalseries.r kalcount.r: check to see if times available 15.3.99 rmaov.r: wrote documentation pkpd.r : added two new models and corrected one other 13.3.99 restovec: allow ties in times 23.2.99 gar.c: corrected Laplace cdf and allowed negative values 11.2.99 ehr: corrected for ties kalsurv.r: prints out "birth process" when applicable instead of renewal process logitord.r: removed DUP=F from Fortran call 8.2.99 km.r: fixed bug in plot.dist.km when several groups are plotted (Gareth Ridall) 7.2.99 improved handling of variable names in tcctomat, tvctomat, and functions calling them rmaov.r: split-plot aov from Ralf Goertz 6.2.99 glmm.r: accepts transformed data if dataframe supplied 5.2.99 km.r: fixed bug for double lines with censored observations (Gareth Ridall) ehr.r: modified handling of language 4.2.99 km.r: added print.km to remove attributes restovec: accepts all response data, not just repeated measurements tvctomat: added calculation of interactions 2.2.99 restovec: added adding several column matrices in lists with censoring kalsurv.r: added delta option 1.2.99 glmm.r: binary response with binomial handled correctly 30.1.99 plot.iprofile.carma: corrected nind argument restovec, carma, elliptic, kalcount, kalseries: added how to handle `times' for clustered data to docs 28.1.99 bivbinom.r, marghom.r: minor corrections rs.r: improved printout 26.1.99 readrm.r: corrected lty specification in plot.response added option to plot points 24.1.99 gnlr.r, gnlr3.r, fmr.r, gnlmm.r: y can have classes, response or repeated added DUP=F to all .C and .Fortran calls pbirth.r: binomial data distributions 22.1.99 readrm.r: added ... 
for graphics options in plot.response and plot.repeated 21.1.99 rmna: added checks that ccov and tvcov have correct class 19.1.99 dist.c: changed static romberg to romberg2 and added static interp carma.r, chidden.r, elliptic.r, gar.r, hidden.r, kalcount.r, kalseries.r, kalsurv.r: allow response to have class, repeated restovec: allow delta to be a dataframe 18.1.99 corrections by Brian Ripley gnlm: removed redundant dist.c enclosed library.dynam in .First.lib potthoff.r: added matrix dimension checks util.r: removed orth function potthoff.r: replaced orth by contr.poly 17.1.99 carma.r, chidden.r, elliptic.r, gar.r, hidden.r, kalcount.r, kalseries.r, kalsurv.r: copy response vector more efficiently restovec: added total time for survival data coxre.r: reorganized for efficiency, eliminating data.frame cprocess.r: times can have class, response 16.1.99 gnlr.r, gnlr3.r, fmr.r, gnlmm.r: removed -delta/2 in right censoring calculation dist.r, gnlr3.r, gar.c, hidden.f: changed parametrization of Burr to agree with kalsurv.r elliptic.r: use var(y) to get initial estimate of variance ------------------------------------------------------------------------------ version 0.3 ------------------------------------------------------------------------------ 14.1.99 kalsurv.r: corrected printing of number of subjects and observations 2.1.99 cprocess.r: allow event counts with unequal times added mode="double" to is.vector 29.12.98 corrected minor bugs in fmr 28.12.98 corrected abs bug for Laplace in kalman C functions 27.12.98 restovec: corrected binary totals when given as a vector gar: added Levy, Pareto, generalized inverse Gauss, and power exponential distributions hidden and chidden: added various overdispersed and continuous distributions 22.12.98 hidden and chidden: added filter calculation and plots 21.12.98 moved Student t from gnlr to gnlr3 renamed beta as Pareto in kalcount, kalseries, and kalsurv corrected various minor errors in fmr and gnlr3 20.12.98 dist.r, gnlr.r, fmr.r: added gamma count and Pareto distributions 18.12.98 chidden: continuous-time hidden Markov chain models 7.12.98 dist.r, gnlr.r, fmr.r: added Levy distribution removed .so from Makefiles and library.dynam 6.12.98 util.r: added spectral decomposition to mexp 5.12.98 rmutil: added several p and d functions gnlr3.r: added censored generalized inverse Gaussian and power exponential distributions 2.12.98 int.r: vectorized Romberg integration 1.12.98 int.r: added option for Romberg integration 30.11.98 updated libraries with Brian Ripley's corrections 25.11.98 hidden: allow values in the transition matrix to be fixed at 0 or 1 24.11.98 hidden: added independence model 23.11.98 inthaz.c: changed header include bessel: removed function gnlr3.r: changed to internal bessel function 14.11.98 hidden: added multinomial distribution 12.11.98 hidden.f: corrected Poisson and binomial calculations 5.11.98 carmasub.f and survkit.f: changes for compatibility with g77 ------------------------------------------------------------------------------ version 0.2 ------------------------------------------------------------------------------ 2.11.98 ehr.r: corrected printing coefficients with linear and other parameters 1.11.98 km.r: corrected NaNs in log 29.10.98 carma.r: corrected printout of mean time km.r: corrected ylab for cdf 26.10.98 rmna: handles NAs in time-constant covariates properly carma.r and elliptic.r: accept ccov of class, tccov cprocess.r: added plots from counts of events 19.10.98 changed to inherits() throughout rationalized 
printing of gnlr, gnlr3, fmr, gnlmm and moved to rmutil added delta option to carma and elliptic 18.10.98 carma.r and elliptic.r: added handling of delta when y has class, response 17.10.98 gar.r: added cloglog link 12.10.98 gnlmm.r: corrected handling of delta when y has class, response 11.10.98 replaced tapply() with collapse() in bivbinom, catmiss, glmm, gnlmm, ehr, coxre 10.10.98 ehr.r check for singular covariance matrix print names of variables for coefficients when language 8.10.98 kcountb.c: corrected dplogis call gnlmm.r: corrected calls to ddp, dmp, ddb, and dmb coxre.r: removed as.is=T in data.frame corrected printing shape parameters when language used in gnlr, gnlr3, fmr, gnlmm 7.10.98 rs.r: put in check that all covariates are positive gnlmm.r: set censor to F for binomial data dist.c: changed ddp, dmp, ddb, and dmb to log and introduced weights 6.10.98 kseries.c: corrected error in serial update kalseries.r: correcting printing error when there is an interaction kalsurv: added serial update inthaz.c: put back ihlogis (disappeared with nmath) renamed wr.r as util.r moved det and %**% from repeated and growth to rmutil/R/util.r 5.10.98 corrected check in carma, elliptic, gar, and kalseries for nonpositive transformed values 4.10.98 glmm.r: corrected two errors 1.10.98 extended residual plots to all of class recursive kalcount, kalseries, kalsurv: return mean profiles in z$pred plot.profile: accepts z$pred as well as a mean function nbkal.r: corrections corrected and updated a lot of docs 30.9.98 moved kalsurv to event library renamed rmtools as rmutil inthaz.c: corrected error from change to nmath 29.8.98 kalsurv.r: added recursive fitted values kalseries.r: added recursive fitted values updated plot.residuals for recursive class 27.9.98 corrected docs for plot.profile, plot.iprofile added covind.default plot.iprofile: corrected default settings 24.9.98 gettvc.r: allow an option for ties bessel.r: only calculate one kind of function 20.9.98 gettvc.r: allow NAs in time-varying covariate corrected for ties between response and covariate tvctomat: allow tvcov to already have class, "tvcov" added as.double in all Fortran and C calls 18.9.98 plotrm.r: corrected bug in plot.iprofile due to new covind() 16.9.98 pkpd.r: corrected mu2.0o2c and added mu2.0o2cfp 15.9.98 replaced Bessel, Gauss-Hermite, and integration routines bessel.r: added docs 14.9.98 moved wr to rmtools and added docs added covind function to rmtools 12.9.98 kalserie.r: added delta option tcctomat.Rd: corrected alias created new library, rmtools 11.9.98 dist.r: added beta binomial dist.c: simplified calls to overdispersion functions autocor.r: corrected pergram kalserie.r: corrected error in printing parameters with torder>0 kserieb.c: corrected error when mu function used 10.9.98 readlist.r: corrected binomial totals for lists in restovec fmr.r: removed unnecessary code gar.r: added overdispersed binomial data dist.r: allow dispersion to be a scalar when mean is a vector created documentation for p and d functions 9.9.98 nordr.r: corrected weights for adjacent categories model test for p>1 in proportional odds gar.r: added checks on times and mu arguments added binomial data corrected docs for elliptic, gar, kalcount, kalseries, kalsurv, nbkal for z$index clarified docs for rmna, restovec, tcctomat, and tvctomat 8.9.98 removed backslash at end of Makefiles for event, gnlm, growth moved integer declarations to the beginning in carmasub.f, elliptic.f, gettvc.f, survkit.f so that g77 should work 5.9.98 gar.r Corrected 
predictions for transformed responses ------------------------------------------------------------------------------ version 0.1 rmutil/MD50000644000176200001440000000731514326404743012113 0ustar liggesusers626aa956601a298ac539661ddfba7df0 *DESCRIPTION 29cd42a85b992bbc8211a7ff776f231f *NAMESPACE 95738fdcd34080be534c77496fd75d51 *NEWS.md ef0aa598b93ec9ec9a1a052b1369772f *R/contrast.r 3701eac8a5cfcacc7d9d07455d64a8c3 *R/diffeqn.r 03a46a75c9e75b0725f07789e4f37d2a *R/dist.r bc5775ade6402b2ef8d00951a4891d69 *R/finterp.r 0e8c28235c9ef20c9bb436f335635b6f *R/gettvc.r b28fe0b04d36b9f80d76fce6565c39d4 *R/ghermite.r f5e5f0c508a590098604a70d9ddc89fa *R/int.r c48a2502eb9f851d7c0c247ca781f94b *R/objectrm.r 645933ae72f085ae09623eba272e62ad *R/pkpd.r 6c634198c7ff53bf3c4a428454a6c697 *R/plotrm.r f64fcab6402964ac77645451e6ebad8f *R/printrm.r 6095189d7b9c3599fd7b8d5830d0af66 *R/readrm.r 6fdbb7e065d3e93c8a6b758c429425d2 *R/util.r f7b9ab88ed0c6bc219a2f096a7f5acab *man/BetaBinom.Rd 9d26baafe88f889b01105122af84036c *man/BoxCox.Rd 8bd5fd8b321045ca5fc6c833cdff37a5 *man/Burr.Rd dda394207675aed2d0fd0968705d6f14 *man/Consul.Rd 392c71ddf739a0f0e159db9885bc2fb3 *man/DataMethods.Rd e096d4c2dc87e3c5c78962eedd3f6751 *man/DoubleBinom.Rd f0b29bc17e6dcea5bb8ad0baf0f5572d *man/DoublePoisson.Rd 1118f9a786a0f67208f0c350d2611f55 *man/FormulaMethods.Rd 22a047082fec060219219a760d89e3f3 *man/GExtVal.Rd fcffe7e68891a82c6af8f76f411766e0 *man/GGamma.Rd 48eaf4aa785335445f5383cae1af2434 *man/GInvGauss.Rd 1c7b76e739f873051190d9b09fa908d5 *man/GLogis.Rd 08e4238a783a4ca1596fe2651aa39b2c *man/GWeibull.Rd 1eda70eee756a8dd56adfe49c966244a *man/GammaCount.Rd ab97b1de5b61bc58b6d1f01e4760a4b6 *man/Hjorth.Rd f846de927135aab062cbcbe4f8abc985 *man/InvGauss.Rd 1fdcebf0af727f67c2e3f791df300518 *man/Laplace.Rd 5933ed21b41b65c3c198522a5ce5ac32 *man/Levy.Rd d25828c4e65d4554e4acdd1d7970aace *man/MultBinom.Rd f76a8fe347478884b707ed7870b41f82 *man/MultPoisson.Rd bcad33ab1ff882d1acc5b66a8ec20892 *man/Pareto.Rd 3ff428bf85c579e7e98c5a4ce5fb3bde *man/PowerExp.Rd 950423d2f977bb77e8757d1941f2f8b3 *man/PvfPoisson.Rd b943a94e644cc24cc76f424734bb42c8 *man/Simplex.Rd a268eaf991c5541c33a0d7a135915ee3 *man/SkewLaplace.Rd 1d8d4a34afcbce59740abeb70cc88be5 *man/TwoSidedPower.Rd 28fe18d2b69d74b06578c20cc1a44896 *man/capply.Rd 2178b66b62da6440e4b977f282b81946 *man/contr.mean.Rd b301950db34aebc8705ae6ed06c5f43f *man/dftorep.Rd 83a0e4dcbb4a2a6900467050e17562b1 *man/finterp.Rd 9b7f890e70e5808910fa9fb800f60177 *man/fmobj.Rd d5260eca7a7b41007171b24517b3d742 *man/fnenvir.Rd adfb7c5c5afc798ea6008d674f757589 *man/gauss.hermite.Rd 68d0db61802561c2bdaa68e58ce0c5f8 *man/gettvc.Rd fe3b7322712b42df71192c9de0461ece *man/int.Rd 4de5a4648325254b18ddc61b8927eefb *man/int2.Rd bc25ba1d110254a22487e22d7ed0755b *man/iprofile.Rd 802e62144b1453f60af7a49fc39ed54f *man/lin.diff.eqn.Rd d48b47096191b7e3760b8b79c95d22b6 *man/lvna.Rd e0380a6221f31b65dab985cc4bb975a7 *man/mexp.Rd 6928c15e751758e18d98bd519aa357cd *man/mpow.Rd d10cd9e1cdd8aa80ad316aaccf654277 *man/mprofile.Rd 53005e44523c2fe20684900c56b88429 *man/pkpd.Rd ba536f3adde222c06f62258cb296b5c4 *man/plot.residuals.Rd 5091cf692dc81d36f39a4a7eb97e649b *man/read.list.Rd 659e144e0908e40ac99a00f7d354c7b6 *man/read.rep.Rd b781572315ed35f36bea4d6e90f9c991 *man/read.surv.Rd 16ac98188d30d73eba29ada8007011f9 *man/restovec.Rd d1cb160e2c6001e26cbefcff34cc4b47 *man/rmna.Rd 495949bfeb87f04142b1adf38081fc72 *man/rmutil.Rd 41901cbbe92a49fe542c09aad1e530db *man/runge.kutta.Rd a6b3deb9ea94d3abb128c82c9df3cbce *man/tcctomat.Rd db3b7e8ef3c5997a10b38a4b721c3565 
*man/tvctomat.Rd 33e7e106c1133e7f66e0007bb02fc3ee *man/wr.Rd 56be3da0a27ad554ac2b456af1cd58c9 *src/cutil.c 1de3197f5a6a93fceadcafb3bd634cfa *src/dist.c b4d06dab580c85ce4e030e06819caf1d *src/dist.h 20c5a2a4df22ac6d34eccc1436dddb6f *src/f2c.h 66f8bf15a55ad8e04f6072d45dc9a67f *src/gettvc.f 24cba2234a55f9b523b5e189c2c68ff3 *src/rmutil_init.c b659606ccdaf16303fa5af5fb363eca9 *src/romberg_sexp.c 4634c4a1016cdca4d1bf41eb69603799 *src/toms614_sexp.c