timereg/0000755000175000017500000000000014131552772012047 5ustar nileshnileshtimereg/MD50000644000175000017500000001557114131552772012370 0ustar nileshnileshbbc094065ed70cbcc90863f7b681e196 *DESCRIPTION 77ef0d19b368f7a02cc491b5ba186ed4 *NAMESPACE 5626b58ac5b3148673c12e7917b6be8a *R/Cpred.r 099387f56334cc736e8cb231fb9c45a9 *R/Csmooth.r 9502ffcea95fab972cb5a839218c43db *R/Gprop-odds.r ac65573f1ba9279d3703c639e7beea3b *R/aalen-des.r 8f71b7488126898d1f57d7d0d0502cce *R/aalen.r 828e7d515d1ddb5e6ff407e835c9d0d6 *R/aalenC.r ce5df16dab12642b83def64d018f573b *R/additive-compSs.r 10d9e9266d03da9469acfc0151075573 *R/base.r 90a81492629f339340e34698756ab3d5 *R/clusterindex-reshape.r 25123a09bd94c09ff2c3bbea77e91513 *R/comprisk.ipw.r da9cfb9965c1c52174e278b9d5521b0a *R/comprisk.r 8fcbdd34fea0dd6df9da60ac6f52c124 *R/cox-aalen.r ac3e9cba85994ee64d58271355b19c66 *R/cox.ipw.r b86e4223e01cc585c71738fe7831df91 *R/cox.marg.r 4d4e18867d345927fb741137a190d7a5 *R/dynadd.r 0af33319ce62350579a2df77d6ff8b6f *R/event.r 25e8041d1a29c5987f2b4a88eca22c72 *R/event.split.r e4ea65660c583fa2badf2c4ced5e0da5 *R/glm-comprisk.r eb30fed1e570fea74a584f57d023b1f2 *R/ipcw-residualmean.r 059050bba25ecb3573efe3bf39c17907 *R/kmplot.r 8c13dd5267a40d23a05b3bf1727a4a36 *R/krylow.pls.r e1affa4ad22b508ffc9c283f184f298d *R/mgresid.r 0d23bd74369169e2e4313fd6cc5fb8ac *R/new.aalen.r a42bc12b54f237bf52972b23ace1198c *R/new.cox-aalen.r aa7f1275fb4304e56a822274f39808c8 *R/new.dynreg.r 8d36620827445f0c78d85f07b583d98b *R/new.pe-sasieni.r dbee1c579bf6453c892c9f0234522c3a *R/new.prop-excess.r bbdd0812af948739cddf0d7cc8f69cd3 *R/new.timecox.r a8670d9185d078c4c51be46895ec6318 *R/plots.r 9e1b99b5b49f4f7dfacaf9c22e29c955 *R/predict-timereg.r e7135ed89e91b6ad4038bc36a98d896f *R/prop-excess.r b7245acfb12eaf2f0fc0b5f224a18eb3 *R/prop-odds-subdist.r 37e3158e18d9c9381f6bae3e5a5f3c84 *R/prop-odds.r 1919ca75894e6e4ba1bef85cf973b897 *R/qcut.r 1ada7ca3cc97eb1f61f86489747c520d *R/read-design.r e0936d5d002cc59dcaa718833195b771 
*R/recurrent.r 89576212056bc68752f9e1584f02a4a9 *R/residualsTimereg.r 7458071becb0e7bba21d911611459fe5 *R/restricted.residual.mean.r fcd95e0f5483840042cef05f7b65f8e4 *R/sim-pc-hazard.r 0c8bbe3f6e4a496e31b6f23f5c2dcf45 *R/timecox.r 030832be7ab88a52ee030f17857486ef *R/timereg-copy-package.R 09db02d1ee89f461a34a07d0b33d8ce7 *R/two-stage-reg.r f9cf9dc925b0eb4532905279dcd07a3f *data/TRACE.txt.gz 4287db3533e463a0c4899b7faad8962b *data/bmt.txt.gz 82f961ec5ff8c270f587e6b37fa0dc36 *data/cd4.txt.gz 11f82920649a881ac6294f42a2d6453a *data/csl.txt.gz 0bcb44d800f82ecdff05128e2cdc667f *data/diabetes.txt.gz 8b1d076260893d0229f17444b3ca13fe *data/mela.pop.txt.gz 5adbdbf1e4b24898191a1de02bcb0a86 *data/melanoma.txt.gz 0515e4b60fa4a9bb9c5d9752f2545050 *data/mypbc.txt.gz f3f92fad3fa105fa22ddbd56db3648f4 *data/sTRACE.txt.gz b61d3b8771eadd513bb517dfa7487cbf *data/tTRACE.txt.gz 51f7ea335e481598b40bddaddf15ef28 *inst/CITATION 632f65da91c05ddeb3532642177fb1e2 *man/Event.Rd c492638935fc4c48f7c0ae62ea80ecba *man/Gprop.odds.Rd d108fe8aa8e72f626ae94585b477523e *man/TRACE.Rd e8284679607579f0a4d93b8533d022fc *man/aalen.Rd 7fbc7eae8f3bfea691b749c4b527e3ea *man/bmt.Rd 6d2ea3277d577d050fab1524b69b7b2c *man/cd4.Rd 7dfed14b0dff275583ecf439b53fb312 *man/comp.risk.Rd 350fec0cecc267ae95ea93919eed3f95 *man/const.Rd 8b26d35fbdb93f0d3c6acd34120c1035 *man/cox.Rd cd900991e44f6503697b9c9e3b384722 *man/cox.aalen.Rd 76c7af2f2a5a628bd96842602d95224c *man/cox.ipw.Rd a492a8b2210cd1576538a38cb9044e15 *man/csl.Rd d36cf628de8ba43ac0e5df33ae653e02 *man/cum.residuals.Rd bdabc53ef7f9e7fc6252f34bfd78c0a8 *man/diabetes.Rd 4594967bc41e0d1a8dde9f3a96a025af *man/dynreg.Rd b149b29193aac922e51c2732716755b9 *man/event.split.Rd 51b2684b3427df231d9675f2e08174b7 *man/internal-addreg.Rd 434ed05637298fd441597e1deb9887d5 *man/invsubdist.Rd 15639ce7d9851fdc71ee6960672bf2e5 *man/krylow.pls.Rd d55feb16a4435a4361cef76a20836d94 *man/mela.pop.Rd e5beb43d6f21fb7d746998b3bc65b98e *man/melanoma.Rd 05c259e250c27f3a8bd74c3238a65b75 *man/mypbc.Rd 
882abb59f0c2d83574602bc7eb999067 *man/pava.pred.Rd b641418624eadac51f6d57bda285e1ba *man/pe.sasieni.Rd 72025de21e806ea0678df0ffde962f63 *man/plot.aalen.Rd 8bef80978f9ef499c49ccc62f4680bf6 *man/plot.cum.residuals.Rd e7de67fcf562f623ee7780ee917fdac6 *man/plot.dynreg.Rd 325f6eb8afc83c89f2311e5b582545ec *man/predict.timereg.Rd 387564bb3113e59fb50682b5d0a178fc *man/prep.comp.risk.Rd e9d1aea47071d1a1be53adec1dcc0bab *man/print.aalen.Rd 6431c40d80dfb583d81de85c75d64bcc *man/prop.Rd 7932ee387330784c23638ac49f499a41 *man/prop.excess.Rd 6ace2462ce9738d76807e04dac6b6629 *man/prop.odds.Rd 5da10cf70f64f4063a38d9ca5b09ec7a *man/prop.odds.subdist.Rd bd9941d527a00930f5ea4cd9eef2c179 *man/qcut.Rd 2570ce3d200f44f739640bc4b8e5fe5c *man/rchaz.Rd 24808700ebf8eee3b7c67963c8040d1e *man/rcrisk.Rd 99acad7a9ac057108a2fa27b8b71c97b *man/recurrent.marginal.coxmean.Rd 01ba024eba20af38bec808e74b530d0b *man/recurrent.marginal.mean.Rd e9e6dac9a499f2354d58b940515a2bac *man/res.mean.Rd e585fbc99585cbef346682b9c2ed7f7d *man/restricted.residual.mean.Rd c88a610ddb2cefa17413e292a909aa1b *man/sim.cause.cox.Rd f375c0e3b4b6b51c653995eb99e51e6a *man/sim.cif.Rd 0773c85b9fd6928f2992a7c0e918ea15 *man/sim.cox.Rd 9fe5ad398c7992c9f4bea735d15c5802 *man/simsubdist.Rd a28f24f21384abedf39e210325980a1e *man/summary.aalen.Rd f2866c71545517e06ace846609a3aba9 *man/summary.cum.residuals.Rd 6ce722252b424227ca04ee81b2865703 *man/timecox.Rd f6aa705421d9e69b89c58fe9ecb7f5ed *man/two.stage.Rd 6ca0ff08d96ffde78722a1d7b127a127 *man/wald.test.Rd f3cda6fd03c8ce405c9a2f102107d737 *src/Gprop-odds-subdist.c d50881ace9146d86b5f4850260d93fcb *src/Gprop-odds.c 86d6c8302231e97079879409bcbda0a9 *src/Makevars 51b5faa1e9abb86cf25443475cd50fd3 *src/aalen-test.c 97ad208c0cafb2aaefaa7bea1c9bdadf *src/aalen.c a8c0e78b6dc70c8875372fb70d844c4c *src/aalenC.c 37c8d7f302b0075f8746e09a1c10d6ec *src/additive-compSs.c 17c9e87f22f9ae95a56aa71607ce9655 *src/additive-pls-new.c d17d07200a41e92f18f235a1c310777d *src/breslow.c 
bd2d38ffbad02032fddff08217bbf716 *src/comprisk.c d08f12f2415fe3b166f139e6a62fac19 *src/comptest-cmprsk.c 52d62edf54d5d36bd0f290de5fb93cbd *src/comptest.c bce6cdbdb066fb69fd5d96c7223ef5f3 *src/cox-aalen-lwy-resamp.c fbd20d29f0817171d9728c7ee17bbcb4 *src/cox-aalen-stratum.c 1a29c165406f5b7b7a179b62de4f97c6 *src/cox-aalen.c 97b6ef92a808f650e80c22765d0c2316 *src/dynadd.c ca29fa4b05738dd326779eabf52c4e3f *src/ipcw-residualmean.c 404d988896fe5ad38ada48b97bcd9b73 *src/kim-kim.c 060b10e016c41a01cd34eb9e6b3a03d7 *src/matrix.c bdb934edfe1b5d23b0d5d5fbbef1219e *src/matrix.h 0f5f7f48963cb993ac00376bee16345c *src/mgresid.c 49bbf5d16d6ae2d81a62597862480bbd *src/pava.c 41510506bccf3684a5682a69b35c9315 *src/pe-sasieni.c e27326ea9526523bfae7218f471300c5 *src/pred.c 33e63e34d273154c6a5fa21c1b8d616a *src/prop-excess.c 746a44a951c61b50ddfd9602bb6ffc7d *src/prop-odds-subdist2.c e48f531f59fb1b2cc299f01fe0fd100f *src/prop-odds.c e7c409384f665cf2947b6b2bfe4df73f *src/smooth.c 32efbb4239c8e57fd02c4061a2158110 *src/smooth2.c afa0afbcae2f59d9b1b54622759c3ed5 *src/timecox.c 0994bb687b4cee0b84da289ed6218c5a *src/timeregister.c c2d415824e8e1ec488de7a0ca2de5210 *src/two-stage-reg.c 2c9c6cdb651a96cf6aa10733e9b55a84 *src/unifConfBandResampling.c timereg/DESCRIPTION0000644000175000017500000000201314131552772013551 0ustar nileshnileshPackage: timereg Title: Flexible Regression Models for Survival Data Version: 2.0.1 Date: 2021-10-06 Author: Thomas Scheike with contributions from Torben Martinussen, Jeremy Silver and Klaus Holst Maintainer: Thomas Scheike Description: Programs for Martinussen and Scheike (2006), `Dynamic Regression Models for Survival Data', Springer Verlag. Plus more recent developments. Additive survival model, semiparametric proportional odds model, fast cumulative residuals, excess risk models and more. Flexible competing risks regression including GOF-tests. Two-stage frailty modelling. PLS for the additive risk model. Lasso in the 'ahaz' package. 
LazyLoad: yes URL: https://github.com/scheike/timereg Depends: R (>= 2.15), survival Imports: lava, numDeriv, stats, graphics, grDevices, utils, methods Suggests: mets, License: GPL (>= 2) Encoding: UTF-8 RoxygenNote: 7.1.1 NeedsCompilation: yes Packaged: 2021-10-06 11:58:47 UTC; tom Repository: CRAN Date/Publication: 2021-10-13 12:50:02 UTC timereg/man/0000755000175000017500000000000014127307567012627 5ustar nileshnileshtimereg/man/prep.comp.risk.Rd0000644000175000017500000000741314077524411015766 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/comprisk.r \name{prep.comp.risk} \alias{prep.comp.risk} \title{Set up weights for delayed-entry competing risks data for comp.risk function} \usage{ prep.comp.risk( data, times = NULL, entrytime = NULL, time = "time", cause = "cause", cname = "cweight", tname = "tweight", strata = NULL, nocens.out = TRUE, cens.formula = NULL, cens.code = 0, prec.factor = 100, trunc.mintau = FALSE ) } \arguments{ \item{data}{data frame for comp.risk.} \item{times}{times for estimating equations.} \item{entrytime}{name of delayed entry variable, if not given computes right-censoring case.} \item{time}{name of survival time variable.} \item{cause}{name of cause indicator} \item{cname}{name of censoring weight.} \item{tname}{name of truncation weight.} \item{strata}{strata variable to obtain stratified weights.} \item{nocens.out}{returns only uncensored part of data-frame} \item{cens.formula}{censoring model formula for Cox models for the truncation and censoring model.} \item{cens.code}{code for censoring among causes.} \item{prec.factor}{precision factor, for ties between censoring/even times, truncation times/event times} \item{trunc.mintau}{specicies wether the truncation distribution is evaluated in death times or death times minimum max(times), FALSE makes the estimator equivalent to Kaplan-Meier (in the no covariate case).} } \value{ Returns an object. 
With the following arguments: \item{dataw}{a data.frame with weights.} The function wants to make two new variables "weights" and "cw" so if these already are in the data frame it tries to add an "_" in the names. } \description{ Computes the weights of Geskus (2011) modified to the setting of the comp.risk function. The returned weights are \eqn{1/(H(T_i)*G_c(min(T_i,tau)))} and tau is the max of the times argument, here \eqn{H} is the estimator of the truncation distribution and \eqn{G_c} is the right censoring distribution. } \examples{ data(bmt) nn <- nrow(bmt) entrytime <- rbinom(nn,1,0.5)*(bmt$time*runif(nn)) bmt$entrytime <- entrytime times <- seq(5,70,by=1) ### adds weights to uncensored observations bmtw <- prep.comp.risk(bmt,times=times,time="time", entrytime="entrytime",cause="cause") ######################################### ### nonparametric estimates ######################################### ## {{{ ### nonparametric estimates, right-censoring only out <- comp.risk(Event(time,cause)~+1,data=bmt, cause=1,model="rcif2", times=c(5,30,70),n.sim=0) out$cum ### same as ###out <- prodlim(Hist(time,cause)~+1,data=bmt) ###summary(out,cause="1",times=c(5,30,70)) ### with truncation out <- comp.risk(Event(time,cause)~+1,data=bmtw,cause=1, model="rcif2", cens.weight=bmtw$cw,weights=bmtw$weights,times=c(5,30,70), n.sim=0) out$cum ### same as ###out <- prodlim(Hist(entry=entrytime,time,cause)~+1,data=bmt) ###summary(out,cause="1",times=c(5,30,70)) ## }}} ######################################### ### Regression ######################################### ## {{{ ### with truncation correction out <- comp.risk(Event(time,cause)~const(tcell)+const(platelet),data=bmtw, cause=1,cens.weight=bmtw$cw, weights=bmtw$weights,times=times,n.sim=0) summary(out) ### with only righ-censoring, standard call outn <- comp.risk(Event(time,cause)~const(tcell)+const(platelet),data=bmt, cause=1,times=times,n.sim=0) summary(outn) ## }}} } \references{ Geskus (2011), Cause-Specific Cumulative 
Incidence Estimation and the Fine and Gray Model Under Both Left Truncation and Right Censoring, Biometrics (2011), pp 39-49. Shen (2011), Proportional subdistribution hazards regression for left-truncated competing risks data, Journal of Nonparametric Statistics (2011), 23, 885-895 } \author{ Thomas Scheike } \keyword{survival} timereg/man/prop.odds.Rd0000644000175000017500000001311314077524412015017 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prop-odds.r \name{prop.odds} \alias{prop.odds} \title{Fit Semiparametric Proportional 0dds Model} \usage{ prop.odds( formula, data = parent.frame(), beta = NULL, Nit = 20, detail = 0, start.time = 0, max.time = NULL, id = NULL, n.sim = 500, weighted.test = 0, profile = 1, sym = 0, baselinevar = 1, clusters = NULL, max.clust = 1000, weights = NULL ) } \arguments{ \item{formula}{a formula object, with the response on the left of a '~' operator, and the terms on the right. The response must be a Event object as returned by the `Event' function.} \item{data}{a data.frame with the variables.} \item{beta}{starting value for relative risk estimates} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{detail}{if 0 no details is printed during iterations, if 1 details are given.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. This is very useful to obtain stable estimates, especially for the baseline. 
Default is max of data.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{n.sim}{number of simulations in resampling.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{profile}{if profile is 1 then modified partial likelihood is used, profile=0 fits by simple estimating equation. The modified partial likelihood is recommended.} \item{sym}{to use symmetrized second derivative in the case of the estimating equation approach (profile=0). This may improve the numerical performance.} \item{baselinevar}{set to 0 to omit calculations of baseline variance.} \item{clusters}{to compute cluster based standard errors.} \item{max.clust}{number of maximum clusters to be used, to save time in iid decomposition.} \item{weights}{weights for score equations.} } \value{ returns an object of type 'cox.aalen'. With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{the martingale based pointwise variance estimates. } \item{robvar.cum}{robust pointwise variances estimates. } \item{gamma}{estimate of proportional odds parameters of model.} \item{var.gamma}{variance for gamma. } \item{robvar.gamma}{robust variance for gamma. } \item{residuals}{list with residuals. 
Estimated martingale increments (dM) and corresponding time vector (time).} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands. } \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect over time.} \item{loglike}{modified partial likelihood, pseudo profile likelihood for regression parameters.} \item{D2linv}{inverse of the derivative of the score function.} \item{score}{value of score for final estimates.} \item{test.procProp}{observed score process for proportional odds regression effects.} \item{pval.Prop}{p-value based on resampling.} \item{sim.supProp}{re-sampled supremum values.} \item{sim.test.procProp}{list of 50 random realizations of test-processes for constant proportional odds under the model based on resampling.} } \description{ Fits a semiparametric proportional odds model: \deqn{ logit(1-S_Z(t)) = log(G(t)) + \beta^T Z } where G(t) is increasing but otherwise unspecified. Model is fitted by maximising the modified partial likelihood. A goodness-of-fit test by considering the score functions is also computed by resampling methods. 
} \details{ The modelling formula uses the standard survival modelling given in the \bold{survival} package. For large data sets use the divide.conquer.timereg of the mets package to run the model on splits of the data, or the alternative estimator by the cox.aalen function. The data for a subject is presented as multiple rows or "observations", each of which applies to an interval of observation (start, stop]. The program essentially assumes no ties, and if such are present a little random noise is added to break the ties. } \examples{ data(sTRACE) # Fits Proportional odds model out<-prop.odds(Event(time,status==9)~age+diabetes+chf+vf+sex, sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(2,3)) plot(out,sim.ci=2) plot(out,score=1) pout <- predict(out,Z=c(70,0,0,0,0)) plot(pout) ### alternative estimator for large data sets form <- Surv(time,status==9)~age+diabetes+chf+vf+sex pform <- timereg.formula(form) out2<-cox.aalen(pform,data=sTRACE,max.time=7, propodds=1,n.sim=0,robust=0,detail=0,Nit=40) summary(out2) } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/melanoma.Rd0000644000175000017500000000224514077524411014703 0ustar nileshnilesh\name{melanoma} \alias{melanoma} \non_function{} \title{The Melanoma Survival Data} \description{ The melanoma data frame has 205 rows and 7 columns. It contains data relating to survival of patients after operation for malignant melanoma collected at Odense University Hospital by K.T. Drzewiecki. } \format{ This data frame contains the following columns: \describe{ \item{no}{ a numeric vector. Patient code. } \item{status}{ a numeric vector code. Survival status. 1: dead from melanoma, 2: alive, 3: dead from other cause. } \item{days}{ a numeric vector. Survival time. } \item{ulc}{ a numeric vector code. Ulceration, 1: present, 0: absent. } \item{thick}{ a numeric vector. Tumour thickness (1/100 mm). 
} \item{sex}{ a numeric vector code. 0: female, 1: male. } } } \source{ Andersen, P.K., Borgan O, Gill R.D., Keiding N. (1993), \emph{Statistical Models Based on Counting Processes}, Springer-Verlag. Drzewiecki, K.T., Ladefoged, C., and Christensen, H.E. (1980), Biopsy and prognosis for cutaneous malignant melanoma in clinical stage I. Scand. J. Plast. Reconstru. Surg. 14, 141-144. } \examples{ data(melanoma) names(melanoma) } \keyword{datasets} timereg/man/cum.residuals.Rd0000644000175000017500000000620514077524412015671 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mgresid.r \name{cum.residuals} \alias{cum.residuals} \title{Model validation based on cumulative residuals} \usage{ cum.residuals( object, data = parent.frame(), modelmatrix = 0, cum.resid = 1, n.sim = 500, weighted.test = 0, max.point.func = 50, weights = NULL ) } \arguments{ \item{object}{an object of class 'aalen', 'timecox', 'cox.aalen' where the residuals are returned ('residuals=1')} \item{data}{data frame based on which residuals are computed.} \item{modelmatrix}{specifies a grouping of the data that is used for cumulating residuals. Must have same size as data and be ordered in the same way.} \item{cum.resid}{to compute residuals versus each of the continuous covariates in the model.} \item{n.sim}{number of simulations in resampling.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing constant effects of covariates.} \item{max.point.func}{limits the amount of computations, only considers a max of 50 points on the covariate scales.} \item{weights}{weights for sum of martingale residuals, now for cum.resid=1.} } \value{ returns an object of type "cum.residuals" with the following arguments: \item{cum}{cumulative residuals versus time for the groups specified by modelmatrix. 
} \item{var.cum}{the martingale based pointwise variance estimates.} \item{robvar.cum}{robust pointwise variances estimates of cumulatives.} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum value.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands for cumulative residuals.} \item{obs.test}{absolute value of supremum of observed test-process.} \item{pval.test}{p-value for supremum test statistic.} \item{sim.test}{resampled absolute value of supremum cumulative residuals.} \item{proc.cumz}{observed cumulative residuals versus all continuous covariates of model.} \item{sim.test.proccumz}{list of 50 random realizations of test-processes under model for all continuous covariates.} } \description{ Computes cumulative residuals and approximative p-values based on resampling techniques. } \examples{ data(sTRACE) # Fits Aalen model and returns residuals fit<-aalen(Surv(time,status==9)~age+sex+diabetes+chf+vf, data=sTRACE,max.time=7,n.sim=0,residuals=1) # constructs and simulates cumulative residuals versus age groups fit.mg<-cum.residuals(fit,data=sTRACE,n.sim=100, modelmatrix=model.matrix(~-1+factor(cut(age,4)),sTRACE)) par(mfrow=c(1,4)) # cumulative residuals with confidence intervals plot(fit.mg); # cumulative residuals versus processes under model plot(fit.mg,score=1); summary(fit.mg) # cumulative residuals vs. covariates Lin, Wei, Ying style fit.mg<-cum.residuals(fit,data=sTRACE,cum.resid=1,n.sim=100) par(mfrow=c(2,4)) plot(fit.mg,score=2) summary(fit.mg) } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/predict.timereg.Rd0000644000175000017500000001207214077524411016176 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict-timereg.r \name{predict.timereg} \alias{predict.timereg} \alias{predict.aalen} \alias{predict.comprisk} \alias{predict.cox.aalen} \title{Predictions for Survival and Competings Risks Regression for timereg} \usage{ \method{predict}{timereg}( object, newdata = NULL, X = NULL, times = NULL, Z = NULL, n.sim = 500, uniform = TRUE, se = TRUE, alpha = 0.05, resample.iid = 0, ... ) } \arguments{ \item{object}{an object belonging to one of the following classes: comprisk, aalen or cox.aalen} \item{newdata}{specifies the data at which the predictions are wanted.} \item{X}{alternative to newdata, specifies the nonparametric components for predictions.} \item{times}{times in which predictions are computed, default is all time-points for baseline} \item{Z}{alternative to newdata, specifies the parametric components of the model for predictions.} \item{n.sim}{number of simulations in resampling.} \item{uniform}{computes resampling based uniform confidence bands.} \item{se}{computes pointwise standard errors} \item{alpha}{specificies the significance levelwhich cause we consider.} \item{resample.iid}{set to 1 to return iid decomposition of estimates, 3-dim matrix (predictions x times x subjects)} \item{...}{unused arguments - for S3 compatability} } \value{ \item{time}{vector of time points where the predictions are computed.} \item{unif.band}{resampling based constant to construct 95\% uniform confidence bands.} \item{model}{specifies what model that was fitted.} \item{alpha}{specifies the significance level for the confidence intervals. 
This relates directly to the constant given in unif.band.} \item{newdata}{specifies the newdata given in the call.} \item{RR}{gives relative risk terms for Cox-type models.} \item{call}{gives call for predict funtion.} \item{initial.call}{gives call for underlying object used for predictions.} \item{P1}{gives cumulative inicidence predictions for competing risks models. Predictions given in matrix form with different subjects in different rows.} \item{S0}{gives survival predictions for survival models. Predictions given in matrix form with different subjects in different rows.} \item{se.P1}{pointwise standard errors for predictions of P1.} \item{se.S0}{pointwise standard errors for predictions of S0.} } \description{ Make predictions based on the survival models (Aalen and Cox-Aalen) and the competing risks models for the cumulative incidence function (comp.risk). Computes confidence intervals and confidence bands based on resampling. } \examples{ data(bmt); ## competing risks add<-comp.risk(Event(time,cause)~platelet+age+tcell,data=bmt,cause=1) ndata<-data.frame(platelet=c(1,0,0),age=c(0,1,0),tcell=c(0,0,1)) out<-predict(add,newdata=ndata,uniform=1,n.sim=1000) par(mfrow=c(2,2)) plot(out,multiple=0,uniform=1,col=1:3,lty=1,se=1) # see comp.risk for further examples. 
add<-comp.risk(Event(time,cause)~factor(tcell),data=bmt,cause=1) summary(add) out<-predict(add,newdata=ndata,uniform=1,n.sim=1000) plot(out,multiple=1,uniform=1,col=1:3,lty=1,se=1) add<-prop.odds.subdist(Event(time,cause)~factor(tcell), data=bmt,cause=1) out <- predict(add,X=1,Z=1) plot(out,multiple=1,uniform=1,col=1:3,lty=1,se=1) ## SURVIVAL predictions aalen function data(sTRACE) out<-aalen(Surv(time,status==9)~sex+ diabetes+chf+vf, data=sTRACE,max.time=7,n.sim=0,resample.iid=1) pout<-predict(out,X=rbind(c(1,0,0,0,0),rep(1,5))) head(pout$S0[,1:5]); head(pout$se.S0[,1:5]) par(mfrow=c(2,2)) plot(pout,multiple=1,se=0,uniform=0,col=1:2,lty=1:2) plot(pout,multiple=0,se=1,uniform=1,col=1:2) out<-aalen(Surv(time,status==9)~const(age)+const(sex)+ const(diabetes)+chf+vf, data=sTRACE,max.time=7,n.sim=0,resample.iid=1) pout<-predict(out,X=rbind(c(1,0,0),c(1,1,0)), Z=rbind(c(55,0,1),c(60,1,1))) head(pout$S0[,1:5]); head(pout$se.S0[,1:5]) par(mfrow=c(2,2)) plot(pout,multiple=1,se=0,uniform=0,col=1:2,lty=1:2) plot(pout,multiple=0,se=1,uniform=1,col=1:2) pout<-predict(out,uniform=0,se=0,newdata=sTRACE[1:10,]) plot(pout,multiple=1,se=0,uniform=0) #### cox.aalen out<-cox.aalen(Surv(time,status==9)~prop(age)+prop(sex)+ prop(diabetes)+chf+vf, data=sTRACE,max.time=7,n.sim=0,resample.iid=1) pout<-predict(out,X=rbind(c(1,0,0),c(1,1,0)),Z=rbind(c(55,0,1),c(60,1,1))) head(pout$S0[,1:5]); head(pout$se.S0[,1:5]) par(mfrow=c(2,2)) plot(pout,multiple=1,se=0,uniform=0,col=1:2,lty=1:2) plot(pout,multiple=0,se=1,uniform=1,col=1:2) pout<-predict(out,uniform=0,se=0,newdata=sTRACE[1:10,]) plot(pout,multiple=1,se=0,uniform=0) #### prop.odds model add<-prop.odds(Event(time,cause!=0)~factor(tcell),data=bmt) out <- predict(add,X=1,Z=0) plot(out,multiple=1,uniform=1,col=1:3,lty=1,se=1) } \references{ Scheike, Zhang and Gerds (2008), Predicting cumulative incidence probability by direct binomial regression, Biometrika, 95, 205-220. 
Scheike and Zhang (2007), Flexible competing risks regression modelling and goodness of fit, LIDA, 14, 464-483 . Martinussen and Scheike (2006), Dynamic regression models for survival data, Springer. } \author{ Thomas Scheike, Jeremy Silver } \keyword{survival} timereg/man/sim.cif.Rd0000644000175000017500000001242314077524412014442 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{sim.cif} \alias{sim.cif} \alias{sim.cifs} \alias{subdist} \alias{pre.cifs} \alias{sim.cifsRestrict} \alias{setup.cif} \title{Simulation of output from Cumulative incidence regression model} \usage{ \method{sim}{cif}( cif, n, data = NULL, Z = NULL, drawZ = TRUE, cens = NULL, rrc = NULL, cumstart = c(0, 0), ... ) } \arguments{ \item{cif}{output form prop.odds.subdist or ccr (cmprsk), can also call invsubdist with with cumulative and linear predictor} \item{n}{number of simulations.} \item{data}{to extract covariates for simulations (draws from observed covariates).} \item{Z}{to use these covariates for simulation rather than drawing new ones.} \item{drawZ}{to random sample from Z or not} \item{cens}{specifies censoring model, if "is.matrix" then uses cumulative hazard given, if "is.scalar" then uses rate for exponential, and if not given then takes average rate of in simulated data from cox model.} \item{rrc}{possible vector of relative risk for cox-type censoring.} \item{cumstart}{to start cumulatives at time 0 in 0.} \item{...}{arguments for invsubdist} } \description{ Simulates data that looks like fit from fitted cumulative incidence model } \examples{ data(TRACE) ## Logit link for proportional odds model, using comp.risk to save time #' cif <- prop.odds.subdist(Event(time,status)~vf+chf+wmi,data=TRACE,cause=9) cif <- comp.risk(Event(time,status)~const(vf)+const(chf)+const(wmi), data=TRACE,cause=9,model="logistic2") sim1 <- sim.cif(cif,500,data=TRACE) #' cc <- 
prop.odds.subdist(Event(time,status)~vf+chf+wmi,data=sim1,cause=1) cc <- comp.risk(Event(time,status)~const(vf)+const(chf)+const(wmi), data=sim1,cause=1,model="logistic2") cbind(cif$gamma,cc$gamma) plot(cif) lines(cc$cum) ################################################################# ## Fine-Gray model model, using comp.risk to avoid dependcies ################################################################# cif <- comp.risk(Event(time,status)~const(vf)+const(chf)+const(wmi), data=TRACE,cause=9) sim1 <- sim.cif(cif,500,data=TRACE) #' cc <- crr cc <- comp.risk(Event(time,status)~const(vf)+const(chf)+const(wmi), data=sim1,cause=1) cbind(cif$gamma,cc$gamma) plot(cif) lines(cc$cum) # faster/better with mets package # library(mets) # scif <- cifreg(Event(time,status)~vf+chf+wmi,data=sim1,cause=1,prop=NULL) # # plot(scif$cum,type="l") # lines(cif$cum,col=2) # cbind(cif$gamma,scif$coef) # ################################################################ # simulating several causes with specific cumulatives ################################################################ data(bmt) cif1 <- comp.risk(Event(time,cause)~const(tcell)+const(age), data=bmt,cause=1,model="logistic2") cif2 <- comp.risk(Event(time,cause)~const(tcell)+const(age), data=bmt,cause=2,model="logistic2") ## must look at same time-scale cifs <- pre.cifs(list(cif1,cif2)) plot(cifs[[1]]$cum,type="l") lines(cifs[[2]]$cum,col=2) legend("topleft",c("cause1","cause2"),lty=1,col=1:2) n <- 500 sim1 <- sim.cif(cifs[[1]],n,data=bmt) Z <- sim1[,c("tcell","age")] sim2 <- sim.cif(cifs[[2]],n,data=bmt,Z=Z,drawZ=FALSE) ### rt <- rbinom(n,1,(sim1$F1tau+sim2$F1tau)) rb <- rbinom(n,1,sim1$F1tau/(sim1$F1tau+sim2$F1tau)) cause=ifelse(rb==1,1,2) time=ifelse(cause==1,sim1$timecause,sim2$timecause) cause <- rt*cause time[cause==0] <- tail(cifs[[1]]$cum[,1],1) bt <- data.frame(time=time,cause=cause,tcell=sim1$tcell,age=sim1$age) scif1 <- comp.risk(Event(time,cause)~const(tcell)+const(age), data=bt,cause=1,model="logistic2") 
scif2 <- comp.risk(Event(time,cause)~const(tcell)+const(age), data=bt,cause=2,model="logistic2") plot(scif1$cum,type="l") lines(scif2$cum,col=1,lty=2) legend("topleft",c("cause1","cause2"),lty=1:2,col=1:1) lines(cifs[[1]]$cum,col=2) lines(cifs[[2]]$cum,col=2,lty=2) # Everyhing wrapped in call assuming covariates work in the same way for two models dd <- sim.cifs(list(cif1,cif2),2000,data=bmt) scif1 <- comp.risk(Event(time,cause)~const(tcell)+const(age), data=dd,cause=1,model="logistic2") scif2 <- comp.risk(Event(time,cause)~const(tcell)+const(age), data=dd,cause=2,model="logistic2") plot(scif1$cum,type="l") lines(scif2$cum,col=1,lty=2) legend("topleft",c("cause1","cause2"),lty=1:2,col=1:1) lines(cifs[[1]]$cum,col=2) lines(cifs[[2]]$cum,col=2,lty=2) # Everyhing wrapped in call assuming covariates work in the same way for two models # but now draws cif1 to be of correct model, but model 2 is adapted #(if needed) to make constraints satisfied F1+F2 <=1 # see doubleFG of mets package for paramtrization # and drawns as "if not cause1" then distribute according to cause 2 # dd <- sim.cifsRestrict(list(cif1,cif2),2000,data=bmt) # faster with mets package # dd <- sim.cifs(list(cif1,cif2),1000,data=bmt) # scif1 <- cifreg(Event(time,cause)~tcell+age,data=dd,cause=1) # scif2 <- cifreg(Event(time,cause)~tcell+age,data=dd,cause=2) # # plot(scif1$cum,type="l") # legend("topleft",c("cause1","cause2"),lty=1:2,col=1:1) # lines(cifs[[1]]$cum,col=2) # lines(cifs[[2]]$cum,col=2,lty=2) # } \author{ Thomas Scheike } \keyword{survival} timereg/man/pava.pred.Rd0000644000175000017500000000172514077524411014774 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict-timereg.r \name{pava.pred} \alias{pava.pred} \title{Make predictions of predict functions in rows mononotone} \usage{ pava.pred(pred, increasing = TRUE) } \arguments{ \item{pred}{predictions, either vector or rows of predictions.} \item{increasing}{increasing or decreasing.} } 
\value{ mononotone predictions. } \description{ Make predictions of predict functions in rows mononotone using the pool-adjacent-violators-algorithm } \examples{ data(bmt); ## competing risks add<-comp.risk(Event(time,cause)~platelet+age+tcell,data=bmt,cause=1) ndata<-data.frame(platelet=c(1,0,0),age=c(0,1,0),tcell=c(0,0,1)) out<-predict(add,newdata=ndata,uniform=0) par(mfrow=c(1,1)) head(out$P1) matplot(out$time,t(out$P1),type="s") ###P1m <- t(apply(out$P1,1,pava)) P1monotone <- pava.pred(out$P1) head(P1monotone) matlines(out$time,t(P1monotone),type="s") } \author{ Thomas Scheike } \keyword{survival} timereg/man/simsubdist.Rd0000644000175000017500000000531014077524412015275 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{simsubdist} \alias{simsubdist} \title{Simulation from subdistribution function assuming piecwise linearity} \usage{ simsubdist( cumhazard, rr, n = NULL, entry = NULL, type = "cloglog", startcum = c(0, 0), ... ) } \arguments{ \item{cumhazard}{matrix that specified times and values of some cumulative hazard.} \item{rr}{"relative risk" terms} \item{n}{number of simulation if rr not given} \item{entry}{not implemented yet} \item{type}{either cloglog or logistic} \item{startcum}{c(0,0) to make cumulativ start in 0 with 0 cumhazard.} \item{...}{further arguments} } \description{ Simulation from subdistribution function assuming piecwise linearity for Fine-Gray or logistic link. 
} \examples{ data(sTRACE) cif <- comp.risk(Event(time,status)~const(vf),data=sTRACE,cause=9,model="logistic2") cumhaz <- cif$cum ## 1000 logistic without covariates with baseline from model fit sim1 <- simsubdist(cumhaz,n=1000,type="logistic") ### cifs <- comp.risk(Event(time,status)~+1,data=sim1,cause=1,model="logistic2") ### plot(cifs) lines(cifs$cum,col=2) ## 1000 logistic with covariates with baseline from model fit x <- rbinom(1000,1,0.5) rr <- exp(x*0.3) sim1 <- simsubdist(cumhaz,rr,type="logistic") sim1$x <- x cifs <- comp.risk(Event(time,status)~+const(x),data=sim1,cause=1,model="logistic2") ### cifs$gamma plot(cifs) lines(cumhaz,col=2) ################################################################## ### simulation of cumulative incidence with specified functions ################################################################## F1logit<-function(t,lam0=0.2,beta=0.3,x=0) { pt <- t*lam0; rr <- exp(x*beta); return(pt*rr/(1+pt*rr)); } F1p<-function(t,lam0=0.4,beta=0.3,x=0) # proportional version { return( 1 - exp(-(t*lam0)*exp(x*beta))) } n=1000 tt=seq(0,3,by=.01) tt=seq(0,3,by=.01) t1 <- invsubdist(cbind(tt,F1p(tt)),runif(n)) t2 <- invsubdist(cbind(tt,F1p(tt,lam0=0.1)),runif(n)) rt <- rbinom(n,1,(F1p(3)+F1p(3,lam0=0.1))) rb <- rbinom(n,1,F1p(3)/(F1p(3)+F1p(3,lam0=0.1))) cause=ifelse(rb==1,1,2) time=ifelse(cause==1,t1$time,t2$time) cause <- rt*cause time[cause==0] <- 3 datC=data.frame(time=time,cause=cause) p1=comp.risk(Event(time,cause)~+1,data=datC,cause=1) p2=comp.risk(Event(time,cause)~+1,data=datC,cause=2) pp1=predict(p1,X=1,se=0) pp2=predict(p2,X=1,se=0) par(mfrow=c(1,2)) plot(pp1) lines(tt,F1p(tt),col=2) plot(pp2) lines(tt,F1p(tt,lam0=0.1),col=2) #to avoid dependencies when checking #library(prodlim) #pp=prodlim(Hist(time,cause)~+1) #par(mfrow=c(1,2)) #plot(pp,cause="1") #lines(tt,F1p(tt),col=2) #plot(pp,cause="2") #lines(tt,F1p(tt,lam0=0.1),col=2) } \author{ Thomas Scheike } \keyword{survival} 
timereg/man/cox.Rd0000644000175000017500000000054614077524411013705 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.prop-excess.r \name{cox} \alias{cox} \title{Identifies proportional excess terms of model} \usage{ cox(x) } \arguments{ \item{x}{variable} } \description{ Specifies which of the regressors that lead to proportional excess hazard } \author{ Thomas Scheike } \keyword{survival} timereg/man/plot.aalen.Rd0000644000175000017500000000526714077524411015156 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.aalen.r \name{plot.aalen} \alias{plot.aalen} \alias{plot.cox.aalen} \alias{plot.timecox} \alias{plot.prop.excess} \title{Plots estimates and test-processes} \usage{ \method{plot}{aalen}( x, pointwise.ci = 1, hw.ci = 0, sim.ci = 0, robust.ci = 0, col = NULL, specific.comps = FALSE, level = 0.05, start.time = 0, stop.time = 0, add.to.plot = FALSE, mains = TRUE, xlab = "Time", ylab = "Cumulative coefficients", score = FALSE, ... ) } \arguments{ \item{x}{the output from the "aalen" function.} \item{pointwise.ci}{if >1 pointwise confidence intervals are plotted with lty=pointwise.ci} \item{hw.ci}{if >1 Hall-Wellner confidence bands are plotted with lty=hw.ci. Only 0.95 \% bands can be constructed.} \item{sim.ci}{if >1 simulation based confidence bands are plotted with lty=sim.ci. 
These confidence bands are robust to non-martingale behaviour.} \item{robust.ci}{robust standard errors are used to estimate standard error of estimate, otherwise martingale based standard errors are used.} \item{col}{specifice colors of different components of plot, in order: c(estimate,pointwise.ci,robust.ci,hw.ci,sim.ci) so for example, when we ask to get pointwise.ci, hw.ci and sim.ci we would say c(1,2,3,4) to use colors as specified.} \item{specific.comps}{all components of the model is plotted by default, but a list of components may be specified, for example first and third "c(1,3)".} \item{level}{gives the significance level.} \item{start.time}{start of observation period where estimates are plotted.} \item{stop.time}{end of period where estimates are plotted. Estimates thus plotted from [start.time, max.time].} \item{add.to.plot}{to add to an already existing plot.} \item{mains}{add names of covariates as titles to plots.} \item{xlab}{label for x-axis.} \item{ylab}{label for y-axis.} \item{score}{to plot test processes for test of time-varying effects along with 50 random realization under the null-hypothesis.} \item{...}{unused arguments - for S3 compatibility} } \description{ This function plots the non-parametric cumulative estimates for the additive risk model or the test-processes for the hypothesis of time-varying effects with re-sampled processes under the null. } \examples{ # see help(aalen) data(sTRACE) out<-aalen(Surv(time,status==9)~chf+vf,sTRACE,max.time=7,n.sim=100) par(mfrow=c(2,2)) plot(out,pointwise.ci=1,hw.ci=1,sim.ci=1,col=c(1,2,3,4)) par(mfrow=c(2,2)) plot(out,pointwise.ci=0,robust.ci=1,hw.ci=1,sim.ci=1,col=c(1,2,3,4)) } \references{ Martinussen and Scheike, Dynamic Regression models for Survival Data, Springer (2006). 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/invsubdist.Rd0000644000175000017500000000667414077524411015316 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{invsubdist} \alias{invsubdist} \title{Finds inverse of piecwise linear sub-distribution} \usage{ invsubdist(F1, u, entry = NULL, cond = 1, ptrunc = NULL) } \arguments{ \item{F1}{matrix with x, and F1(x)} \item{u}{points for which to compute inverse} \item{entry}{possible delayed entry points} \item{cond}{1 indcates that we draw given that this subdistribution is used, so draws from F1(t)/F1(tau)} \item{ptrunc}{possible trunction weigth for delayed entry, if NULL then uses ptrunc=1-F1(entry)} } \description{ Finds inverse of piecwise linear sub-distribution to be used for simulation of subdistributions } \examples{ F1 <- cbind(c(0,5,8,10),c(0,0.1,0.3,0.9)) plot(F1,type="l") u <- runif(100) Fiu <- invsubdist(F1,u,cond=0) points(Fiu$time,u,pch="x") F1cond <- F1 F1cond[,2] <- F1cond[,2]/0.9 plot(F1cond,type="l") u <- runif(100) Ficond <- invsubdist(F1cond,u,cond=0) points(Ficond$time,u,pch="-") Fiu <- invsubdist(F1,u,cond=1) points(Fiu$time,u,pch="x") ## simulation of distribution with delayed entry starting at 3 par(mfrow=c(1,1)) F1 <- cbind(c(0,5,8,10),c(0,0.5,0.6,0.9)) F1 plot(F1,ylim=c(0,1),type="l") n <- 100000 entry <- c(rep(3,10000),runif(n)*7+3) ###entry <- rep(3,n) u <- runif(n+10000) ### Fiu <- invsubdist(F1,u,cond=0,entry=entry) ### # library(prodlim) # pp <- prodlim(Hist(time,status,entry=entry)~+1,data=Fiu) # plot(pp,xlim=c(3,10)) ### entry <- 3 ### F1entry <- subdist(F1,entry)[,2] ptrunc <- 1-F1entry ### F1entry5 <- F1 F1entry5[,1] <- F1entry5[,1]-entry F1entry5[,2] <- (F1entry5[,2]-F1entry)/ptrunc pos <- F1entry5[,1]>=0 F1entry5 <- rbind(c(0,0),F1entry5[pos,]) # # lines(entry+F1entry5[,1],1-F1entry5[,2],col=2) ############################################################## ## Simulations of two cumulative incidence 
functions with truncation ############################################################## par(mfrow=c(1,1)) F1 <- cbind(c(0,5,8,10),c(0,0.5,0.6,0.9)*0.3) F2 <- cbind(c(0,5,8,10),c(0,0.5,0.6,0.9)*0.5) plot(F1,ylim=c(0,1),type="l") lines(F2,col=2) entry1 <- 3 ### F1entry <- subdist(F1,entry1)[,2] F2entry <- subdist(F2,entry1)[,2] ptrunc <- 1-F1entry-F2entry ### F1e <- F1 F1e[,1] <- F1e[,1]-entry1 F1e[,2] <- (F1e[,2]-F1entry)/ptrunc pos <- F1e[,1]>=0 F1e <- rbind(c(0,0),F1e[pos,]) F2e <- F2 F2e[,1] <- F2e[,1]-entry1 F2e[,2] <- (F2e[,2]-F2entry)/ptrunc pos <- F2e[,1]>=0 F2e <- rbind(c(0,0),F2e[pos,]) # # truncated identifiable version lines(entry1+F1e[,1],F1e[,2],col=1) lines(entry1+F2e[,1],F2e[,2],col=2) n <- 10000 entry <- c(rep(entry1,10000),runif(n)*(10-entry1)+entry1) u <- runif(n+10000) ### F1entry <- subdist(F1,entry)[,2] F2entry <- subdist(F2,entry)[,2] ptrunc <- 1-( F1entry+F2entry) Fiu1 <- invsubdist(F1,u,cond=1,entry=entry,ptrunc=ptrunc) Fiu2 <- invsubdist(F1,u,cond=1,entry=entry,ptrunc=ptrunc) ### ptot <- (tail(F1[,2],1)+tail(F2[,2],1)-F1entry-F2entry)/(ptrunc) rt <- rbinom(n+10000,1,ptot) p1 <- ((tail(F1[,2],1)-F1entry)/ptrunc) p2 <- ((tail(F2[,2],1)-F2entry)/ptrunc) rb <- rbinom(n+10000,1,p1/ptot) cause=ifelse(rb==1,1,2) time=ifelse(cause==1,Fiu1$time,Fiu2$time) cause <- rt*cause time[cause==0] <- 10 ### simulated data, now checking that things are working # pp <- prodlim(Hist(time,cause,entry=entry)~+1) # plot(pp,xlim=c(entry1,10),cause=1) # plot(pp,xlim=c(entry1,10),cause=2,add=TRUE) ### # lines(entry1+F1e[,1],F1e[,2],col=2) # lines(entry1+F2e[,1],F2e[,2],col=2) } \author{ Thomas Scheike } \keyword{survival} timereg/man/restricted.residual.mean.Rd0000644000175000017500000000510314077524411020004 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/restricted.residual.mean.r \name{restricted.residual.mean} \alias{restricted.residual.mean} \title{Estimates restricted residual mean for Cox or Aalen model} \usage{ 
restricted.residual.mean(out, x = 0, tau = 10, iid = 0) } \arguments{ \item{out}{an "cox.aalen" with a Cox model or an "aalen" model.} \item{x}{matrix with covariates for Cox model or additive hazards model (aalen).} \item{tau}{restricted residual mean.} \item{iid}{if iid=1 then uses iid decomposition for estimation of standard errors.} } \value{ Returns an object. With the following arguments: \item{mean}{restricted mean for different covariates.} \item{var.mean}{variance matrix.} \item{se}{standard errors.} \item{S0tau}{estimated survival functions on time-range [0,tau].} \item{timetau}{vector of time arguments for S0tau.} } \description{ The restricted means are the \deqn{ \int_0^\tau S(t) dt } the standard errors are computed using the i.i.d. decompositions from the cox.aalen (that must be called with the argument "max.timpoint.sim=NULL") or aalen function. } \details{ must have computed iid decomposition of survival models for standard errors to be computed. Note that competing risks models can be fitted but then the interpretation is not clear. 
} \examples{ \donttest{ ### this example runs slowly and is therefore donttest data(sTRACE) sTRACE$cage <- scale(sTRACE$age) # Fits Cox model and aalen model out<-cox.aalen(Surv(time,status>=1)~prop(sex)+prop(diabetes)+prop(chf)+ prop(vf),data=sTRACE,max.timepoint.sim=NULL,resample.iid=1) outa<-aalen(Surv(time,status>=1)~sex+diabetes+chf+vf, data=sTRACE,resample.iid=1) coxrm <- restricted.residual.mean(out,tau=7, x=rbind(c(0,0,0,0),c(0,0,1,0),c(0,0,1,1),c(0,0,0,1)),iid=1) plot(coxrm) summary(coxrm) ### aalen model not optimal here aalenrm <- restricted.residual.mean(outa,tau=7, x=rbind(c(1,0,0,0,0),c(1,0,0,1,0),c(1,0,0,1,1),c(1,0,0,0,1)),iid=1) with(aalenrm,matlines(timetau,S0tau,type="s",ylim=c(0,1))) legend("bottomleft",c("baseline","+chf","+chf+vf","+vf"),col=1:4,lty=1) summary(aalenrm) mm <-cbind(coxrm$mean,coxrm$se,aalenrm$mean,aalenrm$se) colnames(mm)<-c("cox-res-mean","se","aalen-res-mean","se") rownames(mm)<-c("baseline","+chf","+chf+vf","+vf") mm } } \references{ D. M. Zucker, Restricted mean life with covariates: Modification and extension of a useful survival analysis method, J. Amer. Statist. Assoc. vol. 93 pp. 702-709, 1998. Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/bmt.Rd0000644000175000017500000000133414077524411013672 0ustar nileshnilesh\name{bmt} \alias{bmt} \non_function{} \title{The Bone Marrow Transplant Data} \description{ Bone marrow transplant data with 408 rows and 5 columns. } \format{ The data has 408 rows and 5 columns. \describe{ \item{cause}{a numeric vector code. Survival status. 1: dead from treatment related causes, 2: relapse , 0: censored.} \item{time}{ a numeric vector. Survival time. } \item{platelet}{a numeric vector code. Plalelet 1: more than 100 x \eqn{10^9} per L, 0: less.} \item{tcell}{a numeric vector. T-cell depleted BMT 1:yes, 0:no.} \item{age}{a numeric vector code. 
Age of patient, scaled and centered ((age-35)/15).} } } \source{ Simulated data } \references{ NN } \examples{ data(bmt) names(bmt) } \keyword{datasets} timereg/man/Gprop.odds.Rd0000644000175000017500000001237314077524412015135 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Gprop-odds.r \name{Gprop.odds} \alias{Gprop.odds} \title{Fit Generalized Semiparametric Proportional 0dds Model} \usage{ Gprop.odds( formula = formula(data), data = parent.frame(), beta = 0, Nit = 50, detail = 0, start.time = 0, max.time = NULL, id = NULL, n.sim = 500, weighted.test = 0, sym = 0, mle.start = 0 ) } \arguments{ \item{formula}{a formula object, with the response on the left of a '~' operator, and the terms on the right. The response must be a survival object as returned by the `Surv' function.} \item{data}{a data.frame with the variables.} \item{beta}{starting value for relative risk estimates} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{detail}{if 0 no details is printed during iterations, if 1 details are given.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. This is very useful to obtain stable estimates, especially for the baseline. Default is max of data.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{n.sim}{number of simulations in resampling.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{sym}{to use symmetrized second derivative in the case of the estimating equation approach (profile=0). This may improve the numerical performance.} \item{mle.start}{starting values for relative risk parameters.} } \value{ returns an object of type 'cox.aalen'. 
With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{the martingale based pointwise variance estimates. } \item{robvar.cum}{robust pointwise variances estimates. } \item{gamma}{estimate of proportional odds parameters of model.} \item{var.gamma}{variance for gamma. } \item{robvar.gamma}{robust variance for gamma. } \item{residuals}{list with residuals. Estimated martingale increments (dM) and corresponding time vector (time).} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands. 
} \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect over time.} \item{loglike}{modified partial likelihood, pseudo profile likelihood for regression parameters.} \item{D2linv}{inverse of the derivative of the score function.} \item{score}{value of score for final estimates.} \item{test.procProp}{observed score process for proportional odds regression effects.} \item{pval.Prop}{p-value based on resampling.} \item{sim.supProp}{re-sampled supremum values.} \item{sim.test.procProp}{list of 50 random realizations of test-processes for constant proportional odds under the model based on resampling.} } \description{ Fits a semiparametric proportional odds model: \deqn{ logit(1-S_{X,Z}(t)) = log(X^T A(t)) + \beta^T Z } where A(t) is increasing but otherwise unspecified. Model is fitted by maximising the modified partial likelihood. A goodness-of-fit test by considering the score functions is also computed by resampling methods. } \details{ An alternative way of writing the model : \deqn{ S_{X,Z}(t)) = \frac{ \exp( - \beta^T Z )}{ (X^T A(t)) + \exp( - \beta^T Z) } } such that \eqn{\beta} is the log-odds-ratio of dying before time t, and \eqn{A(t)} is the odds-ratio. The modelling formula uses the standard survival modelling given in the \bold{survival} package. The data for a subject is presented as multiple rows or "observations", each of which applies to an interval of observation (start, stop]. The program essentially assumes no ties, and if such are present a little random noise is added to break the ties. 
} \examples{ data(sTRACE) \donttest{ ### runs slowly and is therefore donttest data(sTRACE) # Fits Proportional odds model with stratified baseline age.c<-scale(sTRACE$age,scale=FALSE); out<-Gprop.odds(Surv(time,status==9)~-1+factor(diabetes)+prop(age.c)+prop(chf)+ prop(sex)+prop(vf),data=sTRACE,max.time=7,n.sim=50) summary(out) par(mfrow=c(2,3)) plot(out,sim.ci=2); plot(out,score=1) } } \references{ Scheike, A flexible semiparametric transformation model for survival data, Lifetime Data Anal. (to appear). Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/cd4.Rd0000644000175000017500000000231314077524411013560 0ustar nileshnilesh\name{cd4} \alias{cd4} \non_function{} \title{The multicenter AIDS cohort study } \description{CD4 counts collected over time.} \format{ This data frame contains the following columns: \describe{ \item{obs}{a numeric vector. Number of observations.} \item{id}{a numeric vector. Id of subject.} \item{visit}{ a numeric vector. Timings of the visits in years.} \item{smoke}{a numeric vector code. 0: non-smoker, 1: smoker.} \item{age}{a numeric vector. Age of the patient at the start of the trial.} \item{cd4}{a numeric vector. CD4 percentage at the current visit.} \item{cd4.prev}{a numeric vector. CD4 level at the preceding visit.} \item{precd4}{a numeric vector. Post-infection CD4 percentage.} \item{lt}{a numeric vector. Gives the starting time for the time-intervals.} \item{rt}{a numeric vector. Gives the stopping time for the time-interval.} } } \source{ MACS Public Use Data Set Release PO4 (1984-1991). See reference. } \references{ Kaslow et al. (1987), The multicenter AIDS cohort study: rational, organisation and selected characteristics of the participants. Am. J. Epidemiology 126, 310--318. 
} \examples{ data(cd4) names(cd4) } \keyword{datasets} timereg/man/print.aalen.Rd0000644000175000017500000000106114077524411015320 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.aalen.r \name{print.aalen} \alias{print.aalen} \alias{print.cox.aalen} \alias{print.comprisk} \alias{print.prop.excess} \alias{print.dynreg} \alias{print.timecox} \alias{print.cum.residuals} \title{Prints call} \usage{ \method{print}{aalen}(x, ...) } \arguments{ \item{x}{an aalen object} \item{...}{unused arguments - for S3 compatibility} } \description{ Prints call for object. Lists nonparametric and parametric terms of model } \author{ Thomas Scheike } \keyword{survival} timereg/man/pe.sasieni.Rd0000644000175000017500000000517414077524412015155 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.pe-sasieni.r \name{pe.sasieni} \alias{pe.sasieni} \alias{summary.pe-sasieni} \title{Fits Proportional excess hazards model with fixed offsets} \usage{ pe.sasieni( formula = formula(data), data = parent.frame(), id = NULL, start.time = 0, max.time = NULL, offsets = 0, Nit = 50, detail = 0, n.sim = 500 ) } \arguments{ \item{formula}{a formula object, with the response on the left of a `~' operator, and the terms on the right. The response must be a survival object as returned by the `Surv' function.} \item{data}{a data.frame with the variables.} \item{id}{gives the number of individuals.} \item{start.time}{starting time for considered time-period.} \item{max.time}{stopping considered time-period if different from 0. Estimates thus computed from [0,max.time] if max.time>0. Default is max of data.} \item{offsets}{fixed offsets giving the mortality.} \item{Nit}{number of itterations.} \item{detail}{if detail is one, prints iteration details.} \item{n.sim}{number of simulations, 0 for no simulations.} } \value{ Returns an object of type "pe.sasieni". 
With the following arguments: \item{cum}{baseline of Cox model excess risk.} \item{var.cum}{pointwise variance estimates for estimated cumulatives.} \item{gamma}{estimate of relative risk terms of model.} \item{var.gamma}{variance estimates for gamma.} \item{Ut}{score process for Cox part of model.} \item{D2linv}{The inverse of the second derivative.} \item{score}{final score} \item{test.Prop}{re-sampled absolute supremum values.} \item{pval.Prop}{p-value based on resampling.} } \description{ Fits proportional excess hazards model. The Sasieni proportional excess risk model. } \details{ The models are written using the survival modelling given in the survival package. The program assumes that there are no ties, and if such are present random noise is added to break the ties. } \examples{ data(mela.pop) out<-pe.sasieni(Surv(start,stop,status==1)~age+sex,mela.pop, id=1:205,Nit=10,max.time=7,offsets=mela.pop$rate,detail=0,n.sim=100) summary(out) ul<-out$cum[,2]+1.96*out$var.cum[,2]^.5 ll<-out$cum[,2]-1.96*out$var.cum[,2]^.5 plot(out$cum,type="s",ylim=range(ul,ll)) lines(out$cum[,1],ul,type="s"); lines(out$cum[,1],ll,type="s") # see also prop.excess function } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer Verlag (2006). Sasieni, P.D., Proportional excess hazards, Biometrika (1996), 127--41. Cortese, G. and Scheike, T.H., Dynamic regression hazards models for relative survival (2007), submitted. 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/recurrent.marginal.mean.Rd0000644000175000017500000000351214077524411017631 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/recurrent.r \name{recurrent.marginal.mean} \alias{recurrent.marginal.mean} \title{Estimates marginal mean of recurrent events} \usage{ recurrent.marginal.mean(recurrent, death) } \arguments{ \item{recurrent}{aalen model for recurrent events} \item{death}{aalen model for recurrent events} } \description{ Fitting two aalen models for death and recurent events these are combined to prducte the estimator \deqn{ \int_0^t S(u) dR(u) } the mean number of recurrent events, here \deqn{ S(u) } is the probability of survival, and \deqn{ dR(u) } is the probability of an event among survivors. } \details{ IID versions used for Ghosh & Lin (2000) variance. See also mets package for quick version of this for large data mets:::recurrent.marginal, these two version should give the same when there are no ties. } \examples{ \donttest{ ### get some data using mets simulaitons library(mets) data(base1cumhaz) data(base4cumhaz) data(drcumhaz) dr <- drcumhaz base1 <- base1cumhaz base4 <- base4cumhaz rr <- simRecurrent(100,base1,death.cumhaz=dr) rr$x <- rnorm(nrow(rr)) rr$strata <- floor((rr$id-0.01)/50) drename(rr) <- start+stop~entry+time ar <- aalen(Surv(start,stop,status)~+1+cluster(id),data=rr,resample.iid=1 ,max.clust=NULL) ad <- aalen(Surv(start,stop,death)~+1+cluster(id),data=rr,resample.iid=1, ,max.clust=NULL) mm <- recurrent.marginal.mean(ar,ad) with(mm,plot(times,mu,type="s")) with(mm,lines(times,mu+1.96*se.mu,type="s",lty=2)) with(mm,lines(times,mu-1.96*se.mu,type="s",lty=2)) } } \references{ Ghosh and Lin (2002) Nonparametric Analysis of Recurrent events and death, Biometrics, 554--562. 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/aalen.Rd0000644000175000017500000001461014077524412014172 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.aalen.r \name{aalen} \alias{aalen} \title{Fit additive hazards model} \usage{ aalen( formula = formula(data), data = parent.frame(), start.time = 0, max.time = NULL, robust = 1, id = NULL, clusters = NULL, residuals = 0, n.sim = 1000, weighted.test = 0, covariance = 0, resample.iid = 0, deltaweight = 1, silent = 1, weights = NULL, max.clust = 1000, gamma = NULL, offsets = 0, caseweight = NULL ) } \arguments{ \item{formula}{a formula object with the response on the left of a '~' operator, and the independent terms on the right as regressors.The response must be a survival object as returned by the `Surv' function. Time- invariant regressors are specified by the wrapper const(), and cluster variables (for computing robust variances) by the wrapper cluster().} \item{data}{a data.frame with the variables.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. Default is max of data.} \item{robust}{to compute robust variances and construct processes for resampling. May be set to 0 to save memory.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{clusters}{cluster variable for computation of robust variances.} \item{residuals}{to returns residuals that can be used for model validation in the function cum.residuals} \item{n.sim}{number of simulations in resampling.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{covariance}{to compute covariance estimates for nonparametric terms rather than just the variances.} \item{resample.iid}{to return i.i.d. 
representation for nonparametric and parametric terms.} \item{deltaweight}{uses weights to estimate semiparametric model, under construction, default=1 is standard least squares estimates} \item{silent}{set to 0 to print warnings for non-inverible design-matrices for different timepoints, default is 1.} \item{weights}{weights for estimating equations.} \item{max.clust}{sets the total number of i.i.d. terms in i.i.d. decompostition. This can limit the amount of memory used by coarsening the clusters. When NULL then all clusters are used. Default is 1000 to save memory and time.} \item{gamma}{fixes gamme at this value for estimation.} \item{offsets}{offsets for the additive model, to make excess risk modelling.} \item{caseweight}{caseweight: mutiplied onto dN for score equations.} } \value{ returns an object of type "aalen". With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval. } \item{var.cum}{the martingale based pointwise variance estimates for cumulatives.} \item{robvar.cum}{robust pointwise variances estimates for cumulatives.} \item{gamma}{estimate of parametric components of model. } \item{var.gamma}{variance for gamma. } \item{robvar.gamma}{robust variance for gamma. } \item{residuals}{list with residuals. 
Estimated martingale increments (dM) and corresponding time vector (time).} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands. } \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect over time. } \item{sim.test.procBeqC}{list of 50 random realizations of test-processes under null based on resampling.} \item{covariance}{covariances for nonparametric terms of model.} \item{B.iid}{Resample processes for nonparametric terms of model.} \item{gamma.iid}{Resample processes for parametric terms of model.} \item{deviance}{Least squares of increments.} } \description{ Fits both the additive hazards model of Aalen and the semi-parametric additive hazards model of McKeague and Sasieni. Estimates are un-weighted. Time dependent variables and counting process data (multiple events per subject) are possible. } \details{ Resampling is used for computing p-values for tests of time-varying effects. The modelling formula uses the standard survival modelling given in the \bold{survival} package. The data for a subject is presented as multiple rows or 'observations', each of which applies to an interval of observation (start, stop]. 
For counting process data with the )start,stop] notation is used, the 'id' variable is needed to identify the records for each subject. The program assumes that there are no ties, and if such are present random noise is added to break the ties. } \examples{ data(sTRACE) # Fits Aalen model out<-aalen(Surv(time,status==9)~age+sex+diabetes+chf+vf, sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(2,3)) plot(out) # Fits semi-parametric additive hazards model out<-aalen(Surv(time,status==9)~const(age)+const(sex)+const(diabetes)+chf+vf, sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(2,3)) plot(out) ## Excess risk additive modelling data(mela.pop) dummy<-rnorm(nrow(mela.pop)); # Fits Aalen model with offsets out<-aalen(Surv(start,stop,status==1)~age+sex+const(dummy), mela.pop,max.time=7,n.sim=100,offsets=mela.pop$rate,id=mela.pop$id, gamma=0) summary(out) par(mfrow=c(2,3)) plot(out,main="Additive excess riks model") # Fits semi-parametric additive hazards model with offsets out<-aalen(Surv(start,stop,status==1)~age+const(sex), mela.pop,max.time=7,n.sim=100,offsets=mela.pop$rate,id=mela.pop$id) summary(out) plot(out,main="Additive excess riks model") } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/res.mean.Rd0000644000175000017500000002002114121055450014602 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ipcw-residualmean.r \name{res.mean} \alias{res.mean} \title{Residual mean life (restricted)} \usage{ res.mean( formula, data = parent.frame(), cause = 1, restricted = NULL, times = NULL, Nit = 50, clusters = NULL, gamma = 0, n.sim = 0, weighted = 0, model = "additive", detail = 0, interval = 0.01, resample.iid = 1, cens.model = "KM", cens.formula = NULL, time.pow = NULL, time.pow.test = NULL, silent = 1, conv = 1e-06, estimator = 1, cens.weights = NULL, conservative = 1, weights = NULL ) } \arguments{ \item{formula}{a formula object, with the response on the left of a '~' operator, and the terms on the right. The response must be a survival object as returned by the `Event' function. The status indicator is not important here. Time-invariant regressors are specified by the wrapper const(), and cluster variables (for computing robust variances) by the wrapper cluster().} \item{data}{a data.frame with the variables.} \item{cause}{For competing risk models specifies which cause we consider.} \item{restricted}{gives possible restriction times for means.} \item{times}{specifies the times at which the estimator is considered. Defaults to all the times where an event of interest occurs, with the first 10 percent or max 20 jump points removed for numerical stability in simulations.} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{clusters}{specifies cluster structure, for backwards compatibility.} \item{gamma}{starting value for constant effects.} \item{n.sim}{number of simulations in resampling.} \item{weighted}{Not implemented. 
To compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{model}{"additive", "prop"ortional.} \item{detail}{if 0 no details are printed during iterations, if 1 details are given.} \item{interval}{specifies that we only consider timepoints where the Kaplan-Meier of the censoring distribution is larger than this value.} \item{resample.iid}{to return the iid decomposition, that can be used to construct confidence bands for predictions} \item{cens.model}{specifies which model to use for the IPCW, KM is Kaplan-Meier alternatively it may be "cox" or "aalen" model for further flexibility.} \item{cens.formula}{specifies the regression terms used for the regression model for chosen regression model. When cens.model is specified, the default is to use the same design as specified for the competing risks model. "KM","cox","aalen","weights". "weights" are user specified weights given in the cens.weights argument.} \item{time.pow}{specifies the power at which the time-arguments are transformed, for each of the arguments of the const() terms, default is 1 for the additive model and 0 for the proportional model.} \item{time.pow.test}{specifies the power at which the time-arguments are transformed for each of the arguments of the non-const() terms. This is relevant for testing if a coefficient function is consistent with the specified form A_l(t)=beta_l t^time.pow.test(l). Default is 1 for the additive model and 0 for the proportional model.} \item{silent}{if 0 information on convergence problems due to non-invertible derivatives of scores are printed.} \item{conv}{gives the convergence criterion in terms of sum of absolute change of parameters of model} \item{estimator}{specifies what is estimated.} \item{cens.weights}{censoring weights for estimating equations.} \item{conservative}{for slightly conservative standard errors.} \item{weights}{weights for estimating equations.} } \value{ returns an object of type 'comprisk'. 
With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{pointwise variances estimates. } \item{gamma}{estimate of proportional odds parameters of model.} \item{var.gamma}{variance for gamma. } \item{score}{sum of absolute value of scores.} \item{gamma2}{estimate of constant effects based on the non-parametric estimate. Used for testing of constant effects.} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{conf.band}{resampling based constant to construct 95\% uniform confidence bands.} \item{B.iid}{list of iid decomposition of non-parametric effects.} \item{gamma.iid}{matrix of iid decomposition of parametric effects.} \item{test.procBeqC}{observed test process for testing of time-varying effects} \item{sim.test.procBeqC}{50 resample processes for for testing of time-varying effects} \item{conv}{information on convergence for time points used for estimation.} } \description{ Fits a semiparametric model for the residual life (estimator=1): \deqn{ E( \min(Y,\tau) -t | Y>=t) = h_1( g(t,x,z) ) } or cause specific years lost of Andersen (2012) (estimator=3) \deqn{ E( \tau- \min(Y_j,\tau) | Y>=0) = \int_0^t (1-F_j(s)) ds = h_2( g(t,x,z) ) } where \eqn{Y_j = \sum_j Y I(\epsilon=j) + \infty * I(\epsilon=0)} or (estimator=2) \deqn{ E( \tau- \min(Y_j,\tau) | Y<\tau, \epsilon=j) = h_3( g(t,x,z) ) = h_2(g(t,x,z)) F_j(\tau,x,z) } where \eqn{F_j(s,x,z) = 
P(Y<\tau, \epsilon=j | x,z )} for a known link-function \eqn{h()} and known prediction-function \eqn{g(t,x,z)} } \details{ Uses the IPCW for the score equations based on \deqn{ w(t) \Delta(\tau)/P(\Delta(\tau)=1| T,\epsilon,X,Z) ( Y(t) - h_1(t,X,Z)) } and where \eqn{\Delta(\tau)} is the at-risk indicator given data and requires a IPCW model. Since timereg version 1.8.4. the response must be specified with the \code{\link{Event}} function instead of the \code{\link{Surv}} function and the arguments. } \examples{ data(bmt); tau <- 100 ### residual restricted mean life out<-res.mean(Event(time,cause>=1)~factor(tcell)+factor(platelet),data=bmt,cause=1, times=0,restricted=tau,n.sim=0,model="additive",estimator=1); summary(out) out<-res.mean(Event(time,cause>=1)~factor(tcell)+factor(platelet),data=bmt,cause=1, times=seq(0,90,5),restricted=tau,n.sim=0,model="additive",estimator=1); par(mfrow=c(1,3)) plot(out) ### restricted years lost given death out21<-res.mean(Event(time,cause)~factor(tcell)+factor(platelet),data=bmt,cause=1, times=0,restricted=tau,n.sim=0,model="additive",estimator=2); summary(out21) out22<-res.mean(Event(time,cause)~factor(tcell)+factor(platelet),data=bmt,cause=2, times=0,restricted=tau,n.sim=0,model="additive",estimator=2); summary(out22) ### total restricted years lost out31<-res.mean(Event(time,cause)~factor(tcell)+factor(platelet),data=bmt,cause=1, times=0,restricted=tau,n.sim=0,model="additive",estimator=3); summary(out31) out32<-res.mean(Event(time,cause)~factor(tcell)+factor(platelet),data=bmt,cause=2, times=0,restricted=tau,n.sim=0,model="additive",estimator=3); summary(out32) ### delayed entry nn <- nrow(bmt) entrytime <- rbinom(nn,1,0.5)*(bmt$time*runif(nn)) bmt$entrytime <- entrytime bmtw <- prep.comp.risk(bmt,times=tau,time="time",entrytime="entrytime",cause="cause") out<-res.mean(Event(time,cause>=1)~factor(tcell)+factor(platelet),data=bmtw,cause=1, times=0,restricted=tau,n.sim=0,model="additive",estimator=1, 
cens.model="weights",weights=bmtw$cw,cens.weights=1/bmtw$weights); summary(out) } \references{ Andersen (2013), Decomposition of number of years lost according to causes of death, Statistics in Medicine, 5278-5285. Scheike, and Cortese (2015), Regression Modelling of Cause Specific Years Lost, Scheike, Cortese and Holmboe (2015), Regression Modelling of Restricted Residual Mean with Delayed Entry, } \author{ Thomas Scheike } \keyword{survival} timereg/man/mypbc.Rd0000644000175000017500000000033114077524411014216 0ustar nileshnilesh\name{mypbc} \alias{mypbc} \non_function{} \title{my version of the PBC data of the survival package} \description{ my version of the PBC data of the survival package } \source{ survival package } \keyword{datasets} timereg/man/plot.dynreg.Rd0000644000175000017500000000520314077524411015354 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.dynreg.r \name{plot.dynreg} \alias{plot.dynreg} \title{Plots estimates and test-processes} \usage{ \method{plot}{dynreg}( x, type = "eff.smooth", pointwise.ci = 1, hw.ci = 0, sim.ci = 0, robust = 0, specific.comps = FALSE, level = 0.05, start.time = 0, stop.time = 0, add.to.plot = FALSE, mains = TRUE, xlab = "Time", ylab = "Cumulative coefficients", score = FALSE, ... ) } \arguments{ \item{x}{the output from the "dynreg" function.} \item{type}{the estimator plotted. Choices "eff.smooth", "ms.mpp", "0.mpp" and "ly.mpp". See the dynreg function for more on this.} \item{pointwise.ci}{if >1 pointwise confidence intervals are plotted with lty=pointwise.ci} \item{hw.ci}{if >1 Hall-Wellner confidence bands are plotted with lty=hw.ci. Only 0.95 \% bands can be constructed.} \item{sim.ci}{if >1 simulation based confidence bands are plotted with lty=sim.ci. 
These confidence bands are robust to non-martingale behaviour.} \item{robust}{robust standard errors are used to estimate standard error of estimate, otherwise martingale based estimate are used.} \item{specific.comps}{all components of the model is plotted by default, but a list of components may be specified, for example first and third "c(1,3)".} \item{level}{gives the significance level.} \item{start.time}{start of observation period where estimates are plotted.} \item{stop.time}{end of period where estimates are plotted. Estimates thus plotted from [start.time, max.time].} \item{add.to.plot}{to add to an already existing plot.} \item{mains}{add names of covariates as titles to plots.} \item{xlab}{label for x-axis.} \item{ylab}{label for y-axis.} \item{score}{to plot test processes for test of time-varying effects along with 50 random realization under the null-hypothesis.} \item{...}{unused arguments - for S3 compatibility} } \description{ This function plots the non-parametric cumulative estimates for the additive risk model or the test-processes for the hypothesis of constant effects with re-sampled processes under the null. } \examples{ \donttest{ ### runs slowly and therefore donttest data(csl) indi.m<-rep(1,length(csl$lt)) # Fits time-varying regression model out<-dynreg(prot~treat+prot.prev+sex+age,csl, Surv(lt,rt,indi.m)~+1,start.time=0,max.time=3,id=csl$id, n.sim=100,bandwidth=0.7,meansub=0) par(mfrow=c(2,3)) # plots estimates plot(out) # plots tests-processes for time-varying effects plot(out,score=TRUE) } } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/mela.pop.Rd0000644000175000017500000000244514077524411014627 0ustar nileshnilesh\name{mela.pop} \alias{mela.pop} \non_function{} \title{Melanoma data and Danish population mortality by age and sex } \description{Melanoma data with background mortality of Danish population. 
} \format{ This data frame contains the following columns: \describe{ \item{id}{ a numeric vector. Gives patient id. } \item{sex}{ a numeric vector. Gives sex of patient. } \item{start}{ a numeric vector. Gives the starting time for the time-interval for which the covariate rate is representative. } \item{stop}{ a numeric vector. Gives the stopping time for the time-interval for which the covariate rate is representative. } \item{status}{ a numeric vector code. Survival status. 1: dead from melanoma, 0: alive or dead from other cause. } \item{age}{ a numeric vector. Gives the age of the patient at removal of tumor. } \item{rate}{ a numeric vector. Gives the population mortality for the given sex and age. Based on Table A.2 in Andersen et al. (1993). } } } \source{ Andersen, P.K., Borgan O, Gill R.D., Keiding N. (1993), \emph{Statistical Models Based on Counting Processes}, Springer-Verlag. } \examples{ data(mela.pop) names(mela.pop) } \keyword{datasets} timereg/man/timecox.Rd0000644000175000017500000001274514077524412014571 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.timecox.r \name{timecox} \alias{timecox} \title{Fit Cox model with partly timevarying effects.} \usage{ timecox( formula = formula(data), data, weights, subset, na.action, start.time = 0, max.time = NULL, id = NULL, clusters = NULL, n.sim = 1000, residuals = 0, robust = 1, Nit = 20, bandwidth = 0.5, method = "basic", weighted.test = 0, degree = 1, covariance = 0 ) } \arguments{ \item{formula}{a formula object with the response on the left of a '~' operator, and the independent terms on the right as regressors. The response must be a survival object as returned by the `Surv' function. 
Time-invariant regressors are specified by the wrapper const(), and cluster variables (for computing robust variances) by the wrapper cluster().} \item{data}{a data.frame with the variables.} \item{weights}{for analysis} \item{subset}{to subset} \item{na.action}{to have na.action} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. Default is max of data.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{clusters}{cluster variable for computation of robust variances.} \item{n.sim}{number of simulations in resampling.} \item{residuals}{to return residuals that can be used for model validation in the function cum.residuals} \item{robust}{to compute robust variances and construct processes for resampling. May be set to 0 to save memory.} \item{Nit}{number of iterations for score equations.} \item{bandwidth}{bandwidth for local iterations. Default is 50 \% of the range of the considered observation period.} \item{method}{Method for estimation. This refers to different parametrisations of the baseline of the model. Options are "basic" where the baseline is written as \eqn{\lambda_0(t) = \exp(\alpha_0(t))} or the "breslow" version where the baseline is parametrised as \eqn{\lambda_0(t)}.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{degree}{gives the degree of the local linear smoothing, that is local smoothing. Possible values are 1 or 2.} \item{covariance}{to compute covariance estimates for nonparametric terms rather than just the variances.} } \value{ Returns an object of type "timecox". 
With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{the martingale based pointwise variance estimates. } \item{robvar.cum}{robust pointwise variances estimates. } \item{gamma}{estimate of parametric components of model. } \item{var.gamma}{variance for gamma. } \item{robvar.gamma}{robust variance for gamma. } \item{residuals}{list with residuals. Estimated martingale increments (dM) and corresponding time vector (time).} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands. } \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect over time. } \item{sim.test.procBeqC}{list of 50 random realizations of test-processes under null based on resampling.} \item{schoenfeld.residuals}{Schoenfeld residuals are returned for "breslow" parametrisation.} } \description{ Fits proportional hazards model with some effects time-varying and some effects constant. Time dependent variables and counting process data (multiple events per subject) are possible. } \details{ Resampling is used for computing p-values for tests of timevarying effects. 
The modelling formula uses the standard survival modelling given in the \bold{survival} package. The data for a subject is presented as multiple rows or 'observations', each of which applies to an interval of observation (start, stop]. When counting process data with the )start,stop] notation is used, the 'id' variable is needed to identify the records for each subject. The program assumes that there are no ties, and if such are present random noise is added to break the ties. } \examples{ data(sTRACE) # Fits time-varying Cox model out<-timecox(Surv(time/365,status==9)~age+sex+diabetes+chf+vf, data=sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(2,3)) plot(out) par(mfrow=c(2,3)) plot(out,score=TRUE) # Fits semi-parametric time-varying Cox model out<-timecox(Surv(time/365,status==9)~const(age)+const(sex)+ const(diabetes)+chf+vf,data=sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(2,3)) plot(out) } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/krylow.pls.Rd0000644000175000017500000000247714077524411015245 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/krylow.pls.r \name{krylow.pls} \alias{krylow.pls} \title{Fits Krylow based PLS for additive hazards model} \usage{ krylow.pls(D, d, dim = 1) } \arguments{ \item{D}{defined above} \item{d}{defined above} \item{dim}{number of pls dimensions} } \value{ returns a list with the following arguments: \item{beta}{PLS regression coefficients} } \description{ Fits the PLS estimator for the additive risk model based on the least squares fitting criterion } \details{ \deqn{ L(\beta,D,d) = \beta^T D \beta - 2 \beta^T d } where \eqn{D=\int Z H Z dt} and \eqn{d=\int Z H dN}. 
} \examples{ ## makes data for pbc complete case data(mypbc) pbc<-mypbc pbc$time<-pbc$time+runif(418)*0.1; pbc$time<-pbc$time/365 pbc<-subset(pbc,complete.cases(pbc)); covs<-as.matrix(pbc[,-c(1:3,6)]) covs<-cbind(covs[,c(1:6,16)],log(covs[,7:15])) ## computes the matrices needed for the least squares ## criterion out<-aalen(Surv(time,status>=1)~const(covs),pbc,robust=0,n.sim=0) S=out$intZHZ; s=out$intZHdN; out<-krylow.pls(S,s,dim=2) } \references{ Martinussen and Scheike, The Aalen additive hazards model with high-dimensional regressors, submitted. Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/cox.ipw.Rd0000644000175000017500000000375714077524412014513 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cox.ipw.r \name{cox.ipw} \alias{cox.ipw} \alias{summary.cox.ipw} \alias{print.cox.ipw} \alias{coef.cox.ipw} \title{Missing data IPW Cox} \usage{ cox.ipw( survformula, glmformula, d = parent.frame(), max.clust = NULL, ipw.se = FALSE, tie.seed = 100 ) } \arguments{ \item{survformula}{a formula object with the response on the left of a '~' operator, and the independent terms on the right as regressors. The response must be a survival object as returned by the `Surv' function. Adds the prop() wrapper internally for using cox.aalen function for fitting Cox model.} \item{glmformula}{formula for "being" observed, that is not missing.} \item{d}{data frame.} \item{max.clust}{number of clusters in iid approximation. Default is all.} \item{ipw.se}{if TRUE computes standard errors based on iid decompositon of cox and glm model, thus should be asymptotically correct.} \item{tie.seed}{if there are ties these are broken, and to get same break the seed must be the same. Recommend to break them prior to entering the program.} } \value{ returns an object of type "cox.aalen". 
With the following arguments: \item{iid}{iid decomposition.} \item{coef}{missing data estimates for weighted cox. } \item{var}{robust pointwise variances estimates. } \item{se}{robust pointwise variances estimates. } \item{se.naive}{estimate of parametric components of model. } \item{ties}{list of ties and times with random noise to break ties.} \item{cox}{output from weighted cox model.} } \description{ Fits a Cox-Aalen survival model with missing data, with glm specification of probability of missingness. } \details{ Taylor expansion of Cox's partial likelihood in direction of glm parameters using num-deriv and iid expansion of Cox and glm parameters (lava). } \examples{ ### fit <- cox.ipw(Surv(time,status)~X+Z,obs~Z+X+time+status,data=d,ipw.se=TRUE) ### summary(fit) } \references{ Paik et al. } \author{ Thomas Scheike } \keyword{survival} timereg/man/prop.excess.Rd0000644000175000017500000000657014077524412015367 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.prop-excess.r \name{prop.excess} \alias{prop.excess} \title{Fits Proportional excess hazards model} \usage{ prop.excess( formula = formula(data), data = parent.frame(), excess = 1, tol = 1e-04, max.time = NULL, n.sim = 1000, alpha = 1, frac = 1 ) } \arguments{ \item{formula}{a formula object, with the response on the left of a `~' operator, and the terms on the right. The response must be a survival object as returned by the `Surv' function.} \item{data}{a data.frame with the variables.} \item{excess}{specifies for which of the subjects the excess term is present. Default is that the term is present for all subjects.} \item{tol}{tolerance for numerical procedure.} \item{max.time}{stopping considered time-period if different from 0. Estimates thus computed from [0,max.time] if max.time>0. Default is max of data.} \item{n.sim}{number of simulations in re-sampling.} \item{alpha}{tuning parameter in Newton-Raphson procedure. 
Value smaller than one may give more stable convergence.} \item{frac}{number between 0 and 1. Is used in supremum test where observed jump times t1, ..., tk is replaced by t1, ..., tl with l=round(frac*k).} } \value{ Returns an object of type "prop.excess". With the following arguments: \item{cum}{estimated cumulative regression functions. First column contains the jump times, then follows the estimated components of additive part of model and finally the excess cumulative baseline. } \item{var.cum}{robust pointwise variance estimates for estimated cumulatives. } \item{gamma}{estimate of parametric components of model. } \item{var.gamma}{robust variance estimate for gamma. } \item{pval}{p-value of Kolmogorov-Smirnov test (variance weighted) for excess baseline and Aalen terms, H: B(t)=0. } \item{pval.HW}{p-value of supremum test (corresponding to Hall-Wellner band) for excess baseline and Aalen terms, H: B(t)=0. Reported in summary. } \item{pval.CM}{p-value of Cramer von Mises test for excess baseline and Aalen terms, H: B(t)=0. } \item{quant}{95 percent quantile in distribution of resampled Kolmogorov-Smirnov test statistics for excess baseline and Aalen terms. Used to construct 95 percent simulation band. } \item{quant95HW}{95 percent quantile in distribution of resampled supremum test statistics corresponding to Hall-Wellner band for excess baseline and Aalen terms. Used to construct 95 percent Hall-Wellner band. } \item{simScoreProp}{observed scoreprocess and 50 resampled scoreprocesses (under model). List with 51 elements. } } \description{ Fits proportional excess hazards model. } \details{ The models are written using the survival modelling given in the survival package. The program assumes that there are no ties, and if such are present random noise is added to break the ties. 
} \examples{ ###working on memory leak issue, 3/3-2015 ###data(melanoma) ###lt<-log(melanoma$thick) # log-thickness ###excess<-(melanoma$thick>=210) # excess risk for thick tumors ### #### Fits Proportional Excess hazards model ###fit<-prop.excess(Surv(days/365,status==1)~sex+ulc+cox(sex)+ ### cox(ulc)+cox(lt),melanoma,excess=excess,n.sim=100) ###summary(fit) ###par(mfrow=c(2,3)) ###plot(fit) } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer Verlag (2006). } \author{ Torben Martinussen } \keyword{survival} timereg/man/sim.cox.Rd0000644000175000017500000000354314077524412014475 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{sim.cox} \alias{sim.cox} \alias{read.fit} \title{Simulation of output from Cox model.} \usage{ \method{sim}{cox}(cox, n, data = NULL, cens = NULL, rrc = NULL, entry = NULL, ...) } \arguments{ \item{cox}{output form coxph or cox.aalen model fitting cox model.} \item{n}{number of simulations.} \item{data}{to extract covariates for simulations (draws from observed covariates).} \item{cens}{specifies censoring model, if "is.matrix" then uses cumulative hazard given, if "is.scalar" then uses rate for exponential, and if not given then takes average rate of in simulated data from cox model.} \item{rrc}{possible vector of relative risk for cox-type censoring.} \item{entry}{delayed entry variable for simulation.} \item{...}{arguments for rchaz, for example entry-time} } \description{ Simulates data that looks like fit from Cox model. Censor data automatically for highest value of the event times by using cumulative hazard. 
} \examples{ data(TRACE) cox <- coxph(Surv(time,status==9)~vf+chf+wmi,data=TRACE) sim1 <- sim.cox(cox,1000,data=TRACE) cc <- coxph(Surv(time,status)~vf+chf+wmi,data=sim1) cbind(cox$coef,cc$coef) cor(sim1[,c("vf","chf","wmi")]) cor(TRACE[,c("vf","chf","wmi")]) \donttest{ ### do not test to avoid dependence on mets library(mets) cox <- phreg(Surv(time, status==9)~vf+chf+wmi,data=TRACE) sim3 <- sim.cox(cox,1000,data=TRACE) cc <- phreg(Surv(time, status)~vf+chf+wmi,data=sim3) cbind(cox$coef,cc$coef) basehazplot.phreg(cox,se=TRUE) lines(cc$cumhaz,col=2) cox <- phreg(Surv(time,status==9)~strata(chf)+vf+wmi,data=TRACE) sim3 <- sim.cox(cox,1000,data=TRACE) cc <- phreg(Surv(time, status)~strata(chf)+vf+wmi,data=sim3) cbind(cox$coef,cc$coef) basehazplot.phreg(cox) basehazplot.phreg(cc,add=TRUE) } } \author{ Thomas Scheike } \keyword{survival} timereg/man/diabetes.Rd0000644000175000017500000000203114077524411014663 0ustar nileshnilesh\name{diabetes} \alias{diabetes} \non_function{} \title{The Diabetic Retinopathy Data} \description{ The data was collected to test a laser treatment for delaying blindness in patients with diabetic retinopathy. The subset of 197 patients given in Huster et al. (1989) is used. } \format{ This data frame contains the following columns: \describe{ \item{id}{a numeric vector. Patient code.} \item{agedx}{a numeric vector. Age of patient at diagnosis.} \item{time}{a numeric vector. Survival time: time to blindness or censoring.} \item{status}{ a numeric vector code. Survival status. 1: blindness, 0: censored.} \item{trteye}{a numeric vector code. Random eye selected for treatment. 1: left eye 2: right eye.} \item{treat}{a numeric vector. 1: treatment 0: untreated.} \item{adult}{a numeric vector code. 1: younger than 20, 2: older than 20.} } } \source{ Huster W.J. and Brookmeyer, R. and Self. S. (1989) Modelling paired survival data with covariates, Biometrics 45, 145-56. 
} \examples{ data(diabetes) names(diabetes) } \keyword{datasets} timereg/man/Event.Rd0000644000175000017500000000142614077524411014173 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/event.r \name{Event} \alias{Event} \alias{as.character.Event} \alias{as.matrix.Event} \alias{[.Event} \alias{format.Event} \alias{print.Event} \alias{rbind.Event} \alias{summary.Event} \title{Event history object} \usage{ Event(time, time2 = TRUE, cause = NULL, cens.code = 0, ...) } \arguments{ \item{time}{Time} \item{time2}{Time 2} \item{cause}{Cause} \item{cens.code}{Censoring code (default 0)} \item{...}{Additional arguments} } \value{ Object of class Event (a matrix) } \description{ Constructor for Event History objects } \details{ ... content for details } \examples{ t1 <- 1:10 t2 <- t1+runif(10) ca <- rbinom(10,2,0.4) (x <- Event(t1,t2,ca)) } \author{ Klaus K. Holst and Thomas Scheike } timereg/man/event.split.Rd0000644000175000017500000000272314127306663015371 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/event.split.r \name{event.split} \alias{event.split} \title{EventSplit (SurvSplit).} \usage{ event.split( data, time = "time", status = "status", cuts = "cuts", name.id = "id", name.start = "start", cens.code = 0, order.id = TRUE, time.group = FALSE ) } \arguments{ \item{data}{data to be split} \item{time}{time variable.} \item{status}{status variable.} \item{cuts}{cuts variable or numeric cut (only one value)} \item{name.id}{name of id variable.} \item{name.start}{name of start variable in data, start can also be numeric "0"} \item{cens.code}{code for the censoring.} \item{order.id}{order data after id and start.} \item{time.group}{make variable "before"."cut" that keeps track of whether start,stop is before (1) or after cut (0).} } \description{ constructs start stop formulation of event time data after a variable in the data.set. 
Similar to SurvSplit of the survival package but can also split after random time given in data frame. } \examples{ set.seed(1) d <- data.frame(event=round(5*runif(5),2),start=1:5,time=2*1:5, status=rbinom(5,1,0.5),x=1:5) d d0 <- event.split(d,cuts="event",name.start=0) d0 dd <- event.split(d,cuts="event") dd ddd <- event.split(dd,cuts=3.5) ddd event.split(ddd,cuts=5.5) ### successive cutting for many values dd <- d for (cuts in seq(2,3,by=0.3)) dd <- event.split(dd,cuts=cuts) dd } \author{ Thomas Scheike } \keyword{survival} timereg/man/recurrent.marginal.coxmean.Rd0000644000175000017500000000425314077524411020346 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/recurrent.r \name{recurrent.marginal.coxmean} \alias{recurrent.marginal.coxmean} \title{Estimates marginal mean of recurrent events based on two cox models} \usage{ recurrent.marginal.coxmean(recurrent, death) } \arguments{ \item{recurrent}{aalen model for recurrent events} \item{death}{cox.aalen (cox) model for death events} } \description{ Fitting two Cox models for death and recurrent events, these are combined to produce the estimator \deqn{ \int_0^t S(u|x=0) dR(u|x=0) } the mean number of recurrent events, here \deqn{ S(u|x=0) } is the probability of survival, and \deqn{ dR(u|x=0) } is the probability of an event among survivors. For now the estimator is based on the two-baselines so \deqn{x=0}, but covariates can be rescaled to look at different x's and extensions possible. } \details{ IID versions along the lines of Ghosh & Lin (2000) variance. See also mets package for quick version of this for large data. IID versions used for Ghosh & Lin (2000) variance. See also mets package for quick version of this for large data mets:::recurrent.marginal, these two versions should give the same when there are no ties. 
} \examples{ \donttest{ ### do not test because iid slow and uses data from mets library(mets) data(base1cumhaz) data(base4cumhaz) data(drcumhaz) dr <- drcumhaz base1 <- base1cumhaz base4 <- base4cumhaz rr <- simRecurrent(100,base1,death.cumhaz=dr) rr$x <- rnorm(nrow(rr)) rr$strata <- floor((rr$id-0.01)/50) drename(rr) <- start+stop~entry+time ar <- cox.aalen(Surv(start,stop,status)~+1+prop(x)+cluster(id),data=rr, resample.iid=1,,max.clust=NULL,max.timepoint.sim=NULL) ad <- cox.aalen(Surv(start,stop,death)~+1+prop(x)+cluster(id),data=rr, resample.iid=1,,max.clust=NULL,max.timepoint.sim=NULL) mm <- recurrent.marginal.coxmean(ar,ad) with(mm,plot(times,mu,type="s")) with(mm,lines(times,mu+1.96*se.mu,type="s",lty=2)) with(mm,lines(times,mu-1.96*se.mu,type="s",lty=2)) } } \references{ Ghosh and Lin (2002) Nonparametric Analysis of Recurrent events and death, Biometrics, 554--562. } \author{ Thomas Scheike } \keyword{survival} timereg/man/csl.Rd0000644000175000017500000000310014077524411013662 0ustar nileshnilesh\name{csl} \alias{csl} \non_function{} \title{CSL liver chirrosis data} \description{Survival status for the liver chirrosis patients of Schlichting et al. } \format{ This data frame contains the following columns: \describe{ \item{id}{ a numeric vector. Id of subject. } \item{time}{ a numeric vector. Time of measurement. } \item{prot}{ a numeric vector. Prothrombin level at measurement time. } \item{dc}{ a numeric vector code. 0: censored observation, 1: died at eventT. } \item{eventT}{ a numeric vector. Time of event (death). } \item{treat}{ a numeric vector code. 0: active treatment of prednisone, 1: placebo treatment. } \item{sex}{ a numeric vector code. 0: female, 1: male. } \item{age}{ a numeric vector. Age of subject at inclusion time subtracted 60. } \item{prot.base}{ a numeric vector. Prothrombin base level before entering the study. } \item{prot.prev}{ a numeric vector. Level of prothrombin at previous measurement time. } \item{lt}{ a numeric vector. 
Gives the starting time for the time-intervals. } \item{rt}{ a numeric vector. Gives the stopping time for the time-intervals. } } } \source{P.K. Andersen} \references{ Schlichting, P., Christensen, E., Andersen, P., Fauerholds, L., Juhl, E., Poulsen, H. and Tygstrup, N. (1983), The Copenhagen Study Group for Liver Diseases, Hepatology 3, 889--895 } \examples{ data(csl) names(csl) } \keyword{datasets} timereg/man/prop.odds.subdist.Rd0000644000175000017500000001516014077524412016477 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prop-odds-subdist.r \name{prop.odds.subdist} \alias{prop.odds.subdist} \title{Fit Semiparametric Proportional 0dds Model for the competing risks subdistribution} \usage{ prop.odds.subdist( formula, data = parent.frame(), cause = 1, beta = NULL, Nit = 10, detail = 0, start.time = 0, max.time = NULL, id = NULL, n.sim = 500, weighted.test = 0, profile = 1, sym = 0, cens.model = "KM", cens.formula = NULL, clusters = NULL, max.clust = 1000, baselinevar = 1, weights = NULL, cens.weights = NULL ) } \arguments{ \item{formula}{a formula object, with the response on the left of a '~' operator, and the terms on the right. The response must be an object as returned by the `Event' function.} \item{data}{a data.frame with the variables.} \item{cause}{cause indicator for competing risks.} \item{beta}{starting value for relative risk estimates} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{detail}{if 0 no details is printed during iterations, if 1 details are given.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. This is very useful to obtain stable estimates, especially for the baseline. 
Default is max of data.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{n.sim}{number of simulations in resampling.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{profile}{use profile version of score equations.} \item{sym}{to use symmetrized second derivative in the case of the estimating equation approach (profile=0). This may improve the numerical performance.} \item{cens.model}{specifies censoring model. So far only Kaplan-Meier "KM".} \item{cens.formula}{possible formula for censoring distribution covariates. Default all !} \item{clusters}{to compute cluster based standard errors.} \item{max.clust}{number of maximum clusters to be used, to save time in iid decomposition.} \item{baselinevar}{set to 0 to save time on computations.} \item{weights}{additional weights.} \item{cens.weights}{specify censoring weights related to the observations.} } \value{ returns an object of type 'cox.aalen'. With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{the martingale based pointwise variance estimates. } \item{robvar.cum}{robust pointwise variances estimates. } \item{gamma}{estimate of proportional odds parameters of model.} \item{var.gamma}{variance for gamma. } \item{robvar.gamma}{robust variance for gamma. } \item{residuals}{list with residuals. 
Estimated martingale increments (dM) and corresponding time vector (time).} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands. } \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect over time.} \item{loglike}{modified partial likelihood, pseudo profile likelihood for regression parameters.} \item{D2linv}{inverse of the derivative of the score function.} \item{score}{value of score for final estimates.} \item{test.procProp}{observed score process for proportional odds regression effects.} \item{pval.Prop}{p-value based on resampling.} \item{sim.supProp}{re-sampled supremum values.} \item{sim.test.procProp}{list of 50 random realizations of test-processes for constant proportional odds under the model based on resampling.} } \description{ Fits a semiparametric proportional odds model: \deqn{ logit(F_1(t;X,Z)) = log( A(t)) + \beta^T Z } where A(t) is increasing but otherwise unspecified. Model is fitted by maximising the modified partial likelihood. A goodness-of-fit test by considering the score functions is also computed by resampling methods. 
} \details{ An alternative way of writing the model : \deqn{ F_1(t;X,Z) = \frac{ \exp( \beta^T Z )}{ (A(t)) + \exp( \beta^T Z) } } such that \eqn{\beta} is the log-odds-ratio of cause 1 before time t, and \eqn{A(t)} is the odds-ratio. The modelling formula uses the standard survival modelling given in the \bold{survival} package. The data for a subject is presented as multiple rows or "observations", each of which applies to an interval of observation (start, stop]. The program essentially assumes no ties, and if such are present a little random noise is added to break the ties. } \examples{ library(timereg) data(bmt) # Fits Proportional odds model out <- prop.odds.subdist(Event(time,cause)~platelet+age+tcell,data=bmt, cause=1,cens.model="KM",detail=0,n.sim=1000) summary(out) par(mfrow=c(2,3)) plot(out,sim.ci=2); plot(out,score=1) # simple predict function without confidence calculations pout <- predictpropodds(out,X=model.matrix(~platelet+age+tcell,data=bmt)[,-1]) matplot(pout$time,pout$pred,type="l") # predict function with confidence intervals pout2 <- predict(out,Z=c(1,0,1)) plot(pout2,col=2) pout1 <- predictpropodds(out,X=c(1,0,1)) lines(pout1$time,pout1$pred,type="l") # Fits Proportional odds model with stratified baseline, does not work yet! ###out <- Gprop.odds.subdist(Surv(time,cause==1)~-1+factor(platelet)+ ###prop(age)+prop(tcell),data=bmt,cause=bmt$cause, ###cens.code=0,cens.model="KM",causeS=1,detail=0,n.sim=1000) ###summary(out) ###par(mfrow=c(2,3)) ###plot(out,sim.ci=2); ###plot(out,score=1) } \references{ Eriksson, Li, Zhang and Scheike (2014), The proportional odds cumulative incidence model for competing risks, Biometrics, to appear. Scheike, A flexible semiparametric transformation model for survival data, Lifetime Data Anal. (2007). Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/two.stage.Rd0000644000175000017500000001514214077524412015026 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/two-stage-reg.r \name{two.stage} \alias{two.stage} \title{Fit Clayton-Oakes-Glidden Two-Stage model} \usage{ two.stage( margsurv, data = parent.frame(), Nit = 60, detail = 0, start.time = 0, max.time = NULL, id = NULL, clusters = NULL, robust = 1, theta = NULL, theta.des = NULL, var.link = 0, step = 0.5, notaylor = 0, se.clusters = NULL ) } \arguments{ \item{margsurv}{fit of marginal survival cox.aalen model with residuals=2, and resample.iid=1 to get fully correct standard errors. See notaylor below.} \item{data}{a data.frame with the variables.} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{detail}{if 0 no details is printed during iterations, if 1 details are given.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. Default is max of data.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{clusters}{cluster variable for computation of robust variances.} \item{robust}{if 0 then totally omits computation of standard errors.} \item{theta}{starting values for the frailty variance (default=0.1).} \item{theta.des}{design for regression for variances. 
The defauls is NULL that is equivalent to just one theta and the design with only a baseline.} \item{var.link}{default "0" is that the regression design on the variances is without a link, and "1" uses the link function exp.} \item{step}{step size for Newton-Raphson.} \item{notaylor}{if 1 then ignores variation due to survival model, this is quicker and then resample.iid=0 and residuals=0 is ok for marginal survival model that then is much quicker.} \item{se.clusters}{cluster variable for sandwich estimator of variance.} } \value{ returns an object of type "two.stage". With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{the martingale based pointwise variance estimates.} \item{robvar.cum}{robust pointwise variances estimates.} \item{gamma}{estimate of parametric components of model.} \item{var.gamma}{variance for gamma.} \item{robvar.gamma}{robust variance for gamma.} \item{D2linv}{inverse of the derivative of the score function from marginal model.} \item{score}{value of score for final estimates.} \item{theta}{estimate of Gamma variance for frailty.} \item{var.theta}{estimate of variance of theta.} \item{SthetaInv}{inverse of derivative of score of theta.} \item{theta.score}{score for theta parameters.} } \description{ Fit Clayton-Oakes-Glidden Two-Stage model with Cox-Aalen marginals and regression on the variance parameters. } \details{ The model specifikatin allows a regression structure on the variance of the random effects, such it is allowed to depend on covariates fixed within clusters \deqn{ \theta_{k} = Q_{k}^T \nu }. This is particularly useful to model jointly different groups and to compare their variances. Fits an Cox-Aalen survival model. Time dependent variables and counting process data (multiple events per subject) are not possible ! 
The marginal baselines are on the Cox-Aalen form \deqn{ \lambda_{ki}(t) = Y_{ki}(t) ( X_{ki}^T(t) \alpha(t) ) \exp(Z_{ki}^T \beta ) } The model thus contains the Cox's regression model and the additive hazards model as special cases. (see cox.aalen function for more on this). The modelling formula uses the standard survival modelling given in the \bold{survival} package. Only for right censored survival data. The data for a subject is presented as multiple rows or 'observations', each of which applies to an interval of observation (start, stop]. For counting process data with the )start,stop] notation is used the 'id' variable is needed to identify the records for each subject. Only one record per subject is allowed in the current implementation for the estimation of theta. The program assumes that there are no ties, and if such are present random noise is added to break the ties. Left truncation is dealt with. Here the key assumption is that the maginals are correctly estimated and that we have a common truncation time within each cluster. 
} \examples{ library(timereg) data(diabetes) # Marginal Cox model with treat as covariate marg <- cox.aalen(Surv(time,status)~prop(treat)+prop(adult)+ cluster(id),data=diabetes,resample.iid=1) fit<-two.stage(marg,data=diabetes,theta=1.0,Nit=40) summary(fit) # using coxph and giving clusters, but SE wittout cox uncetainty margph <- coxph(Surv(time,status)~treat,data=diabetes) fit<-two.stage(margph,data=diabetes,theta=1.0,Nit=40,clusters=diabetes$id) # Stratification after adult theta.des<-model.matrix(~-1+factor(adult),diabetes); des.t<-model.matrix(~-1+factor(treat),diabetes); design.treat<-cbind(des.t[,-1]*(diabetes$adult==1), des.t[,-1]*(diabetes$adult==2)) # test for common baselines included here marg1<-cox.aalen(Surv(time,status)~-1+factor(adult)+prop(design.treat)+cluster(id), data=diabetes,resample.iid=1,Nit=50) fit.s<-two.stage(marg1,data=diabetes,Nit=40,theta=1,theta.des=theta.des) summary(fit.s) # with common baselines and common treatment effect (although test reject this) fit.s2<-two.stage(marg,data=diabetes,Nit=40,theta=1,theta.des=theta.des) summary(fit.s2) # test for same variance among the two strata theta.des<-model.matrix(~factor(adult),diabetes); fit.s3<-two.stage(marg,data=diabetes,Nit=40,theta=1,theta.des=theta.des) summary(fit.s3) # to fit model without covariates, use beta.fixed=1 and prop or aalen function marg <- aalen(Surv(time,status)~+1+cluster(id), data=diabetes,resample.iid=1,n.sim=0) fita<-two.stage(marg,data=diabetes,theta=0.95,detail=0) summary(fita) # same model but se's without variation from marginal model to speed up computations marg <- aalen(Surv(time,status) ~+1+cluster(id),data=diabetes, resample.iid=0,n.sim=0) fit<-two.stage(marg,data=diabetes,theta=0.95,detail=0) summary(fit) # same model but se's now with fewer time-points for approx of iid decomp of marginal # model to speed up computations marg <- cox.aalen(Surv(time,status) ~+prop(treat)+cluster(id),data=diabetes, 
resample.iid=1,n.sim=0,max.timepoint.sim=5,beta.fixed=1,beta=0) fit<-two.stage(marg,data=diabetes,theta=0.95,detail=0) summary(fit) } \references{ Glidden (2000), A Two-Stage estimator of the dependence parameter for the Clayton Oakes model. Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/comp.risk.Rd0000644000175000017500000002722514077524412015025 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/comprisk.r \name{comp.risk} \alias{comp.risk} \title{Competings Risks Regression} \usage{ comp.risk( formula, data = parent.frame(), cause, times = NULL, Nit = 50, clusters = NULL, est = NULL, fix.gamma = 0, gamma = 0, n.sim = 0, weighted = 0, model = "fg", detail = 0, interval = 0.01, resample.iid = 1, cens.model = "KM", cens.formula = NULL, time.pow = NULL, time.pow.test = NULL, silent = 1, conv = 1e-06, weights = NULL, max.clust = 1000, n.times = 50, first.time.p = 0.05, estimator = 1, trunc.p = NULL, cens.weights = NULL, admin.cens = NULL, conservative = 1, monotone = 0, step = NULL ) } \arguments{ \item{formula}{a formula object, with the response on the left of a '~' operator, and the terms on the right. The response must be a survival object as returned by the `Event' function. The status indicator is not important here. Time-invariant regressors are specified by the wrapper const(), and cluster variables (for computing robust variances) by the wrapper cluster().} \item{data}{a data.frame with the variables.} \item{cause}{For competing risk models specificies which cause we consider.} \item{times}{specifies the times at which the estimator is considered. 
Defaults to all the times where an event of interest occurs, with the first 10 percent or max 20 jump points removed for numerical stability in simulations.} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{clusters}{specifies cluster structure, for backwards compability.} \item{est}{possible starting value for nonparametric component of model.} \item{fix.gamma}{to keep gamma fixed, possibly at 0.} \item{gamma}{starting value for constant effects.} \item{n.sim}{number of simulations in resampling.} \item{weighted}{Not implemented. To compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{model}{"additive", "prop"ortional, "rcif", or "logistic".} \item{detail}{if 0 no details are printed during iterations, if 1 details are given.} \item{interval}{specifies that we only consider timepoints where the Kaplan-Meier of the censoring distribution is larger than this value.} \item{resample.iid}{to return the iid decomposition, that can be used to construct confidence bands for predictions} \item{cens.model}{specified which model to use for the ICPW, KM is Kaplan-Meier alternatively it may be "cox"} \item{cens.formula}{specifies the regression terms used for the regression model for chosen regression model. When cens.model is specified, the default is to use the same design as specified for the competing risks model.} \item{time.pow}{specifies that the power at which the time-arguments is transformed, for each of the arguments of the const() terms, default is 1 for the additive model and 0 for the proportional model.} \item{time.pow.test}{specifies that the power the time-arguments is transformed for each of the arguments of the non-const() terms. This is relevant for testing if a coefficient function is consistent with the specified form A_l(t)=beta_l t^time.pow.test(l). 
Default is 1 for the additive model and 0 for the proportional model.} \item{silent}{if 0 information on convergence problems due to non-invertible derviates of scores are printed.} \item{conv}{gives convergence criterie in terms of sum of absolute change of parameters of model} \item{weights}{weights for estimating equations.} \item{max.clust}{sets the total number of i.i.d. terms in i.i.d. decompostition. This can limit the amount of memory used by coarsening the clusters. When NULL then all clusters are used. Default is 1000 to save memory and time.} \item{n.times}{only uses 50 points for estimation, if NULL then uses all points, subject to p.start condition.} \item{first.time.p}{first point for estimation is pth percentile of cause jump times.} \item{estimator}{default estimator is 1.} \item{trunc.p}{truncation weight for delayed entry, P(T > entry.time | Z_i), typically Cox model.} \item{cens.weights}{censoring weights can be given here rather than calculated using the KM, cox or aalen models.} \item{admin.cens}{censoring times for the administrative censoring} \item{conservative}{set to 0 to compute correct variances based on censoring weights, default is conservative estimates that are much quicker.} \item{monotone}{monotone=0, uses estimating equations \deqn{ (D_\beta P_1) w(t) ( Y(t)/G_c(t) - P_1(t,X)) and } montone 1 uses \deqn{ w(t) ( Y(t)/G_c(t) - P_1(t,X)) and }} \item{step}{step size for Fisher-Scoring algorithm.} } \value{ returns an object of type 'comprisk'. With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.} \item{var.cum}{pointwise variances estimates. } \item{gamma}{estimate of proportional odds parameters of model.} \item{var.gamma}{variance for gamma. } \item{score}{sum of absolute value of scores.} \item{gamma2}{estimate of constant effects based on the non-parametric estimate. 
Used for testing of constant effects.} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{conf.band}{resampling based constant to construct 95\% uniform confidence bands.} \item{B.iid}{list of iid decomposition of non-parametric effects.} \item{gamma.iid}{matrix of iid decomposition of parametric effects.} \item{test.procBeqC}{observed test process for testing of time-varying effects} \item{sim.test.procBeqC}{50 resample processes for for testing of time-varying effects} \item{conv}{information on convergence for time points used for estimation.} } \description{ Fits a semiparametric model for the cause-specific quantities : \deqn{ P(T < t, cause=1 | x,z) = P_1(t,x,z) = h( g(t,x,z) ) } for a known link-function \eqn{h()} and known prediction-function \eqn{g(t,x,z)} for the probability of dying from cause 1 in a situation with competing causes of death. } \details{ We consider the following models : 1) the additive model where \eqn{h(x)=1-\exp(-x)} and \deqn{ g(t,x,z) = x^T A(t) + (diag(t^p) z)^T \beta } 2) the proportional setting that includes the Fine & Gray (FG) "prop" model and some extensions where \eqn{h(x)=1-\exp(-\exp(x))} and \deqn{ g(t,x,z) = (x^T A(t) + (diag(t^p) z)^T \beta) } The FG model is obtained when \eqn{x=1}, but the baseline is parametrized as \eqn{\exp(A(t))}. 
The "fg" model is a different parametrization that contains the FG model, where \eqn{h(x)=1-\exp(-x)} and \deqn{ g(t,x,z) = (x^T A(t)) \exp((diag(t^p) z)^T \beta) } The FG model is obtained when \eqn{x=1}. 3) a "logistic" model where \eqn{h(x)=\exp(x)/( 1+\exp(x))} and \deqn{ g(t,x,z) = x^T A(t) + (diag(t^p) z)^T \beta } The "logistic2" is \deqn{ P_1(t,x,z) = x^T A(t) exp((diag(t^p) z)^T \beta)/ (1+ x^T A(t) exp((diag(t^p) z)^T \beta)) } The simple logistic model with just a baseline can also be fitted by an alternative procedure that has better small sample properties see prop.odds.subist(). 4) the relative cumulative incidence function "rcif" model where \eqn{h(x)=\exp(x)} and \deqn{ g(t,x,z) = x^T A(t) + (diag(t^p) z)^T \beta } The "rcif2" \deqn{ P_1(t,x,z) = (x^T A(t)) \exp((diag(t^p) z)^T \beta) } Where p by default is 1 for the additive model and 0 for the other models. In general p may be powers of the same length as z. Since timereg version 1.8.4. the response must be specified with the \code{\link{Event}} function instead of the \code{\link{Surv}} function and the arguments. For example, if the old code was comp.risk(Surv(time,cause>0)~x1+x2,data=mydata,cause=mydata$cause,causeS=1) the new code is comp.risk(Event(time,cause)~x1+x2,data=mydata,cause=1) Also the argument cens.code is now obsolete since cens.code is an argument of \code{\link{Event}}. 
} \examples{ data(bmt); clust <- rep(1:204,each=2) addclust<-comp.risk(Event(time,cause)~platelet+age+tcell+cluster(clust),data=bmt, cause=1,resample.iid=1,n.sim=100,model="additive") ### addclust<-comp.risk(Event(time,cause)~+1+cluster(clust),data=bmt,cause=1, resample.iid=1,n.sim=100,model="additive") pad <- predict(addclust,X=1) plot(pad) add<-comp.risk(Event(time,cause)~platelet+age+tcell,data=bmt, cause=1,resample.iid=1,n.sim=100,model="additive") summary(add) par(mfrow=c(2,4)) plot(add); ### plot(add,score=1) ### to plot score functions for test ndata<-data.frame(platelet=c(1,0,0),age=c(0,1,0),tcell=c(0,0,1)) par(mfrow=c(2,3)) out<-predict(add,ndata,uniform=1,n.sim=100) par(mfrow=c(2,2)) plot(out,multiple=0,uniform=1,col=1:3,lty=1,se=1) add<-comp.risk(Event(time,cause)~platelet+age+tcell,data=bmt, cause=1,resample.iid=0,n.sim=0,cens.model="cox", cens.formula=~factor(platelet),model="additive") out<-predict(add,ndata,se=0,uniform=0) par(mfrow=c(2,2)) plot(out,multiple=0,se=0,uniform=0,col=1:3,lty=1) ## fits additive model with some constant effects add.sem<-comp.risk(Event(time,cause)~ const(platelet)+const(age)+const(tcell),data=bmt, cause=1,resample.iid=1,n.sim=100,model="additive") summary(add.sem) out<-predict(add.sem,ndata,uniform=1,n.sim=100) par(mfrow=c(2,2)) plot(out,multiple=0,uniform=1,col=1:3,lty=1,se=0) ## Fine & Gray model fg<-comp.risk(Event(time,cause)~ const(platelet)+const(age)+const(tcell),data=bmt, cause=1,resample.iid=1,model="fg",n.sim=100) summary(fg) out<-predict(fg,ndata,uniform=1,n.sim=100) par(mfrow=c(2,2)) plot(out,multiple=1,uniform=0,col=1:3,lty=1,se=0) ## extended model with time-varying effects fg.npar<-comp.risk(Event(time,cause)~platelet+age+const(tcell), data=bmt,cause=1,resample.iid=1,model="prop",n.sim=100) summary(fg.npar); out<-predict(fg.npar,ndata,uniform=1,n.sim=100) head(out$P1[,1:5]); head(out$se.P1[,1:5]) par(mfrow=c(2,2)) plot(out,multiple=1,uniform=0,col=1:3,lty=1,se=0) ## Fine & Gray model with alternative 
parametrization for baseline fg2<-comp.risk(Event(time,cause)~const(platelet)+const(age)+const(tcell),data=bmt, cause=1,resample.iid=1,model="prop",n.sim=100) summary(fg2) ################################################################# ## Delayed entry models, ################################################################# nn <- nrow(bmt) entrytime <- rbinom(nn,1,0.5)*(bmt$time*runif(nn)) bmt$entrytime <- entrytime times <- seq(5,70,by=1) bmtw <- prep.comp.risk(bmt,times=times,time="time",entrytime="entrytime",cause="cause") ## non-parametric model outnp <- comp.risk(Event(time,cause)~tcell+platelet+const(age), data=bmtw,cause=1,fix.gamma=1,gamma=0, cens.weights=bmtw$cw,weights=bmtw$weights,times=times,n.sim=0) par(mfrow=c(2,2)) plot(outnp) outnp <- comp.risk(Event(time,cause)~tcell+platelet, data=bmtw,cause=1, cens.weights=bmtw$cw,weights=bmtw$weights,times=times,n.sim=0) par(mfrow=c(2,2)) plot(outnp) ## semiparametric model out <- comp.risk(Event(time,cause)~const(tcell)+const(platelet),data=bmtw,cause=1, cens.weights=bmtw$cw,weights=bmtw$weights,times=times,n.sim=0) summary(out) } \references{ Scheike, Zhang and Gerds (2008), Predicting cumulative incidence probability by direct binomial regression,Biometrika, 95, 205-220. Scheike and Zhang (2007), Flexible competing risks regression modelling and goodness of fit, LIDA, 14, 464-483. Martinussen and Scheike (2006), Dynamic regression models for survival data, Springer. } \author{ Thomas Scheike } \keyword{survival} timereg/man/const.Rd0000644000175000017500000000051414077524411014235 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/aalen.r \name{const} \alias{const} \title{Identifies parametric terms of model} \usage{ const(x) } \arguments{ \item{x}{variable} } \description{ Specifies which of the regressors that have constant effect. 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/rchaz.Rd0000644000175000017500000000465614077524411014231 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{rchaz} \alias{rchaz} \alias{pc.hazard} \alias{simrchaz} \alias{addCums} \title{Simulation of Piecewise constant hazard model (Cox).} \usage{ rchaz( cumhazard, rr, n = NULL, entry = NULL, cum.hazard = TRUE, cause = 1, extend = FALSE ) } \arguments{ \item{cumhazard}{cumulative hazard, or piece-constant rates for periods defined by first column of input.} \item{rr}{relative risk for simulations, alternatively when rr=1 specify n} \item{n}{number of simulation if rr not given} \item{entry}{delayed entry time for simuations.} \item{cum.hazard}{specifies wheter input is cumulative hazard or rates.} \item{cause}{name of cause} \item{extend}{to extend piecewise constant with constant rate. Default is average rate over time from cumulative (when TRUE), if numeric then uses given rate.} } \description{ Simulates data from piecwise constant baseline hazard that can also be of Cox type. Censor data at highest value of the break points. } \details{ For a piecewise linear cumulative hazard the inverse is easy to compute with and delayed entry x we compute \deqn{\Lambda^{-1}(\Lambda(x) + E/RR)}, where RR are the relative risks and E is exponential with mean 1. This quantity has survival function \deqn{P(T > t | T>x) = exp(-RR (\Lambda(t) - \Lambda(x)))}. 
} \examples{ rates <- c(0,0.01,0.052,0.01,0.04) breaks <- c(0,10, 20, 30, 40) haz <- cbind(breaks,rates) n <- 1000 X <- rbinom(n,1,0.5) beta <- 0.2 rrcox <- exp(X * beta) cumhaz <- cumsum(c(0,diff(breaks)*rates[-1])) cumhaz <- cbind(breaks,cumhaz) pctime <- rchaz(haz,n=1000,cum.hazard=FALSE) par(mfrow=c(1,2)) ss <- aalen(Surv(time,status)~+1,data=pctime,robust=0) plot(ss) lines(cumhaz,col=2,lwd=2) pctimecox <- rchaz(cumhaz,rrcox) pctime <- cbind(pctime,X) ssx <- cox.aalen(Surv(time,status)~+prop(X),data=pctimecox,robust=0) plot(ssx) lines(cumhaz,col=2,lwd=2) ### simulating data with hazard as real data data(TRACE) par(mfrow=c(1,2)) ss <- cox.aalen(Surv(time,status==9)~+prop(vf),data=TRACE,robust=0) par(mfrow=c(1,2)) plot(ss) ### pctime <- rchaz(ss$cum,n=1000) ### sss <- aalen(Surv(time,status)~+1,data=pctime,robust=0) lines(sss$cum,col=2,lwd=2) pctime <- rchaz(ss$cum,rrcox) pctime <- cbind(pctime,X) ### sss <- cox.aalen(Surv(time,status)~+prop(X),data=pctime,robust=0) summary(sss) plot(ss) lines(sss$cum,col=3,lwd=3) } \author{ Thomas Scheike } \keyword{survival} timereg/man/rcrisk.Rd0000644000175000017500000000353214077524412014410 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{rcrisk} \alias{rcrisk} \alias{cause.pchazard.sim} \title{Simulation of Piecewise constant hazard models with two causes (Cox).} \usage{ rcrisk(cumhaz1, cumhaz2, rr1, rr2, n = NULL, cens = NULL, rrc = NULL, ...) 
} \arguments{ \item{cumhaz1}{cumulative hazard of cause 1} \item{cumhaz2}{cumulative hazard of cause 1} \item{rr1}{number of simulations or vector of relative risk for simuations.} \item{rr2}{number of simulations or vector of relative risk for simuations.} \item{n}{number of simulation if rr not given} \item{cens}{to censor further , rate or cumumlative hazard} \item{rrc}{retlativ risk for censoring.} \item{...}{arguments for rchaz} } \description{ Simulates data from piecwise constant baseline hazard that can also be of Cox type. Censor data at highest value of the break points for either of the cumulatives. } \examples{ data(TRACE) cox1 <- cox.aalen(Surv(time,status==9)~prop(vf)+prop(chf)+prop(wmi), data=TRACE,robust=0) cox2 <- cox.aalen(Surv(time,status==0)~prop(vf)+prop(chf)+prop(wmi), data=TRACE,robust=0) X1 <- TRACE[,c("vf","chf","wmi")] n <- 1000 xid <- sample(1:nrow(X1),n,replace=TRUE) Z1 <- X1[xid,] Z2 <- X1[xid,] rr1 <- exp(as.matrix(Z1) \%*\% cox1$gamma) rr2 <- exp(as.matrix(Z2) \%*\% cox2$gamma) cumhaz1 <- cox1$cum cumhaz2 <- cox2$cum d <- rcrisk(cox1$cum,cox2$cum,rr1,rr2) dd <- cbind(d,Z1) sc1 <- cox.aalen(Surv(time,status==1)~prop(vf)+prop(chf)+prop(wmi), data=dd,robust=0) cbind(sc1$gamma, cox1$gamma) sc2 <- cox.aalen(Surv(time,status==2)~prop(vf)+prop(chf)+prop(wmi), data=dd,robust=0) cbind(sc2$gamma, cox2$gamma) par(mfrow=c(1,2)) plot(cox1); lines(sc1$cum,col=2) plot(cox2$cum,type="l"); lines(sc2$cum,col=2) } \author{ Thomas Scheike } \keyword{survival} timereg/man/dynreg.Rd0000644000175000017500000001520714077524412014405 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.dynreg.r \name{dynreg} \alias{dynreg} \title{Fit time-varying regression model} \usage{ dynreg( formula, data = parent.frame(), aalenmod, bandwidth = 0.5, id = NULL, bhat = NULL, start.time = 0, max.time = NULL, n.sim = 500, meansub = 1, weighted.test = 0, resample = 0 ) } \arguments{ \item{formula}{a formula object with the 
response on the left of a '~' operator, and the independent terms on the right as regressors.} \item{data}{a data.frame with the variables.} \item{aalenmod}{Aalen model for measurement times. Specified as a survival model (see aalen function).} \item{bandwidth}{bandwidth for local iterations. Default is 50\% of the range of the considered observation period.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{bhat}{initial value for estimates. If NULL local linear estimate is computed.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. Default is max of data.} \item{n.sim}{number of simulations in resampling.} \item{meansub}{if '1' then the mean of the responses is subtracted before the estimation is carried out.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{resample}{returns resample processes.} } \value{ returns an object of type "dynreg". With the following arguments: \item{cum}{the cumulative regression coefficients. This is the efficient estimator based on an initial smoother obtained by local linear regression : \deqn{ \hat B(t) = \int_0^t \tilde \beta(s) ds+ \hspace{4 cm}}{} \deqn{ }{}\deqn{\int_0^t X^{-} (Diag(z) -Diag( X^T(s) \tilde \beta(s)) ) dp(ds \times dz), }{} where \eqn{\tilde \beta(t)} is an initial estimate either provided or computed by local linear regression. To plot this estimate use type="eff.smooth" in the plot() command. 
} \item{var.cum}{the martingale based pointwise variance estimates.} \item{robvar.cum}{robust pointwise variances estimates.} \item{gamma}{estimate of semi-parametric components of model.} \item{var.gamma}{variance for gamma.} \item{robvar.gamma}{robust variance for gamma.} \item{cum0}{simple estimate of cumulative regression coefficients that does not use use an initial smoothing based estimate \deqn{ \hat B_0(t) = \int_0^t X^{-} Diag(z) dp(ds \times dz). } To plot this estimate use type="0.mpp" in the plot() command. } \item{var.cum0}{the martingale based pointwise variance estimates of cum0.} \item{cum.ms}{estimate of cumulative regression coefficients based on initial smoother (but robust to this estimator). \deqn{ \hat B_{ms}(t) = \int_0^t X^{-} (Diag(z)-f(s)) dp(ds \times dz), } where \eqn{f} is chosen as the matrix \deqn{ f(s) = Diag( X^T(s) \tilde \beta(s)) ( I - X_\alpha(s) X_\alpha^-(s) ), } where \eqn{X_{\alpha}} is the design for the sampling intensities. This is also an efficient estimator when the initial estimator is consistent for \eqn{\beta(t)} and then asymptotically equivalent to cum, but small sample properties appear inferior. Its variance is estimated by var.cum. To plot this estimate use type="ms.mpp" in the plot() command. } \item{cum.ly}{estimator where local averages are subtracted. Special case of cum.ms. To plot this estimate use type="ly.mpp" in plot. } \item{var.cum.ly}{the martingale based pointwise variance estimates. } \item{gamma0}{estimate of parametric component of model. } \item{var.gamma0}{estimate of variance of parametric component of model. } \item{gamma.ly}{estimate of parametric components of model. } \item{var.gamma.ly}{estimate of variance of parametric component of model. } \item{gamma.ms}{estimate of variance of parametric component of model. 
} \item{var.gamma.ms}{estimate of variance of parametric component of model.} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands.} \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect.} \item{sim.test.procBeqC}{list of 50 random realizations of test-processes under null based on resampling.} \item{covariance}{covariances for nonparametric terms of model.} } \description{ Fits time-varying regression model with partly parametric components. Time-dependent variables for longitudinal data. The model assumes that the mean of the observed responses given covariates is a linear time-varying regression model : } \details{ \deqn{ E( Z_{ij} | X_{ij}(t) ) = \beta^T(t) X_{ij}^1(t) + \gamma^T X_{ij}^2(t) } where \eqn{Z_{ij}} is the j'th measurement at time t for the i'th subject with covariates \eqn{X_{ij}^1} and \eqn{X_{ij}^2}. Resampling is used for computing p-values for tests of timevarying effects. The data for a subject is presented as multiple rows or 'observations', each of which applies to an interval of observation (start, stop]. 
For counting process data where the (start,stop] notation is used, the 'id' variable is needed to identify the records for each subject. The program assumes that there are no ties, and if such are present random noise is added to break the ties. } \examples{ \donttest{ ## this runs slowly and is therefore donttest data(csl) indi.m<-rep(1,length(csl$lt)) # Fits time-varying regression model out<-dynreg(prot~treat+prot.prev+sex+age,data=csl, Surv(lt,rt,indi.m)~+1,start.time=0,max.time=2,id=csl$id, n.sim=100,bandwidth=0.7,meansub=0) summary(out) par(mfrow=c(2,3)) plot(out) # Fits time-varying semi-parametric regression model. outS<-dynreg(prot~treat+const(prot.prev)+const(sex)+const(age),data=csl, Surv(lt,rt,indi.m)~+1,start.time=0,max.time=2,id=csl$id, n.sim=100,bandwidth=0.7,meansub=0) summary(outS) } } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/summary.cum.residuals.Rd0000644000175000017500000000126014077524411017360 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mgresid.r \name{summary.cum.residuals} \alias{summary.cum.residuals} \title{Prints summary statistics for goodness-of-fit tests based on cumulative residuals} \usage{ \method{summary}{cum.residuals}(object, digits = 3, ...) } \arguments{ \item{object}{output from the cum.residuals() function.} \item{digits}{number of digits in printouts.} \item{...}{unused arguments - for S3 compatibility} } \description{ Computes p-values for extreme behaviour relative to the model of various cumulative residual processes.
} \examples{ # see cum.residuals for examples } \author{ Thomas Scheike } \keyword{survival} timereg/man/sim.cause.cox.Rd0000644000175000017500000000535714077524412015601 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pc-hazard.r \name{sim.cause.cox} \alias{sim.cause.cox} \title{Simulation of cause specific from Cox models.} \usage{ \method{sim}{cause.cox}(coxs, n, data = NULL, cens = NULL, rrc = NULL, ...) } \arguments{ \item{coxs}{list of cox models.} \item{n}{number of simulations.} \item{data}{to extract covariates for simulations (draws from observed covariates).} \item{cens}{specifies censoring model, if NULL then only censoring for each cause at end of last event of this type. if "is.matrix" then uses cumulative. hazard given, if "is.scalar" then uses rate for exponential, and if not given then takes average rate of in simulated data from cox model. But censoring can also be given as a cause.} \item{rrc}{possible vector of relative risk for cox-type censoring.} \item{...}{arguments for rchaz, for example entry-time} } \description{ Simulates data that looks like fit from cause specific Cox models. Censor data automatically. When censoring is given in the list of causes this will give censoring that looks like the data. Covariates are drawn from data-set with replacement. This gives covariates like the data. 
} \examples{ nsim <- 100 data(bmt) # coxph cox1 <- coxph(Surv(time,cause==1)~tcell+platelet,data=bmt) cox2 <- coxph(Surv(time,cause==2)~tcell+platelet,data=bmt) coxs <- list(cox1,cox2) dd <- sim.cause.cox(coxs,nsim,data=bmt) scox1 <- coxph(Surv(time,status==1)~tcell+platelet,data=dd) scox2 <- coxph(Surv(time,status==2)~tcell+platelet,data=dd) cbind(cox1$coef,scox1$coef) cbind(cox2$coef,scox2$coef) \donttest{ ### do not test to avoid dependence on mets library(mets) data(bmt) cox1 <- phreg(Surv(time,cause==1)~tcell+platelet,data=bmt) cox2 <- phreg(Surv(time,cause==2)~tcell+platelet,data=bmt) coxs <- list(cox1,cox2) dd <- sim.cause.cox(coxs,nsim,data=bmt) scox1 <- phreg(Surv(time,status==1)~tcell+platelet,data=dd) scox2 <- phreg(Surv(time,status==2)~tcell+platelet,data=dd) cbind(cox1$coef,scox1$coef) cbind(cox2$coef,scox2$coef) par(mfrow=c(1,2)) basehazplot.phreg(cox1); basehazplot.phreg(scox1,add=TRUE); basehazplot.phreg(cox2); basehazplot.phreg(scox2,add=TRUE); cox1 <- phreg(Surv(time,cause==1)~strata(tcell)+platelet,data=bmt) cox2 <- phreg(Surv(time,cause==2)~strata(tcell)+platelet,data=bmt) coxs <- list(cox1,cox2) dd <- sim.cause.cox(coxs,nsim,data=bmt) scox1 <- phreg(Surv(time,status==1)~strata(tcell)+platelet,data=dd) scox2 <- phreg(Surv(time,status==2)~strata(tcell)+platelet,data=dd) cbind(cox1$coef,scox1$coef) cbind(cox2$coef,scox2$coef) par(mfrow=c(1,2)) basehazplot.phreg(cox1); basehazplot.phreg(scox1,add=TRUE); basehazplot.phreg(cox2); basehazplot.phreg(scox2,add=TRUE); } } \author{ Thomas Scheike } \keyword{survival} timereg/man/qcut.Rd0000644000175000017500000000102714077524411014063 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/qcut.r \name{qcut} \alias{qcut} \title{Cut a variable} \usage{ qcut(x, cuts = 4, breaks = NULL, ...) 
} \arguments{ \item{x}{variable to cut} \item{cuts}{number of groups, 4 gives quartiles} \item{breaks}{can also give breaks} \item{...}{other argument for cut function of R} } \description{ Calls the cut function to cut variables on data frame. } \examples{ data(sTRACE) gx <- qcut(sTRACE$age) table(gx) } \author{ Thomas Scheike } \keyword{survival} timereg/man/summary.aalen.Rd0000644000175000017500000000152114077524411015662 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.aalen.r \name{summary.aalen} \alias{summary.aalen} \alias{summary.cox.aalen} \alias{summary.prop.excess} \alias{summary.timecox} \alias{summary.dynreg} \title{Prints summary statistics} \usage{ \method{summary}{aalen}(object, digits = 3, ...) } \arguments{ \item{object}{an aalen object.} \item{digits}{number of digits in printouts.} \item{...}{unused arguments - for S3 compatibility} } \description{ Computes p-values for test of significance for nonparametric terms of model, p-values for test of constant effects based on both supremum and integrated squared difference. } \details{ Returns parameter estimates and their standard errors. 
} \examples{ ### see help(aalen) } \references{ Martinussen and Scheike, } \author{ Thomas Scheike } \keyword{survival} timereg/man/prop.Rd0000644000175000017500000000112514077524411014066 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.cox-aalen.r \name{prop} \alias{prop} \title{Identifies the multiplicative terms in Cox-Aalen model and proportional excess risk model} \usage{ prop(x) } \arguments{ \item{x}{variable} } \description{ Specifies which of the regressors that belong to the multiplicative part of the Cox-Aalen model } \details{ \deqn{ \lambda_{i}(t) = Y_i(t) ( X_{i}^T(t) \alpha(t) ) \exp(Z_{i}^T(t) \beta ) } for this model prop specified the covariates to be included in \eqn{Z_{i}(t)} } \author{ Thomas Scheike } \keyword{survival} timereg/man/TRACE.Rd0000644000175000017500000000312314077524411013744 0ustar nileshnilesh\name{TRACE} \alias{TRACE} \alias{sTRACE} \alias{tTRACE} \non_function{} \title{The TRACE study group of myocardial infarction} \description{ The TRACE data frame contains 1877 patients and is a subset of a data set consisting of approximately 6000 patients. It contains data relating survival of patients after myocardial infarction to various risk factors. sTRACE is a subsample consisting of 300 patients. tTRACE is a subsample consisting of 1000 patients. } \format{ This data frame contains the following columns: \describe{ \item{id}{a numeric vector. Patient code. } \item{status}{ a numeric vector code. Survival status. 9: dead from myocardial infarction, 0: alive, 7: dead from other causes. } \item{time}{ a numeric vector. Survival time in years. } \item{chf}{ a numeric vector code. Clinical heart pump failure, 1: present, 0: absent. } \item{diabetes}{ a numeric vector code. Diabetes, 1: present, 0: absent. } \item{vf}{ a numeric vector code. Ventricular fibrillation, 1: present, 0: absent. } \item{wmi}{ a numeric vector. 
Measure of heart pumping effect based on ultrasound measurements where 2 is normal and 0 is worst. } \item{sex}{ a numeric vector code. 1: female, 0: male. } \item{age}{ a numeric vector code. Age of patient. } } } \source{ The TRACE study group. Jensen, G.V., Torp-Pedersen, C., Hildebrandt, P., Kober, L., F. E. Nielsen, Melchior, T., Joen, T. and P. K. Andersen (1997), Does in-hospital ventricular fibrillation affect prognosis after myocardial infarction?, European Heart Journal 18, 919--924. } \examples{ data(TRACE) names(TRACE) } \keyword{datasets} timereg/man/internal-addreg.Rd0000644000175000017500000000405214077524411016150 0ustar nileshnilesh\name{pval} \alias{summary.restricted.residual.mean} \alias{plot.restricted.residual.mean} \alias{summary.resmean} \alias{plot.resmean} \alias{print.resmean} \alias{coef.resmean} \alias{coefcox} \alias{kmplot} \alias{pval} \alias{additive.compSs} \alias{CsmoothB} \alias{Csmooth2B} \alias{pval} \alias{kernel} \alias{percen} \alias{localTimeReg} \alias{nameestimate} \alias{namematrix} \alias{aalenBase} \alias{aalenBaseC} \alias{aalen.des} \alias{aalen.des2} \alias{cox.aalenBase} \alias{read-design} \alias{is.diag} \alias{semiaalen} \alias{semiregBase} \alias{check.missing} \alias{sindex.prodlim} \alias{read.design} \alias{read.surv} \alias{risk.index} \alias{faster.reshape} \alias{rm.missing} \alias{plot.two.stage} \alias{summary.two.stage} \alias{predict.two.stage} \alias{predictpropodds} \alias{plot.cums} \alias{plot.comprisk} \alias{plot.predict.comprisk} \alias{plot.predict.timereg} \alias{pava} \alias{plotScore} \alias{summary.comprisk} \alias{pred.cum} \alias{slaaop} \alias{pred.des} \alias{Cpred} \alias{plot.cox.aalen2} \alias{plot.predict} \alias{coef.aalen} \alias{cox.marg} \alias{summary.cox.marg} \alias{coef.cox.marg} \alias{print.cox.marg} \alias{comprisk.ipw} \alias{coef.comprisk.ipw} \alias{print.comprisk.ipw} \alias{prop.odds.subdist.ipw} \alias{summary.comprisk.ipw} \alias{coef.cox.aalen} 
\alias{coef.comprisk} \alias{coef.two.stage} \alias{cluster.index.timereg} \alias{coefBase} \alias{des.aalen} \alias{timetest} \alias{print.pe.sasieni} \alias{pred.stratKM} \alias{prep.glm.comprisk} \alias{print.predict.timereg} \alias{print.two.stage} \alias{residualsTimereg} \alias{summary.pe.sasieni} \alias{summary.predict.timereg} \alias{coef.dynreg} \alias{coef.timecox} \alias{dynregBase} \alias{prop.excessBase} \alias{prop.odds.gam} \alias{plotConfregion} \alias{semicox} \alias{timecoxBase} \alias{timereg.formula} \alias{names2formula} \alias{twin.clustertrunc} \alias{vcov.aalen} \alias{vcov.cox.aalen} \alias{vcov.two.stage} \alias{vcov.comp.risk} \title{For internal use} \description{for internal use} \author{Thomas Scheike} \keyword{survival} timereg/man/cox.aalen.Rd0000644000175000017500000002153414077524412014765 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/new.cox-aalen.r \name{cox.aalen} \alias{cox.aalen} \title{Fit Cox-Aalen survival model} \usage{ cox.aalen( formula = formula(data), data = parent.frame(), beta = NULL, Nit = 20, detail = 0, start.time = 0, max.time = NULL, id = NULL, clusters = NULL, n.sim = 500, residuals = 0, robust = 1, weighted.test = 0, covariance = 0, resample.iid = 1, weights = NULL, rate.sim = 1, beta.fixed = 0, max.clust = 1000, exact.deriv = 1, silent = 1, max.timepoint.sim = 100, basesim = 0, offsets = NULL, strata = NULL, propodds = 0, caseweight = NULL ) } \arguments{ \item{formula}{a formula object with the response on the left of a '~' operator, and the independent terms on the right as regressors. The response must be a survival object as returned by the `Surv' function. 
Terms with a proportional effect are specified by the wrapper prop(), and cluster variables (for computing robust variances) by the wrapper cluster().} \item{data}{a data.frame with the variables.} \item{beta}{starting value for relative risk estimates.} \item{Nit}{number of iterations for Newton-Raphson algorithm.} \item{detail}{if 0 no details is printed during iterations, if 1 details are given.} \item{start.time}{start of observation period where estimates are computed.} \item{max.time}{end of observation period where estimates are computed. Estimates thus computed from [start.time, max.time]. Default is max of data.} \item{id}{For timevarying covariates the variable must associate each record with the id of a subject.} \item{clusters}{cluster variable for computation of robust variances.} \item{n.sim}{number of simulations in resampling.} \item{residuals}{to returns residuals that can be used for model validation in the function cum.residuals. Estimated martingale increments (dM) and corresponding time vector (time). When rate.sim=1 returns estimated martingales, dM_i(t) and if rate.sim=0, returns a matrix of dN_i(t).} \item{robust}{to compute robust variances and construct processes for resampling. May be set to 0 to save memory and time, in particular for rate.sim=1.} \item{weighted.test}{to compute a variance weighted version of the test-processes used for testing time-varying effects.} \item{covariance}{to compute covariance estimates for nonparametric terms rather than just the variances.} \item{resample.iid}{to return i.i.d. representation for nonparametric and parametric terms. 
based on counting process or martingale residuals (rate.sim).} \item{weights}{weights for weighted analysis.} \item{rate.sim}{rate.sim=1 such that resampling of residuals is based on estimated martingales and thus valid in rate case, rate.sim=0 means that resampling is based on counting processes and thus only valid in intensity case.} \item{beta.fixed}{option for computing score process for fixed relative risk parameter} \item{max.clust}{sets the total number of i.i.d. terms in i.i.d. decomposition. This can limit the amount of memory used by coarsening the clusters. When NULL then all clusters are used. Default is 1000 to save memory and time.} \item{exact.deriv}{if 1 then uses exact derivative in last iteration, if 2 then uses exact derivative for all iterations, and if 0 then uses approximation for all computations and there may be a small bias in the variance estimates. For Cox model always exact and all options give same results.} \item{silent}{if 1 then suppresses some output.} \item{max.timepoint.sim}{considers only this resolution on the time scale for simulations, see time.sim.resolution argument} \item{basesim}{1 to get simulations for cumulative baseline, including tests for constant effects.} \item{offsets}{offsets for analysis on log-scale. RR=exp(offsets+ x beta).} \item{strata}{future option for making strata in a different way than through X design in cox-aalen model (~-1+factor(strata)).} \item{propodds}{if 1 will fit the proportional odds model. Slightly less efficient than prop.odds() function but much quicker, for large data this also works.} \item{caseweight}{these weights have length equal to number of jump times, and are multiplied onto all jump times dN. Useful for getting the program to fit for example the proportional odds model or frailty models.} } \value{ returns an object of type "cox.aalen". With the following arguments: \item{cum}{cumulative timevarying regression coefficient estimates are computed within the estimation interval.
} \item{var.cum}{the martingale based pointwise variance estimates. } \item{robvar.cum}{robust pointwise variances estimates. } \item{gamma}{estimate of parametric components of model. } \item{var.gamma}{variance for gamma sandwhich estimator based on optional variation estimator of score and 2nd derivative.} \item{robvar.gamma}{robust variance for gamma. } \item{residuals}{list with residuals.} \item{obs.testBeq0}{observed absolute value of supremum of cumulative components scaled with the variance.} \item{pval.testBeq0}{p-value for covariate effects based on supremum test.} \item{sim.testBeq0}{resampled supremum values.} \item{obs.testBeqC}{observed absolute value of supremum of difference between observed cumulative process and estimate under null of constant effect.} \item{pval.testBeqC}{p-value based on resampling.} \item{sim.testBeqC}{resampled supremum values.} \item{obs.testBeqC.is}{observed integrated squared differences between observed cumulative and estimate under null of constant effect.} \item{pval.testBeqC.is}{p-value based on resampling.} \item{sim.testBeqC.is}{resampled supremum values.} \item{conf.band}{resampling based constant to construct robust 95\% uniform confidence bands. } \item{test.procBeqC}{observed test-process of difference between observed cumulative process and estimate under null of constant effect over time. } \item{sim.test.procBeqC}{list of 50 random realizations of test-processes under null based on resampling.} \item{covariance}{covariances for nonparametric terms of model.} \item{B.iid}{Resample processes for nonparametric terms of model.} \item{gamma.iid}{Resample processes for parametric terms of model.} \item{loglike}{approximate log-likelihood for model, similar to Cox's partial likelihood. 
Only computed when robust=1.} \item{D2linv}{inverse of the derivative of the score function.} \item{score}{value of score for final estimates.} \item{test.procProp}{observed score process for proportional part of model.} \item{var.score}{variance of score process (optional variation estimator for beta.fixed=1 and robust estimator otherwise).} \item{pval.Prop}{p-value based on resampling.} \item{sim.supProp}{re-sampled absolute supremum values.} \item{sim.test.procProp}{list of 50 random realizations of test-processes for proportionality under the model based on resampling.} } \description{ Fits a Cox-Aalen survival model. Time dependent variables and counting process data (multiple events per subject) are possible. } \details{ \deqn{ \lambda_{i}(t) = Y_i(t) ( X_{i}^T(t) \alpha(t) ) \exp(Z_{i}^T \beta ) } The model thus contains the Cox's regression model as special case. To fit a stratified Cox model it is important to parametrize the baseline appropriately (see example below). Resampling is used for computing p-values for tests of time-varying effects. Test for proportionality is considered by considering the score processes for the proportional effects of model. The modelling formula uses the standard survival modelling given in the \bold{survival} package. The data for a subject is presented as multiple rows or 'observations', each of which applies to an interval of observation (start, stop]. For counting process data where the (start,stop] notation is used, the 'id' variable is needed to identify the records for each subject. The program assumes that there are no ties, and if such are present random noise is added to break the ties.
} \examples{ library(timereg) data(sTRACE) # Fits Cox model out<-cox.aalen(Surv(time,status==9)~prop(age)+prop(sex)+ prop(vf)+prop(chf)+prop(diabetes),data=sTRACE) # makes Lin, Wei, Ying test for proportionality summary(out) par(mfrow=c(2,3)) plot(out,score=1) # Fits stratified Cox model out<-cox.aalen(Surv(time,status==9)~-1+factor(vf)+ prop(age)+prop(sex)+ prop(chf)+prop(diabetes),data=sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(1,2)); plot(out); # Same model, but needs to invert the entire marix for the aalen part: X(t) out<-cox.aalen(Surv(time,status==9)~factor(vf)+ prop(age)+prop(sex)+ prop(chf)+prop(diabetes),data=sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(1,2)); plot(out); # Fits Cox-Aalen model out<-cox.aalen(Surv(time,status==9)~prop(age)+prop(sex)+ vf+chf+prop(diabetes),data=sTRACE,max.time=7,n.sim=100) summary(out) par(mfrow=c(2,3)) plot(out) } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). } \author{ Thomas Scheike } \keyword{survival} timereg/man/plot.cum.residuals.Rd0000644000175000017500000000556414077524411016654 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mgresid.r \name{plot.cum.residuals} \alias{plot.cum.residuals} \title{Plots cumulative residuals} \usage{ \method{plot}{cum.residuals}( x, pointwise.ci = 1, hw.ci = 0, sim.ci = 0, robust = 1, specific.comps = FALSE, level = 0.05, start.time = 0, stop.time = 0, add.to.plot = FALSE, mains = TRUE, main = NULL, xlab = NULL, ylab = "Cumulative MG-residuals", ylim = NULL, score = 0, conf.band = FALSE, ... ) } \arguments{ \item{x}{the output from the "cum.residuals" function.} \item{pointwise.ci}{if >1 pointwise confidence intervals are plotted with lty=pointwise.ci} \item{hw.ci}{if >1 Hall-Wellner confidence bands are plotted with lty=hw.ci. Only 95\% bands can be constructed.} \item{sim.ci}{if >1 simulation based confidence bands are plotted with lty=sim.ci. 
These confidence bands are robust to non-martingale behaviour.} \item{robust}{if "1" robust standard errors are used to estimate standard error of estimate, otherwise martingale based estimate are used.} \item{specific.comps}{all components of the model is plotted by default, but a list of components may be specified, for example first and third "c(1,3)".} \item{level}{gives the significance level. Default is 0.05.} \item{start.time}{start of observation period where estimates are plotted. Default is 0.} \item{stop.time}{end of period where estimates are plotted. Estimates thus plotted from [start.time, max.time].} \item{add.to.plot}{to add to an already existing plot. Default is "FALSE".} \item{mains}{add names of covariates as titles to plots.} \item{main}{vector of names for titles in plots.} \item{xlab}{label for x-axis. NULL is default which leads to "Time" or "". Can also give a character vector.} \item{ylab}{label for y-axis. Default is "Cumulative MG-residuals".} \item{ylim}{limits for y-axis.} \item{score}{if '0' plots related to modelmatrix are specified, thus resulting in grouped residuals, if '1' plots for modelmatrix but with random realizations under model, if '2' plots residuals versus continuous covariates of model with random realizations under the model.} \item{conf.band}{makes simulation based confidence bands for the test processes under the 0 based on variance of these processes limits for y-axis. These will give additional information of whether the observed cumulative residuals are extreme or not when based on a variance weighted test.} \item{...}{unused arguments - for S3 compatibility} } \description{ This function plots the output from the cumulative residuals function "cum.residuals". The cumulative residuals are compared with the performance of similar processes under the model. } \examples{ # see cum.residuals for examples } \references{ Martinussen and Scheike, Dynamic Regression Models for Survival Data, Springer (2006). 
} \author{ Thomas Scheike } \keyword{survival} timereg/man/wald.test.Rd0000644000175000017500000000325414077524411015020 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/base.r \name{wald.test} \alias{wald.test} \title{Makes wald test} \usage{ wald.test( object = NULL, coef = NULL, Sigma = NULL, vcov = NULL, contrast, coef.null = NULL, null = NULL, print.coef = TRUE, alpha = 0.05 ) } \arguments{ \item{object}{timereg object} \item{coef}{estimates from some model} \item{Sigma}{variance of estimates} \item{vcov}{same as Sigma but more standard in other functions} \item{contrast}{contrast matrix for testing} \item{coef.null}{which indeces to test to 0} \item{null}{mean of null, 0 by default} \item{print.coef}{print the coefficients of the linear combinations.} \item{alpha}{significance level for CI for linear combinations of coefficients.} } \description{ Makes wald test, either by contrast matrix or testing components to 0. Can also specify the regression coefficients and the variance matrix. Also makes confidence intervals of the defined contrasts. Reads coefficientes and variances from timereg and coxph objects. 
} \examples{ data(sTRACE) # Fits Cox model out<-cox.aalen(Surv(time,status==9)~prop(age)+prop(sex)+ prop(vf)+prop(chf)+prop(diabetes),data=sTRACE,n.sim=0) wald.test(out,coef.null=c(1,2,3)) ### test age=sex vf=chf wald.test(out,contrast=rbind(c(1,-1,0,0,0),c(0,0,1,-1,0))) ### now same with direct specifation of estimates and variance wald.test(coef=out$gamma,Sigma=out$var.gamma,coef.null=c(1,2,3)) wald.test(coef=out$gamma,Sigma=out$robvar.gamma,coef.null=c(1,2,3)) ### test age=sex vf=chf wald.test(coef=out$gamma,Sigma=out$var.gamma, contrast=rbind(c(1,-1,0,0,0),c(0,0,1,-1,0))) } \author{ Thomas Scheike } \keyword{survival} timereg/src/0000755000175000017500000000000014127307567012643 5ustar nileshnileshtimereg/src/cox-aalen-lwy-resamp.c0000644000175000017500000007217714077524412016764 0ustar nileshnilesh//#include #include #include "matrix.h" #include #include void scoreny(times,Ntimes,designX,nx,px,designG,ng,pg,antpers,start,stop, betaS,Nit,cu,vcu,w,mw,loglike,Iinv,Vbeta,detail,offs,mof,sim,antsim, rani,Rvcu,RVbeta, test,testOBS,Ut,simUt,Uit,XligZ,aalen,nb,id,status,wscore,ridge,ratesim,score,dhatMit,gammaiid,dmgiid, retur,robust,covariance,Vcovs,addresamp,addproc, resample,gamiid,biid,clusters,antclust,vscore,betafixed,weights,entry,exactderiv, timegroup,maxtimepoint,stratum,silent) double *designX,*designG,*times,*betaS,*start,*stop,*cu,*w,*loglike,*Vbeta,*RVbeta,*vcu,*offs,*Rvcu,*Iinv,*test,*testOBS,*Ut,*simUt,*Uit,*aalen,*ridge,*score,*dhatMit,*gammaiid,*dmgiid,*Vcovs,*addproc,*gamiid,*biid,*vscore,*weights; int*covariance,*nx,*px,*ng,*pg,*antpers,*Ntimes,*mw,*Nit,*detail,*mof,*sim,*antsim,*rani,*XligZ,*nb,*id,*status,*wscore,*ratesim,*retur,*robust,*addresamp,*resample,*clusters,*antclust,*betafixed,*entry,*exactderiv,*timegroup,*maxtimepoint,*stratum,*silent; { int timing=0; clock_t c0,c1; c0=clock(); // {{{ setting up memory matrix *X,*Z,*WX,*WZ,*cdesX,*cdesX2,*cdesX3,*CtVUCt,*A,*AI; matrix *Vcov,*dYI,*Ct,*dM1M2,*M1M2t,*COV,*ZX,*ZP,*ZPX; matrix 
*ZPZ,*tmp2,*tmp3,*dS,*S1,*SI,*S2,*M1,*VU,*ZXAI,*VUI; matrix *RobVbeta,*Delta,*tmpM1,*Utt,*Delta2,*tmpM2; // matrix *St[*maxtimepoint],*M1M2[*Ntimes],*C[*maxtimepoint],*ZXAIs[*Ntimes],*dYIt[*Ntimes]; // matrix *St[*Ntimes], // matrix *M1M2[*Ntimes],*C[*Ntimes],*ZXAIs[*Ntimes],*AIs[*Ntimes]; matrix *Stg[*maxtimepoint],*Cg[*maxtimepoint]; matrix *W3t[*antclust],*W4t[*antclust],*W2t[*antclust],*Uti[*antclust]; matrix *ZPX1,*ZPZ1,*ZPXo,*ZPZo; vector *dA,*VdA,*MdA,*delta,*zav,*lamt,*lamtt; vector *xi,*zi,*U,*beta,*xtilde,*Gbeta,*zcol,*one,*difzzav; vector *offset,*weight,*varUthat[*maxtimepoint],*Uprofile; // vector *ZXdA[*Ntimes]; vector *ta,*ahatt,*vrisk,*tmpv1,*tmpv2,*rowX,*rowZ,*difX,*VdB; vector *W2[*antclust],*W3[*antclust],*reszpbeta,*res1dim; matrix *dAt; vector *Ui[*antclust]; int cin=0,ci=0,c,pers=0,i=0,j,k,l,s,s1,it,count,pmax, *imin=calloc(1,sizeof(int)), *cluster=calloc(*antpers,sizeof(int)), *ipers=calloc(*Ntimes,sizeof(int)); double S0,RR=1,time=0,ll,lle,llo; double tau,hati,random,scale,sumscore; double *cug=calloc((*maxtimepoint)*(*px+1),sizeof(double)), *timesg=calloc((*maxtimepoint),sizeof(double)); double norm_rand(); void GetRNGstate(),PutRNGstate(); /* float gasdev(),expdev(),ran1(); */ GetRNGstate(); /* to use R random normals */ if (*robust==1) { for (j=0;j<*antclust;j++) { malloc_mat(*maxtimepoint,*px,W3t[j]); malloc_mat(*maxtimepoint,*px,W4t[j]); malloc_mat(*maxtimepoint,*pg,W2t[j]); malloc_mat(*maxtimepoint,*pg,Uti[j]); malloc_vec(*pg,Ui[j]); malloc_vec(*px,W3[j]); } for(j=0;j<*maxtimepoint;j++) malloc_vec(*pg,varUthat[j]); } for (j=0;j<*antclust;j++) malloc_vec(*pg,W2[j]); for (c=0;c<*nx;c++) cluster[id[c]]=clusters[c]; if (*sim==1) { malloc_mat(*maxtimepoint,*px,Delta); malloc_mat(*maxtimepoint,*px,tmpM1); malloc_mat(*maxtimepoint,*pg,Delta2); malloc_mat(*maxtimepoint,*pg,tmpM2); } malloc_mat(*maxtimepoint,*pg,Utt); malloc_mats(*antpers,*px,&WX,&X,&cdesX,&cdesX2,&cdesX3,NULL); malloc_mats(*antpers,*pg,&WZ,&ZP,&Z,NULL); 
malloc_mats(*px,*px,&Vcov,&COV,&A,&AI,&M1,&CtVUCt,NULL); malloc_mats(*pg,*pg,&RobVbeta,&ZPZ,&tmp2,&dS,&S1,&S2,&SI,&VU,&VUI,NULL); malloc_mats(*pg,*px,&ZXAI,&ZX,&dM1M2,&M1M2t,NULL); malloc_mats(*px,*pg,&tmp3,&ZPX,&dYI,&Ct,NULL); malloc_mats(*px,*pg,&ZPX1,NULL); malloc_mats(*pg,*pg,&ZPZ1,NULL); malloc_mats(*px,*pg,&ZPXo,NULL); malloc_mats(*pg,*pg,&ZPZo,NULL); malloc_mat(*Ntimes,*px,dAt); malloc_vec(1,reszpbeta); malloc_vec(1,res1dim); malloc_vecs(*antpers,&weight,&lamtt,&lamt,&zcol,&Gbeta,&one,&offset,NULL); malloc_vecs(*px,&ahatt,&tmpv1,&difX,&VdB,&rowX,&xi,&dA,&VdA,&MdA,NULL); malloc_vecs(*px,&xtilde,NULL); malloc_vecs(*pg,&tmpv2,&rowZ,&zi,&U,&beta,&delta,&zav,&difzzav,&Uprofile,NULL); malloc_vec(*nb,ta); malloc_vec(*antpers,vrisk); for(j=0;j<*maxtimepoint;j++) { malloc_mat(*px,*pg,Cg[j]); malloc_mat(*pg,*pg,Stg[j]);} matrix *Cn,*M1M2n,*ZXAIn,*AIn; malloc_mat((*px)*(*Ntimes),*pg,Cn); malloc_mat((*px)*(*Ntimes),*px,AIn); malloc_mat(*pg,(*px)*(*Ntimes),M1M2n); malloc_mat(*pg,(*px)*(*Ntimes),ZXAIn); // for(j=0;j<*Ntimes;j++) { // malloc_mat(*px,*pg,C[j]); // malloc_mat(*pg,*px,M1M2[j]); // malloc_mat(*pg,*px,ZXAIs[j]); //// malloc_vec(*px,dAt[j]); malloc_mat(*px,*pg,dYIt[j]); //// malloc_vec(*pg,ZXdA[j]); malloc_mat(*pg,*pg,St[j]); // } pmax=max(*px,*pg); ll=0; for(j=0;j<*pg;j++) VE(beta,j)=betaS[j]; for(j=0;j<*antpers;j++) {VE(weight,j)=1; VE(offset,j)=1;} // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: setting up allocation %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); cu[0]=times[0]; for (it=0;it<*Nit || (*Nit==0 && it==0);it++) // {{{ iterations start for cox-aalen model { if (it>0) { vec_zeros(U); mat_zeros(S1); mat_zeros(A); mat_zeros(ZPZ); mat_zeros(ZPX); mat_zeros(ZX); mat_zeros(X); mat_zeros(Z); mat_zeros(WX); mat_zeros(WZ); } sumscore=0; S0=0; ci=0; R_CheckUserInterrupt(); for (s=1;s<*Ntimes;s++) // {{{ going through time { time=times[s]; // vec_zeros(lamt); // {{{ reading design and computing 
matrix products if (s==1) { // {{{ for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if (( (start[c]=time)) ) { for(j=0;j=0) ) ci=ci-1; } // }}} vec_zeros(rowX); vec_zeros(rowZ); if (s>1) // {{{ modifying design for next time points while ((stop[ci]=0) ) { VE(Gbeta,id[ci])=0; // vec_prod(zi,beta); for(j=0;j<*px;j++) VE(xi,j)=designX[j*(*nx)+ci]; for(j=0;j<*pg;j++) { VE(zi,j)=designG[j*(*nx)+ci]; VE(Gbeta,id[ci])+=VE(zi,j)*VE(beta,j); } RR=exp(VE(Gbeta,id[ci])); if (entry[ci]==1) { replace_row(X,id[ci],xi); replace_row(Z,id[ci],zi); scl_vec_mult(RR*weights[ci],xi,tmpv1);replace_row(WX,id[ci],tmpv1); scl_vec_mult(weights[ci],zi,tmpv2);replace_row(WZ,id[ci],tmpv2); VE(weight,id[ci])=weights[ci]; if (*mof==1) VE(offset,id[ci])=offs[ci]; } else { replace_row(X,id[ci],rowX);replace_row(Z,id[ci],rowZ); replace_row(WX,id[ci],rowX); replace_row(WZ,id[ci],rowZ); VE(Gbeta,id[ci])=0; VE(weight,id[ci])=0; if (*mof==1) VE(offset,id[ci])=offs[ci]; } S0+=entry[ci]*RR*weights[ci]; for(j=0;j1))) || ((*exactderiv==2) && (*px>1)) ) { mat_zeros(ZPZ1); mat_zeros(ZPX1); for (i=0;i<*antpers;i++) { extract_row(WX,i,xi); VE(lamt,i)=vec_prod(xi,dA); extract_row(Z,i,zi); scl_vec_mult(VE(lamt,i),zi,rowZ); replace_row(ZP,i,rowZ); extract_row(X,i,xi); for(j=0;j0)) vec_add(beta,delta,beta); for (k=0;k<*pg;k++) sumscore=sumscore+fabs(VE(U,k)); if ((sumscore<0.0000001) & (it<(*Nit)-2)) { it=*Nit-2; } } /* it */ // }}} //scl_mat_mult( (double) 1/(*antclust),SI,SI); if (*detail>=2) Rprintf("Fitting done \n"); if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: fitting done %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); // vec_zeros(Gbeta); lle=0; llo=0; ci=0; for (k=0;k<*pg;k++) score[k]=VE(U,k); mat_zeros(A); mat_zeros(ZPZ); mat_zeros(ZPX); mat_zeros(ZX); mat_zeros(X); mat_zeros(Z); mat_zeros(WX); mat_zeros(WZ); for (s=1;s<*Ntimes;s++) { // {{{ terms for robust variances time=times[s]; cu[s]=times[s]; vcu[s]=times[s]; 
Rvcu[timegroup[s]]=times[s]; cug[timegroup[s]]=times[s]; timesg[timegroup[s]]=times[s]; Ut[timegroup[s]]=times[s]; R_CheckUserInterrupt(); if (*robust==1) { sumscore=0; S0=0; // {{{ reading design and computing matrix products if (s==1) { // {{{ for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if (( (start[c]=time)) ) { for(j=0;j=0) ) ci=ci-1; } // }}} vec_zeros(rowX); vec_zeros(rowZ); if (s>1) // {{{ modifying design for next time points while ((stop[ci]=0) ) { VE(Gbeta,id[ci])=0; // vec_prod(zi,beta); for(j=0;j<*px;j++) VE(xi,j)=designX[j*(*nx)+ci]; for(j=0;j<*pg;j++) { VE(zi,j)=designG[j*(*nx)+ci]; VE(Gbeta,id[ci])+=VE(zi,j)*VE(beta,j); } RR=exp(VE(Gbeta,id[ci])); if (entry[ci]==1) { replace_row(X,id[ci],xi); replace_row(Z,id[ci],zi); scl_vec_mult(RR*weights[ci],xi,tmpv1);replace_row(WX,id[ci],tmpv1); scl_vec_mult(weights[ci],zi,tmpv2);replace_row(WZ,id[ci],tmpv2); VE(weight,id[ci])=weights[ci]; if (*mof==1) VE(offset,id[ci])=offs[ci]; } else { replace_row(X,id[ci],rowX);replace_row(Z,id[ci],rowZ); replace_row(WX,id[ci],rowX); replace_row(WZ,id[ci],rowZ); VE(Gbeta,id[ci])=0; VE(weight,id[ci])=0; if (*mof==1) VE(offset,id[ci])=offs[ci]; } S0+=entry[ci]*RR*weights[ci]; // for(j=0;j=1) for (i=0;i<*antpers;i++) // {{{ { cin=cluster[i]; extract_row(WX,i,rowX); extract_row(Z,i,zi); extract_row(X,i,xi); hati=vec_prod(rowX,dA); if (*ratesim==1) { // Rprintf("%d %d %d %d %lf \n",s,i,ipers[s],pers,hati); Mv(ZXAI,xi,tmpv2); vec_subtr(zi,tmpv2,tmpv2); scl_vec_mult(VE(weight,i),tmpv2,tmpv2); if (i==pers) vec_add(tmpv2,W2[cin],W2[cin]); if (*ratesim==1) {scl_vec_mult(hati,tmpv2,rowZ); vec_subtr(W2[cin],rowZ,W2[cin]); } Mv(AI,xi,rowX); scl_vec_mult(VE(weight,i),rowX,rowX); if (i==pers) {vec_add(rowX,W3[cin],W3[cin]); } llo=llo+hati; if (*ratesim==1) {scl_vec_mult(hati,rowX,rowX); vec_subtr(W3[cin],rowX,W3[cin]);} } if (*retur==1) dhatMit[i*(*Ntimes)+s]=1*(i==pers)-hati; if (*retur==2) dhatMit[i]=dhatMit[i]+1*(i==pers)-hati; } /* i 1.. 
antpers */ // }}} } if ((*ratesim==0) && (*robust==1)) { // {{{ compute resampling counting process LWY style version cin=cluster[pers]; extract_row(WX,pers,rowX); extract_row(Z,pers,zi); extract_row(X,pers,xi); hati=vec_prod(rowX,dA); // if (*detail==2) Rprintf(" %d %d \n",cin,pers); Mv(ZXAI,xi,tmpv2); vec_subtr(zi,tmpv2,tmpv2); scl_vec_mult(VE(weight,pers),tmpv2,tmpv2); // squaring to deal with counting process'es // for (k=0;k<*pg;k++) VE(tmpv2,k)=pow(VE(tmpv2,k),2); vec_add(tmpv2,W2[cin],W2[cin]); Mv(AI,xi,rowX); scl_vec_mult(VE(weight,pers),rowX,rowX); vec_add(rowX,W3[cin],W3[cin]); for (s1=timegroup[s];s1<*maxtimepoint;s1++) { // for (k=0;k<*pg;k++) VE(tmpv2,k)=sqrt(VE(W2[cin],k)); replace_row(W2t[cin],s1,tmpv2); replace_row(W3t[cin],s1,W3[cin]); } llo=llo+hati; } // }}} if (*robust==1 && *ratesim==1) for (j=0;j<*antclust;j++) { replace_row(W2t[j],timegroup[s],W2[j]); replace_row(W3t[j],timegroup[s],W3[j]); } /* MG baseret varians beregning */ for (j=0;j<*pg;j++) for (i=0;i<*px;i++) { ME(M1M2t,j,i)=ME(M1M2n,j,(s-1)*(*px)+i); ME(Ct,i,j)= ME(Cn,(s-1)*(*px)+i,j); } MxA(Ct,VU,tmp3); MAt(tmp3,Ct,CtVUCt); MxA(Ct,SI,tmp3); // printf(" %d %d %d %d \n",0,(s-1)*(*px),*pg,s*(*px)); // print_mat(M1M2t); // print_mat(M1M2n); // print_mat(M1M2t); // mat_subsec(M1M2n,0,(s-1)*(*px),*pg,s*(*px),M1M2t); // MxA(tmp3,M1M2[s],COV); // print_mat(COV); MxA(tmp3,M1M2t,COV); // print_mat(COV); for (k=1;k<=*px;k++) { if (*betafixed==0) vcu[k*(*Ntimes)+s]+=ME(CtVUCt,k-1,k-1) +2*ME(COV,k-1,k-1); // else vcu[k*(*Ntimes)+s]=vcu[k*(*Ntimes)+s]; } for (k=1;k<=*pg;k++) Ut[k*(*maxtimepoint)+timegroup[s]]=ME(Utt,timegroup[s],k-1); } // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: robust variance terms 1 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} if (*detail>=2) Rprintf("Robust variances \n"); R_CheckUserInterrupt(); ll=lle-llo; /* likelihood beregnes */ if (*detail==1) Rprintf("loglike is %lf \n",ll); if ((*robust==1)) // {{{ robust variances { for 
(s=1;s<*maxtimepoint;s++) { vec_zeros(VdB); mat_zeros(Vcov); for (j=0;j<*antclust;j++) { // {{{ if (s==1 && *detail==3) { Rprintf("================================================= %d \n",j); print_mat(W2t[j]); print_vec(W2[j]); print_mat(Stg[s]); print_mat(S1); print_mat(SI); } // counting process style simulation if (s==1) if (*ratesim==0) for (k=1;k<=*pg;k++) VE(W2[j],k)=sqrt(VE(W2[j],k)); Mv(SI,W2[j],tmpv2); Mv(Cg[s],tmpv2,rowX); extract_row(W3t[j],s,tmpv1); vec_add(tmpv1,rowX,difX); if (*betafixed==1) scl_vec_mult(1,tmpv1,difX); replace_row(W4t[j],s,difX); vec_star(difX,difX,tmpv1); vec_add(tmpv1,VdB,VdB); if (s==1) if (*betafixed==0) { for (c=0;c<*pg;c++) gamiid[c*(*antclust)+j]=gamiid[c*(*antclust)+j]+VE(tmpv2,c); } if (*resample==1) { for (c=0;c<*px;c++) {l=j*(*px)+c; biid[l*(*maxtimepoint)+s]=biid[l*(*maxtimepoint)+s]+VE(difX,c); } } if (*covariance==1) { for (k=0;k<*px;k++) for (c=0;c<*px;c++) ME(Vcov,k,c)=ME(Vcov,k,c)+VE(difX,k)*VE(difX,c); } Mv(Stg[s],tmpv2,rowZ); extract_row(W2t[j],s,tmpv2); if (*betafixed==0) { vec_subtr(tmpv2,rowZ,zi); replace_row(Uti[j],s,zi); } else replace_row(Uti[j],s,tmpv2); vec_star(zi,zi,tmpv2); vec_add(tmpv2,varUthat[s],varUthat[s]); } // }}} /* j in clusters */ if (*betafixed==0) for (i=0;i<*pg;i++) vscore[(i+1)*(*maxtimepoint)+s]=VE(varUthat[s],i); for (k=1;k<*px+1;k++) { Rvcu[k*(*maxtimepoint)+s]=VE(VdB,k-1); if (*covariance==1) { for (j=0;j<*px;j++) { l=(k-1)*(*px)+j; Vcovs[l*(*maxtimepoint)+s]=ME(Vcov,k-1,j); } } } } /* s=1 ..maxtimepoints */ } /* if robust==1 */ // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance terms 2 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} if (*detail>=2) Rprintf("Robust variances \n"); if (*betafixed==0) for (j=0;j<*antclust;j++) { Mv(SI,W2[j],tmpv2); for (c=0;c<*pg;c++) for (k=0;k<*pg;k++) ME(RobVbeta,c,k)=ME(RobVbeta,c,k)+VE(W2[j],c)*VE(W2[j],k); for (k=0;k<*pg;k++) gammaiid[j*(*pg)+k]=VE(tmpv2,k); } MxA(RobVbeta,SI,ZPZ); MxA(SI,ZPZ,RobVbeta); 
if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance terms 3 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); for(j=0;j<*pg;j++) { betaS[j]= VE(beta,j); loglike[0]=lle; loglike[1]=ll; for (k=0;k<*pg;k++){ Iinv[k*(*pg)+j]=ME(SI,j,k); Vbeta[k*(*pg)+j]=-ME(VU,j,k); RVbeta[k*(*pg)+j]=-ME(RobVbeta,j,k); } } if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance terms 4 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} // for(j=0;j<*antclust;j++) print_mat(Uti[j]); if (*sim==1) { // {{{ score process simulations // Rprintf("Simulations start N= %ld \n",(long int) *antsim); tau=times[*Ntimes-1]-times[0]; for (i=1;i<=*px;i++) VE(rowX,i-1)=cug[i*(*maxtimepoint)+(*maxtimepoint-1)]; for (s=1;s<*maxtimepoint;s++) { // {{{ /* Beregning af OBS teststrrelser */ time=timesg[s]-times[0]; // FIX for (i=1;i<=*px;i++) { VE(xi,i-1)=fabs(cug[i*(*maxtimepoint)+s])/sqrt(Rvcu[i*(*maxtimepoint)+s]); if (VE(xi,i-1)>testOBS[i-1]) testOBS[i-1]=VE(xi,i-1); } scl_vec_mult(time/tau,rowX,difX); for (i=1;i<=*px;i++) VE(xi,i-1)=cug[i*(*maxtimepoint)+s]; vec_subtr(xi,difX,difX); for (i=0;i<*px;i++) { VE(difX,i)=fabs(VE(difX,i)); l=(*px+i); if (VE(difX,i)>testOBS[l]) testOBS[l]=VE(difX,i); } if (*wscore>=1) { /* sup beregnes i R */ if ((s>*wscore) && (s<*maxtimepoint-*wscore)) {extract_row(Utt,s,rowZ); for (i=0;i<*pg;i++) VE(rowZ,i) = VE(rowZ,i)/sqrt(VE(varUthat[s],i)); replace_row(Utt,s,rowZ); /* scaled score process */ } else {vec_zeros(rowZ); replace_row(Utt,s,rowZ);} } for (k=1;k<=*pg;k++) Ut[k*(*maxtimepoint)+s]=ME(Utt,s,k-1); } // }}} *s=1..maxtimepoint Beregning af obs teststrrelser for (k=1;k<=*antsim;k++) { R_CheckUserInterrupt(); mat_zeros(Delta); mat_zeros(Delta2); vec_zeros(tmpv1); for (i=0;i<*antclust;i++) { /* random=gasdev(&idum); */ random=norm_rand(); scl_mat_mult(random,W4t[i],tmpM1); mat_add(tmpM1,Delta,Delta); scl_mat_mult(random,Uti[i],tmpM2); mat_add(tmpM2,Delta2,Delta2); } 
extract_row(Delta,*maxtimepoint-1,tmpv1); for (s=1;s<*maxtimepoint;s++) { time=timesg[s]-times[0]; scl_vec_mult(time/tau,tmpv1,xi); extract_row(Delta,s,rowX); vec_subtr(rowX,xi,difX); if (*addresamp==1) { if (k<51) { for (i=0;i<*px;i++) {l=(k-1)*(*px)+i; addproc[l*(*maxtimepoint)+s]=ME(Delta,s,i);}} } for (i=0;i<*px;i++) { VE(difX,i)=fabs(VE(difX,i)); l=(*px+i); if (VE(difX,i)>test[l*(*antsim)+k-1]) test[l*(*antsim)+k-1]=VE(difX,i); VE(xi,i)=fabs(ME(Delta,s,i))/sqrt(Rvcu[(i+1)*(*maxtimepoint)+s]); if (VE(xi,i)>test[i*((*antsim))+k-1]) test[i*((*antsim))+k-1]=VE(xi,i); } if (*wscore>=1) { extract_row(Delta2,s,zi); if ((s>*wscore) && (s<*maxtimepoint-*wscore)) { for (i=0;i<*pg;i++) {VE(zi,i)=fabs(ME(Delta2,s,i))/sqrt(VE(varUthat[s],i)); if (VE(zi,i)>simUt[i*(*antsim)+k-1]) simUt[i*(*antsim)+k-1]=VE(zi,i); } if (k<50) { for (i=0;i<*pg;i++) { l=(k-1)*(*pg)+i; Uit[l*(*maxtimepoint)+s]=ME(Delta2,s,i)/sqrt(VE(varUthat[s],i));}} } } /* weigted score */ else { extract_row(Delta2,s,zi); for (i=0;i<*pg;i++) { if (fabs(VE(zi,i))>simUt[i*(*antsim)+k-1]) simUt[i*(*antsim)+k-1]=fabs(VE(zi,i)); } if (k<50) { for (i=0;i<*pg;i++) { l=(k-1)*(*pg)+i; Uit[l*(*maxtimepoint)+s]=ME(Delta2,s,i);} } } /* else wscore=0 */ } /* s=1..Ntims */ } /* k=1..antsim */ } /* sim==1 */ // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: before freeing %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} PutRNGstate(); /* to use R random normals */ // {{{ freeing if (*sim==1) free_mats(&Delta,&Delta2,&tmpM2,&tmpM1,NULL); free_mats(&Cn,&M1M2n,&ZXAIn,&AIn,NULL); free_mats(&dAt,&Utt,&WX,&X,&cdesX,&cdesX2,&cdesX3, &WZ,&ZP,&Z, &Vcov,&COV,&A,&AI,&M1,&CtVUCt, &RobVbeta,&ZPZ,&tmp2,&dS,&S1,&S2,&SI,&VU,&VUI, &ZXAI,&ZX,&dM1M2,&M1M2t, &tmp3,&ZPX,&dYI,&Ct, &ZPX1,&ZPZ1, &ZPXo,&ZPZo,NULL); free_vecs(&reszpbeta,&res1dim,&weight,&lamtt,&lamt,&zcol,&Gbeta,&one,&offset, &ahatt,&tmpv1,&difX,&VdB,&rowX,&xi,&dA,&VdA,&MdA, &xtilde, &tmpv2,&rowZ,&zi,&U,&beta,&delta,&zav,&difzzav,&Uprofile, 
&ta,&vrisk,NULL); if (*robust==1) { for (j=0;j<*antclust;j++) { free_mat(W3t[j]); free_mat(W4t[j]); free_mat(W2t[j]); free_vec(W3[j]); free_mat(Uti[j]); free_vec(Ui[j]); } for (j=0;j<*maxtimepoint;j++) free_vec(varUthat[j]); } for (j=0;j<*antclust;j++) free_vec(W2[j]); // for (j=0;j<*Ntimes;j++) { //// free_mat(C[j]);free_mat(M1M2[j]); free_mat(ZXAIs[j]); //// free_vec(ZXdA[j]); //// free_mat(St[j]); // } for(j=0;j<*maxtimepoint;j++) { free_mat(Cg[j]); free_mat(Stg[j]);} free(cluster); free(ipers); free(imin); free(cug); free(timesg); // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: after freeing %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} } timereg/src/aalenC.c0000644000175000017500000001040214077524411014160 0ustar nileshnilesh#include //#include #include #include "matrix.h" void robaalenC(times,Ntimes,designX,nx,p,antpers,start,stop,cu,vcu, robvcu,sim,antsim,retur,cumAit,test,rani,testOBS,status, Ut,simUt,id,weighted,robust,covariance,covs,resample, Biid,clusters,antclust,loglike,silent) double *designX,*times,*start,*stop,*cu,*vcu,*robvcu,*cumAit,*test,*testOBS,*Ut,*simUt,*covs,*Biid,*loglike; int *nx,*p,*antpers,*Ntimes,*sim,*retur,*rani,*antsim,*status,*id,*covariance, *weighted,*robust,*resample,*clusters,*antclust,*silent; { // {{{ matrix *ldesignX, *QR, *R, *A, *AI, *Vcov; matrix *cumAt[*antclust]; vector *diag,*dB,*dN,*VdB,*xi,*rowX,*rowcum,*difX,*vtmp; vector *cumhatA[*antclust],*cumA[*antclust],*cum; int ci,i,j,k,l,s,c,count,pers=0,*cluster=calloc(*antpers,sizeof(int)); double time,ahati,*vcudif=calloc((*Ntimes)*(*p+1),sizeof(double)); double fabs(),sqrt(); if (*robust==1) { for (i=0;i<*antclust;i++) { malloc_vec(*p,cumhatA[i]); malloc_vec(*p,cumA[i]); if (*sim==1) malloc_mat(*Ntimes,*p,cumAt[i]); } } /* print_clock(&debugTime, 0); */ malloc_mat(*antpers,*p,ldesignX); malloc_mat(*p,*p,QR); malloc_mat(*p,*p,Vcov); malloc_mat(*p,*p,A); malloc_mat(*p,*p,AI); malloc_mat(*antpers,*p,R); malloc_vec(*antpers,dN); 
malloc_vecs(*p,&cum,&diag,&dB,&VdB,&xi,&rowX,&rowcum,&difX,&vtmp,NULL); // for (j=0;j<*antpers;j++) cluster[j]=0; /* print_clock(&debugTime, 1); */ R_CheckUserInterrupt(); for (s=1;s<*Ntimes;s++){ time=times[s]; mat_zeros(ldesignX); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++){ if ((start[c]=time)) { for(j=0;j<*p;j++) { ME(ldesignX,id[c],j) = designX[j*(*nx)+c]; } cluster[id[c]]=clusters[c]; if (time==stop[c] && status[c]==1) { pers=id[c]; } count=count+1; } } // readXt(antpers,nx,p,designX,start,stop,status,pers,ldesignX,time,clusters,cluster,id); MtM(ldesignX,A); invertS(A,AI,silent[0]); if (ME(AI,0,0)==0.0 && *silent==0){ Rprintf(" X'X not invertible at time %lf \n",time); } if (s < -1) { print_mat(AI); print_mat(A); } extract_row(ldesignX,pers,xi); Mv(AI,xi,dB); vec_star(dB,dB,VdB); vec_star(xi,dB,vtmp); ahati = vec_sum(vtmp); loglike[0]=loglike[0]-ahati/(time-times[s-1]); for (k=1;k<*p+1;k++) { cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s-1]+VE(dB,k-1); vcu[k*(*Ntimes)+s]=vcu[k*(*Ntimes)+s-1]+VE(VdB,k-1); VE(cum,k-1)=cu[k*(*Ntimes)+s]; } cu[s]=time; vcu[s]=time; robvcu[s]=time; if (*robust==1 || *retur==1) { vec_zeros(VdB); mat_zeros(Vcov); for (i=0;i<*antpers;i++) { ci=cluster[i]; extract_row(ldesignX,i,xi); ahati=vec_prod(xi,dB); Mv(AI,xi,rowX); if (*robust==1) { if (i==pers) { vec_add(rowX,cumhatA[ci],cumhatA[ci]); } scl_vec_mult(ahati,rowX,rowX); vec_add(rowX,cumA[ci],cumA[ci]); } if (*retur==1){ cumAit[i*(*Ntimes)+s]= cumAit[i*(*Ntimes)+s]+1*(i==pers)-ahati; } } if (*robust==1) { for (i=0;i<*antclust;i++) { vec_subtr(cumhatA[i],cumA[i],difX); if (*sim==1) replace_row(cumAt[i],s,difX); vec_star(difX,difX,vtmp); vec_add(vtmp,VdB,VdB); if (*resample==1) { for (k=0;k<*p;k++) {l=i*(*p)+k; Biid[l*(*Ntimes)+s]=VE(difX,k);} } if (*covariance==1) { for (k=0;k<*p;k++) for (c=0;c<*p;c++) ME(Vcov,k,c) = ME(Vcov,k,c) + VE(difX,k)*VE(difX,c); } } for (k=1;k<*p+1;k++) { robvcu[k*(*Ntimes)+s]=VE(VdB,k-1); if (*covariance==1) { for (c=0;c<*p;c++) { l=(k-1)*(*p)+c; 
covs[l*(*Ntimes)+s]=ME(Vcov,k-1,c); } } } } } /* if robust==1 || retur==1*/ R_CheckUserInterrupt(); } /* s = 1..Ntimes */ R_CheckUserInterrupt(); if (*sim==1) { comptest(times,Ntimes,p,cu,robvcu,vcudif,antsim,test,testOBS,Ut,simUt,cumAt,weighted,antclust); } cu[0]=times[0]; vcu[0]=times[0]; robvcu[0]=times[0]; free_vecs(&dN,&cum,&diag,&dB,&VdB,&xi,&rowX,&rowcum,&difX,&vtmp,NULL); free_mats(&ldesignX,&QR,&Vcov,&A,&AI,&R,NULL); if (*robust==1){ for (i=0;i<*antclust;i++) { free_vec(cumA[i]); free_vec(cumhatA[i]); if (*sim==1) free_mat(cumAt[i]); } } free(cluster); free(vcudif); } // }}} timereg/src/smooth.c0000644000175000017500000000645314077524411014321 0ustar nileshnilesh//#include #include #include "matrix.h" double tukey(x,b) double x,b; { return((1/b)*((cos(3.141592 *(x/b))+ 1)/2) * (fabs(x/b) < 1)); } double dtukey(x,b) double x,b; { return((-3.141592/b*b)*(sin(3.141592 *(x/b))/2)*(fabs(x/b) < 1)); } void smoothB(designX,nx,p,bhat,nb,b,degree,coef) double *designX,*bhat,*b; int *coef,*nx,*p,*degree,*nb; { // {{{ matrix *mat1,*mat2,*II,*I; vector *XWy,*Y,*RES,*sY; int count,j,k,s,d; int silent=1; double tukey(),x,w,band; matrix *sm1,*sm2; malloc_mat(*nx,(*degree)+1,mat1); malloc_mat(*nx,(*degree)+1,mat2); malloc_mat(*nx,(*degree)+1,sm1); malloc_mat(*nx,(*degree)+1,sm2); malloc_vec(*nx,Y); malloc_vec(*nx,sY); malloc_vec((*degree)+1,XWy); malloc_vec((*degree)+1,RES); malloc_mat((*degree)+1,(*degree)+1,II); malloc_mat((*degree)+1,(*degree)+1,I); for (s=0;s<*nb;s++){ x=bhat[s]; for (k=1;k<*p;k++) { vec_zeros(Y); mat_zeros(mat1); mat_zeros(mat2); count=0; vec_zeros(RES); band=b[(k-1)*(*nb)+s]; /* Rprintf("band %lf %ld \n",band,k); */ for (j=0;j<*nx;j++) { if (fabs(designX[j]-x)=4) { MtA(mat1,mat2,II); invertS(II,I,silent); vM(mat1,Y,XWy); vM(I,XWy,RES); }; bhat[k*(*nb)+s]=VE(RES,*coef); } /* components */ } /* times */ free_mat(sm1); free_mat(sm2); free_mat(mat1); free_mat(mat2); free_mat(I); free_mat(II); free_vec(sY); free_vec(Y); free_vec(XWy); free_vec(RES); } // 
}}} void localTimeReg(designX,nx,p,times,response,bhat,nb,b,lin,dens) double *designX,*bhat,*b,*times,*response,*dens; int *nx,*p,*nb,*lin; { matrix *X,*AI,*A; vector *res,*Y,*XY; int c,j,k,s,silent=1; double band,tukey(),dtukey(),x,w,delta; j=(*lin+1)*(*p); malloc_mat(*nx,j,X); malloc_mat(j,j,A); malloc_mat(j,j,AI); malloc_vec(*nx,Y); malloc_vec(j,XY); malloc_vec(j,res); /* Rprintf("enters Local Time Regression \n"); */ for (s=0;s<*nb;s++){ x=bhat[s]; for (c=0;c<*nx;c++){ delta=times[c]-x; band=b[s]; w=tukey(delta,band); dens[s]=dens[s]+w; dens[(*nb)+s]=dens[(*nb)+s]+dtukey(delta,b[s]); for(j=0;j<*p;j++) { ME(X,c,j)=designX[j*(*nx)+c]*sqrt(w); if (*lin>=1) ME(X,c,*p+j)=designX[j*(*nx)+c]*delta*sqrt(w); if (*lin>=2) ME(X,c,2*(*p)+j)=delta*ME(X,c,*p+j); if (*lin==3) ME(X,c,3*(*p)+j)=delta*ME(X,c,2*(*p)+j); } VE(Y,c)=response[c]*sqrt(w); } dens[s]=dens[s]/(*nx); dens[(*nb)+s]=dens[(*nb)+s]/(*nx); MtA(X,X,A); invertS(A,AI,silent); if (ME(AI,0,0)==0.0){ Rprintf("Non-invertible design in local smoothing at time %lf \n",x); } vM(X,Y,XY); Mv(AI,XY,res); for (k=1;k<((*lin)+1)*(*p)+1;k++){ bhat[k*(*nb)+s]=VE(res,k-1); } } free_mat(A); free_mat(AI); free_mat(X); free_vec(Y); free_vec(XY); free_vec(res); } timereg/src/aalen-test.c0000644000175000017500000004621514077524411015045 0ustar nileshnilesh//#include #include #include "matrix.h" void robaalentest(times,Ntimes,designX,nx,p,antpers,start,stop,cu,vcu,robvcu,sim,antsim,retur,cumAit,test,rani,testOBS,status,Ut,simUt,id,weighted,robust,covariance,covs,resample,Biid,clusters,antclust,loglike,mof,offset,mw,weight,silent) double *designX,*times,*start,*stop,*cu,*vcu,*robvcu,*cumAit,*test,*testOBS,*Ut,*simUt,*covs,*Biid,*loglike,*offset,*weight; int *nx,*p,*antpers,*Ntimes,*sim,*retur,*rani,*antsim,*status,*id,*covariance, *weighted,*robust,*resample,*clusters,*antclust,*mw,*mof,*silent; { matrix *WX,*ldesignX,*A,*AI,*Vcov,*cumAt[*antclust]; vector *diag,*dB,*dN,*VdB,*xi,*rowX,*rowcum,*difX,*vtmp,*cum,*offsets; vector 
*vrisk,*cumhatA[*antclust],*cumA[*antclust]; int i,j,k,l,s,c,count,pers=0; int stat,*cluster=calloc(*antpers,sizeof(int)); double time,ahati,dtime; double *vcudif=calloc((*Ntimes)*(*p+1),sizeof(double)), *weights=calloc(*antpers,sizeof(double)); double fabs(),sqrt(); if (*robust==1) { for (i=0;i<*antclust;i++) { malloc_vec(*p,cumhatA[i]); malloc_vec(*p,cumA[i]); malloc_mat(*Ntimes,*p,cumAt[i]);} } malloc_mat(*antpers,*p,ldesignX); malloc_mat(*antpers,*p,WX); malloc_mats(*p,*p,&Vcov,&A,&AI,NULL); malloc_vecs(*antpers,&vrisk,&dN,&offsets,NULL); malloc_vecs(*p,&cum,&diag,&dB,&VdB,&xi,&rowX,&rowcum,&difX,&vtmp,NULL); for (j=0;j<*antpers;j++) cluster[j]=0; for (s=1;s<*Ntimes;s++) { time=times[s]; mat_zeros(ldesignX); dtime=time-times[s-1]; mat_zeros(WX); stat=0; vec_zeros(vrisk); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if ((start[c]=time)) { if (*mof==1) VE(offsets,id[c])=offset[c]; if (*mw==1) weights[id[c]]=weight[c]; else weights[id[c]]=1; cluster[id[c]]=clusters[c]; VE(vrisk,id[c])=1.0; for(j=0;j<*p;j++) {ME(ldesignX,id[c],j)=designX[j*(*nx)+c]; ME(WX,id[c],j)=weights[id[c]]*designX[j*(*nx)+c];} if (time==stop[c] && status[c]==1) {pers=id[c];stat=1;} count=count+1; } } if (*mof==1 || stat==1) {MtA(ldesignX,WX,A); invertS(A,AI,silent[0]); if (ME(AI,0,0)==0 && *silent==0) Rprintf("X'X not invertible at time %lf \n",time); if (s<-1) {print_mat(AI); print_mat(A);} } if (stat==1) {extract_row(WX,pers,xi); Mv(AI,xi,dB);} else vec_zeros(dB); vec_star(dB,dB,VdB); if (*mof==1) {vM(WX,offsets,rowX); Mv(AI,rowX,diag); scl_vec_mult(dtime,diag,diag); vec_subtr(dB,diag,dB); } vec_star(xi,dB,vtmp); ahati=vec_sum(vtmp); if (*mof==1) ahati=ahati+dtime*VE(offsets,pers); loglike[0]=loglike[0]+log(ahati); loglike[1]=loglike[1]-ahati; for (k=1;k<*p+1;k++) { cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s-1]+VE(dB,k-1); vcu[k*(*Ntimes)+s]=vcu[k*(*Ntimes)+s-1]+VE(VdB,k-1); VE(cum,k-1)=cu[k*(*Ntimes)+s];} /* Rprintf(" %lf %lf \n",time,dtime); Rprintf(" %lf \n",vec_sum(offsets)); 
print_mat(AI); print_vec(rowX); print_vec(xi); print_vec(dB); print_vec(diag); for (k=1;k<*p+1;k++) { Rprintf(" %lf ",cu[k*(*Ntimes)+s]); } Rprintf(" \n "); */ robvcu[s]=time; cu[s]=time; vcu[s]=time; if (*robust==1 || *retur==1) { vec_zeros(VdB); mat_zeros(Vcov); for (i=0;i<*antpers;i++) // {{{ { j=cluster[i]; extract_row(ldesignX,i,xi); Mv(AI,xi,rowX); if (*mw==1) scl_vec_mult(weights[i],rowX,rowX); vec_star(xi,dB,vtmp); ahati=vec_sum(vtmp); if (*mof==1) ahati=ahati+dtime*VE(offsets,i); /* loglike[0]=loglike[0]-ahati; */ if (*robust==1) { if (i==pers) vec_add(rowX,cumhatA[j],cumhatA[j]); scl_vec_mult(ahati,rowX,rowX); vec_add(rowX,cumA[j],cumA[j]); } if (*retur==1) cumAit[i*(*Ntimes)+s]=cumAit[i*(*Ntimes)+s]+ weights[i]*(1*(i==pers)-ahati); } // i=0 ... antpers // }}} if (*robust==1) { // {{{ for (j=0;j<*antclust;j++) { vec_subtr(cumhatA[j],cumA[j],difX); replace_row(cumAt[j],s,difX); vec_star(difX,difX,vtmp); vec_add(vtmp,VdB,VdB); if (*resample==1) { for (k=0;k<*p;k++) { l=j*(*p)+k; Biid[l*(*Ntimes)+s]=VE(difX,k); }} if (*covariance==1) { for (k=0;k<*p;k++) for (c=0;c<*p;c++) ME(Vcov,k,c)=ME(Vcov,k,c)+VE(difX,k)*VE(difX,c);} } /* j in cluster */ for (k=1;k<*p+1;k++) {robvcu[k*(*Ntimes)+s]=VE(VdB,k-1); if (*covariance==1) { for (c=0;c<*p;c++) { l=(k-1)*(*p)+c; covs[l*(*Ntimes)+s]=ME(Vcov,k-1,c);}} } } /* if robust==1 */ // }}} } } /* s = 1..Ntimes */ if (*sim==1) { comptest(times,Ntimes,p,cu,robvcu,vcudif,antsim,test,testOBS,Ut,simUt,cumAt,weighted,antclust); } cu[0]=times[0]; vcu[0]=times[0]; robvcu[0]=times[0]; free_vecs(&offsets,&xi,&rowX,&diag,&dB,&VdB,&rowcum,&cum,&vtmp,NULL); free_mats(&Vcov,&ldesignX,NULL); if (*robust==1) for (i=0;i<*antclust;i++) {free_vec(cumA[i]);free_vec(cumhatA[i]);free_mat(cumAt[i]);} free(weights); free(vcudif); free(cluster); } /* robaalentest main */ void 
semiaalentest(alltimes,Nalltimes,Ntimes,designX,nx,px,designG,ng,pg,antpers,start,stop,nb,bhat,cu,vcu,Robvcu,gamma,Vgamma,RobVgamma,sim,antsim,test,rani,testOBS,robust,status,Ut,simUt,id,weighted,cumAit,retur,covariance,covs,resample,gammaiid,Biid,clusters,antclust,loglike,intZHZ,intZHdN,deltaweight,mof,offset,mw,weight,gamfix, pseudoscore,pscoret,pscoretiid,intZHZt,ptest,intHdN,intHZ,varintHdN,silent) double *designX,*alltimes,*start,*stop,*cu,*vcu,*bhat,*designG,*gamma,*Vgamma,*RobVgamma,*Robvcu,*test,*testOBS,*Ut,*simUt,*cumAit,*covs,*Biid,*gammaiid,*loglike,*intZHZ,*intZHdN,*offset,*weight,*pscoret,*pscoretiid,*intZHZt,*ptest, *intHdN,*intHZ,*varintHdN; int *nx,*px,*antpers,*Nalltimes,*Ntimes,*nb,*ng,*pg,*sim,*antsim,*rani,*robust,*status,*id,*weighted,*retur,*covariance,*resample,*clusters,*antclust,*deltaweight,*mof,*mw,*gamfix,*pseudoscore,*silent; { matrix *Vcov,*X,*WX,*A,*AI,*AIXW,*Z,*WZ,*tmpM1,*Delta,*iZHZt[*Ntimes]; matrix *dCGam,*CGam,*Ct,*ICGam,*VarKorG,*dC,*ZH,*XWZ,*ZWZ,*XWZAI,*HZ; matrix *Acorb[*Nalltimes],*Vargam,*dVargam,*M1M2[*Ntimes],*GCdM1M2; matrix *C[*Nalltimes],*dM1M2,*M1M2t,*RobVargam,*tmpM2,*tmpM3,*tmpM4; matrix *W3t[*antclust],*W4t[*antclust],*AIxit[*antpers],*Scoret[*antclust]; vector *W2[*antclust],*W3[*antclust]; vector *dB,*VdB,*difX,*xi,*tmpv1,*tmpv2,*gamoff,*vrisk; vector *dAoff,*dA,*rowX,*dN,*AIXWdN,*bhatt,*pbhat,*plamt; vector *korG,*pghat,*rowZ,*gam,*dgam,*ZHdN,*VZHdN,*IZHdN,*zi,*offsets; int m,i,j,k,l,c,s,count,pers=0,pmax,stat, *cluster=calloc(*antpers,sizeof(int)), *ipers=calloc(*Ntimes,sizeof(int)), *ls=calloc(*Ntimes,sizeof(int)); double dptime,ddtime,time,dtime,random,fabs(),sqrt(); double ahati,ghati,hati; double *vcudif=calloc((*Ntimes)*(*px+1),sizeof(double)), *times=calloc((*Ntimes),sizeof(double)), *cumoff=calloc((*Nalltimes)*(*px+1),sizeof(double)), *cuL=calloc((*Nalltimes)*(*px+1),sizeof(double)), *weights=calloc((*antpers),sizeof(double)); double norm_rand(); dptime=alltimes[0]; for (j=0;j<=*px;j++) 
cuL[j*(*Nalltimes)+0]=0.0; if (*pseudoscore>=1) { malloc_mat(*Ntimes,*pg,tmpM1); malloc_mat(*Ntimes,*pg,Delta); for (j=0;j<*Ntimes;j++) malloc_mat(*pg,*pg,iZHZt[j]); for (j=0;j<*antclust;j++) malloc_mat(*Ntimes,*pg,Scoret[j]);} malloc_mats(*antpers,*px,&X,&WX,NULL); malloc_mats(*antpers,*pg,&Z,&WZ,&HZ,NULL); malloc_mats(*px,*px,&Vcov,&A,&AI,&GCdM1M2,&VarKorG,NULL); malloc_mats(*pg,*pg,&tmpM2,&ZWZ,&RobVargam,&Vargam,&dVargam,&ICGam,&CGam,&dCGam,NULL); malloc_mats(*px,*antpers,&AIXW,NULL); malloc_mats(*pg,*antpers,&ZH,NULL); malloc_mats(*px,*pg,&tmpM4,&tmpM3,&Ct,&dC,&XWZ,&XWZAI,&dM1M2,&M1M2t,NULL); for (j=0;j<*Nalltimes;j++) {malloc_mat(*px,*pg,Acorb[j]);malloc_mat(*px,*pg,C[j]);} for (j=0;j<*Ntimes;j++) {malloc_mat(*px,*pg,M1M2[j]);} malloc_vecs(*px,&dAoff,&dA,&dB,&VdB,&difX,&xi,&tmpv1,&korG,&rowX,&AIXWdN,&bhatt,NULL); malloc_vecs(*pg,&gamoff,&zi,&tmpv2,&rowZ,&gam,&dgam,&ZHdN,&IZHdN,&VZHdN,NULL); malloc_vecs(*antpers,&vrisk,&offsets,&dN,&pbhat,&pghat,&plamt,NULL); if (*robust==1) { for (j=0;j<*antclust;j++) {malloc_mat(*Ntimes,*px,W3t[j]); malloc_mat(*Ntimes,*px,W4t[j]); malloc_vec(*pg,W2[j]); malloc_vec(*px,W3[j]); } for (j=0;j<*antpers;j++) malloc_mat(*Nalltimes,*px,AIxit[j]); } if (*px>=*pg) pmax=*px; else pmax=*pg; mat_zeros(Ct); mat_zeros(CGam); vec_zeros(IZHdN); times[0]=alltimes[0]; l=0; for (s=0;s<*pg;s++) VE(gam,s)=gamma[s]; for (j=0;j<*antpers;j++) cluster[j]=0; for (s=1;s<*Nalltimes;s++) { time=alltimes[s]; dtime=time-alltimes[s-1]; mat_zeros(X); mat_zeros(Z); mat_zeros(WX); mat_zeros(WZ); stat=0; for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if ((start[c]=time)) { if (*mof==1) VE(offsets,id[c])=offset[c]; if (*mw==1) weights[id[c]]=weight[c]; else weights[id[c]]=1; for(j=0;j=1) { pscoret[l]=time; scl_mat_mult(1,CGam,iZHZt[l]); for (k=0;k<*pg;k++) { pscoret[(k+1)*(*Ntimes)+l]=VE(IZHdN,k); for (c=0;c<*pg;c++) { m=k*(*pg)+c; intZHZt[m*(*Ntimes)+l]=ME(CGam,k,c); } } } } else vec_zeros(AIXWdN); /* correction from offsets calculated here */ if 
(*mof==1) {vM(WX,offsets,rowX); Mv(AI,rowX,tmpv1); scl_vec_mult(dtime,tmpv1,tmpv1); vec_subtr(AIXWdN,tmpv1,dB); vM(WZ,offsets,rowZ); vM(XWZAI,rowX,dgam); vec_subtr(rowZ,dgam,dgam); vec_add_mult(gamoff,dgam,dtime,gamoff); for (k=1;k<=*px;k++) cumoff[k*(*Nalltimes)+s]=VE(tmpv1,k-1); } /* Rprintf(" %lf %lf \n",time,dtime); Rprintf(" %lf \n",vec_sum(offsets)); print_mat(AI); print_vec(rowX); extract_row(WX,pers,rowX); print_vec(rowX); print_vec(dB); print_vec(tmpv1); for (k=1;k<*px+1;k++) {cuL[k*(*Nalltimes)+s]=cuL[k*(*Nalltimes)+s-1]+VE(dB,k-1); Rprintf(" %lf ",cuL[k*(*Nalltimes)+s]); } Rprintf(" \n "); */ Acorb[s]=mat_copy(XWZAI,Acorb[s]); scl_mat_mult(dtime,XWZAI,tmpM4);mat_add(tmpM4,Ct,Ct); C[s]=mat_copy(Ct,C[s]); if (stat==1) { vcu[l]=time; cu[l]=time; times[l]=time; for (k=0;k<*pg;k++) { for (j=0;j<*pg;j++) ME(dVargam,k,j)= VE(ZHdN,j)*VE(ZHdN,k); for (j=0;j<*px;j++) ME(dM1M2,j,k)=VE(ZHdN,k)*VE(AIXWdN,j); } mat_add(dVargam,Vargam,Vargam); mat_add(dM1M2,M1M2t,M1M2t); M1M2[l]=mat_copy(M1M2t,M1M2[l]); for (k=1;k<=*px;k++) { cu[k*(*Ntimes)+l]=VE(AIXWdN,k-1); vcu[k*(*Ntimes)+l]=vcu[k*(*Ntimes)+l-1]+VE(AIXWdN,k-1)*VE(AIXWdN,k-1);} } if (*robust==1) for (i=0;i<*antpers;i++) { extract_row(WX,i,xi); Mv(AI,xi,rowX); replace_row(AIxit[i],s,rowX); } } /* s =1...Ntimes */ invertS(CGam,ICGam,silent[0]); if (*gamfix==0) Mv(ICGam,IZHdN,gam); if ((*mof==1) & (*gamfix==0)) {Mv(ICGam,gamoff,dgam); vec_subtr(gam,dgam,gam);} MxA(Vargam,ICGam,tmpM2); MxA(ICGam,tmpM2,Vargam); // if (*gamfix==1) mat_zeros(Vargam); l=0; vec_zeros(dAoff); for (s=1;s<*Nalltimes;s++) { time=alltimes[s]; vec_zeros(dN);dtime=time-alltimes[s-1]; mat_zeros(X); mat_zeros(Z); mat_zeros(WX); mat_zeros(WZ); stat=0; for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if ((start[c]=time)) { cluster[id[c]]=clusters[c]; if (*mof==1) VE(offsets,id[c])=offset[c]; if (*mw==1) weights[id[c]]=weight[c]; else weights[id[c]]=1; for(j=0;j=1 && stat==1) { for (k=0;k<*pg;k++) { m=j*(*pg)+k; 
pscoretiid[m*(*Ntimes)+l]=VE(W2[j],k);} replace_row(Scoret[j],l,W2[j]); } extract_row(AIxit[i],s,rowX); if (i==pers && stat==1) vec_add(rowX,W3[j],W3[j]); scl_vec_mult(hati,rowX,rowX); vec_subtr(W3[j],rowX,W3[j]); } if (*retur==1) { if (stat==0) cumAit[i*(*Ntimes)+l+1]= cumAit[i*(*Ntimes)+l+1]+1*(i==pers)*stat-hati; else cumAit[i*(*Ntimes)+l]= cumAit[i*(*Ntimes)+l]+1*(i==pers)*stat-hati; } // if (*gamfix==1) vec_zeros(W2[j]); if (stat==1) replace_row(W3t[j],l,W3[j]); } /* j=1..antclust */ } /* robust ==1 */ if (stat==1) { ddtime=time-dptime; extract_row(X,pers,xi); vec_star(xi,dA,rowX); ahati=vec_sum(rowX); extract_row(Z,pers,zi); vec_star(zi,gam,rowZ); ahati=ahati+vec_sum(rowZ)*ddtime; if (*mof==1) ahati=ahati+VE(offsets,pers)*ddtime; loglike[0]=loglike[0]+log(ahati); if (*deltaweight==1) loglike[1]=loglike[1]-ahati/dtime; if (*deltaweight==0) loglike[1]=loglike[1]-ahati; dptime=time; for (k=1;k<=*px;k++) cu[k*(*Ntimes)+l]=cu[k*(*Ntimes)+l-1]+ cu[k*(*Ntimes)+l]+VE(dAoff,k-1); vec_zeros(dAoff); MxA(C[ls[l]],Vargam,tmpM4); MAt(tmpM4,C[ls[l]],VarKorG); MxA(M1M2[l],ICGam,tmpM4); MAt(C[ls[l]],tmpM4,GCdM1M2); for (k=1;k<=*px;k++) {vcu[k*(*Ntimes)+l]= vcu[k*(*Ntimes)+l]+ME(VarKorG,k-1,k-1)-2*ME(GCdM1M2,k-1,k-1); } } } /* s=1 ..Ntimes */ vec_star(IZHdN,gam,rowZ); loglike[1]=loglike[1]-vec_sum(rowZ); for (s=1;s<*Ntimes;s++) { Mv(C[ls[s]],gam,korG); for (k=1;k<=*px;k++) cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s]-VE(korG,k-1); /* ROBUST VARIANCES */ if (*robust==1) { vec_zeros(VdB); mat_zeros(Vcov); for (j=0;j<*antclust;j++) { Mv(ICGam,W2[j],tmpv2); if (*gamfix==0) vec_zeros(rowX); else Mv(C[ls[s]],tmpv2,rowX); extract_row(W3t[j],s,tmpv1); vec_subtr(tmpv1,rowX,difX); replace_row(W4t[j],s,difX); vec_star(difX,difX,tmpv1); vec_add(tmpv1,VdB,VdB); if (*resample==1) { if (s==1) for (k=0;k<*pg;k++) gammaiid[k*(*antclust)+j]=VE(tmpv2,k); for (k=0;k<*px;k++) { l=j*(*px)+k; Biid[l*(*Ntimes)+s]=VE(difX,k);} } if (*covariance==1) { for (k=0;k<*px;k++) for (c=0;c<*px;c++) 
ME(Vcov,k,c)=ME(Vcov,k,c)+VE(difX,k)*VE(difX,c);} if (s==1) { for (c=0;c<*pg;c++) for (k=0;k<*pg;k++) ME(RobVargam,c,k)=ME(RobVargam,c,k)+VE(W2[j],c)*VE(W2[j],k);} } /* j =1 ..Antclust */ } /* robust==1 */ for (k=1;k<*px+1;k++) { Robvcu[k*(*Ntimes)+s]=VE(VdB,k-1); if (*covariance==1) { for (j=0;j<*px;j++) { l=(k-1)*(*px)+j; covs[l*(*Ntimes)+s]=ME(Vcov,k-1,j); }}} } /* s=1 ..Ntimes */ if (*robust==1) { MxA(RobVargam,ICGam,tmpM2); MxA(ICGam,tmpM2,RobVargam); } for (j=0;j<*pg;j++) {gamma[j]=VE(gam,j); intZHdN[j]=VE(IZHdN,j); for (k=0;k<*pg;k++) {Vgamma[k*(*pg)+j]=ME(Vargam,j,k); RobVgamma[k*(*pg)+j]=ME(RobVargam,j,k); intZHZ[k*(*pg)+j]=ME(CGam,j,k); }} cu[0]=times[0]; vcu[0]=times[0]; if (*sim==1) { comptest(times,Ntimes,px,cu,Robvcu,vcudif,antsim,test,testOBS,Ut,simUt,W4t,weighted,antclust); } if (*pseudoscore>=1) { GetRNGstate(); /* to use R random normals */ for (s=1;s<*Ntimes;s++) { for (k=0;k<*pg;k++) VE(tmpv2,k)=pscoret[(k+1)*(*Ntimes)+s]; Mv(iZHZt[s],gam,zi); vec_subtr(tmpv2,zi,tmpv2); for (k=0;k<*pg;k++) pscoret[(k+1)*(*Ntimes)+s]=VE(tmpv2,k); } for (j=0;j<*antclust;j++) { for (k=0;k<*pg;k++) VE(tmpv2,k)=gammaiid[k*(*antclust)+j]; for (s=1;s<*Ntimes;s++) { extract_row(Scoret[j],s,rowZ); Mv(iZHZt[s],tmpv2,zi); vec_subtr(rowZ,zi,rowZ); replace_row(Scoret[j],s,rowZ); } } for (k=1;k<=*pseudoscore;k++) { mat_zeros(Delta); for (i=0;i<*antclust;i++) { /* random=gasdev(&idum); */ random=norm_rand(); scl_mat_mult(random,Scoret[i],tmpM1); mat_add(tmpM1,Delta,Delta); } for (s=1;s<*Ntimes;s++) { extract_row(Delta,s,rowZ); for (i=0;i<*pg;i++) { VE(rowZ,i)=fabs(VE(rowZ,i)); if (VE(rowZ,i)>ptest[i*(*pseudoscore)+k]) ptest[i*(*pseudoscore)+k]=VE(rowZ,i); } } } PutRNGstate(); /* to use R random normals */ } free_mats(&HZ,&X,&WX,&Z,&WZ, &Vcov,&A,&AI,&GCdM1M2, &tmpM2,&ZWZ,&VarKorG,&RobVargam,&Vargam,&dVargam,&ICGam,&CGam,&dCGam, &AIXW, &ZH, &tmpM4,&tmpM3,&Ct,&dC,&XWZ,&XWZAI,&dM1M2,&M1M2t,NULL); for (j=0;j<*Nalltimes;j++) {free_mat(Acorb[j]); free_mat(C[j]); } for 
(j=0;j<*Ntimes;j++) {free_mat(M1M2[j]);} free_vecs(&vrisk,&dAoff,&dB,&dA,&VdB,&difX,&xi,&tmpv1,&korG,&rowX,&AIXWdN,&bhatt, &gamoff,&zi,&tmpv2,&rowZ,&gam,&dgam,&ZHdN,&IZHdN,&VZHdN, &offsets,&dN,&pbhat,&pghat,&plamt,NULL); if (*robust==1) { for (j=0;j<*antclust;j++) {free_mat(W3t[j]); free_mat(W4t[j]); free_vec(W2[j]); free_vec(W3[j]); } for (j=0;j<*antpers;j++) free_mat(AIxit[j]); } if (*pseudoscore>=1) { free_mats(&tmpM1,&Delta,NULL); for (j=0;j<*Ntimes;j++) free_mat(iZHZt[j]); for (j=0;j<*antclust;j++) free_mat(Scoret[j]); } free(vcudif); free(times); free(cumoff); free(cuL); free(ipers); free(cluster); free(weights); } timereg/src/kim-kim.c0000644000175000017500000000560014077524411014337 0ustar nileshnilesh//#include #include #include "matrix.h" void l1boost(D,px,d,lambda,Nit,beta,detail) double *D,*d,*beta,*lambda; int *px,*detail,*Nit; { matrix *iD; vector *Db,*betav,*dL,*tmpv1; int index,it,i,j; double k,lg,bDb,bd,dummy,fabs(),sqrt(),L0,L1,Lk,fdL; malloc_mats(*px,*px,&iD,NULL); malloc_vecs(*px,&tmpv1,&betav,&dL,&Db,&bd,NULL); for (i=0;i<*px;i++) { VE(betav,i)=beta[i]; for(j=0;j<*px;j++) { ME(iD,i,j)=D[j*(*px)+i]; } } for (it=0;it<*Nit;it++){ Mv(iD,betav,Db); bd=0; bDb=0; for (i=0;i<*px;i++) { bd=bd+VE(betav,i)*d[i]; bDb=bDb+VE(Db,i)*VE(betav,i); } index=0; dummy=0; for (i=0;i<*px;i++) { VE(dL,i)=d[i]-VE(Db,i); fdL=fabs(VE(dL,i)); if (fdL>dummy) {dummy=fdL; index=i; } } if (*detail==1) Rprintf(" %ld \n",(long int) index); if (VE(dL,index)<0) lg=-1*(*lambda); else lg=*lambda; if (fabs(VE(dL,index))<.00000000001) {it=*Nit-1; break;} k=(VE(Db,index)*lg-bDb+bd-lg*d[index]) /(-bDb-lg*lg*ME(iD,index,index)+2*lg*VE(Db,index)); if (*detail==1) Rprintf(" %lf %lf \n",k,lg); if (*detail==1) Rprintf(" %lf %lf \n",bDb,bd); if (*detail==1) Rprintf(" %lf %lf %lf %lf %lf %lf %lf %lf \n", lg,ME(iD,index,index),d[index], 0.5*lg*lg*ME(iD,index,index)-lg*d[index], 0.5*lg*lg*ME(iD,index,index), 0.5*lg*lg,ME(iD,index,index), -lg*d[index]); 
L1=0.5*lg*lg*ME(iD,index,index)-lg*d[index]; L0=0.5*bDb-bd; Lk=0.5*((1-k)*(1-k)*bDb+k*k*lg*lg*ME(iD,index,index)+2*k*(1-k)*lg*VE(Db,index))-(1-k)*bd-k*lg*d[index]; if (L0<=Lk && L0<=L1) k=0; if (L1<=Lk && L1<=L0) k=1; if (*detail==1) Rprintf(" %lf %lf %lf %lf \n",L0,L1,Lk,k); for (i=0;i<*px;i++) VE(betav,i)*=(1-k); VE(betav,index)+=k*lg; } for (i=0;i<*px;i++) beta[i]=VE(betav,i); free_mats(&iD,NULL); free_vecs(&tmpv1,&betav,&dL,&Db,NULL); } void addiboost(D,px,d,Nit,beta,detail,index,step,var,met) double *D,*d,*beta,*step,*var; int *met,*px,*detail,*Nit,*index; { int k,itta,pls; double fabs(),sqrt(),L,Lp=0,pval,pvalp=0; double gam,cgam,kor,izhdn; for (itta=0;itta<*Nit-1;itta++) { for (pls=0;pls<*px;pls++) { cgam=D[pls*(*px)+pls]; izhdn=d[pls]; kor=0; for (k=0;k pvalp){beta[itta]=gam;index[itta]=pls;pvalp=pval;} } } /* dimcovpls comps */ } /* void Lc(D,px,d,Dd,db,beta,bDb,L) MAT *D; VEC *Dd,*beta; int *px; double *db,*bDb,*d,*L; { mv_mlt(D,betav,Db); bDb[0]=0; db[0]=0; for (i=0;i<*px;i++) { bd[0]=bd[0]+beta->ve[i]*d[i]; bDb[0]=bDb[0]+Db->ve[i]*betav->ve[i]; } L[0]=0.5*bDb[0]-bd[0]; } */ timereg/src/matrix.c0000644000175000017500000011714014127306505014306 0ustar nileshnilesh#include //#include #include #include #include #include #include #include #include #include "matrix.h" void free_mat(matrix *M){ Free(M->entries); Free(M); } void free_mat3(matrix3 *M){ Free(M->entries); Free(M); } void free_vec(vector *V){ Free(V->entries); Free(V); } int nrow_matrix(matrix *M){ return M->nr; } int ncol_matrix(matrix *M){ return M->nc; } int length_vector(vector *v){ return v->length; } void print_a_matrix(matrix *M){ int j, k; for(j=0; j < nrow_matrix(M); j++){ for(k = 0; k < ncol_matrix(M); k++){ Rprintf("%+7.7g ", ME(M,j,k)); } Rprintf("\n"); } } /* DPOTRI - compute the inverse of a real symmetric positive */ /* definite matrix A using the Cholesky factorization A = U**T*U */ /* or A = L*L**T computed by DPOTRF */ extern void F77_SUB(dpotri)(const char* uplo, const int* n, 
double* a, const int* lda, int* info); /* DPOTRF - compute the Cholesky factorization of a real */ /* symmetric positive definite matrix A */ extern void F77_SUB(dpotrf)(const char* uplo, const int* n, double* a, const int* lda, int* info); extern void F77_SUB(dgemm)(const char *transa, const char *transb, const int *m, const int *n, const int *k, const double *alpha, const double *a, const int *lda, const double *b, const int *ldb, const double *beta, double *c, const int *ldc); /* DGEMV - perform one of the matrix-vector operations */ /* y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, */ extern void F77_SUB(dgemv)(const char *trans, const int *m, const int *n, const double *alpha, const double *a, const int *lda, const double *x, const int *incx, const double *beta, double *y, const int *incy); /* DGETRF - compute an LU factorization of a general M-by-N */ /* matrix A using partial pivoting with row interchanges */ extern void F77_SUB(dgetrf)(const int* m, const int* n, double* a, const int* lda, int* ipiv, int* info); /* DGETRI - compute the inverse of a matrix using the LU */ /* factorization computed by DGETRF */ extern void F77_SUB(dgetri)(const int* n, double* a, const int* lda, int* ipiv, double* work, const int* lwork, int* info); // cumsum of matrix apply(X,2,cusum) // rev=1 apply(X[n:1,],2,cumsum)[n:1,] // for rev=1 possible to return only apply(X[n:1,],2,cumsum)[nindex,] void cumsumM(matrix *M, matrix *Mout,int rev,int weighted,double *weights) { int i,j,p=ncol_matrix(M),n=nrow_matrix(M); double lweights[n]; matrix *temp; malloc_mat(n,p,temp); if( !( (ncol_matrix(M) == ncol_matrix(Mout)) )) { oops("Error: dimensions in cumsumM\n"); } for(i=0;i0) for(i = 0;i0) for(i = 0;ientries, &lda, M->entries, &ldb, &beta, A->entries, &ldc FCONE FCONE); } else { // if M and A occupy the same memory, store the results in a // temporary matrix. 
matrix *temp; malloc_mat(nrow_matrix(A),ncol_matrix(A),temp); F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, M->entries, &ldb, &beta, temp->entries, &ldc FCONE FCONE); // Copy these results into A, then remove the temporary matrix mat_copy(temp,A); free_mat(temp); } } // Does cholesky of := A, where A is symmetric positive definite, of order *n void cholesky(matrix *A, matrix *AI){ // {{{ if( !(nrow_matrix(A) == ncol_matrix(A) && nrow_matrix(AI) == ncol_matrix(AI) && nrow_matrix(A) == ncol_matrix(AI)) ){ oops("Error: dimensions in invertSPD\n"); } // Ensure that A and AI do not occupy the same memory. if(A != AI){ // printf(" er her\n"); choleskyunsafe(A, AI); } else { // if M and A occupy the same memory, store the results in a // temporary matrix. matrix *temp; malloc_mat(nrow_matrix(AI),ncol_matrix(AI),temp); choleskyunsafe(A, temp); // Copy these results into AI, then remove the temporary matrix mat_copy(temp,AI); free_mat(temp); } } // }}} // cholesky := A, where A is symmetric positive definite, of order *n void choleskyunsafe(matrix *A, matrix *AI){ // {{{ //unsafe because it assumes A and AI are both square and of the same //dimensions, and that they occupy different memory // char uplo = 'U'; // lower version int i, j; int n = nrow_matrix(A); // int lda = n; // matrix A has dimensions *n x *n int info = -999; // double rcond; // int pivot[n]; // double z[n]; // double qraux[n]; // double work[2*n]; // int rank = 0; // int job=1; // double tol = 1.0e-07; // First copy the matrix A into the matrix AI // print_mat(A); mat_copy(A,AI); // print_mat(AI); // printf("sssssssssss======================\n"); // job = 1; // Indicates that AI is upper triangular // rcond = 999.0; // First find the Cholesky factorization of A, // stored as an upper triangular matrix char uplo1 = 'U'; // lower version F77_CALL(dpotrf)(&uplo1, &n, AI->entries, &n, &info FCONE FCONE); // Lastly turn the vector a into the matrix AI // Take only the lower triangular 
portion, since this // is the relevant part returned by dpotrf for(i = 0; i < n; i++){ for(j = 0; j < i; j++){ ME(AI,i,j) = 0; } } // print_mat(AI); // Rprintf("in chol \n"); // printf("======================\n"); // print_mat(A); // print_mat(AI); // printf(" check chol back\n"); // matrix *tmp; // malloc_mat(n,n,tmp); // MtM(AI,tmp); // print_mat(tmp); // free_mat(tmp); } // }}} // Does AI := inverse(A), where A is symmetric positive definite, of order *n void invertSPD(matrix *A, matrix *AI){ if( !(nrow_matrix(A) == ncol_matrix(A) && nrow_matrix(AI) == ncol_matrix(AI) && nrow_matrix(A) == ncol_matrix(AI)) ){ oops("Error: dimensions in invertSPD\n"); } // Ensure that A and AI do not occupy the same memory. if(A != AI){ invertSPDunsafe(A, AI); } else { // if M and A occupy the same memory, store the results in a // temporary matrix. matrix *temp; malloc_mat(nrow_matrix(AI),ncol_matrix(AI),temp); invertSPDunsafe(A, temp); // Copy these results into AI, then remove the temporary matrix mat_copy(temp,AI); free_mat(temp); } } // Does AI := inverse(A), where A is symmetric positive definite, of order *n void invertSPDunsafe(matrix *A, matrix *AI){ //unsafe because it assumes A and AI are both square and of the same //dimensions, and that they occupy different memory char uplo = 'U'; int i, j; int n = nrow_matrix(A); int lda = n; // matrix A has dimensions *n x *n int info = -999; double rcond; int pivot[n]; double z[n]; double qraux[n]; double work[2*n]; int rank = 0; int job=1; double tol = 1.0e-07; // First copy the matrix A into the matrix AI for(i = 0; i < n; i++){ for(j = 0; j < n; j++){ ME(AI,i,j) = ME(A,i,j); } } // dqrdc(x,ldx,n,p, qraux,jpvt,work,job) // F77_CALL(dqrdc)(AI->entries, &n, &n, &n, &rank, qraux, pivot, work,job); // dqrdc2(x,ldx,n,p,tol,k,qraux,jpvt,work) F77_CALL(dqrdc2)(AI->entries, &n, &n, &n, &tol, &rank, qraux, pivot, work FCONE FCONE); for(i = 0; i < n; i++){ for(j = 0; j < i; j++){ ME(AI,j,i) = 0.0; } } job = 1; // Indicates that AI is 
upper triangular rcond = 999.0; F77_CALL(dtrco)(AI->entries, &n, &n, &rcond, z, &job FCONE FCONE); if(rcond < tol){ Rprintf("Error in invertSPD: estimated condition number = %7.7e\n",1/rcond); for(i = 0; i < n; i++){ for(j = 0; j < n; j++){ ME(AI,i,j) = 0.0; } } } else { for(i = 0; i < n; i++){ pivot[i] = i+1; for(j = 0; j < n; j++){ ME(AI,i,j) = ME(A,i,j); } } // First find the Cholesky factorization of A, // stored as an upper triangular matrix F77_CALL(dpotrf)(&uplo, &n, AI->entries, &lda, &info FCONE FCONE); if(info < 0){ Rprintf("Error in invertSPD: arg %d of DPOTRF\n",-info); } else if(info > 0){ Rprintf("Error in invertSPD: matrix does not appear to be SPD\n"); } // then use this factorization to compute the inverse of A F77_CALL(dpotri)(&uplo, &n, AI->entries, &lda, &info FCONE FCONE); if(info != 0){ Rprintf("Error in invertSPD: DPOTRI returned info = %d \n",info); } // Lastly turn the vector a into the matrix AI // Take only the upper triangular portion, since this // is the relevant part returned by dpotrf for(i = 0; i < n; i++){ for(j = 0; j < i; j++){ ME(AI,i,j) = ME(AI,j,i); } } } } // v2 := M %*% v1 // where M has dims (nrow x ncol) // and v1 has dims (ncol x 1 ) // amd v2 has dims (nrow x 1 ) void Mv(matrix *M, vector *v1, vector *v2){ char trans = 'n'; double alpha = 1.0; double beta = 0.0; int incx = 1; int incy = 1; int nrow = nrow_matrix(M); int ncol = ncol_matrix(M); if( !(length_vector(v1) == ncol && length_vector(v2) == nrow) ){ oops("Error: dimensions in Mv\n"); } // Ensure that v1 and v2 do not occupy the same memory. if(v1 != v2){ F77_CALL(dgemv)(&trans, &nrow, &ncol, &alpha, M->entries, &nrow, v1->entries, &incx, &beta, v2->entries, &incy FCONE FCONE); } else { // if v1 and v2 occupy the same memory, store the results in a // temporary vector. 
vector *temp; malloc_vec(length_vector(v2),temp); F77_CALL(dgemv)(&trans, &nrow, &ncol, &alpha, M->entries, &nrow, v1->entries, &incx, &beta, temp->entries, &incy FCONE FCONE); // Copy these results into A, then remove the temporary matrix vec_copy(temp,v2); free_vec(temp); } } // v2 := v1 %*% matrix // where v1 has dims (1 x nrow) // and matrix has dims (nrow x ncol) // amd v2 has dims (1 x ncol) void vM(matrix *M, vector *v1, vector *v2){ char trans = 't'; double alpha = 1.0; double beta = 0.0; int incx = 1; int incy = 1; int nrow = nrow_matrix(M); int ncol = ncol_matrix(M); if( !(length_vector(v1) == nrow && length_vector(v2) == ncol) ){ oops("Error: dimensions in vM\n"); } // Ensure that v1 and v2 do not occupy the same memory. if(v1 != v2){ F77_CALL(dgemv)(&trans, &nrow, &ncol, &alpha, M->entries, &nrow, v1->entries, &incx, &beta, v2->entries, &incy FCONE FCONE); } else { // if v1 and v2 occupy the same memory, store the results in a // temporary vector. vector *temp; malloc_vec(length_vector(v2),temp); F77_CALL(dgemv)(&trans, &nrow, &ncol, &alpha, M->entries, &nrow, v1->entries, &incx, &beta, temp->entries, &incy FCONE FCONE); // Copy these results into A, then remove the temporary matrix vec_copy(temp,v2); free_vec(temp); } } // v3 := v1 * v2, where * is the Hadamard (componentwise) product of the // two vectors, which is the same as * does in R for vectors of the same length vector *vec_star(vector *v1, vector *v2, vector *v3){ int i; int n = length_vector(v1); if( !(length_vector(v2) == n && length_vector(v3) == n) ){ oops("Error: dimensions in vec_star\n"); } for(i = 0; i < n; i++){ VE(v3,i) = VE(v1,i)*VE(v2,i); } return(v3); } // := v1^T * v2, inner product // two vectors double vec_prod(vector *v1, vector *v2){ double sum = 0.0; int i; int n = length_vector(v1); if( !(length_vector(v2) == n) ){ oops("Error: dimensions in vec_star\n"); } for(i = 0; i < n; i++){ sum += VE(v1,i)*VE(v2,i); } return sum; } // Sums the entries of a vector of length n double 
vec_sum(vector *v){ double sum = 0.0; int i; int n = length_vector(v); for(i = 0; i < n; i++){ sum += VE(v,i); } return sum; } // Sums the entries of a vector of length n vector *vec_ones(vector *v){ int i; int n = length_vector(v); for(i = 0; i < n; i++){ VE(v,i) = 1.0; } return(v); } // Returns the minimum of the entries of a vector of length n double vec_min(vector *v, int *imin){ double Min = VE(v,0); int i; int n = length_vector(v); *imin = 0; for(i = 1; i < n; i++){ if(VE(v,i) < Min){ Min = VE(v,i); *imin = i; } } return Min; } // set all entries of an *nrow x *ncol matrix M to zero void mat_zeros(matrix *M){ int j, k; for(j=0; j < nrow_matrix(M); j++){ for(k = 0; k < ncol_matrix(M); k++){ ME(M,j,k) = 0.0; } } } // set all entries of vector v of length *length to zero void vec_zeros(vector *v){ int j; for(j=0; j < length_vector(v); j++){ VE(v,j) = 0.0; } } // Simple I/O function that prints a matrix void print_mat(matrix *M){ int j, k; Rprintf("Matrix nrow=%d ncol=%d \n",nrow_matrix(M),ncol_matrix(M)); for(j=0; j < nrow_matrix(M); j++){ for(k = 0; k < ncol_matrix(M); k++){ // Rprintf("%5.5g ", ME(M,j,k)); // Rprintf("%+15.15g ", ME(M,j,k)); Rprintf("%lf ", ME(M,j,k)); } Rprintf("\n"); } Rprintf("\n"); } // Simple I/O function that prints the top of a matrix void head_matrix(matrix *M){ int j, k; Rprintf("head:Matrix nrow=%d ncol=%d \n",nrow_matrix(M),ncol_matrix(M)); for(j=0; j < min(nrow_matrix(M),6); j++){ for(k = 0; k < min(ncol_matrix(M),6); k++){ //Rprintf("%5.5g ", ME(M,j,k)); Rprintf("%lf ", ME(M,j,k)); } Rprintf("\n"); } Rprintf("\n"); } // Simple I/O function that prints the first few entries of a vector void head_vector(vector *V){ int j; Rprintf("head:Vector lengthn=%d \n",length_vector(V)); for(j=0; j < min(length_vector(V),6); j++){ Rprintf("%lf ", VE(V,j)); } Rprintf("\n"); } // Simple I/O function that prints a vector void print_vec(vector *v){ int j; Rprintf("Vector lengthn=%d \n",length_vector(v)); for(j=0; j < length_vector(v); j++){ 
Rprintf("%lf ", VE(v,j)); } Rprintf("\n\n"); } // sets v := M[row_to_get,] vector *extract_row(matrix *M, int row_to_get, vector *v){ int j; if(!(length_vector(v) == ncol_matrix(M))){ oops("Error: dimensions in extract_row\n"); } if(row_to_get >= 0 && row_to_get < nrow_matrix(M)){ for(j = 0; j < length_vector(v); j++){ VE(v,j) = ME(M,row_to_get,j); } return(v); } else { oops("Error: trying to get an invalid row in 'extract_row'\n"); } return(v); } // sets M[row_to_get,] := v void replace_row(matrix *M, int row_to_set, vector *v){ int j; if(!(length_vector(v) == ncol_matrix(M))){ oops("Error: dimensions in replace_row\n"); } if(row_to_set >= 0 && row_to_set < nrow_matrix(M)){ for(j = 0; j < ncol_matrix(M); j++){ ME(M,row_to_set,j) = VE(v,j); } } else { oops("Error: trying to get an invalid row in 'replace_row'\n"); } } // v3 := v1 + v2, where the three vectors have length void vec_add(vector *v1, vector *v2, vector *v3){ int i; int n = length_vector(v1); if( !(length_vector(v2) == n && length_vector(v3) == n) ){ oops("Error: dimensions in vec_addition\n"); } for(i=0; i < n; i++){ VE(v3,i) = VE(v1,i) + VE(v2,i); } } // v3 := v1 + s * v2, where the three vectors have length, // and s is a double scalar void vec_add_mult(vector *v1, vector *v2, double s, vector *v3){ int i; int n = length_vector(v1); if( !(length_vector(v2) == n && length_vector(v3) == n) ){ oops("Error: dimensions in vec_addition\n"); } for(i=0; i < n; i++){ VE(v3,i) = VE(v1,i) + s*VE(v2,i); } } // v2 := scalar * v1, where invec and outvec are vectors of // length *length, and *scalar is a (double) scalar vector *scl_vec_mult(double scalar, vector *v1, vector *v2){ int i; int n = length_vector(v1); if( !(length_vector(v2) == n) ){ oops("Error: dimensions in scl_vec_mult\n"); } for(i=0; i < n; i++){ VE(v2,i) = scalar * VE(v1,i); } return(v2); } // m2 := scalar * m1 matrix *scl_mat_mult(double scalar, matrix *m1, matrix *m2){ int i,j; int m = nrow_matrix(m1); int n = ncol_matrix(m1); if( 
!(nrow_matrix(m1) == m && ncol_matrix(m1) == n) ){ oops("Error: dimensions in scl_vec_mult\n"); } for(i=0; i < m; i++){ for(j=0; j < n; j++){ ME(m2,i,j) = ME(m1,i,j) * scalar; } } return(m2); } // m2 := m1 matrix *mat_copy(matrix *m1, matrix *m2){ int i,j; int m = nrow_matrix(m1); int n = ncol_matrix(m1); if( !(nrow_matrix(m2) == m && ncol_matrix(m2) == n) ){ oops("Error: dimensions in copy_matrix\n"); } if(m1 == m2){ oops("copy_matrix was asked to write one matrix into its own memory\nThere may be an error...\n"); } for(i=0; i < m; i++){ for(j=0; j < n; j++){ ME(m2,i,j) = ME(m1,i,j); } } return(m2); } // v2 := v1 vector *vec_copy(vector *v1, vector *v2){ int i; int l = length_vector(v1); if( !(length_vector(v2) == l) ){ oops("Error: dimensions in copy_vector\n"); } if(v1 == v2){ oops("copy_vector was asked to write one matrix into its own memory\nThere may be an error...\n"); } for(i=0; i < l; i++){ VE(v2,i) = VE(v1,i); } return(v2); } // m2 := m1 void mat_subsec(matrix *m1, int rowStart, int colStart, int rowStop, int colStop, matrix *m2){ int i,j; int m = nrow_matrix(m1); int n = ncol_matrix(m1); if( !(nrow_matrix(m2) == (rowStop-rowStart) && ncol_matrix(m2) == (colStop-colStart)) ){ oops("Error: dimensions in mat_subsec\n"); } else if(!(rowStart >= 0 && colStart >= 0 && rowStop < m && colStop < n)){ oops("Error: trying to access non-existing rows or cols in mat_subsec\n"); } if(m1 == m2){ oops("matrix_subsec was asked to write one matrix into its own memory\nThere may be an error...\n"); } for(i=rowStart; i < rowStop; i++){ for(j=colStart; j < colStop; j++){ ME(m2,i-rowStart,j-colStart) = ME(m1,i,j); } } } // m2 := t(m1) matrix *mat_transp(matrix *m1, matrix *m2){ int i,j; int m = nrow_matrix(m1); int n = ncol_matrix(m1); if( !(ncol_matrix(m2) == m && nrow_matrix(m2) == n) ){ oops("Error: dimensions in mat_transp\n"); } // Ensure that m1 and m2 do not occupy the same memory. 
if(m1 != m2){ for(i=0; i < m; i++){ for(j=0; j < n; j++){ ME(m2,j,i) = ME(m1,i,j); } } } else { // if v1 and v2 occupy the same memory, store the results in a // temporary vector. matrix *temp; malloc_mat(nrow_matrix(m2),ncol_matrix(m2),temp); for(i=0; i < m; i++){ for(j=0; j < n; j++){ ME(temp,j,i) = ME(m1,i,j); } } // Copy these results into A, then remove the temporary matrix mat_copy(temp,m2); free_mat(temp); } return(m2); } // v3 := v1 - v2, where the three vectors have length *length void vec_subtr(vector *v1, vector *v2, vector *v3){ int i; int n = length_vector(v1); if( !(length_vector(v2) == n && length_vector(v3) == n) ){ oops("Error: dimensions in vec_subtraction\n"); } for(i=0; i < n; i++){ VE(v3,i) = VE(v1,i) - VE(v2,i); } } // m3 := m1 - m2, where the three matrix have the same dimentions void mat_subtr(matrix *m1, matrix *m2, matrix *m3){ int i,j; int m = nrow_matrix(m1); int n = ncol_matrix(m1); if( !(nrow_matrix(m2) == m && ncol_matrix(m2) == n && nrow_matrix(m3) == m && ncol_matrix(m3) == n) ){ oops("Error: dimensions in mat_subtr\n"); } for(i=0; i < m; i++){ for(j=0; j < n; j++){ ME(m3,i,j) = ME(m1,i,j) - ME(m2,i,j); } } } // m3 := m1 + m2, where the three matrix have the same dimentions void mat_add(matrix *m1, matrix *m2, matrix *m3){ int i,j; int m = nrow_matrix(m1); int n = ncol_matrix(m1); if( !(nrow_matrix(m2) == m && ncol_matrix(m2) == n && nrow_matrix(m3) == m && ncol_matrix(m3) == n) ){ oops("Error: dimensions in mat_subtr\n"); } for(i=0; i < m; i++){ for(j=0; j < n; j++){ ME(m3,i,j) = ME(m1,i,j) + ME(m2,i,j); } } } // Performs Mout := t(M) %*% A, where M is an nRowM x nColM matrix, // and A is an nRowM x nColA matrix, and Mout is a nColM x nColA matrix void MtA(matrix *M, matrix *A, matrix *Mout){ char transa = 't'; char transb = 'n'; double alpha = 1.0; double beta = 0.0; int m = ncol_matrix(M); int n = ncol_matrix(A); int k = nrow_matrix(M); int lda = nrow_matrix(M); int ldb = nrow_matrix(M); int ldc = ncol_matrix(M); if( 
!(nrow_matrix(M) == nrow_matrix(A) && nrow_matrix(Mout) == ncol_matrix(M) && ncol_matrix(Mout) == ncol_matrix(A)) ){ oops("Error: dimensions in MtA\n"); } // Ensure that Mout does not occupy the same memory as M or A if(Mout != A && Mout != M){ // the results of 1.0 * t(M) %*% A + 0.0 * Mout is stored in Mout F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, A->entries, &ldb, &beta, Mout->entries, &ldc FCONE FCONE); } else { // if M and A occupy the same memory, store the results in a // temporary matrix. matrix *temp; malloc_mat(nrow_matrix(Mout),ncol_matrix(Mout),temp); // the results of 1.0 * t(M) %*% A + 0.0 * Mout is stored in temp F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, A->entries, &ldb, &beta, temp->entries, &ldc FCONE FCONE); // Copy these results into A, then remove the temporary matrix mat_copy(temp,Mout); free_mat(temp); } } // Performs Mout := M %*% t(A), where M is an nRowM x nColM matrix, // and A is an nRowA x nColM matrix, and Mout is a nRowM x nRowA matrix void MAt(matrix *M, matrix *A, matrix *Mout){ char transa = 'n'; char transb = 't'; double alpha = 1.0; double beta = 0.0; int m = nrow_matrix(M); int n = nrow_matrix(A); int k = ncol_matrix(M); int lda = nrow_matrix(M); int ldb = nrow_matrix(A); int ldc = nrow_matrix(Mout); if( !(ncol_matrix(M) == ncol_matrix(A) && nrow_matrix(Mout) == nrow_matrix(M) && ncol_matrix(Mout) == nrow_matrix(A)) ){ oops("Error: dimensions in MAt\n"); } // Ensure that Mout does not occupy the same memory as M or A if(Mout != A && Mout != M){ // the results of 1.0 * t(M) %*% A + 0.0 * Mout is stored in Mout F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, A->entries, &ldb, &beta, Mout->entries, &ldc FCONE FCONE); } else { // if M and A occupy the same memory, store the results in a // temporary matrix. 
matrix *temp; malloc_mat(nrow_matrix(Mout),ncol_matrix(Mout),temp); // the results of 1.0 * t(M) %*% A + 0.0 * Mout is stored in temp F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, A->entries, &ldb, &beta, temp->entries, &ldc FCONE FCONE); // Copy these results into Mout, then remove the temporary matrix mat_copy(temp,Mout); free_mat(temp); } } // Does Ainv := inverse(A), where A is a square matrix void invert(matrix *A, matrix *Ainv){ if( !(nrow_matrix(A) == ncol_matrix(A) && nrow_matrix(Ainv) == ncol_matrix(Ainv) && nrow_matrix(A) == ncol_matrix(Ainv)) ){ oops("Error: dimensions in invert\n"); } // Ensure that A and Ainv do not occupy the same memory. if(A != Ainv){ invertUnsafe(A, Ainv); } else { // if A and Ainv occupy the same memory, store the results in a // temporary matrix. matrix *temp; malloc_mat(nrow_matrix(Ainv),ncol_matrix(Ainv),temp); invertUnsafe(A, temp); // Copy these results into Ainv, then remove the temporary matrix mat_copy(temp,Ainv); free_mat(temp); } } // Does Ainv := inverse(A), where A is a square matrix void invertS(matrix *A, matrix *Ainv,int silent){ if( !(nrow_matrix(A) == ncol_matrix(A) && nrow_matrix(Ainv) == ncol_matrix(Ainv) && nrow_matrix(A) == ncol_matrix(Ainv))){ oops("Error: dimensions in invert\n"); } // Ensure that A and Ainv do not occupy the same memory. if(A != Ainv){ invertUnsafeS(A, Ainv,silent); } else { // if A and Ainv occupy the same memory, store the results in a // temporary matrix. 
matrix *temp; malloc_mat(nrow_matrix(Ainv),ncol_matrix(Ainv),temp); invertUnsafeS(A, temp,silent); // Copy these results into Ainv, then remove the temporary matrix mat_copy(temp,Ainv); free_mat(temp); } } // Does Ainv := inverse(A), where A is a square matrix void invertUnsafe(matrix *A, matrix *Ainv){ //unsafe because it assumes A and Ainv are both square and of the //same dimensions, and that they occupy different memory //char uplo = 'U'; int i, j; int n = nrow_matrix(A); int lda = n; // matrix A has dimensions n x n int *ipiv = malloc(n * sizeof(int)); int lwork = n * n; int info = -999; double anorm = -999.0; double rcond = -999.0; double tol = 1.0e-07; double *dwork = malloc(4 * n * sizeof(double)); int *iwork = malloc(n * sizeof(int)); double *work = malloc(n * n * sizeof(double)); // First turn the matrix A into the vector a for(i = 0; i < n; i++){ for(j = 0; j < n; j++){ ME(Ainv,i,j) = ME(A,i,j); } } anorm = F77_NAME(dlange)("O", &n, &n, Ainv->entries, &lda, dwork); // First find the LU factorization of A, // stored as an upper triangular matrix F77_CALL(dgetrf)(&n, &n, Ainv->entries, &lda, ipiv, &info FCONE FCONE); if(info != 0){ //Avoid printing this error message Rprintf("2 Error in invert: DGETRF returned info = %d \n",info); mat_zeros(Ainv); print_mat(Ainv); } else { for(i = 0; i < n; i++){ iwork[i]= ipiv[i]; } F77_CALL(dgecon)("O", &n, Ainv->entries, &lda, &anorm, &rcond, dwork, iwork, &info FCONE FCONE); if(info != 0){ //Avoid printing this error message Rprintf("1 Error in invert: DGETRF returned info = %d \n",info); mat_zeros(Ainv); return; } if(rcond < tol){ Rprintf("Error in invert: estimated reciprocal condition number = %7.7e\n",rcond); mat_zeros(Ainv); return; } // then use this factorization to compute the inverse of A F77_CALL(dgetri)(&n, Ainv->entries, &lda, ipiv, work, &lwork, &info FCONE FCONE); if(info != 0){ Rprintf("Error in invert: DPOTRI returned info = %d \n",info); mat_zeros(Ainv); } if (fabs(ME(Ainv,0,0))>99999999999999) { // TS 
23-10 print_mat(Ainv); Rprintf("Inversion, unstable large elements \n"); mat_zeros(Ainv); } } free(work); free(iwork); free(dwork); free(ipiv); } // Does Ainv := inverse(A), where A is a square matrix, possibly silent void invertUnsafeS(matrix *A, matrix *Ainv,int silent){ //unsafe because it assumes A and Ainv are both square and of the //same dimensions, and that they occupy different memory //char uplo = 'U'; int i, j; int n = nrow_matrix(A); int lda = n; // matrix A has dimensions n x n int *ipiv = malloc(n * sizeof(int)); int lwork = n * n; int info = -999; double anorm = -999.0; double rcond = -999.0; double tol = 1.0e-07; double *dwork = malloc(4 * n * sizeof(double)); int *iwork = malloc(n * sizeof(int)); double *work = malloc(n * n * sizeof(double)); // First turn the matrix A into the vector a for(i = 0; i < n; i++){ for(j = 0; j < n; j++){ ME(Ainv,i,j) = ME(A,i,j); } } anorm = F77_NAME(dlange)("O", &n, &n, Ainv->entries, &lda, dwork); // First find the LU factorization of A, // stored as an upper triangular matrix F77_CALL(dgetrf)(&n, &n, Ainv->entries, &lda, ipiv, &info FCONE FCONE); if(info != 0){ //Avoid printing this error message mat_zeros(Ainv); if (silent==0) Rprintf("3 Error in invert: DGETRF returned info = %d \n",info); } else { for(i = 0; i < n; i++){ iwork[i]= ipiv[i]; } F77_CALL(dgecon)("O", &n, Ainv->entries, &lda, &anorm, &rcond, dwork, iwork, &info FCONE FCONE); if(info != 0){ //Avoid printing this error message mat_zeros(Ainv); free(work); free(iwork); free(dwork); free(ipiv); if (silent==0) Rprintf("4 Error in invert: DGETRF returned info = %d \n",info); return; } if(rcond < tol ){ mat_zeros(Ainv); free(work); free(iwork); free(dwork); free(ipiv); if (silent==0) Rprintf("Error in invert: estimated reciprocal condition number = %7.7e\n",rcond); return; } // then use this factorization to compute the inverse of A F77_CALL(dgetri)(&n, Ainv->entries, &lda, ipiv, work, &lwork, &info FCONE FCONE); if(info != 0 ){ mat_zeros(Ainv); if 
(silent==0) Rprintf("Error in invert: DPOTRI returned info = %d \n",info); } if (fabs(ME(Ainv,0,0))>99999999999999 ) { // TS 23-10 mat_zeros(Ainv); if (silent==0) Rprintf("Inversion, unstable large elements \n"); } } free(work); free(iwork); free(dwork); free(ipiv); } // Performs Mout := M %*% A, where M is an nRowM x nColM matrix, // and A is an nColM x nColA matrix, and Mout is a nRowM x nColA matrix void MxA(matrix *M, matrix *A, matrix *Mout){ char transa = 'n'; char transb = 'n'; double alpha = 1.0; double beta = 0.0; int m = nrow_matrix(M); int n = ncol_matrix(A); int k = ncol_matrix(M); int lda = nrow_matrix(M); int ldb = ncol_matrix(M); int ldc = nrow_matrix(M); if( !(ncol_matrix(M) == nrow_matrix(A) && nrow_matrix(Mout) == nrow_matrix(M) && ncol_matrix(Mout) == ncol_matrix(A)) ){ oops("Error: dimensions in MxA\n"); } // Ensure that Mout does not occupy the same memory as M or A if(Mout != A && Mout != M){ // the results of 1.0 * M %*% A + 0.0 * c is stored in c // therfore we do not need to initialise c F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, A->entries, &ldb, &beta, Mout->entries, &ldc FCONE FCONE); } else { // if M and A occupy the same memory, store the results in a // temporary matrix. 
matrix *temp; malloc_mat(nrow_matrix(Mout),ncol_matrix(Mout),temp); // the results of 1.0 * M %*% A + 0.0 * c is stored in c // therfore we do not need to initialise c F77_CALL(dgemm)(&transa, &transb, &m, &n, &k, &alpha, M->entries, &lda, A->entries, &ldb, &beta, temp->entries, &ldc FCONE FCONE); // Copy these results into Mout, then remove the temporary matrix mat_copy(temp,Mout); free_mat(temp); } } void print_clock(clock_t *intime, int i){ clock_t outtime = clock(); Rprintf("### point %d, time %7.7e\n", i, difftime(outtime,*intime)); *intime = outtime; } void update_clock(clock_t *intime, counter *C){ clock_t outtime = clock(); C->timec += difftime(outtime,*intime); C->callc++; *intime = outtime; } void zcntr(counter *C){ C->timec = 0.0; C->callc = 0; } void print_counter(int i, counter *C){ Rprintf("### counter %d, time %7.7g, calls %d\n", i, C->timec, C->callc); } void identity_matrix(matrix *M){ int i, j; if(nrow_matrix(M) != ncol_matrix(M)){ oops("Error in identity_matrix: dimenions do not match\n"); } for(i = 0; i < nrow_matrix(M); i++){ for(j = 0; j < nrow_matrix(M); j++){ if(i == j){ ME(M,i,j) = 1.0; } else { ME(M,i,j) = 0.0; } } } } void malloc_mats(int nrow, int ncol, ...){ va_list argp; va_start(argp, ncol); matrix **M; while((M = va_arg(argp, matrix **))){ malloc_mat(nrow,ncol,*M); } va_end(argp); } void malloc_vecs(int length, ...){ va_list argp; va_start(argp, length); vector **V; while((V = va_arg(argp, vector **))){ malloc_vec(length,*V); } va_end(argp); } void free_mats(matrix **M1, ...){ va_list argp; va_start(argp, M1); matrix **M; free_mat(*M1); while((M = va_arg(argp, matrix **))){ free_mat(*M); } va_end(argp); } void free_vecs(vector **V1, ...){ va_list argp; va_start(argp, V1); vector **V; free_vec(*V1); while((V = va_arg(argp, vector **))){ free_vec(*V); } va_end(argp); } // sets v := M[,col_to_get] vector *extract_col(matrix *M, int col_to_get, vector *v){ int j; if(!(length_vector(v) == nrow_matrix(M))){ oops("Error: dimensions in 
extract_col\n"); } if(col_to_get >= 0 && col_to_get < ncol_matrix(M)){ for(j = 0; j < length_vector(v); j++){ VE(v,j) = ME(M,j,col_to_get); } return(v); } else { oops("Error: trying to get an invalid column in 'extract_col'\n"); } return(v); } // sets M[,col_to_set] := v void replace_col(matrix *M, int col_to_set, vector *v){ int j; if(!(length_vector(v) == nrow_matrix(M))){ oops("Error: dimensions in replace_col\n"); } if(col_to_set >= 0 && col_to_set < ncol_matrix(M)){ for(j = 0; j < nrow_matrix(M); j++){ ME(M,j,col_to_set) = VE(v,j); } } else { oops("Error: trying to get an invalid column in 'replace_col'\n"); } } void LevenbergMarquardt(matrix *S,matrix *SI,vector *U,vector *delta,double *lm,double *step) { // {{{ int i,nrow; double ss=0; matrix *S2; if(!(length_vector(U) == nrow_matrix(S))){ oops("Error: LM : S and U not consistent\n"); } if(!(length_vector(U) == length_vector(delta))){ oops("Error: LM : delta and U not consistent\n"); } nrow=length_vector(delta); malloc_mat(nrow,nrow,S2); for (i=0;i *lm ) { MxA(S,S,S2); for (i=0;i0.0001) scl_vec_mult(*step,delta,delta); free_mat(S2); } // }}} void readXt2(int *antpers,int *nx,int *p,double *designX, double *start,double *stop,int *status,int pers,matrix *X,double time) { // {{{ int j,c,count; for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if ((start[c]=time)) { for(j=0;j<*p;j++){ ME(X,count,j) = designX[j*(*nx)+c]; } if (time==stop[c] && status[c]==1) { pers=count; } count=count+1; } } } // }}} void readXt(int *antpers,int *nx,int *p,double *designX,double *start,double *stop,int *status,int pers,matrix *X,double time,int *clusters,int *cluster,int *id) { // {{{ int j,c,count; for (c=0,count=0;((c<*nx) && (count!=*antpers));c++){ if ((start[c]=time)) { for(j=0;j<*p;j++) { ME(X,id[c],j) = designX[j*(*nx)+c]; } cluster[id[c]]=clusters[c]; if (time==stop[c] && status[c]==1) { pers=id[c]; } count=count+1; } } } // }}} void readXZt(int *antpers,int *nx,int *px,double *designX,int *pg,double *designG, 
double *start,double *stop,int *status,int pers,matrix *X, matrix *WX,matrix *Z,matrix *WZ,double time,int *clusters, int *cluster,int *ls,int stat,int l,int *id,int s,int medw) { // {{{ int j,c,count,pmax; pmax=max(*pg,*px); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if ((start[c]=time)) { cluster[id[c]]=clusters[c]; for(j=0;j=time)) { // cluster[id[c]]=clusters[c]; for(j=0;j #include #include "matrix.h" /* ====================================================== */ void Gtranssurv(times,Ntimes,designX,nx,px,designG,ng,pg,antpers,start,stop, betaS,Nit,cu,vcu,loglike,Iinv,Vbeta,detail,sim,antsim, rani,Rvcu,RVbeta,test,testOBS,Ut,simUt,Uit,id,status,wscore, score,dhatMit,dhatMitiid,retur,exppar,sym,mlestart,stratum) double *designX,*designG,*times,*betaS,*start,*stop,*cu,*loglike,*Vbeta,*RVbeta, *vcu,*Rvcu,*Iinv,*test,*testOBS,*Ut,*simUt,*Uit,*score,*dhatMit,*dhatMitiid; int *nx,*px,*ng,*pg,*antpers,*Ntimes,*Nit,*detail,*sim,*antsim,*rani,*id,*status, *wscore,*retur,*exppar,*sym,*mlestart,*stratum; { // {{{ matrix *ldesignX,*cdesG,*ldesignG,*cdesX,*cdesX2,*cdesX3,*cdesX4,*CtVUCt,*A,*AI; matrix *dYI,*Ct,*dM1M2,*M1M2t,*COV,*ZX,*ddesG,*ZP,*ZPX; matrix *tmp1,*tmp2,*tmp5,*tmp3,*dS,*S1,*SI,*S2,*M1,*VU,*VUI, *tmp6; // Added tmp6 matrix *RobVbeta,*Delta,*tmpM1,*Utt,*Delta2,*tmpM2; matrix *St[*Ntimes],*M1M2[*Ntimes],*C[*Ntimes],*ZXAIs[*Ntimes],*dYIt[*Ntimes]; matrix *dW3t[*antpers],*W3t[*antpers],*W4t[*antpers],*W2t[*antpers],*AIxit[*antpers],*Uti[*antpers],*tmp4,*Fst[(*Ntimes)*(*Ntimes)]; matrix *dG[*Ntimes],*cumdG,*Ft[*Ntimes],*ZcX2AIs[*Ntimes],*ZcX2[*Ntimes],*S0tI[*Ntimes],*Ident,*gt[*Ntimes],*q2t[*Ntimes],*G1mG2t[*Ntimes],*q1t[*antpers]; vector *dA,*VdA,*MdA,*delta,*zav,*lamt,*plamt,*dlamt; vector *xi,*zi,*U,*beta,*xtilde,*Gbeta,*zcol,*one,*difzzav; vector *offset,*weight,*ZXdA[*Ntimes],*varUthat[*Ntimes],*Uprofile; vector *ta,*ahatt,*risk; vector *tmpv1,*tmpv2,*rowX,*rowZ,*difX,*VdB,*lht; vector *dLamt[*antpers],*dAt[*Ntimes]; vector 
*W2[*antpers],*W3[*antpers],*reszpbeta,*res1dim; int t,c,robust=1,pers=0,i,j,k,l,s,it,count,pmax; int *ipers=calloc(*Ntimes,sizeof(int)); double time=0,dummy,ll; double tau,dhati,hati=0,random,sumscore; double norm_rand(); void GetRNGstate(),PutRNGstate(); for(j=0;j<*Ntimes;j++) { malloc_mat(*px,*px,Ft[j]); malloc_mat(*pg,*px,ZcX2AIs[j]); malloc_mat(*pg,*px,gt[j]); malloc_mat(*pg,*px,G1mG2t[j]); malloc_mat(*pg,*px,q2t[j]); malloc_mat(*pg,*px,ZcX2[j]); malloc_mat(*px,*px,S0tI[j]); malloc_mat(*px,*pg,dG[j]); malloc_mat(*px,*pg,C[j]); malloc_mat(*pg,*px,M1M2[j]); malloc_mat(*pg,*px,ZXAIs[j]); malloc_mat(*px,*pg,dYIt[j]); malloc_vec(*px,dAt[j]); malloc_vec(*pg,ZXdA[j]); malloc_mat(*pg,*pg,St[j]); malloc_vec(*pg,varUthat[j]); for(i=0;i<=j;i++){ malloc_mat(*px,*px,Fst[j*(*Ntimes)+i]); } } for (j=0;j<*antpers;j++) { malloc_vec(*Ntimes,dLamt[j]); malloc_mat(*Ntimes,*px,W3t[j]); malloc_mat(*Ntimes,*px,dW3t[j]); malloc_mat(*Ntimes,*px,W4t[j]); malloc_mat(*Ntimes,*pg,W2t[j]); malloc_mat(*Ntimes,*pg,Uti[j]); malloc_vec(*pg,W2[j]); malloc_vec(*px,W3[j]); malloc_mat(*Ntimes,*pg,q1t[j]); malloc_mat(*Ntimes,*px,AIxit[j]); } malloc_mat(*Ntimes,*px,Delta); malloc_mat(*Ntimes,*px,tmpM1); malloc_mat(*Ntimes,*pg,Delta2); malloc_mat(*Ntimes,*pg,tmpM2); malloc_mat(*Ntimes,*pg,Utt); malloc_mats(*antpers,*px,&ldesignX,&cdesX,&cdesX2,&cdesX3,&cdesX4,NULL); malloc_mats(*antpers,*pg,&ZP,&cdesG,&ldesignG,&ddesG,NULL); malloc_mats(*px,*px,&tmp4,&Ident,&COV,&A,&AI,&M1,&CtVUCt,NULL); malloc_mats(*pg,*pg,&RobVbeta,&tmp1,&tmp2,&dS,&S1,&S2,&SI,&VU,&VUI,NULL); malloc_mats(*pg,*px,&tmp5,&tmp3,&ZX,&dM1M2,&M1M2t,NULL); malloc_mats(*px,*pg,&cumdG,&ZPX,&dYI,&Ct,NULL); malloc_mat(*px,*pg,tmp6); malloc_vec(1,reszpbeta); malloc_vec(1,res1dim); malloc_vec(*Ntimes,lht); malloc_vecs(*antpers,&risk,&weight,&dlamt,&plamt,&lamt,&zcol,&Gbeta,&one,&offset,NULL); malloc_vecs(*px,&ahatt,&tmpv1,&difX,&VdB,&rowX,&xi,&dA,&VdA,&MdA,NULL); malloc_vecs(*px,&ta,&xtilde,NULL); 
malloc_vecs(*pg,&tmpv2,&rowZ,&zi,&U,&beta,&delta,&zav,&difzzav,&Uprofile,NULL); identity_matrix(Ident); // if (*px>=*pg){ pmax=*px; } else { pmax=*pg; } pmax=max(*px,*pg); ll=0; vec_ones(one); for(j=0;j<*pg;j++){ VE(beta,j)=betaS[j]; } vec_ones(difX); cu[0]=times[0]; // }}} /* Main procedure ================================== */ for (it=0;it<*Nit;it++){ vec_zeros(U); mat_zeros(S1); sumscore=0; for (s=1;s<*Ntimes;s++){ // {{{ time=times[s]; mat_zeros(ldesignX); mat_zeros(ldesignG); // vec_zeros(risk); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { // {{{ if ((start[c]=time)) { VE(risk,id[c])=1.0; for(j=0;j=0) // first time use update hazard est for (j=0;j<*antpers;j++){ if (s<0 && j<5 ) { Rprintf(" %ld %ld \n",(long int) s, (long int)j); print_vec(zi); } extract_row(ddesG,j,zi); scl_vec_mult(VE(lamt,j),zi,zi); replace_row(ZP,j,zi); } MtA(ldesignX,ZP,ZPX); MxA(AI,ZPX,tmp6); // Note the use of tmp6 here, instead of tmp3 mat_subtr(dG[s-1],tmp6,dG[s]); // Note the use of tmp6 here, instead of tmp3 if (s<0) { Rprintf(" %lf \n",ME(A,0,0)); print_mat(ZPX); print_mat(tmp3); print_mat(dG[s]); } MxA(ZXAIs[s],ZPX,SI); mat_transp(SI,tmp2); MtA(ldesignG,ZP,tmp1); mat_subtr( tmp1,tmp2, dS); if (s<0) { Rprintf("=================== %lf \n",ME(A,0,0)); print_mat(tmp1); print_mat(tmp2); print_mat(dS); } if (*sym==1) { mat_transp(dS,tmp1); mat_add(tmp1,dS,dS); scl_mat_mult(0.5,dS,dS); } /* else {m_transp(dS,tmp1); sm_mlt(1,tmp1,dS); } */ mat_add(dS,S1,S1); scl_mat_mult(1.0,S1,St[s]); /* variance and other things */ if (it==((*Nit)-1)) { // {{{ replace_row(Utt,s,U); for (j=0;j<*px;j++) { // {{{ for (i=0;i<*antpers;i++){ dummy=ME(ldesignX,i,j); extract_row(cdesX2,i,xi); scl_vec_mult(dummy,xi,xi); replace_row(cdesX3,i,xi); } MtA(ldesignX,cdesX3,A); MxA(AI,A,tmp4); Mv(tmp4,dA,xi); for (k=0;k<*px;k++){ ME(Ft[s],j,k)=VE(xi,k); } VE(lht,s)=VE(lht,s-1)-ME(A,0,0)*(ME(AI,0,0)*ME(AI,0,0)); /* Rprintf(" %ld %lf %lf \n",s,lht->ve[s],AI->me[0][0]); */ MtA(ldesignG,cdesX3,ZcX2[s]); /* 
m_mlt(ZcX2[s],AI,ZcX2AIs[s]); */ MxA(ZX,tmp4,tmp3); mat_subtr(tmp3,ZcX2[s],tmp5); Mv(tmp5,dA,zi); for (k=0;k<*pg;k++) { ME(G1mG2t[s],k,j)=ME(G1mG2t[s],k,j)+VE(zi,k); } } // }}} /* for (i=0;i<*px;i++){ for (j=0;j<*pg;j++) dM1M2->me[j][i]=dA->ve[i]*difzzav->ve[j]; for (i=0;i<*pg;i++) for (j=0;j<*pg;j++) VU->me[i][j]=VU->me[i][j]+difzzav->ve[i]*difzzav->ve[j]; m_mlt(AI,ZPX,dYIt[s]); m_sub(Ct,dYIt[s],Ct); C[s]=m_copy(Ct,C[s]); v_star(dA,dA,VdA); m_add(dM1M2,M1M2t,M1M2t); M1M2[s]=m_copy(M1M2t,M1M2[s]); for (k=1;k<=*px;k++) vcu[k*(*Ntimes)+s]=VdA->ve[k-1]+vcu[k*(*Ntimes)+s-1]; */ for (j=0;j<*antpers;j++){ extract_row(ldesignX,j,xi); Mv(S0tI[s],xi,rowX); replace_row(AIxit[j],s,rowX); extract_row(ldesignG,j,zi); Mv(ZX,rowX,rowZ); vec_subtr(zi,rowZ,zi); replace_row(q1t[j],s,zi); VE(dLamt[j],s)=VE(plamt,j)*vec_sum(vec_star(xi,dA,rowX)); } } // }}} /* if (it==((*Nit)-1)) */ } // }}} /* Ntimes */ invertS(S1,SI,1); Mv(SI,U,delta); vec_add(beta,delta,beta); if (*detail>=1) { // {{{ Rprintf("====================Iteration %ld ==================== \n",(long int) it); Rprintf("delta \n"); print_vec(delta); Rprintf("Estimate beta \n"); print_vec(beta); Rprintf("Score D l\n"); print_vec(U); Rprintf("Information -D^2 l\n"); print_mat(SI); Rprintf("simple D2 l\n"); print_mat(S1); } // }}} for (k=0;k<*pg;k++) sumscore += VE(U,k); if ((fabs(sumscore)<0.000001) & (it<*Nit-2)) it=*Nit-2; } /* it */ for (k=0;k<*pg;k++) score[k]=VE(U,k); /* computation of q(t) */ for (s=1;s<*Ntimes;s++) { // {{{ mat_zeros(M1M2t); for (t=s;t<*Ntimes;t++) { identity_matrix(tmp4); identity_matrix(M1); for (k=s;ks) { scl_mat_mult(1,M1,tmp4); } mat_subtr(Ident,Ft[k],A); MxA(tmp4,A,M1); } if (s<0) { Rprintf(" %ld %ld %lf \n",(long int) s,(long int) t,ME(M1,0,0)); matrix *tempTranspose; malloc_mat(ncol_matrix(G1mG2t[t]), nrow_matrix(G1mG2t[t]),tempTranspose); print_mat(mat_transp(G1mG2t[t],tempTranspose)); free_mat(tempTranspose); } MxA(G1mG2t[t],M1,dM1M2); mat_add(dM1M2,M1M2t,M1M2t); } scl_mat_mult(1,M1M2t,q2t[s]); 
/* m_mlt(M1M2t,S0tI[s],q2t[s]); */ if (s<0){ matrix *tempTranspose; malloc_mat(ncol_matrix(q2t[s]), nrow_matrix(q2t[s]),tempTranspose); print_mat(mat_transp(q2t[s],tempTranspose)); free_mat(tempTranspose); } } // }}} /* terms for robust variances ============================ */ if (robust==1) { // {{{ for (s=1;s<*Ntimes;s++) { // {{{ time=times[s]; cu[s]=times[s]; vcu[s]=times[s]; Rvcu[s]=times[s]; Ut[s]=times[s]; /* terms for robust variance */ for (i=0;i<*antpers;i++) { extract_row(AIxit[i],s,xi); Mv(q2t[s],xi,rowZ); extract_row(q1t[i],s,zi); if (s==0) { print_vec(rowZ); print_vec(zi); } vec_add(zi,rowZ,rowZ); if (s==0) { print_vec(rowZ); } /* mv_mlt(ZXAIs[s],xi,tmpv2); v_sub(zi,tmpv2,tmpv2); */ if (i==ipers[s]) { for (j=0;j<*pg;j++) { for (k=0;k<*pg;k++) { ME(VU,j,k) += VE(rowZ,j)*VE(rowZ,k); } } } scl_vec_mult(VE(dLamt[i],s),rowZ,tmpv2); vec_subtr(W2[i],tmpv2,W2[i]); if (i==ipers[s]) { vec_add(rowZ,W2[i],W2[i]); } /* if (*ratesim==1) {sv_mlt(hati,tmpv2,rowZ); v_sub(W2[i],rowZ,W2[i]);} */ replace_row(W2t[i],s,W2[i]); vec_zeros(W3[i]); for (t=1;t<=s;t++) { if (i==0) { identity_matrix(tmp4); identity_matrix(M1); for (k=t;k<=s;k++) { if (k>t) { scl_mat_mult(1.0,M1,tmp4); } if (k>t || t==s) { mat_subtr(Ident,Ft[k],A); MxA(tmp4,A,M1); } } scl_mat_mult(1,M1,Fst[s*(*Ntimes)+t]); } /* Fst[s*(*Ntimes)+t]->me[0][0]=exp(-lht->ve[t]+lht->ve[s]); */ extract_row(AIxit[i],t,xi); vM(Fst[s*(*Ntimes)+t],xi,rowX); scl_vec_mult(VE(dLamt[i],t),rowX,tmpv1); vec_subtr(W3[i],tmpv1,W3[i]); if (i==ipers[t]){ vec_add(rowX,W3[i],W3[i]); } } replace_row(W3t[i],s,W3[i]); /* if (hati>0) lle=lle+log(hati); llo=llo+hati; */ /* if (*ratesim==1) {sv_mlt(hati,rowX,rowX); v_sub(W3[i],rowX,W3[i]);} */ if (*retur==1){ dhatMit[i*(*Ntimes)+s]=1*(i==pers)-hati; } } /* i=1..antpers */ } // }}} /* s=1 ..Ntimes */ MxA(SI,VU,S2); MxA(S2,SI,VU); /* ROBUST VARIANCES */ for (s=1;s<*Ntimes;s++) { // {{{ if (s<0){ print_mat(dG[s]); } vec_zeros(VdB); for (i=0;i<*antpers;i++) { Mv(SI,W2[i],tmpv2); 
Mv(dG[s],tmpv2,rowX); extract_row(W3t[i],s,xi); if (s>*Ntimes-5 && i<0){ print_vec(xi); } vec_add(xi,rowX,difX); replace_row(W4t[i],s,difX); if (i==-5){ print_vec(difX); } vec_star(difX,difX,tmpv1); vec_add(tmpv1,VdB,VdB); Mv(St[s],tmpv2,rowZ); extract_row(W2t[i],s,tmpv2); vec_subtr(tmpv2,rowZ,zi); replace_row(Uti[i],s,zi); vec_star(zi,zi,tmpv2); vec_add(tmpv2,varUthat[s],varUthat[s]); if (s==1) { for (j=0;j<*pg;j++){ for (k=0;k<*pg;k++){ ME(RobVbeta,j,k) += VE(W2[i],j)*VE(W2[i],k); } } } if (*retur==1) { mat_zeros(ldesignX); mat_zeros(ldesignG); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if ((start[c]=time)) { // VE(risk,id[c])=1.0; for(j=0;jtestOBS[i-1]) testOBS[i-1]=VE(xi,i-1); } scl_vec_mult(time/tau,rowX,difX); for (i=1;i<=*px;i++) { VE(xi,i-1)=cu[i*(*Ntimes)+s]; } vec_subtr(xi,difX,difX); for (i=0;i<*px;i++) { VE(difX,i)=fabs(VE(difX,i)); l=(*px+i); if (VE(difX,i)>testOBS[l]) testOBS[l]=VE(difX,i); } if (*wscore>=1) { /* sup beregnes i R */ if ((s>*wscore) && (s<*Ntimes-*wscore)) { extract_row(Utt,s,rowZ); for (i=0;i<*pg;i++) { VE(rowZ,i)=VE(rowZ,i)/sqrt(VE(varUthat[s],i)); } replace_row(Utt,s,rowZ); /* scaled score process */ } else { vec_zeros(rowZ); replace_row(Utt,s,rowZ); } } for (k=1;k<=*pg;k++){ Ut[k*(*Ntimes)+s]=ME(Utt,s,k-1); } } /*s=1..Ntimes Beregning af obs teststrrelser */ for (k=1;k<*antsim;k++) { mat_zeros(Delta); mat_zeros(Delta2); vec_zeros(tmpv1); for (i=0;i<*antpers;i++) { /* random=gasdev(&idum); */ random=norm_rand(); scl_mat_mult(random,W4t[i],tmpM1); mat_add(tmpM1,Delta,Delta); scl_mat_mult(random,Uti[i],tmpM2); mat_add(tmpM2,Delta2,Delta2); } extract_row(Delta,*Ntimes-1,tmpv1); for (s=1;s<*Ntimes;s++) { time=times[s]-times[0]; scl_vec_mult(time/tau,tmpv1,xi); extract_row(Delta,s,rowX); vec_subtr(rowX,xi,difX); for (i=0;i<*px;i++) { VE(difX,i)=fabs(VE(difX,i)); l=(*px+i); if (VE(difX,i)>test[l*(*antsim)+k]) test[l*(*antsim)+k]=VE(difX,i); VE(xi,i)=fabs(ME(Delta,s,i))/sqrt(Rvcu[(i+1)*(*Ntimes)+s]); if 
(VE(xi,i)>test[i*(*antsim)+k]) test[i*(*antsim)+k]=VE(xi,i); } if (*wscore>=1) { extract_row(Delta2,s,zi); if ((s>*wscore) && (s<*Ntimes-*wscore)) { for (i=0;i<*pg;i++) { VE(zi,i)=fabs(ME(Delta2,s,i))/sqrt(VE(varUthat[s],i)); if (VE(zi,i)>simUt[i*(*antsim)+k]) simUt[i*(*antsim)+k]=VE(zi,i); } if (k<50) { for (i=0;i<*pg;i++) { l=(k-1)*(*pg)+i; Uit[l*(*Ntimes)+s]=ME(Delta2,s,i)/sqrt(VE(varUthat[s],i)); } } } } else { /* weigted score */ extract_row(Delta2,s,zi); for (i=0;i<*pg;i++) { if (fabs(VE(zi,i))>simUt[i*(*antsim)+k]) simUt[i*(*antsim)+k]=fabs(VE(zi,i)); } if (k<50) { for (i=0;i<*pg;i++) { l=(k-1)*(*pg)+i; Uit[l*(*Ntimes)+s]=ME(Delta2,s,i); } } } /* else wscore=0 */ } /* s=1..Ntims */ } /* k=1..antsim */ PutRNGstate(); /* to use R random normals */ } // }}} /* sim==1 */ // {{{ freeing free_mats(&cumdG,&tmp4,&Ident,&ddesG,&Utt,&tmpM2,&VUI,&ZX,&COV, &dM1M2,&AI,&A,&tmp1,&tmp2,&tmp5,&tmp3,&ldesignX,&cdesX, &cdesX2,&cdesX4,&cdesX3,&cdesG,&ldesignG,&M1,&dS,&S1,&SI,NULL); free_mats(&tmp6,&S2,&VU,&ZP,&ZPX,&dYI,&Ct,&M1M2t,&RobVbeta,&Delta,&Delta2, &tmpM1,&CtVUCt,NULL); free_vecs(&lht,&risk,&ta,&ahatt,&Uprofile,&dlamt,&plamt,&lamt,&one,&xi,&zcol,&Gbeta,&VdA,&dA,&MdA,&xtilde,&zi,&U,&beta,&delta,&zav,&difzzav,&weight,&offset,&tmpv1,&tmpv2,&rowX,&rowZ,&difX,&VdB,&reszpbeta,&res1dim,NULL); for (j=0;j<*antpers;j++) { free_vec(dLamt[j]); free_mat(W3t[j]); free_mat(dW3t[j]); free_mat(W4t[j]); free_mat(W2t[j]); free_mat(Uti[j]); free_vec(W2[j]); free_vec(W3[j]); free_mat(q1t[j]); free_mat(AIxit[j]); } for (j=0;j<*Ntimes;j++) { free_mat(Ft[j]); free_mat(ZcX2AIs[j]); free_mat(gt[j]); free_mat(G1mG2t[j]); free_mat(q2t[j]); free_mat(ZcX2[j]); free_mat(S0tI[j]); free_mat(dG[j]); free_mat(C[j]); free_mat(M1M2[j]); free_mat(ZXAIs[j]); free_mat(dYIt[j]); free_vec(dAt[j]); free_vec(ZXdA[j]); free_mat(St[j]); free_vec(varUthat[j]); for(i=0;i<=j;i++) free_mat(Fst[j*(*Ntimes)+i]); } free(ipers); // }}} } timereg/src/smooth2.c0000644000175000017500000000302514077524411014373 0ustar 
nileshnilesh//#include #include #include "matrix.h" void smooth2B(designX,nx,p,bhat,nb,b,degree,coef) double *designX,*bhat,*b; int *coef,*nx,*p,*degree,*nb; { matrix *mat1,*mat2,*I,*XWy,*Y,*sm1,*sm2,*sY,*RES; matrix *sm1sm2t; // not in original int med,j,k,s,count,starti=0,d; double x,w; malloc_mats(*nx,*degree+1,&mat1,&mat2,NULL); malloc_mats(*nx,*p-1,&Y,NULL); malloc_mats((*degree+1),*p-1,&XWy,&RES,NULL); malloc_mats((*degree+1),*degree+1,&I,NULL); for (s=0;s<*nb;s++){ med=0; x=bhat[s]; count=0; for (j=starti;((j<*nx) && (designX[j]x-(*b)) && (med==0)) {med=1; starti=j;} if (fabs(designX[j]-x)<*b) { w=tukey(designX[j]-x,*b);/*Rprintf("%lf %lf \n",designX[j]-x,w);*/ ME(mat1,count,0)=1.0; ME(mat2,count,0)=w; for (d=1;d<=*degree;d++) { ME(mat1,count,d)=pow(designX[j]-x,d); ME(mat2,count,d)=w*ME(mat1,count,d); } for (k=1;k<*p;k++){ ME(Y,count,k-1)=w*designX[k*(*nx)+j]; } count=count+1; } } /* */ malloc_mats(count,*degree+1,&sm1,&sm2,NULL); malloc_mats(count,*p-1,&sY,NULL); malloc_mat(count,count,sm1sm2t); mat_subsec(mat1,0,0,count-1,*degree,sm1); mat_subsec(mat2,0,0,count-1,*degree,sm2); mat_subsec(Y,0,0,count-1,*p-2,sY); MtA(sm1,sm2,sm1sm2t); invert(sm1sm2t,I); MtA(sm1,sY,XWy); MxA(I,XWy,RES); for (k=1;k<*p;k++){ bhat[k*(*nb)+s]=ME(RES,*coef,k-1); } free_mats(&sm1,&sm2,&sY,sm1sm2t,NULL); } free_mats(&mat1,&mat2,&Y,&XWy,&RES,&I,NULL); } timereg/src/breslow.c0000644000175000017500000005645614077524411014475 0ustar nileshnilesh//#include #include #include "matrix.h" void OSbreslow(times,Ntimes,designX,nx,p,antpers,start,stop,nb,bhat,cu,vcu,it,b,degree,schoen,sim,antsim,test,rani,testOBS,rvcu,cumlam,nullresid,status,id,sim2,Ut,simUt,weighted,robust) double *designX,*times,*start,*stop,*cu,*vcu,*bhat,*b,*schoen,*test,*testOBS,*rvcu,*cumlam,*Ut,*simUt; int *nx,*p,*antpers,*Ntimes,*nb,*it,*degree,*rani,*sim,*antsim,*nullresid,*status,*id,*sim2,*weighted,*robust; { matrix *ldesignX,*A,*AI,*AIX,*cdesignX,*XmavX,*cXmavX,*Aav; vector 
*diag,*dB,*dN,*VdB,*AIXdN,*AIXlamt,*ta,*bhatt,*pbhat,*plamt,*avx,*lrisk; vector *ssrow2,*ssrow,*vtmp,*xi,*rowX,*cumi[*antpers],*difX,*cumBLi[*antpers],*Btau,*Base[*antpers],*score; matrix *cumBL[*antpers],*cumB[*antpers],*BLsubbetaLam[*antpers]; matrix *Delta2,*Delta,*tmpM1,*tmpM2,*varBL; int supsup=0,itt,i,j,k,s,c,count,pers=0, *imin=calloc(1,sizeof(int)), *coef=calloc(1,sizeof(int)),*ps=calloc(1,sizeof(int)); double time2,rr,time=0,time1,dummy,dtime,S0,lam0t,sdBt,tau,random; double *Basei=calloc(*antpers,sizeof(double)),rvarbase, *vcudif=calloc((*Ntimes)*(*p+2),sizeof(double)); double norm_rand(); void GetRNGstate(),PutRNGstate(); if (*sim==1) { malloc_mat(*Ntimes,*p,Delta); malloc_mat(*Ntimes,*p,Delta2); malloc_mat(*Ntimes,*p,tmpM1); malloc_mat(*Ntimes,*p,tmpM2); }; if (*robust==1) { malloc_mat(*Ntimes,*p,varBL); for (j=0;j<*antpers;j++) { malloc_mat(*Ntimes,(*p)+1,cumB[j]); malloc_vec(*p,cumi[j]); malloc_mat(*Ntimes,*p,BLsubbetaLam[j]); malloc_mat(*Ntimes,*p,cumBL[j]); malloc_vec(*p,cumBLi[j]); malloc_vec(*Ntimes,Base[j]); Basei[j]=0.0; } } malloc_mat(*antpers,*p,ldesignX); malloc_mat(*antpers,*p,cdesignX); malloc_mat(*antpers,*p,XmavX); malloc_mat(*antpers,*p,cXmavX); malloc_mat(*p,*antpers,AIX); malloc_mat(*p,*p,A); malloc_mat(*p,*p,AI); malloc_mat(*p,*p,Aav); malloc_vec(*p,score); malloc_vec(*p,ssrow2); malloc_vec(*p,ssrow); malloc_vec(*p,Btau); malloc_vec(*p,vtmp); malloc_vec(*p,difX); malloc_vec(*p,xi); malloc_vec(*p,rowX); malloc_vec(*p,avx); malloc_vec(*p,diag); malloc_vec(*p,dB); malloc_vec(*p,VdB); malloc_vec(*p,AIXdN); malloc_vec(*p,AIXlamt); malloc_vec(*p,bhatt); malloc_vec(*p,dN); malloc_vec(*p,pbhat); malloc_vec(*p,plamt); malloc_vec(*p,lrisk); malloc_vec(*nb,ta); coef[0]=1; ps[0]=(*p)+2; tau=times[*Ntimes-1]; for (itt=0;itt<*it;itt++){ vec_zeros(score); for (s=1;s<*Ntimes;s++){ time=times[s]; dtime=time-times[s-1]; vec_zeros(lrisk); mat_zeros(ldesignX); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++){ if ((start[c]=time)) { for(j=0;j<*p;j++) 
{ ME(ldesignX,id[c],j)=designX[j*(*nx)+c]; } VE(lrisk,id[c])=1; if (time==stop[c] && status[c]==1){ pers=id[c]; } count=count+1; } } for(j=0;j<*nb;j++){ VE(ta,j)=fabs(bhat[j]-time); } dummy=vec_min(ta,imin); lam0t=bhat[1*(*nb)+(*imin)]; for(j=2;j<=(*p)+1;j++){ VE(bhatt,j-2)=bhat[j*(*nb)+(*imin)]; } Mv(ldesignX,bhatt,pbhat); for (j=0;j<*antpers;j++) { VE(plamt,j)=VE(lrisk,j)*exp(VE(pbhat,j)); scl_vec_mult(VE(plamt,j),extract_row(ldesignX,j,dB),dB); replace_row(cdesignX,j,dB); /* sampling corrected design */ } S0=vec_sum(plamt); vM(ldesignX,plamt,avx); scl_vec_mult(1/S0,avx,avx); for (j=0;j<*p;j++){ for (i=0;i<*p;i++) { ME(Aav,j,i)=VE(avx,i)*VE(avx,j)*S0; } } MtA(cdesignX,ldesignX,A); mat_subtr(A,Aav,A); invert(A,AI); extract_row(ldesignX,pers,AIXdN); vec_subtr(AIXdN,avx,AIXdN); Mv(AI,AIXdN,dB); vec_add(dB,score,score); schoen[s]=time; cu[s]=time; vcu[s]=time; rvcu[s]=time; cu[1*(*Ntimes)+s]=cu[1*(*Ntimes)+s-1]+(1/S0); vcu[1*(*Ntimes)+s]=0; for (k=2;k<=(*p)+1;k++){ cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s-1]+dtime*VE(bhatt,k-2)+VE(dB,k-2)/lam0t; cumlam[k*(*Ntimes)+s]=cumlam[k*(*Ntimes)+s-1]+(dtime*VE(bhatt,k-2)*lam0t)+VE(dB,k-2); if (itt==(*it-1)) { schoen[(k-1)*(*Ntimes)+s]=VE(dB,k-2)*S0; vcu[k*(*Ntimes)+s]=vcu[k*(*Ntimes)+s-1]+(dtime/lam0t)*ME(AI,k-2,k-2); } } /* Rprintf(" \n"); */ if (itt==(*it-1)) { if (*robust==1) { vec_zeros(VdB); rvarbase=0; for (j=0;j<*antpers;j++) { extract_row(ldesignX,j,rowX); vec_subtr(rowX,avx,vtmp); Mv(AI,vtmp,xi); if (*nullresid>=0) { k=*nullresid; rr=VE(pbhat,j)+VE(rowX,k)*(cu[(k+2)*(*Ntimes)+(*Ntimes-1)]/tau-VE(bhatt,k)); rr=VE(lrisk,j)*exp(rr); } else { rr=VE(plamt,j); } scl_vec_mult(rr/S0,xi,rowX); vec_subtr(cumBLi[j],rowX,cumBLi[j]); if (j==pers){ vec_add(xi,cumBLi[j],cumBLi[j]); } replace_row(cumBL[j],s,cumBLi[j]); /* BLAM(t) sum iid */ vec_star(avx,xi,rowX); dummy=vec_sum(rowX); dummy=(1/S0)-dummy; Basei[j]=Basei[j]-dummy*rr/S0; if (j==pers){ Basei[j]=dummy+Basei[j]; } VE(Base[j],s)=Basei[j]; /* Baseline sum iid */ 
rvarbase=rvarbase+Basei[j]*Basei[j]; ME(cumB[j],s,0) =Basei[j]; scl_vec_mult(1/lam0t,xi,xi); scl_vec_mult(rr/S0,xi,rowX); vec_subtr(cumi[j],rowX,cumi[j]); if (j==pers) { vec_add(xi,cumi[j],cumi[j]); } /* set_row(cumB[j],s,cumi[j]); */ /* B(t) sum iid */ for (k=1;k<(*p)+1;k++) { ME(cumB[j],s,k)=VE(cumi[j],k-1); } vec_star(cumi[j],cumi[j],difX); vec_add(difX,VdB,VdB); } rvcu[1*(*Ntimes)+s]=rvarbase; for (k=2;k<(*p)+2;k++) { rvcu[k*(*Ntimes)+s]=VE(VdB,k-2); } } } } /* s */ smoothB(cu,Ntimes,ps,bhat,nb,b,degree,coef); } /* itterations lkke */ for (i=2;i<(*p)+2;i++) { VE(Btau,i-2)=cu[i*(*Ntimes)+(*Ntimes-1)]; } cu[0]=times[0]; vcu[0]=times[0]; tau=time; rvcu[0]=times[0]; /* Beregning af iid bidrag til BLam(t) - beta Lam(t) */ if (*robust==1){ for (s=1;s<*Ntimes;s++) { vec_zeros(VdB); for (j=0;j<*antpers;j++) { scl_vec_mult(1/tau,cumi[j],rowX); scl_vec_mult(cu[1*(*Ntimes)+s],rowX,difX); scl_vec_mult(VE(Base[j],s)/tau,Btau,xi); extract_row(cumBL[j],s,vtmp); vec_add(vtmp,xi,xi); vec_subtr(xi,difX,xi); replace_row(BLsubbetaLam[j],s,xi); vec_star(xi,xi,difX); vec_add(difX,VdB,VdB); replace_row(varBL,s,VdB); } } /* s */ } /* korrektion for lam0t i \int beta(s) lam0t(s) ds */ /* for (s=1;s<*Ntimes;s++) { time=times[s]; v_zero(dN);dtime=time-times[s-1]; for(j=0;j<*nb;j++) ta->ve[j]=fabs(bhat[j]-time); dummy=v_min(ta,imin); lam0t=bhat[1*(*nb)+(*imin)]; for(j=2;j<=(*p)+1;j++) {bhatt->ve[j-2]=bhat[j*(*nb)+(*imin)]/lam0t; cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s-1]+dtime*bhatt->ve[k-2]; }; } */ if (*sim==1) { if (*sim2!=1) { ps[0]=*p+1; comptest(times,Ntimes,ps,cu,rvcu,vcudif,antsim,test,testOBS,Ut,simUt, cumB,weighted,antpers); /* for (s=0;s<*Ntimes;s++) { for(j=0;j<(*p)+2;j++) Rprintf(" %lf ",cu[j*(*Ntimes)+s]); Rprintf(" \n"); } */ } else { for (i=2;i<=*p+1;i++){ VE(bhatt,i-2)=cu[i*(*Ntimes)+(*Ntimes-1)]; } for (s=1;s<*Ntimes-1;s++){ /* Beregning af obs teststrrelser */ time=times[s]; dtime=time-times[s-1]; for (i=2;i<=*p+1;i++) { 
VE(xi,i-2)=fabs(cu[i*(*Ntimes)+s])/sqrt(rvcu[i*(*Ntimes)+s]); if (VE(xi,i-2)>testOBS[i-2]){ testOBS[i-2]=VE(xi,i-2); } } scl_vec_mult(time/tau,bhatt,difX); for (i=2;i<=*p+1;i++){ VE(xi,i-2)=cu[i*(*Ntimes)+s]; } vec_subtr(xi,difX,difX); vec_star(difX,difX,ssrow); for (i=0;i<*p;i++) {c=(*p+i); VE(difX,i)=fabs(VE(difX,i));/*sqrt(rvcu[(i+2)*(*Ntimes)+s]);*/ if (VE(difX,i)>testOBS[c]){ testOBS[c]=VE(difX,i);} c=2*(*p)+i; testOBS[c]=testOBS[c]+VE(ssrow,i)*dtime; } scl_vec_mult(1/tau,bhatt,rowX); scl_vec_mult(cu[1*(*Ntimes)+s],rowX,rowX); for (i=2;i<=*p+1;i++) { VE(xi,i-2)=cumlam[i*(*Ntimes)+s]; } vec_subtr(xi,rowX,difX); vec_star(difX,difX,ssrow); for (i=0;i<*p;i++) { c=3*(*p)+i; VE(difX,i)=fabs(VE(difX,i)); if (VE(difX,i)>testOBS[c]) testOBS[c]=VE(difX,i); c=4*(*p)+i; testOBS[c]=testOBS[c]+VE(ssrow,i)*dtime; } /* sup| BLAM(t)-beta*LAM(t)| */ /* Beregning af sup_a,s | B(a+t)-B(t) - gam t | */ if (supsup==1) { for (j=s+1;j<*Ntimes;j++){ time1=times[j]; time2=time1-time; scl_vec_mult(time2/tau,bhatt,difX); for (i=2;i<=*p+1;i++) { VE(xi,i-2)=cu[i*(*Ntimes)+s]; VE(vtmp,i-2)=cu[i*(*Ntimes)+j]; } vec_subtr(vtmp,xi,rowX); vec_subtr(rowX,difX,xi); c=5*(*p)+i; for (i=2;i<=*p+1;i++) { if (fabs(VE(xi,i-2))>testOBS[c]) testOBS[c]=fabs(VE(xi,i-2)); } } } /* supsup==1 */ } /*s=1..Ntimes Beregning af obs teststrrelser */ Rprintf(" Simulations start N= %ld \n",(long int) *antsim); GetRNGstate(); /* to use R random normals */ for (k=1;k<*antsim;k++) { if (k%50==0) Rprintf(" %ld Simulations \n",(long int) k); mat_zeros(Delta); mat_zeros(Delta2); vec_zeros(vtmp); for (i=0;i<*antpers;i++) { random=norm_rand(); scl_mat_mult(random,cumB[i],tmpM1); mat_add(tmpM1,Delta,Delta); if (*sim2==1) { scl_mat_mult(random,BLsubbetaLam[i],tmpM2); mat_add(tmpM2,Delta2,Delta2); } extract_row(cumB[i],*Ntimes-1,rowX); scl_vec_mult(random,rowX,rowX); vec_add(rowX,vtmp,vtmp); } for (s=1;s<*Ntimes;s++){ time=times[s]; dtime=time-times[s-1]; scl_vec_mult(time/tau,vtmp,xi); extract_row(Delta,s,rowX); 
vec_subtr(rowX,xi,difX); vec_star(difX,difX,ssrow); if (*sim2==1) { extract_row(Delta2,s,dB); vec_star(dB,dB,ssrow2); } for (i=0;i<*p;i++) { VE(difX,i)=fabs(VE(difX,i)); VE(dB,i)=fabs(VE(dB,i)); sdBt=sqrt(rvcu[(i+2)*(*Ntimes)+s]+0.03); VE(xi,i)=fabs(ME(Delta,s,i))/sdBt; if (VE(xi,i)>test[i*(*antsim)+k]) test[i*(*antsim)+k]=VE(xi,i); c=(*p+i); if (VE(difX,i)>test[c*(*antsim)+k]) test[c*(*antsim)+k]=VE(difX,i); c=2*(*p)+i; test[c*(*antsim)+k]=test[c*(*antsim)+k]+VE(ssrow,i)*dtime; if (*sim==1) { c=(3*(*p)+i); if ((VE(dB,i))>test[c*(*antsim)+k]) test[c*(*antsim)+k]=VE(dB,i); c=4*(*p)+i; test[c*(*antsim)+k]=test[c*(*antsim)+k]+VE(ssrow2,i)*dtime; } } if (supsup==1) { for (j=s+1;j<*Ntimes;j++){ /* Beregning af sup_a,s | B(a+t)-B(t) - gam t | */ time1=times[j]; time2=time1-time; scl_vec_mult(time2/tau,vtmp,difX); extract_row(Delta,j,xi); vec_subtr(xi,rowX,dB); vec_subtr(difX,dB,dB); for (i=0;i<=*p+1;i++) { c=5*(*p)+i; if (fabs(VE(dB,i))>test[c*(*antsim)+k]) test[c*(*antsim)+k]=fabs(VE(dB,i)); } } } /* supsup==1 */ } } } /* s=1..Ntimes k=1.. 
antsim, sim2==1*/ PutRNGstate(); /* to use R random normals */ } /* sim==1 */ if (*sim==1) { free_mat(Delta); free_mat(Delta2); free_mat(tmpM1); free_mat(tmpM2); } if (*robust==1) { free_mat(varBL); for (j=0;j<*antpers;j++) { free_mat(cumB[j]); free_vec(cumi[j]); free_vec(Base[j]); free_mat(cumBL[j]); free_vec(cumBLi[j]); free_mat(BLsubbetaLam[j]); } } free_vec(diag); free_vec(dB); free_vec(dN); free_vec(VdB); free_vec(AIXdN); free_vec(AIXlamt); free_vec(ta); free_vec(bhatt); free_vec(pbhat); free_vec(plamt); free_vec(avx); free_vec(lrisk); free_vec(ssrow2); free_vec(ssrow); free_vec(vtmp); free_vec(xi); free_vec(rowX); free_vec(difX); free_vec(Btau); free_mat(ldesignX); free_mat(A); free_mat(AI); free_mat(AIX); free_mat(cdesignX); free_mat(XmavX); free_mat(cXmavX); free_mat(Aav); free(coef); free(ps); free(imin); free(vcudif); free(Basei); } void semibreslow(times,Ntimes,designX, nx,px,designG, ng,pg,antpers, start,stop,nb, bhat,cu,vcu, rvcu,gamma,Vgamma, robVgamma,b,degree, it,sim,antsim, test,rani,testOBS, status,id,schoen, simUt,Ut,weighted,robust) double *designX,*times,*start,*stop,*cu,*vcu,*bhat,*b,*designG,*gamma,*Vgamma,*test,*testOBS,*rvcu,*robVgamma,*schoen,*simUt,*Ut; int *nx,*px,*antpers,*Ntimes,*nb,*ng,*pg,*it,*degree,*sim,*antsim, *rani,*status,*id,*weighted,*robust; { matrix *ldesignX, *A,*AI,*cdesignX,*ldesignG,*cdesignG; matrix *XmavX,*ZmavZ,*E2x,*E2z,*E2xz,*XX; matrix *S,*dCGam,*CGam,*ICGam,*VarKorG,*dC,*XZ,*ZZ,*ZZI,*XZAI; matrix *Ct,*C[*Ntimes],*Acorb[*Ntimes],*ZXAI,*tmpM4; matrix *RobVargam,*tmpM3,*cumB[*antpers]; matrix *W3t[*antpers],*W4t[*antpers],*AIxit[*antpers]; vector *dB,*dN,*VdB,*AIXdN,*AIXlamt,*ta,*bhatt,*pbhat,*plamt; vector *difX,*korG,*pghat,*gam,*dgam,*ZGdN,*IZGdN,*ZGlamt,*IZGlamt; vector *zi,*z1,*lrisk,*avx,*avz,*rowG,*xi,*rowX,*rowZ,*tmpv2; vector *cumi[*antpers],*W2[*antpers],*W3[*antpers]; vector *Base[*antpers]; int itt,i,j,k,s,c,count,pers=0,pmax, *imin=calloc(1,sizeof(int)), 
*coef=calloc(1,sizeof(int)),*ps=calloc(1,sizeof(int)); double time,dummy,dtime,lam0t,S0, *Basei=calloc((*antpers),sizeof(double)), *vcudif=calloc((*Ntimes)*(*px+2),sizeof(double)),dum2,rvarbase; if (*robust==1){ for (j=0;j<*antpers;j++) { malloc_mat(*Ntimes,*px,cumB[j]); malloc_vec(*px,cumi[j]); malloc_mat(*Ntimes,*px,W3t[j]); malloc_vec(*Ntimes,Base[j]); Basei[j]=0.0; malloc_mat(*Ntimes,*px+1,W4t[j]); malloc_vec(*pg,W2[j]); malloc_vec(*px,W3[j]); malloc_mat(*Ntimes,*px,AIxit[j]); } } malloc_mat(*antpers,*px,XmavX); malloc_mat(*antpers,*px,ldesignX); malloc_mat(*antpers,*px,cdesignX); malloc_mat(*antpers,*pg,ZmavZ); malloc_mat(*antpers,*pg,ldesignG); malloc_mat(*antpers,*pg,cdesignG); malloc_mat(*px,*px,XX); malloc_mat(*px,*px,E2x); malloc_mat(*px,*px,A); malloc_mat(*px,*px,AI); malloc_mat(*pg,*pg,tmpM3); malloc_mat(*pg,*pg,RobVargam); malloc_mat(*pg,*pg,E2z); malloc_mat(*pg,*pg,ZZ); malloc_mat(*pg,*pg,VarKorG); malloc_mat(*pg,*pg,ICGam); malloc_mat(*pg,*pg,CGam); malloc_mat(*pg,*pg,dCGam); malloc_mat(*pg,*pg,S); malloc_mat(*pg,*pg,ZZI); malloc_mat(*px,*pg,E2xz); malloc_mat(*px,*pg,XZ); malloc_mat(*px,*pg,XZAI); malloc_mat(*pg,*px,Ct); malloc_mat(*pg,*px,dC); malloc_mat(*pg,*px,ZXAI); malloc_mat(*pg,*px,tmpM4); for (j=0;j<*Ntimes;j++) { malloc_mat(*pg,*px,Acorb[j]); malloc_mat(*pg,*px,C[j]); } malloc_vec(*px,difX); malloc_vec(*px,xi); malloc_vec(*px,rowX); malloc_vec(*px,avx); malloc_vec(*px,korG); malloc_vec(*px,dB); malloc_vec(*px,VdB); malloc_vec(*px,AIXdN); malloc_vec(*px,AIXlamt); malloc_vec(*px,bhatt); malloc_vec(*pg,tmpv2); malloc_vec(*pg,rowZ); malloc_vec(*pg,avz); malloc_vec(*pg,rowG); malloc_vec(*pg,zi); malloc_vec(*pg,z1); malloc_vec(*pg,gam); malloc_vec(*pg,dgam); malloc_vec(*pg,ZGdN); malloc_vec(*pg,IZGdN); malloc_vec(*pg,ZGlamt); malloc_vec(*pg,IZGlamt); malloc_vec(*antpers,lrisk); malloc_vec(*antpers,dN); malloc_vec(*antpers,pbhat); malloc_vec(*antpers,pghat); malloc_vec(*antpers,plamt); malloc_vec(*nb,ta); coef[0]=1; ps[0]=(*px)+2; if (*px>=*pg) 
pmax=*px; else pmax=*pg; for (j=0;j<*pg;j++){ VE(gam,j)=gamma[j]; } for (itt=0;itt<*it;itt++){ mat_zeros(Ct); mat_zeros(CGam); vec_zeros(IZGdN); vec_zeros(IZGlamt); for (s=1;s<*Ntimes;s++){ time=times[s]; dtime=time-times[s-1]; vec_zeros(lrisk); mat_zeros(ldesignX); mat_zeros(ldesignG); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++){ if ((start[c]=time)) { for(j=0;j=time)) { for(j=0;jve[0],RobVargam->me[0][0]); */ } } } /* i =1 ..Antpers */ for (k=2;k<=*px+1;k++) { rvcu[k*(*Ntimes)+s]=VE(VdB,k-2); } } /* s=1 ..Ntimes */ MxA(RobVargam,ICGam,tmpM3); MxA(ICGam,tmpM3,RobVargam); } for (j=0;j<*pg;j++) { gamma[j]=VE(gam,j); for (k=0;k<*pg;k++) { Vgamma[k*(*pg)+j]=ME(ICGam,j,k); robVgamma[k*(*pg)+j]=ME(RobVargam,j,k); } } if (*sim==1) { ps[0]=(*px)+1; comptest(times,Ntimes,ps,cu,rvcu,vcudif,antsim,test,testOBS,Ut,simUt,W4t,weighted,antpers); } cu[0]=times[0]; vcu[0]=times[0]; /* korrektion for lam0t i \int beta(s) lam0t(s) ds */ /* for (s=1;s<*Ntimes;s++) { time=times[s]; dtime=time-times[s-1]; for(j=0;j<*nb;j++) ta->ve[j]=fabs(bhat[j]-time); dummy=v_min(ta,imin); lam0t=bhat[1*(*nb)+(*imin)]; for(j=2;j<=(*px)+1;j++) bhatt->ve[j-2]=bhat[j*(*nb)+(*imin)]; for (k=2;k<=(*px)+1;k++) { cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s-1]+dtime*bhatt->ve[k-2]/lam0t; }; } */ free_mat(ldesignX); free_mat(A); free_mat(AI); free_mat(cdesignX); free_mat(ldesignG); free_mat(XmavX); free_mat(ZmavZ); free_mat(E2x); free_mat(E2xz); free_mat(XX); free_mat(S); free_mat(dCGam); free_mat(CGam); free_mat(ICGam); free_mat(VarKorG); free_mat(dC); free_mat(XZ); free_mat(ZZ); free_mat(ZZI); free_mat(XZAI); free_mat(Ct); free_mat(ZXAI); free_mat(tmpM4); free_mat(RobVargam); free_mat(tmpM3); free_vec(dB); free_vec(VdB); free_vec(AIXdN); free_vec(AIXlamt); free_vec(ta); free_vec(bhatt); free_vec(pbhat); free_vec(plamt); free_vec(difX); free_vec(korG); free_vec(pghat); free_vec(gam); free_vec(dgam); free_vec(ZGdN); free_vec(IZGdN); free_vec(IZGlamt); free_vec(zi); free_vec(z1); free_vec(lrisk); free_vec(avx); 
free_vec(avz); free_vec(rowG); free_vec(xi); free_vec(rowX); free_vec(rowZ); free_vec(tmpv2); for (j=0;j<*Ntimes;j++) { free_mat(Acorb[j]); free_mat(C[j]); } if (*robust==1){ for (j=0;j<*antpers;j++) { free_vec(Base[j]); free_vec(cumi[j]); free_mat(cumB[j]); free_vec(W2[j]); free_vec(W3[j]); free_mat(W3t[j]); free_mat(W4t[j]); free_mat(AIxit[j]); } } free(vcudif); free(Basei); free(coef); free(ps); free(imin); } timereg/src/prop-odds.c0000644000175000017500000004765314077524411014726 0ustar nileshnilesh//#include #include #include "matrix.h" #include #include void transsurv(times,Ntimes,designX,nx,px,antpers,start,stop,betaS,Nit,cu,vcu,Iinv, Vbeta,detail,sim,antsim,rani,Rvcu,RVbeta,test,testOBS,Ut,simUt,Uit,id,status, weighted,ratesim,score,dhatMit,dhatMitiid,retur,loglike,profile,sym,baselinevar,clusters,antclust,biid,gamiid) double *designX,*times,*betaS,*start,*stop,*cu,*Vbeta,*RVbeta,*vcu,*Rvcu,*Iinv,*test,*testOBS,*Ut,*simUt,*Uit,*score,*dhatMit,*dhatMitiid,*loglike,*biid,*gamiid; int *nx,*px,*antpers,*Ntimes,*Nit,*detail,*sim,*antsim,*rani,*id,*status,*weighted,*ratesim,*retur,*profile,*sym,*baselinevar,*clusters,*antclust; { // {{{ setting up matrix *ldesignX,*WX,*ldesignG,*CtVUCt,*A,*AI; matrix *dYI,*Ct,*dM1M2,*M1M2t,*COV,*ZX,*ZP,*ZPX; matrix *tmp1,*tmp2,*tmp3,*dS1,*SI,*dS2,*S2,*S2pl,*dS2pl,*M1,*VU,*ZXAI,*VUI; matrix *d2S0,*RobVbeta,*tmpM1,*Utt,*S1t,*S1start,*tmpM2,*et,*gt,*qt; matrix *St[*Ntimes],*M1M2[*Ntimes],*C[*Ntimes],*ZXAIs[*Ntimes],*dYIt[*Ntimes]; matrix *dotwitowit[*antpers],*W3t[*antclust],*W4t[*antclust],*W2t[*antclust],*AIxit[*antpers],*Uti[*antclust],*d2G[*Ntimes],*Delta,*Delta2; vector *Ctt,*lht,*S1,*dS0,*S0t,*S0start,*dA,*VdA,*dN,*MdA,*delta,*zav,*dlamt,*plamt,*dG[*Ntimes], *S1star; vector *xav,*difxxav,*xi,*xipers,*zi,*U,*Upl,*beta,*xtilde; vector *Gbeta,*zcol,*one,*difzzav,*difZ; vector *offset,*weight,*ZXdA[*Ntimes],*varUthat[*Ntimes],*Uprofile; vector *ahatt,*risk,*tmpv1,*tmpv2,*rowX,*rowZ,*difX,*VdB; vector 
*W2[*antclust],*W3[*antclust],*reszpbeta,*res1dim,*dAt[*Ntimes]; vector *dLamt[*antpers]; int *pg=calloc(1,sizeof(int)),c,robust=1,pers=0,ci=0,i,j,k,l,s,t,it,count,*ipers=calloc(*Ntimes,sizeof(int)); int *cluster=calloc(*antpers,sizeof(int)); double RR,S0star,time,dummy,ll; double S0,tau,random,scale,sumscore; double norm_rand(); void GetRNGstate(),PutRNGstate(); pg[0]=1; for (j=0;j<*antpers;j++) { malloc_vec(*Ntimes,dLamt[j]); malloc_mat(*Ntimes,*px,dotwitowit[j]); malloc_mat(*Ntimes,*px,AIxit[j]); } for (j=0;j<*antclust;j++) { malloc_mat(*Ntimes,*pg,W3t[j]); malloc_mat(*Ntimes,*pg,W4t[j]); malloc_mat(*Ntimes,*px,W2t[j]); malloc_vec(*px,W2[j]); malloc_vec(*pg,W3[j]); malloc_mat(*Ntimes,*px,Uti[j]); } for(j=0;j<*Ntimes;j++) { malloc_mat(*px,*pg,dYIt[j]); malloc_vec(*px,dAt[j]); malloc_mat(*px,*pg,C[j]); malloc_mat(*pg,*px,M1M2[j]); malloc_mat(*pg,*px,ZXAIs[j]); malloc_vec(*pg,ZXdA[j]); malloc_mat(*px,*px,St[j]); malloc_mat(*px,*px,d2G[j]); malloc_vec(*px,dG[j]); malloc_vec(*px,varUthat[j]); } malloc_mat(*Ntimes,*pg,tmpM1); malloc_mat(*Ntimes,*px,S1t); malloc_mat(*Ntimes,*px,tmpM2); malloc_mat(*Ntimes,*px,S1start); malloc_mat(*Ntimes,*px,et); malloc_mat(*Ntimes,*px,gt); malloc_mat(*Ntimes,*px,qt); malloc_mat(*Ntimes,*px,Utt); malloc_mat(*Ntimes,*pg,Delta); malloc_mat(*Ntimes,*px,Delta2); malloc_mats(*antpers,*px,&WX,&ldesignX,NULL); malloc_mats(*antpers,*pg,&ZP,&ldesignG,NULL); malloc_mats(*px,*px,&COV,&A,&AI,&M1,&CtVUCt,NULL); malloc_mats(*px,*px,&d2S0,&RobVbeta,&tmp1,&tmp2,&dS1,&S2,&dS2,&S2pl,&dS2pl,&SI,&VU,&VUI,NULL); malloc_mats(*pg,*px,&ZXAI,&ZX,&dM1M2,&M1M2t,NULL); malloc_mats(*px,*pg,&tmp3,&ZPX,&dYI,&Ct,NULL); malloc_vec(*Ntimes,S0t); malloc_vec(*Ntimes,S0start); malloc_vec(*Ntimes,lht); malloc_vec(1,reszpbeta); malloc_vec(1,res1dim); malloc_vecs(*antpers,&risk,&weight,&plamt,&dlamt,&dN,&zcol,&Gbeta,&one,&offset,NULL); malloc_vecs(*px,&Ctt,&ahatt,&tmpv1,&difX,&rowX,&xi,&xipers,&dA,&VdA,&MdA,NULL); malloc_vecs(*px,&S1,&dS0,&S1star,&xtilde,&xav,&difxxav,NULL); 
malloc_vecs(*px,&U,&Upl,&beta,&delta,&difzzav,&Uprofile,NULL); malloc_vecs(*pg,&tmpv2,&rowZ,&zi,&difZ,&zav,&VdB,NULL); ll=0; for(j=0;j<*px;j++) VE(beta,j)=betaS[j]; // }}} int timing=0; clock_t c0,c1; c0=clock(); double plamtj,dlamtj; // mat_zeros(ldesignX); for (c=0;c<*nx;c++) for(j=0;j<*px;j++) ME(WX,id[c],j)=designX[j*(*nx)+c]; cu[0]=times[0]; for (it=0;it<*Nit;it++) { vec_zeros(U); vec_zeros(Upl); mat_zeros(S2pl); mat_zeros(S2); ll=0; sumscore=0; mat_zeros(COV); R_CheckUserInterrupt(); if (timing==1) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} // for (c=0;c<*nx;c++) for(j=0;j<*px;j++) ME(ldesignX,id[c],j)=designX[j*(*nx)+c]; Mv(WX,beta,Gbeta); for (s=1;s<*Ntimes;s++) {// {{{ time=times[s]; vec_zeros(dS0); mat_zeros(d2S0); mat_zeros(dS1); vec_zeros(S1star); S0star=0; S0=0; vec_zeros(S1); for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { // {{{ reading data and computing things if ((start[c]=time)) { for(j=0;j<*px;j++) VE(xi,j)=designX[j*(*nx)+c]; j=id[c]; if (time==stop[c] && status[c]==1) {pers=id[c]; scl_vec_mult(1,xi,xipers);} count=count+1; RR=exp(-VE(Gbeta,j)); scale=(RR+cu[1*(*Ntimes)+s-1]); dummy=1/scale; if (it==((*Nit)-1)) VE(plamt,j)=dummy; plamtj=dummy; dlamtj=dummy*dummy; // VE(dlamt,j)=dlamtj; S0star=S0star-dlamtj; S0=S0+plamtj; // S0p=S0p+VE(risk,j)/(RR+cu[1*(*Ntimes)+s]); // S0cox=S0cox+exp(VE(Gbeta,j)); // scl_vec_mult(-RR,xi,tmpv1); vec_add(tmpv1,dG[s-1],dA); scl_vec_mult(-plamtj,dA,dA); if (it==(*Nit-1)) { if (*profile==0) replace_row(dotwitowit[j],s,xi); else replace_row(dotwitowit[j],s,dA); } scl_vec_mult(plamtj,dA,tmpv1); vec_add(tmpv1,dS0,dS0); if (s<0 && j<5 ) {Rprintf(" %d %d \n",s,j); print_vec(tmpv1); } // 16-10-2014 -dlamtj if (*profile==0) scl_vec_mult(-dlamtj,xi,dA); else scl_vec_mult(-plamtj,dA,dA); vec_add(dA,S1star,S1star); for (i=0;i<*px;i++) for (k=0;k<*px;k++) { ME(dS1,i,k)=ME(dS1,i,k)+VE(xi,i)*VE(tmpv1,k); ME(tmp1,i,k)=(VE(xi,i)*VE(xi,k))*RR; } 
mat_add(tmp1,d2G[s-1],tmp1); scl_mat_mult(-dlamtj,tmp1,tmp1); for (i=0;i<*px;i++) for (k=0;k<*px;k++) ME(d2S0,i,k)=ME(d2S0,i,k)+ME(tmp1,i,k)+2*scale*(VE(tmpv1,i)*VE(tmpv1,k)); scl_vec_mult(plamtj,xi,xi); vec_add(S1,xi,S1); } else VE(plamt,id[c])=0; } // }}} ipers[s]=pers; if (s==1 && timing==1) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: loop Ntimes %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} replace_row(S1t,s,dS0); VE(S0start,s)=S0star; replace_row(S1start,s,S1star); if (it==((*Nit)-1)) { VE(S0t,s)=S0; VE(lht,s)=VE(lht,s-1)-S0star/(S0*S0); } /* Rprintf(" %ld %lf %lf \n",s,VE(lht,s),ME(AI,0,0)); */ scl_vec_mult(S0star,dS0,tmpv1); scl_vec_mult(S0,S1star,dA); vec_subtr(tmpv1,dA,dA); scl_vec_mult(1/S0,dA,dA); if (it==((*Nit)-1)) replace_row(gt,s,dA); scl_vec_mult(-1/(S0*S0),dS0,tmpv1); vec_add(dG[s-1],tmpv1,dG[s]); if (s<0) { Rprintf(" %lf \n",S0); print_vec(scl_vec_mult(1/S0,dS0,NULL)); print_vec(tmpv1); print_vec(dG[s]); } scl_mat_mult(-1/(S0*S0),d2S0,A); for (i=0;i<*px;i++) for (j=0;j<*px;j++) ME(A,i,j)=ME(A,i,j)+2*S0*VE(tmpv1,i)*VE(tmpv1,j); mat_add(d2G[s-1],A,d2G[s]); /* baseline is computed */ cu[1*(*Ntimes)+s]=cu[1*(*Ntimes)+s-1]+(1/S0); if (s<0) Rprintf(" %lf \n",cu[1*(*Ntimes)+s]); /* First derivative of U ======================================== */ // extract_row(ldesignX,pers,xi); scl_vec_mult(1,xipers,xi); scl_vec_mult(1/S0,S1,xav); vec_subtr(xi,xav,difxxav); vec_add(U,difxxav,U); if (it==((*Nit)-1)) if (*profile==0) replace_row(et,s,xav); /* profile version of score */ dummy=1/(exp(-VE(Gbeta,pers))+cu[1*(*Ntimes)+s-1]); scl_vec_mult(-exp(-VE(Gbeta,pers)),xi,tmpv1); vec_add(tmpv1,dG[s-1],tmpv1); scl_vec_mult(-dummy,tmpv1,tmpv1); scl_vec_mult(1/S0,dS0,dA); if (it==((*Nit)-1)) if (*profile==1) replace_row(et,s,dA); vec_subtr(tmpv1,dA,dA); vec_add(Upl,dA,Upl); /* Second derivative S =========================================== */ for (i=0;i<*px;i++) for (k=0;k<*px;k++) ME(dS2pl,i,k)=(VE(xi,i)*VE(xi,k))*exp(-VE(Gbeta,pers)); 
mat_add(dS2pl,d2G[s-1],dS2pl); scl_mat_mult(-dummy,dS2pl,dS2pl); for (i=0;i<*px;i++) for (k=0;k<*px;k++) ME(dS2pl,i,k)=ME(dS2pl,i,k)+(VE(tmpv1,i)*VE(tmpv1,k)); scl_mat_mult(-1/S0,d2S0,A); scl_vec_mult(1/S0,dS0,dA); for (i=0;i<*px;i++) for (k=0;k<*px;k++) ME(A,i,k)=ME(A,i,k)+(VE(dA,i)*VE(dA,k)); mat_add(A,dS2pl,dS2pl); mat_add(dS2pl,S2pl,S2pl); if (*profile==1) St[s]=mat_copy(S2pl,St[s]); /* simple Second derivative S2 ================================== */ for (i=0;i<*px;i++) for (k=0;k<*px;k++) ME(dS2,i,k)=VE(dS0,i)*VE(S1,k); scl_mat_mult(S0,dS1,M1); /* */ if (s<0) { Rprintf("======================== %lf \n",S0); print_mat(scl_mat_mult(1/(S0*S0),M1,NULL)); print_mat(scl_mat_mult(1/(S0*S0),dS2,NULL)); } mat_subtr(M1,dS2,M1); if (*sym==1) { scl_mat_mult(-1/(S0*S0),M1,M1); mat_transp(M1,dS2); mat_add(M1,dS2,dS2); scl_mat_mult(0.5,dS2,dS2); } else scl_mat_mult(-1/(S0*S0),M1,dS2); if (s<0) print_mat(dS2); mat_add(dS2,S2,S2); if (*profile==0) St[s]=mat_copy(S2,St[s]); /* ============================================ */ /* log-likelihood contributions */ ll=ll+log(dummy)-log(S0); /* scl_mat_mult(1/S0,dS1,dS1); */ if (it==((*Nit)-1)) { // {{{ Ut[s]=time; if (*profile==1) {for (i=1;i<*px+1;i++) Ut[i*(*Ntimes)+s]=VE(Upl,i-1);} else {for (i=1;i<*px+1;i++) Ut[i*(*Ntimes)+s]=VE(U,i-1); } for (i=1;i<*px+1;i++) ME(Utt,s,i-1)=Ut[i*(*Ntimes)+s]; for (j=0;j<*antpers;j++) VE(dLamt[j],s)=VE(plamt,j)/S0; /* // {{{ for (i=0;i<*px;i++) for (j=0;j<*pg;j++) ME(dM1M2,j,i)=VE(dA,i)*VE(difzzav,j); for (i=0;i<*pg;i++) for (j=0;j<*pg;j++) ME(VU,i,j)=ME(VU,i,j)+VE(difzzav,i)*VE(difzzav,j); MxA(AI,ZPX,dYIt[s]); mat_subtr(Ct,dYIt[s],Ct); C[s]=mat_copy(Ct,C[s]); vec_star(dA,dA,VdA); mat_add(dM1M2,M1M2t,M1M2t); M1M2[s]=mat_copy(M1M2t,M1M2[s]); for (k=1;k<=*px;k++) { cu[k*(*Ntimes)+s]=VE(dA,k-1); vcu[k*(*Ntimes)+s]=VE(VdA,k-1)+vcu[k*(*Ntimes)+s-1]; } */ // }}} } // }}} } // }}} /* s= .... 
Ntimes */ if (timing==1) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: loop Ntimes %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} if (*profile==1) scl_mat_mult(-1,S2pl,A); else scl_mat_mult(-1,S2,A); invertS(A,SI,1); if (*profile==1) Mv(SI,Upl,delta); else Mv(SI,U,delta); vec_add(beta,delta,beta); if (*detail==1) { // {{{ Rprintf("====================Iteration %d ==================== \n",it); Rprintf("log-likelihood "); Rprintf(" %lf \n",ll); Rprintf("Estimate beta "); print_vec(beta); if (*profile==1) { Rprintf("modified partial likelihood Score D l"); print_vec(Upl); } if (*profile==0) {Rprintf("simple Score D l"); print_vec(U); } Rprintf("Information -D^2 l\n"); print_mat(SI); if (*profile==1) {Rprintf("simple D2 l"); print_mat(S2pl); } if (*profile==0) {Rprintf("simple D2 l"); print_mat(S2); } }; // }}} for (k=0;k<*px;k++) sumscore= sumscore+ (*profile==1)*fabs(VE(Upl,k))+(*profile==0)*fabs(VE(U,k)); if ((fabs(sumscore)<0.000001) & (it<*Nit-2)) it=*Nit-2; } /* it */ loglike[0]=ll; if (timing==1) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: out of loop %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); for (s=1;s<*Ntimes;s++) { // {{{ /* computation of q(t) ===================================== */ vec_zeros(xi); for (t=s;t<*Ntimes;t++) { extract_row(gt,t,dA); scl_vec_mult(exp(VE(lht,t))/VE(S0t,t),dA,dA); // if (s<0) {Rprintf("exp %d %d %lf \n",s,t,exp(-VE(lht,t)+VE(lht,s))); print_vec(dA); } vec_add(dA,xi,xi); } scl_vec_mult(exp(-VE(lht,s))/VE(S0t,s),xi,xi); replace_row(qt,s,xi); } // }}} if (timing==1) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: q(t) %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); for (c=0;c<*antpers;c++) cluster[id[c]]=clusters[c]; if (robust==1) { // {{{ terms for robust variances ============================ for (s=1;s<*Ntimes;s++) { time=times[s]; cu[s]=times[s]; vcu[s]=times[s]; Rvcu[s]=times[s]; Ut[s]=times[s]; 
extract_row(qt,s,tmpv1); extract_row(et,s,xtilde); for (i=0;i<*antpers;i++) { ci=cluster[i]; extract_row(dotwitowit[i],s,rowX); vec_subtr(rowX,xtilde,rowX); if (s==0) { print_vec(rowX); print_vec(tmpv1); } vec_add(rowX,tmpv1,rowX); if (i==ipers[s]) for (j=0;j<*px;j++) for (k=0;k<*px;k++) ME(VU,j,k)=ME(VU,j,k)+VE(rowX,j)*VE(rowX,k); scl_vec_mult(VE(dLamt[i],s),rowX,xi); vec_subtr(W2[ci],xi,W2[ci]); if (i==ipers[s]) vec_add(rowX,W2[ci],W2[ci]); if (*ratesim==1) {scl_vec_mult(VE(dLamt[i],s),tmpv2,rowZ); vec_subtr(W2[ci],rowZ,W2[ci]);} replace_row(W2t[ci],s,W2[ci]); VE(rowZ,0)=exp(-VE(lht,s))/VE(S0t,s); scl_vec_mult(VE(dLamt[i],s),rowZ,zi); vec_subtr(W3[ci],zi,W3[ci]); if (i==ipers[s]) vec_add(rowZ,W3[ci],W3[ci]); if (*ratesim==1) {scl_vec_mult(VE(dLamt[i],s),rowX,rowX); vec_subtr(W3[ci],rowX,W3[ci]); } replace_row(W3t[ci],s,W3[ci]); if (*retur==1) dhatMit[i*(*Ntimes)+s]=1*(i==pers)-VE(dLamt[i],s); } /* i=1..antpers */ /* Martingale baseret variance */ /* MxA(C[s],VU,tmp3); MAt(tmp3,C[s],CtVUCt); MxA(C[s],SI,tmp3); MxA(tmp3,M1M2[s],COV); for (k=1;k<=*px;k++) { cu[k*(*Ntimes)+s]=cu[k*(*Ntimes)+s-1]+cu[k*(*Ntimes)+s]; vcu[k*(*Ntimes)+s]=vcu[k*(*Ntimes)+s]+ME(CtVUCt,k-1,k-1) +2*ME(COV,k-1,k-1); } */ } /* s=1 ..Ntimes */ R_CheckUserInterrupt(); /* ROBUST VARIANCES Estimation */ for (s=1;s<*Ntimes;s++) { vec_zeros(VdB); extract_row(S1t,s,rowX); scl_vec_mult(-1.0/(VE(S0t,s)*VE(S0t,s)),rowX,xi); vec_add(xi,Ctt,Ctt); replace_col(Ct,0,Ctt); if (s<0) print_vec(Ctt); for (i=0;i<*antclust;i++) { Mv(SI,W2[i],tmpv1); vM(Ct,tmpv1,rowZ); extract_row(W3t[i],s,zi); VE(zi,0)= exp(VE(lht,s))*VE(zi,0); vec_add(rowZ,zi,zi); if (i==-5) print_vec(zi); replace_row(W4t[i],s,zi); biid[i*(*Ntimes)+s]=VE(zi,0); vec_star(zi,zi,rowZ); vec_add(rowZ,VdB,VdB); extract_row(W2t[i],s,xi); Mv(St[s],tmpv1,rowX); vec_add(xi,rowX,tmpv1); replace_row(Uti[i],s,tmpv1); vec_star(tmpv1,tmpv1,xi); vec_add(xi,varUthat[s],varUthat[s]); if (s==1) { for (j=0;j<*px;j++) { gamiid[j*(*antclust)+i]=VE(W2[i],j); for 
(k=0;k<*px;k++) ME(RobVbeta,j,k)=ME(RobVbeta,j,k)+VE(W2[i],j)*VE(W2[i],k); } } if (*retur==1 && j==0) { // {{{ for (j=0;j<*antpers;j++) { extract_row(WX,j,xi); // extract_row(ldesignG,j,zi); dummy=exp(VE(Gbeta,j)); // *VE(weight,j)*VE(offset,j); scl_vec_mult(dummy,xi,xtilde); replace_row(ldesignX,j,xtilde); } Mv(ldesignX,dAt[s],dlamt); for (j=0;j<*antpers;j++) {extract_row(ldesignG,j,zi); scl_vec_mult(VE(dlamt,j),zi,zi); replace_row(ZP,j,zi);} Mv(ZP,W2[ci],reszpbeta); Mv(dYIt[s],W2[ci],xi); Mv(ldesignX,xi,res1dim); dhatMitiid[i*(*Ntimes)+s]=dhatMit[i*(*Ntimes)+s]-(VE(reszpbeta,0)- VE(res1dim,0)); } // }}} /* retur ==1 */ } /* i =1 ..Antpers */ for (k=1;k<*pg+1;k++) Rvcu[k*(*Ntimes)+s]=VE(VdB,k-1); for (k=1;k<*pg+1;k++) vcu[k*(*Ntimes)+s]=VE(VdB,k-1); } /* s=1 ..Ntimes */ MxA(RobVbeta,SI,tmp1); MxA(SI,tmp1,RobVbeta); } // }}} /* Robust =1 , default */ // for (i=0;i<*antpers;i++) print_vec(W2[i]); // for (i=0;i<1;i++) print_vec(dLamt[i]); // print_vec(dLamt[0]); if (timing==1) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} MxA(VU,SI,tmp1); MxA(SI,tmp1,VU); for(j=0;j<*px;j++) { betaS[j]= VE(beta,j); if (*profile==1) score[j]=VE(Upl,j); else score[j]=VE(U,j); for (k=0;k<*px;k++){ Iinv[k*(*px)+j]=ME(SI,j,k); Vbeta[k*(*px)+j]=-ME(VU,j,k); RVbeta[k*(*px)+j]=-ME(RobVbeta,j,k); } } R_CheckUserInterrupt(); if (*sim==1) { // {{{ // Rprintf("Simulations start N= %d \n",*antsim); GetRNGstate(); /* to use R random normals */ tau=times[*Ntimes-1]-times[0]; for (i=1;i<=*pg;i++) VE(rowZ,i-1)=cu[i*(*Ntimes)+(*Ntimes-1)]; /* Beregning af OBS teststrrelser */ for (s=1;s<*Ntimes;s++) { time=times[s]-times[0]; for (i=1;i<=*pg;i++) { VE(zi,i-1)=fabs(cu[i*(*Ntimes)+s])/sqrt(Rvcu[i*(*Ntimes)+s]); if (VE(zi,i-1)>testOBS[i-1]) testOBS[i-1]=VE(zi,i-1); } scl_vec_mult(time/tau,rowZ,difZ); for (i=1;i<=*pg;i++) VE(zi,i-1)=cu[i*(*Ntimes)+s]; vec_subtr(zi,difZ,difZ); for (i=0;i<*pg;i++) { VE(difZ,i)=fabs(VE(difZ,i)); 
l=(*pg+i); if (VE(difZ,i)>testOBS[l]) testOBS[l]=VE(difZ,i);} if (*weighted>=1) { /* sup beregnes i R */ if ((s>*weighted) && (s<*Ntimes-*weighted)) {extract_row(Utt,s,rowX); for (i=0;i<*px;i++) VE(rowX,i)=VE(rowX,i)/sqrt(VE(varUthat[s],i)); replace_row(Utt,s,rowX); /* scaled score process */ } else {vec_zeros(rowX); replace_row(Utt,s,rowX);} } for (k=1;k<=*px;k++) Ut[k*(*Ntimes)+s]=ME(Utt,s,k-1); } /*s=1..Ntimes Beregning af obs teststrrelser */ for (k=1;k<=*antsim;k++) { mat_zeros(Delta); mat_zeros(Delta2); for (i=0;i<*antclust;i++) { /* random=gasdev(&idum); */ random=norm_rand(); scl_mat_mult(random,W4t[i],tmpM1); mat_add(tmpM1,Delta,Delta); scl_mat_mult(random,Uti[i],tmpM2); mat_add(tmpM2,Delta2,Delta2); } extract_row(Delta,*Ntimes-1,zav); for (s=1;s<*Ntimes;s++) { time=times[s]-times[0]; scl_vec_mult(time/tau,zav,zi); extract_row(Delta,s,rowZ); vec_subtr(rowZ,zi,difZ); for (i=0;i<*pg;i++) { VE(difZ,i)=fabs(VE(difZ,i)); l=(*pg+i); if (VE(difZ,i)>test[l*(*antsim)+k-1]) test[l*(*antsim)+k-1]=VE(difZ,i); VE(zi,i)=fabs(ME(Delta,s,i))/sqrt(Rvcu[(i+1)*(*Ntimes)+s]); if (VE(zi,i)>test[i*(*antsim)+k-1]) test[i*(*antsim)+k-1]=VE(zi,i); } if (*weighted>=1) { extract_row(Delta2,s,xi); if ((s>*weighted) && (s<*Ntimes-*weighted)) { for (i=0;i<*px;i++) {VE(xi,i)=fabs(ME(Delta2,s,i))/sqrt(VE(varUthat[s],i)); if (VE(xi,i)>simUt[i*(*antsim)+k-1]) simUt[i*(*antsim)+k-1]=VE(xi,i); } if (k<50) { for (i=0;i<*px;i++) { l=(k-1)*(*px)+i; Uit[l*(*Ntimes)+s]=ME(Delta2,s,i)/sqrt(VE(varUthat[s],i));}} } } /* weigted score */ else { extract_row(Delta2,s,xi); for (i=0;i<*px;i++) { if (fabs(VE(xi,i))>simUt[i*(*antsim)+k-1]) simUt[i*(*antsim)+k-1]=fabs(VE(xi,i)); } if (k<50) { for (i=0;i<*px;i++) { l=(k-1)*(*px)+i; Uit[l*(*Ntimes)+s]=ME(Delta2,s,i);}} } /* else wscore=0 */ } /* s=1..Ntims */ } /* k=1..antsim */ PutRNGstate(); /* to use R random normals */ } // }}} sim==1 // {{{ freeing 
free_mats(&tmpM1,&S1t,&tmpM2,&S1start,&et,>,&qt,&Utt,&Delta,&Delta2,&ldesignX,&ZP,&WX,&ldesignG,&COV,&A,&AI,&M1,&CtVUCt ,&d2S0,&RobVbeta,&tmp1,&tmp2,&dS1,&S2,&dS2,&S2pl,&dS2pl,&SI,&VU,&VUI, &ZXAI,&ZX,&dM1M2,&M1M2t ,&tmp3,&ZPX,&dYI,&Ct,NULL); free_vecs(&S0t ,&S0start,&lht ,&reszpbeta,&res1dim, &risk,&weight,&plamt,&dlamt,&dN,&zcol,&Gbeta,&one,&offset ,&Ctt,&ahatt,&tmpv1,&difX,&rowX,&xi,&xipers,&dA,&VdA,&MdA,&S1,&dS0,&S1star,&xtilde,&xav,&difxxav ,&U,&Upl,&beta,&delta,&difzzav,&Uprofile, &tmpv2,&rowZ,&zi,&difZ,&zav,&VdB,NULL); for (j=0;j<*antpers;j++) { free_vec(dLamt[j]); free_mat(dotwitowit[j]); free_mat(AIxit[j]); } for (j=0;j<*antclust;j++) { free_mat(W3t[j]); free_mat(W4t[j]); free_mat(W2t[j]);free_vec(W2[j]); free_vec(W3[j]); free_mat(Uti[j]); } for (j=0;j<*Ntimes;j++) { free_mat(dYIt[j]); free_vec(dAt[j]); free_mat(C[j]);free_mat(M1M2[j]);free_mat(ZXAIs[j]); free_vec(ZXdA[j]); free_mat(St[j]); free_mat(d2G[j]); free_vec(dG[j]); free_vec(varUthat[j]); } free(ipers); free(pg); free(cluster); // }}} } timereg/src/timeregister.c0000644000175000017500000002464314077524411015514 0ustar nileshnilesh#include // for NULL #include /* FIXME: Check these declarations against the C/Fortran source code. 
*/ /* .C calls */ extern void aalen(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void addmult(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *) ; extern void atriskindex(void *, void *, void *, void *, void *, void *, void *, void *); extern void clusterindex(void *, void *, void *, void *, void *, void *, void *, void *); extern void compSs(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void compSsforward(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void compSsrev(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void confBandBasePredict(void *, void *, void *, void *, void *, void *, void *); extern void dynadd(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void Gtranssurv(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void itfit(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void 
*, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void localTimeReg(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void mgresid(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void nclusters(void *, void *, void *, void *, void *); extern void OSbreslow(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void OSsemicox(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void OStimecox(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void * , void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pava(void *, void *, void *); extern void pes(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void 
*, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void posubdist2(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void resmean(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void robaalen(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void robaalenC(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void score(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, 
void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void semiaalen(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void semibreslow(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void semidynadd(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void sindex(void *, void *, void *, void *, void *, void *); extern void smooth2B(void *, void *, void *, void *, void *, void *, void *, void *); extern void smoothB(void *, void *, void *, void *, void *, void *, void *, void *); extern void transsurv(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void twostagereg(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, 
void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); static const R_CMethodDef CEntries[] = { {"aalen", (DL_FUNC) &aalen, 11}, {"addmult", (DL_FUNC) &addmult, 29}, {"atriskindex", (DL_FUNC) &atriskindex, 8}, {"clusterindex", (DL_FUNC) &clusterindex, 8}, {"compSs", (DL_FUNC) &compSs, 18}, {"compSsforward", (DL_FUNC) &compSsforward, 18}, {"compSsrev", (DL_FUNC) &compSsrev, 18}, {"confBandBasePredict", (DL_FUNC) &confBandBasePredict, 7}, {"dynadd", (DL_FUNC) &dynadd, 42}, {"Gtranssurv", (DL_FUNC) &Gtranssurv, 40}, {"itfit", (DL_FUNC) &itfit, 55}, {"localTimeReg", (DL_FUNC) &localTimeReg, 10}, {"mgresid", (DL_FUNC) &mgresid, 58}, {"nclusters", (DL_FUNC) &nclusters, 5}, {"OSbreslow", (DL_FUNC) &OSbreslow, 31}, {"OSsemicox", (DL_FUNC) &OSsemicox, 37}, {"OStimecox", (DL_FUNC) &OStimecox, 32}, {"pava", (DL_FUNC) &pava, 3}, {"pes", (DL_FUNC) &pes, 29}, {"posubdist2", (DL_FUNC) &posubdist2, 48}, {"resmean", (DL_FUNC) &resmean, 50}, {"robaalen", (DL_FUNC) &robaalen, 37}, {"robaalenC", (DL_FUNC) &robaalenC, 32}, {"score", (DL_FUNC) &score, 65}, {"semiaalen", (DL_FUNC) &semiaalen, 52}, {"semibreslow", (DL_FUNC) &semibreslow, 32}, {"semidynadd", (DL_FUNC) &semidynadd, 55}, {"sindex", (DL_FUNC) &sindex, 6}, {"smooth2B", (DL_FUNC) &smooth2B, 8}, {"smoothB", (DL_FUNC) &smoothB, 8}, {"transsurv", (DL_FUNC) &transsurv, 41}, {"twostagereg", (DL_FUNC) &twostagereg, 45}, {NULL, NULL, 0} }; void R_init_timereg(DllInfo *dll) { R_registerRoutines(dll, CEntries, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } timereg/src/cox-aalen-stratum.c0000644000175000017500000007251314077524412016355 0ustar nileshnilesh//#include #include #include "matrix.h" #include #include void scorestratum(times,Ntimes,designX,nx,px,designG,pg,antpers,start,stop, 
betaS,Nit,cu,vcu,w,mw,loglike,Iinv,Vbeta,detail,offs,mof,sim,antsim, rani,Rvcu,RVbeta, test,testOBS,Ut,simUt,Uit,XligZ,aalen,nb,id,status,wscore,ridge,ratesim, score,dhatMit,gammaiid,dmgiid, retur,robust,covariance,Vcovs,addresamp,addproc, resample,gamiid,biid,clusters,antclust,vscore,betafixed,weights,entry,exactderiv, timegroup,maxtimepoint,stratum,silent,strata) double *designX,*designG,*times,*betaS,*start,*stop,*cu,*w,*loglike,*Vbeta,*RVbeta,*vcu,*offs,*Rvcu,*Iinv,*test,*testOBS,*Ut,*simUt,*Uit,*aalen,*ridge,*score,*dhatMit,*gammaiid,*dmgiid,*Vcovs,*addproc,*gamiid,*biid,*vscore,*weights; int*covariance,*nx,*px,*pg,*antpers,*Ntimes,*mw,*Nit,*detail,*mof,*sim,*antsim,*rani,*XligZ,*nb,*id,*status,*wscore,*ratesim,*retur,*robust,*addresamp,*resample,*clusters,*antclust,*betafixed,*entry,*exactderiv,*timegroup,*maxtimepoint,*stratum,*silent,*strata; { int timing=0; clock_t c0,c1; c0=clock(); // {{{ setting up memory matrix *X,*Z,*WX,*WZ,*cdesX,*cdesX2,*cdesX3,*CtVUCt,*A,*AI; matrix *Vcov,*dYI,*Ct,*dM1M2,*M1M2t,*COV,*ZX,*ZP,*ZPX; matrix *tmp2,*tmp3,*dS,*S1,*SI,*S2,*M1,*VU,*ZXAI,*VUI; matrix *RobVbeta,*Delta,*tmpM1,*Utt,*Delta2,*tmpM2; // matrix *St[*maxtimepoint],*M1M2[*Ntimes],*C[*maxtimepoint],*ZXAIs[*Ntimes],*dYIt[*Ntimes]; // matrix *St[*Ntimes], // matrix *M1M2[*Ntimes],*C[*Ntimes],*ZXAIs[*Ntimes],*AIs[*Ntimes]; matrix *Stg[*maxtimepoint],*Cg[*maxtimepoint]; matrix *W3t[*antclust],*W4t[*antclust],*W2t[*antclust],*Uti[*antclust]; matrix *ZPX1,*ZPZ1,*ZPXo,*ZPZo; matrix *W2var[*antclust]; matrix *ZPZ[(*px)*(*stratum)+1]; vector *dA,*VdA,*MdA,*delta,*zav,*lamt,*lamtt; vector *xi,*zi,*U,*beta,*xtilde,*Gbeta,*zcol,*one,*difzzav; vector *offset,*weight,*varUthat[*maxtimepoint],*Uprofile; // vector *ZXdA[*Ntimes]; vector *ta,*ahatt,*vrisk,*tmpv1,*tmpv2,*rowX,*rowZ,*difX,*VdB; vector *W2[*antclust],*W3[*antclust],*reszpbeta,*res1dim; matrix *dAt; vector *Ui[*antclust]; int stratas=0,cin=0,ci=0,c,pers=0,i=0,j,k,l,s,s1,it,count,pmax, *imin=calloc(1,sizeof(int)), 
*cluster=calloc(*antpers,sizeof(int)), *ipers=calloc(*Ntimes,sizeof(int)); double S0stratum,S0,RR=1,time=0,ll,lle,llo; double tau,hati,random,scale,sumscore; double *cug=calloc((*maxtimepoint)*(*px+1),sizeof(double)), *timesg=calloc((*maxtimepoint),sizeof(double)); double norm_rand(); void GetRNGstate(),PutRNGstate(); /* float gasdev(),expdev(),ran1(); */ GetRNGstate(); /* to use R random normals */ if (*robust==1) { for (j=0;j<*antclust;j++) { if (*sim>=2) { malloc_mat(*maxtimepoint,*px,W4t[j]); malloc_mat(*maxtimepoint,*px,W3t[j]); } malloc_mat(*maxtimepoint,*pg,W2t[j]); malloc_mat(*maxtimepoint,*pg,Uti[j]); malloc_mat(*pg,*pg,W2var[j]); malloc_vec(*pg,Ui[j]); if (*sim>=2) malloc_vec(*px,W3[j]); } for(j=0;j<*maxtimepoint;j++) malloc_vec(*pg,varUthat[j]); } for (j=0;j<*antclust;j++) malloc_vec(*pg,W2[j]); for (c=0;c<*nx;c++) cluster[id[c]]=clusters[c]; if (*sim>=1) { if (*sim>=2) { malloc_mat(*maxtimepoint,*px,Delta); malloc_mat(*maxtimepoint,*px,tmpM1); } malloc_mat(*maxtimepoint,*pg,Delta2); malloc_mat(*maxtimepoint,*pg,tmpM2); } malloc_mat(*maxtimepoint,*pg,Utt); malloc_mats(*antpers,*px,&WX,&X,&cdesX,&cdesX2,&cdesX3,NULL); malloc_mats(*antpers,*pg,&WZ,&ZP,&Z,NULL); malloc_mats(*px,*px,&Vcov,&COV,&A,&AI,&M1,&CtVUCt,NULL); malloc_mats(*pg,*pg,&RobVbeta,&tmp2,&dS,&S1,&S2,&SI,&VU,&VUI,NULL); for (c=0;c<(*px)*(*stratum)+1;c++) malloc_mat(*pg,*pg,ZPZ[c]); malloc_mats(*pg,*px,&ZXAI,&ZX,&dM1M2,&M1M2t,NULL); malloc_mats(*px,*pg,&tmp3,&ZPX,&dYI,&Ct,NULL); malloc_mats(*px,*pg,&ZPX1,NULL); malloc_mats(*pg,*pg,&ZPZ1,NULL); malloc_mats(*px,*pg,&ZPXo,NULL); malloc_mats(*pg,*pg,&ZPZo,NULL); malloc_mat(*Ntimes,*px,dAt); malloc_vec(1,reszpbeta); malloc_vec(1,res1dim); malloc_vecs(*antpers,&weight,&lamtt,&lamt,&zcol,&Gbeta,&one,&offset,NULL); malloc_vecs(*px,&ahatt,&tmpv1,&difX,&VdB,&rowX,&xi,&dA,&VdA,&MdA,NULL); malloc_vecs(*px,&xtilde,NULL); malloc_vecs(*pg,&tmpv2,&rowZ,&zi,&U,&beta,&delta,&zav,&difzzav,&Uprofile,NULL); malloc_vec(*nb,ta); malloc_vec(*antpers,vrisk); 
for(j=0;j<*maxtimepoint;j++) { malloc_mat(*px,*pg,Cg[j]); malloc_mat(*pg,*pg,Stg[j]);} matrix *Cn,*M1M2n,*ZXAIn,*AIn; malloc_mat((*px)*(*Ntimes),*pg,Cn); malloc_mat((*px)*(*Ntimes),*px,AIn); malloc_mat(*pg,(*px)*(*Ntimes),M1M2n); malloc_mat(*pg,(*px)*(*Ntimes),ZXAIn); // for(j=0;j<*Ntimes;j++) { // malloc_mat(*px,*pg,C[j]); // malloc_mat(*pg,*px,M1M2[j]); // malloc_mat(*pg,*px,ZXAIs[j]); //// malloc_vec(*px,dAt[j]); malloc_mat(*px,*pg,dYIt[j]); //// malloc_vec(*pg,ZXdA[j]); malloc_mat(*pg,*pg,St[j]); // } pmax=max(*px,*pg); ll=0; for(j=0;j<*pg;j++) VE(beta,j)=betaS[j]; for(j=0;j<*antpers;j++) {VE(weight,j)=1; VE(offset,j)=1;} // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: setting up allocation %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); cu[0]=times[0]; for (it=0;it<*Nit || (*Nit==0 && it==0);it++) // {{{ iterations start for cox-aalen model { if (it>0) { vec_zeros(U); mat_zeros(S1); mat_zeros(A); if (*stratum==1) for(j=0;j<*px;j++) mat_zeros(ZPZ[j]); else mat_zeros(ZPZ[0]); mat_zeros(ZPX); mat_zeros(ZX); mat_zeros(X); mat_zeros(Z); mat_zeros(WX); mat_zeros(WZ); } sumscore=0; S0=0; ci=0; R_CheckUserInterrupt(); for (s=1;s<*Ntimes;s++) // {{{ going through time { time=times[s]; // vec_zeros(lamt); // {{{ reading design and computing matrix products if (s==1) { // {{{ for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if (( (start[c]=time)) ) { for(j=0;j=0) ) ci=ci-1; } // }}} vec_zeros(rowX); vec_zeros(rowZ); if (s>1) // {{{ modifying design for next time points while ((stop[ci]=0) ) { VE(Gbeta,id[ci])=0; // vec_prod(zi,beta); for(j=0;j<*px;j++) VE(xi,j)=designX[j*(*nx)+ci]; for(j=0;j<*pg;j++) { VE(zi,j)=designG[j*(*nx)+ci]; VE(Gbeta,id[ci])+=VE(zi,j)*VE(beta,j); } RR=exp(VE(Gbeta,id[ci])); if (entry[ci]==1) { replace_row(X,id[ci],xi); replace_row(Z,id[ci],zi); scl_vec_mult(RR*weights[ci],xi,tmpv1);replace_row(WX,id[ci],tmpv1); scl_vec_mult(weights[ci],zi,tmpv2);replace_row(WZ,id[ci],tmpv2); 
VE(weight,id[ci])=weights[ci]; if (*mof==1) VE(offset,id[ci])=offs[ci]; } else { replace_row(X,id[ci],rowX);replace_row(Z,id[ci],rowZ); replace_row(WX,id[ci],rowX); replace_row(WZ,id[ci],rowZ); VE(Gbeta,id[ci])=0; VE(weight,id[ci])=0; if (*mof==1) VE(offset,id[ci])=offs[ci]; } S0+=entry[ci]*RR*weights[ci]; for(j=0;j=3) { Rprintf("___________ s=%d jump.time=%lf jump.person=%d \n",s,time,pers); // Rprintf("Z matrix\n"); // print_mat(Z); // Rprintf("X matrix (at risk)\n"); // print_mat(X); Rprintf("ZPZ matrix, pers %d %d (at risk)\n",pers,stratas); print_mat(ZPZ[stratas]); if (s>0) { print_mat(A); print_mat(ZX); print_mat(A); } } // }}} if (*stratum==0) invertS(A,AI,*silent); if (ME(AI,0,0)==0 && *stratum==0 && *silent==0) { Rprintf("additive design X'X not invertible at time (number, value): %d %lf \n",s,time); print_mat(A); } if (ME(AI,0,0)==0 && *stratum==0 && *silent==2) { Rprintf("additive design X'X not invertible at time (number, value) : %d %lf \n",s,time); print_mat(A); Rprintf("print only first time with non-invertible design X'X\n"); silent[0]=0; } if (*stratum==1) { // for (k=0;k<*px;k++) // if (fabs(ME(A,k,k))<0.000001) ME(AI,k,k)=0; else ME(AI,k,k)=1/ME(A,k,k); for (k=0;k<*px;k++) ME(AI,k,k)=0; // mat_zeros(AI); if (*px>1) { k=stratas; ME(AI,k,k) = 1/ME(A,k,k); } else ME(AI,0,0) = 1/ME(A,0,0); } scale=VE(weight,pers); if ((*stratum==1) && (*px>1) ) { S0stratum=ME(AI,stratas,stratas); scl_mat_mult(scale*S0stratum,ZPZ[stratas],ZPZo); scl_mat_mult(scale*S0stratum,ZPX,ZPXo); // printf(" %d %lf \n",strata[pers],S0stratum); // print_mat(AI); // print_mat(ZPX); // print_mat(ZPZ[strata[pers]]); // printf(" %lf %lf \n",1/S0,S0stratum); } else { scl_mat_mult(scale/S0,ZPZ[0],ZPZo); scl_mat_mult(scale/S0,ZPX,ZPXo); } extract_row(X,pers,xi); scl_vec_mult(scale,xi,xi); Mv(AI,xi,dA); MxA(ZX,AI,ZXAI); if (it==(*Nit-1)) { replace_row(dAt,s,dA); for (j=0;j<*pg;j++) for (i=0;i<*px;i++) ME(ZXAIn,j,(s-1)*(*px)+i)=ME(ZXAI,j,i); } if (s<0) { Rprintf(" test mester %d %lf %d %d 
\n",s,time,pers,stratas); print_vec(xi); print_mat(AI); print_vec(dA); } /* First derivative U and Second derivative S */ extract_row(Z,pers,zi); // scl_vec_mult(scale,zi,zi); Mv(ZX, dA, zav); vec_subtr(zi,zav,difzzav); scl_vec_mult(scale,difzzav,difzzav); vec_add(difzzav,U,U); if (*betafixed==0) { MxA(ZXAI,ZPXo,tmp2); mat_subtr(ZPZo,tmp2, dS); mat_add(dS,S1,S1); scl_mat_mult(1,S1,Stg[timegroup[s]]); } if (s<0) { // {{{ Rprintf(" %d %d %lf %lf \n",pers,s,time,scale); print_vec(xi); print_vec(dA); print_vec(zi); print_vec(zav); print_vec(difzzav); print_vec(U); print_mat(A); print_mat(AI); } // }}} if (*betafixed==0 && *stratum==0) // {{{ computes second derivative for general cox-aalen model if ( (((*exactderiv==1) && (it==(*Nit-1)) && (*px>1))) || ((*exactderiv==2) && (*px>1)) ) { mat_zeros(ZPZ1); mat_zeros(ZPX1); for (i=0;i<*antpers;i++) { extract_row(WX,i,xi); VE(lamt,i)=vec_prod(xi,dA); extract_row(Z,i,zi); scl_vec_mult(VE(lamt,i),zi,rowZ); replace_row(ZP,i,rowZ); extract_row(X,i,xi); for(j=0;j=1) { Rprintf("=============Iteration %d =============== \n",it); Rprintf("Estimate beta \n"); print_vec(beta); Rprintf("delta beta \n"); print_vec(delta); Rprintf("Score D l\n"); print_vec(U); Rprintf("Information -D^2 l\n"); print_mat(SI); }; if (*betafixed==0 && (*Nit>0)) vec_add(beta,delta,beta); for (k=0;k<*pg;k++) sumscore=sumscore+fabs(VE(U,k)); if ((sumscore<0.0000001) & (it<(*Nit)-2)) { it=*Nit-2; } } /* it */ // }}} //scl_mat_mult( (double) 1/(*antclust),SI,SI); if (*detail>=2) Rprintf("Fitting done \n"); if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: fitting done %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); // vec_zeros(Gbeta); lle=0; llo=0; ci=0; for (k=0;k<*pg;k++) score[k]=VE(U,k); mat_zeros(A); mat_zeros(ZX); mat_zeros(X); mat_zeros(Z); mat_zeros(WX); mat_zeros(WZ); for (s=1;s<*Ntimes;s++) { // {{{ terms for robust variances time=times[s]; cu[s]=times[s]; vcu[s]=times[s]; 
Rvcu[timegroup[s]]=times[s]; cug[timegroup[s]]=times[s]; timesg[timegroup[s]]=times[s]; Ut[timegroup[s]]=times[s]; R_CheckUserInterrupt(); if (*robust==1) { sumscore=0; S0=0; // {{{ reading design and computing matrix products if (s==1) { // {{{ for (c=0,count=0;((c<*nx) && (count!=*antpers));c++) { if (( (start[c]=time)) ) { for(j=0;j=0) ) ci=ci-1; } // }}} vec_zeros(rowX); vec_zeros(rowZ); if (s>1) // {{{ modifying design for next time points while ((stop[ci]=0) ) { VE(Gbeta,id[ci])=0; // vec_prod(zi,beta); for(j=0;j<*px;j++) VE(xi,j)=designX[j*(*nx)+ci]; for(j=0;j<*pg;j++) { VE(zi,j)=designG[j*(*nx)+ci]; VE(Gbeta,id[ci])+=VE(zi,j)*VE(beta,j); } RR=exp(VE(Gbeta,id[ci])); if (entry[ci]==1) { replace_row(X,id[ci],xi); replace_row(Z,id[ci],zi); scl_vec_mult(RR*weights[ci],xi,tmpv1);replace_row(WX,id[ci],tmpv1); scl_vec_mult(weights[ci],zi,tmpv2);replace_row(WZ,id[ci],tmpv2); VE(weight,id[ci])=weights[ci]; if (*mof==1) VE(offset,id[ci])=offs[ci]; } else { replace_row(X,id[ci],rowX);replace_row(Z,id[ci],rowZ); replace_row(WX,id[ci],rowX); replace_row(WZ,id[ci],rowZ); VE(Gbeta,id[ci])=0; VE(weight,id[ci])=0; if (*mof==1) VE(offset,id[ci])=offs[ci]; } S0+=entry[ci]*RR*weights[ci]; // for(j=0;j=2) { extract_row(WX,pers,xi); extract_row(dAt,s,dA); hati=vec_prod(xi,dA); lle=lle+log(hati); } } /* terms for robust variance */ if (*robust==1) { for (j=0;j<*pg;j++) for (i=0;i<*px;i++) ME(ZXAI,j,i)=ME(ZXAIn,j,(s-1)*(*px)+i); for (j=0;j<*px;j++) for (i=0;i<*px;i++) ME(AI,j,i)=ME(AIn,(s-1)*(*px)+j,i); if (*ratesim==1 || *retur>=1) for (i=0;i<*antpers;i++) // {{{ { cin=cluster[i]; extract_row(WX,i,rowX); extract_row(X,i,xi); extract_row(Z,i,zi); hati=vec_prod(rowX,dA); if (*ratesim==1) { // Rprintf("%d %d %d %d %lf \n",s,i,ipers[s],pers,hati); Mv(ZXAI,xi,tmpv2); vec_subtr(zi,tmpv2,tmpv2); scl_vec_mult(VE(weight,i),tmpv2,tmpv2); if (i==pers) vec_add(tmpv2,W2[cin],W2[cin]); if (*ratesim==1) {scl_vec_mult(hati,tmpv2,rowZ); vec_subtr(W2[cin],rowZ,W2[cin]); } if (*sim>=2) { 
Mv(AI,xi,rowX); scl_vec_mult(VE(weight,i),rowX,rowX); if (i==pers) {vec_add(rowX,W3[cin],W3[cin]); } llo=llo+hati; if (*ratesim==1) {scl_vec_mult(hati,rowX,rowX); vec_subtr(W3[cin],rowX,W3[cin]);} } } if (*retur==1) dhatMit[i*(*Ntimes)+s]=1*(i==pers)-hati; if (*retur==2) dhatMit[i]=dhatMit[i]+1*(i==pers)-hati; } /* i 1.. antpers */ // }}} } if ((*ratesim==0) && (*robust==1)) { // {{{ compute resampling counting process LWY style version cin=cluster[pers]; if (*sim>=2) { extract_row(WX,pers,rowX); extract_row(X,pers,xi); } extract_row(Z,pers,zi); // hati=vec_prod(rowX,dA); // if (*detail==2) Rprintf(" %d %d \n",cin,pers); Mv(ZXAI,xi,tmpv2); vec_subtr(zi,tmpv2,tmpv2); scl_vec_mult(VE(weight,pers),tmpv2,tmpv2); // squaring to deal with counting process'es // should use cholesky square of variance matrix // for (k=0;k<*pg;k++) VE(tmpv2,k)=pow(VE(tmpv2,k),2); vec_add(tmpv2,W2[cin],W2[cin]); if (*sim>=2) { Mv(AI,xi,rowX); scl_vec_mult(VE(weight,pers),rowX,rowX); vec_add(rowX,W3[cin],W3[cin]); } for (s1=timegroup[s];s1<*maxtimepoint;s1++) { // for (k=0;k<*pg;k++) VE(tmpv2,k)=sqrt(VE(W2[cin],k)); replace_row(W2t[cin],s1,tmpv2); if (*sim>=2) replace_row(W3t[cin],s1,W3[cin]); } } // }}} if (*robust==1 && *ratesim==1) for (j=0;j<*antclust;j++) { replace_row(W2t[j],timegroup[s],W2[j]); if (*sim>=2) replace_row(W3t[j],timegroup[s],W3[j]); } /* MG baseret varians beregning */ for (j=0;j<*pg;j++) for (i=0;i<*px;i++) { ME(M1M2t,j,i)=ME(M1M2n,j,(s-1)*(*px)+i); ME(Ct,i,j)= ME(Cn,(s-1)*(*px)+i,j); } MxA(Ct,VU,tmp3); MAt(tmp3,Ct,CtVUCt); MxA(Ct,SI,tmp3); // printf(" %d %d %d %d \n",0,(s-1)*(*px),*pg,s*(*px)); // print_mat(M1M2t); // print_mat(M1M2n); // print_mat(M1M2t); // mat_subsec(M1M2n,0,(s-1)*(*px),*pg,s*(*px),M1M2t); // MxA(tmp3,M1M2[s],COV); // print_mat(COV); MxA(tmp3,M1M2t,COV); for (k=1;k<=*px;k++) { if (*betafixed==0) vcu[k*(*Ntimes)+s]+=ME(CtVUCt,k-1,k-1) +2*ME(COV,k-1,k-1); // else vcu[k*(*Ntimes)+s]=vcu[k*(*Ntimes)+s]; } for (k=1;k<=*pg;k++) 
Ut[k*(*maxtimepoint)+timegroup[s]]=ME(Utt,timegroup[s],k-1); } // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: robust variance terms 1 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} if (*detail>=2) Rprintf("Robust variances 1 \n"); R_CheckUserInterrupt(); ll=lle-llo; /* likelihood beregnes */ if (*detail==1) Rprintf("loglike is %lf \n",ll); if ((*robust==1)) // {{{ robust variances { for (s=1;s<*maxtimepoint;s++) { vec_zeros(VdB); mat_zeros(Vcov); for (j=0;j<*antclust;j++) { // {{{ if (s==1 && *detail==3) { Rprintf("================================================= %d \n",j); print_mat(W2t[j]); print_vec(W2[j]); print_mat(Stg[s]); print_mat(S1); print_mat(SI); } // counting process style simulation // if (s==1) // if (*ratesim==0) for (k=0;k<*pg;k++) VE(W2[j],k)=sqrt(VE(W2[j],k)); Mv(SI,W2[j],tmpv2); if (*sim>=2) { Mv(Cg[s],tmpv2,rowX); extract_row(W3t[j],s,tmpv1); vec_add(tmpv1,rowX,difX); if (*betafixed==1) scl_vec_mult(1,tmpv1,difX); replace_row(W4t[j],s,difX); vec_star(difX,difX,tmpv1); vec_add(tmpv1,VdB,VdB); } if (s==1) if (*betafixed==0) { // for (c=0;c<*pg;c++) gamiid[c*(*antclust)+j]=gamiid[c*(*antclust)+j]+VE(tmpv2,c); for (c=0;c<*pg;c++) gamiid[c*(*antclust)+j]=VE(tmpv2,c); } if (*resample==1) { for (c=0;c<*px;c++) {l=j*(*px)+c; // biid[l*(*maxtimepoint)+s]=biid[l*(*maxtimepoint)+s]+VE(difX,c); biid[l*(*maxtimepoint)+s]=VE(difX,c); } } if (*covariance==1 && (*sim>=2)) { for (k=0;k<*px;k++) for (c=0;c<*px;c++) ME(Vcov,k,c)=ME(Vcov,k,c)+VE(difX,k)*VE(difX,c); } Mv(Stg[s],tmpv2,rowZ); extract_row(W2t[j],s,tmpv2); if (*betafixed==0) { vec_subtr(tmpv2,rowZ,zi); replace_row(Uti[j],s,zi); } else replace_row(Uti[j],s,tmpv2); vec_star(zi,zi,tmpv2); vec_add(tmpv2,varUthat[s],varUthat[s]); } // }}} /* j in clusters */ if (*betafixed==0) for (i=0;i<*pg;i++) vscore[(i+1)*(*maxtimepoint)+s]=VE(varUthat[s],i); if (*sim>=2) // {{{ for (k=1;k<*px+1;k++) { Rvcu[k*(*maxtimepoint)+s]=VE(VdB,k-1); if (*covariance==1) { for 
(j=0;j<*px;j++) { l=(k-1)*(*px)+j; Vcovs[l*(*maxtimepoint)+s]=ME(Vcov,k-1,j); } } } // }}} } /* s=1 ..maxtimepoints */ } /* if robust==1 */ // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance terms 2 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} if (*detail>=2) Rprintf("Robust variances 2 \n"); if (*betafixed==0) for (j=0;j<*antclust;j++) { Mv(SI,W2[j],tmpv2); for (c=0;c<*pg;c++) for (k=0;k<*pg;k++) ME(RobVbeta,c,k)=ME(RobVbeta,c,k)+VE(W2[j],c)*VE(W2[j],k); for (k=0;k<*pg;k++) gammaiid[j*(*pg)+k]=VE(tmpv2,k); } MxA(RobVbeta,SI,tmp2); MxA(SI,tmp2,RobVbeta); if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance terms 3 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} R_CheckUserInterrupt(); for(j=0;j<*pg;j++) { betaS[j]= VE(beta,j); loglike[0]=lle; loglike[1]=ll; for (k=0;k<*pg;k++){ Iinv[k*(*pg)+j]=ME(SI,j,k); Vbeta[k*(*pg)+j]=-ME(VU,j,k); RVbeta[k*(*pg)+j]=-ME(RobVbeta,j,k); } } if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: variance terms 4 %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} // for(j=0;j<*antclust;j++) print_mat(Uti[j]); if (*detail>=2) Rprintf("simulations starts \n"); if (*sim>=1) { // {{{ score process simulations // Rprintf("Simulations start N= %ld \n",(long int) *antsim); tau=times[*Ntimes-1]-times[0]; if (*sim>=2) for (i=1;i<=*px;i++) VE(rowX,i-1)=cug[i*(*maxtimepoint)+(*maxtimepoint-1)]; for (s=1;s<*maxtimepoint;s++) { // {{{ /* Beregning af OBS teststrrelser */ time=timesg[s]-times[0]; // FIX if (*sim>=2) { for (i=1;i<=*px;i++) { VE(xi,i-1)=fabs(cug[i*(*maxtimepoint)+s])/sqrt(Rvcu[i*(*maxtimepoint)+s]); if (VE(xi,i-1)>testOBS[i-1]) testOBS[i-1]=VE(xi,i-1); } scl_vec_mult(time/tau,rowX,difX); for (i=1;i<=*px;i++) VE(xi,i-1)=cug[i*(*maxtimepoint)+s]; vec_subtr(xi,difX,difX); for (i=0;i<*px;i++) { VE(difX,i)=fabs(VE(difX,i)); l=(*px+i); if (VE(difX,i)>testOBS[l]) testOBS[l]=VE(difX,i); } } if (*wscore>=1) { /* sup beregnes i R */ 
if ((s>*wscore) && (s<*maxtimepoint-*wscore)) {extract_row(Utt,s,rowZ); for (i=0;i<*pg;i++) VE(rowZ,i) = VE(rowZ,i)/sqrt(VE(varUthat[s],i)); replace_row(Utt,s,rowZ); /* scaled score process */ } else {vec_zeros(rowZ); replace_row(Utt,s,rowZ);} } for (k=1;k<=*pg;k++) Ut[k*(*maxtimepoint)+s]=ME(Utt,s,k-1); } // }}} *s=1..maxtimepoint Beregning af obs teststrrelser if (*detail>=2) Rprintf("simulations starts for real %d \n",*sim); for (k=1;k<=*antsim;k++) { R_CheckUserInterrupt(); if (*sim>=2) mat_zeros(Delta); mat_zeros(Delta2); vec_zeros(tmpv1); for (i=0;i<*antclust;i++) { /* random=gasdev(&idum); */ random=norm_rand(); if (*sim>=2) { scl_mat_mult(random,W4t[i],tmpM1); mat_add(tmpM1,Delta,Delta); } scl_mat_mult(random,Uti[i],tmpM2); mat_add(tmpM2,Delta2,Delta2); } if (*sim>=2) extract_row(Delta,*maxtimepoint-1,tmpv1); for (s=1;s<*maxtimepoint;s++) { time=timesg[s]-times[0]; if (*sim>=2) { // {{{ scl_vec_mult(time/tau,tmpv1,xi); extract_row(Delta,s,rowX); vec_subtr(rowX,xi,difX); if (*addresamp==1) { if (k<51) { for (i=0;i<*px;i++) {l=(k-1)*(*px)+i; addproc[l*(*maxtimepoint)+s]=ME(Delta,s,i);}} } for (i=0;i<*px;i++) { VE(difX,i)=fabs(VE(difX,i)); l=(*px+i); if (VE(difX,i)>test[l*(*antsim)+k-1]) test[l*(*antsim)+k-1]=VE(difX,i); VE(xi,i)=fabs(ME(Delta,s,i))/sqrt(Rvcu[(i+1)*(*maxtimepoint)+s]); if (VE(xi,i)>test[i*((*antsim))+k-1]) test[i*((*antsim))+k-1]=VE(xi,i); } } // }}} if (*wscore>=1) { extract_row(Delta2,s,zi); if ((s>*wscore) && (s<*maxtimepoint-*wscore)) { for (i=0;i<*pg;i++) {VE(zi,i)=fabs(ME(Delta2,s,i))/sqrt(VE(varUthat[s],i)); if (VE(zi,i)>simUt[i*(*antsim)+k-1]) simUt[i*(*antsim)+k-1]=VE(zi,i); } if (k<50) { for (i=0;i<*pg;i++) { l=(k-1)*(*pg)+i; Uit[l*(*maxtimepoint)+s]=ME(Delta2,s,i)/sqrt(VE(varUthat[s],i));}} } } /* weigted score */ else { extract_row(Delta2,s,zi); for (i=0;i<*pg;i++) { if (fabs(VE(zi,i))>simUt[i*(*antsim)+k-1]) simUt[i*(*antsim)+k-1]=fabs(VE(zi,i)); } if (k<50) { for (i=0;i<*pg;i++) { l=(k-1)*(*pg)+i; 
Uit[l*(*maxtimepoint)+s]=ME(Delta2,s,i);} } } /* else wscore=0 */ } /* s=1..Ntims */ } /* k=1..antsim */ } /* sim==1 */ // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: before freeing %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} PutRNGstate(); /* to use R random normals */ // {{{ freeing if (*sim>=2) free_mats(&Delta,&tmpM1,NULL); if (*sim==1) free_mats(&Delta2,&tmpM2,NULL); free_mats(&Cn,&M1M2n,&ZXAIn,&AIn,NULL); free_mats(&dAt,&Utt,&WX,&X,&cdesX,&cdesX2,&cdesX3, &WZ,&ZP,&Z, &Vcov,&COV,&A,&AI,&M1,&CtVUCt, &RobVbeta,&tmp2,&dS,&S1,&S2,&SI,&VU,&VUI, &ZXAI,&ZX,&dM1M2,&M1M2t, &tmp3,&ZPX,&dYI,&Ct, &ZPX1,&ZPZ1, &ZPXo,&ZPZo,NULL); free_vecs(&reszpbeta,&res1dim,&weight,&lamtt,&lamt,&zcol,&Gbeta,&one,&offset, &ahatt,&tmpv1,&difX,&VdB,&rowX,&xi,&dA,&VdA,&MdA, &xtilde, &tmpv2,&rowZ,&zi,&U,&beta,&delta,&zav,&difzzav,&Uprofile, &ta,&vrisk,NULL); if (*robust==1) { for (j=0;j<*antclust;j++) { if (*sim>=2) free_mat(W3t[j]); if (*sim>=2) free_mat(W4t[j]); if (*sim>=2) free_vec(W3[j]); free_mat(W2t[j]); free_mat(W2var[j]); free_mat(Uti[j]); free_vec(Ui[j]); } for (j=0;j<*maxtimepoint;j++) free_vec(varUthat[j]); } for (j=0;j<*antclust;j++) free_vec(W2[j]); // for (j=0;j<*Ntimes;j++) { //// free_mat(C[j]);free_mat(M1M2[j]); free_mat(ZXAIs[j]); //// free_vec(ZXdA[j]); //// free_mat(St[j]); // } for (c=0;c<(*px)*(*stratum)+1;c++) free_mat(ZPZ[c]); for(j=0;j<*maxtimepoint;j++) { free_mat(Cg[j]); free_mat(Stg[j]);} free(cluster); free(ipers); free(imin); free(cug); free(timesg); // }}} if (timing==2) { // {{{ c1=clock(); Rprintf ("\telapsed CPU time: after freeing %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC); c0=clock(); } // }}} } timereg/src/prop-odds-subdist2.c0000644000175000017500000005502114077524411016447 0ustar nileshnilesh#include #include #include "matrix.h" #include #include void posubdist2(times,Ntimes,designX,nx,px,antpers,start,stop,betaS,Nit,cu,vcu,Iinv, Vbeta,detail,sim,antsim,rani,Rvcu,RVbeta,test,testOBS,Ut,simUt,Uit,id,status, 
weighted,ratesim,score,dhatMit,dhatMitiid,retur,loglike,profile,sym, KMtimes,KMti,etime,causeS,ipers,baselinevar,clusters,antclust,ccode,biid,gamiid,wweights) double *designX,*times,*betaS,*start,*stop,*cu,*Vbeta,*RVbeta,*vcu,*Rvcu,*Iinv,*test,*testOBS,*Ut,*simUt,*Uit,*score,*dhatMit,*dhatMitiid,*loglike, *KMtimes,*KMti,*etime,*biid,*gamiid,*wweights; int *nx,*px,*antpers,*Ntimes,*Nit,*detail,*sim,*antsim,*rani,*id,*status,*weighted,*ratesim,*retur,*profile,*sym,*causeS,*ipers,*baselinevar,*clusters,*antclust,*ccode; { // {{{ setting up matrix *ldesignX,*WX,*ldesignG,*CtVUCt,*A,*AI; matrix *dYI,*Ct,*dM1M2,*M1M2t,*COV,*ZX,*ZP,*ZPX; matrix *tmp1,*tmp2,*tmp3,*dS1,*SI,*dS2,*S2,*S2pl,*dS2pl,*M1,*VU,*ZXAI,*VUI; matrix *d2S0,*RobVbeta,*tmpM1,*Utt,*dS0t,*S1start,*tmpM2,*et,*gt,*qt; matrix *St[*Ntimes],*M1M2[*Ntimes],*C[*Ntimes],*ZXAIs[*Ntimes],*dYIt[*Ntimes]; matrix *dotwitowit[*antpers], // *W3tmg[*antclust], *W3t[*antclust],*W4t[*antclust],*W2t[*antclust],*AIxit[*antpers],*Uti[*antclust],*d2G[*Ntimes],*Delta,*Delta2; vector *Ctt,*lht,*S1,*dS0,*incS0t,*S0t,*S0start,*dA,*VdA,*dN,*MdA,*delta,*zav,*dlamt,*plamt,*dG[*Ntimes], *S1star; vector *xav,*difxxav,*xi,*zi,*U,*Upl,*beta,*xtilde; vector *Gbeta,*zcol,*one,*difzzav,*difZ,*neta2[*antclust]; vector *offset,*weight,*ZXdA[*Ntimes],*varUthat[*Ntimes],*Uprofile; vector *ahatt,*risk,*tmpv1,*tmpv2,*rowX,*rowZ,*difX,*VdB,*VdBmg; vector *W2[*antclust],*W3[*antclust],*reszpbeta,*res1dim,*dAt[*Ntimes],*eta2; // vector *W2[*antclust],*W3[*antclust],*W3mg[*antclust],*reszpbeta,*res1dim,*dAt[*Ntimes],*eta2; vector *dLamt[*antpers]; int *pg=calloc(1,sizeof(int)),c,robust=1,pers=0,ci,i,j,k,l,s,it; double weights,risks,RR,S0star,time,alpha,ll; double S0,tau,random,scale,sumscore; double norm_rand(); void GetRNGstate(),PutRNGstate(); pg[0]=1; for (j=0;j<*antpers;j++) { malloc_vec(*Ntimes,dLamt[j]); malloc_mat(*Ntimes,*px,dotwitowit[j]); malloc_mat(*Ntimes,*px,AIxit[j]); } for (j=0;j<*antclust;j++) { malloc_mat(*Ntimes,*pg,W3t[j]); // 
malloc_mat(*Ntimes,*pg,W3tmg[j]); malloc_mat(*Ntimes,*pg,W4t[j]); malloc_mat(*Ntimes,*px,W2t[j]); malloc_mat(*Ntimes,*px,Uti[j]); malloc_vec(*px,W2[j]); malloc_vec(*pg,W3[j]); // malloc_vec(*pg,W3mg[j]); malloc_vec(*Ntimes,neta2[j]) } malloc_mat(*Ntimes,*pg,tmpM1); malloc_mat(*Ntimes,*px,dS0t); malloc_mat(*Ntimes,*px,tmpM2); malloc_mat(*Ntimes,*px,S1start); malloc_mat(*Ntimes,*px,et); malloc_mat(*Ntimes,*px,gt); malloc_mat(*Ntimes,*px,qt); malloc_mat(*Ntimes,*px,Utt); malloc_mat(*Ntimes,*pg,Delta); malloc_mat(*Ntimes,*px,Delta2); malloc_mats(*antpers,*px,&WX,&ldesignX,NULL); malloc_mats(*antpers,*pg,&ZP,&ldesignG,NULL); malloc_mats(*px,*px,&COV,&A,&AI,&M1,&CtVUCt,NULL); malloc_mats(*px,*px,&d2S0,&RobVbeta,&tmp1,&tmp2,&dS1,&S2,&dS2,&S2pl,&dS2pl,&SI,&VU,&VUI,NULL); malloc_mats(*pg,*px,&ZXAI,&ZX,&dM1M2,&M1M2t,NULL); malloc_mats(*px,*pg,&tmp3,&ZPX,&dYI,&Ct,NULL); malloc_vec(*Ntimes,S0t); malloc_vec(*Ntimes,incS0t); malloc_vec(*Ntimes,eta2); malloc_vec(*Ntimes,S0start); malloc_vec(*Ntimes,lht); malloc_vec(1,reszpbeta); malloc_vec(1,res1dim); malloc_vecs(*antpers,&risk,&weight,&plamt,&dlamt,&dN,&zcol,&Gbeta,&one,&offset,NULL); malloc_vecs(*px,&Ctt,&ahatt,&tmpv1,&difX,&rowX,&xi,&dA,&VdA,&MdA,NULL); malloc_vecs(*px,&S1,&dS0,&S1star,&xtilde,&xav,&difxxav,NULL); malloc_vecs(*px,&U,&Upl,&beta,&delta,&difzzav,&Uprofile,NULL); malloc_vecs(*pg,&tmpv2,&rowZ,&zi,&difZ,&zav,&VdB,&VdBmg,NULL); for(j=0;j<*Ntimes;j++) { malloc_mat(*px,*pg,C[j]); malloc_mat(*pg,*px,M1M2[j]); malloc_mat(*pg,*px,ZXAIs[j]); malloc_mat(*px,*pg,dYIt[j]); malloc_vec(*px,dAt[j]); malloc_vec(*pg,ZXdA[j]); malloc_mat(*px,*px,St[j]); malloc_mat(*px,*px,d2G[j]); malloc_vec(*px,dG[j]); malloc_vec(*px,varUthat[j]); } ll=0; for(j=0;j<*px;j++) VE(beta,j)=betaS[j]; // }}} int timing=0; clock_t c0,c1; c0=clock(); double dummy,plamtj,dlamtj,weightp=0; // reading design once and for all for (c=0;c<*nx;c++) for(j=0;j<*px;j++) ME(WX,id[c],j)=designX[j*(*nx)+c]; cu[0]=times[0]; for (it=0;it<*Nit;it++) { // {{{ vec_zeros(U); 
vec_zeros(Upl); mat_zeros(S2pl); mat_zeros(S2); mat_zeros(COV); ll=0; sumscore=0; R_CheckUserInterrupt(); Mv(WX,beta,Gbeta); for (s=1;s<*Ntimes;s++) {// {{{ time=times[s]; pers=ipers[s]; // person with type 1 jump // printf(" pers=%d weight=%lf cause=%d \n",pers,wweights[pers],status[pers]); vec_zeros(dS0); mat_zeros(d2S0); mat_zeros(dS1); vec_zeros(S1star); vec_zeros(S1); S0star=0; S0=0; // S0p=0; S0cox=0; weightp=1; for (j=0;j<*antpers;j++) { // {{{ int other=((status[j]!=*causeS) && (status[j]!=*ccode))*1; weights=1; if (etime[j]