rms/MD5
465c0f39c746b62c620b00d025f5537b *DESCRIPTION
0e6c37cd03fbde1ec088ffa18b117876 *NAMESPACE
2f75b11a9b5da04040c315f72a5b21cb *NEWS
a5263ced794350451f27b08a43049789 *R/Function.rms.s
67db9f1c696a5833cb145cc161484ce9 *R/Glm.r
23b84ce2a2a1e0d77a1a7b30e732d64e *R/Gls.s
de1908f5613d122023e360618f97ef6b *R/LRupdate.r
b13398de07dd115322932dd58fa516f0 *R/Ocens.r
6c1b7f496955f7fb876c76b8243972c0 *R/Predict.s
3148753ef910e566f95dcefcc90f2980 *R/Rq.s
30b622fa147c9af6b80d4522e25c21b5 *R/Xcontrast.r
b9c57f1a352eb75f8f698bcb612e5415 *R/anova.rms.s
549afcee7cde275d53e48907e99c1dab *R/bj.s
a88d92dfde25f170cd35ce59fdae46f5 *R/bootcov.s
2f2c8fa363d346eb061a3536bad698fb *R/bplot.s
59713401df36aacb6c11dc05a79bfd84 *R/calibrate.cph.s
3327d2583bbce236bddb91dd2c7371be *R/calibrate.default.s
f868e8ca1885d191cbd50983e796c851 *R/calibrate.psm.s
684ccde18d88f7e20719117c55cc90d1 *R/calibrate.s
e89567cfc45009c41105dbb199a55a44 *R/contrast.s
637828fdff9b64f45ac1c79ae7a7c783 *R/cph.s
707e17e96143dc6256fe44d671f43963 *R/cr.setup.s
4bcb6d69d3c683eca38eaf28fc37b019 *R/datadist.s
6e6555d53a23bfbf84b5d5a95611a958 *R/fastbw.s
07800d51720e07511fca35baa694143f *R/gIndex.s
c4108b6a3c4d35f3b55b5207cd238563 *R/gendata.s
42bc8c310a3a754df70b9500e0f34485 *R/ggplot.Predict.s
8ab51f4a4855f922acb80108e9c46db2 *R/groupkm.s
9a2d001d398705fbd7588aa6591b6942 *R/hazard.ratio.plot.s
020407ed74c1c5d884f44ddcfccf6f7c *R/ia.operator.s
0c9042c90e269e41bb589b4e7807a356 *R/ie.setup.s
dafb49cf405cbe5bf5d3111bc283d56d *R/impactPO.r
5209409f8678e3d55996cd203d0bb595 *R/infoMxop.r
8a2b0cde54a12fa177eb1c5ff75e822a *R/latex.cph.s
4d7c81681a647a98c01756e7a26ec619 *R/latex.lrm.s
f47ce1d4c7d4c38dec4c8d0c18a68adc *R/latex.ols.s
5f337bb778ff363e521771a082a698df *R/latex.pphsm.s
fc7de822dd92e3dcd72c0b79857509b7 *R/latex.psm.s
149a37ed6a0fa7ef93d4e2c55e2db11c *R/latex.rms.s
b3a463f3a1f578f51a99762e03b1bad7 *R/lrm.fit.r
7a7ac93a83581d145b9b09562596f516 *R/lrm.s
b6eef10f1355b1fa4f609f8e8fd7270a *R/matinv.s
cbf9015d531734000689cf7f5c3e0011 *R/nomogram.s
0ddd28ffbc81b15f3ad685af433b35a1 *R/npsurv.s
bd77cc7d3054bae81732a64264235671 *R/ols.s
e34277f2301fa22d4ed12d7db03a663e *R/orm.fit.s
8fd0dfe82d9a944ed2e086c0b68664aa *R/orm.s
1b0a566e62389947ac801b1d3577fca2 *R/pentrace.s
b1c66f5b0a42cb6883834edde28bb78c *R/plot.Predict.s
6d498522f4f4be25b648e950f03cddab *R/plot.contrast.r
181837dc6972d157884f57239e4f944e *R/plot.nomogram.s
13e3c07f0bef461881b7f3e2688c8502 *R/plot.xmean.ordinaly.s
ccc38b92c6386a9a8ab4a6d35f392a3f *R/plotIntercepts.r
3748109cc1a1d22663dd6b404e5a33bc *R/plotp.Predict.s
48cd3c5c3d0fd6e979d18e10ea3afb79 *R/poma.r
5863a81bcc7e54c317961a96567734de *R/pphsm.s
3fc37f09e3a52d1fa898e28d473d6007 *R/predab.resample.s
d55dbb4e4eb754d79851bcb4ec4ecd64 *R/predict.lrm.s
88345749aa2ba5c497036edd0f36b4df *R/predictrms.s
c5f7a6b88ee210ba65aa39e842a28330 *R/print.psm.s
875327e51ab3213694e418ef6da20ed7 *R/processMI.r
207b1cd8307c96027b3ac0f1b8c60415 *R/psm.s
7bff8c345323b851ee7ce7be20213d36 *R/quickRefit.r
591526983ff2116e06dbc07d4b398cb2 *R/recode2integer.r
489aa87bc081c9beb220dbc832db9f97 *R/residuals.cph.s
cea11c823493540b5c47bd16124bad52 *R/residuals.lrm.s
4e687535941639c0b039e4ad54342353 *R/residuals.ols.s
9ce64f0b3dd4a0b575b12e0520acb61f *R/rexVar.r
83a00099237c166954159f2f42e2cea7 *R/rms.s
f4c10d913489dce7a92513acd475d280 *R/rms.trans.s
b0adb541ffdb75e4996064f88d358556 *R/rmsMisc.s
5e1dc97c3d5f9790c4017d6a72e4aa8a *R/robcov.s
43c599aeeb3bae8fd1c34691123f13f0 *R/sensuc.s
3c9e9590656fca662c6a6744b3a83328 *R/specs.rms.s
4013e2507c62c2b658c9d00e6c1d8f54 *R/summary.rms.s
199f066cfd4820002b9c554ad90a8f33 *R/survest.cph.s
0beb2e9b0b69a54cbb9706466991232e *R/survest.psm.s
b9b6b0b7aaa6475625958a7488af2158 *R/survfit.cph.s
4433b9ff6b74c70f68bd6956adfdee89 *R/survplot.npsurv.s
ee251131b9dc4f6ef7c54cb8b191a0b6 *R/survplot.rms.s
94b0cd6acc6de1604482af353cf18919 *R/survplotp.npsurv.s
042d1cab4966f0c9c484b072266526f2 *R/survreg.distributions.s
68af599d348be60c11e20ba4dbe8f70c *R/val.prob.s
0ebeb8a12e6d6579b4d058b99df2fca4 *R/val.surv.s
5af5237b40d2f105e0a895aa6cebd334 *R/validate.Rq.s
97a9aac2b6ceb8fd4a2754707c5c416a *R/validate.cph.s
aefcd5e5067eca502f9678c25ea5b580 *R/validate.lrm.s
5012e04a796a8ca059ca581cb17125e4 *R/validate.ols.s
e110631c8c7d10fc564eacf7fb6f752b *R/validate.psm.s
0a23e640e49d1020d9204dad743cb00f *R/validate.rpart.s
84d6e3430af3e4e15316636036fce82f *R/which.influence.s
6c73ac8f909c50913c05096d9cbf9b32 *README.md
80ddce7f0a779bd8f7c8c6cb50e3d403 *demo/00Index
c661b1cce7d38348936c3106c2f14dae *demo/all.R
aec272084f0bc6c5820ade888367b667 *inst/tests/Glm.s
eb5d7efa982e780027925dbf866f2f01 *inst/tests/Gls.s
31610c14ced3b83a4e0b4078325751c0 *inst/tests/Predict.s
bc8095c330b65347f9dcb17fcc34cf42 *inst/tests/Rq.s
23fc703249f5cd5caef7074a77fb7005 *inst/tests/Rq2.s
eabc8d4ec0d73d4efa1e209aa47db02e *inst/tests/anova-lr.r
fd1cad5cf21ccf1ce675578fd8567221 *inst/tests/anova-ols-mult-impute.r
e70ae9144a64cdc90a1b6b6b0ba573bb *inst/tests/bj.r
e78f6f42729458220ab1a20566f17afe *inst/tests/bootcov.r
3709c798ba6427dfc28b9e8c47b165e8 *inst/tests/boys.rda
5e7b576d7249e8e1c0f0998032275648 *inst/tests/bplot.r
d06ade885fee627349ff964d52dcf5d0 *inst/tests/calibrate.cph.r
c31bea1aab8ec112a1d25a339f21fd0a *inst/tests/calibrate.r
09026b645083392f61d6f7c7799a1a5b *inst/tests/contrast.r
85c9fc15158676d48b6339fe98250926 *inst/tests/contrast2.r
4bc199c6ba6cf2be7618f25ba03d2bc8 *inst/tests/cph.s
ae8fb2cd4abead1b04758492dc193c8b *inst/tests/cph2.s
fa534452c9c5352ae88d5d4bca3b74b7 *inst/tests/cph3.r
3f8229451bedbcf8baaf31eca25439bf *inst/tests/cph4.r
6e63a359a708d1ea44d7261b1b326e18 *inst/tests/cph5.r
0c77da26363ceed7d7ad661fe6820e69 *inst/tests/cphexactval.r
c010dd91bf9d68bf8c610ca0d00772dc *inst/tests/cphtdc.r
2df4803489ec0e21b19688347e870b88 *inst/tests/cphtdc2.r
09fb3226572971aa603f5f97ea9b7fdf *inst/tests/examples.Rmd
44e6ec5dbcfa9c832c98efd6a8a68bdd *inst/tests/gTrans.r
b77cf17d6252dcaf7696161e03b444bd *inst/tests/ggplot.Predict.Rd.timing.r
614dbbecfaeb31d079ebd5ed37d27948 *inst/tests/ggplot2-without-ggplot.Predict.r
2a01478121ee2b1cb808e18444d7cc45 *inst/tests/ggplot2.r
e28cb8b1f1dcc5894bb653423035c120 *inst/tests/ggplot2b.r
238e3c551cf902af023466e590a41c68 *inst/tests/ggplot3.r
78c2e6879f6f52799942c55cd0a4b702 *inst/tests/ggplotly.Rmd
842b0a375a7f08007f9e6f60aa689873 *inst/tests/impactPO.r
bc665e8f1017b48f9c0fff93c20a3274 *inst/tests/latex.r
d320318a0aec86ee3106aa4e97b574f4 *inst/tests/lrm-orm-penalty.r
269779a5a0cc2ba9484b78fd52caee99 *inst/tests/lrm.ols.penalty.r
153fdee9973f5fb16eae2078fcfcfa9f *inst/tests/lrm.ordinal.s
1f029ba41ffe13655ee4fb1485503dc3 *inst/tests/lrm.s
31563fbc00ac91ab4eaafbfdcd64231e *inst/tests/lrm2.r
4fa47e8ddf12927a995caaffa0743041 *inst/tests/lrm3.r
863998473a2ead09414ef580e857c21f *inst/tests/lrmMean.s
1981d65021164e6274c5f4878352c93b *inst/tests/makepredictcall.r
9998f196d9060e84d65fde309fa8938b *inst/tests/mi-robcov.r
0bfea8bedf36aaad4f651db178b61380 *inst/tests/mice.r
9f55aac4fdb9ac960572a9667938c46f *inst/tests/missdot.qmd
695c5f9aaaca8d02696bc338f14c5294 *inst/tests/model.matrix.s
1182fa67ac393f61e1b819f349362bfd *inst/tests/modelData.r
fc47b29381f0569c0453956c3fa66b4b *inst/tests/nomogram.r
87a4b2b30e186a88d025038e361cb67f *inst/tests/nomogram2.r
ca4c22c089aa43a2ddba99e90a9c107a *inst/tests/nomogram2.rda
58db00e39ab69a2d5f80229570afffc4 *inst/tests/npsurv.r
f81e037a10406d75e066ddb0d6fd616c *inst/tests/offset.r
d94fe4e9c9f884bf11933fb64b7bde0b *inst/tests/ols.penalty.r
8f54e66d3bfb24cb79a191aaf263a85a *inst/tests/ols.r
c3e3dcb1e9fbb6f9616df31598628a02 *inst/tests/orm-Mean.r
63ece85e727424c2a19fa2521d050ca1 *inst/tests/orm-bootcov.r
e5c6fe800f2fad290d6bd638f9ebbf53 *inst/tests/orm-bootcov2.r
c65c6af36bd5f45e024a7d6124d3b487 *inst/tests/orm-example.r
802e3dcb0e399d85443741111e7bd313 *inst/tests/orm-profile.r
930015b1b3d9b417f67018c84ebd41fd *inst/tests/orm-quantile.r
58b084b800665c8ba9314a5c2135e528 *inst/tests/orm-residuals.r
3852333978005ddc5ea290ea143c5ed5 *inst/tests/orm-simult.r
6630e16f091e64b27dd2bdbc6e4eb15b *inst/tests/orm-weight.r
5a86fbafb6adcd65b2539dfd158955ee *inst/tests/orm.s
4c835ba6af1de4b16ac8c11a5c768559 *inst/tests/orm2.s
2e3d6c8e679b251fd2af288fad9959f7 *inst/tests/orm3.r
7792ed341fe1b3fe9731ae107263ee73 *inst/tests/orm4.r
cec931317d86f22b852b31b9428246ec *inst/tests/orm5.r
18de57cd8e8be0eebd5abb879c47ea73 *inst/tests/orm7.r
2c86a7ae3df88984bc58cd7fe8e1ee27 *inst/tests/pakpahan.dta
a5775dcc1798405805eee86e3298b7dc *inst/tests/pentrace.s
69b043a0be51ad0b4de8c3a4cc9a02a2 *inst/tests/perlcode.s
01bb632b0c7bbf05df816b87cdaebb84 *inst/tests/plot.Predict.s
8071793bfcbfbea3a33f5d23c3632588 *inst/tests/plotly-Predict.r
55d1a303ae4faf053e4f6fecb806a03f *inst/tests/predictrms.s
6df4b105004e29922cf982aa93fe729c *inst/tests/processMI.r
c8ff7c81dc151041e4ddd9af0528446b *inst/tests/psm.s
d02db66f5d4051ad57d42b66c6ae65d1 *inst/tests/psm2.s
5aa1f697cfb18696718079b6142fc5cf *inst/tests/psm3.s
39eb77b8feecabe1d30a3ed68a8aa1d0 *inst/tests/qt50.rda
1ef7a0b539d260bf1c5baa01fbc6ca5f *inst/tests/rcs.r
6bd06faf29af811aa3a23aaf82b2a952 *inst/tests/rexVar.r
fe1a47ea68dcb943c0aa1b2baa9d733c *inst/tests/rms-lm.r
d992c3e3f567bcef7dad3fd3f79c7cbc *inst/tests/rms.r
78a6ba6fa3e1eef585168108954249ed *inst/tests/rms2.r
47a0a4f75d74265a2d00c4fbc17eb0c6 *inst/tests/robcov.r
797736495e82207542adfb56b6385682 *inst/tests/robcov2.r
4a96f3b1c46b7b00aaa738436a4528b8 *inst/tests/robcov3.r
b0ee1b349c50733323ae77a35296ef79 *inst/tests/robcov4.r
1ff9dfe21e8344d8f2495bbd941ce2fd *inst/tests/robcov_Yuqi.r
8dbb2cc72806004b13683a4f2a278fcd *inst/tests/sampledf.rda
8239ceefd65024c5e88ad95f585a0ba6 *inst/tests/scale.r
de76d469e88cbf53beace54bb8d62408 *inst/tests/simult.s
6c96ece56c4c1d87bcf5d4439e97393b *inst/tests/strat.model.matrix.r
21f2b7b8c1b5af38125b3429eb315521 *inst/tests/summary.r
8be9d3fe057bda9c2f4d3f4c48b1c211 *inst/tests/survest.r
67c6402e47995d0f5deab267db2210f2 *inst/tests/survest2.r
a1c8da481f087c6bfae0d6ba01bcbd6b *inst/tests/survfit.cph.s
0faa23c2b9768622ed4ed1086ef7098e *inst/tests/survfit.timedep.s
6e103d4b2b4f7111168482096a2a95cb *inst/tests/survplot.s
29b887ab0ca2485e372ab40833354866 *inst/tests/survplot2.r
0e9f55d9ba0db76c7ae17a6cdc23fd3e *inst/tests/survplotCompete.r
c46784d9a82df165c77516956d5ed5b8 *inst/tests/survplotp.r
7c04dd4ae4b7100da2bd5edcf56edc22 *inst/tests/val.prob.r
c3373ee5586af5c9ccf2e142b4a1169d *inst/tests/val.surv.data.txt
e417c5e43f9ab5c4b23f9314ac49698e *inst/tests/val.surv.s
023b9326fdd2390e600c57ad884992d0 *inst/tests/validate.cph.s
042369d3dc4216bdccc38b8499c5944a *inst/tests/validate.ols.r
4a21eb74da1d70cabb3267f66ca77fe7 *inst/tests/validate.rpart.r
cef54868b06ae129a62eb72eea010da0 *inst/tests/which.influence.r
d5aa55e05c59436e851a67e5d9957311 *man/ExProb.Rd
adef6383815663b196d933a6c761d2a9 *man/Function.Rd
5df7440b1b1197ba7cba07c08e761db7 *man/Glm.Rd
190e98538155306d35092acb68d6e2c7 *man/Gls.Rd
71ef60f0fed1343831e5b51b48b24b17 *man/LRupdate.Rd
69e14d5da0c9993bc8b1edba20677b0a *man/Ocens.Rd
881fc714800d00d3699d87a9566ba945 *man/Predict.Rd
135d1fd824f429b414f16ad983c98499 *man/Rq.Rd
56b1305c906ccb3fded49ae6191df7c9 *man/Xcontrast.Rd
78adbd40c9772ca6232b0ed8d60f4dce *man/anova.rms.Rd
7bc37171aca5512f28b890a3adb53559 *man/as.data.frame.Ocens.Rd
7f636294d90405adce96e512d2554dd2 *man/bj.Rd
8429775bf40e40c26f56e73a41c78d2e *man/bootBCa.Rd
70946a28ce8b5e2feab02cf0d2c886bc *man/bootcov.Rd
38b9e6206183b852764c3871e9f112f2 *man/bplot.Rd
135ce2a45159806774770d525806e0f6 *man/calibrate.Rd
5ead492adf115840b430a9974a9c275d *man/contrast.Rd
ec5b43002f92b5bdc0bbcc8ba16b9870 *man/cph.Rd
f9d4642191c52890918fef04bafb7ea5 *man/cr.setup.Rd
12d5866a7a96857fcd37ceef78e62539 *man/datadist.Rd
12af33b14c7aa24497102ab49f7fd621 *man/fastbw.Rd
1d7b670f52edf67500fe7f6a462efe99 *man/gIndex.Rd
a0bdb35af0b8ae4f10c7a9201310b0b5 *man/gendata.Rd
a326b636af91954a582907706fee03b6 *man/ggplot.Predict.Rd
37c5077db63d24a17b4edce48b12bdcc *man/groupkm.Rd
329ab9252b9bd32b5a1ccf8696b2f5d2 *man/hazard.ratio.plot.Rd
95a227697d8e064d916ad91f0ef0d580 *man/ie.setup.Rd
685af375b5e702837f6907efac5a9baa *man/impactPO.Rd
39994a120cf3f73b384f62c57a76fa1e *man/importexport.Rd
de01984d3a4bf34f3cfcf69c281b69b9 *man/infoMxop.Rd
0a722c7675fd3a98c814acf54bc6dfa6 *man/latex.cph.Rd
a86b461541076eadfa24aeea3426ef61 *man/latexrms.Rd
3b7ed8e915cfddebce4efb33df84dfbf *man/lrm.Rd
1dea1150555e7f11779eec671c137d37 *man/lrm.fit.Rd
75ea79a8d4c9c93306325f86197f20d1 *man/matinv.Rd
b908a7f4e4d8a7aaae283b3a97adb3e6 *man/nomogram.Rd
9d278d07331f133359bdfe934dffc661 *man/npsurv.Rd
344616966eae55d01373b5297e671848 *man/ols.Rd
3362079ee96893f3e907d38792839458 *man/orm.Rd
e3b8586ca34e213bbd508a659aa7d21a *man/orm.fit.Rd
bc503b288cb469762531c8142a6bbc24 *man/pentrace.Rd
896560da90c4d66b1e11976f8cc546ba *man/plot.Predict.Rd
1c66176050732b0a3dac7604acfa0f1b *man/plot.contrast.rms.Rd
eaa72ef158a87800812ef8d029d6cc97 *man/plot.rexVar.Rd
a3835c805896d06db576b8297d4444be *man/plot.xmean.ordinaly.Rd
9df489feb189e97824082723549abe06 *man/plotIntercepts.Rd
2a398c222ac1241ce53cc6777cb753a1 *man/plotp.Predict.Rd
ab5b64947d7c27c999bbeb7010f2e6fa *man/poma.Rd
3f75405ef0fc8a622fb60061f55e10d9 *man/pphsm.Rd
574eeb4dba4018ecf3027393ee5a3e1d *man/predab.resample.Rd
cb512ac324ec973049721caf280ab695 *man/predict.lrm.Rd
442aa57cc118fe26ac85d70d6be3ef58 *man/predictrms.Rd
73b84cdb57de43f4ca8262b6916119d5 *man/print.Glm.Rd
82c0ffe29e5f8f104dd25c1eadc1d313 *man/print.cph.Rd
a8318fee654b7fec328199af1b50e914 *man/print.impactPO.Rd
abefdcba6ab23d76fdb89fd7bdf75996 *man/print.ols.Rd
ce752e2f2016842f1582adb16adbb72d *man/print.rexVar.Rd
249ddeb10cec98627e96e3c6e5a89863 *man/prmiInfo.Rd
9cc115845e34d4a3154dafe1697405cb *man/processMI.Rd
bbba7eddc752508d9f3b92150136d452 *man/processMI.fit.mult.impute.Rd
8f459e802858e69976b9e0accb9d524d *man/psm.Rd
b1551a9697a5ccf2907dbfe72b2c3d90 *man/recode2integer.Rd
ed0825395204d542d5c0f7185129f1dd *man/residuals.Glm.Rd
8a98acf052c4afb427f11657843e8ca2 *man/residuals.cph.Rd
595407cd3475f74ceb2f6f85688eb1c9 *man/residuals.lrm.Rd
4168120f7d6d40a9b7bf553536685d57 *man/residuals.ols.Rd
349f8f61d78f83bb0067e287c5429a5f *man/rexVar.Rd
9cc491b1573d25d29f204d8021420ac7 *man/rms-internal.Rd
82c739a7126c7eb298dae9960a352891 *man/rms.Rd
8af8468b46ce27ce92e24af106802444 *man/rms.trans.Rd
9032b78dbde6b5982351941f0bdfc5fb *man/rmsMisc.Rd
1c8c30350e443bcf835339fc1f4b49fe *man/robcov.Rd
591c8bd7411ca5ea044e7b06c7e626ca *man/sensuc.Rd
729edbf9d97a12c48b5b62e6c1a0d761 *man/setPb.Rd
05290f6bddf74c426aaaa5b0a505fb4d *man/specs.rms.Rd
623e19d83b740434a2741799174129f1 *man/sub-.Ocens.Rd
c609ba6323a9ebc665c371c6370f3d8c *man/summary.rms.Rd
edb38520a2c2342e749fee1b185b08ce *man/survest.cph.Rd
3e4f1ec21f3ccee004174d24544d5835 *man/survest.psm.Rd
d5f219707c45a642e6a23a24aaa8ea28 *man/survfit.cph.Rd
925b1121cc2f697c95c92a32f77ebd6d *man/survplot.Rd
7b0e5cb68521a374a751166c4e093815 *man/val.prob.Rd
122703dde9259e49be8519c68d36954b *man/val.surv.Rd
a44ce2c1c611ad758ff9bb6054eff359 *man/validate.Rd
ae89a4b05e4a158b4e15156376bcfd61 *man/validate.Rq.Rd
14df852198c15327d6dd1e109343b065 *man/validate.cph.Rd
aa3d78ae51fbeec5afe752ce79881cc8 *man/validate.lrm.Rd
a227454802f07153a0cb64b3c7c0cbf7 *man/validate.ols.Rd
4354f28b31c1b35694d600c35973ef33 *man/validate.rpart.Rd
a4abe6c8b9f87f6e7d2be022b4ec2e1d *man/vif.Rd
9a99f2660179334c51d4700ed6c46add *man/which.influence.Rd
d3766ab6de08fc928c7253c043e013d5 *man/zzzrmsOverview.Rd
c138ed54654c9a4b52d9746acab021a2 *src/init.c
6d55c12f64ce842e6a5fa393619ca26e *src/lrmll.f90
ce0bcaf5e15b9792ebb9f634476f90d7 *src/mlmats.f
292c4346247814ae6a09ca67270ac534 *src/ormll.f90
47bdb0615589c0725e92bd818f2a01e9 *src/ratfor/robcovf.r
8bfb0619dbaca3643f75afbe49e9fcb7 *src/robcovf.f90
# rms/R/pphsm.s
pphsm <- function(fit)
{
warning("at present, pphsm does not return the correct covariance matrix")
clas <- class(fit)[1]
if(clas %nin% c('psm', 'survreg'))
stop("fit must be created by psm or survreg")
if(fit$dist %nin% c('exponential','weibull'))
stop("fit must have used dist='weibull' or 'exponential'")
fit$coefficients <- -fit$coefficients/fit$scale
fit$scale.pred <- c("log Relative Hazard","Hazard Ratio")
class(fit) <- c("pphsm", class(fit))
fit
}
print.pphsm <- function(x, digits = max(options()$digits - 4, 3),
correlation = TRUE, ...)
{
if (length(f <- x$fail) && f)
stop(" Survreg failed. No summary provided")
cat("Parametric Survival Model Converted to PH Form\n\n")
stats <- x$stats
stats[3] <- round(stats[3],2)
stats[5] <- round(stats[5],4)
stats[6] <- round(stats[6],2)
print(formatSep(stats),quote=FALSE)
cat("\n")
print(c(x$coef, x$icoef[2]), digits=digits)
correl <- x$correl
if (correlation && !is.null(x$correl)) { ## FEH
p <- dim(correl)[2]
if (p > 1) {
cat("\nCorrelation of Coefficients:\n")
ll <- lower.tri(correl)
correl[ll] <- format(round(correl[ll], digits = digits))
correl[!ll] <- ""
print(correl[-1, -p, drop = FALSE], quote = FALSE)
}
}
cat("\n")
invisible()
}
vcov.pphsm <- function(object, ...) .NotYetImplemented()
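## Usage sketch (illustrative only, not from the package; the data frame and
## variable names below are hypothetical):
## f <- psm(Surv(dtime, death) ~ age + sex, data=d, dist='weibull')
## g <- pphsm(f)     # negates coefficients and divides by scale -> PH form
## print(g)          # reports log relative hazards / hazard ratios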
# rms/R/orm.fit.s
orm.fit <- function(x=NULL, y,
family=c("logistic","probit","loglog","cloglog","cauchit"),
offset, initial,
opt_method=c('NR', 'LM'),
maxit=30L, eps=5e-4, gradtol=1e-3, abstol=1e10,
minstepsize=1e-2, tol=.Machine$double.eps, trace=FALSE,
penalty.matrix=NULL, weights=NULL, normwt=FALSE,
scale=FALSE, inclpen=TRUE, y.precision = 7,
compstats=TRUE)
{
cal <- match.call()
family <- match.arg(family)
opt_method <- match.arg(opt_method)
n <- length(y)
if(! length(x)) {
p <- 0
xname <- NULL
x <- 0.
}
else {
if(! is.matrix(x)) x <- as.matrix(x)
dx <- dim(x)
p <- dx[2L]
if(dx[1] != n) stop("x and y must have same number of rows")
xname <- dimnames(x)[[2]]
if(! length(xname)) xname <- paste("x[", 1 : p, "]", sep="")
}
len.penmat <- length(penalty.matrix)
penpres <- len.penmat && any(penalty.matrix != 0.)
if(p == 0 && penpres) stop('may not specify penalty.matrix without predictors')
if(penpres && any(dim(penalty.matrix) != p))
stop(paste("penalty.matrix does not have", p, "rows and columns"))
penmat <- if(! penpres) matrix(0e0, nrow=p, ncol=p) else penalty.matrix
## Extreme value type I dist = Gumbel maximum = exp(-exp(-x)) = MASS:::pgumbel
## Gumbel minimum = 1 - exp(-exp(x))
families <- probabilityFamilies
familiesDefined <- names(families)
link <- match(family, familiesDefined, nomatch=0)
if(link == 0)
stop('family must be one of ', paste(familiesDefined, collapse=' '))
fam <- families[[family]]
wtpres <- TRUE
if(! length(weights)) {
wtpres <- FALSE
normwt <- FALSE
weights <- rep(1.0, n)
}
if(length(weights) != n) stop('length of weights must equal length of y')
if(normwt) weights <- weights * n / sum(weights)
initial.there <- ! missing(initial)
if(p > 0 && scale) {
x <- scale(x)
scinfo <- attributes(x)[c('scaled:center', 'scaled:scale')]
xbar <- as.matrix(scinfo[[1]])
xsd <- as.matrix(scinfo[[2]])
# Transform penalty matrix to new scale
trans <- rbind(cbind(1., matrix(0., 1, p)), # 1.: only dealing with middle intercept
cbind(- matrix(xbar / xsd, ncol=1),
diag(1. / as.vector(xsd), ncol=p)))
if(penpres) penmat <- t(trans[-1, -1]) %*% penmat %*% trans[-1, -1]
}
# Prevent y levels from being created because of e.g. changes in the 7th decimal place
w <- recode2integer(y, precision=y.precision)
y <- w$y - 1
ylevels <- w$ylevels
kmid <- max(w$whichmedian - 1L, 1L)
numy <- w$freq
mediany <- w$median
k <- length(ylevels) - 1L
if(k == 1) kmid <- 1
iname <- if(k == 1) "Intercept" else paste("y>=", ylevels[-1L], sep="")
name <- c(iname, xname)
if(missing(offset) || ! length(offset) || (length(offset) == 1 && offset == 0.))
offset <- rep(0., n)
ofpres <- ! all(offset == 0.)
if(ofpres && length(offset) != n) stop("offset and y must have same length")
if(n < 3) stop("must have >=3 non-missing observations")
nv <- p + k
sumwty <- tapply(weights, y, sum)
sumwt <- sum(sumwty)
if(! wtpres && any(numy != sumwty)) stop('program logic error 1')
sumw <- if(normwt) numy else as.integer(round(sumwty))
if(missing(initial)) {
ncum <- rev(cumsum(rev(sumwty)))[2 : (k + 1)]
pp <- ncum / sumwt
initial <- fam$inverse(pp)
if(ofpres) initial <- initial - mean(offset)
initial <- c(initial, rep(0., p))
}
loglik <- -2 * sum(sumwty * log(sumwty / sum(sumwty)))
if(p==0 & ! ofpres) {
z <- ormfit(NULL, y, k, initial=initial[1 : k],
offset=offset, wt=weights,
penmat=penmat, opt_method=opt_method, maxit=maxit,
tolsolve=tol, objtol=eps, gradtol=gradtol, paramtol=abstol,
trace=trace, link=link, iname=iname, xname=xname)
if(z$fail) return(structure(list(fail=TRUE), class="orm"))
kof <- z$coef
loglik <- z$loglik
info <- z$info
}
if(ofpres) {
## Fit model with only intercept(s) and offset
## Check that lrm.fit uses penmat in this context ??
z <- ormfit(NULL, y, k, initial=initial[1 : k],
offset=offset, wt=weights,
penmat=penmat, opt_method=opt_method, maxit=maxit,
tolsolve=tol, objtol=eps, gradtol=gradtol, paramtol=abstol,
trace=trace, link=link, iname=iname, xname=xname)
if(z$fail) return(structure(list(fail=TRUE), class="orm"))
kof <- z$coef
loglik <- c(loglik, z$loglik)
initial <- c(z$coef, rep(0., p))
if(p == 0) info <- z$info
}
if(p > 0) {
# Fit model with intercept(s), offset, covariables
z <- ormfit(x, y, k, initial=initial, offset=offset, wt=weights,
penmat=penmat, opt_method=opt_method,
maxit=maxit, tolsolve=tol, objtol=eps,
gradtol=gradtol, paramtol=abstol,
trace=trace, link=link, iname=iname, xname=xname)
if(z$fail) return(structure(list(fail=TRUE), class="orm"))
loglik <- c(loglik, z$loglik)
kof <- z$coef
info <- z$info
# Compute linear predictor before unscaling beta, as x is scaled
lp <- matxv(x, kof, kint=kmid)
if(scale) {
betas <- kof[- (1 : k)]
kof[1 : k] <- kof[1 : k] - sum(betas * xbar / xsd)
kof[-(1 : k)] <- betas / xsd
xbar <- as.vector(xbar)
names(xbar) <- xname
xsd <- as.vector(xsd)
names(xsd) <- xname
info$scale <- list(mean=xbar, sd=xsd)
}
} else lp <- rep(kof[kmid], n)
# Add second derivative of penalty function if needed, on the original scale
if(! inclpen && penpres)
info$b <- info$b - penalty.matrix
names(kof) <- name
stats <- NULL
if(compstats) {
if(p == 0) {llnull <- loglik[length(loglik)]; model.lr <- 0e0}
else {
llnull <- loglik[length(loglik) - 1L]
model.lr <- llnull - loglik[length(loglik)]
}
model.df <- p
if(initial.there || maxit == 1)
model.p <- score <- score.p <- NA
else {
score <- z$score
if(model.df == 0)
model.p <- score.p <- 1.
else {
model.p <- 1. - pchisq(model.lr, model.df)
score.p <- 1. - pchisq(score, model.df)
}
}
r2 <- 1. - exp(- model.lr / sumwt)
r2.max <- 1. - exp(- llnull / sumwt)
r2 <- r2 / r2.max
r2m <- R2Measures(model.lr, model.df, sumwt, sumwty)
if(k > 1L) attr(lp, 'intercepts') <- kmid
g <- GiniMd(lp)
## compute average |difference| between 0.5 and the conditional
## probability of being >= the marginal median
pdm <- mean(abs(fam$cumprob(lp) - 0.5))
rho <- if(p == 0) 0e0 else cor(rank(lp), rank(y))
## Somewhat faster:
## rho <- .Fortran('rcorr', cbind(lp, y), as.integer(n), 2L, 2L, r=double(4),
## integer(4), double(n), double(n), double(n), double(n),
## double(n), integer(n), PACKAGE='Hmisc')$r[2]
stats <- c(n, length(numy), mediany, z$dmax, model.lr, model.df,
model.p, score, score.p, rho, r2, r2m, g, exp(g), pdm)
nam <- c("Obs", "Distinct Y", "Median Y", "Max Deriv",
"Model L.R.", "d.f.", "P", "Score", "Score P",
"rho", "R2", names(r2m), "g", "gr", "pdm")
names(stats) <- nam
}
info$iname <- iname
info$xname <- xname
retlist <- list(call = cal,
freq = numy,
yunique = ylevels,
stats = stats,
coefficients = kof,
var = NULL,
u = z$u,
iter = z$iter,
family = family, trans=fam,
deviance = loglik,
non.slopes = k,
interceptRef = kmid,
linear.predictors = lp,
penalty.matrix = if(penpres) penalty.matrix,
weights = if(wtpres) weights,
xbar = if(p > 0 && scale) xbar,
xsd = if(p > 0 && scale) xsd,
info.matrix = info,
fail = FALSE)
class(retlist) <- 'orm'
retlist
}
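## Minimal usage sketch (assumed data; orm.fit is normally called through orm()):
## set.seed(1)
## x <- cbind(age=rnorm(100, 50, 10), sex=sample(0:1, 100, TRUE))
## y <- sample(1:5, 100, TRUE)
## f <- orm.fit(x, y, family='logistic')
## f$stats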
ormfit <-
function(x, y, k, link, initial,
offset=rep(0., n), wt=rep(1., n), penmat=matrix(0., p, p), opt_method='NR',
maxit=30L, objtol=5e-4, gradtol=1e-3, paramtol=1e10, tolsolve=.Machine$double.eps,
minstepsize=1e-2, trace=FALSE, iname, xname) {
n <- length(y)
p <- length(initial) - k
if(k > 1 && any(diff(initial[1:k]) >= 0))
stop('initial values for intercepts are not in descending order')
storage.mode(x) <- 'double'
storage.mode(y) <- 'integer'
storage.mode(k) <- 'integer'
storage.mode(p) <- 'integer'
storage.mode(initial) <- 'double'
storage.mode(offset) <- 'double'
storage.mode(wt) <- 'double'
storage.mode(penmat) <- 'double'
storage.mode(link) <- 'integer'
rfort <- function(theta, what=3L, debug=0L) {
p <- as.integer(length(theta) - k)
if(debug) {
a <- llist(n, k, p, y, link, what)
s <- sapply(a, storage.mode)
if(any(s != 'integer')) stop(s)
a <- llist(x, offset, wt, penmat, theta[1:k], theta[-(1:k)],
logL=numeric(1))
s <- sapply(a, storage.mode)
if(any(s != 'double')) stop(s)
g <- function(x) if(is.matrix(x)) paste(dim(x), collapse='x') else length(x)
print(sapply(a, g), quote=FALSE)
}
w <- .Fortran(F_ormll, n, k, p, x, y, offset, wt, penmat,
link=link, theta[1:k], theta[-(1:k)],
logL=numeric(1), grad=numeric(k + p),
a=matrix(0e0, k, 2), b=matrix(0e0, p, p), ab=matrix(0e0, k, p),
what=what, debug=as.integer(debug), 1L, salloc=integer(1))
if(w$salloc != 0)
stop('Failed dynamic array allocation in Fortran subroutine ormll: code ', w$salloc)
w
}
if(missing(x) || ! length(x) || p == 0) {
x <- 0.
p <- 0L
}
nv <- k + p
if(length(initial) < nv)
initial <- c(initial, rep(0., nv - length(initial)))
if(trace > 2) prn(initial)
m <- function(x) max(abs(x))
if(maxit == 1) {
w <- rfort(initial)
# Information matrix is negative Hessian on LL scale
info <- list(a =- w$a, b = - w$b, ab = - w$ab,
iname=iname, xname=xname)
res <- list(coefficients = initial,
loglik = w$logL,
info = info,
u = w$grad,
dmax=m(w$grad), score=NA,
iter=1, fail=FALSE, class='orm')
return(res)
}
theta <- initial # Initialize the parameter vector
oldobj <- 1e10
score.test <- NA
gradtol <- gradtol * n / 1e3
# Newton-Raphson MLE with step-halving, initial draft generated by ChatGPT
if(opt_method == 'NR') {
for (iter in 1:maxit) {
w <- rfort(theta)
if(iter == 1) objf <- w$logL
gradient <- w$grad
hess <- infoMxop(w[c('a', 'b', 'ab')])
# Newton-Raphson step
delta <- try(Matrix::solve(hess, gradient, tol=tolsolve))
# Runs very slowly if Matrix:: is omitted; probably not dispatching to Matrix methods
if(inherits(delta, 'try-error')) {
message('singular Hessian matrix')
return(list(fail=TRUE))
}
if(trace > 0)
cat('Iteration:', iter, ' -2LL:', format(objf, nsmall=4),
' Max |gradient|:', m(gradient),
' Max |change in parameters|:', m(delta), '\n', sep='')
if(opt_method == 'NR' && is.na(score.test) && p > 0 &&
all(theta[- (1 : k)] == 0.))
score.test <- - gradient %*% delta
step_size <- 1.0 # Initialize step size for step-halving
# Step-halving loop
while (TRUE) {
new_theta <- theta - step_size * delta # Update parameter vector
objfnew <- rfort(new_theta, what=1L)$logL
if(trace > 1)
cat('Old, new, old - new -2 LL:', objf, objfnew, objf - objfnew, '\n')
if (! is.finite(objfnew) || objfnew > objf + 1e-6) {
# Objective function failed to be reduced or is infinite
step_size <- step_size / 2e0 # Reduce the step size
if(trace > 0) cat('Step size reduced to', step_size, '\n')
if(step_size < minstepsize) {
message('Step size ', step_size, ' has reduced below minstepsize=',
minstepsize,
' without improving log likelihood; fitting stopped')
return(list(fail=TRUE))
}
} else {
theta <- new_theta # Accept the new parameter vector
oldobj <- objf
objf <- objfnew
if(trace > 2) prn(theta)
break
}
}
# Convergence check - must meet 3 criteria
if((objf <= oldobj + 1e-6 && (oldobj - objf < objtol)) &&
(m(gradient) < gradtol) &&
(m(delta) < paramtol)) {
# Compute the final information matrix (in 3 parts) since it was not computed
# during Newton-Raphson updating
w <- rfort(theta)
info <- list(a =- w$a, b = - w$b, ab = - w$ab,
iname=iname, xname=xname)
return(list(coef = theta,
loglik = w$logL,
u = w$grad,
info = info,
objchange = oldobj - w$logL,
dmax = m(w$grad),
maxparamchange = m(delta),
score = score.test,
iter = iter,
fail = FALSE) )
}
}
msg <- paste('Reached', maxit, 'iterations without convergence\nChange in -2LL:',
oldobj -objf, ' Max |gradient|:', m(gradient),
' Max |change in parameters|:', m(delta))
message(msg)
return(list(fail=TRUE))
} else { # L-M
lambda <- 1e-3 # hard-wired for L-M
oldobj <- 1e10
objf <- NA # needed in case no H_damped is ever positive definite
w <- rfort(theta)
gradient <- w$grad
H <- infoMxop(w[c('a', 'b', 'ab')])
for (iter in 1:maxit) {
H_damped <- H + lambda * Matrix::Diagonal(x = Matrix::diag(H))
delta <- try(Matrix::solve(H_damped, gradient, tol=tolsolve))
if(inherits(delta, 'try-error')) {
# Increase lambda if Hessian is ill-conditioned
lambda <- lambda * 10.
next
}
theta_new <- theta - delta
objf <- rfort(theta_new, what=1L)$logL
if(trace > 0)
cat('Iteration:', iter, ' -2LL:', format(objf, nsmall=4),
' Max |gradient|:', m(gradient),
' Max |change in parameters|:', m(delta), '\n', sep='')
if(trace > 1)
cat('Old, new, old - new -2 LL:', oldobj, objf, oldobj - objf, '\n')
if(is.finite(objf) &&
(objf <= oldobj + 1e-6 && (oldobj - objf < objtol)) &&
(m(gradient) < gradtol) &&
(m(delta) < paramtol)) break
if(is.finite(objf) && (objf < oldobj)) {
# Accept the step and decrease lambda
theta <- theta_new
oldobj <- objf
w <- rfort(theta)
gradient <- w$grad
H <- infoMxop(w[c('a', 'b', 'ab')])
lambda <- lambda / 10.
} else {
# Reject the step and increase lambda
lambda <- lambda * 10.
}
}
if(iter == maxit) {
msg <- paste('Reached', maxit, 'iterations without convergence\n-2LL:',
objf, ' Max |gradient|:', m(gradient))
message(msg)
return(list(fail=TRUE))
}
w <- rfort(theta)
info <- list(a =- w$a, b = - w$b, ab = - w$ab,
iname=iname, xname=xname)
return(list(coef = theta,
loglik = w$logL,
u = w$grad,
info = info,
objchange = objf - w$logL,
dmax = m(w$grad),
maxparamchange = m(delta),
score = NA,
iter = iter,
fail = FALSE) )
} # End L-M
}
## Note: deriv and deriv2 below are no longer used, as they are hard-coded into ormll
## Extreme value type I dist = Gumbel maximum = exp(-exp(-x)) = MASS:::pgumbel
## Gumbel minimum = 1 - exp(-exp(x))
probabilityFamilies <-
list(logistic =
list(cumprob=plogis,
inverse=qlogis,
deriv =function(x, f) f * (1 - f),
deriv2 =function(x, f, deriv) f * (1 - 3*f + 2*f*f) ),
probit =
list(cumprob=pnorm,
inverse=qnorm,
deriv =function(x, ...) dnorm(x),
deriv2 =function(x, f, deriv) - deriv * x),
loglog =
list(cumprob=function(x) exp(-exp(-x)),
inverse=function(x) -log(-log(x )),
deriv =function(x, ...) exp(-x - exp(-x)),
deriv2 =function(x, ...)
ifelse(abs(x) > 200, 0,
exp(-x - exp(-x)) * (-1 + exp(-x)))),
cloglog =
list(cumprob=function(x) 1 - exp(-exp(x)),
inverse=function(x) log(-log(1 - x)),
deriv =function(x, ...) exp( x - exp( x)),
deriv2 =function(x, f, deriv)
ifelse(abs(x) > 200, 0, deriv * ( 1 - exp( x)))),
cauchit =
list(cumprob=pcauchy, inverse=qcauchy,
deriv =function(x, ...) dcauchy(x),
deriv2 =function(x, ...) -2 * x * ((1 + x*x)^(-2)) / pi)
)
## Check:
## P(x) = plogis(x); P'(x) = P(x) - P(x)^2
## d <- function(x) plogis(x) - 3*plogis(x)^2 + 2*plogis(x)^3
## x <- seq(-3, 3, length=150)
## plot(x, d(x), type='l')
## ad <- c(NA,diff(dlogis(x))/(x[2]-x[1]))
## lines(x, ad, col='red')
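## A similar check (sketch) for the cloglog link defined above:
## P(x) = 1 - exp(-exp(x)); P'(x) should equal exp(x - exp(x))
## x <- seq(-3, 3, length=150)
## P <- function(x) 1 - exp(-exp(x))
## ad <- c(NA, diff(P(x)) / (x[2] - x[1]))   # numerical derivative
## plot(x, exp(x - exp(x)), type='l')
## lines(x, ad, col='red')                   # the two curves should overlay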
# rms/R/contrast.s
contrast <- function(fit, ...) UseMethod("contrast")
contrast.rms <-
function(fit, a, b, a2, b2, ycut=NULL, cnames=NULL, fun=NULL, funint=TRUE,
type=c('individual','average','joint'),
conf.type=c('individual','simultaneous','profile'), usebootcoef=TRUE,
boot.type=c('percentile','bca','basic'),
posterior.summary=c('mean', 'median', 'mode'),
weights='equal', conf.int=0.95, tol=1e-7, expand=TRUE, se_factor=4,
plot_profile=FALSE, ...)
{
type <- match.arg(type)
conf.type <- match.arg(conf.type)
boot.type <- match.arg(boot.type)
posterior.summary <- match.arg(posterior.summary)
draws <- fit$draws
bayes <- length(draws) > 0
if(conf.type == 'profile' && type != 'individual')
stop('conf.type=profile only works with type=individual')
if(bayes & (type == 'joint' || conf.type == 'simultaneous'))
stop('type=joint or conf.type=simultaneous not allowed for Bayesian models')
zcrit <- if(length(idf <- fit$df.residual)) qt((1 + conf.int) / 2, idf) else
qnorm((1 + conf.int) / 2)
bcoef <- if(usebootcoef) fit$boot.Coef
pmode <- function(x) {
dens <- density(x)
dens$x[which.max(dens$y)[1]]
}
if(! bayes) {
betas <- coef(fit)
iparm <- 1 : length(betas)
}
fite <- fit
if(inherits(fit, 'orm') || inherits(fit, 'lrm')) {
nrp <- 1
## Note: nrp is 1 for orm because vcov defaults to intercepts='mid', and
## we override the default that vcov uses for lrm
fit$override_vcov_intercept <- 'mid'
iparm <- c(fit$interceptRef, (num.intercepts(fit) + 1) : length(betas))
betas <- betas[iparm]
fite$coefficients <- betas # for simult confint
if(usebootcoef) bcoef <- bcoef[, iparm, drop=FALSE]
} else nrp <- num.intercepts(fit, 'var')
if(length(bcoef) && conf.type != 'simultaneous')
conf.type <- switch(boot.type,
percentile = 'bootstrap nonparametric percentile',
bca = 'bootstrap BCa',
basic = 'basic bootstrap')
partialpo <- inherits(fit, 'blrm') && fit$pppo > 0
if(partialpo & ! length(ycut))
stop('must specify ycut for partial prop. odds model')
cppo <- fit$cppo
if(partialpo && ! length(cppo))
stop('only implemented for constrained partial PO models')
pred <- function(d) {
## predict.blrm duplicates rows of design matrix for partial PO models
## if ycut has length > 1 and only one observation is being predicted
if(partialpo) predict(fit, d, type='x', ycut=ycut)
else
predict(fit, d, type='x')
}
da <- do.call('gendata', list(fit, factors=a, expand=expand))
xa <- pred(da)
if(! missing(b)) {
db <- do.call('gendata', list(fit, factors=b, expand=expand))
xb <- pred(db)
}
ma <- nrow(xa)
if(missing(b)) {
xb <- 0 * xa
db <- da
}
mb <- nrow(xb)
if(! missing(a2)) {
if(missing(b) || missing(b2)) stop('b and b2 must be given if a2 is given')
da2 <- do.call('gendata', list(fit, factors=a2, expand=expand))
xa2 <- pred(da2)
ma2 <- nrow(xa2)
db2 <- do.call('gendata', list(fit, factors=b2, expand=expand))
xb2 <- pred(db2)
mb2 <- nrow(xb2)
}
allsame <- function(x) diff(range(x)) == 0
vary <- NULL
mall <- c(ma, mb)
ncols <- c(ncol(da), ncol(db))
if(! missing(a2)) {
mall <- c(mall, ma2, mb2)
ncols <- c(ncols, ncol(da2), ncol(db2))
}
if(allsame(mall) && ! allsame(ncols)) stop('program logic error')
if(any(sort(names(da)) != sort(names(db))))
stop('program logic error')
if(! missing(a2) && (any(sort(names(da)) != sort(names(da2))) ||
any(sort(names(da)) != sort(names(db2)))))
stop('program logic error')
if(type != 'average' && ! length(cnames)) {
## If all lists have same length, label contrasts by any variable
## that has the same length and values in all lists
k <- integer(0)
nam <- names(da)
for(j in 1 : length(da)) {
w <- nam[j]
eq <- all(as.character(da[[w]]) == as.character(db[[w]]))
if(! missing(a2))
eq <- eq & all(as.character(da[[w]]) == as.character(da2[[w]])) &
all(as.character(db[[w]]) == as.character(db2[[w]]))
## was da[[2]] 2023-09-07
if(eq) k <- c(k, j)
}
if(length(k)) vary <- da[k]
} else if(max(mall) > 1) {
## Label contrasts by values of longest variable in list if
## it has the same length as the expanded design matrix
d <- if(ma > 1) a else b
if(! missing(a2) && (max(ma2, mb2) > max(ma, mb)))
d <- if(ma2 > 1) a2 else b2
l <- sapply(d, length)
vary <- if(sum(l == max(mall)) == 1) d[l == max(mall)]
}
if(sum(mall > 1) > 1 && ! allsame(mall[mall > 1]))
stop('lists of settings with more than one row must all have the same # rows')
mm <- max(mall)
if(mm > 1 && any(mall == 1)) {
if(ma == 1) xa <- matrix(xa, nrow=mm, ncol=ncol(xa), byrow=TRUE)
if(mb == 1) xb <- matrix(xb, nrow=mm, ncol=ncol(xb), byrow=TRUE)
if(! missing(a2)) {
if(ma2 == 1) xa2 <- matrix(xa2, nrow=mm, ncol=ncol(xa2), byrow=TRUE)
if(mb2 == 1) xb2 <- matrix(xb2, nrow=mm, ncol=ncol(xb2), byrow=TRUE)
}
}
if(bayes && length(fun) && inherits(fit, 'blrm')) {
if(! missing(a2)) stop('fun= is only implemented for blrm fits')
if(missing(b)) stop('b must be specified when fun= is given')
if(!missing(ycut)) stop('ycut not used with fun=')
pa <- predict(fit, da, fun=fun, funint=funint, posterior.summary='all')
pb <- predict(fit, db, fun=fun, funint=funint, posterior.summary='all')
if(length(cnames)) colnames(pa) <- colnames(pb) <- cnames
# If fun has an intercepts argument, the intercept vector must be
# updated for each draw
if(! length(cnames))
cnames <- if(length(vary)) rep('', ncol(pa)) else
as.character(1 : ncol(pa))
colnames(pa) <- colnames(pb) <- cnames
res <- list(esta=pa, estb=pb,
Xa=xa, Xb=xb,
nvary=length(vary))
return(structure(res, class='contrast.rms'))
} # end if bayes & length(fun) ...
X <- xa - xb
if(! missing(a2)) X <- X - (xa2 - xb2)
m <- nrow(X)
if(nrp > 0) X <- cbind(matrix(0., nrow=m, ncol=nrp), X)
if(is.character(weights)) {
if(weights != 'equal') stop('weights must be "equal" or a numeric vector')
weights <- rep(1, m)
} else if(length(weights) > 1 && type != 'average')
stop('can specify more than one weight only for type="average"')
else if(length(weights) != m) stop(paste('there must be', m, 'weights'))
weights <- as.vector(weights)
if(m > 1 && type=='average')
X <- matrix(apply(weights*X, 2, sum) / sum(weights), nrow=1,
dimnames=list(NULL, dimnames(X)[[2]]))
cdraws <- NULL
if(bayes) {
cdraws <- draws %*% t(X)
if(length(cnames)) colnames(cdraws) <- cnames
v <- var(cdraws)
ndf <- if(is.matrix(v)) nrow(v) else 1
ci <- apply(cdraws, 2, rmsb::HPDint, prob=conf.int)
lower <- ci[1, ]
upper <- ci[2, ]
PP <- apply(cdraws, 2, function(u) mean(u > 0))
se <- apply(cdraws, 2, sd)
est <- switch(posterior.summary,
mode = apply(cdraws, 2, pmode),
mean = colMeans(cdraws),
median = apply(cdraws, 2, median))
P <- Z <- NULL
}
else {
est <- matxv(X, betas)
v <- X %*% vcov(fit, regcoef.only=TRUE) %*% t(X)
## vcov(lrm fit) has an override to use middle intercept - see above
ndf <- if(is.matrix(v)) nrow(v) else 1
se <- as.vector(if(ndf == 1) sqrt(v) else sqrt(Matrix::diag(v)))
Z <- est / se
P <- if(length(idf)) 2 * pt(- abs(Z), idf) else 2 * pnorm(- abs(Z))
if(conf.type != 'simultaneous') {
if(length(bcoef)) {
best <- t(matxv(X, bcoef, bmat=TRUE))
lim <- bootBCa(est, best, type=boot.type, n=nobs(fit), seed=fit$seed,
conf.int=conf.int)
if(is.matrix(lim)) {
lower <- lim[1,]
upper <- lim[2,]
} else {
lower <- lim[1]
upper <- lim[2]
}
} else if(conf.type == 'profile') {
w <- rms_profile_ci(X, fit, conf.int, est, se, plot_profile=plot_profile,
se_factor=se_factor, ...)
lower <- w$lower
upper <- w$upper
LR <- w$LR
P <- w$P
} else {
lower <- est - zcrit*se
upper <- est + zcrit*se
}
} else {
# glht uses vcov(fite) which for lrm & orm are sparse Matrix objects
fite$non.slopes <- 1L
fite$interceptRef <- 1L
if(! length(fite$var))
fite$var <- Matrix::as.matrix(infoMxop(fite$info.matrix, i=iparm))
u <- confint(multcomp::glht(fite, X,
df=if(length(idf)) idf else 0),
level=conf.int)$confint
lower <- u[,'lwr']
upper <- u[,'upr']
}
PP <- NULL; posterior.summary <- ''
}
if(type != 'average' && length(ycut))
cnames <- paste0(cnames, ' ', fit$yname, '=', ycut)
res <- list(Contrast=est, SE=se,
Lower=lower, Upper=upper,
Z=Z, Pvalue=P, PP=PP,
var=v, df.residual=idf,
X=X, ycut=ycut, yname=if(length(ycut)) fit$yname,
cnames=if(type=='average') NULL else cnames,
nvary=length(vary),
conf.type=conf.type, conf.int=conf.int,
posterior.summary=posterior.summary,
cdraws = cdraws)
if(conf.type == 'profile') res$LR <- LR
if(type != 'average') res <- c(vary, res)
r <- qr(v, tol=tol)
nonred <- r$pivot[1 : r$rank] # non-redundant contrasts
redundant <- (1 : length(est)) %nin% nonred
res$redundant <- redundant
if(type=='joint') {
est <- est[! redundant]
v <- v[! redundant, ! redundant, drop=FALSE]
res$jointstat <- as.vector(est %*% solve(v, est, tol=tol))
}
structure(res, class='contrast.rms')
}
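## Usage sketch (hypothetical data and factor levels; follows the usual
## datadist workflow):
## dd <- datadist(d); options(datadist='dd')
## f <- lrm(y ~ rcs(age, 4) + sex, data=d, x=TRUE, y=TRUE)
## contrast(f, list(sex='female', age=50), list(sex='male', age=50))
## # profile likelihood intervals require x=TRUE, y=TRUE in the fit
## contrast(f, list(age=60), list(age=40), conf.type='profile')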
print.contrast.rms <- function(x, X=FALSE, fun=function(u) u,
jointonly=FALSE, prob=0.95, ...)
{
# See if a result of fun= on a Bayesian fit
if('esta' %in% names(x)) {
esta <- x$esta
estb <- x$estb
f <- function(x) {
hpd <- rmsb::HPDint(x, prob)
r <- c(mean(x), median(x), hpd)
names(r) <- c('Posterior Mean', 'Posterior Median',
paste(c('Lower', 'Upper'), prob, 'HPD'))
r
}
cat('\nPosterior Summaries for First X Settings\n\n')
print(t(apply(esta, 2, f)))
cat('\nPosterior Summaries for Second X Settings\n\n')
print(t(apply(estb, 2, f)))
cat('\nPosterior Summaries of First - Second\n\n')
print(t(apply(esta - estb, 2, f)))
return(invisible())
}
edf <- x$df.residual
sn <- if(length(edf)) 't' else if(x$conf.type == 'profile') '\u03A7\u00B2' else 'Z'
pn <- if(length(edf)) 'Pr(>|t|)' else if(x$conf.type == 'profile') 'Pr(>\u03A7\u00B2)' else 'Pr(>|z|)'
if(length(x$LR)) {
x$Z <- x$LR
x$LR <- NULL
}
w <- x[1 : (x$nvary + 7)]
isn <- sapply(w, is.null)
w <- w[! isn]
if(length(w$Z)) w$Z <- round(w$Z, 2)
if(length(w$Pvalue)) w$Pvalue <- round(w$Pvalue, 4)
if(length(w$PP)) w$PP <- round(w$PP, 4)
if(length(w$PP)) pn <- 'Pr(Contrast>0)'
no <- names(w)
no[no=='SE'] <- 'S.E.'
no[no=='Z'] <- sn
no[no %in% c('Pvalue', 'PP')] <- pn
cnames <- x$cnames
if(! length(cnames))
cnames <- if(x$nvary) rep('', length(x[[1]])) else
as.character(1 : length(x[[1]]))
if(any(x$redundant)) cnames <- paste(ifelse(x$redundant, '*', ' '), cnames)
w <- data.frame(w, row.names=paste(format(1:length(cnames)), cnames, sep=''))
if(length(x$y)) {
w$.y. <- x$y
names(w)[names(w) == '.y.'] <- x$yname
}
w$Contrast <- fun(w$Contrast)
if(! all(1:10 == fun(1:10))) w$SE <- rep(NA, length(w$SE))
w$Lower <- fun(w$Lower)
w$Upper <- fun(w$Upper)
# Assign modified names to w
names(w) <- no
if(x$conf.type == 'profile') w$S.E. <- NULL
if(length(w$S.E.) && all(is.na(w$S.E.))) w$S.E. <- NULL
# Print w
if(! jointonly) {
## print(as.matrix(w), quote=FALSE)
print(w, ...)
if(any(x$redundant)) cat('\nRedundant contrasts are denoted by *\n')
}
jstat <- x$jointstat
if(length(jstat)) {
cat('\nJoint test for all contrasts=0:\n\n')
ndf <- sum(!x$redundant)
if(length(edf)) {
Fstat <- jstat / ndf
Pval <- 1 - pf(Fstat, ndf, edf)
cat('F(', ndf, ',', edf, ')=', round(Fstat,3),', P=', round(Pval,4),
'\n', sep='')
} else {
Pval <- 1 - pchisq(jstat, ndf)
cat('Chi-square=', round(jstat, 2),' with ', ndf, ' d.f. P=',
round(Pval, 4),'\n', sep='')
}
}
if(!jointonly && length(edf)) cat('\nError d.f.=',edf,'\n')
cotype <- if(x$conf.type == 'profile') 'profile likelihood' else x$conf.type
if(x$posterior.summary == '')
cat('\nConfidence intervals are', x$conf.int, cotype,
'intervals\n')
else {
cat('\nIntervals are', x$conf.int, 'highest posterior density intervals\n')
cat('Contrast is the posterior', x$posterior.summary, '\n')
}
if(X) {
cat('\nDesign Matrix for Contrasts\n\n')
if(is.matrix(x$X)) dimnames(x$X) <- list(cnames, dimnames(x$X)[[2]])
print(x$X)
}
invisible()
}
rms_profile_ci <-
function(C, fit, conf.int, est_C, se_C, se_factor=4e0,
plot_profile=FALSE, ...) {
# Separate likelihood profile confidence intervals for contrasts in
# each row of C. est_C is estimated contrast, se_C is its standard error
if(any(c('x', 'y') %nin% names(fit)))
stop('to use profile likelihood you must specify x=TRUE, y=TRUE when fitting')
X <- fit[['x']]
crit <- qchisq(conf.int, 1)
p <- ncol(C)
m <- nrow(C)
if(p == (1 + length(fit$coefficients) - num.intercepts(fit))) C <- C[, -1, drop=FALSE]
lower <- upper <- LR <- numeric(m)
odev <- getDeviance(fit) # original deviance for full model
odev <- odev[length(odev)]
g <- function(theta) {
dev <- quickRefit(fit, X=Z[, -1, drop=FALSE], offset=theta * Z[, 1],
what='deviance', ...)
if(is.list(dev) && length(dev$fail) && dev$fail) {
message('Fit failed in profile likelihood. theta=', format(theta), ' S.E.=', format(se),
' range of offsets:', paste(format(range(theta * Z[, 1])), collapse=', '))
return(NA)
}
dev - odev - crit
}
p <- ncol(C)
for(i in 1 : m) {
D <- C[i, , drop=FALSE]
est <- est_C[i]
se <- se_C[i]
v <- svd(rbind(D, diag(p)))$v
u <- sqrt(sum(D ^ 2))
beta_contrast <- v[, 1] * u
# SVD has an arbitrary sign
if(max(abs(D - beta_contrast)) > 1e-6) {
v <- -v
beta_contrast <- v[, 1] * u
if(max(abs(D - beta_contrast)) > 1e-6)
stop('SVD-generated contrast could not reproduce original contrast')
}
# Compute contrast to put on design matrix that gives the above contrast in betas
v <- v / u
Z <- X %*% v
# Likelihood ratio chi-square obtained by removing first column of Z
drop1 <- quickRefit(fit, X=Z[, -1, drop=FALSE], what='deviance', ...)
drop1 <- drop1[length(drop1)]
LR[i] <- drop1 - odev
if(plot_profile) {
thetas <- seq(est - se_factor * se, est + se_factor * se, length=50)
ch_deviance <- rep(NA, length(thetas))
for(j in 1 : length(thetas)) ch_deviance[j] <- g(thetas[j])
plot(thetas, ch_deviance, xlab='Contrast Estimate',
ylab='Change in Deviance From Full Model')
abline(v=c(est - se, est, est + se), col='blue')
title(paste('Contrast', i))
title(sub='Vertical lines are at point estimate of contrast \u00b1 S.E.', adj=1, cex.sub=0.65)
}
hi <- try(uniroot(g, c(est + se/100, est + se_factor * se))$root)
if(inherits(hi, 'try-error')) hi <- Inf
lo <- try(uniroot(g, c(est - se_factor * se, est - se/100))$root)
if(inherits(lo, 'try-error')) lo <- -Inf
lower[i] <- lo
upper[i] <- hi
}
list(lower=lower, upper=upper, LR=LR, P=1. - pchisq(LR, 1))
}
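## Sketch (hypothetical check, not part of the package) of the SVD
## reparameterization above: the coefficient of Z[, 1] in a refit equals the
## original contrast D %*% beta.
## set.seed(1)
## X <- cbind(x1=rnorm(50), x2=rnorm(50), x3=rnorm(50))
## y <- drop(X %*% c(1, -2, 0.5) + rnorm(50))
## D <- matrix(c(1, -1, 0), nrow=1)              # contrast of interest
## u <- sqrt(sum(D ^ 2)); v <- svd(rbind(D, diag(3)))$v
## if(max(abs(D - v[, 1] * u)) > 1e-6) v <- -v   # resolve arbitrary SVD sign
## Z <- X %*% (v / u)
## b  <- coef(lm(y ~ X))[-1]
## bz <- coef(lm(y ~ Z))[-1]
## c(drop(D %*% b), bz[1])                       # the two values agree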
# rms/R/survplot.npsurv.s
survplot.npsurv <-
function(fit, xlim,
ylim, xlab, ylab, time.inc, state=NULL,
conf=c("bands", "bars", "diffbands", "none"), mylim=NULL,
add=FALSE, label.curves=TRUE, abbrev.label=FALSE, levels.only=FALSE,
lty, lwd=par('lwd'),
col=1, col.fill=gray(seq(.95, .75, length=5)),
loglog=FALSE, fun, n.risk=FALSE, aehaz=FALSE, times=NULL,
logt=FALSE, dots=FALSE, dotsize=.003, grid=NULL,
srt.n.risk=0, sep.n.risk=.056, adj.n.risk=1,
y.n.risk, cex.n.risk=.6, cex.xlab=par('cex.lab'), cex.ylab=cex.xlab,
pr=FALSE, ...) {
conf <- match.arg(conf)
polyg <- ordGridFun(grid=FALSE)$polygon
conf.int <- fit$conf.int
if(!length(conf.int) | conf == "none") conf.int <- 0
opar <- par(c('mar', 'xpd'))
on.exit(par(opar))
cylim <- function(ylim)
if(length(mylim)) c(min(ylim[1], mylim[1]), max(ylim[2], mylim[2]))
else ylim
fit.orig <- fit
units <- fit$units
if(!length(units)) units <- "Day"
maxtime <- fit$maxtime
if(! length(maxtime)) maxtime <- max(fit$time)
mintime <- min(fit$time, 0)
pret <- pretty(c(mintime, maxtime))
maxtime <- max(pret)
mintime <- min(pret)
if(missing(time.inc)) {
time.inc <- switch(units, Day=30, Month=1, Year=1,
(maxtime - mintime) / 10)
if(time.inc > maxtime) time.inc <- (maxtime - mintime) / 10
}
if(n.risk && ! length(fit$n.risk)) {
n.risk <- FALSE
warning("fit does not have number at risk\nIs probably from a parametric model\nn.risk set to FALSE")
}
mstate <- inherits(fit, 'survfitms')
if(mstate) {
## Multi-state model for competing risks
if(missing(fun)) fun <- function(y) 1 - y
if(missing(state)) stop('state must be given when response is a multi-state/competing risk object from Surv()')
if(length(state) != 1) stop('at present state can only be a single state')
states <- fit$states
if(state %nin% states) stop(paste('state is not in',
paste(states, collapse=', ')))
}
trans <- loglog || mstate || ! missing(fun)
if(missing(ylab))
ylab <-
if(loglog) "log(-log Survival Probability)"
else if(mstate) paste('Cumulative Incidence of', upFirst(state))
else if(trans) ""
else "Survival Probability"
if(loglog) fun <- function(y) logb(-logb(ifelse(y == 0 | y == 1, NA, y)))
else if(! trans) fun <- function(y) y
if(missing(xlab))
xlab <- if(logt) paste("log Follow-up Time in ", units, "s", sep="")
else labelPlotmath('Follow-up Time', paste(fit$units, 's', sep=''))
## else labelPlotmath(fit$time.label, fit$units)
if(missing(xlim))
xlim <- if(logt) logb(c(maxtime / 100, maxtime)) else c(mintime, maxtime)
convert <- if(mstate) {
istate <- match(state, states)
conv <- function(f, istate) {
f$surv <- 1 - f$pstate [, istate]
f$lower <- 1 - f$lower [, istate]
f$upper <- 1 - f$upper [, istate]
f$std.err <- f$std.err[, istate]
icens <- which(states == '(s0)')
if(! length(icens))
stop('Program logic error: did not find (s0) column with competing risks')
f$n.risk <- f$n.risk[, icens]
if(all(f$n.risk == 0))
stop('program logic error: all n.risk are zero')
f
}
formals(conv) <- list(f=NULL, istate=istate)
conv } else function(f) f
fit <- convert(fit)
origsurv <- fit$surv
if(trans) {
fit$surv <- fun(fit$surv)
fit$surv[is.infinite(fit$surv)] <- NA
## handle e.g. logit(1): Inf would mess up ylim in plot()
if(conf.int > 0) {
fit$lower <- fun(fit$lower)
fit$upper <- fun(fit$upper)
fit$lower[is.infinite(fit$lower)] <- NA
fit$upper[is.infinite(fit$upper)] <- NA
if(missing(ylim))
ylim <- cylim(range(c(fit$lower, fit$upper), na.rm=TRUE))
}
else if(missing(ylim)) ylim <- cylim(range(fit$surv, na.rm=TRUE))
}
else if(missing(ylim)) ylim <- c(0, 1)
if(length(grid)) {
dots <- FALSE
if(is.logical(grid)) grid <- if(grid) gray(.8) else NULL
}
if(logt | trans) { dots <- FALSE; grid <- NULL }
olev <- slev <- names(fit$strata)
if(levels.only) slev <- gsub('.*=', '', slev)
sleva <- if(abbrev.label) abbreviate(slev) else slev
ns <- length(slev)
slevp <- ns > 0
labelc <- is.list(label.curves) || label.curves
if(!slevp) labelc <- FALSE
ns <- max(ns, 1)
y <- 1 : ns
stemp <- if(ns == 1) rep(1, length(fit$time))
else rep(1:ns, fit$strata)
if(n.risk | (conf.int > 0 & conf == "bars")) {
stime <- seq(mintime, maxtime, time.inc)
v <- convert(summary(fit.orig, times=stime, print.it=FALSE))
v$surv <- fun(v$surv)
v$lower <- fun(v$lower)
v$upper <- fun(v$upper)
vs <- if(ns > 1) as.character(v$strata)
## survival:::summary.survfit was not preserving order of strata levels
}
xd <- xlim[2] - xlim[1]
yd <- ylim[2] - ylim[1]
if(n.risk && !add) {
mar <- opar$mar
if(mar[4] < 4) {mar[4] <- mar[4] + 2; par(mar=mar)}
}
## One curve for each value of y, excluding the line style used for C.L.
lty <- if(missing(lty)) seq(ns + 1)[-2] else rep(lty, length=ns)
lwd <- rep(lwd, length=ns)
col <- rep(col, length=ns)
if(conf == 'diffbands' && ns < 2) conf <- 'bands'
if(labelc || conf %in% c('bands', 'diffbands')) curves <- vector('list', ns)
Tim <- Srv <- list()
par(xpd=NA)
nevents <- totaltime <- numeric(ns)
cuminc <- character(ns)
for(i in 1 : ns) {
st <- stemp == i
time <- fit$time[st]
surv <- fit$surv[st]
osurv <- origsurv[st]
## nevents[i] <- sum(fit$n.event[st])
## nrsk <- fit$n.risk[st]
## neachtime <- c(- diff(nrsk), min(nrsk))
## totaltime[i] <- sum(neachtime * time)
nevents[i] <- if(mstate) {
if(ns == 1) fit$numevents[, state]
else fit$numevents[olev[i], state]
} else {
if(ns == 1) fit$numevents else fit$numevents[olev[i]]
}
totaltime[i] <- if(ns == 1) fit$exposure else fit$exposure[olev[i]]
if(length(times)) {
cumi <- 1. - approx(time, osurv, xout=times, method='constant')$y
noun <- units %in% c('', ' ')
cuminc[i] <- paste('Cum. inc.@ ',
if(noun) 't=',
paste(times, collapse=','),
if(! noun) paste(' ', units, sep=''),
':', paste(round(cumi, 3), collapse=','),
sep='')
}
if(logt) time <- logb(time)
s <- !is.na(time) & (time >= xlim[1])
if(i==1 & !add) {
plot(time, surv, xlab='', xlim=xlim,
ylab='', ylim=ylim, type="n", axes=FALSE)
mgp.axis(1, at=if(logt) pretty(xlim) else
seq(xlim[1], max(pretty(xlim)), time.inc),
labels=TRUE, axistitle=xlab, cex.lab=cex.xlab)
mgp.axis(2, at=pretty(ylim), axistitle=ylab, cex.lab=cex.ylab)
if(dots || length(grid)) {
xlm <- pretty(xlim)
xlm <- c(xlm[1], xlm[length(xlm)])
xp <- seq(xlm[1], xlm[2], by=time.inc)
yd <- ylim[2] - ylim[1]
if(yd <= .1) yi <- .01
else if(yd <=.2 ) yi <- .025
else if(yd <=.4 ) yi <- .05
else yi <- .1
yp <- seq(ylim[2],
ylim[1] + if(n.risk && missing(y.n.risk)) yi else 0,
by = -yi)
if(dots)
for(tt in xp)
symbols(rep(tt, length(yp)), yp,
circles=rep(dotsize, length(yp)),
inches=dotsize, add=TRUE)
else abline(h=yp, v=xp, col=grid, xpd=FALSE)
}
}
tim <- time[s]; srv <- surv[s]
if(conf.int > 0 && conf == "bands") {
blower <- fit$lower[st][s]
bupper <- fit$upper[st][s]
}
## don't let the step function go beyond the x-axis limit -
## this cuts it off but allows the step to proceed to the axis end
if(max(tim) > xlim[2]) {
srvl <- srv[tim <= xlim[2] + 1e-6]
## s.last <- min(srv[tim<=xlim[2]+1e-6]) #not work w/fun
s.last <- srvl[length(srvl)]
k <- tim < xlim[2]
tim <- c(tim[k], xlim[2])
srv <- c(srv[k], s.last)
if(conf.int > 0 && conf == "bands") {
low.last <- blower[time <= xlim[2] + 1e-6]
low.last <- low.last[length(low.last)]
up.last <- bupper[time <= xlim[2] + 1e-6]
up.last <- up.last[length(up.last)]
blower <- c(blower[k], low.last)
bupper <- c(bupper[k], up.last)
}
}
if(logt) {
if(conf %nin% c('bands', 'diffbands'))
lines(tim, srv, type="s", lty=lty[i], col=col[i], lwd=lwd[i])
if(labelc || conf %in% c('bands', 'diffbands'))
curves[[i]] <- list(tim, srv)
}
else {
xxx <- c(mintime, tim)
yyy <- c(fun(1), srv)
if(conf %nin% c('bands', 'diffbands'))
lines(xxx, yyy, type="s", lty=lty[i], col=col[i], lwd=lwd[i])
if(labelc || conf %in% c('bands', 'diffbands'))
curves[[i]] <- list(xxx, yyy)
}
if(pr) {
zest <- rbind(time[s], surv[s])
dimnames(zest) <- list(c("Time", "Survival"),
rep("", sum(s)))
if(slevp)cat("\nEstimates for ", slev[i], "\n\n")
print(zest, digits=3)
}
if(conf.int > 0) {
if(conf == 'bands') {
if(logt)
polyg(x = c(tim, max(tim), rev(tim)),
y = c(blower, rev(bupper), max(bupper)),
col = col.fill[i], type='s')
else
polyg(x = c(max(tim), tim, rev(c(tim, max(tim)))),
y = c(fun(1), blower, rev(c(fun(1), bupper))),
col = col.fill[i], type = "s")
}
else if(conf == 'diffbands')
survdiffplot(fit.orig, conf=conf, fun=fun, convert=convert, xlim=xlim)
else {
j <- if(ns == 1) TRUE else vs == olev[i]
tt <- v$time[j] #may not get predictions at all t
ss <- v$surv[j]
lower <- v$lower[j]
upper <- v$upper[j]
if(logt) tt <- logb(ifelse(tt == 0, NA, tt))
tt <- tt + xd * (i - 1) * .01
errbar(tt, ss, upper, lower, add=TRUE, lty=lty[i],
col=col[i])
}
}
if(n.risk) {
j <- if(ns == 1) TRUE else vs == olev[i]
tt <- v$time[j]
nrisk <- v$n.risk[j]
tt[1] <- xlim[1] #was xd*.015, .030, .035
if(missing(y.n.risk)) y.n.risk <- ylim[1]
if(y.n.risk == 'auto') y.n.risk <- - diff(ylim) / 3
yy <- y.n.risk + yd * (ns - i) * sep.n.risk
nri <- nrisk
nri[tt > xlim[2]] <- NA
text(tt[1], yy, nri[1], cex=cex.n.risk,
adj=adj.n.risk, srt=srt.n.risk)
if (length(nri) > 1)
text(tt[-1], yy, nri[-1], cex=cex.n.risk, adj=1)
if(slevp) text(xlim[2] + xd * .025,
yy, adj=0, sleva[i], cex=cex.n.risk)
}
}
if(conf %in% c('bands', 'diffbands'))
for(i in 1:ns)
lines(curves[[i]][[1]], curves[[i]][[2]],
lty=lty[i], lwd=lwd[i], col=col[i], type='s')
if(aehaz || length(times)) {
un <- if(units == ' ' | units == '') '' else
paste('/', tolower(units), sep='')
haz <- round(nevents / totaltime, 4)
txt <- paste(nevents, 'events')
if(aehaz) txt <- paste(txt, ', hazard=', haz, un, sep='')
if(length(times)) txt <- paste(txt, ', ', sep='')
if(length(times)) txt <- paste(txt, cuminc)
if(! labelc)
text(xlim[2], ylim[2], txt, adj=1)
else {
maxlen <- max(nchar(sleva))
sleva <- substring(paste(sleva, ' '),
1, maxlen)
for(j in 1 : ns)
sleva[j] <- eval(parse(text=sprintf("expression(paste('%s ',scriptstyle('(%s)')))", sleva[j], txt[j])))
}
}
if(labelc) labcurve(curves, sleva, type='s', lty=lty, lwd=lwd,
opts=label.curves, col.=col)
invisible(slev)
}
survdiffplot <-
function(fit, order=1:2, fun=function(y) y, xlim, ylim, xlab,
ylab="Difference in Survival Probability", time.inc,
conf.int, conf=c("shaded", "bands", "diffbands", "none"),
add=FALSE, lty=1, lwd=par('lwd'), col=1,
n.risk=FALSE, grid=NULL,
srt.n.risk=0, adj.n.risk=1,
y.n.risk, cex.n.risk=.6,
cex.xlab=par('cex.lab'), cex.ylab=cex.xlab,
convert=function(f) f) {
conf <- match.arg(conf)
if(missing(conf.int)) conf.int <- fit$conf.int
if(! length(conf.int) | conf == "none") conf.int <- 0
opar <- par(c('xpd', 'mar'))
on.exit(par(opar))
units <- fit$units
if(!length(units)) units <- "Day"
maxtime <- fit$maxtime
if(!length(maxtime)) maxtime <- max(fit$time)
mintime <- min(fit$time, 0)
pret <- pretty(c(mintime, maxtime))
maxtime <- max(pret)
mintime <- min(pret)
if(missing(time.inc)) {
time.inc <- switch(units, Day=30, Month=1, Year=1, (maxtime - mintime) / 10)
if(time.inc > maxtime) time.inc <- (maxtime - mintime) / 10
}
if(n.risk && !length(fit$n.risk)) {
n.risk <- FALSE
warning("fit does not have number at risk\nIs probably from a parametric model\nn.risk set to FALSE")
}
if(missing(xlab)) xlab <- if(units==' ') 'Time' else paste(units, "s", sep="")
if(missing(xlim)) xlim <- c(mintime, maxtime)
if(length(grid) && is.logical(grid)) grid <- if(grid) gray(.8) else NULL
polyg <- ordGridFun(grid=FALSE)$polygon
times <- sort(unique(c(fit$time, seq(mintime, maxtime, by=time.inc))))
## Note: summary.survfit computes standard errors on S(t) scale
f <- convert(summary(fit, times=times))
slev <- levels(f$strata)
ns <- length(slev)
if(ns != 2) stop('must have exactly two strata')
a <- f$strata == slev[1]
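## h() extracts, for a single stratum, the survival estimates, standard
## errors, and numbers at risk aligned to the common `times` vector
## (entries are NA where that stratum has no estimate at a given time)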
h <- function(level, times, f) {
strata <- f$strata
i <- strata == level
tim <- f$time[i]
surv <- f$surv[i]
se <- f$std.err[i]
nrisk <- f$n.risk[i]
j <- match(times, tim)
list(time=times, surv=surv[j], se=se[j], nrisk=nrisk[j])
}
a <- h(slev[order[1]], times, f)
b <- h(slev[order[2]], times, f)
surv <- if(conf == 'diffbands') (fun(a$surv) + fun(b$surv)) / 2
else fun(a$surv) - fun(b$surv)
se <- sqrt(a$se^2 + b$se^2)
z <- qnorm((1 + conf.int) / 2)
if(conf == 'diffbands') {
lo <- surv - 0.5 * z * se
hi <- surv + 0.5 * z * se
k <- ! is.na(times + lo + hi) & times < xlim[2]
polyg(c(times[k], rev(times[k])), c(lo[k], rev(hi[k])),
col=gray(.9), type='s')
return(invisible(slev))
}
lo <- surv - z * se
hi <- surv + z * se
if(missing(ylim)) ylim <- range(c(lo, hi), na.rm=TRUE)
if(!add) {
plot(times, surv, type='n', axes=FALSE,
xlim=xlim, ylim=ylim, xlab='', ylab='')
mgp.axis(2, labels=TRUE, axistitle=ylab, cex.lab=cex.ylab)
mgp.axis(1, at=seq(xlim[1], max(pretty(xlim)), time.inc),
labels=TRUE, axistitle=xlab, cex.lab=cex.xlab)
}
if(length(grid)) {
xlm <- pretty(xlim)
xlm <- c(xlm[1], xlm[length(xlm)])
xp <- seq(xlm[1], xlm[2], by=time.inc)
ylm <- pretty(ylim)
yp <- seq(min(ylm), max(ylm), by=ylm[2] - ylm[1])
abline(h=yp, v=xp, col=grid, xpd=FALSE)
}
k <- !is.na(times + lo + hi)
switch(conf,
shaded=polyg(c(times[k], rev(times[k])), c(lo[k], rev(hi[k])),
col=gray(.85), type='s'),
bands={
lines(times, lo, col=gray(.7))
lines(times, hi, col=gray(.7))
},
diffbands=NULL,
none=NULL)
lines(times, surv, type='s', lwd=lwd, col=col)
abline(h=0, col=gray(.7))
title(sub=paste(slev[order], collapse=' - '), adj=0)
if(n.risk) {
nrisktimes <- seq(0, maxtime, by=time.inc)
nriskinfo <- convert(summary(fit, times=nrisktimes))
anr <- h(slev[order[1]], nrisktimes, nriskinfo)
bnr <- h(slev[order[2]], nrisktimes, nriskinfo)
nrisk <- pmin(anr$nrisk, bnr$nrisk)
xd <- xlim[2] - xlim[1]
yd <- ylim[2] - ylim[1]
if(! add) {
mar <- opar$mar
if(mar[4] < 4) {mar[4] <- mar[4] + 2; par(mar=mar)}
}
par(xpd=NA)
tt <- nrisktimes
tt[1] <- xlim[1]
if(missing(y.n.risk)) y.n.risk <- ylim[1]
if(y.n.risk == 'auto') y.n.risk <- - diff(ylim) / 3
yy <- y.n.risk
nri <- nrisk
nri[tt > xlim[2]] <- NA
text(tt[1], yy, nri[1], cex=cex.n.risk,
adj=adj.n.risk, srt=srt.n.risk)
if (length(nri) > 1)
text(tt[-1], yy, nri[-1], cex=cex.n.risk, adj=1)
}
invisible(slev)
}
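
## Hedged usage sketch (illustration only, not package code): survdiffplot()
## needs a stratified nonparametric fit with exactly two strata, and plots
## the difference in survival probabilities with a confidence region.
## Data are simulated; object names are arbitrary.
if (FALSE) {
  require(rms)
  set.seed(2)
  n  <- 300
  g  <- factor(sample(c('drug', 'placebo'), n, replace=TRUE))
  tt <- rexp(n, rate=ifelse(g == 'drug', .06, .09))
  ev <- as.integer(tt < 12)
  tt <- pmin(tt, 12)
  f  <- npsurv(Surv(tt, ev) ~ g)
  survdiffplot(f, order=1:2, conf='shaded', n.risk=TRUE)
}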
rms/R/LRupdate.r
##' Update Model LR Statistics After Multiple Imputation
##'
##' For fits from `ols`, `lrm`, `orm`, `cph`, or `psm` that were created using `fit.mult.impute` with `lrt=TRUE` or equivalent options, and for which `anova` was obtained using `processMI(fit, 'anova')` to compute imputation-adjusted LR statistics, `LRupdate` uses the last line of the `anova` result (containing the overall model LR chi-square) to update `Model L.R.` in the fit's `stats` component and to adjust any of the new R-square measures in `stats`.
##'
##' For models using Nagelkerke's R-squared, that measure is set to `NA` because it would have to be recomputed with a new intercept-only log-likelihood, which `anova` does not provide. For `ols` models, R-squared is left alone because it does not depend on sample size, and `print.ols` prints the correct adjusted R-squared since `fit.mult.impute` corrects the residual d.f. in stacked fits.
##' @title LRupdate
##' @param fit an `rms` fit object
##' @param anova the result of `processMI(..., 'anova')`
##' @return new fit object like `fit` but with the substitutions made
##' @author Frank Harrell
##' @seealso [processMI.fit.mult.impute()], [Hmisc::R2Measures()]
##' @md
##' @examples
##' \dontrun{
##' a <- aregImpute(~ y + x1 + x2, n.impute=30, data=d)
##' f <- fit.mult.impute(y ~ x1 + x2, lrm, a, data=d, lrt=TRUE)
##' a <- processMI(f, 'anova')
##' f <- LRupdate(f, a)
##' print(f, r2=1:4) # print all imputation-corrected R2 measures
##' }
LRupdate <- function(fit, anova) {
cl <- class(fit)
if('rms' %nin% cl) stop('fit is not an rms fit')
if(! inherits(anova, 'anova.rms'))
stop('anova is not the result of rms anova()')
lr <- anova['TOTAL', 'Chi-Square']
s <- fit$stats
s['Model L.R.'] <- lr
df <- s['d.f.']
s['P'] <- pchisq(lr, df, lower.tail=FALSE)
n <- if('Obs' %in% names(s)) s['Obs'] else s['n'] # n for ols
mod <- ''
for(j in c('ols', 'lrm', 'orm', 'cph', 'psm')) if(j %in% cl) mod <- j
if(mod == '')
stop('model not from ols, lrm, orm, cph, psm')
if(mod != 'ols') {
if(mod %in% c('lrm', 'orm', 'cph', 'psm') && 'R2' %in% names(s))
s['R2'] <- NA # Nagelkerke R^2 no longer correct
## Fetch effective sample size (scalar) or Y frequency distribution
ess <- switch(mod,
lrm = fit$freq,
orm = fit$freq,
cph = s['Events'],
psm = s['Events'])
r2m <- R2Measures(lr, df, n, ess)
i <- grep('R2\\(', names(s))
if(length(i) != length(r2m))
stop('number of R2 from R2Measures (', length(r2m),
') does not equal number stored in fit (', length(i), ')')
s[i] <- r2m
names(s)[i] <- names(r2m)
}
fit$stats <- s
fit
}
rms/R/rexVar.r
##' Relative Explained Variation
##'
##' Computes measures of relative explained variation for each predictor in an `rms` or `rmsb` model fit `object`. This is similar to `plot(anova(fit), what='proportion R2')`. For an `ols` model the result is exactly that. Uncertainty intervals are computed if the model fit is from `rmsb` or was run through [bootcov()] with `coef.reps=TRUE`. The results may be printed, and there is also a `plot` method.
##'
##' When `object` is not an `ols` fit, the linear predictor from the fit in `object` is predicted from the original predictors, resulting in a linear model with \eqn{R^{2}=1.0}. The partial \eqn{R^2} for each predictor from a new `ols` fit is the relative explained variation. The process is repeated when bootstrap coefficients repetitions or posterior draws are present, to get uncertainty intervals. So relative explained variation is the proportion of variation in the initial model's predicted values (on the linear predictor scale) that is due to each predictor.
##'
##' Nonlinear and interaction terms are pooled with the main linear effects of predictors, so the relative explained variation for a predictor measures its total impact on predicted values, whether as a main effect or as an effect modifier (interaction component).
##' @title rexVar
##' @param object a fit from `rms` or `rmsb`
##' @param data a data frame, data table, or list providing the predictors used in the original fit
##' @param ns maximum number of bootstrap repetitions or posterior draws to use
##' @param cint confidence interval coverage probability for nonparametric bootstrap percentile intervals, or probability for a Bayesian highest posterior density interval for the relative explained variations.
##' @return a vector (if bootstrapping or Bayesian posterior sampling was not done) or a matrix otherwise, with rows corresponding to predictors and columns `REV`, `Lower`, `Upper`. The returned object is of class `rexVar`.
##' @author Frank Harrell
##' @md
##' @seealso [Hmisc::cutGn()]
##' @examples
##' set.seed(1)
##' n <- 100
##' x1 <- rnorm(n)
##' x2 <- rnorm(n)
##' x3 <- rnorm(n)
##' yo <- x1 + x2 + rnorm(n) / 2.
##' # Minimally group y so that bootstrap samples are very unlikely to miss a
##' # value of y
##' y <- ordGroupBoot(yo)
##' d <- data.frame(x1, x2, x3, y)
##' dd <- datadist(d); options(datadist='dd')
##' f <- ols(y ~ pol(x1, 2) * pol(x2, 2) + x3,
##' data=d, x=TRUE, y=TRUE)
##' plot(anova(f), what='proportion R2', pl=FALSE)
##' rexVar(f)
##' g <- bootcov(f, B=20, coef.reps=TRUE)
##' rexVar(g, data=d)
##' f <- orm(y ~ pol(x1,2) * pol(x2, 2) + x3,
##' data=d, x=TRUE, y=TRUE)
##' rexVar(f, data=d)
##' g <- bootcov(f, B=20, coef.reps=TRUE)
##' rexVar(g, data=d)
##' \dontrun{
##' require(rmsb)
##' h <- blrm(y ~ pol(x1,2) * pol(x2, 2) + x3, data=d)
##' rexVar(h, data=d)
##' }
##' options(datadist=NULL)
rexVar <- function(object, data, ns=500, cint=0.95) {
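## rex() computes, from an ols fit, each predictor's partial sum of squares
## (pooling nonlinear and interaction components with the main effect)
## divided by the total model SS, after dropping the summary rows of the
## anova table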
rex <- function(olsfit) {
a <- anova(olsfit)
pss <- a[, 'Partial SS']
ssr <- a['TOTAL', 'Partial SS']
sst <- a['ERROR', 'Partial SS'] + ssr
rm <- c("TOTAL NONLINEAR","TOTAL NONLINEAR + INTERACTION",
"TOTAL INTERACTION","TOTAL",
" Nonlinear"," All Interactions", "ERROR",
" f(A,B) vs. Af(B) + Bg(A)")
rn <- rownames(a)
rm <- c(rm, rn[substring(rn, 2, 10) == "Nonlinear"])
pss <- a[rn %nin% rm, 'Partial SS']
names(pss) <- sub(' (Factor+Higher Order Factors)', '', names(pss),
fixed=TRUE)
r <- pss / ssr
names(r) <- trimws(names(r))
r
}
draws <- object$draws
drawtype <- 'bayes'
if(! length(draws)) {
draws <- object$boot.Coef
drawtype <- 'bootstrap'
}
if(inherits(object, 'ols') && ! length(draws))
return(structure(rex(object), class='rexVar'))
.lp. <- if(inherits(object, 'blrm')) predict(object, cint=FALSE)
else predict(object)
form <- formula(object)
form <- update(form, .lp. ~ .) # replace dependent variable with .lp.
data$.lp. <- .lp.
f <- ols(form, data=data, x=TRUE)
X <- f$x
overall.rex <- rex(f)
## If not a Bayesian fit from rmsb or a bootstrap fit from bootcov, finished
if(! length(draws)) return(structure(overall.rex, class='rexVar'))
ni <- num.intercepts(object)
## Recreate linear predictor for posterior draws or bootstraps
ns <- min(ns, nrow(draws))
rx <- matrix(NA, nrow=ns, ncol=length(overall.rex))
for(i in 1 : ns) {
## matxv drops excess coefficients representing intercepts not
## used in X
.lp. <- matxv(X, draws[i, , drop=TRUE])
g <- lm.fit.qr.bare(X, as.vector(.lp.),
tolerance=.Machine$double.eps, intercept=TRUE, xpxi=TRUE)
## Trick to update original ols model fit; speeds up over
## having to recreate design matrix each time
f$coefficients <- g$coefficients
f$var <- g$xpxi
f$stats['Sigma'] <- 1.0 # really zero
rx[i, ] <- rex(f)
}
lim <- switch(drawtype,
bayes = apply(rx, 2, rmsb::HPDint, prob=cint),
bootstrap = apply(rx, 2, quantile, probs=c((1. - cint) / 2., 1. - (1. - cint) / 2.)) )
r <- cbind(REV=overall.rex, Lower=lim[1, ], Upper=lim[2, ])
rownames(r) <- names(overall.rex)
structure(r, class='rexVar', drawtype=drawtype)
}
##' Print rexVar Result
##'
##' Prints the results of an `rexVar` call
##' @title print.rexVar
##' @param x a vector or matrix created by `rexVar`
##' @param title character string which can be set to `NULL` or `''` to suppress
##' @param digits passed to [round()]
##' @param ... unused
##' @return invisible
##' @author Frank Harrell
##' @md
print.rexVar <- function(x, title='Relative Explained Variation',
digits=3, ...) {
if(length(title) && title != '')
cat('\n', title, '\n\n', sep='')
attr(x, 'drawtype') <- NULL
invisible(print(round(unclass(x), digits)))
}
##' Plot rexVar Result
##'
##' Makes a dot chart displaying the results of `rexVar`. Base graphics are used unless `options(grType='plotly')` is in effect, in which case a `plotly` graphic is produced with hovertext
##' @title plot.rexVar
##' @param x a vector or matrix created by `rexVar`
##' @param xlab x-axis label
##' @param xlim x-axis limits; defaults to range of all values (limits and point estimates)
##' @param pch plotting symbol for dot
##' @param sort defaults to sorting predictors in descending order of relative explained variation. Can be set to `'ascending'` or `'none'`.
##' @param margin set to `TRUE` to show the REV values in the right margin if using base graphics
##' @param height optional height in pixels for `plotly` graph
##' @param width likewise optional width
##' @param ... arguments passed to `dotchart2` or `dotchartpl`
##' @return `plotly` graphics object if using `plotly`
##' @author Frank Harrell
##' @md
plot.rexVar <- function(x, xlab='Relative Explained Variation',
xlim=NULL,
pch=16, sort=c("descending", "ascending", "none"),
margin=FALSE, height=NULL, width=NULL, ...) {
sort <- match.arg(sort)
isbase <- Hmisc::grType() == 'base'
if(! is.matrix(x))
x <- matrix(x, ncol=1, dimnames=list(names(x), 'REV'))
nr <- nrow(x)
if(! isbase && ! length(height))
height <- plotlyParm$heightDotchart(nr)
drawtype <- attr(x, 'drawtype')
i <- switch(sort,
none = 1 : nr,
descending = order(x[, 'REV'], decreasing=TRUE),
ascending = order(x[, 'REV']))
x <- x[i,, drop=FALSE]
rownames(x) <- trimws(rownames(x))
if(! length(xlim)) xlim <- range(x)
ul <- ncol(x) > 1
if(isbase) {
if(margin)
dotchart2(as.vector(x[, 'REV']), labels=rownames(x),
xlab=xlab, pch=pch, xlim=xlim,
auxdata=if(margin) round(x[, 'REV'], 3),
...)
else dotchart2(as.vector(x[, 'REV']), labels=rownames(x),
xlab=xlab, pch=pch, xlim=xlim,
...)
if(ul) {
dotchart2(x[, 'Lower'], pch=91, add=TRUE)
dotchart2(x[, 'Upper'], pch=93, add=TRUE)
}
return(invisible())
}
lname <- if(length(drawtype))
switch(drawtype,
bayes='HPD Interval',
bootstrap='Bootstrap CI')
dotchartpl(x[, 'REV'], major=rownames(x),
lower=if(ul) x[,'Lower'],
upper=if(ul) x[,'Upper'],
htext=format(round(x[, 'REV'], 3)),
xlab=xlab, xlim=xlim,
limitstracename=lname,
width=width, height=height, ...)
}
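
## Hedged illustration (not executed): typical use of the print and plot
## methods above, continuing the object names from the rexVar() examples
## (g is a bootcov fit and d the data frame used there).
if (FALSE) {
  rv <- rexVar(g, data=d)
  print(rv, digits=3)
  plot(rv, sort='descending', margin=TRUE)
}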
rms/R/latex.rms.s
latexrms <-
function(object,
file="",
append=FALSE, which=1 : p, varnames, columns=65, prefix=NULL,
inline=FALSE, before=if(inline) "" else "& &", after="",
intercept, pretrans=TRUE, digits=.Options$digits, size='')
{
html <- prType() == 'html'
## Break character for non-math mode:
brchar <- if(html) '<br>' else '\\\\'
f <- object
at <- f$Design
name <- at$name
ac <- at$assume.code
Tex <- at$tex
p <- length(name)
nrp <- num.intercepts(f)
## f$term.labels does not include strat
TL <- attr(terms(f), "term.labels")
tl <- TL
##Get inner transformations
from <- c('asis(*)','pol(*)','lsp(*)','rcs(*)','catg(*)','scored(*)',
'strat(*)','matrx(*)','gTrans(*)','I(*)')
to <- rep('*',10)
TLi <- paste0("h(",sedit(TL, from, to),")")
## change wrapping function to h()
h <- function(x,...) deparse(substitute(x))
for(i in (1 : p)[ac != 9]) TLi[i] <- eval(parse(text=TLi[i]))
TLi <- ifelse(TLi == name | ac == 1 | ac == 9, "", TLi)
anytr <- any(TLi != "")
if(! missing(varnames)) {
if(length(varnames) != sum(ac != 9)) stop("varnames is wrong length")
vn <- name
vn[ac != 9] <- varnames
varnames <- vn
tl <- sedit(tl, name, varnames, wild.literal=TRUE)
if(anytr) TLi <- sedit(TLi, name, varnames, wild.literal=TRUE)
}
else
varnames <- name
lnam <- nchar(varnames)
## digits at end of name -> subscript, change font
## used to be {\\mit *}
vnames <- sedit(varnames, '*$', '_{*}', test=all.digits)
if(is.character(which))
{
wh <- charmatch(which, name, 0)
if(any(wh == 0)) stop(paste("variable name not in model:",
paste(which[wh == 0], collapse=" ")))
which <- wh   # convert names to integer indexes used below
}
interaction <- at$interactions
if(length(interaction) == 0) interaction <- 0
parms <- at$parms
##If any interactions to be printed, make sure all main effects are included
ia <- ac[which] == 9
if(length(which) < p & any(ia))
{
for(i in which[ia]) which <- c(which,parms[[name[i]]][,1])
which <- which[which>0]
which <- sort(unique(which))
}
from <- c('sqrt(*)', 'log(', 'I(*)', '1/(*)', 'pmin(', 'pmax(')
to <- c('\\sqrt{*}','\\log(','[*]', '(*)^{-1}','\\min(','\\max(')
tl <- sedit(tl, from, to)
tl <- sedit(tl, varnames, vnames, wild.literal=TRUE)
ltl <- nchar(tl)
tl <- paste0("\\mathrm{", tl, "}")
if(anytr)
{
TLi <- sedit(TLi, from, to)
TLi <- sedit(TLi, varnames, vnames, wild.literal=TRUE)
TLi <- ifelse(TLi == "", "", paste0("\\mathrm{", TLi, "}"))
}
varnames <- paste0("\\mathrm{", vnames, "}")
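## Two.Way builds the LaTeX for a two-factor interaction: the factors are
## ordered so that the one carrying nonlinear terms (or more terms) comes
## second, restricted interactions involving rcs are re-expressed with
## rcspline.restate, and output lines are wrapped to `columns` characters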
Two.Way <- function(prm, Nam, nam.coef, lNam, cof, coef, f,
columns, lcof, varnames,
lnam, at, digits=digits)
{
i1 <- prm[1, 1]
i2 <- prm[2, 1]
num.nl <- any(prm[1, -1] != 0) + any(prm[2, -1] != 0)
##If single factor with nonlinear terms, get it as second factor
##Otherwise, put factor with most # terms as second factor
rev <- FALSE
if((num.nl == 1 & any(prm[1, -1] != 0)) ||
(length(Nam[[i1]]) > length(Nam[[i2]])))
{
i1 <- i2
i2 <- prm[1,1]
rev <- TRUE
}
N1 <- Nam[[i1]]; N2 <- Nam[[i2]]
n1 <- nam.coef[[i1]]; n2 <- nam.coef[[i2]]
q <- NULL; cur <- ""; m <- 0
for(j1 in 1 : length(N1))
{
nam1 <- nam.coef[[i1]][j1]
l1 <- lNam[[i1]][j1]
lN2 <- length(N2)
cnam <- if(rev) paste(nam.coef[[i2]], "*", nam1) else
paste(nam1, "*", nam.coef[[i2]])
mnam <- match(cnam, names(cof), nomatch=0)
act <- mnam[mnam>0]
lN2.act <- length(act)
##Check if restricted interaction between a rcs and another nonlinear
##var, i.e. >1 2nd term possible, only 1 (linear) there, and at first
##nonlinear term of rcs
if(lN2.act == 1 & lN2>1 & at$assume.code[i1] == 4 & j1 == 2)
{
if(cur != "")
{
q <- c(q, cur)
m <- 0
cur <- ""
}
v <- paste0("+", N2[1], "[")
n <- lNam[[i2]][1]
if(m + n > columns)
{
q <- c(q, cur)
cur <- ""
m <- 0
}
cur <- paste0(cur, v)
m <- m+n
cnam <- paste(nam.coef[[if(rev)i2 else i1]][1], "*",
nam.coef[[if(rev)i1 else i2]][-1])
v <- rcspline.restate(at$parms[[at$name[i1]]], c(0, coef[cnam]),
x=varnames[i1],
lx=lnam[i1], columns=columns, before="",
after="",
begin=cur, nbegin=m, digits=digits)
m <- attr(v, "columns.used")+1 #+1 for "]"
v <- attr(v, "latex")
j <- length(v)
if(j>1) q <- c(q, v[-j])
cur <- paste(v[j], "]")
break
}
else if(lN2.act == 1)
{
v <- paste0(cof[act],"\\:",N1[j1],"\\:\\times\\:",
N2[mnam>0])
n <- l1+lNam[[i2]][mnam > 0] + 2
if(m + n > columns)
{
q <- c(q, cur)
cur <- ""
m <- 0
}
cur <- paste0(cur, v)
m <- m + n
}
else if(lN2.act>0)
{
if(cur != "")
{
q <- c(q, cur)
m <- 0
cur <- ""
}
v <- paste0("+", N1[j1], "[")
n <- l1 + 1
if(m + n > columns)
{
q <- c(q, cur)
cur <- ""
m <- 0
}
cur <- paste0(cur, v)
m <- m + n
if(at$assume.code[i2] == 4 & ! any(mnam == 0))
{
##rcspline, interaction not restricted
v <- rcspline.restate(at$parms[[at$name[i2]]],
coef[act], x=varnames[i2],
lx=lnam[i2],
columns=columns, before="",
after="",
begin=cur, nbegin=m,
digits=digits)
m <- attr(v, "columns.used") + 1 #1 for "]"
v <- attr(v, "latex")
j <- length(v)
if(j>1) q <- c(q, v[-j])
cur <- paste(v[j],"]")
}
else
{
for(j2 in 1 : lN2)
{
l <- mnam[j2]
if(l > 0)
{ #not a restricted-out nonlinear term
if(j2 == 1 && substring(cof[l],1,1) == "+")
cof[l] <- substring(cof[l],2)
v <- paste0(cof[l], "\\:", N2[j2])
n <- lcof[l] + lNam[[i2]][j2]
if(m + n > columns)
{
q <- c(q, cur)
cur <- ""
m <- 0
}
cur <- paste0(cur, v)
m <- m + n
}
}
cur <- paste(cur, "]")
}
}
}
if(cur != "") q <- c(q, cur)
attr(q, "columns.used") <- m
q
}
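## Three.Way lays out the LaTeX for a three-factor interaction, writing each
## coefficient times the product of the three component terms and wrapping
## lines whenever the running width exceeds `columns`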
Three.Way <- function(prm, Nam, nam.coef, lNam, cof, coef, f,
columns, lcof, at)
{
i1 <- prm[1,1]; i2 <- prm[2,1]; i3 <- prm[3,1]
N1 <- Nam[[i1]]; N2 <- Nam[[i2]]; N3 <- Nam[[i3]]
q <- NULL
cur <- ""
m <- 0
l <- 0
for(j3 in 1 : length(N3))
{
for(j2 in 1 : length(N2))
{
for(j1 in 1 : length(N1))
{
l <- l + 1
v <- paste0(cof[l], "\\:", N1[j1], "\\:\\times\\:", N2[j2],
"\\:\\times\\:", N3[j3])
n <- lcof[l] + lNam[[i1]][j1] + lNam[[i2]][j2] +
lNam[[i3]][j3] + 3
if(m + n > columns)
{
q <- c(q, cur)
cur <- ""
m <- 0
}
cur <- paste0(cur, v)
m <- m + n
}
}
}
q <- c(q, cur)
attr(q, "columns.used") <- m
q
}
if(! inline)
{
tex <- '\\begin{array}' # was eqnarray*
if(size != '') tex <- c(tex, paste0('\\', size))
if(length(prefix))
tex <- c(tex,
# if(html) paste0(prefix, '= & & \\\\') else
paste0("\\lefteqn{", prefix, "=}\\\\"))
} else tex <- NULL
cur <- ""
cols <- 0
Coef <- f$coef
if((length(which) == p)&& (nrp == 1 | ! missing(intercept)))
{
cof <- if(missing(intercept))
format(Coef[1], digits=digits) else format(intercept, digits=digits)
cur <- cof
cols <- nchar(cof)
}
anyivar <- anyplus <- FALSE # anyivar = any indicator variable
Nam <- lNam <- nam.coef <- list()
for(i in (1 : p)[which])
{
ass <- ac[i]
nam <- varnames[i]
prm <- at$parms[[at$name[i]]]
if(ass %in% c(5, 7, 8))
{
if(ass == 7) prm <- format(prm)
oprm <- prm
lprm <- nchar(prm)
z <- substring(prm, 1, 1) == "["
u <- ! z & ass == 7
prm <- sedit(prm, c(' ','&','%'), c('\\ ','\\&','\\%'))
prm <- ifelse(z | u, prm, paste0("\\mathrm{", prm, "}"))
prm <- ifelse(z, paste(nam, "\\in ", prm), prm)
prm <- ifelse(u, paste(nam, "=", prm), prm)
lprm <- lprm + (z | u) * (lnam[i] + 1)
prm <- paste0("[", prm, "]")
if(ass != 8) anyivar <- TRUE
}
if(ass != 8)
{
k <- f$assign[[TL[i]]]
coef <- Coef[k]
nam.coef[[i]] <- names(coef)
cof <- formatSep(coef, digits=digits)
lcof <- nchar(cof)
cof <- latexSN(cof)
cof <- ifelse(coef<=0, cof, paste0("+", cof))
cof.sp <- cof
if(ass == 2 | ass == 10)
{
r <- grep("times", cof)
r <- if(length(r) == 0) 1 : length(cof) else -r
cof.sp[r] <- paste0(cof.sp[r], "\\:")
}
else
if(length(grep("time",cof[1])) == 0)
cof.sp[1] <- paste0(cof[1], "\\:")
## medium space between constant and variable names if constant
## does not end in 10^x
}
newline <- FALSE
switch(ass,
{ # 1 - asis (linear)
nam <- tl[i]
Nam[[i]] <- nam
lNam[[i]] <- ltl[i]
q <- paste0(cof.sp, nam)
m <- ltl[i] + lcof
},
{ # 2 - pol
q <- ""
m <- 0
pow <- 1 : prm
nams <- ifelse(pow == 1,nam, paste0(nam, "^{", pow, "}"))
Nam[[i]] <- nams; lNam[[i]] <- rep(lnam[i],prm)
for(j in pow) q <- paste0(q,cof.sp[j], nams[j])
m <- prm * lnam[i] + sum(lcof)
},
{ # 3 - lsp
if(cols > 0)
{
tex <- c(tex, cur)
cur <-""
cols <- 0
}
anyplus <- TRUE
q <- paste0(cof.sp[1], nam)
m <- lcof[1] + lnam[i]
nams <- nam; lnams <- lnam[i]
kn <- format(-prm)
lkn <- nchar(kn)
for(j in 1 : length(prm))
{
z <- paste0("(", nam, if(prm[j] < 0) "+" else NULL,
if(prm[j] != 0) kn[j] else NULL, ")_{+}")
nams <- c(nams, z)
u <- lnam[i] + lkn[j] + 2
lnams <- c(lnams, u)
v <- paste0(cof[j + 1], z)
n <- lcof[j + 1] + u
if(m + n > columns)
{
cur <- paste(cur, q)
tex <- c(tex, cur)
cur <- ""
cols <- 0
q <- ""
m <- 0
}
q <- paste0(q, v)
m <- m + n
}
Nam[[i]] <- nams; lNam[[i]] <- lnams
},
{ # 4 - rcs
q <- rcspline.restate(prm, coef, x=nam, lx=lnam[i],
columns=columns,
before="", after="", digits=digits)
anyplus <- TRUE
m <- attr(q, "columns.used")
nn <- nam; ln <- lnam[i]
for(j in 1 : (length(prm) - 2))
{
nam <- paste0(nam, "'")
nn <- c(nn, nam)
ln <- c(ln, lnam[i] + j)
}
Nam[[i]] <- nn #Two.Way only needs first name
lNam[[i]] <- ln #for 2nd-order ia with 1 d.f. (restr ia)
##Three.Way needs original design matrix
q <- attr(q, "latex")
if(substring(sedit(q[1], " ", ""), 1, 1) != "-")
q[1] <- paste0("+", q[1])
j <- length(q)
if(cur != "")
{
tex <- c(tex,cur)
cur <- ""
cols <- 0
}
if(j > 1)
{
tex <- c(tex, q[-j])
q <- q[j]
}
} ,
{ # 5 - catg
Nam[[i]] <- prm[-1]
lNam[[i]] <- lprm[-1]
if(cols > 0)
{
tex <- c(tex,cur)
cur <- ""
cols <- 0
}
q <- ""
m <- 0
for(j in 2 : length(prm))
{
v <- paste0(cof[j - 1], prm[j])
n <- lcof[j - 1] + lprm[j]
if(m + n > columns)
{
cur <- paste(cur, q)
tex <- c(tex, cur)
cur <- ""
cols <- 0
q <- ""
m <- 0
}
q <- paste0(q, v)
m <- m + n
}
},
q <- "",
{ # 7 - scored
if(cols > 0)
{
tex <- c(tex, cur)
cur <- ""
cols <- 0
}
q <- paste0(cof.sp[1], nam)
m <- nchar(q)
nams <- nam
lnams <- lnam[i]
for(j in 3 : length(prm))
{
z <- prm[j]
v <- paste0(cof[j - 1], z)
u <- lprm[j] + lnam[i] + 3
n <- lcof[j - 1] + u
nams <- c(nams, z)
lnams <- c(lnams,u)
if(m + n > columns)
{
cur <- paste(cur, q)
tex <- c(tex, cur)
cur <- ""
cols <- 0
q <- ""
m <- 0
}
q <- paste0(q, v)
m <- m + n
}
Nam[[i]] <- nams; lNam[[i]] <- lnams
},
##Strat factor doesn't exist as main effect, but keep variable
##names and their lengths if they will appear in interactions later
{ # 8 - strat
## if(length(Nam[[i]]) == 0 && any(interaction == i)) 22Nov10
if(any(interaction == i))
{
nam.coef[[i]] <- paste0(name[i], "=", oprm[-1])
Nam[[i]] <- prm[-1]
lNam[[i]] <- lprm[-1]
}
q <- ""
},
{
if(prm[3,1] == 0)
q <- Two.Way(prm, Nam, nam.coef, lNam, cof, coef, f,
columns, lcof,
varnames, lnam, at, digits=digits)
else q <- Three.Way(prm, Nam, nam.coef, lNam, cof, coef, f,
columns, lcof, at)
m <- attr(q, "columns.used")
j <- length(q)
if(cur != "")
{
tex <- c(tex,cur)
cur <- ""
cols <- 0
}
if(j > 1)
{
tex <- c(tex,q[-j])
q <- q[j]
}
},
{ # 10 - matrx
nam <- names(coef)
if(cols > 0)
{
tex <- c(tex,cur)
cur <- ""
cols <- 0
}
q <- ""
m <- 0
lnam <- nchar(nam)
nam <- paste0("\\mathrm{", nam, "}")
Nam[[i]] <- nam; lNam[[i]] <- lnam
for(j in 1 : length(prm))
{
v <- paste0(cof.sp[j], nam[j])
n <- lcof[j] + lnam[j]
if(m + n > columns)
{
cur <- paste(cur, q)
tex <- c(tex, cur)
cur <- ""
cols <- 0
q <- ""
m <- 0
}
q <- paste0(q, v)
m <- m + n
}
},
{ # 11 - gTrans
z <- paste('no tex attribute for gTrans variable', name[i])
if(! length(Tex)) stop(z)
tx <- Tex[[name[i]]]
if(! length(tx)) stop(z)
tx <- eval(parse(text=tx))
nams <- tx(nam)
q <- ""
m <- 0
lx <- length(nams)
Nam[[i]] <- nams; lNam[[i]] <- rep(lnam[i], lx)
for(j in 1 : lx) q <- paste0(q, cof.sp[j], nams[j])
m <- lx * lnam[i] + sum(lcof)
}
)
if(length(q) && q != "")
{
if(cols + m > columns)
{
tex <- c(tex, cur)
cur <- ""
cols <- 0
}
cur <- paste(cur, q)
cols <- cols + m
}
}
if(cur != "") tex <- c(tex, cur)
if(inline) {
tex <- paste(tex, collapse='\\\\')
tex <- c('\\begin{array}{l}', tex, '\\end{array}')
if(before != '') tex <- c(before, '', tex)
if(size != '') tex <- c(paste0('{\\', size), tex)
if(after != '') tex <- c(tex, after)
if(size != '') tex <- c(tex, '}')
if(file == '') return(tex)
cat(tex, sep='\n', file=file, append=append)
return(invisible())
}
tex <- c(tex, '\\end{array}') # was eqnarray*
tex <- ifelse(tex == paste0(prefix, '= & & \\\\') |
substring(tex,1,1) == "\\", tex,
paste(before, tex, "\\\\"))
if(anyivar | anyplus) {
# s <- if(length(which) == p) "and " else "where "
s <- ''
if(anyivar)
s <- paste0(s, "$$[c]=1~\\mathrm{if~subject~is~in~group}~c,~0~\\mathrm{otherwise}$$")
# if(anyivar && anyplus) s <- paste0(s, '; ')
if(anyplus)
s <- paste0(s, "$$(x)_{+}=x~\\mathrm{if}~x > 0,~0~\\mathrm{otherwise}$$")
tex <- c(tex, s)
}
if(anytr & pretrans) {
i <- TLi != ""
if(sum(i) == 1) tr <- paste0("$", varnames[i],
"$ is pre--transformed as $",
TLi[i], "$.")
else {
tr <- if(html) {
z <- cbind(Variable=paste0('$', varnames, '$'),
Transformation=paste0('$', TLi, '$'))
as.character(htmlTable::htmlTable(z, caption='Pre-transformations',
css.cell='min-width: 9em;',
align='|l|l|',
align.header='|c|c|',
escape.html=FALSE))
}
else
c("\\vspace{0.5ex}\\begin{center}{\\bf Pre--Transformations}\\\\",
"\\vspace{1.5ex}\\begin{tabular}{|l|l|} \\hline",
"\\multicolumn{1}{|c|}{Variable} & \\multicolumn{1}{c|}{Transformation} \\\\ \\hline",
paste0("$",varnames[i],"$ & $",TLi[i],"$ \\\\"),
"\\hline", "\\end{tabular}\\end{center}")
}
tex <- c(tex, tr)
}
if(file == '') return(tex)
cat(tex, sep='\n', file=file, append=append)
}
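
## Hedged usage sketch (illustration only): latexrms() is normally reached
## through the latex() methods for rms fits rather than called directly.
## Data and object names below are arbitrary.
if (FALSE) {
  require(rms)
  set.seed(3)
  x1 <- rnorm(50); x2 <- runif(50)
  y  <- x1 + 2 * x2 + rnorm(50)
  f  <- ols(y ~ rcs(x1, 4) + x2)
  latex(f, file='')    # typesets the fitted model equation
}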
rms/R/val.surv.s
val.surv <- function(fit, newdata, S, est.surv, censor,
u, fun, lim, evaluate=100, pred, maxdim=5, ...)
{
usehare <- ! missing(u)
if(usehare) {
if(missing(fun)) {
if(missing(fit))
stop('must specify fit if u is specified and fun is not')
fun <- if(inherits(fit, 'coxph')) function(p) log(-log(p))
else if(length(it <- survreg.distributions[[fit$dist]]$quantile)) it
else if(length(it <- survreg.auxinfo[[fit$dist]]$quantile)) it
else stop('fun is not given and it cannot be inferred')
}
}
if(missing(S)) {
S <- fit$y
units <- fit$units
if(! length(S)) stop('when S is omitted you must use y=TRUE in the fit')
}
else units <- attr(S, 'units')
if('Surv' %nin% attr(S, 'class')) stop('S must be a Surv object')
if(ncol(S) != 2) stop('S must be a right-censored Surv object')
if(missing(est.surv))
est.surv <- if(usehare) {
if(missing(newdata))
survest(fit, times=u)$surv
else
survest(fit, newdata, times=u)$surv
}
else {
if(missing(newdata))
survest(fit, times=S[,1], what='parallel')
else
survest(fit, newdata, times=S[,1], what='parallel')
}
if(usehare) {
i <- ! is.na(est.surv + S[,1] + S[,2])
est.surv <- est.surv[i]
S <- S[i,]
curtail <- function(x) pmin(.9999, pmax(x, .0001))
f <- polspline::hare(S[,1], S[,2], fun(curtail(est.surv)),
maxdim=maxdim, ...)
if(missing(pred)) {
if(missing(lim))
lim <-
datadist(est.surv)$limits[c('Low:prediction','High:prediction'),]
pseq <- seq(lim[1], lim[2], length=evaluate)
}
else pseq <- pred
actual <- 1 - polspline::phare(u, fun(curtail(est.surv)), f)
actualseq <- 1 - polspline::phare(u, fun(curtail(pseq)), f)
w <- structure(list(harefit=f, p=est.surv, actual=actual,
pseq=pseq, actualseq=actualseq,
u=u, fun=fun, n=nrow(S), d=sum(S[,2]),
units=units), class='val.survh')
return(w)
}
n <- nrow(S)
nac <- if(! missing(fit)) fit$na.action
if(! missing(censor) && length(censor) > 1 && ! missing(fit)) {
if(length(censor) > n && length(nac)) {
## Missing observations were deleted during fit
j <- ! is.na(naresid(nac, censor))
censor <- censor[j]
}
if(length(censor) != n)
stop("length of censor does not match # rows used in fit")
}
est.surv.censor <- lp <- NULL
if(! missing(censor)) {
if(missing(fit))
stop('fit must be specified when censor is specified')
est.surv.censor <- if(missing(newdata))
survest(fit, times=censor, what='parallel')
else
survest(fit, newdata, times=censor, what='parallel')
if(mc <- sum(is.na(est.surv.censor)))
warning(paste(mc, 'observations had missing survival estimates at censoring time'))
lp <- if(missing(newdata)) predict(fit, type='lp')
else
predict(fit, newdata, type='lp')
}
if(length(est.surv) != n)
stop('length of est.surv must equal number of rows in S')
structure(list(S=S, est.surv=est.surv,
censor.est.surv=if(length(est.surv.censor))
est.surv.censor,
lp=if(length(lp))lp,
na.action=nac), class='val.surv')
}
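
## Hedged usage sketch (illustration only, not package code): validating the
## predicted survival probability of a parametric model at a single time u
## via the hare-based approach above.  Requires the polspline package; data
## are simulated and all names are arbitrary.
if (FALSE) {
  require(rms)
  set.seed(4)
  n  <- 400
  x1 <- rnorm(n); x2 <- rnorm(n)
  tt <- rexp(n, rate=exp(-2 + .5 * x1 - .3 * x2))
  ev <- as.integer(tt < 10)
  tt <- pmin(tt, 10)
  f  <- psm(Surv(tt, ev) ~ x1 + x2, dist='weibull', x=TRUE, y=TRUE)
  v  <- val.surv(f, u=5)    # hare calibration of predicted S(5 | X)
  print(v)
  plot(v)
}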
print.val.survh <- function(x, ...) {
cat('\nValidation of Predicted Survival at Time=', format(x$u),
'\tn=', x$n, ', events=', x$d, '\n\n')
cat('hare fit:\n\n')
print(x$harefit)
cat('\nFunction used to transform predictions:\n')
cat(paste(format(x$fun), collapse=' '))
error <- abs(x$p - x$actual)
er <- c(mean(error, na.rm=TRUE), quantile(error, .9, na.rm=TRUE))
erf <- format(round(er, 4))
cat('\n\nMean absolute error in predicted probabilities:',
erf[1],'\n')
cat('0.9 Quantile of absolute errors :', erf[2], '\n')
er
}
plot.val.survh <- function(x, lim, xlab, ylab,
riskdist=TRUE, add=FALSE,
scat1d.opts=list(nhistSpike=200), ...)
{
if(missing(lim)) lim <- range(c(x$pseq, x$actualseq), na.rm=TRUE)
units <- if(x$u == 1) x$units
else
paste(x$units, 's', sep='')
lab <- paste('Probability of Surviving ', format(x$u), ' ', units,
sep='')
if(add)
lines(x$pseq, x$actualseq, ...)
else
plot(x$pseq, x$actualseq, type='l', xlim=lim, ylim=lim,
xlab=if(missing(xlab)) paste('Predicted', lab) else xlab,
ylab=if(missing(ylab)) paste('Actual', lab) else ylab)
abline(a=0, b=1, lty=2)
if(riskdist) do.call('scat1d', c(list(x=x$p), scat1d.opts))
invisible()
}
plot.val.surv <- function(x, group, g.group=4,
what=c('difference','ratio'),
type=c('l','b','p'),
xlab, ylab, xlim, ylim,
datadensity=TRUE, ...)
{
S <- x$S
est.surv <- x$est.surv
censor.est.surv <- x$censor.est.surv
what <- match.arg(what)
type <- match.arg(type)
n <- length(est.surv)
nac <- x$na.action
if(! missing(group)) {
if(length(group) > n && length(nac)) {
## Missing observations were deleted during fit
j <- ! is.na(naresid(nac, est.surv))
group <- group[j]
}
if(length(group) != n)
stop("length of group does not match # rows used in fit")
if(! is.factor(group)) group <-
if(is.logical(group) || is.character(group))
as.factor(group) else cut2(group, g=g.group)
}
if(length(censor.est.surv)) {
if(missing(group)) group <- rep(1, length(censor.est.surv))
i <- S[,2]==1
group <- group[i]
if(sum(i)<2) stop('fewer than 2 uncensored observations')
y <- switch(what,
difference=1-est.surv - .5*(1-censor.est.surv),
ratio=(1 - est.surv) / (.5 * (1 - censor.est.surv)))
meanF <- tapply(1 - est.surv[i], group, mean, na.rm=TRUE)
meanE <- tapply(.5*(1-censor.est.surv[i]), group, mean, na.rm=TRUE)
res <- matrix(cbind(meanF,meanE), ncol=2,
dimnames=list(levels(group),
c('Mean F(T|T