pec/0000755000176200001440000000000014131045152011015 5ustar liggesuserspec/NAMESPACE0000755000176200001440000001050014131017560012235 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(ipcw,aalen) S3method(ipcw,cox) S3method(ipcw,forest) S3method(ipcw,marginal) S3method(ipcw,none) S3method(ipcw,nonpar) S3method(ipcw,rfsrc) S3method(plot,Cindex) S3method(plot,calibrationPlot) S3method(plot,confScoreSurv) S3method(plot,pec) S3method(plot,riskReclassification) S3method(predictEventProb,ARR) S3method(predictEventProb,CauseSpecificCox) S3method(predictEventProb,FGR) S3method(predictEventProb,coxboost) S3method(predictEventProb,matrix) S3method(predictEventProb,prodlim) S3method(predictEventProb,pseudoForest) S3method(predictEventProb,rfsrc) S3method(predictEventProb,riskRegression) S3method(predictEventProb,selectFGR) S3method(predictLifeYearsLost,ARR) S3method(predictLifeYearsLost,CauseSpecificCox) S3method(predictLifeYearsLost,FGR) S3method(predictLifeYearsLost,coxboost) S3method(predictLifeYearsLost,matrix) S3method(predictLifeYearsLost,prodlim) S3method(predictLifeYearsLost,rfsrc) S3method(predictLifeYearsLost,riskRegression) S3method(predictProb,glm) S3method(predictProb,ols) S3method(predictProb,randomForest) S3method(predictRestrictedMeanTime,aalen) S3method(predictRestrictedMeanTime,cox.aalen) S3method(predictRestrictedMeanTime,coxph) S3method(predictRestrictedMeanTime,coxph.penal) S3method(predictRestrictedMeanTime,cph) S3method(predictRestrictedMeanTime,default) S3method(predictRestrictedMeanTime,matrix) S3method(predictRestrictedMeanTime,numeric) S3method(predictRestrictedMeanTime,pecRpart) S3method(predictRestrictedMeanTime,prodlim) S3method(predictRestrictedMeanTime,psm) S3method(predictRestrictedMeanTime,rfsrc) S3method(predictRestrictedMeanTime,riskRegression) S3method(predictRestrictedMeanTime,selectCox) S3method(predictRestrictedMeanTime,survfit) S3method(predictSurvProb,aalen) S3method(predictSurvProb,cox.aalen) 
S3method(predictSurvProb,coxboost) S3method(predictSurvProb,coxph) S3method(predictSurvProb,cph) S3method(predictSurvProb,default) S3method(predictSurvProb,matrix) S3method(predictSurvProb,numeric) S3method(predictSurvProb,pecCforest) S3method(predictSurvProb,pecCtree) S3method(predictSurvProb,pecRpart) S3method(predictSurvProb,penfitS3) S3method(predictSurvProb,prodlim) S3method(predictSurvProb,pseudoForest) S3method(predictSurvProb,psm) S3method(predictSurvProb,rfsrc) S3method(predictSurvProb,riskRegression) S3method(predictSurvProb,selectCox) S3method(predictSurvProb,survfit) S3method(print,Cindex) S3method(print,IPCW) S3method(print,R2) S3method(print,calibrationPlot) S3method(print,confScoreSurv) S3method(print,crps) S3method(print,method) S3method(print,multiSplitTest) S3method(print,pec) S3method(print,splitMethod) S3method(print,vandeWielTest) S3method(summary,Cindex) S3method(summary,confScoreSurv) S3method(summary,pec) export(R2) export(calPlot) export(cindex) export(coxboost) export(crps) export(ibs) export(ipcw) export(pec) export(pecCforest) export(pecCtree) export(pecRpart) export(plotPredictEventProb) export(plotPredictSurvProb) export(predictEventProb) export(predictLifeYearsLost) export(predictRestrictedMeanTime) export(predictSurvProb) export(resolvesplitMethod) export(selectCox) export(selectFGR) export(simCost) importFrom(foreach,"%dopar%") importFrom(grDevices,col2rgb) importFrom(grDevices,gray) importFrom(graphics,abline) importFrom(graphics,axis) importFrom(graphics,box) importFrom(graphics,legend) importFrom(graphics,lines) importFrom(graphics,mtext) importFrom(graphics,par) importFrom(graphics,plot) importFrom(graphics,points) importFrom(graphics,segments) importFrom(graphics,text) importFrom(graphics,title) importFrom(prodlim,Hist) importFrom(stats,as.formula) importFrom(stats,coef) importFrom(stats,family) importFrom(stats,formula) importFrom(stats,median) importFrom(stats,model.frame) importFrom(stats,model.matrix) 
importFrom(stats,model.response) importFrom(stats,na.fail) importFrom(stats,na.omit) importFrom(stats,pnorm) importFrom(stats,predict) importFrom(stats,quantile) importFrom(stats,rbinom) importFrom(stats,rexp) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,smooth) importFrom(stats,terms) importFrom(stats,time) importFrom(stats,update) importFrom(stats,update.formula) importFrom(stats,var) importFrom(stats,wilcox.test) importFrom(survival,Surv) importFrom(timereg,aalen) importFrom(utils,capture.output) importFrom(utils,head) importFrom(utils,select.list) useDynLib(pec, .registration=TRUE) pec/README0000755000176200001440000000054413571203277011716 0ustar liggesusers1. Put any C/C++/Fortran code in 'src' 2. If you have compiled code, add a .First.lib() function in 'R' to load the shared library 3. Edit the help file skeletons in 'man' 4. Run R CMD build to create the index files 5. Run R CMD check to check the package 6. Run R CMD build to make the package file Read "Writing R Extensions" for more information. 
pec/data/0000755000176200001440000000000013571203267011740 5ustar liggesuserspec/data/cost.rda0000644000176200001440000001466013571203267013407 0ustar liggesusers]\WuevjfW}גV֪͒kYdI%#VHuvB IHIF !HIHFK!b3yO7޻͌|n9?{gsl ݧRFkq[0VUsSJٙ5 tAwV2ܭ<4qyfGl^s}5w#|OփO1Qn)<۳H&|NXǭܿ㸓`u7#豛0K躋tƺvQ>K>_pm#cw)/Xv< 1rtWf{ecs n?κ|0XdU+8뾒c>g$|JfK$gTkMtu٫+uwEw\uw q}ì׭*{YChν9盋I#)w}Bأc'qWƮ^]i fFMO+kH|]퓦Y>r|jiv'K[Z;6<( ;y$aY{\l3BȂ$?&J7p#YOIgt|J¾|LR}jf\4f,>#4qgܼ<8K݊VJ,|VkW;e+Kk+BYS:K׌VJX;l7q͠T}aV<5Ʒ<bzȎ0NC^ycg_}Z/|f[R^54扣oYo:bg\}-K8IbV'g^R~?LkG.߉׌V(K:6O˫ 嫝Nh~Sڡ{#7qz5fC磃ϫB*.i{i>I3ib#4lf_Z>( KY}4ܥ!)yuɓ$Ld/m|r&>'?}HO\rs>iFRi<) i]vEG_جz*/>kYlj_{}d+❶Da;bk{5;ytiG}I=^3NzA;UǍL7R;,:)\/_[f48SkWUWG7%]J|%XJavl-<8mğ3_++m3'}xOⷂ,k7uy _i=gFlWWd}I71|<+Y>EX\ƴV"s2As1j%vy?ks 2湞 ="s(^"20G$p[ s6b|H2̼r{QD@~Uf.k8ҷuaU'ٞ~mZ>;ƈV0+ TR ר.JH]Ő enSd\3IV쏍죭|H>Y7tbWncv4nȐ5EZo M2֒L"}_.gVֵ*\#s_t"8.p]IFn[Z**ݑ9F=923d=Y<mW?:3Ad<˙f?ƞ/GG2Nr㇥"_q4}#Ogȝ.r+*뱘%>2N]3wWGG% ?s :k/K;ž$gK[:|c>#wcOCO; FxWs&Ub#%;GWbY]?⯹cw7!%O$_ rEp.|,5.z8υyrvRiNgn}K˳U'_?Wy#Ĺ߱_p&uJ^g9uU ^WG9W΋|?BG['~yG5>WOJUU'*| =~R=oعw*U{ A)h|+Uڀ7a܇#I1!5\ߥg`Y8xOWa\XJU_q?`Dߋ1A/e?x~c(M~Kh=kVHA<C&q4Z;GZTW88AUs[1܊qo+?c+T Uދ'-q\εw~W v}!x}zWx 1g E`TNܿ Iv2baRf |n2 W>9w%.c_c bT-@l+{eせir嘃*XZqc ěUKe|01{M+58إ 9g~!_;>\ _J d|<T<5oFϙ_;_UA,`smd+=Ak!%Fy-|U A*mip-ո緩k AI@*? h?' Y`h7/)A)"X1?1*~G8J緟pXC>%2GlPʨm*UJ`Ju.UWX!65Ԥ :swȇ ݍ9p1jKO|O}o"^suJCyы_A{e=ƽsւ*xi W< X2п lTh`fQ.fZQN~Up߅)P>Z҅U@,]B FPӫj,Q҃jr>E-"Wʔ[uhP,Byu-[E-F=5/R~}Ӆ<)}+ET~ sQ/ȟYUV%[ctNW*ּiufA=(>W%iEH3}.N5?ޥf}*>dʇߌ;ֵK5ikK}g{qdؑG&8=|ەǜH- Ic|V0gK+~5f>QڼiNYqe\Kj#I~|#Mv{}tLMگчGɯ/_&c˗/>ȉ|أǧ&YS\s/cǦ ՘ήNM\҃{q?L%?7>`~&T.]ԋ㾯}~tjt|21ؐ&/hARz oUal|_/ѱ}){G*DhsG_/ C +%u* }DTH_ÆMqióO)?eOSGTSxYWU3tXm6_QgUDaSWߐF'kAPaR̓z9/ /6P׀) KA0= 'l^6t`S˭&Y6>QP -6w@$):$æMas Ea 4^6D /고ρ>l>@AB^?/>Lh:X46bQڀ^mF@64^56/Bcí o^ l_&4 ,hڀc@LM6&PKԈF5byxj^#׈=~k_WM&J" 1Rh`6/DL`Cc_F4n`_ ^PF/AV4 'Ao+M `E+7MPEcSQҴqTD!؀k:v4 Mƞ74`jG?hhF7C F7q͞n4pCU1aa(X0 APp`iz` z`  3 `<C@ms8€` \6  nP  5<  U ׂ_! 
=B| kW`nc8 mP# bmBͻ@潠q7{{> >8X0 `>743 `v tHeP7̗@-XC,o5VCmaa4 uҡ(0bQ?,nvu® -b5+uذ컂B Xb`Q#,`q ,ҡ=Lo X`>Nu҇} ,ذE[}i lX`âX \X:w E-0bYĢ&X`~Ѕ>L6,}\Ozpfpec/data/Pbc3.csv0000644000176200001440000004112013571203267013242 0ustar liggesusersptno;unit;tment;sex;age;stage;gibleed;crea;alb;bili;alkph;asptr;weight;days;status 1;2;1;1;62;4;0;82;33;106;682.5;125.5;82;625;2 2;2;1;0;42;3;0;52;42;154;1360;128;56;2118;0 3;2;1;0;48;3;0;96;43;9;236;48;66;2083;0 4;2;1;0;66;NA;1;104;40;8.5;280.5;47.5;53;639;2 5;2;1;0;63;4;0;58;32;94;701;269;54;1043;2 6;2;1;1;62;NA;0;63;32;25.33;1198;90.67;64;221;2 7;2;1;0;46;2;0;63;38;80;824;206.5;65;2077;0 8;2;1;0;56;2;0;63;42;12;1072;77;68;2056;0 9;2;1;0;59;3;1;48;32;44.67;844.7;105;48;2018;0 101;2;0;0;62;3;0;96;32;6;314;45;NA;2146;0 102;2;0;1;63;4;0;69;28;103.5;324;117.5;55;425;2 103;2;0;0;43;3;1;75;38;39.67;1165;147.3;53;2124;0 104;2;0;0;68;3;1;75;45;16;694;24;55;2131;0 105;2;0;0;55;3;1;78;40;26.5;829;118;59;786;2 106;2;0;0;57;3;0;43;40;18.5;688.5;123.5;57;1859;2 110;2;0;0;57;NA;0;72;42;13.67;418.3;56.33;59;2098;0 111;2;0;0;66;4;0;86;38;14.67;345.7;68;56;2098;0 112;2;0;0;51;4;1;35;38;42.33;901.7;83;60;379;2 113;2;0;0;66;2;0;69;40;21;409;76.67;NA;1647;2 201;2;0;0;64;4;0;65;35;39;210;68.67;45;1046;2 202;2;0;0;50;4;1;64;36;13.33;819.3;113.3;59;199;2 203;2;1;0;58;4;0;61;31;71.5;898.5;122;53;472;2 204;2;1;1;63;4;0;72;42;15;682.7;53;58;1648;0 205;2;1;0;59;1;0;63;33;15;1629;99.5;53;1888;0 206;2;0;0;50;2;0;72;43;8.5;364.5;28;62;1865;0 207;2;0;0;66;NA;0;41;36;53;1883;129;41;133;2 208;2;1;0;37;3;0;68;39;16.33;228.7;77.33;52;1796;0 209;2;1;0;56;2;0;67;41;20;1508;104;58;1767;0 210;2;1;0;41;3;0;41;40;16;620;129.5;66;1753;0 211;2;0;1;59;NA;1;132;31;139;2005;123;68;430;2 212;2;0;1;53;1;0;79;41;15;400;70;84;1759;0 213;2;0;0;60;4;1;49;35;188.7;667.3;123;45;791;2 214;2;1;0;63;4;0;66;36;16.33;659.7;48;54;1282;0 215;2;1;0;52;1;0;54;46;17;905;78;72;1742;0 216;2;0;0;53;2;1;72;35;10;812.3;58.33;52;1728;0 217;2;0;0;42;4;1;70;39;37.67;586.3;108.3;51;1647;0 
218;2;0;0;40;3;0;69;36;24;1422;97;52;1762;0 219;2;1;1;70;2;0;96;39;6;430;82;56;1290;2 220;2;1;0;56;2;0;74;41;18;594.9;77.5;56;1670;0 221;2;1;0;56;2;0;71;37;9;371;39;62;1608;0 223;2;1;0;41;4;1;62;26;107.5;737;167.5;49;367;2 224;2;0;1;56;1;0;89;36;25.33;848.7;88.67;67;1630;0 225;2;1;0;41;3;0;71;40;67.67;1017;117.3;55;1066;1 226;2;0;0;54;4;1;67;39;37;488;94.5;65;1621;0 227;2;0;0;51;3;1;62;29;38.67;450.7;145;63;249;2 228;2;1;0;47;3;0;NA;44;43;479;113;63;1573;0 229;2;0;1;51;NA;0;71;35;46;425;72.5;66;169;2 230;2;1;1;39;4;1;74;24;168;1245;120;63;461;1 231;2;1;0;39;1;0;62;41;7;107.5;10.5;67;1486;0 232;2;0;0;56;3;1;88;39;31;513.3;87.67;48;865;1 233;2;1;0;45;4;1;71;28;86;736;117;54;1433;1 234;2;0;0;68;1;1;106;39;11;848;78.67;57;1571;0 235;2;1;1;63;1;0;80;39;9;211.5;44.5;87;1433;0 236;2;0;0;63;3;1;88;29;21;414.3;68.67;65;1265;2 237;2;1;0;50;3;0;106;27;26.5;960.5;103.5;54;893;0 238;2;0;1;54;3;1;71;36;11;670.7;81.33;69;985;2 239;2;0;0;63;4;1;62;30;66.67;545;125.3;48;261;2 240;2;1;0;58;4;0;80;32;27.67;548;80;71;612;1 241;2;1;1;55;4;0;62;37;53;764;125;68;669;1 242;2;1;1;67;4;0;88;27;8;174;60;70;1332;0 243;2;0;0;43;2;1;71;35;29.33;638.7;102.7;65;1377;0 244;2;0;0;53;2;0;71;40;51;853;110;70;1127;1 245;2;1;0;34;1;0;71;46;9;501;71;70;1308;0 246;2;0;1;60;4;0;80;38;24;303;88;69;995;2 247;2;0;0;40;4;0;62;43;43;848;134;48;1325;1 248;2;1;0;56;4;0;71;30;46;431;145;70;766;1 249;2;0;0;71;2;0;88;40;9;218;32.33;45;1304;0 250;2;1;0;53;3;0;80;46;10.33;606.3;108.7;63;1298;0 251;2;1;0;59;3;0;124;34;14.67;1104;89;71;1290;0 252;2;0;1;39;1;0;80;43;5;711;121;50;1296;0 253;2;0;0;61;4;1;80;34;34.67;703.7;62;54;928;2 254;2;1;0;55;2;0;62;39;178;1134;281;55;1171;1 255;2;0;0;65;4;1;71;36;36.33;621;97;55;1283;0 256;2;1;0;55;1;0;80;42;10;238;101.3;83;1255;0 257;2;1;0;67;4;1;97;39;5;373.7;83;65;1252;0 258;2;1;0;56;4;0;71;42;42;484;116;55;1241;0 259;2;0;0;41;2;0;62;40;89.5;1233;207.5;55;1284;0 260;2;0;0;71;4;0;124;46;11;130;31.33;56;1272;0 261;2;1;0;55;4;1;71;34;35;863.5;91.5;55;1215;0 
262;2;1;1;50;4;1;80;27;108;398.7;104.3;60;691;2 263;2;0;1;67;4;0;97;40;8;205;49.5;84;1255;0 264;2;0;0;63;4;1;88;38;22.33;350;59.67;58;1252;0 265;2;0;0;63;3;0;80;35;8;439;71;51;1229;0 266;2;1;0;55;4;1;97;39;48.5;516.5;56;49;264;2 267;2;1;0;42;1;0;71;42;13.5;587.5;103;76;1185;0 268;2;0;0;53;NA;0;97;41;7.5;216;41.5;75;1202;0 269;2;1;0;47;2;0;85;43;19.5;940.7;105.7;75;1151;0 270;2;1;0;43;3;0;80;43;10;286;77;55;1074;0 271;2;0;0;46;3;0;97;45;38;371;313;NA;1152;0 272;2;0;1;71;4;1;97;32;37;539;37.5;72;1143;0 273;2;0;0;57;3;0;97;34;18;226.5;38.5;85;1143;0 274;2;0;0;58;3;0;71;42;43.67;891;105;62;714;1 275;2;1;1;63;1;0;114;42;11.67;529.3;49.33;76;1061;0 276;2;1;1;38;1;0;71;48;9;838;99;75;1090;0 277;2;1;0;69;1;0;80;45;8;458.5;125;70;1040;0 278;2;0;0;66;3;1;88;36;14.33;320;54.67;53;1031;0 279;2;0;0;42;4;0;NA;NA;197.5;1104;182;62;114;1 280;2;1;0;33;4;0;80;34;111;516;204;65;639;1 281;2;1;0;42;2;0;47;35;14;1337;NA;60;962;0 282;2;0;0;47;1;0;80;41;5;478;43;70;977;0 283;2;0;1;61;2;0;80;36;7;1561;46;57;351;2 284;2;1;0;60;3;1;88;38;15;574;130;58;907;0 285;2;1;0;56;NA;0;80;44;91;998;155;55;877;0 286;2;0;0;38;NA;0;NA;NA;13.5;359.5;72;60;962;0 287;2;0;0;40;2;0;NA;NA;16;1509;NA;75;962;0 288;2;1;0;47;4;0;71;38;35.5;534.5;152;62;849;0 289;2;1;0;57;3;0;68;42;18.5;494;34;66;773;0 290;2;0;0;50;3;0;88;36;40;411.5;136;61;429;1 291;2;1;0;49;4;0;61;33;96;565;166;59;765;0 292;2;0;0;53;4;1;59;33;39;569;95;62;339;1 293;2;1;0;51;3;0;50;35;76;1550;123;58;751;0 294;2;0;0;54;4;0;99;37;105;573;134;50;413;1 295;2;0;0;26;3;0;59;41;30;653;159;53;262;0 296;2;1;0;66;2;0;117;38;11;229;103;74;746;0 297;2;1;0;33;NA;0;74;30;149;545;143;45;146;1 298;2;0;0;56;4;0;98;39;31.5;213.5;74;62;758;0 299;2;0;0;71;2;0;104;39;11;359;40;55;758;0 301;3;0;0;53;2;0;70;36.02;24.67;2835;98;56;1838;0 302;3;1;0;59;NA;0;70;32.91;27.33;2755;83;55;1853;0 303;3;0;0;61;NA;0;90;40.85;8.333;1646;87;64;1763;0 304;3;1;1;74;4;0;100;35.67;28.33;1329;67.67;65;1703;2 305;3;1;0;67;3;0;90;NA;19.33;1071;61.67;65;1547;2 
306;3;0;1;54;2;0;100;42.71;7;1796;137.5;70;1735;0 307;3;1;0;63;4;0;50;29.81;18.43;3094;94;40;562;2 308;3;0;1;67;2;0;80;37.67;20.33;1107;64;95;1775;0 309;3;0;1;73;NA;0;70;43.95;4;962;69;65;821;2 310;3;0;0;57;NA;0;90;35.4;10;950.5;61;38;1746;0 311;3;1;0;68;4;0;80;36.43;27.67;2620;67.33;47;962;2 312;3;1;1;66;2;0;110;37.26;11;1130;73.33;65;1267;0 313;3;0;0;46;1;0;80;46.85;4;373;31;58;1648;0 314;3;1;0;42;2;0;60;43.68;85.33;3202;152;53;1725;0 315;3;0;0;57;1;0;80;48.92;4.667;664.3;115.3;48;1734;0 316;3;1;0;56;1;0;80;49.54;5.667;782;25;54;1733;0 317;1;1;0;35;NA;0;38;44.85;24.67;2390;188.7;71;1698;0 318;1;1;0;53;2;0;53;37.81;11;1825;98;62;795;0 319;1;0;0;40;2;0;70;48.02;9;1029;59.67;54;1621;0 320;1;0;0;63;2;0;56;38.16;11.33;1149;49.33;50;1593;0 321;3;1;0;61;4;0;70;35.12;45.33;1391;67.33;NA;978;2 322;3;0;0;40;3;0;80;39.05;32.5;1966;176.5;56;98;1 323;1;1;1;65;3;0;70;38.71;19.67;1431;65.33;67;1391;2 324;1;0;0;67;NA;0;62;33.12;13;1437;68;50;781;0 325;1;1;1;71;3;0;84;35.12;10.67;1516;47;57;1313;0 327;3;0;0;52;2;0;70;32.02;13;2901;89;60;1657;0 328;3;1;0;68;2;0;110;40.23;8.333;671;69.33;65;1644;0 329;3;0;0;58;3;1;70;34.78;13.33;428.7;44;78;1137;0 330;3;1;0;47;3;0;70;38.02;10;860.7;69.33;77;1642;0 331;3;0;0;60;2;0;70;40.3;15.5;1719;95;62;1606;0 332;3;1;0;63;2;0;100;44.92;8;826.7;40;53;1622;0 333;3;1;0;59;1;0;80;38.23;9;1826;122.7;54;1599;0 334;3;0;0;62;2;0;90;42.99;9;825;103;71;700;0 335;3;1;0;53;3;0;90;43.61;2.333;908;86.33;58;362;0 336;3;0;0;58;2;0;110;40.43;7.667;796;56.33;68;1591;0 337;3;1;0;54;4;0;80;41.06;16;1478;49;55;1169;2 338;3;0;0;50;4;0;60;35.12;304;1830;115.3;45;193;2 340;1;0;0;65;2;0;75;42.23;4;1022;42.33;48;1355;0 341;3;1;0;62;2;0;40;36.02;9.667;2003;23.33;49;1482;0 342;1;0;0;71;1;0;80;38.5;5.667;1958;44.33;54;1509;0 343;3;1;0;49;NA;0;60;20.63;44.5;1981;97.5;51;8;0 344;1;0;1;55;NA;0;72;38.5;14.67;1467;74.67;73;928;0 345;3;1;1;67;1;0;120;34.78;4.667;567.3;36.67;78;1392;0 346;3;1;0;53;1;0;70;41.68;6;1053;55;67;1068;0 347;3;0;0;58;2;0;60;38.43;5.333;861.3;64;52;1399;0 
349;1;0;0;60;1;0;91;44.92;6.333;3022;51;60;1327;0 350;1;0;0;56;NA;0;70;41.12;8.333;1189;60.67;58;1299;0 351;3;1;0;42;2;0;70;35.67;10.67;1041;102.7;55;1332;0 352;3;1;0;71;2;0;150;39.33;7;1747;63;59;1252;0 355;1;1;1;62;NA;0;79;45.13;11.33;1611;60.33;74;1313;0 356;1;1;0;67;3;0;75;40.09;19.33;1953;70;59;1250;0 357;1;1;0;68;3;0;83;37.4;8.667;1392;63;49;916;2 359;1;0;0;62;3;0;66;36.64;7.333;1024;80;72;1105;2 361;1;1;0;61;3;1;65;34.85;35.33;1193;129.7;53;1229;0 362;1;0;0;40;1;0;70;40.09;42.33;4014;134;62;1257;0 363;1;1;0;61;4;0;72;35.74;18.67;1259;66;52;378;2 364;1;0;0;71;4;0;135;NA;18.33;1280;114.5;85;1145;0 365;3;1;0;65;2;0;70;33.6;20.5;2527;104;59;1161;0 366;3;0;0;58;1;0;100;47.96;6.667;506.7;47.33;41;1200;0 367;3;0;0;53;4;0;80;40.99;9.667;387.3;27;66;1076;0 368;3;1;0;64;3;0;80;42.23;12;1646;90;70;754;0 369;1;1;0;59;3;0;72;41.75;11;610.7;39.33;71;1054;0 373;1;1;0;54;2;0;52;34.71;15.67;1009;124.3;59;649;0 374;3;0;0;50;1;0;90;46.85;12;1233;105.5;89;972;0 375;3;0;1;66;NA;0;110;36.02;10;1612;142;75;620;0 376;3;1;0;64;2;0;130;26.63;8;1032;88.33;59;684;0 378;1;1;0;67;2;0;79;41.26;17;2420;124.7;70;725;0 382;3;0;0;58;4;0;60;25.46;40;2132;201.3;63;650;0 383;3;0;0;63;2;0;80;34.98;26;1218;60;47;563;0 401;4;1;0;60;4;0;53.04;24;181.8;1015;97;60;35;2 402;4;1;0;53;3;0;70.72;41.1;16.53;850;58;76;1483;0 403;4;0;0;26;4;0;61.88;45;453.1;5108;154;59;371;1 404;4;0;0;39;2;0;44.2;41.4;94.62;3171;96.67;58;1198;2 405;4;0;0;49;4;0;79.56;38.2;205.2;3597;109.3;57;1324;2 406;4;0;0;38;3;0;88.4;40.5;15.96;789;136.7;55;1509;0 407;4;1;0;50;4;0;106.1;48.3;75.24;564.3;116.7;60;1493;0 408;4;1;0;41;2;0;97.24;43.7;12.54;1677;162.3;56;1444;0 409;4;1;0;57;2;0;79.56;39;46.17;1522;98.33;56;1249;2 410;4;1;0;50;2;0;61.88;41.7;27.36;1142;59.67;67;1446;0 411;4;0;1;49;3;0;70.72;44.3;38.19;2038;94.67;57;1325;2 412;4;0;0;43;NA;0;70.72;47;17.67;2157;158.7;51;1423;1 413;4;1;0;51;4;0;61.88;37.3;178.9;1700;162.3;52;1440;0 414;4;1;0;41;2;0;79.56;40.3;73.53;1617;88.33;64;1433;0 
415;4;0;0;44;4;0;44.2;32;334.6;1944;102;52;366;1 416;4;0;0;45;NA;0;106.1;41.5;13.68;2563;230;49;1441;0 417;4;1;0;53;3;0;79.56;39;42.18;1925;133.7;68;1423;0 418;4;1;0;71;3;0;79.56;56.7;21.09;3240;85;44;931;2 419;4;0;0;60;2;0;70.72;41;15.39;1637;127;50;1433;0 420;4;0;0;37;2;0;61.88;47;7.98;1874;110.7;53;1423;0 421;4;1;0;60;NA;0;70.72;38.6;140.8;2540;175.7;55;1134;2 422;4;1;0;40;3;0;70.72;45;17.1;880.7;90;48;1283;0 423;4;0;0;57;4;0;61.88;32.8;91.77;1440;120;55;1277;0 424;4;0;0;35;1;0;61.88;36;47.88;1769;231.3;47;1276;0 425;4;1;0;59;3;0;70.72;34;405.3;1320;169.3;42;280;2 426;4;0;0;53;1;0;61.88;39;29.64;1344;59.33;61;1273;0 427;4;1;0;43;3;0;106.1;34;83.79;436.7;211;51;1307;0 428;4;0;0;75;NA;0;70.72;35;78.66;941;73;50;351;0 429;4;1;0;58;2;0;79.56;37.6;14.25;1656;86.33;63;1262;0 430;4;1;0;39;NA;0;70.72;36;13.11;1797;157.3;70;1163;0 431;4;0;0;49;2;0;70.72;31.4;39.9;3023;132.7;51;1256;0 432;4;0;0;58;4;0;79.56;47;6.27;632.3;29.67;59;1255;0 433;4;0;0;53;1;0;79.56;42.7;8.55;380.7;70.33;60;1256;0 434;4;1;0;58;1;0;79.56;31;17.1;1007;49.33;54;1143;0 435;4;1;1;66;4;1;70.72;31;153.3;323.7;115.3;54;235;2 436;4;0;0;56;4;0;79.56;40;30.21;1090;84.67;63;1144;0 437;4;0;0;52;4;0;79.56;57;18.81;2717;126;58;1087;0 438;4;1;0;67;NA;0;88.4;27;42.75;587;145;43;1066;0 439;4;0;0;54;1;0;79.56;41;7.98;367.3;40;73;1038;0 440;4;1;0;58;1;0;88.4;39;10.26;730.3;71.33;65;989;0 441;4;1;0;45;2;0;79.56;33;37.05;1002;129;56;933;0 442;4;0;1;58;2;0;70.72;42;38.76;762;66;66;396;2 443;4;0;0;39;2;0;61.88;39;283.9;1243;246;46;487;1 444;4;1;0;55;2;0;79.56;35;46.17;753;67.33;61;679;2 445;4;1;0;49;NA;0;79.56;43;35.91;1882;104.3;53;891;0 446;4;0;1;53;2;0;88.4;41;114.6;3067;209.3;83;213;1 447;4;1;0;62;4;0;79.56;30;346.6;3728;136.7;62;743;2 448;4;0;0;48;3;1;79.56;46;26.79;1058;70;61;926;0 449;4;0;0;48;2;0;70.72;40;313.4;1138;115.3;49;254;2 450;4;1;0;37;4;0;44.2;42;19.38;2484;161.3;49;810;0 451;4;0;0;40;NA;0;79.56;NA;18.24;616.7;58.33;53;810;0 452;4;1;0;64;NA;0;79.56;32;10.26;1063;182;53;678;0 
453;4;0;0;36;NA;0;53.04;42;13.11;1676;113;67;674;0 454;4;0;0;59;2;0;88.4;45;15.39;753;77.33;45;600;0 455;4;1;0;32;3;0;70.72;30;44.46;882.7;174.7;65;608;0 456;4;1;1;44;NA;0;106.1;25;23.94;616.7;87.33;72;690;0 457;4;0;0;47;NA;0;70.72;48;15.39;1341;197;57;646;0 458;4;1;0;64;NA;1;79.56;33;33.06;458.3;18.67;58;365;0 459;4;0;0;37;3;0;88.4;40;126.5;1384;293.7;55;656;0 460;4;1;0;60;NA;0;70.72;47;21.09;771.3;84;42;576;0 461;4;0;0;57;1;0;79.56;58;15.96;560.3;81.33;75;552;0 462;4;1;0;61;4;0;79.56;31;133.4;1061;316.3;66;107;2 463;4;0;0;59;1;1;106.1;40;10.26;1297;91.67;57;196;0 464;4;1;0;56;4;0;97.24;35;212;1278;136.7;51;156;2 465;4;1;0;51;2;0;97.24;40;14.82;316;34.33;57;466;0 466;4;0;0;55;NA;0;88.4;37;14.82;855.7;73;58;482;0 467;4;1;1;54;2;0;88.4;38;14.82;1228;127.3;76;405;0 468;4;0;0;67;NA;0;70.72;29;167.6;1215;172;55;117;2 469;4;0;0;61;2;0;79.56;40;19.38;736.7;65.33;59;458;0 470;4;0;0;38;NA;0;70.72;39;13.11;985.3;81.33;60;380;0 471;4;1;1;34;NA;0;79.56;44;17.1;683;43;53;436;0 472;4;1;0;50;4;0;61.88;39;122.5;683.7;88.33;65;384;0 473;4;1;0;47;4;0;79.56;41;17.1;818.3;109.7;58;384;0 474;4;1;0;46;NA;0;61.88;30;421.8;3313;171.3;45;347;1 475;4;0;0;61;2;1;79.56;32;26.79;1440;129.7;59;355;0 476;4;0;0;53;2;0;79.56;37;17.1;2259;138;60;355;0 477;4;0;0;57;1;0;79.56;42;19.38;1282;105;68;355;0 478;4;0;1;40;1;0;88.4;45;12.54;457.7;65.67;74;355;0 479;4;1;0;53;3;0;79.56;38;18.81;1447;124;54;355;0 501;5;1;0;53;4;0;88.4;25;126.7;266.3;32.67;65;1238;1 502;5;0;0;48;1;0;78.68;45.9;13.34;455.3;14.37;60;1234;0 503;5;1;0;58;4;0;79.56;36;52.04;694.3;50.67;77;1168;1 504;5;0;0;53;1;0;105.2;44.2;19.27;227;17.9;73;1125;0 505;5;0;0;55;2;0;92.82;45;27.36;914.7;60;56;1125;0 506;5;1;0;47;2;0;82.21;37;13.96;434.3;39;72;947;0 507;5;0;0;47;4;0;NA;37;192.7;1973;41.67;59;489;1 508;5;1;0;74;4;0;88.4;39;38.82;194;31;63;940;0 509;5;1;0;64;4;0;97.24;35;35.8;261.7;53.33;67;929;0 510;5;0;0;57;3;0;79.56;44.5;18.52;324.3;66.33;67;878;0 511;5;0;0;55;NA;0;88.4;38;66.69;707.7;72.67;70;368;2 
512;5;1;1;33;NA;0;NA;40;354.5;2910;87.67;80;858;0 513;5;0;0;43;1;0;61.88;47;40.47;597.7;63;73;814;0 514;5;1;0;58;3;0;NA;35;247.9;757.3;87;44;286;2 515;5;0;0;66;3;0;106.1;40;29.64;931.3;45.33;55;802;0 516;5;1;0;59;1;0;97.24;41;11.97;1036;46.33;75;754;0 517;5;0;1;64;3;0;123.8;48;41.67;578.7;44.67;60;767;0 518;5;1;0;48;1;0;79.56;47;13.11;494;46;56;703;0 519;5;0;0;57;4;0;70.72;43;25.65;762;43.67;43;682;0 520;5;1;0;52;3;0;97.24;46;9.12;615.7;27.67;75;653;0 521;5;0;0;63;2;0;79.56;49;12.77;553.3;39.33;59;640;0 522;5;1;0;57;1;0;88.4;47;17.67;213;17.67;90;562;0 523;5;0;0;52;2;0;79.56;40;11.4;203.3;28.33;71;380;0 601;6;1;0;55;3;0;68;36;111.7;509.7;56;47;1166;1 602;6;0;1;73;NA;0;96;36;216.3;578;115;58;592;2 603;6;0;0;64;NA;0;81;27;11;179.7;81.67;65;787;0 604;6;0;0;43;2;0;70;37;18;529.3;55.33;50;1144;0 605;6;0;0;56;NA;0;99;36;16.67;150.7;28.67;86;1125;0 606;6;1;0;64;NA;0;80;38;23;878.3;90;55;31;0 607;6;1;0;43;NA;0;55;43;4.667;239.3;50;60;6;0 608;6;1;0;56;1;0;68;40;5.333;598;119;60;964;0 609;6;1;0;44;NA;0;75;33;74;560.7;122.7;48;930;0 610;6;0;0;34;1;0;74;42;27.67;66.33;23.33;67;923;0 611;6;0;0;68;4;0;99;30;24;218.3;97.33;58;945;2 612;6;0;0;46;1;0;72;40;9.333;474.3;87.67;53;901;0 613;6;1;0;48;1;0;77;39;12;447;85.67;43;944;0 614;6;1;0;50;2;0;84;34;13.67;733;79.33;52;818;0 615;6;0;0;63;4;0;75;31;20;375;90;53;818;0 616;6;0;1;63;NA;0;83;36;14;594;48;72;818;0 617;6;0;0;57;2;0;70;38;9.333;149.3;25.33;74;811;0 618;6;1;0;60;4;0;88;35;159.7;385;102.3;56;797;0 619;6;1;0;55;NA;0;80;37;28;1112;71.67;59;775;0 620;6;1;0;66;2;0;58;35.9;8.197;205;37.33;48;749;0 621;6;1;0;33;2;0;74;31;29;286.7;97.67;49;663;0 622;6;1;0;30;2;0;70;36;13.4;541.3;87.33;57;656;0 623;6;0;0;33;2;0;70;39;167.7;840.7;224.3;53;655;0 624;6;0;0;55;3;0;75.6;43.8;18.33;634.3;70;52;508;0 625;6;0;0;56;2;0;131;48.6;9;524;59;58;508;0 626;6;0;0;63;2;0;74.8;44;11.33;817.7;67.67;98;485;0 627;6;1;0;51;1;0;80.2;44;10.33;478;40;56;590;0 628;6;1;0;62;4;0;81.7;51.1;12;189;31;58;398;0 701;2;0;0;45;4;0;74;32;36;690;311;70;724;0 
702;2;1;0;51;4;1;72;34;28;408.3;77;63;632;0 703;2;1;0;61;4;0;59;31;66;1114;95;65;576;0 704;2;0;0;55;4;1;124;36;90;297.3;145;64;24;2 705;2;0;0;50;3;1;79;43;15;275;68.5;44;695;0 706;2;1;1;41;4;0;89;36;41;262;162.5;57;362;1 707;2;0;0;46;2;0;71;41;11;767;34;62;632;0 708;2;1;0;63;4;1;52;36;24;273;57;40;548;0 709;2;0;0;54;3;0;59;31;65;1587;269;55;604;0 710;2;1;0;63;1;1;81;29;41;414;49;80;552;0 711;2;0;0;56;NA;0;53;38;17;243;33;53;545;0 712;2;1;1;62;3;0;78;45;19;870;68;82;1;0 713;2;1;0;35;2;0;68;37;19;1101;173;57;495;0 714;2;0;1;50;2;0;77;50;21.5;1194;108;71;503;0 715;2;1;0;60;4;0;57;30;166;900;NA;52;195;2 716;2;0;0;60;NA;0;76;35;58;340;118;61;509;0 717;2;0;0;51;2;0;65;47;15;455;75;60;379;0 718;2;1;0;55;NA;1;82;35;17.33;223;29.33;71;443;0 719;2;0;0;33;2;0;67;41;16;962;100;57;325;0 720;2;1;1;56;2;0;80;48;11;1204;226;NA;356;0 721;2;1;0;57;NA;0;68;40;24.5;434;32.5;70;346;0 722;2;0;0;59;4;1;58;34;62;1163;101.3;46;276;0 723;2;0;0;51;4;0;95;30;48;959;119;NA;207;0 724;2;1;0;56;3;1;81;36;9;616;88;NA;317;0 725;2;1;0;44;4;1;67;28;123.7;444.7;135.3;57;241;0 726;2;1;0;57;4;0;71;36;95.5;1457;118;50;241;0 727;2;0;1;45;3;0;76;41;19;683;89;65;166;0 728;2;0;0;61;NA;1;102;39;24;490;99;NA;87;0 729;2;1;1;40;NA;0;49;41;7;464;82.5;60;252;0 730;2;0;0;39;NA;0;49;37;44.67;867;35.67;NA;130;0 732;2;1;0;65;4;0;199;35;31.5;114.5;111;70;166;0 735;2;1;0;42;NA;0;61;41;24;1082;89;NA;143;0 736;2;1;0;54;1;0;73;38;8;430;68;63;72;0 pec/data/GBSG2.csv0000644000176200001440000006254313571203267013273 0ustar liggesusers"horTh";"age";"menostat";"tsize";"tgrade";"pnodes";"progrec";"estrec";"time";"cens" "no";70;"Post";21;"II";3;48;66;1814;1 "yes";56;"Post";12;"II";7;61;77;2018;1 "yes";58;"Post";35;"II";9;52;271;712;1 "yes";59;"Post";17;"II";4;60;29;1807;1 "no";73;"Post";35;"II";1;26;65;772;1 "no";32;"Pre";57;"III";24;0;13;448;1 "yes";59;"Post";8;"II";2;181;0;2172;0 "no";65;"Post";16;"II";1;192;25;2161;0 "no";80;"Post";39;"II";30;0;59;471;1 "no";66;"Post";18;"II";7;0;3;2014;0 "yes";68;"Post";40;"II";9;16;20;577;1 
"yes";71;"Post";21;"II";9;0;0;184;1 "yes";59;"Post";58;"II";1;154;101;1840;0 "no";50;"Post";27;"III";1;16;12;1842;0 "yes";70;"Post";22;"II";3;113;139;1821;0 "no";54;"Post";30;"II";1;135;6;1371;1 "no";39;"Pre";35;"I";4;79;28;707;1 "yes";66;"Post";23;"II";1;112;225;1743;0 "yes";69;"Post";25;"I";1;131;196;1781;0 "no";55;"Post";65;"I";4;312;76;865;1 "no";56;"Post";22;"II";1;28;23;1684;1 "no";57;"Post";21;"II";2;184;294;1701;0 "no";65;"Post";25;"III";1;0;0;1701;0 "yes";70;"Post";15;"II";3;89;151;1693;0 "no";65;"Post";70;"III";26;2;64;379;1 "no";44;"Pre";23;"II";2;299;35;1105;1 "yes";59;"Post";23;"III";3;8;0;548;1 "no";43;"Pre";35;"II";4;37;5;1296;1 "yes";53;"Post";58;"II";1;0;0;1483;0 "no";32;"Pre";25;"II";2;36;10;1570;0 "no";45;"Pre";45;"III";2;0;0;1469;0 "no";36;"Pre";44;"III";2;6;5;1472;0 "yes";57;"Post";35;"III";1;1490;209;1342;0 "no";55;"Post";25;"I";2;26;53;1349;0 "no";34;"Pre";15;"II";5;103;118;1162;1 "yes";58;"Post";35;"II";2;38;18;1342;0 "no";62;"Post";22;"II";12;0;8;797;1 "no";64;"Post";25;"I";9;67;86;1232;0 "no";53;"Post";23;"II";3;13;7;1230;0 "no";53;"Post";13;"II";8;423;175;1205;0 "no";65;"Post";52;"III";7;25;155;1090;0 "no";45;"Pre";38;"II";38;160;5;1095;0 "no";58;"Post";42;"III";1;0;0;449;1 "yes";68;"Post";23;"II";1;27;5;972;0 "yes";67;"Post";25;"II";1;15;55;825;0 "no";59;"Post";25;"I";2;33;51;2438;0 "no";65;"Post";20;"II";1;6;6;2233;0 "yes";34;"Pre";30;"III";12;0;5;286;1 "yes";65;"Post";18;"II";5;133;175;1861;0 "no";61;"Post";30;"II";9;41;51;1080;1 "yes";61;"Post";25;"II";1;21;172;1521;1 "no";46;"Post";25;"II";1;2;0;1693;0 "no";63;"Post";25;"II";1;86;366;1528;1 "yes";45;"Pre";19;"II";7;19;0;169;1 "no";46;"Pre";35;"II";7;67;44;272;1 "no";63;"Post";40;"II";3;5;8;731;1 "yes";53;"Pre";21;"II";9;29;9;2059;0 "yes";43;"Post";40;"I";4;233;19;1853;0 "no";31;"Pre";23;"II";4;20;0;1854;0 "yes";71;"Post";15;"II";9;85;9;1645;0 "yes";59;"Post";28;"II";18;0;7;544;1 "no";62;"Post";15;"II";4;22;70;1666;0 "no";54;"Post";30;"II";2;31;11;353;1 
"no";46;"Pre";25;"II";13;82;20;1791;0 "yes";53;"Post";25;"II";2;9;1;1685;0 "no";45;"Pre";10;"II";1;14;3;191;1 "no";48;"Pre";30;"II";4;19;4;370;1 "no";32;"Pre";20;"II";5;55;41;173;1 "no";30;"Pre";12;"II";11;4;3;242;1 "no";53;"Post";16;"III";1;1;1;420;1 "no";42;"Pre";12;"II";6;388;30;438;1 "no";48;"Pre";35;"II";1;41;61;1624;0 "yes";54;"Post";30;"II";6;15;81;1036;1 "no";56;"Post";25;"II";11;0;36;359;1 "no";51;"Pre";25;"II";16;91;31;171;1 "no";68;"Post";18;"II";14;0;2;959;1 "no";46;"Pre";21;"II";3;73;13;1351;0 "no";41;"Pre";15;"II";4;11;11;486;1 "no";48;"Pre";16;"III";10;0;0;525;1 "no";55;"Pre";23;"II";3;295;34;762;1 "no";52;"Pre";36;"II";6;6;16;175;1 "no";36;"Pre";8;"III";1;10;0;1195;0 "no";44;"Pre";25;"III";6;5;2;338;1 "no";47;"Post";20;"III";6;408;36;1125;0 "yes";47;"Post";40;"III";6;187;24;916;0 "yes";59;"Post";23;"II";1;13;20;972;0 "no";65;"Post";10;"II";3;42;59;867;0 "no";42;"Pre";25;"II";7;0;2;249;1 "no";63;"Post";32;"II";16;7;132;281;1 "no";40;"Pre";22;"II";2;13;18;758;0 "yes";62;"Post";50;"II";11;1;2;377;1 "no";55;"Post";40;"I";2;64;81;1976;0 "yes";47;"Pre";45;"II";2;264;59;2539;0 "no";63;"Post";23;"II";3;22;32;2467;0 "no";69;"Post";20;"II";2;154;191;876;1 "no";43;"Pre";21;"II";1;206;87;2132;0 "no";59;"Post";24;"II";14;2;22;426;1 "no";75;"Post";50;"II";1;170;317;554;1 "yes";41;"Pre";40;"II";4;100;100;1246;1 "no";47;"Pre";36;"III";2;154;99;1926;0 "no";43;"Pre";80;"II";20;2;25;1207;1 "no";42;"Pre";30;"III";4;65;81;1852;0 "no";46;"Pre";35;"I";5;100;0;1174;1 "no";65;"Post";58;"II";11;390;119;1250;0 "no";59;"Post";30;"II";3;0;2;530;1 "no";48;"Pre";70;"II";7;8;0;1502;0 "no";44;"Pre";27;"II";3;525;61;1364;0 "no";53;"Post";25;"II";13;77;131;1170;1 "no";53;"Post";25;"II";2;54;58;1729;0 "no";60;"Pre";23;"II";3;136;507;1642;0 "no";64;"Post";24;"II";2;206;304;1218;1 "no";56;"Post";8;"II";1;110;0;1358;0 "no";66;"Post";30;"II";16;0;508;360;1 "no";50;"Pre";30;"II";1;183;243;550;1 "yes";63;"Post";22;"II";9;64;19;857;0 "no";61;"Post";60;"II";51;45;38;768;0 
"no";46;"Pre";26;"I";3;33;68;858;0 "yes";63;"Post";23;"II";3;3;2;770;0 "no";49;"Pre";55;"II";7;0;0;679;1 "no";33;"Pre";35;"III";1;26;0;1164;1 "no";50;"Post";52;"II";1;0;0;350;1 "no";45;"Pre";29;"II";1;0;0;578;1 "no";51;"Pre";20;"II";1;0;0;1460;1 "no";39;"Pre";30;"III";1;0;0;1434;0 "yes";56;"Post";40;"II";3;0;3;1763;1 "no";60;"Post";15;"II";2;84;93;889;1 "yes";47;"Pre";35;"III";17;14;3;357;1 "no";58;"Post";50;"II";7;77;77;547;1 "yes";56;"Pre";21;"II";3;111;20;1722;0 "yes";54;"Post";21;"II";1;7;139;2372;0 "yes";56;"Post";40;"II";3;0;59;2030;1 "no";57;"Post";26;"II";1;166;521;1002;1 "no";53;"Post";10;"II";1;17;61;1280;1 "no";31;"Pre";60;"II";7;542;77;338;1 "yes";41;"Pre";80;"II";1;0;0;533;1 "yes";66;"Post";33;"II";3;0;0;168;0 "yes";37;"Pre";25;"II";1;235;38;1169;0 "no";66;"Post";15;"II";1;252;185;1675;1 "no";48;"Pre";45;"III";1;0;0;1862;0 "no";44;"Pre";21;"II";3;1600;70;629;0 "no";51;"Pre";50;"II";9;0;0;1167;0 "no";57;"Post";20;"II";3;39;83;495;1 "no";65;"Post";17;"I";1;935;200;967;0 "yes";40;"Pre";30;"II";2;320;30;1720;0 "yes";62;"Post";19;"II";1;35;1060;598;1 "yes";64;"Post";30;"III";12;0;0;392;1 "no";46;"Pre";12;"II";3;175;80;1502;0 "yes";62;"Post";25;"II";1;35;185;229;0 "no";44;"Pre";30;"II";7;110;20;310;0 "no";69;"Post";27;"I";3;140;350;1296;0 "no";48;"Pre";15;"II";6;0;110;488;0 "no";47;"Post";12;"II";2;0;50;942;0 "yes";64;"Post";26;"II";5;370;220;570;0 "no";58;"Post";52;"III";5;0;0;1177;0 "yes";65;"Post";30;"II";5;85;365;1113;0 "no";40;"Pre";40;"II";5;50;75;288;1 "yes";62;"Post";21;"II";2;0;0;723;0 "no";55;"Post";20;"III";16;0;0;403;1 "no";62;"Post";25;"III";5;0;0;1225;1 "no";29;"Pre";12;"II";4;32;150;338;1 "no";38;"Pre";18;"III";5;141;105;1337;1 "no";52;"Pre";20;"I";1;78;14;1420;1 "no";47;"Post";55;"II";18;29;87;2048;0 "no";53;"Pre";75;"III";19;375;107;600;1 "no";37;"Pre";15;"I";1;162;22;1765;0 "no";63;"Post";60;"II";15;180;12;491;1 "no";63;"Post";45;"III";7;20;93;305;1 "no";59;"Post";22;"II";2;23;235;1582;0 "no";48;"Pre";30;"II";15;250;45;1771;0 
"no";33;"Pre";15;"III";33;66;8;960;1 "no";38;"Pre";57;"III";9;18;62;571;1 "no";32;"Pre";28;"II";12;33;107;675;0 "no";31;"Pre";28;"II";2;349;189;285;1 "no";53;"Post";48;"II";7;254;117;1472;0 "no";47;"Pre";30;"II";1;422;89;1279;1 "no";40;"Pre";24;"I";3;25;11;148;0 "yes";64;"Post";19;"II";1;19;9;1863;0 "yes";49;"Post";56;"I";3;356;64;1933;0 "no";53;"Post";52;"II";9;6;29;358;1 "yes";70;"Post";18;"II";1;107;307;734;0 "yes";61;"Post";22;"II";2;6;173;2372;1 "no";43;"Pre";30;"II";1;22;0;2563;0 "yes";74;"Post";20;"II";1;462;240;2372;0 "yes";58;"Post";18;"I";2;74;67;1989;1 "yes";49;"Pre";20;"II";6;56;98;2015;1 "yes";61;"Post";35;"III";2;23;9;1956;0 "no";66;"Post";40;"III";16;21;412;945;1 "yes";66;"Post";20;"III";3;54;17;2153;0 "no";59;"Post";23;"II";2;88;38;838;1 "no";51;"Post";70;"III";6;28;5;113;1 "yes";71;"Post";18;"II";2;31;9;1833;0 "no";46;"Pre";50;"III";10;44;4;1722;0 "no";52;"Pre";40;"III";6;32;5;241;1 "yes";60;"Post";16;"II";1;184;51;1352;1 "no";60;"Post";50;"II";7;65;30;1702;0 "yes";67;"Post";27;"II";4;1118;753;1222;0 "no";54;"Post";30;"III";3;1;0;1089;0 "no";55;"Post";12;"II";1;63;19;1243;0 "no";38;"Pre";20;"II";9;24;34;579;1 "yes";52;"Post";25;"II";13;31;196;1043;1 "no";43;"Pre";30;"II";3;45;11;2234;0 "no";50;"Pre";22;"I";1;135;111;2297;0 "yes";61;"Post";25;"I";2;32;144;2014;0 "no";62;"Post";20;"II";2;7;9;518;1 "no";46;"Pre";30;"III";1;36;33;940;0 "no";50;"Pre";25;"III";1;20;13;766;0 "no";52;"Post";20;"III";10;7;8;251;1 "no";45;"Pre";20;"II";2;64;48;1959;0 "no";52;"Post";10;"II";3;109;12;1897;0 "no";51;"Post";120;"II";12;3;1;160;1 "no";66;"Post";28;"II";2;488;298;970;0 "no";50;"Pre";35;"I";1;408;44;892;0 "yes";60;"Post";32;"I";3;104;203;753;0 "no";61;"Post";20;"II";5;25;75;348;1 "yes";64;"Post";45;"III";5;1;8;275;1 "no";64;"Post";17;"I";1;227;0;1329;1 "no";51;"Post";35;"III";1;6;1;1193;1 "yes";63;"Post";30;"II";7;0;0;698;1 "no";62;"Post";12;"II";7;0;0;436;1 "yes";65;"Post";18;"III";1;0;0;552;1 "yes";67;"Post";20;"II";1;0;0;564;1 
"no";62;"Post";30;"II";1;8;371;2239;0 "yes";48;"Pre";25;"II";1;235;33;2237;0 "no";67;"Post";25;"II";1;6;19;529;1 "no";46;"Pre";11;"II";2;0;0;1820;0 "yes";56;"Post";20;"I";1;2;334;1756;0 "yes";72;"Post";34;"III";36;2;1091;515;1 "yes";50;"Post";70;"II";19;10;57;272;1 "no";58;"Post";21;"III";2;1;1;891;1 "no";63;"Post";21;"II";1;0;378;1356;0 "no";45;"Post";15;"II";6;1;162;1352;0 "no";46;"Pre";21;"III";1;7;109;1077;0 "yes";58;"Post";18;"II";3;64;418;675;1 "yes";60;"Post";39;"III";9;0;0;855;0 "no";53;"Post";30;"III";1;1;4;740;0 "yes";63;"Post";21;"II";1;26;30;2551;0 "no";60;"Post";35;"II";12;41;62;754;1 "no";33;"Pre";25;"II";8;96;13;819;1 "yes";63;"Post";19;"II";5;18;38;1280;1 "no";70;"Post";16;"II";2;126;338;2388;0 "yes";60;"Post";30;"II";2;92;18;2296;0 "yes";54;"Post";25;"II";1;5;57;1884;0 "yes";64;"Post";25;"III";3;56;272;1059;1 "no";57;"Post";55;"III";6;22;186;859;0 "yes";50;"Post";21;"I";1;82;2;1109;0 "no";53;"Post";20;"II";1;1;1;1192;1 "no";77;"Post";20;"III";4;94;325;1806;1 "yes";47;"Pre";60;"II";15;5;38;500;1 "no";41;"Pre";20;"II";4;8;38;1589;1 "yes";47;"Pre";30;"II";5;12;11;1463;1 "yes";63;"Post";25;"II";2;8;195;1826;0 "no";48;"Pre";22;"II";4;26;29;1231;0 "no";40;"Pre";15;"II";1;204;138;1117;0 "yes";57;"Post";30;"II";8;40;40;836;1 "no";47;"Pre";40;"II";2;33;59;1222;0 "no";46;"Pre";22;"II";4;24;74;663;0 "yes";58;"Post";35;"III";7;0;0;722;1 "yes";51;"Pre";25;"II";1;167;109;322;0 "yes";62;"Post";23;"II";2;0;14;1150;1 "no";50;"Pre";60;"III";4;0;0;446;1 "yes";65;"Post";30;"II";5;0;36;1855;0 "yes";59;"Post";30;"II";8;0;0;238;1 "no";49;"Pre";18;"II";2;0;0;1838;0 "yes";52;"Post";25;"II";13;0;0;1826;0 "no";45;"Pre";30;"II";1;0;0;1093;1 "no";49;"Post";14;"II";1;0;0;2051;0 "no";58;"Post";45;"III";4;0;0;370;1 "no";25;"Pre";22;"II";2;250;87;861;1 "no";50;"Pre";30;"III";6;0;0;1587;1 "no";43;"Pre";27;"II";1;23;9;552;1 "no";46;"Pre";12;"II";1;6;49;2353;0 "yes";64;"Post";24;"III";5;366;201;2471;0 "yes";63;"Post";43;"II";5;21;174;893;1 "no";40;"Pre";35;"II";2;279;99;2093;1 
"yes";57;"Post";22;"II";4;16;5;2612;0 "yes";58;"Post";56;"I";11;51;50;956;1 "yes";62;"Post";25;"III";4;12;49;1637;0 "yes";50;"Pre";42;"I";2;238;26;2456;0 "no";49;"Post";30;"II";4;40;177;2227;0 "no";64;"Post";24;"II";2;41;80;1601;1 "yes";66;"Post";15;"II";2;15;42;1841;0 "yes";37;"Pre";30;"II";4;104;107;2177;0 "no";60;"Post";18;"III";2;12;8;2052;0 "yes";63;"Post";23;"III";12;3;2;973;0 "no";51;"Pre";12;"I";2;55;64;2156;0 "yes";49;"Pre";28;"I";4;364;120;1499;0 "yes";57;"Post";7;"II";1;1;1;2030;0 "yes";68;"Post";14;"II";6;40;68;573;1 "no";47;"Pre";25;"II";1;199;134;1666;0 "no";51;"Post";13;"II";5;89;134;1979;0 "yes";49;"Pre";19;"I";5;69;14;1786;0 "no";63;"Post";28;"II";4;258;46;1847;0 "yes";64;"Post";15;"II";1;340;71;2009;0 "no";65;"Post";24;"II";1;328;115;1926;0 "yes";63;"Post";13;"II";1;124;361;1490;0 "no";33;"Pre";23;"III";10;2;3;233;1 "no";44;"Pre";35;"II";6;26;4;1240;0 "no";47;"Pre";13;"II";3;242;14;1751;0 "no";46;"Pre";19;"I";11;56;24;1878;0 "no";52;"Pre";26;"II";1;258;10;1171;0 "no";62;"Post";55;"III";8;3;2;1751;0 "yes";61;"Post";24;"II";2;28;50;1756;0 "no";60;"Post";27;"II";6;401;159;714;1 "yes";67;"Post";44;"II";10;431;267;1505;0 "no";47;"Pre";78;"II";14;168;53;776;1 "no";70;"Post";38;"III";2;24;15;1443;0 "no";50;"Pre";11;"I";1;10;11;1317;0 "yes";62;"Post";20;"II";1;11;6;870;0 "no";58;"Post";30;"III";13;7;46;859;1 "no";59;"Post";20;"II";1;2;4;223;1 "no";45;"Pre";18;"I";1;56;40;1212;0 "no";45;"Pre";30;"II";3;345;31;1119;0 "no";41;"Pre";34;"II";10;25;10;740;0 "yes";54;"Post";29;"II";10;26;284;1062;0 "no";50;"Pre";29;"I";2;90;30;8;0 "yes";52;"Post";20;"II";1;1;8;936;0 "no";59;"Post";45;"II";6;739;526;740;0 "yes";60;"Post";24;"III";7;10;10;632;1 "yes";51;"Pre";30;"III";2;1152;38;1760;0 "no";56;"Post";40;"III";1;0;3;1013;0 "no";48;"Pre";20;"III";7;0;0;779;0 "no";49;"Pre";45;"III";6;0;22;375;1 "yes";47;"Pre";42;"II";7;164;204;1323;0 "no";37;"Pre";50;"III";2;170;130;1233;0 "no";54;"Pre";35;"II";2;145;16;986;0 "no";49;"Pre";35;"II";7;3;0;650;0 
"no";54;"Post";28;"III";4;1;2;628;0 "no";44;"Pre";29;"II";1;27;23;1866;0 "yes";38;"Pre";18;"II";4;28;5;491;1 "yes";51;"Pre";34;"II";3;13;12;1918;1 "no";59;"Post";8;"II";5;1;30;72;1 "yes";52;"Post";49;"III";6;8;5;1140;1 "yes";64;"Post";32;"II";4;402;372;799;1 "no";55;"Post";37;"II";1;82;234;1105;1 "no";61;"Post";22;"II";2;179;124;548;1 "yes";44;"Pre";28;"III";17;2;3;227;1 "no";38;"Pre";24;"II";3;13;5;1838;0 "yes";43;"Pre";11;"I";1;126;22;1833;0 "no";65;"Post";36;"III";2;9;7;550;1 "yes";59;"Post";48;"III";1;5;17;426;1 "no";38;"Pre";31;"I";10;365;206;1834;0 "no";47;"Pre";25;"II";3;18;42;1604;0 "no";59;"Post";35;"II";5;5;125;772;0 "yes";47;"Post";30;"I";9;114;26;1146;1 "no";36;"Pre";25;"II";2;70;22;371;1 "no";47;"Pre";24;"II";20;30;8;883;1 "no";38;"Pre";23;"III";3;14;6;1735;0 "yes";50;"Post";23;"II";8;98;30;554;1 "no";44;"Pre";5;"II";10;11;10;790;1 "no";54;"Post";22;"II";2;211;129;1340;0 "no";52;"Pre";30;"II";12;11;20;490;1 "no";34;"Pre";3;"III";1;14;11;1557;0 "no";64;"Post";33;"III";3;20;14;594;1 "yes";54;"Post";19;"III";9;9;2;828;0 "no";65;"Post";27;"II";4;148;191;594;1 "no";49;"Pre";24;"II";11;106;62;841;0 "yes";70;"Post";17;"I";1;142;329;695;0 "yes";47;"Pre";30;"I";3;195;45;2556;0 "no";51;"Pre";20;"II";1;77;89;1753;1 "no";63;"Post";15;"III";5;0;0;417;1 "no";36;"Pre";30;"III";2;0;0;956;1 "yes";63;"Post";34;"II";12;223;236;1846;0 "no";47;"Pre";70;"II";5;796;24;1703;0 "no";51;"Pre";21;"III";1;0;0;1720;0 "yes";62;"Post";30;"II";1;88;544;1355;0 "no";56;"Post";40;"III";3;0;0;1603;0 "no";62;"Post";33;"I";5;239;76;476;1 "yes";61;"Post";30;"II";8;472;293;1350;0 "yes";55;"Post";15;"III";3;97;194;1341;0 "yes";56;"Post";11;"II";1;270;369;2449;0 "no";69;"Post";22;"II";8;282;191;2286;1 "no";57;"Post";25;"II";3;48;65;456;1 "no";27;"Pre";22;"II";1;56;99;536;1 "no";38;"Pre";25;"II";1;102;11;612;1 "no";42;"Pre";25;"III";2;11;10;2034;1 "no";69;"Post";19;"I";3;73;386;1990;1 "no";61;"Post";50;"II";4;10;10;2456;1 "no";53;"Pre";13;"III";3;10;20;2205;0 "no";50;"Pre";25;"III";1;24;85;544;1 
"no";52;"Pre";27;"II";5;0;8;336;1 "no";47;"Pre";38;"II";2;58;10;2057;0 "no";65;"Post";27;"II";19;23;13;575;1 "no";48;"Pre";38;"II";3;92;41;2011;0 "no";61;"Post";38;"II";17;46;52;537;1 "yes";47;"Pre";12;"II";1;110;14;2217;0 "no";46;"Post";20;"II";11;680;152;1814;1 "yes";59;"Post";15;"II";1;30;122;890;1 "yes";60;"Post";22;"III";1;218;442;1114;0 "no";65;"Post";33;"II";6;11;28;974;0 "yes";44;"Pre";28;"II";1;0;0;296;0 "yes";45;"Pre";100;"II";6;178;77;2320;0 "no";58;"Post";35;"I";6;130;162;795;1 "no";51;"Post";40;"II";8;132;64;867;1 "no";49;"Pre";15;"II";1;111;19;1703;0 "no";43;"Pre";30;"II";2;32;16;670;1 "no";37;"Pre";35;"II";7;53;19;981;1 "no";51;"Pre";30;"II";2;505;270;1094;0 "yes";48;"Pre";35;"II";1;340;32;755;1 "no";54;"Post";21;"II";7;6;8;1388;1 "no";64;"Post";21;"III";1;4;3;1387;1 "no";44;"Pre";55;"III";4;8;8;535;1 "no";67;"Post";30;"II";2;5;14;1653;0 "no";63;"Post";24;"II";3;46;25;1904;0 "yes";42;"Pre";28;"III";4;27;22;1868;0 "yes";60;"Post";12;"I";2;402;90;1767;0 "no";39;"Pre";20;"II";1;38;110;855;1 "no";53;"Post";16;"II";1;16;120;1157;1 "yes";38;"Pre";61;"II";8;624;569;1869;0 "no";61;"Post";40;"I";15;185;206;1152;0 "no";47;"Pre";15;"II";1;38;0;1401;0 "no";52;"Post";25;"III";3;10;15;918;0 "no";67;"Post";65;"II";8;0;0;745;1 "yes";61;"Post";25;"II";18;595;419;1283;0 "yes";57;"Post";15;"II";3;44;78;1481;1 "yes";42;"Pre";9;"I";8;77;40;1807;0 "yes";39;"Pre";20;"III";1;2;2;542;1 "no";34;"Pre";50;"III";7;4;1;1441;0 "yes";52;"Pre";50;"II";7;45;39;1277;0 "yes";53;"Pre";45;"II";4;395;44;1486;0 "no";49;"Pre";20;"I";3;151;16;273;0 "yes";46;"Pre";23;"III";8;2;1;177;1 "no";36;"Pre";36;"II";1;76;14;545;1 "no";39;"Pre";28;"II";3;5;4;1185;0 "no";46;"Pre";28;"III";16;12;8;631;0 "no";47;"Pre";70;"II";1;51;28;995;0 "no";46;"Pre";45;"I";9;239;58;1088;0 "no";47;"Pre";35;"II";1;48;68;877;0 "no";57;"Post";18;"II";6;74;124;798;0 "yes";60;"Post";25;"II";7;116;435;2380;0 "yes";64;"Post";36;"II";2;122;198;1679;1 "yes";54;"Post";40;"III";4;3;2;498;1 "no";54;"Post";27;"II";5;138;23;2138;0 
"no";46;"Pre";35;"II";6;405;27;2175;0 "no";49;"Pre";17;"II";2;324;94;2271;0 "no";50;"Pre";18;"III";1;1;4;17;0 "yes";55;"Post";15;"II";3;16;14;964;1 "yes";45;"Pre";23;"II";4;1;4;540;1 "no";51;"Post";30;"III";10;15;103;747;1 "no";43;"Pre";25;"II";11;1;1;650;1 "yes";59;"Post";30;"II";13;7;81;410;1 "no";59;"Post";27;"III";20;9;2;624;1 "no";47;"Pre";28;"III";7;16;92;1560;0 "no";48;"Pre";35;"III";10;2;222;455;1 "no";47;"Pre";16;"II";2;128;18;1629;0 "no";49;"Post";21;"II";5;80;152;1730;0 "yes";65;"Post";25;"III";2;17;14;1483;0 "no";60;"Post";21;"II";1;58;701;687;1 "no";52;"Post";35;"III";1;8;5;308;1 "no";48;"Post";22;"II";4;14;0;563;1 "yes";62;"Post";20;"II";1;100;100;46;0 "no";46;"Post";20;"II";2;32;29;2144;0 "no";59;"Post";21;"II";4;0;75;344;1 "yes";69;"Post";21;"III";1;51;749;945;0 "yes";68;"Post";45;"I";3;31;145;1905;0 "yes";74;"Post";35;"II";11;10;472;855;1 "no";45;"Pre";50;"I";2;132;200;2370;0 "no";43;"Pre";55;"II";1;23;45;853;0 "no";44;"Pre";28;"III";4;350;127;692;0 "yes";44;"Pre";24;"III";5;187;62;475;1 "yes";72;"Post";17;"II";1;229;533;2195;0 "yes";80;"Post";7;"II";7;2380;972;758;0 "yes";49;"Pre";100;"II";35;84;24;648;1 "no";57;"Post";12;"I";1;84;24;761;0 "no";60;"Post";32;"III";8;162;315;596;0 "no";76;"Post";37;"III";24;11;0;195;1 "yes";57;"Post";35;"II";4;18;0;473;1 "yes";75;"Post";16;"I";1;250;533;747;0 "yes";62;"Post";22;"II";1;263;34;2659;0 "yes";46;"Pre";60;"II";19;2;16;1977;1 "yes";53;"Post";17;"II";1;25;30;2401;0 "no";43;"Pre";20;"II";3;980;45;1499;0 "no";51;"Post";32;"III";10;0;0;1856;0 "no";41;"Pre";30;"III";11;6;5;595;1 "no";63;"Post";45;"III";2;530;328;2148;0 "yes";41;"Pre";20;"III";3;13;1;2126;0 "yes";74;"Post";30;"III";12;432;246;1975;1 "yes";57;"Post";30;"II";1;17;83;1641;1 "yes";44;"Pre";20;"II";6;150;67;1717;0 "yes";48;"Pre";24;"II";1;211;187;1858;0 "no";47;"Pre";15;"III";1;139;36;2049;0 "yes";70;"Post";25;"II";4;34;273;1502;1 "no";49;"Pre";14;"II";1;160;12;1922;0 "yes";49;"Post";24;"II";2;120;117;1818;0 "yes";58;"Post";35;"II";11;2;76;1100;0 
"no";59;"Post";30;"II";1;87;8;1499;0 "no";60;"Post";35;"II";2;5;4;359;1 "yes";63;"Post";30;"I";5;144;221;1645;0 "no";44;"Pre";15;"II";1;175;88;1356;0 "yes";79;"Post";23;"I";1;60;80;1632;0 "no";47;"Pre";25;"I";1;38;44;967;0 "yes";61;"Post";30;"II";1;24;38;1091;0 "yes";64;"Post";35;"II";3;47;64;918;1 "yes";51;"Pre";21;"II";1;3;2;557;1 "no";44;"Pre";22;"II";2;107;94;1219;1 "yes";60;"Post";25;"I";3;78;363;2170;0 "yes";55;"Post";50;"II";1;14;203;729;1 "no";70;"Post";80;"III";8;0;0;1449;1 "no";65;"Post";20;"I";2;912;606;991;1 "no";53;"Pre";20;"II";2;89;36;481;1 "yes";54;"Post";25;"III";3;1;83;1655;0 "no";65;"Post";25;"II";2;86;135;857;1 "yes";62;"Post";30;"II";2;5;104;369;1 "yes";48;"Pre";30;"I";3;133;129;1627;0 "yes";48;"Post";35;"I";2;845;105;1578;0 "no";42;"Pre";40;"II";10;130;51;732;1 "no";48;"Pre";30;"II";16;29;43;460;1 "no";66;"Post";25;"I";2;22;121;1208;0 "yes";63;"Post";25;"II";13;26;348;730;1 "no";64;"Post";35;"I";4;858;15;722;0 "yes";68;"Post";35;"II";2;3;99;717;0 "no";44;"Pre";40;"II";4;364;159;651;0 "no";43;"Pre";27;"II";2;91;117;637;0 "no";67;"Post";35;"II";3;19;38;615;0 "yes";37;"Pre";20;"II";9;0;0;42;0 "no";54;"Post";23;"III";10;13;6;307;1 "no";52;"Post";17;"II";4;558;522;983;1 "no";43;"Pre";80;"III";11;9;1;120;1 "no";56;"Post";31;"II";1;45;286;1525;1 "no";42;"Post";21;"I";4;147;95;1680;0 "no";56;"Post";16;"II";10;4;2;1730;1 "no";61;"Post";36;"II";6;107;158;805;1 "no";67;"Post";17;"II";4;390;386;2388;0 "yes";63;"Post";21;"I";2;16;241;559;1 "yes";66;"Post";20;"II";9;1;11;1977;0 "no";37;"Pre";25;"III";1;13;1;476;1 "yes";71;"Post";16;"II";1;98;306;1514;0 "no";43;"Pre";28;"I";1;437;33;1617;0 "no";64;"Post";22;"III";1;8;11;1094;1 "yes";64;"Post";27;"II";3;186;139;784;1 "no";46;"Pre";32;"II";5;9;13;181;1 "no";45;"Pre";50;"II";7;20;23;415;1 "yes";67;"Post";24;"II";4;96;90;1120;1 "no";37;"Pre";25;"III";8;9;0;316;1 "no";65;"Post";22;"I";6;386;31;637;1 "no";21;"Pre";15;"II";3;24;25;247;1 "yes";54;"Post";21;"II";7;25;88;888;0 "no";46;"Pre";45;"II";8;2;4;622;1 
"yes";63;"Post";18;"II";1;48;18;806;0 "yes";46;"Post";31;"III";1;6;3;1163;0 "no";58;"Post";31;"II";2;240;394;1721;0 "no";48;"Pre";15;"II";2;166;128;741;0 "no";41;"Pre";23;"III";2;26;4;372;1 "no";32;"Pre";17;"III";1;19;8;1331;0 "yes";66;"Post";42;"III";11;412;339;394;1 "no";64;"Post";14;"II";1;199;604;652;0 "no";50;"Pre";13;"III";5;8;32;657;0 "no";47;"Pre";23;"III";2;18;9;567;0 "yes";60;"Post";15;"I";7;14;8;429;0 "no";49;"Pre";23;"II";2;98;31;566;0 "yes";57;"Post";60;"III";18;11;13;15;0 "no";57;"Post";50;"III";13;22;47;98;1 "yes";67;"Post";15;"I";1;208;257;368;0 "yes";58;"Post";25;"I";1;241;28;432;0 "no";61;"Post";25;"II";2;406;174;319;0 "no";65;"Post";22;"II";8;4;2;65;0 "no";44;"Pre";70;"II";19;28;31;16;0 "no";61;"Post";18;"III";4;8;10;29;0 "no";62;"Post";22;"II";7;76;153;18;0 "no";51;"Pre";50;"II";5;360;57;17;0 "yes";47;"Post";23;"III";5;0;0;308;1 "no";44;"Pre";15;"II";1;0;0;1965;0 "yes";61;"Post";35;"III";16;10;13;548;1 "no";48;"Pre";21;"III";8;0;0;293;1 "yes";51;"Pre";16;"II";5;167;15;2017;0 "no";66;"Post";22;"II";4;11;22;1093;0 "no";45;"Pre";14;"III";1;5;43;792;0 "no";66;"Post";21;"II";1;9;898;586;1 "yes";69;"Post";40;"III";1;3;9;1434;0 "no";49;"Pre";20;"II";7;63;27;67;0 "no";62;"Post";12;"II";5;142;91;623;0 "yes";33;"Pre";19;"II";2;0;0;2128;0 "no";46;"Pre";30;"II";2;26;223;1965;0 "no";47;"Pre";20;"II";1;48;26;2161;0 "yes";35;"Pre";35;"II";4;0;0;1183;1 "no";34;"Pre";40;"III";1;0;37;1108;1 "no";38;"Pre";24;"I";1;138;82;2065;0 "no";54;"Post";27;"III";1;27;792;1598;0 "no";31;"Pre";55;"II";3;28;89;491;1 "no";41;"Pre";25;"II";5;6;9;1366;1 "no";43;"Pre";55;"II";1;4;124;424;0 "yes";52;"Post";35;"II";21;11;57;859;1 "yes";65;"Post";25;"III";18;0;0;180;1 "no";47;"Post";45;"II";2;345;42;1625;0 "no";65;"Post";10;"I";2;213;209;1938;0 "yes";53;"Post";37;"II";5;345;47;1343;1 "no";45;"Pre";15;"II";3;28;27;646;1 "no";53;"Pre";19;"III";1;74;534;2192;0 "yes";50;"Post";25;"II";3;0;496;502;1 "no";54;"Post";50;"III";6;7;0;1675;0 "yes";64;"Post";40;"II";23;16;22;1363;1 
"no";29;"Pre";15;"III";12;18;40;420;1 "no";48;"Pre";60;"I";4;312;20;982;1 "no";40;"Pre";30;"III";3;2;16;1459;0 "no";65;"Post";35;"II";1;7;74;1192;0 "no";50;"Post";40;"II";1;80;21;1264;0 "no";55;"Post";34;"II";6;109;477;1095;0 "yes";51;"Post";42;"II";7;58;75;1078;0 "yes";59;"Post";12;"III";1;1;3;737;0 "yes";51;"Post";4;"I";4;638;232;461;0 "no";35;"Pre";22;"II";13;16;25;465;1 "no";48;"Pre";52;"II";11;0;0;842;1 "no";48;"Post";40;"II";1;10;72;918;0 "yes";62;"Post";39;"II";4;73;235;374;1 "no";47;"Pre";40;"II";1;44;11;1089;0 "no";51;"Post";19;"II";2;92;245;1527;0 "no";42;"Pre";40;"II";10;256;0;285;1 "no";63;"Post";27;"II";1;0;0;1306;1 "yes";62;"Post";20;"II";7;0;0;797;1 "no";57;"Post";15;"II";1;91;125;1441;0 "no";25;"Pre";29;"II";3;0;0;343;1 "yes";35;"Pre";30;"III";4;49;288;936;0 "no";51;"Pre";30;"II";1;119;44;195;0 "no";51;"Post";25;"II";2;0;80;503;1 "yes";47;"Pre";30;"II";10;0;0;827;1 "yes";34;"Pre";30;"II";2;210;49;1427;0 "no";68;"Post";30;"II";1;20;312;854;0 "yes";64;"Post";30;"III";12;550;263;177;1 "no";42;"Pre";55;"III";7;20;20;281;1 "no";37;"Pre";35;"III";1;242;67;205;1 "yes";65;"Post";45;"II";17;27;32;751;0 "no";62;"Post";27;"II";13;197;79;629;1 "no";36;"Pre";24;"III";2;0;0;526;0 "no";49;"Pre";22;"III";3;0;0;463;0 "no";45;"Post";30;"I";2;197;49;529;0 "no";38;"Pre";22;"II";10;48;78;623;0 "no";55;"Post";40;"II";13;0;0;546;0 "yes";57;"Post";17;"II";3;502;145;213;0 "no";47;"Pre";40;"II";1;0;90;276;0 "yes";51;"Post";22;"II";4;250;81;2010;0 "yes";45;"Pre";13;"III";4;21;27;2009;0 "no";41;"Pre";10;"I";2;241;214;1984;0 "no";39;"Pre";32;"II";9;1;8;1981;0 "no";53;"Post";26;"III";8;1;1;624;1 "no";59;"Post";35;"II";4;1;1;742;1 "yes";53;"Post";10;"II";2;217;20;1818;0 "yes";60;"Post";100;"II";10;102;88;1493;1 "no";50;"Pre";29;"I";2;323;60;1432;0 "no";51;"Pre";18;"I";1;94;60;801;1 "no";51;"Pre";25;"II";2;20;11;1182;0 "no";43;"Pre";18;"II";1;10;41;71;0 "yes";55;"Post";20;"I";4;10;128;114;0 "yes";52;"Post";20;"II";3;0;15;63;0 "yes";57;"Post";32;"II";2;43;287;1722;0 
"yes";46;"Pre";18;"II";1;120;628;1692;0 "no";45;"Pre";25;"III";1;0;4;177;0 "no";43;"Pre";32;"II";1;171;43;57;0 "yes";64;"Post";26;"II";2;1356;1144;1152;0 "no";62;"Post";35;"II";1;2;70;733;0 "yes";37;"Pre";22;"I";3;23;64;1459;1 "no";64;"Post";21;"II";3;403;253;2237;0 "no";45;"Pre";60;"II";3;74;212;933;0 "no";48;"Pre";18;"I";1;137;73;2056;0 "yes";50;"Post";50;"II";6;1;2;1729;0 "yes";32;"Pre";20;"II";6;8;3;2024;0 "no";49;"Pre";19;"II";2;388;137;2039;1 "yes";33;"Pre";28;"III";1;1;1;2027;0 "yes";58;"Post";35;"II";1;6;11;2007;0 "no";57;"Post";25;"II";1;26;299;1253;1 "no";45;"Pre";35;"II";2;26;36;1789;0 "no";66;"Post";30;"I";5;100;288;1707;0 "no";52;"Pre";37;"II";3;66;104;1714;0 "yes";49;"Pre";25;"II";3;152;25;1717;0 "no";49;"Post";22;"II";1;14;41;329;1 "no";48;"Post";45;"I";1;312;236;1624;0 "yes";62;"Post";60;"II";1;56;17;1600;0 "no";60;"Post";35;"II";3;115;300;385;1 "no";45;"Pre";10;"II";1;82;8;1475;0 "no";60;"Post";37;"I";1;296;35;1435;0 "no";42;"Pre";60;"II";15;7;5;541;0 "yes";57;"Post";36;"III";1;170;192;1329;0 "yes";53;"Post";27;"III";12;44;42;1357;0 "no";56;"Post";55;"III";3;46;31;1343;0 "no";46;"Pre";23;"II";2;120;41;748;1 "no";49;"Post";30;"II";2;254;353;1090;1 "yes";56;"Post";32;"II";2;53;174;1219;0 "no";59;"Post";24;"II";1;860;413;553;0 "yes";56;"Post";42;"I";5;113;700;662;1 "no";46;"Pre";32;"II";1;108;52;969;0 "yes";61;"Post";27;"II";5;141;346;974;0 "no";40;"Pre";40;"II";6;227;10;866;1 "yes";60;"Post";40;"II";6;8;11;504;1 "no";49;"Pre";30;"III";3;1;84;721;0 "yes";53;"Post";25;"III";17;0;0;186;0 "no";51;"Pre";25;"III";5;43;0;769;1 "no";52;"Post";23;"II";3;15;34;727;1 "no";55;"Post";23;"II";9;116;15;1701;1 pec/data/threecity.csv0000644000176200001440000015645613571203267014476 0ustar liggesusers"time";"status";"pi" 5.1;0;0.183654219991896 2.63107461;0;0.0591411584515785 5.1;0;0.0272641370665554 4.77754962;2;0.105057470405362 5.1;0;0.017703031191799 5.1;0;0.0926876192535573 5.1;0;0.0635378324676742 4.72279261;0;0.0100091876999036 4.66255989;2;0.146840449035414 
5.1;0;0.396279175720049 2.85284052;0;0.0910896426755865 4.61738535;1;0.124931447521131 5.1;0;0.0200084304864407 5.1;0;0.122146717433886 5.1;0;0.094366307763296 5.1;0;0.0142669636429828 4.90622861;0;0.0612061441476172 4.54346338;1;0.322303584595794 0.7063655;2;0.170007507090115 5.1;0;0.0515486868344864 5.1;0;0.0527116971041909 3.49349761;0;0.0343538525111562 4.03011636;0;0.0181648739507614 3.81930184;0;0.0531255644774555 5.1;0;0.0205326562304384 3.99452429;0;0.0575961164815791 5.1;0;0.112911232909657 5.1;0;0.230502381148861 5.1;0;0.123598887095075 3.54277892;0;0.0621747542496133 5.1;0;0.0662486560795793 4.8514716;0;0.0563453492550241 3.38945928;0;0.150873250443177 4.73921971;0;0.0344810336347325 3.87953457;0;0.0647820705738931 5.1;0;0.0328436357325514 5.1;0;0.0163430953763674 3.81930185;2;0.169132393387894 5.1;0;0.091435683303717 5.1;0;0.082517891463647 4.85147159;0;0.0377318602849468 5.1;0;0.0197060812170188 5.1;0;0.114991847845216 5.1;0;0.0873561796014919 2.53524983;0;0.142773532273115 5.1;0;0.0594544038471666 5.1;0;0.0241776774228718 5.1;0;0.0291341732151379 5.1;0;0.0420085839689376 5.1;0;0.0449305188972967 5.1;0;0.0239703791236475 5.1;0;0.0456089138206477 5.1;0;0.0178271126036717 3.4825462;0;0.0359627866321309 5.1;0;0.149798526946912 4.93086927;0;0.0775614763292296 0.1615332;0;0.120960356763895 5.03216975;0;0.0301795198129736 5.1;0;0.048187582107199 2.54893909;0;0.0194126168030863 1.82340862;2;0.095125342287572 1.43189596;2;0.0717843074097811 0.14236824;0;0.0340911674873315 4.80492813;0;0.217494529528035 5.1;0;0.0405590827212204 4.78302533;0;0.0405257487412073 4.8459959;0;0.0523158759506322 5.1;0;0.0606878064942126 5.1;0;0.054833742675138 4.91718002;2;0.0925825091068561 3.49623546;0;0.0525681257110711 5.1;0;0.00721707719284928 5.07871321;0;0.0396711845845224 3.06091718;2;0.0940291984336149 4.90622862;0;0.0374442227595838 1.53045859;1;0.0346548628267291 1.2019165;1;0.202851887935845 4.6899384;0;0.0936174587564663 5.1;0;0.043776018401075 
2.48049281;0;0.0519391620643971 5.1;0;0.0710855105986669 4.8788501;0;0.103430661659604 0.87611225;2;0.310187101220921 5.1;0;0.0208650886735681 4.60780288;1;0.0946236971657256 4.26694045;1;0.204947019175654 4.91170431;0;0.0106639226907255 4.73921971;0;0.049726843700054 5.1;0;0.0845148231049681 4.95003422;0;0.0250949595169025 4.94729637;0;0.0730489332723146 4.18069815;0;0.0280096161303061 5.1;0;0.022251711663875 5.1;0;0.0555767100148076 4.8459959;0;0.0632306493073163 5.1;0;0.0460217285732108 0.55030801;2;0.214141567752799 3.60301164;0;0.0458068605293029 5.1;0;0.0439652800174345 5.1;0;0.0157474410973775 5.1;0;0.275080671030508 1.31964408;2;0.0400794791894454 5.1;0;0.072458441243957 3.02532512;0;0.030275880944387 3.60848734;0;0.0336862144747678 2.80492814;1;0.275839081311881 4.76933607;0;0.0727642842416485 4.27926078;0;0.0200317570254057 5.04859685;0;0.0533101181372748 4.85147159;0;0.0148694551749231 5.1;0;0.196795645328027 5.1;0;0.0204304540937984 4.85420945;0;0.0105051887081614 5.1;0;0.0495365207935858 3.70431211;0;0.160019955433371 5.1;0;0.0187398714306559 0.0876112200000003;2;0.189643123420092 4.94455852;0;0.0141574470926386 5.1;0;0.0202019017789558 3.08281999;0;0.103559400356297 5.1;0;0.0358268160217375 5.1;0;0.0562453982052906 4.92265572;0;0.0339899563852267 0.0821355199999996;0;0.133278876202433 4.38603696;1;0.296073081817334 5.1;0;0.10607696679119 4.78302532;0;0.0678027730147268 4.18617386;1;0.089353789849433 5.1;0;0.218520495590496 0.0629705700000001;2;0.06513688501162 0.75701574;1;0.105017761072633 5.1;0;0.0198769162985048 5.1;0;0.163896471616115 5.1;0;0.016401385917873 4.72553046;0;0.169867434620548 2.57631759;0;0.0286361102319309 5.1;0;0.047768762820639 2.55441478;0;0.0184839139560256 3.05544148;0;0.100717452746519 5.1;0;0.0309426476776901 5.1;0;0.200019812220349 5.1;0;0.0196750731575624 5.1;0;0.0401569139887193 5.1;0;0.0703686052245479 5.1;0;0.0176927799455662 1.35249829;2;0.0828079789639707 3.01163587;0;0.0556353377572852 2.66392881;0;0.35830441748271 
5.045859;0;0.0684117648023052 5.1;0;0.097268292351037 5.1;0;0.0474164801665314 4.8952772;0;0.0339996874330929 4.66119097;1;0.0649027803216478 5.1;0;0.110232642988362 5.1;0;0.102527924656393 4.54209445;2;0.117526247497914 4.84325804;0;0.0878883528731329 5.1;0;0.0812947944894352 5.04038332;0;0.327327207008139 5.1;0;0.0680100527973835 5.01848049;0;0.0733632332555404 4.13278576;1;0.190601609371377 3.35934292;0;0.036578611844327 3.37029432;0;0.15848111566309 5.08966461;0;0.226030619515979 1.46748802;2;0.177971450913475 5.1;0;0.0275179108396927 3.88774812;1;0.031705854936685 5.1;0;0.0141114243657902 5.1;0;0.0689151134792881 5.1;0;0.204408742157305 4.25051335;1;0.208889724405193 4.87611225;0;0.052555568387627 5.1;0;0.0193667949406666 4.92539356;0;0.169799332886663 5.1;0;0.0604375294653774 5.1;0;0.0923307425443042 5.1;0;0.129335292264869 4.8090349;1;0.082966411344871 5.1;0;0.0607375694601404 2.6146475;0;0.100786872909643 5.1;0;0.0231863180228611 3.70157426;0;0.0259277011007092 5.1;0;0.0225333698592468 5.1;0;0.0976862542391309 4.30663929;2;0.183553358326332 1.51813826;1;0.0433003009619856 5.1;0;0.040342379639664 4.96646132;0;0.0344675919030428 1.6495551;1;0.160050988711695 5.1;0;0.0637832506580027 4.80766598;0;0.0128692809442543 5.1;0;0.0530776822662587 2.59822039;0;0.0428391014835262 4.90075291;0;0.105395061269966 4.59274469;1;0.0981161012815501 3.32922656;0;0.130740947041738 5.1;0;0.0509828314156149 2.89664613;0;0.0145378879700875 1.83162218;1;0.0655825054389384 5.1;0;0.098645503486598 5.1;0;0.0458973032838524 3.1266256;2;0.0482677442404416 4.89527721;0;0.0320545262190753 2.83915127;0;0.175885224581162 5.1;0;0.0722399951440805 4.77207392;0;0.0516593000033529 5.1;0;0.164956928186894 5.1;0;0.018290268475959 3.91786447;0;0.0348432358015196 2.75975359;0;0.0775881528904868 4.75564682;0;0.0120735963699446 5.1;0;0.160428441984414 0.12046543;0;0.0348755728182087 4.89253936;0;0.136563745352213 4.20944559;1;0.066269150848359 5.0568104;0;0.0609237664713374 
5.00479124;0;0.0961142562861001 5.1;0;0.0610730398539309 5.1;0;0.271995186795503 2.65297741;0;0.100523276479516 4.86516085;0;0.24821832810794 5.1;0;0.00932499505659186 5.1;0;0.0422788227101688 3.30732375;0;0.14435098974278 3.36208077;0;0.16937891939269 5.0294319;0;0.0264283957008074 5.1;0;0.0127046899838118 5.1;0;0.0201953900284527 4.89253936;0;0.012658867215265 5.1;0;0.0437937886221672 5.1;0;0.0301688196118214 4.8898015;0;0.078688413624291 5.1;0;0.01760467847 5.1;0;0.508745401164035 5.1;0;0.171126909205795 1.23477071;2;0.0488343901172212 5.1;0;0.040147805510889 5.1;0;0.0515393536759641 5.1;0;0.0216262769889262 0.94455853;2;0.207871781630261 5.1;0;0.0528183075244752 4.74469541;0;0.0431574165355468 5.1;0;0.026342068216148 4.93908282;0;0.0302916638669506 5.1;0;0.0332200921105636 1.58384668;1;0.180877543341552 3.09103354;0;0.0125812374709076 4.87063655;0;0.089903852326656 5.1;0;0.0219796869937138 1.16495551;1;0.0410638385521106 3.6605065;0;0.051998773236755 5.1;0;0.106557019311016 5.1;0;0.104499686273223 5.1;0;0.0456909154165811 3.42778918;0;0.161590816472273 4.81861738;0;0.00942811441444736 5.1;0;0.0107288186129014 5.1;0;0.0103665889386717 4.90075291;0;0.0507091465851854 5.1;0;0.0264167973358151 5.1;0;0.0475955653418853 4.22724162;1;0.147701549961711 5.1;0;0.0527014224717504 3.62765229;0;0.0465971065893135 5.1;0;0.0336687778690805 5.1;0;0.0120015591220164 5.1;0;0.0429443821863441 1.51403149;2;0.0830639186473678 5.1;0;0.00674560070784151 5.1;0;0.106220444113576 4.39425051;2;0.0774914672385811 0.82956879;2;0.141099156812821 4.862423;0;0.258238913556942 5.1;0;0.0107786689731417 3.33470225;0;0.0850237824187001 4.8788501;0;0.0374535420003765 4.98836414;0;0.0376965718037293 4.85420945;0;0.0139142374373841 5.1;0;0.167772285434335 5.1;0;0.0383580238562877 5.1;0;0.353052545968196 5.1;0;0.0319685144426524 5.1;0;0.0393360257762019 5.1;0;0.156047198541582 5.1;0;0.0701587863412725 5.1;0;0.16485231433628 1.66187543;2;0.189874054693162 5.02395619;0;0.0137613847587263 
5.08145106;0;0.0666502525413757 0.0054757099999998;0;0.0636071899066676 5.1;0;0.0897121061963538 5.1;0;0.107502600406545 4.96919918;0;0.0507844042800767 3.24161533;2;0.0537759837900255 5.1;0;0.0277739277046552 2.49144421;0;0.1673683738782 3.61122519;0;0.0162382123295378 3.43874059;0;0.0602607527892708 0.86516085;2;0.0609205691683503 5.1;0;0.150672857944328 5.1;0;0.041423145652977 2.55441479;0;0.0413997560642016 1.32511978;2;0.18354432962201 1.52087611;1;0.124260374920818 5.1;0;0.202115663718544 5.1;0;0.0364167181425454 5.1;0;0.119155692572007 4.91718001;0;0.0457919790765454 4.91444216;0;0.0749355295309288 5.0513347;0;0.0282861263371902 5.1;0;0.0553434195859993 4.64065708;0;0.0264654963561266 5.1;0;0.00767733524680296 2.4312115;0;0.067824530832708 5.01300479;0;0.0124473423880375 4.77207392;0;0.0105190472152405 5.1;0;0.0281499239777984 5.02121834;0;0.0264648081414724 5.1;0;0.0470414400335007 1.86721424;1;0.436643704581564 3.71526352;0;0.0127711034828939 4.183436;2;0.028839875547225 5.02669404;0;0.048375917540381 5.1;0;0.115129632584776 5.1;0;0.127643349990811 4.92813141;0;0.0204275031299299 4.90622861;0;0.0318158467621385 5.1;0;0.046152916757143 2.39425052;1;0.167376996277093 5.1;0;0.123225312530931 5.1;0;0.0655097743282337 4.91170432;0;0.00860112077545627 5.04859686;0;0.0867911724819333 3.93976729;0;0.0526094743207157 5.1;0;0.0326809260685205 4.75838467;0;0.0171374000840776 5.1;0;0.276337423667455 2.53524983;0;0.134263971656561 4.83504449;0;0.0299125440030989 3.45790554;0;0.10043139063754 5.1;0;0.0402775761504996 5.02121835;0;0.109248488946296 5.1;0;0.06731852004805 4.83230664;0;0.0521986532380261 5.1;0;0.0250431594308363 5.1;0;0.0289583754573662 4.23545517;2;0.12228537421084 5.1;0;0.165736406287583 5.1;0;0.0633504893884251 5.1;0;0.0202741023371388 4.90896646;0;0.0159923476207483 2.7871321;0;0.242803083729379 3.62217659;2;0.0742809984946329 2.84736481;0;0.155475949065152 1.22108145;1;0.141673771042837 0.00273785999999987;0;0.181778461666655 
0.82683094;2;0.065115489398811 2.87748118;0;0.064293138597885 5.1;0;0.116834335624845 2.56262834;0;0.118589474128275 5.1;0;0.131587684126262 5.1;0;0.0428028601403828 5.1;0;0.0278782861628067 5.1;0;0.0299640511698643 4.33127995;2;0.135263464758241 5.1;0;0.274711224926371 5.045859;0;0.0209707541005866 4.98015058;0;0.342562737352449 5.1;0;0.00871150593923328 4.92813142;0;0.02606995457403 4.11498974;0;0.0506880120725932 1.82477755;1;0.131728040024911 4.89801506;0;0.0271945735946616 5.1;0;0.189123469342708 5.1;0;0.171187122214497 4.13004791;1;0.116794582623844 1.70431212;1;0.313403848598076 5.1;0;0.0444881874270618 4.32306639;1;0.140879163284664 5.1;0;0.058023095004371 5.1;0;0.0270985123635756 5.1;0;0.0081785556028683 5.1;0;0.0481425139480268 5.1;0;0.0571394486137392 1.57563312;1;0.15531856823098 5.1;0;0.0338148830029038 5.1;0;0.0141217532569375 5.1;0;0.0612772849239569 5.1;0;0.0185598523511253 5.1;0;0.0369533157034973 5.1;0;0.125322826488246 1.16769336;1;0.154313941872189 5.1;0;0.00728601679823646 4.87063655;0;0.031714078493767 3.44695414;0;0.0347809672331875 5.1;0;0.120563526793771 4.88158796;0;0.0200173085432622 5.1;0;0.14898066194066 5.1;0;0.0861700559127667 5.1;0;0.0123576402001711 4.81040384;0;0.0217628382068744 4.90896646;0;0.0198774757595977 5.1;0;0.0180971179403104 4.99657768;0;0.0455835807882501 0.48186174;1;0.089810123937822 5.1;0;0.0493548574617221 5.1;0;0.034186955271817 5.1;0;0.0404712564412872 4.00136892;1;0.114527407918348 5.1;0;0.147949552864805 5.1;0;0.0124550596197682 5.1;0;0.0532587750296358 0.0301163600000001;0;0.0784553899765764 5.1;0;0.0338404430449965 5.1;0;0.0082787858371827 5.1;0;0.0292527413655425 5.1;0;0.26304206182868 1.21560575;2;0.0702219473444326 5.1;0;0.0232547396246571 5.1;0;0.0394384459557812 0.0054757099999998;0;0.0615915169450986 5.1;0;0.0151820899063861 3.38124573;0;0.105227861115201 5.1;0;0.0217491794472338 4.85147159;0;0.0194200855519728 4.21902806;1;0.0699958450878374 4.77481177;0;0.021051755080429 5.1;0;0.116936141007196 
5.1;0;0.0309081718883184 1.20054757;1;0.120647342450171 4.85968515;0;0.0480728675336176 2.79808351;0;0.083649619167694 5.1;0;0.0256938112158228 5.1;0;0.00871416769559744 5.02669404;0;0.0972569795265882 4.73921971;0;0.0204025936775822 5.02121834;0;0.032078662606856 4.98562628;0;0.0192811056558633 5.1;0;0.0363753340164242 5.1;0;0.0441923206630463 5.1;0;0.102766837633221 5.1;0;0.0500817846349607 2.6310746;0;0.255599175958128 5.1;0;0.157484756873641 4.80766598;0;0.060940353735606 5.1;0;0.0204152876072278 0.0574948600000003;2;0.119266929558411 5.1;0;0.067135426225691 0.0273785100000001;0;0.208526181928714 2.51608487;0;0.380572745341831 4.95277207;0;0.0356034569401669 4.90622861;0;0.031720783031181 4.75017112;0;0.0107776778217702 5.1;0;0.102357156346424 4.32854209;2;0.0762881225193472 4.22724162;1;0.192135080777888 4.27926078;2;0.115703237746969 5.1;0;0.179056789470952 5.1;0;0.19749539595273 4.8733744;0;0.0370709955053722 4.91991787;0;0.189055852618463 5.1;0;0.109503510879728 5.1;0;0.0737767686493739 5.01848049;0;0.0620793072937028 4.73648186;0;0.0375738177064298 5.1;0;0.0114545832991471 4.29842574;0;0.0733830286445655 4.89801506;0;0.271291682201145 4.82956879;0;0.012022429754195 5.1;0;0.194546933923055 5.09787817;0;0.0173491848507496 5.1;0;0.0269108562696431 4.18069815;1;0.243458065165744 5.1;0;0.0279798901205321 4.79671458;0;0.0180386904566553 2.5817933;0;0.0145942807025171 5.1;0;0.0671087818411128 4.40793977;1;0.0943664804879565 5.1;0;0.124225172668265 4.97741273;0;0.0409376628699123 4.89253935;0;0.0181833360613219 4.85694729;0;0.0499533678679077 5.1;0;0.0307428620164327 5.1;0;0.0547575942516806 0.54620123;1;0.242962133419587 5.1;0;0.0320635230950829 1.21423682;1;0.0694661164073544 3.58658453;0;0.0350784379189523 5.1;0;0.0340476253234503 4.90896646;0;0.0657082118979845 5.1;0;0.191561411202128 5.1;0;0.0629904287541602 4.84873375;0;0.100485127044709 5.1;0;0.0179406566398343 4.82956878;0;0.11874726783073 4.92539356;0;0.0337366231534411 5.1;0;0.222333540170515 
1.17590691;1;0.372545745463964 3.57563313;0;0.105677688167272 4.862423;0;0.053879188584428 4.98836413;0;0.0981985953514072 5.1;0;0.0918219844372525 5.1;0;0.101648093114825 2.76249144;0;0.0405832373668257 5.1;0;0.0806153403295627 0.79397673;2;0.0443071901489717 2.48049282;0;0.121748993819129 4.89253935;0;0.025928761112855 4.95277207;0;0.0322211291958738 3.37577002;2;0.124211272008205 5.1;0;0.0217762205502653 4.13689254;2;0.043714771171116 4.51608487;1;0.155476789995657 3.29089664;2;0.0706734937080168 5.1;0;0.0940906504980118 5.1;0;0.122582640287557 5.1;0;0.0142264563947548 4.97741273;0;0.0406380688180478 5.09514032;0;0.328399978257985 5.1;0;0.0306257739031977 5.1;0;0.0139843742278687 5.1;0;0.0630129017750586 5.1;0;0.020141456037386 5.1;0;0.0235583637104014 5.1;0;0.0343441959776826 4.75564682;0;0.134081001477576 3.70157427;0;0.121496982023821 2.42573579;0;0.0315695445296475 4.79397673;0;0.223090826979423 5.1;0;0.019138865144132 1.38809035;1;0.0526321727021103 3.30184805;2;0.0265852785155482 5.1;0;0.0841092263360833 5.1;0;0.0166351893768409 5.1;0;0.0924250927579458 5.1;0;0.00948923661118189 5.1;0;0.0650231329316918 5.1;0;0.230330520698711 5.1;0;0.146703154688067 5.1;0;0.379025862061682 5.1;0;0.0989425952464718 4.94455852;0;0.0720311092893593 5.1;0;0.0392351239924104 5.1;0;0.0664581154386277 4.80492814;0;0.0636902136010233 5.1;0;0.0254449654520893 5.1;0;0.110884158283436 5.1;0;0.0872396620779243 4.93086927;0;0.124399649883234 4.93086927;0;0.0079604222377932 5.1;0;0.0624571537553509 5.1;0;0.0424061082066802 2.95687885;0;0.135882220185612 1.63997262;1;0.0250923573759273 3.45242984;0;0.0351737078307202 5.1;0;0.0180481478322972 1.0184805;2;0.129725041409904 5.1;0;0.0387219310024249 5.03216974;0;0.00954763024086114 5.1;0;0.0168533382639232 5.1;0;0.143481092805918 5.1;0;0.0270448758743364 4.85147159;0;0.0459157586852434 4.98288844;0;0.124335228249705 5.1;0;0.0385496449260392 1.32238193;1;0.193087084546363 5.1;0;0.0397762606807321 5.1;0;0.113503782560713 
5.1;0;0.0564453722508942 4.74469541;0;0.0613308947687476 4.73100616;0;0.0242392029887916 5.1;0;0.263990140506771 5.1;0;0.0570724875012394 5.1;0;0.262943633429626 5.1;0;0.0110484938028395 5.1;0;0.0493881397407902 3.73716633;0;0.0268092403129248 0.75564682;2;0.0769034411451628 5.1;0;0.0276700085865682 4.75017112;0;0.077274148112529 4.8952772;0;0.0996213788779973 5.1;0;0.0289073969448453 5.1;0;0.0338664772801293 4.82135523;0;0.0374278814789395 2.02327173;2;0.412827624274342 5.0568104;0;0.0134142018945094 5.1;0;0.0577311644923242 4.17659138;1;0.169681696660138 5.1;0;0.104619125956363 4.34086243;1;0.0847144703596994 3.09377139;0;0.0611852191814029 4.89253936;0;0.0185294028495232 4.91170431;0;0.00894313376856437 4.7173169;0;0.0483599352258396 1.75359343;1;0.202134191410128 5.1;0;0.162230261167753 4.8678987;0;0.0137778928058832 1.77823409;1;0.216496216766978 5.1;0;0.05619044696931 5.1;0;0.270654463384922 5.1;0;0.0831872566782798 4.76659822;0;0.0248285489136672 5.08692676;0;0.0094712019618044 5.1;0;0.0464191037449405 3.89869953;0;0.0764848624356745 5.1;0;0.0672944264219763 3.90554415;1;0.0778738745516292 1.70020534;1;0.073519893621448 4.90349076;0;0.0707186959347295 5.1;0;0.0848729632774537 5.01574264;0;0.0167654544849994 1.6440794;1;0.111346192455579 5.01848049;0;0.0499657486771153 5.1;0;0.00974202334107705 5.1;0;0.21013314113769 3.59206023;0;0.0683229278837168 0.3613963;0;0.158531540576148 5.1;0;0.0588956849317949 4.75290897;0;0.133464094843192 1.53319644;1;0.0864743624356912 2.86926762;0;0.172792111149973 2.80355921;0;0.11198797128829 5.1;0;0.270064049318764 2.1670089;1;0.101317899651228 5.1;0;0.0856391796403328 5.08966461;0;0.0446213592543387 5.1;0;0.0107879386092225 5.1;0;0.0442733733271989 5.1;0;0.0413370585133547 5.03216975;0;0.0154733955126181 1.60027379;1;0.134049719420949 5.1;0;0.186833687542088 4.87611225;0;0.0742492341134373 5.1;0;0.020644697394197 4.04928132;0;0.0292398115716957 1.5633128;2;0.178208524458163 4.79671458;0;0.0469638701519679 
5.1;0;0.0310268221012356 3.73716633;0;0.046660963101059 5.1;0;0.05805406568402 0.15879534;0;0.097663299340495 5.1;0;0.00839535379614029 2.88021903;0;0.117448884148511 4.37919233;1;0.101391324430051 5.1;0;0.0238077380624422 5.1;0;0.020768899216047 1.42094456;2;0.138569282026752 5.1;0;0.0545163136007661 4.84052019;0;0.0595050118476297 3.58932239;0;0.0106892616498876 0.93634497;2;0.0547562292838663 5.1;0;0.0430498603058225 5.1;0;0.0757984910598811 5.1;0;0.391099044951023 5.1;0;0.213428992692911 5.1;0;0.0372424117016567 4.89801505;0;0.0726680162297492 4.8788501;0;0.021921975493502 4.69815195;0;0.0876055931160515 4.94729638;0;0.0231157625618287 2.7871321;0;0.393865511472413 2.71594798;0;0.0821491673848675 5.1;0;0.190921002203328 5.1;0;0.0386056382533756 5.1;0;0.0152517513424145 4.98562628;0;0.0188772827579331 5.1;0;0.010161438570382 5.1;0;0.510614483941628 4.85968514;0;0.0471885648042605 5.1;0;0.0155912725120204 5.1;0;0.302000699890371 1.60711841;1;0.104524962146895 5.1;0;0.352383804312703 5.1;0;0.0716975697406571 5.1;0;0.0325296870748531 5.1;0;0.016573635003515 5.1;0;0.0734570810556323 5.1;0;0.0685419740994926 5.02669405;0;0.0143096441354012 4.91991787;0;0.0112783077194553 5.1;0;0.0522886208153349 4.62422998;1;0.124061070134381 0.0383299099999999;0;0.0308668832467946 5.1;0;0.0203507507583985 5.1;0;0.0820750409373038 3.48528405;0;0.182859709612589 3.4715948;0;0.0137159432981584 5.1;0;0.035297018477081 5.1;0;0.0425859205700093 4.67351129;2;0.105731049410108 4.89253936;0;0.0709385309601422 4.65982204;2;0.108687573639011 5.1;0;0.158823895429599 4.76112252;0;0.0578856345674292 4.36413416;2;0.0752507056035812 5.1;0;0.0895310927185178 5.1;0;0.0172307329557628 4.06570841;0;0.0313769310036714 5.1;0;0.0228748218159196 4.51060917;1;0.0605103995063619 1.50308008;1;0.183743745488971 5.1;0;0.0245523689490548 5.1;0;0.0268945846416097 4.10677618;1;0.230028196665331 4.91991786;0;0.0448200866196187 4.77207393;0;0.0232552008183873 5.1;0;0.0384613072187237 5.1;0;0.0503866847884355 
5.1;0;0.0538611782589807 5.1;0;0.0222959435453343 5.1;0;0.0656002202301591 5.1;0;0.0575080229143815 4.95550993;0;0.103936634115144 3.38124572;0;0.0509846099060716 5.1;0;0.0488374742895863 5.1;0;0.0440127808994256 5.1;0;0.0898186570009821 2.93223819;0;0.118337907013709 5.1;0;0.124626284588984 4.27926078;0;0.0573085546825645 4.79671458;0;0.0425988678655732 5.1;0;0.121971127102506 3.23340178;0;0.113556024928883 5.06228611;0;0.0610340261630757 5.1;0;0.113732446264051 4.65160849;2;0.114241190859422 1.34428474;1;0.379343286603773 0.43531828;2;0.165326805527478 5.1;0;0.0456609271638291 5.1;0;0.1434858454496 5.1;0;0.0219792960848781 5.00479124;0;0.033804231041497 4.12183436;1;0.0285136427002435 3.53730322;0;0.0819871811583941 5.07871321;0;0.0253019767779219 5.1;0;0.0737889604196898 5.1;0;0.108947153860706 5.08418891;0;0.178635963597954 5.1;0;0.0565979800755921 5.1;0;0.236884565326286 5.1;0;0.0270471813039757 5.0568104;0;0.0634723831257574 1.93292265;1;0.167653183805645 5.1;0;0.0550558126656102 2.88843258;0;0.0355652046024467 4.98288844;0;0.0126013167729127 0.50924025;2;0.0311689287646142 3.06639288;0;0.0529190605108575 5.1;0;0.0334270719954239 5.1;0;0.0109401539941002 5.1;0;0.0323793394732907 4.94455852;0;0.0355810654349547 4.82135523;0;0.0227645200333867 5.1;0;0.112602102801032 5.03764545;0;0.0384853552964162 4.88980151;0;0.0234975415294335 2.87748118;1;0.19333430587842 5.1;0;0.0995106810953812 5.1;0;0.0221119164292451 4.8788501;0;0.0417010760329828 4.98836414;0;0.0471146313777635 4.96919918;0;0.0610187252939614 5.1;0;0.0161060521792934 0.0793976699999996;0;0.139690934364627 5.1;0;0.0269015120000505 4.95277207;0;0.0435458505678622 5.1;0;0.0196576040184662 4.92813142;0;0.0385473784103278 5.1;0;0.101464550511718 4.85968515;0;0.0421119275663901 5.1;0;0.0782509449259844 2.59548254;0;0.273165277645655 5.1;0;0.0595185705894144 5.1;0;0.0376142598060456 5.1;0;0.145241947664832 4.43258043;1;0.183030576850504 5.1;0;0.0568926687143217 4.74195756;0;0.121179010137071 
5.1;0;0.0324817920075594 5.1;0;0.010365847011194 5.1;0;0.220986192299499 5.1;0;0.0711131876356353 3.34565367;0;0.0499315808540014 5.1;0;0.168624216146509 5.1;0;0.160684728007029 5.0622861;0;0.103993047022739 4.39425051;2;0.183441044426554 4.95550993;0;0.0816613660783748 4.3559206;2;0.103329660151314 5.09787817;0;0.0154641252113198 4.93360712;0;0.0161783530487634 5.1;0;0.0238359599220273 4.32032855;1;0.124401622972584 4.85147159;0;0.0514432328242795 5.1;0;0.116478738601524 4.61601642;1;0.108642675399207 5.1;0;0.0624722047009778 4.90349076;0;0.0612048123721082 4.72553046;0;0.118180910359734 2.28610541;1;0.0905770120969918 3.93429158;0;0.0763376155931873 0.39972621;2;0.0253516118928338 5.1;0;0.119548172619705 5.1;0;0.21563522606969 5.1;0;0.0121536921888077 5.1;0;0.0323576167140812 3.44695414;0;0.250346351513247 5.03764544;0;0.256131215544428 5.1;0;0.123227736536334 3.93155373;2;0.0555804978727132 5.1;0;0.0878458230786098 4.90075291;0;0.0354744204759609 5.1;0;0.0351074633260704 4.81587954;0;0.0107476795882631 5.1;0;0.112612455644059 5.1;0;0.0650056793650087 5.1;0;0.0274496280184794 5.1;0;0.011516710357532 4.45585216;1;0.101459110933059 5.1;0;0.0308382212717251 5.07597536;0;0.0108370936489623 4.87063655;0;0.0832622503844764 5.1;0;0.0435960678522956 4.84325805;0;0.0787130226138511 4.68446269;0;0.10066106094041 5.1;0;0.0544424540887919 4.12320328;1;0.0345373479973283 4.89801506;0;0.0729265351395208 5.045859;0;0.0819564490673945 0.47364819;2;0.0564699472870128 5.1;0;0.0328096931395451 4.96646133;0;0.248739754370565 4.55030801;1;0.0608525471441918 5.1;0;0.232335549466661 2.31074606;2;0.176827776062016 4.81587954;0;0.122658877894432 4.00273785;1;0.200519808278848 5.1;0;0.0489546656554443 5.1;0;0.0117413038461681 3.449692;0;0.043032469564248 4.96098563;0;0.0188070150244263 0.75290897;2;0.08467294331662 4.80766598;0;0.0381424885262794 5.1;0;0.191252927995853 5.1;0;0.231631108814442 3.34839151;2;0.13526614394111 3.96988365;0;0.0228334346398896 5.1;0;0.00904839169312058 
4.8459959;0;0.0446130986420388 1.76454484;1;0.366787992845981 0.75838467;2;0.0514080364016815 5.1;0;0.0161489692431158 4.49281314;2;0.0964327166988922 5.00479124;0;0.0554564642688735 5.1;0;0.0563827180632607 0.10951404;0;0.084579064340304 3.15400411;2;0.0792238452270331 4.95003423;0;0.018312897460406 5.1;0;0.0494770562303387 0.0602327200000001;0;0.0566629386209517 5.1;0;0.0154084688763118 5.1;0;0.131202042646068 3.34017796;0;0.123605882430798 2.51060917;0;0.024493635351353 3.04175223;0;0.0631966220469301 3.69883642;0;0.0439978723020563 2.99794661;2;0.0609195674341173 3.10746064;2;0.122405704687531 5.1;0;0.0962986616921127 5.1;0;0.0170527454879345 5.1;0;0.0288173942122386 5.1;0;0.082275416530489 5.1;0;0.230874995725161 4.99931554;0;0.0205793426308688 5.1;0;0.0353378043499274 2.54620123;0;0.162408764743283 5.1;0;0.167469854681407 0.33675564;0;0.0400098513596825 2.49418207;0;0.175415495412174 5.1;0;0.0803813091906487 5.1;0;0.0119749772983654 5.1;0;0.0186223636579396 4.91991787;0;0.0419815951596677 5.1;0;0.0574705556894855 5.1;0;0.0351732271922957 5.1;0;0.02598944843347 4.96372348;0;0.0196907750225564 3.57563313;0;0.075176578199929 5.1;0;0.0264014648185746 5.1;0;0.0824638599181664 4.19028063;1;0.0516914317521401 5.1;0;0.0257342549922132 4.28473648;1;0.16819146195859 5.1;0;0.0249740415767931 1.14442163;1;0.179993930713865 5.1;0;0.012001235115226 0.0821355300000004;0;0.0654749955118971 5.1;0;0.0111555114747506 4.85420945;1;0.170974656990764 5.1;0;0.0387643173491085 5.1;0;0.0905189833337307 1.47980835;1;0.188065385253518 5.1;0;0.0217763843663774 5.1;0;0.0526026813101263 5.1;0;0.0474321543592022 5.1;0;0.0581852841241725 3.25530458;0;0.149565797045469 0.00273784999999993;0;0.112512325535674 3.8275154;0;0.0336961703122173 4.76386037;0;0.0212129464416126 4.77481177;0;0.0532198733227155 4.85968515;0;0.0773991205299141 4.8898015;0;0.0284528227628356 5.1;0;0.026145509382943 3.96714579;2;0.0426170150386075 4.98836413;0;0.112320971484516 5.1;0;0.0755053372395181 
4.35318275;2;0.09383744040394 5.1;0;0.0315468274694106 5.1;0;0.0454311151562611 1.76591376;2;0.032164230140583 5.1;0;0.0353890107558524 4.81587953;0;0.0254358607181902 5.1;0;0.0493895685970359 5.1;0;0.0584751329828908 5.1;0;0.0564997209912957 1.42094456;2;0.0236232758869475 3.62765229;0;0.0421310922758365 5.1;0;0.0866303227864492 4.71731691;0;0.0226722783425513 5.1;0;0.20120194358065 5.1;0;0.0312747657656983 5.04449008;0;0.157663294333808 5.1;0;0.011119234437793 5.045859;0;0.0553420618468389 5.1;0;0.0355498318220474 5.09787817;0;0.0163474520447475 3.73990417;0;0.152628840416288 5.1;0;0.0779795119241257 4.75290897;0;0.11839228471532 2.90759753;0;0.0446616501251023 4.82135523;0;0.0308510895037807 1.6550308;1;0.183118009865518 5.1;0;0.328105538944108 2.74606434;0;0.0570820555950059 0.09856263;0;0.222052567177065 5.1;0;0.0194359223540471 4.89253936;0;0.0464148921119334 5.1;0;0.236759209501869 5.1;0;0.0122327681652237 5.1;0;0.160643007839596 5.1;0;0.0453134552674552 5.1;0;0.0231126053543281 1.12525667;1;0.124630687678017 3.69883641;0;0.189673576776863 3.4880219;0;0.0995501475024226 5.04859685;0;0.0514260572068336 1.24709103;1;0.0849207935112009 5.04859685;0;0.0120584669628075 5.1;0;0.126207992418376 5.1;0;0.0723503848044304 5.1;0;0.205607717973042 5.1;0;0.0105108126426581 5.1;0;0.0993207018158434 5.01026695;0;0.0397722519654913 5.1;0;0.0220531911860182 4.42710473;0;0.0362884507930404 4.8952772;0;0.038408720198161 0.8788501;2;0.0162398796231787 3.1266256;0;0.094006717725723 5.1;0;0.0398640236210105 4.93908282;0;0.0184110304634926 5.1;0;0.204930210286087 4.8733744;0;0.0767058710839185 5.1;0;0.0991999080724666 4.72826831;0;0.236682059785799 0.99931554;2;0.0755472948650682 5.1;0;0.0141560117888673 4.89527721;0;0.0495981146344888 5.1;0;0.0281390958840961 1.62628337;2;0.0731905695927958 5.1;0;0.0488376691589322 5.1;0;0.0562499830749902 4.8678987;0;0.032523337318386 2.51608487;0;0.0415426378622466 5.06502396;0;0.0656634282051237 4.96919918;0;0.019988379217477 
4.95824778;0;0.0208697286679245 5.1;0;0.0385025334356356 4.85147159;0;0.0681596650101956 5.1;0;0.046085193033957 1.61396304;1;0.0402945029315864 4.91170431;0;0.224543877836567 5.1;0;0.0437472214358608 5.1;0;0.0175204172411018 4.95550993;0;0.0795853513515184 3.54825462;2;0.21903765791245 2.93497604;0;0.352040252148972 5.1;0;0.0664225007590744 5.1;0;0.0334300278307753 5.1;0;0.103166317692456 5.1;0;0.0531593411756994 1.68651609;2;0.117448903030321 5.1;0;0.0705739301195406 5.1;0;0.0458431002836852 5.1;0;0.0555358275395441 4.92813142;0;0.0155244039317998 3.4880219;0;0.0691967893553537 5.1;0;0.0541727659644512 5.1;0;0.0193275157480989 5.1;0;0.126065879767524 3.4825462;0;0.127674023041428 5.1;0;0.0836802404956246 5.1;0;0.055826169574254 4.93360712;0;0.0344564738518368 5.1;0;0.0684124854893362 5.1;0;0.0695902408865238 5.1;0;0.0291362920354884 5.1;0;0.0648191569065793 5.1;0;0.076549998286269 4.83230664;0;0.0646850315121259 5.1;0;0.0360031902407106 5.08692677;0;0.0600999981681745 5.1;0;0.0611930518844706 4.86242299;0;0.0880561230491702 5.1;0;0.173134491136527 5.1;0;0.214152090746144 5.1;0;0.198128321484689 5.1;0;0.149834611261633 5.1;0;0.101667931131745 5.1;0;0.0252918011191179 2.72963723;0;0.0508705859326435 3.63039015;0;0.073371140015094 5.1;0;0.0249460781352685 5.04312115;0;0.135557654701059 4.82135524;0;0.0773157019653084 5.1;0;0.0566838633761729 5.1;0;0.00729597800339969 4.91444216;0;0.146576569489086 5.1;0;0.0179442038708863 4.98562629;0;0.00881190457867482 4.89527721;0;0.0585106321351057 4.96646133;0;0.0256521480687946 5.1;0;0.0187580624989955 2.59548255;0;0.0292143375693749 4.83230664;0;0.0448447534718754 5.1;0;0.0355756070875096 5.1;0;0.112550367475899 5.1;0;0.0232045708466446 5.02395619;0;0.0857921246698382 5.1;0;0.030735545843763 5.1;0;0.0365674109757363 5.0294319;0;0.036775413916096 0.0246406600000002;0;0.10061832977216 5.1;0;0.108699799701865 4.90349076;0;0.0215405305050372 5.1;0;0.0107969792673914 5.1;0;0.0590619449671696 2.7761807;0;0.0734612982254143 
5.1;0;0.0301505003083988 5.1;0;0.0191891560486117 5.1;0;0.0801124887605417 4.85968515;2;0.144205228807713 3.61122519;0;0.0264890694053651 5.1;0;0.0283519706831976 5.1;0;0.013819839615986 5.1;0;0.030645064672344 5.1;0;0.221966928286355 5.1;0;0.0404651873255128 5.1;0;0.0244123959034536 5.02121835;0;0.111992471414693 5.1;0;0.0827858923053136 5.00205339;0;0.0587088523720789 5.1;0;0.0285796407847701 4.13689254;1;0.0848499395973974 5.1;0;0.055494019983507 4.67624915;0;0.0162240724832352 3.57015743;0;0.0629787948437234 5.1;0;0.0606110514552289 5.1;0;0.0220338947501178 5.1;0;0.0206528969209015 5.1;0;0.114269980806848 1.21697467;1;0.156702003183352 5.1;0;0.0589735599971672 4.95003423;0;0.0392585416412637 5.1;0;0.104591267663321 4.26557153;1;0.124935874396688 5.01300479;0;0.0724784564600999 1.4825462;1;0.0664911983208798 4.80219028;0;0.13148471961685 5.1;0;0.0213045923429331 5.1;0;0.0626573840171788 2.83915127;0;0.0493713941438941 2.56810403;0;0.356463547595724 5.1;0;0.00833372082692733 3.6440794;0;0.0659448725094759 5.1;0;0.167842507913127 5.1;0;0.147635675489805 0.00547569999999986;0;0.0715748807950137 1.69472964;1;0.113387476443668 5.1;0;0.0550918796276857 1.48939083;2;0.18716908004247 4.85420945;2;0.137087665747332 1.13894592;2;0.174077467484124 5.1;0;0.0253541739638093 3.18685832;0;0.179637790804495 5.03764545;0;0.0156830467777095 5.1;0;0.0306616409752805 2.87200548;0;0.102694914769112 5.1;0;0.0168647658472966 5.1;0;0.0582378064814051 5.1;0;0.013826212244606 4.94182067;0;0.111354337175275 4.98015058;0;0.0406080786172958 5.1;0;0.0195144109566847 5.1;0;0.0295312136888872 4.71457906;0;0.0369417983742196 5.1;0;0.0209529994115203 4.83504449;0;0.0450740022341558 5.1;0;0.0109460532921299 5.1;0;0.0305110523859069 5.1;0;0.0126062207802022 4.92539357;0;0.121338257686087 4.75564682;0;0.0517866170934525 0.54757015;2;0.0744593100038622 5.1;0;0.0680063610385018 5.1;0;0.0269978138644179 5.1;0;0.0104217955312262 5.1;0;0.0585189173472076 4.98562629;0;0.0220079832309669 
4.8733744;0;0.0202744116065669 5.1;0;0.128689876754984 5.1;0;0.0406652427592766 5.1;0;0.0819368460575075 3.52908966;0;0.118940729676087 5.1;0;0.129128047189511 1.91238877;1;0.349182907801097 5.1;0;0.0613716643530184 5.1;0;0.0221811535207344 5.1;0;0.0308061045008014 5.1;0;0.0821783705886902 3.66324435;0;0.0119668258793073 4.90896646;0;0.0556877846565487 5.1;0;0.0568517295481107 4.81587953;0;0.0384545685081615 1.64544833;2;0.216608170460797 5.1;0;0.0513690693271079 5.1;0;0.0530938350382899 2.49965777;0;0.0523528584787639 0.84873374;2;0.0434521552153967 1.4017796;2;0.240352026793133 4.678987;0;0.157440356977405 5.01300479;0;0.0442398371489615 4.90896646;0;0.045471418875749 3.55099247;0;0.135885759509194 5.1;0;0.034195755503539 5.1;0;0.0258727128010434 5.01574265;0;0.0533463520835407 5.1;0;0.125271581459191 2.95687885;0;0.0987219651520306 5.1;0;0.175885559671231 4.92265572;0;0.0882890474034848 5.1;0;0.0575281330528033 5.1;0;0.0595637099786077 0.21902807;0;0.0524545252105588 5.1;0;0.0597857990209908 5.1;0;0.135736217778765 5.1;0;0.0320042046602352 5.1;0;0.129760851643397 5.1;0;0.0410491417291457 5.1;0;0.0484822183555051 3.56468172;0;0.0989191911222703 0.33675565;0;0.044971695177011 5.1;0;0.017930873287524 5.1;0;0.0464054559670211 3.92060233;1;0.110603444319309 5.1;0;0.468619428752399 5.1;0;0.0340256919716879 3.93839836;1;0.0895430546317184 4.78028748;0;0.0439433647286151 5.1;0;0.0524390328863264 2.77344285;2;0.0309686211363875 5.1;0;0.0961798244034202 4.39288159;1;0.149852715192906 3.54004107;0;0.027745457752445 5.1;0;0.0282103329742933 5.1;0;0.160024774622806 5.1;0;0.15680145950367 5.1;0;0.0203234062835686 5.1;0;0.109084525220942 1.54688569;1;0.104050139514415 5.1;0;0.0289979353124471 4.87885011;0;0.445446486175068 5.1;0;0.0157396362014509 4.75838467;0;0.0730187314443962 5.1;0;0.0206829008610674 4.76659823;0;0.0465280130980774 1.75770021;2;0.182299204357218 3.17590691;0;0.0644665878364599 5.1;0;0.0842973204504156 5.1;0;0.0908198534702436 5.09787817;0;0.0491480997971477 
5.1;0;0.0210077518622522 5.06502396;0;0.2597702295732 5.1;0;0.0281179845610657 4.14784394;1;0.250250815111444 5.1;0;0.0326832357553594 5.1;0;0.0811940634036564 1.12936345;1;0.076219272638205 4.47364818;0;0.0674822305361209 1.39904175;1;0.143911958373039 0.95003423;2;0.32119201685286 5.1;0;0.0226518397835687 3.42778918;0;0.0556158250618641 3.06639289;0;0.0978190320156657 5.00752909;0;0.0436811774337183 2.60643395;0;0.168575381421114 5.1;0;0.145485452645115 4.98562628;0;0.0332101524736171 5.1;0;0.230816673696428 3.93839835;1;0.188154627451453 0.0958247700000001;0;0.211257476767044 5.1;0;0.0719852924409547 5.1;0;0.173475384445915 5.1;0;0.0709593242470968 5.1;0;0.0464621401717789 5.1;0;0.0761865577088471 5.1;0;0.0476751259673975 5.1;0;0.0363381773439664 5.1;0;0.0450077235269245 5.1;0;0.0506246144330195 5.1;0;0.190192267919841 3.70978782;0;0.030169314384533 5.1;0;0.166377888613543 5.1;0;0.00864800329759686 5.1;0;0.0961831274376284 5.1;0;0.0929287584966867 3.84668036;0;0.0669342577163802 5.1;0;0.0199900283381063 4.93360712;0;0.0612139094235387 4.29295003;1;0.138284068255858 3.69336071;0;0.0200207562774305 5.1;0;0.0361842424992073 2.84736482;2;0.0273379284958099 5.1;0;0.090496333435427 3.46338125;0;0.0940790443877615 1.13757701;1;0.15572327850654 5.1;0;0.0157181970448852 3.41136208;0;0.128794691977512 5.1;0;0.199126268085363 5.1;0;0.0690940910615783 5.1;0;0.0632869764572385 4.76112252;0;0.0264778397325884 5.1;0;0.239609434949282 3.54825462;0;0.0786513147871409 0.0876112200000003;2;0.0989948188336415 2.45585216;0;0.152535183984513 0.55304586;2;0.0344498737000914 4.70362765;0;0.0120233775127062 5.1;0;0.0935703495947735 5.1;0;0.152168769940268 4.11498973;0;0.277935903062757 5.1;0;0.198739007037894 2.80355921;0;0.0713326340717218 5.1;0;0.0678745701939883 5.1;0;0.0862348028110293 5.1;0;0.067924385220983 4.77207393;0;0.0258605935400383 5.1;0;0.0281855314646871 0.0821355300000004;0;0.0598710805621476 5.1;0;0.0322276804424166 1.72484599;1;0.0875671356702662 
5.1;0;0.0773072263492233 4.95003423;0;0.0219108283120041 3.50718686;0;0.0287483145035754 5.1;0;0.331093858574902 4.80766598;0;0.120894011174594 4.93086927;0;0.0485751150425787 3.85215605;2;0.221279052652083 4.53251198;1;0.0556231204163556 4.83504449;0;0.0573060186739081 5.05681041;0;0.0506008682496808 5.1;0;0.0238461229049421 3.45242984;0;0.18979486054871 5.0294319;0;0.00776760681502775 3.75633128;2;0.090166426333899 5.1;0;0.198279394868102 0.88158795;2;0.0922088216031897 5.1;0;0.0120762213877961 4.92539356;0;0.0557635771474935 3.62491444;0;0.136471767767139 4.74743326;0;0.0584388779057793 5.1;0;0.0310802641541435 5.1;0;0.0306393945545215 3.01984942;2;0.0184959598855139 4.79123887;0;0.124106760127093 5.1;0;0.0227369522404558 5.05954825;0;0.0998136360339617 4.94455852;0;0.0656377370043648 4.90896646;0;0.0469706958127674 4.96098562;0;0.00953672829013238 4.96098562;0;0.0184193230551946 4.79945243;0;0.143457225733275 5.1;0;0.058236905279497 5.1;0;0.0671644269698475 5.1;0;0.07904559275342 5.1;0;0.0187368438275174 4.84873375;0;0.058069235912515 4.78028747;0;0.0407842501353239 4.8733744;0;0.0565304770259432 2.75701574;0;0.0608329593294796 5.1;0;0.0381048577116531 5.1;0;0.0638435063346275 4.45448323;0;0.0473930963125483 5.1;0;0.0730481130902215 0.13689254;0;0.044071417728242 5.1;0;0.0522278255196591 3.60574949;0;0.0766078852190683 5.1;0;0.0176767265770772 0.79397673;2;0.179317950323993 4.35728953;1;0.0399951563379704 5.04038329;0;0.0900438791413524 3.52635181;0;0.194929352474602 5.1;0;0.0452995603041015 5.1;0;0.0380140088261407 5.1;0;0.0187632918782179 5.1;0;0.13718083924808 5.1;0;0.185778901051424 4.33264887;1;0.0583033395167512 5.1;0;0.0559998329108749 5.1;0;0.0725747999794013 5.1;0;0.0839766667758446 1.56194388;1;0.119919924783594 5.1;0;0.143677591874477 5.1;0;0.169820823112913 4.91991786;0;0.128951548930739 5.1;0;0.0649492107984914 5.1;0;0.0178005623217831 5.1;0;0.136405123467865 5.1;0;0.141109561148366 5.1;0;0.163244895833014 5.1;0;0.0798197434384656 
5.045859;0;0.0116967322328968 5.1;0;0.0335147705819701 5.1;0;0.0390709270534765 5.06776181;0;0.120719594153454 4.34360028;1;0.155336439524638 5.1;0;0.0366705690059201 3.57563313;0;0.0620515122249638 4.95550992;0;0.0112760840119696 5.1;0;0.196802829292979 5.1;0;0.145231924588957 5.1;0;0.0866557163652161 4.93360711;0;0.0467049935688746 5.1;0;0.0812845395170711 5.1;0;0.0720694863710661 5.1;0;0.142222409198716 4.81314168;0;0.022195024321295 5.1;0;0.131952643399544 5.1;0;0.0240658920201251 5.06228611;0;0.0118352839437816 5.1;0;0.360895957895351 3.47433265;0;0.0229671373595316 2.7816564;0;0.281160810539717 5.1;0;0.0760372851706806 5.1;0;0.0387333891911572 5.03216975;0;0.0115445262500807 5.1;0;0.0293403245959061 4.77481177;0;0.0134666633283859 5.1;0;0.0289070523901672 1.69336072;1;0.0554425002427984 5.1;0;0.048663678623604 5.06228611;0;0.0847591060396069 4.23545517;1;0.10579989419562 5.1;0;0.0281925204120862 4.77481178;0;0.0212147333404866 5.1;0;0.140713998197325 1.74264203;1;0.143587640914452 5.1;0;0.0499688997707345 5.1;0;0.114666546367841 2.97604381;0;0.112267462511512 5.1;0;0.123994322947543 2.78986995;0;0.0779065998596189 5.1;0;0.0418293456146675 5.1;0;0.0461275502643613 5.1;0;0.167406957109542 5.1;0;0.338774857970538 5.1;0;0.020192396998527 5.1;0;0.025880465713936 5.1;0;0.0251634218998635 5.1;0;0.018014406621443 5.1;0;0.0432385564248942 5.1;0;0.120255620658389 1.11704312;2;0.200038758441338 4.72553046;0;0.104753440905827 5.0568104;0;0.0485394949136247 5.1;0;0.088609153032598 4.93086927;0;0.0850815703222186 5.1;0;0.0871263009452556 3.47980835;0;0.0704882640926705 4.92813142;0;0.0242343942800168 4.45448323;0;0.0882026309932744 2.4421629;0;0.043067676660702 4.95550993;0;0.0651514363939465 4.862423;0;0.045260358227991 5.1;0;0.0182790151509782 3.84394251;0;0.021359614590547 0.33127994;0;0.18180674562599 3.41683779;0;0.0407523466760758 4.82409309;2;0.0828182960851518 1.86447639;2;0.11632884626457 3.98083504;0;0.0296247176602597 4.66803559;0;0.116553239174873 
4.85968515;0;0.0628112253894227 2.76522929;0;0.383571953917168 5.1;0;0.0242673421695856 4.92539357;0;0.0546305178221778 4;2;0.0706627122894826 2.81451061;0;0.2851491810151 5.07597536;0;0.0706387430387872 4.89253936;0;0.0515653284494606 5.1;0;0.239280003028764 5.1;0;0.0593208924728691 5.1;0;0.0261232586858199 5.1;0;0.00753638440665556 5.1;0;0.0202232609399706 2.57631759;0;0.0243529968430643 5.1;0;0.0220918694738477 5.1;0;0.139630641107043 1.94387406;2;0.245438602215962 4.95277207;0;0.205886671601374 3.4770705;0;0.0328036956546308 5.1;0;0.113080609119313 4.85694729;0;0.0804656289367113 4.75564682;0;0.0566698388031526 5.1;0;0.0192779441595981 5.1;0;0.0583886588527337 4.07939768;2;0.130080028557818 5.1;0;0.0328337436442445 5.1;0;0.0302926843834449 3.59206023;0;0.250449246324563 3.4551677;0;0.0912814579692054 3.97262149;2;0.0963039151620939 5.1;0;0.0459258969725858 5.1;0;0.0731015859009194 0.22176592;0;0.118262566414714 5.1;0;0.0466862497749743 2.72142368;0;0.0936683033211882 5.1;0;0.0767607649570878 5.1;0;0.0468919746895153 5.1;0;0.068452662136315 3.1266256;0;0.0940204813326199 4.34770705;2;0.103124102703152 1.44284736;1;0.190517517292287 5.1;0;0.0389307371225473 3.50992471;0;0.0532905388839085 1.69472964;1;0.11734076027626 3.70157426;0;0.0348442848947236 1.52772074;1;0.0298795627853447 0.30390144;0;0.149627956852388 3.27720739;0;0.0975312984211435 5.1;0;0.0390980575397907 4.54757015;1;0.200701155649719 5.1;0;0.0447262839511057 5.1;0;0.0344182916251388 5.1;0;0.111445884108197 5.1;0;0.316447169710587 4.30390144;2;0.0852989599990972 5.1;0;0.0535447803182484 0.10951403;0;0.205485614960953 5.1;0;0.00959083882127316 2.61464751;2;0.0891667405165453 4.96646132;0;0.0302925962988618 4.89801506;0;0.0467001550239749 0.10677618;0;0.0694515429796142 5.1;0;0.0135405795870586 0.28473648;0;0.10267287022567 5.00752909;0;0.0236787680142521 3.32922656;0;0.0431446480240113 3.46338124;0;0.0851978673210169 5.1;0;0.0690573810206148 5.0568104;0;0.0300199545423678 5.1;0;0.0825049544936449 
5.1;0;0.0700601469969745 5.04859685;0;0.0501137530757767 5.1;0;0.0196473096566562 5.1;0;0.0530848010780243 5.1;0;0.131579428097415 2.43394935;0;0.0410953375973485 5.1;0;0.0693209746760158 5.1;0;0.0262412280450879 5.1;0;0.0489305102000751 5.1;0;0.0660134608400133 5.1;0;0.0225887795240636 5.1;0;0.0157330261873337 5.1;0;0.0402772754837501 5.1;0;0.0518644617586544 2.95414099;0;0.0657304997442671 5.1;0;0.292079327619485 4.71731691;0;0.0323488774745237 1.54277892;1;0.167562489676081 4.8788501;0;0.319504939476459 2.46954141;0;0.0648969073217075 4.95003422;0;0.0231077039697201 4.93360712;0;0.0296603371944019 5.1;0;0.0201752975106996 5.1;0;0.0256950265313263 3.4934976;0;0.0498787843758024 4.70362765;0;0.066726082399921 4.92539357;0;0.0135680996732136 2.50239562;0;0.11247821789038 5.1;0;0.0218428591735898 5.1;0;0.0277280948231692 1.17043121;1;0.185181812683748 5.1;0;0.0567664376845304 4.0273785;2;0.10427187073125 4.8788501;0;0.0232509137753509 4.86516085;0;0.00969315842683457 4.94729637;0;0.00964174878990453 4.75838467;0;0.0483631791197813 5.1;0;0.0307983421552519 4.98836414;0;0.239001357520507 5.1;0;0.0353549848151172 2.37919234;1;0.0998291944087202 5.1;0;0.0128507874344947 5.1;0;0.0483210400845558 3.62765229;0;0.12678818251252 5.1;0;0.117954602108835 1.20739219;2;0.122367594025414 5.1;0;0.0150762950675871 5.1;0;0.0106117861617231 5.1;0;0.138477513460652 5.1;0;0.0756773617999785 4.98288843;0;0.0681233288661185 5.1;0;0.0759681821574337 3.57289527;0;0.121632854260233 3.00752909;1;0.119283530489783 2.19028063;2;0.250246083358751 4.91170431;0;0.0569357776574605 5.1;0;0.0257390478454749 3.80835045;0;0.112325429442321 5.1;0;0.0192328676605597 5.1;0;0.05417216287559 5.1;0;0.171994984425697 5.03216974;0;0.0508870613184521 5.1;0;0.12174698738491 5.1;0;0.0542823403203996 5.1;0;0.00785612361512411 5.1;0;0.0262079129660746 5.06776181;0;0.160272534699377 4.83230664;0;0.0588889223593092 4.87885011;0;0.101917266873826 4.95824778;0;0.0818594038320805 0.29979466;1;0.245735183477793 
4.80492813;0;0.0377155254521455 5.1;0;0.0214500340821782 3.06639288;0;0.0559150953209658 5.1;0;0.0456783699360984 0.0246406600000002;0;0.0831952963719249 3.60574949;0;0.0412937826310978 5.1;0;0.0690793029168124 5.1;0;0.0307694086289323 5.1;0;0.0230021415238144 5.01574264;0;0.0175967640119062 5.1;0;0.035595969639834 2.46954141;0;0.0890610717682744 4.96919918;0;0.0190095845559444 4.93360712;0;0.164256429751163 5.1;0;0.11419810828784 4.91170431;0;0.0135321082121246 5.1;0;0.172851699695771 4.92813142;0;0.132103032744219 5.1;0;0.0270534511074377 5.1;0;0.0345923220174356 5.1;0;0.0620744580649914 5.1;0;0.0122256107340948 3.44695414;0;0.152192813804469 4.72279261;0;0.0116708377647674 5.02943189;0;0.152607480850691 4.96646133;0;0.116546304268899 5.1;0;0.0297928464754931 4.92539356;0;0.0292586442845125 0.06570842;0;0.0768776291244054 3.78370979;0;0.0340773990412956 5.1;0;0.0843282547805705 1.4661191;1;0.251118068350145 5.00479123;0;0.0109869738723867 4.93634497;2;0.191254866322896 4.91170431;0;0.0256993726105704 1.91238878;1;0.125986913527892 4.73648186;0;0.201958707659598 5.1;0;0.118398589686461 5.1;0;0.0730524827436349 0.00273784999999993;0;0.0731500674195055 4.73921971;0;0.0618906366455173 4.88158795;0;0.0422023776383771 5.01574264;0;0.123767449582226 0.62696783;1;0.18321610496148 5.00205339;0;0.0151925568280677 5.1;0;0.0922734192537755 5.1;0;0.0732336022838411 5.1;0;0.0437364133120584 3.38124572;0;0.0116084945722559 5.1;0;0.0483328100616901 5.1;0;0.143833483504035 5.08966462;0;0.0899379116479227 3.06639288;0;0.0333623768988164 5.1;0;0.0464595164307768 5.1;0;0.0334170417482645 3.98631074;2;0.0355373194481936 3.40041068;0;0.288406675560547 4.19849418;1;0.0682872150998912 2.31348391;0;0.130122636289892 5.1;0;0.0696506326695853 5.1;0;0.0372282874747539 4.29568789;2;0.0606178494830117 4.54209445;2;0.0131260616042598 4.82135523;0;0.0222996995316332 5.1;0;0.00990367335303251 5.00205338;0;0.0283345582306328 5.1;0;0.0377844299257214 4.88980151;0;0.0593949662606022 
5.1;0;0.0243600361712977 5.1;0;0.017009303321756 3.22792608;0;0.101283401330801 2.74332649;0;0.0378354669272506 4.44900753;1;0.105129543860888 4.71731691;0;0.042427872270323 4.90896646;0;0.00957931303549671 4.71731691;0;0.0333161763777557 4.96646133;0;0.0641061930466035 2.41478439;0;0.129567990279799 5.1;0;0.0866897106982024 5.1;0;0.049228300419762 4.88980151;0;0.0433620617548289 3.43326489;0;0.0478267477910472 3.45790554;0;0.0646315526608957 1.48528405;1;0.135271928514227 4.94729638;0;0.0324061572611538 2.4531143;0;0.0735975994249402 5.1;0;0.0164474754359788 2.75975359;1;0.103701976488005 5.1;0;0.123672832496137 5.1;0;0.0342102113990189 5.1;0;0.162204544094526 4.8569473;0;0.0232649248817714 4.85420945;0;0.0322594672016237 4.35865845;2;0.335738134739815 1.9247091;2;0.105035313577853 5.1;0;0.056117414868092 5.02669405;0;0.0358404239300735 4.84873374;0;0.0265987813029332 5.1;0;0.0189680436185365 5.1;0;0.0302551360418141 4.77481177;0;0.034471762926995 4.96919918;0;0.118080980202574 3.34839151;0;0.104017533355383 4.96098562;0;0.0505946469803829 2.95140315;0;0.230790381466314 0.26557152;0;0.122604171177993 5.1;0;0.192760339276452 4.75017112;0;0.0181897167677694 4.96646133;0;0.0196208205594554 5.1;0;0.114278768955411 5.1;0;0.037574418107356 4.90349076;0;0.0633175530374639 3.09650924;2;0.285152171583681 5.1;0;0.0258673405018307 3.44695414;0;0.205471653002281 5.1;0;0.0284975626731507 4.82135524;0;0.029370102163915 4.68172485;0;0.0406859539220173 5.1;0;0.081447316966072 5.1;0;0.050818642859733 5.1;0;0.0422410976780304 5.1;0;0.0121249001482177 4.77481177;0;0.0149467259847204 4.76112252;0;0.01086069624775 4.98836413;0;0.0527459612331651 5.1;0;0.0135999065623741 5.1;0;0.0416964208294853 5.1;0;0.220962127070157 5.1;0;0.111257765520596 2.57631759;0;0.185583079740053 4.91170431;0;0.0214536450114672 5.1;0;0.0707985724111699 5.1;0;0.109657028528921 5.1;0;0.11819894494986 5.1;0;0.0601593070591525 4.95003422;0;0.162741652280116 4.88980151;0;0.0169997914879276 
4.97741273;0;0.0346191661017832 5.1;0;0.0329907413059913 5.1;0;0.0455027832287941 0.64065709;2;0.0694704352230601 2.66940452;1;0.153308068200817 4.11772759;2;0.30461464370814 5.1;0;0.222714713307577 5.1;0;0.0299726774005048 2.54620123;0;0.0685271710912403 5.1;0;0.0671608700030678 4.78576318;0;0.0126031286506835 1.89185489;2;0.0235512880079929 2.63107461;0;0.0510749489177065 5.07871321;0;0.0116959323328946 4.14373717;1;0.0462783230084176 3.88227242;1;0.221262128469525 5.1;0;0.188897875400699 4.99931553;0;0.188785409858492 5.1;0;0.0124627062297452 1.64681725;1;0.102771336062466 5.1;0;0.0241821228387072 5.1;0;0.0103411949808374 5.02121835;0;0.0341059627319766 5.1;0;0.0189932910174078 4.75017112;0;0.0645608991304864 5.1;0;0.351915223671728 5.1;0;0.0427158498592554 4.98836413;0;0.0445372911518253 1.6605065;1;0.124339816275069 4.98562629;0;0.0349618114380367 5.1;0;0.0313335762892864 5.1;0;0.0279907676021112 4.88706365;0;0.140625459121614 5.1;0;0.154108584096154 5.1;0;0.0157854518352785 5.1;0;0.0446768293106895 2.50513347;0;0.352071011126565 4.99110199;0;0.0226934411892257 4.8569473;0;0.0190936847426506 1.03216974;1;0.109364847730074 5.1;0;0.0220611243343054 5.1;0;0.172716491875067 4.93086927;0;0.0344405881983917 0.61190965;1;0.156721005037756 5.1;0;0.0319824955758933 5.1;0;0.0257657345811841 4.77754963;0;0.070333316890704 5.1;0;0.0997028477773358 3.52908967;0;0.253283466322099 5.07049966;0;0.154594106775727 4.89527721;2;0.0277439855305151 5.1;0;0.0471160432525796 5.1;0;0.020841810169524 5.1;0;0.0697016756284234 4.89253936;0;0.0518669376012396 5.1;0;0.0300448739112126 5.1;0;0.0536217928806725 5.1;0;0.219646489899328 5.08418891;0;0.0631044243083643 5.1;0;0.114296742273921 0.23819302;2;0.281022787959471 5.06502396;0;0.0348167191610417 3.14031486;0;0.116004587943088 5.1;0;0.0141997765168253 5.01300479;0;0.160901634736699 5.1;0;0.0728747392686915 5.1;0;0.054773708282168 5.1;0;0.049449511756524 4.96098563;0;0.0811043875096385 5.1;0;0.0103482661634495 
5.01574264;0;0.0763618589066209 5.1;0;0.0110411335746794 3.4715948;0;0.0182873864308401 4.20533881;0;0.0479450365487329 5.1;0;0.0761582705611498 4.92813142;0;0.0803513035937754 5.1;0;0.0376845871199479 4.96098562;0;0.01288270762435 5.1;0;0.107715361694261 4.81861739;0;0.0228207517747205 4.8459959;0;0.0304780554079346 5.1;0;0.0724284202051739 5.1;0;0.179151188403807 3.55099247;0;0.0402416282978058 4.37919233;1;0.226239464411885 5.1;0;0.0324961080128852 1.4770705;1;0.0966992993192189 4.85147159;0;0.0342056973156347 0.5119781;2;0.106585273548696 5.1;0;0.122895288070493 5.01848049;0;0.0186491068962062 5.1;0;0.16986898965503 5.1;0;0.0283862118328961 5.1;0;0.0311789524046263 5.1;0;0.333953629337623 4.81861739;0;0.160165245686393 5.1;0;0.06385411607601 5.1;0;0.0585504576489596 5.1;0;0.0143514750952869 3.87542779;1;0.136110623550168 5.1;0;0.133677317976575 2.82272417;0;0.218829691065596 5.1;0;0.0689323986135805 4.862423;0;0.0193876582947661 4.95550993;0;0.0366207224071346 5.1;0;0.0342169776938064 5.1;0;0.136244303853432 3.46338125;0;0.158673749325179 4.93086927;0;0.135212369831121 5.1;0;0.089703209112717 4.90896646;0;0.046908023405294 4.89253936;0;0.048743991999746 4.89664614;1;0.0591052211662339 4.48596851;1;0.0938098916237119 1.37850787;1;0.122245042697114 5.1;0;0.144746381310693 5.1;0;0.0621845916166001 5.1;0;0.0121304805193659 5.1;0;0.126974072455525 3.44695414;2;0.0566038278181743 5.1;0;0.104681094718154 5.1;0;0.0386373493487475 4.84873374;2;0.0621558889736667 2.51060917;0;0.0173673418528566 5.1;0;0.149173457800801 1.66461328;2;0.0533725086109214 4.95824778;0;0.0159123077582756 5.1;0;0.0275207258679133 4.81861739;0;0.0186376766277808 5.1;0;0.154548032554716 5.1;0;0.00699251392500869 0.0219028100000003;0;0.0991459479220476 4.87611226;0;0.0247150608277217 4.97193703;0;0.0784061460064993 4.85420944;0;0.0528484562968377 3.71252567;0;0.275904342722701 1.11430527;2;0.0817887164264873 5.1;0;0.104734469386339 4.58590007;2;0.113290854123675 5.1;0;0.051545278633782 
5.1;0;0.0263208670536911 0.5174538;2;0.0816117426864073 5.1;0;0.139249113350902 1.18548939;2;0.122582418308759 4.76933607;0;0.0355722930071825 4.94182067;0;0.0624896788057638 5.1;0;0.12611675540099 1.43874059;1;0.122238115817824 5.00479124;0;0.0332109975614162 5.1;0;0.033460312107457 5.1;0;0.222690516467691 3.87405887;0;0.296766224457376 3.40041068;0;0.0693464488042977 2.89938398;0;0.0748533397493764 3.56194387;0;0.0284242055115042 5.1;0;0.0647766339429869 5.1;0;0.0431565180544793 3.64134155;0;0.137592514440101 5.1;0;0.0410318233601041 2.47227927;0;0.153808322136062 4.99110199;0;0.113213995971918 4.80766599;0;0.124916646060964 5.1;0;0.188270519746314 2.94045174;0;0.0342199292167096 5.1;0;0.0722934279261921 4.87611225;0;0.0254750949194184 4.96919918;0;0.0236181675504492 5.1;0;0.0627805562064464 4.42710472;0;0.0363247610054606 5.1;0;0.010129319892382 5.01026694;0;0.0465160468003177 5.1;0;0.0523419754537118 5.1;0;0.0745632140643457 2.81998631;0;0.175694501125417 5.1;0;0.114975271729299 0.56262834;1;0.210918133407889 1.52498289;2;0.0415845567009256 0.0355920599999999;0;0.240570959568895 5.1;0;0.0369265306288627 5.1;0;0.0164950813384234 3.75906913;0;0.0294160621262459 5.1;0;0.0248551998766892 5.1;0;0.0572176550561291 5.1;0;0.0160742831850174 2.52703628;0;0.0628350623404869 4.74469542;0;0.0434418457539578 5.1;0;0.031650637020977 5.1;0;0.0120288191113356 5.1;0;0.240992511122233 5.0349076;0;0.0157859743563687 5.1;0;0.0165197669989052 0.0739219699999998;0;0.0335375679905378 5.1;0;0.03608810616335 4.98562628;0;0.104328278228354 5.1;0;0.0202736579697548 5.1;0;0.0446238725129707 3.53730321;0;0.167606329010574 5.07323751;0;0.0945220465482406 5.1;0;0.0407798791216057 5.1;0;0.075076227011211 5.1;0;0.0510039015439666 1.55646817;1;0.283069363641556 4.95824778;0;0.0477880498047692 5.1;0;0.103669164723045 5.1;0;0.0117373950604868 5.1;0;0.0980654159805797 5.1;0;0.057030120178508 4.87063655;0;0.0467703022058293 0.0219028000000003;0;0.0700097821949064 5.1;0;0.0147247938704148 
5.04312115;0;0.101915467206554 5.07323751;0;0.122473240321746 4.8843258;0;0.130867400981453 5.1;0;0.105359447314031 5.1;0;0.0482567575584597 5.1;0;0.0965412148508046 4.71184121;0;0.0166705475104979 5.1;0;0.0808749758169137 5.1;0;0.04559393315232 5.0294319;0;0.105890384721446 5.1;0;0.00896161595179207 5.1;0;0.106370432282164 4.96919918;0;0.142809608246686 5.1;0;0.0223928794634472 4.73921972;0;0.0168598848597855 5.1;0;0.0207120427376446 5.1;0;0.0223447301821781 0.68172485;2;0.0779935576787588 3.43326489;0;0.130308110632733 5.04859685;0;0.0181017402368145 0.63518138;0;0.0579636222979579 4.90622861;0;0.263801203131802 1.11156742;2;0.260429669947654 5.1;0;0.0864966939634125 4.88980151;0;0.0136397328657395 0.28199863;0;0.0822846131795444 5.1;0;0.170138429046341 5.1;0;0.0138259421806068 4.77481178;0;0.128417955207258 5.1;0;0.00779760679321518 4.09308693;0;0.0939689434027002 4.54209446;2;0.0729219662836755 5.1;0;0.0187755801258934 5.1;0;0.0298410662999805 5.1;0;0.0347253277371032 5.1;0;0.186843825906943 4.71457906;0;0.0595984334355489 5.1;0;0.0202663791345977 3.94524298;2;0.0206292372752659 3.64407939;0;0.262243457746113 5.1;0;0.0287643448635459 4.32169747;1;0.0927222814274244 1.61670089;1;0.140237070496546 5.1;0;0.020181263098722 4.85694729;0;0.0406998373921885 5.1;0;0.0788632947391922 5.045859;0;0.0538312680019204 5.1;0;0.017740650883576 5.1;0;0.0672164738905809 5.1;0;0.0182686730655226 5.1;0;0.0298053955720429 3.40041068;0;0.127911557917038 5.05681041;0;0.0450396046977741 4.98562629;0;0.0272473231735773 5.1;0;0.00967766926123664 5.1;0;0.066958719246681 5.1;0;0.038374704654398 3.17180014;1;0.0802475169186456 5.1;0;0.128164204408727 0.0054757099999998;0;0.251486842358589 3.66324436;0;0.0182723416498027 5.1;0;0.0379746071105246 5.1;0;0.0389231258371785 3.20602327;0;0.0519913234142639 4.83504449;0;0.0654905927932451 2.90212184;0;0.137660341467838 4.98288843;0;0.0151208677994138 3.97672827;1;0.112694073461149 5.1;0;0.0884620982595825 3.04312115;1;0.105798314331482 
5.1;0;0.0901691966069233 5.1;0;0.00760803076020259 5.1;0;0.187952411339919 5.1;0;0.0398752488230187 5.1;0;0.0151601984538904 5.1;0;0.0238020906519673 4.85694729;0;0.143535970441318 5.1;0;0.0599276167982669 5.1;0;0.0780289626037487 4.18617385;2;0.0337971537296338 4.67077344;1;0.383595508600018 4.4257358;1;0.142551420773854 4.88980151;0;0.0310837220933826 5.07597536;0;0.0357411413272079 4.71731691;0;0.0167886275133932 5.1;0;0.0841714629752722 5.1;0;0.0394964456584435 5.1;0;0.125129278947247 1.63723477;2;0.0444193264117531 0.69815195;2;0.0759812829994222 5.1;0;0.119989111099959 4.94729638;0;0.0140805668944551 4.7118412;0;0.0667036208629894 4.5174538;0;0.0153933323993384 1.8384668;1;0.129549228749429 4.8459959;0;0.0468927689025054 5.1;0;0.0398678324014978 5.1;0;0.10354071345232 pec/man/0000755000176200001440000000000013571203270011574 5ustar liggesuserspec/man/calPlot.Rd0000644000176200001440000001746213754470547013513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calPlot.R \name{calPlot} \alias{calPlot} \title{Calibration plots for right censored data} \usage{ calPlot( object, time, formula, data, splitMethod = "none", B = 1, M, pseudo, type, showPseudo, pseudo.col = NULL, pseudo.pch = NULL, method = "nne", round = TRUE, bandwidth = NULL, q = 10, bars = FALSE, hanging = FALSE, names = "quantiles", showFrequencies = FALSE, jack.density = 55, plot = TRUE, add = FALSE, diag = !add, legend = !add, axes = !add, xlim = c(0, 1), ylim = c(0, 1), xlab, ylab, col, lwd, lty, pch, cause = 1, percent = TRUE, giveToModel = NULL, na.action = na.fail, cores = 1, verbose = FALSE, cex = 1, ... 
) } \arguments{ \item{object}{A named list of prediction models, where allowed entries are (1) R-objects for which a \link{predictSurvProb} method exists (see details), (2) a \code{call} that evaluates to such an R-object (see examples), (3) a matrix with predicted probabilities having as many rows as \code{data} and as many columns as \code{times}. For cross-validation all objects in this list must include their \code{call}.} \item{time}{The evaluation time point at which predicted event probabilities are plotted against pseudo-observed event status.} \item{formula}{A survival or event history formula. The left hand side is used to compute the expected event status. If \code{formula} is \code{missing}, try to extract a formula from the first element in object.} \item{data}{A data frame in which to validate the prediction models and to fit the censoring model. If \code{data} is missing, try to extract a data set from the first element in object.} \item{splitMethod}{Defines the internal validation design: \code{none/noPlan}: Assess the models in the given \code{data}, usually either in the same data where they are fitted, or in independent test data. \code{BootCv}: Bootstrap cross validation. The prediction models are trained on \code{B} bootstrap samples, that are either drawn with replacement of the same size as the original data or without replacement from \code{data} of the size \code{M}. The models are assessed in the observations that are NOT in the bootstrap sample.} \item{B}{The number of cross-validation steps.} \item{M}{The size of the subsamples for cross-validation.} \item{pseudo}{Logical. Determines the method for estimating expected event status: \code{TRUE}: Use average pseudo-values. 
\code{FALSE}: Use the product-limit estimate, i.e., apply the Kaplan-Meier method for right censored survival and the Aalen-Johansen method for right censored competing risks data.} \item{type}{Either "risk" or "survival".} \item{showPseudo}{If \code{TRUE} the pseudo-values are shown as dots on the plot (only when \code{pseudo=TRUE}).} \item{pseudo.col}{Colour for pseudo-values.} \item{pseudo.pch}{Dot type (see par) for pseudo-values.} \item{method}{The method for estimating the calibration curve(s): \code{"nne"}: The expected event status is obtained in the nearest neighborhood around the predicted event probabilities. \code{"quantile"}: The expected event status is obtained in groups defined by quantiles of the predicted event probabilities.} \item{round}{If \code{TRUE} predicted probabilities are rounded to two digits before smoothing. This may have a considerable effect on computing efficiency in large data sets.} \item{bandwidth}{The bandwidth for \code{method="nne"}} \item{q}{The number of quantiles for \code{method="quantile"} and \code{bars=TRUE}.} \item{bars}{If \code{TRUE}, use barplots to show calibration.} \item{hanging}{Barplots only. If \code{TRUE}, hang bars corresponding to observed frequencies at the value of the corresponding prediction.} \item{names}{Barplots only. Names argument passed to \code{names.arg} of \code{barplot}.} \item{showFrequencies}{Barplots only. 
If \code{TRUE}, show frequencies above the bars.} \item{jack.density}{Gray scale for pseudo-observations.} \item{plot}{If \code{FALSE}, do not plot the results, just return a plottable object.} \item{add}{If \code{TRUE} the line(s) are added to an existing plot.} \item{diag}{If \code{FALSE} no diagonal line is drawn.} \item{legend}{If \code{FALSE} no legend is drawn.} \item{axes}{If \code{FALSE} no axes are drawn.} \item{xlim}{Limits of x-axis.} \item{ylim}{Limits of y-axis.} \item{xlab}{Label for x-axis.} \item{ylab}{Label for y-axis.} \item{col}{Vector with colors, one for each element of object. Passed to \code{\link{lines}}.} \item{lwd}{Vector with line widths, one for each element of object. Passed to \code{\link{lines}}.} \item{lty}{Vector with line style, one for each element of object. Passed to \code{\link{lines}}.} \item{pch}{Passed to \code{\link{points}}.} \item{cause}{For competing risks models, the cause of failure or event of interest} \item{percent}{If TRUE axes labels are multiplied by 100 and thus interpretable on a percent scale.} \item{giveToModel}{List with exactly one entry for each entry in \code{object}. Each entry names parts of the value of the fitted models that should be extracted and added to the value.} \item{na.action}{Passed to \code{\link{model.frame}}} \item{cores}{Number of cores for parallel computing. Passed as value of argument \code{mc.cores} to \code{\link{mclapply}}.} \item{verbose}{if \code{TRUE} report details of the progress, e.g. count the steps in cross-validation.} \item{cex}{Default cex used for legend and labels.} \item{...}{Used to control the subroutines: plot, axis, lines, barplot, legend. See \code{\link{SmartControl}}.} } \value{ list with elements: time, pseudoFrame and bandwidth (NULL for method quantile). 
} \description{ Calibration plots for risk prediction models in right censored survival and competing risks data } \details{ For method "nne" the optimal bandwidth with respect to is obtained with the function \code{\link{dpik}} from the package \code{KernSmooth} for a box kernel function. } \examples{ library(prodlim) library(lava) library(riskRegression) library(survival) # survival dlearn <- SimSurv(40) dval <- SimSurv(100) f <- coxph(Surv(time,status)~X1+X2,data=dlearn,x=TRUE,y=TRUE) cf=calPlot(f,time=3,data=dval) print(cf) plot(cf) g <- coxph(Surv(time,status)~X2,data=dlearn,x=TRUE,y=TRUE) cf2=calPlot(list("Cox regression X1+X2"=f,"Cox regression X2"=g), time=3, type="risk", data=dval) print(cf2) plot(cf2) calPlot(f,time=3,data=dval,type="survival") calPlot(f,time=3,data=dval,bars=TRUE,pseudo=FALSE) calPlot(f,time=3,data=dval,bars=TRUE,type="risk",pseudo=FALSE) ## show a red line which follows the hanging bars calPlot(f,time=3,data=dval,bars=TRUE,hanging=TRUE) a <- calPlot(f,time=3,data=dval,bars=TRUE,hanging=TRUE,abline.col=NULL) lines(c(0,1,ceiling(a$xcoord)), c(a$offset[1],a$offset,a$offset[length(a$offset)]), col=2,lwd=5,type="s") calPlot(f,time=3,data=dval,bars=TRUE,type="risk",hanging=TRUE) set.seed(13) m <- crModel() regression(m, from = "X1", to = "eventtime1") <- 1 regression(m, from = "X2", to = "eventtime1") <- 1 m <- addvar(m,c("X3","X4","X5")) distribution(m, "X1") <- binomial.lvm() distribution(m, "X4") <- binomial.lvm() d1 <- sim(m,100) d2 <- sim(m,100) csc <- CSC(Hist(time,event)~X1+X2+X3+X4+X5,data=d1) fgr <- FGR(Hist(time,event)~X1+X2+X3+X4+X5,data=d1,cause=1) if ((requireNamespace("cmprsk",quietly=TRUE))){ predict.crr <- cmprsk:::predict.crr cf3=calPlot(list("Cause-specific Cox"=csc,"Fine-Gray"=fgr), time=5, legend.x=-0.3, legend.y=1.35, ylab="Observed event status", legend.legend=c("Cause-specific Cox regression","Fine-Gray regression"), legend.xpd=NA) print(cf3) plot(cf3) b1 <- calPlot(list("Fine-Gray"=fgr),time=5,bars=TRUE,hanging=FALSE) 
print(b1) plot(b1) calPlot(fgr,time=5,bars=TRUE,hanging=TRUE) } } \author{ Thomas Alexander Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/cindex.Rd0000644000176200001440000003057114131017560013341 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cindex.R \name{cindex} \alias{cindex} \title{Concordance index for right censored survival time data} \usage{ cindex( object, formula, data, eval.times, pred.times, cause, lyl = FALSE, cens.model = "marginal", ipcw.refit = FALSE, ipcw.args = NULL, ipcw.limit, tiedPredictionsIn = TRUE, tiedOutcomeIn = TRUE, tiedMatchIn = TRUE, splitMethod = "noPlan", B, M, model.args = NULL, model.parms = NULL, keep.models = FALSE, keep.residuals = FALSE, keep.pvalues = FALSE, keep.weights = FALSE, keep.index = FALSE, keep.matrix = FALSE, multiSplitTest = FALSE, testTimes, confInt = FALSE, confLevel = 0.95, verbose = TRUE, savePath = NULL, slaveseed = NULL, na.action = na.fail, ... ) } \arguments{ \item{object}{A named list of prediction models, where allowed entries are (1) R-objects for which a \link{predictSurvProb} method exists (see details), (2) a \code{call} that evaluates to such an R-object (see examples), (3) a matrix with predicted probabilities having as many rows as \code{data} and as many columns as \code{times}. For cross-validation all objects in this list must include their \code{call}.} \item{formula}{A survival formula. The left hand side is used to finde the status response variable in \code{data}. For right censored data, the right hand side of the formula is used to specify conditional censoring models. For example, set \code{Surv(time,status)~x1+x2} and \code{cens.model="cox"}. Then the weights are based on a Cox regression model for the censoring times with predictors x1 and x2. Note that the usual coding is assumed: \code{status=0} for censored times and that each variable name that appears in \code{formula} must be the column name in \code{data}. 
If there are no covariates, i.e. \code{formula=Surv(time,status)~1} the \code{cens.model} is coerced to \code{"marginal"} and the Kaplan-Meier estimator for the censoring times is used to calculate the weights. If \code{formula} is \code{missing}, try to extract a formula from the first element in object.} \item{data}{A data frame in which to validate the prediction models and to fit the censoring model. If \code{data} is missing, try to extract a data set from the first element in object.} \item{eval.times}{A vector of timepoints for evaluating the discriminative ability. At each timepoint the c-index is computed using only those pairs where one of the event times is known to be earlier than this timepoint. If \code{eval.times} is \code{missing} then the largest uncensored event time is used.} \item{pred.times}{A vector of timepoints for evaluating the prediction models. This should either be exactly one timepoint used for all \code{eval.times}, or be as long as \code{eval.times}, in which case the predicted order of risk for the jth entry of \code{eval.times} is based on the corresponding jth entry of \code{pred.times}.} \item{cause}{For competing risks, the event of interest. Defaults to the first state of the response, which is obtained by evaluating the left hand side of \code{formula} in \code{data}.} \item{lyl}{If \code{TRUE} rank subjects according to predicted life-years-lost due to this cause (see Andersen, 2012, in the references) instead of predicted risk.} \item{cens.model}{Method for estimating inverse probability of censoring weights: \code{cox}: A semi-parametric Cox proportional hazard model is fitted to the censoring times \code{marginal}: The Kaplan-Meier estimator for the censoring times \code{nonpar}: Nonparametric extension of the Kaplan-Meier for the censoring times using symmetric nearest neighborhoods -- available for arbitrarily many strata variables on the right hand side of argument \code{formula} but at most one continuous variable. 
See the documentation of the functions \code{prodlim} and \code{neighborhood} from the prodlim package. \code{aalen}: The nonparametric Aalen additive model fitted to the censoring times. Requires the timereg package maintained by Thomas Scheike.} \item{ipcw.refit}{If \code{TRUE} the inverse probability of censoring weights are estimated separately in each training set during cross-validation.} \item{ipcw.args}{List of arguments passed to function specified by argument \code{cens.model}.} \item{ipcw.limit}{Value between 0 and 1 (but not equal to 0!) used to cut for small weights in order to stabilize the estimate at late times where few individuals are observed.} \item{tiedPredictionsIn}{If \code{FALSE} pairs with identical predictions are excluded, unless also the event times are identical and uncensored and \code{tiedMatchIn} is set to \code{TRUE}.} \item{tiedOutcomeIn}{If \code{TRUE} pairs with identical and uncensored event times are excluded, unless also the predictions are identical and \code{tiedMatchIn} is set to \code{TRUE}.} \item{tiedMatchIn}{If \code{TRUE} then pairs with identical predictions and identical and uncensored event times are counted as concordant pairs.} \item{splitMethod}{Defines the internal validation design: \code{none/noPlan}: Assess the models in the given \code{data}, usually either in the same data where they are fitted, or in independent test data. \code{BootCv}: Bootstrap cross validation. The prediction models are trained on \code{B} bootstrap samples, that are either drawn with replacement of the same size as the original data or without replacement from \code{data} of the size \code{M}. The models are assessed in the observations that are NOT in the bootstrap sample. \code{Boot632}: Linear combination of AppCindex and OutOfBagCindex using the constant weight .632.} \item{B}{Number of bootstrap samples. The default depends on argument \code{splitMethod}. When \code{splitMethod} in c("BootCv","Boot632") the default is 100. 
For \code{splitMethod="none"} \code{B} is the number of bootstrap simulations e.g. to obtain bootstrap confidence limits -- default is 0.} \item{M}{The size of the bootstrap samples for resampling without replacement. Ignored for resampling with replacement.} \item{model.args}{List of extra arguments that can be passed to the \code{predictSurvProb} methods. The list must have an entry for each entry in \code{object}.} \item{model.parms}{Experimental. List of with exactly one entry for each entry in \code{object}. Each entry names parts of the value of the fitted models that should be extracted and added to the value.} \item{keep.models}{Logical. If \code{TRUE} keep the models in object. Since fitted models can be large objects the default is \code{FALSE}.} \item{keep.residuals}{Experimental.} \item{keep.pvalues}{Experimental.} \item{keep.weights}{Experimental.} \item{keep.index}{Logical. If \code{FALSE} remove the bootstrap or cross-validation index from the output list which otherwise is included in the method part of the output list.} \item{keep.matrix}{Logical. If \code{TRUE} add all \code{B} prediction error curves from bootstrapping or cross-validation to the output.} \item{multiSplitTest}{Experimental.} \item{testTimes}{A vector of time points for testing differences between models in the time-point specific Brier scores.} \item{confInt}{Experimental.} \item{confLevel}{Experimental.} \item{verbose}{if \code{TRUE} report details of the progress, e.g. count the steps in cross-validation.} \item{savePath}{Place in your filesystem (directory) where training models fitted during cross-validation are saved. If \code{missing} training models are not saved.} \item{slaveseed}{Vector of seeds, as long as \code{B}, to be given to the slaves in parallel computing.} \item{na.action}{Passed immediately to model.frame. Defaults to na.fail. If set otherwise most prediction models will not work.} \item{...}{Not used.} } \value{ Estimates of the C-index. 
} \description{ In survival analysis, a pair of patients is called concordant if the risk of the event predicted by a model is lower for the patient who experiences the event at a later timepoint. The concordance probability (C-index) is the frequency of concordant pairs among all pairs of subjects. It can be used to measure and compare the discriminative power of risk prediction models. The function provides an inverse of the probability of censoring weighted estimate of the concordance probability to adjust for right censoring. Cross-validation based on bootstrap resampling or bootstrap subsampling can be applied to assess and compare the discriminative power of various regression modelling strategies on the same set of data. } \details{ Pairs with identical observed times, where one is uncensored and one is censored, are always considered usable (independent of the value of \code{tiedOutcomeIn}), as it can be assumed that the event occurs at a later timepoint for the censored observation. For uncensored response the result equals the one obtained with the functions \code{rcorr.cens} and \code{rcorrcens} from the \code{Hmisc} package (see examples). 
} \examples{ # simulate data based on Weibull regression library(prodlim) set.seed(13) dat <- SimSurv(100) # fit three different Cox models and a random survival forest # note: low number of trees for the purpose of illustration library(survival) library(randomForestSRC) cox12 <- coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE) cox1 <- coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE) cox2 <- coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE) rsf1 <- rfsrc(Surv(time,status)~X1+X2,data=dat,ntree=15,forest=TRUE) # # compute the apparent estimate of the C-index at different time points # A1 <- pec::cindex(list("Cox X1"=cox1, "RSF"=rsf1), formula=Surv(time,status)~X1+X2, data=dat, eval.times=10) ApparrentCindex <- pec::cindex(list("Cox X1"=cox1, "Cox X2"=cox2, "Cox X1+X2"=cox12, "RSF"=rsf1), formula=Surv(time,status)~X1+X2, data=dat, eval.times=seq(1,15,1)) print(ApparrentCindex) plot(ApparrentCindex) # # compute the bootstrap-crossvalidation estimate of # the C-index at different time points # set.seed(142) bcvCindex <- pec::cindex(list("Cox X1"=cox1, "Cox X2"=cox2, "Cox X1+X2"=cox12, "RSF"=rsf1), formula=Surv(time,status)~X1+X2, data=dat, splitMethod="bootcv", B=5, eval.times=seq(1,15,1)) print(bcvCindex) plot(bcvCindex) # for uncensored data the results are the same # as those obtained with the function rcorr.cens from Hmisc set.seed(16) dat <- SimSurv(30) dat$status=1 fit12 <- coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE) fit1 <- coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE) fit2 <- coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE) Cpec <- pec::cindex(list("Cox X1+X2"=fit12,"Cox X1"=fit1,"Cox X2"=fit2), formula=Surv(time,status)~1, data=dat) p1 <- predictSurvProb(fit1,newdata=dat,times=10) p2 <- predictSurvProb(fit2,newdata=dat,times=10) p12 <- predictSurvProb(fit12,newdata=dat,times=10) if (requireNamespace("Hmisc",quietly=TRUE)){ library(Hmisc) harrelC1 <- rcorr.cens(p1,with(dat,Surv(time,status))) harrelC2 <- 
rcorr.cens(p2,with(dat,Surv(time,status))) harrelC12 <- rcorr.cens(p12,with(dat,Surv(time,status))) harrelC1[["C Index"]]==Cpec$AppCindex[["Cox.X1"]] harrelC2[["C Index"]]==Cpec$AppCindex[["Cox.X2"]] harrelC12[["C Index"]]==Cpec$AppCindex[["Cox.X1.X2"]] } # # competing risks # library(riskRegression) library(prodlim) set.seed(30) dcr.learn <- SimCompRisk(30) dcr.val <- SimCompRisk(30) pec::cindex(CSC(Hist(time,event)~X1+X2,data=dcr.learn),data=dcr.val) fit <- CSC(Hist(time,event)~X1+X2,data=dcr.learn) cif <- predictRisk(fit,newdata=dcr.val,times=3,cause=1) pec::cindex(list(fit),data=dcr.val,times=3) } \references{ TA Gerds, MW Kattan, M Schumacher, and C Yu. Estimating a time-dependent concordance index for survival prediction models with covariate dependent censoring. Statistics in Medicine, Ahead of print:to appear, 2013. DOI = 10.1002/sim.5681 Wolbers, M and Koller, MT and Witteman, JCM and Gerds, TA (2013) Concordance for prognostic models with competing risks Research report 13/3. Department of Biostatistics, University of Copenhagen Andersen, PK (2012) A note on the decomposition of number of life years lost according to causes of death Research report 12/2. Department of Biostatistics, University of Copenhagen Paul Blanche, Michael W Kattan, and Thomas A Gerds. The c-index is not proper for the evaluation of-year predicted risks. Biostatistics, 20(2): 347--357, 2018. } \author{ Thomas A Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/simCost.Rd0000644000176200001440000000062613571203270013510 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simCost.R \name{simCost} \alias{simCost} \title{Simulate COST alike data} \usage{ simCost(N) } \arguments{ \item{N}{Sample size} } \value{ Data frame } \description{ Simulate data alike the data from the Copenhagen stroke study (COST) } \details{ This uses functionality of the lava package. 
} \author{ Thomas Alexander Gerds } pec/man/ipcw.Rd0000755000176200001440000000723213753010514013033 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ipcw.R \name{ipcw} \alias{ipcw} \alias{ipcw.none} \alias{ipcw.marginal} \alias{ipcw.nonpar} \alias{ipcw.cox} \alias{ipcw.aalen} \title{Estimation of censoring probabilities} \usage{ ipcw( formula, data, method, args, times, subjectTimes, subjectTimesLag = 1, what ) } \arguments{ \item{formula}{A survival formula like, \code{Surv(time,status)~1}, where as usual status=0 means censored. The status variable is internally reversed for estimation of censoring rather than survival probabilities. Some of the available models (see argument \code{model}) will use predictors on the right hand side of the formula.} \item{data}{The data used for fitting the censoring model} \item{method}{Censoring model used for estimation of the (conditional) censoring distribution.} \item{args}{A list of arguments which is passed to method} \item{times}{For \code{what="IPCW.times"} a vector of times at which to compute the probabilities of not being censored.} \item{subjectTimes}{For \code{what="IPCW.subjectTimes"} a vector of individual times at which the probabilities of not being censored are computed.} \item{subjectTimesLag}{If equal to \code{1} then obtain \code{G(T_i-|X_i)}, if equal to \code{0} estimate the conditional censoring distribution at the subjectTimes, i.e. (\code{G(T_i|X_i)}).} \item{what}{Decide about what to do: If equal to \code{"IPCW.times"} then weights are estimated at given \code{times}. If equal to \code{"IPCW.subjectTimes"} then weights are estimated at individual \code{subjectTimes}. 
If missing then produce both.} } \value{ \item{times}{The times at which weights are estimated} \item{IPCW.times}{Estimated weights at \code{times}} \item{IPCW.subjectTimes}{Estimated weights at individual time values \code{subjectTimes}} \item{fit}{The fitted censoring model} \item{method}{The method for modelling the censoring distribution} \item{call}{The call} } \description{ This function is used internally by the function \code{pec} to obtain inverse of the probability of censoring weights. } \details{ Inverse of the probability of censoring weights (IPCW) usually refer to the probabilities of not being censored at certain time points. These probabilities are also the values of the conditional survival function of the censoring time given covariates. The function ipcw estimates the conditional survival function of the censoring times and derives the weights. IMPORTANT: the data set should be ordered, \code{order(time,-status)} in order to get the values \code{IPCW.subjectTimes} in the right order for some choices of \code{method}. } \examples{ library(prodlim) library(rms) dat=SimSurv(30) dat <- dat[order(dat$time),] # using the marginal Kaplan-Meier for the censoring times WKM=ipcw(Hist(time,status)~X2, data=dat, method="marginal", times=sort(unique(dat$time)), subjectTimes=dat$time) plot(WKM$fit) WKM$fit # using the Cox model for the censoring times given X2 library(survival) WCox=ipcw(Hist(time=time,event=status)~X2, data=dat, method="cox", times=sort(unique(dat$time)), subjectTimes=dat$time) WCox$fit plot(WKM$fit) lines(sort(unique(dat$time)), 1-WCox$IPCW.times[1,], type="l", col=2, lty=3, lwd=3) lines(sort(unique(dat$time)), 1-WCox$IPCW.times[5,], type="l", col=3, lty=3, lwd=3) # using the stratified Kaplan-Meier # for the censoring times given X2 WKM2=ipcw(Hist(time,status)~X2, data=dat, method="nonpar", times=sort(unique(dat$time)), subjectTimes=dat$time) plot(WKM2$fit,add=FALSE) } \seealso{ \code{\link{pec}} } \author{ Thomas A. 
Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/plot.calibrationPlot.Rd0000644000176200001440000000072213571203270016167 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.calibrationPlot.R \name{plot.calibrationPlot} \alias{plot.calibrationPlot} \title{Plot objects obtained with \code{calPlot}} \usage{ \method{plot}{calibrationPlot}(x, ...) } \arguments{ \item{x}{Object obtained with \code{calPlot}} \item{...}{Not used.} } \value{ Nothing } \description{ Calibration plots } \seealso{ \code{calPlot} } \author{ Thomas A. Gerds } pec/man/coxboost.Rd0000644000176200001440000000275013571203270013727 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictEventProb.coxboost.R \name{coxboost} \alias{coxboost} \title{Formula interface for function \code{CoxBoost} of package \code{CoxBoost}.} \usage{ coxboost(formula, data, cv = TRUE, cause = 1, penalty, ...) } \arguments{ \item{formula}{An event-history formula for competing risks of the form \code{Hist(time,status)~sex+age} where \code{status} defines competing events and right censored data. The code for right censored can be controlled with argument \code{cens.code}, see man page the function \code{\link{Hist}}.} \item{data}{A data.frame in which the variables of formula are defined.} \item{cv}{If \code{TRUE} perform cross-validation to optimize the parameter \code{stepno}. This calls the function \code{cv.CoxBoost} whose arguments are prefix controlled, that is \code{cv.K=7} sets the argument \code{K} of \code{cv.CoxBoost} to \code{7}. If \code{FALSE} use \code{stepno}.} \item{cause}{The cause of interest in competing risk models.} \item{penalty}{See \code{CoxBoost}.} \item{...}{Arguments passed to either \code{CoxBoost} via \code{CoxBoost.arg} or to \code{cv.CoxBoost} via \code{cv.CoxBoost.arg}.} } \value{ See \code{CoxBoost}. 
} \description{ Formula interface for function \code{CoxBoost} of package \code{CoxBoost}. } \details{ See \code{CoxBoost}. } \references{ See \code{CoxBoost}. } \seealso{ See \code{CoxBoost}. } \author{ Thomas Alexander Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/predictRestrictedMeanTime.Rd0000644000176200001440000001144114131004274017163 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictRestrictedMeanTime.R \name{predictRestrictedMeanTime} \alias{predictRestrictedMeanTime} \alias{predictRestrictedMeanTime.aalen} \alias{predictRestrictedMeanTime.riskRegression} \alias{predictRestrictedMeanTime.cox.aalen} \alias{predictRestrictedMeanTime.coxph} \alias{predictRestrictedMeanTime.cph} \alias{predictRestrictedMeanTime.default} \alias{predictRestrictedMeanTime.rfsrc} \alias{predictRestrictedMeanTime.matrix} \alias{predictRestrictedMeanTime.pecCtree} \alias{predictRestrictedMeanTime.prodlim} \alias{predictRestrictedMeanTime.psm} \alias{predictRestrictedMeanTime.selectCox} \alias{predictRestrictedMeanTime.survfit} \alias{predictRestrictedMeanTime.pecRpart} \title{Predicting restricted mean time} \usage{ \method{predictRestrictedMeanTime}{aalen}(object,newdata,times,...) \method{predictRestrictedMeanTime}{riskRegression}(object,newdata,times,...) \method{predictRestrictedMeanTime}{cox.aalen}(object,newdata,times,...) \method{predictRestrictedMeanTime}{cph}(object,newdata,times,...) \method{predictRestrictedMeanTime}{coxph}(object,newdata,times,...) \method{predictRestrictedMeanTime}{matrix}(object,newdata,times,...) \method{predictRestrictedMeanTime}{selectCox}(object,newdata,times,...) \method{predictRestrictedMeanTime}{prodlim}(object,newdata,times,...) \method{predictRestrictedMeanTime}{psm}(object,newdata,times,...) \method{predictRestrictedMeanTime}{survfit}(object,newdata,times,...) \method{predictRestrictedMeanTime}{pecRpart}(object,newdata,times,...) 
\method{predictRestrictedMeanTime}{pecCtree}(object,newdata,times,...) } \arguments{ \item{object}{A fitted model from which to extract predicted survival probabilities} \item{newdata}{A data frame containing predictor variable combinations for which to compute predicted survival probabilities.} \item{times}{A vector of times in the range of the response variable, e.g. times when the response is a survival object, at which to return the survival probabilities.} \item{\dots}{Additional arguments that are passed on to the current method.} } \value{ A matrix with as many rows as \code{NROW(newdata)} and as many columns as \code{length(times)}. Each entry is a restricted mean time and in each row the values should be increasing with time. } \description{ Function to extract predicted mean times from various modeling approaches. } \details{ The function predictRestrictedMeanTime is a generic function, meaning that it invokes a different function dependent on the 'class' of the first argument. See also \code{\link{predictSurvProb}}. } \note{ In order to assess the predictive performance of a new survival model a specific \code{predictRestrictedMeanTime} S3 method has to be written. For examples, see the bodies of the existing methods. The performance of the assessment procedure, in particular for resampling where the model is repeatedly evaluated, will be improved by suppressing in the call to the model all the computations that are not needed for probability prediction. For example, \code{se.fit=FALSE} can be set in the call to \code{cph}. 
} \examples{ # generate some survival data library(prodlim) set.seed(100) d <- SimSurv(100) # then fit a Cox model library(rms) coxmodel <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE) # predicted survival probabilities can be extracted # at selected time-points: ttt <- quantile(d$time) # for selected predictor values: ndat <- data.frame(X1=c(0.25,0.25,-0.05,0.05),X2=c(0,1,0,1)) # as follows predictRestrictedMeanTime(coxmodel,newdata=ndat,times=ttt) # stratified cox model sfit <- coxph(Surv(time,status)~strata(X1)+X2,data=d,x=TRUE,y=TRUE) predictRestrictedMeanTime(sfit,newdata=d[1:3,],times=c(1,3,5,10)) ## simulate some learning and some validation data learndat <- SimSurv(100) valdat <- SimSurv(100) ## use the learning data to fit a Cox model library(survival) fitCox <- coxph(Surv(time,status)~X1+X2,data=learndat,x=TRUE,y=TRUE) ## suppose we want to predict the survival probabilities for all patients ## in the validation data at the following time points: ## 0, 12, 24, 36, 48, 60 psurv <- predictRestrictedMeanTime(fitCox,newdata=valdat,times=seq(0,60,12)) ## This is a matrix with survival probabilities ## one column for each of the 5 time points ## one row for each validation set individual # the same can be done e.g. for a randomSurvivalForest model library(randomForestSRC) rsfmodel <- rfsrc(Surv(time,status)~X1+X2,data=d) predictRestrictedMeanTime(rsfmodel,newdata=ndat,times=ttt) } \references{ Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. DOI 10.18637/jss.v050.i11 } \seealso{ \code{\link{predict}},\code{\link{survfit}} } \author{ Thomas A. 
Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/reclass.Rd0000644000176200001440000000437413753010514013526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reclass.R \name{reclass} \alias{reclass} \title{Retrospective risk reclassification table} \usage{ reclass( object, reference, formula, data, time, cause, cuts = seq(0, 100, 25), digits = 2 ) } \arguments{ \item{object}{A list with two elements. Each element should either be a vector with probabilities, or an object for which \code{predictSurvProb} or \code{predictEventProb} can extract predicted risk based on data.} \item{reference}{Reference prediction model.} \item{formula}{A survival formula as obtained either with \code{prodlim::Hist} or \code{survival::Surv} which defines the response in the \code{data}.} \item{data}{Used to extract the response from the data and passed on to \code{predictEventProb} to extract predicted event probabilities.} \item{time}{Time of interest for prediction.} \item{cause}{For competing risk models the cause of interest. Defaults to all available causes.} \item{cuts}{Risk quantiles to group risks.} \item{digits}{Number of digits to show for the predicted risks.} } \value{ reclassification tables: overall table and one conditional table for each cause and for subjects event free at the time of interest. 
} \description{ Retrospective table of risks predicted by two different methods, models, algorithms } \details{ All risks are multiplied by 100 before they are grouped into the categories defined by \code{cuts}. } \examples{ \dontrun{ library(survival) set.seed(40) d <- prodlim::SimSurv(400) nd <- prodlim::SimSurv(400) Models <- list("Cox.X2"=coxph(Surv(time,status)~X2,data=d,x=TRUE,y=TRUE), "Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=d,x=TRUE,y=TRUE)) rc <- reclass(Models,formula=Surv(time,status)~1,data=nd,time=5) print(rc) plot(rc) set.seed(40) library(riskRegression) library(prodlim) dcr <- prodlim::SimCompRisk(400) ndcr <- prodlim::SimCompRisk(400) crPred5 <- list("X2"=predictEventProb(CSC(Hist(time,event)~X2,data=dcr),newdata=ndcr,times=5), "X1+X2"=predictEventProb(CSC(Hist(time,event)~X1+X2,data=dcr),newdata=ndcr,times=5)) rc <- reclass(crPred5,Hist(time,event)~1,data=ndcr,time=3) print(rc) reclass(crPred5,Hist(time,event)~1,data=ndcr,time=5,cuts=100*c(0,0.05,0.1,0.2,1)) } } \seealso{ predictStatusProb } \author{ Thomas A. Gerds } pec/man/plotPredictSurvProb.Rd0000644000176200001440000000664714131004266016067 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotPredictSurvProb.R \name{plotPredictSurvProb} \alias{plotPredictSurvProb} \title{Plotting predicted survival curves.} \usage{ plotPredictSurvProb( x, newdata, times, xlim, ylim, xlab, ylab, axes = TRUE, col, density, lty, lwd, add = FALSE, legend = TRUE, percent = FALSE, ... ) } \arguments{ \item{x}{A survival prediction model including \code{call} and \code{formula} object.} \item{newdata}{A data frame with the same variable names as those that were used to fit the model \code{x}.} \item{times}{Vector of times at which to return the estimated probabilities.} \item{xlim}{Plotting range on the x-axis.} \item{ylim}{Plotting range on the y-axis.} \item{xlab}{Label given to the x-axis.} \item{ylab}{Label given to the y-axis.} \item{axes}{Logical. 
If \code{FALSE} no axes are drawn.} \item{col}{Vector of colors given to the survival curve.} \item{density}{Density of the color -- useful for showing many (overlapping) curves.} \item{lty}{Vector of lty's given to the survival curve.} \item{lwd}{Vector of lwd's given to the survival curve.} \item{add}{Logical. If \code{TRUE} only lines are added to an existing device} \item{legend}{Logical. If TRUE a legend is plotted by calling the function legend. Optional arguments of the function \code{legend} can be given in the form \code{legend.x=val} where x is the name of the argument and val the desired value. See also Details.} \item{percent}{Logical. If \code{TRUE} the y-axis is labeled in percent.} \item{\dots}{Parameters that are filtered by \code{\link{SmartControl}} and then passed to the functions: \code{\link{plot}}, \code{\link{axis}}, \code{\link{legend}}.} } \value{ The (invisible) object. } \description{ Plotting predicted survival curves for one prediction model using \code{predictSurvProb}. } \details{ Arguments for the invoked functions \code{legend} and \code{axis} are simply specified as \code{legend.lty=2}. The specification is not case sensitive, thus \code{Legend.lty=2} or \code{LEGEND.lty=2} will have the same effect. The function \code{axis} is called twice, and arguments of the form \code{axis1.labels}, \code{axis1.at} are used for the time axis whereas \code{axis2.pos}, \code{axis2.labels}, etc. are used for the y-axis. These arguments are processed via \code{\dots{}} of \code{plotPredictSurvProb} and inside by using the function \code{SmartControl}. 
} \examples{ # generate some survival data library(prodlim) d <- SimSurv(100) # then fit a Cox model library(rms) coxmodel <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE) # plot predicted survival probabilities for all time points ttt <- sort(unique(d$time)) # and for selected predictor values: ndat <- data.frame(X1=c(0.25,0.25,-0.05,0.05),X2=c(0,1,0,1)) plotPredictSurvProb(coxmodel,newdata=ndat,times=ttt) # the same can be done e.g. for a randomSurvivalForest model library(randomForestSRC) rsfmodel <- rfsrc(Surv(time,status)~X1+X2,data=d) plotPredictSurvProb(rsfmodel,newdata=ndat,times=ttt) } \references{ Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. DOI 10.18637/jss.v050.i11 } \seealso{ \code{\link{predictSurvProb}}\code{\link{prodlim}} } \author{ Ulla B. Mogensen \email{ulmo@biostat.ku.dk}, Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/cost.Rd0000644000176200001440000000403613753010514013035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pec-package.R \docType{data} \name{cost} \alias{cost} \title{Copenhagen Stroke Study} \format{ This data frame contains the observations of 518 stroke patients : \describe{ \item{age}{Age of the patients in years.} \item{sex}{A factor with two levels \code{female} and \code{male}.} \item{hypTen}{Hypertension, a factor with two levels \code{no} and \code{yes}.} \item{ihd}{History of ischemic heart disease at admission, a factor with two levels \code{no} and \code{yes}.} \item{prevStroke}{History of previous strokes before admission, a factor with two levels \code{no} and \code{yes}.} \item{othDisease}{History of other disabling diseases (e.g. 
severe dementia), a factor with two levels \code{no} and \code{yes}.} \item{alcohol}{Daily alcohol consumption, a factor with two levels \code{no} and \code{yes}.} \item{diabetes}{Diabetes mellitus status indicating if the glucose level was higher than 11 mmol/L, a factor with two levels \code{no} and \code{yes}.} \item{smoke}{Daily smoking status, a factor with two levels \code{no} and \code{yes}.} \item{atrialFib}{Atrial fibrillation, a factor with two levels \code{no} and \code{yes}.} \item{hemor}{Hemorrhage (stroke subtype), a factor with two levels \code{no} (infarction) and \code{yes} (hemorrhage).} \item{strokeScore}{Scandinavian stroke score at admission to the hospital. Ranges from 0 (worst) to 58 (best).} \item{cholest}{Cholesterol level} \item{time}{Survival time (in days).} \item{status}{Status (\code{0}: censored, \code{1}: event).} } } \description{ This data set contains a subset of the data from the Copenhagen stroke study. } \references{ Joergensen HS, Nakayama H, Reith J, Raaschou HO, and Olsen TS. Acute stroke with atrial fibrillation. The Copenhagen Stroke Study. Stroke, 27(10):1765-9, 1996. Mogensen UB, Ishwaran H, and Gerds TA. Evaluating random forests for survival analysis using prediction error curves. Technical Report 8, University of Copenhagen, Department of Biostatistics, 2010. } \keyword{datasets} pec/man/pecRpart.Rd0000644000176200001440000000136013753471026013653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictSurvProb.R \name{pecRpart} \alias{pecRpart} \title{Predict survival based on rpart tree object} \usage{ pecRpart(formula, data, ...) 
} \arguments{ \item{formula}{passed to rpart} \item{data}{passed to rpart} \item{...}{passed to rpart} } \value{ A list containing the fitted rpart object, the stratified Kaplan-Meier (prodlim) fit used for prediction, and the call. } \description{ Combines the rpart result with a stratified Kaplan-Meier (prodlim) to predict survival } \examples{ library(prodlim) if (requireNamespace("rpart",quietly=TRUE)){ library(rpart) library(survival) set.seed(50) d <- SimSurv(50) nd <- data.frame(X1=c(0,1,0),X2=c(-1,0,1)) f <- pecRpart(Surv(time,status)~X1+X2,data=d) predictSurvProb(f,newdata=nd,times=c(3,8)) } } pec/man/resolvesplitMethod.Rd0000644000176200001440000000330313571203270015756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/resolveSplitMethod.R \name{resolvesplitMethod} \alias{resolvesplitMethod} \title{Resolve the splitMethod for estimation of prediction performance} \usage{ resolvesplitMethod(splitMethod, B, N, M) } \arguments{ \item{splitMethod}{String that determines the splitMethod to use. Available splitMethods are none/noPlan (no splitting), bootcv or outofbag (bootstrap cross-validation), cvK (K-fold cross-validation, e.g. cv10 gives 10-fold), boot632, boot632plus or boot632+, loocv (leave-one-out)} \item{B}{The number of repetitions.} \item{N}{The sample size} \item{M}{For subsampling bootstrap the size of the subsample. Note that M must be smaller than N. } pec/man/Pbc3.Rd0000644000176200001440000000422113753010514012650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pec-package.R \docType{data} \name{Pbc3} \alias{Pbc3} \title{Pbc3 data} \format{ A data frame with 349 observations on the following 15 variables. 
\describe{ \item{ptno}{patient identification} \item{unit}{hospital (1: Hvidovre, 2: London, 3: Copenhagen, 4: Barcelona, 5: Munich, 6: Lyon)} \item{tment}{treatment (0: placebo, 1: CyA)} \item{sex}{(1: males, 0: females)} \item{age}{age in years} \item{stage}{histological stage (1, 2, 3, 4)} \item{gibleed}{previous gastrointestinal bleeding (1: yes, 0: no)} \item{crea}{creatinine (micromoles/L)} \item{alb}{albumin (g/L)} \item{bili}{bilirubin (micromoles/L)} \item{alkph}{alkaline phosphatase (IU/L)} \item{asptr}{aspartate transaminase (IU/L)} \item{weight}{body weight (kg)} \item{days}{observation time (days)} \item{status}{status at observation time (0: censored, 1: liver transplantation, 2 : dead)} } } \source{ Andersen and Skovgaard. Regression with linear predictors. } \description{ PBC3 was a multi-centre randomized clinical trial conducted in six European hospitals. Between 1 Jan. 1983 and 1 Jan. 1987, 349 patients with the liver disease primary biliary cirrhosis (PBC) were randomized to either treatment with Cyclosporin A (CyA, 176 patients) or placebo (173 patients). The purpose of the trial was to study the effect of treatment on the survival time. However, during the course of the trial an increased use of liver transplantation for patients with this disease made the investigators redefine the main response variable to be time to ``failure of medical treatment'' defined as either death or liver transplantation. Patients were then followed from randomization until treatment failure, drop-out or 1 Jan, 1989; 61 patients died (CyA: 30, placebo: 31), another 29 were transplanted (CyA: 14, placebo: 15) and 4 patients were lost to follow-up before 1 Jan. 1989. At entry a number of clinical, biochemical and histological variables, including serum bilirubin, serum albumin, sex, age were recorded. } \examples{ data(Pbc3) } \references{ Andersen and Skovgaard. Regression with linear predictors. Springer, 2010. 
} \keyword{datasets} pec/man/R2.Rd0000644000176200001440000000433413571203270012352 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/R2.R \name{R2} \alias{R2} \title{Explained variation for survival models} \usage{ R2(object, models, what, times, reference = 1) } \arguments{ \item{object}{An object with estimated prediction error curves obtained with the function \link{pec}} \item{models}{For which of the models in \code{object$models} should we compute $R^2(t)$. By default all models are used except for the reference model.} \item{what}{The name of the entry in \code{x} to be used. Defaults to \code{PredErr}. Other choices are \code{AppErr}, \code{BootCvErr}, \code{Boot632}, \code{Boot632plus}.} \item{times}{Time points at which the summaries are shown.} \item{reference}{Position of the model whose prediction error is used as the reference in the denominator when constructing $R^2$} } \value{ A matrix where the first column holds the times and the following columns are the corresponding $R^2$ values for the requested prediction models. } \description{ This function computes a time-dependent $R^2$ like measure of the variation explained by a survival prediction model, by dividing the mean squared error (Brier score) of the model by the mean squared error (Brier score) of a reference model which ignores all the covariates. } \details{ In survival analysis the prediction error of the Kaplan-Meier estimator plays a similar role as the total sum of squares in linear regression. Hence, it is a sensible reference model for $R^2$. } \examples{ set.seed(18713) library(prodlim) library(survival) dat=SimSurv(100) nullmodel=prodlim(Hist(time,status)~1,data=dat) pmodel1=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE) pmodel2=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE) perror=pec(list(Cox1=pmodel1,Cox2=pmodel2),Hist(time,status)~1,data=dat,reference=TRUE) R2(perror,times=seq(0,1,.1),reference=1) } \references{ E. 
Graf et al. (1999), Assessment and comparison of prognostic classification schemes for survival data. Statistics in Medicine, vol 18, pp= 2529--2545. Gerds TA, Cai T & Schumacher M (2008) The performance of risk prediction models Biometrical Journal, 50(4), 457--479 } \seealso{ \code{\link{pec}} } \author{ Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/predictLifeYearsLost.Rd0000644000176200001440000000407113571203270016165 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictLifeYearsLost.R \name{predictLifeYearsLost} \alias{predictLifeYearsLost} \alias{predictLifeYearsLost.CauseSpecificCox} \alias{predictLifeYearsLost.riskRegression} \alias{predictLifeYearsLost.FGR} \alias{predictLifeYearsLost.prodlim} \alias{predictLifeYearsLost.rfsrc} \title{Predicting life years lost (cumulative cumulative incidences) in competing risk models.} \usage{ predictLifeYearsLost(object, newdata, times, cause, ...) } \arguments{ \item{object}{A fitted model from which to extract predicted event probabilities} \item{newdata}{A data frame containing predictor variable combinations for which to compute predicted event probabilities.} \item{times}{A vector of times in the range of the response variable, for which the cumulative incidences event probabilities are computed.} \item{cause}{Identifies the cause of interest among the competing events.} \item{\dots}{Additional arguments that are passed on to the current method.} } \value{ A matrix with as many rows as \code{NROW(newdata)} and as many columns as \code{length(times)}. Each entry should be a positive value and in rows the values should be increasing. } \description{ Function to extract predicted life years lost from various modeling approaches. The most prominent one is the combination of cause-specific Cox regression models which can be fitted with the function \code{cumincCox} from the package \code{compRisk}. 
} \details{ The function predictLifeYearsLost is a generic function that means it invokes specifically designed functions depending on the 'class' of the first argument. See \code{\link{predictSurvProb}}. } \examples{ library(pec) library(riskRegression) library(survival) library(prodlim) train <- SimCompRisk(100) test <- SimCompRisk(10) fit <- CSC(Hist(time,cause)~X1+X2,data=train,cause=1) predictLifeYearsLost(fit,newdata=test,times=seq(1:10),cv=FALSE,cause=1) } \seealso{ \code{\link{predictSurvProb}}, \code{\link{predictEventProb}}. } \author{ Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/print.pec.Rd0000755000176200001440000000167513571203270014001 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.pec.R \name{print.pec} \alias{print.pec} \alias{summary.pec} \title{Printing a `pec' (prediction error curve) object.} \usage{ \method{print}{pec}(x, times, digits = 3, what = NULL, ...) } \arguments{ \item{x}{Object of class \code{pec}} \item{times}{Time points at which to show the values of the prediction error curve(s)} \item{digits}{Number of decimals used in tables.} \item{what}{What estimate of the prediction error curve to show. Should be a string matching an element of x. The default is determined by splitMethod.} \item{\dots}{Not used} \item{print}{Set to FALSE to suppress printing.} } \value{ The first argument in the invisible cloak. } \description{ Print the important arguments of call and the prediction error values at selected time points. } \seealso{ \code{\link{pec}} } \author{ Thomas A. 
Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/plot.pec.Rd0000644000176200001440000001227513753010514013615 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.pec.R \name{plot.pec} \alias{plot.pec} \title{Plotting prediction error curves} \usage{ \method{plot}{pec}( x, what, models, xlim = c(x$start, x$minmaxtime), ylim = c(0, 0.3), xlab = "Time", ylab, axes = TRUE, col, lty, lwd, type, smooth = FALSE, add.refline = FALSE, add = FALSE, legend = ifelse(add, FALSE, TRUE), special = FALSE, ... ) } \arguments{ \item{x}{Object of class \code{pec} obtained with function \code{\link{pec}}.} \item{what}{The name of the entry in \code{x}. Defauls to \code{PredErr} Other choices are \code{AppErr}, \code{BootCvErr}, \code{Boot632}, \code{Boot632plus}.} \item{models}{Specifies models in \code{x$models} for which the prediction error curves are drawn. Defaults to all models.} \item{xlim}{Plotting range on the x-axis.} \item{ylim}{Plotting range on the y-axis.} \item{xlab}{Label given to the x-axis.} \item{ylab}{Label given to the y-axis.} \item{axes}{Logical. If \code{FALSE} no axes are drawn.} \item{col}{Vector of colors given to the curves of \code{models} in the order determined by \code{models}.} \item{lty}{Vector of lty's given to the curves of \code{models} in the order determined by \code{models}.} \item{lwd}{Vector of lwd's given to the curves of \code{models} in the order determined by \code{models}.} \item{type}{Plotting type: either \code{"l"} or \code{"s"}, see \code{lines}.} \item{smooth}{Logical. If \code{TRUE} the plotting type for lines is \code{'l'} else \code{'s'}.} \item{add.refline}{Logical. If \code{TRUE} a dotted horizontal line is drawn as a symbol for the naive rule that predicts probability .5 at all cutpoints (i.e. time points in survival analysis).} \item{add}{Logical. 
If \code{TRUE} only lines are added to an existing device} \item{legend}{if TRUE a legend is plotted by calling the function legend. Optional arguments of the function \code{legend} can be given in the form \code{legend.x=val} where x is the name of the argument and val the desired value. See also Details.} \item{special}{Logical. If \code{TRUE} the bootstrap curves of \code{models} are plotted together with \code{predErr} of \code{models} by invoking the function \code{Special}. Optional arguments of the function \code{Special} can be given in the form \code{special.x=val} as with legend. See also Details.} \item{\dots}{Extra arguments that are passed to \code{\link{plot}}.} } \value{ The (invisible) object. } \description{ Plotting prediction error curves for one or more prediction models. } \details{ From version 2.0.1 on the arguments legend.text, legend.args, lines.type, lwd.lines, specials are obsolete and only available for backward compatibility. Instead arguments for the invoked functions \code{legend}, \code{axis}, \code{Special} are simply specified as \code{legend.lty=2}. The specification is not case sensitive, thus \code{Legend.lty=2} or \code{LEGEND.lty=2} will have the same effect. The function \code{axis} is called twice, and arguments of the form \code{axis1.labels}, \code{axis1.at} are used for the time axis whereas \code{axis2.pos}, \code{axis1.labels}, etc. are used for the y-axis. These arguments are processed via \code{\dots{}} of \code{plot.pec} and inside by using the function \code{resolveSmartArgs}. Documentation of these arguments can be found in the help pages of the corresponding functions. 
} \examples{ # simulate data # with a survival response and two predictors library(prodlim) library(survival) set.seed(280180) dat <- SimSurv(100) # fit some candidate Cox models and # compute the Kaplan-Meier estimate Models <- list("Kaplan.Meier"=survfit(Surv(time,status)~1,data=dat), "Cox.X1"=coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE), "Cox.X2"=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE), "Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE)) Models <- list("Cox.X1"=coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE), "Cox.X2"=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE), "Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE)) # compute the .632+ estimate of the generalization error set.seed(17100) PredError.632plus <- pec(object=Models, formula=Surv(time,status)~X1+X2, data=dat, exact=TRUE, cens.model="marginal", splitMethod="boot632plus", B=5, keep.matrix=TRUE, verbose=TRUE) # plot the .632+ estimates of the generalization error plot(PredError.632plus,xlim=c(0,30)) # plot the bootstrapped curves, .632+ estimates of the generalization error # and Apparent error for the Cox model 'Cox.X1' with the 'Cox.X2' model # as benchmark plot(PredError.632plus, xlim=c(0,30), models="Cox.X1", special=TRUE, special.bench="Cox.X2", special.benchcol=2, special.addprederr="AppErr") } \seealso{ \code{\link{pec}}\code{\link{summary.pec}}\code{\link{Special}}\code{\link{prodlim}} } \author{ Ulla B. Mogensen \email{ulmo@biostat.ku.dk}, Thomas A. 
Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/pec.Rd0000755000176200001440000004552714131004275012647 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pec.R \name{pec} \alias{pec} \title{Prediction error curves} \usage{ pec( object, formula, data, traindata, times, cause, start, maxtime, exact = TRUE, exactness = 100, fillChar = NA, cens.model = "cox", ipcw.refit = FALSE, ipcw.args = NULL, splitMethod = "none", B, M, reference = TRUE, model.args = NULL, model.parms = NULL, keep.index = FALSE, keep.matrix = FALSE, keep.models = FALSE, keep.residuals = FALSE, keep.pvalues = FALSE, noinf.permute = FALSE, multiSplitTest = FALSE, testIBS, testTimes, confInt = FALSE, confLevel = 0.95, verbose = TRUE, savePath = NULL, slaveseed = NULL, na.action = na.fail, ... ) } \arguments{ \item{object}{A named list of prediction models, where allowed entries are (1) R-objects for which a \link{predictSurvProb} method exists (see details), (2) a \code{call} that evaluates to such an R-object (see examples), (3) a matrix with predicted probabilities having as many rows as \code{data} and as many columns as \code{times}. For cross-validation all objects in this list must include their \code{call}.} \item{formula}{A survival formula as obtained either with \code{prodlim::Hist} or \code{survival::Surv}. The left hand side is used to find the status response variable in \code{data}. For right censored data, the right hand side of the formula is used to specify conditional censoring models. For example, set \code{Surv(time,status)~x1+x2} and \code{cens.model="cox"}. Then the weights are based on a Cox regression model for the censoring times with predictors x1 and x2. Note that the usual coding is assumed: \code{status=0} for censored times and that each variable name that appears in \code{formula} must be the column name in \code{data}. If there are no covariates, i.e. 
\code{formula=Surv(time,status)~1} the \code{cens.model} is coerced to \code{"marginal"} and the Kaplan-Meier estimator for the censoring times is used to calculate the weights. If \code{formula} is \code{missing}, try to extract a formula from the first element in object.} \item{data}{A data frame in which to validate the prediction models and to fit the censoring model. If \code{data} is missing, try to extract a data set from the first element in object.} \item{traindata}{A data frame in which the models are trained. This argument is used only in the absence of crossvalidation, in which case it is passed to the predictHandler function predictSurvProb} \item{times}{A vector of time points. At each time point the prediction error curves are estimated. If \code{exact==TRUE} the \code{times} are merged with all the unique values of the response variable. If \code{times} is missing and \code{exact==TRUE} all the unique values of the response variable are used. If missing and \code{exact==FALSE} use a equidistant grid of values between \code{start} and \code{maxtime}. The distance is determined by \code{exactness}.} \item{cause}{For competing risks, the event of interest. Defaults to the first state of the response, which is obtained by evaluating the left hand side of \code{formula} in \code{data}.} \item{start}{Minimal time for estimating the prediction error curves. If missing and \code{formula} defines a \code{Surv} or \code{Hist} object then \code{start} defaults to \code{0}, otherwise to the smallest observed value of the response variable. \code{start} is ignored if \code{times} are given.} \item{maxtime}{Maximal time for estimating the prediction error curves. If missing the largest value of the response variable is used.} \item{exact}{Logical. If \code{TRUE} estimate the prediction error curves at all the unique values of the response variable. 
If \code{times} are given and \code{exact=TRUE} then the \code{times} are merged with the unique values of the response variable.} \item{exactness}{An integer that determines how many equidistant gridpoints are used between \code{start} and \code{maxtime}. The default is 100.} \item{fillChar}{Symbol used to fill-in places where the values of the prediction error curves are not available. The default is \code{NA}.} \item{cens.model}{Method for estimating inverse probability of censoring weights: \code{cox}: A semi-parametric Cox proportional hazard model is fitted to the censoring times \code{marginal}: The Kaplan-Meier estimator for the censoring times \code{nonpar}: Nonparametric extension of the Kaplan-Meier for the censoring times using symmetric nearest neighborhoods -- available for arbitrary many strata variables on the right hand side of argument \code{formula} but at most one continuous variable. See the documentation of the functions \code{prodlim} and \code{neighborhood} from the prodlim package. \code{aalen}: The nonparametric Aalen additive model fitted to the censoring times. Requires the \code{timereg} package.} \item{ipcw.refit}{If \code{TRUE} the inverse probability of censoring weights are estimated separately in each training set during cross-validation.} \item{ipcw.args}{List of arguments passed to function specified by argument \code{cens.model}.} \item{splitMethod}{SplitMethod for estimating the prediction error curves. \code{none/noPlan}: Assess the models in the same data where they are fitted. \code{boot}: DEPRECATED. \code{cvK}: K-fold cross-validation, i.e. \code{cv10} for 10-fold cross-validation. After splitting the data in K subsets, the prediction models (ie those specified in \code{object}) are evaluated on the data omitting the Kth subset (training step). The prediction error is estimated with the Kth subset (validation step). 
The random splitting is repeated \code{B} times and the estimated prediction error curves are obtained by averaging. \code{BootCv}: Bootstrap cross validation. The prediction models are trained on \code{B} bootstrap samples, that are either drawn with replacement of the same size as the original data or without replacement from \code{data} of the size \code{M}. The models are assessed in the observations that are NOT in the bootstrap sample. \code{Boot632}: Linear combination of AppErr and BootCvErr using the constant weight .632. \code{Boot632plus}: Linear combination of AppErr and BootCv using weights dependent on how the models perform in permuted data. \code{loocv}: Leave one out cross-validation. \code{NoInf}: Assess the models in permuted data.} \item{B}{Number of bootstrap samples. The default depends on argument \code{splitMethod}. When \code{splitMethod} in c("BootCv","Boot632","Boot632plus") the default is 100. For \code{splitMethod="cvK"} \code{B} is the number of cross-validation cycles, and -- default is 1. For \code{splitMethod="none"} \code{B} is the number of bootstrap simulations e.g. to obtain bootstrap confidence limits -- default is 0.} \item{M}{The size of the bootstrap samples for resampling without replacement. Ignored for resampling with replacement.} \item{reference}{Logical. If \code{TRUE} add the marginal Kaplan-Meier prediction model as a reference to the list of models.} \item{model.args}{List of extra arguments that can be passed to the \code{predictSurvProb} methods. The list must have an entry for each entry in \code{object}.} \item{model.parms}{Experimental. List of with exactly one entry for each entry in \code{object}. Each entry names parts of the value of the fitted models that should be extracted and added to the value.} \item{keep.index}{Logical. 
If \code{FALSE} remove the bootstrap or cross-validation index from the output list which otherwise is included in the splitMethod part of the output list.} \item{keep.matrix}{Logical. If \code{TRUE} add all \code{B} prediction error curves from bootstrapping or cross-validation to the output.} \item{keep.models}{Logical. If \code{TRUE} keep the models in object. Since fitted models can be large objects the default is \code{FALSE}.} \item{keep.residuals}{Logical. If \code{TRUE} keep the patient individual residuals at \code{testTimes}.} \item{keep.pvalues}{For \code{multiSplitTest}. If \code{TRUE} keep the pvalues from the single splits.} \item{noinf.permute}{If \code{TRUE} the noinformation error is approximated using permutation.} \item{multiSplitTest}{If \code{TRUE} the test proposed by van de Wiel et al. (2009) is applied. Requires subsampling bootstrap cross-validation, i.e. that \code{splitMethod} equals \code{bootcv} and that \code{M} is specified.} \item{testIBS}{A range of time points for testing differences between models in the integrated Brier scores.} \item{testTimes}{A vector of time points for testing differences between models in the time-point specific Brier scores.} \item{confInt}{Experimental.} \item{confLevel}{Experimental.} \item{verbose}{if \code{TRUE} report details of the progress, e.g. count the steps in cross-validation.} \item{savePath}{Place in your file system (i.e., a directory on your computer) where training models fitted during cross-validation are saved. If \code{missing} training models are not saved.} \item{slaveseed}{Vector of seeds, as long as \code{B}, to be given to the slaves in parallel computing.} \item{na.action}{Passed immediately to model.frame. Defaults to na.fail. If set otherwise most prediction models will not work.} \item{...}{Not used.} } \value{ A \code{pec} object. See also the help pages of the corresponding \code{print}, \code{summary}, and \code{plot} methods. 
The object includes the following components: \item{PredErr}{ The estimated prediction error according to the \code{splitMethod}. A matrix where each column represents the estimated prediction error of a fit at the time points in time. } \item{AppErr}{ The training error or apparent error obtained when the model(s) are evaluated in the same data where they were trained. Only if \code{splitMethod} is one of "NoInf", "cvK", "BootCv", "Boot632" or "Boot632plus". } \item{BootCvErr}{ The prediction error when the model(s) are trained in the bootstrap sample and evaluated in the data that are not in the bootstrap sample. Only if \code{splitMethod} is one of "Boot632" or "Boot632plus". When \code{splitMethod="BootCv"} then the \code{BootCvErr} is stored in the component \code{PredErr}. } \item{NoInfErr}{ The prediction error when the model(s) are evaluated in the permuted data. Only if \code{splitMethod} is one of "BootCv", "Boot632", or "Boot632plus". For \code{splitMethod="NoInf"} the \code{NoInfErr} is stored in the component \code{PredErr}. } \item{weight}{ The weight used to linear combine the \code{AppErr} and the \code{BootCvErr} Only if \code{splitMethod} is one of "Boot632", or "Boot632plus". } \item{overfit}{ Estimated \code{overfit} of the model(s). See Efron \& Tibshirani (1997, Journal of the American Statistical Association) and Gerds \& Schumacher (2007, Biometrics). Only if \code{splitMethod} is one of "Boot632", or "Boot632plus". } \item{call}{The call that produced the object} \item{time}{The time points at which the prediction error curves change.} \item{ipcw.fit}{The fitted censoring model that was used for re-weighting the Brier score residuals. 
See Gerds \& Schumacher (2006, Biometrical Journal)} \item{n.risk}{The number of subjects at risk for all time points.} \item{models}{The prediction models fitted in their own data.} \item{cens.model}{The censoring models.} \item{maxtime}{The latest timepoint where the prediction error curves are estimated.} \item{start}{The earliest timepoint where the prediction error curves are estimated.} \item{exact}{\code{TRUE} if the prediction error curves are estimated at all unique values of the response in the full data.} \item{splitMethod}{The splitMethod used for estimation of the overfitting bias.} } \description{ Evaluating the performance of risk prediction models in survival analysis. The Brier score is a weighted average of the squared distances between the observed survival status and the predicted survival probability of a model. Roughly the weights correspond to the probabilities of not being censored. The weights can be estimated depend on covariates. Prediction error curves are obtained when the Brier score is followed over time. Cross-validation based on bootstrap resampling or bootstrap subsampling can be applied to assess and compare the predictive power of various regression modelling strategies on the same set of data. } \details{ Note that package riskRegression provides very similar functionality (and much more) but not yet every feature of pec. Missing data in the response or in the input matrix cause a failure. The status of the continuous response variable at cutpoints (\code{times}), ie status=1 if the response value exceeds the cutpoint and status=0 otherwise, is compared to predicted event status probabilities which are provided by the prediction models on the basis of covariates. The comparison is done with the Brier score: the quadratic difference between 0-1 response status and predicted probability. 
There are two different sources for bias when estimating prediction error in right censored survival problems: censoring and high flexibility of the prediction model. The first is controlled by inverse probability of censoring weighting, the second can be controlled by special Monte Carlo simulation. In each step, the resampling procedures reevaluate the prediction model. Technically this is done by replacing the argument \code{object$call$data} by the current subset or bootstrap sample of the full data. For each prediction model there must be a \code{predictSurvProb} method: for example, to assess a prediction model which evaluates to a \code{myclass} object one defines a function called \code{predictSurvProb.myclass} with arguments \code{object,newdata,cutpoints,...} Such a function takes the object and derives a matrix with predicted event status probabilities for each subject in newdata (rows) and each cutpoint (column) of the response variable that defines an event status. Currently, \code{predictSurvProb} methods are available for the following R-objects: \describe{ \item{}{\code{matrix}} \item{}{\code{aalen}, \code{cox.aalen} from \code{library(timereg)}} \item{}{\code{mfp} from \code{library(mfp)}} \item{}{\code{phnnet}, \code{survnnet} from \code{library(survnnet)}} \item{}{\code{rpart} (from \code{library(rpart)})} \item{}{\code{coxph}, \code{survfit} from \code{library(survival)}} \item{}{\code{cph}, \code{psm} from \code{library(rms)}} \item{}{\code{prodlim} from \code{library(prodlim)}} \item{}{\code{glm} from \code{library(stats)}} } } \examples{ # simulate an artificial data frame # with survival response and two predictors set.seed(130971) library(prodlim) library(survival) dat <- SimSurv(100) # fit some candidate Cox models and compute the Kaplan-Meier estimate Models <- list("Cox.X1"=coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE), "Cox.X2"=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE), 
"Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE)) # compute the apparent prediction error PredError <- pec(object=Models, formula=Surv(time,status)~X1+X2, data=dat, exact=TRUE, cens.model="marginal", splitMethod="none", B=0, verbose=TRUE) print(PredError,times=seq(5,30,5)) summary(PredError) plot(PredError,xlim=c(0,30)) # Comparison of Weibull model and Cox model library(survival) library(rms) library(pec) data(pbc) pbc <- pbc[sample(1:NROW(pbc),size=100),] f1 <- psm(Surv(time,status!=0)~edema+log(bili)+age+sex+albumin,data=pbc) f2 <- coxph(Surv(time,status!=0)~edema+log(bili)+age+sex+albumin,data=pbc,x=TRUE,y=TRUE) f3 <- cph(Surv(time,status!=0)~edema+log(bili)+age+sex+albumin,data=pbc,surv=TRUE) brier <- pec(list("Weibull"=f1,"CoxPH"=f2,"CPH"=f3),data=pbc,formula=Surv(time,status!=0)~1) print(brier) plot(brier) # compute the .632+ estimate of the generalization error set.seed(130971) library(prodlim) library(survival) dat <- SimSurv(100) set.seed(17100) PredError.632plus <- pec(object=Models, formula=Surv(time,status)~X1+X2, data=dat, exact=TRUE, cens.model="marginal", splitMethod="Boot632plus", B=3, verbose=TRUE) print(PredError.632plus,times=seq(4,12,4)) summary(PredError.632plus) plot(PredError.632plus,xlim=c(0,30)) # do the same again but now in parallel \dontrun{set.seed(17100) # library(doMC) # registerDoMC() PredError.632plus <- pec(object=Models, formula=Surv(time,status)~X1+X2, data=dat, exact=TRUE, cens.model="marginal", splitMethod="Boot632plus", B=3, verbose=TRUE) } # assessing parametric survival models in learn/validation setting learndat <- SimSurv(50) testdat <- SimSurv(30) library(rms) f1 <- psm(Surv(time,status)~X1+X2,data=learndat) f2 <- psm(Surv(time,status)~X1,data=learndat) pf <- pec(list(f1,f2),formula=Surv(time,status)~1,data=testdat,maxtime=200) plot(pf) summary(pf) # ---------------- competing risks ----------------- library(survival) library(riskRegression) if(requireNamespace("cmprsk",quietly=TRUE)){ library(cmprsk) 
library(pec) cdat <- SimCompRisk(100) f1 <- CSC(Hist(time,event)~X1+X2,cause=2,data=cdat) f2 <- CSC(Hist(time,event)~X1,data=cdat,cause=2) f3 <- FGR(Hist(time,event)~X1+X2,cause=2,data=cdat) f4 <- FGR(Hist(time,event)~X1+X2,cause=2,data=cdat) p1 <- pec(list(f1,f2,f3,f4),formula=Hist(time,event)~1,data=cdat,cause=2) } } \references{ Gerds TA, Kattan MW. Medical Risk Prediction Models: With Ties to Machine Learning. Chapman & Hall/CRC https://www.routledge.com/9781138384477 Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. DOI 10.18637/jss.v050.i11 E. Graf et al. (1999), Assessment and comparison of prognostic classification schemes for survival data. Statistics in Medicine, vol 18, pp= 2529--2545. Efron, Tibshirani (1997) Journal of the American Statistical Association 92, 548--560 Improvement On Cross-Validation: The .632+ Bootstrap Method. Gerds, Schumacher (2006), Consistent estimation of the expected Brier score in general survival models with right-censored event times. Biometrical Journal, vol 48, 1029--1040. Thomas A. Gerds, Martin Schumacher (2007) Efron-Type Measures of Prediction Error for Survival Analysis Biometrics, 63(4), 1283--1287 doi:10.1111/j.1541-0420.2007.00832.x Martin Schumacher, Harald Binder, and Thomas Gerds. Assessment of survival prediction models based on microarray data. Bioinformatics, 23(14):1768-74, 2007. Mark A. van de Wiel, Johannes Berkhof, and Wessel N. 
van Wieringen Testing the prediction error difference between 2 predictors Biostatistics (2009) 10(3): 550-560 doi:10.1093/biostatistics/kxp011 } \seealso{ \code{\link{plot.pec}}, \code{\link{summary.pec}}, \code{\link{R2}}, \code{\link{crps}} } \author{ Thomas Alexander Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/crps.Rd0000644000176200001440000000377413571203270013045 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/crps.R \name{crps} \alias{crps} \alias{ibs} \title{Summarizing prediction error curves} \usage{ crps(object, models, what, times, start) } \arguments{ \item{object}{An object with estimated prediction error curves obtained with the function \link{pec}} \item{models}{Which models in \code{object$models} should be considered.} \item{what}{The name of the entry in \code{x} to be cumulated. Defaults to \code{PredErr} Other choices are \code{AppErr}, \code{BootCvErr}, \code{Boot632}, \code{Boot632plus}.} \item{times}{Time points at which the integration of the prediction error curve stops.} \item{start}{The time point at which the integration of the prediction error curve is started.} } \value{ A matrix with a column for the crps (ibs) at every requested time point and a row for each model } \description{ Computes the cumulative prediction error curves, aka integrated Brier scores, in ranges of time. } \details{ The cumulative prediction error (continuous ranked probability score) is defined as the area under the prediction error curve, hence the alias name, ibs, which is short for integrated Brier score. 
} \examples{ set.seed(18713) library(prodlim) library(survival) dat=SimSurv(100) pmodel=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE) perror=pec(list(Cox=pmodel),Hist(time,status)~1,data=dat) ## cumulative prediction error crps(perror,times=1) # between min time and 1 ## same thing: ibs(perror,times=1) # between min time and 1 crps(perror,times=1,start=0) # between 0 and 1 crps(perror,times=seq(0,1,.2),start=0) # between 0 and seq(0,1,.2) } \references{ E. Graf et al. (1999), Assessment and comparison of prognostic classification schemes for survival data. Statistics in Medicine, vol 18, pp= 2529--2545. Gerds TA, Cai T & Schumacher M (2008) The performance of risk prediction models Biometrical Journal, 50(4), 457--479 } \seealso{ \code{\link{pec}} } \author{ Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/pecCforest.Rd0000644000176200001440000000163714131004272014161 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictSurvProb.cforest.R \name{pecCforest} \alias{pecCforest} \title{S3-wrapper function for cforest from the party package} \usage{ pecCforest(formula, data, ...) } \arguments{ \item{formula}{Passed on as is. See \code{cforest} of the \code{party} package} \item{data}{Passed on as is. See \code{cforest} of the \code{party} package} \item{...}{Passed on as they are. See \code{cforest} of the \code{party} package} } \value{ list with two elements: cforest and call } \description{ S3-wrapper function for cforest from the party package } \details{ See \code{cforest} of the \code{party} package. } \references{ Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. 
DOI 10.18637/jss.v050.i11 } \keyword{survival} pec/man/Special.Rd0000644000176200001440000000330113753010514013437 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Special.R \name{Special} \alias{Special} \title{Drawing bootstrapped cross-validation curves and the .632 or .632plus error of models. The prediction error for an optional benchmark model can be added together with bootstrapped cross-validation error and apparent errors.} \usage{ Special( x, y, addprederr, models, bench, benchcol, times, maxboot, bootcol, col, lty, lwd ) } \arguments{ \item{x}{an object of class 'pec' as returned by the \code{pec} function.} \item{y}{Prediction error values.} \item{addprederr}{Additional prediction errors. The options are bootstrap cross-validation errors or apparent errors.} \item{models}{One model also specified in \code{pec} for which the \code{predErr} in \code{plot.pec} is to be drawn.} \item{bench}{A benchmark model (also specified in \code{pec}) for which the \code{predErr} in \code{plot.pec} is to be drawn.} \item{benchcol}{Color of the benchmark curve.} \item{times}{Time points at which the curves must be plotted.} \item{maxboot}{Maximum number of bootstrap curves to be added. Default is all.} \item{bootcol}{Color of the bootstrapped curves. Default is 'gray77'.} \item{col}{Color of the different error curves for \code{models}.} \item{lty}{Line type of the different error curves for \code{models}.} \item{lwd}{Line width of the different error curves for \code{models}.} } \value{ Invisible object. } \description{ This function is invoked and controlled by \code{plot.pec}. } \details{ This function should not be called directly. The arguments can be specified as \code{Special.arg} in the call to \code{plot.pec}. 
} \seealso{ \code{\link{plot.pec}} } pec/man/predictEventProb.Rd0000644000176200001440000000421413753010514015342 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictEventProb.R \name{predictEventProb} \alias{predictEventProb} \alias{predictEventProb.CauseSpecificCox} \alias{predictEventProb.riskRegression} \alias{predictEventProb.FGR} \alias{predictEventProb.prodlim} \alias{predictEventProb.rfsrc} \title{Predicting event probabilities (cumulative incidences) in competing risk models.} \usage{ predictEventProb(object, newdata, times, cause, ...) } \arguments{ \item{object}{A fitted model from which to extract predicted event probabilities} \item{newdata}{A data frame containing predictor variable combinations for which to compute predicted event probabilities.} \item{times}{A vector of times in the range of the response variable, for which the cumulative incidences event probabilities are computed.} \item{cause}{Identifies the cause of interest among the competing events.} \item{\dots}{Additional arguments that are passed on to the current method.} } \value{ A matrix with as many rows as \code{NROW(newdata)} and as many columns as \code{length(times)}. Each entry should be a probability and in rows the values should be increasing. } \description{ Function to extract event probability predictions from various modeling approaches. The most prominent one is the combination of cause-specific Cox regression models which can be fitted with the function \code{cumincCox} from the package \code{compRisk}. } \details{ The function predictEventProb is a generic function that means it invokes specifically designed functions depending on the 'class' of the first argument. See \code{\link{predictSurvProb}}. 
} \examples{ library(pec) library(survival) library(riskRegression) library(prodlim) train <- SimCompRisk(100) test <- SimCompRisk(10) cox.fit <- CSC(Hist(time,cause)~X1+X2,data=train) predictEventProb(cox.fit,newdata=test,times=seq(1:10),cause=1) ## with strata cox.fit2 <- CSC(list(Hist(time,cause)~strata(X1)+X2,Hist(time,cause)~X1+X2),data=train) predictEventProb(cox.fit2,newdata=test,times=seq(1:10),cause=1) } \seealso{ See \code{\link{predictSurvProb}}. } \author{ Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/selectCox.Rd0000644000176200001440000000235014131004277014013 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/selectCox.R \name{selectCox} \alias{selectCox} \title{Backward variable selection in the Cox regression model} \usage{ selectCox(formula, data, rule = "aic") } \arguments{ \item{formula}{A formula object with a \code{Surv} object on the left-hand side and all the variables on the right-hand side.} \item{data}{Name of an data frame containing all needed variables.} \item{rule}{The method for selecting variables. See \code{\link{fastbw}} for details.} } \description{ This is a wrapper function which first selects variables in the Cox regression model using \code{fastbw} from the \code{rms} package and then returns a fitted Cox regression model with the selected variables. } \details{ This function first calls \code{cph} then \code{fastbw} and finally \code{cph} again. } \examples{ data(GBSG2) library(survival) f <- selectCox(Surv(time,cens)~horTh+age+menostat+tsize+tgrade+pnodes+progrec+estrec , data=GBSG2) } \references{ Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. 
DOI 10.18637/jss.v050.i11 } \keyword{survival} pec/man/GBSG2.Rd0000755000176200001440000000250413753010514012672 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pec-package.R \docType{data} \name{GBSG2} \alias{GBSG2} \title{German Breast Cancer Study Group 2} \format{ This data frame contains the observations of 686 women: \describe{ \item{horTh}{hormonal therapy, a factor at two levels \code{no} and \code{yes}.} \item{age}{of the patients in years.} \item{menostat}{menopausal status, a factor at two levels \code{pre} (premenopausal) and \code{post} (postmenopausal).} \item{tsize}{tumor size (in mm).} \item{tgrade}{tumor grade, a ordered factor at levels \code{I < II < III}.} \item{pnodes}{number of positive nodes.} \item{progrec}{progesterone receptor (in fmol).} \item{estrec}{estrogen receptor (in fmol).} \item{time}{recurrence free survival time (in days).} \item{cens}{censoring indicator (0- censored, 1- event).} } } \description{ A data frame containing the observations from the GBSG2 study. } \references{ M. Schumacher, G. Basert, H. Bojar, K. Huebner, M. Olschewski, W. Sauerbrei, C. Schmoor, C. Beyerle, R.L.A. Neumann and H.F. Rauschecker for the German Breast Cancer Study Group (1994), Randomized \eqn{2\times2} trial evaluating hormonal treatment and the duration of chemotherapy in node-positive breast cancer patients. \emph{Journal of Clinical Oncology}, \bold{12}, 2086--2093. } \keyword{datasets} pec/man/plotPredictEventProb.Rd0000644000176200001440000000560414131004266016203 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotPredictEventProb.R \name{plotPredictEventProb} \alias{plotPredictEventProb} \title{Plotting predicted survival curves.} \usage{ plotPredictEventProb( x, newdata, times, cause = 1, xlim, ylim, xlab, ylab, axes = TRUE, col, density, lty, lwd, add = FALSE, legend = TRUE, percent = FALSE, ... 
) } \arguments{ \item{x}{Object specifying an event risk prediction model.} \item{newdata}{A data frame with the same variable names as those that were used to fit the model \code{x}.} \item{times}{Vector of times at which to return the estimated probabilities.} \item{cause}{Show predicted risk of events of this cause} \item{xlim}{Plotting range on the x-axis.} \item{ylim}{Plotting range on the y-axis.} \item{xlab}{Label given to the x-axis.} \item{ylab}{Label given to the y-axis.} \item{axes}{Logical. If \code{FALSE} no axes are drawn.} \item{col}{Vector of colors given to the survival curve.} \item{density}{Density of the color -- useful for showing many (overlapping) curves.} \item{lty}{Vector of lty's given to the survival curve.} \item{lwd}{Vector of lwd's given to the survival curve.} \item{add}{Logical. If \code{TRUE} only lines are added to an existing device} \item{legend}{Logical. If TRUE a legend is plotted by calling the function legend. Optional arguments of the function \code{legend} can be given in the form \code{legend.x=val} where x is the name of the argument and val the desired value. See also Details.} \item{percent}{Logical. If \code{TRUE} the y-axis is labeled in percent.} \item{\dots}{Parameters that are filtered by \code{\link{SmartControl}} and then passed to the functions: \code{\link{plot}}, \code{\link{axis}}, \code{\link{legend}}.} } \value{ The (invisible) object. } \description{ Plotting time-dependent event risk predictions. } \details{ Arguments for the invoked functions \code{legend} and \code{axis} are simply specified as \code{legend.lty=2}. The specification is not case sensitive, thus \code{Legend.lty=2} or \code{LEGEND.lty=2} will have the same effect. The function \code{axis} is called twice, and arguments of the form \code{axis1.labels}, \code{axis1.at} are used for the time axis whereas \code{axis2.pos}, \code{axis1.labels}, etc. are used for the y-axis. 
These arguments are processed via \code{\dots{}} of \code{plotPredictEventProb} and inside by using the function \code{SmartControl}. } \examples{ # generate some competing risk data } \references{ Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. DOI 10.18637/jss.v050.i11 } \seealso{ \code{\link{predictEventProb}}\code{\link{prodlim}} } \author{ Ulla B. Mogensen \email{ulmo@biostat.ku.dk}, Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/man/predictSurvProb.Rd0000755000176200001440000001301414131004273015215 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictSurvProb.R \name{predictSurvProb} \alias{predictSurvProb} \alias{predictSurvProb.aalen} \alias{predictSurvProb.riskRegression} \alias{predictSurvProb.cox.aalen} \alias{predictSurvProb.coxph} \alias{predictSurvProb.cph} \alias{predictSurvProb.default} \alias{predictSurvProb.rfsrc} \alias{predictSurvProb.matrix} \alias{predictSurvProb.pecCtree} \alias{predictSurvProb.pecCforest} \alias{predictSurvProb.prodlim} \alias{predictSurvProb.psm} \alias{predictSurvProb.selectCox} \alias{predictSurvProb.survfit} \alias{predictSurvProb.pecRpart} \title{Predicting survival probabilities} \usage{ \method{predictSurvProb}{aalen}(object,newdata,times,...) \method{predictSurvProb}{riskRegression}(object,newdata,times,...) \method{predictSurvProb}{cox.aalen}(object,newdata,times,...) \method{predictSurvProb}{cph}(object,newdata,times,...) \method{predictSurvProb}{coxph}(object,newdata,times,...) \method{predictSurvProb}{matrix}(object,newdata,times,...) \method{predictSurvProb}{selectCox}(object,newdata,times,...) \method{predictSurvProb}{pecCforest}(object,newdata,times,...) \method{predictSurvProb}{prodlim}(object,newdata,times,...) \method{predictSurvProb}{psm}(object,newdata,times,...) 
\method{predictSurvProb}{survfit}(object,newdata,times,...) \method{predictSurvProb}{pecRpart}(object,newdata,times,...) #' \method{predictSurvProb}{pecCtree}(object,newdata,times,...) } \arguments{ \item{object}{A fitted model from which to extract predicted survival probabilities} \item{newdata}{A data frame containing predictor variable combinations for which to compute predicted survival probabilities.} \item{times}{A vector of times in the range of the response variable, e.g. times when the response is a survival object, at which to return the survival probabilities.} \item{\dots}{Additional arguments that are passed on to the current method.} } \value{ A matrix with as many rows as \code{NROW(newdata)} and as many columns as \code{length(times)}. Each entry should be a probability and in rows the values should be decreasing. } \description{ Function to extract survival probability predictions from various modeling approaches. The most prominent one is the Cox regression model which can be fitted for example with `coxph' and with `cph'. } \details{ The function predictSurvProb is a generic function that means it invokes specifically designed functions depending on the 'class' of the first argument. The function \code{pec} requires survival probabilities for each row in newdata at requested times. These probabilities are extracted from a fitted model of class \code{CLASS} with the function \code{predictSurvProb.CLASS}. Currently there are \code{predictSurvProb} methods for objects of class cph (library rms), coxph (library survival), aalen (library timereg), cox.aalen (library timereg), rpart (library rpart), product.limit (library prodlim), survfit (library survival), psm (library rms) } \note{ In order to assess the predictive performance of a new survival model a specific \code{predictSurvProb} S3 method has to be written. For examples, see the bodies of the existing methods. 
The performance of the assessment procedure, in particular for resampling where the model is repeatedly evaluated, will be improved by supressing in the call to the model all the computations that are not needed for probability prediction. For example, \code{se.fit=FALSE} can be set in the call to \code{cph}. } \examples{ # generate some survival data library(prodlim) set.seed(100) d <- SimSurv(100) # then fit a Cox model library(rms) coxmodel <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE) # Extract predicted survival probabilities # at selected time-points: ttt <- quantile(d$time) # for selected predictor values: ndat <- data.frame(X1=c(0.25,0.25,-0.05,0.05),X2=c(0,1,0,1)) # as follows predictSurvProb(coxmodel,newdata=ndat,times=ttt) # stratified cox model sfit <- coxph(Surv(time,status)~strata(X1)+X2,data=d,,x=TRUE,y=TRUE) predictSurvProb(sfit,newdata=d[1:3,],times=c(1,3,5,10)) ## simulate some learning and some validation data learndat <- SimSurv(100) valdat <- SimSurv(100) ## use the learning data to fit a Cox model library(survival) fitCox <- coxph(Surv(time,status)~X1+X2,data=learndat,x=TRUE,y=TRUE) ## suppose we want to predict the survival probabilities for all patients ## in the validation data at the following time points: ## 0, 12, 24, 36, 48, 60 psurv <- predictSurvProb(fitCox,newdata=valdat,times=seq(0,60,12)) ## This is a matrix with survival probabilities ## one column for each of the 5 time points ## one row for each validation set individual # Do the same for a randomSurvivalForest model library(randomForestSRC) rsfmodel <- rfsrc(Surv(time,status)~X1+X2,data=d) predictSurvProb(rsfmodel,newdata=ndat,times=ttt) ## Cox with ridge option f1 <- coxph(Surv(time,status)~X1+X2,data=learndat,x=TRUE,y=TRUE) f2 <- coxph(Surv(time,status)~ridge(X1)+ridge(X2),data=learndat,x=TRUE,y=TRUE) plot(predictSurvProb(f1,newdata=valdat,times=10), pec:::predictSurvProb.coxph(f2,newdata=valdat,times=10), xlim=c(0,1), ylim=c(0,1), xlab="Unpenalized predicted survival 
chance at 10", ylab="Ridge predicted survival chance at 10") } \references{ Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). Evaluating Random Forests for Survival Analysis Using Prediction Error Curves. Journal of Statistical Software, 50(11), 1-23. DOI 10.18637/jss.v050.i11 } \seealso{ \code{\link{predict}},\code{\link{survfit}} } \author{ Thomas A. Gerds \email{tag@biostat.ku.dk} } \keyword{survival} pec/DESCRIPTION0000644000176200001440000000156514131045152012532 0ustar liggesusersPackage: pec Title: Prediction Error Curves for Risk Prediction Models in Survival Analysis Version: 2021.10.11 Author: Thomas A. Gerds Description: Validation of risk predictions obtained from survival models and competing risk models based on censored data using inverse weighting and cross-validation. Most of the 'pec' functionality has been moved to 'riskRegression'. Depends: R (>= 2.9.0), prodlim (>= 1.4.9) Imports: foreach (>= 1.4.2), rms (>= 4.2-0), survival (>= 2.37-7), riskRegression (>= 2020.02.05), lava (>= 1.4.1), timereg (>= 1.8.9), Suggests: randomForestSRC, party, cmprsk (>= 2.2-7), rpart, crrstep, Hmisc (>= 3.14-4) Maintainer: Thomas A. 
Gerds License: GPL (>= 2) RoxygenNote: 7.1.1 NeedsCompilation: yes Packaged: 2021-10-11 11:26:41 UTC; tag Repository: CRAN Date/Publication: 2021-10-11 14:30:02 UTC pec/tests/0000755000176200001440000000000013571203270012163 5ustar liggesuserspec/tests/testthat/0000755000176200001440000000000013571203270014023 5ustar liggesuserspec/tests/testthat/pec-loop.R0000644000176200001440000000156013571203270015666 0ustar liggesuserstest_that("boot632plus",{ set.seed(130971) dat <- SimSurv(100) dat$X1 <- as.factor(dat$X1) Models <- list("Cox.X1"=coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE), "Cox.X2"=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE), "Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE)) set.seed(17100) PredError.632plus <- pec(object=Models, formula=Surv(time,status)~X1+X2, data=dat, exact=TRUE, cens.model="cox", splitMethod="Boot632plus", B=3, verbose=TRUE) saved <- c(Reference=8.09,Cox.X1=7.86,Cox.X2=7.23,Cox.X1.X2=7.07) expect_equal(round(100*ibs(PredError.632plus,times=3)[,4],2),saved) }) pec/src/0000755000176200001440000000000014131017561011607 5ustar liggesuserspec/src/survest_cox_aalen.c0000755000176200001440000000102213571203270015476 0ustar liggesusers#include void survest_cox_aalen(double *hazard, double *coef, double *vars, int *nvars, int *nobs, int *ntime) { int t,i,z; /* this loop saves the time-varying part of the fitted hazard */ for (t=0;t<*ntime;t++){ /* at each jump time of the fit */ for (i=0;i<*nobs;i++){ /* and for each patient we compute the cumulative hazard */ for (z=0;z<*nvars;z++){ hazard[i + t*(*nobs)] += coef[t + z*(*ntime)] * vars[i + z *(*nobs)]; } } } } pec/src/ccr.c0000644000176200001440000000726513571203270012535 0ustar liggesusers/* COMPETING RISK Wolbers et al. 
conditional censoring survival weights: WAij=G(T_i-|X_i)G(T_i|X_j) WBij=G(T_i-|X_i)G(T_j-|X_j) NOTE: there is one weight for each person in weight.i and one row for each person in weight.j we need the value at time T[i] */ #include void ccr(double *C, double *concA, double *pairsA, double *concB, double *pairsB, int *tindex, double *T, int *Delta, int *D, double *times, double *weight_i, double *weight_j, double *pred, int *N, int *NT, int *tiedpredIn, int *tiedoutcomeIn, int *tiedmatchIn, int *cens_model){ int i,j,s; double Aij, Bij, WAij=1, WBij=1, weightedA, weightedB ,lasttime=0, weightedConcPairs,weightedPairs; for (s=0; s<(*NT);s++) { concA[s]=0; /* count concordant pairs with (T[i]=T[j], D[j]=2,T[i]) */ pairsA[s]=0; /* count pairs with (T[i]=T[j], D[j]=2,T[i]) */ weightedConcPairs=0; /* weighted concordant pairs */ weightedPairs=0; /* weighted pairs */ for (i=0;i<(*N);i++){ /* for all pairs one of the times must be uncensored and cause 1 */ if (T[i]<=times[s] && Delta[i]==1 && D[i]==1){ /* Rprintf("\n\ni=%d\n",i+1); */ for (j=0;j<*N;j++){ if (j!=i){ Aij=0; Bij=0; /* extract the weights */ if (*cens_model==0){ WAij = weight_i[i] * weight_j[(tindex[i]-1)]; } else{ WAij = weight_i[i] * weight_j[(j + (tindex[i]-1) * (*N))]; } WBij = weight_i[i] * weight_i[j]; /* time_j is either greater than time_i or censored and equal */ if (T[i] pred[j + s * (*N)]) { concA[s] +=Aij; concB[s] +=Bij; weightedConcPairs += (weightedA+weightedB); /* Rprintf("Concordant: pred.i=%1.2f\tpred.j=%1.2f\n",pred[i + s * (*N)],pred[j + s * (*N)]); */ } /* pairs with equal predictions count 1/2 or nothing */ if (pred[i + s * (*N)] == pred[j + s * (*N)]) { /* Rprintf("here\n"); */ if (*tiedpredIn==1 ){ concA[s] += Aij/2; concB[s] += Bij/2; /* Rprintf("wa=%1.2f\twb=%1.2f\twconc=%1.2f\n",weightedA,weightedB,weightedConcPairs); */ weightedConcPairs += (weightedA+weightedB)/2; /* Rprintf("wa=%1.2f\twb=%1.2f\twconc=%1.2f\n",weightedA,weightedB,weightedConcPairs); */ } } } } } } /* 
C[s]=(concA[s]+concB[s])/(pairsA[s]+pairsB[s]); */ C[s]=weightedConcPairs/weightedPairs; lasttime=times[s]; } } pec/src/pec.c0000755000176200001440000001065213571203270012532 0ustar liggesusers#include #include /* survival probabilities */ void pecSRC(double *pec, double *Y, double *D, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i; double p, brier, gs, gi; for (s=0; s<(*NT);s++) { for (i=0; i<*N;i++){ /* prediction */ if (*ConstantPrediction==0){ p = pred[i + s * (*N)]; } else{ p = pred[s]; } /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s]) brier = D[i] * p * p / gi; else brier = (1-p)*(1-p) / gs; pec[s] += brier / (double) (*N); } } } /* event probabilities - competing risks */ void pecCR(double *pec, double *Y, double *D, double *E, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i; double p, brier, gs, gi; for (s=0; s<(*NT);s++) { for (i=0; i<*N;i++){ /* prediction */ if (*ConstantPrediction==0) p = pred[i + s * (*N)]; else p = pred[s]; /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s]) /* brier = (D[i] * (E[i]-p) * (E[i]-p)) / gi; */ if (E[i]==1) brier = (D[i] * (1-p) * (1-p)) / gi; else brier = (D[i] * p * p) / gi; else brier = p*p / gs; pec[s] += brier / (double) (*N); /* Rprintf("i=%d\tY[i]=%1.2f\ttimes[s]=%1.2f\tE[i]=%1.2f\tD[i]=%1.2f\tp=%1.2f\tbrier=%1.2f\tpec[s]=%1.2f\tgi=%1.2f\tgs=%1.2f\n",i,Y[i],times[s],E[i],D[i],p,brier,pec[s],gi,gs); */ } } } void pec_uncens(double *pec, double *Y, double *times, double *pred, int *N, int *NT, int *ConstantPrediction, int *survP) { int s, i; double p, brier; for (s=0; s<(*NT);s++) { for (i=0; i<*N;i++){ /* prediction */ if (*ConstantPrediction==0) p = pred[i + s * (*N)]; else p = pred[s]; if (*survP==1) if (Y[i] <= 
times[s]) brier = p * p; else brier = (1-p)*(1-p); else if (Y[i] > times[s]) brier = p * p; else brier = (1-p)*(1-p); pec[s] += brier / (double) *N; } } } void pec_noinf(double *pec, double *Y, double *D, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i, j; double p, brier, gs, gi; for (s=0; s<*NT;s++) { for (j=0; j<*N; j++){ /* prediction */ if (*ConstantPrediction==0) p = pred[j + s * (*N)]; else p = pred[s]; for (i=0; i<(*N); i++){ /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s]) brier = D[i] * p * p / gi; else brier = (1-p)*(1-p) / gs; pec[s] += brier / (double) ((*N) * (*N)); } } } } void pec_noinfCR(double *pec, double *Y, double *D, double *E, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i, j; double p, brier, gs, gi; for (s=0; s<*NT;s++) { for (j=0; j<*N; j++){ /* prediction */ if (*ConstantPrediction==0) p = pred[j + s * (*N)]; else p = pred[s]; for (i=0; i<(*N); i++){ /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s]) brier = E[i] * D[i] * (1-p) * (1-p) / gi; else brier = p*p / gs; pec[s] += brier / (double) ((*N) * (*N)); } } } } void pec_cmprsk(double *pec, double *Y, double *D, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i; double p, brier, gs, gi; for (s=0; s<(*NT);s++) { for (i=0; i<(*N);i++){ /* prediction */ if (*ConstantPrediction==0) p = pred[i + s * (*N)]; else p = pred[s]; /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s] && D[i]==1){ brier = (p * p) + (1 - 2 * p)/gi;} else brier = (p * p); pec[s] += brier / (double) (*N); } } } pec/src/cindex.c0000755000176200001440000000553713571203270013243 0ustar 
liggesusers#include void cindexSRC(double *C, double *conc, double *pairs, int *tindex, double *Y, int *status, double *times, double *weight_i, double *weight_j, double *pred, int *N, int *NT, int *tiedpredIn, int *tiedoutcomeIn, int *tiedmatchIn, int *cens_model){ int i,j,s; double wi, wj, ww, lasttime=0; for (s=0; s<(*NT);s++) { conc[s]=0; pairs[s]=0; for (i=0;i<(*N);i++){ /* for usuable pairs the smaller time must be uncensored */ if (Y[i]<=times[s] && status[i]==1){ for (j=i+1;j<*N;j++){ if (*cens_model==0){ /* marginal censoring survival weights: G(T_i-) for i G(T_i) for j */ wi = weight_i[i]; wj = weight_j[(tindex[i]-1)]; } else{ /* conditional censoring survival weights: G(T_i-|X_i) for i G(T_i|X_j) for j NOTE: there is one weight for each person in weight.i and one row for each person in weight.j we need the value at time Y[i] */ /* wi = weight_i[(tindex[i]-1)]; */ wi = weight_i[i]; wj = weight_j[(j + (tindex[i]-1) * (*N))]; } ww = (wi * wj); /* Rprintf("i=%d\twi=%1.8f\n",i,wi); */ /* if ((1/wi)>1000) Rprintf("Large wi=%1.8f\n",wi); */ /* if ((1/ww)>100) Rprintf("Yi=%1.2f\tYj=%1.2f\tt=%1.2f\tLarge wj=%1.8f\twi=%1.8f\n",Y[i],Y[j],times[s],wi,wj); */ /* if ((1/ww)>((*N)*(*N))) ww=1/((*N)*(*N)); */ /* if ((1/ww)>trunc[1]) { */ /* ww=1/trunc[1]; */ /* } */ /* if ((1/ww)>(*N)) { */ /* Rprintf("Warning: truncated weights"); */ /* Rprintf("Yi=%1.2f\tYj=%1.2f\twj=%1.8f\twi=%1.8f\n",Y[i],Y[j],wj,wi); */ /* ww=1/((double)(*N)); */ /* } */ /* pair unusuable if any weight==0 */ if (wj>0 && wi>0){ /* rare case: same outcome and same prediction count as concordant pair when tiedmatchIn == TRUE */ if (*tiedmatchIn==1 && (Y[i]==Y[j] && status[j]==1 && (pred[i + s * (*N)] == pred[j + s * (*N)]))){ pairs[s] += (1/ww); conc[s] += (1/ww); } else{ /* if tiedoutcomeIn==0 call pairs with tied outcome unusuable, unless Y_j was censored, since then the uncensored Y_j will be greater than Y_i */ if (*tiedoutcomeIn==1 || (Y[i]!=Y[j] || status[j]==0)){ if (pred[i + s * (*N)] == 
pred[j + s * (*N)]) { /* call pair unusuable if same predictions */ /* Rprintf("pred[i+s*(*N)]=%1.2f\tpred[j+s*(*N)]=%1.2f\t\n",pred[i+s*(*N)],pred[j+s*(*N)]); */ if (*tiedpredIn==1){ pairs[s] += 1/ww; conc[s] += 1/(2* ww); } } else{ /* call pair concordant if p_i < p_j */ pairs[s] += 1/ww; if (pred[i + s * (*N)] < pred[j + s * (*N)]) { conc[s] += 1/ww; } } } } } } } } C[s]=conc[s]/pairs[s]; lasttime=times[s]; } } pec/src/init.c0000644000176200001440000000412513571203270012721 0ustar liggesusers#include // for NULL #include /* FIXME: Check these declarations against the C/Fortran source code. */ /* .C calls */ extern void ccr(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void cindexSRC(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pecCR(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pec_noinf(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pec_noinfCR(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pecResiduals(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pecResidualsCR(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pecSRC(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void SNull(void *, void *, void *, void *, void *, void *); extern void survest_cox_aalen(void *, void *, void *, void *, void *, void *); static const R_CMethodDef CEntries[] = { {"ccr", (DL_FUNC) &ccr, 19}, {"cindexSRC", (DL_FUNC) &cindexSRC, 16}, {"pecCR", (DL_FUNC) &pecCR, 12}, {"pec_noinf", (DL_FUNC) &pec_noinf, 11}, {"pec_noinfCR", 
(DL_FUNC) &pec_noinfCR, 12}, {"pecResiduals", (DL_FUNC) &pecResiduals, 12}, {"pecResidualsCR", (DL_FUNC) &pecResidualsCR, 13}, {"pecSRC", (DL_FUNC) &pecSRC, 11}, {"SNull", (DL_FUNC) &SNull, 6}, {"survest_cox_aalen", (DL_FUNC) &survest_cox_aalen, 6}, {NULL, NULL, 0} }; void R_init_pec(DllInfo *dll) { R_registerRoutines(dll, CEntries, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } pec/src/auc.c0000644000176200001440000000240113571203270012521 0ustar liggesusers#include void auc(double *AUC, double *conc, double *pairs, int *tindex, double *Y, int *status, double *times, double *weight_i,/* G(T|X) */ double *weight, /* G(s|X) */ double *pred, int *N, int *NT, int *tiedpredIn, int *cens_model){ int i,j,s; double wi, wj, ww, lasttime=0; for (s=0; s<(*NT);s++) { conc[s]=0; pairs[s]=0; for (i=0;i<(*N);i++){ /* for usuable pairs the smaller time must be uncensored */ if (Y[i]<=times[s] && status[i]==1){ for (j=tindex[s];j<*N;j++){ /* censoring survival weights: G(T_i-|X_i) for i G(s|X_j) for j */ wi = weight_i[i]; wj = weight[(j + s * (*N)) * (*cens_model) + s * (1-(*cens_model))]; ww = (wi * wj); /* pair unusuable if any weight==0 */ if (wj>0 && wi>0){ if (pred[i + s * (*N)] == pred[j + s * (*N)]) { /* call pair unusuable if same predictions */ if (*tiedpredIn==1){ pairs[s] += 1/ww; conc[s] += 1/(2* ww); } } else{ /* call pair concordant if p_i < p_j */ pairs[s] += 1/ww; if (pred[i + s * (*N)] < pred[j + s * (*N)]) { conc[s] += 1/ww; } } } } } } AUC[s]=conc[s]/pairs[s]; lasttime=times[s]; } } pec/src/SNull.c0000644000176200001440000000040513571203270013010 0ustar liggesusers#include #include void SNull(double *time, double *jumptimes, double *elp, double *S, int *N, int *NJ){ int s,i; for (s=0; s<*NJ; s++){ for (i=0; i<*N; i++){ if (time[i]>=jumptimes[s]) S[s]+=elp[i]; } } } pec/src/brier_score.c0000755000176200001440000000047213571203270014260 0ustar liggesusersvoid brier_noinf(double *bs, double *Y, double *pred, int *N) { int i, j; double p, y, brier; for (j=0; 
j<*N; j++){ p = pred[j]; /* prediction */ for (i=0; i<*N; i++){ y = Y[i]; /* observation */ brier=(y-p)*(y-p); *bs += brier / (double) ((*N) * (*N)); } } } pec/src/pecResiduals.c0000755000176200001440000000304213571203270014401 0ustar liggesusers#include void pecResiduals(double *pec, double *resid, double *Y, double *D, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i; double p, brier, gs, gi; for (s=0; s<(*NT);s++) { for (i=0; i<*N;i++){ /* prediction */ if (*ConstantPrediction==0){ p = pred[i + s * (*N)]; } else{ p = pred[s]; } /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s]) brier = D[i] * p * p / gi; else brier = (1-p)*(1-p) / gs; resid[i + s*(*N)] = brier; pec[s] += brier / (double) (*N); } } } void pecResidualsCR(double *pec, double *resid, double *Y, double *D, double *E, double *times, double *pred, double *weight, double *weight_obs, int *N, int *NT, int *cmodel, int *ConstantPrediction) { int s, i; double p, brier, gs, gi; for (s=0; s<(*NT);s++) { for (i=0; i<*N;i++){ /* prediction */ if (*ConstantPrediction==0){ p = pred[i + s * (*N)]; } else{ p = pred[s]; } /* weights */ gs = weight[(i + s * (*N)) * (*cmodel) + s * (1-(*cmodel))]; gi = weight_obs[i]; if (Y[i] <= times[s]) brier = E[i] * D[i] * (1-p) * (1-p) / gi; else brier = p*p / gs; resid[i + s*(*N)] = brier; pec[s] += brier / (double) (*N); } } } pec/R/0000755000176200001440000000000014131004307011213 5ustar liggesuserspec/R/ibs.R0000644000176200001440000000022713571203267012131 0ustar liggesusers## the name ibs is more intuitive for integrated Brier score ## whereas continuous ranked probability score is less well known ##' @export ibs <- crps pec/R/predictEventProb.R0000755000176200001440000001653513753263576014662 0ustar liggesusers# methods for competing risk regression # -------------------------------------------------------------------- #' 
Predicting event probabilities (cumulative incidences) in competing risk #' models. #' #' Function to extract event probability predictions from various modeling #' approaches. The most prominent one is the combination of cause-specific Cox #' regression models which can be fitted with the function \code{cumincCox} #' from the package \code{compRisk}. #' #' The function predictEventProb is a generic function that means it invokes #' specifically designed functions depending on the 'class' of the first #' argument. #' #' See \code{\link{predictSurvProb}}. #' #' @aliases predictEventProb predictEventProb.CauseSpecificCox #' predictEventProb.riskRegression predictEventProb.FGR #' predictEventProb.prodlim predictEventProb.rfsrc #' @param object A fitted model from which to extract predicted event #' probabilities #' @param newdata A data frame containing predictor variable combinations for #' which to compute predicted event probabilities. #' @param times A vector of times in the range of the response variable, for #' which the cumulative incidences event probabilities are computed. #' @param cause Identifies the cause of interest among the competing events. #' @param \dots Additional arguments that are passed on to the current method. #' @return A matrix with as many rows as \code{NROW(newdata)} and as many #' columns as \code{length(times)}. Each entry should be a probability and in #' rows the values should be increasing. #' @author Thomas A. Gerds \email{tag@@biostat.ku.dk} #' @seealso See \code{\link{predictSurvProb}}. 
#' @keywords survival #' @examples #' #' library(pec) #' library(survival) #' library(riskRegression) #' library(prodlim) #' train <- SimCompRisk(100) #' test <- SimCompRisk(10) #' cox.fit <- CSC(Hist(time,cause)~X1+X2,data=train) #' predictEventProb(cox.fit,newdata=test,times=seq(1:10),cause=1) #' #' ## with strata #' cox.fit2 <- CSC(list(Hist(time,cause)~strata(X1)+X2,Hist(time,cause)~X1+X2),data=train) #' predictEventProb(cox.fit2,newdata=test,times=seq(1:10),cause=1) #' #' @export predictEventProb <- function(object,newdata,times,cause,...){ UseMethod("predictEventProb",object) } ##' @export predictEventProb.matrix <- function(object,newdata,times,...){ if (NROW(object) != NROW(newdata) || NCOL(object) != length(times)){ stop(paste("Prediction matrix has wrong dimensions: ", NROW(object), " rows and ", NCOL(object), " columns.\n But requested are predicted probabilities for ", NROW(newdata), " subjects (rows) in newdata and ", length(times), " time points (columns)", sep="")) } object } ##' @export predictEventProb.prodlim <- function(object,newdata,times,cause,...){ ## require(prodlim) p <- predict(object=object,cause=cause,type="cuminc",newdata=newdata,times=times,mode="matrix",level.chaos=1) ## if the model has no covariates ## then all cases get the same prediction ## in this exceptional case we proceed a vector if (NROW(p)==1 && NROW(newdata)>=1) p <- as.vector(p) ## p[is.na(p)] <- 0 if (is.null(dim(p))) {if (length(p)!=length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) } else{ if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) } p } ##' @export predictEventProb.FGR <- function(object,newdata,times,cause,...){ p 
<- predict(object=object,newdata=newdata,times=times) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictEventProb.riskRegression <- function(object,newdata,times,cause,...){ if (missing(times))stop("Argument times is missing") temp <- predict(object,newdata=newdata,times=times) pos <- prodlim::sindex(jump.times=temp$time,eval.times=times) ## if (NROW(newdata)==1) p <- cbind(0,temp$risk)[,pos+1,drop=FALSE] ## else ## p <- cbind(0,t(temp$risk))[,pos+1,drop=FALSE] if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictEventProb.ARR <- function(object,newdata,times,cause,...){ if (missing(times))stop("Argument times is missing") temp <- predict(object,newdata=newdata,times=times) pos <- prodlim::sindex(jump.times=temp$time,eval.times=times) p <- cbind(0,temp$P1)[,pos+1,drop=FALSE] if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictEventProb.CauseSpecificCox <- function (object, newdata, times, cause, ...) { riskRegression::predictRisk (object, newdata, times, cause, ...) 
} ##' @export predictEventProb.rfsrc <- function(object, newdata, times, cause, ...){ if (missing(cause)) stop("missing cause") if (!is.numeric(cause)) stop("cause is not numeric") cif <- predict(object,newdata=newdata,importance="none",...)$cif[,,cause,drop=TRUE] pos <- prodlim::sindex(jump.times=object$time.interest,eval.times=times) p <- cbind(0,cif)[,pos+1,drop=FALSE] if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ## predictUpdateProb.CSC <- function (object, newdata,times,horizon, cause, ...) { ## survtype <- object$survtype ## N <- NROW(newdata) ## NC <- length(object$model) ## eTimes <- object$eventTimes ## if (missing(cause)) ## cause <- object$theCause ## causes <- object$causes ## stopifnot(match(as.character(cause),causes,nomatch=0)!=0) ## # predict cumulative cause specific hazards ## cumHaz1 <- -log(predictSurvProb(object$models[[paste("Cause",cause)]],times=eTimes,newdata=newdata)) ## Haz1 <- t(apply(cbind(0,cumHaz1),1,diff)) ## if (survtype=="hazard"){ ## cumHazOther <- lapply(causes[-match(cause,causes)],function(c){ ## -log(predictSurvProb(object$models[[paste("Cause",c)]],times=eTimes,newdata=newdata)) ## }) ## lagsurv <- exp(-cumHaz1- do.call("+",cumHazOther)) ## cuminc1 <- t(apply(lagsurv*Haz1,1,cumsum)) ## } ## else{ ## tdiff <- min(diff(eTimes))/2 ## lagsurv <- predictSurvProb(object$models[["OverallSurvival"]],times=eTimes-tdiff,newdata=newdata) ## cuminc1 <- t(apply(lagsurv*Haz1,1,cumsum)) ## } ## pos <- prodlim::sindex(jump.times=eTimes, eval.times=times) ## cbind(0,cuminc1)[,pos+1,drop=FALSE] ## } pec/R/print.IPCW.R0000755000176200001440000000156613571203267013263 0ustar liggesusers##' @export print.IPCW <- function(x,digits=3,...){ cat("\nEstimated inverse of the probability of censoring weights (IPCW)\n\n") method=switch(x$method, 
"cox"="Cox regression", "marginal"="Kaplan-Meier", "nonpar"="Stratified Kaplan-Meier", "aalen"="Additive Aalen regression", "none"="No weighting", "Dont know") cat("Method for estimation: ", method,"\n") cat("Handler function: ",paste(as.character(x$fit$call[1]),"()",sep=""),"\n") if (!is.null(x$IPCW.times)){ cat("\nhead() of the predicted IPCW for", NROW(x$IPCW.times),"subjects (rows), at the",NCOL(x$IPCW.times),"requested times (columns):\n\n") print(head(x$IPCW.times),digits=digits,quote=FALSE) } if (!is.null(x$IPCW.subjectTimes)){ cat("\nhead() of predicted IPCW at the individual subject times:\n\n") print(head(x$IPCW.subjectTimes),digits=digits,quote=FALSE) } } pec/R/pseudo.kFoldCrossValidation.R0000644000176200001440000000016713571203267016741 0ustar liggesuserspseudo.kFoldCrossValidation <- function(x,...){ stop("the function pseudo.kFoldCrossValidation has to be written") } pec/R/print.riskReclassification.R0000644000176200001440000000333413571203267016664 0ustar liggesusers### print.reclassification.R --- #---------------------------------------------------------------------- ## author: Thomas Alexander Gerds ## created: Oct 3 2015 (16:26) ## Version: ## last-updated: Oct 3 2015 (16:26) ## By: Thomas Alexander Gerds ## Update #: 1 #---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: #---------------------------------------------------------------------- ## ### Code: print.riskReclassification <- function(x,percent=TRUE,digits=ifelse(percent,1,2),...){ cat("Observed overall re-classification table:\n\n") print(x$reclassification) cat("\nExpected re-classification probabilities (%) among subjects with event until time ",x$time,"\n\n",sep="") fmt <- paste0("%1.", digits[[1]], "f") dnames <- dimnames(x$reclassification) dim <- dim(x$reclassification) if (percent==TRUE){ rlist <- lapply(x$event.reclassification,function(x){ matrix(sprintf(fmt=fmt,100*c(x)),nrow=dim[1],ncol=dim[2],dimnames=dnames) }) 
}else{ rlist <- lapply(x$event.reclassification,function(x){ matrix(sprintf(fmt=fmt,c(x)),nrow=dim[1],ncol=dim[2],dimnames=dnames) }) } if (x$model=="competing.risks"){ print.listof(rlist[-length(rlist)],quote=FALSE) } else{ print.listof(rlist[1],quote=FALSE) } cat("\nExpected re-classification probabilities (%) among subjects event-free until time ",x$time,"\n\n",sep="") print.listof(rlist[length(rlist)],quote=FALSE) } #---------------------------------------------------------------------- ### print.reclassification.R ends here pec/R/predictSurvProb.pseudoForest.R0000644000176200001440000000314613571203267017175 0ustar liggesusers##' @export predictSurvProb.pseudoForest <- function(object, newdata, times, digits=8, ...){ stopifnot(object$model.type=="survival") # {{{ get forests # Extract forests # - NOTE: if more than one the forests are independent and only # made for easy extraction to different time points. # forestList <- object$forest if (class(forestList[[1]])!="randomForest") stop("Only works for 'randomForest'") L <- length(forestList) # }}} # {{{ predict to given time points # find the forest ## pos <- prodlim::sindex(jump.times=object$times,eval.times=times) pos <- match(times,object$times,nomatch=FALSE) if (any(pos==FALSE)) stop("Requested forests at times ",paste(times[!pos],collapse=", "),"not available. Available are forests at times:",paste(object$times,collapse=", ")) p <- do.call("cbind",lapply(pos,function(t){ getForest <- forestList[[t]] ## print(names(newdata)) ## print(str(getForest)) p.t <- stats::predict(getForest,newdata=newdata) p.t <- round(p.t,digits=digits) })) # }}} # {{{ return # check dim. 
if (is.null(dim(p))) { if (length(p)!=length(times)) stop("Prediction failed") } else{ if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) } p # }}} } pec/R/cindex.R0000644000176200001440000010513113755136313012627 0ustar liggesusers#' Concordance index for right censored survival time data #' #' In survival analysis, a pair of patients is called concordant if the risk of #' the event predicted by a model is lower for the patient who experiences the #' event at a later timepoint. The concordance probability (C-index) is the #' frequency of concordant pairs among all pairs of subjects. It can be used to #' measure and compare the discriminative power of risk prediction models. #' The function provides an inverse of the probability of censoring weighted #' estimate of the concordance probability to adjust for right censoring. #' Cross-validation based on bootstrap resampling or bootstrap subsampling can #' be applied to assess and compare the discriminative power of various #' regression modelling strategies on the same set of data. #' #' Pairs with identical observed times, where one is uncensored and one is #' censored, are always considered usable (independent of the value of #' \code{tiedOutcomeIn}), as it can be assumed that the event occurs at a later #' timepoint for the censored observation. 
#' #' @aliases cindex #' @param object A named list of prediction models, where allowed entries are #' (1) R-objects for which a \link{predictSurvProb} method exists (see #' details), (2) a \code{call} that evaluates to such an R-object (see #' examples), (3) a matrix with predicted probabilities having as many rows as #' \code{data} and as many columns as \code{times}. For cross-validation all #' objects in this list must include their \code{call}. #' @param formula A survival formula. The left hand side is used to finde the #' status response variable in \code{data}. For right censored data, the right #' hand side of the formula is used to specify conditional censoring models. #' For example, set \code{Surv(time,status)~x1+x2} and \code{cens.model="cox"}. #' Then the weights are based on a Cox regression model for the censoring times #' with predictors x1 and x2. Note that the usual coding is assumed: #' \code{status=0} for censored times and that each variable name that appears #' in \code{formula} must be the column name in \code{data}. If there are no #' covariates, i.e. \code{formula=Surv(time,status)~1} the \code{cens.model} is #' coerced to \code{"marginal"} and the Kaplan-Meier estimator for the #' censoring times is used to calculate the weights. If \code{formula} is #' \code{missing}, try to extract a formula from the first element in object. #' @param data A data frame in which to validate the prediction models and to #' fit the censoring model. If \code{data} is missing, try to extract a data #' set from the first element in object. #' @param eval.times A vector of timepoints for evaluating the discriminative #' ability. At each timepoint the c-index is computed using only those pairs #' where one of the event times is known to be earlier than this timepoint. If #' \code{eval.times} is \code{missing} then the largest #' uncensored event time is used. #' @param pred.times A vector of timepoints for evaluating the prediction #' models. 
This should either be exactly one timepoint used for all #' \code{eval.times}, or be as long as \code{eval.times}, in which case the #' predicted order of risk for the jth entry of \code{eval.times} is based on #' the jth entry of \code{pred.times} corresponding #' @param cause For competing risks, the event of interest. Defaults to the #' first state of the response, which is obtained by evaluating the left hand #' side of \code{formula} in \code{data}. #' @param lyl If \code{TRUE} rank subjects according to predicted #' life-years-lost (See Andersen due to this cause instead of predicted risk. #' @param cens.model Method for estimating inverse probability of censoring #' weights: #' #' \code{cox}: A semi-parametric Cox proportional hazard model is fitted to the #' censoring times #' #' \code{marginal}: The Kaplan-Meier estimator for the censoring times #' #' \code{nonpar}: Nonparametric extension of the Kaplan-Meier for the censoring #' times using symmetric nearest neighborhoods -- available for arbitrary many #' strata variables on the right hand side of argument \code{formula} but at #' most one continuous variable. See the documentation of the functions #' \code{prodlim} and \code{neighborhood} from the prodlim package. #' #' \code{aalen}: The nonparametric Aalen additive model fitted to the censoring #' times. Requires the timereg package maintained by Thomas Scheike. #' @param ipcw.refit If \code{TRUE} the inverse probability of censoring #' weights are estimated separately in each training set during #' cross-validation. #' @param ipcw.args List of arguments passed to function specified by argument \code{cens.model}. #' @param ipcw.limit Value between 0 and 1 (but not equal to 0!) used to cut for #' small weights in order to stabilize the estimate at late times where few #' individuals are observed. 
#' @param tiedPredictionsIn If \code{FALSE} pairs with identical predictions #' are excluded, unless also the event times are identical and uncensored and #' \code{tiedMatchIn} is set to \code{TRUE}. #' @param tiedOutcomeIn If \code{TRUE} pairs with identical and uncensored #' event times are excluded, unless also the predictions are identical and #' \code{tiedMatchIn} is set to \code{TRUE}. #' @param tiedMatchIn If \code{TRUE} then pairs with identical predictions and #' identical and uncensored event times are counted as concordant pairs. #' @param splitMethod Defines the internal validation design: #' #' \code{none/noPlan}: Assess the models in the give \code{data}, usually #' either in the same data where they are fitted, or in independent test data. #' #' \code{BootCv}: Bootstrap cross validation. The prediction models are trained #' on \code{B} bootstrap samples, that are either drawn with replacement of the #' same size as the original data or without replacement from \code{data} of #' the size \code{M}. The models are assessed in the observations that are NOT #' in the bootstrap sample. #' #' \code{Boot632}: Linear combination of AppCindex and OutOfBagCindex using the #' constant weight .632. #' #' @param B Number of bootstrap samples. The default depends on argument #' \code{splitMethod}. When \code{splitMethod} in c("BootCv","Boot632") the #' default is 100. For \code{splitMethod="none"} \code{B} is the number of #' bootstrap simulations e.g. to obtain bootstrap confidence limits -- default #' is 0. #' @param M The size of the bootstrap samples for resampling without #' replacement. Ignored for resampling with replacement. #' @param model.args List of extra arguments that can be passed to the #' \code{predictSurvProb} methods. The list must have an entry for each entry #' in \code{object}. #' @param model.parms Experimental. List of with exactly one entry for each #' entry in \code{object}. 
Each entry names parts of the value of the fitted #' models that should be extracted and added to the value. #' @param keep.index Logical. If \code{FALSE} remove the bootstrap or #' cross-validation index from the output list which otherwise is included in #' the method part of the output list. #' @param keep.matrix Logical. If \code{TRUE} add all \code{B} prediction error #' curves from bootstrapping or cross-validation to the output. #' @param keep.models Logical. If \code{TRUE} keep the models in object. Since #' fitted models can be large objects the default is \code{FALSE}. #' @param keep.residuals Experimental. #' @param keep.pvalues Experimental. #' @param keep.weights Experimental. #' @param multiSplitTest Experimental. #' @param testTimes A vector of time points for testing differences between #' models in the time-point specific Brier scores. #' @param confInt Experimental. #' @param confLevel Experimental. #' @param verbose if \code{TRUE} report details of the progress, e.g. count the #' steps in cross-validation. #' @param savePath Place in your filesystem (directory) where training models #' fitted during cross-validation are saved. If \code{missing} training models #' are not saved. #' @param slaveseed Vector of seeds, as long as \code{B}, to be given to the #' slaves in parallel computing. #' @param na.action Passed immediately to model.frame. Defaults to na.fail. If #' set otherwise most prediction models will not work. #' @param ... Not used. #' @return Estimates of the C-index. #' @author Thomas A Gerds \email{tag@@biostat.ku.dk} #' @references #' #' TA Gerds, MW Kattan, M Schumacher, and C Yu. Estimating a time-dependent #' concordance index for survival prediction models with covariate dependent #' censoring. Statistics in Medicine, Ahead of print:to appear, 2013. DOI = #' 10.1002/sim.5681 #' #' Wolbers, M and Koller, MT and Witteman, JCM and Gerds, TA (2013) Concordance #' for prognostic models with competing risks Research report 13/3. 
Department #' of Biostatistics, University of Copenhagen #' #' Andersen, PK (2012) A note on the decomposition of number of life years lost #' according to causes of death Research report 12/2. Department of #' Biostatistics, University of Copenhagen #' #' Paul Blanche, Michael W Kattan, and Thomas A Gerds. The c-index is not #' proper for the evaluation of-year predicted risks. Biostatistics, 20(2): #' 347--357, 2018. #' @keywords survival #' @examples #' #' # simulate data based on Weibull regression #' library(prodlim) #' set.seed(13) #' dat <- SimSurv(100) #' # fit three different Cox models and a random survival forest #' # note: low number of trees for the purpose of illustration #' library(survival) #' library(randomForestSRC) #' cox12 <- coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE) #' cox1 <- coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE) #' cox2 <- coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE) #' rsf1 <- rfsrc(Surv(time,status)~X1+X2,data=dat,ntree=15,forest=TRUE) #' # #' # compute the apparent estimate of the C-index at different time points #' # #' A1 <- pec::cindex(list("Cox X1"=cox1, #' "RSF"=rsf1), #' formula=Surv(time,status)~X1+X2, #' data=dat, #' eval.times=10) #' ApparrentCindex <- pec::cindex(list("Cox X1"=cox1, #' "Cox X2"=cox2, #' "Cox X1+X2"=cox12, #' "RSF"=rsf1), #' formula=Surv(time,status)~X1+X2, #' data=dat, #' eval.times=seq(1,15,1)) #' print(ApparrentCindex) #' plot(ApparrentCindex) #' # #' # compute the bootstrap-crossvalidation estimate of #' # the C-index at different time points #' # #' set.seed(142) #' bcvCindex <- pec::cindex(list("Cox X1"=cox1, #' "Cox X2"=cox2, #' "Cox X1+X2"=cox12, #' "RSF"=rsf1), #' formula=Surv(time,status)~X1+X2, #' data=dat, #' splitMethod="bootcv", #' B=5, #' eval.times=seq(1,15,1)) #' print(bcvCindex) #' plot(bcvCindex) #' # for uncensored data the results are the same #' # as those obtained with the function rcorr.cens from Hmisc #' #' set.seed(16) #' dat <- SimSurv(30) #' dat$staus=1 #' 
fit12 <- coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE) #' fit1 <- coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE) #' fit2 <- coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE) #' Cpec <- pec::cindex(list("Cox X1+X2"=fit12,"Cox X1"=fit1,"Cox X2"=fit2), #' formula=Surv(time,status)~1, #' data=dat) #' p1 <- predictSurvProb(fit1,newdata=dat,times=10) #' p2 <- predictSurvProb(fit2,newdata=dat,times=10) #' p12 <- predictSurvProb(fit12,newdata=dat,times=10) #' if (requireNamespace("Hmisc",quietly=TRUE)){ #' library(Hmisc) #' harrelC1 <- rcorr.cens(p1,with(dat,Surv(time,status))) #' harrelC2 <- rcorr.cens(p2,with(dat,Surv(time,status))) #' harrelC12 <- rcorr.cens(p12,with(dat,Surv(time,status))) #' harrelC1[["C Index"]]==Cpec$AppCindex[["Cox.X1"]] #' harrelC2[["C Index"]]==Cpec$AppCindex[["Cox.X2"]] #' harrelC12[["C Index"]]==Cpec$AppCindex[["Cox.X1.X2"]] #' } #' # #' # competing risks #' # #' library(riskRegression) #' library(prodlim) #' set.seed(30) #' dcr.learn <- SimCompRisk(30) #' dcr.val <- SimCompRisk(30) #' pec::cindex(CSC(Hist(time,event)~X1+X2,data=dcr.learn),data=dcr.val) #' fit <- CSC(Hist(time,event)~X1+X2,data=dcr.learn) #' cif <- predictRisk(fit,newdata=dcr.val,times=3,cause=1) #' pec::cindex(list(fit),data=dcr.val,times=3) #' @export # {{{ header cindex.list cindex <- function(object, formula, data, eval.times, pred.times, cause, lyl=FALSE, cens.model="marginal", ipcw.refit=FALSE, ipcw.args=NULL, ipcw.limit, tiedPredictionsIn=TRUE, tiedOutcomeIn=TRUE, tiedMatchIn=TRUE, splitMethod="noPlan", B, M, model.args=NULL, model.parms=NULL, keep.models=FALSE, keep.residuals=FALSE, keep.pvalues=FALSE, keep.weights=FALSE, keep.index=FALSE, keep.matrix=FALSE, multiSplitTest=FALSE, testTimes, confInt=FALSE, confLevel=0.95, verbose=TRUE, savePath=NULL, slaveseed=NULL, na.action=na.fail, ...){ # }}} # {{{ checking integrity some arguments theCall=match.call() if (match("replan",names(theCall),nomatch=FALSE)) stop("Argument name 'replan' has been replaced by 
'splitMethod'.") if (keep.residuals && missing(testTimes)) stop("To keep.residuals please specify testTimes.") if (missing(splitMethod) && multiSplitTest==TRUE){ stop("Need data splitting to compute van de Wiel's test") } if (missing(M) && multiSplitTest) M <- NA stopifnot(as.numeric(tiedPredictionsIn) %in% c(0,1)) stopifnot(as.numeric(tiedOutcomeIn) %in% c(0,1)) stopifnot(as.numeric(tiedMatchIn) %in% c(0,1)) # }}} # {{{ check and convert object if (class(object)[1]!="list") { object <- list(object) } # }}} # {{{ formula if (missing(formula)){ if (length(grep("~",as.character(object[[1]]$call$formula)))==0){ stop(paste("Argument formula is missing and first model has no usable formula:",as.character(object[[1]]$call$formula))) } else{ ftry <- try(formula <- eval(object[[1]]$call$formula),silent=TRUE) if ((class(ftry)[1]=="try-error") || match("formula",class(formula),nomatch=0)==0) stop("Argument formula is missing and first model has no usable formula.") else if (verbose) warning("Formula missing. Using formula from first model") } } formula.names <- try(all.names(formula),silent=TRUE) if (!(formula.names[1]=="~") || (match("$",formula.names,nomatch=0)+match("[",formula.names,nomatch=0)>0)){ stop("Invalid specification of formula. Perhaps forgotten right hand side?\nNote that any subsetting, ie data$var or data[,\"var\"], is invalid for this function.") } else{ if (!(formula.names[2] %in% c("Surv","Hist"))) survp <- FALSE else survp <- TRUE } # }}} # {{{ data if (missing(data)){ if (match("call",names(object[[1]]),nomatch=0)==0||is.null(object[[1]]$call$data)){ stop("Data missing and cannot borrow data from the first object :(") } data <- eval(object[[1]]$call$data) if (match("data.frame",class(data),nomatch=0)==0) stop("Argument data is missing.") else if (verbose) warning("Argument data is missing. 
I use the data from the call to the first model instead.") } # }}} # {{{ censoring model cens.model <- match.arg(cens.model,c("cox","marginal","nonpar","aalen","none")) # }}} # {{{ response histformula <- formula if (histformula[[2]][[1]]==as.name("Surv")){ histformula[[2]][[1]] <- as.name("Hist") } m <- model.frame(histformula,data,na.action=na.action) response <- model.response(m) if (match("Surv",class(response),nomatch=0)!=0){ attr(response,"model") <- "survival" attr(response,"cens.type") <- "rightCensored" model.type <- "survival" } censType <- attr(response,"cens.type") model.type <- attr(response,"model") if (model.type=="competing.risks"){ if (lyl==TRUE) predictHandlerFun <- "predictLifeYearsLost" else predictHandlerFun <- "predictEventProb" if (missing(cause)) cause <- attr(response,"state")[1] } else{ if (survp==FALSE && NCOL(response)!=1) stop("Response must be one-dimensional.") if (survp==TRUE && NCOL(response)!=2) stop("Survival response must have two columns: time and status.") predictHandlerFun <- "predictSurvProb" } if (model.type=="competing.risks") if (verbose==TRUE) message("Cindex for competing risks") # }}} # {{{ prediction models NF <- length(object) if (is.null(names(object))){ names(object) <- sapply(object,function(o)class(o)[1]) names(object) <- make.names(names(object),unique=TRUE) } else{ # fix missing names if (any(names(object)=="")){ names(object)[(names(object)=="")] <- sapply(object[(names(object)=="")],function(o)class(o)[1]) names(object) <- make.names(names(object),unique=TRUE) }else{ # leave names as they were given } } # }}} # {{{ sort the data if (survp){ neworder <- order(response[,"time"],-response[,"status"]) if (model.type=="competing.risks"){ event <- prodlim::getEvent(response,mode="character") event <- event[neworder] } response <- response[neworder,,drop=FALSE] Y <- response[,"time"] if (censType=="uncensored"){ status <- rep(1,length(Y)) cens.model <- "none" } else{ status <- response[,"status"] } } else{ cens.model 
<- "none" neworder <- order(response) Y <- response[neworder] status <- rep(1,length(Y)) } ## for competing risks find the cause of interest. if (model.type=="competing.risks"){ availableCauses <- unique(event) if (!match(cause, availableCauses,nomatch=FALSE)) stop("Cause ",cause," is not among the available causes: ",paste(availableCauses,collapse=", ")) event <- event==cause } else{ event <- NULL } data <- data[neworder,] unique.Y <- unique(Y) N <- length(Y) NU <- length(unique.Y) # }}} # {{{ splitMethod splitMethod <- resolvesplitMethod(splitMethod=splitMethod,B=B,N=N,M=M) if (splitMethod$internal.name %in% c("Boot632plus")) stop(".632+ method not implemented for c-index.") B <- splitMethod$B ResampleIndex <- splitMethod$index k <- splitMethod$k do.resample <- !(is.null(ResampleIndex)) if (keep.matrix==TRUE & !do.resample){ warning("Argument keep.matrix set to FALSE, since no resampling/crossvalidation is requested.") keep.matrix <- FALSE } # }}} # {{{ define the prediction time(s) and the evaluation time(s) maxtime <- unique.Y[NU] if (missing(eval.times)){ ## eval.times <- max(Y) ## maybe less efficient eval.times <- max(Y[status==1]) } else{ if (any(is.infinite(eval.times))) stop("Infinite eval.times are not allowed.") tooLate <- sum(eval.times>maxtime) if (tooLate>0){ if (verbose) warning(tooLate," eval.times beyond the maximal evaluation time: ",ifelse(maxtime>1,round(maxtime,1),round(maxtime,3))) ## eval.times <- c(eval.times[eval.times0){ limit.i <- pmax(ipcw.limit,limit.i) limit.times <- pmax(ipcw.limit,limit.times) } weight.i <- pmax(weight.i,limit.i) if (is.null(dim(weight.j))){ weight.j <- pmax(weight.j,limit.times) } else{ weight.j <- t(apply(weight.j,1,function(wj){ pmax(limit.times,wj) })) } } weights <- list(weight.i=weight.i,weight.j=weight.j) # }}} # {{{ checking the models for compatibility with resampling if (do.resample){ cm <- checkModels(object=object,model.args=model.args,model.parms=model.parms,splitMethod=splitMethod$internal.name) 
model.args <- cm$model.args model.parms <- cm$model.parms } # }}} # {{{ -------------------Apparent or test sample cindex---------------------- AppCindexList <- lapply(1:NF,function(f){ fit <- object[[f]] extraArgs <- model.args[[f]] if (model.type=="competing.risks"){ pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=pred.times,cause=cause),extraArgs)) if (class(fit)[[1]]%in% c("prodlim","survfit") && is.null(dim(pred)) && length(pred)==length(pred.times)) pred <- rep(pred,N) if (class(fit)[[1]]%in% c("matrix","numeric")) pred <- pred[neworder,] if (length(pred.times)==1 && length(pred.times)1) CrossValErrMat <- kCV$CrossValErrMat } # }}} # {{{ ----------------------BootstrapCrossValidation---------------------- if (splitMethod$internal.name %in% c("Boot632plus","BootCv","Boot632")){ if (missing(testTimes)){ testTimes <- NULL } BootCv <- CindexBootstrapCrossValidation(object=object,data=data,Y=Y,status=status,event=event,eval.times=eval.times,pred.times=pred.times,cause=cause,weights=weights,ipcw.refit=ipcw.refit,ipcw.call=ipcw.call,splitMethod=splitMethod,multiSplitTest=multiSplitTest,testTimes=testTimes,confInt=confInt,confLevel=confLevel,getFromModel=model.parms,giveToModel=model.args,predictHandlerFun=predictHandlerFun,tiedPredictionsIn=tiedPredictionsIn,tiedOutcomeIn=tiedOutcomeIn,tiedMatchIn=tiedMatchIn,keepMatrix=keep.matrix,keepResiduals=keep.residuals,verbose=verbose,savePath=savePath,slaveseed=slaveseed) BootstrapCrossValCindex <- BootCv$BootstrapCrossValCindex Residuals <- BootCv$Residuals names(BootstrapCrossValCindex) <- names(object) if (multiSplitTest==TRUE){ comparisons <- allComparisons(names(object)) multiSplitTest <- list(B=B,M=M,N=N,testTimes=testTimes) multiSplitTest$Comparisons <- lapply(1:length(comparisons),function(cc){ if (length(testTimes)>0){ allPairwisePvaluesTimes <- do.call("rbind",lapply(BootCv$testedResid,function(b){ b$pValue[[cc]]})) out <- list(pValueTimes=apply(allPairwisePvaluesTimes,2,median)) if 
(keep.pvalues==TRUE){ out$allPairwisePvaluesTimes <- allPairwisePvaluesTimes } } else out <- NULL ## if (keep.pvalues==TRUE){ ## out$allPairwisePvaluesIBS <- allPairwisePvaluesIBS} out }) names(multiSplitTest$Comparisons) <- names(comparisons) class(multiSplitTest) <- "multiSplitTest" } if (keep.matrix==TRUE){ BootstrapCrossValCindexMat <- BootCv$BootstrapCrossValCindexMat names(BootstrapCrossValCindex) <- names(object) } } # }}} # {{{ Bootstrap .632 if (splitMethod$internal.name=="Boot632"){ B632Cindex <- lapply(1:NF,function(f){ .368 * AppCindex[[f]] + .632 * BootstrapCrossValCindex[[f]] }) names(B632Cindex) <- names(object) } # }}} # {{{ prepare output out <- switch(splitMethod$internal.name, "noPlan"=list("AppCindex"=AppCindex, "Pairs"=AppPairs, "Concordant"=AppConcordant), "Boot632"=list("AppCindex"=AppCindex, ## "Pairs"=AppPairs, ## "Concordant"=AppConcordant, "BootCvCindex"= BootstrapCrossValCindex, "Boot632Cindex"=B632Cindex), "BootCv"=list("AppCindex"=AppCindex, ## "Pairs"=AppPairs, ## "Concordant"=AppConcordant, "BootCvCindex"=BootstrapCrossValCindex ## "BootCvConcordant"=BootCvConcordant, ## "BootCvPairs"=BootCvPairs )) observed.maxtime <- sapply(out,function(x){ lapply(x,function(y){ eval.times[length(y)-sum(is.na(y))] }) }) minmaxtime <- min(unlist(observed.maxtime)) if (multiSplitTest==TRUE){ out <- c(out,list(multiSplitTest=multiSplitTest)) } if (keep.residuals==TRUE){ out <- c(out,list(Residuals=Residuals)) } if (keep.matrix==TRUE && splitMethod$internal.name!="noPlan"){ if (splitMethod$internal.name %in% c("crossval","loocv")){ if (B>1) out <- c(out,list("BootstrapCrossValCindexMat"=BootstrapCrossValCindexMat)) } else{ if (splitMethod$internal.name!="noinf") out <- c(out,list("BootstrapCrossValCindexMat"=BootstrapCrossValCindexMat)) } } if (!is.null(model.parms)) out <- c(out,list("ModelParameters"=BootCv$ModelParameters)) if (!keep.index) splitMethod$index <- NULL if(keep.models==TRUE){ outmodels <- object } else{ outmodels <- names(object) 
names(outmodels) <- names(object) } out <- c(out,list(call=theCall, time=eval.times, pred.time=pred.times, response=model.response(m), models=outmodels, splitMethod=splitMethod, weights=weights, cens.model=cens.model, minmaxtime=minmaxtime, maxtime=maxtime)) ## if (verbose==TRUE && do.resample==TRUE) cat("\n") # }}} class(out) <- "Cindex" out } pec/R/predictEventProb.coxboost.R0000644000176200001440000001330713571203267016475 0ustar liggesusers#' Formula interface for function \code{CoxBoost} of package \code{CoxBoost}. #' #' Formula interface for function \code{CoxBoost} of package \code{CoxBoost}. #' #' See \code{CoxBoost}. #' @aliases coxboost #' @param formula An event-history formula for competing risks of the #' form \code{Hist(time,status)~sex+age} where \code{status} defines #' competing events and right censored data. The code for right #' censored can be controlled with argument \code{cens.code}, see man #' page the function \code{\link{Hist}}. #' @param data A data.frame in which the variables of formula are #' defined. #' @param cv If \code{TRUE} perform cross-validation to optimize the #' parameter \code{stepno}. This calls the function \code{cv.CoxBoost} #' whose arguments are prefix controlled, that is \code{cv.K=7} sets #' the argument \code{K} of \code{cv.CoxBoost} to \code{7}. If #' \code{FALSE} use \code{stepno}. #' @param cause The cause of interest in competing risk models. #' @param penalty See \code{CoxBoost}. #' @param ... Arguments passed to either \code{CoxBoost} via #' \code{CoxBoost.arg} or to \code{cv.CoxBoost} via #' \code{cv.CoxBoost.arg}. #' @return See \code{CoxBoost}. #' @author Thomas Alexander Gerds \email{tag@@biostat.ku.dk} #' @seealso See \code{CoxBoost}. #' @references See \code{CoxBoost}. 
#' @keywords survival #' @export coxboost <- function(formula,data,cv=TRUE,cause=1,penalty,...){ call <- match.call(expand.dots=TRUE) formula.names <- try(all.names(formula),silent=TRUE) if (!(formula.names[2]=="Hist")) stop("The left hand side of formula look like this: Hist(time,event).") actual.terms <- terms(formula,data=data) formula <- eval(call$formula) response <- model.response(model.frame(formula,data)) Time <- as.numeric(response[,"time"]) if (attr(response,"model")=="competing.risks"){ ## adapt the event variable Event <- rep(2,NROW(response)) thisCause <- as.numeric(response[,"event"]==cause) Event[thisCause==1] <- 1 Status <- as.numeric(response[,"status"]) Event[Status==0] <- 0 } else{ ## survival Event <- as.numeric(response[,"status"]) } X <- model.matrix(actual.terms,data=data)[,-c(1),drop=FALSE]## remove intercept if (NCOL(X)<=1) stop("CoxBoost needs at least two covariates.") if (missing(penalty)) penalty <- sum(Event==1)*(9) cv.defaults=list(maxstepno=200,K=10,penalty=penalty) CoxBoost.defaults=list(stepno=100,penalty=penalty) args <- prodlim::SmartControl(call= list(...), keys=c("cv","CoxBoost"), ignore=c("formula","data","cv","cause"), forced=list("cv"=list(time=Time,status=Event,x=X),"CoxBoost"=list(time=Time,status=Event,x=X)), defaults=list("cv"=cv.defaults,"CoxBoost"=CoxBoost.defaults), ignore.case=FALSE, replaceDefaults=FALSE, verbose=TRUE) if (cv==TRUE){ cv.step <- do.call("cv.CoxBoost",args$cv) args$CoxBoost$stepno <- cv.step$optimal.step } cb <- do.call("CoxBoost",args$CoxBoost) out <- list(coxboost=cb, stepno=args$CoxBoost$stepno, call=call, formula=formula, response=response) class(out) <- "coxboost" out } ##' @export predictSurvProb.coxboost <- function(object,newdata,times,...) 
{ newcova <- model.matrix(terms(object$formula,data=newdata), data=model.frame(object$formula,data=newdata,na.action=na.fail))[,-c(1)] newcova <- newcova[,object$coxboost$xnames] p <- predict(object$coxboost,newcova,type="risk",times=times) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop("Prediction failed") p } ##' @export predictEventProb.coxboost <- function(object,newdata,times,cause,...){ if (missing(cause)) stop("missing cause") if (attr(object$response,"model")!="competing.risks") stop("Not a competing risk object") newcova <- model.matrix(terms(object$formula,data=newdata), data=model.frame(object$formula,data=newdata,na.action=na.fail))[,-c(1)] newcova <- newcova[,object$coxboost$xnames] p <- predict(object$coxboost,newdata=newcova,type="CIF",times=times) if (is.null(dim(p))) { if (length(p)!=length(times)) stop("Prediction failed (wrong number of times)") } else{ if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) } p } ##' @export predictLifeYearsLost.coxboost <- function(object,newdata,times,cause,...){ if (missing(cause)) stop("missing cause") ## if (cause!=1) stop("CoxBoost can only predict cause 1") if (attr(object$response,"model")!="competing.risks") stop("Not a competing risk object") newcova <- model.matrix(terms(object$formula,data=newdata), data=model.frame(object$formula,data=newdata))[,-c(1)] time.interest <- sort(unique(object$coxboost$time)) cif <- predict(object$coxboost,newdata=newcova,type="CIF",times=time.interest) pos <- prodlim::sindex(jump.times=time.interest,eval.times=times) lyl <- matrix(unlist(lapply(1:length(pos), function(j) { pos.j <- 1:(pos[j]+1) p <- cbind(0,cif)[,pos.j,drop=FALSE] time.diff <- diff(c(0, object$time.interest)[pos.j]) apply(p, 1, function(x) {sum(x[-length(x)] * time.diff)}) })), ncol = length(pos)) if 
(NROW(lyl) != NROW(newdata) || NCOL(lyl) != length(times)) stop(paste("\nLYL matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(lyl)," x ",NCOL(lyl),"\n\n",sep="")) lyl } pec/R/plot.calibrationPlot.R0000644000176200001440000001377613571203267015474 0ustar liggesusers### plot.calibrationPlot.R --- #---------------------------------------------------------------------- ## author: Thomas Alexander Gerds ## created: Sep 28 2015 (17:32) ## Version: ## last-updated: May 6 2017 (19:34) ## By: Thomas Alexander Gerds ## Update #: 142 #---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: #---------------------------------------------------------------------- ## ### Code: ##' Calibration plots ##' ##' @title Plot objects obtained with \code{calPlot} ##' @param x Object obtained with \code{calPlot} ##' @param ... Not used. ##' @return Nothing ##' @seealso \code{calPlot} ##' @export ##' @author Thomas A. 
Gerds plot.calibrationPlot <- function(x,...){ # {{{ plot an empty frame plotFrames <- x$plotFrames control <- x$control NF <- x$NF if (x$add==FALSE && !x$bars){ do.call("plot",control$plot) } if (x$diag && !x$bars){ segments(x0=0,y0=0,x1=1,y1=1,col="gray77",lwd=2,xpd=FALSE) } # }}} # {{{ show calibration showBars <- function(){ pf <- na.omit(plotFrames[[1]]) Pred <- pf$Pred Obs <- pf$Obs if (x$model.type=="survival" && x$type!="survival"){ Pred <- 1-Pred Obs <- 1-Obs } if(is.logical(x$legend[1]) && x$legend[1]==FALSE){ control$barplot$legend.text <- NULL }else{ if (is.null(control$barplot$legend.text)){ control$barplot$legend.text <- control$legend$legend } ## }else{ control$barplot$args.legend <- control$legend ## } } if (is.null(control$barplot$space)) control$barplot$space <- rep(c(1,0),length(Pred)) PredObs <- c(rbind(Pred,Obs)) control$barplot$height <- PredObs if (x$hanging){ control$barplot$offset <- c(rbind(0,Pred-Obs)) minval <- min(Pred-Obs) if (minval<0) negY.offset <- 0.05+seq(0,1,0.05)[prodlim::sindex(jump.times=seq(0,1,0.05),eval.times=abs(minval))] else negY.offset <- 0 control$barplot$ylim[1] <- min(control$barplot$ylim[1],-negY.offset) control$names$y <- control$names$y-negY.offset } coord <- do.call("barplot",control$barplot) if (length(x$names)>0 && (x$names[[1]]!=FALSE) && is.character(x$names)){ if (x$names[[1]]!=FALSE && length(x$names)==(length(coord)/2)){ mids <- rowMeans(matrix(coord,ncol=2,byrow=TRUE)) text(x=mids, ## x=coord, y=control$names$y, ## c(rbind(x$names,rbind(rep("",length(coord)/2)))), x$names, xpd=NA, cex=control$names$cex) } } ## if (x$legend) print(control$barplot$args.legend)n ## message(paste0("Bars are located at ",paste(coord,collapse=","))) if (x$hanging){ do.call("abline",control$abline) } if (x$showFrequencies){ if(x$hanging){ text(x=coord, cex=control$frequencies$cex, pos=3, y=(as.vector(rbind(Pred,Pred)) +rep(control$frequencies$offset,times=length(as.vector(coord))/2)), 
paste(round(100*c(rbind(Pred,Obs)),0),ifelse(control$frequencies$percent,"%",""),sep=""),xpd=NA) }else{ text(coord, pos=3, c(rbind(Pred,Obs))+control$frequencies$offset, cex=control$frequencies$cex, paste(round(100*c(rbind(Pred,Obs)),0),ifelse(control$frequencies$percent,"%",""),sep=""),xpd=NA) } } list(xcoord=coord[,1],ycoord=PredObs,offset=control$barplot$offset) } showCal <- function(f){ if (is.null(x$pseudo.col)){ ccrgb=as.list(col2rgb(x$col[f],alpha=TRUE)) names(ccrgb) <- c("red","green","blue","alpha") ccrgb$alpha <- x$jack.density jack.col <- do.call("rgb",c(ccrgb,list(max=255))) } else jack.col <- x$pseudo.col if (is.null(x$pseudo.pch)) x$pseudo.pch <- 1 if (x$showPseudo) { points(x$predictions[,f+1],x$predictions[,1],col=jack.col,pch=x$pseudo.pch) } pf <- x$plotFrames[[f]] if(NROW(pf)==1){ plottype <- "p" } else{ if (x$method=="quantile"){ plottype <- "b" } else{ plottype <- "l" } } pf <- na.omit(pf) if (x$model.type=="survival" && x$type!="survival"){ lines(1-pf$Pred,1-pf$Obs,col=x$col[f],lwd=x$lwd[f],lty=x$lty[f],type=plottype) } else lines(pf$Pred,pf$Obs,col=x$col[f],lwd=x$lwd[f],lty=x$lty[f],type=plottype) } if (x$bars) { stopifnot(NF==1) coords <- showBars() }else{ nix <- lapply(1:NF,function(f)showCal(f)) if (!(is.logical(x$legend[1]) && x$legend[1]==FALSE)){ do.call("legend",control$legend) } coords <- NULL } # }}} # {{{ axes if (x$axes){ if (x$percent){ control$axis2$labels <- paste(100*control$axis2$at,"%") control$axis1$labels <- paste(100*control$axis1$at,"%") } if (!x$bars) do.call("axis",control$axis1) ## mgp2 <- control$axis2$mgp ## if (length(mgp2)>0){ ## oldmgp <- par()$mgp ## par(mgp=mgp2) ## control$axis2 <- control$axis2[-match("mgp",names(control$axis2),nomatch=0)] ## title(ylab=x$ylab) ## } ## print(control$axis2) do.call("axis",control$axis2) ## if (length(mgp2)>0){ ## par(mgp=oldmgp) ## } } invisible(coords) # }}} } #---------------------------------------------------------------------- ### plot.calibrationPlot.R ends here 
pec/R/predictRestrictedMeanTime.R0000644000176200001440000005203114131004303016436 0ustar liggesusers# methods for survival regression # -------------------------------------------------------------------- #' Predicting restricted mean time #' #' Function to extract predicted mean times from various modeling #' approaches. #' #' The function predictRestrictedMeanTime is a generic function, meaning that it #' invokes a different function dependent on the 'class' of the #' first argument. #' #' See also \code{\link{predictSurvProb}}. #' #' @aliases predictRestrictedMeanTime predictRestrictedMeanTime.aalen #' predictRestrictedMeanTime.riskRegression predictRestrictedMeanTime.cox.aalen #' predictRestrictedMeanTime.coxph predictRestrictedMeanTime.cph predictRestrictedMeanTime.default #' predictRestrictedMeanTime.rfsrc predictRestrictedMeanTime.matrix predictRestrictedMeanTime.pecCtree #' predictRestrictedMeanTime.prodlim predictRestrictedMeanTime.psm #' predictRestrictedMeanTime.selectCox predictRestrictedMeanTime.survfit #' predictRestrictedMeanTime.pecRpart #' @usage #' \method{predictRestrictedMeanTime}{aalen}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{riskRegression}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{cox.aalen}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{cph}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{coxph}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{matrix}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{selectCox}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{prodlim}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{psm}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{survfit}(object,newdata,times,...) #' \method{predictRestrictedMeanTime}{pecRpart}(object,newdata,times,...) #' #' \method{predictRestrictedMeanTime}{pecCtree}(object,newdata,times,...) 
#' @param object A fitted model from which to extract predicted survival #' probabilities #' @param newdata A data frame containing predictor variable combinations for #' which to compute predicted survival probabilities. #' @param times A vector of times in the range of the response variable, e.g. #' times when the response is a survival object, at which to return the #' survival probabilities. #' @param \dots Additional arguments that are passed on to the current method. #' @return A matrix with as many rows as \code{NROW(newdata)} and as many #' columns as \code{length(times)}. Each entry should be a probability and in #' rows the values should be decreasing. #' @note In order to assess the predictive performance of a new survival model #' a specific \code{predictRestrictedMeanTime} S3 method has to be written. For examples, #' see the bodies of the existing methods. #' #' The performance of the assessment procedure, in particular for resampling #' where the model is repeatedly evaluated, will be improved by supressing in #' the call to the model all the computations that are not needed for #' probability prediction. For example, \code{se.fit=FALSE} can be set in the #' call to \code{cph}. #' @author Thomas A. Gerds \email{tag@@biostat.ku.dk} #' @seealso \code{\link{predict}},\code{\link{survfit}} #' @references Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). #' Evaluating Random Forests for Survival Analysis Using Prediction Error #' Curves. Journal of Statistical Software, 50(11), 1-23. 
DOI #' 10.18637/jss.v050.i11 #' @keywords survival ##' @examples ##' ##' # generate some survival data ##' library(prodlim) ##' set.seed(100) ##' d <- SimSurv(100) ##' # then fit a Cox model ##' library(rms) ##' coxmodel <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE) ##' ##' # predicted survival probabilities can be extracted ##' # at selected time-points: ##' ttt <- quantile(d$time) ##' # for selected predictor values: ##' ndat <- data.frame(X1=c(0.25,0.25,-0.05,0.05),X2=c(0,1,0,1)) ##' # as follows ##' predictRestrictedMeanTime(coxmodel,newdata=ndat,times=ttt) ##' ##' # stratified cox model ##' sfit <- coxph(Surv(time,status)~strata(X1)+X2,data=d,x=TRUE,y=TRUE) ##' predictRestrictedMeanTime(sfit,newdata=d[1:3,],times=c(1,3,5,10)) ##' ##' ## simulate some learning and some validation data ##' learndat <- SimSurv(100) ##' valdat <- SimSurv(100) ##' ## use the learning data to fit a Cox model ##' library(survival) ##' fitCox <- coxph(Surv(time,status)~X1+X2,data=learndat,x=TRUE,y=TRUE) ##' ## suppose we want to predict the survival probabilities for all patients ##' ## in the validation data at the following time points: ##' ## 0, 12, 24, 36, 48, 60 ##' psurv <- predictRestrictedMeanTime(fitCox,newdata=valdat,times=seq(0,60,12)) ##' ## This is a matrix with survival probabilities ##' ## one column for each of the 5 time points ##' ## one row for each validation set individual ##' ##' # the same can be done e.g. 
##' for a randomSurvivalForest model
##' library(randomForestSRC)
##' rsfmodel <- rfsrc(Surv(time,status)~X1+X2,data=d)
##' predictRestrictedMeanTime(rsfmodel,newdata=ndat,times=ttt)
#' @export
predictRestrictedMeanTime <- function(object,newdata,times,...){
    ## Generic: dispatch on the class of the fitted model object.
    UseMethod("predictRestrictedMeanTime",object)
}

## Internal helper: abort with an informative message unless the prediction
## matrix 'p' has one row per subject in 'newdata' and one column per
## requested time point. The message is identical to the one previously
## duplicated inside every method. Returns 'p' invisibly.
checkRestrictedMeanTimePrediction <- function(p,newdata,times){
    if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",
                   NROW(newdata)," x ",length(times),
                   "\nProvided prediction matrix: ",
                   NROW(p)," x ",NCOL(p),"\n\n",sep=""))
    invisible(p)
}

##' @export
predictRestrictedMeanTime.default <- function(object,newdata,times,...){
    ## No method available: fail early and name the offending class.
    stop("No method for evaluating predicted probabilities from objects in class: ",
         class(object),call.=FALSE)
}

##' @export
predictRestrictedMeanTime.numeric <- function(object,newdata,times,...){
    ## A numeric vector is interpreted as an already computed prediction:
    ## one value per subject, hence a single time point is expected.
    checkRestrictedMeanTimePrediction(object,newdata,times)
    object
}

##' @export
predictRestrictedMeanTime.matrix <- function(object,newdata,times,...){
    ## A matrix is interpreted as an already computed prediction:
    ## subjects in rows, time points in columns.
    checkRestrictedMeanTimePrediction(object,newdata,times)
    object
}

##' @export
predictRestrictedMeanTime.aalen <- function(object,newdata,times,...){
    ## Additive Aalen model (package 'timereg'): rebuild the cumulative
    ## hazard at the model's jump times via compiled code, then evaluate
    ## the survival curve at the requested times.
    time.coef <- data.frame(object$cum)
    ntime <- nrow(time.coef)
    objecttime <- time.coef[,1,drop=TRUE]
    ntimevars <- ncol(time.coef)-2
    covanames <- names(time.coef)[-(1:2)]
    notfound <- match(covanames,names(newdata),nomatch=0)==0
    if (any(notfound))
        stop("\nThe following predictor variables:\n\n",
             paste(covanames[notfound],collapse=","),
             "\n\nwere not found in newdata, which only provides the following variables:\n\n",
             paste(names(newdata),collapse=","),
             "\n\n")
    time.vars <- cbind(1,newdata[,names(time.coef)[-(1:2)],drop=FALSE])
    nobs <- nrow(newdata)
    hazard <- .C("survest_cox_aalen",
                 timehazard=double(ntime*nobs),
                 as.double(unlist(time.coef[,-1])),
                 as.double(unlist(time.vars)),
                 as.integer(ntimevars+1),
                 as.integer(nobs),
                 as.integer(ntime),
                 PACKAGE="pec")$timehazard
    hazard <- matrix(hazard,ncol=ntime,nrow=nobs,
                     dimnames=list(1:nobs,paste("TP",1:ntime,sep="")))
    ## cap at 1: numerical noise can push exp(-hazard) slightly above 1
    surv <- pmin(exp(-hazard),1)
    if (missing(times)) times <- sort(unique(objecttime))
    p <- surv[,prodlim::sindex(jump.times=objecttime,eval.times=times)]
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.cox.aalen <- function(object,newdata,times,...){
    ## Cox-Aalen model (package 'timereg'): combine the time-constant
    ## multiplicative part with the additive time-varying part.
    const <- c(object$gamma)
    ## strip the 'prop(' ... ')' wrapper from the coefficient names
    names(const) <- substr(dimnames(object$gamma)[[1]],6,nchar(dimnames(object$gamma)[[1]])-1)
    constant.part <- t(newdata[,names(const)])*const
    constant.part <- exp(colSums(constant.part))
    ## time-varying effects
    time.coef <- data.frame(object$cum)
    ntime <- nrow(time.coef)
    objecttime <- time.coef[,1,drop=TRUE]
    ntimevars <- ncol(time.coef)-2
    time.vars <- cbind(1,newdata[,names(time.coef)[-(1:2)],drop=FALSE])
    nobs <- nrow(newdata)
    time.part <- .C("survest_cox_aalen",
                    timehazard=double(ntime*nobs),
                    as.double(unlist(time.coef[,-1])),
                    as.double(unlist(time.vars)),
                    as.integer(ntimevars+1),
                    as.integer(nobs),
                    as.integer(ntime),
                    PACKAGE="pec")$timehazard
    time.part <- matrix(time.part,ncol=ntime,nrow=nobs)
    surv <- pmin(exp(-time.part*constant.part),1)
    if (missing(times)) times <- sort(unique(objecttime))
    p <- surv[,prodlim::sindex(objecttime,times)]
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.pecRpart <- function(object,newdata,times,...){
    ## Map each subject to its rpart leaf, then predict from the
    ## stratified survival fit stored alongside the tree.
    newdata$rpartFactor <- factor(predict(object$rpart,newdata=newdata),
                                  levels=object$levels)
    p <- predictRestrictedMeanTime(object$survfit,newdata=newdata,times=times)
    p
}

##' @export
predictRestrictedMeanTime.coxph <- function(object,newdata,times,...){
    ## FIX: test for a missing response BEFORE indexing it. The original
    ## evaluated unclass(object$y)[,1] first, which crashes with an
    ## uninformative subscript error when the model was fitted without y=TRUE.
    if (is.null(object$y))
        stop("Need 'y=TRUE' in call of 'coxph'.")
    y <- unclass(object$y)[,1]
    eTimes <- unique(sort(y))
    pos <- prodlim::sindex(jump.times=eTimes,eval.times=times)
    surv <- predictSurvProb(object,newdata=newdata,times=eTimes)
    ## restricted mean = left Riemann sum of the survival curve up to each
    ## requested time point (one column per time, one row per subject)
    rmt <- matrix(unlist(lapply(1:length(pos), function(j) {
        pos.j <- 1:(pos[j]+1)
        p <- cbind(1,surv)[,pos.j,drop=FALSE]
        time.diff <- diff(c(0, eTimes)[pos.j])
        apply(p, 1, function(x) {sum(x[-length(x)] * time.diff)})
    })), ncol = length(pos))
    if ((miss.time <- (length(times) - NCOL(rmt)))>0)
        rmt <- cbind(rmt,matrix(rep(NA,miss.time*NROW(rmt)),nrow=NROW(rmt)))
    checkRestrictedMeanTimePrediction(rmt,newdata,times)
    rmt
}

##' @export
predictRestrictedMeanTime.coxph.penal <- function(object,newdata,times,...){
    ## Frailty Cox model: plug the estimated frailty variance into the
    ## Laplace-transform survival formula.
    frailhistory <- object$history$'frailty(cluster)'$history
    frailVar <- frailhistory[NROW(frailhistory),1]
    linearPred <- predict(object,newdata=newdata,se.fit=FALSE,conf.int=FALSE)
    basehaz <- basehaz(object)
    bhTimes <- basehaz[,2]
    bhValues <- basehaz[,1]
    survPred <- do.call("rbind",lapply(1:NROW(newdata),function(i){
        (1+frailVar*bhValues*exp(linearPred[i]))^{-1/frailVar}
    }))
    where <- prodlim::sindex(jump.times=bhTimes,eval.times=times)
    ## prepend survival 1 at time 0 so index 0 (before first event) works
    p <- cbind(1,survPred)[,where+1]
    if ((miss.time <- (length(times) - NCOL(p)))>0)
        p <- cbind(p,matrix(rep(NA,miss.time*NROW(p)),nrow=NROW(p)))
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.cph <- function(object,newdata,times,...){
    if (!match("surv",names(object),nomatch=0))
        stop("Argument missing: set surv=TRUE in the call to cph!")
    p <- rms::survest(object,times=times,newdata=newdata,se.fit=FALSE,what="survival")$surv
    if (is.null(dim(p))) p <- matrix(p,nrow=NROW(newdata))
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.selectCox <- function(object,newdata,times,...){
    ## Delegate to the model selected by selectCox (first list element).
    predictRestrictedMeanTime(object[[1]],newdata=newdata,times=times,...)
}

##' @export
predictRestrictedMeanTime.prodlim <- function(object,newdata,times,...){
    p <- predict(object=object,
                 type="surv",
                 newdata=newdata,
                 times=times,
                 mode="matrix",
                 level.chaos=1)
    ## FIX: class(p)=="list" compares against the full class vector and
    ## misbehaves for classed lists; inherits() is the robust test.
    if (NROW(newdata)==1 && inherits(p,"list")){
        p <- unlist(p)
    }
    if (is.null(dim(p)) && NROW(newdata)>=1){
        ## Covariate-free model: all subjects get the same prediction,
        ## returned as a plain vector in this exceptional case.
        p <- as.vector(p)
        if (length(p)!=length(times))
            stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep=""))
    }
    else{
        checkRestrictedMeanTimePrediction(p,newdata,times)
    }
    rownames(p) <- NULL
    p
}

predict.survfit <- function(object,newdata,times,bytimes=TRUE,fill="last",...){
    ## FIX: use inherits() instead of comparing class(object) to a string,
    ## and spell out object$type (the original relied on $ partial matching
    ## via object$typ).
    if (length(class(object))!=1 || !inherits(object,"survfit") || object$type!="right")
        stop("Predictions only available \nfor class 'survfit', possibly stratified Kaplan-Meier fits.\n For class 'cph' Cox models see survest.cph.")
    if (missing(newdata))
        npat <- 1
    else if (is.data.frame(newdata))
        npat <- nrow(newdata)
    else stop("If argument `newdata' is supplied it must be a dataframe." )
    ntimes <- length(times)
    sfit <- summary(object,times=times)
    ## 'Fill' pads curves that end before the last requested time:
    ## either with NA (fill=NA) or by carrying the last value forward.
    if (is.na(fill))
        Fill <- function(x,len){x[1:len]}
    else if (fill=="last")
        Fill <- function(x,len){
            y <- x[1:len]
            y[is.na(y)] <- x[length(x)]
            y}
    else stop("Argument fill must be the string 'last' or NA.")
    if (is.null(object$strata)){
        ## unstratified fit: every subject shares one curve
        pp <- Fill(sfit$surv,ntimes)
        p <- matrix(rep(pp,npat),
                    ncol=ifelse(bytimes,ntimes,npat),
                    nrow=ifelse(bytimes,npat,ntimes),
                    byrow=bytimes)
    }
    else{
        covars <- attr(terms(eval.parent(object$call$formula)),"term.labels")
        if (!all(match(covars,names(newdata),nomatch=FALSE)))
            stop("Not all strata defining variables occur in newdata.")
        ## FIXME there are different ways to build strata levels
        ## how can we test which one was used???
        stratdat <- newdata[,covars,drop=FALSE]
        names(stratdat) <- covars
        NewStratVerb <- survival::strata(stratdat)
        NewStrat <- interaction(stratdat,sep=" ")
        levs <- levels(sfit$strata)
        ## try the verbose strata labels first, then the plain interaction
        if (!all(choose <- match(NewStratVerb,levs,nomatch=FALSE))
            && !all(choose <- match(NewStrat,levs,nomatch=FALSE)))
            stop("Not all strata levels in newdata occur in fit.")
        survlist <- split(sfit$surv,sfit$strata)
        pp <- lapply(survlist[choose],Fill,ntimes)
        p <- matrix(unlist(pp,use.names=FALSE),
                    ncol=ifelse(bytimes,ntimes,npat),
                    nrow=ifelse(bytimes,npat,ntimes),
                    byrow=bytimes)
    }
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.survfit <- function(object,newdata,times,...){
    p <- predict.survfit(object,newdata=newdata,times=times,bytimes=TRUE,fill="last")
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

## library randomSurvivalForest
## predictRestrictedMeanTime.rsf <- function(object,newdata,times,...){
##   p <- predict.rsf(object,newdata=newdata,times=times,bytimes=TRUE,fill="last")
##   if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
##     stop("Prediction failed")
##   p
## }

##' @export
predictRestrictedMeanTime.psm <- function(object,newdata,times,...){
    if (length(times)==1){
        ## survest needs at least two times; request time 0 as well and drop it
        p <- rms::survest(object,times=c(0,times),newdata=newdata,what="survival",conf.int=FALSE)[,2]
    }else{
        p <- rms::survest(object,times=times,newdata=newdata,what="survival",conf.int=FALSE)
    }
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.riskRegression <- function(object,newdata,times,...){
    if (missing(times))stop("Argument times is missing")
    temp <- predict(object,newdata=newdata)
    pos <- prodlim::sindex(jump.times=temp$time,eval.times=times)
    ## survival = 1 - cumulative incidence; prepend 1 for times before the
    ## first jump (index 0 maps to column 1)
    p <- cbind(1,1-temp$cuminc)[,pos+1,drop=FALSE]
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

##' @export
predictRestrictedMeanTime.rfsrc <- function(object, newdata, times, ...){
    ptemp <- predict(object,newdata=newdata,importance="none",...)$survival
    pos <- prodlim::sindex(jump.times=object$time.interest,eval.times=times)
    p <- cbind(1,ptemp)[,pos+1,drop=FALSE]
    checkRestrictedMeanTimePrediction(p,newdata,times)
    p
}

# methods for uncensored regression
#
# --------------------------------------------------------------------
predictProb <- function(object,newdata,times,...){
    ## Generic for uncensored (regression) models: P(Y > t) per subject.
    UseMethod("predictProb",object)
}

## Internal helper shared by the predictProb methods: given a location
## (fitted mean) per subject, compute P(Y > t) under a normal error model
## with mean 0 and sd = sd of the observed response. The error message is
## identical to the one previously duplicated in every method.
normalTailProb <- function(location,y,newdata,times){
    N <- NROW(newdata)
    NT <- length(times)
    ## one row per subject, each row equal to 'times'
    pred.matrix <- matrix(rep(times,N),byrow=TRUE,ncol=NT,nrow=N)
    ## column-major recycling subtracts location[i] from row i
    p <- 1-pnorm(pred.matrix - location,mean=0,sd=sqrt(var(y)))
    if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep=""))
    p
}

##' @export
predictProb.glm <- function(object,newdata,times,...){
    ## no censoring -- only normal family with mu=0 and sd=sd(y)
    if (!(family(object)$family=="gaussian"))
        stop("Currently only gaussian family implemented for glm.")
    betax <- predict(object,newdata=newdata,se.fit=FALSE)
    normalTailProb(betax,object$y,newdata,times)
}

##' @export
predictProb.ols <- function(object,newdata,times,...){
    ## no censoring -- only normal family with mu=0 and sd=sd(y)
    if (!(family(object)$family=="gaussian"))
        stop("Currently only gaussian family implemented.")
    betax <- predict(object,newdata=newdata,type="lp",se.fit=FALSE)
    normalTailProb(betax,object$y,newdata,times)
}

##' @export
predictProb.randomForest <- function(object,newdata,times,...){
    ## no censoring -- normal tail probability around the forest's mean
    predMean <- predict(object,newdata=newdata,se.fit=FALSE)
    normalTailProb(predMean,object$y,newdata,times)
}

## pec/R/predictLifeYearsLost.R0000644000176200001440000002071613571203267015461 0ustar liggesusers
# methods for competing risk regression
# --------------------------------------------------------------------
#' Predicting life years lost (cumulative cumulative incidences) in competing
#' risk models.
#'
#' Function to extract predicted life years lost from various modeling
#' approaches. The most prominent one is the combination of cause-specific Cox
#' regression models which can be fitted with the function \code{cumincCox}
#' from the package \code{compRisk}.
#'
#' The function predictLifeYearsLost is a generic function that means it
#' invokes specifically designed functions depending on the 'class' of the
#' first argument.
#'
#' See \code{\link{predictSurvProb}}.
#'
#' @aliases predictLifeYearsLost predictLifeYearsLost.CauseSpecificCox
#' predictLifeYearsLost.riskRegression predictLifeYearsLost.FGR
#' predictLifeYearsLost.prodlim predictLifeYearsLost.rfsrc
#' @param object A fitted model from which to extract predicted event
#' probabilities
#' @param newdata A data frame containing predictor variable combinations for
#' which to compute predicted event probabilities.
#' @param times A vector of times in the range of the response variable, for
#' which the cumulative incidences event probabilities are computed.
#' @param cause Identifies the cause of interest among the competing events.
#' @param \dots Additional arguments that are passed on to the current method.
#' @return A matrix with as many rows as \code{NROW(newdata)} and as many
#' columns as \code{length(times)}. Each entry should be a positive value and
#' in rows the values should be increasing.
#' @author Thomas A.
#' Gerds \email{tag@@biostat.ku.dk}
#' @seealso \code{\link{predictSurvProb}}, \code{\link{predictEventProb}}.
#' @keywords survival
#' @examples
#'
#' library(pec)
#' library(riskRegression)
#' library(survival)
#' library(prodlim)
#' train <- SimCompRisk(100)
#' test <- SimCompRisk(10)
#' fit <- CSC(Hist(time,cause)~X1+X2,data=train,cause=1)
#' predictLifeYearsLost(fit,newdata=test,times=seq(1:10),cv=FALSE,cause=1)
#'
#' @export
predictLifeYearsLost <- function(object,newdata,times,cause,...){
    ## Generic: dispatch on the class of the fitted competing-risk model.
    UseMethod("predictLifeYearsLost",object)
}

## Internal helper: life years lost up to each evaluation time, computed as
## the left Riemann sum of the cumulative incidence function. 'cif' has one
## row per subject and one column per (sorted, increasing) jump time.
## findInterval(eval.times, jump.times) equals prodlim::sindex for sorted
## jump times: the number of jump times <= each evaluation time.
lylFromCif <- function(cif,jump.times,eval.times){
    pos <- findInterval(eval.times,jump.times)
    matrix(unlist(lapply(seq_along(pos), function(j) {
        pos.j <- 1:(pos[j]+1)
        ## prepend CIF value 0 at time 0
        p <- cbind(0,cif)[,pos.j,drop=FALSE]
        time.diff <- diff(c(0, jump.times)[pos.j])
        apply(p, 1, function(x) {sum(x[-length(x)] * time.diff)})
    })), ncol = length(pos))
}

## Internal helper: abort unless the LYL matrix has one row per subject and
## one column per requested time (message identical to the one previously
## duplicated in every method). Returns 'lyl' invisibly.
checkLifeYearsLost <- function(lyl,newdata,times){
    if (NROW(lyl) != NROW(newdata) || NCOL(lyl) != length(times))
        stop(paste("\nLYL matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(lyl)," x ",NCOL(lyl),"\n\n",sep=""))
    invisible(lyl)
}

##' @export
predictLifeYearsLost.matrix <- function(object,newdata,times,...){
    ## A matrix is interpreted as an already computed LYL prediction.
    if (NROW(object) != NROW(newdata) || NCOL(object) != length(times)){
        stop(paste("Life-years-lost matrix has wrong dimensions: ", NROW(object), " rows and ", NCOL(object), " columns.\n But requested are predicted probabilities for ", NROW(newdata), " subjects (rows) in newdata and ", length(times), " time points (columns)", sep=""))
    }
    object
}

##' @export
predictLifeYearsLost.prodlim <- function(object,newdata,times,cause,...){
    time.interest <- object$time
    cif <- predict(object=object,cause=cause,type="cuminc",newdata=newdata,
                   times=time.interest,mode="matrix",level.chaos=1)
    ## Covariate-free model: every subject shares one CIF. FIX: the original
    ## collapsed 'cif' to a plain vector here, which broke the column
    ## subsetting in the LYL integral; replicate the row instead so the
    ## dimensions stay one-row-per-subject.
    if (NROW(cif)==1 && NROW(newdata)>1)
        cif <- matrix(cif,nrow=NROW(newdata),ncol=length(time.interest),byrow=TRUE)
    lyl <- lylFromCif(cif,time.interest,times)
    checkLifeYearsLost(lyl,newdata,times)
    lyl
}

##' @export
predictLifeYearsLost.FGR <- function(object,newdata,times,cause,...){
    if (missing(times))stop("Argument times is missing")
    time.interest <- sort(unique(object$crrFit$uftime))
    cif <- predict(object,newdata=newdata,times=time.interest)
    lyl <- lylFromCif(cif,time.interest,times)
    checkLifeYearsLost(lyl,newdata,times)
    lyl
}

##' @export
predictLifeYearsLost.riskRegression <- function(object,newdata,times,cause,...){
    if (missing(times))stop("Argument times is missing")
    time.interest <- object$time
    cif <- predict(object,newdata=newdata,times=time.interest)
    lyl <- lylFromCif(cif,time.interest,times)
    checkLifeYearsLost(lyl,newdata,times)
    lyl
}

##' @export
predictLifeYearsLost.ARR <- function(object,newdata,times,cause,...){
    if (missing(times))stop("Argument times is missing")
    time.interest <- object$time
    cif <- predict(object,newdata=newdata,times=time.interest)
    lyl <- lylFromCif(cif,time.interest,times)
    checkLifeYearsLost(lyl,newdata,times)
    lyl
}

##' @export
predictLifeYearsLost.CauseSpecificCox <- function (object, newdata, times, cause, ...) {
    ## Build the CIF for 'cause' from the cause-specific Cox models by the
    ## product-integral type formula: CIF increments = (lagged overall
    ## survival) x (cause-specific hazard increments).
    ## (Removed unused locals N and NC of the original; NC read the
    ## non-existent component object$model.)
    eTimes <- object$eventTimes
    if (missing(cause)) cause <- object$theCause
    causes <- object$causes
    stopifnot(match(as.character(cause),causes,nomatch=0)!=0)
    ## cumulative cause-specific hazard for the cause of interest
    cumHaz1 <- -log(predictSurvProb(object$models[[paste("Cause",cause)]],times=eTimes,newdata=newdata))
    if (length(eTimes)==1)
        Haz1 <- cumHaz1
    else
        Haz1 <- t(apply(cbind(0,cumHaz1),1,diff))
    cumHazOther <- lapply(causes[-match(cause,causes)],function(c){
        -log(predictSurvProb(object$models[[paste("Cause",c)]],times=eTimes,newdata=newdata))
    })
    lagsurv <- exp(-cumHaz1 - Reduce("+",cumHazOther))
    cif <- t(apply(lagsurv*Haz1,1,cumsum))
    lyl <- lylFromCif(cif,eTimes,times)
    checkLifeYearsLost(lyl,newdata,times)
    lyl
}

##' @export
predictLifeYearsLost.rfsrc <- function(object, newdata, times, cause, ...){
    if (missing(cause)) stop("missing cause")
    cif <- predict(object,newdata=newdata,importance="none",...)$cif[,,cause,drop=TRUE]
    lyl <- lylFromCif(cif,object$time.interest,times)
    checkLifeYearsLost(lyl,newdata,times)
    lyl
}

## pec/R/plot.CiCindex.R0000755000176200001440000000016613571203267014024 0ustar liggesusers
plot.CiCindex <- function(x,...){
    ## NOTE(review): ConfInt.Cindex is not defined in this file -- confirm it
    ## exists elsewhere in the package; the local 'M' below is computed but
    ## never used (kept as in the original).
    X <- x$time
    M <- names(x$models)
    Y <- ConfInt.Cindex(x,times=X)
    plot(X,Y)
}
## pec/R/predictEventProb.selectFGR.R0000644000176200001440000001226613754470360016451 0ustar liggesusers
#########################################
# Function 'predictEventProb.selectFGR' #
#########################################
#Author: Rob C.M. van Kruijsdijk
#Date original version: 24-02-2013
#Contributor: Thomas A. Gerds
#Date previous version: 06-04-2013
#Date current version: 27-06-2014
#' Stepwise variable selection in the Fine & Gray regression competing risk
#' model
#'
#' This is a wrapper function which first selects variables in the Fine & Gray
#' regression model using \code{crrstep} from the \code{crrstep} package and
#' then returns a fitted Fine & Gray regression model with the selected
#' variables.
#'
#'
#' @param formula A formula whose left hand side is a \code{Hist}
#' object -- see \code{\link{Hist}}. The right hand side specifies (a
#' linear combination of) the covariates. See examples below.
#' @param data A data.frame in which all the variables of
#' \code{formula} can be interpreted.
#' @param cause The failure type of interest. Defaults to \code{1}.
#' @param rule Rule to pass on to crrstep ("AIC", "BIC" or "BICcr"),
#' also see \code{crrstep}
#' @param direction see \code{crrstep}
#' @param \dots Further arguments passed to \code{crrstep}.
#' @author Rob C.M. van Kruijsdijk \email{R.C.M.vanKruijsdijk@@umcutrecht.nl}
#'
#' Thomas Alexander Gerds \email{tag@@biostat.ku.dk}
#' @keywords survival
##' @examples
##' \dontrun{
##' library(riskRegression)
##' library(prodlim)
##' library(lava)
##' if (!requireNamespace("cmprsk",quietly=TRUE)){
##' library(cmprsk)
##' library(pec)
##' m <- crModel()
##' m <- addvar(m,c('X1','X2','X3','X4','X5','X6','X7','X8','X9','X10'))
##' distribution(m,c("X2","X7","X9")) <- binomial.lvm()
##' regression(m,eventtime1~X1+X2+X5+X9) <- c(-1,1,0.5,0.8)
##' set.seed(100)
##' d <- sim(m,100)
##' ## full formula
##' ff <- Hist(time, event) ~ X1 + X2 + X3 + X4 +X5 + X6 + X7+ X8 + X9 + X10
##'
##' # Fit full model with FGR
##' fg <- FGR(ff,cause=1,data=d)
##'
##' # Backward selection based on the AIC
##' sfgAIC <- selectFGR(ff, data=d, rule="AIC", direction="backward")
##'
##' sfgAIC$fit # Final FGR-model with selected variables
##'
##' # Risk reclassification plot at time = 4
##' plot(predictEventProb(fg,times=4,newdata=d),
##'      predictEventProb(sfgAIC,times=4,newdata=d))
##'
##' # Backward selection based on the BIC, while forcing
##' # the last two variables (X9 and X10) in the model
##' sfgBIC <- selectFGR(ff, data=d, rule="BIC", direction="backward",
##'                     scope.min=~X9+X10)
##'
##' ## apparent performance
##' pec(list(full.model=fg,selectedAIC=sfgAIC,selectedBIC=sfgBIC),
##'     formula=Hist(time, event)~1,
##'     data=d)
##'
##'
##' ## bootstrap cross-validation performance
##' set.seed(7)
##' pec(list(full.model=fg,selectedAIC=sfgAIC,selectedBIC=sfgBIC),
##'     formula=Hist(time, event)~1,
##'     data=d,
##'     B=5,
##'     splitMethod="bootcv")
##' }
##' }
##'
##'
#' @export selectFGR
selectFGR <- function(formula, data, cause=1, rule="AIC", direction="backward", ...){
    ## Wrapper: run crrstep's stepwise selection on the competing-risk
    ## formula, then refit a Fine & Gray model with the selected variables.
    ## NOTE(review): 'rule' and 'direction' are accepted here but are not
    ## forwarded explicitly to crrstep::crrstep below (only '...' is) --
    ## confirm this is intended.
    if (!requireNamespace("riskRegression",quietly=TRUE))
        stop("This function requires library riskRegression")
    if (!requireNamespace("crrstep",quietly=TRUE))
        stop("This function requires library crrstep")
    if (missing(data))
        stop("Argument 'data' is missing")
    if (missing(formula))
        stop("Argument 'formula' is missing")
    theCall <- match.call()
    if (match("subset",names(theCall),nomatch=FALSE))
        stop("Subsetting of data is not possible.")
    mf <- model.frame(formula,data)
    response <- model.response(mf)
    cens.code <- as.numeric(attr(response,"cens.code"))
    timevar <- colnames(response)[1]
    if (attr(response,"model")!="competing.risks")
        stop("This does not look like a competing risk setting.\nMaybe there is only one event type in the data?")
    ## event type coding for crrstep: 1 = cause of interest,
    ## 2 = competing cause, 0 = censored
    eventType <- rep(2,NROW(response))
    eventType[as.numeric(response[,"event"]==cause)==1] <- 1
    eventType[as.numeric(response[,"status"])==0] <- 0
    ## hand crrstep a plain matrix response bound to the covariates
    class(response) <- "matrix"
    mf <- cbind(response,mf[-1])
    stepFormula <- as.formula(update(formula,paste(timevar,"~1+.")))
    ## crrstep prints its trace; capture.output keeps the console clean
    capture.output(stepFit <- do.call(crrstep::crrstep,
                                      list(formula=stepFormula,
                                           data=mf,
                                           etype=eventType,
                                           failcode=cause,
                                           cencode=cens.code,
                                           trace = FALSE,...)))
    keptVars <- rownames(stepFit$coefficients)
    if (length(stepFit$coefficients)==0){
        ## nothing selected: fall back to the covariate-free prodlim fit
        finalFormula <- as.formula(update(formula,.~1),env=NULL)
        finalFit <- prodlim::prodlim(finalFormula,data=data)
    }
    else{
        finalFormula <- as.formula(update(formula,paste(".~",paste(keptVars,collapse="+"))),env=NULL)
        finalFit <- riskRegression::FGR(finalFormula,data=data,cause=cause)
        finalFit$call$formula <- finalFormula
    }
    res <- list(fit=finalFit,In=keptVars)
    res$call <- match.call()
    res$call$formula <- eval(res$call$formula)
    class(res) <- "selectFGR"
    res
}
##' @export
predictEventProb.selectFGR <- function(object,newdata,times,...){
    ## Delegate to the selected model stored as the first list element.
    predictEventProb(object[[1]],newdata=newdata,times=times,...)
}
## pec/R/selectCox.R0000644000176200001440000000326414131004304013276 0ustar liggesusers
#' Backward variable selection in the Cox regression model
#'
#' This is a wrapper function which first selects variables in the Cox
#' regression model using \code{fastbw} from the \code{rms} package and then
#' returns a fitted Cox regression model with the selected variables.
#'
#' This function first calls \code{cph} then \code{fastbw} and finally
#' \code{cph} again.
#'
#' @param formula A formula object with a \code{Surv} object on the left-hand
#' side and all the variables on the right-hand side.
#' @param data Name of an data frame containing all needed variables.
#' @param rule The method for selecting variables. See \code{\link{fastbw}} for
#' details.
#' @references Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012).
#' Evaluating Random Forests for Survival Analysis Using Prediction Error
#' Curves. Journal of Statistical Software, 50(11), 1-23. DOI
#' 10.18637/jss.v050.i11
#' @keywords survival
#' @examples
#'
#' data(GBSG2)
#' library(survival)
#' f <- selectCox(Surv(time,cens)~horTh+age+menostat+tsize+tgrade+pnodes+progrec+estrec ,
#' data=GBSG2)
#'
#' @export
selectCox <- function(formula,data,rule="aic"){
    ## Fit the full Cox model, run fastbw's backward elimination, then
    ## refit with the variables that survived the elimination.
    fullFit <- rms::cph(formula, data, surv=TRUE)
    reduced <- rms::fastbw(fullFit,rule=rule)
    kept <- reduced$names.kept
    if (length(kept)==0){
        ## no variable kept: covariate-free Kaplan-Meier type fit
        finalFormula <- update(formula,".~1")
        finalFit <- prodlim::prodlim(finalFormula,data=data)
    }
    else{
        finalFormula <- update(formula,paste(".~",paste(kept,collapse="+")))
        finalFit <- rms::cph(finalFormula,data, surv=TRUE)
    }
    res <- list(fit=finalFit,In=kept)
    res$call <- match.call()
    class(res) <- "selectCox"
    res
}
## pec/R/summary.pec.R0000755000176200001440000000242313571203267013612 0ustar liggesusers
##' @export
summary.pec <- function(object, times, what, models, digits=3, print=TRUE, ...){
    if (missing(models)) models <- names(object$models)
    if (missing(what) || is.null(what)){
        what <-
grep(c("Err$"),names(object),value=TRUE) } if (print==TRUE) cat("\nPrediction error curves\n\n") if (print==TRUE) print(object$splitMethod) otime <- object$time if (missing(times) && (length(times <- otime) > 20)){ warning("Missing times argument: prediction error curves evaluated at the quantiles of fitted times\n") times <- quantile(otime) } tindex <- prodlim::sindex(jump.times=object$time,eval.times=times) out <- lapply(what,function(w){ if (print==TRUE) cat("\n",w,"\n") tmp <- rbind(0, do.call("cbind",object[[w]][models]))[tindex+1,,drop=FALSE] tmp <- cbind(time=times,n.risk=c(object$n.risk[1],object$n.risk)[tindex+1],tmp) rownames(tmp) <- 1:NROW(tmp) if (print==TRUE) prmatrix(round(tmp,digits=digits),...) tmp }) names(out) <- what if (!is.null(object$multiSplitTest)) if (print==TRUE) print(object$multiSplitTest) if (print==TRUE) cat("\n") invisible(out) } pec/R/print.ConfScoreSurv.R0000755000176200001440000000047313571203267015256 0ustar liggesusers##' @export print.confScoreSurv <- function(x,...){ overall <- do.call("cbind",lapply(x$models,function(m){ colMeans(m$score) })) mm <- cbind(x$times,overall) colnames(mm) <- c("times",names(x$models)) rownames(mm) <- rep("",NROW(mm)) cat("\nPopulation average confidence score:\n\n") print(mm) } pec/R/print.Cindex.R0000755000176200001440000000014613571203267013724 0ustar liggesusers##' @export print.Cindex <- function(x,digits=3,what=NULL,times,...){ summary(x,print=TRUE,...) 
} pec/R/bootstrapCrossValidation.R0000755000176200001440000002506513571203266016427 0ustar liggesusersbootstrapCrossValidation <- function(object, data, Y, status, event, times, cause, ipcw, ipcw.refit=FALSE, ipcw.call, splitMethod, multiSplitTest, keepResiduals, testIBS, testTimes, newdata, confInt, confLevel, getFromModel, giveToModel, predictHandlerFun, keepMatrix, verbose, savePath,slaveseed){ # {{{ initializing B <- splitMethod$B N <- splitMethod$N M <- splitMethod$M NT <- length(times) NF <- length(object) ResampleIndex <- splitMethod$index # }}} step <- function(b,seed){ if (verbose==TRUE) internalTalk(b,B) # {{{ training and validation data vindex.b <- match(1:N,unique(ResampleIndex[,b]),nomatch=0)==0 val.b <- data[vindex.b,,drop=FALSE] train.b <- data[ResampleIndex[,b],,drop=FALSE] ## print(c(NROW(train.b), NROW(val.b))) NV=sum(vindex.b) # NROW(val.b) # }}} # {{{ IPCW if (ipcw.refit==TRUE){ ipcw.call.b <- ipcw.call ipcw.call.b$data <- val.b ipcw.call.b$subjectTimes <- Y[vindex.b] ipcw.b <- do.call("ipcw",ipcw.call.b) ipcwTimes.b <- ipcw.b$IPCW.times IPCW.subjectTimes.b <- ipcw.b$IPCW.subjectTimes } else{ IPCW.subjectTimes.b <- ipcw$IPCW.subjectTimes[vindex.b] if (ipcw$dim==1) ipcwTimes.b <- ipcw$IPCW.times[vindex.b,] else ipcwTimes.b <- ipcw$IPCW.times } # }}} # {{{ Building the models in training data if (!is.null(seed)) { set.seed(seed) ## if (verbose) message("seed:",seed) } trainModels <- lapply(1:NF,function(f){ fit.b <- internalReevalFit(object=object[[f]],data=train.b,step=b,silent=FALSE,verbose=verbose) ## this was a good idea to reduce the memory usage: ## fit.b$call <- object[[f]]$call ## fit.b$call <- NULL ## however, it does not work with the new version of the survival package ## in which the survfit.coxph function checks the response 'y' ## next try ## print("before") ## print(object.size(fit.b)) ## print("after") ## browser() ## fit.b$call$data <- substitute(train.b) ## print(object.size(fit.b)) fit.b }) # }}} # {{{ Saving the models? 
if (!is.null(savePath)){ nix <- lapply(1:NF,function(f){ fit.b <- trainModels[[f]] ## print(object.size(fit.b)) fit.b$formula <- NULL ## print(environment(fit.b$formula)) save(fit.b,file=paste(paste(savePath,"/",names(object)[f],"-bootstrap-",b,sep=""),".rda",sep="")) }) } # }}} # {{{ Extracting parameters? if (!is.null(getFromModel)){ ModelParameters <- lapply(1:NF,function(f){ getParms <- getFromModel[[f]] print(trainModels[[f]][getParms]) if (is.null(getParms)) trainModels[[f]][getParms] else NULL }) } # }}} # {{{ Check fits fitFailed <- lapply(trainModels,function(fit.b) (is.null(fit.b))) # }}} # {{{ Predicting the validation data predVal <- lapply(1:NF,function(f){ fit.b <- trainModels[[f]] extraArgs <- giveToModel[[f]] if (predictHandlerFun == "predictEventProb"){ try2predict <- try(pred.b <- do.call(predictHandlerFun, c(list(object=fit.b,newdata=val.b,times=times,cause=cause),extraArgs))) } else{ try2predict <- try(pred.b <- do.call(predictHandlerFun, c(list(object=fit.b,newdata=val.b,times=times),extraArgs))) } if (inherits(try2predict,"try-error")==TRUE){ if (verbose==TRUE) warning(paste("During bootstrapping: prediction for model ",class(fit.b)," failed in step ",b),immediate.=TRUE) NULL} else{ pred.b } }) # }}} # {{{ Compute prediction error curves for step b if (multiSplitTest==TRUE){ Residuals <- lapply(predVal,function(pred.b){ if (is.null(pred.b)) NA else{ if (predictHandlerFun == "predictEventProb"){ matrix(.C("pecResidualsCR", pec=double(NT), resid=double(NT*NV), as.double(Y[vindex.b]), as.double(status[vindex.b]), as.double(event[vindex.b]), as.double(times), as.double(pred.b), as.double(ipcwTimes.b), as.double(IPCW.subjectTimes.b), as.integer(NV), as.integer(NT), as.integer(ipcw$dim), as.integer(is.null(dim(pred.b))), NAOK=TRUE, PACKAGE="pec")$resid,ncol=NT,byrow=FALSE) } else{ matrix(.C("pecResiduals", pec=double(NT), resid=double(NT*NV), as.double(Y[vindex.b]), as.double(status[vindex.b]), as.double(times), as.double(pred.b), 
as.double(ipcwTimes.b), as.double(IPCW.subjectTimes.b), as.integer(NV), as.integer(NT), as.integer(ipcw$dim), as.integer(is.null(dim(pred.b))), NAOK=TRUE, PACKAGE="pec")$resid,ncol=NT,byrow=FALSE) } } }) names(Residuals) <- names(object) PredErrStepB=lapply(Residuals,function(x){colMeans(x)}) } else{ PredErrStepB <- lapply(predVal,function(pred.b){ if (is.null(pred.b)) NA else{ if (predictHandlerFun=="predictEventProb") pecOut <- .C("pecCR", pec=double(NT), as.double(Y[vindex.b]), as.double(status[vindex.b]), as.double(event[vindex.b]), as.double(times), as.double(pred.b), as.double(ipcwTimes.b), as.double(IPCW.subjectTimes.b), as.integer(NV), as.integer(NT), as.integer(ipcw$dim), as.integer(is.null(dim(pred.b))), NAOK=TRUE, PACKAGE="pec")$pec else pecOut <- .C("pecSRC",pec=double(NT),as.double(Y[vindex.b]),as.double(status[vindex.b]),as.double(times),as.double(pred.b),as.double(ipcwTimes.b),as.double(IPCW.subjectTimes.b),as.integer(NV),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred.b))),NAOK=TRUE,PACKAGE="pec")$pec } }) } # }}} # {{{ van de Wiel's test if (multiSplitTest==TRUE){ testedResid <- testResiduals(Residuals,times=times,testTimes=testTimes,rangeInt=testIBS,confInt=confInt,confLevel=confLevel) } # }}} # {{{ looping output if (multiSplitTest==TRUE) loopOut=list(PredErrStepB=PredErrStepB,testedResid=testedResid) else loopOut=list(PredErrStepB=PredErrStepB) if (keepResiduals==TRUE) loopOut=c(loopOut,list(Residuals=lapply(Residuals,function(R){ R[,prodlim::sindex(eval.times=testTimes,jump.times=times)] }))) if (!is.null(getFromModel)){ loopOut=c(loopOut,list(ModelParameters=ModelParameters)) } loopOut } b <- 1 ## if (require(foreach)){ if (missing(slaveseed)||is.null(slaveseed)) slaveseed <- sample(1:1000000,size=B,replace=FALSE) Looping <- foreach::foreach (b= 1:B) %dopar% step(b,slaveseed[[b]]) ## } ## else{ ## Looping <- lapply(1:B,function(b){step(b,seed=NULL)}) ## } # }}} # {{{ output ## ## ## 1. 
a list of NF matrices each with B (rows) and NT columns ## the prediction error curves ## if (verbose==TRUE && B>1) cat("\n") BootstrapCrossValErrMat <- lapply(1:NF,function(f){ ## matrix with NT columns and b rows do.call("rbind",lapply(Looping,function(b){ b$PredErrStepB[[f]] })) }) ## ## 2. a list of NF average out-of-bag prediction error curves ## with length NT ## BootstrapCrossValErr <- lapply(BootstrapCrossValErrMat,colMeans) ## function(x){ ## if (na.accept>0) colMeans(x,na.rm=sum(is.na(b))0){ splitMethodName <- "LeaveOneOutCV" k <- N-1 B <- 1 } else{ ## some form of bootstrap match.BootCv <- length(grep("boot|outofbag",splitMethod,value=FALSE,ignore.case=TRUE))>0 if (match.BootCv==FALSE){ splitMethod <- "noPlan" splitMethodName <- "no plan" } else{ match.632 <- length(grep("632",splitMethod,value=FALSE,ignore.case=TRUE))>0 match.plus <- length(grep("plus|\\+",splitMethod,value=FALSE,ignore.case=TRUE))>0 if (match.632==TRUE){ if (match.plus==TRUE){ splitMethod <- "Boot632plus" splitMethodName <- ".632+" } else{ splitMethod <- "Boot632" splitMethodName <- ".632" } } else{ splitMethod <- "BootCv" splitMethodName <- "BootCv"} } } } if (missing(M)) M <- N stopifnot(M>0 && M<=N) subsampling <- M!=N ## if (!subsampling && resampleTraining) ## stop("Resampling the training data is only available for subsampling") if (splitMethod %in% c("noPlan","none")) { B <- 0 ## resampleTraining <- FALSE } else{ if (missing(B)){ if (length(k)>0) B <- 1 # repeat k-fold CrossVal ones else B <- 100 # } else if (B==0) stop("No. 
of resamples must be a positive integer.") } if (length(k)>0){ if (splitMethod=="loocv") ResampleIndex <- data.frame(id=1:N) else ResampleIndex <- do.call("cbind",lapply(1:B,function(b){sample(rep(1:k,length.out=N))})) } else{ if (splitMethod %in% c("Boot632plus","BootCv","Boot632")){ ResampleIndex <- do.call("cbind",lapply(1:B,function(b){ sort(sample(1:N,size=M,replace=!subsampling)) })) colnames(ResampleIndex) <- paste("Train",1:B,sep=".") } else{ ResampleIndex <- NULL } } ## if (is.logical(resampleTraining)){ ## if (resampleTraining==TRUE) ## resampleTrainingSize <- N ## } ## else{ ## stopifnot(resampleTraining>0 &&resampleTraining==round(resampleTraining)) ## resampleTrainingSize <- resampleTraining ## resampleTraining <- TRUE ## } ## if (resampleTraining==TRUE){ ## if (subsampling==TRUE && resampleTrainingSize<=M) ## stop("Size for resampling the training indices should exceed ",M) ## ## if (subsampling==FALSE) ## ## stop("Resampling the training indices is only allowed for subsampling") ## ResampleIndex <- apply(ResampleIndex,2,function(x){ ## sort(c(x,sample(x,replace=TRUE,size=resampleTrainingSize-M))) ## }) ## } out <- list(name=splitMethodName, internal.name=splitMethod, index=ResampleIndex, k=k, B=B, M=M, N=N) ## resampleTraining=resampleTraining) class(out) <- "splitMethod" out } pec/R/summary.confScoreSurv.R0000755000176200001440000000225013571203267015652 0ustar liggesusers##' @export summary.confScoreSurv <- function(object, times, type=1, qScore=FALSE, ...){ if (type==1){ meanScore <- do.call("cbind",lapply(object$models,function(m){ colMeans(m$score) }))} else{ meanScore <- do.call("cbind",lapply(object$models,function(m){ 1-sqrt(colMeans((1-m$score)^2)) }))} if (qScore==TRUE){ qScore <- do.call("cbind",lapply(1:length(object$models),function(m){ qq <- t(apply(object$models[[m]]$score,2,quantile,c(0.5,.25,.75,0,1))) colnames(qq) <- paste(names(object$models)[m],".",c("median","iqrLow","iqrUp","min","max"),"Score",sep="") qq })) } mm <- 
data.frame(times=object$times, meanScore=meanScore) if (!missing(times)){ mm <- rbind(0,mm)[1+prodlim::sindex(jump.times=object$times,eval.times=times),,drop=FALSE] if (qScore==TRUE) qScore <- rbind(0,qScore)[1+prodlim::sindex(jump.times=object$times,eval.times=times),,drop=FALSE] } if (qScore==TRUE) mm <- cbind(mm,qScore) ## rownames(mm) <- rep("",NROW(mm)) mm } pec/R/predictSurvProb.cforest.R0000755000176200001440000000341314131004307016143 0ustar liggesusers# CFOREST # -------------------------------------------------------------------- #' S3-wrapper function for cforest from the party package #' #' S3-wrapper function for cforest from the party package #' #' See \code{cforest} of the \code{party} package. #' #' @param formula Passed on as is. See \code{cforest} of the \code{party} package #' @param data Passed on as is. See \code{cforest} of the \code{party} package #' @param ... Passed on as they are. See \code{cforest} of the \code{party} package #' @return list with two elements: cforest and call #' @references Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). #' Evaluating Random Forests for Survival Analysis Using Prediction Error #' Curves. Journal of Statistical Software, 50(11), 1-23. DOI #' 10.18637/jss.v050.i11 #' @keywords survival #' @export pecCforest pecCforest <- function(formula,data,...){ if (!(requireNamespace("party",quietly=TRUE))) stop("Need package party for this, but is not available here.") out <- list(forest=party::cforest(formula,data,...)) class(out) <- "pecCforest" out$call <- match.call() out } ##' @export predictSurvProb.pecCforest <- function (object, newdata, times, ...) 
{ if (!(requireNamespace("party",quietly=TRUE))) stop("Need package party for this, but is not available here.") survObj <- party::treeresponse(object$forest,newdata=newdata) p <- do.call("rbind",lapply(survObj,function(x){ predictSurvProb(x,newdata=newdata[1,,drop=FALSE],times=times) })) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } pec/R/ipcw.R0000755000176200001440000003742013754470062012330 0ustar liggesusers# {{{ roxy header #' Estimation of censoring probabilities #' #' This function is used internally by the function \code{pec} to obtain #' inverse of the probability of censoring weights. #' #' Inverse of the probability of censoring weights (IPCW) usually refer to the #' probabilities of not being censored at certain time points. These #' probabilities are also the values of the conditional survival function of #' the censoring time given covariates. The function ipcw estimates the #' conditional survival function of the censoring times and derives the #' weights. #' #' IMPORTANT: the data set should be ordered, \code{order(time,-status)} in #' order to get the values \code{IPCW.subjectTimes} in the right order for some #' choices of \code{method}. #' #' @aliases ipcw ipcw.none ipcw.marginal ipcw.nonpar ipcw.cox ipcw.aalen #' @param formula A survival formula like, \code{Surv(time,status)~1}, where #' as usual status=0 means censored. The status variable is internally #' reversed for estimation of censoring rather than survival #' probabilities. Some of the available models (see argument #' \code{model}) will use predictors on the right hand side of the #' formula. #' @param data The data used for fitting the censoring model #' @param method Censoring model used for estimation of the #' (conditional) censoring distribution. 
#' @param args A list of arguments which is passed to method #' @param times For \code{what="IPCW.times"} a vector of times at #' which to compute the probabilities of not being censored. #' @param subjectTimes For \code{what="IPCW.subjectTimes"} a vector of #' individual times at which the probabilities of not being censored #' are computed. #' @param subjectTimesLag If equal to \code{1} then obtain #' \code{G(T_i-|X_i)}, if equal to \code{0} estimate the conditional #' censoring distribution at the subjectTimes, #' i.e. (\code{G(T_i|X_i)}). #' @param what Decide about what to do: If equal to #' \code{"IPCW.times"} then weights are estimated at given #' \code{times}. If equal to \code{"IPCW.subjectTimes"} then weights #' are estimated at individual \code{subjectTimes}. If missing then #' produce both. #' @return \item{times}{The times at which weights are estimated} #' \item{IPCW.times}{Estimated weights at \code{times}} #' \item{IPCW.subjectTimes}{Estimated weights at individual time values #' \code{subjectTimes}} \item{fit}{The fitted censoring model} #' \item{method}{The method for modelling the censoring distribution} #' \item{call}{The call} #' @author Thomas A. 
Gerds \email{tag@@biostat.ku.dk} #' @seealso \code{\link{pec}} #' @keywords survival #' @examples #' #' library(prodlim) #' library(rms) #' dat=SimSurv(30) #' #' dat <- dat[order(dat$time),] #' #' # using the marginal Kaplan-Meier for the censoring times #' #' WKM=ipcw(Hist(time,status)~X2, #' data=dat, #' method="marginal", #' times=sort(unique(dat$time)), #' subjectTimes=dat$time) #' plot(WKM$fit) #' WKM$fit #' #' # using the Cox model for the censoring times given X2 #' library(survival) #' WCox=ipcw(Hist(time=time,event=status)~X2, #' data=dat, #' method="cox", #' times=sort(unique(dat$time)), #' subjectTimes=dat$time) #' WCox$fit #' #' plot(WKM$fit) #' lines(sort(unique(dat$time)), #' 1-WCox$IPCW.times[1,], #' type="l", #' col=2, #' lty=3, #' lwd=3) #' lines(sort(unique(dat$time)), #' 1-WCox$IPCW.times[5,], #' type="l", #' col=3, #' lty=3, #' lwd=3) #' #' # using the stratified Kaplan-Meier #' # for the censoring times given X2 #' #' WKM2=ipcw(Hist(time,status)~X2, #' data=dat, #' method="nonpar", #' times=sort(unique(dat$time)), #' subjectTimes=dat$time) #' plot(WKM2$fit,add=FALSE) #' #' # }}} # {{{ method ipcw #' @export ipcw <- function(formula, data, method, args, times, subjectTimes, subjectTimesLag=1, what){ if (!missing(what)) stopifnot(all(match(what,c("IPCW.times","IPCW.subjectTimes")))) if (missing(what) || match("IPCW.times",what,nomatch=FALSE)){ stopifnot(length(times)>0) } class(method) <- method UseMethod("ipcw",method) } # }}} # {{{ uncensored data: return just 1 ##' @export ipcw.none <- function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() # weigths at requested times if (match("IPCW.times",what,nomatch=FALSE)){ IPCW.times <- rep(1,length(times)) } else IPCW.times <- NULL # weigths at subject specific event times if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ IPCW.subjectTimes <- 
rep(1,length(subjectTimes)) } else IPCW.subjectTimes <- NULL out <- list(times=times,IPCW.times=IPCW.times,IPCW.subjectTimes=IPCW.subjectTimes,call=call,method=method) class(out) <- "IPCW" out } # }}} # {{{ reverse Random Survival Forests ##' @export ipcw.rfsrc <- function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ if (!(requireNamespace("randomForestSRC",quietly=TRUE))) stop("Need package randomForestSRC for this, but is not available here.") if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() ## needed for refit in crossvalidation loop EHF <- prodlim::EventHistory.frame(formula, data, specials=NULL, unspecialsDesign=FALSE) wdata <- data.frame(cbind(unclass(EHF$event.history),EHF$design)) ## wdata <- as.data.frame(EHF) wdata$status <- 1-wdata$status wform <- update(formula,"Surv(time,status)~.") ## require(randomForestSRC) stopifnot(NROW(na.omit(wdata))>0) if (missing(args) || is.null(args)) ## args <- list(bootstrap="none",ntree=1000,nodesize=NROW(data)/2) args <- list(ntree=1000) ## if (is.null(args$importance) & (args$importance!="none")) args$importance <- "none" fit <- do.call(randomForestSRC::rfsrc,c(list(wform,data=wdata),args)) ## print(fit) fit$call <- NULL # weigths at requested times # predicted survival probabilities for all training subjects are in object$survival # out-of-bag prediction in object$survival.oob if (match("IPCW.times",what,nomatch=FALSE)){ IPCW.times <- predictSurvProb(fit,newdata=wdata,times=times) } else IPCW.times <- NULL # weigths at subject specific event times if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ pmat <- fit$survival jtimes <- fit$time.interest IPCW.subjectTimes <- sapply(1:length(subjectTimes),function(i){ Ci <- subjectTimes[i] pos <- prodlim::sindex(jump.times=jtimes,eval.times=Ci,comp="smaller",strict=(subjectTimesLag==1)) c(1,pmat[i,])[1+pos] }) } else IPCW.subjectTimes <- NULL out <- list(times=times, 
IPCW.times=IPCW.times, IPCW.subjectTimes=IPCW.subjectTimes, fit=fit, call=call, method=method) ## browser() ## print(head(IPCW.subjectTimes)) class(out) <- "IPCW" out } ##' @export ipcw.forest <- function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ if (!(requireNamespace("randomForestSRC",quietly=TRUE))) stop("Need package randomForestSRC for this, but is not available here.") if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() ## needed for refit in crossvalidation loop EHF <- prodlim::EventHistory.frame(formula, data, specials=NULL, unspecialsDesign=FALSE) wdata <- data.frame(cbind(unclass(EHF$event.history),EHF$design)) ## wdata$status <- 1-wdata$status wform <- update(formula,"Surv(time,status)~.") ## require(randomForestSRC) stopifnot(NROW(na.omit(wdata))>0) if (missing(args) || is.null(args)) args <- list(ntree=1000) args$importance <- "none" fit <- do.call(randomForestSRC::rfsrc,c(list(wform,data=wdata),args)) ## print(fit) fit$call <- NULL # forest weights FW <- predict(fit,newdata=wdata,forest.wt=TRUE)$forest.wt # weigths at requested times # predicted survival probabilities for all training subjects are in object$survival # out-of-bag prediction in object$survival.oob if (match("IPCW.times",what,nomatch=FALSE)){ # reverse Kaplan-Meier with forest weigths IPCW.times <- apply(data,1,function(i){ predict(prodlim::prodlim(Hist(time,status)~1,data=wdata,reverse=TRUE,caseweights=FW[i,]),times=times) }) } else IPCW.times <- NULL # weigths at subject specific event times if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ IPCW.subjectTimes <- sapply(1:length(subjectTimes),function(i){ ## browser() prodlim::predictSurvIndividual(prodlim::prodlim(Hist(time,status)~1,data=wdata,reverse=TRUE,caseweights=FW[i,]),lag=1)[i] }) } else IPCW.subjectTimes <- NULL out <- list(times=times, IPCW.times=IPCW.times, IPCW.subjectTimes=IPCW.subjectTimes, fit=fit, call=call, 
method=method) class(out) <- "IPCW" out } # }}} # {{{ reverse Kaplan-Meier ##' @export ipcw.marginal <- function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() formula <- update.formula(formula,"~1") fit <- prodlim::prodlim(formula,data=data,reverse=TRUE) # weigths at requested times if (match("IPCW.times",what,nomatch=FALSE)){ IPCW.times <- predict(fit,newdata=data,times=times,level.chaos=1,mode="matrix",type="surv") } else IPCW.times <- NULL # weigths at subject specific event times if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ IPCW.subjectTimes <- prodlim::predictSurvIndividual(fit,lag=subjectTimesLag) } else IPCW.subjectTimes <- NULL out <- list(times=times, IPCW.times=IPCW.times, IPCW.subjectTimes=IPCW.subjectTimes, fit=fit, call=call, method=method) class(out) <- "IPCW" out ## locsubjectTimes <- match(subjectTimes,fit$time,nomatch=NA) ## if (any(is.na(locsubjectTimes))) stop("Can not locate all individual observation times" ) ## IPCW.subjectTimes <- c(1,fit$surv)[locsubjectTimes] ## at (subjectTimes_i-) ## IPCW.times <- c(1,fit$surv)[prodlim::sindex(jump.times=fit$time,eval.times=times) +1] ## at all requested times } # }}} # {{{ reverse Stone-Beran ##' @export ipcw.nonpar <- function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() fit <- prodlim::prodlim(formula,data=data,reverse=TRUE,bandwidth="smooth") # weigths at requested times if (match("IPCW.times",what,nomatch=FALSE)){ IPCW.times <- predict(fit,newdata=data,times=times,level.chaos=1,mode="matrix",type="surv") } else IPCW.times <- NULL # weigths at subject specific event times if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ IPCW.subjectTimes <- prodlim::predictSurvIndividual(fit,lag=subjectTimesLag) 
} else IPCW.subjectTimes <- NULL out <- list(times=times, IPCW.times=IPCW.times, IPCW.subjectTimes=IPCW.subjectTimes, fit=fit, call=call, method=method) class(out) <- "IPCW" out } # }}} # {{{ reverse Cox via Harrel's package ##' @export ipcw.cox <- function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ ## require(rms) if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() EHF <- prodlim::EventHistory.frame(formula, data, specials=c("strat"), stripSpecials=c("strat"), specialsDesign=FALSE, unspecialsDesign=FALSE) if (is.null(EHF$strat)) wdata <- data.frame(cbind(unclass(EHF$event.history),EHF$design)) else wdata <- data.frame(cbind(unclass(EHF$event.history),EHF$design,EHF$strat)) ## wdata <- data.frame(cbind(unclass(EHF$event.history),EHF$design)) wdata$status <- 1-wdata$status wform <- update(formula,"Surv(time,status)~.") stopifnot(NROW(na.omit(wdata))>0) if (missing(args) || is.null(args)) args <- list(x=TRUE,y=TRUE,eps=0.000001) args$surv <- TRUE fit <- do.call(rms::cph,c(list(wform,data=wdata),args)) ## fit <- rms::cph(wform,data=wdata,surv=TRUE,x=TRUE,y=TRUE) # weigths at requested times if (match("IPCW.times",what,nomatch=FALSE)){ IPCW.times <- rms::survest(fit,newdata=wdata,times=times,se.fit=FALSE)$surv } else IPCW.times <- NULL # weigths at subject specific event times if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ if (subjectTimesLag==1) IPCW.subjectTimes <- rms::survest(fit,times=subjectTimes-min(diff(c(0,unique(subjectTimes))))/2,what='parallel') else if (subjectTimesLag==0){ IPCW.subjectTimes <- rms::survest(fit,times=subjectTimes,what='parallel') } else stop("SubjectTimesLag must be 0 or 1") } else IPCW.subjectTimes <- NULL out <- list(times=times, IPCW.times=IPCW.times, IPCW.subjectTimes=IPCW.subjectTimes, fit=fit, call=call, method=method) class(out) <- "IPCW" out } # }}} # {{{ reverse Aalen method via the timereg package ##' @export ipcw.aalen <- 
function(formula,data,method,args,times,subjectTimes,subjectTimesLag,what){ if (missing(subjectTimesLag)) subjectTimesLag=1 if (missing(what)) what=c("IPCW.times","IPCW.subjectTimes") call <- match.call() EHF <- prodlim::EventHistory.frame(formula, data, specials=NULL, unspecialsDesign=FALSE) wdata <- data.frame(cbind(unclass(EHF$event.history),EHF$design)) ## wdata <- as.data.frame(EHF) wdata$status <- 1-wdata$status wform <- update(formula,"Surv(time,status)~.") stopifnot(NROW(na.omit(wdata))>0) fit <- do.call(timereg::aalen,list(formula=formula,data=wdata,n.sim=0)) fit$call <- NULL # weigths at requested times if (match("IPCW.times",what,nomatch=FALSE)){ IPCW.times <- predictSurvProb(fit,newdata=wdata,times=times) } else { IPCW.times <- NULL } if (match("IPCW.subjectTimes",what,nomatch=FALSE)){ if (subjectTimesLag==1) IPCW.subjectTimes <- diag(predictSurvProb(fit,newdata=data,times=pmax(0,subjectTimes-min(diff(unique(subjectTimes)))/2))) else if (subjectTimesLag==0) IPCW.subjectTimes <- diag(predictSurvProb(fit,newdata=data,times=subjectTimes)) else stop("SubjectTimesLag must be 0 or 1") } else IPCW.subjectTimes <- NULL out <- list(times=times,IPCW.times=IPCW.times,IPCW.subjectTimes=IPCW.subjectTimes,fit=fit,call=call,method=method) class(out) <- "IPCW" out } # }}} pec/R/plotPredictSurvProb.R0000644000176200001440000001770214131004271015341 0ustar liggesusers#' Plotting predicted survival curves. #' #' Ploting prediction survival curves for one prediction model using #' \code{predictSurvProb} . #' #' Arguments for the invoked functions \code{legend} and \code{axis} are simply #' specified as \code{legend.lty=2}. The specification is not case sensitive, #' thus \code{Legend.lty=2} or \code{LEGEND.lty=2} will have the same effect. #' The function \code{axis} is called twice, and arguments of the form #' \code{axis1.labels}, \code{axis1.at} are used for the time axis whereas #' \code{axis2.pos}, \code{axis1.labels}, etc. are used for the y-axis. 
#' #' These arguments are processed via \code{\dots{}} of #' \code{plotPredictSurvProb} and inside by using the function #' \code{SmartControl}. #' #' @param x A survival prediction model including \code{call} and #' \code{formula} object. #' @param newdata A data frame with the same variable names as those that were #' used to fit the model \code{x}. #' @param times Vector of times at which to return the estimated probabilities. #' @param xlim Plotting range on the x-axis. #' @param ylim Plotting range on the y-axis. #' @param xlab Label given to the x-axis. #' @param ylab Label given to the y-axis. #' @param axes Logical. If \code{FALSE} no axes are drawn. #' @param col Vector of colors given to the survival curve. #' @param density Densitiy of the color -- useful for showing many #' (overlapping) curves. #' @param lty Vector of lty's given to the survival curve. #' @param lwd Vector of lwd's given to the survival curve. #' @param add Logical. If \code{TRUE} only lines are added to an existing #' device #' @param legend Logical. If TRUE a legend is plotted by calling the function #' legend. Optional arguments of the function \code{legend} can be given in #' the form \code{legend.x=val} where x is the name of the argument and val the #' desired value. See also Details. #' @param percent Logical. If \code{TRUE} the y-axis is labeled in percent. #' @param \dots Parameters that are filtered by \code{\link{SmartControl}} and #' then passed to the functions: \code{\link{plot}}, \code{\link{axis}}, #' \code{\link{legend}}. #' @return The (invisible) object. #' @author Ulla B. Mogensen \email{ulmo@@biostat.ku.dk}, Thomas A. Gerds #' \email{tag@@biostat.ku.dk} #' @seealso \code{\link{predictSurvProb}}\code{\link{prodlim}} #' @references Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). #' Evaluating Random Forests for Survival Analysis Using Prediction Error #' Curves. Journal of Statistical Software, 50(11), 1-23. 
DOI #' 10.18637/jss.v050.i11 #' @keywords survival #' @examples #' #' # generate some survival data #' library(prodlim) #' d <- SimSurv(100) #' # then fit a Cox model #' library(rms) #' coxmodel <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE) #' # plot predicted survival probabilities for all time points #' ttt <- sort(unique(d$time)) #' # and for selected predictor values: #' ndat <- data.frame(X1=c(0.25,0.25,-0.05,0.05),X2=c(0,1,0,1)) #' plotPredictSurvProb(coxmodel,newdata=ndat,times=ttt) #' #' # the same can be done e.g. for a randomSurvivalForest model #' library(randomForestSRC) #' rsfmodel <- rfsrc(Surv(time,status)~X1+X2,data=d) #' plotPredictSurvProb(rsfmodel,newdata=ndat,times=ttt) #' #' @export plotPredictSurvProb <- function(x, newdata, times, xlim, ylim, xlab, ylab, axes=TRUE, col, density, lty, lwd, add=FALSE, legend=TRUE, percent=FALSE, ...){ # {{{ call argument allArgs <- match.call() # }}} # {{{ find times if(missing(times)){ # formula formula <- eval(x$call$formula) if (match("formula",class(formula),nomatch=0)==0) stop("Argument formula is missing.") # find data data <- eval(x$call$data) # extract response m <- model.frame(formula,data,na.action=na.fail) response <- model.response(m) # ordering time neworder <- order(response[,"time"],-response[,"status"]) response <- response[neworder,,drop=FALSE] times <- response[,"time"] # unique event times times <- unique(times) } # }}} # {{{ newdata if(missing(newdata)){ newdata <- eval(x$call$data) } ## stop("newdata argument is missing") # }}} # {{{ xlim, ylim if (missing(xlim)) xlim <- c(0, max(times)) at <- times <= xlim[2] orig.X <- times[at] X <- times[at] # }}} # {{{ predict newdata at times y <- predictSurvProb(x, newdata=newdata, times=orig.X) # }}} # {{{ plot arguments nlines <- NROW(y) if (missing(ylab)) ylab <- "Survival probability" if (missing(xlab)) xlab <- "Time" if (missing(ylim)) ylim <- c(0, 1) if (missing(lwd)) lwd <- rep(3,nlines) if (missing(col)) col <- rep(1,nlines) if 
(missing(density)){ if (nlines>5){ density <- pmax(33,100-nlines) } else density <- 100 } if (density<100){ col <- sapply(col,function(coli){ ccrgb=as.list(col2rgb(coli,alpha=TRUE)) names(ccrgb) <- c("red","green","blue","alpha") ccrgb$alpha=density cc=do.call("rgb",c(ccrgb,list(max=255))) }) } if (missing(lty)) lty <- rep(1, nlines) if (length(lwd) < nlines) lwd <- rep(lwd, nlines) if (length(lty) < nlines) lty <- rep(lty, nlines) if (length(col) < nlines) col <- rep(col, nlines) axis1.DefaultArgs <- list() axis2.DefaultArgs <- list(at=seq(0,1,.25)) plot.DefaultArgs <- list(x=0, y=0, type = "n", ylim = ylim, xlim = xlim, xlab = xlab, ylab = ylab) legend.DefaultArgs <- list(legend=rownames(y), lwd=lwd, col=col, lty=lty, cex=1.5, bty="n", y.intersp=1.3, x="topright") # }}} # {{{ smart controls if (match("legend.args",names(args),nomatch=FALSE)){ legend.DefaultArgs <- c(args[[match("legend.args",names(args),nomatch=FALSE)]],legend.DefaultArgs) legend.DefaultArgs <- legend.DefaultArgs[!duplicated(names(legend.DefaultArgs))] } smartA <- prodlim::SmartControl(call=list(...), keys=c("plot","legend","axis1","axis2"), ignore=c("x", "newdata", "times", "xlim","ylim","xlab","ylab","col","lty","lwd","add","legend","percent","axes","legend.args"), defaults=list("plot"=plot.DefaultArgs, "legend"= legend.DefaultArgs, "axis1"=axis1.DefaultArgs, "axis2"=axis2.DefaultArgs), forced=list("plot"=list(axes=FALSE), "axis1"=list(side=1), "axis2"=list(side=2)), verbose=TRUE) # }}} # {{{ empty plot if (!add) { do.call("plot",smartA$plot) if (axes){ do.call("axis",smartA$axis1) if (percent & is.null(smartA$axis1$labels)) smartA$axis2$labels <- paste(100*smartA$axis2$at,"%") do.call("axis",smartA$axis2) } } # }}} # {{{ adding lines nix <- lapply(1:nlines, function(s) { lines(x = X, y = y[s,], type = "s", col = col[s], lty = lty[s], lwd = lwd[s]) }) # }}} # {{{ legend if(legend==TRUE && !add && !is.null(rownames(y))){ save.xpd <- par()$xpd do.call("legend",smartA$legend) par(xpd=save.xpd) } # 
}}} invisible(x) } pec/R/predictSurvProb.ctree.R0000755000176200001440000000273013754467730015630 0ustar liggesusers##' The call is added to an ctree object ##' ##' @title S3-Wrapper for ctree. ##' @param ... passed to ctree ##' @return list with two elements: ctree and call ##' @seealso pecCforest ##' @examples ##' if (requireNamespace("party",quietly=TRUE)){ ##' library(prodlim) ##' library(survival) ##' set.seed(50) ##' d <- SimSurv(50) ##' nd <- data.frame(X1=c(0,1,0),X2=c(-1,0,1)) ##' f <- pecCtree(Surv(time,status)~X1+X2,data=d) ##' predictSurvProb(f,newdata=nd,times=c(3,8)) ##' } ##' ##' @author Thomas A. Gerds ##' @export pecCtree <- function(...){ if (!(requireNamespace("party",quietly=TRUE))) stop("Need package party for this, but is not available here.") out <- list(ctree=party::ctree(...)) class(out) <- "pecCtree" out$call <- match.call() out } ##' @export predictSurvProb.pecCtree <- function (object, newdata, times, ...) { requireNamespace("party") N <- NROW(newdata) NT <- length(times) survObj <- party::treeresponse(object$ctree, newdata=newdata) p <- do.call("rbind", lapply(survObj,function(x){ predictSurvProb(x, newdata=newdata[1,,drop=FALSE], times=times) })) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } pec/R/marginal.prodlim.R0000755000176200001440000000063413571203267014620 0ustar liggesusersmarginal <- function(object){ UseMethod("marginal",object) } marginal.default <- function(object){ ff <- object$call$formula dd <- eval(object$call$data) fff <- update(ff,".~1") prodlim::prodlim(fff,data=dd) } marginal.prodlim <- function(object){ cc <- object$call ff <- cc$formula cc$formula <- update(ff,".~1") eval(cc) } marginal.formula <- function(object){ update(object,".~1") } 
pec/R/ConfScoreSurv.R0000755000176200001440000001076713571203266014131 0ustar liggesusersConfScoreSurv <- function(object, data, newdata, times, splitMethod="BootCv", B, M, verbose=TRUE){ NF <- length(object) NT <- length(times) N <- NROW(data) # {{{ Find splits ## require(pec) splitMethod <- resolvesplitMethod(splitMethod=splitMethod,B=B,N=NROW(data),M=M) ResampleIndex <- splitMethod$index # }}} # {{{ Who to predict if (missing(newdata)){ predTestSet <- TRUE NTEST <- NROW(data) stopifnot(M= xlim[1] b <- x$time <= xlim[2] at <- (a & b) X <- x$time[at] y <- do.call("cbind",x[[what]][models])[at,,drop=FALSE] if (length(y)==0) stop("No plotting values: check if x[[what]][models] is a list of numeric vectors.") uyps <- unlist(y) uyps <- uyps[!is.infinite(uyps)] max.y <- max(uyps,na.rm=T) ymax <- max(max.y,ylim[2]) if (max.y>ylim[2]) ylim <- if (what=="PredErr") c(0,ceiling(ymax*10)/10) else c(0,ceiling(max(unlist(y),na.rm=T)*10))/10 # }}} # {{{ Check for missings nfit <- ncol(y) if (missing(ylab)) ylab <- "Prediction error" if (missing(xlab)) xlab <- "Time" if (missing(col)) col <- 1:nfit if (missing(lty)) lty <- rep(1, nfit) if (missing(lwd)) lwd <- rep(2, nfit) if (length(col)< nfit) col <- rep(col, nfit) if (length(lty) < nfit) lty <- rep(lty, nfit) if (length(lwd) < nfit) lwd <- rep(lwd, nfit) if (missing(type)) if (!x$exact || smooth) type <- "l" else type <- "s" # }}} # {{{ creating arguments axis1.DefaultArgs <- list() axis2.DefaultArgs <- list() plot.DefaultArgs <- list(x=0, y=0, type = "n", ylim = ylim, xlim = xlim, xlab = xlab, ylab = ylab) special.DefaultArgs <- list(x=x, y=x[[what]], addprederr=NULL, models=models, bench=FALSE, benchcol=1, times=X, maxboot=NULL, bootcol="gray77", col=rep(1,4), lty=1:4, lwd=rep(2,4)) if (special) legend.DefaultArgs <- list(legend=NULL,lwd=NULL,col=NULL,lty=NULL,cex=1.5,bty="n",y.intersp=1,x=xlim[1],xjust=0,y=(ylim+.1*ylim)[2],yjust=1) else legend.DefaultArgs <- list(legend=if(is.numeric(models)) names(x$models)[models] else 
models, lwd=lwd, col=col, lty=lty, cex=1.5, bty="n", y.intersp=1, x=xlim[1], xjust=0, y=(ylim+.1*ylim)[2], yjust=1) # }}} # {{{ backward compatibility if (match("legend.args",names(args),nomatch=FALSE)){ legend.DefaultArgs <- c(args[[match("legend.args",names(args),nomatch=FALSE)]],legend.DefaultArgs) legend.DefaultArgs <- legend.DefaultArgs[!duplicated(names(legend.DefaultArgs))] } if (match("special.args",names(args),nomatch=FALSE)){ special.DefaultArgs <- c(args[[match("special.args",names(args),nomatch=FALSE)]],special.DefaultArgs) special.DefaultArgs <- special.DefaultArgs[!duplicated(names(special.DefaultArgs))] } smartA <- prodlim::SmartControl(call=list(...), keys=c("plot","special","legend","axis1","axis2"), defaults=list("plot"=plot.DefaultArgs, "special"=special.DefaultArgs, "legend"= legend.DefaultArgs, "axis1"=axis1.DefaultArgs, "axis2"=axis2.DefaultArgs), forced=list("plot"=list(axes=FALSE), "axis1"=list(side=1), "axis2"=list(side=2)), ignore.case=TRUE, ignore=c("what","who"), verbose=TRUE) # }}} # {{{ generating an empty plot if (!add) { do.call("plot",smartA$plot) if (axes){ do.call("axis",smartA$axis1) do.call("axis",smartA$axis2) } } # }}} # {{{ adding the lines if (special==TRUE){ if (!(x$splitMethod$internal.name=="Boot632plus"||x$splitMethod$internal.name=="Boot632")) stop("Plotting method 'special' requires prediction error method 'Boot632plus' or 'Boot632'") if (is.null(x$call$keep.matrix)) stop("Need keep.matrix") do.call("Special", smartA$special) } else{ nlines <- ncol(y) nix <- lapply(1:nlines, function(s) { lines(x = X, y = y[,s], type = type, col = col[s], lty = lty[s], lwd = lwd[s]) }) } if (add.refline) abline(h=.25,lty=3,lwd=2,col=1) # }}} # {{{ legend - crappy solution to legend to the option special (but works) if(legend==TRUE && !add && !is.null(names(x$models)[models])){ save.xpd <- par()$xpd par(xpd=TRUE) # Not very elegant solution but works: if (special==TRUE){ # nameModels if(is.numeric(models)) nameModels <- 
names(x$models)[smartA$special$models] else nameModels <- smartA$special$models #legend.legend: if (is.null(smartA$legend$legend)) if (smartA$special$bench == FALSE) smartA$legend$legend <- c(paste(x$method$internal.name,"-",nameModels), paste(smartA$special$addprederr, "-", nameModels)) else{ if (is.numeric(smartA$special$bench)) benchName <- names(x$models)[smartA$special$bench] else benchName <- smartA$special$bench if (is.null(smartA$special$addprederr)) smartA$legend$legend <- c(paste(x$splitMethod$internal.name,"-",c(benchName, nameModels))) else smartA$legend$legend <- c(paste(x$splitMethod$internal.name,"-",c(benchName, nameModels)), paste(smartA$special$addprederr, "-", nameModels)) } # legend.col if (is.null(smartA$legend$col)) if (smartA$special$bench == FALSE) smartA$legend$col <- smartA$special$col else smartA$legend$col <- c(smartA$special$benchcol,smartA$special$col) if (is.null(smartA$legend$lty)) if (smartA$special$bench == FALSE) smartA$legend$lty <- smartA$special$lty else smartA$legend$lty <- c(1,smartA$special$lty) #legend.lwd if (is.null(smartA$legend$lwd)) if (smartA$special$bench == FALSE) smartA$legend$lwd <- smartA$special$lwd else smartA$legend$lwd <- c(smartA$special$lwd[1],smartA$special$lwd) # run: do.call("legend",smartA$legend) } else do.call("legend",smartA$legend) par(xpd=save.xpd) } # }}} # {{{ returning invisible and close out invisible(x) } # }}} pec/R/plot.riskReclassification.R0000644000176200001440000000226713571203267016512 0ustar liggesusers### plot.riskReclassification.R --- #---------------------------------------------------------------------- ## author: Thomas Alexander Gerds ## created: Sep 24 2015 (19:26) ## Version: ## last-updated: Oct 3 2015 (16:26) ## By: Thomas Alexander Gerds ## Update #: 7 #---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: #---------------------------------------------------------------------- ## ### Code: ##' @export 
## Scatterplot of the risks predicted by two rival models, with the
## reclassification cut-points drawn as axis ticks and an optional grid.
plot.riskReclassification <- function(x,xlim=c(0,100),ylim=c(0,100),xlab,ylab,grid=TRUE,grid.col=gray(0.9),...){
    ## default axis labels are taken from the names of the reclassification table
    if (missing(xlab)) xlab <- paste("Risk (%):",names(dimnames(x$reclassification))[[1]])
    if (missing(ylab)) ylab <- paste("Risk (%):",names(dimnames(x$reclassification))[[2]])
    plot(x$predictedRisk[[1]],
         x$predictedRisk[[2]],
         axes=FALSE,
         xlim=xlim,
         ylim=ylim,
         xlab=xlab,
         ylab=ylab,
         ...)
    axis(1,at=x$cuts)
    axis(2,at=x$cuts)
    ## FIX: the 'grid.col' argument was ignored (the grid color was
    ## hard-coded to gray(0.9))
    if (grid==TRUE) abline(h = x$cuts, v = x$cuts, col = grid.col)
}
#----------------------------------------------------------------------
### plot.riskReclassification.R ends here
# ---- tar member header (archive residue, not R code) ----
# pec/R/CindexKFoldCrossValidation.R0000644000176200001440000001657713571203266016541 0ustar liggesusers
## K-fold (and leave-one-out) cross-validation of the concordance index.
## For each of B repetitions the data are split into k groups; each model is
## refitted on the data without a group and used to predict that group.
## The pooled predictions are then passed to the compiled C routines
## "ccr" (competing risks) or "cindexSRC" (survival).
CindexKFoldCrossValidation <- function(object, data, Y, status, event, tindex, eval.times, pred.times, cause, weights, ipcw.refit=FALSE, ipcw.call, tiedPredictionsIn, tiedOutcomeIn, tiedMatchIn, splitMethod, multiSplitTest, keepResiduals, testTimes, confInt, confLevel, getFromModel, giveToModel, predictHandlerFun, keepMatrix, verbose, savePath,slaveseed){
    # {{{ initializing
    B <- splitMethod$B
    N <- splitMethod$N
    k <- splitMethod$k
    NT <- length(eval.times)
    NF <- length(object)
    ResampleIndex <- splitMethod$index
    ## NOTE(review): 'extraArgs' is set here but per-model extra arguments are
    ## later read from giveToModel[[f]] inside the prediction loop -- confirm
    if (missing(giveToModel)) extraArgs=NULL
    # }}}
    CrossValErrMat <- lapply(1:B,function(b,extraArgs=extraArgs){
        if (verbose==TRUE) internalTalk(b,B)
        groups <- ResampleIndex[,b,drop=TRUE]
        ## each subject belongs to exactly one group
        ## the prediction `p[i]' is obtained with the reduced data
        if (k==N-1) k <- N
        subjectPred <- lapply(1:k,function(g){
            internalTalk(g,k)
            # {{{ training and validation data
            id <- groups==g
            train.k <- data[!id,,drop=FALSE]
            val.k <- data[id,,drop=FALSE]
            # }}}
            # {{{ Building the models in training data
            trainModels <- lapply(1:NF,function(f){
                fit.k <- internalReevalFit(object=object[[f]],data=train.k,step=paste("CV group=",k),silent=FALSE,verbose=verbose)
                ## this was a good idea to reduce the memory usage:
                ## fit.k$call <- object[[f]]$call
                ## fit.k$call <- NULL
                ## however, it does not work with the new version of the survival package
                ## in which the survfit.coxph function checks the response 'y'
                ## fit.k$call$data <- substitute(train.k)
                fit.k
            })
            # }}}
            # {{{ Predicting the validation data
            modelPred <- lapply(1:NF,function(f){
                fit.k <- trainModels[[f]]
                extraArgs <- giveToModel[[f]]
                if (predictHandlerFun == "predictEventProb"){
                    ## competing risks: predictions are cause-specific
                    p.group <- do.call(predictHandlerFun,c(list(object=fit.k,newdata=val.k,times=eval.times,cause=cause),extraArgs))
                } else{
                    p.group <- do.call(predictHandlerFun,c(list(object=fit.k,newdata=val.k,times=eval.times),extraArgs))
                }
                ## a time-constant prediction vector is recycled into a matrix
                ## with one (identical) row per validation subject
                if(is.null(dim(p.group))) {
                    p.group <- do.call("rbind",lapply(1:NROW(val.k),function(x){p.group}))
                }
                p.group
            })
            # }}}
            modelPred
        })
        ipcw.i <- weights$weight.i
        ## NOTE(review): both branches are identical; presumably subject-specific
        ## weight matrices were meant to be handled differently -- confirm
        if (is.null(dim(weights$weight.j))){
            ipcw.j <- weights$weight.j
        } else{
            ipcw.j <- weights$weight.j
        }
        # {{{ Compute cindex for step b
        PredCindexStepB <- lapply(1:NF,function(f){
            ## stack the group-wise predictions and restore the original
            ## subject order (order(order(groups)) inverts the grouping sort)
            pred.b <- do.call("rbind",lapply(subjectPred,function(x)x[[f]]))
            if (splitMethod$internal.name!="loocv"){
                pred.b <- pred.b[order(order(groups)),]
            }
            if (predictHandlerFun=="predictEventProb"){
                Step.b.CindexResult <- .C("ccr",
                                          cindex=double(NT),
                                          concA=double(NT),
                                          pairsA=double(NT),
                                          concB=double(NT),
                                          pairsB=double(NT),
                                          as.integer(tindex),
                                          as.double(Y),
                                          as.integer(status),
                                          as.integer(event),
                                          as.double(eval.times),
                                          as.double(ipcw.i),
                                          as.double(ipcw.j),
                                          as.double(pred.b),
                                          as.integer(N),
                                          as.integer(NT),
                                          as.integer(tiedPredictionsIn),
                                          as.integer(tiedOutcomeIn),
                                          as.integer(tiedMatchIn),
                                          as.integer(!is.null(dim(ipcw.j))),
                                          NAOK=TRUE,
                                          PACKAGE="pec")
                Step.b.Cindex <- Step.b.CindexResult$cindex
                Step.b.PairsA <- Step.b.CindexResult$pairsA
                Step.b.ConcordantA <- Step.b.CindexResult$concA
                Step.b.PairsB <- Step.b.CindexResult$pairsB
                Step.b.ConcordantB <- Step.b.CindexResult$concB
                list(Cindex.b=Step.b.Cindex,Pairs.b=list(A=Step.b.PairsA,B=Step.b.PairsB),Concordant.b=list(A=Step.b.ConcordantA,B=Step.b.ConcordantB))
            } else{
                cindexOut <- .C("cindexSRC",
                                cindex=double(NT),
                                conc=double(NT),
                                pairs=double(NT),
                                as.integer(tindex),
                                as.double(Y),
                                as.integer(status),
                                as.double(eval.times),
                                as.double(ipcw.i),
                                as.double(ipcw.j),
                                as.double(pred.b),
                                as.integer(N),
                                as.integer(NT),
                                as.integer(tiedPredictionsIn),
                                as.integer(tiedOutcomeIn),
                                as.integer(tiedMatchIn),
                                as.integer(!is.null(dim(ipcw.j))),
                                NAOK=TRUE,
                                PACKAGE="pec")
                Cindex.b <- cindexOut$cindex
                Pairs.b <- cindexOut$pairs
                Concordant.b <- cindexOut$conc
                list(Cindex.b=Cindex.b,Pairs.b=Pairs.b,Concordant.b=Concordant.b)
            }
        })
        names(PredCindexStepB) <- names(object)
        PredCindexStepB
    })
    # }}}
    ## NOTE(review): for B>1 the elements of CrossValErrMat are lists of lists,
    ## so rowMeans over cbind of them looks fragile -- verify against callers
    if (B>1){
        CrossValErr <- lapply(1:NF,function(f){
            rowMeans(do.call("cbind",lapply(CrossValErrMat,function(b)b[[f]])))
        })
    } else CrossValErr <- CrossValErrMat[[1]]
    out <- list(CrossValErr=CrossValErr)
    if (keepMatrix==TRUE && B>1) out$CrossValErrMat <- CrossValErrMat
    out
}
# ---- tar member header (archive residue, not R code) ----
# pec/R/print.R2.R0000644000176200001440000000030613571203267012770 0ustar liggesusers
##' @export
print.R2 <- function(x,...){
    ## header explains the measure, then each element is printed via print.listof
    cat("\nTime-dependent explained variation:\n\n 1- Brier(model)/Brier(reference)\n\nReference model: ",attr(x,"reference"),"\n\n")
    print.listof(x,...)
}
# ---- tar member header (archive residue, not R code) ----
# pec/R/plot.confScoreSurv.R0000755000176200001440000000466013571203267015132 0ustar liggesusers
##' Plot confidence scores over time, either averaged per model ("mean"),
##' per subject ("individual"), or as median with IQR bands ("quantiles").
##' FIX: 'smooth' was referenced but never defined; it is now a formal
##' argument (declared after '...', hence fully backward compatible).
##' @export
plot.confScoreSurv <- function(x, what="mean", xlim, ylim, col, lty, lwd, xlab="Time", ylab="Confidence score", legend.x="bottomright", ..., smooth=FALSE){
    M <- length(x$models)
    if (missing(col)) col=1:M
    if (missing(lty)) lty=1
    if (missing(lwd)) lwd=1.5
    if (length(lwd)==1) lwd <- rep(lwd,M)
    if (missing(xlim)) xlim=c(0,max(x$times))
    if (missing(ylim)) ylim=c(min(sapply(x$models,function(m)min(m$score,na.rm=TRUE))),1)
    plot(0,0,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,...)
    sumCS <- summary(x)
    switch(what,
           "mean"={
               meanScore <- sumCS[,grep("meanScore",names(sumCS))]
               loopModels <- lapply(1:M,function(m){
                   lines(x$times,meanScore[,m],lty=lty[m],col=col[m],lwd=lwd[m])
               })},
           "individual"={
               ## one line per subject and model
               loopModels <- lapply(1:M,function(m){
                   loopSubjects <- apply(x$models[[m]]$score,1,function(i){
                       lines(x$times,i,lty=lty[m],col=col[m],lwd=lwd[m])
                   })})
               if (smooth==TRUE){
                   ## FIX: was 1:length(M), i.e. always just 1, since M is a scalar
                   nix <- lapply(1:M,function(m){
                       smooth3 <- prodlim::meanNeighbors(x=x$models[[m]]$meanPred,y=x$models[[m]]$score,bandwidth=NULL)
                       lines(averageY~uniqueX,data=smooth3,lty=lty[m],col=col[m],lwd=3)
                   })
               }
               legend(x=legend.x, xpd=NA, bty="n", names(x$models), lty=lty, col=col, pch=1)
           },
           "quantiles"={
               medScore <- sumCS[,grep("medianScore",names(sumCS))]
               lowScore <- sumCS[,grep("iqrLowScore",names(sumCS))]
               upScore <- sumCS[,grep("iqrUpScore",names(sumCS))]
               loopModels <- lapply(1:M,function(m){
                   ## FIX: the original passed 'lty' twice to lines()
                   ## (lty=lty[m] and lty=1/3) which is a runtime error
                   ## ("formal argument matched by multiple actual arguments").
                   ## Intent: solid median line, dotted IQR bounds.
                   lines(x$times,medScore[,m],col=col[m],lwd=lwd[m],lty=1)
                   lines(x$times,lowScore[,m],col=col[m],lwd=lwd[m],lty=3)
                   lines(x$times,upScore[,m],col=col[m],lwd=lwd[m],lty=3)
               })}
           )
    ## NOTE(review): for what=="individual" this draws a second legend
    legend(x=legend.x,
           y=1.05,
           xpd=NA,
           bty="n",
           names(x$models),
           col=col,
           lty=lty,
           lwd=lwd[1])
}
# ---- tar member header (archive residue, not R code) ----
# pec/R/predictSurvProb.penfitS3.R0000755000176200001440000001126413571203267016205 0ustar liggesusers
## S3 wrapper for penalized::penalized. The formula may mark penalized
## covariates with pen(...), e.g. Surv(time,status)~X1+pen(3:10);
## unpenalized terms are communicated via the left hand side of 'response'.
penalizedS3 <- function(formula,data,...){
    # {{{ disentangle the formula
    ff <- as.character(formula)
    response <- formula(paste(ff[[2]],"~1",sep=""))
    terms <- strsplit(ff[[3]],"\\+|\\-")[[1]]
    terms <- sapply(terms,function(tt){## remove whitespace
        gsub(" ","",tt)
    })
    strippedTerms <- strsplit(terms,"[()]")
    # }}}
    # {{{ extract the penalized and unpenalized parts
    penalTerms <- sapply(strippedTerms,function(x){length(x)==2 && x[[1]]=="pen"})
    unpenalVarnames <- unlist(strippedTerms[penalTerms==FALSE])
    if (length(unpenalVarnames)>0){
        unpenalized <- formula(paste("~",paste(unpenalVarnames,collapse="+")))
        response <- update.formula(response,unpenalized)
    }
    penalizedVarnames <- unlist(sapply(strippedTerms[penalTerms==TRUE],
                                       function(x){strsplit(x[[2]],",")}),use.names=FALSE)
    ## a term like "3:10" selects columns by position, otherwise match by name
    penalizedVarPositions <- unlist(lapply(penalizedVarnames,function(x){
        if (length(splitter <- strsplit(x,":")[[1]])>1)
            seq(as.numeric(splitter)[1],as.numeric(splitter)[2],1)
        else match(x,names(data),nomatch=0)
    }),use.names=FALSE)
    penalizedVarPositions <- unique(penalizedVarPositions)
    ## print(penalizedVarPositions)
    if (any(tested <- (penalizedVarPositions>NCOL(data))|penalizedVarPositions<0))
        stop("Cannot find variable(s): ",names(data[tested]))
    penalized <- data[,penalizedVarPositions]
    # }}}
    # {{{ call S4 method
    ## unpenalized terms are communicated via
    ## the left hand side of response
    fitS4 <- penalized(response=response,
                       penalized=penalized,
                       data=data,
                       ...)
    # }}}
    # {{{ deliver S3 object
    fit <- list(fitS4=fitS4,call=match.call())
    class(fit) <- "penfitS3"
    fit
    # }}}
}
## Like penalizedS3, but first tunes lambda1 via penalized::optL1 and then
## refits with the optimal penalty. Extra arguments are routed to
## profL1/optL1/penalized via prodlim::SmartControl.
penalizedOpt <- function(formula,data=data,...){
    ## require(prodlim)
    argList <- prodlim::SmartControl(call=list(...),
                                     keys=c("profL1","optL1","penalized"))
    # {{{ disentangle the formula
    ff <- as.character(formula)
    response <- formula(paste(ff[[2]],"~1",sep=""))
    terms <- strsplit(ff[[3]],"\\+|\\-")[[1]]
    terms <- sapply(terms,function(tt){## remove whitespace
        gsub(" ","",tt)
    })
    strippedTerms <- strsplit(terms,"[()]")
    # }}}
    # {{{ extract the penalized and unpenalized parts
    penalTerms <- sapply(strippedTerms,function(x){length(x)==2 && x[[1]]=="pen"})
    unpenalVarnames <- unlist(strippedTerms[penalTerms==FALSE])
    if (length(unpenalVarnames)>0){
        unpenalized <- formula(paste("~",paste(unpenalVarnames,collapse="+")))
        response <- update.formula(response,unpenalized)
    }
    penalizedVarnames <- unlist(sapply(strippedTerms[penalTerms==TRUE],
                                       function(x){strsplit(x[[2]],",")}),use.names=FALSE)
    penalizedVarPositions <- unlist(lapply(penalizedVarnames,function(x){
        if (length(splitter <- strsplit(x,":")[[1]])>1)
            seq(as.numeric(splitter)[1],as.numeric(splitter)[2],1)
        else match(x,names(data),nomatch=0)
    }),use.names=FALSE)
    penalizedVarPositions <- unique(penalizedVarPositions)
    ## print(penalizedVarPositions)
    if (any(tested <- (penalizedVarPositions>NCOL(data))|penalizedVarPositions<0))
        stop("Cannot find variable(s): ",names(data[tested]))
    penalized <- data[,penalizedVarPositions]
    # }}}
    # {{{ determine optimal lambdas
    ## prof <- do.call("profL1",
    ## c(list(response=response,penalized=penalized,data=data),
    ## argList$profL1))
    pen <- do.call("optL1",
                   c(list(response=response,penalized=penalized,data=data),
                     argList$optL1))
    # }}}
    # {{{ call S4 method
    fitOpt <- do.call("penalized",c(list(response=response,penalized=penalized,data=data),
                                    list(lambda1=pen$lambda),
                                    argList$penalized))
    # }}}
    # {{{ deliver S3 object
    fit <- list(fitS4=fitOpt,call=match.call())
    class(fit) <- "penfitS3"
    fit
    # }}}
}
##' @export
predictSurvProb.penfitS3 <- function(object, newdata, times, ...){
    ## FIX: was object$fit, which only worked via '$' partial matching of
    ## the stored element 'fitS4'
    penfit <- object$fitS4
    pCovaNames <- names(penfit@penalized)
    newPen <- newdata[,pCovaNames]
    ptemp <- predict(penfit,penalized=newPen,data=newdata)
    ## require(prodlim)
    pos <- prodlim::sindex(jump.times=ptemp@time,eval.times=times)
    ## Remark: currently it is possible, but theoretically
    ## not allowed to carry predictions forward beyond the
    ## last jump.time
    p <- cbind(1,ptemp@curves)[,c(pos+1)]
    if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
        stop(paste("\nPrediction matrix has wrong dimension:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep=""))
    p
}
# ---- tar member header (archive residue, not R code) ----
# pec/R/R2.R0000755000176200001440000000732313571203266011645 0ustar liggesusers
#' Explained variation for survival models
#'
#' This function computes a time-dependent $R^2$ like measure of the variation
#' explained by a survival prediction model, by dividing the mean squared error
#' (Brier score) of the model by the mean squared error (Brier score) of a
#' reference model which ignores all the covariates.
#'
#'
#' In survival analysis the prediction error of the Kaplan-Meier estimator
#' plays a similar role as the total sum of squares in linear regression.
#' Hence, it is a sensible reference model for $R^2$.
#'
#' @param object An object with estimated prediction error curves obtained with
#' the function \link{pec}
#' @param models For which of the models in \code{object$models} should we
#' compute $R^2(t). By default all models are used except for the reference
#' model.
#' @param what The name of the entry in \code{x} to be used. Defaults to
#' \code{PredErr} Other choices are \code{AppErr}, \code{BootCvErr},
#' \code{Boot632}, \code{Boot632plus}.
#' @param times Time points at which the summaries are shown.
#' @param reference Position of the model whose prediction error is used as the
#' reference in the denominator when constructing $R^2$
#' @return A matrix where the first column holds the times and the following
#' columns are the corresponding $R^2$ values for the requested prediction
#' models.
#' @author Thomas A. Gerds \email{tag@@biostat.ku.dk}
#' @seealso \code{\link{pec}}
#' @references E. Graf et al. (1999), Assessment and comparison of prognostic
#' classification schemes for survival data. Statistics in Medicine, vol 18,
#' pp= 2529--2545.
#'
#' Gerds TA, Cai T & Schumacher M (2008) The performance of risk prediction
#' models Biometrical Journal, 50(4), 457--479
#' @keywords survival
#' @examples
#'
#' set.seed(18713)
#' library(prodlim)
#' library(survival)
#' dat=SimSurv(100)
#' nullmodel=prodlim(Hist(time,status)~1,data=dat)
#' pmodel1=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE)
#' pmodel2=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE)
#' perror=pec(list(Cox1=pmodel1,Cox2=pmodel2),Hist(time,status)~1,data=dat,reference=TRUE)
#' R2(perror,times=seq(0,1,.1),reference=1)
#'
#' @export
R2 <- function(object, models, what, times, reference=1){
    stopifnot(class(object)[1] == "pec")
    # {{{find the prediction models
    ## default: all models except the reference; model names are mapped to positions
    if (missing(models)) models <- (1:length(object$models))[-reference]
    else if (!is.numeric(models)) models <- match(models,names(object$models))
    # }}}
    # {{{ what errors
    ## by default use all stored error types (entries ending in "Err")
    if (missing(what) || is.null(what)){
        what <- grep(c("Err$"),names(object),value=TRUE)
    }
    # }}}
    # {{{ find the times
    object.times <- object$time
    if(missing(times)) times <- object$maxtime
    ## FIX: the warning used length(time), which resolves to the base
    ## function 'time' and therefore always reported 1; report the number
    ## of time points actually stored in the object instead
    if (!(object$exact || length(object.times)>100))
        warning("Only ", length(object.times)," time point",ifelse(length(object.times)==1,"","s")," used")
    # }}}
    # {{{ for each element of what: evaluate R2 at times
    out <- lapply(what,function(e){
        if (is.null(object[[e]])) stop("No values for computing R^2")
        ref.error <- object[[e]][[reference]]
        out <- data.frame(do.call("cbind",lapply(1:length(models),function(w){
            rr <- 1-object[[e]][[models[w]]]/ref.error
            ## where the reference error is 0 the ratio is undefined; set R2 to 0
            rr[ref.error==0] <- 0
            rr
        })))
        names(out) <- names(object$models)[models]
        ## cat("R^2 based on the estimate stored in ",what,":\n\n")
        ## print(cbind(time=times,RR=rbind(0,out)[1+prodlim::sindex(object.times,times),,drop=FALSE]))
        ## evaluate the step function at the requested times (prepend 0 for t=0)
        cbind(time=times,RR=rbind(0,out)[1+prodlim::sindex(object.times,times),,drop=FALSE])
    })
    # }}}
    # {{{ prepare output
    NW <- length(what)
    NT <- length(times)
    names(out) <- what
    # }}}
    attr(out,"reference") <- names(object$models)[reference]
    class(out) <- "R2"
    out
}
pec/R/pec.R0000755000176200001440000012544514131004301012115 0ustar liggesusers#' Prediction error curves #' #' Evaluating the performance of risk prediction models in survival analysis. #' The Brier score is a weighted average of the squared distances between the #' observed survival status and the predicted survival probability of a model. #' Roughly the weights correspond to the probabilities of not being censored. #' The weights can be estimated depend on covariates. Prediction error curves #' are obtained when the Brier score is followed over time. Cross-validation #' based on bootstrap resampling or bootstrap subsampling can be applied to #' assess and compare the predictive power of various regression modelling #' strategies on the same set of data. #' #' Note that package riskRegression provides very similar #' functionality (and much more) but not yet every feature of pec. #' #' Missing data in the response or in the input matrix cause a failure. #' #' The status of the continuous response variable at cutpoints (\code{times}), #' ie status=1 if the response value exceeds the cutpoint and status=0 #' otherwise, is compared to predicted event status probabilities which are #' provided by the prediction models on the basis of covariates. The #' comparison is done with the Brier score: the quadratic difference between #' 0-1 response status and predicted probability. #' #' There are two different sources for bias when estimating prediction error in #' right censored survival problems: censoring and high flexibility of the #' prediction model. The first is controlled by inverse probability of #' censoring weighting, the second can be controlled by special Monte Carlo #' simulation. In each step, the resampling procedures reevaluate the #' prediction model. Technically this is done by replacing the argument #' \code{object$call$data} by the current subset or bootstrap sample of the #' full data. 
#' #' For each prediction model there must be a \code{predictSurvProb} method: for #' example, to assess a prediction model which evaluates to a \code{myclass} #' object one defines a function called \code{predictSurvProb.myclass} with #' arguments \code{object,newdata,cutpoints,...} #' #' Such a function takes the object and #' derives a matrix with predicted event status probabilities for each subject #' in newdata (rows) and each cutpoint (column) of the response variable that #' defines an event status. #' #' Currently, \code{predictSurvProb} methods are available for the following #' R-objects: \describe{ \item{}{\code{matrix}} \item{}{\code{aalen}, #' \code{cox.aalen} from \code{library(timereg)}} \item{}{\code{mfp} from #' \code{library(mfp)}} \item{}{\code{phnnet}, \code{survnnet} from #' \code{library(survnnet)}} \item{}{\code{rpart} (from \code{library(rpart)})} #' \item{}{\code{coxph}, \code{survfit} from \code{library(survival)}} #' \item{}{\code{cph}, \code{psm} from \code{library(rms)}} #' \item{}{\code{prodlim} from \code{library(prodlim)}} \item{}{\code{glm} from #' \code{library(stats)}} } #' #' @aliases pec #' @param object A named list of prediction models, where allowed entries are #' (1) R-objects for which a \link{predictSurvProb} method exists (see #' details), (2) a \code{call} that evaluates to such an R-object (see #' examples), (3) a matrix with predicted probabilities having as many rows as #' \code{data} and as many columns as \code{times}. For cross-validation all #' objects in this list must include their \code{call}. #' @param formula A survival formula as obtained either #' with \code{prodlim::Hist} or \code{survival::Surv}. #' The left hand side is used to find the status response variable in \code{data}. For right censored data, the right #' hand side of the formula is used to specify conditional censoring models. #' For example, set \code{Surv(time,status)~x1+x2} and \code{cens.model="cox"}. 
#' Then the weights are based on a Cox regression model for the censoring times #' with predictors x1 and x2. Note that the usual coding is assumed: #' \code{status=0} for censored times and that each variable name that appears #' in \code{formula} must be the column name in \code{data}. If there are no #' covariates, i.e. \code{formula=Surv(time,status)~1} the \code{cens.model} is #' coerced to \code{"marginal"} and the Kaplan-Meier estimator for the #' censoring times is used to calculate the weights. If \code{formula} is #' \code{missing}, try to extract a formula from the first element in object. #' @param data A data frame in which to validate the prediction models and to #' fit the censoring model. If \code{data} is missing, try to extract a data #' set from the first element in object. #' @param traindata A data frame in which the models are trained. This argument #' is used only in the absence of crossvalidation, in which case it is #' passed to the predictHandler function predictSurvProb #' @param times A vector of time points. At each time point the prediction #' error curves are estimated. If \code{exact==TRUE} the \code{times} are #' merged with all the unique values of the response variable. If \code{times} #' is missing and \code{exact==TRUE} all the unique values of the response #' variable are used. If missing and \code{exact==FALSE} use a equidistant #' grid of values between \code{start} and \code{maxtime}. The distance is #' determined by \code{exactness}. #' @param cause For competing risks, the event of interest. Defaults to the #' first state of the response, which is obtained by evaluating the left hand #' side of \code{formula} in \code{data}. #' @param start Minimal time for estimating the prediction error curves. If #' missing and \code{formula} defines a \code{Surv} or \code{Hist} object then #' \code{start} defaults to \code{0}, otherwise to the smallest observed value #' of the response variable. 
\code{start} is ignored if \code{times} are given. #' @param maxtime Maximal time for estimating the prediction error curves. If #' missing the largest value of the response variable is used. #' @param exact Logical. If \code{TRUE} estimate the prediction error curves at #' all the unique values of the response variable. If \code{times} are given #' and \code{exact=TRUE} then the \code{times} are merged with the unique #' values of the response variable. #' @param exactness An integer that determines how many equidistant gridpoints #' are used between \code{start} and \code{maxtime}. The default is 100. #' @param fillChar Symbol used to fill-in places where the values of the #' prediction error curves are not available. The default is \code{NA}. #' @param cens.model Method for estimating inverse probability of censoring #' weigths: #' #' \code{cox}: A semi-parametric Cox proportional hazard model is fitted to the #' censoring times #' #' \code{marginal}: The Kaplan-Meier estimator for the censoring times #' #' \code{nonpar}: Nonparametric extension of the Kaplan-Meier for the censoring #' times using symmetric nearest neighborhoods -- available for arbitrary many #' strata variables on the right hand side of argument \code{formula} but at #' most one continuous variable. See the documentation of the functions #' \code{prodlim} and \code{neighborhood} from the prodlim package. #' #' \code{aalen}: The nonparametric Aalen additive model fitted to the censoring #' times. Requires the \code{timereg} package. #' @param ipcw.refit If \code{TRUE} the inverse probability of censoring #' weigths are estimated separately in each training set during #' cross-validation. #' @param ipcw.args List of arguments passed to function specified by argument \code{cens.model}. #' @param splitMethod SplitMethod for estimating the prediction error curves. #' #' \code{none/noPlan}: Assess the models in the same data where they are #' fitted. \code{boot}: DEPRECIATED. 
#' #' \code{cvK}: K-fold cross-validation, i.e. \code{cv10} for 10-fold #' cross-validation. After splitting the data in K subsets, the prediction #' models (ie those specified in \code{object}) are evaluated on the data #' omitting the Kth subset (training step). The prediction error is estimated #' with the Kth subset (validation step). #' #' The random splitting is repeated \code{B} times and the estimated prediction #' error curves are obtained by averaging. #' #' \code{BootCv}: Bootstrap cross validation. The prediction models are trained #' on \code{B} bootstrap samples, that are either drawn with replacement of the #' same size as the original data or without replacement from \code{data} of #' the size \code{M}. The models are assessed in the observations that are NOT #' in the bootstrap sample. #' #' \code{Boot632}: Linear combination of AppErr and BootCvErr using the #' constant weight .632. #' #' \code{Boot632plus}: Linear combination of AppErr and BootCv using weights #' dependent on how the models perform in permuted data. #' #' \code{loocv}: Leave one out cross-validation. #' #' \code{NoInf}: Assess the models in permuted data. #' @param B Number of bootstrap samples. The default depends on argument #' \code{splitMethod}. When \code{splitMethod} in #' c("BootCv","Boot632","Boot632plus") the default is 100. For #' \code{splitMethod="cvK"} \code{B} is the number of cross-validation cycles, #' and -- default is 1. For \code{splitMethod="none"} \code{B} is the number #' of bootstrap simulations e.g. to obtain bootstrap confidence limits -- #' default is 0. #' @param M The size of the bootstrap samples for resampling without #' replacement. Ignored for resampling with replacement. #' @param reference Logical. If \code{TRUE} add the marginal Kaplan-Meier #' prediction model as a reference to the list of models. #' @param model.args List of extra arguments that can be passed to the #' \code{predictSurvProb} methods. 
The list must have an entry for each entry #' in \code{object}. #' @param model.parms Experimental. List of with exactly one entry for each #' entry in \code{object}. Each entry names parts of the value of the fitted #' models that should be extracted and added to the value. #' @param keep.index Logical. If \code{FALSE} remove the bootstrap or #' cross-validation index from the output list which otherwise is included in #' the splitMethod part of the output list. #' @param keep.matrix Logical. If \code{TRUE} add all \code{B} prediction error #' curves from bootstrapping or cross-validation to the output. #' @param keep.models Logical. If \code{TRUE} keep the models in object. Since #' fitted models can be large objects the default is \code{FALSE}. #' @param keep.residuals Logical. If \code{TRUE} keep the patient individual #' residuals at \code{testTimes}. #' @param keep.pvalues For \code{multiSplitTest}. If \code{TRUE} keep the #' pvalues from the single splits. #' @param noinf.permute If \code{TRUE} the noinformation error is approximated #' using permutation. #' @param multiSplitTest If \code{TRUE} the test proposed by van de Wiel et al. #' (2009) is applied. Requires subsampling bootstrap cross-validation, i.e. #' that \code{splitMethod} equals \code{bootcv} and that \code{M} is specified. #' @param testIBS A range of time points for testing differences between models #' in the integrated Brier scores. #' @param testTimes A vector of time points for testing differences between #' models in the time-point specific Brier scores. #' @param confInt Experimental. #' @param confLevel Experimental. #' @param verbose if \code{TRUE} report details of the progress, e.g. count the #' steps in cross-validation. #' @param savePath Place in your file system (i.e., a directory on your #' computer) where training models fitted during cross-validation are saved. If #' \code{missing} training models are not saved. 
#' @param slaveseed Vector of seeds, as long as \code{B}, to be given to the #' slaves in parallel computing. #' @param na.action Passed immediately to model.frame. Defaults to na.fail. If #' set otherwise most prediction models will not work. #' @param ... Not used. #' @return A \code{pec} object. See also the help pages of the corresponding #' \code{print}, \code{summary}, and \code{plot} methods. The object includes #' the following components: \item{PredErr}{ The estimated prediction error #' according to the \code{splitMethod}. A matrix where each column represents #' the estimated prediction error of a fit at the time points in time. } #' \item{AppErr}{ The training error or apparent error obtained when the #' model(s) are evaluated in the same data where they were trained. Only if #' \code{splitMethod} is one of "NoInf", "cvK", "BootCv", "Boot632" or #' "Boot632plus". } \item{BootCvErr}{ The prediction error when the model(s) #' are trained in the bootstrap sample and evaluated in the data that are not #' in the bootstrap sample. Only if \code{splitMethod} is one of "Boot632" or #' "Boot632plus". When \code{splitMethod="BootCv"} then the \code{BootCvErr} is #' stored in the component \code{PredErr}. } \item{NoInfErr}{ The prediction #' error when the model(s) are evaluated in the permuted data. Only if #' \code{splitMethod} is one of "BootCv", "Boot632", or "Boot632plus". For #' \code{splitMethod="NoInf"} the \code{NoInfErr} is stored in the component #' \code{PredErr}. } \item{weight}{ The weight used to linear combine the #' \code{AppErr} and the \code{BootCvErr} Only if \code{splitMethod} is one of #' "Boot632", or "Boot632plus". } \item{overfit}{ Estimated \code{overfit} of #' the model(s). See Efron \& Tibshirani (1997, Journal of the American #' Statistical Association) and Gerds \& Schumacher (2007, Biometrics). Only #' if \code{splitMethod} is one of "Boot632", or "Boot632plus". 
} #' \item{call}{The call that produced the object} \item{time}{The time points #' at which the prediction error curves change.} \item{ipcw.fit}{The fitted #' censoring model that was used for re-weighting the Brier score residuals. #' See Gerds \& Schumacher (2006, Biometrical Journal)} \item{n.risk}{The #' number of subjects at risk for all time points.} \item{models}{The #' prediction models fitted in their own data.} \item{cens.model}{The censoring #' models.} \item{maxtime}{The latest timepoint where the prediction error #' curves are estimated.} \item{start}{The earliest timepoint where the #' prediction error curves are estimated.} \item{exact}{\code{TRUE} if the #' prediction error curves are estimated at all unique values of the response #' in the full data.} \item{splitMethod}{The splitMethod used for estimation of #' the overfitting bias.} #' @author Thomas Alexander Gerds \email{tag@@biostat.ku.dk} #' @seealso \code{\link{plot.pec}}, \code{\link{summary.pec}}, #' \code{\link{R2}}, \code{\link{crps}} #' @references #' #' Gerds TA, Kattan MW. #' Medical Risk Prediction Models: With Ties to Machine Learning. #' Chapman & Hall/CRC #' https://www.routledge.com/9781138384477 #' #' Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). #' Evaluating Random Forests for Survival Analysis Using Prediction Error #' Curves. Journal of Statistical Software, 50(11), 1-23. DOI #' 10.18637/jss.v050.i11 #' #' E. Graf et al. (1999), Assessment and comparison of prognostic #' classification schemes for survival data. Statistics in Medicine, vol 18, #' pp= 2529--2545. #' #' Efron, Tibshirani (1997) Journal of the American Statistical Association 92, #' 548--560 Improvement On Cross-Validation: The .632+ Bootstrap Method. #' #' Gerds, Schumacher (2006), Consistent estimation of the expected Brier score #' in general survival models with right-censored event times. Biometrical #' Journal, vol 48, 1029--1040. #' #' Thomas A. 
Gerds, Martin Schumacher (2007) Efron-Type Measures of Prediction #' Error for Survival Analysis Biometrics, 63(4), 1283--1287 #' doi:10.1111/j.1541-0420.2007.00832.x #' #' Martin Schumacher, Harald Binder, and Thomas Gerds. Assessment of survival #' prediction models based on microarray data. Bioinformatics, 23(14):1768-74, #' 2007. #' #' Mark A. van de Wiel, Johannes Berkhof, and Wessel N. van Wieringen Testing #' the prediction error difference between 2 predictors Biostatistics (2009) #' 10(3): 550-560 doi:10.1093/biostatistics/kxp011 #' @keywords survival #' @examples #' #' # simulate an artificial data frame #' # with survival response and two predictors #' #' set.seed(130971) #' library(prodlim) #' library(survival) #' dat <- SimSurv(100) #' #' # fit some candidate Cox models and compute the Kaplan-Meier estimate #' #' Models <- list("Cox.X1"=coxph(Surv(time,status)~X1,data=dat,x=TRUE,y=TRUE), #' "Cox.X2"=coxph(Surv(time,status)~X2,data=dat,x=TRUE,y=TRUE), #' "Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=dat,x=TRUE,y=TRUE)) #' #' # compute the apparent prediction error #' PredError <- pec(object=Models, #' formula=Surv(time,status)~X1+X2, #' data=dat, #' exact=TRUE, #' cens.model="marginal", #' splitMethod="none", #' B=0, #' verbose=TRUE) #' #' print(PredError,times=seq(5,30,5)) #' summary(PredError) #' plot(PredError,xlim=c(0,30)) #' #' # Comparison of Weibull model and Cox model #' library(survival) #' library(rms) #' library(pec) #' data(pbc) #' pbc <- pbc[sample(1:NROW(pbc),size=100),] #' f1 <- psm(Surv(time,status!=0)~edema+log(bili)+age+sex+albumin,data=pbc) #' f2 <- coxph(Surv(time,status!=0)~edema+log(bili)+age+sex+albumin,data=pbc,x=TRUE,y=TRUE) #' f3 <- cph(Surv(time,status!=0)~edema+log(bili)+age+sex+albumin,data=pbc,surv=TRUE) #' brier <- pec(list("Weibull"=f1,"CoxPH"=f2,"CPH"=f3),data=pbc,formula=Surv(time,status!=0)~1) #' print(brier) #' plot(brier) #' #' # compute the .632+ estimate of the generalization error #' set.seed(130971) #' 
library(prodlim) #' library(survival) #' dat <- SimSurv(100) #' set.seed(17100) #' PredError.632plus <- pec(object=Models, #' formula=Surv(time,status)~X1+X2, #' data=dat, #' exact=TRUE, #' cens.model="marginal", #' splitMethod="Boot632plus", #' B=3, #' verbose=TRUE) #' #' print(PredError.632plus,times=seq(4,12,4)) #' summary(PredError.632plus) #' plot(PredError.632plus,xlim=c(0,30)) #' # do the same again but now in parallel #' \dontrun{set.seed(17100) #' # library(doMC) #' # registerDoMC() #' PredError.632plus <- pec(object=Models, #' formula=Surv(time,status)~X1+X2, #' data=dat, #' exact=TRUE, #' cens.model="marginal", #' splitMethod="Boot632plus", #' B=3, #' verbose=TRUE) #' } #' # assessing parametric survival models in learn/validation setting #' learndat <- SimSurv(50) #' testdat <- SimSurv(30) #' library(rms) #' f1 <- psm(Surv(time,status)~X1+X2,data=learndat) #' f2 <- psm(Surv(time,status)~X1,data=learndat) #' pf <- pec(list(f1,f2),formula=Surv(time,status)~1,data=testdat,maxtime=200) #' plot(pf) #' summary(pf) #' #' # ---------------- competing risks ----------------- #' #' library(survival) #' library(riskRegression) #' if(requireNamespace("cmprsk",quietly=TRUE)){ #' library(cmprsk) #' library(pec) #' cdat <- SimCompRisk(100) #' f1 <- CSC(Hist(time,event)~X1+X2,cause=2,data=cdat) #' f2 <- CSC(Hist(time,event)~X1,data=cdat,cause=2) #' f3 <- FGR(Hist(time,event)~X1+X2,cause=2,data=cdat) #' f4 <- FGR(Hist(time,event)~X1+X2,cause=2,data=cdat) #' p1 <- pec(list(f1,f2,f3,f4),formula=Hist(time,event)~1,data=cdat,cause=2) #' } #' #' @export # {{{ header pec.list pec <- function(object, formula, data, traindata, times, cause, ## time points start, maxtime, exact=TRUE, exactness=100, fillChar=NA, ## censoring weighting cens.model="cox", ipcw.refit=FALSE, ipcw.args=NULL, ## data splitting splitMethod="none", B, M, ## misc parameters reference=TRUE, model.args=NULL, model.parms=NULL, keep.index=FALSE, keep.matrix=FALSE, keep.models=FALSE, keep.residuals=FALSE, 
keep.pvalues=FALSE, noinf.permute=FALSE, multiSplitTest=FALSE, testIBS, testTimes, confInt=FALSE, confLevel=0.95, verbose=TRUE, savePath=NULL, slaveseed=NULL, na.action=na.fail, ...) { # }}} # {{{ checking integrity some arguments theCall=match.call() if (match("replan",names(theCall),nomatch=FALSE)) stop("The argument name 'replan' has been replaced by 'splitMethod'.") if (!missing(testIBS) && (!(is.logical(testIBS) || (length(testIBS)==2 && is.numeric(testIBS))))) stop("Argument testIBS can be TRUE/FALSE or a vector of two numeric values.") if (missing(testIBS)) testIBS <- FALSE if (keep.residuals && missing(testTimes)) stop("To keep.residuals please specify testTimes.") if (missing(splitMethod) && multiSplitTest==TRUE){ stop("Need data splitting to compute van de Wiel's test") } if (missing(M) && multiSplitTest) M <- NA # }}} # {{{ check and convert object if (class(object)[1]!="list") { object <- list(object) } # }}} # {{{ formula if (missing(formula)){ if (length(grep("~",as.character(object[[1]]$call$formula)))==0){ stop(paste("Argument formula is missing and first model has no usable formula:",as.character(object[[1]]$call$formula))) } else{ ftry <- try(formula <- eval(object[[1]]$call$formula),silent=TRUE) if ((class(ftry)[1]=="try-error") || match("formula",class(formula),nomatch=0)==0) stop("Argument formula is missing and first model has no usable formula.") else if (verbose) warning("Formula missing. 
Using formula from first model") } } formula.names <- try(all.names(formula),silent=TRUE) if (!(formula.names[1]=="~") || (match("$",formula.names,nomatch=0)+match("[",formula.names,nomatch=0)>0)){ stop("Invalid specification of formula.\n Could be that you forgot the right hand side:\n ~covariate1 + covariate2 + ...?\nNote that any subsetting, ie data$var or data[,\"var\"], is not supported by this function.") } else{ if (!(formula.names[2] %in% c("Surv","Hist"))) survp <- FALSE else survp <- TRUE } # }}} # {{{ data if (missing(data)){ data <- eval(object[[1]]$call$data) if (match("data.frame",class(data),nomatch=0)==0) stop("Argument data is missing.") else if (verbose) warning("Argument data is missing. I use the data from the call to the first model instead.") } # }}} # {{{ censoring model cens.model <- match.arg(cens.model,c("cox","marginal","nonpar","aalen","none","rfsrc")) # }}} # {{{ response histformula <- formula if (histformula[[2]][[1]]==as.name("Surv")){ histformula[[2]][[1]] <- as.name("Hist") } ## m <- model.frame(histformula,data,na.action=na.fail) m <- model.frame(histformula,data,na.action=na.action) response <- model.response(m) if (match("Surv",class(response),nomatch=0)!=0){ attr(response,"model") <- "survival" attr(response,"cens.type") <- "rightCensored" model.type <- "survival" } model.type <- attr(response,"model") if (model.type=="competing.risks"){ predictHandlerFun <- "predictEventProb" if (missing(cause)) cause <- attr(response,"state")[1] } else{ if (survp==FALSE && NCOL(response)!=1) stop("Response must be one-dimensional.") if (survp==TRUE && NCOL(response)!=2) stop("Survival response must have two columns: time and status.") predictHandlerFun <- "predictSurvProb" } # }}} # {{{ prediction models if (reference==TRUE) { ProdLimform <- as.formula(update(formula,".~NULL")) ## ProdLimfit <- do.call(prodlim::prodlim,list(formula=ProdLimform,data=data)) ProdLimfit <- prodlim::prodlim(formula=ProdLimform,data=data) ProdLimfit$call$data <- 
as.character(substitute(data)) ProdLimfit$call$formula=ProdLimform ProdLimfit$formula <- as.formula(ProdLimfit$formula) ## print(environment(ProdLimfit$formula)) ## if (model.type=="competing.risks") ## object <- c(list(Reference=ProdLimfit),object) ## else ## browser() object <- c(list("Reference"=ProdLimfit),object) } if (is.null(names(object))){ names(object) <- sapply(object,function(o)class(o)[1]) names(object) <- make.names(names(object),unique=TRUE) } else{ # fix missing names if (any(names(object)=="")){ names(object)[(names(object)=="")] <- sapply(object[(names(object)=="")],function(o)class(o)[1]) names(object) <- make.names(names(object),unique=TRUE) }else{ # leave names as they were given } } ## names(object) <- make.names(names(object),unique=TRUE) NF <- length(object) # }}} # {{{ sort the data if (survp){ neworder <- order(response[,"time"],-response[,"status"]) if (predictHandlerFun=="predictEventProb"){ event <- prodlim::getEvent(response,mode="character") event <- event[neworder] } response <- response[neworder,,drop=FALSE] Y <- response[,"time"] status <- response[,"status"] } else{ cens.model <- "none" neworder <- order(response) Y <- response[neworder] status <- rep(1,length(Y)) } ## for competing risks find the cause of interest. 
if (predictHandlerFun=="predictEventProb"){ availableCauses <- unique(event) if (!match(cause, availableCauses,nomatch=FALSE)) stop("Cause ",cause," is not among the available causes: ",paste(availableCauses,collapse=", ")) event <- event==cause } ## else{ ## event <- NULL ## } data <- data[neworder,] unique.Y <- unique(Y) N <- length(Y) NU <- length(unique.Y) # }}} # {{{ splitMethod splitMethod <- resolvesplitMethod(splitMethod=splitMethod,B=B,N=N,M=M) B <- splitMethod$B ResampleIndex <- splitMethod$index k <- splitMethod$k do.resample <- !(is.null(ResampleIndex)) if (keep.matrix==TRUE & !do.resample){ warning("Argument keep.matrix set to FALSE, since no resampling/crossvalidation is requested.") keep.matrix <- FALSE } # }}} # {{{ find maxtime, start, and jumptimes in the range of the response if (missing(maxtime) || is.null(maxtime)) maxtime <- unique.Y[NU] if (missing(start)) if (survp==TRUE) start <- 0 ## survival times are positive else start <- min(unique.Y) if (missing(times)){ if (exact==TRUE) times <- unique(c(start,unique.Y)) else times <- seq(start,maxtime,(maxtime - start)/exactness) } else{ if (exact==TRUE) times <- sort(c(start,unique(times),unique.Y)) else times <- sort(unique(c(start,times))) } times <- times[times<=maxtime] NT <- length(times) # }}} # {{{ IPCW (all equal to 1 without censoring) if((cens.model %in% c("aalen","cox","nonpar"))){ if (all(as.numeric(status)==1) || sum(status)==N){ if (verbose) message("No censored observations: cens.model coerced to \"none\".") cens.model <- "none" } if ((cens.model!="nonpar") && length(attr(terms(formula),"factors"))==0){ if (verbose==TRUE) message("No covariates specified: Kaplan-Meier for censoring times used for weighting.") cens.model <- "marginal"} } if (predictHandlerFun=="predictEventProb"){ iFormula <- as.formula(paste("Surv(itime,istatus)","~",as.character(formula)[[3]])) iData <- data iData$itime <- response[,"time"] iData$istatus <- response[,"status"] if (ipcw.refit==TRUE) stop("pec: 
internal refitting of censoring distribution not (not yet) supported for competing risks") ipcw.call <- NULL ipcw <- ipcw(formula=iFormula, data=iData, method=cens.model, args=ipcw.args, times=times, subjectTimes=Y, subjectTimesLag=1) ipcw$dim <- if (cens.model %in% c("marginal","none")) 0 else 1 } else{ if (ipcw.refit==TRUE && splitMethod$internal.name %in% c("Boot632plus","BootCv","Boot632")) ipcw.call <- list(formula=formula,data=NULL,method=cens.model,times=times,subjectTimes=NULL,subjectTimesLag=1) else ipcw.call <- NULL ipcw <- ipcw(formula=formula, data=data, method=cens.model, args=ipcw.args, times=times, subjectTimes=Y, subjectTimesLag=1) ipcw$dim <- if (cens.model %in% c("marginal","none")) 0 else 1 } ## force ipc weights not to exaggerate ## weights should not be greater than 1/(sample size) ## if (ipcw$dim==1){ ## ipcw$IPCW.times <- apply(ipcw$IPCW.times,1,function(x)pmax(x,1/N)) ## } ## else{ ## ipcw$IPCW.times <- pmax(ipcw$IPCW.times,1/N) ## } ## ipcw$IPCW.subjectTimes <- pmax(ipcw$IPCW.subjectTimes,1/N) ## browser() # wt <- ipcw$IPCW.times # wt.obs <- ipcw$IPCW.subjectTimes # if (NCOL(wt)>1) {stopifnot(length(wt)==(N*NT))} else{stopifnot(length(wt)==NT)} # }}} # {{{ checking the models for compatibility with resampling if (do.resample){ cm <- checkModels(object=object,model.args=model.args,model.parms=model.parms,splitMethod=splitMethod$internal.name) model.args <- cm$model.args model.parms <- cm$model.parms } # }}} # {{{ ---------------------------Apparent error--------------------------- AppErr <- lapply(1:NF,function(f){ ## message(f) fit <- object[[f]] extraArgs <- model.args[[f]] if (predictHandlerFun=="predictEventProb"){ # competing risks pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=times,cause=cause),extraArgs)) if (class(fit)[[1]]%in% c("matrix","numeric")) pred <- pred[neworder,,drop=FALSE] 
.C("pecCR",pec=double(NT),as.double(Y),as.double(status),as.double(event),as.double(times),as.double(pred),as.double(ipcw$IPCW.times),as.double(ipcw$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred))),NAOK=TRUE,PACKAGE="pec")$pec } else{ # survival pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=times),extraArgs)) if (class(fit)[[1]]%in% c("matrix","numeric")) pred <- pred[neworder,,drop=FALSE] ## u <- list(as.double(Y),as.double(status),as.double(times),as.double(pred),as.double(ipcw$IPCW.times),as.double(ipcw$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred)))) ## if (f==2) browser(skipCalls=1) .C("pecSRC",pec=double(NT),as.double(Y),as.double(status),as.double(times),as.double(pred),as.double(ipcw$IPCW.times),as.double(ipcw$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred))),NAOK=TRUE,PACKAGE="pec")$pec } }) names(AppErr) <- names(object) ## se.Apperr <- lapply(1:NF,function(f){ ## ## message(f) ## fit <- object[[f]] ## extraArgs <- model.args[[f]] ## if (predictHandlerFun=="predictEventProb"){ # competing risks ## pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=times,cause=cause),extraArgs)) ## if (class(object[[f]])[[1]]%in% c("matrix","numeric")) pred <- pred[neworder,,drop=FALSE] ## Paulo(as.double(Y), ## as.double(status), ## as.double(event), ## as.double(times), ## as.double(pred), ## as.double(ipcw$IPCW.times), ## as.double(ipcw$IPCW.subjectTimes)) ## } ## else{ # survival ## pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=times),extraArgs)) ## if (class(object[[f]])[[1]]%in% c("matrix","numeric")) pred <- pred[neworder,,drop=FALSE] ## Paulo(as.double(Y), ## as.double(status), ## as.double(times), ## as.double(pred), ## as.double(ipcw$IPCW.times), ## as.double(ipcw$IPCW.subjectTimes)) ## }}) # }}} # {{{------------------------No information 
error------------------------ if (splitMethod$internal.name %in% c("Boot632plus")){ if (verbose==TRUE){ message("Computing noinformation error using all permutations") } if (noinf.permute==FALSE){ NoInfErr <- lapply(1:NF,function(f){ fit <- object[[f]] extraArgs <- model.args[[f]] if (predictHandlerFun=="predictEventProb"){ # competing risks pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=times,cause=cause),extraArgs)) } else{ # survival pred <- do.call(predictHandlerFun,c(list(object=fit,newdata=data,times=times),extraArgs)) } if (predictHandlerFun=="predictEventProb") .C("pec_noinfCR",pec=double(NT),as.double(Y),as.double(status),as.double(event),as.double(times),as.double(pred),as.double(ipcw$IPCW.times),as.double(ipcw$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred))),NAOK=TRUE,PACKAGE="pec")$pec else .C("pec_noinf",pec=double(NT),as.double(Y),as.double(status),as.double(times),as.double(pred),as.double(ipcw$IPCW.times),as.double(ipcw$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred))),NAOK=TRUE,PACKAGE="pec")$pec }) names(NoInfErr) <- names(object) }else{ if (verbose==TRUE){ message("Noinformation error simulation loop (B=",B,")") } ## FIXME: need to parallelize noinf NoInfErrList <- lapply(1:B,function(b){ if (verbose==TRUE){ internalTalk(b,B,sign=".") } responseNames <- colnames(response) noinf.b <- data[sample(1:NROW(data),replace=FALSE),-match(responseNames,names(data))] noinf.b[,responseNames] <- response ipcw.b <- ipcw(formula=formula,data=noinf.b,method=cens.model,args=ipcw.args,times=times,subjectTimes=Y,subjectTimesLag=1) noinfPredErr <- lapply(1:NF,function(f){ fit.b <- internalReevalFit(object=object[[f]],data=noinf.b,step=b,silent=FALSE,verbose=verbose) ## fit.b$call <- object[[f]]$call extraArgs <- model.args[[f]] pred.b <- do.call(predictHandlerFun,c(list(object=fit.b,newdata=noinf.b,times=times),extraArgs)) if 
(predictHandlerFun=="predictEventProb"){ pred.b <- do.call(predictHandlerFun,c(list(object=fit.b,newdata=noinf.b,times=times,cause=cause),extraArgs)) .C("pecCR",pec=double(NT),as.double(Y),as.double(status),as.double(event),as.double(times),as.double(pred.b),as.double(ipcw.b$IPCW.times),as.double(ipcw.b$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred.b))),NAOK=TRUE,PACKAGE="pec")$pec } else{ pred.b <- do.call(predictHandlerFun,c(list(object=fit.b,newdata=noinf.b,times=times),extraArgs)) .C("pecSRC",pec=double(NT),as.double(Y),as.double(status),as.double(times),as.double(pred.b),as.double(ipcw.b$IPCW.times),as.double(ipcw.b$IPCW.subjectTimes),as.integer(N),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred.b))),NAOK=TRUE,PACKAGE="pec")$pec } }) noinfPredErr }) NoInfErrMat <- lapply(1:NF,function(f){ do.call("rbind",lapply(NoInfErrList,function(x){ x[[f]] }))}) NoInfErr <- lapply(NoInfErrMat,colMeans) names(NoInfErr) <- names(object) } } # }}} # {{{--------------k-fold and leave-one-out CrossValidation----------------------- if (splitMethod$internal.name %in% c("crossval","loocv")){ kCV <- kFoldCrossValidation(object=object,data=data,Y=Y,status=status,event=event,times=times,cause=cause,ipcw=ipcw,splitMethod=splitMethod,giveToModel=model.args,predictHandlerFun=predictHandlerFun,keep=keep.matrix,verbose=verbose) CrossValErr <- kCV$CrossValErr if (keep.matrix && B>1) CrossValErrMat <- kCV$CrossValErrMat } # }}} # {{{ ----------------------BootstrapCrossValidation---------------------- if (splitMethod$internal.name %in% c("Boot632plus","BootCv","Boot632")){ if (verbose==TRUE){ message("Split sample loop (B=",B,")") } if (missing(testTimes)){ testTimes <- NULL } BootCv <- bootstrapCrossValidation(object=object, data=data, Y=Y, status=status, event=event, times=times, cause=cause, ipcw=ipcw, ipcw.refit=ipcw.refit, ipcw.call=ipcw.call, splitMethod=splitMethod, multiSplitTest=multiSplitTest, testIBS=testIBS, 
testTimes=testTimes, confInt=confInt, confLevel=confLevel, getFromModel=model.parms, giveToModel=model.args, predictHandlerFun=predictHandlerFun, keepMatrix=keep.matrix, keepResiduals=keep.residuals, verbose=verbose, savePath=savePath, slaveseed=slaveseed) BootstrapCrossValErr <- BootCv$BootstrapCrossValErr Residuals <- BootCv$Residuals names(BootstrapCrossValErr) <- names(object) if (multiSplitTest==TRUE){ comparisons <- allComparisons(names(object)) multiSplitTestResults <- list(testIBS=testIBS,B=B,M=M,N=N,testTimes=testTimes) multiSplitTestResults$Comparisons <- lapply(1:length(comparisons),function(cc){ if (length(testTimes)>0){ allPairwisePvaluesTimes <- do.call("rbind",lapply(BootCv$testedResid,function(b){ b$pValue[[cc]]})) out <- list(pValueTimes=apply(allPairwisePvaluesTimes,2,median)) if (keep.pvalues==TRUE){ out$allPairwisePvaluesTimes <- allPairwisePvaluesTimes } } else out <- NULL if(length(testIBS)>0){ allPairwisePvaluesIBS <- sapply(BootCv$testedResid,function(b){ b$IBSpValue[[cc]] }) out$pValueIBS <- median(allPairwisePvaluesIBS) } if (keep.pvalues==TRUE){ out$allPairwisePvaluesIBS <- allPairwisePvaluesIBS} out }) names(multiSplitTestResults$Comparisons) <- names(comparisons) ## multiSplitTest$splitMethod <- splitMethod class(multiSplitTestResults) <- "multiSplitTest" } ## upperLimits <- lapply(BootCv$testedResid,function(x){x[,1:length(testTimes)]}) ## if (testIBS==TRUE){ ## wtestIBSpValues <- do.call("cbind",apply(BootCv$testedResid,function(x){x[,length(testTimes)+1]})) ## } ## wtestIBSupper <- BootCv$testedResid$wtestIBSupper ## } if (keep.matrix==TRUE){ BootstrapCrossValErrMat <- BootCv$BootstrapCrossValErrMat names(BootstrapCrossValErr) <- names(object) } } # }}} # {{{ Bootstrap .632 if (splitMethod$internal.name=="Boot632"){ B632Err <- lapply(1:NF,function(f){ .368 * AppErr[[f]] + .632 * BootstrapCrossValErr[[f]] }) names(B632Err) <- names(object) } # }}} # {{{ Bootstrap .632+ if (splitMethod$internal.name=="Boot632plus"){ B632plusErr <- 
lapply(1:NF,function(f){ Err1 <- pmin(BootstrapCrossValErr[[f]],NoInfErr[[f]]) overfit <- (Err1 - AppErr[[f]]) / (NoInfErr[[f]] - AppErr[[f]]) overfit[!(Err1>AppErr[[f]])] <- 0 w <- .632 / (1 - .368 * overfit) B632plusErr <- (1-w) * AppErr[[f]] + w * Err1 B632plusErr ## w[NoInfErr<=BootstrapCrossValErr] <- 1 ## B632plus.error <- (1-w) * AppErr + w * BootstrapCrossValErr }) names(B632plusErr) <- names(object) } # }}} # {{{ prepare output out <- switch(splitMethod$internal.name, "noPlan"=list("AppErr"=AppErr), "Boot632plus"=list("AppErr"=AppErr,"BootCvErr"=BootstrapCrossValErr,"NoInfErr"=NoInfErr,"Boot632plusErr"=B632plusErr), "Boot632"=list("AppErr"=AppErr,"BootCvErr"=BootstrapCrossValErr,"Boot632Err"=B632Err), "BootCv"=list("AppErr"=AppErr,"BootCvErr"=BootstrapCrossValErr), "loocv"=list("AppErr"=AppErr,"loocvErr"=CrossValErr), "crossval"=list("AppErr"=AppErr,"crossvalErr"=CrossValErr), "noinf"=list("AppErr"=AppErr,"NoInfErr"=NoInfErr)) observed.maxtime <- sapply(out,function(x){ ## lapply(x,function(y){times[length(y)-sum(is.na(y))-1]}) lapply(x,function(y){times[length(y)-sum(is.na(y))]}) }) minmaxtime <- min(unlist(observed.maxtime)) if (multiSplitTest==TRUE){ out <- c(out,list(multiSplitTest=multiSplitTestResults)) } if (keep.residuals==TRUE){ out <- c(out,list(Residuals=Residuals)) } if (keep.matrix==TRUE && splitMethod$internal.name!="noPlan"){ if (splitMethod$internal.name %in% c("crossval","loocv")){ if (B>1) out <- c(out,list("CrossValErrMat"=CrossValErrMat)) } else{ if (splitMethod$internal.name!="noinf") out <- c(out,list("BootstrapCrossValErrMat"=BootstrapCrossValErrMat)) } } if (!is.na(fillChar)) out <- lapply(out,function(o){ o[is.na(o)] <- fillChar o }) if (!is.null(model.parms)) out <- c(out,list("ModelParameters"=BootCv$ModelParameters)) if (!keep.index) splitMethod$index <- NULL n.risk <- N - prodlim::sindex(Y,times) # }}} # {{{ put out if(keep.models==TRUE){ outmodels <- object } else{ outmodels <- names(object) names(outmodels) <- names(object) } 
out <- c(out, list(call=theCall, response=model.response(m), time=times, ## ipcw.fit=as.character(ipcw$fit$call), n.risk=n.risk, models=outmodels, maxtime=maxtime, observed.maxtime=observed.maxtime, minmaxtime=minmaxtime, reference=reference, start=min(times), cens.model=cens.model, exact=exact, splitMethod=splitMethod)) ## if (verbose==TRUE && splitMethod$internal.name %in% c("BootCv","Boot632","Boot632plus","crossval","loocv")) cat("\n") class(out) <- "pec" out # }}} } pec/R/ConfInt.Cindex.R0000755000176200001440000000215113571203266014125 0ustar liggesusersConfInt.Cindex <- function(x,times,ref=1,level=.95,digits=3,...){ lower <- (1-level)/2 upper <- 1-lower # median <- .5 oob=x$BootCvCindexList if (is.null(oob)) stop("Out of bag matrix missing. Set 'keep.matrix' to TRUE.") ttt=x$time mmm <- names(oob) at <- prodlim::sindex(jump.times=ttt,eval.times=times) meanOob <- do.call("cbind",x$PredCindex) out <- lapply(at,function(a){ meanDiff <- meanOob[a,ref]-meanOob[a,-ref] aResult <- do.call("cbind",lapply(oob,function(x){x[a,]})) aref <- aResult[,ref] adiff <- data.frame(aref-aResult[,-ref]) aCI <- do.call("rbind",lapply(adiff,function(x){ # quantile(x,c(median,lower,upper),na.rm=TRUE) quantile(x,c(lower,upper),na.rm=TRUE) })) a.out <- cbind(meanDiff,aCI) colnames(a.out) <- c("diff",paste(c("lower","upper"),level*100,sep=".")) rownames(a.out) <- paste(mmm[ref],mmm[-ref],sep=" vs ") a.out }) names(out) <- paste("time:",times) lapply(1:length(out),function(i){ cat("\n\n") cat(names(out)[i]) cat("\n") print(out[[i]],digits=digits)}) class(out) <- "CiCindex" invisible(out) } pec/R/print.splitMethod.R0000644000176200001440000000134713571203267015007 0ustar liggesusers##' @export print.splitMethod <- function(x,...){ if (x$name=="no plan") return(cat("\nNo data splitting: either apparent or independent test sample performance\n")) cat("\nMethod for estimating the prediction error:\n") if (x$internal.name=="crossval"){ cat("\n",x$name,"\n\n") cat("Repeated: 
",x$B,ifelse(x$B==1," time","times"),"\n") } else{ if (x$internal.name=="loocv"){ cat("\n",x$name,"\n\n") } else{ cat("\nBootstrap cross-validation\n\n") if (x$Mobject$maxtime)) { warning(paste("You asked to integrate until times where prediction error curves are not defined.", format(object$maxtime,nsmall=2,digits=2))) times <- times[times<=object$maxtime] } ## if (!(object$exact)) ## warning("Exact Only ", length(object.times)," time point",ifelse(length(times)==1,"","s")," used for computation of ") ## time range if (missing(start)) start <- object$start # }}} # {{{ what errors if (missing(what) || is.null(what)){ what <- grep(c("Err$"),names(object),value=TRUE) } # }}} # {{{ for each element of what: evaluate crps at times out <- lapply(what,function(w){ est <- object[[w]][models] y <- sapply(times,function(t){ intx <- sapply(est, function(y){ Dint(x=object.times, y=y, range=c(start,t)) }) }) if (!is.null(dim(y))){ tnames <- paste("time=",round(times,1),sep="") tnames[times<1] <- paste("time=",signif(times[times<1],2),sep="") colnames(y) <- paste("IBS[",start,";",tnames,")",sep="") y} else{ y } }) # }}} # {{{ prepare output NW <- length(what) NT <- length(times) if (NW==1) out <- out[[1]] else names(out) <- what if (NT==1){ if(NW>1){ out <- do.call("cbind",out) colnames(out) <- what } } # }}} class(out) <- "crps" out } pec/R/print.method.R0000755000176200001440000000130113571203267013764 0ustar liggesusers##' @export print.method <- function(x,...){ if (x$name=="no plan") return(cat("\nNo data splitting: either apparent or independent test sample performance\n")) cat("\nMethod for estimating the prediction error:\n") if (x$internal.name=="crossval"){ cat("\n",x$name,"\n\n") cat("Repeat: ",x$B,"\n") } else{ if (x$internal.name=="loocv"){ cat("\n",x$name,"\n\n") } else{ cat("\nBootstrap cross-validation\n\n") if (x$M0){ cat("\nMatrix of time point wise p-values:\n\n") if (NT>5){ showTimes <- sort(sample(x$testTimes)) showTimePos <- 
prodlim::sindex(jump.times=x$testTimes,eval.times=showTimes) } else{ showTimes <- x$testTimes showTimePos <- 1:NT } mat <- do.call("rbind",lapply(x$Comparisons,function(comp){ format.pval(comp$pValueTimes[showTimePos],digits=pdigits,eps=eps) })) colnames(mat) <- paste("t=",showTimes) print(mat,quote=FALSE,...) } else{ mat <- NULL } invisible(mat) } pec/R/print.calibrationPlot.R0000644000176200001440000000517613571203267015645 0ustar liggesusers### print.calibrationPlot.R --- #---------------------------------------------------------------------- ## author: Thomas Alexander Gerds ## created: Oct 4 2015 (09:49) ## Version: ## last-updated: Oct 5 2015 (10:32) ## By: Thomas Alexander Gerds ## Update #: 28 #---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: #---------------------------------------------------------------------- ## ### Code: ##' @author Thomas A. Gerds ##' @export print.calibrationPlot <- function(x,...){ if (x$model.type=="survival"){ estring <- paste0("\n - a total of ", x$summary$Event[1], " were observed to have an event,\n - a total of ", x$summary$Lost, " were lost to follow-up.") }else{ estring <- paste0("\n - a total of ", x$summary$Event[1], " were observed to have the event of interest (cause: ",x$cause,"),\n - a total of ", sum(x$summary$Event[-1]), " had a competing risk ", " \n - a total of ", x$summary$Lost, " were lost to follow-up.") } cat("\nCalibration of ",ifelse(x$model.type=="survival" && x$type=="survival","survival","risk")," predictions for ", x$summary$n, " subjects.\n\nUntil time ", x$time, " a total of ",x$summary$Event.free, " were observed event-free, ", estring, "\n", sep="") if (x$method=="quantile"){ cat("\nAverage predictions and outcome in prediction quantiles:\n\n") print(x$plotFrames,...) 
}else{ cat("\nSummary of predictions and outcome:\n") nix <- lapply(1:length(x$plotFrames), function(n){ cat("\n",names(x$plotFrames)[[n]],":\nOutcome:\n",sep="") print(summary(x$plotFrames[[1]][,1])) cat("\nPredictions:\n",sep="") print(summary(x$plotFrames[[1]][,2])) }) } cat("\nOutcome frequencies (Obs) were obtained with the ", ifelse(x$pseudo, "jackknife pseudo-value method ", ifelse(x$model.type=="survival","Kaplan-Meier method.","Aalen-Johansen method.")), "\n", sep="") } #---------------------------------------------------------------------- ### print.calibrationPlot.R ends here pec/R/survest.cox.aalen.R0000755000176200001440000000223613571203267014743 0ustar liggesusers"survest.cox.aalen" <- function(fit,newdata,times,...){ ## The time-constant effects first const <- c(fit$gamma) names(const) <- substr(dimnames(fit$gamma)[[1]],6,nchar(dimnames(fit$gamma)[[1]])-1) constant.part <- t(newdata[,names(const)])*const constant.part <- exp(colSums(constant.part)) ## Then extract the time-varying effects time.coef <- data.frame(fit$cum) ntime <- nrow(time.coef) fittime <- time.coef[,1,drop=TRUE] ntimevars <- ncol(time.coef)-2 time.vars <- cbind(1,newdata[,names(time.coef)[-(1:2)],drop=FALSE]) nobs <- nrow(newdata) time.part <- .C("survest_cox_aalen",timehazard=double(ntime*nobs),as.double(unlist(time.coef[,-1])),as.double(unlist(time.vars)),as.integer(ntimevars+1),as.integer(nobs),as.integer(ntime),PACKAGE="pec")$timehazard time.part <- matrix(time.part, ncol=ntime, nrow=nobs, dimnames=list(1:nobs,paste("TP",1:ntime,sep=""))) surv <- pmin(exp(-time.part*constant.part),1) if (missing(times)) times <- sort(unique(fittime)) pred <- surv[,prodlim::sindex(fittime,times)] class(pred) <- c("survest","cox.aalen") pred } pec/R/internalTalk.R0000755000176200001440000000045313571203267014010 0ustar liggesusersinternalTalk <- function(x,y,sign="'"){ if (y>100){ if (y<500){ if (x %in% seq(0,y,10)) message(x) } else{ if (x %in% seq(0,y,100)) message(x) } } else{ if (y>10){ if (x 
%in% seq(0,y,10)) message(x) } else if (y>1) message(x) } } pec/R/Dint.R0000755000176200001440000000105713571203266012256 0ustar liggesusersDint <- function(x,y,range,restrictNonMissing=FALSE){ if (is.null(range)) range=c(x[1],x[length(x)]) ## integrate a step function f with ## values y=f(x) between range[1] and range[2] start <- max(range[1],min(x)) Stop <- min(range[2],max(x)) if ((Stop-start)<=0) return(0) else{ Y=y[x>=start & x=start & x0 if (sum(failed)>0){ cat("\nWarning: In some bootstrap samples the model failed:\n") print(sapply(x$failed[failed],table)) } # }}} # {{{ echo test results if (!is.null(x$multiSplitTest)){ cat("\n",paste(rep("_",options()$width/2),collapse=""),"\n") print(x$multiSplitTest) } # }}} invisible(x) } pec/R/predictSurvProb.FastBw.R0000755000176200001440000000141713571203267015703 0ustar liggesusersselectCox <- function(formula,data,rule="aic"){ ## require(rms) ## require(prodlim) fit <- rms::cph(formula, data, surv=TRUE) bwfit <- rms::fastbw(fit,rule=rule) if (length(bwfit$names.kept)==0){ newform <- update(formula,".~1") newfit <- prodlim(newform,data=data) } else{ newform <- update(formula,paste(".~",paste(bwfit$names.kept,collapse="+"))) ## newform <- reformulate(bwfit$names.kept, formula[[2]]) newfit <- cph(newform,data, surv=TRUE) } out <- list(fit=newfit,In=bwfit$names.kept) out$call <- match.call() class(out) <- "selectCox" out } ##' @export predictSurvProb.selectCox <- function(object,newdata,times,...){ predictSurvProb(object[[1]],newdata=newdata,times=times,...) } pec/R/calPlot.R0000755000176200001440000010430213754470201012751 0ustar liggesusers#' Calibration plots for right censored data #' #' Calibration plots for risk prediction models in right censored survival and #' competing risks data #' #' For method "nne" the optimal bandwidth with respect to is obtained with the #' function \code{\link{dpik}} from the package \code{KernSmooth} for a box #' kernel function. 
#' #' @param object A named list of prediction models, where allowed #' entries are (1) R-objects for which a \link{predictSurvProb} method #' exists (see details), (2) a \code{call} that evaluates to such an #' R-object (see examples), (3) a matrix with predicted probabilities #' having as many rows as \code{data} and as many columns as #' \code{times}. For cross-validation all objects in this list must #' include their \code{call}. #' @param time The evaluation time point at predicted event #' probabilities are plotted against pseudo-observed event status. #' @param formula A survival or event history formula. The left hand #' side is used to compute the expected event status. If #' \code{formula} is \code{missing}, try to extract a formula from the #' first element in object. #' @param data A data frame in which to validate the prediction models #' and to fit the censoring model. If \code{data} is missing, try to #' extract a data set from the first element in object. #' @param splitMethod Defines the internal validation design: #' #' \code{none/noPlan}: Assess the models in the give \code{data}, usually #' either in the same data where they are fitted, or in independent test data. #' #' \code{BootCv}: Bootstrap cross validation. The prediction models #' are trained on \code{B} bootstrap samples, that are either drawn #' with replacement of the same size as the original data or without #' replacement from \code{data} of the size \code{M}. The models are #' assessed in the observations that are NOT in the bootstrap sample. #' @param B The number of cross-validation steps. #' @param M The size of the subsamples for cross-validation. #' @param pseudo Logical. Determines the method for estimating expected event status: #' #' \code{TRUE}: Use average pseudo-values. \code{FALSE}: Use #' the product-limit estimate, i.e., apply the Kaplan-Meier method for #' right censored survival and the Aalen-Johansen method for right #' censored competing risks data. 
#' @param type Either "risk" or "survival". #' @param showPseudo If \code{TRUE} the #' pseudo-values are shown as dots on the plot (only when \code{pseudo=TRUE}). #' @param pseudo.col Colour for pseudo-values. #' @param pseudo.pch Dot type (see par) for pseudo-values. #' @param method The method for estimating the calibration curve(s): #' #' \code{"nne"}: The expected event status is obtained in the nearest #' neighborhood around the predicted event probabilities. #' #' \code{"quantile"}: The expected event status is obtained in groups #' defined by quantiles of the predicted event probabilities. #' @param round If \code{TRUE} predicted probabilities are rounded to #' two digits before smoothing. This may have a considerable effect on #' computing efficiency in large data sets. #' @param bandwidth The bandwidth for \code{method="nne"} #' @param q The number of quantiles for \code{method="quantile"} and \code{bars=TRUE}. #' @param bars If \code{TRUE}, use barplots to show calibration. #' @param hanging Barplots only. If \code{TRUE}, hang bars corresponding to observed frequencies #' at the value of the corresponding prediction. #' @param names Barplots only. Names argument passed to \code{names.arg} of \code{barplot}. #' @param showFrequencies Barplots only. If \code{TRUE}, show frequencies above the bars. #' @param jack.density Gray scale for pseudo-observations. #' @param plot If \code{FALSE}, do not plot the results, just return a plottable object. #' @param add If \code{TRUE} the line(s) are added to an existing #' plot. #' @param diag If \code{FALSE} no diagonal line is drawn. #' @param legend If \code{FALSE} no legend is drawn. #' @param axes If \code{FALSE} no axes are drawn. #' @param xlim Limits of x-axis. #' @param ylim Limits of y-axis. #' @param xlab Label for y-axis. #' @param ylab Label for x-axis. #' @param col Vector with colors, one for each element of #' object. Passed to \code{\link{lines}}. 
#' @param lwd Vector with line widths, one for each element of #' object. Passed to \code{\link{lines}}. #' @param lty lwd Vector with line style, one for each element of #' object. Passed to \code{\link{lines}}. #' @param pch Passed to \code{\link{points}}. #' @param cause For competing risks models, the cause of failure or #' event of interest #' @param percent If TRUE axes labels are multiplied by 100 and thus #' interpretable on a percent scale. #' @param giveToModel List of with exactly one entry for each entry in #' \code{object}. Each entry names parts of the value of the fitted #' models that should be extracted and added to the value. #' @param na.action Passed to \code{\link{model.frame}} #' @param cores Number of cores for parallel computing. Passed as #' value of argument \code{mc.cores} to \code{\link{mclapply}}. #' @param verbose if \code{TRUE} report details of the progress, #' e.g. count the steps in cross-validation. #' @param cex Default cex used for legend and labels. #' @param ... Used to control the subroutines: plot, axis, lines, barplot, #' legend. See \code{\link{SmartControl}}. #' @return list with elements: time, pseudoFrame and bandwidth (NULL for method #' quantile). 
#' @keywords survival ##' @examples ##' ##' library(prodlim) ##' library(lava) ##' library(riskRegression) ##' library(survival) ##' # survival ##' dlearn <- SimSurv(40) ##' dval <- SimSurv(100) ##' f <- coxph(Surv(time,status)~X1+X2,data=dlearn,x=TRUE,y=TRUE) ##' cf=calPlot(f,time=3,data=dval) ##' print(cf) ##' plot(cf) ##' ##' g <- coxph(Surv(time,status)~X2,data=dlearn,x=TRUE,y=TRUE) ##' cf2=calPlot(list("Cox regression X1+X2"=f,"Cox regression X2"=g), ##' time=3, ##' type="risk", ##' data=dval) ##' print(cf2) ##' plot(cf2) ##' calPlot(f,time=3,data=dval,type="survival") ##' calPlot(f,time=3,data=dval,bars=TRUE,pseudo=FALSE) ##' calPlot(f,time=3,data=dval,bars=TRUE,type="risk",pseudo=FALSE) ##' ##' ## show a red line which follows the hanging bars ##' calPlot(f,time=3,data=dval,bars=TRUE,hanging=TRUE) ##' a <- calPlot(f,time=3,data=dval,bars=TRUE,hanging=TRUE,abline.col=NULL) ##' lines(c(0,1,ceiling(a$xcoord)), ##' c(a$offset[1],a$offset,a$offset[length(a$offset)]), ##' col=2,lwd=5,type="s") ##' ##' calPlot(f,time=3,data=dval,bars=TRUE,type="risk",hanging=TRUE) ##' ##' set.seed(13) ##' m <- crModel() ##' regression(m, from = "X1", to = "eventtime1") <- 1 ##' regression(m, from = "X2", to = "eventtime1") <- 1 ##' m <- addvar(m,c("X3","X4","X5")) ##' distribution(m, "X1") <- binomial.lvm() ##' distribution(m, "X4") <- binomial.lvm() ##' d1 <- sim(m,100) ##' d2 <- sim(m,100) ##' csc <- CSC(Hist(time,event)~X1+X2+X3+X4+X5,data=d1) ##' fgr <- FGR(Hist(time,event)~X1+X2+X3+X4+X5,data=d1,cause=1) ##' if ((requireNamespace("cmprsk",quietly=TRUE))){ ##' predict.crr <- cmprsk:::predict.crr ##' cf3=calPlot(list("Cause-specific Cox"=csc,"Fine-Gray"=fgr), ##' time=5, ##' legend.x=-0.3, ##' legend.y=1.35, ##' ylab="Observed event status", ##' legend.legend=c("Cause-specific Cox regression","Fine-Gray regression"), ##' legend.xpd=NA) ##' print(cf3) ##' plot(cf3) ##' ##' b1 <- calPlot(list("Fine-Gray"=fgr),time=5,bars=TRUE,hanging=FALSE) ##' print(b1) ##' plot(b1) ##' ##' 
calPlot(fgr,time=5,bars=TRUE,hanging=TRUE) ##'} ##' #' @author Thomas Alexander Gerds \email{tag@@biostat.ku.dk} #' @export calPlot <- function(object, time, formula, data, splitMethod="none", B=1, M, pseudo, type, showPseudo, pseudo.col=NULL, pseudo.pch=NULL, method="nne", round=TRUE, bandwidth=NULL, q=10, bars=FALSE, hanging=FALSE, names="quantiles", showFrequencies=FALSE, jack.density=55, plot=TRUE, add=FALSE, diag=!add, legend=!add, axes=!add, xlim=c(0,1), ylim=c(0,1), xlab, ylab, col, lwd, lty, pch, cause=1, percent=TRUE, giveToModel=NULL, na.action=na.fail, cores=1, verbose=FALSE, cex=1, ...){ if (missing(pseudo)){ if(method=="quantiles"||bars==TRUE) pseudo <- FALSE else pseudo <- TRUE } if (missing(showPseudo)) showPseudo <- ifelse(add||(pseudo!=FALSE),FALSE,TRUE) # {{{ find number of objects and lines cobj=class(object)[[1]] if (cobj!="list"){ object <- list(object) } if (is.null(names(object))) names(object) <- paste0("Model.",1:length(object)) if (bars){ method="quantile" if (!(length(object)==1)) stop(paste0("Barplots work only for one prediction at a time. Provided are ",length(object), "predictions")) } if (is.null(names(object))){ names(object) <- sapply(object,function(o)class(o)[1]) names(object) <- make.names(names(object),unique=TRUE) } else{ names(object)[(names(object)=="")] <- sapply(object[(names(object)=="")],function(o)class(o)[1]) } NF <- length(object) # }}} # {{{ lines types if (missing(lwd)) lwd <- rep(3,NF) if (missing(col)) { if (bars) col <- c("grey90","grey30") else col <- 1:NF } if (missing(lty)) lty <- rep(1, NF) if (missing(pch)) pch <- rep(1, NF) if (length(lwd) < NF) lwd <- rep(lwd, NF) if (length(lty) < NF) lty <- rep(lty, NF) if (length(col) < NF) col <- rep(col, NF) if (length(pch) < NF) pch <- rep(pch, NF) # }}} # {{{ data & formula if (missing(data)){ data <- eval(object[[1]]$call$data) if (match("data.frame",class(data),nomatch=0)==0) stop("Argument data is missing.") else if (verbose) warning("Argument data is missing. 
I use the data from the call to the first model instead.") } if (missing(formula)){ if (length(grep("~",as.character(object[[1]]$call$formula)))==0){ stop(paste("Argument formula is missing and first model has no usable formula:",as.character(object[[1]]$call$formula))) } else{ ftry <- try(formula <- eval(object[[1]]$call$formula),silent=TRUE) if ((class(ftry)[1]=="try-error") || match("formula",class(formula),nomatch=0)==0) stop("Argument formula is missing and first model has no usable formula.") else if (verbose) warning("Formula missing. Using formula from first model") } } m <- model.frame(formula,data,na.action=na.action) response <- model.response(m) if (match("Surv",class(response),nomatch=FALSE)) model.type <- "survival" else model.type <- attr(response,"model") if (is.null(model.type) & length(unique(response))==2) stop("This function works only for survival and competing risks models.") ## model.type <- "binary" if (missing(type)) type <- ifelse(model.type=="survival","survival","risk") if (missing(ylab)) if (bars) ylab="" else ylab <- ifelse(type=="survival","Observed survival frequencies","Observed event frequencies") if (type=="survival" && !(model.type %in% c("survival","binary"))) stop(paste0("Type survival works only in survival or binary outcome models. 
This is a ",model.type, " model")) if (!(model.type=="binary")){ neworder <- order(response[,"time"],-response[,"status"]) response <- response[neworder,,drop=FALSE] Y <- response[,"time"] ## status <- response[,"status"] data <- data[neworder,] # }}} # {{{ prediction timepoint if (missing(time)) time <- median(Y) else if (length(time)>1) stop("Please specify only one time point.") } # }}} # {{{ compute pseudo-values # require(pseudo) # jack=pseudosurv(time=Y,event=status,tmax=time)[[3]] predictHandlerFun <- switch(model.type, "binary"="predictStatusProb", "competing.risks"="predictEventProb", "survival"="predictSurvProb") if (pseudo==FALSE && splitMethod!="none") stop(paste0("Split method ",splitMethod," is only implemented for : 'pseudo=TRUE'.")) if (model.type=="binary") if (is.factor(response)) jack <- as.numeric(response==levels(response)[2]) else jack <- as.numeric(response) ## ==levels(response)[1]) else{ if (pseudo==TRUE){ margForm <- update(formula,paste(".~1")) margFit <- prodlim::prodlim(margForm,data=data) jack <- prodlim::jackknife(margFit,cause=cause,times=time) }else{## prodlim in strata defined by predictions jack <- NULL } } # }}} # {{{ smartControl axis1.DefaultArgs <- list(side=1,las=1,at=seq(0,ylim[2],ylim[2]/4)) axis2.DefaultArgs <- list(side=2,las=2,at=seq(0,ylim[2],ylim[2]/4),mgp=c(4,1,0)) if (bars){ legend.DefaultArgs <- list(legend=names(object),col=col,cex=cex,bty="n",x="topleft") names.DefaultArgs <- list(cex=.7*par()$cex,y=c(-abs(diff(ylim))/15,-abs(diff(ylim))/25)) frequencies.DefaultArgs <- list(cex=.7*par()$cex,percent=FALSE,offset=0) } else{ legend.DefaultArgs <- list(legend=names(object), lwd=lwd, col=col, lty=lty, cex=cex, bty="n", y.intersp=1.3, x="topleft") } if(bars){ if (type=="survival") legend.DefaultArgs$legend <- c("Predicted survival","Observed frequencies") else legend.DefaultArgs$legend <- c("Predicted risks","Observed frequencies") } lines.DefaultArgs <- list(type="l") abline.DefaultArgs <- list(lwd=1,col="red") if 
(missing(ylim)){ if (showPseudo && !bars){ ylim <- c(min(jack),max(jack)) } else ylim <- c(0,1) } if (missing(xlim)){ xlim <- c(0,1) } if (missing(xlab)) if (bars) xlab <- ifelse(type=="survival","Survival groups","Risk groups") else xlab <- ifelse(type=="survival","Predicted survival probability","Predicted event probability") plot.DefaultArgs <- list(x=0, y=0, type = "n", ylim = ylim, xlim = xlim, ylab=ylab, xlab=xlab) barplot.DefaultArgs <- list(ylim = ylim, col=col, axes=FALSE, ylab=ylab, xlab=xlab, beside=TRUE, legend.text=NULL, cex.axis=cex, cex.lab=par()$cex.lab, cex.names=cex) if (bars) control <- prodlim::SmartControl(call= list(...), keys=c("barplot","legend","axis2","abline","names","frequencies"), ignore=NULL, ignore.case=TRUE, defaults=list("barplot"=barplot.DefaultArgs, "abline"=abline.DefaultArgs, "legend"=legend.DefaultArgs, "names"=names.DefaultArgs, "frequencies"=frequencies.DefaultArgs, "axis2"=axis2.DefaultArgs), forced=list("abline"=list(h=0)), verbose=TRUE) else control <- prodlim::SmartControl(call= list(...), keys=c("plot","lines","legend","axis1","axis2"), ignore=NULL, ignore.case=TRUE, defaults=list("plot"=plot.DefaultArgs, "lines"=lines.DefaultArgs, "legend"=legend.DefaultArgs, "axis1"=axis1.DefaultArgs, "axis2"=axis2.DefaultArgs), forced=list("plot"=list(axes=FALSE), "axis1"=list(side=1)), verbose=TRUE) # }}} # {{{ splitmethod splitMethod <- resolvesplitMethod(splitMethod=splitMethod,B=B,N=NROW(data),M=M) k <- splitMethod$k B <- splitMethod$B N <- splitMethod$N NF <- length(object) # }}} # {{{ ---------------------------Apparent predictions--------------------------- apppred <- do.call("cbind", lapply(1:NF,function(f){ fit <- object[[f]] if (class(fit)[1] %in% c("numeric","double")) fit <- matrix(fit,ncol=1) apppred <- switch(model.type, "competing.risks"={ p <- as.vector(do.call(predictHandlerFun,list(fit,newdata=data,times=time,cause=cause))) if (class(fit)[[1]]%in% c("matrix","numeric")) p <- p[neworder] p }, "survival"={ p <- 
as.vector(do.call(predictHandlerFun,list(fit,newdata=data,times=time))) if (class(fit)[[1]]%in% c("matrix","numeric")) p <- p[neworder] p }, "binary"={ p <- do.call(predictHandlerFun,list(fit,newdata=data)) if (class(fit)[[1]]%in% c("matrix","numeric")) p <- p[neworder] p }) })) colnames(apppred) <- names(object) if(pseudo==TRUE) apppred <- data.frame(jack=jack,apppred) else apppred <- data.frame(apppred) if (splitMethod$internal.name %in% c("noPlan")){ predframe <- apppred } # }}} # {{{--------------k-fold and leave-one-out CrossValidation----------------------- if (splitMethod$internal.name %in% c("crossval","loocv")){ groups <- splitMethod$index[,1,drop=TRUE] cv.list <- lapply(1:k,function(g){ if (verbose==TRUE) internalTalk(g,k) id <- groups==g train.k <- data[!id,,drop=FALSE] val.k <- data[id,,drop=FALSE] model.pred <- lapply(1:NF,function(f){ extraArgs <- giveToModel[[f]] fit <- object[[f]] fit.k <- internalReevalFit(object=fit,data=train.k,step=paste("CV group=",k),silent=FALSE,verbose=verbose) switch(model.type, "competing.risks"={do.call(predictHandlerFun,list(object=fit.k,newdata=val.k,times=time,cause=cause))}, "survival"={ p <- do.call(predictHandlerFun,c(list(object=fit.k,newdata=val.k,times=time),extraArgs)) p }, "binary"={ p <- do.call(predictHandlerFun,list(object=fit.k,newdata=val.k)) p }) }) model.pred }) predframe <- do.call("cbind",lapply(1:NF,function(f){ pred <- do.call("rbind",lapply(cv.list,function(x)x[[f]])) if (splitMethod$internal.name!="loocv"){ pred <- pred[order(order(groups)),] } pred })) colnames(predframe) <- names(object) if(pseudo==TRUE) predframe <- cbind(data.frame(jack=jack),predframe) ## predframe <- na.omit(predframe) } # }}} # {{{ ----------------------BootstrapCrossValidation---------------------- if (splitMethod$internal.name %in% c("Boot632plus","BootCv","Boot632")){ if (splitMethod$internal.name %in% c("Boot632plus","Boot632")){ stop("Don't know how to do the 632(+) for the calibration curve.") } ResampleIndex <- 
splitMethod$index ## predframe <- do.call("rbind",lapply(1:B,function(b){ ## predframe <- matrix pred.list <- parallel::mclapply(1:B,function(b){ if (verbose==TRUE) internalTalk(b,B) jackRefit <- FALSE vindex.b <- match(1:N,unique(ResampleIndex[,b]),nomatch=0)==0 val.b <- data[vindex.b,,drop=FALSE] if (jackRefit){ margFit.b <- prodlim::prodlim(margForm,data=val.b) jack.b <- prodlim::jackknife(margFit.b,cause=cause,times=time) } else{ jack.b <- jack[match(1:N,unique(ResampleIndex[,b]),nomatch=0)==0] } train.b <- data[ResampleIndex[,b],,drop=FALSE] frame.b <- data.frame(jack=jack.b) bootpred <- do.call("cbind",lapply(1:NF,function(f){ fit <- object[[f]] fit.b <- internalReevalFit(object=fit,data=train.b,step=b,silent=FALSE,verbose=verbose) extraArgs <- giveToModel[[f]] try2predict <- try(pred.b <- switch(model.type, "competing.risks"={do.call(predictHandlerFun,list(object=fit.b,newdata=val.b,times=time,cause=cause))}, "survival"={ p <- do.call(predictHandlerFun,c(list(object=fit.b,newdata=val.b,times=time),extraArgs)) p }, "binary"={ p <- do.call(predictHandlerFun,list(object=fit.b,newdata=val.b)) p }),silent=TRUE) if (inherits(try2predict,"try-error")==TRUE){ rep(NA,NROW(val.b)) }else{ pred.b } })) colnames(bootpred) <- names(object) cbind(frame.b,bootpred) },mc.cores=cores) predframe <- do.call("rbind",pred.list) rm(pred.list) } # }}} # {{{ smoothing method <- match.arg(method,c("quantile","nne")) getXY <- function(f){ if(pseudo==TRUE){ p <- predframe[,f+1] jackF <- predframe[,1] }else{ p <- predframe[,f] } switch(method, "quantile"={ if (length(q)==1) groups <- quantile(p,seq(0,1,1/q)) else{ groups <- q } xgroups <- (groups[-(length(groups))]+groups[-1])/2 pcut <- cut(p,groups,include.lowest=TRUE) if (pseudo==TRUE){ plotFrame=data.frame(Pred=tapply(p,pcut,mean),Obs=pmin(1,pmax(0,tapply(jackF,pcut,mean)))) attr(plotFrame,"quantiles") <- groups plotFrame } else{ form.pcut <- update(formula,paste(".~pcut")) if ("data.table" %in% class(data)) pdata <- 
cbind(data[,all.vars(update(formula,".~1")),drop=FALSE,with=FALSE],pcut=pcut) else pdata <- cbind(data[,all.vars(update(formula,".~1")),drop=FALSE],pcut=pcut) y <- unlist(predict(f <- prodlim::prodlim(form.pcut,data=pdata), cause=cause, newdata=data.frame(pcut=levels(pcut)), times=time, type=ifelse(model.type=="survival","surv","cuminc"))) ## Is it ok to extrapolate into the future?? if (model.type=="survival") y[is.na(y)] <- min(y,na.rm=TRUE) else y[is.na(y)] <- max(y,na.rm=TRUE) plotFrame=data.frame(Pred=tapply(p,pcut,mean),Obs=y) attr(plotFrame,"quantiles") <- groups plotFrame } }, "nne"={ if (pseudo==TRUE){ ## Round probabilities to 2 digits ## to avoid memory explosion ... ## a difference in the 3 digit should ## not play a role for the patient. if (round==TRUE){ if (!is.null(bandwidth) && bandwidth>=1){ ## message("No need to round predicted probabilities to calculate calibration in the large") } else{ p <- round(p,2) } } p <- na.omit(p) if (no <- length(attr(p,"na.action"))) warning("calPlot: removed ",no," missing values in risk prediction.",call.=FALSE,immediate.=TRUE) if (is.null(bandwidth)){ if (length(p)>length(apppred[,f+1])){ bw <- prodlim::neighborhood(apppred[,f+1])$bandwidth }else{ bw <- prodlim::neighborhood(p)$bandwidth } } else{ bw <- bandwidth } if (bw>=1){ ## calibration in the large plotFrame <- data.frame(Pred=mean(p),Obs=mean(jackF)) } else{ nbh <- prodlim::meanNeighbors(x=p,y=jackF,bandwidth=bw) plotFrame <- data.frame(Pred=nbh$uniqueX,Obs=nbh$averageY) } attr(plotFrame,"bandwidth") <- bw plotFrame }else{ form.p <- update(formula,paste(".~p")) if ("data.table" %in% class(data)) pdata <- cbind(data[,all.vars(update(formula,".~1")),drop=FALSE,with=FALSE],p=p) else pdata <- cbind(data[,all.vars(update(formula,".~1")),drop=FALSE],p=p) y <- unlist(predict(prodlim::prodlim(form.p,data=pdata), cause=cause, newdata=data.frame(p=sort(p)), times=time, type=ifelse(type=="survival","surv","cuminc"))) plotFrame <- data.frame(Pred=sort(p),Obs=y) 
plotFrame } }) } plotFrames <- lapply(1:NF,function(f){getXY(f)}) names(plotFrames) <- names(object) # }}} # {{{ plot and/or invisibly output the results if (bars){ if (model.type=="survival" && type=="risk") plotFrames[[1]] <- plotFrames[[1]][NROW(plotFrames[[1]]):1,] if ((is.logical(names[1]) && names[1]==TRUE)|| names[1] %in% c("quantiles.labels","quantiles")){ qq <- attr(plotFrames[[1]],"quantiles") if (model.type=="survival" && type=="risk") qq <- rev(1-qq) if (names[1]=="quantiles.labels"){ pp <- seq(0,1,1/q) names <- paste0("(", sprintf("%1.0f",100*pp[-length(pp)]),",", sprintf("%1.0f",100*pp[-1]), ")\n", sprintf("%1.1f",100*qq[-length(qq)])," - ", sprintf("%1.1f",100*qq[-1])) } else names <- paste0(sprintf("%1.1f",100*qq[-length(qq)])," - ", sprintf("%1.1f",100*qq[-1])) } } summary <- list(n=NROW(data)) if (model.type%in%c("survival","competing.risks")) summary <- c(summary,list("Event"=table(response[response[,"status"]!=0 & response[,"time"]<=time,ifelse(model.type=="survival","status","event")]), "Lost"=sum(response[,"status"]==0 & response[,"time"]<=time), "Event.free"=NROW(response[response[,"time"]>time,]))) out <- list(plotFrames=plotFrames, predictions=apppred, time=time, cause=cause, pseudo=pseudo, summary=summary, control=control, legend=legend, bars=bars, diag=diag, add=add, legend=legend, names=names, method=method, model.type=model.type, type=type, axes=axes, percent=percent, hanging=hanging, showFrequencies=showFrequencies, col=col, ylim=ylim, xlim=xlim, ylab=ylab, xlab=xlab, lwd=lwd, lty=lty, pch=pch, lty=lty, NF=NF, pseudo.col=pseudo.col, pseudo.pch=pseudo.pch, showPseudo=showPseudo, jack.density=jack.density) if (method=="nne") out <- c(out,list(bandwidth=sapply(plotFrames, function(x)attr(x,"bandwidth")))) if (plot){ coords <- plot.calibrationPlot(out) out <- c(out,coords) } class(out) <- "calibrationPlot" invisible(out) # }}} } pec/R/internalReevalFit.R0000755000176200001440000000056013571203267014775 0ustar liggesusersinternalReevalFit 
<- function(object,data,step,silent=FALSE,verbose=FALSE){ object$call$data <- data try2fit <- try(refit <- eval(object$call),silent=silent) if (inherits(try2fit,"try-error")==TRUE){ if (verbose==TRUE) warning(paste("During bootstrapping: model ",class(object)," failed in step ",step),immediate.=TRUE) NULL } else refit } pec/R/CindexBootstrapCrossValidation.R0000644000176200001440000002552613571203266017521 0ustar liggesusersCindexBootstrapCrossValidation <- function(object, data, Y, status, event, eval.times, pred.times, cause, weights, ipcw.refit=FALSE, ipcw.call, tiedPredictionsIn, tiedOutcomeIn, tiedMatchIn, splitMethod, multiSplitTest, keepResiduals, testTimes, confInt, confLevel, getFromModel, giveToModel, predictHandlerFun, keepMatrix, verbose, savePath,slaveseed){ # {{{ initializing B <- splitMethod$B N <- splitMethod$N M <- splitMethod$M NT <- length(eval.times) NF <- length(object) ResampleIndex <- splitMethod$index # }}} step <- function(b,seed){ if (verbose==TRUE) internalTalk(b,B) # {{{ training and validation data vindex.b <- match(1:N,unique(ResampleIndex[,b]),nomatch=0)==0 Y.b <- Y[vindex.b] tindex.b <- match(Y.b,unique(Y.b)) val.b <- data[vindex.b,,drop=FALSE] ## browser() train.b <- data[ResampleIndex[,b],,drop=FALSE] ## if (b==1) print(train.b$days) ## if (b==1) print(val.b$days) NV=sum(vindex.b) # NROW(val.b) # }}} # {{{ IPCW ## if (ipcw.refit==TRUE){ ## ipcw.call.b.i <- ipcw.call$weight.i ## ipcw.call.b.j <- ipcw.call$weight.j ## ipcw.call.b.i$data <- val.b ## ipcw.call.b.j$data <- val.b ## ipcw.call.b.i$subjectTimes <- Y.b ## ipcw.call.b.j$subjectTimes <- Y.b ## ipcw.b.i <- do.call("ipcw",ipcw.call.b.i)$IPCW.subjectTimes ## ipcw.b.j <- do.call("ipcw",ipcw.call.b.j)$IPCW.times ## } ## else{ ipcw.b.i <- weights$weight.i[vindex.b] if (is.null(dim(weights$weight.j))){ ipcw.b.j <- weights$weight.j } else{ ipcw.b.j <- weights$weight.j[vindex.b,] } ## } # }}} # {{{ Building the models in training data if (!is.null(seed)) { set.seed(seed) ## 
message("seed:",seed) } trainModels <- lapply(1:NF,function(f){ fit.b <- internalReevalFit(object=object[[f]], data=train.b, step=b, silent=FALSE, verbose=verbose) ## fit.b$call <- object[[f]]$call fit.b }) # }}} # {{{ Saving the models? if (!is.null(savePath)){ nix <- lapply(1:NF,function(f){ fit.b <- trainModels[[f]] ## print(object.size(fit.b)) fit.b$formula <- NULL ## print(environment(fit.b$formula)) save(fit.b,file=paste(paste(savePath,"/",names(object)[f],"-bootstrap-",b,sep=""),".rda",sep="")) }) } # }}} # {{{ Extracting parameters? if (!is.null(getFromModel)){ ModelParameters <- lapply(1:NF,function(f){ getParms <- getFromModel[[f]] if (is.null(getParms)) trainModels[[f]][getParms] else NULL }) } # }}} # {{{ Check fits fitFailed <- lapply(trainModels,function(fit.b) (is.null(fit.b))) # }}} # {{{ Predicting the validation data predVal <- lapply(1:NF,function(f){ fit.b <- trainModels[[f]] extraArgs <- giveToModel[[f]] if (predictHandlerFun %in% c("predictEventProb","predictLifeYearsLost")){ try2predict <- try(pred.b <- do.call(predictHandlerFun,c(list(object=fit.b,newdata=val.b,times=pred.times,cause=cause),extraArgs))) } else{ try2predict <- try(pred.b <- do.call(predictHandlerFun,c(list(object=fit.b,newdata=val.b,times=pred.times),extraArgs))) } ## browser() ## print(pred.b[1:5]) if (inherits(try2predict,"try-error")==TRUE){ if (verbose==TRUE) warning(paste("During bootstrapping: prediction for model ",class(fit.b)," failed in step ",b),immediate.=TRUE) NULL} else{ pred.b } }) # }}} # {{{ Compute cindex for step b if (multiSplitTest==TRUE){ stop("not yet defined: residual test for cindex") Residuals <- lapply(predVal,function(pred.b){ if (is.null(pred.b)) NA else{ if (predictHandlerFun %in% c("predictEventProb","predictLifeYearsLost")){ 1 ## 
matrix(.C("pecResidualsCR",pec=double(NT),resid=double(NT*NV),as.double(Y[vindex.b]),as.double(status[vindex.b]),as.double(event[vindex.b]),as.double(times),as.double(pred.b),as.double(ipcwTimes.b),as.double(IPCW.subjectTimes.b),as.integer(NV),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred.b))),NAOK=TRUE,PACKAGE="pec")$resid,ncol=NT,byrow=FALSE) } else{ 1 ## matrix(.C("pecResiduals",pec=double(NT),resid=double(NT*NV),as.double(Y[vindex.b]),as.double(status[vindex.b]),as.double(times),as.double(pred.b),as.double(ipcwTimes.b),as.double(IPCW.subjectTimes.b),as.integer(NV),as.integer(NT),as.integer(ipcw$dim),as.integer(is.null(dim(pred.b))),NAOK=TRUE,PACKAGE="pec")$resid,ncol=NT,byrow=FALSE) } } }) names(Residuals) <- names(object) ## PredCindexStepB=lapply(Residuals,function(x){colMeans(x)}) PredCindexStepB=1 } else{ PredCindexStepB <- lapply(predVal,function(pred.b){ if (is.null(pred.b)) NA else{ if (predictHandlerFun %in% c("predictEventProb","predictLifeYearsLost")){ Step.b.CindexResult <- .C("ccr",cindex=double(NT),concA=double(NT),pairsA=double(NT),concB=double(NT),pairsB=double(NT),as.integer(tindex.b),as.double(Y.b),as.integer(status[vindex.b]),as.integer(event[vindex.b]),as.double(eval.times),as.double(ipcw.b.i),as.double(ipcw.b.j),as.double(pred.b),as.integer(sum(vindex.b)),as.integer(NT),as.integer(tiedPredictionsIn),as.integer(tiedOutcomeIn),as.integer(tiedMatchIn),as.integer(!is.null(dim(ipcw.b.j))),NAOK=TRUE,PACKAGE="pec") Step.b.Cindex <- Step.b.CindexResult$cindex Step.b.PairsA <- Step.b.CindexResult$pairsA Step.b.ConcordantA <- Step.b.CindexResult$concA Step.b.PairsB <- Step.b.CindexResult$pairsB Step.b.ConcordantB <- Step.b.CindexResult$concB list(Cindex.b=Step.b.Cindex,Pairs.b=list(A=Step.b.PairsA,B=Step.b.PairsB),Concordant.b=list(A=Step.b.ConcordantA,B=Step.b.ConcordantB)) } else{ cindexOut <- .C("cindexSRC", cindex=double(NT), conc=double(NT), pairs=double(NT), as.integer(tindex.b), as.double(Y.b), as.integer(status[vindex.b]), 
as.double(eval.times), as.double(ipcw.b.i), as.double(ipcw.b.j), as.double(pred.b), as.integer(sum(vindex.b)), as.integer(NT), as.integer(tiedPredictionsIn), as.integer(tiedOutcomeIn), as.integer(tiedMatchIn), as.integer(!is.null(dim(ipcw.b.j))), NAOK=TRUE, PACKAGE="pec") Cindex.b <- cindexOut$cindex Pairs.b <- cindexOut$pairs Concordant.b <- cindexOut$conc list(Cindex.b=Cindex.b,Pairs.b=Pairs.b,Concordant.b=Concordant.b) } } }) } # }}} # {{{ van de Wiel's test ## if (multiSplitTest==TRUE){ ## testedResid <- testResiduals(Residuals,times=times,testTimes=testTimes,rangeInt=testIBS,confInt=confInt,confLevel=confLevel) ## } # }}} # {{{ looping output ## if (multiSplitTest==TRUE) ## loopOut=list(PredCindexStepB=PredCindexStepB,testedResid=testedResid) ## else loopOut=list(PredCindexStepB=PredCindexStepB) ## if (keepResiduals==TRUE) ## loopOut=c(loopOut,list(Residuals=lapply(Residuals,function(R){ ## R[,prodlim::sindex(eval.times=testTimes,jump.times=times)] ## }))) if (!is.null(getFromModel)){ loopOut=c(loopOut,list(ModelParameters=ModelParameters)) } loopOut } ## }) b <- 1 ## if (require(foreach)){ if (missing(slaveseed)||is.null(slaveseed)) slaveseed <- sample(1:1000000,size=B,replace=FALSE) Looping <- foreach::foreach (b= 1:B) %dopar% step(b,slaveseed[[b]]) ## } ## else{ ## Looping <- lapply(1:B,function(b){step(b,seed=NULL)}) ## } # }}} # {{{ output ## ## ## 1. a list of NF matrices each with B (rows) and NT columns ## the prediction error curves ## ## if (verbose==TRUE && B>1) cat("\n") BootstrapCrossValCindexMat <- lapply(1:NF,function(f){ ## matrix with NT columns and b rows do.call("rbind",lapply(Looping,function(b){ c.b <- b$PredCindexStepB[[f]]$Cindex.b c.b ## pairs.b <- b$PredCindexStepB[[f]]$Pairs.b ## conc.b <- b$PredCindexStepB[[f]]$Concordant.b })) }) ## ## 2. 
a list of NF average out-of-bag prediction error curves ## with length NT ## BootstrapCrossValCindex <- lapply(BootstrapCrossValCindexMat,colMeans) out <- list(BootstrapCrossValCindex=BootstrapCrossValCindex) ## ## 3. the results of B residual tests ## ## print(str(Looping)) if (multiSplitTest==TRUE){ out$testedResid <- lapply(Looping,function(x)x$testedResid) } ## ## 4. model parameters ## if (!is.null(getFromModel)){ out$ModelParameters <- lapply(1:NF,function(f){ lapply(Looping,function(x)x$ModelParameters[[f]]) }) } ## ## 5. bootstrap crossvalidation results ## if (keepMatrix==TRUE) out$BootstrapCrossValCindexMat <- BootstrapCrossValCindexMat ## ## 6. residuals ## ## if (keepResiduals==TRUE){ ## out$Residuals <- lapply(1:NF,function(f){ ## bootResiduals <- lapply(Looping,function(b){ ## b$Residuals[[f]] ## }) ## names(bootResiduals) <- paste("testSample",1:B,sep=".") ## bootResiduals ## }) ## names(out$Residuals) <- names(object) ## } out # }}} }
pec/R/plot.Cindex.R0000755000176200001440000000130513571203267013544 0ustar liggesusers
##' @export
plot.Cindex <- function(x,
                        ylim = c(.4, 1),
                        xlim = c(0, x$maxtime),
                        abline = TRUE,
                        xlab = "Time",
                        ylab = "Concordance index",
                        ...) {
    ## Plot time-dependent concordance index curves stored in a Cindex object
    ## by delegating to plot.pec.
    ##
    ## Args:
    ##   x: object of class "Cindex" (as produced by pec::cindex).
    ##   ylim, xlim: plotting ranges; defaults show 0.4-1 over [0, x$maxtime].
    ##   abline: if TRUE, draw a horizontal reference line at 0.5
    ##     (the concordance of a completely uninformative prediction).
    ##   xlab, ylab: axis labels.
    ##   ...: further arguments passed through to plot.pec.
    ##
    ## Capture the full call (including ...) and evaluate it as a list in the
    ## caller's frame, so user-supplied graphical arguments reach plot.pec.
    argList <- match.call(expand.dots = TRUE)
    argList[[1]] <- as.name("list")
    argList <- eval(argList, parent.frame())
    ## Which estimate plot.pec should show: the apparent C-index when no
    ## resampling plan was used, otherwise e.g. "Boot632Cindex".
    ## FIX: xlab/ylab were accidentally enclosed in the switch() call by a
    ## misplaced parenthesis; they belong in the outer argument list (they were
    ## harmless there, being overwritten below, but misleading to read).
    argList <- c(list("what" = switch(x$splitMethod$internal.name,
                                      "noPlan" = {"AppCindex"},
                                      paste(x$splitMethod$internal.name, "Cindex", sep = "")),
                      xlab = xlab,
                      ylab = ylab),
                 argList)
    argList$ylim <- ylim
    argList$xlim <- xlim
    argList$ylab <- ylab
    argList$xlab <- xlab
    ## C-index curves are step functions evaluated on a grid; tell plot.pec
    ## not to treat them as exact jump-time curves.
    argList$x$exact <- FALSE
    do.call("plot.pec", argList)
    if (abline == TRUE)
        abline(h = .5, col = "gray", lty = 3, lwd = 3, xpd = FALSE)
}
pec/R/predictSurvProb.R0000755000176200001440000005724414131004306014507 0ustar liggesusers#' Predicting survival probabilities #' #' Function to extract survival probability predictions from various modeling
The most prominent one is the Cox regression model which can be #' fitted for example with `coxph' and with `cph'. #' #' The function predictSurvProb is a generic function that means it invokes #' specifically designed functions depending on the 'class' of the first #' argument. #' #' The function \code{pec} requires survival probabilities for each row in #' newdata at requested times. These probabilities are extracted from a fitted #' model of class \code{CLASS} with the function \code{predictSurvProb.CLASS}. #' #' Currently there are \code{predictSurvProb} methods for objects of class cph #' (library rms), coxph (library survival), aalen (library timereg), cox.aalen #' (library timereg), #' rpart (library rpart), product.limit (library prodlim), #' survfit (library survival), psm (library rms) #' #' @aliases predictSurvProb predictSurvProb.aalen #' predictSurvProb.riskRegression predictSurvProb.cox.aalen #' predictSurvProb.coxph predictSurvProb.cph predictSurvProb.default #' predictSurvProb.rfsrc predictSurvProb.matrix predictSurvProb.pecCtree #' predictSurvProb.pecCforest predictSurvProb.prodlim predictSurvProb.psm #' predictSurvProb.selectCox predictSurvProb.survfit #' predictSurvProb.pecRpart #' @usage #' \method{predictSurvProb}{aalen}(object,newdata,times,...) #' \method{predictSurvProb}{riskRegression}(object,newdata,times,...) #' \method{predictSurvProb}{cox.aalen}(object,newdata,times,...) #' \method{predictSurvProb}{cph}(object,newdata,times,...) #' \method{predictSurvProb}{coxph}(object,newdata,times,...) #' \method{predictSurvProb}{matrix}(object,newdata,times,...) #' \method{predictSurvProb}{selectCox}(object,newdata,times,...) #' \method{predictSurvProb}{pecCforest}(object,newdata,times,...) #' \method{predictSurvProb}{prodlim}(object,newdata,times,...) #' \method{predictSurvProb}{psm}(object,newdata,times,...) #' \method{predictSurvProb}{survfit}(object,newdata,times,...) #' \method{predictSurvProb}{pecRpart}(object,newdata,times,...) 
#' #' \method{predictSurvProb}{pecCtree}(object,newdata,times,...) #' @param object A fitted model from which to extract predicted survival #' probabilities #' @param newdata A data frame containing predictor variable combinations for #' which to compute predicted survival probabilities. #' @param times A vector of times in the range of the response variable, e.g. #' times when the response is a survival object, at which to return the #' survival probabilities. #' @param \dots Additional arguments that are passed on to the current method. #' @return A matrix with as many rows as \code{NROW(newdata)} and as many #' columns as \code{length(times)}. Each entry should be a probability and in #' rows the values should be decreasing. #' @note In order to assess the predictive performance of a new survival model #' a specific \code{predictSurvProb} S3 method has to be written. For examples, #' see the bodies of the existing methods. #' #' The performance of the assessment procedure, in particular for resampling #' where the model is repeatedly evaluated, will be improved by supressing in #' the call to the model all the computations that are not needed for #' probability prediction. For example, \code{se.fit=FALSE} can be set in the #' call to \code{cph}. #' @author Thomas A. Gerds \email{tag@@biostat.ku.dk} #' @seealso \code{\link{predict}},\code{\link{survfit}} #' @references Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). #' Evaluating Random Forests for Survival Analysis Using Prediction Error #' Curves. Journal of Statistical Software, 50(11), 1-23. 
DOI #' 10.18637/jss.v050.i11 #' @keywords survival ##' @examples ##' ##' # generate some survival data ##' library(prodlim) ##' set.seed(100) ##' d <- SimSurv(100) ##' # then fit a Cox model ##' library(rms) ##' coxmodel <- cph(Surv(time,status)~X1+X2,data=d,surv=TRUE) ##' ##' # Extract predicted survival probabilities ##' # at selected time-points: ##' ttt <- quantile(d$time) ##' # for selected predictor values: ##' ndat <- data.frame(X1=c(0.25,0.25,-0.05,0.05),X2=c(0,1,0,1)) ##' # as follows ##' predictSurvProb(coxmodel,newdata=ndat,times=ttt) ##' ##' # stratified cox model ##' sfit <- coxph(Surv(time,status)~strata(X1)+X2,data=d,,x=TRUE,y=TRUE) ##' predictSurvProb(sfit,newdata=d[1:3,],times=c(1,3,5,10)) ##' ##' ## simulate some learning and some validation data ##' learndat <- SimSurv(100) ##' valdat <- SimSurv(100) ##' ## use the learning data to fit a Cox model ##' library(survival) ##' fitCox <- coxph(Surv(time,status)~X1+X2,data=learndat,x=TRUE,y=TRUE) ##' ## suppose we want to predict the survival probabilities for all patients ##' ## in the validation data at the following time points: ##' ## 0, 12, 24, 36, 48, 60 ##' psurv <- predictSurvProb(fitCox,newdata=valdat,times=seq(0,60,12)) ##' ## This is a matrix with survival probabilities ##' ## one column for each of the 5 time points ##' ## one row for each validation set individual ##' ##' # Do the same for a randomSurvivalForest model ##' library(randomForestSRC) ##' rsfmodel <- rfsrc(Surv(time,status)~X1+X2,data=d) ##' predictSurvProb(rsfmodel,newdata=ndat,times=ttt) ##' ##' ## Cox with ridge option ##' f1 <- coxph(Surv(time,status)~X1+X2,data=learndat,x=TRUE,y=TRUE) ##' f2 <- coxph(Surv(time,status)~ridge(X1)+ridge(X2),data=learndat,x=TRUE,y=TRUE) ##' plot(predictSurvProb(f1,newdata=valdat,times=10), ##' pec:::predictSurvProb.coxph(f2,newdata=valdat,times=10), ##' xlim=c(0,1), ##' ylim=c(0,1), ##' xlab="Unpenalized predicted survival chance at 10", ##' ylab="Ridge predicted survival chance at 10") ##' 
##'
#' @export
predictSurvProb <- function(object, newdata, times, ...) {
    ## Generic: dispatch on the class of the fitted model object.
    UseMethod("predictSurvProb", object)
}

##' @export
predictSurvProb.default <- function(object, newdata, times, ...) {
    ## Fallback: no method is available for this model class.
    stop("No method for evaluating predicted probabilities from objects in class: ", class(object), call. = FALSE)
}

##' @export
predictSurvProb.numeric <- function(object, newdata, times, ...) {
    ## A numeric vector is taken as ready-made predictions, one value per row
    ## of newdata; only the number of rows can be validated here.
    if (NROW(object) != NROW(newdata))
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(object), " x ", NCOL(object), "\n\n", sep = ""))
    object
}

##' @export
predictSurvProb.matrix <- function(object, newdata, times, ...) {
    ## A matrix is taken as ready-made predictions: one row per newdata row,
    ## one column per requested time point.
    if (NROW(object) != NROW(newdata) || NCOL(object) != length(times)) {
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(object), " x ", NCOL(object), "\n\n", sep = ""))
    }
    object
}

##' @export
predictSurvProb.aalen <- function(object, newdata, times, ...) {
    ## Additive Aalen model (timereg): object$cum holds the cumulative
    ## regression coefficients -- column 1 is time, column 2 the intercept,
    ## the remaining columns one per covariate.
    time.coef <- data.frame(object$cum)
    ntime <- nrow(time.coef)
    objecttime <- time.coef[, 1, drop = TRUE]
    ntimevars <- ncol(time.coef) - 2
    covanames <- names(time.coef)[-(1:2)]
    notfound <- !(covanames %in% names(newdata))
    if (any(notfound))
        stop("\nThe following predictor variables:\n\n",
             paste(covanames[notfound], collapse = ","),
             "\n\nwere not found in newdata, which only provides the following variables:\n\n",
             paste(names(newdata), collapse = ","),
             "\n\n")
    ## Design matrix with intercept column, ordered like the coefficients.
    time.vars <- cbind(1, newdata[, names(time.coef)[-(1:2)], drop = FALSE])
    nobs <- nrow(newdata)
    ## Cumulative hazard evaluated by compiled C code shipped with pec.
    hazard <- .C("survest_cox_aalen",
                 timehazard = double(ntime * nobs),
                 as.double(unlist(time.coef[, -1])),
                 as.double(unlist(time.vars)),
                 as.integer(ntimevars + 1),
                 as.integer(nobs),
                 as.integer(ntime),
                 PACKAGE = "pec")$timehazard
    hazard <- matrix(hazard, ncol = ntime, nrow = nobs,
                     dimnames = list(1:nobs, paste("TP", 1:ntime, sep = "")))
    ## Clamp at 1: exp(-hazard) can exceed 1 when the cumulative hazard dips
    ## below zero.
    surv <- pmin(exp(-hazard), 1)
    if (missing(times)) times <- sort(unique(objecttime))
    ## FIX: drop=FALSE keeps the matrix shape when newdata has a single row or
    ## a single time point is requested; without it the result collapsed to a
    ## vector (spurious dimension error for one-row newdata, and a vector
    ## return value where the documented contract promises a matrix).
    p <- surv[, prodlim::sindex(jump.times = objecttime, eval.times = times), drop = FALSE]
    if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(p), " x ", NCOL(p), "\n\n", sep = ""))
    p
}

##' @export
predictSurvProb.cox.aalen <- function(object, newdata, times, ...) {
    ## Cox-Aalen model (timereg): multiplicative time-constant part times an
    ## additive time-varying part.
    ## Time-constant effects: strip the "prop(" wrapper from the coefficient
    ## names, then compute exp(gamma' z) per subject.
    const <- c(object$gamma)
    names(const) <- substr(dimnames(object$gamma)[[1]], 6, nchar(dimnames(object$gamma)[[1]]) - 1)
    constant.part <- t(newdata[, names(const)]) * const
    constant.part <- exp(colSums(constant.part))
    ## Time-varying effects from the cumulative coefficients (see .aalen above).
    time.coef <- data.frame(object$cum)
    ntime <- nrow(time.coef)
    objecttime <- time.coef[, 1, drop = TRUE]
    ntimevars <- ncol(time.coef) - 2
    time.vars <- cbind(1, newdata[, names(time.coef)[-(1:2)], drop = FALSE])
    nobs <- nrow(newdata)
    time.part <- .C("survest_cox_aalen",
                    timehazard = double(ntime * nobs),
                    as.double(unlist(time.coef[, -1])),
                    as.double(unlist(time.vars)),
                    as.integer(ntimevars + 1),
                    as.integer(nobs),
                    as.integer(ntime),
                    PACKAGE = "pec")$timehazard
    time.part <- matrix(time.part, ncol = ntime, nrow = nobs)
    surv <- pmin(exp(-time.part * constant.part), 1)
    if (missing(times)) times <- sort(unique(objecttime))
    ## FIX: drop=FALSE keeps the matrix shape for single-row newdata or a
    ## single requested time point (see predictSurvProb.aalen).
    p <- surv[, prodlim::sindex(objecttime, times), drop = FALSE]
    if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(p), " x ", NCOL(p), "\n\n", sep = ""))
    p
}
#' Combines the rpart result with a stratified Kaplan-Meier
(prodlim) to predict survival #' #' #' @title Predict survival based on rpart tree object #' @param formula passed to rpart #' @param data passed to rpart #' @param ... passed to rpart #' @return list with three elements: ctree and call #' @examples #' library(prodlim) #' if (!requireNamespace("rpart",quietly=TRUE)){ #' library(rpart) #' library(survival) #' set.seed(50) #' d <- SimSurv(50) #' nd <- data.frame(X1=c(0,1,0),X2=c(-1,0,1)) #' f <- pecRpart(Surv(time,status)~X1+X2,data=d) #' predictSurvProb(f,newdata=nd,times=c(3,8)) #' } #' @export pecRpart <- function(formula,data,...){ if (!requireNamespace("rpart",quietly=TRUE)){stop("Need package rpart.")} robj <- rpart::rpart(formula=formula,data=data,...) nclass <- length(unique(robj$where)) data$rpartFactor <- factor(predict(robj,newdata=data,...)) form <- update(formula,paste(".~","rpartFactor",sep="")) survfit <- prodlim::prodlim(form,data=data) out <- list(rpart=robj,survfit=survfit,levels=levels(data$rpartFactor)) class(out) <- "pecRpart" out } ##' @export predictSurvProb.pecRpart <- function(object,newdata,times,...){ newdata$rpartFactor <- factor(predict(object$rpart,newdata=newdata), levels=object$levels) p <- predictSurvProb(object$survfit,newdata=newdata,times=times) p } ##' @export predictSurvProb.coxph <- function(object,newdata,times,...){ p <- riskRegression::predictCox(object=object, newdata=newdata, times=times, se = FALSE, iid = FALSE, keep.times=FALSE, type="survival")$survival if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)){ stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) } p } ## baselineHazard.coxph(object,times) ## require(survival) ## new feature of the survival package requires that the ## original data are included ## survival.survfit.coxph <- getFromNamespace("survfit.coxph",ns="survival") ## survival.summary.survfit <- 
getFromNamespace("summary.survfit",ns="survival") ## b <- function(x){browser()} ## b() ## survfit.object <- survival::survfit(object,newdata=newdata,se.fit=FALSE,conf.int=FALSE) ## if (is.null(attr(object$terms,"specials")$strata)){ ## case without strata ## inflated.pred <- summary(survfit.object,times=times)$surv ## if (is.null(inflated.pred)){ ## can happen when all times beyond maxtime ## p=matrix(NA,ncol=length(times),nrow=NROW(newdata)) ## } else{ ## p <- t(inflated.pred) ## if ((beyond <- (length(times)-NCOL(p)))>0) ## p <- cbind(p,matrix(NA,nrow=NROW(newdata),ncol=beyond)) ## } ## }else{ ## case with strata ## inflated.pred <- summary(survfit.object,times=times) ## plist <- split(inflated.pred$surv,inflated.pred$strata) ## p <- do.call("rbind",lapply(plist,function(x){ ## beyond <- length(times)-length(x) ## c(x,rep(NA,beyond)) ## })) ## p <- matrix(inflated.pred,ncol=length(times),byrow=TRUE) ## } ## if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) ## stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) ## p ## } ## predictSurvProb.coxph.penal <- function(object,newdata,times,...){ ## require(survival) ## frailhistory <- object$history$ frailty(cluster) $history ## frailVar <- frailhistory[NROW(frailhistory),1] ## survfit.object <- survival.survfit.coxph(object,newdata=newdata,se.fit=FALSE,conf.int=FALSE) ## linearPred <- predict(object,newdata=newdata,se.fit=FALSE,conf.int=FALSE) ## basehaz <- basehaz(object) ## bhTimes <- basehaz[,2] ## bhValues <- basehaz[,1] ## survPred <- do.call("rbind",lapply(1:NROW(newdata),function(i){ ## (1+frailVar*bhValues*exp(linearPred[i]))^{-1/frailVar} ## })) ## where <- prodlim::sindex(jump.times=bhTimes,eval.times=times) ## p <- cbind(1,survPred)[,where+1] ## if ((miss.time <- (length(times) - NCOL(p)))>0) ## p <- cbind(p,matrix(rep(NA,miss.time*NROW(p)),nrow=NROW(p))) ## if 
(NROW(p) != NROW(newdata) || NCOL(p) != length(times)) ## stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) ## p ## }
##' @export
predictSurvProb.cph <- function(object, newdata, times, ...) {
    ## rms::survest needs the baseline survival stored in the fit object,
    ## which requires surv=TRUE in the original cph() call.
    if (!match("surv", names(object), nomatch = 0))
        stop("Argument missing: set surv=TRUE in the call to cph!")
    p <- rms::survest(object, times = times, newdata = newdata, se.fit = FALSE, what = "survival")$surv
    ## A single requested time point yields a vector; coerce to a one-column
    ## matrix so the dimension contract holds.
    if (is.null(dim(p))) p <- matrix(p, nrow = NROW(newdata))
    if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
        stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(p), " x ", NCOL(p), "\n\n", sep = ""))
    p
}
##' @export
predictSurvProb.selectCox <- function(object, newdata, times, ...) {
    ## Delegate to the Cox model stored as the first list element.
    predictSurvProb(object[[1]], newdata = newdata, times = times, ...)
}
##' @export
predictSurvProb.prodlim <- function(object, newdata, times, ...) {
    p <- predict(object = object,
                 type = "surv",
                 newdata = newdata,
                 times = times,
                 mode = "matrix",
                 level.chaos = 1)
    ## FIX: use inherits() instead of class(p)=="list". The comparison returns
    ## a logical vector whenever p carries more than one class, which makes
    ## the && condition error under R >= 4.3 (and warned before that).
    if (NROW(newdata) == 1 && inherits(p, "list")) {
        p <- unlist(p)
    }
    if (is.null(dim(p)) && NROW(newdata) >= 1) {
        ## Model without covariates: every subject gets the same prediction,
        ## so a plain vector of length(times) is returned in this special case.
        p <- as.vector(p)
        if (length(p) != length(times))
            stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(p), " x ", NCOL(p), "\n\n", sep = ""))
    } else {
        if (NROW(p) != NROW(newdata) || NCOL(p) != length(times))
            stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ", NROW(newdata), " x ", length(times), "\nProvided prediction matrix: ", NROW(p), " x ", NCOL(p), "\n\n", sep = ""))
    }
    rownames(p) <- NULL
    p
}
predict.survfit <-
function(object,newdata,times,bytimes=TRUE,fill="last",...){ if (length(class(object))!=1 || class(object)!="survfit" || object$typ !="right") stop("Predictions only available \nfor class 'survfit', possibly stratified Kaplan-Meier fits.\n For class 'cph' Cox models see survest.cph.") if (missing(newdata)) npat <- 1 else if (is.data.frame(newdata)) npat <- nrow(newdata) else stop("If argument `newdata' is supplied it must be a dataframe." ) ntimes <- length(times) sfit <- summary(object,times=times) if (is.na(fill)) Fill <- function(x,len){x[1:len]} else if (fill=="last") Fill <- function(x,len){ y <- x[1:len] y[is.na(y)] <- x[length(x)] y} else stop("Argument fill must be the string 'last' or NA.") if (is.null(object$strata)){ pp <- Fill(sfit$surv,ntimes) p <- matrix(rep(pp,npat), ncol=ifelse(bytimes,ntimes,npat), nrow=ifelse(bytimes,npat,ntimes), byrow=bytimes) } else{ covars <- attr(terms(eval.parent(object$call$formula)),"term.labels") if (!all(match(covars,names(newdata),nomatch=FALSE))) stop("Not all strata defining variables occur in newdata.") ## FIXME there are different ways to build strata levels ## how can we test which one was used??? 
stratdat <- newdata[,covars,drop=FALSE] names(stratdat) <- covars NewStratVerb <- survival::strata(stratdat) NewStrat <- interaction(stratdat,sep=" ") levs <- levels(sfit$strata) # print(levs) # print(levels(NewStrat)) # print(levels(NewStratVerb)) if (!all(choose <- match(NewStratVerb,levs,nomatch=F)) && !all(choose <- match(NewStrat,levs,nomatch=F))) stop("Not all strata levels in newdata occur in fit.") survlist <- split(sfit$surv,sfit$strata) pp <- lapply(survlist[choose],Fill,ntimes) p <- matrix(unlist(pp,use.names=FALSE), ncol=ifelse(bytimes,ntimes,npat), nrow=ifelse(bytimes,npat,ntimes), byrow=bytimes) } if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictSurvProb.survfit <- function(object,newdata,times,...){ p <- predict.survfit(object,newdata=newdata,times=times,bytimes=TRUE,fill="last") if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ## library randomSurvivalForest ## predictSurvProb.rsf <- function(object,newdata,times,...){ ## p <- predict.rsf(object,newdata=newdata,times=times,bytimes=TRUE,fill="last") ## if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) ## stop("Prediction failed") ## p ## } ##' @export predictSurvProb.psm <- function(object,newdata,times,...){ if (length(times)==1){ p <- rms::survest(object,times=c(0,times),newdata=newdata,what="survival",conf.int=FALSE)[,2] }else{ p <- rms::survest(object,times=times,newdata=newdata,what="survival",conf.int=FALSE) } if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x 
",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictSurvProb.riskRegression <- function(object,newdata,times,...){ if (missing(times))stop("Argument times is missing") temp <- predict(object,newdata=newdata) pos <- prodlim::sindex(jump.times=temp$time,eval.times=times) p <- cbind(1,1-temp$cuminc)[,pos+1,drop=FALSE] if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictSurvProb.rfsrc <- function(object, newdata, times, ...){ ptemp <- predict(object,newdata=newdata,importance="none",...)$survival pos <- prodlim::sindex(jump.times=object$time.interest,eval.times=times) p <- cbind(1,ptemp)[,pos+1,drop=FALSE] if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } # methods for uncensored regression # -------------------------------------------------------------------- predictProb <- function(object,newdata,times,...){ UseMethod("predictProb",object) } ##' @export predictProb.glm <- function(object,newdata,times,...){ ## no censoring -- only normal family with mu=0 and sd=sd(y) N <- NROW(newdata) NT <- length(times) if (!(unclass(family(object))$family=="gaussian")) stop("Currently only gaussian family implemented for glm.") betax <- predict(object,newdata=newdata,se.fit=FALSE) ## print(betax[1:10]) pred.matrix <- matrix(rep(times,N),byrow=TRUE,ncol=NT,nrow=N) p <- 1-pnorm(pred.matrix - betax,mean=0,sd=sqrt(var(object$y))) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided 
prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictProb.ols <- function(object,newdata,times,...){ ## no censoring -- only normal family with mu=0 and sd=sd(y) N <- NROW(newdata) NT <- length(times) if (!(unclass(family(object))$family=="gaussian")) stop("Currently only gaussian family implemented.") betax <- predict(object,newdata=newdata,type="lp",se.fit=FALSE) ## print(betax[1:10]) pred.matrix <- matrix(rep(times,N),byrow=TRUE,ncol=NT,nrow=N) p <- 1-pnorm(pred.matrix - betax,mean=0,sd=sqrt(var(object$y))) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ##' @export predictProb.randomForest <- function(object,newdata,times,...){ ## no censoring -- only normal family with mu=0 and sd=sd(y) N <- NROW(newdata) NT <- length(times) predMean <- predict(object,newdata=newdata,se.fit=FALSE) pred.matrix <- matrix(rep(times,N),byrow=TRUE,ncol=NT,nrow=N) p <- 1-pnorm(pred.matrix - predMean,mean=0,sd=sqrt(var(object$y))) if (NROW(p) != NROW(newdata) || NCOL(p) != length(times)) stop(paste("\nPrediction matrix has wrong dimensions:\nRequested newdata x times: ",NROW(newdata)," x ",length(times),"\nProvided prediction matrix: ",NROW(p)," x ",NCOL(p),"\n\n",sep="")) p } ## update.cox <- function(object,tstar,data){ ## object$call$data <- data[data$time>tstar,] ## update <- eval(object$call) ## class(update) <- "dynamicCox" ## update ## } ##' @export ## predictProb.dynamicCox <- function(object,newdata,cutpoints,learn.data,...){ ## p <- matrix(1,nrow=NROW(newdata),ncol=length(cutpoints)) ## p ## } pec/R/simCost.R0000644000176200001440000000737713571203267013012 0ustar liggesusers#' Simulate COST alike data #' #' Simulate data alike the data from the Copenhagen stroke study (COST) #' #' This uses functionality of the lava package. 
#' #' @param N Sample size #' @return Data frame #' @author Thomas Alexander Gerds #' @export simCost <- function(N){ requireNamespace("lava") ## psmT <- psm(Surv(time,status)~ age + sex + hypTen + prevStroke + othDisease + alcohol + diabetes + smoke + atrialFib + hemor + strokeScore + cholest,data=cost) ## psmCens <- psm(Surv(time,1-status)~1, data=cost) psmCens.scale <- 0.04330608 psmT.scale <- 1.120334 psmCens.coef <- c("(Intercept)"=8.297624) psmT.coef <- c("(Intercept)"=11.4457,"age"=-0.0572,"sex=male"=-0.4433,"hypTen=yes"=-0.2346,"prevStroke=yes"=-0.1873,"othDisease=yes"=-0.1327,"alcohol=yes"=0.1001,"diabetes=yes"=-0.333,"smoke=yes"=-0.3277,"atrialFib=yes"=-0.399,"hemor=yes"=0.0725,"strokeScore"=0.0250,"cholest"=-0.0049) m <- lava::lvm(~ age + sex + hypTen + prevStroke + othDisease + alcohol + diabetes + smoke + atrialFib + hemor + strokeScore + cholest + T + C) ## lava::distribution(m, ~age) <- lava::normal.lvm(mean=mean(cost$age),sd=sd(cost$age)) lava::distribution(m, ~age) <- lava::normal.lvm(mean=73.29151,sd=11.32405) ## lava::distribution(m, ~sex) <- lava::binomial.lvm(p=mean(as.numeric(cost$sex=="male"))) lava::distribution(m, ~sex) <- lava::binomial.lvm(p=0.465251) ## lava::distribution(m, ~strokeScore) <- normal.lvm(mean=mean(cost$strokeScore),sd=sd(cost$strokeScore)) lava::distribution(m, ~strokeScore) <- lava::normal.lvm(mean=43.52896,sd=13.01235) ## lava::distribution(m, ~hypTen) <- lava::binomial.lvm(p=mean(as.numeric(cost$hypTen=="yes"))) lava::distribution(m, ~hypTen) <- lava::binomial.lvm(p=0.3301158) ## lava::distribution(m, ~prevStroke) <- lava::binomial.lvm(p=mean(as.numeric(cost$prevStroke=="yes"))) lava::distribution(m, ~prevStroke) <- lava::binomial.lvm(p=0.1833977) ## lava::distribution(m, ~othDisease) <- lava::binomial.lvm(p=mean(as.numeric(cost$othDisease=="yes"))) lava::distribution(m, ~othDisease) <- lava::binomial.lvm(p=0.1621622) ## lava::distribution(m, ~alcohol) <- lava::binomial.lvm(p=mean(as.numeric(cost$alcohol=="yes"))) 
lava::distribution(m, ~alcohol) <- lava::binomial.lvm(p=0.3166023) ## lava::distribution(m, ~diabetes) <- lava::binomial.lvm(p=mean(as.numeric(cost$diabetes=="yes"))) lava::distribution(m, ~diabetes) <- lava::binomial.lvm(p=0.1409266) ## lava::distribution(m, ~smoke) <- lava::binomial.lvm(p=mean(as.numeric(cost$smoke=="yes"))) lava::distribution(m, ~smoke) <- lava::binomial.lvm(p=0.4555985) ## lava::distribution(m, ~hemor) <- lava::binomial.lvm(p=mean(as.numeric(cost$hemor=="yes"))) lava::distribution(m, ~hemor) <- lava::binomial.lvm(p=0.05019305) ## lava::distribution(m, ~atrialFib) <- lava::binomial.lvm(p=mean(as.numeric(cost$atrialFib=="yes"))) lava::distribution(m, ~atrialFib) <- lava::binomial.lvm(p=0.1254826) lava::distribution(m,~T) <- eval(call("coxWeibull.lvm",scale=exp(-psmT.coef["(Intercept)"]/psmT.scale),shape=1/psmT.scale)) lava::distribution(m,~C) <- eval(call("coxWeibull.lvm",scale=exp(-psmCens.coef["(Intercept)"]/psmCens.scale),shape=1/psmCens.scale)) TCoef <- -psmT.coef[-1]/psmT.scale lava::regression(m) <- formula(paste("T ~ f(strokeScore,",TCoef[["strokeScore"]],") + f(age,",TCoef[["age"]],") + f(sex,",TCoef[["sex=male"]],") + f(hypTen,",TCoef[["hypTen=yes"]],") + f(prevStroke,",TCoef[["prevStroke=yes"]],") + f(othDisease,",TCoef[["othDisease=yes"]],") + f(alcohol,",TCoef[["alcohol=yes"]],") + f(diabetes,",TCoef[["diabetes=yes"]],") + f(smoke,",TCoef[["smoke=yes"]],") + f(atrialFib,",TCoef[["atrialFib=yes"]],") + f(hemor,",TCoef[["hemor=yes"]],")",sep="")) m <- lava::eventTime(m,time~min(T=1,C=0),"status") d <- lava::sim(m,N) d <- d[,-match(c("T","C"),names(d))] d } pec/R/baselineHazard.coxph.R0000644000176200001440000000270313571203266015410 0ustar liggesusersbaselineHazard.coxph <- function(object,x,y,times=NULL){ stopifnot(class(object)=="coxph") if (is.null(object$x)) stop("You have to say `x=TRUE' in the call to coxph") if (!is.null(object$strata)){ yList <- split(object$y,object$strata) xList <- split(object$x,object$strata) 
return(lapply(1:length(yList),function(s){ object$x <- cbind(xList[[s]]) object$y <- yList[[s]] object$strata <- NULL baselineHazard.coxph(object=object,times=times) }))} ## browser() beta <- coef(object) if (missing(x)) x <- object$x if (missing(y)) y <- object$y ## elp <- exp(apply(x,1,function(y)sum(y*beta))) elp <- exp(x%*%beta) response <- unclass(object$y) time <- response[,1,drop=TRUE] status <- response[,2,drop=TRUE] jumptimes <- sort(time[status!=0]) dNn <- table(jumptimes) S0 <- .C("SNull", time=as.double(time), jumptimes=as.double(jumptimes), elp=as.double(elp), S=double(length(jumptimes)), N=as.integer(length(time)), NJ=as.integer(length(jumptimes)), ## DUP=FALSE, PACKAGE="pec")$S ## S01 <- sapply(jumptimes,function(s){ ## sum(elp * (time>=s)) ## }) Lambda <- cumsum((1/S0) * dNn ) if (is.null(times)){ data.frame(time=as.vector(unique(jumptimes)),cumhazard=as.vector(Lambda)) } else{ data.frame(time=times,cumhazard=c(0,Lambda)[1+prodlim::sindex(jump.times=jumptimes,eval.times=times)]) } }
pec/R/predictLandmark.R0000644000176200001440000000047613571203267014466 0ustar liggesusers# methods for dynamic # --------------------------------------------------------------------
predictLandmark <- function(object, newdata, times, landmark, cause, ...) {
    ## Generic for landmark (dynamic) predictions; dispatch on model class.
    UseMethod("predictLandmark", object)
}
predictLandmark.jointPenal <- function(object, newdata, times, landmark, cause, ...) {
    ## Placeholder method -- not yet implemented.
    ## xxx
    ## stopifnot(
}
pec/R/allComparisons.R0000755000176200001440000000037213571203266014345 0ustar liggesusers
allComparisons <- function(x, sep = " <= ") {
    ## All unordered pairs of the elements of x, each pair returned as
    ## c(first, second) with the earlier element first.
    ##
    ## Args:
    ##   x: vector of (model) names to pair up.
    ##   sep: separator used to build the list names, e.g. "a <= b".
    ##
    ## Returns: a named list with choose(n, 2) two-element vectors.
    n <- length(x)
    ## FIX: guard the degenerate case. With n < 2 the original code built
    ## nonsense indices (2:1 counts down) and, because 1:length(pro) becomes
    ## 1:0, even iterated over c(1, 0).
    if (n < 2) return(list())
    ## First elements: x[1] repeated (n-1) times, x[2] repeated (n-2) times, ...
    pro <- rep(x, (n - 1):0)
    ## Second elements: for each i >= 2, the tail x[i:n] of later elements.
    contra <- x[unlist(lapply(2:n, function(i) i:n))]
    ## FIX: seq_along() instead of 1:length(pro) (safe for empty input).
    out <- lapply(seq_along(pro), function(i) c(pro[i], contra[i]))
    names(out) <- sapply(out, paste, collapse = sep)
    out
}
pec/R/testResiduals.R0000755000176200001440000000535213571203267014213 0ustar liggesuserstestResiduals <- function(object, times, testTimes, rangeInt, confInt, confLevel, keepTestedResiduals){ NF <- length(object) comparisonList <- allComparisons(names(object))
testExact <- NROW(object[[1]])<100 # {{{ compute integrated residuals testIBS <- !is.null(rangeInt) if (testIBS==TRUE){ if (length(rangeInt)==2 && is.numeric(rangeInt)) range <- rangeInt else range <- NULL integratedResiduals <- lapply(object,function(x){ apply(x,1,function(r){ Dint(x=times,y=r,range=range,restrictNonMissing=FALSE) })}) ## naFractionIBS <- lapply(integratedResiduals,function(x)mean(is.na(x))) } # }}} # {{{ extract residuals at testTimes if (!is.null(testTimes)){ timePos <- prodlim::sindex(times,testTimes) testTimeResiduals <- lapply(object,function(x){ x[,timePos,drop=FALSE] }) ## naFractionTestTimes <- lapply(testTimeResiduals,function(x)colMeans(is.na(x))) } # }}} loop <- lapply(comparisonList,function(cc){ # {{{ test residuals at time points if (!is.null(testTimes)){ Rdiff <- testTimeResiduals[[cc[2]]]-testTimeResiduals[[cc[1]]] wtest <- lapply(1:length(testTimes),function(t){ d <- Rdiff[,t,drop=TRUE] if (any(is.na(d))){ list(p.value=NA,conf.int=c(NA,NA)) } else{ suppressWarnings(wilcox.test(d,alternative="less",exact=testExact,conf.int=confInt,conf.level=confLevel)) } }) loopOut <- list(pValue=sapply(wtest,function(w)w$p.value)) if (confInt==TRUE){ loopOut <- c(loopOut,list(upperLimit=sapply(wtest,function(w)w$conf.int[2]))) } } else{ loopOut <- vector(mode = "list", length = NF) } # }}} # {{{ test integrated residuals if (testIBS){ dIBS <- integratedResiduals[[cc[2]]]-integratedResiduals[[cc[1]]] if (any(is.na(dIBS))){ loopOut <- c(loopOut,list(IBSpValue=NA)) if (confInt==TRUE){ loopOut <- c(loopOut,list(IBSupper=NA)) } } else{ wtestIBS <- suppressWarnings(wilcox.test(dIBS,alternative="less",exact=testExact,conf.int=confInt,conf.level=confLevel)) loopOut <- c(loopOut,list(IBSpValue=wtestIBS$p.value)) if (confInt==TRUE){ loopOut <- c(loopOut,list(IBSupper=wtestIBS$conf.int[2])) } } } # }}} loopOut }) # {{{ prepare output if (!is.null(testTimes)){ out <- list(pValues=lapply(loop,function(x)x$pValue)) } else{ out <- NULL } if (testIBS){ out <- 
c(out,list(IBSpValue=lapply(loop,function(x)x$IBSpValue))) } # }}} out } pec/R/plotPredictEventProb.R0000644000176200001440000001677714131004270015475 0ustar liggesusers#' Plotting predicted survival curves. #' #' Ploting time-dependent event risk predictions. #' #' Arguments for the invoked functions \code{legend} and \code{axis} are simply #' specified as \code{legend.lty=2}. The specification is not case sensitive, #' thus \code{Legend.lty=2} or \code{LEGEND.lty=2} will have the same effect. #' The function \code{axis} is called twice, and arguments of the form #' \code{axis1.labels}, \code{axis1.at} are used for the time axis whereas #' \code{axis2.pos}, \code{axis1.labels}, etc. are used for the y-axis. #' #' These arguments are processed via \code{\dots{}} of #' \code{plotPredictEventProb} and inside by using the function #' \code{SmartControl}. #' #' @param x Object specifying an event risk prediction model. #' @param newdata A data frame with the same variable names as those that were #' used to fit the model \code{x}. #' @param times Vector of times at which to return the estimated probabilities. #' @param cause Show predicted risk of events of this cause #' @param xlim Plotting range on the x-axis. #' @param ylim Plotting range on the y-axis. #' @param xlab Label given to the x-axis. #' @param ylab Label given to the y-axis. #' @param axes Logical. If \code{FALSE} no axes are drawn. #' @param col Vector of colors given to the survival curve. #' @param density Densitiy of the color -- useful for showing many #' (overlapping) curves. #' @param lty Vector of lty's given to the survival curve. #' @param lwd Vector of lwd's given to the survival curve. #' @param add Logical. If \code{TRUE} only lines are added to an existing #' device #' @param legend Logical. If TRUE a legend is plotted by calling the function #' legend. 
Optional arguments of the function \code{legend} can be given in #' the form \code{legend.x=val} where x is the name of the argument and val the #' desired value. See also Details. #' @param percent Logical. If \code{TRUE} the y-axis is labeled in percent. #' @param \dots Parameters that are filtered by \code{\link{SmartControl}} and #' then passed to the functions: \code{\link{plot}}, \code{\link{axis}}, #' \code{\link{legend}}. #' @return The (invisible) object. #' @author Ulla B. Mogensen \email{ulmo@@biostat.ku.dk}, Thomas A. Gerds #' \email{tag@@biostat.ku.dk} #' @seealso \code{\link{predictEventProb}}\code{\link{prodlim}} #' @references Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012). #' Evaluating Random Forests for Survival Analysis Using Prediction Error #' Curves. Journal of Statistical Software, 50(11), 1-23. DOI #' 10.18637/jss.v050.i11 #' @keywords survival #' @examples #' #' # generate some competing risk data #' #' @export plotPredictEventProb <- function(x, newdata, times, cause=1, xlim, ylim, xlab, ylab, axes=TRUE, col, density, lty, lwd, add=FALSE, legend=TRUE, percent=FALSE, ...){ # {{{ call argument allArgs <- match.call() # }}} # {{{ find times if(missing(times)){ # formula formula <- eval(x$call$formula) if (match("formula",class(formula),nomatch=0)==0) stop("Argument formula is missing.") # find data data <- eval(x$call$data) # extract response m <- model.frame(formula,data,na.action=na.fail) response <- model.response(m) # ordering time neworder <- order(response[,"time"],-response[,"status"]) response <- response[neworder,,drop=FALSE] times <- response[,"time"] # unique event times times <- unique(times) } # }}} # {{{ newdata if(missing(newdata)){ newdata <- eval(x$call$data) } ## stop("newdata argument is missing") # }}} # {{{ xlim, ylim if (missing(xlim)) xlim <- c(0, max(times)) at <- times <= xlim[2] orig.X <- times[at] X <- times[at] # }}} # {{{ predict newdata at times y <- predictEventProb(object=x, newdata=newdata, 
times=orig.X, cause=cause) # }}} # {{{ plot arguments nlines <- NROW(y) if (missing(ylab)) ylab <- "Event probability" if (missing(xlab)) xlab <- "Time" if (missing(ylim)) ylim <- c(0, 1) if (missing(lwd)) lwd <- rep(3,nlines) if (missing(col)) col <- rep(1,nlines) if (missing(density)){ if (nlines>5){ density <- pmax(33,100-nlines) } else density <- 100 } ## print(density) if (density<100){ col <- sapply(col,function(coli){ ccrgb=as.list(col2rgb(coli,alpha=TRUE)) names(ccrgb) <- c("red","green","blue","alpha") ccrgb$alpha=density cc=do.call("rgb",c(ccrgb,list(max=255))) }) } if (missing(lty)) lty <- rep(1, nlines) if (length(lwd) < nlines) lwd <- rep(lwd, nlines) if (length(lty) < nlines) lty <- rep(lty, nlines) if (length(col) < nlines) col <- rep(col, nlines) axis1.DefaultArgs <- list() axis2.DefaultArgs <- list(at=seq(0,1,.25)) plot.DefaultArgs <- list(x=0, y=0, type = "n", ylim = ylim, xlim = xlim, xlab = xlab, ylab = ylab) legend.DefaultArgs <- list(legend=rownames(y), lwd=lwd, col=col, lty=lty, cex=1.5, bty="n", y.intersp=1.3, x="topright") # }}} # {{{ smart controls if (match("legend.args",names(args),nomatch=FALSE)){ legend.DefaultArgs <- c(args[[match("legend.args",names(args),nomatch=FALSE)]],legend.DefaultArgs) legend.DefaultArgs <- legend.DefaultArgs[!duplicated(names(legend.DefaultArgs))] } smartA <- prodlim::SmartControl(call=list(...), keys=c("plot","legend","axis1","axis2"), ignore=c("x", "newdata", "times", "xlim","ylim","xlab","ylab","col","lty","lwd","add","legend","percent","axes","legend.args"), defaults=list("plot"=plot.DefaultArgs, "legend"= legend.DefaultArgs, "axis1"=axis1.DefaultArgs, "axis2"=axis2.DefaultArgs), forced=list("plot"=list(axes=FALSE), "axis1"=list(side=1), "axis2"=list(side=2)), verbose=TRUE) # }}} # {{{ empty plot if (!add) { do.call("plot",smartA$plot) if (axes){ do.call("axis",smartA$axis1) if (percent & is.null(smartA$axis1$labels)) smartA$axis2$labels <- paste(100*smartA$axis2$at,"%") do.call("axis",smartA$axis2) } } # 
}}} # {{{ adding lines nix <- lapply(1:nlines, function(s) { lines(x = X, y = y[s,], type = "s", col = col[s], lty = lty[s], lwd = lwd[s]) }) # }}} # {{{ legend if(legend==TRUE && !add && !is.null(rownames(y))){ save.xpd <- par()$xpd do.call("legend",smartA$legend) par(xpd=save.xpd) } # }}} invisible(x) } pec/R/checkModels.R0000755000176200001440000000327114100207441013565 0ustar liggesuserscheckModels <- function(object,model.args,model.parms,splitMethod,verbose=TRUE){ checkF <- lapply(1:length(object),function(f){ fit <- object[[f]] if(splitMethod != "noinf" && (match("call",names(fit),nomatch=0)==0)) stop(paste("pec:::checkModels -> Model",names(object)[f],"does not have a call argument."),call.=FALSE) else fit$call$data <- NULL }) # check model.args # -------------------------------------------------------------------- if (!is.null(model.args)){ if (!(is.list(model.args))){ warning(paste("Argument model.args is not a list and therefore ignored." )) model.args <- NULL } else{ if (!(all(match(make.names(names(model.args),unique=TRUE),names(object),nomatch=FALSE)))){ if (verbose==TRUE) warning(paste("model.args should be a named list matching the entries of the object. Assume now that they are given in the correct order" )) } else{ model.args <- model.args[names(object)] } } } # check model.parms # -------------------------------------------------------------------- if (!is.null(model.parms)){ if (!(is.list(model.parms))){ warning(paste("Argument model.parms is not a list and therefore ignored." 
)) model.args <- NULL } else{ if (!(all(match(make.names(names(model.parms),unique=TRUE),names(object),nomatch=FALSE)))){ if (verbose==TRUE) warning(paste("model.parms should be a named list matching the list of model.\nIt is assumed that they are given in the correct order" )) } else{ model.parms <- model.parms[names(object)] } } } list(model.args=model.args,model.parms=model.parms) } pec/R/print.vandeWielTest.R0000755000176200001440000000227313571203267015273 0ustar liggesusers##' @export print.vandeWielTest <- function(x,eps=0.0001,pdigits=4,...){ cat("\nvan de Wiel test based on ",x$B," data splits\n") cat("\nTraining sample size: ",x$M,"\n") cat("\nTest sample size: ",x$N-x$M,"\n") if (length(x$testIBS)==2){ cat("\nP-values based on integrated Brier score residuals:") cat("\nRange of integration: [",x$testIBS[1],"--",x$testIBS[2],"]\n\n") ibsP <- sapply(x$Comparisons,function(x)x$pValueIBS) ibsP <- format.pval(ibsP,digits=pdigits,eps=eps) ibsMat <- matrix(ibsP,ncol=1) rownames(ibsMat) <- names(x$Comparisons) colnames(ibsMat) <- "p-value (IBS)" print(ibsMat,quote=FALSE,...) } NT <- length(x$testTimes) if (NT>0){ cat("\nMatrix of time point wise p-values:\n\n") if (NT>5){ showTimes <- sort(sample(x$testTimes)) showTimePos <- prodlim::sindex(jump.times=x$testTimes,eval.times=showTimes) } else{ showTimes <- x$testTimes showTimePos <- 1:NT } mat <- do.call("rbind",lapply(x$Comparisons,function(comp){ format.pval(comp$pValueTimes[showTimePos],digits=pdigits,eps=eps) })) colnames(mat) <- paste("t=",showTimes) print(mat,quote=FALSE,...) } invisible(mat) } pec/R/reclass.R0000644000176200001440000003516613571203267013022 0ustar liggesusers##' Retrospective table of risks predicted by two different methods, models, algorithms ##' ##' All risks are multiplied by 100 before ##' @title Retrospective risk reclassification table ##' @param object Either a ##' list with two elements. 
Each element should either ##' be a vector with probabilities, or an object for which ##' \code{predictSurvProb} or \code{predictEventProb} can extract predicted risk based on data. ##' @param reference Reference prediction model. ##' @param formula A survival formula as obtained either with ##' \code{prodlim::Hist} or \code{survival::Surv} which defines the ##' response in the \code{data}. ##' @param data Used to extract the response from the data and passed ##' on to \code{predictEventProb} to extract predicted event ##' probabilities. ##' @param time Time interest for prediction. ##' @param cause For competing risk models the cause of ##' interest. Defaults to all available causes. ##' @param cuts Risk quantiles to group risks. ##' @param digits Number of digits to show for the predicted risks ##' @return reclassification tables: overall table and one conditional table for each cause and for subjects event free at time interest. ##' @seealso predictStatusProb ##' @examples ##' \dontrun{ ##' library(survival) #' set.seed(40) #' d <- prodlim::SimSurv(400) #' nd <- prodlim::SimSurv(400) #' Models <- list("Cox.X2"=coxph(Surv(time,status)~X2,data=d,x=TRUE,y=TRUE), #' "Cox.X1.X2"=coxph(Surv(time,status)~X1+X2,data=d,x=TRUE,y=TRUE)) #' rc <- reclass(Models,formula=Surv(time,status)~1,data=nd,time=5) #' print(rc) #' plot(rc) #' #' set.seed(40) #' library(riskRegression) #' library(prodlim) #' dcr <- prodlim::SimCompRisk(400) #' ndcr <- prodlim::SimCompRisk(400) #' crPred5 <- list("X2"=predictEventProb(CSC(Hist(time,event)~X2,data=dcr),newdata=ndcr,times=5), #' "X1+X2"=predictEventProb(CSC(Hist(time,event)~X1+X2,data=dcr),newdata=ndcr,times=5)) #' rc <- reclass(crPred5,Hist(time,event)~1,data=ndcr,time=3) #' print(rc) #' #' reclass(crPred5,Hist(time,event)~1,data=ndcr,time=5,cuts=100*c(0,0.05,0.1,0.2,1)) #'} ##' @author Thomas A. 
Gerds reclass <- function(object, reference, formula, data, time, cause, cuts=seq(0,100,25),digits=2){ if (missing(reference)){ stopifnot(length(object)==2) } else{ object <- list(object,reference) } if ("factor" %in% class(object[[1]])){ factorp <- TRUE if (!("factor" %in% class(object[[2]]))) stop("The first object is a factor, so the reference must also be a factor.") ## dimension of reclassification tables is NR x NC NR <- length(levels(object[[1]])) NC <- length(levels(object[[2]])) }else{ factorp <- FALSE NC <- length(cuts) NR <- NC-1 ## dimension of reclassification tables is NR x NR } # {{{ response ## histformula <- formula ## if (histformula[[2]][[1]]==as.name("Surv")){ ## histformula <- update(histformula,paste("prodlim::Hist","~.")) ## histformula[[2]][[1]] <- as.name("prodlim::Hist") ## } ## print(histformula) ## m <- model.frame(histformula,data,na.action=na.fail) m <- model.frame(formula,data,na.action=na.omit) response <- model.response(m) if (match("Surv",class(response),nomatch=0)!=0){ attr(response,"model") <- "survival" attr(response,"cens.type") <- "rightCensored" model.type <- "survival" } model.type <- attr(response,"model") if (model.type=="competing.risks"){ predictHandlerFun <- "predictEventProb" availableCauses <- attr(response,"states") ncauses <- length(availableCauses) if (missing(cause)) cause <- availableCauses[[1]] else if (match(cause, availableCauses,nomatch=FALSE)==0) stop("Cause ",cause," is not among the available causes: ",paste(availableCauses,collapse=", ")) } else{ predictHandlerFun <- "predictSurvProb" } # }}} if (factorp){ predrisk <- object edat <- data.frame(cbind(object[[1]],object[[2]],response)) ## overall reclassification table retab <- table(object[[1]],object[[2]]) }else{ cutP <- function(P,cuts){ if (min(P)max(cuts)) stop("Largest predicted risk is larger than last cut.") cut(P,cuts, include.lowest=TRUE, labels=paste(paste(cuts[-NC],cuts[-1],sep="-"),"%",sep="")) } getPredictions <- function(x){ if 
(any(is.na(x))) stop("Missing values in object.") P <- switch(class(x)[[1]], ## "factor"={x}, "numeric"={ if (all(x<1)){ warning("Assumed that predictions are given on the scale [0,1] and multiplied by 100.") x*100 } else{ x } }, {if (predictHandlerFun=="predictEventProb"){ P <- 100*do.call(predictHandlerFun,list(x,newdata=data,times=time,cause=cause)) } else { P <- 100*do.call(predictHandlerFun,list(x,newdata=data,times=time)) } P}) } predrisk <- lapply(object,getPredictions) names(predrisk) <- names(object) predriskCut <- lapply(predrisk,function(P){if (is.factor(P)) P else cutP(P,cuts)}) ## overall reclassification table retab <- table(predriskCut[[1]],predriskCut[[2]]) ## reclassification frequencies conditional on outcome edat <- data.frame(cbind(do.call("cbind",predriskCut),response)) } edat$event[edat$status==0] <- 0 N <- NROW(edat) names(edat)[1:2] <- c("P1","P2") if (factorp){ cells <- split(edat,list(edat$P1,edat$P2)) all.comb <- apply(expand.grid(1:(NR),1:(NC)),1,paste,collapse=".") } else{ cells <- split(edat,list(factor(edat$P1,levels=1:NR),factor(edat$P2,levels=1:NR))) all.comb <- apply(expand.grid(1:(NR),1:(NR)),1,paste,collapse=".") } nn <- names(object) if (!is.null(nn) & length(nn)==2){ names(dimnames(retab)) <- nn } ## -------------------------------------------------------------------------------------- ## Apply Bayes' theorem to calculate expected reclassification probabilities ## conditional on outcome ## -------------------------------------------------------------------------------------- if (predictHandlerFun=="predictEventProb"){ ## -------------------------------------------------------------------------------------- ## Competing risk ## ## P(X=x|T<=t, cause=j) = P(X=x,T<=t,cause=j) / P(T<=t,cause=j) ## = P(T<=t,cause=j|X=x) P(X=x) / P(T<=t,cause=j) ## = cuminc.x H.x / cuminc ## ## P(X=x|T>t) = P(X=x,T>t) / P(T>t) ## = P(T>t|X=x) P(X=x) / P(T>t) ## = efreesurv.x H.x / efreesurv ## 
-------------------------------------------------------------------------------------- eformula <- Hist(time,event)~1 Hx <- unlist(lapply(cells,NROW))/N cuminc.x <- do.call("rbind", lapply(names(cells), function(cc){ x <- cells[[cc]] if (NROW(x)>0){ ## warn if too short followup if (all(x$time0){ ## check if there is more than one cause if (nstatest) = P(X=x,T>t) /P(T>t) ## = surv.x * Hx / surv ## ## -------------------------------------------------------------------------------------- eformula <- Hist(time,status)~1 Hx <- unlist(lapply(cells,NROW))/N cuminc <- predict(prodlim::prodlim(eformula,data=edat),times=time,type="cuminc") cuminc.x <- sapply(cells,function(x){ if (NROW(x)>0){ ## warn if too short followup if (all(x$time1){ CrossValErr <- lapply(1:NF,function(f){ rowMeans(do.call("cbind",lapply(CrossValErrMat,function(b)b[[f]]))) }) } else CrossValErr <- CrossValErrMat[[1]] out <- list(CrossValErr=CrossValErr) if (keep==TRUE && B>1) out$CrossValErrMat <- CrossValErrMat out } pec/MD50000644000176200001440000001354014131045152011330 0ustar liggesuserse353c15a6baada8fc5e5c34bf9eb03f1 *DESCRIPTION 48f7ab8860ae3f7f22147baef5a4b2f0 *NAMESPACE 15865fad11667a9cc5a083a412e03a8e *R/CindexBootstrapCrossValidation.R 6fdd83d87e232ec319bb65f5046caf8b *R/CindexKFoldCrossValidation.R 77f77c0ec83d46a798417c35ab1f5d12 *R/ConfInt.Cindex.R c197f26593d31f661a91c5de1ec3e1b0 *R/ConfScoreSurv.R 6dc9f80d3370d3e80f7f70f46056bbc9 *R/Dint.R 6a730849cccdcdf45cba26671dce804e *R/R2.R dab1cee82ceb2ac3a9024e17f8d68434 *R/Special.R 252760686d668dfef5b4ae18f4904527 *R/allComparisons.R 79a41bc15d1ace76183f723fe17206a2 *R/baselineHazard.coxph.R 31aecc28d3ba5530150f96f6b167853b *R/bootstrapCrossValidation.R 6a3ca570639e6143989d32318897da1e *R/calPlot.R d6d36ee72bbe0018a7c736d135ae8bb0 *R/checkModels.R 4bdb8e832fc87ef3bcc6cc5c1c807aa4 *R/cindex.R 35e6a420f07b3ad1ae9ef27129727c31 *R/crps.R 6d76cae79de771869e95c556f2b8547f *R/ibs.R 1eaa922ebb158694088213c1345fc07d *R/internalReevalFit.R 
3b028f2e7a898434c12f48443f344e48 *R/internalTalk.R aaa6cbe875970b2c212a34d20ebffc7e *R/ipcw.R 7ad85fbd2bb6b64306f9feb29dfbaf1b *R/kFoldCrossValidation.R ed27bd02d63d241b8761b8b899fbe05f *R/marginal.prodlim.R 86dc017c12b1ad15e51839adb4b32d91 *R/pec-package.R 19fbf57b0290f191729d47e69303f736 *R/pec.R a433f95275ab629c1e67ee109f3d71a5 *R/plot.CiCindex.R 1445d6e9b32ab7b65882adbee571eda8 *R/plot.Cindex.R 9e7eb9d0cbab57e4c2b7d9622a86e534 *R/plot.calibrationPlot.R 43857a96d485d1f57eeb9e07c521c222 *R/plot.confScoreSurv.R 6144a4bdcf2720446b4780a3d8521ebb *R/plot.pec.R 3e0139a3573f399dcf8bbbed40d987c4 *R/plot.riskReclassification.R b27c0b6e6d80104af0de82174af93b2e *R/plotPredictEventProb.R d0bbe403dafc3fae22d053b39c59a91f *R/plotPredictSurvProb.R 8efef8767353687bbc534b38be015648 *R/predictEventProb.R b7e62c168e71123c1daad0000535cdd7 *R/predictEventProb.coxboost.R 117521e7219b6f9920ba2565d9f1ff6f *R/predictEventProb.pseudoForest.R 5e059cf9bb4c30b8c5667d0c47621824 *R/predictEventProb.selectFGR.R 1b9d50341fc7e0214c1094e2d5bfdde6 *R/predictLandmark.R 63788711aa1175d859f65ad9ca01a3ad *R/predictLifeYearsLost.R 1cada3ceb2e741d209f53557314ae368 *R/predictRestrictedMeanTime.R c53c674aeb39a508ee5288054eff05f6 *R/predictSurvProb.FastBw.R de72855047ef36e38487409846eb3d26 *R/predictSurvProb.R 0e87322161a5b8a67b7784471db72a9e *R/predictSurvProb.cforest.R e2bd5d9c79f395b9a74e7ed64c93fa92 *R/predictSurvProb.ctree.R a66368287770f8f2dbe700279961e2d1 *R/predictSurvProb.penfitS3.R f81b433dd1c074a8bbb2498c245d1d1a *R/predictSurvProb.pseudoForest.R e445d233c27a22ac00e207143d22f7d8 *R/print.Cindex.R 228e34c8ca9bc173fe74c9baf793e504 *R/print.ConfScoreSurv.R 61e8097e6eb2963401146073053ac942 *R/print.IPCW.R 4200611d07f5423ad46ae6565e048184 *R/print.R2.R bf387b3acd2abf4d3c0069cd11c6c28d *R/print.calibrationPlot.R 6d4bd46252d89ed6d6304cc8afde30e4 *R/print.crps.R f630752372f9c09f5a749b3a88ae746c *R/print.method.R 2af8381130667356a4abf324d7988dfe *R/print.multiSplitTest.R 2be797c2761521d990cc8420bd2c363b 
*R/print.pec.R 2dacfa1224cc3e2a4d4a85c30a9c5e61 *R/print.riskReclassification.R 47efcfca41fcc054b81cf415859b1506 *R/print.splitMethod.R b3d8ed7411167daeaf314b96b82f73d9 *R/print.vandeWielTest.R 0f31b6a8237a1df438c456f6fbbd0634 *R/pseudo.kFoldCrossValidation.R 43ebeb0c9c54ac7bd31759b34caa93cf *R/reclass.R 8b022dc960c92cc5776055f9c43c3efb *R/resolveSplitMethod.R a3ca40f1afbc2ece3775212998a891b8 *R/selectCox.R 0e9cf52b37347395eca424561551c4aa *R/simCost.R 3eafcd81d46429e34611d8e5c6bd77bb *R/summary.Cindex.R 128d06815dd04f82105663922273344d *R/summary.confScoreSurv.R 5c17bca9f5b279004f42dcfd7dbfef6a *R/summary.pec.R 164c746808b539ce783f89f0200db825 *R/survest.aalen.R 5cc24b50c6c63ca8560a0c583eb0763e *R/survest.cox.aalen.R df65e7ca8ca5bac76ee3c4fcc82e6386 *R/testResiduals.R e7ee8f67aac4f997bd3e7904971511d9 *README 50c4e564f975dce6f48a185695538958 *data/GBSG2.csv 5243afb7fadcadf6b6881b63b3abdbf3 *data/Pbc3.csv 1efe66208399c83ea4040fe7c4f6e952 *data/cost.rda 13ed1219196e5adcb7fa15eec6f84c9b *data/threecity.csv 485f9c5a42ac33259d81418043a24be3 *inst/CITATION 454cdf477564b660033ee5d82f6cb5a0 *man/GBSG2.Rd 6b2bc5366e3ac6b33c53ff937c3032d5 *man/Pbc3.Rd a48001a1d12073ddf3dd482381bd3679 *man/R2.Rd a84976829705355389054b6d1592bdd3 *man/Special.Rd 1eaf90882d39365e99c9d3bad45fa361 *man/calPlot.Rd da4bdaf81d5c6e422019b1f6f0e96801 *man/cindex.Rd b1f5e014b075a0144ea015a9fb56f06c *man/cost.Rd f2e019f1ab20dfe843df95a2ddf42ff3 *man/coxboost.Rd 19a84dbf245f885ab86879fbbd40c4ed *man/crps.Rd 3db79736592863ca2f1c0fa92d33d425 *man/ipcw.Rd da20741de6a31b0ac7e731452a56d4c3 *man/pec.Rd d2f66799e115b72c50de59f8c3db332f *man/pecCforest.Rd dce0f5da1ea8185462e54168d1be7ebb *man/pecCtree.Rd 1678c1a891e4c594bab4461b53c519bd *man/pecRpart.Rd c55b6161ca2a9d85c043f5a460fb076a *man/plot.calibrationPlot.Rd d850dc8d5278177c1577d29d1014bcef *man/plot.pec.Rd 1516de330e015a0b1064e62187ac7669 *man/plotPredictEventProb.Rd 0a198bb23ef1c852c1a1c4475fada742 *man/plotPredictSurvProb.Rd 
bb2bfaf80c473c8a0dd59a9fc674d484 *man/predictEventProb.Rd 08a6c61a4d9f6e31095961d76f34d20c *man/predictLifeYearsLost.Rd a91a3faa420acc2a6ec5c196512d886e *man/predictRestrictedMeanTime.Rd 6f26f3d8eb200055ee8c9becce0a5dd5 *man/predictSurvProb.Rd 48d624d52745a0c62390a96da648ebdf *man/print.pec.Rd f564eaf256bf0cb2b5f31aab733ad473 *man/reclass.Rd e2194675a81943c5c9950d37de4a8212 *man/resolvesplitMethod.Rd 6b89138fdeb140bbe72e25e159821dbe *man/selectCox.Rd 658ceb7a4dc49eb5a2ea69d60435a381 *man/selectFGR.Rd 2671b5188aaa72795b62ab526edb8d8b *man/simCost.Rd fd5bbd417ec642ed6916403e4c2a2009 *man/threecity.Rd bac0642b9ebcbfb40919e786f94c6b45 *src/SNull.c d79afa30a192d0d7299bdf6abca96bfe *src/auc.c 6536f812d8e39c9eba7cc0b7b9dca534 *src/brier_score.c 82c73a8a5da88262457daaeb1950d659 *src/ccr.c c720bd6314ebb2148eedbba4cd9af9eb *src/cindex.c 3e95b551de777b3dad6e580dfc2d0f72 *src/init.c edac61f8cc0a7955d4b75e839056bb75 *src/pec.c 75cc547be5b490b6f5ae860aeed1c98d *src/pecResiduals.c ed690ddbac39ed44510f20af4ac5a6a1 *src/survest_cox_aalen.c a8a3e11748dfa9951a05126d1a169c32 *tests/testthat/pec-loop.R pec/inst/0000755000176200001440000000000014131004272011770 5ustar liggesuserspec/inst/CITATION0000644000176200001440000000144214131004272013126 0ustar liggesuserscitHeader("To cite pec in publications use:") citEntry(entry = "Article", title = "Evaluating Random Forests for Survival Analysis Using Prediction Error Curves", author = personList(as.person("Ulla B. Mogensen"), as.person("Hemant Ishwaran"), as.person("Thomas A. Gerds")), journal = "Journal of Statistical Software", year = "2012", volume = "50", number = "11", pages = "1--23", url = "https://www.jstatsoft.org/v50/i11", textVersion = paste("Ulla B. Mogensen, Hemant Ishwaran, Thomas A. Gerds (2012).", "Evaluating Random Forests for Survival Analysis Using Prediction Error Curves.", "Journal of Statistical Software, 50(11), 1-23.", "DOI 10.18637/jss.v050.i11") )