pammtools/0000755000176200001440000000000014453640332012271 5ustar liggesuserspammtools/NAMESPACE0000644000176200001440000001125514453310762013515 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(add_cif,default) S3method(add_hazard,default) S3method(arrange,ped) S3method(as.data.frame,crps) S3method(as_ped,data.frame) S3method(as_ped,list) S3method(as_ped,nested_fdf) S3method(as_ped,pamm) S3method(as_ped,ped) S3method(distinct,ped) S3method(filter,ped) S3method(full_join,ped) S3method(get_cumu_coef,aalen) S3method(get_cumu_coef,cox.aalen) S3method(get_cumu_coef,gam) S3method(get_intervals,default) S3method(get_laglead,data.frame) S3method(get_laglead,default) S3method(gg_laglead,LL_df) S3method(gg_laglead,default) S3method(gg_laglead,nested_fdf) S3method(gg_smooth,default) S3method(group_by,ped) S3method(inner_join,ped) S3method(int_info,data.frame) S3method(int_info,default) S3method(int_info,pamm) S3method(int_info,ped) S3method(left_join,ped) S3method(make_newdata,default) S3method(make_newdata,fped) S3method(make_newdata,ped) S3method(mutate,ped) S3method(nest_tdc,default) S3method(nest_tdc,list) S3method(ped_info,ped) S3method(plot,pamm) S3method(predictSurvProb,pamm) S3method(print,pamm) S3method(rename,ped) S3method(right_join,ped) S3method(sample_frac,ped) S3method(sample_info,data.frame) S3method(sample_info,fped) S3method(sample_info,ped) S3method(sample_n,ped) S3method(select,ped) S3method(slice,ped) S3method(summarise,ped) S3method(summarize,ped) S3method(summary,pamm) S3method(tidy_fixed,coxph) S3method(tidy_fixed,gam) S3method(transmute,ped) S3method(ungroup,ped) export("%>%") export(GeomHazard) export(GeomStepHazard) export(GeomStepribbon) export(GeomSurv) export(add_cif) export(add_cumu_hazard) export(add_hazard) export(add_surv_prob) export(add_tdc) export(add_term) export(arrange) export(as_ped) export(as_ped_multistate) export(combine_df) export(cumulative) export(distinct) export(fcumu) export(filter) export(full_join) 
export(geom_hazard) export(geom_stephazard) export(geom_stepribbon) export(geom_surv) export(get_cumu_coef) export(get_cumu_eff) export(get_intervals) export(get_laglead) export(get_partial_ll) export(get_plotinfo) export(get_terms) export(gg_cumu_eff) export(gg_fixed) export(gg_laglead) export(gg_partial) export(gg_partial_ll) export(gg_re) export(gg_slice) export(gg_smooth) export(gg_tensor) export(group_by) export(inner_join) export(int_info) export(is.pamm) export(is.ped) export(left_join) export(make_newdata) export(mutate) export(nest_tdc) export(pamm) export(ped_info) export(rename) export(right_join) export(sample_frac) export(sample_info) export(sample_n) export(select) export(seq_range) export(sim_pexp) export(slice) export(split_data) export(summarise) export(tidy_fixed) export(tidy_re) export(tidy_smooth) export(tidy_smooth2d) export(transmute) export(ungroup) import(Formula) import(checkmate) import(dplyr) import(ggplot2) import(mgcv) import(purrr) import(survival) importFrom(Formula,Formula) importFrom(checkmate,assert_atomic_vector) importFrom(checkmate,assert_character) importFrom(checkmate,assert_class) importFrom(checkmate,assert_data_frame) importFrom(checkmate,test_data_frame) importFrom(dplyr,bind_cols) importFrom(dplyr,bind_rows) importFrom(dplyr,mutate) importFrom(dplyr,pull) importFrom(dplyr,slice) importFrom(ggplot2,GeomLine) importFrom(ggplot2,GeomRibbon) importFrom(ggplot2,GeomStep) importFrom(ggplot2,ggproto) importFrom(ggplot2,layer) importFrom(grDevices,dev.off) importFrom(grDevices,png) importFrom(graphics,plot) importFrom(lazyeval,f_eval) importFrom(magrittr,"%>%") importFrom(mgcv,predict.bam) importFrom(mgcv,predict.gam) importFrom(mvtnorm,rmvnorm) importFrom(pec,predictSurvProb) importFrom(purrr,compose) importFrom(purrr,cross) importFrom(purrr,cross_df) importFrom(purrr,discard) importFrom(purrr,flatten) importFrom(purrr,invoke_map) importFrom(purrr,map) importFrom(purrr,map2) importFrom(purrr,map2_dfr) importFrom(purrr,map_dfr) 
importFrom(purrr,map_int) importFrom(purrr,map_lgl) importFrom(purrr,reduce) importFrom(purrr,set_names) importFrom(purrr,transpose) importFrom(rlang,":=") importFrom(rlang,.data) importFrom(rlang,.env) importFrom(rlang,UQ) importFrom(rlang,enquo) importFrom(rlang,eval_tidy) importFrom(rlang,quo_name) importFrom(rlang,quos) importFrom(rlang,sym) importFrom(stats,as.formula) importFrom(stats,coef) importFrom(stats,median) importFrom(stats,model.frame) importFrom(stats,model.matrix) importFrom(stats,poisson) importFrom(stats,ppoints) importFrom(stats,predict) importFrom(stats,qnorm) importFrom(stats,quantile) importFrom(stats,rexp) importFrom(stats,terms) importFrom(stats,update) importFrom(stats,vcov) importFrom(tibble,as_tibble) importFrom(tidyr,complete) importFrom(tidyr,crossing) importFrom(tidyr,gather) importFrom(tidyr,nest) importFrom(tidyr,pivot_longer) importFrom(tidyr,replace_na) importFrom(tidyr,unnest) importFrom(vctrs,vec_c) pammtools/LICENSE0000644000176200001440000000007713662013606013301 0ustar liggesusersYEAR: 2017 COPYRIGHT HOLDER: Andreas Bender and Fabian Scheipl pammtools/README.md0000644000176200001440000000544014453306624013556 0ustar liggesusers [![R-CMD-check](https://github.com/adibender/pammtools/workflows/R-CMD-check/badge.svg)](https://github.com/adibender/pammtools/actions) [![cran checks](https://badges.cranchecks.info/worst/pammtools.svg)](https://cran.r-project.org/web/checks/check_results_pammtools.html) [![lifecycle](https://lifecycle.r-lib.org/articles/figures/lifecycle-stable.svg)](https://lifecycle.r-lib.org/articles/stages.html) [![Build Status](https://travis-ci.org/adibender/pammtools.svg?branch=master)](https://app.travis-ci.com/adibender/pammtools) [![Build Status](https://ci.appveyor.com/api/projects/status/github/adibender/pammtools?branch=master&svg=true)](https://ci.appveyor.com/project/adibender/pammtools/branch/master) 
[![codecov.io](https://codecov.io/github/adibender/pammtools/coverage.svg?branch=master)](https://app.codecov.io/github/adibender/pammtools/branch/master) [![CRAN_Status_Badge](https://www.r-pkg.org/badges/version-ago/pammtools)](https://cran.r-project.org/package=pammtools) [![CRAN_Download_Badge](https://cranlogs.r-pkg.org/badges/pammtools)](https://cran.r-project.org/package=pammtools) [![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)]( https://opensource.org/license/mit/) # **`pammtools`**: Piece-Wise Exponential Additive Mixed Modeling Tools ### Installation Install from CRAN or GitHub using: ``` r # CRAN install.packages("pammtools") ``` ### Overview **`pammtools`** facilitates the estimation of Piece-wise exponential Additive Mixed Models (PAMMs) for time-to-event data. PAMMs can be represented as generalized additive models and can therefore be estimated using GAM software (e.g. **`mgcv`**), which, compared to other packages for survival analysis, often offers more flexibility w.r.t. to the specification of covariate effects (e.g. non-linear, time-varying effects, cumulative effects, etc.). To get started, see the [Articles](https://adibender.github.io/pammtools/articles/) section. pammtools/data/0000755000176200001440000000000014222504522013174 5ustar liggesuserspammtools/data/staph.rda0000644000176200001440000000372214222504522015007 0ustar liggesusersBZh91AY&SY[7@P@@@@@ uJ \ZkАtPRb=&jiCh 6D=4' zAMJT@?T=@h2 d 0110i0!щ00I(jڠPzh d 0110i0!щ00j%ƔS >cC2ѴWl쟬ceFcJNЋlٝѥ](-Iqy9͝_aX=U 4g7XD,6W}?#LlGu]nveke_$0*pq@nRX!_2&>1 lS͇5A>7C(1ZDz@͆9g$Z!׎K,/(i'LCIziT hTzFr|~G5Ϡ}O?'~+4mw+@  FN#/HI/I#hC9,*pR)JR}.6H`!$@$D`FC$Ԅ H`H H`t%&fLKR! 
v vƒs K0b` =MCNu]v۶֙bI + 6m6+vlٳf͚tՐMSV4hիVZiӧg{&PRI$g2fQRI$g2rbI 9e$@̙Ig2SΒI$ 3T{3{QQ(ǕW򕗡r{ƓYm*) B))Q+(p17B@EahMVB ݿ[) B R*) TEDMblDVA*+urȐ"!BRX7IY#8BuO|>e\k%kZ*ISڵ-/%1Fo,"б CYmu,Zֶ+, ,cc!-, -x^1*r۪ HDAԂcRda,A*ccNZ*R h—."IK ""%"R%a.A K.ITD%HDT($T@BB([4F0c0``1Lbx)c܁1CL`I`rύqouZcXSC18Y|>9cb vtҰ+DpѯV~Xxt"LI!zg?> -`"f&s Ӏ)~=Xuхj{+ _saEtei=rХKb8ByIpq,7Ôr<X?swx.~5Ls W^|i#ؾrY>NXU`74=a4%6[d3\얾d u1FxFhtg*ikU3Y$ |Z&s.p!ʶpammtools/data/simdf_elra.rda0000644000176200001440000014263614222504522016005 0ustar liggesusersBZh91AY&SY2r_6>@)RHHPE=U)z^ϷWwt{ӫ+Ͷݺww8Vi9;V5v뢩uW6{<=뻳wlTt+l'liT[ޭ[^6/}xomv]-9'StU:K5gm՝crv\Zj3Im˻QVp{׹v^ײͽս.vޭO]ӣlUE;n]@ ITA@TDDA4h &L L1M4L&&LLLdS PzL020a0F@&3@dш0d&UdaИ f@`LdѦ LddL#M0~L4ɥ b4О@404 dɦ`i& `ʞѵ62`L&#ddҡ2MM@2d4i 4h& 0xh&@5 M4ddILj`M84#NS_~HDefSIGMؠӹ36ꖝcu܋ޑA|cO= yxݍ=(/I* `gop(\:hGj?9#QV&[ .Ax_3'irx$yV-&,tjF7úLF Lm\hq80`.^g-y;.~3\&r>퉸ָ4}6rw?}y9ѝso%7n9{5 m Grb :<ᑼڒ[+PYW-{lw6&Ee:@𖀣rO x ij lӟh1'}9 g` խVw)]߯_< Xλũ`rvԍ$!;~o\ >ޭ7Mhm`kO{~cCwmyLnZoM7M l> }7lӷC2oX7! 7@٭57 |lP΁ZoZ 7oMo?ߖ7/kM* z^ytO`Ej!ڄ^|3S]("B T &@Tva0"<XkJot+K"!:Whl)-%E}ҼKܷVuJ/]tW_mAtR0Q$JQXq+{UZ)!I:BB 5C@"CLI !$-MN&  d H@> p}lo}Ejx&nTyJՆUy`ǘlI {EX4jf1fIolI*oL5]LVp`uIQAI,ҩ2 5CV6C$ @I  LJA6BIIs%. ] A34b`7C%I56oh Z]Rd-* Sf,6 ͎,ѲhKknjmZ0]dۜ\ZYW'76Y-/)Z{Ž.,N槀vApےmB4,,X lX,L]sF,C2`e, U6n]PT"qd-B,ʈs)U;K*I &! HHI`01;1dLLMc(  . 0o@@$1$ mCK@IaH(b D B CؓKi`(a ,J) <&_KH4Re` Gr U0Xrpc6,} l-6{ L/U6u,؎u Z7=!P7 ul$k4'|O'n @>$A/wjFC13?ei:]cV| Ջ߯E+T<գ9h-4w!*?_܈cGx56ᆺ+V[|"jyZȂO68P٢p>4ߏ|t\d ^c1[:Ip p @mLci!HHHHy (RAƳ&̟Wlr e.f2AL0m$0ONǫ'K.ԝ g[{gHnmoH[I$65 $4F~˽)02(o[ܛϸ*V-ۑwfHar\ j^>KLF9,a{}OS,+,W|DJi$UQ ]Gi)S'wG^~7e3H<&$6)W gm(R[^?[Sj8f6D( w_UlfHuTNwB 3,I$#sY,$ 1!e-nQh*üWr2@6|&0؍P;fl,sbɆdv 0O3,խ ߘ .#W̊ʒU!\F"d~rdϣ0cE}E=k"{kdn6I5|*o;jxQpG#AE\)Ckqh s]Fpgl%˰~YKY> XaT~ {^BY46!y>=XhI`MuxQ'X'ՔE*"+`Ah! 
bpelbR{l۩Z=j@9x'Yn'_vη|CKY_lJ}Yr'tS⥶ F?/rC%|y GuBFv{R;'vOfT,5.EMtjNFLUh3Z]Oy[i !?a"*~0V}ri+4 LUv "=|ՔRϚ]#HofrAؿ\\XXaC=R}uh |x =~JL7Jvqd+V;Vs\X%%lDcjjcm5̪ je֐:ƎbiN-?K7/E##t;;NW\s=m\ 7^ .TUBf|ހB.~[x|mb0;ߕ>4Pv;9=Qf,|,ۋs$$Wn?MaL\4S_FE6WLM2tH{uZoc*ێc:l11tHLC_Z~ac>O(WK o#CDENX5(CW!}j&P!A6zԁ!D@=aWEO[^,Kw `1t[1fÇo¬[)XIב,_I?>zxvd,wDLS,D$/l-swQ(;c{pBB ;^{f<.뗤+:+TnFzZ&8hĽ+!ʰVz(>V`P0^^k:Fm!ct jƤ}dGXDBa[Kx'_ A8%ߖI_ܫцC@Ҏ9,u;Nh9M( e4l i/YfIkr?6bqX9}-ʛfG\`I,(:`JTs{:JU&iLcȘƳunT3cP))i%|hua w㠹~#Q[1y/3 KXTkװ/_|lkM$PrC~"L#08F5,g,|ɝO6qV7#7+l(e1]׃Oo`3Zk%8f}k"̢?Ha$ ts=v?8X #㣰9HvG;<ksf{^3cJhz?_ksnLp:t!)0vEm=]%n{LR:y#5|h>1^I0)'$Op0@B.KʍŌZ٬@ueH~/]5gۺ<Ӹa]Q>tz!H,j2oJ+F4>!풊*={r G Agz #,G&_{}.J۠ew@зȴ4 ))ܿMάTG"!e.| p?&3ƒ_Mqo޵o 7ծh `{h]:mV Xi0uۗmgx/[)'ݻ{?2_r=Vɥ\(a 5NXeъ}S!y˭R=0g#]2mZ&X?~vN)'5t"_qYck,VզW87)#'n?V5l@G# R%^4Cf`Po@jƥn!E[j'2O\j-*potjtp96,ajph PW@8Z9/gI5&YR}U?$uooǙx(f_@nȤx0g׀D``ѰWm/2AgKo 6p<֢v ɣro?mV\#ү/Q\YH\'bh0'shգq(ebIdVjt^9\_~h{ j[qLWVvXȝ՞".1%߹ ct?kFG c:'&=^wxLa;}WTf/Z+nVS!Ŧm}Mvr = Omqj1sD@Ϸ_*' j3g=D% n{¬|tjgOD ^agܠ a.4P'Wާ[ QO N@cUHfց`D73)]. ;7㳸NbDo&|L"y0M{q.T$92Lo+$:}=.SCiK9~ xJOǣz{.i@ʜ<8ef±#_+NLO/kpwՌ4TN#=xl Uʗ1Nʹ\IrT~n!;# |%+'Kˑiԧ.6~?gy  6&&a-]%|~#Gާ 6h!Z_r/F#es)1/1B"̽NL O<ǚB Xb1xߋZɥU<3V }⺄Y^0ͽш.*T{>s,PSrjgP,FpX{0l zzJN+¦C.&ܪӧ1h ׌\?h5ѾusYYGыnru+LtwfkMծ۔һ-cV'{s#k7afbќD ?w8~,S]QG+O@  s=ޮmBom?~Ҟ{ 8e^V1ʃWC{7oہ-G*[C@4qV=ZǥɭLLu=RtSdOQBɨRn(uNڼuXc#_b,H#lb4:rM~&Ф֘ahoϑu\wUjO7&~&|6fieu$PU5$DS{:a^.,`H8e)J*x FgIYQ u6,S%<ǜ||噱1ٻ5*79BSazk䥅7~a*V tJIwm[cۥwh}t8 L\Z ڣ uqmS"աcw܃D[rd.D),o׏hN>md2^D͕!j`"a#3?Րwe8"rqm$5}eU "kp+BU0* Þ3VwSXjL)^JsBy~ !(@4ŗ\h@t.yE)w\֟l6)v'\)qVfAm"dz ThLR,ߎ4 $'~|^'7)o6C{"`0_njiM1omh[àzym L{Rjj,ߕ3|=Iԭ}}O'o>6.D}< ;0xzȨSy5b\ qaf"ע RڦGN#cS6a%74da4k댻L5@'OPRBn;UVT!8/hLY?ZYRza8cj.‰IOUf>_䠵Dڈ@N5'vw(-$.*Koz =4qh髉.kOYwܟN( 'E,R M4n/ y8R= ߠTJՔ`& V4whͧr%wog'&c;(u7Kո:,G?`{e[˶t-lvFpS~G_e:|F`> ąc`Fsw+.69rb}*7sJk'(VX9.' 
$z%n!JΎwJ!pd%CsE}2V/22cpǜ5aGtM+|P(C՚>jbU u\ryvٛH3Q:bە;2-:Zy5h$^H` ǫp],GQb1V23{ܘ9QL0wT)Ktߏl_!AE:.O&bR} (9)[$Q57xO;|qaH)}GeEX"FDdSb$ zLzHHinJS^S7VrQ'1>#hz뿈ÅP4:qm/3)\dv,+Pu*sK= X1kz Iư4'W߁> n Y'YEiol 2߄ztK"2 u=ܯ)C!1ҸU"oE1FۊydXwy/,j @2[ڊ:햚9  눢JY}ww(^fGD[ o,yT\5#Je,[۩˛ǥ&ty)aJHM"]R p=EM ͢ȯgm {8js֭V9xߺ({OW^?SPwCUUNh|5SF%X1 &X'7+d*+}-^{O>s2(O#{T<aaK'YF}֒Nj=nrS-kLY&-mDŸI }UrS.= Z= 6w`YNapԚ- /ȡ{m+7 pTg~GNESod1'Eth2e}NVelў T&"]3r{@Pk@9M?U89&Y+` 6r*hV% GK ɯNBu4cAuuMօK9rbsl[nD2&41u3öc׈ _ߓvw@S<ƼǧjL BFD|MڑI5dƑ72"n9vc^ u/d=\[O%"SB +)SR7|IIJHeg8BUz$ 2 ~l.$.2 0'~aTBgT1vbaZ9ZfXcB! ?}@Ork|2Tl[truz]MI&<[b|vPPCzg">Apz1`" :UEzpKĻLmK͎5ń05wW z I"i}E(gp/]ǠQ1nu@L8)M!qfAna;4|U^1^;ivLɇ?Sm/WXl S|I_yoIm('\O`vO*Are.![$RZRԺR4eG' l7FRɛ1bN Fy{xkrpt$H7vP#GrN+_邏DDzTC)`q7<N6ENc6le}>p:uʚ,vFVE5~5ܴĨ]pS SW~Ho"_oYin[8FKLeTн& DgAǪ22,,w+o@gZBjaїaZyG]qy1"`vΐv,t?.j2z>!1_CQ˔wIATNJ0jQ 8(FuN/fyYhOň5vYm'wD&͗X4 MѪ1UsKׁz'!i,PkhZn|'(p0bVGiJ"sj˻Lچl&X?ku YWT!6g W&tcI ~ƀɥXcQlc5]fd[qS/IFWk$-].ndٲ`l}>Yw`bMSKCE NGܚ2}9NҟWNo.elTX`9Dt֥]/M_\1HR.֎j"RHSElA(ŒQ$[>[qja 罧]OKaC{S.>]驴pn[nC3E4@hW/!EtxZ{um3 cEOb<>kt>.͇v㐗Fϱ= xE9RĎ }6yڊʁ=DT%0_6~=gAy>Ř|1i h (6;FPnw֒8wΙ1 $N .^{?5d*RKWТ{M-uSRM—Q2*SB_u՝S_QŜYق0y. 
* vA օrIY?'B a<ׇ8K:a6Nrfl{%Afoo5Xjˤi-.g#ل㞜.Fee!*p@uPٟ[ >HH#_7[O}۽ŢnZKV4gctlQ&ze#+9i,Hb} 0[OAɹa@Y_WZE8.o9;:fzȈ?3YE:1Ã*aj[ ˀ5na[l-NZ/Z/چA1Q6Cv\ډ gg|-玊\\&%ZgM{챍-eo6϶<ѕU!ٶhiS2iνG.TŶs&q2L(6oǂGUCRJK!¸Qj|5K$״;ߏPMJS+za1) ݗgq `>=~й(cZy 9&6\m5Kvy+|5۪q Ys&yב ϯYGa5q{8-Q<0*3˴jz]XF3'B@!"驗ܴJ{PF;'߿iӗ/{[zl uERH%AΠl#fݤKz ͋3ocڡjmwYhVcY0alFe2Jߗ΢ͬ V d×;6X(2?dF%s!|DFp+Znks=e"?=A0Jp[Q_g)lSskdڻ2s5\ͼԕ=haqg`yqk{q~7OO'e}Gn4l"»q-tJX9:og,}^SjOx gl7м__@MtHJ.M;(~K_;J}Bgoi%xW"ÞdUw8kLJ0ޱǕWCě_{׿xjY ]½t|6boH%ĥ?uEiF̚L0=UL*5LlɽS<%mĜ[xxl\\…ώKO&>;Wvu24'=Dg2Pem?5 bh'k]ؖzJQPG&{} c^[a*9^+zĝ_o<Bke^U۹Ǚ 0JV' 2nv_oB)&r$WzЋѫ%o1hKT{/'9v2й0e2lז97 tc݅X2]Sp:lSXP*jrz:{wv^mS88YvoM `|ahJ 5J~y+XXD7 \UԈ m}G_Nm ЩY vviYv^asGTY$ 3:'}+ZC֯r(STG'epwRV@P4Y{z' >%1xSAAý%ȢمEERD}5bUOr.H4f$i|~?},6PЍ!9}CT K@ǯ]@MZ :+C y-̵$x?f+T3ZFžN/'nAMIG:@\K-2`Ig|-%n7+N/'1.d:\ ?`EaA$cg,hПʌ7{77}b/dGƏX=6;%ّ\RR&RlkfqNJX6^ 6B⭕,wjR'dLK9dpw)0C744Ep%D-F4s}&q͖~\Ѷ=WTbŻlE-H/CbXóY dGwy.3Gtf9b.Il"S T{8 -,% E^@~gqmLRmDNEMbkp{.=3%%owKXVXH.uLJe=M{KM^Ҳ 4rmրWH%ٴ){N/sM)_5sXLɔSEBWcĬ7u*#7hsXb%QLtse~yvQ4D:&4j#E:?aS }E_9\'pZ̖yiϕB+ح$@;mfG?&H5p NH8$RtgZXmv7a]ƋگUT ̹pNp‡ʾӻfɀCn*jܚyUCvJQƌsd.DL;F<Ac7ĔTomq@z+P^:dJԍ[~H9ecIJzj/VBfZZ,?VM$@s]2COE^g'lz), a԰SbK9.,ė{T6zNSIAN'-l"\If]fhI\9)b1K#&Raj+2 Ѐ6m)-z*Zx4su(']g?h+3oxyNZ[xZ\h3|bp9m00c 0W8;Mb>VX s| 2~#3g>=F#|&S 3G/8g6E];7d4iy}mϵӉE}cV0 ]76#-WS}Is%ѽ mbb*c٥ܥXLnd5Ĝ.qHN)7b*w?m=vH<5|1DУm0D.-1ؖ%y`a_2Ub_8|QUn ~|t))<-[7_NgR߯6]Qp~ &a1 JD)i((<]4NVf}"3>y=B'Phٕ/(mz!Z"V dOg3ͅY%- vrKY,yR$b~Lo;/oxBc4.p:?p =Vf˕MH}^VX]}F{I{s|Rp S`| X^f12@%6;?ٚc|SG1Dܼ͠0Jջ撱 E]{+w X(`5xm~:X}޶^hdzp"j]{us2nyOif^Z bq~fo VX%z<0@9_ #,vT݇9 &]h l⫗utׇP8'Iv3bЖ3?yOT#vҪ|Mv8f.Fy:--3crokɢ3vM hf\NO>omuE d+Q#mX"O;TmV-ǞCyo*nFF,>y-'<=DVz[7;w Q?(Hϟ]JFR<X$7>I'硋`jT1(`ِM֦U#.TS/?W]21cD;VaQJם^w?~`6DG@ 5V (g9[ٗmAq^0o%,>C6R5lGQ%twzl+}r_Vp)AFKx%rSm77p=knʩ .1]53϶FB5V^@Tx YU2Ϟξ}b-r^U5Ko\8>+@2V J-3 A ZAU^qϰI1Gh?Γ<|WP7p{-4H ^RAaİ`=zBK6}w ֫P=ɶIFH!+;#gB]?C7pʯ(Ŗ<UhT:5HG7"O\ݕei7lE>>fEm@1G6/eR6RºzR˨J˪U8Al\x͓ [sGp `~sxڄ"Uw%IMgM#$ޙW$E!0_π,u a}xQOPa\ߠTs". 
ps8/p+4(RQ0G#b$o˭uVl\hS;,*'BDko*Z5@k dm Rkt7>Tx(.vB{TZCzfk:}*z^ h#oЦ1B]7oU-Pc\qmV<*,v7Oo,{I.O縝F³ hgG+֯I>Bu]RX!~1z[}U@YTGFIBAgN -vux߫uxq킚"S26SBr?OvGIjwb(iqe 7t~7Ű kQO< 3!KOX @Nfo5edBKHLАzαn]5wq;G"'Ca^ID-1Mvy"9K{ڿH[xx"{IRŁ;~Q8s}_D\t[ xH']>m,ܑ@m0‘y췟@@FY]g5vjW i`4 4\"W4x(V^&bIPWHXuWH=ۿM=/%Pӌ QBa#X஘4w-^ioCI HTْ<<i#fSWyDԸ )Rbqx^ J%j Yh~ 򩫮9e O94 DpzZ,ߍ)݅hBuL#DW4L;kɍI<55H1 f˃w+@kA){>+ f{6UA?W֪--XHE6;@S~ {yn$d7*}i/ JlcRzCl2ln̩7޹)|b*4Ycan^z! vRX8A A%&OMghZ?- ;%)?3tؒuАI_Rou}O!˱&e۞?=L51 '-{G +lKkZ6f,>SJoq\*vl2)m𑴏ҎB?z]:KGZBxQVyFDdTd&hG2w4eLۧg Hͬr@Qս@kPGXuʝ.U%]Rg{׆e{{$/e {l&^OֆD%b)|w #4 n}~ 3&/ȄʠJK7J*0]U׺,Ш-h]S6\$Xa^3v\F^zXJͨmY_ )1 ~K^[Wdb=Z:ϝʈ;gəsP/G^td zR L["M i./mN CTz ^=֩hy[yOsGjk +&DvWA.ɝ8#s DKr9z.ǢvtZ=_û/Ĵ`.[!nCK&TYHD>+*Hb65^-ҋGE?[*uħ,Յ|"\NeU\EcRY'ZT۲:\7ѽmv=wyz*weW{̕ru24"gw-ش{f@]'uvz(ru)Od$颬Ĕ~Xty 8tkY1 ՗굜|A<;Q^2}mW^2%dX_S̝;X}qW^"t9jpJn0 D~SGts?Yׯ6H+L bI evsp $v&d1㕳X][|I|Z Y5iBԏNiE_H- cFm/Z#T)ާLO!PדLIWHXmWyN[۶&梓?E\k[~ 7F^\ډ3nWۖ=oeԣyBo*ӦJWGö2˗e,_h;M:qN2?"]XbYПי*7EY\ȕIiM&eϮvږzbAwP̾@=B\ֶ_jv |^kE kS3oi\,͜rH_hwI~rMfl7=9lHoLy$ɀ).%Gl5yb[b{z*;`n8dz \Na=VMEB_ xKFrJe#(0yc?pB~=xkd 5~mv̚PDEr؆;u+]jQW ;1&X߾K<57ʄh%/h=Qk[8FxO`=G#8_4QvT5 RQ54T~q̩Ҟ #Ԗ_J~iro/WG"1ҿ#̻S-Z>] W|H@/g~nqd T+PiX P؍fK\@bbBV^oM{\7M^F>OIG(In$;|;4\~*=]%!4[Aq15Ȅ`l?}TF?AǏ Q>!Y G(%$x |D*2ִs1x*)+qXdQcnزggd*LT ;оﻼ q2p,=8W;,O%5A{2"/VΜɫu`n8qU {ƴqVc<j|juy7y\Y;ք`pK&I>`M*~,|?TIctԷUL׭ea׍ҽTQ&_J:󃥨kXTKǬ5wj~zyPey`9&l;A!o ,bʾ7~_u vڵ|3L&g$j/_sh_o4 ,.1VZz,YǺ,Ed-x~:8J;2Sq`:[psx?3B+rr A5K/w_,zGڝ587VنPyӳsH7g=J)T41:]6>s.cm֨k_7岢0Q@u,dAhv3bk!|[ KW3qa4ȷDjROg6!3mD mDǃ钂hrϩOz})PM#i06h Z(#]MD"{*mwG6\,<{lmeR/o ˨3h""^2{+ըJ66* # =¨ۀ9 wzCnt?l꤬\]O·,)y."vbo˿5feCʬzrig@q=2KJt\$կØ+SH⫿q+@Gn,W?ouE0\7o-[0K:H.;ҹ)tŕڈ/4E%6I9ˮEc#l1*GSz,0=f_9[U6T />sMl]௅3v8 N7 cX$%0uǫэԥ0r5Jtf *Q(}F8qnuSH DܴI D^O}s:t9Yj%фVO냡º[|p+<_m^yqǨq3XY|δ=r/9~ /Ar4mg<)&.qj-oS|ċqyjSOjB/ j@F^˦(s:+٪VШCrk*4E\795(3'G֘S+;HYqRwvy/ɘh=b :u*[%J(I&5 Jc'jd 1?Ym)D^DvO[tN4I~KSW(Syy ND֓(^Av 3}SXpa1 i?YΧ̒r. 
Lyd/髖[\DӷLG(ZgݢzJL;0_z^iQm-\ d$p qX+srb|~deUQytNUv_E='4Aܼ.L7>_޵WbP}ajCkI#is~9DG;R_+^C=地<ռ &G<]r[:dԪzNq_i uQ&>3Ϛ_1X1~y.PVMC M`OF @ua ^RqOlV U OE9]MЉCJ1Zs3q4L6:Coi5LaLjHgpq"*A'#ir3L/'2޻E#i} 24 `yFKa]GRe-|Qb|OPkcyDH%rvtKES[ jj Nۦ($2#K8ukfQ߈C=8T= 2Ar:N)WA{3τa$ZNG{V(Q~s+ӕ@=r`DS]3 j6]Q8VX򻒈Yh{Μ,L_)tu 7;qN6+z6^?*([,@e+:A_<&YGS߅uj瞊v8k4zտ&FPʰZYNcWm5-.Q;ѝJ9ȼDv& I(d=/XJ۠J#E+) G_luFm;ZBsfqasjFXS*jʥK} [6tt]'UHtl M^M*7̥urW;n԰obJ6W?)O{$[AxcyN2ZzԿ!+ ETbɽ_*a t<5X(G;1{6*c&QaKsMߛ0!=Z*ѫA?T(v3zu <7u"k]`~tߘ.$y~j赾죽 +Tol\\}Uθڼ.LZ`ID( }Axɳ9 `>l1ʫre6 OM-\XD:":%9PwN.<A&ipw h*JHwa7e 7ITk0! _YVM!ToDPʫ^rQgYszN:eC:U'LG[BiVX1Ev6^]AmhDX;nX$#!sָ1[zP&a^9i9rSD@Lj?得2 UbPBD|g29t>} Ofq1;N3}vԟ`%x:f OǨȦphhrx{U#Hlx\햩(weO D]Hrk$&wz;o2r{8&j9C`4aoݍT`W_T8Msb,:Gk'z +P߭χx,LKh1:rpD)Nĝ 9S n=90LiQ}_NRLKf+G;ki a0.[va161.\YEΣbnuES (WY"zʸ&V7/~璘lUpe[#t6)<$J!/mňIDxm{L!mY`I*ȦG>nӔF '{ A5OұPNƭBfaC?%]zv5c[D?&~Jq<čxD{Ή ꕳE"Dž'HYYxzc%" Q _9Fm# ?K(4v*ȧDM1@{Mu3('V6ӛS;4T@P>J"3𵂹( <p0[-OF0I'0ssLp80;6h|XB:˽AIw Sl#DfC< 1E籄)2a`ЛYFƹק#[dUY1d^-9Bo_YUʰF2 -6Ǣ!=tH y dG)n_(#G~Gl͋'z'Z&yvZ Y䧸7/!%9*4L yaᆂBxxf*)9?~ ДO 3,QR70/n,b|N5+Jk =,T~&G$Gzek$c&'A䂠ٱ>J'pFntD m;30]?ͫm' u 3 IPx_6Qۜ1xN}eԓcƥԫ\~~Kv a>Ľ)U5S{I+ SKPR,pC;htpqω>нg'^EL#3OmW^@,,6S[ȅy䬸sN b]Pr^0B@$ 3K:9̬fʏJ.|s9}H*G03n!ʡu2Hfh=N&H4UhBoWt .uV6'HVRTIVQ_}? 
xM5鷊$":?=14^m"Z= QMG6J[xqO!E17:6;~W,~I7aLNõ)m Ds|~rF# 1]6f* P_ӟ1eҬM5!{*3z+ba8q)`O,mf*Npboxv"c$0CNZʰE)ƶע_}\ L`ϏTW7̫J:ל+[&1#QL4T ucẇ׆@ B@QVEe0|~w(ÚP;~fCn7m[?(kR2S}Vk=8LezA*6bB݉][J*Zpi`b'Ne>`銌:1r[JtMGKf@8 DO;k/=j>̄ Cs+[Ԝi+,}jASPD$@!nMJ!L)^:Jc))3?f_zhC Q[t9",9cxMY\CZ5i ɆPIʙ-M OtZεQ椰.,ae,j^մCՅ;ohgAYIu4E;肅/9hOIrVgfGv9QL奻؆+ O!b.r}4YxꤛHu㯺W8UC,E6_8zTUw.p_Ѽ]a(]ɘ#ERN|a!YئD+jq̧bJjg/Ub־*nŖi^3N- hq؞5"{;H_ PF`y_y+T6Ujg,ybhHGP굃KҮq;)B%ٜfX)f 8~uě>ʯ]wl&= _CH?2"'f)X'aW{dVYO|9nj B\Tݱ^/25}+pk$&P*d_gUFx7; q%0Å.2=z|%o27?>heݽ߼Z\̋oǯT6bA8iT7n']p}#i0ZՙC ]w#t߂3DxfWD\l>u1ŰŌ@[&Ltןdj W罊 @}=5XR zK6#SJM_D<)OnV(5:.;O1T؛Uj_5,_hd&p}<|8e+,t,UvlDY pcu WHӜ TћBDtgS,IWF-xuh=1hO7McXYh}BgAiէZ] Bf$JqBW^̧t NJ*dƮ Ѡr`_ڕ+.NB͑v%`_*Cďy#6#PakOPo*$Q0uy.)9&*uMB&*Y+2Uw@NJpݧwr_s+}gihbwCС[iTQ,~;jyj)^LB{ &@ak(D fJ2;G3Ho=j$ JYu h_F6VW\hm]k%w3|Te@~$Xxmt>粥N:S[-$J~uu jU1+H;[]`FZ9:AA⣘GOಲWi4=mWF>%'߲I[gmUwԕ-5JLy+e&Ul㩧Hf ٮxEDqLB G>\y5VVW m"QFdY>8^rGtɨA%VP5S-֎9?ڴ1tzfH}ZaI.aEE@bE6b2ؕ9',Js-a"jMfɁ^>ehN]Z;M9͓L*bRx&F(̉Âi<ad ("/?]Y0L|b<ͱ BhWnCaZ@% JV.`EjUx!\v"Ir_L%+ ~ uGNg% Ň\ڌt jӿF8wHwb!Aw\o9EL"սyX|^IuV#mb:f.o|!Պj߱IKj)) <v-53܈k@{08y]! Aݺ GcRg v)qebo?VM")@]4Ȥ_eUu1Y+i#U _oBz۲bN5 v *xrK&XPvUzٔTV]'aw"~mtj7^;vXrR}qƛ"qy"VC8'{֍xTƏ0'ζh7!Z{"JC1hy'Zs\/HE||8|,hewDUv|k;;7vǖ/`Ӊ/zFĵHP Qn]C fJնe[OCB0!#(ݨaF$o:Jt8lc1OHOWCˆ jD;tb,2dyFܡz6b36{rU Y^^\ƔmB2 X7u ّƤeNԡnn9AlEW mƆCZbשkz#OʩenI!r1'S\ͅӅƑ`_qxKe'd1M>2ŌH'ugY Gˈhe? cUꂒoF[>`%2aj/'B";fw&! 
֫ MY+?τ!bf̰߄!ֽǩf.=tP5iUK?j2þEv!>!56lE9@ULz}#0w+>]CEBAӃ,0Nw$ªYtXjUƵF m}B̃Y SXD1I Ley0.E8?vBOO09yiH:hK=5]׷=lRD2_Lx~ڱFUgN2%tb#C@~r!eZѢ8.8hp~b79F}5/~p1Me@9ʾgOZb-?5vN Pٛ1PP8'W~u.L  %]2.LGx!KIrHNq/5(;͊j޳J8od֎_<_J%6+'뵵j+G_^vUw^8XD7ףk)B"^Xם`&@" n,El}-*\D8nD |5:KMg`ՅbLI(\MukGW̓k`Xi[Qjv=qGN4>nV1Ogk %T9'hqCߋ2Yn V9ڭD X3O"-f%٤wlxUn #,h ?n^V r cxA¥CLtL1ji9NOhMg25ݴQArBfQR1!)[Cå>0HgӎMsք;RDnu.VQPur9L?DQ1{+^,.S!_\-0,~)*c^Uueؕdp1_T RݍyWՏka;wG$T\TmǛjq6!Vd'ZTP/vC3ukO>a0ߚ[}Kf-[Ѣ,̟Ha*i),PRP]X!h2hHCgHWߒ8+)E:ǛFŠ;Z|K.,VReu>LW.17 *ŕC5F@G >av0&sp` fr# 8OTԸt_ozܿijMfg*8[<7^b⌧:q9 =x㲄: +Qd4r-hr?AH[oWnH1hGd(7#?T"hsb,   'aCb@0f3WOk.H/rᖒ`i1h3.O:XKU0]ռ*zS8d?f3q}ɥ)* |pbByPZ,!}驼3ҤK[z-mWUEd?6ABJPՅF.vUrr”P0Oj:ca4Ef7>r3|zWݸ-}?Z,W!o '眵@x)9g$'g{\eugYGj \Moi_Ŋſ+ 0p1e?o"b1(pZ- rBjBR0s9|I;:Jݍ-2>n^>xzM}oU^tz?k1Xi pӿMY $.ͻ/z ӬzaSW^! 0 R $Ѐ]1 #zaܑؐi&;=cG{oݿ[kij7*tNj#zr k{]1eR/5'{'ZuAxmuVV~Aʮ*qEu+Z y3&Ձj!kQբV>8+CMTm|Hc_gW@46e\}^H%SV蚴5RjE 2K(y`>P!Όk΁D8[ $8\ 5<8kd!RUR7xo0ȱeJډ;7$ddlUM׋T+Xε.efj`H?r s Ri":)c|ki/i,[H@7g}.t)n66 `>>ci0g ?TnwthqPa8 'J 8fȈgW'Yg_rsh2;>,]gU>53 Թ!u D1h;ZB9![.m0A+h=6fMg5HCN|YgƣO2)2VLboGH&OϜE;G-sl-pi:<ǝfuGy:0B!Z[b6!1^ (>;=`e0 $ E@@<"TfO*4HWї+Yn+ʬjYyơ5:z(yd4UFfnt<]ig5bs߈}qPųS6UJ}3o,5?︛xՠy5zkUӾ9Wc^z]-l[ڊTLr/IoٓwDNs9\Hb}^ ()4/4~@@\O!qEoZ*/7kZ P,SZ߰$%ϙ'NNk̝̓~*qMPB>>QZ1x^&aUk3E;GUlJVB,K s=;а[I^h'ؐ´BB.B | %KGS$yy MLYבjoC6 NaT)SZ:1}`҉6Tr(I3kE(mӨT6آ DlOsgi1p!¬$ f=*+;Kp~fVA&Ƌ/5Wa'sϲՔsԱ1wrCbv U3>LݷFqSUqpS^}}ֿ\thnDB6hrҋObjzȁϐt =>f]ۧWĕ9hTF˧=ZF q܄~D]@r[)l t6__>`0rvFBDBwD9r_E5fס]ga=o{X@:|3 0H>( 뗺KA>^zd;. 
iZ &31rpKߡr_ϱ@Qbh—/j.6P?>^3}B!:U19{_ Ql$!e0 w<zV!b(fԼGL,&z)XDhpҽ& /MҨmEE/#hbzM-# nO|2ЪrJZ^ oZj6aHľ5'^{ÆW ū/_; J7H\ĕ@`67muG}1R`n]T)N i ؽ_t;-!4^Vs7rn]m}#01 dYZ~mM[PpO@;TH EPIĖVNإ~*8ѓ$3Dİ3 ;]gi-y{]^k6U{b⍸mͲS:M=aꇡܛ߮2:ƌm9UOk( wEPӉFއvʺKѨvmqe׶u XU5|_|yl~h<qac?m);8-u7`Wִ*APZ@NK+s@À#}cI1eTP^$R/?H^ vK**t՚]:q[ ʺlyz l!.C[^j$>]Lrdx͍">B;}] Ai< /Xˍ|hNe9ӦZI2^6t*%\:~kRUͰBby0Qimv<<Ƕ7׭ts- 4b# gCJڧbÈjs6Ok@AX]mhRQ9]%rw.+RUFU %>:UP1כfr) }ryo/>.;> +D")Vj hp u1$j iWN8^F{tͭn܎u5"3{.Xu߄.L TL")mMOS`1y{_7i7 FHt={ CQ@"UN90#J9}M=WLH͟Y]ǔ)S'ך~Hp^%~Cpnѻ>[|QûdE:q (}8wr8od etPc; @h>FC*_Ds1{tޥb*0'.VBiMR!6@9f%3h}:x ѵ=ߗa6GkmpͿ|8X<q m-c2NCU2ZL/^BlbeU øBD͢7.2}jeL,ʵ3)P/\C"P džG"{!c*ۯ1ԯuϛeph]~& -n}]36KC@P9ZI@ ޠK=3KEj qZVYN:  SB䄭%wD@A]1~cK?aĊ*^n:2cDQJR%",& R?n}k0T[JmB gY^|f勣xv*SŨC=8 ,{:GH#M8WvkKkSHQ@=I4^< Q1k} ~5{1 F8tJ95wvl=9o2('/8s0IScn2x;FWAZvXMC^ZBcȹdT,ZC\lԘoa٪OH9w~%'b:W,yyCj=Ys TLh;Ol(zmC!f$vCe)1nAjRJ`Bԃ'D#;qϳYcfؖgo V^_ɱ\DqP'491|.y]rA«vzG l2Gyވ ňvã_͖(D0!>*kioòlyHNh潤L#*ȅ !00i֦n^NL1i6-ָYZ`B nSRH`aܴӧǕтaz+#RfeVdQ!!?c>x+$!cˆoy1ֺrq bϣp>iY@DԐ{W}$T0xoa[ ɝ ثHh8NAi0ad-aX+a :[@#(NGGҞ튬-=ګ7Tw զXN\2LBA_LyticyȱT5#$;,DJ)઼֒& JXK-2JNyq˸||L}QpYj-+ٱY\~Uvm# 7+ 1}AN4o4} GNQ2te#f]1wTrBO`BxI6Iܔaѿq4ߎJ /cBڂ!=djEq#XI: gbQ2QVƵDT,EJlY0Ƭ?$n +қ)qQ+01Ja...y=u X0 "%>T4ƤB6ҾG =M 5&Ȃߢ`@-蒹U #v(Dő}=LɥK( 1q!33Q&X%j̙JTۺSJq0g )|FňJ@4?BT: De4HՊ6ROHo; }ݔ>Mr!cbsSەʓa#žڥ}XEir:0 όёPTsƒ*`BEFodW/>ح P 4=5 <-ԭeW/V+Zspz-!#py1|IH{g3xR_.GZs&zWAXlq3\J(ʹ3z8UitwCw,Hgq}Mpw]X@a}ڄ4ٹ ~֒[ B̀ FҎƫ>òFBM5YjgavIUrsh~>pR\*C Sו'3Bk"|Sk]ږݮtөäHc38/UW_?;LW=7Lk9Wi4lKl-9i4av+v?̭`+MT97.uN,ag[ɄB#] ]gb1eZ F5bb 2ZخnRovUA,tcx >]'ޗ|Y$$HVZنa i67$7RАm>[/6K[W!H=yFr &9wL"*krd~˘r[S@8@uj,R X`*CfŸL -Lb*Ԫ4؅MvVsVR@UJ  AB>{ mZ0 kjlԓVگx;W#$H3G@6*[f 5l4W j[͠<{@>:.G|/@| i$` FM ALF##@4hM & OMh&Bz PfmC h І @B=FݽlWj5ÏV7O3vo-a Sl-8#1.zL7,5 $8TC/%PNWzD>C 砮zKi?EHUhlʮ9O>݋\Ct5 ˟|S9 mןaI+غ`h.0IeQxRl X 5I%-(r{޽Dð7m)ERUL׸66jS7P}DtBt!aAm04eq]`u*DDyr)mV$Qi[̷g-OZ/oug+ĸZkX+.:lln+&n-mGٵh;N붍KZ죲ȳ2qq\Q5k+.))sqQ:qvmGYk.22m:tfN")HY6˶36'uwoǾ^tvD99(NܜYK&`ՎBftI 9! 
I||ycQ;Yk@.6[n_>{־﷾_gQ_,Jt$ւI`$'vvNS::)\>"q\8p 9'C@]%.HG8 % 8ogޚmk{[ُ篯_>5NMpimmlKLLc}4ɻA:'N,Ӕ8f,죂miv/ϳ}to_^?"RT;$cnw>{kqٔkZpۛLI I*'lĖN$C9)(I ) }n[wWͻ''ݮ"*A8 Du@p ۳((K}ޯ_on{ &}i;>}>_[N_-s4slrC#k8I'8Sn ڍ\)#'DDwr\$A{oiq%Br!GQIpNQHsw%_$ 9S(G|$t9N׷ 8"NㄸJ8ĺmg};Z6}>}křsk}zL+}{y#Mp۳G/ϯ50{'P;[2K\;*]P9-.1BJTb;e*V.TR"]  ^ːa"PY,"&$&CcLinl9Zk`eshѬv*ƛk;[yYi 8xu=Kk"K[N!}=y=J;vtk{Mgg-78k/z3maDcc fgq%6Fcsٛ6]Ww^gm(eXXWbf+  eOsbsHb+bXX@*ثPC>$*3.>m;vCkKQP1 Bh.4Kse$*` Brƕ)uaa@R#DH/2f] Lhh0"i(@*ЕT:J)gHEI !TEB3HEUi6T@0RCAiD.5Up&82xL`EI 4KyA4Hw,΁ Uʲ1u]B$JK+*URA&eYc*$Р 6 2<2e]UXCXY)с)b75k[h Ι{$ֿ[ӫ1uKd5@Fօ0+$!¡-.̭ ;", eD0^b dBC2ixD,Т2B*`!=&"4H iJ{ .aRADk(fK-,Ė(ʬ Xʪ(2* 3% yr )lř#3ʕ2fwaဨeR0K@ZCÇ AW!iX$B"Dyu)K*e2#08pT*QCh@n>ٻ[rB>K4djDGI,,"!<*"KC5 \Tj‘X%Q\"!HPB fbF\@b }㧓V}K~i5ӫ5"kZ7ԘŶq'ak[8\Ą$C F 2 x@8ξ G`8M#eBYGJ3sm^Ϛesnëxydhkf83D41v9#I9Uu2A[pÛۥ,'[9xG"]ѫF<⾷;mu^z97sM;eGGɔHDd8T5SxBG8^dQm:9ukڸiMֹN.gnw:p5{YkwS١fm]lڸFs6lNmƈo:ǒ۷o tF +QaCR vVƢUØ BK[usml8uR\99q5팰⡎Zj@gWGo-\[wq7<598G=x->)Zǒ&s*yΜ5JZ.c&;'zgTGDU+ x^U Vi b7f莄-v>s)0#Q`Քg%d^F+ߠ(Ӏ>8(I8h LQhOC)EI^ ٝo<`&#OU$@ 9Ec2mTW@x:/DEY <=}lӣ 5y+l(|{Omrg" )aVVQB0ӖOa Y RQt,I{)1qn6H%Q0lF 8 ;(#&ư؂Ax[1PE8"bZ0\Ε2 "PQUstx:+Em \n@04: rTنKb8 ^(2#:kKOW_-l3+.=,Wn{>js~9 \3hr|`0,: 9'10C0, {MAr4L'FMEät|P ҘN:U. b{WQk+FxIC[BwESKp9 Mj' |35P GvGrP2ȫ$\|cks9/Al ]<v y pKu'6I?䣫Byzcr%P|W uYR TICJAԚ5콬'G m`/q??B'8|;>35&{gg1vCAXiCqv=*m3UBMZrœF.P^+'D=4멮wswl9\2r/'#m 8~=] {N" _Q|ܥѿq N*69"4YOW=ϰ ARsk6uS3Io=/TVGn~-<<&ٳ-?9mdR֊-2g;OBgc͛~3|ǩy/0IDvLԫqS$)~+ PViī0.&V`r/X6hĤ>SA<B֢)~-lv|uڴqwq&UoQ"dfɯM#u(Bx !W)!i)H:%ٞ2+ 8867\*#vssi{mkkD?78/~>?X>Wnw 5dYP5)G`W[<{Ehn䉆^Dd# BAu!]zطPeZZ7i*zɌ5 v*2$m@߽2ϙ<'JR,7YUj`A3pSM/iJ3F&0"é0,{Qnvmdr@, J9K@eB-@Cj$ߪ˥M8mi; 70عN7}j(U[-zYpX[E̺8P`~guL8 y?&F#&.jza"0CZr -zR\kIJbmʳTvGۋJ̍mɪOv+K{ jpl;QR? 
X?U n䞫Zض넗 E gK6tgu}z J>Z¶!W~d,}6ڶ GM;s.iL|?`DzK@Ƹr\2y 6D\& fezW#P>_m셖pZ\;_#w΂Sq`#6LFѫ dn?'QКpk>kyRgOr zNevkTS*U5U@evc<1* $%aGl5˦Ɔzw\͹˶rF3NpbYooN8>_l^ص`QGңLKO}u5*"R-}z.>:?E=Wp?8]uð躄ġʡvN-¾s}VkgeW/r/ًNY|Gz$ vy6Q9qP:y Q*αw8익_ϯZg_Xko=p]N3 ^\ X1G2pCl> zlm8\V*M}s wQˉ9yۻOD<-L^/[{.gj^:^yjW{t~nr4jwc:b`;\3DP.!ѫGDŀep 7ᷯ8 1YClSk9wsx-";B.F; =/"+߲ ,5/v@9͛wK>}ⱵZW(DzIӈy>=<NcM_i8d2Գڙ L@]H=BC!ݷ!v[08],Zu΍y-V78j8V?m<~R0}/FR']3 ʟ3AcwR|b]^Z~H+Y(Kb#Xȃb|9?.r[ֿ%pZzYa [}jn=X[Rsx)|+$[[KNnsJp[sPּe\`i8[l/Y^euڐlu_LnLIϩS{r)K7tc1+gy^fl!?|Mǫ\"q^zqroij;MUXFEW- ~E4Woݤs({@H߹LΥ cOO ɀ3Y;KNξ2ND~Ca_oPa֌ͭD/65DO׿o7w%9 7tsC\Sԅ/SEi|g1 @ҀdY#}o~O3+_ړW})J2%E qYy&^oahHY|W"Eӎx_,&0ݔA ?᝶=0~ղ 3YdÁDs> kt:Q7lӐ 5::PWitt]" Ǫ-?Qi9VԉY$]̗;emt?C> " Rv5R0Ebb>|6hFhxt DqmR N/ @P V{s\r A'H-~ۗ"!4lZL@V_m VWd_޳whQ%n\p"iVk7:#!֜C4o/Ei~? <ϽƽWfoJR) 㭰ZvD S?F9P1_L@-"'i76#_bR|v]VˣJ?RBl{4t 1(^|_[}39g;~MT=Xy/!vj^UN'rdG˽u}2GqBVi-eLCWQ%%@ QA@Zmt}`,u!"*wU; '&  Tkޝ1[ˆO}*c4+I\)ӄh5LH'žXčϋ STP fW1ԗYS"APl$&][p}-C[ts1OP/UJ`pgg LvZƱm4]5G%eCfc$ *O1J Η f|tۗ k@ڋb[i H@1<+hPZܮy=v >5ngr[\/@d걠UNOwh Vޯ_yڟohw1z,.bﶪ|r1 P)08֕cdi@/RKąQ&rtu䝁fN6j_.VĂ(jV6YcagQ W}DvT֕cT>H}$Btն(뻔TOK?A [% F@'-5 u c;r0uc U t5\bBD$F`8*(^fJW:m6TH?mZKGc*H30$=cWlsm$ƒWQݐiA $x 0 +()5AR6sNOb&҃QQ1~V2 &!A)BSX&1J{M`D@S}ڥMEGv73 0ܒ/[P^)'9G}o P~%U[P# E\=,+/`ΉWY,:QXO?zO"R@=@pR2FbH6"d"au8eC wsdڀ\]%D KGx%S1'@>4:lrF0ND6DgoȇO/8S >/0gp `~ptW/ϘoZÁ1`0/e.tNE0~vN1t !Y:9а,uC$uO/'WgbԻˉO {;~wKh%Hذ^Tb1$iR:;ܫr#,|3n)=0x@0gA ]tR.T\r4|b`WM4^nyg 38xT(ЁY:&FG'#kkN dsuV xN6 ^4\֊bܕxH+/+ڋwԿp S88] VjB3[b4 >ef2+Bf}.L)4ذ+xi{u )}w Qf!R$m{ ]׳Xn *@WF ϗ}Y,3f3ߦ?h9Û栞rJ$H&i?7p5M= xԼQ3j$8iF:(1Os ImP $*1eRfb Ğ&ŚGHTVix{C||ԀڽLA t~ۀ<ߋh]"$u?5W^IRF"Sc!ڳh&LI @Czs2ۨ& 3 "Qk{Ѽ=)@@44"txG: 4{A6bhx 6\4 b6s}2+!oK1lFxxM&-GT_  0fVɯoM8f-2t럷]j07COܐ2g^ "APr>݊nv;+ӷ85F!OhZ=!s"0d؞@`1ˤJ :`Ah71i}8~Dγ4òoS7Sz?Tm8i|,̖e҂ "sOaͬ=)a 6']ڬ!0AA.M\?q'&|uUW~wJ4?4ETVBZ"lZA IMFdɀmՔ۪;}ڎ㳛uͮ{G|VB_yn:K}^}žϽ}Wڡ6H *i8o1s!):/ɋG}Wk `cE\Ə/;1Mz˷GzͿLww0{Rn}GV؄M, ;"I" mIz.\y\ D-`az}S36scFܰ|͕2XfU<*[y(W' k G!\~NjZQo_'__v }(L٥"/`!"!va,K]@W-Ehe,P:lٳgM#tHh ta_P8ZڶmQ儏N ](NBF#} 
I螳+-U\n,dÜK %+'[*fLn:504 [)jЍdR<t&&t{F͛3㳷K6F^H ΩdR"ȱwv{I*y]eL2Kf&Sn3O89C,0\8$+G6Ъ6l(Ц +kzߖ¶sdWg,r:iA Yhw{Π>;,= !32Ś""ŃSl*\0#ug6[ XJUAxfzPG//}{v1/iߊ T']Ww{߇֐dXfVYDЪ=hi>G@0J¢^l$XBHex?#ֈi1X ZF1A\6 u5g'WnEUp9y;zF閩Wkfjf{ &m~-SaaNDFp;i+Ow5ٌTM371Lgޘ`|6Dz>4TЭ:lEdU(l"ERd -N M} ^Qf)TUze|؎s6m V}PO軖 w+[fV̳ I ~k 7$b‚Z1V05c Y^>%UmXM:$Y gws΃U#Ln/<籗ޯ7ݙenYgoۋ(O5{{]J]?Q'G:+"}J *>O|,єeOqVR $ezdT sЉ LPLf+E0m4^uGu:!DL3~;rGglQq$*ĔB0VOɜ}'| c;9yi꓋Ƒiq}_`|\+UVt>w1>nA +ݮshL,r{P#kߏ 1Yt}a,{g猖a5p>,"&²\Hn\@58"])]sn)M\2`ϫu,W,i>%qDڤϼG*g-<[S{d5f! sdt~iS mfFCQgiݭF5(TňTN8z=lpS`a:е*M5(Pc}rtd,H)#,coK9 L" F}& (~>fe$8ʳ!/xK/OഴcC._N4PoN 8KR`cb2T*0Tb|fPYfU  !bYs_ *KT9Ӿ$c _QO_w|;°ඥvɜ(jWF,*X01;~A,g%ڌ ðNݟl3N620vv2]YT-ojhƾrakbT w7C|XT`݅>~iw(bݺq=Uo'FIY|`[ :{4 x-yQF>"t n~7Cg)IN#Ÿۣﱷt}dBUa8Ԉ|ܜ|/SK1SuuvM;NA"L<mԢ7h=8 P1!D+"(kTc.$&ɓډ8DUFeJָc.4¯mjЄLtu޿E]t3r-=+Mp0 z/}ϼmlmJ։vgDƷ|["cf),=ۈ˫c[?~Aٲ`Y{qRFHqAY0}ecG)jVNH|5'4i7-&m{Z%2Bpc4*-"C%2s9FBm -TqFq=-2.w2ӏXסCAi` ݌wݕFqڬ]k|HPO:+z=&  yԦ،RbƼW8]įeB7{]3"ڤB8񍍲tL-p'QI^M/;/*8v^uiǕok;9;%;wg^y/k4lq^uy=ZW'^ۼO:򾶼-yN,*uo."tt("޷XTWxuQv۩iVw8ʾ0󤣺;(胋::ӋG\YwYYz;ݗeYQ\gwyԶmݗtVۻ6wtwqTguEfWew_wpUhDlHmPiet2z=n_)k\>(ڏ: 9Ivn3؁Bbw|ۼ*1n<ǝT*Ip =Lqu`y-:ӏ;K-+R^a$qgVY~Z;v5~{.B_>xMk>gGӹl{2O;uG"PIf\z>/ݙZ\F;]I9$QUGeOd< ;NDjQG}jl٥h>?ެGgAF~ ?Qb΋ijX#}jGR>|^T~γ{sWx"vq8U{d}x8Ex,{v] _\j@0J͓ڳ<۸ Ŕhl?9Sҭ:(V[qn o d4 e[Q@Ay##(Zmsj:Ia:) w)E⍢ʔE5A ljx%LGтǾ!hپ u "q3!L ۮ@i/>dݠk_j*Nu’yі@zg"o;X/R!^>-C(\@8A&?8[yEHq bB," ~W?*AsAȌi焲Pzr8Ղ|[ 7;uMAgJ愈Y밊C{wz{& n|4å0srqv_ߒ}^iTF>w|L"_6HR{iL5;t J91I0$Y2=~١߇=k֊@)!]pRPqQ *)jcz>ǧ/AdYy fXї+.I.>\{OEs{:\<'anBRS[ڧVբNGGr]f4Tu m)"4,+E1DRGv7H@ @Ig4W_@K܋$8)ȎBxj D TF>)Mut"r 걙D̿"9v˰8`}wkG?s{B,D$7M4paDn'&Պ_*$X4W&πteqtXp<ܼF0‡p4׏cK] ']_8<ubKxrbtdw_@!k3Wﶜ2g3cf%ن8LX;$h(| )pj34pԭ8T>0ԋ,-OΔp3ׯLELp^]h+ `T4 +BV$a4 KҲ[XeH'E,Oe2銏׮x3ǛYeb5_vmr=t{~ΐFc;2d"/"x,jnQ@\>lqeԌNyu iHoI ,)}<2!6_Pj\\hZ7#ݰJ$ViUU fs?$w:2!&VjFpvFS38cɟ /5es 8Zf BЛ>1shΨkS5"Q:/ mĎVkLbŇokQ&(+qӴ!sՕzE]$B.p~cbbh 59,y'j^BS&@@ox"](Rȑ>}pScxtG_,7Y#r 8z9%R:c I7qqmKMWuͣ񮎄هYG/ tsfl3K<ៅzvqW7<Ɲ'^$6΂1hm+aG6nL\i\,#)#IIj|)\(DatD}0EFGj kXH: --.Os:Y'2S>W,+"!H6F 
jS+ifӌ>cQ_lxjzuk88f#tVN&^Z+I+ wZJJ~%%`s_tw7bֿ&f?f"–;F^WlP5n& U]zVjvFQ㑕d~/?_b Y~ AG,E\4z  P29(!tiւ3â42DbY0hGƩ\CA/iv]t9Vqe2k=Jpw("oT*L@CgQAUÙ?] 52c"suE3#r!eb K;ʰL$+S2 P?vӫpmI`-rߖz `,nxJLF`?aC,IdȳJS6) QHt>u0S4>0ӗI]W`Ƀ_ؠaxLذ1U4mzO!6reLѝ~WсH—[ʼnAÌIb~$ BeLF=K:$ l5?&kG:XaX/6=>ikN  ]\{FP^xy 4fikEn3O@vztb'}I(*7!H`p):̄C!0)܀[ iY9+PZWB#6` ^c~U,{Rxi//j˿7M6]4$&9^1B!]tk`Į wCSp8wr:.([Ƞ&`PFZ3gCXb^~Kss^x}/`(8y@4y?;͉z|B+|e+<vhj;@Ǜ$@ɬ8{SZ ]k?׫ ArVj ^>+槂1U[c{"tn 4b(iϹgoP׹LegZ$DigV?mzvd3+AA򱲅XT }#@x`6Z=Sߨ|ws8xoy)rI*HHKF:SM1]>sWM )"}+}U&CRFC0MϿOO]>+轘a 30\UF5FF2i !#5K,ދU;s/ oUcT*UYHc|YmaAK1Kolሜ(b8Eou'k̼ bu$+ɖ8 mD7apWhyAPAa?\{ӎ0ȭѨݪЛJW)vpmnXĪ@jtowQ{ժ]ClBCjf\>">^밧 3Wc`ׅ,bA4>@Y0!c;2$Oq -!AMO4qNO`oxC~*doWUdP*F}O:h,Ppq~o:^A1*aPaNt-cp^wrfb23||#9*_aot'V748*-WA!Tz6rJi-S ,][U!xiNv݃b7wYᄋaӤޛUL;'-|A0cy(/aj[!ez+8])WKJ4Xx/kbKɖbJ\Pa/ΰ'LA@#=|FBSw $3ȤE0`˸#=R"|=߲Ԩڌ(_:i)/#9t]41aa dŲhۄuĄ-;9C^Fgx30D,$4K=EentX@5ӆ= =]M߈= `So4c4҃o̒Bqs(OfZ>\1s-p: 8?!$Dю47I/OtXm]|1DrGjHF r{K`7nNn>yWb#, +z)tAe9'7ו$@^Znql$"N9(62IYp/~QLnIp+b GwGNom"ZmCTx;,uaҌ9[gk3ȽWܦSoW0|C^  a"o??%GTq%3+3Nh0VձN7V Vn&'E7t (|ܐaT{"J$$U/k.\TOɤNLdVП[lZ>kTh¤卲T)R wtfY<8{xg~7jˏW?Yl 5yoݜffЭYƺZ'eys :7y3Ξ!KoR3ÄO D КUAVEՀM>*86&:,R͔s̓:j)~j-;:pȀF=fZ'nS:w6|Ka1m1Z < qї$-ᘬ,r#ĢfBbaa`8E`<q/A`'l<F>_ZU5+Zk|6&> WvVdV،dޢRډ 2@_֩ ۞r9 r_fQ=׻9IHM~ 55bqC{)+wNaͅ\s<`[_ [IK *M4G׫5x1 ,xlJΰ'i&?S9>a0л6(ؘ*&;?EEO5:}H{٦\̡hb8 M=nɮ!ӂBϴ,^fw9#l 2ޣ&R+}~^bXg觘,b^{0a PE#e*~s~8dP; Rb'0,2T"1%[U"w9Ì:Ycx>C't ח흋gNSxuA<6g`q=9ӑ~!Jwo2uň N!I;;ڙֽ ]'&YS6:{LljTf`d "pt47<+\^c3: JI^45Wr[<h5+ NKuv}4Yw3 `%1A''fu՛TrbO;Ѽ$S껎b W?Pҹn7].hMƏM!Eu- LbM|j"T 5{YŏCѭ7x6Xstآ`1;||]ܥ- @k\ VՊuQ 8PW[O6,f zVaJ|j>A! 
HZ>?Sx StJ k0=x+!5 386wFʩbĥSvi#pKÞ:*%Ԋ%D֙+51zlOߪAI&n#ȋaxS3vV\ C;-c%FRv8H5)9`< kX˙mH0 o$-GYh|<%|Ѻ5G\(X #_>% a {Lגw qjQc ӓ%8ȟG׷Rnw#ó%VݑD脠0tΎN<6do;@)n) Xнk*z2);NPj5nD9O҈7ZO&Uy  g xrdӸGT˕#͜AƠ.xqE*ľN:o ݣNO - y+Ⱂܵ'h\>F-Q3r{s/b yzWBG͹8V4TVR@ 6{_.̀?y&Zvh\Iή5[ X" T>A&sFͥ2IɗVRk-[\#- }m^X~JjnMv6;X\me$W.7Q=YuGPi%=rȋnG'Zcۖst@ &@aKq._ku`-mRx!N [!%%m`R'YY6ovvol*a < 0|- ~إww]LW` j18i vl %,-uD7{BREڧ B#BI=V_[!Iq( MP)w4'5 % unr^+xPp(;v LGG-p5JqݏrGO(#7iLH2:Pţ_uV('⟓Ӹw,>#ո|"O[+6 C79'e W% (8䬽^$ quxȏ筗ѱNXg ;*C@㿪,W쵴[YWMJ]eu587>99k0\Z3l G/eJL-dZ}hB@@"&32i|0/*b=/ _|VG;dAT-4A$,[0s` _i}zHHװ0 ?| [E> EJl"a hhة[G<*iI{s0 N7xv󕤴{7n-S/uPf_^gy8Yef n0%.e2 dK'rI6?,lWA xO3d~iѤ0OPyF`a eV M1r̂1L^޽9_+7i\p:C}@`9ct%3K'0k0CS@Dc,RP2aq )rACo;щN᝺Ħq`xz2/Ծ!m2Xϊ*QRn[5bWxxm48Wo7 vs?u-[Q5a`Mhˡq轙Lwgg|oTW`x"wx5 /p!WA\È,U5x_p\]HB\kVRU}W$䦂oChKߥ ?;ث+GSqsa.Fj7 u?̟+ f*8-0 wb|8& 09iuW淯sYxȠxt>Mk÷Ch[35{68̩A 8?[4Ѹ[SŨwFC+< *ՀAvU/Qj e rM+*!(f޺ExnN[eҊP!xHmJ˲=ܑN$$~pammtools/data/tumor.rda0000644000176200001440000000740613662013606015046 0ustar liggesusersBZh91AY&SY"\8UDEETUTDUEO< 8 ^;xhɠ4!ɠbGQm#MPFQz BM #&L4(P i2 h4H44LfBHhzUF  d1@h@ hhibh 1ѓ2iE$jjm5Ojɩ6h=514hhS#&@Q0 2b MiOH^ ޷d,T"oRs.>>|Svy$=ܠ-HKue9*^2y8"<3Bo= P/RP>dxzJ2aAS}W3z"0]dZέ$)ΑäsNR_0ELS`.'Sy[ż!dp襬׉KYK|E 2AY/S]Za6.ͶQxQuGO]IK~ .>>d'E~/vc/HnKZu3j T-#&TiЧ#a,IJ6@ )켶ĨhxR;#:h0rF:Gg7sH/19Zf:NfVme/j/1{jb#]_SV\'OIa@֚R}I-$zv(O oQ L33fĭaE9M^[4ZPrH[Q]\s^~_~6֟4zH*_d\¢4mccDb63"iM& 1F F"b2RlhEX`F AlPa#&ɳ ,b%&c,J# 6 XP(LFCf2c¨6̠Ci)*B(LDf#$WFE%Q%KHP &DdbfI%$dJ Db PlF DFK"D(hIa4(4ȨɈ ( ŋd4H2 I 2MbI4Y4 LZ1&f #EBI4B@,bJC0Y") B( -RbB i"$@2)M  % (&1lX R0[{ kNfXhTd&BLADc ` R3FJ %&Qi$$F hdiɥFl&h )c%M1!C2 IDDi($M )!$L ac3$a3 E$$L % É# 3)H E*QBQYL2",F"!@B)$!f L "LS#FfiDґ2 D&b$0IehAM٘( E&*jYx!.^*jQˑ E)PBРQ(*BA'BPJ FX ;7 tD H%p.8XdQ% ! 
JHHHLn"+Qe+(cKQd<ֳ[I.$`@FvF !E"D %\(R*PńwTU:C( R(!,T ֛k5 20tARYf ,AJ$8B\VI'J,iJtM\@+3rK5g*@b#R_K%`~/no׫^gkyg65㥳e(p8u6k#ʼn/^L-eAAT..renTشg uE70&n݇j ɔ":wxDYoxȀ'4dP\.t Ï:Y 'hDlclf%Z{묌 +Y"XK+$RȺAw\*Hl0rb";@[]+Mz8\-Hsh}rryRzxXU%bݕA'J0 $=WG.x /\T7"0Y)b- {jw1:٧*r>,ltOq^UX־ʒF]'|L&BUǢ IQ4@gg-p) JSMel:8PLRSIѫ+P*B#dA5U\]3?|k TdP I"ȠȈIR(ca,k WT$PeǍL'MA`+BP` PB VDV(̊22r! $b@@GU$1)̊fG1]RBqD4mEq!BxC#(@R 6z? VApyuB@ n6S@3T@PZMóuY:L $dEs"~"zйޤ?;򼉐I\H$ @y;Sb#c{X+) /Ü2VY״irSb֪$VւZn,W#ŵ-l諜tB"A sθv>ߑ~~e5_;T7AIEEFجlj(5d%c](b;nTXcbkE6XFmbmR6ڹ x^*!@e0.|"JH@O^^XPz"B#6)N*B, \K^g+-~g!3oy“  'r|5N_lbTik Ô},ꪦ>CM"( oFR9YAHh{9)c{ m@ѷg\*3Ò D fbHwjc*7;ivdv!)2Y0 i3u)E Ӝ{.le6A Ϥ+!ri/+VjI :MG왕12.n) 0! e;8_[jiXۇ4WQ0.׍WPKN)L[!J_R$ob`nV zyZXM@޿ @ y}EV{" JP7M+-8p{9zݤc@o% Ԉ\ DMXyliޙɐp^'{t띹ڇk\`5sA}(+|UJo7 /fMHWQdD&)C :bâ9X" Wg9hI;V⁙W1(w߯=r.' ]uMr)^t†9P+,Yc,5>%6H]p1"(H g.pammtools/data/daily.rda0000644000176200001440000040474214222504522015001 0ustar liggesusersBZh91AY&SYnӚ\u( H֔VVMK*JHR IRBTR%* PW|wnQ@ {'ء}ݷ/{ǻ^zv]{\m]oo}qsw}ןzf3^>vש{k.wo݌y>׺W||M}{nחuw껳tﻗ}ﻵy{zͯ;=o^Uٻm7V|U盾|>u#뽼o}{3;}|w}[wygXϹx}{{׾k軾{ݛ ]׽皻oyWoGw]8m=wx[ݮwρH|}ϯwmke{|璘>n;W7xww}/ڏs{ov<ﯾ_}ϬlK]{Z7ݾݾﺼuw=]^'v7=o]Kuٻ>;d/o}>ﶵO{׺%Nn}{WݜݻgkYٵ3[g;m;'*;mvwZz T]*!)Q֡)s[%TݬJٽ[d㮷{}ラ{Nm9=" 2bh@ɠh@hi2` d 4i#CM4h4M2b0 `L2bd`!SAL C &i&&LF ɦC!i2iL2i4' b!@@bmM1bi=4 40@50O@= O@&("D4CSdbM14Cѡ2jzbi404y2a14bC&FS1 {'URHL2ib` F2hiLi #FS M1FhѠM2h wՄ<qڃ'%&ٻv1C3t9YɌ\U(qy}f <6 ,9,芊 U'_͋п!|\%E3V31eOTTWH Td8m+Jʉ}@W5:r~."RkkU=RJVDWhBݻjAU c\;*Ba߰),$Td{Fvldc-iE(2Zԙ*ήiJ.%[;*5@!\J0K2BT̥1NmjxƬs73;Dwug/ړ(֞ Grho\عvewC3Ke$FJ c$F1o9rDK)g;,RdԒ帔5hV#_zlRcY#kNLi>o}ɋoR[d1L䠆MՁY!&x{4_C:i}䪃wO8/l.SL2]fFGvOpOoq'6h}*O_pLLƼI5ئulʺ={Vyxf9c#7_p9AH5FvS:s~m[y~2U\Eڑź q5 q+#-Fʭ8@'_2;] $ QjSmKzk>zt$_JL>/y/ݾZIꞭ-l%ah:zj)cnyO|槞l.NAWr|{cǝ}##iZU棯Cʻɬ^!_j rBmMD\3uoD56Q!7k|Ix&@p~VУ6 ;Zdo DU_#ݻ)4vX By_+֪^lG-[Q"Mv;w;Z+~&{'Fv"}}*hsW:k1jY]Ӭr{]?mߖ X7 3lml;qU kB'Sa.|ݎĽhCcYũ|ƴp?Թgin۾ə? 
QpS}v.nvƵũW3~^C`s^.c*s󻎺i|(xV%6&/OkxT|5^ QE7 aYɹ>',:"J+$ g8j]6GUuş}r#Ⳳ=%[KK8\/ Ӫ%X|A 0g}K$$*&]+7ML!:b೘h$HdFy,qLڴ+y5{ݳgS5cQqY+8E Ջ7!7aqb%ZⳐ̞RGgLmafphj<#~GR+3)鲹Kf}76KoT퉚)BOe'c!z>Jj0Fg~GhA8^'_vsWqyQiX:JcVz~g쫏1?q HW}ײL`;NSmE,ͫnZw ZsrO֘fߡl@+v'l@p(TV8>ImRu]7By+R]j7\⤢e&{CN6/Κf/ŨTwIlGb?K&Mi]st6= [ nMRt *Կ[մKIJ"ץf`]m#|y%ÞRB0c&7ǁAOl΋=E65IxKu>h5KF |XLtuK͉qrn0.b{F Q<7n(X+}fM~h aEzRo׿\`/pLmWǦ;Q*xµ..p"sOt:X %v -&3Yx7nF2 VxpX-~ QoѶYs4cة^`O=-εR((#,Jʏ~g<(9.R׍|O&feǽb؞0-Wduα+tWT{䊘B';a2VheD`ТeT"@VH DEq38@) 1MUUJZ "SUEBz8P `9EEJ 9冦 @iTք oEmzZM T=$郀JX9ЁRAcS N?3E#Fp9rw PXƤ7(I( :u*&VxaBP$4`̦)ƒP4L{:?PbϏ9j"h֡[P>6m T\0:.hB=%ptoP׃S&:/JS#r?`:ږ|U(j h`d )S&<~Rn;vƴJ1R  J]௤JXCrĖd88@|@yT6İuR7pǤ x+ ׄ藕L90AũJB;EByփ; {|`k@wy`ցA nCfͯ䔬`bX x 4 2o WBT2IKr TaP pNK(ԧ Wͩgyl`)׉KR6%/΃z|o ALKX{<~sbX 8@mAYHЃp4\iӁփBƜ;;sỹ/ E;{0a 06mSH!R:A8 ?p;uܴ:khNc8x0@liȆ*|k<0 jH d >8xكy,VGHNĒǦ iOToցK_46 LkD?;52>`s롺Z5)̬vUx [A<1=kH{+G6` =9cJ`8E+N?})ծ~l &PtPtjriMXMTԝ>?h;0mPS[hjzp`9{DJ\B;uHjƮmr^O6pn ^t _Ay槼';T6r!^XL0me郏}pd;H6~xC^h>X5nAV j G >~j@t`⁤lXB`N:0xA ԸhAǧnm7 rp6}{o ҃u/n: ztt4zgҴ 2+],5|A>J8߃݃0/`@@AA/%`?h6$ԧw\dzKjE@87 i*A݉LoHfdL*qimüJ90`yTm>R>Кdp*'νڝT,0:5JbdA΅ 9p%duB|C%xP>3́ aXs Oxt6 %54C O{0|@}qXA C.;撔 |u_AǛr4 X t8~l|-rhթ"P9(Az 9iWl@QX% 8|@rAXA~4i`NA8$4~cD!-uJyQ ~0uӃj:tMa/< b(@)X6@l@(-!bf t`S&,l-ACb?x0?D΃+ AZP D(烲;hrcAȚ@3 Aփv'NNwÃ1Al?`=?=>*<2΃B;UCPpؗХ R1\"LX{ s AG Q<`k`֓S֚,=ׁ0 ?Xr::5YCO5 *V-(5iTMN8T4*^CS-`؅ 05=}xwN`(=<14Bjqj5÷O@{j% zo֘  = >>pnօNif5.[S̃ `Q^0&9]SS0 ;d.`ɧ('˜`x @WJ@݀x j˃ ޺&h֩O-N9s Ѓ*|XAŃׁl](?֖?7 s@_~E)!O<hhl懧d&4MH7pj @/n![VR =n؁0 W H>A P;0{`l!ǡY, Mj`kAuZOS w@Ê {U?d (/;[Rkǁ[^pZ'&x! x>`>Z{Q0KB Xt,C noAO>X>rbu AA]bc/pd7vӁAA%uԡRvGO ;qF%pރЃCNj'4ԃ끧Nz s@Cq3@cDl> XI ~, Rf0`}ڃ<4O5T 0} wAD؃&D>=2P`kAP醌x(< /L&t)!-0C&yPv@Y΃с,w,XiɛLtSK:ݠ"Z̏=T[({bTg[D9`AP QT400BD)P%Hmp3B_+5$P'B )Ґ"]`E + (&f U1$YDjD1jI0B*@6 AJ  0B|e;הR.>Z}k_]~} MHn7bH]aTk9= H@!=O&B AM Tr¥60 #caH'۱(SD%Td A.!§U<*,*@; 4[g `DsMc[ LB!DHkwldr<(e֦D% %6B-ZD, <$ A=Q@0.v7dHE!KX:@_ d<D"M9 O9{Y}<;+Kx*Q'%k#8Fؠ C@0(`.A|" BT1į0 UQK*eC|bS  ' TȨЌ*! 
\X5E G0Q4RP-1@zޖw!~8YWA$YPaUXFjS:‹mHqhWWV<&-^b$~& H4B"B]+B (9ǫ A*BBV 1r@pPˆZM%Tg 6w}~}e{ u3}e0B)b"\T` Ϧ%w ( D6w¾!Cd303AAI ! J Fأ& 7YP석WMf )< NL-JznUjM$t ZDýT "`"+FR̷( E90+Ue BqLyt-M4E%d-La-LUd =Ѡ;`SaM2 RD b \V1 3VX= TM0A D&)0(2gffgbȈ!^:2y 8dN]P4\|,*=qc4мyhy*ZI-`@W!D38]l_N`UV!2don,j3wE be)@IH5G1D8t"'0Ugn\MN_S(ō?KlJ8O H5^!)"Q[ 7QJ"K!-'DBZpg ;b jn_V%$x{<%*ILh®UϾ?t=ye63jJBNS .ӬGaqo#ՔeN#(3"R s;#_>UaHh$CSiv)<=) ޅ:"n 3&>@5y%/Q1WҗI!u{SsuOf^@7`__[YM֢0y:D-DU,6tB!-}<~8}ʊvfM WCGi4qm&5l *י ' [SܤhV +TSAR ZV yP-u мbܥ 7 WSgYhBS)ERcI J_JBnn2&" ( eem3zfVL!,鳴PiOV 9q4k-Eo5TSV["vFf&UVAwNy!Յ\ :N#I^&I$)< P,+|VUU Y54 y *M:3̤8KLFS2MA($@Ƀ $%&o t][.ky40QGFA#j W赳R+D{\)Zmz Π4ѬO=1(CΡ} _x`F4**,d,csaE;S<1Ȍ 7jvUuDżaW'_EdD pڶRb҄c{W̝TJR1Bl%"WECT`Ao`ʖ*RQꧢ2^"" ]gWGL@NH/:3@$z{LJt銝qjC S8JU")@Q;,vIНA {k,EZ%˫CUS2 .µ &GZZT Ae$L"wIX5A2($I 6hPMnfCFH$ðctD6 $@[Pk;jh8*):Y]թ 4@eMI5h\ $R\)VKid7:I,dTQDRWD"DUu.J ή.@*YY@YI ɒI  lEI5[[<^QUQBAo؀mjx)*NpDdH*jJŀMQ$EkМ P@(f5\XSWD 9W'ReN;PGbiH z4) +b(a0kmm^l@V ""er`O]Q ") B!4 2@"b "!2 B!b*0(J!.0!)j" Y}M^X!o lP3M+ `Иj!!V^m璦֧".mBkGQ޺0-֊"; ft+vtDY #:jY<~z}s ab9 JYG*ؾ([6ǒ餶0AEgli|TK0+S 3"b̨LD?#}HnǶijHn"/ZAz%Ͻ2a={ҋ 6 pF~&|, !v=־t^F \RC*r]ы8Rm8vJE^M8>;4hV\dC]1ʧBVaJ X%*Mg6DOQ-mBGp#\~^XZG=J L_R9@L &*X.~g š+Gpy`B"ДID.L&Яim8cZߌD215 MBp΋:! 
]{Zmo|_Y{,LSTO -`}ypLS+@Y<؛)K*>v!{ɬ(ݗi~59@ң8\$IPO/p%^_vFI9,i͗OE,?nӿs*Z{7c";.cZ0f0/*i Q4:ʊE,xH'c-azʢqY,H"B^xCKZ~[2n:xR &#Q9 į[T;J'I8]vH9-qqm`Ʌi0"ir+Y2_ ;Wll.5QLXMr:^OS D??P D ːw1[ TS$g<=nOU5QTޞ){j?jSyqIrPWD4y doJbN5.]yn .{utCőQhX؟GڥO| 4o׫Q- bp:q};ngYl\+WWZ)a4v[Z UEÒQ T+k nuС?خʏm UUSM`Hq@Xb!hZM|&qռGLժ*TsbCpr1h s..xI]g$͚Jc;ef2?KWCuñ]-+EVഊ0JC&[OB/Me,$7[4\(<]YnWK#N+V١7NX#fv7@ ߮`tuiK HD8JZnCGCL폮mH5 OQ?;JO5Hr-R Dc588 BP{Po <j/i*XӆrwD@@:" qᑌr83>)m2./'?S 퐾z t+Q8z/g7DS#( W~d BzJv=zq.h =0=1J!27#piG`y:YA?2+x;:"P#_aI^v5"ÖaۯB1 R@zFfuZV+֩D2D`+y$Yyp5oLBzϱPǢЗ mET) ; !*W:Cp_ңVO?ijolO˸E\4 載y(kVƧTh j$qHVEHrHېMwrdX6(),|2Xrًt03튋fCjM 5ph ^Er'v%#9(2ųnTl1 B3k=k]!d;Ne|KOgV_2yod<'Jկ Wt5Z?,<=ºR z@6ϻ %mOAw6`iB/mvA8xJH&*C E S\}; J 1[{]TB 2ݸ I(XB&'Ro($Fek$Nm}5 3 ~,\ۂ& fQ%( ePQ.u3vj]&`cދbEGHťl~t'Ԟh Li'e!_/BC-&Bk/S{ޔm'L=6`#q E1lqg6{TUӀ|UJHcӖo킣j㡍)u@Y%mJc Aj{q Uxlոyf" N 2e@ H,SghUّݑdh YةWLݙm!{U>`ȤQwN,XZ ?\G(&Y}kVj=:JP˭x[;V"!rhlU̬!7xaιp3}F) *E06Ϙ?>p %h+~V0d-x뎅1t哈-׍hρE[hZ{{$--3_s>9֩?LzA@b; OZ+_&QG4"&NLWk vse->;-SU@}/tҜHo(v2w1t8(㢡9qE8p5z@ ED2dv(S@BD ~yӉ%P@ 9X%6( @WBR?P TX{2F%-Uʶ?oTwɴm_5h&xƶ wo"tH')Prc_܈Y:p~?$zgZ1l%o*Lts"Q9̖Tf},eulA(Z~(VCTgN (q2adr8GaF JU.ȳpbȶ : 4vGcEU;e1ݖ}'$QVg^j0<8A _&˚?].Q@r Q0twnJQ\f y~/(:hF6IdDo0 "]_&~ ofӒ(c{<4Z",\@!,ArDbu |/˅rXgJn[]vށapE"j{&4Jlέ`ֳp<~ `Gj.~ ]()>n"4'bKUղ93bJy< fܴ=G?lXwz DM- bת)0=0 , "1d*K4 "P?pDퟏzL6; =ɲK)wr>}sT^:K&3(xj{=1ź$N6praI.D|t=}JdП5c B)DT/\o=c 3R`!^f:@$0rp~8!ەTȮ@G]M9sBާ|`3•Mc^^c3>n2 ?멼L2NM9K{S1WV ªq1!|]_ W:n1E0ɜI"Τ'h98*~gv]n;_=ӖָM_i} O~2]\åBz}RQpчkjT4Ao,FXH&;Đ\;=^e=+4 j QHK|<=2Оc%0P\PĚAKрq["O0R47Qt@uˀl:g4$Gs <nˣs^Lѩg?PL:RY7dF;[`YG <8e3`Pd0)]E|%|m&ژ[z . :`|J%gA a-ly Y$mcKqn^]+J,E]A Ob{.W8+)-;$odV WW%ĬҲW9_w wfn z{3|q` t8 y]*X}#rLi5Fn0皴HAD b\ ./%dx 4XN\83ݼ 6IVym|3>Lʛ/@~5? 
H &!kLeKha(naeLEBY%ԵL45:elzܹjV\؋!x)rG)dڮWGp'\,l"?` uq_2m08Hм uƲcUd+RLjN*\fiOgM@4bד,YRNgJ7~pg91@[`ۃY3٧S`<Ѱ:iH{6p_'gV=<>L\‚:4O(ui^SI ZBS t^A"H@Y1y7.X{(:j U/s N>?!AP(tVpO5NH @$<8@2Hف /v@AAnv+JZ,tj2h]_Wld7c s-x0faQ0xXm?@7SCJf 3%*K5|i"06m90JNg;6O*~ lD(;AZnFB,W.z#cIQ4V$Z|QGTOw]Ʃ\=9=fz / 0, -Hd;j"DVN π1|nD5_xRɡНgD1V3F3$c4H\8 ;ˎAMh?. 2+-#qys 8˧ SDP14|MЏ|o"H_q7Kz1yG'E "UOk @ccڠƋ;5t@)J?A& ^2aCDKU%p89Q &V~/@A 8h=,8.ņ"i7A{yT*ԈE;âHikNa۝70rH˵pcnkt[ֆM'Gk٣'ޜY\ɤϯ}Rj%f8ߏw5/BC\ķR 94)p-7w~0Tp0.F >Ga(Q[-Y0_P /ۆeZW F{ni3sIP0>kal&ĩ@\M"w^?!/ Pcx2\dNBN'c[3hr4/y:&P`@,x,dJ˭ υ=Ȝ"=mN[r@Z #[9K>n*ύKowXB9b%݁Pc:t/1n" R?ly|W)9 Uz| Y:+ۖTx?ʋ׃4KX7f`r㞧#<w >GC.>;<̑7GhD Nߚnt}< $VeנՈ'0>2"ewG,VWm9&q7it 4 DȐ ,4 `!3/B^DA n qh.ms\` ÉP&4[T('193V/q}+?-4uFj]1VJyv]%QH3 s}h*:Պ}#gvzW1x /+FKcP@QnF- ~M{KfYXqm>7=Չ9/\Br ')Kyb psEn?ޱ_ήS-{|fj&~p = /ćD"HDkaVOch吀8xb/~ (ywU6*h f| h~Ftât;+tV# S2'cizź˥6}{օ[jd /;(#>P|*! 7W;Iэ3(:xBjLi="81QHUkXOm3omPKUk*0+q].5rt*WbkA)yx] te֌JoRK(טmcE<6[p9(zzT}žlt\DPئbQ-6ߡ.F""/3زD0gĩ I,";(1}{t ]@dc)8f SJl;v~,D/ÐrL}3ѥ_ e/q7ʥ1'&/n=mPqT)+SRb"|@2& WWUg/'GbkΆb 2"G68:UaF .ua= iIǺQ#sFʣw`X..V`I ->]qrF"ȎLy>rBj2^"^.~/$EĉSO~ow/IwpM=DAAYAL\&F b8Gʷ>v@TD0FVÃ"AWPM3^I./&&X*%ڟO7z:~>H4WTҗ}:C$vV ;Bw>_K] H^ηc)Q ^^Q)WA$=YAHQCۭ4NMGkJCׁӉNא$N ?qҜMpU/BY$ qk~Fƶ5\" g|Xj`DdHOMx{܆Fg!9uy,a-})8XuA׊7<khΗe6E k}x5+ّ<2$1W-[ >U49 {S3;wiҬ'F#)EW+tYp˃\9|jlAˮXt6/,O|r0}D͑7}(ȆP!ZM/ՎNWב1\¨ä>}7E#qh9lm.806%TS2!P)w>NGUL_>];%,h%8)J$Y#yH5 oÌwzl3dbO$,%ުWCšo[bm:4tbvJa=6g2 pՐQ27LV#Smb25]9Eu,xfk dL } UZQz/}?R f yi1b;Vah]cA ܊Ӂ@(@ 9 t`n1_ A崻iP{dkdk+7?żyUYXXu3Z8On܊$*UQAPOC^Ӹ";R> "9kb CgiDi#b22VINR9Ew4 äjNJy߬3t1P4|'~}SsvUouZӘXy@Up?$l p&C`|iT*$Aز&7}OvE0k͘U xTˠq  :T$|3t3ɚF>xi\vܢ=CF? ^_]S|+͏:U&#}+wUA4Wq b鸚`pp(rY"*`!,,$$RALeMR`j$ /!95lM0<1IJGv- Kn7pݩ_y*~_bThk€}1d A$ȤާJ!7AE 7OsZ~E 3Ras\tZjmq{h7%Yv6 ~}δ% JYͩGSԎ? 
BT\/ׇI -g}4W$K}NWD>d "6@VA ?PvڮyT/3'uIj7;@a BPV1Ll>.eOoa{d H-,$'`XvlR؉N+T!>rDt'-m8oo`\?d}tG¬""N>rh4|{٠=n$u;݆X1NП[x䞔%W0+_#Vks#Q`L)O(GgcAW}]9tw=#%@A y>}Ҍ+Y23wPfuo=vk ֛ zSW˹D,7O+=!z%~"Ah&B 8d= ;zMKpd5|*xZ+uف\ȡpCk A({QxuY}&"+rQ{Yoq*SQse2_cm ]$9٭?'rm%pL=| " y{Jr\U+ChHM nzw->) 'QcZ 4{I Y>Q K4[0cioO Ggjr$l_&<8qG𡍹D $ tKXT Ef1'"AGWdStLN#ۤ7RB!8&89o#gt![22fB]*KaJ@W$PhoW%͌03+D0qL_@,`,ߡe5(2 ?󗼹Dw d*Ǜȃs_LJU4\K>teF劃ةbwMh]/I_̮D oC/7rB:>bUҨMCh(Brg_\241اu$ 10? JAg\]m+/Qt%Bs?ޖvYa 3lZpXjgC6ϋLhAAG eA[|K}&%RZptF~Mv4ب&TMo^v;E"(%㍚CA_ CĀadq }<6v-i,B%HP*[flW'DTr9埋TdXT""k@ms?CM  =4}1}'>'o% cJĦ^χ2Qf.N| ۑ)hſ̈V݂QHb ?36%NLȊQAi[  -Ыc=44_1߃~Ƨskihߗ@%cG._^p du[B.e1u{{ggXd1^tk8 Cks@!` fB\b8qKBpgH9s^:5SOֺ ƊQq"F9$wG_ 2B|noåFB~pkHjB>2Y'5/ۂ¹q%.֛">wS(Xo$ݭ:W/GFb*?wNY ]'d;S Ŧ>K$c|[ MoJQGG"ޞn(&\=}TJ92d8F:<)Z7/,Q7'lY:,gS+3mb+61DVybv)>UťO"w=0k4kӁ#2N6_=|aAD U 3Ph}:8 g.c -:A,n2A}UGBR" >b{͔ ,nuھ5zD`?@nYZfsb4|i" @>yF,oҁũ-PYUVGEGZMЋ`q0 [ԥ)Q±fM_. glzٜCG?Sk fK<{%ᆹkӎsk)PwP dq$Hvd b*C޶xsniiP$PT|)-,uЉ"Ѻ 0콢dѻ<qʘ5bۣ0iJ]ߧ|oyBTy )JG-y\qxʞJ Uo{}UXz>%6G&g[ZM?o䡹Gu{=p>)t[=!yHj?tLQ@zn`ѠN#X(hC2E~ dU@< c{401_,@niSLNԁ>,8t(]llZZײA. +l&uB3uW?ւmq5ZK+Kq܈Hɕ8hZhr8SP G=ҝ^P?[4!%69>8E9e?U+V @GS'֌b.{A}Muvw1ڏ=#2 @jKCTY~oef)Y. ٓVd)dB[e!"0 \} ]P^ȊQa/,J\=icSkLXr`M`r~?ېl%K@tݱ#?=}X+ .9t0M5U8jHIQ; ?AW-]PT},ye<-R:=|ّTa gK"SZH/.BֹeZ_27 ɘm"K 9:@_O&;V)HQ;&@FTӨ k2trɋ1ꨏWV3PZvB`Uql z1fK>8,IdC.Q'u MM UBZ".lD~‡}FE/=@1>.!KAf> N4+M]B^4-C_0e{4apBfWw昏qOOlxe67%3d"C܁=6ɐY\z W'A:ߙ+$}~VA Q?%x8|Mn(JF5vbJjGvp vP&;H&g#h]ƤCd!QWn1`VGAq#~b\zWrSAR"#Eo2"hA˽kqUA}.#Cko$/0T;!Uf;GM@|e+AӓR$ }ajh|&uP.dr1PRP}H,#%tBmn5a^{l1帏9ڼ/.~NWtsǍ%*;pW&uz}, x6gۖu2#S`+ %*@<ݺ.Ηx@كScտ{ rKŠrb(~r*2a Rm4TsqA:=S$1IG c ?+'Qz"oi:fۓSOg4n0bmd/R. 
5 gJ: !9&4KD~Ÿ R3 &a!ac橘[:0}]G\vN?ȝSANy Ӣ!|95+@OG(mS<pV*D|pcj?JJt~0qcr fsc@︤3wPlhF<Đ<:tۃ)j=d*^i l.(j؀enpH-PC4Q<"i> E;9`~e5(α1"5Jܫ'LyWT_"NBreHݿ4k3a 4 zG"#A3 iǟ!1XTmhԀ27qn!JOin^ BI;*5M&Al)a­,yc-5(`un9$-Om4j-ARhAj LQ U~[LG|hv+ 0 qE6%+ƛf-YDH$kM?ϰ$N2.ۗW[ު?CȯlQd`4 ׹K9TGгT<'era8~m=+%DԁBfZ|B]WSNhL4m#O]3'1OEyQzzi 7amvd&;0:9/ݰ2i|lBlN0pb}Q'dE7e؊O]#EY |qZ~u_6"ـFF\r,}5/N9!{#̏+,\:gR7)2AadR@f7 h5*k!S4/%؀H̜g 8vyk:jULkaL`tuI/eDW}C+ ʑ`Erw<[ChT8>4D<{|v%:熔_zV7-! aA1]F̲\}6aL|hh}m6gnZI_nh;DhDi\#nyUph)X9#Y 0 n;w10te?Ǒݲr.Y1lz`R},Wq{?8-X@rH7J$$NsSUs$ }=NZY/&͵62mv01qɜ~^EHQSj$?i9J+EsQ&8^d8WNצۡOh %&؞~;ӄBP` %5I @^Os  S '[iT#/u E7/chl)`㈨x?`V, a~ Zs¯ x ;{uhKe0I1n1+HYOXi>W{Cݛ5jyJfao\nKK/.;r9QVs%:>.ׄJd(f =_[pIτ@cM?wWI1%AEk]# ;L!"c§s|v{ 1n Vv~#AIhyD#Ћ!,_^* )24 Yra(C #!(7:e & ZBM1K]k4^4tNH_-"̼H^YޒE.*؁.r$NPG;6ĢLzU2(%fun i0fϠ0AG jÊ+|לFʡ,y$O㖪']18?NUUO⽞d8z'N).oOx8m<$(0ռ؝ot!0)P"".?A ';(C,ls<#7:r. ݔZY#p]#C4Qo^(1gpD$%0 "',Z_*_dx0'uM0ZD:`>=70"BB_SK蘩 +04[Fی5уS`(Hgr+f%ƞ1D0N74:oVT d]V|կi] ˁnPY#ܭwV+^$$zm!B0Ę MDg5M>$FɀB|gՄ!De6}qaLuX~=ģE3RP@67foa>G=]?Ŷ 1AĀ8%~ma)LY*w)-=w?nxcn x C#+ I' ~fj _+ <>)Y LEsgAM0 S^$5`kɣBm`ØB]"_yHGϮlWR3ܧ2L^I.}!} m~.cdgԥ/z\e70=7caоϥV?*/U#XYmllOEM6QA|aצ~BtRCgU9c%)|{ ̺U\~clPa97@m's@Po2MQ|= ,'=P),htQt*M &[}2_4Gmp5cJ„}wmZ./)| >M5JPN隨NlnHV͈ x Q1*K㪢ɒg)E.H3}qv-'DQdBؚ|5C6*8LDWFKܑdH7޳,@,PX$5(DtٺX*T7n*g#X [>Nvb +J{Vr"BQMM8X믝u0fBY+{F+\9mP|" vV 6JKK|$2k0UGkB`fʹΤ鳅jrfxK/0 %'Uv- 4!~)M̌\h>;Ń`3Rz~"!LQ[h6%~[癿c4[YɬQّ/a#I4 TbVuDUA#dßxk8^к~vt_1L?,e ά9q#ɠE|J s [$W{o'moآGH&7LJ!mv} B8m[߸M rN7zuζePm췂V_>Bz]h2a].o]f:y1ܯH.歪Sܑ~̼@IIMѹs)^ \Oz}'Ǫ)b](؝!pUpͦ|x/oZ-=t&oO fP'R 뾣`>X:g9L:Rb&:9$`S̭c) aXjA_EzBZfJw3J۸6׃{T?F*ɽjrq?V mWcIz6y1;?kj10?Htdi/R5:µ{J[-D혿B͒B_B)oR' N,?Vɮz7'x*q$O论h4蛝Z 8K#U>wpQr?i~w(/+^2RQ%En7Ssa[T)?_V"sgDfԚNLr;>"U׶a5(A8f؇(-\ep@,V2{Ϩ{0߾C\n}M&<xZz@oӽٓK< ;ʄh\ Kb2LE/Vd/}A>4I1Ϭ8q&TCcJ.րz/1prg"Scg#,Ylw"_ig~U0؀W agfP<#μLD~ƫe^sūPGI~x9yc%k @XZ:q ڜYFgIc%w'@'JYf V[Gzaj yzr,UuMZd97Av8cpz}.cݑ R2hctv7uZ!5ehV̿cX&bH l胚}h)ETe6$ɋ)0C/ ?dJ0tB@jU[_գԩakؼOz/>&ߐ}5 ׺Ng|]{ ecMX -Z\ z sOoro_L^x]T˾îZ^ot+;hP%$R];l!mBcA(PٮsKLdн 
+xof/Qh}WH~2{u}NLQovX&e+?Iܟ7 $>JawR˴`V :_" -;-+W/6:^Nlj]=N}^IRT~$$qcIַ!fuֽӽwZ^N^-QUȔheҟ-rZټTN^#,>fwol?&G腹_ʉ+1{} }{ja ڷ>,SʊàTˮFvQ0tS)_QFI<ȤB&#MV%I-9%-J&3˵=wWNڑfrmݫ~^7~~.H\|B3H]*mϛDWd(tDM-6\53bPc}}_ӿa2f\<!{q0]OaH\4x)䝢߯Z|!ژWVŽI|ɱrW^6-y-}_T~o7ۋFc%$:yWjo_ =#{N˞@E{,NVl 7",G?ͱD)p.u%/).;RhnZ/x$3MP4q_su:"4ZouUY +o~)sl~8w+6w]{K]WW#˸Fst(^[_sTeEv7U#tTw2MX`%nBI/efN6?:h\;:5L&[&o]+z!_:Nsa^ʞ52U_ qg[٣̳#?)H[V2YgX"JM(LsFO?K:BjEuv7ܓQ>Dٸ']9 Ǚ+;LR/TϓcyOfr3uRw5ڍ)OG\gE Y>H1vip-gZy Gmڭ&̦.]yv~e%dX5Yhx9m 6Ľz484}VuUIEV&n$O=O8ȅV%kBx36#>/Y9,e9PofdںE` =,OkX];;}=< ^áewӞ{߱x+eNnlewNF~}EH UE"R$fd*]G#?tۜ7Ӄbb* $>Qy­<.:Od!ϖ[ʿ%dYg^g5:zm+b }ۣ))k 7IqÂf)或_V"?͇jVWr.9R !Mݦ(&WNlT£a6_3sgO_V{X}]\ 7͞.ʚߏ 3R3dw -0ݕ:[6qHPH& 옷9k7ѽlR.KNWNPH*ƷKU0&Uh5I>J]ԩs-S"!34WN v6 E;V|Y5ͬw_1FFnl>e?;z6f[|OrvK\gyuӇA÷27kWz cO@u\O'-Rb^}M4U}^ra8Z|(\KhF`ůsK3yV|,qzd P>[k(\Ԝm/gVj4u'RE7܂2=EKz-˥Tt6̩4 +7N[+dn蒮!ˤ]z6J/XELoe.Xp|trWy ]˚|_Lru( xf~xC?uePB'N(n#.6SrȘz.db4J\_6L9ݘ7t 30a l npG=٣FG8R$(Clu] :nYi|j0]?NUQ^J=4WVTw+Gf,9on2W!Af%eYHz,"\a̎xdcYvNo.PMj?FW6|s?![U$јSE܈k!Xu[[\݆egQ 2ֳmq5K<,L;c;ި4b<߃oFم{'N+u3*grwտjE=RNy…*/q rq5>M"IJNH^ށ\n2s m6ϟwv_F7o9$a{J/^k{ҶlkУȭ(r[p?fF۶˧b' mK2G)sf5=".8ڎ_7\سPB1wǗMzClw0X﮸%)"We̞λ4Zzsǩ7PEBهVv5{˻QULJhT(mS\^%a^XR~HA. p4CaE:TBJX.kgc3<]z\7GGUGp=yf8[(B3@-vEd(dr[ڷV:<8~^Zo7Z٦lU &~C? d?Xh]4aSŴA e1q3cCrfclkR({07n2JQåH .\X0ҦlIǜxpaqZ֢O*r1'. 
DmH}" ]b=-#0En,MAQO=|Vc!./Lb{!y>/:(\M40Z3y y].O-()*@UFGsh2mXVɰᖳyQ^:(g -|S]*!*W<|tq$" SN_Rv vmGZ=!$sCm}>eSwA1ss )nNg} sm*+cǫýc ;R q.|/g>c֖/%5w'sbkƪ0GGxunb.b͌ yX:nVTY}Ӵ)0dInuj#Qyu5+-sOF f`Т ~z\W\۾MFܗsp\ 騟,(V*L--;^'~X}Q;"T?gqܷd'yZ_ㆦj }9X*Cղ&ZkiZ0ʻ YtR8y0<9y .(1jr~f[J.O@ N[Ry2Hj(.44$Ine6,ᰞ6'V&%I/d &`c%9 \67뎫S@Cn/h151;Z>_~+f@[rS]Z+4Ӽ{5;i :ҳ|e } uÀtm9b̹קFwYI6TiQmu 9!\Me_pۗ UymΠƈozo_Vsn,=33/"@lb5YzΣ2U[N'*:↏S]5\2[kY2Xk-1g(a3  îE.!ᕺ\tT/wTWQ[XhI  0{|N8BȆ0n(tj|(b-߹Lh[_ěoTYϦogkʎ8s80SGC);tjtmGK`w ?V OΛG/+[}~A>-Nh˼Bl`tTN<4U:HXHEz"w>CįUJ6%=xb6OM|VxF'2S߳xo{xkYPpB(#Hc6rB$`N\uIB !=#`5x̪M;<UC)Us=)30ZuY|d9J\M[]lھ2Xj% Om9#wnt@X\~Rf+s'Qfat_A[U ]F;-kuICN_·יQ1"q koȴJ[9_i.R}}i'T *~;~2ҪOQ1I0=6-5#ǝҩzr2t`Zř}ԘAAOŹurIvJj!FSbWjMTk1 :)Vܜ'buf)^4}0g u{>^-wUA~6DUH ɁWfՏ|ەלV02.fiY/Nz˦'f{fl&vuKr H }ـaP _8k˒s;kr[?. dd &&6ʸɣ,p`[ZČ][}s8n3QQНg}<ҔܩxUY9C؈!©2>rHKH1p-7Yɒ 3nQ~˯~yX^:=7! 1zC>(ޜϐd[wR >H)7j^.6'ՇxBN!1D*>hE Kn_(#dH[GpNDߓL|H8z Wp gT&-6L@pS5]>wR6ڦAwM0yn*va01v{;(7~J/EKw>;o>&vz^}$As6'z 2Ur\:enV'$=*N$pԇ_ت76yXIv9lPZt#ki첉 FZ'y iI}񸍇x[閔XcZ60pimI_>lW&0Ҋ;U EaGl#n˺ђv^|@ZQ1@z9_K5K ik}>(=^6T后߷);@=%ήpxe~S _<gَV!ai"\\=86j6n 7`Z'6;|UxugD*SBS8qyp|a׸sNq\4mie;KYEkED'5hg1x]oLʥ5'Wmۊ_CgshsofƘ#EAV-z.;Tk:S YEac"1Zpѻ Up٩TV{ޢ`ؽp4x*`f!0+x5H,ډ=*. ,'bsDt<ǿJ3OqJΞ^:v%4c8Jyd8>sj'b'*4En.v i;c˦+׋g򟺘!wBnkU1 oiS=0pi*:bv:ʮ1Osmޡ*Qtʫ;dx*M]"uG MXQ-ct={cMKZ؟،J֣xTJg}C/ƇU5'~Ghow1$ svSW8/}z>\%91=C g1*~N#\{>v5qD,B {D`%<0  ox_G"K#x kϞlI XTzƆޔl#][Q)E}fm&nw$mr5~v#Yz:/{IkJ;%X&}`mޱŃo_T1ӻD*{tv=M^9{X%爦IK%1*W>! 
h{=Ny~H!%EcC$eX Wwh'rtW=a_ 2W:}x1]E&ve~3r,zT=c`SˏurkO3?\{qW:[{m2~ E5 m]`ziN8*M#:#yȅQQc\ŢS;6CV9 }LO>y͛W2>k:W@E,ěh۴9_GkW/֢tUf*n:INpc[/zr9Smky;~gc^bsHP4Fm;Sk.fdv1HcȞOpt@ecU}|tE7%76d4D~_d3gE=ceBDXYf2z#  '8aB_SJVD8߭(:1a*#ݸl :y5aB^K﷕ٌn!i]sx[SF܇a1-ubFo8|Q5&Yp%}#h]-zEc N0v-ʝRqxV@xIJ57p.8z8_Wsv!'Я9-,]e$0]ݱعRj .azjyTzo{ Ϣs?_iG/b$s 1?YcCyCMX9R QG\^K1Z;=]fp5FX5 *7ṅq?Yګi,S5d#iW] 7hbNe(n͂6csK_'A#aHdIإGM潻e3jZF:-"+wjFx6|.VJ_--xA_ G$ǧjs6;-o m|ջ7fM2€UH%g課+gD`g 2^Ɖ h≩# ̢nndSjEU&\waGOndjZ>5ÍG!i}1PCFNzq.X3dř0$.hx՞b{n4x{DS9]TD=qCq)>'2=Ul:dm{MeP.'Q!-d~_^%WO\Ic~ ^1cW*(Dz^uGݷmH~Vrܝo6d.rzRYbW`7egFnoޫe]gK#NtQdD:)s'YLR֟9f6  Y+/gjGY5Gf ((C "IQ] 3ɛ~2 6,J|: >H2InQ 'yjuA n>HNM1&gX&y\pktlѼT*D9+ŅW`T3*o #!⪝ LRt,aI_-6V2Cj`44;Ʈ㠼i?E/'(z]s.Z]Fe2Q}j)\Yj,׈|9zV'|]ֿ y}5yarRJ \׉bU6e?= qtJ=ܒT,[CB,¶5dX{VBv]3u<6W5O(\a~{D5t% gnNa/Lf9'3w{X6%[7YXzo%r]l{ujG-,{_(hu>\;GӃ~9 $w[2gWO&՟'!a@+[BƆ]0%hyu/s pzwͼYmZ,ȼ{RXaF2Y.6%B٣6u_ή3vܛkutpj"P5%mF6`E\ ¢=F/A_طnwv+4NSGVG-(={m GZ5"뢋yuFRހz ںo^kvgì{*c##D˭eEx;=~Fa١x"g{Ϋ36zUI|Rʞ#~ͩy-&хjoSxC1LϝܑQO`"V: T_Z1ˍNHu9b[qĶ#4C\2z/ ўvZe{1u: yz/+q9p.7FDNt8y9 @mWV+ q\TTs Z UӬ5mUsS/6&hE.u`cX'N%|C36QS`i.XuqW`rϮ1%Jli.M!P@JOg?u%@~ܗ˾y*u/q:zk63mZտе=ucnpGbe/[/2ݩ6ֻ6[X"ݔ6\rѴc =kČ9!哇*v`OkTqYj{dj3}|T9F0FIsHBr3v i?&\mV*Sޗs*[,A?urd~[ԑWj渨s\ h-J>{ &OrQQz };Νf_2wUCȪ>EK+vb &~3E<sMDu6?%'aʲ삾OyV1CdTTF5ARzcr$WCj-QdnۭoG\jl_(s~Nɔz#f0МfvZfH^QNJ;9Nȟ3do+TIիg\ 2;϶&h9Db:ТaAQE^`0 ih+M^*z\Sǩ#:GNB{yR kp-OfƇF^Ӝ\j7e}_$|/֋PI Dȝ\EQ'J3tʹE rڗ R9>ks_kvDK컻tP|&f0Zyɗ_z˕R=<]̹p>TW\jRRD IZ¤w<%\HMO`Yy'WZ#_Q#/Ӎ8t3CRGS txZtCA2^OIJ]lUz%CbW~T(tL{݃ybIa'z;!΂b =ޜz5k1`T~'C*H0"4_!ڎ|MA⫭q={iИy7S1,X"SQ-݄b&k7̹D%W^ۦ2`[+M26r6V)Cz~W{3 0>']*4Q|l4zXSsrkA+bb YKMVy11 +CWLf1 (4WW>aciz;op+ߠ`n94gx*1Ϗ_he4:U@[->pt-QIKSw7u2WE#Q϶L%.g?. 
?CMR+ v6J&X%V_HƩ :m1~%*mҶLo~=fh7lfUFq 7w(>J ]jJp0s]O, EhlK~Sm451?HT=L>#{T+UPhVjWbu'˶{?63Guftn!lsb$48 +5=N`?,S6=DͻR/U/w $v,t|+5w5Cq5[@U[fFn\orQjp U%RX Mykz?MA?2{w)PpHs%h.F~QuC.OiOa|2uZ~f2+Rwp0sx1I@_"V[|~57,maӚ.8 Ay׻W8&籙ohീz'Fas]'MU5Oode?g؝o~괝rp[޿ش``5,)qz߆bPsC}JIgoo?25tV*?ZқǢLf/N 79[N￿B*YG9o+DF8$0nJC!vY=YMxs|~Z-Swg-gl1_so]fFݩe?e[9_CPU h]~j 5쥐`$țv7*'Pxj󸩩jzuBJEnn6lW8ټt\TZxv_}RñXʪKҜVhlTnL=X6qW2p,y]Od!xu a n4pH'""!HpqI:t\Liɼe,vԅyiZJyVq"eU?/ [֒p5d^EͤϽcx!h;k4m2\՝;o"|[7 LgǪܦb[lEWn,[75e,^cu`TR^Rio-:L/ؔ-z}u^l)z[:X=lS5zQy}H;!bm>b9 A%֣VlкWe$S}\WZL6[{H<§z_=w0_5g 5;PѸ )ݗ}Sxf$;*\"x֗|Aob SGigS&lU;&n0uzl13qsʛ rGjG$M85~gTZoC=q v2CaYpZfqƫn>}ڛ=~zкw.Śaau=q3sl6J [Y7UR]lJ)  >l1 v/}+g>6]TƄ#zd>{XUnZM[HOaY'_F qUjZ@P\RwǨd Y$j+<8qe XUՅc䟘TЙĤQEԘ#Vt4YZ4z!r?rc[ۯa[{{Rˣdf${tUL5ט m/G)Y&1t?ΎԫTS3!l~M7Y'dn"q^l5+mٸ,k1u`lPXjFHgzRk]}kk݅\j>W$x_gZ l2"Jb#'f-v[B!^.Ͻx=n%@֦ܵnc:_EGh)aǾv*Īcs`mӦ{<]辶=YSz)j7J OnxiYXv DG z=hjhQj+`vbUQ(ߦƛ(Rp4&&S 郉~9dk0 HԢ>Z.3k$2meA7q6%sMCPa8x,-tlNN)dh9cW@ ,W-2ΫcXs> ZQ7UfiY!Qڨ;5娡ڈ=m޷ߚXb,M˴H/xLUxP6#-42Gzi&9GVA}@$,w7(KZ fwد:a/8Y1An|6lFQoz-1Ӊʃ.Z{R&:f45)[ b~EzfySIL+o.=#|BjS$%ek9uh\nA^ 6ֳYg'b|iq*^~e0?< K0p&`l6- ]D8e|8ߗӴ!tC.& Sbtv"㮺M?&b/K; {͔JRyE9弼<`>1L;| ~r А߈bAyýԙ,|j 8/^%@5ɧt4ЬBGVqy#AoGت6ɨ<yrj:^̈ rw9-ǃƞ}o#.B7tZn!^^j/Pd4~s pdՆnc d_i|K{sy,hk!#˥-D[\ .^&Zf~D& :C_PPj; -VB!nG;;g]D?G YagXP Kaa]NUҴ TE$a&$2>ާx\;WG/D1$X$,U,Eyw}hg_Y5Px.2n P#DӝD  >Y܍$׻uߘܓ2 }:6!ս-h~pb2J:IRD/h68!Z v+F1܊MaơnB 2$2 N4 m}TIEq?>0!U.uͭf@b,$ \"xC(Φ!7k\RKd_B:'U u;yGוnfiiqi=0s\Ox͜^f$r9!qZK0q M? 
~\^(!Iri1ZF@X( @?2<ͅB4b\U@+>KM8q@k3` 4os8ޑ WH<ljSo{r/b~O$n}VUZteL1"J)?A/Py UdW{[7$H>\sW+9imʾjR<ưȿ0]/~M {v\&@d1hIw1VJ<ׁ6 qSt3X*xY&eQK@qߍQO!ɂ0[<`+K[MebU?Cl(3[GC;fj*gC&9g `O"{J5H?vT,j}~j,>SL.%[\6eY v/fO5*K&`_~cCt̽xMOITeŴZG^s3Yas>)Y~"Vq?ɭoc3ldȕ}}e"TR ^goa\~gL:̣S~QZT\-?Vvu)|m|ÐHwgyA<;[u̿.gw`n'uvmY}~0R&(hud{xޝE<9lt:/>SŋbLY$]2Gۍ]OSN_$ ]=Hus|Ӝ-a|7!=@`e;׹M1.bQ@_`;UJɷX|Pl$ܯWn~ o7۵<V" ]v)i_;c}Ĥ ȅp_hO)<:܅Y0/e9 U5II:E:J^hBKk&p#HZsM9<9*eea[g`Sf{pеIkW!Vpޏ[`[d4I`v#t (#SRfD%}k 5R#K?de~Y=.6#)C;i9E9>/1Aß4>9(rAo,TjYs.7^fYY4MKإ=]UK~?kP^V{][OwiH:?5I~2dԢa==>k++1O޳L`pR+zط$xeC\Ws!Wco\R¨6e0Om5$tG @"#i%q?&Uo_I5p1bMvk^mK렬$3P>,LM]o힫ȻbC{Z&&arL~.\`M$xTmY6S &4G5$WX޹HYV KՊ?{m cuY۪(8 \@!Yjṷ̈hg39MgUbuy#|H=lu bUl2:HsYDl߶ReAmvuݢįpOO]kd< 3~CL}uTTblp < 5t {TmwsÄYCEţ+J̈h6:]5x^!O}ZJE:c!\0 \Gk:mnljuR1aYʧ@l)rLטk'G+ٕiVg|Tk)B,.@qk},nXݕt*gK]ja8O(|n$U{#͸> LYSŵYд;cN>^}\+S|X\w9ɔ ![;ɇ@:c»ƽhͣD"3|2"">w\|fLRG(ML@nՃtnƮi҇0 wU"KжՑ_i+&/>D zy0>dSIW=8v+@Gp:#?l<*׊v[H1 MC\4Ww=#Cߵ7OK$>Qnȍ7[%ol9P{V;Ve&=͂b@Rꔂs;L|HWEclDhIx0^AgQW:\LR?6_q"p40{ q: 8܀DVN߲A=+<\yYX ) {Rv `DlY:"#%MIgܾ߰}eyژ5+z(e0Ӯ Xz|Xf/J; S$UJ|sP:G7@YAwMݱPcn Ph> a|GmɊ -b.K(rᘙBiieFX~|FW,Yx0ot!tiM(S ͰݟA@ϝ&;Xxv&:Am;fD+.m)g! `z{kn*b-}<%fr5j~JQ^ ذ9oL: xM=x<*u|v2ܮLFMWa@ &1yqK@neXž XĬY-F`Ʉ1C1uB[>t ˖N6.ТQ[4%6S#^bshEU*4yL 2߄SV3䚡= L 9] Dy NAPy^8Ldd젯3} ߙJm;# FLHp}TGs-"G,~!]C{?(cbzR$_ w=ifPWs'}zG u`V(E;m2 NIV E2M]&C$z16ܔ MtژgRI ʺjځ-/Z Ӯq^v"7Z?>:OH#Y}\̜r${uq7ᡟ`j.^ iת`67?ʿ;Bzb Zঀmݺbaic"]H"PV?u,ח&"t/!+g3G"o["awMK-Y}`#N= %  D #0)B1Ppck4O.[Abg?ND7t,*sOL|R>",SRD׋v Y eK O|K;+xL-_jh߰Ѯ0 O<0$NdoDv62~X/InJH y9r k닋8%HG}0J.Yޠ;6H ڌ)2*RV~vї``X= s8Iiw]whu㢪>άAt$Txg(|ilK);âd &d$1&oA0:d2 cfRtϠ e 5n5o :T-qeJZ1W}F"˿nPa?_+6NЍf.3ת۫ oN %i~ydka ՐBp4LCiާ!M"żep;Bw3 9z@* ᜬך)۔% /Hvp%_ ^HrqtC{?y&o- sl'ǛUL`NFCeI4 e)6#"$:I}MlEyg`U}Z"@Zo{A@9 Hcḧ<Enn 8:!)HD@f&;8̂{>oꠒ^`c %?oQaDk됽oI/Iy_$p[PQnFr77p\~:p3%I~g0YH*Y0kr= 8 D!$xZx]~z1FY>;--Iǣޚ޲)]zm>?o/FWue'SĀiTCAm}1[õŘQLX %Mg5"F %uN܈DkΪ= tGy'dLJ_`pk,Tm7܁Ήv[TtAI6vP5}ŵ9%m.nʼwvnU$WϾ%zD6:[_ >Vxn3[ď5~-J_ nщ;J w;1h[p n$$.tw# ~຃P%iuy(KoE. 
9%Gh@{r=x 4"y^,ķ=hzˤp>+˿tp8+SJapmx:tcp2 /I,iL#B.7L. p#TSh/X"!86Z(a~2V 4_ч`ivGY[XKmHr}A 1Y /` \p9r& Ob$˪Ozj'0+g(áFVD<$ݯx0-ʃ06 IyjS F@nxsLc?PH9[+afёFv"ƀG5Ufl,!sEk2Z``" rz\n,CMmC)ώE݆ceh1<#|h u|QJ Xc!RԜ=Pƿ"fKHdD`u`W(>Z\3W#h9D>Z2e".:XG9h0.f.*|?VF $њ:5k V $MA?0h!sYٗ<8uڈ\,Y%Zd3!qfLn @ @CC*~ 3Mɰ H۴!!:g 6VX)0#˺Uc oY; hXb=nTڅԳ^0q4 :g#xa7Ϝ`jH) 1mC %ϼިn[ Yn7X]|'t0cpn_@ňO6P!:] Nk%E,8Hk j:ظ} r0kJERN39-jVӛ#ҫD)Gd˯yk?<:0~տO@٪Ҵq'%UK펚SA%d*E03Z,%4;]؆t+a{]5=3D/f+_T$v ޏh5tv3˪^8YAB g~+}m*Vc / 12.L%M_VG3EJUY6SIM8փ+M!4u!zR>sjJ>7–z?}|Wuy5j % 8w\<Rx`..8Cu?qREvvg3ÿ§H06¤jJF]sboV4臢)sW:OTK)ilz)5f($X8([b5Ht]Y]I=p%C8O`u:cuSHu7vMgݔ)ߴ6 qƼK]9km 5P+K ďj@Zo2W#A8l?OIU(99!;[%f l" 8CY%gsDBuH ő5~`B܃bXV X9UtGR4b?5\pܻIfC|PIL& CLl.@끵)cC@{.7n G]}iH +RAX^&9xg$` $Hx{‰ʣ";v,l7+oU1i:v5xd3?,]P; m!è9 PF[/0>2}!AcKpGZ]3O(ĠLY(iEZ?p c +^!V c]FHv!T/~_= @kzeT'50Qϻnjуᒛ"E[$LsS1SJX̓K7~ޗxom>As*Hrd?GpD=[pow?⨹( M]pS^PU?ALN)p>X\,FrK+lyHe"ߗRO7,}/ JhH &۾`;{ajqNCFpMT/U5^澀!v(0Ǵm1(ɾ}r<"^* P$JBx2>UH [9$ OKDMݒ*YLAc Z CZ( 4x;b/AX>>P(qn<eqK KЁp ~HF|yJUn* !lbND\Lo$ qYZi^ri=Sz+9 |s,%^'1`s-iXBsPZ[_RFaXb=Rx]H`lo$P^+da\ڽBX`31PqP%3> ʒ1$G(c1@mF@%ӁaUc HMf@mӇsQ f[+sUơQ7Fzfh%0f@HкߖT:#ՒpD>\&"<J/2,RIMH̑#1Cw2g#ʀw$)щD Ay}08NTZsr \#H|ࡘAiad(ՄńNc8MmE?IR !cCN)]=c@5P:~d= !^Sg$(P L=z N@Τ&4̃A#Ui$<( C8$!'m`V60amtc3^Px=8C)gX5:h@hHB*!㐻FAe~%j`l,_:|ݒqAFق`du G'@cZqln`{]ԏ=vY1cG /˷Fe WuKcsU(հsxӇ&0uI=r(LH `CG|m/z!M7yk N{h \c9~^pS dl:^煃Il@/zؒ3lFo}{K){SJe/^$w]tB6EI%b}qog}HX\4)vv%WT$UĜOW{BdRW ŷ94F& Ll~nLasPtB;ZCDBn`>VF`7/͒yOlMK N0}2YZcŲ NI2M9 50 (TuS҅%f앧@٣F?_ҰyJVb L30^µ5,aڼxpmp<<O&lXKZh:}yZzYN3%҈n YÌB܎b&i){H 43f9"wD/U4 GmIr% βNO+D?LgwYMI&2 \LȗqK6GCz=nq A93& k[@ 6asR^eþP0ua(5E`t!T=PQ@'@$+*;YO1HD$I .3E\JR) E"V mrLP+K%էL˩ }Qk_k08n BQVEL>"nwHUitՓ hAi3ÞߙH_²9Z/ڬ3a~.*e1c_W+*wXi[Οz4NM5{Êr÷Q@QuaF-jݱ`B{4ipOB0x?GuFz4p|!^t$U4Mz$JI})/ GZpuV89G%Ĉ60& ,hqP,\a \]^jr*U12N  OP-F@h;zS><%T56il =쑵"p܈Z##f6}V{k@ZU/_<RЩhN8&T~8 B?6uc5)n(zVPTen,(*lb KE$B HW\A[G!۰HT.D"iP@2$$Bo( EБ+40fB;R,)!|MIֈ `7P~" ;'LfJD /2E,3yHTpx)- 1FZ'OxX?U'w'O0K8g[@AjzZ x,1=G[=cs 2󱲇/*i!w4E/1j`L;*:!aIx|3<8VhA23=usͻZ=҄CPd6 
$e%9ϝ 2ka \E:5fdŞMOMY Gˀu9rJ Q9 bAt_0 Bh6ʉJwG*he0윹x #~:R`Z^]}Mſ˚=XAl5Ůl pdf=DeʕU =( bT&-+R` pPp $ZaZ/qL@@ "  ϞI{CobрL<#3_>h!R9(RiKgwOysABDd{  Ay .!G]p@&xbH0 \>aS9!A:QO}.t-f': ܁79 H EkpO1c~y+9Hp ^ L]()$(` Vz$o:0wYh@RHG%8=?YUעW_3˜ 23ՃĔ' FHA_%O7R A> K4te xp R@DB@7m4 <@GEtLJZ;@f`|)`y)PCBU, _$[c~4y8 bN :=zPh2.rLBD?j;l˧"`aCOa]$*sD_̂^ W,w0GH!P 4S O"D· }_2N77~uSM`85ڎ\@릺:p/N_Sِ. YI u y }AȀ 8k4o̒$0:+L=dt1sB8$(ktؚV'xa7|uPe,tt*!S;A@$$ $hNfߗ]FK Bt'dX#Be>h U$3ec$r5 $Hrt?HYBEfûh@< CSkOZJyQB@yW2Bf tvLX KLzQZ`$[g`{dX<$dH$HHsZl^|AxBKeg%> iF6p3W!Y$BC@m~>>}k 2΋$T]GvŅU :p|P{i.$݂A }MB;Ё+"S<* 9buJ]@6GooQ}M!Q!Byc|@ o^cw+`vd]+B'y`\J5k$m`L {HLcՙ {㦳q870|桼eKbѡ[`8N7~AyVE:pbsռ8ƃp:R/x/%2ƂI#0uin4RR sԈ6>d$:z4,fH ݲsFۭC߃T#@O*phɰN vvm_6[bcn^]MωZd:Ayx/yq-m ) MSL@t}l2wRa gk _$x6H:쌯sIoO!hwUNJD 1|YӖ9$H{rcWtI7jRpXSQxm]#a#e:\m:GCH#j>mB c'0"r fVU&44[vcWbg \*s[qK1rYt QQIi"u sHw].\W,tlvR^pa E2!Z%<RdN(,UЮA ٵ߽;LD1 $|*!,a:4 f33 ;g+&Q"Òft}$HNVJKڽ(/2[xPzp+{H ƞc]a,;'dё#gtӝwט0"7q|h{81ne|Y< rйFd>}Y`%j?v9ad|4`dPc|ju;Qp@S=ng@S?#$C4D$=H5C9;h}J}.[€x8qHKThGQp7 Vql0BlF LqXw]FFD XRc<X Ɋ]}Y_Ժ(" hB'A B۟q [>֮m'@b6 Zݨƀ*%s>goi^^ k,6=~A 3\K$FNEJ^? 0POڃ^: ߪq-dg{G_X^-0޳+Z/JoU h_QiFwzVWi~БX (Dt_"nɳ8׀|{xg|B!Al6 A$ɭydk {}ۮ:N6C_%/\ :; =wW]l)[u#% v8̦hSgm%2|SÒQ㽽q0G[! 4sq60xlB7r/𛙰 K/G,n/ڙIxofZl1fh&50`b{pIS%ґ]>dpK1KY݇ΫwRGL7B+94#8Ō(@Y:Atߏ=pkAV&ۃH;vesqMA!gpAahuJ<&9yYܛxrֱۛO;3~eWǦ<ʊz / 'r$Y#XVqe L[=`œnݘirb$hBz~h@u@-҅q0d;qs7"H4H6WѤ/0f4`:%X{@}0u㶄`#]܏~ Pgiq}F8Woc=\?PJ=u{HjYV!*܏j0$w︽[N곷ĠfO ZǡmJ!ӓ[W3|PP g' sOgР:rp$'- I,y&оSHЩ3@ n RJBÏq`knpfhoWuG[烣 @iØJnځҾɯ.[7"Pnè~.mb(H>$0ay΁t5C 9_gLyVjL5n+LFԺVHFe<gǩ˫T]Z{1wiJq8Sk .T8jvL&ڽ) LzŪzvHt (`K/㾱U!EM}IW#?o8\7ͅ AA^ERwlUn-z`ZAn9K怮sJ|Հ<yd/RBn>KŨ[6&cgt͇" ''2.aT - "_\mأzy3Ju:bXE_C^QUf2t[KEiG#-q:-]mJFOJtZ q $1[f=oV{꼠)i|.L1\kZ*@T cR=ElnoԚ5ʚJV1;f+5.]b,wZ:ώ! 
E.:N.lף5].SZ"ܴ5.K׃˜ж.{mdMCZ?qۻF`夿_1,1%3W(I;)-= QuI})t5L,n e䩓9?BՁv@-P|02c7ǎ<籗,`zËOrS/U/gu_9d?K8 I-Ƀ$*/f,\$B`m?K\ > qݲmpigX6J /A;Mi=^hf ĐY]vHXn{.a%i͌o r 1!叝wuK"BM&/SS9A0ȸjY ɄFKJ!P9._zd=z11q}y'Ԟ"Ӥo9f.x`P @jBrm EB=[H>,mgnQ5r89=~33$PXE\9ݞEci`+z-fdQ@"cwxXluFk  x|o$x><S\|+EerفD lو E^Qe7R9ńq:buZ*Wwwg cH>4rv~>4yV0r=\l 0_~ƾK.#J Oh_^d `>`_֑xoVB4]"34k4 Ѕ\xe5T[*2N\|s#2SUo]+o<73y2LrۓP;RE#a|K1_GJ;-}qМ ǑCa!mp"Pɽc|4d:NV26Ah٧ ) qny ՚t1f5RM_8xJgAHsꩺ('թ[NgCt*Yz^WjpO~+kMpEiQ('Q;GuHPv0gx.m/nn?G.@I99pXIG/J?*/y*>ye-oŠz)r+h'uRw=O q7=3 1`Ykp?*@׀7kS#Dܪگ9ꦱ $=qie,T,xvTrdA//=vTd~0;n3A$0>IO-eG9)JdSuyv`1S  7*W_c+hp"oeKT^xKa%u>yM=b3K(3baӻpsܷPBnϡ r2UXkc[ w@aj$ڑsŮK]fD=;dl\H8m0?X;0i 87m8) 7yދM`͐O0.\2SuF<( 5bG6־謇ޥY}ogJO;rdU*,sȩr6À*"@yDF("8B ^<9Yul1X9A{  ?1Y߶ag= [ C4;$y@PQy(0]gfrTgA}x'TEOs~dK5We/Z$ FJ@a8*\3kܯ xv㍕ :ί-hp|[7?]KvHybp0JHek ;Cߵ[B!J9N&KA3KZ8vYÈɝާYyҼ\\$y4=}AP.*xEtv@9B?rMSusLw^g˅OiJQ@FAw>Aorb/E2uPٺd[~b8;3Q )Xvok83e/ | \0}[>q]5Syz A hB/W hbq֎6I쫺Q>#8mE(k SrCJ zrS/c[d7# /뗭kZ8YBךHq2-=Yvv# C袡b 62׫L]MBa$l?͕pzhNϴҜ-z>?Pʆs.M@ u@P,fsJx jbֻ{'?jᣳ^S EQVi$c{-,1 ѡ )DvK )TTׄO4)o M=/Dv@qs*k6u) 2z^ws|% K4о2dA)\u(('zm#i3kwT & p 5T r?]1ܱ)LF>K- DP:ek5f̍Sf@ G'nL2DK sT&bCzB⟟f0cג`NI:ۉ=; Wzrߗehq{7*ؘe,{c-*pŪ./nFI!Q(;nBd{ 0,_&\iL8>]\Y(bDƈt?Rg@Jj_8c0>V!p̙:Wi<`2NP}5(C)}:>~}ǿ4,dO5^0sj)Yac ŏl*Hhj S8@5_8CuC/ V,}̖Ο܇+Dj29ig$nPc:Cm'ճ'T&A.Rfv]`K س/0]v"]DJ|/fہ}v4xI냫L/gT"agͿ#m1=,ǙhzxhmQXV1%Stau±yNը>%q8h+Mt{GV.C4*'}Vu-Q]Xp=dsQ'=4C:!>G'#;@R3Ɓa+2[~=v$̘[~-2]f[ߠ} [0;%3mCZǚ&_PWjr8?뗩0IVx^ӫrn׳]-W|P.8Li=p7-6<,RɁc~ XѺWMß07{(K (JJ66nt!mZWQ:A 1FҢc<y#=-^[p>{ tZ m HgՋF:06=:\vPpc_՞I\SшxH fj٥lcz Adu+jYL|HG kc[6E L<,mO@Z%+Pδ_zisVoݜ xYK"?xmP;(~#E\Kim`h髫 ?+ﺚCc%u0 Ӌ0~ F?!Ӡث 9~=_={!Kpq)ZIӕ a4vM-+~zv~*t;CT͐yzdF7HZ?D?p 軨/ &`D(Pe7MG C`e8~>»rcւ+u4 (k dz0'GL'^H;Bs&t$ w>];ahbof[s:j6/bCX_ tGcV%~<(j!_Ob&0b^4(=Oo U" hS˂uƱr:&?NDwICXؼ==/?H.M6|x)8\>c%lo8'y}W] Ɂ˓$QlZ _*0muFq@{|{p!%XcM۴Pۂ&DI}H"j3LW$>Ļ(%X:8=;WwQkmc/)+K){)TlzڟvXs$7XM'O<-wּ-0a+LNeһ*~Gz>ْqEy)B|@0:T#G40Т ͓=<ʭP&Y+֭E=gϓ^JM%$Ժz!7U iv{ߟ/` ^jw.q;j6T{? 
3~@6 \Q׮A$Kf@v?2/^{ @42'˵8c_ [UDŭs6d}U#>'G Uj}<pa@.xm+DdPGW0:x!sӁ9`<{z o#Ά7CI뽷QD@9蹨@C)𓨳3 {& Ry3Af 6 U)Ncqk |d_¼<ۏ*)5fV=g [I qNts. EHkt`o!0 IsǢ.-14EYQ aR>,d-.-I4+PDŽ@ c=aS!ue3I89}Gi7K^g QbJ:0 3P]pU̖ lL6yJ_n`4ZH .Ћr G:pyxgE῁wĔ4 H6W^O;mG|e[2@3Tc" 5+8#| "H_:2&zC"ţ7>y\γ$ 1RvCM?'=/[3 :yZ<*u`sayTB s@&#8Ypkn,Y)` o!CGnJ:D )Y4}v ~2qlŃ%8A77Òg?Msh+zŸIl1q4PlY?U{U 7=&U<]fϏ  2Xa2me<^sw!90 ڐ7BBpK~iløDf0A " ؜=[7Jn4j\Q׊Au, 0 FS[V U:-C؂'\5sUSLc32ҁ @؄o<uY9 ,TuM K%1f<*f?Ⱦµ=Ky6zR7jpXMO1 iiV` Jj ~[Ec4PALM0v{5j${ SK"rzzY׺i+@n457᠅[y|wg?&nFYhI#ےߤ.cQ J_q KM.x`N/gu܅Lz^tY>CոyJ=Bt-DG1[Z=IƎe^]!a4IaKOa zMF/D1e\ߏo*gx@)4@,L&!0Z1Ψ\-Q?=}b#r5V n,t '^Y>f7Ȗn@zY:ǁJ|!p]c;sRkrCc8avN0JW|0,+51'?˨'#0(Y~M )"Ygynr{>3_{3xO `z?/p0xoQM>Ő{Zwsa5X{/XVͼ5 + ~.Uf7HN;v#f1޲x}!T߁y}`sCbbV:t;snG-\7 'Q(!7HN=.|U?~~S]5Yj wVG0ұ UNӨ!0n+VN2BQtG;7-c 4P`ɕP͐`kk'X7gAúU:JG I{2uDc}\w\߹+ o=n-ݴP?jl޴9}+Y_|P9**DSϷq(BVKYo5_䖮r]WqW'U1SD7uڮ8Kݔ0ho0N_%/G+u4F] [z"u~Eߚɛ=,_r"ߤ8>v0լU sd@w0j݉ΏF~ISX_|7UgOoAJ?xt+4O|}kz5J{S,,L }ǚKN#a&$iVXA'R5&`VkoEANFWjH82|R4r&Ye9U =.F}kFn6M-!(7!lk;>IȽnmrKو ޝ ?_7[S!r4=yZk6ˊ:N%l-мvWҐn0ud~`p: m@X/vYTь]OxxXZg9C]*v;s@@ڀ9jom?AZ*6P$Â6imrS k nW^}NfY{Dʂ? \g}z D rEѴpyzzv|nvySX`GӰ;v(2Pqɑ7LWCDj ; Q[k@'Job$.HLɀ $)%D@fǬ]lTKbtWFCDC_1ss94]#+t̊7< o)zx OfN{v_Y2= ]06!kLũ8ߊ";JC.o\.H_IF hu_:8I"c9#CV¿cM" @u2ii֠PD M*m?+f]zQܣ 1?Imi)ӸdIbI`\o<+uǍBYK5~}s,gtqaSknmb țbaEcb MӕW!0q7q$N=*^sE̊\A @m0Mh ݜuEOɎ5x % l֕ll~  ޹S#}HP1ˁ *`[sHpD f9L Ô5C]BO\)0vr_`氷;@#5 F.ybBrj> ÑCR})ƴqD0 j];GAt<= qka~p?w/ׁa~Ę @}@(c'i_ݚYXh}#IG?+o,DRo KnY7~շ6:><͖fAץ+YOVXSLИ?nN"3'*0r{ L?'{Tu2!qwQ5 Q [oJ󇳅l}o 7x`~+v{XveG-AE0$g3 ; ໫m(T#JѿW1UU /uytOdbc8 kTt rr4 oVCR} c9^ ,1~8y6ͬ̐◲Տ c|BxMT'$F "W[)Vbmխ/y1~oszjظE2AҬX|9([wOh3\4>ꥊa?s?h;oss,_W3!_WH(F둺rC-gRXm(O96w~$QT-ߗZc-e7KA)KN8+s<+@CƭF]F"BEXz (zo ؖsn{(hqޗ޺ -afPȑٻ/K}K𓽨;!0fޏ hDꪙHUKt#+FBBSi롁Cck᪃YAi|/D<>۟r%Nl'1uhtL7A,&wpn93 J+I }.?U_#Dp|`t׋/⽛@E(wMK<~\ڂújH,E?= 87>I! 
{IfqwM_3!sX_xXnVŽ{Ȥ,Av(pG QՕNM\6Wf$<m!<_CN%#O<ֶ5V{TIpR!`C;/0Ӑ6d/}7RB:ccy3D`_Km؄\S,,>UWQ,ih}*z@^΀,^lN'mG'pF Jq*8m#{mROL+w=Z/~@i\IyYߨQ?7cdr~7I<㿓 {|Wn6^{IGKW30mc7y!vQ?&R%,7}ޗ-j+- ̈^ 3 s5 yFҩA5B#ËJzg{.suK+ԣp+U:[s]ئH> '@`vp;-4]O~㸣MD#~`~EDַhC!<)0; e \v/Q*E}3Y'Z-y>W&"3s5\)i5etjt,<|_ʸ U@7ޡa{(#\IP@o._&.8Zkg XmkH X&Sy'>[J. PX7lOug;8P;(2zX8byTbwd>yD9 FwC@$Bt?4+6˺0:K(hw?!G@x; DY&fG)`L"8iT\ 4|Y:Q 9ݘbJ{VzH *qw9|S!->M;<7r`::^x y!f4Ծ֐"%4MΪ¾\Dl'sH2$y\/ ހ A~V3)xay5<_[pK*М sJqt׃cf#0ngӅ Ly^vE 0pf^&8 /wsl]vLsՏǘtSC{=Цq[Ɂq\gw~o0_/:4lH]pnAqk" j 8 kc-֮NHmFZAE{AG/tD-$f 6db/:ֈw3}^?,Ň|X8/(h>c*_BXZ`#^R2rXog]\ WhSHܾL~y㳞}:߆vlox$Pۓ $z}>C2b$_:ϛ9է3o;kscRT0:&^ҟey#`:Maۃ2=Joj<&Ce1 #c7_89$zD: |oyBnm+xQ>6z|m U rfB8>%EHٖ2PxA85z:x;!_#c4uGxԙ0-B ` @D $+k4$(h d C? _o컿ԏ+tg;r40iz3tC<\k+%ᷯ%x5UBr"P7y`o ,-#0o3[#J LJ>iA?}p~?Y:30;;*jnK &'=ߪHՒ*5"F+|Ĝ{6<.տB!eav4sJ{>''D4ҠQąӞ?$P$sQR)/nK OTCBY@|d'O+ kF'*h SWO@L)aߕ]LSvOCrUWd)FM7Uت3yx(ylIh3uv\Y`> 𤇉 |b6cU:/+Y񻋽wוxMB`ZchAޠ*V"I<ɲY[ ;NNfM˒0_6fbOq ׬'O<ʚ/Zw(X2^7omm@"wNDCEoTfU`[FFA! cnF+"5yQbDTjӔMe9TW "s&GrYO͸eV΢4=q8_u<Ƥg M<0gc(X^0rw󹾊p4w[)vwT(v'(lA {n<k\Yx#$ YFM̤[NGvT>7|t8KH< `6]!Qw$^ l9J'.󙵿)'OIhǦ{_$k!R3x,H _bo+IaMJ=okޏ}KLV{l^#М!McV,/ R Vy}vMg ]i/EQB$9\lLCd}p5U[eJ ^fEQ3Ko;^8Qpr+p{̡ ?>$:=FHOD9Uώi]hvx,Amlz*8A0"*Nqu,vuTTRHM{ќ!V퐄N*PSkXEHd?%#+sЁLMn< ~ Ķ}+k%!y|r(ГuŻ5N &!"S ύۉ'&tunb9*Gs~gA]=6wkၶ}wɘ(D/@y rϤm8Ӹac(6X1y )1h7Xx_jg$ȞS=C-..{cek+^MvlHMkgK8ƂQh2K7BySvUNjG/6Ӛ{S5MֈKхH_DeZY-O |At,t âv-o϶3ZR>`+Psj~Xi /9asÀV 1Hsq涡G % gx&B@IK`v@M  3aκBI{ =勉o0 C%NQ6ԉ@)@$ ]wc-D%,%)$N!(@@k@,,4BBdHl Hق t|C'*^Vm q SxiՓ>?5>^Pm}{WY?`f{خhݱmNsAz(V0~TQW+kM_TD_]p-ǐV*~zbFZAj9)+_b؃^Lp=ߜ>c 6= 7U\' %} 19 zÐtSQoXm~YEj@aMc9bLch$'WU!R| 8?| ?*ab\{[f?{>giL  |B;Qh@8Md/TPWzleʼ8Mg0DF*͇4PIEeױ #s='4qr.P=cbh$ .ZUѧkJ|& 'H/2XfldVvr'& CoC2Q-zC6?HmLWML4@J,Wʫ$Y@i={]z(raڇGoDsUu&*̇Wl1|%hpjzDy~DZ3n\t<}BuBf]sN; /wPZI22Tx=B{fu?F `Cm @ tzg #)/GjkyFvJe[{tW P<pǃkjzy? 
)g^TrSuS*|?M=4sPn;ecqU1~*hPjF`C^18wlP^"oH\OvԆUo!bAu԰_h$ƌ%e[eD"E>5f8ƍjM()H r$0xp9< .܁?0/(jnFʔѴZA`.>XBsM!q!s0R`pҺ렠Fӆb;fE/'r mDoL- 'pL_(@;N1cMp-̧.CCtORQWUs-e c4z);`1lGiR^&=1 Ŧp^׼Lr672@YXjGѳ\$t S +>_SB3L*܍u]Ftl,+N֫!7`q}EQ_λr=XbҘ b)v`!3v$[d aFWgéYzUW܆8h%P6i\Ubx>%?>^3_HK;h Vy_ثU-]@>S }lPQJ\*24ZCMpi`u7j߁phfmSuc1AH1i|*Qz%)L;y Ѓ' ^g '(KK}HXP̉.)FQM oγV?̌xKK8;7$̡}H!/=xA /p>* lVr3[%pc0< MJ~AU\W\Rq B@55] d? 4Y aW@B8Rw`u/BԾ6t cd$At}o]RZ\9t͠Η;f<}v/xm3EFKt{^+ubUIQIYv5l!,MZfAr^>\9L;w|{rr8-,^FAX!}W<^홈u&c_HL差d*B \ּOӒ@taj'WzUJo]=xOfOSQ<:no{`v0N z'm@ 2U)(.KʼnUw KYcdԧ U41DC[ҕT"^~bP9A)xJlQa1Xk54їb\L}f[ 1gei]7 vy F:-!J*0)Mtf"Tۣ &K:ʆ7$Ȁs3)('q_og..u-@*/^euKRǎn9Q.ox" /{y۟x*M6![pyi- Xljd0@Ir@q[Î臦uE4;#"M=І .g lh;pwl:{qC *Nr/1Pl|xoTQHSJK,Ý2Ƕɏ {S3c’8v {SN\v|>޼f1Z k7YԻ *=xɏ(p" "/lX[2P wlEϡXjO*g <0֓}䴃HnzK`6YGR\Kuz-y_|08={z|}Qa; Wa}S~K DDz(>`;`DQT׫UZ\_K6;Վ9EOs?tN+4L{^߯!n/T#]iATP ,iƏikrRRb 8{~\=,3hHkgǏ!|sPуhfa܈@^?3=}.@L. eUkY1ݏ@0vyǽ^zYC^c`Iu~20-ee6J,(p' PS P).âH'knƶ|gyۘ?sSvqU^{0,`A׃nnCILnjh<Ԁ؛"'I@Nµsn1۴V n[T$$'ŤMO8ZezUc^ܪ5xfW{NnhyWN)(zKX߷-&tt@` o6N/% \w+LOl2 AОnKIs~u9)k@ޱݣXa-@EaGrY߂oyлZz.Zh`rL9d!KYRᤣ#!u\5ٰ{D. z$ No$x= !(GP !n?n"oJHG4}WǕ}L KI?MZ{0r8W=7XLgAAݡJiH-tF%"j\F6, $kȄ29wԍx__:7v4wuvHYhd) !{ܬ,7\ti\QY P@+'"/v S+hц|!}t&SPd<4߁mŠE$ؘ; E2%6d@MJŸkklb 猳.Wvr%L]]mg I8NH?6RtO͚Cx}e(۩"pZU\p<+0YqbOZ#O=|]D^TkÕa/",!34k 1_H6`n}E+ءp2u8!  R@/z[ d&qooYKmޭ ZgpS{vՏQUB#OGce ۭ)z %$K}eAG'2hsMKQ_J.XVBYg6 e|dC8ciJ~P /I("R_#NRa.{.G:2)cqRmx1Ów2v=&05aEJ(qJъq x4Dkˡy.*6n}QdU|EƝa$QS5p4䄁C$Ə %ܹ+'nK9mijvt59Tv/r-z_sA>" & k冈F w 23LNg!*Ph'vi"f5L254v I.w5!4zR #B֪o||3Ev h=;s/<(xv@'g5]ي"!(E*^| e0B]e% zxl(p30(f֫qs$*mO;`Jtj4Kze–X2㞃ցf% jG@EF)h 1H~>X" EI"I$IH! Q@ PH$H AI")"!$E"  boV$$H$zRh` bXyBZܿ3Ԑ_:E2?Gl}Xnoqo$7pycZ%tq X I_Q"t 8^b^pbio3܍o/l6N`Q=WH d~qE3oӘiȺp=+.H]LRL@D0WZ.@@L"DI%ߠFaiۀ'p k>-jrF|??+` V`E>QVn^e 9 JC:44󌻸H{t: '6s8|B׶kT'I ='@C~dЇi{,qn?D^mڗ:O+\wOTa>+-ze 1}$HNBz2tΥ&"UWX X-*bs K\蝥zHRgpD hwl t+dRe*ixˀ8·. 
@"NN}^0d6?өXXAd"]<7ڈ*s\f7"3eČաHo-&g w)WOi5?tLJBp ~d hpZvnH("r۸SHF`V\="T޼`x3h|ﳝYwF^"н[@;:uui.3lIl]J4@"!5[S=ї=fj\ZA4?Ag%ݸL50)@wo7FMƸgrD`w{}-\MГgn1nٟXmEх#C66-܆ `:țp-޹| ̈@7_xmӖuOKFmg Cs70J9X6 qW&`&NLk\) k{np/л+-H %j/oq {w8xnBOEI*NݑdHp6QtoSjSu 攪!_|rlSb@@MUqp;@+8ڻ@ڿdw=XX< hҠ${ q,~kf ߜcINwxC)rv] ZBܖ11'B ғKkjSJ:e`9m0Hqw`\{]̾)(' ;C 86å~e~{~]bx;9aiO-&/S_ΊQĸP!S}[b͛bD! !&,;Qa\n4ۺ, Wmk䯸f(wru\%p@vPX?.Z< pj|)ܽ!]uhiyRS +x0!~se_cus#dW5)ejF1,iap0SKKXKą>l5$UIk(q/v^L\\hr61ep] TI&_VHͭIq3@E' ϮtH"7vVG ~(ACsqSGoC8[u2N3b#-3] x=ƈQJ sDkۚ}@D_@+'|NEVN6Obc*ns!olE ֹj:>ͽOr,]&fA$R&l3.nq5 $XAp ZB8qTW}Uw=C67Xk,Q$\Ӂd+! 9  \ue8R4-kf -W+pY5 @,{=с )Sp8$9яg3o:mk0ۡ쒈y~O Ͳи^-4~2fԩA ̧7~p6|m{JBx0 qbVEiɯ})y3EbX2/ T"_(Puz$A1x~,\^:%`a f1V\sY4 ߌw*V)3u}GYi} W{kkW1ޠҟTg=gsZ\E7G02_OY.+ @xX ENHuV:M{zVkd@p!^ƒN_"A8.@ƓMd!H uK(>ZQ9%>0+pt$\e:T]+,q hWtܝ v "y6Us'@+o˚H&6k1r$NV{9D]y+av*ZrGÙΜ58N%{>םXdTY ,GNǤH*>"DF$/.DX^LJ)"8J @@s\8~]%דuWۗ@'ft.5Ȃ%/` J?0S-Wx D=iLa: F  ;c`Zj?7mU VHm`[Gui2ꢚD+47=W3Iv鞌({,l osݥ "3B0'o)$zuh-,vSzkaW١OȊH`DeIJ+&ui'$ q"݌abH mzl}O1onE>ȍv:1sƁ`G }S&\#AO,wwSa<:q(AoR#DZ1x X?W RZN|)oŃݒI?uGe2f&*5ِm.'sN:-MuJ-2ssU+KX@!+*F oJ/1S3*"!X?:aF/|"',Jw}p`yiz₍O;\pXny0!rc%iUYyqMs~4;P抏D`"+]*?X\㫮5XL˱U)m@~fq:;g:\[1p|  0lQpǕӡ yyQ8}P^4vB8^ ]r hԒ@ 㭖ضw F Ko/Ĕ.\/ `B:J ͢<7R!pqMəۓBr4'9QÝ,|Å`r5}C>ᏬCj:{mUs0^Albz,P\I\8mȵ9?iY'mV(/+^!)?IAj n| P"qJ`O]Ḵwa! Phpf \\2_կsx#0Mkm~z+;]zZE! 
kre鼛=>pƪz`~=bKǃDt{nY҅q8"0혉soJ%[遻#Rez6:H9;7q3oD^m1 je@ 4C XcC*V^ C0ɎPWgL`} g}D 1&pru!RQM5 Fv0Ʋg:R%/)^Хg i+܌PCէeD]:L}Kd P63Y.%Ĥ7!_$CCNAOdo뱺696[_\6'{Kcoi=CI6v<e}\oɁ)e `E4Dw>hq }۳۳lR*j:t}MnNq˞lt8TG5ccKvb|CX`(L8V5UoNՒ(6'U }~T.]C[&cE+ZCR&h1 m^F޽X .<&֌_";Df5Om*/o|-8oW\o Yўe,KU_seQsjkOZX`vhwEIH_m^pgKy]$;)H9v]n`t]LyOEj.Ν(]&T&2`d8ŪW0K<ɘ B4^8i`(^®]J6Z+Y#Ջ#K`]O ˮ*ن"O9kfĥt㍁H3 "^N!,*1,4 %f;}pEokY K9!7S&}Ծ9U qB1[0nLDY޶GQڐ .183%ԥd'PR>/TJ8 '_yCۥ9w@LCyWo`䗎bn'T^r/Pg]p+\p$?΀)ЇPCMqc!/ư0,`g #!d~ ˫g$)*$91orljkv%*/*L(5?|> Ѵ-`@k?vVYL@''4!gmvMr#] !^u5ߎe.Ok/%aG6s@kZie{?׋[OW75=>HPG[0O4fyJŌa9=S$=flCN؂2@k=& Z@fBa%:vOh"p!2;6ua9J6(N+Կ=F 68iUC˥G_~?-\kN://;ӤM?=Vvu)ݕk]nH6k!MZĠI)S3"VҎS 7/g:ӥ|I/] ogoLO׻@j*j&&uH(Rb*zz3'#.i8.8+=u}l7Cݖ]Y\:mۍ'|b:.ⶈBÁ)C2́Z6¤8l$"M0vJ/JQ>F\l܆kos O8:9*\y.P1Aj7DAЂՒ"dF)N+ī%FhH0~XA =NQ,Ǽ ." €\QÀ?F~E|9 .Jx"t)߆EKCCr c3 sMttӛ݋VV_1N1KK0 B`5z"j?cm$a@>eizsXbZ"1H(OYa΀buD?~ؚCqi^f&<uɃ=݈Yc*Yإ+&|U/KToIe"a82]ޥ{Y:F{SiUM>+ޮ};7 -@ ϰ0&$uy^ܺP“NH bޠ>6ti#">~ 6㉇'.C,945F"kJC~=%b*iMK|UF`o|ôLà֎ Bxw=|5݇@>+%I{JT H>8`;R!BvYjzL]YnOi#r 1nH^t\3acHBib!-IPe^i0C;狘nɇ̚@ Tyl_/a7 eK:kVox ӈsWW=4xݲZ56U +Ust;n$L4$-J60iڣBi Lr G8 R-bғ9u -Mz_Dbo}LL%︍ɚv<Қ#U;d@A*PBC[+lT"|,G0 PE<WuɊ:^rQH C7QpD1y'1C†R*R`*:o0=S 1"LJ y:Y,-ةE"s KZdٽ6Sga\HPyJ<*~;L b-Pq,J, x`R5%ř1 @?ozxou"nX70.z}S> Ov %w.=8 *|۽+Z]_Kɤ -ïN |ؔ % VI(E9)wѩz-ϩ1Ā-DI{w\\SةB9ԷJ4c0*bn7%`9_ }&@Τ^,៍=Ajٔ(Y:뻼u I铤 59!}.w6]34uq"ꀨaxBBi,k|M)!Ij8#Xd۱7;Cvp '~6GPKPcc61^ HLOuHiTup#[-5Àr7@홠n?m?Ä ,=ޞ[X-Ps^;Ei,zy52=J'Qt_$  ?b(iu?+svhD>oۦQ/+ڴ?Γ U٢gv?7՞iאy|;$Q'Al݀e$_WҊ\J~!#dJZPT 1󳁰ӃȡOz[?7.`KyLu@uS_lg~/V*Khnyk&g Ƽ@l;ea|iA(GUuk 4vNHW]t9WgA0pP0h!8ir@éhyĦ,RThf1ڹA3-˯639Ʋ!iu1xykޛ0&=]s=;I^>YaNٴn= %5Yw˔-!Zwe E\zw;A )f07tf旐fVfv;0=K2q/3%Z~Y磻bʺYN8$lhH R#!"կDӥM5͡iX~_ }f܁`eլr@&K_85#ÌgO.w\ABr;%ٽ&Wk6ڽ2c'':yI@z"3|-$s#v! 
/fS(b}*s b:F^:x㨍MWM$%؉4Ado<8'-w= AhM4XwM :!/&2䋭B>\9̈́ ct$XVA[̬B0,t ~APoαIwK Xj=F r4a 9,sQf1FE g8 E\|Qt/c_(t=KO4ލ .( sU49z$ !LE"_ gV2prtAS`'(tL)Gl{A'նXkۇ˴kL|z_hf~&'Ulі8Ni┞eG~ZNd~O+rt@g69!5i$GBNoj]LA)c6 z O(ZРumL|쨼ˁ70Up G`;2b_Iok|Ye@'>!O"E{+0+yIH 3ChTaskBV)H d/php VG\&/85z9&T_0.튜Gi9(EJ@F\wOo_gog=*SQ'4;o*0.n{#~Y)}.֫}Dc.. ZzkH 7hf,>Y)=x F9>kH^=`astNpt]6뢋ˁ\9职 #:;N0 !@,Ю={j#a&-hQWH4Ҙ8'zSd$|FQ¥f7On'Ph5DmfUF L*:Wuu2:M [¼ G(4T'|gj͈fŻZ zz b@0ԉ" XVb2.O D%Q-' 4@wE .V$[@=F@Gߡ@尹^4_ P_ώI >V.4F H8I_=iIJeg>Б a!L'K7)q$AmaL\ϯMTx+,GUW2oh1Iҹ,p#%v7w6!i_0'- W%Dp $bYϵQh@U;Q8MƽpX PA~8^b%A#T'Pq,tٷkhxŁuU+EK-7:eg,5kh4Z:-P/3zG)V+kLH:ӭDy U+C3>WuFzqo{!aKhP*&LA,cN*qJ@ji7|BECg"R& NR^ʨNS3&#l!qХ烀%:zaX)^<bS=q\㡾<1Ə$k4`B@qt1 >v̈́;괊iC+'#DzB 4S%)'O<##;m'M(3YnJYg 圝v*Rh dYY`L`+6^MW'ԛ""xOMݪ$Br? jğ/9O=V;mR>[,%Tı8gc\r!y KzJNAu`jbAsLnga T (<ʁ{\~5SUl{\C\qiйNHA/#@XD!& fdRIH MIdD"9kA$Y˺AZlG{߀! %G1^p\?j)F{( U7ZĶn.j%g=oHPkУyNF| y|Oȩ1Rv]D9i_5i && -mT돗p݋ҐGX?E7W+kG+z$ ͛u0'BEJYݹ]$'޻GCJJTGSLs@đhIN-]/t,񷻶/% adeW4} sPz6T3;/XWe=p7[A;]̌`|เTpoJٚ`K̽HB#pNe(ݳ Dz 2MZvi8GBsM9EM#jy/^E` ^ 2d9LBz4d}L(yӱ]!I:PN`JŭXY8.͛',`RkӟBwxoy`PZiƚT{|=NG ٵ dh^pH2g-  v +bYn >f,Fм#1h{= iȠ &7g%t}ȷq}EsSJG%Psoԥ'#Gp/_e؉n'q{Qfn&C;rs /ʉ4UϞ vcwTT;#O \ њGño6=|ƻ <wmyhrE6kQY +S-^w(=q0gd*69l"zT !萡2ɏ&[ tA'<81%p6 kMlΣMSMo+<[E<,``+ ɺM?G;f*]Vx/az[mS=WX};,StguMS3"BE| vz (=rV ꮄcך dp+!H uLҪ=&Os GiJEf|% k;ŸU;#Lٻ;X0u0W4u 3R6J 59{GDՅ :Æ+sc w H!._{/@> ȑQMtH1)\2(VÙ@h{@j8H`GHk=yqoQ/g;,|則dyk?A"TT1<8u '@*h9ǒ?ѡQm0p/zhAL1,"`Wȶf˪2߿Pp0|(uL`J@@x"sLg=BG#x"^1 Fo߄Ǧz߭[]'s  ~GXvwKWU j 39r}i]'Y6}/ۗc/[Җ鑡ni2?k.[ސV N&.(}%8lK{MY MHmy,4 ~'2 0n wT1'wbCMp7-+#zvҩ/[IAeDԩB}ɝ}% PDa-˯`1䉔~7}/_s`.4ph/"X,lqPXmh-yxw]: Tmi&5{uă -~ 2p='JʾIdHAǮ骑 NMWjXwF@EQYLLW>ħv&'t@vp 7-攅7'H]TE{V*?Q%Zs[윽 gk(Ҧbx8of;plC#^5,C/xӂ#EBNCÙ~6fwB%H䝲#hv+ZRZ]| .B\f@I&j.iܝ?FMw1&CQ> X=>c*EgVlWHՁC2D.)(eZ7iܯF `WL j6(MWAveYE7"o@֧:\wC B\qMԞ>; C_up˪LܶOv d)dH"~ h(4]o[͞=|  `O$q0 znچx jͲN`:h?\]Px &8}8œ?'-pC!M*} N."ϝ8 hxrǢUReݩ͐&O<4U>x{HzˌwA&T8ҁՍW֦:O Ǒh-v)IqA{ؓ=􅋟W~ ihVzdJHV"aV2VzH$F_75pw=UL-LT~Y<>{K6D8e0_^=HH~SԂDȮ Y!&>7\̸ f!=5 
,y?5rӸGX_1>^]>", 9+b2ujL~h$_N7^TߎG~FLWz&GΌwbF25ISOpBXp*{KNv5M3y!F y-p4>gXK3=>DERx=d4ArZJʚt2&\i<θWRe$̬oR{ßuy} ^$zJݏx܂ѳ*DDIM86Uv algAGoVTܬl%'@1˔ vi0'$emOyԟmkKr?^_>i,p`l F C(=Nz玝cdOY;@0#;5 &Ȳ(u/[?ѫQR"4nm Z>sŽ>rvmD םKd,Omvڶ6*| lY9Gd*u#Ĺǿ٭^yHA60^zu& .U9NY %x0n_nQ  & DzֺAhÝI'y<FxŠ9+? f‘Y=~B`<խmd0>mf:(aJE'=ndφ:[qs q6 9ILx:l=^sĎ!:Y);\;.\o<+c7-\T(:;zbrx ܧCsZanu$OEͱ F;sAO`uǡx㿑F9]OOȪu,Cg&]%p67b@a{6{UoeUnm1a=P*c҈q2 d-d@(5k lLqR"Yñр8 vo~PY~oZ'o L/!yN8&mi_I~kZ $y?0b`>p7&nU)Bۆyn&Ċy..~a=lK8bf+>sz ]ǭqHq_ĦV|AXo2͑pr@A#.x$"m.]e1m(+JZOĕ,,K#ll/oZSWKUYT( )~;u")VePf!O$O[}% 綼Os#(cB<f bK7$Zֹ}m$κ]*Wgm]k(2W/>dS>[ qSfC@ߛ~`H׎}Y!Db⿣h\-_ 6JxS0 A\2ݭ @87}R>_ρ~! }4s ®Y5fTڂe+ 9ہ  N%+IY@:604ӕUB  $N:`a2l6/')Q| Ql#&H?=}a?(8߾rq#I7~v\V([N{rO55-88?c &mfA.!wWVk_3 do)>5O?gZSqIQ%gwhf !^eOB?s0(te'p >CNpGPiX{0w_=8<͟1jA< Mw$x?A:SxV Ac5~N~TJ;WUkd㾁iڞ27 fmb6 {b68GoƗed(|xa`3sU SJ-qLt19 !>?-I|,;&,jNnk?ufdX24K5pL uRY(5YvP}Qf72](BsH׷:p(&f 3³ONWf{=Z&+?`dr*ˎbҸ> ʼ5*YeNikԵaXW/e \fvq/iil%lNrs&La"I )􉌕}׬eނj~J%}O菊8RG)ˌ70 M;Ȋ>M@zSՍP𘄘.mpRC TT"^J~XHϣm l zD]'"Z>PW)PY%)d$2`ej G<,Rb>#C":㴡w6<0MCnFbr;1LBC{ 7,ZIo׏ 1wD4FXH3xHmya/1 ##} 80vXk̴sH *@wm>-&422e˞|rJ#B甖ik. Zg@OS[c>Of/dzbA!wE=ʦu^@$ ,BC:9%FB3""rg %NB/8`E_#3 ,1M7~]?_̽DGc $)x0DfI Vw2[egnQ8Z RHtݦq|7ewh =_L r _MhOtP iAG_:y;]goډޒI{"ZLHdE7P4/$It!a}K!@E K]0 ?fٷA32 jG0gTTnmH.qٕ\,2}wE]/Vf;nbK4Vd|PSΈG|8 ~' >O :  `n5ׅA \<V\i nB-y]2=xoz6#ܸ{"2Rqaqŵ}sD|7$"r t^r-V3^{q磫S.:BvZ&?a RodĻ]c+Agv.:ڤI-[/>iW-2V_& |N<8 F wV' cm CgZc8N0zI&J2[7$Pd߄6Xwnנ%$G3 bI{?2ΑcG2dx@ׁȥL=AQ~UdA` x<>4D:p8`aHxaܰ 6 ڼ|TX+>Eg_ƌM' IZ?] ''Hr~:LPUƻw2hinlG3``f,Go :9t}\Ͳ矬PJDH K=|%PO9suOc?9Ym^(Ăxw{6a}Ȉ/Wu!=+n#C͟,խؤV˯ZfI[ɡ 5Q . v$)qur()QIvE_ DH@c 7 ywf]Fr.|`Ae4=N %wK7ctؒ_*?9]` >Tp]>n7&J i7>b,8M֧S:k'`vs{D1>D@u?;y r Ƨfidz`=ӣ7RY!/F'Ь{.$3L ӾPGY%@{m\\o6i jU腣KNd_b]r@=dLdѰUm}s H> A1r$cQ" )o,.8=0= >1hwL=;"}j(a<|qo&HC胪>&M|6:7)6<y(HyG"wA_~0,/w0>1T8rwѐ4,%8a3t{bEˮGsH_ٌrR`54Z~?5H۾8*uMڠMc`2'ޣ @Tz 7"5_)\26 Cd&B 6CElsk3g5_n Ba/I8t83Gk`'?őJ~ tgy#0AA³HVmF)jY`w6,(C=a!wuka-D J^2buHs,?2N=!  
/`nB i~2ps@愀e H'CAǐ$H# Vny`<_Ӑ'x\5 #Z3'Cn-g+r7taǷS#a{܏<(Y9 g?+o(Kf|ڄz+o¸L j@p!pۓՒ;7`dhCä1:, ^S p; t‘J}2TAװ}3g@Smパʑ0Y%0_HAqpJfv T ݑ_ḇG5MЉY2OKZddƪK:}x;e Q293C6R{])Qs8A qGHUٮ|E<7/2 e** ..RY:wQ@kcj\כ)n kGcȎD_A CD H!-)1 !Q&CM,g|?' %F ,`YivL0md^]V\E\,jpH!`/O"DzWȬE0>]o$dg'qǹ2$g`8d5^JxR$\67AISдW >IU- ]<"| $x=af:j})؉9DaTJa.8A+վw'sΛ9X6z'.$\b0:Jfp9!˲LR˹)ܿka`Cb .ҏeg->jOɗ >L F}vy'a>iăYYqz u>>_" /Vh{S`ܙC4&um)7Ԁ,+!<X`it@@$"wA#`/gxy:x1Qׅ[3/'@ U&A0$k冚ɋDCF.& 8H5S!1R,#9 pan,|1+<>CIB=ZNGM*vʆ]t3ݓ ީ2?k83{8`{~('Id%Wy&=dkw4kX]~[dcM 7y9`0G@WyHIB V|P -|KBҠgtx1J˝AdHK:Ej+esư[wuon=ǰ+??lUr4O'oRXƆHo  vR)sID4i0Vݞ V~l\ONHoHd09#F 35=8H#ĀZ:x Q X7Tcߓ*ixu]HC0&k`)@HEj @R/Օ#Xx8sC) ÐH9F-ճS|VD0@}Qd|փ8Ju6 |JI2N_L)G̳ T3Q'3`~P7_l!@$O{-i𐿡^fea0FdxlITt1GE0<+\, ǘĢvl =`?T6{juNN\_C|8CEbEN̐ȉ/[EHHL"mN@pV rGޤԢ8rnle֒Ur>%gd{p8%K6 #𪕉wƉ$pщ-P!m@Yh^D5M:5r;]m[lŗͪ+U/N3^ͮ Ϛ=@53xTRTp B:M@ELZ5#ӭ7Yg yӠph[$gxFҎ\S0ID^{7d0[܁@hZ~ \B`G)sAE,ҙAs>q-^"PR_ yi[5[X}ʊy. LJYK1ЪQA1HAi"dN?ao@]>,"oq>?^H-2OBn幮'{!{I # Y$p&v268A@QS4w9۪=t+HCYiA1#ꄔdfؠ$lA뱂 >a29Fϸ~2wP7mufS4f>Y?6v@vj%fl,&?8L @N+q `x|5&aPn:GwAWY$'Ș \? Yrn$ >$p( qݲRL`3q kĕނ=$>=%u+EړA1B: /J fV ,m,ՙ om peyH*33]OQ`BBr ^d9Ĺ~ӎ?0'ux ^J=y8y ++;CΒ {'y BB!3sPT#utV&6( ( c;؃d+krEl*p<SEu ҃yf @8JzwQ'߄l[--H  9434*\!⪘|pnopb?fa${-_.>+#%~#+qI/R'e!ܢ"<⥼ed5B䓼/ms1@jv@b@#:@ǘ%-ڴW}3xw!2VJh a0)IbPӦU;po=y^;d~/ĽcX?]]zkgOP)Sz>Ywz TMֆ9 ~2'd\+[Eglp.U]!ݤ `^9{1ˢ$ ~?qkpkvqk 5g)ב]Ȥ#)^f~#Z$T4qCe׈"t$DG՝z,n?;򭉜'jofH2Na[.p/FrrcTK~L=HbgtIzܤ&[c% y! 1HzX I i:-]IAs:x[WQgw3 drdWޯdMVœtq31dS!$Hl+;{ r+d yr,l @wR,sXU~q䟉vhgc/ AKoH9oR [!|SW 넇smr[Y4{.Ģ!I+UOsr"[t|U[ ' Ph\)'pNrnO>@O3!#lAĀB? 
;IFB~)U8!p$Wcԇ |)FoίW܇ペ`̒f}jmXTU5C1fcH֥1VӁu}vM+K SB30_@8bI캖I2o΃1@$J,UL_|X|NAׄ7T%PHG4W'@$g2t̛Cpғq$$ oOAV &8[}`US EUܦt  ]SQ=t)lH*MzoW$2n5*}pdЄ)0t{,P83: BGUv6d`s(_•N$^H?2~gO#OolV^EZ#Q*ҡRJ8o50;>rEv%ِ;s[&O> )" mIY9 V$Z>儗m 7 ͦ~ď <Hc]!ܓއLo~,DA܋GuեtO qzp~;Hd%Y]jw@V㲍m 7%Lv DkyEU6ΎnwZg KG#$Zc^Br~K5)00S̳iC\gL`4PH Rn,P`}SySa,< tٸK)yB["dY-4i[^qdE wZ'[ba]- &R!a3 %36k*hzUpaϔ^0VbmϐȺr97KAgn[a hp{IݍzLX6 N'/8N.KX7RJtط2 WP03"oP-Qs|:'q .hqMF@ _RoS@=g EZE*Q*sP" O0}̽7l ˑkd ,U@` ,hKJ%\ۙ'c'f>31X~<ӆuB;8}p"h#e0 wBNfِ 9LD3BM G"#E J0qCCĄCƆڝ9GIl#C\S2~aq%gkC._ރ͡)LIL]AA+*q <)ĠLy;ׁ>|1[ 16[5d)7j&?/,'=#ۊIyWǖ%SNN ۾w.ج%YWPG̔Dz=%VvW6#M}%l /V@=840ORX)I9f>3RI;8ry\fn}:CWJ[4b9 `Gg7bXn|fpKŜ-8Cև[ ļn4ƬZZ~ى {ti!<>oZ~ I#G^`PÇPT"`l=ܷ:텀⒤ZX_p˫2)t<(kleZ1U9uѝ87t auVi TefD&s1:L4GE #zWtn]fMڡV`<c$O"hYm[ly@e ]3m.qi3 M2AzYXmӃPUdѬ*f5Iexh#HijE^Vs e`MAcStGلȄ/4$D"yZuw'_t}7CpA]tCI80Ab|Yzzi]#t0N pHH.(Ueg|~Y &K'p9ێvBڳ C9qpL:۝?8 S쥗1$]̳zCqg%j? 2?m|< %Ǐwq cr|5'uU7 .4֩FL/XFS$ڗQ1-}c;ۉUDTx[b' H~Vm0D) U-N$Xj{ WpP&\*l/Sr㖌>ޝmzޣA ymlY뷳<*D1ǐ6˺9㮼/B2"Ks-t `LmXҸ td"*LNC0%j㮿YdB:+ezr-[ٜ5)0"6[CL[4DEFup׬ <y 5yZ` 3@x$}tr.f;jʩLD8Zݎ[ ?F.w~E>WHF"; p_ osD/:6T+cګ¤MP*cUb2K[{1ɕۖN`05`@$@0s[F)]B8d DRx6<%kБ_yF& /ODR-@6<tB$(,aT ]^CZȄ٩"w@̴ /X\A0 J1:ĉ$8B_c%zKkk{9Ϩ!N d3Αk 4b~$Tz[h 5A4t8Ll5HOiT~}ӶXmAUt(A̘0(Ʋy>.*+TcvtzH 0!05S]aCRRb}*)AwAu+QhCW)?X?Ɋ.KX9rF ? ;Qz"S5y8YF;}HQdbLN<:!N??ux*,T2"K,NơM<wtRrRE \cfl)q*ݰ&koDs'۲haؾ1")a{=NEAH;武SQ]H*h!Z-@b l`lKVbw^[ %C7%fz_sQ *syI/, Q{Zte#5!ҩSOv3A)>wa!rYj30:DUK;kdD]m+Θ̀wb@Ƀ}Be|Zo Nh|%8V:uW}N{#; HOq8}znDk8VpǔgzA4-8?B.}q} AuL{5w~%SQ  Ռ0֧-42 78J$ . 
1Õ+3-(/7<??Մ~Q*gָQC l*xo4q7PʎRi($dv/kWƝ#RsΦ~ 5CtAvw+wϾUq @ J% x< 俎 wPWB(bFAr2o,,$doew8$4'b_,឴v|V Q9s/&=^vS\ 6.y{^,ׇ@{b©4CatcΗD$xBhfϲPc#i:(t1M~^?k iqN &󅉱_CCHQVh猌2ܑN$4ۃpammtools/man/0000755000176200001440000000000014452540116013042 5ustar liggesuserspammtools/man/add_term.Rd0000644000176200001440000000465014222504522015111 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{add_term} \alias{add_term} \title{Embeds the data set with the specified (relative) term contribution} \usage{ add_term(newdata, object, term, reference = NULL, ci = TRUE, se_mult = 2, ...) } \arguments{ \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. } \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{term}{A character (vector) or regular expression indicating for which term(s) information should be extracted and added to data set.} \item{reference}{A data frame with number of rows equal to \code{nrow(newdata)} or one, or a named list with (partial) covariate specifications. See examples.} \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{se_mult}{The factor by which standard errors are multiplied to form confidence intervals.} \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}}} } \description{ Adds the contribution of a specific term to the linear predictor to the data specified by \code{newdata}. Essentially a wrapper to \code{\link[mgcv]{predict.gam}}, with \code{type="terms"}. Thus most arguments and their documentation below is from \code{\link[mgcv]{predict.gam}}. 
} \examples{ library(ggplot2) ped <- as_ped(tumor, Surv(days, status)~ age, cut = seq(0, 2000, by = 100)) pam <- mgcv::gam(ped_status ~ s(tend) + s(age), family = poisson(), offset = offset, data = ped) #term contribution for sequence of ages s_age <- ped \%>\% make_newdata(age = seq_range(age, 50)) \%>\% add_term(pam, term = "age") ggplot(s_age, aes(x = age, y = fit)) + geom_line() + geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper), alpha = .3) # term contribution relative to mean age s_age2 <- ped \%>\% make_newdata(age = seq_range(age, 50)) \%>\% add_term(pam, term = "age", reference = list(age = mean(.$age))) ggplot(s_age2, aes(x = age, y = fit)) + geom_line() + geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper), alpha = .3) } pammtools/man/newdata.Rd0000644000176200001440000000737014222504522014757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make-newdata.R \name{make_newdata} \alias{make_newdata} \alias{make_newdata.default} \alias{make_newdata.ped} \alias{make_newdata.fped} \title{Construct a data frame suitable for prediction} \usage{ make_newdata(x, ...) \method{make_newdata}{default}(x, ...) \method{make_newdata}{ped}(x, ...) \method{make_newdata}{fped}(x, ...) } \arguments{ \item{x}{A data frame (or object that inherits from \code{data.frame}).} \item{...}{Covariate specifications (expressions) that will be evaluated by looking for variables in \code{x}. Must be of the form \code{z = f(z)} where \code{z} is a variable in the data set and \code{f} a known function that can be usefully applied to \code{z}. Note that this is also necessary for single value specifications (e.g. \code{age = c(50)}). 
For data in PED (piece-wise exponential data) format, one can also specify the time argument, but see "Details" an "Examples" below.} } \description{ This functions provides a flexible interface to create a data set that can be plugged in as \code{newdata} argument to a suitable \code{predict} function (or similar). The function is particularly useful in combination with one of the \code{add_*} functions, e.g., \code{\link[pammtools]{add_term}}, \code{\link[pammtools]{add_hazard}}, etc. } \details{ Depending on the type of variables in \code{x}, mean or modus values will be used for variables not specified in ellipsis (see also \code{\link[pammtools]{sample_info}}). If \code{x} is an object that inherits from class \code{ped}, useful data set completion will be attempted depending on variables specified in ellipsis. This is especially useful, when creating a data set with different time points, e.g. to calculate survival probabilities over time (\code{\link[pammtools]{add_surv_prob}}) or to calculate a time-varying covariate effects (\code{\link[pammtools]{add_term}}). To do so, the time variable has to be specified in \code{...}, e.g., \code{tend = seq_range(tend, 20)}. The problem with this specification is that not all values produced by \code{seq_range(tend, 20)} will be actual values of \code{tend} used at the stage of estimation (and in general, it will often be tedious to specify exact \code{tend} values). \code{make_newdata} therefore finds the correct interval and sets \code{tend} to the respective interval endpoint. For example, if the intervals of the PED object are \eqn{(0,1], (1,2]} then \code{tend = 1.5} will be set to \code{2} and the remaining time-varying information (e.g. offset) completed accordingly. See examples below. 
} \examples{ # General functionality tumor \%>\% make_newdata() tumor \%>\% make_newdata(age=c(50)) tumor \%>\% make_newdata(days=seq_range(days, 3), age=c(50, 55)) tumor \%>\% make_newdata(days=seq_range(days, 3), status=unique(status), age=c(50, 55)) # mean/modus values of unspecified variables are calculated over whole data tumor \%>\% make_newdata(sex=unique(sex)) tumor \%>\% group_by(sex) \%>\% make_newdata() # You can also pass a part of the data sets as data frame to make_newdata purrr::cross_df(list(days = c(0, 500, 1000), sex = c("male", "female"))) \%>\% make_newdata(x=tumor) # Examples for PED data ped <- tumor \%>\% slice(1:3) \%>\% as_ped(Surv(days, status)~., cut = c(0, 500, 1000)) ped \%>\% make_newdata(age=c(50, 55)) # if time information is specified, other time variables will be specified # accordingly and offset calculated correctly ped \%>\% make_newdata(tend = c(1000), age = c(50, 55)) ped \%>\% make_newdata(tend = unique(tend)) ped \%>\% group_by(sex) \%>\% make_newdata(tend = unique(tend)) # tend is set to the end point of respective interval: ped <- tumor \%>\% as_ped(Surv(days, status)~.) seq_range(ped$tend, 3) make_newdata(ped, tend = seq_range(tend, 3)) } pammtools/man/warn_about_new_time_points.Rd0000644000176200001440000000063714222504522020757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/warnings.R \name{warn_about_new_time_points} \alias{warn_about_new_time_points} \alias{warn_about_new_time_points.pamm} \title{Warn if new t_j are used} \usage{ warn_about_new_time_points(object, newdata, ...) \method{warn_about_new_time_points}{pamm}(object, newdata, ...) 
} \description{ Warn if new t_j are used } \keyword{internal} pammtools/man/gg_fixed.Rd0000644000176200001440000000140414222504522015100 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convenience-plots.R \name{gg_fixed} \alias{gg_fixed} \title{Forrest plot of fixed coefficients} \usage{ gg_fixed(x, intercept = FALSE, ...) } \arguments{ \item{x}{A model object.} \item{intercept}{Logical, indicating whether intercept term should be included. Defaults to \code{FALSE}.} \item{...}{Currently not used.} } \description{ Given a model object, returns a data frame with columns \code{variable}, \code{coef} (coefficient), \code{ci_lower} (lower 95\\% CI) and \code{ci_upper} (upper 95\\% CI). } \examples{ g <- mgcv::gam(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + Species, data=iris) gg_fixed(g, intercept=TRUE) gg_fixed(g) } \seealso{ \code{\link{tidy_fixed}} } pammtools/man/staph.Rd0000644000176200001440000000135314222504522014446 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{staph} \alias{staph} \title{Time until staphylococcus aureaus infection in children, with possible recurrence} \format{ An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 374 rows and 6 columns. } \usage{ staph } \description{ This dataset originates from the Drakenstein child health study. The data contains the following variables: \describe{ \item{id}{Randomly generated unique child ID} \item{t.start}{The time at which the child enters the risk set for the $k$-th event} \item{t.stop}{Time of $k$-th infection or censoring}. \item{enum}{Event number. 
Maximum of 6.} \item{hiv}{} } } \keyword{datasets} pammtools/man/elra_matrix.Rd0000644000176200001440000000153013662013606015640 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-specials.R \name{make_time_mat} \alias{make_time_mat} \alias{make_latency_mat} \alias{make_lag_lead_mat} \alias{make_z_mat} \title{Create matrix components for cumulative effects} \usage{ make_time_mat(data, nz) make_latency_mat(data, tz) make_lag_lead_mat(data, tz, ll_fun = function(t, tz) t >= tz) make_z_mat(data, z_var, nz, ...) } \arguments{ \item{data}{A data set (or similar) from which meta information on cut-points, interval-specific time, covariates etc. can be obtained.} \item{z_var}{Which should be transformed into functional covariate format suitable to fit cumulative effects in \code{mgcv::gam}.} } \description{ These functions are called internally by \code{\link{get_cumulative}} and should usually not be called directly. } \keyword{internal} pammtools/man/tidiers.Rd0000644000176200001440000000121614222504522014770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidiers.R \name{tidy_smooth} \alias{tidy_smooth} \title{Extract 1d smooth objects in tidy data format.} \usage{ tidy_smooth(x, keep = c("x", "fit", "se", "xlab", "ylab"), ci = TRUE, ...) } \arguments{ \item{x}{ a fitted \code{gam} object as produced by \code{gam()}.} \item{keep}{A vector of variables to keep.} \item{ci}{A logical value indicating whether confidence intervals should be calculated and returned. Defaults to \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}} } \description{ Extract 1d smooth objects in tidy data format. 
} pammtools/man/simdf_elra.Rd0000644000176200001440000000122214222504522015427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{simdf_elra} \alias{simdf_elra} \title{Simulated data with cumulative effects} \format{ An object of class \code{nested_fdf} (inherits from \code{sim_df}, \code{tbl_df}, \code{tbl}, \code{data.frame}) with 250 rows and 9 columns. } \usage{ simdf_elra } \description{ This is data simulated using the \code{\link[pammtools]{sim_pexp}} function. It contains two time-constant and two time-dependent covariates (observed on different exposure time grids). The code used for simulation is contained in the examples of \code{?sim_pexp}. } \keyword{datasets} pammtools/man/formula_helpers.Rd0000644000176200001440000000074313662013606016525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-utils.R \name{get_lhs_vars} \alias{get_lhs_vars} \alias{get_rhs_vars} \title{Extract variables from the left-hand-side of a formula} \usage{ get_lhs_vars(formula) get_rhs_vars(formula) } \arguments{ \item{formula}{A \code{\link{formula}} object.} } \description{ Extract variables from the left-hand-side of a formula Extract variables from the right-hand side of a formula } \keyword{internal} pammtools/man/sim_pexp_cr.Rd0000644000176200001440000000045314452536066015654 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pexp.R \name{sim_pexp_cr} \alias{sim_pexp_cr} \title{Simulate data for competing risks scenario} \usage{ sim_pexp_cr(formula, data, cut) } \description{ Simulate data for competing risks scenario } \keyword{internal} pammtools/man/split_data_multistate.Rd0000644000176200001440000000435114452536066017744 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/split-data.R \name{split_data_multistate} \alias{split_data_multistate} \title{Split 
data to obtain recurrent event data in PED format} \usage{ split_data_multistate( formula, data, transition = character(), cut = NULL, max_time = NULL, event = 1L, min_events = 1L, timescale = c("gap", "calendar"), ... ) } \arguments{ \item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object on the left-hand-side and covariate specification on the right-hand-side (RHS). The RHS can be an extended formula, which specifies how TDCs should be transformed using specials \code{concurrent} and \code{cumulative}. The left hand-side can be in start-stop-notation. This, however, is only used to create left-truncated data and does not support the full functionality.} \item{data}{Either an object inheriting from data frame or in case of time-dependent covariates a list of data frames (of length 2), where the first data frame contains the time-to-event information and static covariates while the second (and potentially further data frames) contain information on time-dependent covariates and the times at which they have been observed.} \item{transition}{A character indicating the column in data that indicates the event/episode number for recurrent events.} \item{cut}{Split points, used to partition the follow up into intervals. If unspecified, all unique event times will be used.} \item{max_time}{If \code{cut} is unspecified, this will be the last possible event time. All event times after \code{max_time} will be administratively censored at \code{max_time}.} \item{event}{The value that encodes the occurrence of an event in the data set.} \item{min_events}{Minimum number of events for each event number.} \item{timescale}{Defines the timescale for the recurrent event data transformation. 
Defaults to \code{"gaptime"}.} \item{...}{Further arguments passed to the \code{data.frame} method and eventually to \code{\link[survival]{survSplit}}} } \description{ Currently, the input data must be in start-stop notation for each spell and contain a colum that indicates the spell (event number). } \seealso{ \code{\link[survival]{survSplit}} } \keyword{internal} pammtools/man/as.data.frame.crps.Rd0000644000176200001440000000214214222504522016676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model-evaluation.R \name{as.data.frame.crps} \alias{as.data.frame.crps} \title{Transform crps object to data.frame} \usage{ \method{as.data.frame}{crps}(x, row.names = NULL, optional = FALSE, ...) } \arguments{ \item{x}{An object of class \code{crps}. See \code{\link[pec]{crps}}.} \item{row.names}{\code{NULL} or a character vector giving the row names for the data frame. Missing values are not allowed.} \item{optional}{logical. If \code{TRUE}, setting row names and converting column names (to syntactic names: see \code{\link[base]{make.names}}) is optional. Note that all of \R's \pkg{base} package \code{as.data.frame()} methods use \code{optional} only for column names treatment, basically with the meaning of \code{\link[base]{data.frame}(*, check.names = !optional)}. See also the \code{make.names} argument of the \code{matrix} method.} \item{...}{additional arguments to be passed to or from methods.} } \description{ A\code{as.data.frame} S3 method for objects of class \code{\link[pec]{crps}}. 
} pammtools/man/get_surv_prob.Rd0000644000176200001440000000324014222504522016204 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{get_surv_prob} \alias{get_surv_prob} \title{Calculate survival probabilities} \usage{ get_surv_prob( newdata, object, ci = TRUE, ci_type = c("default", "delta", "sim"), se_mult = 2L, time_var = NULL, interval_length = "intlen", nsim = 100L, ... ) } \arguments{ \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. } \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{se_mult}{Factor by which standard errors are multiplied for calculating the confidence intervals.} \item{time_var}{Name of the variable used for the baseline hazard. If not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else \code{"interval"}. The latter is assumed to be a factor, the former numeric.} \item{interval_length}{The variable in newdata containing the interval lengths. Can be either bare unquoted variable name or character. 
Defaults to \code{"intlen"}.} \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}} and \code{\link{get_hazard}}} } \description{ Calculate survival probabilities } \keyword{internal} pammtools/man/gg_smooth.Rd0000644000176200001440000000151514222504522015315 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convenience-plots.R \name{gg_smooth} \alias{gg_smooth} \alias{gg_smooth.default} \title{Plot smooth 1d terms of gam objects} \usage{ gg_smooth(x, ...) \method{gg_smooth}{default}(x, fit, ...) } \arguments{ \item{x}{A data frame or object of class \code{ped}.} \item{...}{Further arguments passed to \code{\link{get_terms}}} \item{fit}{A model object.} } \value{ A \code{\link[ggplot2]{ggplot}} object. } \description{ Given a gam model this convenience function returns a plot of all smooth terms contained in the model. If more than one smooth is present, the different smooth are faceted. } \examples{ g1 <- mgcv::gam(Sepal.Length ~ s(Sepal.Width) + s(Petal.Length), data=iris) gg_smooth(iris, g1, terms=c("Sepal.Width", "Petal.Length")) } \seealso{ get_terms } pammtools/man/add_tdc.Rd0000644000176200001440000000203314222504522014705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pexp.R \name{add_tdc} \alias{add_tdc} \title{Add time-dependent covariate to a data set} \usage{ add_tdc(data, tz, rng_fun, ...) } \arguments{ \item{data}{A data set with variables specified in \code{formula}.} \item{tz}{A numeric vector of exposure times (relative to the beginning of the follow-up time \code{t})} \item{rng_fun}{A random number generating function that creates the time-dependent covariates at time points \code{tz}. First argument of the function should be \code{n}, the number of random numbers to generate. 
Within \code{add_tdc}, \code{n} will be set to \code{length(tz)}.} \item{...}{Currently not used.} } \description{ Given a data set in standard format (with one row per subject/observation), this function adds a column with the specified exposure time points and a column with respective exposures, created from \code{rng_fun}. This function should usually only be used to create data sets passed to \code{\link[pammtools]{sim_pexp}}. } pammtools/man/get_cumu_hazard.Rd0000644000176200001440000000421014222504522016463 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{get_cumu_hazard} \alias{get_cumu_hazard} \title{Calculate cumulative hazard} \usage{ get_cumu_hazard( newdata, object, ci = TRUE, ci_type = c("default", "delta", "sim"), time_var = NULL, se_mult = 2, interval_length = "intlen", nsim = 100L, ... ) } \arguments{ \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. } \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{ci_type}{The method by which standard errors/confidence intervals will be calculated. Default transforms the linear predictor at respective intervals. \code{"delta"} calculates CIs based on the standard error calculated by the Delta method. \code{"sim"} draws the property of interest from its posterior based on the normal distribution of the estimated coefficients. 
See \href{https://adibender.github.io/simpamm/confidence-intervals.html}{here} for details and empirical evaluation.} \item{time_var}{Name of the variable used for the baseline hazard. If not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else \code{"interval"}. The latter is assumed to be a factor, the former numeric.} \item{se_mult}{Factor by which standard errors are multiplied for calculating the confidence intervals.} \item{interval_length}{The variable in newdata containing the interval lengths. Can be either bare unquoted variable name or character. Defaults to \code{"intlen"}.} \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}} and \code{\link{get_hazard}}} } \description{ Calculate cumulative hazard } \keyword{internal} pammtools/man/get_tdc_vars.Rd0000644000176200001440000000073514222504522015776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-utils.R \name{get_tdc_vars} \alias{get_tdc_vars} \title{Extract variables from the left-hand-side of a formula} \usage{ get_tdc_vars(formula, specials = "cumulative", data = NULL) } \arguments{ \item{formula}{A \code{\link{formula}} object.} } \description{ Extract variables from the left-hand-side of a formula Extract variables from the right-hand side of a formula } \keyword{internal} pammtools/man/split_data.Rd0000644000176200001440000000371014452536066015467 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/split-data.R \name{split_data} \alias{split_data} \title{Function to transform data without time-dependent covariates into piece-wise exponential data format} \usage{ split_data( formula, data, cut = NULL, max_time = NULL, multiple_id = FALSE, ... ) } \arguments{ \item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object on the left-hand-side and covariate specification on the right-hand-side (RHS). 
The RHS can be an extended formula, which specifies how TDCs should be transformed using specials \code{concurrent} and \code{cumulative}. The left hand-side can be in start-stop-notation. This, however, is only used to create left-truncated data and does not support the full functionality.} \item{data}{Either an object inheriting from data frame or in case of time-dependent covariates a list of data frames (of length 2), where the first data frame contains the time-to-event information and static covariates while the second (and potentially further data frames) contain information on time-dependent covariates and the times at which they have been observed.} \item{cut}{Split points, used to partition the follow up into intervals. If unspecified, all unique event times will be used.} \item{max_time}{If \code{cut} is unspecified, this will be the last possible event time. All event times after \code{max_time} will be administratively censored at \code{max_time}.} \item{multiple_id}{Are occurrences of same id allowed (per transition). Defaults to \code{FALSE}, but is sometimes set to \code{TRUE}, e.g., in case of multi-state models with back transitions.} \item{...}{Further arguments passed to the \code{data.frame} method and eventually to \code{\link[survival]{survSplit}}} } \description{ Function to transform data without time-dependent covariates into piece-wise exponential data format } \seealso{ \code{\link[survival]{survSplit}} } \keyword{internal} pammtools/man/as_ped.Rd0000644000176200001440000001006214452536066014574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as-ped.R \name{as_ped} \alias{as_ped} \alias{as_ped.data.frame} \alias{as_ped.nested_fdf} \alias{as_ped.list} \alias{is.ped} \alias{as_ped.ped} \alias{as_ped.pamm} \alias{as_ped_multistate} \title{Transform data to Piece-wise Exponential Data (PED)} \usage{ as_ped(data, ...)
\method{as_ped}{data.frame}( data, formula, cut = NULL, max_time = NULL, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, transition = character(), timescale = c("gap", "calendar"), min_events = 1L, ... ) \method{as_ped}{nested_fdf}(data, formula, ...) \method{as_ped}{list}( data, formula, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, ... ) is.ped(x) \method{as_ped}{ped}(data, newdata, ...) \method{as_ped}{pamm}(data, newdata, ...) as_ped_multistate( data, formula, cut = NULL, max_time = NULL, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, transition = character(), timescale = c("gap", "calendar"), min_events = 1L, ... ) } \arguments{ \item{data}{Either an object inheriting from data frame or in case of time-dependent covariates a list of data frames (of length 2), where the first data frame contains the time-to-event information and static covariates while the second (and potentially further data frames) contain information on time-dependent covariates and the times at which they have been observed.} \item{...}{Further arguments passed to the \code{data.frame} method and eventually to \code{\link[survival]{survSplit}}} \item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object on the left-hand-side and covariate specification on the right-hand-side (RHS). The RHS can be an extended formula, which specifies how TDCs should be transformed using specials \code{concurrent} and \code{cumulative}. The left hand-side can be in start-stop-notation. This, however, is only used to create left-truncated data and does not support the full functionality.} \item{cut}{Split points, used to partition the follow up into intervals. If unspecified, all unique event times will be used.} \item{max_time}{If \code{cut} is unspecified, this will be the last possible event time. All event times after \code{max_time} will be administratively censored at \code{max_time}.} \item{tdc_specials}{A character vector. 
Names of potential specials in \code{formula} for concurrent and/or cumulative effects.} \item{censor_code}{Specifies the value of the status variable that indicates censoring. Often this will be \code{0}, which is the default.} \item{x}{any R object.} \item{newdata}{A new data set (\code{data.frame}) that contains the same variables that were used to create the PED object (\code{data}).} } \value{ A data frame class \code{ped} in piece-wise exponential data format. } \description{ This is the general data transformation function provided by the \code{pammtools} package. Three main applications must be distinguished: \enumerate{ \item Transformation of standard time-to-event data. \item Transformation of left-truncated time-to-event data. \item Transformation of time-to-event data with time-dependent covariates (TDC). } For the latter, the type of effect one wants to estimate is also important for the data transformation step. In any case, the data transformation is specified by a two sided formula. In case of TDCs, the right-hand-side of the formula can contain formula specials \code{concurrent} and \code{cumulative}. See the \href{https://adibender.github.io/pammtools//articles/data-transformation.html}{data-transformation} vignette for details.
} \examples{ tumor[1:3, ] tumor[1:3, ] \%>\% as_ped(Surv(days, status)~ age + sex, cut = c(0, 500, 1000)) tumor[1:3, ] \%>\% as_ped(Surv(days, status)~ age + sex) \dontrun{ data("cgd", package = "frailtyHL") cgd2 <- cgd \%>\% select(id, tstart, tstop, enum, status, age) \%>\% filter(enum \%in\% c(1:2)) ped_re <- as_ped_multistate( formula = Surv(tstart, tstop, status) ~ age + enum, data = cgd2, transition = "enum", timescale = "calendar") } } \keyword{internal} pammtools/man/pammtools.Rd0000644000176200001440000000564614222504522015353 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pammtools.R \docType{package} \name{pammtools} \alias{pammtools} \title{pammtools: Piece-wise exponential Additive Mixed Modeling tools.} \description{ \code{pammtools} provides functions and utilities that facilitate fitting Piece-wise Exponential Additive Mixed Models (PAMMs), including data transformation and other convenience functions for pre- and post-processing as well as plotting. } \details{ The best way to get an overview of the functionality provided and how to fit PAMMs is to view the vignettes available at \url{https://adibender.github.io/pammtools/articles/}. A summary of the vignettes' content is given below: \itemize{ \item \href{https://adibender.github.io/pammtools/articles/basics.html}{basics}: Introduction to PAMMs and basic modeling. \item \href{https://adibender.github.io/pammtools/articles/baseline.html}{baseline}: Shows how to estimate and visualize baseline model (without covariates) and comparison to respective Cox-PH model. \item \href{https://adibender.github.io/pammtools/articles/convenience.html}{convenience}: Convenience functions for post-processing and plotting PAMMs. \item \href{https://adibender.github.io/pammtools/articles/data-transformation.html}{data-transformation}: Transforming data into a format suitable to fit PAMMs. 
\item \href{https://adibender.github.io/pammtools/articles/frailty.html}{frailty}: Specifying "frailty" terms, i.e., random effects for PAMMs. \item \href{https://adibender.github.io/pammtools/articles/splines.html}{splines}: Specifying spline smooth terms for PAMMs. \item \href{https://adibender.github.io/pammtools/articles/strata.html}{strata}: Specifying stratified models in which each level of a grouping variable has a different baseline hazard. \item \href{https://adibender.github.io/pammtools/articles/tdcovar.html}{tdcovar}: Dealing with time-dependent covariates. \item \href{https://adibender.github.io/pammtools/articles/tveffects.html}{tveffects}: Specifying time-varying effects. \item \href{https://adibender.github.io/pammtools/articles/left-truncation.html}{left-truncation}: Estimation for left-truncated data. \item \href{https://adibender.github.io/pammtools/articles/competing-risks.html}{competing-risks}: Competing risks analysis. } } \references{ Bender, Andreas, Andreas Groll, and Fabian Scheipl. 2018. “A Generalized Additive Model Approach to Time-to-Event Analysis” Statistical Modelling, February. https://doi.org/10.1177/1471082X17748083. Bender, Andreas, Fabian Scheipl, Wolfgang Hartl, Andrew G. Day, and Helmut Küchenhoff. 2019. “Penalized Estimation of Complex, Non-Linear Exposure-Lag-Response Associations.” Biostatistics 20 (2): 315–31. https://doi.org/10.1093/biostatistics/kxy003. Bender, Andreas, and Fabian Scheipl. 2018. “pammtools: Piece-Wise Exponential Additive Mixed Modeling Tools.” ArXiv:1806.01042 \link{Stat}, June. https://arxiv.org/abs/1806.01042. 
} pammtools/man/ped_info.Rd0000644000176200001440000000153714222504522015116 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interval-information.R \name{ped_info} \alias{ped_info} \alias{ped_info.ped} \title{Extract interval information and median/modus values for covariates} \usage{ ped_info(ped) \method{ped_info}{ped}(ped) } \arguments{ \item{ped}{An object of class \code{ped} as returned by \code{\link[pammtools]{as_ped}}.} } \value{ A data frame with one row for each unique interval in \code{ped}. } \description{ Given an object of class \code{ped}, returns data frame with one row for each interval containing interval information, mean values for numerical variables and modus for non-numeric variables in the data set. } \examples{ ped <- tumor[1:4,] \%>\% as_ped(Surv(days, status)~ sex + age) ped_info(ped) } \seealso{ \code{\link[pammtools]{int_info}}, \code{\link[pammtools]{sample_info}} } pammtools/man/patient.Rd0000644000176200001440000000244414222504522014775 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{patient} \alias{patient} \title{Survival data of critically ill ICU patients} \format{ An object of class \code{data.frame} with 2000 rows and 12 columns. } \usage{ patient } \description{ A data set containing the survival time (or hospital release time) among other covariates. The full data is available \href{https://github.com/adibender/elra-biostats}{here}. The following variables are provided: \describe{ \item{Year}{The year of ICU Admission} \item{CombinedicuID}{Intensive Care Unit (ICU) ID} \item{CombinedID}{Patient identifier} \item{Survdays}{Survival time of patients. Here it is assumed that patients survive until t=30 if released from hospital.} \item{PatientDied}{Status indicator; 1=death, 0=censoring} \item{survhosp}{Survival time in hospital.
Here it is assumed that patients are censored at time of hospital release (potentially informative)} \item{Gender}{Male or female} \item{Age}{The patients age at Admission} \item{AdmCatID}{Admission category: medical, surgical elective or surgical emergency} \item{ApacheIIScore}{The patient's Apache II Score at Admission} \item{BMI}{Patient's Body Mass Index} \item{DiagID2}{Diagnosis at admission in 9 categories} } } \keyword{datasets} pammtools/man/daily.Rd0000644000176200001440000000167614222504522014441 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{daily} \alias{daily} \title{Time-dependent covariates of the \code{\link{patient}} data set.} \format{ An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 18797 rows and 4 columns. } \usage{ daily } \description{ This data set contains the time-dependent covariates (TDCs) for the \code{\link{patient}} data set. Note that nutrition was protocoled for at most 12 days after ICU admission. The data set includes: \describe{ \item{CombinedID}{Unique patient identifier. Can be used to merge with \code{\link{patient}} data} \item{Study_Day}{The calendar (!) 
day at which calories (or proteins) were administered} \item{caloriesPercentage}{The percentage of target calories supplied to the patient by the ICU staff} \item{proteinGproKG}{The amount of protein supplied to the patient by the ICU staff}} } \keyword{datasets} pammtools/man/gg_partial.Rd0000644000176200001440000000353114222504522015440 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/viz-elra.R \name{gg_partial} \alias{gg_partial} \alias{gg_partial_ll} \alias{get_partial_ll} \title{Visualize effect estimates for specific covariate combinations} \usage{ gg_partial(data, model, term, ..., reference = NULL, ci = TRUE) gg_partial_ll( data, model, term, ..., reference = NULL, ci = FALSE, time_var = "tend" ) get_partial_ll( data, model, term, ..., reference = NULL, ci = FALSE, time_var = "tend" ) } \arguments{ \item{data}{Data used to fit the \code{model}.} \item{model}{A suitable model object which will be used to estimate the partial effect of \code{term}.} \item{term}{A character string indicating the model term for which partial effects should be plotted.} \item{...}{Covariate specifications (expressions) that will be evaluated by looking for variables in \code{x}. Must be of the form \code{z = f(z)} where \code{z} is a variable in the data set and \code{f} a known function that can be usefully applied to \code{z}. Note that this is also necessary for single value specifications (e.g. \code{age = c(50)}). For data in PED (piece-wise exponential data) format, one can also specify the time argument, but see "Details" an "Examples" below.} \item{reference}{If specified, should be a list with covariate value pairs, e.g. \code{list(x1 = 1, x2=50)}. The calculated partial effect will be relative to an observation specified in \code{reference}.} \item{ci}{Logical. Indicates if confidence intervals for the \code{term} of interest should be calculated/plotted. 
Defaults to \code{TRUE}.} \item{time_var}{The name of the variable that was used in \code{model} to represent follow-up time.} } \description{ Depending on the plot function and input, creates either a 1-dimensional slices, bivariate surface or (1D) cumulative effect. } pammtools/man/sim_pexp.Rd0000644000176200001440000000556614452540461015174 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pexp.R \name{sim_pexp} \alias{sim_pexp} \title{Simulate survival times from the piece-wise exponential distribution} \usage{ sim_pexp(formula, data, cut) } \arguments{ \item{formula}{An extended formula that specifies the linear predictor. If you want to include a smooth baseline or time-varying effects, use \code{t} within your formula as if it was a covariate in the data, although it is not and should not be included in the \code{data} provided to \code{sim_pexp}. See examples below.} \item{data}{A data set with variables specified in \code{formula}.} \item{cut}{A sequence of time-points starting with 0.} } \description{ Simulate survival times from the piece-wise exponential distribution } \examples{ library(survival) library(dplyr) library(pammtools) # set number of observations/subjects n <- 250 # create data set with variables which will affect the hazard rate. 
df <- cbind.data.frame(x1 = runif (n, -3, 3), x2 = runif (n, 0, 6)) \%>\% as_tibble() # the formula which specifies how covariates affet the hazard rate f0 <- function(t) { dgamma(t, 8, 2) *6 } form <- ~ -3.5 + f0(t) -0.5*x1 + sqrt(x2) set.seed(24032018) sim_df <- sim_pexp(form, df, 1:10) head(sim_df) plot(survfit(Surv(time, status)~1, data = sim_df )) # for control, estimate with Cox PH mod <- coxph(Surv(time, status) ~ x1 + pspline(x2), data=sim_df) coef(mod)[1] layout(matrix(1:2, nrow=1)) termplot(mod, se = TRUE) # and using PAMs layout(1) ped <- sim_df \%>\% as_ped(Surv(time, status)~., max_time=10) library(mgcv) pam <- gam(ped_status ~ s(tend) + x1 + s(x2), data=ped, family=poisson, offset=offset) coef(pam)[2] plot(pam, page=1) \dontrun{ # Example 2: Functional covariates/cumulative coefficients # function to generate one exposure profile, tz is a vector of time points # at which TDC z was observed rng_z = function(nz) { as.numeric(arima.sim(n = nz, list(ar = c(.8, -.6)))) } # two different exposure times for two different exposures tz1 <- 1:10 tz2 <- -5:5 # generate exposures and add to data set df <- df \%>\% add_tdc(tz1, rng_z) \%>\% add_tdc(tz2, rng_z) df # define tri-variate function of time, exposure time and exposure z ft <- function(t, tmax) { -1*cos(t/tmax*pi) } fdnorm <- function(x) (dnorm(x,1.5,2)+1.5*dnorm(x,7.5,1)) wpeak2 <- function(lag) 15*dnorm(lag,8,10) wdnorm <- function(lag) 5*(dnorm(lag,4,6)+dnorm(lag,25,4)) f_xyz1 <- function(t, tz, z) { ft(t, tmax=10) * 0.8*fdnorm(z)* wpeak2(t - tz) } f_xyz2 <- function(t, tz, z) { wdnorm(t-tz) * z } # define lag-lead window function ll_fun <- function(t, tz) {t >= tz} ll_fun2 <- function(t, tz) {t - 2 >= tz} # simulate data with cumulative effect sim_df <- sim_pexp( formula = ~ -3.5 + f0(t) -0.5*x1 + sqrt(x2)| fcumu(t, tz1, z.tz1, f_xyz=f_xyz1, ll_fun=ll_fun) + fcumu(t, tz2, z.tz2, f_xyz=f_xyz2, ll_fun=ll_fun2), data = df, cut = 0:10) } } 
pammtools/man/has_tdc.Rd0000644000176200001440000000121014222504522014724 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tdc-utils.R \name{has_tdc} \alias{has_tdc} \title{Checks if data contains timd-dependent covariates} \usage{ has_tdc(data, id_var) } \arguments{ \item{data}{A data frame (potentially) containing time-dependent covariates.} \item{id_var}{A character indicating the grouping variable. For each covariate it will be checked if their values change within a group specified by \code{id_var}.} } \value{ Logical. \code{TRUE} if data contains time-dependent covariates, else \code{FALSE}. } \description{ Checks if data contains timd-dependent covariates } \keyword{internal} pammtools/man/rpexp.Rd0000644000176200001440000000131714241673657014506 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rpexp.R \name{rpexp} \alias{rpexp} \title{Draw random numbers from piece-wise exponential distribution.} \usage{ rpexp(n = 1, rate = 1, t = 0) } \arguments{ \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{rate}{vector of rates.} \item{t}{vector of the same length as \code{rate}, giving the times at which the rate changes. The first element of \code{t} should be 0, and \code{t} should be in increasing order.} } \description{ This is a copy of the same function from \code{rpexp} from package \pkg{msm}. Copied here to reduce dependencies. } \keyword{internal} pammtools/man/add_hazard.Rd0000644000176200001440000000715714222504522015420 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{add_hazard} \alias{add_hazard} \alias{add_hazard.default} \alias{add_cumu_hazard} \title{Add predicted (cumulative) hazard to data set} \usage{ add_hazard(newdata, object, ...) 
\method{add_hazard}{default}( newdata, object, reference = NULL, type = c("response", "link"), ci = TRUE, se_mult = 2, ci_type = c("default", "delta", "sim"), overwrite = FALSE, time_var = NULL, ... ) add_cumu_hazard( newdata, object, ci = TRUE, se_mult = 2, overwrite = FALSE, time_var = NULL, interval_length = "intlen", ... ) } \arguments{ \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. } \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}} and \code{\link{get_hazard}}} \item{reference}{A data frame with number of rows equal to \code{nrow(newdata)} or one, or a named list with (partial) covariate specifications. See examples.} \item{type}{Either \code{"response"} or \code{"link"}. The former calculates hazard, the latter the log-hazard.} \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{se_mult}{Factor by which standard errors are multiplied for calculating the confidence intervals.} \item{ci_type}{The method by which standard errors/confidence intervals will be calculated. Default transforms the linear predictor at respective intervals. \code{"delta"} calculates CIs based on the standard error calculated by the Delta method. \code{"sim"} draws the property of interest from its posterior based on the normal distribution of the estimated coefficients. See \href{https://adibender.github.io/simpamm/confidence-intervals.html}{here} for details and empirical evaluation.} \item{overwrite}{Should hazard columns be overwritten if already present in the data set? 
Defaults to \code{FALSE}. If \code{TRUE}, columns with names \code{c("hazard", "se", "lower", "upper")} will be overwritten.} \item{time_var}{Name of the variable used for the baseline hazard. If not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else \code{"interval"}. The latter is assumed to be a factor, the former numeric.} \item{interval_length}{The variable in newdata containing the interval lengths. Can be either bare unquoted variable name or character. Defaults to \code{"intlen"}.} } \description{ Add (cumulative) hazard based on the provided data set and model. If \code{ci=TRUE} confidence intervals (CI) are also added. Their width can be controlled via the \code{se_mult} argument. The method by which the CI are calculated can be specified by \code{ci_type}. This is a wrapper around \code{\link[mgcv]{predict.gam}}. When \code{reference} is specified, the (log-)hazard ratio is calculated. } \examples{ ped <- tumor[1:50,] \%>\% as_ped(Surv(days, status)~ age) pam <- mgcv::gam(ped_status ~ s(tend)+age, data = ped, family=poisson(), offset=offset) ped_info(ped) \%>\% add_hazard(pam, type="link") ped_info(ped) \%>\% add_hazard(pam, type = "response") ped_info(ped) \%>\% add_cumu_hazard(pam) } \seealso{ \code{\link[mgcv]{predict.gam}}, \code{\link[pammtools]{add_surv_prob}} } pammtools/man/cumulative_coefficient.Rd0000644000176200001440000000302314222504522020037 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cumulative-coefficient.R \name{get_cumu_coef} \alias{get_cumu_coef} \alias{get_cumu_coef.gam} \alias{get_cumu_coef.aalen} \alias{get_cumu_coef.cox.aalen} \title{Extract cumulative coefficients (cumulative hazard differences)} \usage{ get_cumu_coef(model, data = NULL, terms, ...) \method{get_cumu_coef}{gam}(model, data, terms, ...) \method{get_cumu_coef}{aalen}(model, data = NULL, terms, ci = TRUE, ...) \method{get_cumu_coef}{cox.aalen}(model, data = NULL, terms, ci = TRUE, ...) 
} \arguments{ \item{model}{Object from which to extract cumulative coefficients.} \item{data}{Additional data if necessary.} \item{terms}{A character vector of variables for which the cumulative coefficient should be calculated.} \item{...}{Further arguments passed to methods.} \item{ci}{Logical. Indicates if confidence intervals should be returned as well.} } \description{ These functions are designed to extract (or mimic) the cumulative coefficients usually used in additive hazards models (Aalen model) to depict (time-varying) covariate effects. For PAMMs, these are the differences between the cumulative hazard rates where all covariates except one have the identical values. For a numeric covariate of interest, this calculates \eqn{\Lambda(t|x+1) - \Lambda(t|x)}. For non-numeric covariates the cumulative hazard of the reference level is subtracted from the cumulative hazards evaluated at all non reference levels. Standard errors are calculated using the delta method. } pammtools/man/get_hazard.Rd0000644000176200001440000000447114222504522015443 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{get_hazard} \alias{get_hazard} \alias{get_hazard.default} \title{Calculate predicted hazard} \usage{ get_hazard(object, newdata, ...) \method{get_hazard}{default}( object, newdata, reference = NULL, ci = TRUE, type = c("response", "link"), ci_type = c("default", "delta", "sim"), time_var = NULL, se_mult = 2, ... ) } \arguments{ \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. 
} \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}} and \code{\link{get_hazard}}} \item{reference}{A data frame with number of rows equal to \code{nrow(newdata)} or one, or a named list with (partial) covariate specifications. See examples.} \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{type}{Either \code{"response"} or \code{"link"}. The former calculates hazard, the latter the log-hazard.} \item{ci_type}{The method by which standard errors/confidence intervals will be calculated. Default transforms the linear predictor at respective intervals. \code{"delta"} calculates CIs based on the standard error calculated by the Delta method. \code{"sim"} draws the property of interest from its posterior based on the normal distribution of the estimated coefficients. See \href{https://adibender.github.io/simpamm/confidence-intervals.html}{here} for details and empirical evaluation.} \item{time_var}{Name of the variable used for the baseline hazard. If not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else \code{"interval"}. The latter is assumed to be a factor, the former numeric.} \item{se_mult}{Factor by which standard errors are multiplied for calculating the confidence intervals.} } \description{ Calculate predicted hazard } \keyword{internal} pammtools/man/as_ped_cr.Rd0000644000176200001440000000621114452536066015261 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as-ped.R \name{as_ped_cr} \alias{as_ped_cr} \title{Competing risks trafo} \usage{ as_ped_cr( data, formula, cut = NULL, max_time = NULL, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, combine = TRUE, ... 
) } \arguments{ \item{data}{Either an object inheriting from data frame or in case of time-dependent covariates a list of data frames (of length 2), where the first data frame contains the time-to-event information and static covariates while the second (and potentially further data frames) contain information on time-dependent covariates and the times at which they have been observed.} \item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object on the left-hand-side and covariate specification on the right-hand-side (RHS). The RHS can be an extended formula, which specifies how TDCs should be transformed using specials \code{concurrent} and \code{cumulative}. The left hand-side can be in start-stop-notation. This, however, is only used to create left-truncated data and does not support the full functionality.} \item{cut}{Split points, used to partition the follow up into intervals. If unspecified, all unique event times will be used.} \item{max_time}{If \code{cut} is unspecified, this will be the last possible event time. All event times after \code{max_time} will be administratively censored at \code{max_time}.} \item{tdc_specials}{A character vector. Names of potential specials in \code{formula} for concurrent and/or cumulative effects.} \item{censor_code}{Specifies the value of the status variable that indicates censoring. Often this will be \code{0}, which is the default.} \item{...}{Further arguments passed to the \code{data.frame} method and eventually to \code{\link[survival]{survSplit}}} } \value{ A data frame class \code{ped} in piece-wise exponential data format. } \description{ This is the general data transformation function provided by the \code{pammtools} package. Three main applications must be distinguished: \enumerate{ \item Transformation of standard time-to-event data. \item Transformation of left-truncated time-to-event data. \item Transformation of time-to-event data with time-dependent covariates (TDC).
} For the latter, the type of effect one wants to estimate is also important for the data transformation step. In any case, the data transformation is specified by a two sided formula. In case of TDCs, the right-hand-side of the formula can contain formula specials \code{concurrent} and \code{cumulative}. See the \href{https://adibender.github.io/pammtools//articles/data-transformation.html}{data-transformation} vignette for details. } \examples{ tumor[1:3, ] tumor[1:3, ] \%>\% as_ped(Surv(days, status)~ age + sex, cut = c(0, 500, 1000)) tumor[1:3, ] \%>\% as_ped(Surv(days, status)~ age + sex) \dontrun{ data("cgd", package = "frailtyHL") cgd2 <- cgd \%>\% select(id, tstart, tstop, enum, status, age) \%>\% filter(enum \%in\% c(1:2)) ped_re <- as_ped_multistate( formula = Surv(tstart, tstop, status) ~ age + enum, data = cgd2, transition = "enum", timescale = "calendar") } } \keyword{internal} pammtools/man/nest_tdc.Rd0000644000176200001440000000217614222504522015136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nest-utils.R \name{nest_tdc} \alias{nest_tdc} \alias{nest_tdc.default} \alias{nest_tdc.list} \title{Create nested data frame from data with time-dependent covariates} \usage{ nest_tdc(data, formula, ...) \method{nest_tdc}{default}(data, formula, ...) \method{nest_tdc}{list}(data, formula, ...) } \arguments{ \item{data}{A suitable data structure (e.g. unnested data frame with concurrent TDCs or a list where each element is a data frame, potentially containing TDCs as specified in the RHS of \code{formula}). 
Only TDCs present in \code{formula} will be returned.} \item{formula}{A two sided formula with a two part RHS, where the second part indicates the structure of the TDC structure.} \item{...}{Further arguments passed to methods.} \item{vars}{A character vector of TDCs that will be nested.} \item{id}{A character giving the name of the ID column.} } \description{ Provides methods to nest data with time-dependent covariates (TDCs). A \code{formula} must be provided where the right hand side (RHS) contains the structure of the TDCs } \keyword{internal} pammtools/man/get_cut.Rd0000644000176200001440000000073314222504522014762 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get-cut-points.R \name{get_cut} \alias{get_cut} \alias{get_cut.default} \title{Obtain interval break points} \usage{ get_cut(data, formula, cut = NULL, ...) \method{get_cut}{default}(data, formula, cut = NULL, max_time = NULL, event = 1L, ...) } \description{ Default method works for data frames. The list method applies the default method to each data set within the list. } \keyword{internal} pammtools/man/tumor.Rd0000644000176200001440000000172014222504522014473 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{tumor} \alias{tumor} \title{Stomach area tumor data} \format{ An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 776 rows and 9 columns. } \usage{ tumor } \description{ Information on patients treated for a cancer disease located in the stomach area.
The data set includes: \describe{ \item{days}{Time from operation until death in days.} \item{status}{Event indicator (0 = censored, 1 = death).} \item{age}{The subject's age.} \item{sex}{The subject's sex (male/female).} \item{charlson_score}{Charlson comorbidity score, 1-6.} \item{transfusion}{Has subject received transfusions (no/yes).} \item{complications}{Did major complications occur during operation (no/yes).} \item{metastases}{Did the tumor develop metastases? (no/yes).} \item{resection}{Was the operation accompanied by a major resection (no/yes).} } } \keyword{datasets} pammtools/man/calc_ci.Rd0000644000176200001440000000077614222504522014714 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidiers.R \name{calc_ci} \alias{calc_ci} \title{Calculate confidence intervals} \usage{ calc_ci(ftab) } \arguments{ \item{ftab}{A table with two columns, containing coefficients in the first column and standard-errors in the second column.} } \description{ Given 2 column matrix or data frame, returns 3 column data.frame with coefficient estimate plus lower and upper borders of the 95\% confidence intervals. 
} \keyword{internal} pammtools/man/modus.Rd0000644000176200001440000000040513662013606014460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{modus} \alias{modus} \title{Calculate the modus} \usage{ modus(var) } \arguments{ \item{var}{A atomic vector} } \description{ Calculate the modus } \keyword{internal} pammtools/man/dplyr_verbs.Rd0000644000176200001440000001223214452536366015700 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidyverse-methods.R \name{dplyr_verbs} \alias{dplyr_verbs} \alias{arrange} \alias{filter} \alias{distinct} \alias{full_join} \alias{group_by} \alias{inner_join} \alias{left_join} \alias{mutate} \alias{rename} \alias{right_join} \alias{sample_frac} \alias{sample_n} \alias{select} \alias{slice} \alias{summarise} \alias{transmute} \alias{ungroup} \alias{arrange.ped} \alias{group_by.ped} \alias{ungroup.ped} \alias{distinct.ped} \alias{filter.ped} \alias{sample_n.ped} \alias{sample_frac.ped} \alias{slice.ped} \alias{select.ped} \alias{mutate.ped} \alias{rename.ped} \alias{summarise.ped} \alias{summarize.ped} \alias{transmute.ped} \alias{inner_join.ped} \alias{full_join.ped} \alias{left_join.ped} \alias{right_join.ped} \title{\code{dplyr} Verbs for \code{ped}-Objects} \usage{ \method{arrange}{ped}(.data, ...) \method{group_by}{ped}(.data, ..., .add = FALSE) \method{ungroup}{ped}(x, ...) \method{distinct}{ped}(.data, ..., .keep_all = FALSE) \method{filter}{ped}(.data, ...) \method{sample_n}{ped}(tbl, size, replace = FALSE, weight = NULL, .env = NULL, ...) \method{sample_frac}{ped}(tbl, size = 1, replace = FALSE, weight = NULL, .env = NULL, ...) \method{slice}{ped}(.data, ...) \method{select}{ped}(.data, ...) \method{mutate}{ped}(.data, ...) \method{rename}{ped}(.data, ...) \method{summarise}{ped}(.data, ...) \method{summarize}{ped}(.data, ...) \method{transmute}{ped}(.data, ...) 
\method{inner_join}{ped}(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) \method{full_join}{ped}(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) \method{left_join}{ped}(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) \method{right_join}{ped}(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) } \arguments{ \item{.data}{an object of class \code{ped}, see \code{\link{as_ped}}.} \item{...}{see \code{dplyr} documentation} \item{x}{an object of class \code{ped}, see \code{\link{as_ped}}.} \item{tbl}{an object of class \code{ped}, see \code{\link{as_ped}}.} \item{size}{<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> For \code{sample_n()}, the number of rows to select. For \code{sample_frac()}, the fraction of rows to select. If \code{tbl} is grouped, \code{size} applies to each group.} \item{replace}{Sample with or without replacement?} \item{weight}{<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> Sampling weights. This must evaluate to a vector of non-negative numbers the same length as the input. Weights are automatically standardised to sum to 1.} \item{.env}{DEPRECATED.} \item{y}{A pair of data frames, data frame extensions (e.g. a tibble), or lazy data frames (e.g. from dbplyr or dtplyr). See \emph{Methods}, below, for more details.} \item{by}{A join specification created with \code{\link[dplyr:join_by]{join_by()}}, or a character vector of variables to join by. If \code{NULL}, the default, \verb{*_join()} will perform a natural join, using all variables in common across \code{x} and \code{y}. A message lists the variables so that you can check they're correct; suppress the message by supplying \code{by} explicitly. To join on different variables between \code{x} and \code{y}, use a \code{\link[dplyr:join_by]{join_by()}} specification. For example, \code{join_by(a == b)} will match \code{x$a} to \code{y$b}. 
To join by multiple variables, use a \code{\link[dplyr:join_by]{join_by()}} specification with multiple expressions. For example, \code{join_by(a == b, c == d)} will match \code{x$a} to \code{y$b} and \code{x$c} to \code{y$d}. If the column names are the same between \code{x} and \code{y}, you can shorten this by listing only the variable names, like \code{join_by(a, c)}. \code{\link[dplyr:join_by]{join_by()}} can also be used to perform inequality, rolling, and overlap joins. See the documentation at \link[dplyr:join_by]{?join_by} for details on these types of joins. For simple equality joins, you can alternatively specify a character vector of variable names to join by. For example, \code{by = c("a", "b")} joins \code{x$a} to \code{y$a} and \code{x$b} to \code{y$b}. If variable names differ between \code{x} and \code{y}, use a named character vector like \code{by = c("x_a" = "y_a", "x_b" = "y_b")}. To perform a cross-join, generating all combinations of \code{x} and \code{y}, see \code{\link[dplyr:cross_join]{cross_join()}}.} \item{copy}{If \code{x} and \code{y} are not from the same data source, and \code{copy} is \code{TRUE}, then \code{y} will be copied into the same src as \code{x}. This allows you to join tables across srcs, but it is a potentially expensive operation so you must opt into it.} \item{suffix}{If there are non-joined duplicate variables in \code{x} and \code{y}, these suffixes will be added to the output to disambiguate them. Should be a character vector of length 2.} \item{funs}{see \code{\link[dplyr]{summarize_all}}} \item{.dots}{see \code{dplyr} documentation} \item{keep_attributes}{conserve attributes? defaults to \code{TRUE}} } \value{ a modified \code{ped} object (except for \code{do}) } \description{ See \code{dplyr} documentation of the respective functions for description and examples. 
} \keyword{internal} pammtools/man/get_terms.Rd0000644000176200001440000000207614222504522015323 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get-terms.R \name{get_terms} \alias{get_terms} \title{Extract the partial effects of non-linear model terms} \usage{ get_terms(data, fit, terms, ...) } \arguments{ \item{data}{A data frame containing variables used to fit the model. Only first row will be used.} \item{fit}{A fitted object of class \code{\link[mgcv]{gam}}.} \item{terms}{A character vector (can be length one). Specifies the terms for which partial effects will be returned} \item{...}{Further arguments passed to \code{\link{seq_range}}.} } \value{ A tibble with 5 columns. } \description{ This function basically creates a new \code{df} from \code{data} for each term in \code{terms}, creating a range from minimum and maximum of the \code{predict(fit, newdata=df, type="terms")}. Terms are then stacked to a tidy data frame. } \examples{ library(survival) fit <- coxph(Surv(time, status) ~ pspline(karno) + pspline(age), data=veteran) terms_df <- veteran \%>\% get_terms(fit, terms = c("karno", "age")) head(terms_df) tail(terms_df) } pammtools/man/prep_concurrent.Rd0000644000176200001440000000104713662013606016544 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-specials.R \name{prep_concurrent} \alias{prep_concurrent} \alias{prep_concurrent.list} \title{Extract information on concurrent effects} \usage{ prep_concurrent(x, formula, ...) \method{prep_concurrent}{list}(x, formula, ...) 
} \arguments{ \item{x}{A suitable object from which variables contained in \code{formula} can be extracted.} \item{...}{Further arguments passed to methods.} } \description{ Extract information on concurrent effects } \keyword{internal} pammtools/man/gg_re.Rd0000644000176200001440000000141314222504522014407 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convenience-plots.R \name{gg_re} \alias{gg_re} \title{Plot Normal QQ plots for random effects} \usage{ gg_re(x, ...) } \arguments{ \item{x}{ a fitted \code{gam} object as produced by \code{gam()}.} \item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}} } \description{ Plot Normal QQ plots for random effects } \examples{ library(pammtools) data("patient") ped <- patient \%>\% dplyr::slice(1:100) \%>\% as_ped(Surv(Survdays, PatientDied)~ ApacheIIScore + CombinedicuID, id="CombinedID") pam <- mgcv::gam(ped_status ~ s(tend) + ApacheIIScore + s(CombinedicuID, bs="re"), data=ped, family=poisson(), offset=offset) gg_re(pam) plot(pam, select = 2) } \seealso{ \code{\link{tidy_re}} } pammtools/man/tidy_smooth.Rd0000644000176200001440000000102613662013606015673 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidiers.R \name{tidy_re} \alias{tidy_re} \title{Extract random effects in tidy data format.} \usage{ tidy_re(x, keep = c("fit", "main", "xlab", "ylab"), ...) } \arguments{ \item{x}{ a fitted \code{gam} object as produced by \code{gam()}.} \item{keep}{A vector of variables to keep.} \item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}} } \description{ Extract random effects in tidy data format. 
} \seealso{ \code{\link[stats]{qqline}} } pammtools/man/add_surv_prob.Rd0000644000176200001440000000453614222504522016166 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{add_surv_prob} \alias{add_surv_prob} \title{Add survival probability estimates} \usage{ add_surv_prob( newdata, object, ci = TRUE, se_mult = 2, overwrite = FALSE, time_var = NULL, interval_length = "intlen", ... ) } \arguments{ \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. } \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{se_mult}{Factor by which standard errors are multiplied for calculating the confidence intervals.} \item{overwrite}{Should hazard columns be overwritten if already present in the data set? Defaults to \code{FALSE}. If \code{TRUE}, columns with names \code{c("hazard", "se", "lower", "upper")} will be overwritten.} \item{time_var}{Name of the variable used for the baseline hazard. If not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else \code{"interval"}. The latter is assumed to be a factor, the former numeric.} \item{interval_length}{The variable in newdata containing the interval lengths. Can be either bare unquoted variable name or character. Defaults to \code{"intlen"}.} \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}} and \code{\link{get_hazard}}} } \description{ Given suitable data (i.e. 
data with all columns used for estimation of the model), this functions adds a column \code{surv_prob} containing survival probabilities for the specified covariate and follow-up information (and CIs \code{surv_lower}, \code{surv_upper} if \code{ci=TRUE}). } \examples{ ped <- tumor[1:50,] \%>\% as_ped(Surv(days, status)~ age) pam <- mgcv::gam(ped_status ~ s(tend)+age, data=ped, family=poisson(), offset=offset) ped_info(ped) \%>\% add_surv_prob(pam, ci=TRUE) } \seealso{ \code{\link[mgcv]{predict.gam}}, \code{\link[pammtools]{add_surv_prob}} } pammtools/man/get_cumulative.Rd0000644000176200001440000000157713662013606016361 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-specials.R \name{get_cumulative} \alias{get_cumulative} \alias{expand_cumulative} \title{Expand time-dependent covariates to functionals} \usage{ get_cumulative(data, formula) expand_cumulative(data, func, n_func) } \arguments{ \item{data}{Data frame (or similar) in which variables specified in ... will be looked for} \item{formula}{A formula containing \code{cumulative} specials, that specify the type of cumulative effect one wants to estimate. For details see the vignettes on data transformation and time-dependent covariates.} \item{func}{Single evaluated \code{\link{cumulative}} term.} } \description{ Given formula specification on how time-dependent covariates affect the outcome, creates respective functional covariate as well as auxiliary matrices for time/latency etc. } \keyword{internal} pammtools/man/gg_laglead.Rd0000644000176200001440000000264714222504522015404 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lag-lead-utils.R \name{gg_laglead} \alias{gg_laglead} \alias{gg_laglead.default} \alias{gg_laglead.LL_df} \alias{gg_laglead.nested_fdf} \title{Plot Lag-Lead windows} \usage{ gg_laglead(x, ...) \method{gg_laglead}{default}(x, tz, ll_fun, ...) 
\method{gg_laglead}{LL_df}( x, high_col = "grey20", low_col = "whitesmoke", grid_col = "lightgrey", ... ) \method{gg_laglead}{nested_fdf}(x, ...) } \arguments{ \item{x}{Either a numeric vector of follow-up cut points or a suitable object.} \item{...}{Further arguments passed to methods.} \item{tz}{A vector of exposure times} \item{ll_fun}{Function that specifies how the lag-lead matrix should be constructed. First argument is the follow up time second argument is the time of exposure.} \item{high_col}{Color used to highlight exposure times within the lag-lead window.} \item{low_col}{Color of exposure times outside the lag-lead window.} \item{grid_col}{Color of grid lines.} } \description{ Given data defining a Lag-lead window, returns respective plot as a \code{ggplot2} object. } \examples{ ## Example 1: supply t, tz, ll_fun directly gg_laglead(1:10, tz=-5:5, ll_fun=function(t, tz) { t >= tz + 2 & t <= tz + 2 + 3}) ## Example 2: extract information on t, tz, ll_from data with respective attributes data("simdf_elra", package = "pammtools") gg_laglead(simdf_elra) } \seealso{ get_laglead } pammtools/man/get_sim_ci.Rd0000644000176200001440000000052414222504522015430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{get_sim_ci} \alias{get_sim_ci} \title{Calculate simulation based confidence intervals} \usage{ get_sim_ci(newdata, object, alpha = 0.05, nsim = 100L, ...) 
} \description{ Calculate simulation based confidence intervals } \keyword{internal} pammtools/man/get_event_types.Rd0000644000176200001440000000257614222504522016543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as-ped.R \name{get_event_types} \alias{get_event_types} \title{Exctract event types} \usage{ get_event_types(data, formula, censor_code) } \arguments{ \item{data}{Either an object inheriting from data frame or in case of time-dependent covariates a list of data frames (of length 2), where the first data frame contains the time-to-event information and static covariates while the second (and potentially further data frames) contain information on time-dependent covariates and the times at which they have been observed.} \item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object on the left-hand-side and covariate specification on the right-hand-side (RHS). The RHS can be an extended formula, which specifies how TDCs should be transformed using specials \code{concurrent} and \code{cumulative}. The left hand-side can be in start-stop-notation. This, however, is only used to create left-truncated data and does not support the full functionality.} \item{censor_code}{Specifies the value of the status variable that indicates censoring. Often this will be \code{0}, which is the default.} } \description{ Given a formula that specifies the status variable of the outcome, this function extracts the different event types (except for censoring, specified by \code{censor_code}). } \keyword{internal} pammtools/man/figures/0000755000176200001440000000000014222504522014502 5ustar liggesuserspammtools/man/figures/logo.png0000644000176200001440000013655214222504522016164 0ustar liggesusersPNG  IHDRX.' pHYs.#.#x?v IDATxyxSU?o4mڦ+JK٩"*0"WaWečqAgՑ: (JJ)KZJ tidd'<<MNMss~$It%ȳ $Izu!"b0 ` $6+@DP(.+ C(y QbQ9ثQ0WՅ{ B sB+DDއ=D~NP4Ј(cR(! ~. 
<">1 CN\2pF$I]BD> D~DP5ل^J  ^_]=D1 q^vPn$)ӕ "ǀGDvXlFi]CC?7իW"`@D$K9sr/O>Z$I6y"/&2h4 ``ta̟??2D J{/^{5š $yE003dSE D.&`„ X|9h7Z}]V.,"b0 r %%/f͚iae"H !u]xWѽ{w`\;'xpe Dl0l0,[ F ;XBdSF$ "Abb"-Z7P*v?ӧ/?V&4"y $*j" : 222kaʔ)Bʳ$ѥK+w&r QQQxg0w\)Ӛ@ F{nae)$_}UtMH`\;[/FQQ2,Ə˗cРAʴWF{BwfHP_|3g4ŋx3L PDOZ]] ۿ?͛ )M$ C~WУG!e`Й`~ŋʕ+BdSIPpua=zDa0󟹼3 o  Xx! mǎҥK_)&3;C~j 577CcΝXd ?.\6 ȯ SLѫW/!m`\[O>+ !%}DNP(BBA>}~z]'B9NR׿5rss1g!aJǀ|}QT*!e { lkc\xɓ'1|lݺUkI%_IT*fϞ\̝;Bɗ 6`͚5Bz{@|FoDNNV\] )|ԩSqA,[L`SU 3D5Xz5+_@_j̝;ٳܞp@c jX#""Ogqr1Yc`1|[ DO-. 9 ,h"\pAH}7J. >۶mCvv_r=B3fXhExyyQ 99VŽ;0b5@ŋ#773gΔ]'"ɰ0ƍѯ_?q"yn޽{|r!M7݄,[ c8 wb0 222cƍP3" ܹs/dygpFK.7oΝ`?c`X~Rnrp"lT*xٳ<C&7dl޼k֬AϞ=er0 &`ҥV>h4l-`mcVSS}]]q=$v޽zj|\FSO=Çc̙?@"1,"p,ZÌ3Ռ%&&";;;wȑ#eǀ@"0SD4@ 3gD^^/^Ѯ]"O6lmۆUV!>>^vy $9D!:BYf!//O= g1D42IIIXjS"ŋŀ@] dF%44>(z)!KiR k`_LXhe`@:˸[kGB# 4k֬ / 'k ,`x1vXg0Nr$ARYt:S p3iVWW㭷EvyljGD … ^Yuj8e6oHB,X7o]O/J\=.*ַo_Yeqp"u`@–C޳g/_.dY"310 AFF>3!g.D8.L1 _(,]ɓՌ˗c׮]5 p1@Tb̙SO=ŻLVVlق5k 55UVY=L Bė{„ Avv6Ԋ\eԩ駟tR€X /tJJ VZoT3"r5·3??%r9\̚5KP͈ݒ;v`Ĉbc0C" f̘~!ÇcBwf@_ ~DuСغu+V^ݻ y;. rJܹFT3"Vߏ3f*2j̙3ٳQݻ7V^  UQGN2eDDoݻ?]vUoc01"pذa֮]^z :J? J%<`#Dh,[ ǤIՌMtt4^}U߿z{{ |gϞ\.LDvӧ֭[5k --MVY "tMʕ+e_7$4uT[ X3""ߧV1I,<<-ݻqwILLʕ+sN5JVY a0xb.LD䀡Cb֭Xz5wt9=r?HÇǶmېx5#" ub@deea˖-Xv-RSS֌l1.z j ` 7`ٲeصkƎ+fDD䨨(,]ɓ. D,sZg}&{y@ o&ݻ˗/GddH块-[&p@n 7"33S`͈U1w\Z9z>cvtnX+""r,_=ƍt9611ʕÐ!CyfY={t~ n ;;kFDD`ԩBp7@rȫW_ yFcv̙N?/ߏ3fy$dggݏ7`NO?Bhպȷ8|9}Mʛ^d0̆ 吉Bۦݍ> 3g?:r|9 \0Ν;1rH"""s1[.WB$ӕ7Wǂ ,^5""@;a`N+--Y郼K/% h47oP@DD>-11M6aN . 
rC;{n̟?pUUUyݡC⫯;#8wvu@ T8++ _}>)J{8p͛@zWac T.11 .<S+N`RJֱ>`0f3(gTT~q{gP(\_w8R\ U*MCz:Tӧ/c$#@VC^ i=yCpmޕ`^kk+$IBPPDtBכ 悁B0?;vyp1$YrC>x /_0}`Xfӽ@N/-܂}ɺ^BDD7SNŁK/9=.c` 'C^~=TDD- P<38|0f͚1ڞc`l /ݻws9d""2d@`UVaǎ1bSeX J{hMPPxݻ?8'!""<݃ 5|pl߾HJJr s} 7nn݊+V 66֩2(+ [oAG 3gÇ1|:]@УG^7o*{Exx8^xGdd$RRR0h L<uu/KqDEEav??$wv?766.]BkkjErÁqyd-LDDnAp-""IH""i==DDDd`@DDD& D.*4/p  ] .t5`@"[lC=^ٍ~G̚5a0 ø9HL>y C}}20g̚5 Ν;z | >FRaĈx1c vQ\\Zφu]8z:tVSƱc%5** #Fѣq뭷sCM݋#틟~Fu}||<Μ9cyxGPPPP%1go]}j˞뮻oaڴiu[%}ύn61^{ BBBp}aڴiB׮]Q[[|裏pquzV۩@!Iz;Æ CϞ=08|06l؀/j{Ukc 'Ovz-R1cƠo߾Dii)n݊25fvz\PPƌ~AբĮCٱXkk?郈\t ۷oy_~eyg^޽;f͚e^>@Vq]waƌ8pҥK8z(֭[u֙3}tZ_1-nWT4i9ݺuCtt4QWWSN!??999رcʜl2ƍltct:^u{"##QPPVk?uQٳgnWT8qme-3k,Ktx]yXlYAv:*x ߹s'nkVtA:b ܻF]vu6olΉA!;;ZΉ' sz 2|z >,~m_`v%INcwc0߃['86mKv}0|_W됚ė_~u:vp]Q(xj*jT*,Y.'lvJ?{j~ݯg{s…;wez~a0`333~z_Zoŋuz6mr{D~Wȑ#6n ΆZ9K,z~(jvի~_Yܾk./{6w7W^%KX=:r2()_|aq[pp0V^m{c5kX@O~'gVê-XJ{\\_~YD-L0AGiv([5ŋCEE؈ ** VI -8 Z5C q={Z&IvlN\V=&Mݶ~y>U/W?VQQo ,? Fz[):;E`cǎ^1 :غ#[:T9rY;3keX{mo+z /__N}[.%]ڙ]yy]6l0}tP8~ Ys='PS .Xx Kk{k\!** 6W|r ~m?Ē%K YŬ-kw3 \zծr0c ̚5 EEE*΄([8;:I; Y*Y&SFUUmZ/W2e >S:{,|M7_=}ZNp,[_F{&=ɓyfQr O_v@OYk߭:S{k\iԩ8pN;A޽/;'D0#lu`=܃ÇKhT}A\2E\ V$IhhhzZݱfl۶ vi \{7رc.cN$Ok+WbϞ=V3tPkغu+Ν; ֢G͛rJN$(|[g^[.FGb |=y$n6ZW-֮gFGG[M`qVO<F8"((ݻ7ƍӧ"GY[pKYlZqZ/{XkOǎæM0{l˚C1|"X@$%%sY< zO4#MUQQ!|kgZ/OQ(?~[zK&77l1fw$$'N:ٍ7h&/rf7"g8{믷m۶mVzyB{b]by}0xw-nKII>r'^"ކnqӧeYwȑ>W/{3HfqWG?׿2-MS!v*3** ƍ]-omC:[edQ yjZnUym0z*fϞm1k4̞=f9ݺu-''udm?ةeϜ9>_v Vѻwom֦\6- ] DDDz DD[^ DDN /-_> y Qsd@밷3 %x$M D2<_}[9 |&|G|eqD-,im@_C93 gѓ{/Cn g-<I` kj5 []^BMM a0V(z @UUUh4m5>Q[盽AX03gn_[yy.M&w'$K (z#6=ݯ/[= e2= >}x"o`@X;pxK#io@ZA\dO*-> 6Hs7W$/_Ŀy#r+WvE;S7W7o+.~,cow6#{+**]%,՟Ƿo7 ȓc@^SgO\Ӗ3v[R}}}{ y? 6{3yoEݥC $D4vd|7>ޏލH_ ^?k|F! 
@ K`@b55@~ |yۿ?P(vsN3Jm׷)Irss>6$$0\dSVVH>k`Qg{kN &&CG< EEE;V1bl{U^_bرƍMdsr OygϢBCCđJE\k){߾}z-7..7x#áVn{qq]E Q|rqJ}dɓ'q1H 4HHfҥ Mm6 }1 immqttWtj@hh(ƍn~^p9r|ax'={{p5|t:N8DEE^PPݻK._<88F`Ν;ۅNB9s0`>b.\хCEkwK.!!$-cԄnk]t ۷oѣjeht<9Ot8ydvx5|>y.]괽e@P`Сݻwoz|_cPQQ%Irj.B=zɓOReM9HOOץ9\-00V\Hq]ϔB`/9ӝ.qn'>`oyt 8Pkֶ?(jp m߾}qNbv*Z(NTKbbb`@AA81b=T*UOԳ3rNRDFF_#F> %8UWWjŶCg,>h]QQQ8[}˗/ŋLOOGyy9*++hjq ;<466̙3(**B>}x3{>?x'w;>55*~ IDATg/ʽP ^ҮLK vTPPf$''wfl+**PSSFvu$ 0+RtiJ%:4iDDs ( $%%AɓfCNCAA&K-))Ass3|+k[O[XWnݺyV` Ƭ /^JBϞ=T9Btm\wXEEN>ީYʚڅ8Ng8 oԱxY(PIN &~o3=`ev?766B$X9B\pqqqp;Xw%4GQ ++ v#~FAAAHNN4co/.RWWw/_"JJJ:}ΞQ:>t|CW: Ɉ8hN%lqK]P@Vh1Ȥ!%%CEBBͺ9[I$uha,"Q[[kw_"u>c $I8z(N> ZÇ[nASSN>f!22111h4fΝCTT4 ڊHg$u8ß\RB@~PYY'O SJJJPQQH466=GS*jf3 ArrYV#-- ]vE~~\V3jjTVVݻFWRR:hZsw$I3Bw>1$I(((4ZDDzJyOD: ڊ#G=ȷXl@,oiiiwOqT{kߥKqqqHJJBVVzaq1x`$&&"22}EDD v튄={"11޽ǛuxVsgaÆ ðaÐeuD=l 9r%%%B{}\Z0hiiA~~_]Rxoiim9uuu.M555ѣ8s``㾺qqqӧOckkkq dddRh(hHNNZWHHJ AP__˗/ʕ+h!8¸*a]]~Q[[k/<<f'Ljnn6}JKKѣG$&&zŝ ---z"##QW~i▖=z`(s⚛QYY[X!knng:)---m%&&k׮Luuuؾ}{ze!==򺝇XNxΦBBB*gϞtjP(jj`dn111.|) dff z111 BFFΞ=k`0ܹs(//G޽CA$sc%RSSi0jKK |2QG3`LeeeOqK򸂶ll\;o?88`>o{_xx8ZW}BS peСHII-]__|^szj[jdffv\Üg w8/.҅ :U* Ii \kڞ]{Y;^W2djjjT*QWWzHdS/B _u* =z@}}[:u:jjj<"I?2BTT1Zn73Zg&%̽C ٳg~p!ݺus_]]NOOOE``q+p\ yVN kڄ[#_ 9CmmGAyy;a>AEEEP(ٳg9ơHNN Gcu744GYYKmzOFYYYWG-K 0l+666 1l0h4!--=ǿVEϞ=;u!333fG Tue:ֹ\܌s`0@̙3 (//Gqq1=m X0759;N?߽MP(Lqqq,諭ÇQRRU ˨w (Z-{ 58Bp:TTT ,, Z4ADDkvիh4 %$$ 22ϟGmm-J%z`Eddd[ EUWWY H;֫:h=$ Z_8ri Ov$ٳª*jcf#33JEEEB V2]хwj RD>}>)) ---6T*P$9T1y7RX-۟ڞK . 
55m[`2B}UUۇ0DDD@՚Q[[kv 㭏tk8hmmE]]*Z=K⚚ulo-ޘDFFܹs#FdaṲ/^>}¡RnÎ-A.RD!--4~%66Ӂŋjn`q܃KF466mЁkb= .\~QkpQz' i{eqN•޾VA[*41qVoGss3N:)))5#|z^75jb ǐ!C0x`dRDݑh D#K!!!f:{\WWgs=jkmVRRb$̙38s)t邬,$%%Ԅ .^Iڍ)FHHI*Iv{ԅn79;]h4HMM^2v::\/]3pI6p X'.]B~~~;V̙`\xݳgO 4\08|S+;wS 4+.Zr1߿@C+!!z2}@J]\ATv5hiiw[xf< 2٘b dC {B $&0ąB] au e1X m 6S5.C#YkF53v Ԅ@Ζ}٘>}:a6Dz}N{mALԡcΜ91'#͆;wXP!WDӝtF#^áCR.ӵEq)8{zՄ(8ydL0Tu; HDj QV&aX.~8p ׃aۍ%[3 VYf5z{nL6 Z6fS&+B&A à 8|pLMwwwrL.shllLZ!A?($ }lAb%FNj$ ,O 8tۗUرc2WzcǎLgggd 06mZ^(b߾}ho]Nra\ZO0{5_{z,FEb„ $ B&}Xl6lݺmmmaШ~YqQ:t(LJ긫 8FӧOrƞ={rS!h4 cƌIj5t "!wB] P՘6mNe98,KO ׋۷FY L&f̘A~#(I%;@Etvvb8pcFka׮]AUa'Tמ*H7qvܙulEzV3U՘5kVˁa(Wn&QF>TVV%n'O!ݕnq?1]ǏŅ(% P 2@ Yn%EA*#2~X*.Wq xKGee%; n7ك(3Vú  VٳgСCQq&L<)2- C$$ @()$ I.*j7$< WͱP T 5BSs̨V&)Rs[1͘7ov|(WWW8SLAGGGT7FYtk1sgYSN>مpb(t8~{,f ZDApg!Ed}| #2 '+J0%I2 K3iШ8T,4*ЪTJY;466b޽Qϛ78uT̃tͥ!ZFss3z{{|ڭAcc#,ىcǦfnyF78L8555h`,)hal0!JS8"> OKZW`H( +FZ2 Zej-_.rX,0Ӵ^VرcQWW;v,j% Wf3gѣGu$L ̙3;GEQ000, VlY@>FbJYYYTl d ZtuuEY jkkCW[+6Hdx%:  AA>_0aA0aŀnaϞ=Qt6mV+cv/̇ 2n6%L/$2CӁgJ`ؿ?DQ`3rIA0SkGV䂂 O`KLYQ`u p `yPq,T xa?8vh@5Xg`Te5e2i(AVYDI' x '~7Cp ܂ 2ϲ0hXt*Z& jXZ---lAkA$& QV0 ƌJ[M&SQ<V詯ɓS"1򱈈_>GEÇ1uԬBxܐpE!vN10 ǃb eE]2 4<;|s4*Z *Aق1,X/Dor!dx #'22+E ¤a - # y>ib}}}04<':/f9jΝ;1s̄YHD #6 ` u{twwqDLvb0pԩA|XH']81 e7-+PXy٦&^txeg5𠱲]x(ؙiWGt UFՓgY5,:C2_Q 0 ay`'A^ G]!) 
nV Y&^Y*S Z6`0ed{˅GHU8_uNƌ0zWWWQ @79sdK#VgR6vX1XNDMyf0E7[Ӈ.W0,z<4;O;YâWìa֫<Ə?nĉa4qСhww7cN:a1L av 2pj vD ,Ua + vw8pCԕk`,jQ0yܖ*#%AbYԘ4cRʠ)4"5C> lv[#!Wet|+a`rB0 Buu5Aya 8blZӥ6@A Xւ0iף% Jѕf=7,0΀ ş0U* Oqˉ>~heQWAE2d-G32QHItCbZ&YQ?_T*BҠAKlf eĜ9s&Hah6QtdnEeee)Ҋ Ā:cMt*s7D IDAT(+̟TvapIBb֬Yt+EQߏCT*UT-r Avw0TWX&X0 Qˣa(~O'UGJ˲$tHU, 25*(^Pc!0 8 ܹ3lu\hoosDʇV OVr$䩪 uuu {JVL6-c dEdIc PqqUv|n/(aZ-ADT*UL+) |QePc=* !b?(CV/c982* XROxǔ)SgϞ'`6a6TcY F)xaˍPUUa0ydر#L8v_ qQ1 e+͇6wt*-M&Kx9'OFZ+"UA^l1Sieh%棻]0DJYy5Ԕ`%?$R--) zzv9 * e*U0رcaZ@X힇b$Ix<~pp0J0Eft1%p_9JTVVϱ\ )bƩUhi*CI G2lŝ ,rb6z6UpJ͜D].x>'C,~e*XT8uxEX"^Ԅ(+mv2UF iPwh'FYHX ӧ}ߩwT QU'#q8p8hkk^GEEEدbmmCLa`JgԔAȔa{Xք32~vuأ gNơ"9'lW&bqk>M!+$8q_$l>QЩ8TPmRʠ&FuRF`ԩ8x`TXJ*ZbG,G.//;O>ji rE=Zóhi2jŨ 0z*cǎ )E-Jyg+~.eS|. N,̴jjf +c w9S nABǀEϡ(a61g߿?nRr8L4 {?ϴy$I`쪫Z(񔨅50Cχl6Xִc( rP3f{A&Ө" z{{LTj ,C088^_:ܹ3x<Lg<9i)( gQX&Ix8F*^䪹P+{Z txW  Ϣެ  ).בoX KNt3gWTT:l/"9iӦ5(r,ˢF ,ˢ555P6 }}}Z)X02j#`QS)c%JİY "UUU3gDgg'79`u b$s\5:I:Q>lv'$_E:_'[*EOB݇^^ev9bYTxT6υ>}:ۃό]}p„ s)?iPڂ="dYFOOzzzPSS1cƤd E1lȊ0 Q^^Ut-+!'sAÇc׽Qs~Au)$!$V;DHO 3A@KbҤI_Aw<o($)lcҋ )m/IRVHv{5=TV9U"KD`oL#cŢHw\ h8ެFY E)ĮШ L<$ '%jUe~kBE_2EYjqQHPSS>}0 Ə:Gh4>uT h PLިX,)sgTNz񠿿VvaP  =n|۵mhi2Ab>~Y.2ѕ|L/r0pjrE},xȊmm6x#,j2gO6ޗq kUj0R'Ttz}@`H]ez,$IRX+JB Pz`F[DP$vy%{%{(rJ4PQQNJފ*8[f3,KXFgg'~jرcII0+BUUUL+BoooXl6'El{ZZ3 ~(c 'zB`|Sk`{*_t̄A"TE`Y69zL&p]jz 'l jʣ Ya0t:d<=rH2=V  ϲlJ*g6T9T@ HPZ>d* z$ z82*SQ(FV[)X7n&'O:9N9r$x\""x<@gg', jjj`2 "l6[X)BU:BY]vw;F|*ŜzjL"C=)=0 k#qAsZa'0L!e908s&b5t}9ZeC8.:GŤpEqZ,B`Q'!gb* t9t9^͡ʠUѻb!V,HQttt`ԩQ{^_EQk-٢( qDMfy^W%J.3F)-+!ɔ ]ǣ Qe(젖t(Q.5ҡX2rV͡Cc [D݇?>!P.~7`\ǡڨA9a}}}*"5jL>Aa7nՅzQY<ϧ%K(cg-ا$:k^%1a,pf#-ZeP1"H:*}VqPQ1iȚc3CZT1,b„ 1k̝; K8l8ӧOZ81 x}-&0(IȌ|]1 j*V:t;)Ĉ: U ԣR2***`6\bn}}}ġCIa0uTŮ@bܸqhjjB?c0cn;aY2ߤ1ol~u\.W/&ލ/) vuD?g5@H]P%hU,,Z4XP`0IO2QaPe&L;‚ec08qbTD0 lhxӃT9^_tL%;;lH*tUOTȻ0U%Kt#vE~ըɇdAMGm7:u*.PQV|굨T*̘1cXZC+F{'CSMQ&9^0 qtԬ#kkNm̄\ME%ѐP2G3+נ;v$:zޔ-H9¬YTԨłAGV-(be1} S@{'(e%,Qä`ҩ`ҪIT |8nb֩XEEK G"LIkM)H&"+Góa鑲`%`Uw)H`d^kxF-/NZuA=sEG1BݗTq f-Uja֫8ȋ0 {ja4pW@űP<}DA]H &5*ԘX(pyA#T9(ߒPWs(pЩY 
G+pz%8RJ8q:[Pqq]JFD^X'AH6 jPIvqpx%U%`#/|sDMóЩXhT,4*ZghU! $Y#}pze8\>9PXr-*5(.?yr+4X3jcq"Q0? (a1HNZP@bq&hɫ1es%ؽln)f;tߕN0 .x3`0!L0z?3XˊQ>d@eH2 J | QVe(C2Y 25O5(6a5h?x5~LD('U=`@'*.ITO+ пl- 0(Ss 2 p ]DaQQ@^ۀe4UF'dz\Y FZ5C1\0Ȋ .OGdx%^! -?_ejZ5KEJ A bN£(89. ju1S|Z@CRHB!4 _bwQe{ I <P,G cRx|@@T(P#(ɈtB ~.29*9@1P *|@yU\btg,=苪]Xm- Q@dKXA>x6$ F'9۷f`bP$ \C #ԩSQdbPqR1B#.( b a9R0ԇ&kQ{;8 pɹ0 {=f}h,`0 Q@&OH ȹ-?2PV5ZPeԌ#襪*\77„H`2(P 9 uh}WF kA@ rJP%*+a@.ƚuQh @HS}'|V2_, 3VPrl1H>l'u!b4j YNrj1aZ*U{uF ɷATډZ*EQC@tB:B $btSa_p4YeԂc6AF~0>vl q@ EQR=2Q \z4G0(rh2`LA:)% dbzf``BZvjUL:U[b ( "r& B ˞~IϨHHDL\!x$`C*)jWQrHф[&tL J SPb 5klvE%O.B`(Ƈ 2/EPaPٚ  vb{ P /f< JY B-;e |Xm҆">![YE΃ 0Sj71%,KVc&) C\ S FVCFz G |bXh`ZOǢ)l6jDJ,S8ԔQ[N€ rE>Ȋ@K΄8e27Fԙ`?Ad`AGNep+C *r W$btGt$ca)ӠBSA $btt, àB€ JPDi&J0(׫SsAtɉ0zE1b`h(ebuq @58ɉ0CiJBYSA !T8 "'@+03S|A&YHDʇ^6:0 Z&*QA+p%jyy <$R'^Bd R$#wY LŠL (rO-G0H 1+Qm$a@T*-&b 0ȉ0h= ℺\ MDjv$pɉ08iuykODi*"H DRV(p\qc.S3/\BvWHwz#})aZ/tup8 J]ԑ$ $@ޞA_n yo =e#%ڪ8ހ8 䤎}z)pI}$ AɱX,aqo AD0 * j( ' DA"%a0 juܿ|1ǁȝ Q!2XdI!2DQ {/7ylJ +555qfg<,P%azFAQȲ tz, a*)$) hҋ,ːe 5C(' V삼&r(-miP"*߄NztbK0 ^lo !"IAAh2`4=,50 hɦ@Lˋ IDAT&ؐ0 'B'@q# C€ # db= ADC€ $q/HD8$ (X2pqĔ%q@!a@DA8? 'yᇄAO6ΜPy㓗l'q@$ (7ޒT 8 F;$ u9"IA ٺBIKL& DC€ "H,BE b@€  bA€ # [B*Qʐ0 (ɵK!tDA€ % @NHQ[RAEp,耄A%pQڐ0 dNq AYB(%HAD)@€ "ǐ@ AyQ0 ȐTI ?;y7!" T%AD*vD|b׿_=fϞ7. "ʰgy_}fϞn>p "d[;! H$B^ Z[[v1ydhiio~| (!@yO<Əsb;000V ϗ[ۛ$(8H #I^Akk+N?o^hnn?| p`޽ {ʕxp|&ADAC " @e^_~9~/ j뮻. ~;qg W_}5>{6m’%KOOSNaϞ=8d 1$ <n) :w\,O?4ƍapmaܸqeK,>9s'c=cǎm^/8Ɗ+`;i&ADM$te ֮]g8 /@̟?? aێ;֭͛Q^^lGj,Y`p!;vlg~`pp?Z-~cܹ+`Z M@ t7 ꫯ&\CQdao8s_2* : wqGVZ@ݻ/;<,Z`0puto>_sftAb Aat( ^{5̝;k׮x`U/z>iO'_~W_}5.'|O>։sbĉqn7ZZZ`Zk.dt#(8f E1օN8n(FC>8xްejsBI?EQ6mk`-Xmۆ+V/xH@+!S V^ Kuo_=ns\|Ÿ/:::˅*zXfMbY 3\Rdk16[ivI?upd4F2@l}]̟?zka駟EQb hZ<,?=6vmwX rEY pr[~muY7Q8?<ODh4kJ%Kؾ};nV7oo}[pwyo8 rI!ȵ=_5,^Ght)+fc=ZXr%ϟ?|GÊ+_bxǺu¶lELA 0RհNأEX'2!U23K X"$4 SL fΝذa˱l2Hŋ㮻 3g^{5̟? 
DB%W L( FcN裏p9[o85<$+!AêUpM7%npM7w矏K.}}}xgr/bѢE{n:,Z0 q磩i0QVBr(+!=(+!9ÙId)$`sG`Y LŜJJL8>{7nrANAp,o|#vy`)I&W\.L6 (B?9nvzs=AI:s&0H abx"UFoo/dY6;Н^L(E2N>(z!x7T[a480k1za_~˗/ǚ5k`Xp=@V?{+ꫯ2:L!aA 9#) 'TW钩0|F%ād(QϨt(xW_*$֮]q~Y ">c,XK.EOOOmyg?úu0u԰v]]]Xv-~DòlP@$ʌݍ˗#s"boeH+B>Ne@L(AƙgիWgUxD0dYƫf>,XAp}'s9x!".\ ⭷Jɕr%3Ү _UcR7JzNB +6`gO>F ~ gommŢEpEaÆ 6mZA)S~z-|Xx1,XX|9^uL6 ?hSOᢋ.BCC ",%s"D ɔwq (^xa8IA`D-dTTXlV^pYgĉرc׿]wcA=#dtd1HY ҃,)A _ \ LA-ɇ֠HkR S1a1HD6A3gL9'|w܁f*>3vuYK ԹebXFjBmW^8T%2 M(WB,5_vex衇0nܸ?3qDTTT`֭:::҂Flٲ t:E سg|>,XF_|. sΥU/ADZ @"E<[tXtjP @6^m݆;#e+=/_[駟]w݅_Xv-~<QK,SO=Il ь$I~4 8x^(ZRш QF+T-[|ޝx|ػs{VZ޽0i$s=t[Q1o$>l{:u*:,1H$b Rc 2%P< <[柳K`:P" @\G0d߾}ꪫp8ӟK,)S_ŋ>K!a$ C uF0C~𷿅xF06|G`Y`Ŋ`Y6lH_'OFmm-ΝL|Y3:' Ν;q%`YbI %) d `ʕ8묳40uT{۶mXt)f̘p-[`ҥ`Y?8,Y16[ꪫ !cŊ8'ds|ᇘ0a]vYO1`\DAa /8Ο|IX{x۷o_,ZAJ JHJHe%N1g% bÆ xǣ!0EbJjpǎg\{˱;wn0@#\]vaƍx'W_E\${1̘1>l& '? .r.1e (ȒQ- d{#ĉA_q]w;>o~Š$mڴ 6m 7܀snᆠZςoݺ*dY`| ͛7seӓ8m Cd{S\.<Ø7o^y啨~ڵ 6m—_~7xmmm8Om+V\ ш{'~SNkAuu5oȑ# Lw$(5:::tR\zؽ{w A$Xl."lٲ%󨭭\|زe *͛>hmmſ?|>Z ~7|~)&N^{ [lM7݄ٳgg|Aš5k܌W_}5HĆA_~%.b,] ٌ+_|o~z{{3xO<> f8N^& /Ƨ~ ǓqA"`ƍ7oxsd%H̨JHl ~ߎ>3f@cc#>c,m۶oz˸̙3QWW[Oץ{nݺ`*,˨>K/4)(+!=(+!9:}v\? A d.x\s5΃,Xh}]466 ߿?( _UDA[[{1flشi8 7܀O b9y$nV\p$ /K-|!|Xp!|`A$8'NWZǃ?A瞋|;xqwy+AHKXi6HY 2 ?\[o}}}I_|9, *O_K/W\qE[l>}zxoY7ADy1o<\2+Q@qC CRm ӦMg} Ae\ޝtjEjn ')j*j* ƥ%NDHJ"5tPњ.65%J !T" y:? ~vYY'O{޵zk`ҥzmwŅ ?~Iqܹ֭S)dee} !DIDEE1|pFիWK܏͇*1tc+'oS<}t,Y\OOO{$&&I&MHIIܜtΟ? 
/̤f͚z}ٳV͛0=l>,|tä$<<<(RL1Pь1{СCٱcsջ_r7oU/(r ڵ[^ŅDZhaB<57((,$0PE-'ݽH;::RV-~m;FRRzZM6q9r$...l߾cBT] 0ɓ'ԗϖϘ=<<055}by燙pի111Q4OXX&Mb@^q ܺu\$Cry,--W^/"""x75j޹s5kFxx85jի>}FѻwozqIƠx$ct1(2AAA̙3~!M2圡@YYYӥKV^]hߨQ#̙3+[`<`ٲeԨQNǜ9s055eĉ 6]b؅e/::w}QFHbP))) '''zѣG={@DDrܹs| |kkkMfpP YK ?4|}}\޹wL:ZDDFFFwpU]F-8nfϞM޽ ؠ"(+W`oo-A}I@PI`PGx-MF~ +]ӦM ԩSu>ݝ &L+=z4w~_Q2)))caaΝ; KCJB?3g0h J5f̘1ܽ{k*V\I`` =z>P߻w~;v0k,6n( (:NrC6VBjlN[.3f{;;;rJLLLh4DFFҼysBBBhժqqq1+W닃,&LYٳcB899qIG$A%FвeK,Xرc mwQ;n߾|r&ٳg|vŊ0|ڷoO׮]ܹcB͛7ٺu7tmTnTj}ˋ]u?N:ر=z}=(O?qC>|͛7s>ϟs=?~}9::vZ8B-nÃ\)]J2`jLDW\[[[~קOOOW^K Z-ooo%(f޽ԫWSSS~ˢE %ի177gƍrHPse=Q9OHf>5Ǐg4jc=ʔ)Sܹr}Æ \vYf)G(߿ڴiC^aԮ]۠ QAAAhZ]fp_ R(jwvqqaʔ)ʡGts]Ο?y~xbdd$2 TV r]Zlɮ] !T?EiQ*Ԙݻ߿ж@6lؠ/V6(QQQ[ƍL\\\~yb׮] U" >gjҤ M4ԔXx \]]y饗TzXnܿdςd DSO?Djc֬Y5}[ne 4H;yׁ-9ܹs `:A1ɡM68qBocrr2&;;+++4iE~~~ܟL<(w΀ppp(t >۠8|RRRko>^{5Fرcԩ .T(?t:;wGxxxHP *5rM?ә={6j*݃Xz5ӧOW6(^t Ž;.\T^˗ceew}3g/Pel={Vˉ'TO\QI` *:`"wfmP"22;w2dɓپ};J&Q$$$.E#K BPky!66{{{ ŋ;ySDFF[o-[$33x+=!b@T(jMښiӦv,ZM6qk?SZ55k\z*۶mСCdee޽;NNNE:viiQI` *&۬,6n܈W&''\x777'*6h@ݾ}8u_D1gLMM8q"Æ k׮ ?ceeU%E!ĉVxq!C8s Jq&ۈhjj @XXdzYX.\Ν;UONQYI@TJj}uR9,,XSu}ӿ;Y :VKZ0xB_JJ XZZJP DH` *5&_~Aaoo_ ƌݻwYvrmʕңG>6o/´iӔ͍w%**lUCU˖-[ڵ+dddܧ,@DB~ygGGGj׮]h[gggV\ H7oNHHZ"99SSS4 ΝSNJܴi3g$++-Z̤ITUpiZ-NR?&EU"Qei΅Yt)[nĉ'M˖-,ZH qrrؘm۶1bf̘Cg-*O!JH2JR+{ЧO011)ܹ3]tȑ#ʘiٲ%ٳ;8i۶m OڵkYlYOL\+ ܹSם8q *k>#88@ KPAAAWWWUG :z 0{l>Sj֬Yh ZnͦMҥ򵜜Fɑ#G{2ɉcǎ֧LBH@@OKKK[hۗ_~ҰaC233ٲe T^]m*?VK߾}U $K H` CԺ9`ggǰaøtRLLLػw/ffflܸzkגoFNNHUYVVWK.S!'K B@5j0yd\]]_~RRR7o6lٿ?fff:tDEEҟL{BL!BaÆ8;;3uB7?;CwQUÜ9sطoj}ʔ'D$0| OOOzZͽ{aժUdffҧLuBB U:#r .ݻ)S#%V`dd̜9sÇą TS7!O!JHAYp!|VW\Mʇ @y#ѣ^^^K>˫/_/)ә@ h4ƌ;M6UD} ! '*R3{Pn]f̘Qř3gje B]Q ڵk… 9rj}>k7o?g֭etH` D)R3@x7k׮YXfA!JB<jժUgҥ4jH>KKPPZk׮֧LWB> xF 8::v98zj}4%ij#ϘBǎYlC QϒJLLdɒ%R$ ,H` DQ3@߿?>>>ꫪYTYYY[ŋ_֯LMB (CF!:VҥK)SeK!5E-lٻwj}T$D 刚9y}U- t:]*!D9&Fj՟ >>>nݺ}uV\\\sZC,BSjfԩԩSKT944VUL;B_QΕUyׯF``jt! 
z쉗}-55???U!d ($0h4sjM4Aӱk.\\\W[QH` Df~|߿ӧO֯L-BTLQ ([өB<{zB;Wt:QI@JBRFL#BT1t,ABT2ߨJlPӒ,%Qi4t}ʔ!D&!*1NWwQI@*7t:jEQ>I@* Z$c DUXyg$c Djd>tW $c @ѴtWzByy42IENDB`pammtools/man/sample_info.Rd0000644000176200001440000000207413662013606015631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make-newdata.R \name{sample_info} \alias{sample_info} \alias{sample_info.data.frame} \alias{sample_info.ped} \alias{sample_info.fped} \title{Extract information of the sample contained in a data set} \usage{ sample_info(x) \method{sample_info}{data.frame}(x) \method{sample_info}{ped}(x) \method{sample_info}{fped}(x) } \arguments{ \item{x}{A data frame (or object that inherits from \code{data.frame}).} } \value{ A data frame containing sample information (for each group). If applied to an object of class \code{ped}, the sample means of the original data is returned. Note: When applied to a \code{ped} object, that doesn't contain covariates (only interval information), returns data frame with 0 columns. } \description{ Given a data set and grouping variables, this function returns mean values for numeric variables and modus for characters and factors. Usually this function should not be called directly but will rather be called as part of a call to \code{make_newdata}. } \keyword{internal} pammtools/man/predictSurvProb.pamm.Rd0000644000176200001440000000150414452536154017426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predictSurvProb.pamm} \alias{predictSurvProb.pamm} \title{S3 method for pamm objects for compatibility with package pec} \usage{ \method{predictSurvProb}{pamm}(object, newdata, times, ...) 
} \arguments{ \item{object}{A fitted model from which to extract predicted survival probabilities} \item{newdata}{A data frame containing predictor variable combinations for which to compute predicted survival probabilities.} \item{times}{A vector of times in the range of the response variable, e.g. times when the response is a survival object, at which to return the survival probabilities.} \item{...}{Additional arguments that are passed on to the current method.} } \description{ S3 method for pamm objects for compatibility with package pec } pammtools/man/get_plotinfo.Rd0000644000176200001440000000102413662013606016020 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidiers.R \name{get_plotinfo} \alias{get_plotinfo} \title{Extract plot information for all special model terms} \usage{ get_plotinfo(x, ...) } \arguments{ \item{x}{ a fitted \code{gam} object as produced by \code{gam()}.} \item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}} } \description{ Given a \code{mgcv} \code{\link[mgcv]{gamObject}}, returns the information used for the default plots produced by \code{\link[mgcv]{plot.gam}}. } pammtools/man/gg_tensor.Rd0000644000176200001440000000165713662013606015332 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convenience-plots.R \name{gg_tensor} \alias{gg_tensor} \title{Plot tensor product effects} \usage{ gg_tensor(x, ci = FALSE, ...) } \arguments{ \item{x}{ a fitted \code{gam} object as produced by \code{gam()}.} \item{ci}{A logical value indicating whether confidence intervals should be calculated and returned. Defaults to \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}} } \description{ Given a gam model this convenience function returns a \code{ggplot2} object depicting 2d smooth terms specified in the model as heat/contour plots. If more than one 2d smooth term is present individual terms are faceted. 
} \examples{ g <- mgcv::gam(Sepal.Length ~ te(Sepal.Width, Petal.Length), data=iris) gg_tensor(g) gg_tensor(g, ci=TRUE) gg_tensor(update(g, .~. + te(Petal.Width, Petal.Length))) } \seealso{ \code{\link{tidy_smooth2d}} } pammtools/man/get_laglead.Rd0000644000176200001440000000211014222504522015547 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lag-lead-utils.R \name{get_laglead} \alias{get_laglead} \alias{get_laglead.default} \alias{get_laglead.data.frame} \title{Construct or extract data that represents a lag-lead window} \usage{ get_laglead(x, ...) \method{get_laglead}{default}(x, tz, ll_fun, ...) \method{get_laglead}{data.frame}(x, ...) } \arguments{ \item{x}{Either a numeric vector of follow-up cut points or a suitable object.} \item{...}{Further arguments passed to methods.} \item{tz}{A vector of exposure times} \item{ll_fun}{Function that specifies how the lag-lead matrix should be constructed. First argument is the follow up time second argument is the time of exposure.} } \description{ Constructs lag-lead window data set from raw inputs or from data objects with suitable information stored in attributes, e.g., objects created by \code{\link{as_ped}}. 
} \examples{ get_laglead(0:10, tz=-5:5, ll_fun=function(t, tz) { t >= tz + 2 & t <= tz + 2 + 3}) gg_laglead(0:10, tz=-5:5, ll_fun=function(t, tz) { t >= tz + 2 & t <= tz + 2 + 3}) } pammtools/man/gg_slice.Rd0000644000176200001440000000406514222504522015106 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/viz-elra.R \name{gg_slice} \alias{gg_slice} \title{Plot 1D (smooth) effects} \usage{ gg_slice(data, model, term, ..., reference = NULL, ci = TRUE) } \arguments{ \item{data}{Data used to fit the \code{model}.} \item{model}{A suitable model object which will be used to estimate the partial effect of \code{term}.} \item{term}{A character string indicating the model term for which partial effects should be plotted.} \item{...}{Covariate specifications (expressions) that will be evaluated by looking for variables in \code{x}. Must be of the form \code{z = f(z)} where \code{z} is a variable in the data set and \code{f} a known function that can be usefully applied to \code{z}. Note that this is also necessary for single value specifications (e.g. \code{age = c(50)}). For data in PED (piece-wise exponential data) format, one can also specify the time argument, but see "Details" an "Examples" below.} \item{reference}{If specified, should be a list with covariate value pairs, e.g. \code{list(x1 = 1, x2=50)}. The calculated partial effect will be relative to an observation specified in \code{reference}.} \item{ci}{Logical. Indicates if confidence intervals for the \code{term} of interest should be calculated/plotted. Defaults to \code{TRUE}.} } \description{ Flexible, high-level plotting function for (non-linear) effects conditional on further covariate specifications and potentially relative to a comparison specification. } \examples{ ped <- tumor[1:200, ] \%>\% as_ped(Surv(days, status) ~ . 
) model <- mgcv::gam(ped_status~s(tend) + s(age, by = complications), data=ped, family = poisson(), offset=offset) make_newdata(ped, age = seq_range(age, 20), complications = levels(complications)) gg_slice(ped, model, "age", age=seq_range(age, 20), complications=levels(complications)) gg_slice(ped, model, "age", age=seq_range(age, 20), complications=levels(complications), ci = FALSE) gg_slice(ped, model, "age", age=seq_range(age, 20), complications=levels(complications), reference=list(age = 50)) } pammtools/man/get_intervals.Rd0000644000176200001440000000266414222504522016203 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interval-information.R \name{get_intervals} \alias{get_intervals} \alias{get_intervals.default} \title{Information on intervals in which times fall} \usage{ get_intervals(x, times, ...) \method{get_intervals}{default}(x, times, left.open = TRUE, rightmost.closed = TRUE, ...) } \arguments{ \item{x}{An object from which interval information can be obtained, see \code{\link{int_info}}.} \item{times}{A vector of times for which corresponding interval information should be returned.} \item{...}{Further arguments passed to \code{\link[base]{findInterval}}.} \item{left.open}{logical; if true all the intervals are open at left and closed at right; in the formulas below, \eqn{\le} should be swapped with \eqn{<} (and \eqn{>} with \eqn{\ge}), and \code{rightmost.closed} means \sQuote{leftmost is closed}. This may be useful, e.g., in survival analysis computations.} \item{rightmost.closed}{logical; if true, the rightmost interval, \code{vec[N-1] .. vec[N]} is treated as \emph{closed}, see below.} } \value{ A \code{data.frame} containing information on intervals in which values of \code{times} fall. 
} \description{ Information on intervals in which times fall } \examples{ set.seed(111018) brks <- c(0, 4.5, 5, 10, 30) int_info(brks) x <- runif (3, 0, 30) x get_intervals(brks, x) } \seealso{ \code{\link[base]{findInterval}} \code{\link{int_info}} } pammtools/man/pipe.Rd0000644000176200001440000000040013662013606014261 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils-pipe.R \name{\%>\%} \alias{\%>\%} \title{Pipe operator} \usage{ lhs \%>\% rhs } \description{ See \code{magrittr::\link[magrittr]{\%>\%}} for details. } \keyword{internal} pammtools/man/geom_hazard.Rd0000644000176200001440000001107314452536154015622 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geom-hazard.R \docType{data} \name{geom_hazard} \alias{geom_hazard} \alias{GeomHazard} \alias{geom_stephazard} \alias{GeomStepHazard} \alias{geom_surv} \alias{GeomSurv} \title{(Cumulative) (Step-) Hazard Plots.} \usage{ geom_hazard( mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ... ) geom_stephazard( mapping = NULL, data = NULL, stat = "identity", position = "identity", direction = "vh", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ... ) geom_surv( mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ... ) } \arguments{ \item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}}. If specified and \code{inherit.aes = TRUE} (the default), it is combined with the default mapping at the top level of the plot. You must supply \code{mapping} if there is no plot mapping.} \item{data}{The data to be displayed in this layer. There are three options: If \code{NULL}, the default, the data is inherited from the plot data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}. 
A \code{data.frame}, or other object, will override the plot data. All objects will be fortified to produce a data frame. See \code{\link[ggplot2:fortify]{fortify()}} for which variables will be created. A \code{function} will be called with a single argument, the plot data. The return value must be a \code{data.frame}, and will be used as the layer data. A \code{function} can be created from a \code{formula} (e.g. \code{~ head(.x, 10)}).} \item{stat}{The statistical transformation to use on the data for this layer, either as a \code{ggproto} \code{Geom} subclass or as a string naming the stat stripped of the \code{stat_} prefix (e.g. \code{"count"} rather than \code{"stat_count"})} \item{position}{Position adjustment, either as a string naming the adjustment (e.g. \code{"jitter"} to use \code{position_jitter}), or the result of a call to a position adjustment function. Use the latter if you need to change the settings of the adjustment.} \item{na.rm}{If \code{FALSE}, the default, missing values are removed with a warning. If \code{TRUE}, missing values are silently removed.} \item{show.legend}{logical. Should this layer be included in the legends? \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE} never includes, and \code{TRUE} always includes. It can also be a named logical vector to finely select the aesthetics to display.} \item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather than combining with them. This is most useful for helper functions that define both data and aesthetics and shouldn't inherit behaviour from the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.} \item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{colour = "red"} or \code{size = 3}. 
They may also be parameters to the paired geom/stat.} \item{direction}{direction of stairs: 'vh' for vertical then horizontal, 'hv' for horizontal then vertical, or 'mid' for step half-way between adjacent x-values.} } \description{ \code{geom_hazard} is an extension of the \code{geom_line}, and is optimized for (cumulative) hazard plots. Essentially, it adds a (0,0) row to the data, if not already the case. Stolen from the \code{RmcdrPlugin.KMggplot2} (slightly modified). } \examples{ library(ggplot2) library(pammtools) ped <- tumor[10:50,] \%>\% as_ped(Surv(days, status)~1) pam <- mgcv::gam(ped_status ~ s(tend), data=ped, family = poisson(), offset = offset) ndf <- make_newdata(ped, tend = unique(tend)) \%>\% add_hazard(pam) # piece-wise constant hazards ggplot(ndf, aes(x = tend, y = hazard)) + geom_vline(xintercept = c(0, ndf$tend[c(1, (nrow(ndf)-2):nrow(ndf))]), lty = 3) + geom_hline(yintercept = c(ndf$hazard[1:3], ndf$hazard[nrow(ndf)]), lty = 3) + geom_stephazard() + geom_step(col=2) + geom_step(col=2, lty = 2, direction="vh") # comulative hazard ndf <- ndf \%>\% add_cumu_hazard(pam) ggplot(ndf, aes(x = tend, y = cumu_hazard)) + geom_hazard() + geom_line(col=2) # doesn't start at (0, 0) # survival probability ndf <- ndf \%>\% add_surv_prob(pam) ggplot(ndf, aes(x = tend, y = surv_prob)) + geom_surv() + geom_line(col=2) # doesn't start at c(0,1) } \seealso{ \code{\link[ggplot2]{geom_line}}, \code{\link[ggplot2]{geom_step}}. } \keyword{datasets} pammtools/man/add_cif.Rd0000644000176200001440000000366414222504522014707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{add_cif} \alias{add_cif} \alias{add_cif.default} \title{Add cumulative incidence function to data} \usage{ add_cif(newdata, object, ...) \method{add_cif}{default}( newdata, object, ci = TRUE, overwrite = FALSE, alpha = 0.05, n_sim = 500L, cause_var = "cause", time_var = NULL, ... 
) } \arguments{ \item{newdata}{ A data frame or list containing the values of the model covariates at which predictions are required. If this is not provided then predictions corresponding to the original data are returned. If \code{newdata} is provided then it should contain all the variables needed for prediction: a warning is generated if not. See details for use with \code{link{linear.functional.terms}}. } \item{object}{ a fitted \code{gam} object as produced by \code{gam()}. } \item{...}{Further arguments passed to \code{\link[mgcv]{predict.gam}} and \code{\link{get_hazard}}} \item{ci}{\code{logical}. Indicates if confidence intervals should be calculated. Defaults to \code{TRUE}.} \item{overwrite}{Should hazard columns be overwritten if already present in the data set? Defaults to \code{FALSE}. If \code{TRUE}, columns with names \code{c("hazard", "se", "lower", "upper")} will be overwritten.} \item{alpha}{The alpha level for confidence/credible intervals.} \item{n_sim}{Number of simulations (draws from posterior of estimated coefficients) on which estimation of CIFs and their confidence/credible intervals will be based on.} \item{cause_var}{Character. Column name of the 'cause' variable.} \item{time_var}{Name of the variable used for the baseline hazard. If not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else \code{"interval"}. The latter is assumed to be a factor, the former numeric.} } \description{ Add cumulative incidence function to data } pammtools/man/geom_stepribbon.Rd0000644000176200001440000000734014452536154016522 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplot-extensions.R \docType{data} \name{geom_stepribbon} \alias{geom_stepribbon} \alias{GeomStepribbon} \title{Step ribbon plots.} \usage{ geom_stepribbon( mapping = NULL, data = NULL, stat = "identity", position = "identity", direction = "hv", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ... 
) } \arguments{ \item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}}. If specified and \code{inherit.aes = TRUE} (the default), it is combined with the default mapping at the top level of the plot. You must supply \code{mapping} if there is no plot mapping.} \item{data}{The data to be displayed in this layer. There are three options: If \code{NULL}, the default, the data is inherited from the plot data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}. A \code{data.frame}, or other object, will override the plot data. All objects will be fortified to produce a data frame. See \code{\link[ggplot2:fortify]{fortify()}} for which variables will be created. A \code{function} will be called with a single argument, the plot data. The return value must be a \code{data.frame}, and will be used as the layer data. A \code{function} can be created from a \code{formula} (e.g. \code{~ head(.x, 10)}).} \item{stat}{The statistical transformation to use on the data for this layer, either as a \code{ggproto} \code{Geom} subclass or as a string naming the stat stripped of the \code{stat_} prefix (e.g. \code{"count"} rather than \code{"stat_count"})} \item{position}{Position adjustment, either as a string naming the adjustment (e.g. \code{"jitter"} to use \code{position_jitter}), or the result of a call to a position adjustment function. Use the latter if you need to change the settings of the adjustment.} \item{direction}{direction of stairs: 'vh' for vertical then horizontal, 'hv' for horizontal then vertical, or 'mid' for step half-way between adjacent x-values.} \item{na.rm}{If \code{FALSE}, the default, missing values are removed with a warning. If \code{TRUE}, missing values are silently removed.} \item{show.legend}{logical. Should this layer be included in the legends? \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE} never includes, and \code{TRUE} always includes. 
It can also be a named logical vector to finely select the aesthetics to display.} \item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather than combining with them. This is most useful for helper functions that define both data and aesthetics and shouldn't inherit behaviour from the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.} \item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{colour = "red"} or \code{size = 3}. They may also be parameters to the paired geom/stat.} } \description{ \code{geom_stepribbon} is an extension of the \code{geom_ribbon}, and is optimized for Kaplan-Meier plots with pointwise confidence intervals or a confidence band. The default \code{direction}-argument \code{"hv"} is appropriate for right-continuous step functions like the hazard rates etc returned by \code{pammtools}. } \examples{ library(ggplot2) huron <- data.frame(year = 1875:1972, level = as.vector(LakeHuron)) h <- ggplot(huron, aes(year)) h + geom_stepribbon(aes(ymin = level - 1, ymax = level + 1), fill = "grey70") + geom_step(aes(y = level)) h + geom_ribbon(aes(ymin = level - 1, ymax = level + 1), fill = "grey70") + geom_line(aes(y = level)) } \seealso{ \code{\link[ggplot2]{geom_ribbon}} \code{geom_stepribbon} inherits from \code{geom_ribbon}. } \keyword{datasets} pammtools/man/int_info.Rd0000644000176200001440000000311114222504522015126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interval-information.R \name{int_info} \alias{int_info} \alias{int_info.default} \alias{int_info.data.frame} \alias{int_info.ped} \alias{int_info.pamm} \title{Create start/end times and interval information} \usage{ int_info(x, ...) \method{int_info}{default}(x, min_time = 0L, ...) \method{int_info}{data.frame}(x, min_time = 0L, ...) \method{int_info}{ped}(x, ...) \method{int_info}{pamm}(x, ...) 
} \arguments{ \item{x}{A numeric vector of cut points in which the follow-up should be partitioned in or object of class \code{ped}.} \item{...}{Currently ignored.} \item{min_time}{Only intervals that have lower borders larger than this value will be included in the resulting data frame.} } \value{ A data frame containing the start and end times of the intervals specified by the \code{x} argument. Additionally, the interval length, interval mid-point and a factor variable indicating the intervals. } \description{ Given interval breaks points, returns data frame with information on interval start time, interval end time, interval length and a factor variable indicating the interval (left open intervals). If an object of class \code{ped} is provided, extracts unique interval information from object. } \examples{ ## create interval information from cut points int_info(c(1, 2.3, 5)) ## extract interval information used to create ped object tdf <- data.frame(time=c(1, 2.3, 5), status=c(0, 1, 0)) ped <- tdf \%>\% as_ped(Surv(time, status)~., id="id") int_info(ped) } \seealso{ as_ped ped_info } \keyword{internal} pammtools/man/get_tdc_form.Rd0000644000176200001440000000101414222504522015755 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-utils.R \name{get_tdc_form} \alias{get_tdc_form} \title{Extract variables from the left-hand-side of a formula} \usage{ get_tdc_form( formula, data = NULL, tdc_specials = c("concurrent", "cumulative"), invert = FALSE ) } \arguments{ \item{formula}{A \code{\link{formula}} object.} } \description{ Extract variables from the left-hand-side of a formula Extract variables from the right-hand side of a formula } \keyword{internal} pammtools/man/get_cif.Rd0000644000176200001440000000065014222504522014726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add-functions.R \name{get_cif} \alias{get_cif} \alias{get_cif.default} \title{Calculate CIF 
for one cause} \usage{ get_cif(newdata, object, ...) \method{get_cif}{default}( newdata, object, ci, time_var, alpha, n_sim, cause_var, coefs, V, sim_coef_mat, ... ) } \description{ Calculate CIF for one cause } \keyword{internal} pammtools/man/combine_df.Rd0000644000176200001440000000112214222504522015406 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make-newdata.R \name{combine_df} \alias{combine_df} \title{Create a data frame from all combinations of data frames} \usage{ combine_df(...) } \arguments{ \item{...}{Data frames that should be combined to one data frame. Elements of first df vary fastest, elements of last df vary slowest.} } \description{ Works like \code{\link[base]{expand.grid}} but for data frames. } \examples{ combine_df( data.frame(x=1:3, y=3:1), data.frame(x1=c("a", "b"), x2=c("c", "d")), data.frame(z=c(0, 1))) } \keyword{internal} pammtools/man/get_term.Rd0000644000176200001440000000155014222504522015134 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get-terms.R \name{get_term} \alias{get_term} \title{Extract partial effects for specified model terms} \usage{ get_term(data, fit, term, n = 100, ...) } \arguments{ \item{data}{A data frame containing variables used to fit the model. Only first row will be used.} \item{fit}{A fitted object of class \code{\link[mgcv]{gam}}.} \item{term}{The (non-linear) model term of interest.} \item{n}{Specify the output sequence either by supplying the length of the sequence with \code{n}, or the spacing between value with \code{by}. Specifying both is an error. 
I recommend that you name these arguments in order to make it clear to the reader.} \item{...}{Further arguments passed to \code{\link{seq_range}}.} } \description{ Extract partial effects for specified model terms } \keyword{internal} pammtools/man/get_cumu_eff.Rd0000644000176200001440000000243314222504522015757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cumulative-effect.R, R/viz-elra.R \name{get_cumu_eff} \alias{get_cumu_eff} \alias{gg_cumu_eff} \title{Calculate (or plot) cumulative effect for all time-points of the follow-up} \usage{ get_cumu_eff(data, model, term, z1, z2 = NULL, se_mult = 2) gg_cumu_eff(data, model, term, z1, z2 = NULL, se_mult = 2, ci = TRUE) } \arguments{ \item{data}{Data used to fit the \code{model}.} \item{model}{A suitable model object which will be used to estimate the partial effect of \code{term}.} \item{term}{A character string indicating the model term for which partial effects should be plotted.} \item{z1}{The exposure profile for which to calculate the cumulative effect. Can be either a single number or a vector of same length as unique observation time points.} \item{z2}{If provided, calculated cumulative effect is for the difference between the two exposure profiles (g(z1,t)-g(z2,t)).} \item{se_mult}{Multiplicative factor used to calculate confidence intervals (e.g., lower = fit - 2*se).} \item{ci}{Logical. Indicates if confidence intervals for the \code{term} of interest should be calculated/plotted. 
Defaults to \code{TRUE}.} } \description{ Calculate (or plot) cumulative effect for all time-points of the follow-up } pammtools/man/seq_range.Rd0000644000176200001440000000265414222504522015300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{seq_range} \alias{seq_range} \title{Generate a sequence over the range of a vector} \usage{ seq_range(x, n, by, trim = NULL, expand = NULL, pretty = FALSE) } \arguments{ \item{x}{A numeric vector} \item{n, by}{Specify the output sequence either by supplying the length of the sequence with \code{n}, or the spacing between value with \code{by}. Specifying both is an error. I recommend that you name these arguments in order to make it clear to the reader.} \item{trim}{Optionally, trim values off the tails. \code{trim / 2 * length(x)} values are removed from each tail.} \item{expand}{Optionally, expand the range by \code{expand * (1 + range(x)} (computed after trimming).} \item{pretty}{If \code{TRUE}, will generate a pretty sequence. If \code{n} is supplied, this will use \code{\link{pretty}()} instead of \code{\link{seq}()}. 
If \code{by} is supplied, it will round the first value to a multiple of \code{by}.} } \description{ Stolen from \href{https://github.com/tidyverse/modelr/blob/master/R/seq_range.R}{here} } \examples{ x <- rcauchy(100) seq_range(x, n = 10) seq_range(x, n = 10, trim = 0.1) seq_range(x, by = 1, trim = 0.1) # Make pretty sequences y <- runif (100) seq_range(y, n = 10) seq_range(y, n = 10, pretty = TRUE) seq_range(y, n = 10, expand = 0.5, pretty = TRUE) seq_range(y, by = 0.1) seq_range(y, by = 0.1, pretty = TRUE) } pammtools/man/tidy_smooth2d.Rd0000644000176200001440000000124214222504522016114 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidiers.R \name{tidy_smooth2d} \alias{tidy_smooth2d} \title{Extract 2d smooth objects in tidy format.} \usage{ tidy_smooth2d( x, keep = c("x", "y", "fit", "se", "xlab", "ylab", "main"), ci = FALSE, ... ) } \arguments{ \item{x}{ a fitted \code{gam} object as produced by \code{gam()}.} \item{keep}{A vector of variables to keep.} \item{ci}{A logical value indicating whether confidence intervals should be calculated and returned. Defaults to \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[mgcv]{plot.gam}}} } \description{ Extract 2d smooth objects in tidy format. } pammtools/man/tidy_fixed.Rd0000644000176200001440000000145214222504522015457 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidiers.R \name{tidy_fixed} \alias{tidy_fixed} \alias{tidy_fixed.gam} \alias{tidy_fixed.coxph} \title{Extract fixed coefficient table from model object} \usage{ tidy_fixed(x, ...) \method{tidy_fixed}{gam}(x, intercept = FALSE, ...) \method{tidy_fixed}{coxph}(x, ...) } \arguments{ \item{x}{A model object.} \item{...}{Currently not used.} \item{intercept}{Should intercept also be returned? 
Defaults to \code{FALSE}.} } \description{ Given a model object, returns a data frame with columns \code{variable}, \code{coef} (coefficient), \code{ci_lower} (lower 95\\% CI) and \code{ci_upper} (upper 95\\% CI). } \examples{ library(survival) gc <- coxph(Surv(days, status)~age + sex, data = tumor) tidy_fixed(gc) } \keyword{internal} pammtools/man/pamm.Rd0000644000176200001440000000436014222504522014262 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pammfit.R \name{pamm} \alias{pamm} \alias{is.pamm} \alias{print.pamm} \alias{summary.pamm} \alias{plot.pamm} \title{Fit a piece-wise exponential additive model} \usage{ pamm(formula, data = list(), ..., trafo_args = NULL, engine = "gam") is.pamm(x) \method{print}{pamm}(x, ...) \method{summary}{pamm}(object, ...) \method{plot}{pamm}(x, ...) } \arguments{ \item{formula}{ A GAM formula, or a list of formulae (see \code{\link[mgcv]{formula.gam}} and also \code{\link[mgcv]{gam.models}}). These are exactly like the formula for a GLM except that smooth terms, \code{\link[mgcv]{s}}, \code{\link[mgcv]{te}}, \code{\link[mgcv]{ti}} and \code{\link[mgcv]{t2}}, can be added to the right hand side to specify that the linear predictor depends on smooth functions of predictors (or linear functionals of these). } \item{data}{ A data frame or list containing the model response variable and covariates required by the formula. By default the variables are taken from \code{environment(formula)}: typically the environment from which \code{gam} is called.} \item{...}{Further arguments passed to \code{engine}.} \item{trafo_args}{A named list. If data is not in PED format, \code{as_ped} will be called internally with arguments provided in \code{trafo_args}.} \item{engine}{Character name of the function that will be called to fit the model. 
The intended entries are either \code{"gam"} or \code{"bam"} (both from package \code{mgcv}).} \item{x}{Any R object.} \item{object}{An object of class \code{pamm} as returned by \code{\link{pamm}}.} } \description{ A thin wrapper around \code{\link[mgcv]{gam}}, however, some arguments are prespecified: \code{family=poisson()} and \code{offset=data$offset}. These two can not be overwritten. In many cases it will also be advisable to set \code{method="REML"}. } \examples{ ped <- tumor[1:100, ] \%>\% as_ped(Surv(days, status) ~ complications, cut = seq(0, 3000, by = 50)) pam <- pamm(ped_status ~ s(tend) + complications, data = ped) summary(pam) ## Alternatively pamm( ped_status ~ s(tend) + complications, data = tumor[1:100, ], trafo_args = list(formula = Surv(days, status)~complications)) } \seealso{ \code{\link[mgcv]{gam}} } \keyword{internal} pammtools/man/specials.Rd0000644000176200001440000000475514222504522015143 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-specials.R \name{cumulative} \alias{cumulative} \alias{concurrent} \alias{has_special} \title{Formula specials for defining time-dependent covariates} \usage{ cumulative(..., tz_var, ll_fun = function(t, tz) t >= tz, suffix = NULL) concurrent(..., tz_var, lag = 0, suffix = NULL) has_special(formula, special = "cumulative") } \arguments{ \item{...}{For \code{concurrent} variables that will be transformed to covariate matrices. The number of columns of each covariate depends on \code{tz}. 
Usually, elements that will be specified here are \code{time} (which should be the name of the time-variable used on the LHS of the formula argument to \code{as_ped}), \code{tz} which is the variable containing information on the times at which the TDC was observed (can be wrapped in \code{latency}) and the TDCs that share the same \code{tz} and Lag-lead window (\code{ll_fun}).} \item{tz_var}{The name of the variable that stores information on the times at which the TDCs specified in this term where observed.} \item{ll_fun}{Function that specifies how the lag-lead matrix should be constructed. First argument is the follow up time second argument is the time of exposure.} \item{lag}{a single positive number giving the time lag between for a concurrent effect to occur (i.e., the TDC at time of exposure \code{t-lag} affects the hazard in the interval containing follow-up time \code{t}). Defaults to 0.} \item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object on the left-hand-side and covariate specification on the right-hand-side (RHS). The RHS can be an extended formula, which specifies how TDCs should be transformed using specials \code{concurrent} and \code{cumulative}. The left hand-side can be in start-stop-notation. This, however, is only used to create left-truncated data and does not support the full functionality.} \item{special}{The name of the special whose existence in the \code{formula} should be checked} } \description{ So far, two specials are implemented. \code{concurrent} is used when the goal is to estimate a concurrent effect of the TDC. \code{cumulative} is used when the goal is to estimate a cumulative effect of the TDC. These should usually not be called directly but rather as part of the \code{formula} argument to \code{as_ped}. See the \href{https://adibender.github.io/pammtools//articles/data-transformation.html}{vignette on data transformation} for details. 
} \keyword{internal} pammtools/man/fcumu.Rd0000644000176200001440000000064313662013606014454 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sim-pexp.R \name{fcumu} \alias{fcumu} \title{A formula special used to handle cumulative effect specifications} \usage{ fcumu(..., by = NULL, f_xyz, ll_fun) } \description{ Can be used in the second part of the formula specification provided to \code{\link[pammtools]{sim_pexp}} and should only be used in this context. } \keyword{internal} pammtools/man/get_ped_form.Rd0000644000176200001440000000077214222504522015765 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-utils.R \name{get_ped_form} \alias{get_ped_form} \title{Extract variables from the left-hand-side of a formula} \usage{ get_ped_form( formula, data = NULL, tdc_specials = c("concurrent", "cumulative") ) } \arguments{ \item{formula}{A \code{\link{formula}} object.} } \description{ Extract variables from the left-hand-side of a formula Extract variables from the right-hand side of a formula } \keyword{internal} pammtools/man/compute_cumu_diff.Rd0000644000176200001440000000140014222504522017015 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cumulative-coefficient.R \name{compute_cumu_diff} \alias{compute_cumu_diff} \title{Calculate difference in cumulative hazards and respective standard errors} \usage{ compute_cumu_diff(d1, d2, model, alpha = 0.05, nsim = 100L) } \arguments{ \item{d1}{A data set used as \code{newdata} in \code{predict.gam}} \item{d2}{See \code{d1}} \item{model}{A model object for which a predict method is implemented which returns the design matrix (e.g., \code{mgcv::gam}).} } \description{ CIs are calculated by sampling coefficients from their posterior and calculating the cumulative hazard difference \code{nsim} times. The CI are obtained by the 2.5\\% and 97.5\\% quantiles. 
} \keyword{internal} pammtools/DESCRIPTION0000644000176200001440000000452314453640332014003 0ustar liggesusersPackage: pammtools Title: Piece-Wise Exponential Additive Mixed Modeling Tools for Survival Analysis Version: 0.5.92 Date: 2023-07-09 Authors@R: c( person("Andreas", "Bender", , "andreas.bender@stat.uni-muenchen.de", role = c("aut", "cre"), comment=c(ORCID = "0000-0001-5628-8611")), person("Fabian", "Scheipl", , "fabian.scheipl@stat.uni-muenchen.de", role = c("aut"), comment = c(ORCID = "0000-0001-8172-3603")), person("Philipp", "Kopper", , "philipp.kopper@stat.uni-muenchen.de", role = c("aut"), comment = c(ORCID="0000-0002-5037-7135")), person("Lukas", "Burk", , "burk@leibniz-bips.de", role = c("ctb"), comment = c(ORCID="0000-0001-7528-3795"))) Description: The Piece-wise exponential (Additive Mixed) Model (PAMM; Bender and others (2018) ) is a powerful model class for the analysis of survival (or time-to-event) data, based on Generalized Additive (Mixed) Models (GA(M)Ms). It offers intuitive specification and robust estimation of complex survival models with stratified baseline hazards, random effects, time-varying effects, time-dependent covariates and cumulative effects (Bender and others (2019)), as well as support for left-truncated, competing risks and recurrent events data. pammtools provides tidy workflow for survival analysis with PAMMs, including data simulation, transformation and other functions for data preprocessing and model post-processing as well as visualization. 
Depends: R (>= 3.5.0) Imports: mgcv, survival (>= 2.39-5), checkmate, magrittr, rlang, tidyr (>= 1.0.0), ggplot2 (>= 3.2.2), dplyr (>= 1.0.0), purrr (>= 0.2.3), tibble, lazyeval, Formula, mvtnorm, pec, vctrs (>= 0.3.0) Suggests: testthat License: MIT + file LICENSE LazyData: true URL: https://adibender.github.io/pammtools/ BugReports: https://github.com/adibender/pammtools/issues RoxygenNote: 7.1.2 Encoding: UTF-8 NeedsCompilation: no Packaged: 2023-07-11 17:36:55 UTC; ab Author: Andreas Bender [aut, cre] (), Fabian Scheipl [aut] (), Philipp Kopper [aut] (), Lukas Burk [ctb] () Maintainer: Andreas Bender Repository: CRAN Date/Publication: 2023-07-13 00:10:02 UTC pammtools/tests/0000755000176200001440000000000014014472751013434 5ustar liggesuserspammtools/tests/testthat/0000755000176200001440000000000014453640332015273 5ustar liggesuserspammtools/tests/testthat/test-interval-functions.R0000644000176200001440000000320014222504522022212 0ustar liggesuserscontext("Interval info and median and modus information") data("tumor") ped <- tumor[1:200, ] %>% as_ped(Surv(days, status)~ complications + age, cut=seq(0,400, by=100)) ped <- filter(ped, id %in% c(1:3, 135:137)) test_that("Interval infos correct", { expect_data_frame(int_info(1:2), nrows=2L, ncols=5L) expect_data_frame(int_info(2:1), nrows=2L, ncols=5L) expect_data_frame(int_info(data.frame(x1 = c(1,0), x2=c(2, 1))), nrows = 2L, ncols = 5L) expect_equal(names(int_info(1:2)), c("tstart", "tend", "intlen", "intmid", "interval")) expect_equal(levels(int_info(1:2)$interval), c("(0,1]", "(1,2]")) }) test_that("Interval info returned for ped objects", { expect_data_frame(int_info(ped), nrows=4L, ncols=5L, types=c("numeric", "factor")) }) test_that("Sample info returned for data frame", { expect_data_frame(si <- sample_info(tumor), nrows=1L, ncols=9L) expect_equal(colnames(si), colnames(tumor)) expect_data_frame(si <- tumor %>% group_by(complications, status) %>% sample_info(), nrows=4L, ncols=9L) 
expect_equal(colnames(si), colnames(tumor)) }) test_that("Sample info returned for ped objects", { expect_data_frame(sample_info(ped), nrows=1, ncols=2) }) test_that("Sample info returned for grouped ped objects", { expect_data_frame(group_by(ped, complications) %>% sample_info(), nrows=2, ncols=2) }) test_that("ped info returned for (grouped) ped objects", { # normal expect_data_frame(ped_info(ped), nrows=4L, ncols=7L) #grouped expect_data_frame(group_by(ped, complications) %>% ped_info(), nrows=8L, ncols=7L) # without covariates expect_data_frame(ped_info(select(ped, -complications, -age)), nrows=4L, ncols=5L) }) pammtools/tests/testthat/test-cumulative-effect.R0000644000176200001440000001221314222504522021774 0ustar liggesuserscontext("Cumulative effects (of time-dependent covariates)") test_that("Lag-lead is calculated correctly", { LL <- get_laglead(0:2, c(-2, -0.5, 0, 0.5, 2), ll_fun = function(t, tz) t >= tz) expect_data_frame(LL, nrows = 15L, ncols = 3L) expect_class(LL, "LL_df") expect_identical(LL$t, rep(0:2, each = 5)) expect_identical(LL$tz, rep(c(-2, -0.5, 0, 0.5, 2), times = 3)) expect_equal(LL$LL, c(rep(0, 5), rep(1, 3), rep(0, 2), rep(1, 4), 0)) }) test_that("LL helpers and as_ped produce equivalent LL windows", { n <- 1 # create data set with variables which will affect the hazard rate. 
df <- cbind.data.frame(x1 = runif (n, -3, 3)) %>% dplyr::as_tibble() rng_z <- function(nz) rep(1, nz) # two different exposure times for two different exposures tz1 <- 1:10 tz2 <- -5:5 # generate exposures and add to data set df <- df %>% add_tdc(tz1, rng_z) %>% add_tdc(tz2, rng_z) # define lag-lead window function ll_fun <- function(t, tz) t >= tz ll_fun2 <- function(t, tz) t >= tz + 2 & t <= tz + 2 + 5 # simulate data with cumulative effect sim_df <- sim_pexp( formula = ~ -3.5 - 0.5 * x1 | fcumu(t, tz1, z.tz1, f_xyz = function(t, tz, z) 1, ll_fun = function(t, tz) t >= tz) + fcumu(t, tz2, z.tz2, f_xyz = function(t, tz, z) 1, ll_fun = function(t, tz) t >= tz + 2 & t <= tz + 2 + 5), data = df, cut = 0:10) sim_df$time <- 10 ped <- sim_df %>% as_ped( Surv(time, status) ~ . + cumulative(time, z.tz1, tz_var = "tz1") + cumulative(time, z.tz2, tz_var = "tz2", ll_fun = function(t, tz) (t >= tz + 2) & (t <= tz + 2 + 5)), id = "id") LL1 <- ped$LL_tz1[1:10, ] LL1.1 <- get_laglead(0:10, 1:10, ll_fun) %>% filter(t != 0) %>% tidyr::spread(tz, LL) expect_equal(as.matrix(LL1.1[, -1]), LL1, check.attributes = FALSE) LL2 <- ped$LL_tz2[1:10, ] LL2.2 <- get_laglead(0:10, -5:5, ll_fun2) %>% filter(t != 0 ) %>% tidyr::spread(tz, LL) expect_equal(as.matrix(LL2.2[, -1]), LL2, check.attributes = FALSE) LL1.2 <- get_laglead(ped) %>% filter(tz_var == "tz1") %>% filter(t != 0) %>% tidyr::spread(tz, LL) %>% select(-1:-2) %>% as.matrix() LL2.2 <- get_laglead(ped) %>% filter(tz_var == "tz2") %>% filter(t != 0) %>% tidyr::spread(tz, LL) %>% select(-1:-2) %>% as.matrix() expect_equal(LL1, LL1.2, check.attributes = FALSE) expect_equal(LL2, LL2.2, check.attributes = FALSE) }) test_that("Cumulative effects are calculated correctly", { suppressWarnings(RNGversion("3.5.0")) # tz grid with differences different than 1 # generate exposures and add to data set n <- 250 set.seed(123) # create data set with variables which will affect the hazard rate. 
df <- cbind.data.frame(x1 = runif (n, -3, 3), x2 = runif (n, 0, 6)) %>% tibble::as_tibble() # the formula which specifies how covariates affet the hazard rate f0 <- function(t) { dgamma(t, 8, 2) * 6 } tz3 <- c(-5, -3, 0, 3, 5) rng_z <- function(nz) { as.numeric(arima.sim(n = nz, list(ar = c(.8, -.6)))) } df <- df %>% add_tdc(tz3, rng_z) sim_df <- sim_pexp( formula = ~ -3.5 + f0(t) - 0.5 * x1 + sqrt(x2) | fcumu(t, tz3, z.tz3, f_xyz = function(t, tz, z) 5 * (dnorm(t - tz, 4, 6) + dnorm(t - tz, 25, 4)) * z, ll_fun = function(t, tz) t - 2 >= tz), data = df, cut = 0:10) ped <- as_ped(sim_df, Surv(time, status)~ x1 + x2 + cumulative(latency(tz3), z.tz3, tz_var = "tz3"), cut = 0:10) ped5 <- subset(ped, id == 5) expect_identical(ped5$LL[1, ], c(2.5, 2, 3, rep(0, 2))) expect_identical(ped5$LL[9, ], c(2.5, 2, 3, 3, 2)) expect_identical(ped5$LL[10, ], c(2.5, 2, 3, 3, 2)) pam <- mgcv::gam(ped_status ~ s(tend) + x1 + s(x2) + s(tz3_latency, by = z.tz3), data = ped, family = poisson(), offset = offset) ndf <- make_newdata(ped, tz3_latency = unique(tz3_latency), z.tz3 = c(1)) ndf <- ndf %>% add_term(pam, term = "z.tz3") %>% slice(1:7) expect_equal(ndf$fit, c(.72, .88, 0.73, 0.46, 0.38, 0.26, 0.14), tolerance = 10e-3) ## partial effects partial <- gg_partial(ped, pam, "z.tz3", tend = seq(0, 10, by = 1), tz3_latency = 0:12, z.tz3 = c(1), reference = list(z.tz3 = 1)) expect_is(partial, c("gg", "ggplot")) expect_data_frame(partial$data, nrows = 143L, ncols = 15L) partial_ll <- gg_partial_ll(ped, pam, "z.tz3", tend = seq(0, 10, by = 1), tz3_latency = 0:12, z.tz3 = c(1), reference = list(z.tz3 = 1)) expect_is(partial_ll, c("gg", "ggplot")) expect_data_frame(partial_ll$data, nrows = 53L, ncols = 8L) ## cumulative effect visualization helpers: cumu_eff <- get_cumu_eff(ped, pam, term = "z.tz3", z1 = seq(-1, 1, length.out = 5), z2 = 0) expect_identical(unique(ped$interval), unique(cumu_eff$interval)) expect_matrix(cumu_eff$z.tz3, nrows = 10L, ncols = 5L, any.missing = FALSE) 
expect_identical(cumu_eff$z.tz3[1, ], cumu_eff$z.tz3[2, ]) expect_subset( x = c("cumu_eff", "se_cumu_eff", "cumu_eff_lower", "cumu_eff_upper"), choices = colnames(cumu_eff)) expect_identical(all(cumu_eff$cumu_eff >= cumu_eff$cumu_eff_lower), TRUE) expect_identical(all(cumu_eff$cumu_eff <= cumu_eff$cumu_eff_upper), TRUE) expect_numeric(cumu_eff$se_cumu_eff, lower = 0, finite = TRUE, any.missing = FALSE) }) pammtools/tests/testthat/test-simple-transform.R0000644000176200001440000000557614222504522021704 0ustar liggesuserscontext("Simple transformation to PED data") test_that("Transformation of regular survival data works", { ## preparations data("tumor") tumor <- tumor[c(1:3, 135:137), ] tumor$ID <- sample(1:100, nrow(tumor)) ped_vet <- split_data(data = tumor, Surv(days, status) ~ complications + age, cut = c(0, 100, 400), id = "ID") expect_identical(unique(ped_vet$ID), tumor$ID) tumor$ID <- NULL ## tests expect_data_frame(ped <- tumor %>% as_ped(Surv(days, status)~ complications + age, cut = c(0, 100, 400)), nrows = 12L, ncols = 8L) expect_is(ped, "ped") expect_subset(c("ped_status", "tstart", "tend", "interval", "offset"), names(ped)) expect_is(attr(ped, "breaks"), "numeric") expect_is(attr(ped, "intvars"), "character") expect_is(attr(ped, "id_var"), "character") expect_equal(attr(ped, "id_var"), "id") expect_data_frame(tumor %>% as_ped(Surv(days, status)~ complications + age, cut = c(0, 100, 400), id = "id"), nrows = 12L, ncols = 8L) ## no error, when id in data and additionally specified tumor$id <- seq_len(nrow(tumor)) expect_data_frame(ped <- tumor %>% as_ped(Surv(days, status)~complications, cut = c(0, 100, 400), id = "id"), nrows = 12L, ncols = 7L) ## no error when id already in data but not specified expect_data_frame(tumor %>% as_ped(Surv(days, status)~complications, cut = c(0, 100, 400)), nrows = 12L, ncols = 7L) ## no error when id has different name and is specified accordingly tumor$id2 <- tumor$id expect_data_frame(ped <- tumor %>% as_ped(Surv(days, 
status)~., cut = c(0, 100, 400)), nrows = 12L, ncols = 14L) expect_identical(attr(ped, "id_var"), "id") ## no additional id when different id specified tumor$id <- NULL expect_data_frame(ped <- tumor %>% as_ped(Surv(days, status)~., cut = c(0, 100, 400), id = "id2"), nrows = 12L, ncols = 13L) expect_identical(attr(ped, "id_var"), "id2") tumor$id2 <- NULL # max_time ped <- tumor %>% as_ped(Surv(days, status)~., max_time = 400) expect_data_frame(ped, nrows = 11L, ncols = 13L) expect_identical(max(ped$tend), 400) expect_identical(nlevels(ped$interval), 2L) # inlcude_last tumor[6, "days"] <- 358 ped <- tumor %>% as_ped(Surv(days, status)~.) expect_data_frame(ped, nrows = 11L, ncols = 13L) expect_identical(max(ped$tend), 358) }) test_that("Error on wrong input", { ## preparations data("tumor") tumor <- tumor[c(1:3, 135:137), ] ## tests expect_error(as_ped(tumor, x ~ y, cut = c(0:5, 10, 40))) expect_error(as_ped(tumor, Surv(days2, status) ~., cut = c(0:5, 10, 40))) expect_error(as_ped( data = rename(tumor, ped_time = time), formula = Surv(ped_time, status) ~.)) # already in data set ped_time ## error when specified id variable not unique tumor$id <- rep(1:2, 3) expect_error( as_ped(tumor, Surv(days, status) ~ complications, cut = c(0, 100, 400), id = "id"), regexp = "Specified ID variable.*") }) pammtools/tests/testthat/test-add-functions.R0000644000176200001440000002565614222504522021141 0ustar liggesuserscontext("Convenience functions for calculation of hazard and similar") data("tumor") ped <- tumor[1:200,] %>% as_ped(Surv(days, status)~ age + complications, cut = c(0, 50, 100, 200, 300, 400)) pam <- mgcv::gam(ped_status ~ s(tend, k = 5) + complications, data = ped, family = poisson(), offset = offset) pam2 <- mgcv::gam(ped_status ~ s(tend, k = 5) + complications + s(age), data = ped, family = poisson(), offset = offset) bam <- mgcv::bam(ped_status ~ s(tend, k = 5) + complications, data = ped, family = poisson(), offset = offset, method = "fREML", discrete = TRUE) pem 
<- glm(ped_status ~ 0 + interval + complications, data = ped, family = poisson(), offset = offset) pam3 <- mgcv::gam(ped_status ~ s(tend, k = 5, by = as.factor(complications)) + as.factor(complications), data = ped, family = poisson(), offset = offset) test_that("hazard functions work for PAM", { expect_data_frame(haz <- add_hazard(ped_info(ped), bam), nrows = 5L, ncols = 11L) expect_data_frame(haz <- add_hazard(ped_info(ped), pam), nrows = 5L, ncols = 11L) expect_equal(all(haz$ci_lower < haz$hazard), TRUE) expect_equal(all(haz$ci_upper > haz$hazard), TRUE) expect_equal(round(haz$hazard, 3), c(0.001, 0.001, 0.001, 0.001, 0.001)) expect_equal(round(haz$ci_lower, 3), c(0, 0, 0, 0, 0)) expect_error(add_hazard(haz, pam)) expect_data_frame(add_hazard(haz, pam, overwrite = TRUE), nrows = 5L, ncols = 11L) haz2 <- add_hazard(ped_info(ped), pam, type = "link") expect_equal(all(haz2$ci_lower < haz2$hazard), TRUE) expect_equal(all(haz2$ci_upper > haz2$hazard), TRUE) expect_equal(round(haz2$hazard, 2), c(-7.37, -7.39, -7.41, -7.43, -7.46)) expect_equal(round(haz2$ci_lower, 2), c(-7.93, -7.86, -7.78, -7.83, -7.99)) ## delta rule expect_data_frame(add_hazard(ped_info(ped), bam, ci_type = "delta"), nrows = 5L, ncols = 11L) haz3 <- add_hazard(ped_info(ped), pam, ci_type = "delta") expect_data_frame(haz3, nrows = 5L, ncols = 11L) expect_equal(round(haz3$hazard * 100, 2), c(.06, .06, .06, .06, .06)) expect_equal(round(haz3$se * 100, 2), c(.02, .01, .01, .01, .02)) expect_equal(round(haz3$ci_lower * 100, 2), c(.03, .03, .04, .04, .03)) expect_equal(round(haz3$ci_upper * 100, 2), c(.10, .09, .08, .08, .09)) ## simulation based ci (0.95) haz4 <- add_hazard(ped_info(ped), pam, ci_type = "sim") ## hazard with reference (i.e. 
hazard ratio) hr <- add_hazard(ped_info(ped), pam2, reference = list(age = c(30))) # hazard ratio is constant as age effect not time-varying expect_equal(round(hr$hazard, 3), rep(1.458, 5)) # hr = 1 if reference = data hr2 <- ped_info(ped) %>% add_hazard(pam2, reference = list(age = mean(.$age))) expect_equal(hr2$hazard, rep(1, 5)) ## factor group variable ndf <- ped %>% make_newdata(tend = unique(tend), complications = unique(complications)) %>% group_by(complications) ndf1 <- ndf %>% add_cumu_hazard(pam3, ci = TRUE, ci_type = "default") ndf2 <- ndf %>% add_cumu_hazard(pam3, ci = TRUE, ci_type = "delta") ndf3 <- ndf %>% add_cumu_hazard(pam3, ci = TRUE, ci_type = "sim", nsim = 100L) expect_true(all(ndf1$cumu_hazard > ndf1$cumu_lower & ndf1$cumu_hazard < ndf1$cumu_upper)) expect_true(all(ndf2$cumu_hazard > ndf2$cumu_lower & ndf2$cumu_hazard < ndf2$cumu_upper)) expect_true(all(ndf3$cumu_hazard > ndf3$cumu_lower & ndf3$cumu_hazard < ndf3$cumu_upper)) }) test_that("hazard functions work for PEM", { expect_data_frame(haz <- add_hazard(ped_info(ped), pem), nrows = 5L, ncols = 11L) expect_error(add_hazard(haz, pem)) expect_data_frame(add_hazard(haz, pem, overwrite = TRUE), nrows = 5L, ncols = 11L) }) test_that("cumulative hazard functions work for PAM", { expect_data_frame(add_cumu_hazard(ped_info(ped), bam, ci = FALSE), nrows = 5L, ncols = 8L) expect_data_frame(haz <- add_cumu_hazard(ped_info(ped), pam, ci = FALSE), nrows = 5L, ncols = 8L) expect_data_frame(haz <- add_cumu_hazard(ped_info(ped), pam), nrows = 5L, ncols = 10L) expect_equal(round(haz$cumu_hazard, 2), c(.03, .06, .12, .18, .24)) expect_equal(round(haz$cumu_lower, 2), c(.02, .04, .08, .12, .15)) expect_equal(all(diff(haz$cumu_hazard) >= 0), TRUE) # overwrite works expect_data_frame(add_cumu_hazard(haz, pam, overwrite = TRUE), nrows = 5L, ncols = 10L) # error on wrong input expect_error(add_cumu_hazard(haz, pam)) ## test that cumu_hazard works for grouped data grouped_haz <- ped %>% group_by(complications) %>% 
ped_info() %>% add_cumu_hazard(pam) expect_data_frame(grouped_haz, nrows = 10L, ncols = 10L) expect_equal(round(grouped_haz$cumu_hazard, 2), c(.03, .06, .12, .18, .24, .06, .13, .25, .37, .49)) ## delta method haz2 <- ped_info(ped) %>% add_cumu_hazard(pam, ci_type = "delta") expect_equal(round(haz2$cumu_upper, 2), c(.05, .09, .18, .25, .33)) expect_equal(round(haz2$cumu_lower, 2), c(.01, .03, .07, .11, .15)) suppressWarnings(RNGversion("3.5.0")) ## sim CI (0.95) set.seed(123) haz3 <- ped_info(ped) %>% add_cumu_hazard(pam, ci_type = "sim") expect_equal(round(haz3$cumu_upper, 2), c(.06, .11, .19, .25, .34)) expect_equal(round(haz3$cumu_lower, 2), c(.02, .04, .08, .13, .17)) ## check that hazard columns are not deleted newdata <- ped_info(ped) %>% add_hazard(pam) %>% add_cumu_hazard(pam) expect_data_frame(newdata, nrows = 5L, ncols = 14L) newdata <- ped_info(ped) %>% add_hazard(pam, ci = FALSE) %>% add_cumu_hazard(pam) expect_data_frame(newdata, nrows = 5L, ncols = 11L) }) test_that("cumulative hazard functions work for PEM", { expect_data_frame(haz <- add_cumu_hazard(ped_info(ped), pem), nrows = 5L, ncols = 10L) expect_error(add_cumu_hazard(haz, pem)) expect_data_frame(add_cumu_hazard(haz, pem, overwrite = TRUE), nrows = 5L, ncols = 10L) }) test_that("adding terms works for PAM", { # standard ndf2 <- make_newdata(ped, age = seq_range(age, 3)) pred2 <- ndf2 %>% add_term(pam2, term = "age") expect_equal(round(pred2$fit, 3), c(-.604, -.236, .851)) expect_data_frame(pred2, nrows = 3L, ncols = 12L) # with custom reference pred2 <- ndf2 %>% add_term(pam2, term = "age", reference = list(age = mean(.$age))) expect_equal(round(pred2$fit, 3), c(-.368, 0, 1.087)) expect_data_frame(pred2, nrows = 3L, ncols = 12L) expect_equal(pred2$fit[2], 0) # with overall function application pred3 <- ndf2 %>% add_term(pam2, term = "age", reference = identity(.)) expect_equal(pred3$fit, rep(0, 3)) expect_data_frame(pred3, nrows = 3L, ncols = 12L) expect_equal(pred3$fit, rep(0, 3)) # with 
separately created data frame df_mean <- sample_info(ndf2) pred4 <- ndf2 %>% add_term(pam2, term = "age", reference = df_mean) expect_equal(pred4$fit, pred2$fit) }) test_that("adding terms works for PEM", { expect_data_frame(term <- add_term(ped_info(ped), pem, term = "complications"), nrows = 5L, ncols = 10L) expect_data_frame(ped_info(ped) %>% add_term(pem, term = "age", reference = list(age = mean(.$age))), nrows = 5L, ncols = 10L) }) test_that("warns about / aborts for unknown intervals", { weird <- make_newdata(ped_info(ped), tend = c(150), interval = c("(1.4, 4]")) expect_warning(add_hazard(weird, pam), "not equivalent") expect_error(add_hazard(weird, pem), "not equivalent") }) test_that("works for nonstandard baseline arguments", { pseudonymous <- ped %>% dplyr::rename(stop = tend, int = interval) pseudonymous <- pseudonymous %>% dplyr::mutate(length = stop - tstart) ped <- ped %>% dplyr::mutate(intlen = tend - tstart) p_pam <- mgcv::gam(ped_status ~ s(stop, k = 5) + complications, data = pseudonymous, family = poisson(), offset = offset) p_pem <- glm(ped_status ~ 0 + int + complications, data = pseudonymous, family = poisson(), offset = offset) expect_equal( add_hazard(pseudonymous[1:5, ], p_pam, time_var = "stop")$hazard, add_hazard(ped[1:5, ], pam)$hazard) expect_equal( add_hazard(pseudonymous[1:5, ], p_pem, time_var = "int")$hazard, add_hazard(ped[1:5, ], pem)$hazard) expect_equal( add_cumu_hazard(pseudonymous[1:5, ], p_pam, time_var = "stop", interval_length = length)$cumu_hazard, add_cumu_hazard(ped[1:5, ], pam)$cumu_hazard) expect_equal( add_cumu_hazard(pseudonymous[1:5, ], p_pem, time_var = "int", interval_length = length)$cumu_hazard, add_cumu_hazard(ped[1:5, ], pem)$cumu_hazard) expect_equal( add_cumu_hazard(pseudonymous[1:5, ], p_pem, time_var = "int", interval_length = "length")$cumu_hazard, add_cumu_hazard(ped[1:5, ], pem)$cumu_hazard) }) ## test surv_prob test_that("survival probabilities functions work for PAM", { 
suppressWarnings(RNGversion("3.5.0")) expect_data_frame(add_surv_prob(ped_info(ped), bam, ci = FALSE), nrows = 5L, ncols = 8L) expect_data_frame(surv <- add_surv_prob(ped_info(ped), pam, ci = FALSE), nrows = 5L, ncols = 8L) expect_data_frame( surv <- add_surv_prob(ped_info(ped), pam), nrows = 5L, ncols = 10L) stest <- sapply(surv[, c("surv_prob", "surv_lower", "surv_upper")], function(z) { all(z >= 0 & z <= 1) }) expect_identical(all(stest), TRUE) expect_identical(round(surv$surv_prob, 2), c(0.97, 0.94, 0.88, 0.83, 0.79)) expect_identical(round(surv$surv_lower, 2), c(0.95, 0.90, 0.83, 0.76, 0.68)) expect_identical(round(surv$surv_upper, 2), c(0.98, 0.96, 0.92, 0.89, 0.86)) # check that overwrite works expect_data_frame(add_surv_prob(surv, pam, overwrite = TRUE), nrows = 5L, ncols = 10L) # error on wrong input expect_error(add_surv_prob(surv, pam)) ## test that cumu_hazard works for grouped data grouped_surv <- ped %>% group_by(complications) %>% ped_info() %>% add_surv_prob(pam) expect_data_frame(grouped_surv, nrows = 10L, ncols = 10L) expect_equal(round(grouped_surv$surv_prob, 2), c(0.97, 0.94, 0.88, .83, .79, .94, 0.88, .78, .69, .61)) ## delta CI surv2 <- add_surv_prob(ped_info(ped), pam, ci_type = "delta") expect_equal(round(surv2$surv_lower, 2), c(.95, .91, .84, .78, .72)) expect_equal(round(surv2$surv_upper, 2), c(.99, .97, .93, .89, .86)) # sim CI set.seed(123) surv3 <- add_surv_prob(ped_info(ped), pam, ci_type = "sim") expect_equal(round(surv3$surv_lower, 2), c(.94, .90, .83, .78, .71)) expect_equal(round(surv3$surv_upper, 2), c(.98, .96, .92, .88, .84)) }) test_that("CIF works", { set.seed(211758) df <- data.frame(time = rexp(20), status = sample(c(0,1, 2), 20, replace = TRUE)) ped_cr <- as_ped(df, Surv(time, status)~., id = "id") %>% mutate(cause = as.factor(cause)) pam <- pamm(ped_status ~ s(tend, by = cause), data = ped_cr) ndf <- ped_cr %>% make_newdata(tend = unique(tend), cause = unique(cause)) %>% group_by(cause) %>% add_cif(pam) 
expect_data_frame(ndf, nrows = 26L, ncols = 11L) expect_subset(c("cif", "cif_lower", "cif_upper"), colnames(ndf)) expect_true(all(ndf$cif < ndf$cif_upper)) expect_true(all(ndf$cif > ndf$cif_lower)) expect_true(all(ndf$cif <= 1 & ndf$cif >= 0)) expect_true(all(ndf$cif_lower <= 1 & ndf$cif_lower >= 0)) expect_true(all(ndf$cif_upper <= 1 & ndf$cif_upper >= 0)) }) pammtools/tests/testthat/test-newdata.R0000644000176200001440000000756014222504522020020 0ustar liggesuserscontext("create newdata") test_that("creating newdata works on ungrouped data", { iris2 <- iris %>% group_by(Species) %>% slice(1:2) %>% ungroup() expect_data_frame( make_newdata(iris2), any.missing = FALSE, nrows = 1L, ncols = 5L) expect_equal(colnames(make_newdata(iris2)), colnames(iris2)) expect_data_frame( make_newdata(iris2, Sepal.Length = c(5)), any.missing = FALSE, nrows = 1L, ncols = 5L) expect_equal(make_newdata(iris2, Sepal.Length = c(5))$Sepal.Length, 5) expect_data_frame( make_newdata(iris2, Sepal.Length = c(5, 6)), any.missing = FALSE, nrows = 2L, ncols = 5L) expect_data_frame( make_newdata(iris2, Sepal.Length = seq_range(Sepal.Length, 2)), any.missing = FALSE, nrows = 2L, ncols = 5L) expect_equal( make_newdata(iris2, Sepal.Length = seq_range(Sepal.Length, 2))$Sepal.Length, c(4.9, 7.0)) }) test_that("creating newdata fails on ungrouped data", { iris2 <- iris %>% group_by(Species) %>% slice(2) %>% ungroup() expect_warning(make_newdata(iris2, Sepal.length = c(5))) expect_error(make_newdata(iris2, Sepal.Length = 5)) expect_error(make_newdata(iris2, Sepal.Length = seq_range(Sepal.length, 2))) expect_warning(make_newdata(iris2, Sepal.length = seq_range(Sepal.Length, 2))) }) test_that("make_newdata works for PED data", { ped <- simdf_elra %>% slice(1:6) %>% as_ped(Surv(time, status)~x1 + x2, cut = seq(0, 10, by = 5)) mdf <- ped %>% make_newdata(x1 = seq_range(x1, 2)) expect_data_frame(mdf, nrows = 2L, ncols = 9L) expect_equal(mdf$tend, c(5, 5)) expect_equal(mdf$x1, c(-2.43, 2.54), tolerance = 
1e-2) expect_message(make_newdata(ped, tend = c(2.5))) mdf <- ped %>% make_newdata(tend = c(10), x1 = seq_range(x1, 2)) expect_data_frame(mdf, nrows = 2L, ncols = 9L) mdf <- ped %>% make_newdata(x1 = seq_range(x1, 2), x2 = seq_range(x2, 2)) expect_data_frame(mdf, nrows = 4L, ncols = 9L) mdf <- ped %>% make_newdata(tend = unique(tend), x2 = seq_range(x2, 2)) expect_data_frame(mdf, nrows = 4L, ncols = 9L) }) test_that("make_newdata works for PED with matrix columns", { ped_simdf <- simdf_elra %>% as_ped( Surv(time, status) ~ x1 + x2 + cumulative(time, latency(tz1), z.tz1, tz_var = "tz1", ll_fun = function(t, tz) t >= tz + 2) + cumulative(latency(tz2), z.tz2, tz_var = "tz2"), cut = 0:10) ## sample info expect_data_frame(sdf <- sample_info(ped_simdf), nrows = 1, ncols = 2) expect_equal(sdf$x1, 0.0718, tolerance = 1e-3) expect_equal(sdf$x2, 3.043, tolerance = 1e-3) ## ped info pinf <- ped_info(ped_simdf) expect_data_frame(pinf, nrows = 10L, ncols = 7L) expect_equal(pinf$x1[1], 0.0718, tolerance = 1e-3) expect_equal(pinf$x2[2], 3.043, tolerance = 1e-3) # make newdata nd1 <- ped_simdf %>% make_newdata(x1 = c(0.05)) expect_data_frame(nd1, nrows = 1L, ncols = 16L) expect_equal(nd1$tstart, 0) expect_equal(nd1$tend, 1) expect_equal(nd1$x1, 0.05) expect_equal(nd1$x2, 2.65, tolerance = 1e-3) expect_equal(nd1$z.tz1_tz1, -0.370, 1e-3) nd2 <- ped_simdf %>% make_newdata(x1 = seq_range(x1, 2)) expect_data_frame(nd2, nrows = 2L, ncols = 16L) expect_equal(nd2$x1[1], min(unlist(simdf_elra$x1))) expect_equal(nd2$x1[2], max(unlist(simdf_elra$x1))) nd3 <- ped_simdf %>% make_newdata(tend = unique(tend)) expect_data_frame(nd3, nrows = 10L, ncols = 16L) expect_equal(nd3$tend, 1:10) nd4 <- ped_simdf %>% make_newdata(tz1_latency = c(0:5)) expect_data_frame(nd4, nrows = 6L, ncols = 16L) expect_equal(nd4$tz1_latency, 0:5) nd5 <- ped_simdf %>% make_newdata( tend = c(1:10), tz1_latency = seq(1:5)) expect_data_frame(nd5, nrows = 50L, ncols = 16L) expect_equal(nd5$tend, rep(1:10, 5L)) 
expect_equal(nd5$tz1_latency, rep(1:5, each = 10L)) expect_equal(nd5$LL_tz1, c(rep(0, 10), rep(1, nrow(nd5) - 10))) }) test_that("Errors are thrown", { expect_error(combine_df(data.frame(x = 1), x = 2)) }) pammtools/tests/testthat/test-as-ped.R0000644000176200001440000001042114452536757017560 0ustar liggesuserscontext("Test as_ped functions") test_that("Trafo works and attributes are appended", { # preparations data("tumor") tumor <- tumor[c(1:3, 135:137), ] ped <- as_ped( data = tumor, formula = Surv(days, status)~ complications + age, cut = c(0, 100, 400)) # retransform to ped expect_data_frame(ped, nrow = 12L, ncols = 8L) expect_is(ped, "ped") expect_subset(c("ped_status", "tstart", "tend", "interval", "offset"), names(ped)) expect_is(attr(ped, "breaks"), "numeric") expect_is(attr(ped, "intvars"), "character") expect_is(attr(ped, "id_var"), "character") expect_equal(attr(ped, "id_var"), "id") expect_equal(is.ped(ped), TRUE) ped <- as_ped( data = tumor, formula = Surv(days, status)~ complications + age) expect_data_frame(ped, nrows = 11L, ncols = 8L) }) test_that("Trafo works for list objects (with TDCs)", { data("patient") event_df <- filter(patient, CombinedID %in% c(1110, 1116)) ped <- as_ped(data = list(event_df), formula = Surv(survhosp, PatientDied)~ ., cut = 0:30, id = "CombinedID") expect_data_frame(ped, nrows = 40, ncols = 15) tdc_df <- filter(daily, CombinedID %in% c(1110, 1116)) ## check nesting expect_error(as_ped( data = list(event_df, tdc_df), formula = Surv(survhosp, PatientDied) ~ ., cut = 0:30, id = "CombinedID")) ped <- as_ped( data = list(event_df, tdc_df), formula = Surv(survhosp, PatientDied) ~ . 
+ cumulative(survhosp, Study_Day, caloriesPercentage, tz_var = "Study_Day") + cumulative(proteinGproKG, tz_var = "Study_Day"), cut = 0:30, id = "CombinedID") expect_subset("survhosp_Study_Day_mat", colnames(ped)) expect_data_frame(ped, nrows = 40L, ncols = 20L) expect_identical(any(is.na(ped$caloriesPercentage_Study_Day)), FALSE) expect_identical(colnames(ped$Study_Day), paste0("Study_Day", 1:12)) ped <- as_ped( data = list(event_df, tdc_df), formula = Surv(survhosp, PatientDied) ~ . + cumulative(Study_Day, caloriesPercentage, tz_var = "Study_Day") + cumulative(proteinGproKG, tz_var = "Study_Day"), id = "CombinedID") expect_data_frame(ped, nrows = 2L, ncols = 19L) }) test_that("Trafo works for left truncated data", { mort2 <- mort %>% group_by(id) %>% slice(1) %>% filter(id %in% c(1:3)) mort_ped <- as_ped(Surv(tstart, exit, event) ~ ses, data = mort2) expect_data_frame(mort_ped, nrows = 8L, ncols = 7L) expect_identical(round(mort_ped$tstart, 2), c(0.00, 3.48, 13.46, 17.56, 3.48, 13.46, 0.00, 3.48)) expect_identical(round(mort_ped$tend, 2), c(3.48, 13.46, 17.56, 20.00, 13.46, 17.56, 3.48, 13.46)) expect_identical(round(mort_ped$offset, 2), c(1.25, 2.30, 1.41, 0.89, 2.30, 1.41, 1.25, 2.30)) expect_identical(mort_ped$ped_status, c(rep(0, 5), 1, 0, 0)) expect_identical(mort_ped$ses, factor(rep(c("upper", "lower", "upper"), times = c(4,2,2)))) }) test_that("Trafo works for recurrent events data", { test_df <- data.frame( id = c(1,1, 2,2,2), tstart = c(0, .5, 0, .8, 1.2), tstop = c(.5, 3, .8, 1.2, 3), status = c(1, 0, 1, 1, 0), enum = c(1, 2, 1, 2, 3), age = c(50, 50, 24, 24, 24)) # GAP timescale gap_df <- as_ped( data = test_df, formula = Surv(tstart, tstop, status)~ enum + age, transition = "enum", id = "id", timescale = "gap") expect_data_frame(gap_df, nrows = 9L, ncols = 8L) expect_identical( round(gap_df$tstart, 1), c(0.0, 0.4, 0.0, 0.4, 0.5, 0.0, 0.4, 0.5, 0.0)) expect_identical( round(gap_df$tend, 1), c(0.4, 0.5, 0.4, 0.5, 0.8, 0.4, 0.5, 0.8, 0.4)) 
expect_identical( gap_df$ped_status, c(0, 1, 0, 0, 1, 0, 0, 0, 1) ) expect_identical( gap_df$enum, rep(c(1, 2), times = c(5, 4)) ) ## CALENDAR timescale cal_df <- as_ped( data = test_df, formula = Surv(tstart, tstop, status)~ age, id = "id", transition = "enum", timescale = "calendar") expect_data_frame(cal_df, nrows = 6L, ncols = 8L) expect_identical( round(cal_df$tstart, 1), c(0.0, 0.0, 0.5, 0.5, 0.8, 0.8)) expect_identical( round(cal_df$tend, 1), c(0.5, 0.5, 0.8, 0.8, 1.2, 1.2)) expect_identical( cal_df$ped_status, c(1, 0, 1, 0, 0, 1) ) expect_identical( cal_df$enum, rep(c(1, 2), each = 3) ) }) pammtools/tests/testthat/test-cumulative-coefficients.R0000644000176200001440000000134514222504522023205 0ustar liggesuserscontext("Test cumulative coefficients functionality") test_that("Cumulative coefficients work", { df <- tumor[1:30, c("days", "status", "age")] df$x1 <- as.factor(rep(letters[1:3], each = nrow(df) / 3L)) ## pam ped <- as_ped(df, formula = Surv(days, status)~ x1 + age) pam <- mgcv::gam(ped_status ~ s(tend) + x1 + age, data = ped, family = poisson(), offset = offset) cumu_coef_pam <- get_cumu_coef(pam, ped, terms = c("age", "x1"), nsim = 20L) expect_data_frame(cumu_coef_pam, nrows = 36L, ncols = 6L) expect_equal(unique(cumu_coef_pam$variable), c("age", "x1 (b)", "x1 (c)")) cumu_coef_pam <- get_cumu_coef(pam, ped, terms = c("(Intercept)", "age")) expect_data_frame(cumu_coef_pam, nrows = 24L, ncols = 6L) }) pammtools/tests/testthat/test-specials.R0000644000176200001440000000610414222504522020171 0ustar liggesuserscontext("Test formula special.") test_that("Formula special 'func' works as expected", { ## time + latency + covar (DLNM approach) cumu1 <- eval_special(~ cumulative(t, latency(te), x, tz_var = "te"))[[1]] expect_list(cumu1, any.missing = TRUE, len = 5) expect_identical(cumu1$latency_var, "te") expect_identical(cumu1$tz_var, "te") expect_identical(cumu1$col_vars, c("t", "te", "x")) expect_function(cumu1$ll_fun, args = c("t", "tz")) 
expect_identical(cumu1$suffix, NULL) }) test_that("Formula special 'concurrent' works as expected", { ## time + latency + covar (DLNM approach) ccr1 <- eval_special(~ concurrent(x1, x2, tz_var = "te"), special = "concurrent")[[1]] expect_list(ccr1, any.missing = TRUE, len = 5) expect_identical(ccr1$tz_var, "te") expect_identical(ccr1$col_vars, c("x1", "x2")) expect_function(ccr1$ll_fun, args = c("t")) expect_identical(ccr1$lag, 0) expect_identical(ccr1$suffix, NULL) data("pbc", package = "survival") event_df <- pbc %>% filter(id <= 5) %>% mutate(event = 1L*(status == 2)) %>% select(id, time, event, sex, bili, protime, albumin) tdc_df <- pbcseq %>% filter(id <= 5) %>% select(id, day, bili, protime, albumin) formula <- Surv(time, event)~ concurrent(bili, protime, tz_var = "day") + concurrent(albumin, tz_var = "day") nested_fdf <- nest_tdc(list(event_df, tdc_df), formula, id = "id") ped_ccr <- as_ped(list(event_df, tdc_df), formula, id = "id") }) context("Transformation of longitudinal covariates to functional covariates") test_that("Covariate to matrix Transformation works", { event_df <- filter(patient, CombinedID == 1116) tdc_df <- filter(daily, CombinedID == 1116) ## check nesting nested_df <- nest_tdc( data = list(event_df, tdc_df), formula = Surv(survhosp, status)~ . 
+ cumulative(Study_Day, caloriesPercentage, tz_var="Study_Day") + cumulative(proteinGproKG, tz_var="Study_Day"), cut = 0:30, id = "CombinedID") expect_tibble(nested_df, any.missing=FALSE, nrows=1, ncols=15) expect_identical(colnames(nested_df), c("Year", "CombinedicuID", "CombinedID", "Survdays", "PatientDied", "survhosp", "Gender", "Age", "AdmCatID", "ApacheIIScore", "BMI", "DiagID2", "Study_Day", "caloriesPercentage", "proteinGproKG")) expect_identical(names(attributes(nested_df))[-c(1:3)], c("id_var", "time_var", "status_var", "tdc_vars", "breaks", "func_list", "id_n", "id_tseq", "id_tz_seq")) ## check data trafo expect_error(get_cumulative(nested_df, ~cumulative(t))) f1 <- get_cumulative(nested_df, ~ . + cumulative(survhosp, latency(Study_Day), caloriesPercentage, tz_var = "Study_Day")) expect_list(f1$func_mats, types=c("numeric", "numeric", "numeric", "integer"), any.missing=FALSE, len=4, names="named") f2 <- get_cumulative(nested_df, ~. + cumulative(survhosp, latency(Study_Day), caloriesPercentage, tz_var = "Study_Day") + cumulative(proteinGproKG, tz_var = "Study_Day")) expect_list(f2$func_mats, types=c(rep("numeric", 3), "integer", "numeric"), any.missing = FALSE, len=5, names="named") }) pammtools/tests/testthat/test-mgcv-convenience.R0000644000176200001440000000034513662013606021622 0ustar liggesuserscontext("mgcv convenience functions") test_that("mgcv convenience works", { library(mgcv) g <- gam(Sepal.Length ~ s(Sepal.Width) + s(Petal.Length), data=iris) expect_data_frame(s1d <- tidy_smooth(g), nrows=200, ncols=7) })pammtools/tests/testthat/test-pamm-fit.R0000644000176200001440000000213414222504522020077 0ustar liggesuserscontext("Test pamm wrapper function") test_that("pamm function works correctly", { data("tumor") ped <- as_ped(Surv(days, status)~ complications + age, data = tumor[1:20,]) # gam engine pam <- pamm(ped_status ~ s(tend, k=3) + age, data=ped) expect_is(pam, "pamm") expect_is(summary(pam), "summary.gam") expect_data_frame(int_info(pam), 
nrows = 9L, ncols = 5L) expect_identical(is.pamm(pam), TRUE) # check data trafo from pam object ped_new <- as_ped(pam, newdata = tumor[21:40, ]) expect_data_frame(ped_new, nrows = 144L, ncols = 8L) expect_subset(ped_new$tend, ped$tend) # bam engine pam2 <- pamm(ped_status ~ s(tend, k = 3) + age, data = ped, engine = "bam") expect_true(inherits(pam2, "bam")) expect_data_frame(int_info(pam2), nrows = 9L, ncols = 5L) expect_identical(is.pamm(pam2), TRUE) # pass arguments to bam pam3 <- pamm(ped_status ~ s(tend, k = 3) + age, data = ped, engine = "bam", discrete = TRUE, method = "fREML") expect_true(inherits(pam3, "bam")) expect_data_frame(int_info(pam3), nrows = 9L, ncols = 5L) expect_identical(is.pamm(pam), TRUE) }) pammtools/tests/testthat/test-predict-functions.R0000644000176200001440000000156114222504522022030 0ustar liggesuserscontext("Predict functions") test_that("predict functions work correctly", { data("tumor") ped <- as_ped(Surv(days, status)~ complications, data = tumor[1:20, ]) pam <- pamm(ped_status ~ s(tend, k = 3) + complications, data = ped) pam2 <- pamm(ped_status ~ s(tend, k = 3) + complications, data = ped, engine = "bam", method = "fREML", discrete = TRUE) ## predictSurvProb (pec) generic spmat <- predictSurvProb.pamm(pam, tumor[21:23,], times = c(90, 500, 1217)) expect_identical( round(spmat, 2), matrix( c( rep(.81, 3), rep(.46, 3), rep(.38, 3) ), nrow = 3, ncol = 3 ) ) expect_error(predictSurvProb.pamm(pam, tumor[21:23,], times = c(90, 500, 2000))) spmat2 <- predictSurvProb.pamm(pam2, tumor[21:23,], times = c(90, 500, 1217)) expect_identical(round(spmat, 2), round(spmat2, 2)) } ) pammtools/tests/testthat/test-as-ped-cr.R0000644000176200001440000000541014241673657020161 0ustar liggesuserscontext("Test as_ped_cr functions") test_that("Trafo works and attributes are appended.", { # preparations ped <- as_ped( data = sir_adm, formula = Surv(time, status) ~ age + pneu, cut = c(0, 10, 100) ) expect_data_frame(ped, nrow = 12L * 2L, ncols = 9L) 
expect_is(ped, "ped_cr_union") expect_subset(c("ped_status", "tstart", "tend", "interval", "offset", "cause"), names(ped)) expect_is(attr(ped, "breaks"), "numeric") expect_is(attr(ped, "intvars"), "character") expect_is(attr(ped, "id_var"), "character") expect_equal(attr(ped, "id_var"), "id") expect_equal(sum(as.numeric(ped$cause)), 36) # check that trafo can be recovered ped2 <- as_ped(ped, newdata = sir_adm) expect_equal(ped, ped2, check.attributes = FALSE) # check that list output identical for given cut points ped_list <- as_ped( data = sir_adm, formula = Surv(time, status) ~ age + pneu, cut = c(0, 10, 100), combine = FALSE) ped2 <- do.call(rbind, ped_list) expect_true(all.equal(do.call(rbind, ped_list), ped, check.attributes = FALSE)) expect_identical(length(ped_list), 2L) expect_identical(class(ped_list), c("ped_cr_list", "ped_cr", "ped", "list")) expect_identical(names(attributes(ped_list)), c("class", "names", "trafo_args", "risks")) expect_identical(length(attr(ped_list, "trafo_args")$cut), 2L) # check that trafo can be recovered for ped list objects ped_list2 <- as_ped(ped_list, newdata = sir_adm) expect_equal(ped_list, ped_list2, check.attributes = FALSE) # test when split points not specified ped <- as_ped(data = sir_adm, formula = Surv(time, status) ~ .) 
expect_data_frame(ped, nrows = 56L, ncols = 10L) expect_equal(sum(as.numeric(ped$cause)), 84L) ped_list <- as_ped_cr(sir_adm, Surv(time, status) ~ ., combine = FALSE) expect_identical(attr(ped_list[[1]], "breaks"), c(4L, 10L, 24L, 37L, 101L)) expect_identical(attr(ped_list[[2]], "breaks"), c(22L, 25L)) ped_list2 <- as_ped(ped_list, newdata = sir_adm) expect_equal(ped_list, ped_list2, check.attributes = FALSE) }) test_that("Trafo works for more than two risks.", { # preparations sir_adm$status[2] <- 3 ped <- as_ped( data = sir_adm, formula = Surv(time, status) ~ age + pneu, cut = c(0, 10, 100) ) expect_data_frame(ped, nrow = 12L * 3L, ncols = 9L) expect_is(ped, "ped_cr_union") expect_subset(c("ped_status", "tstart", "tend", "interval", "offset", "cause"), names(ped)) expect_is(attr(ped, "breaks"), "numeric") expect_is(attr(ped, "intvars"), "character") expect_is(attr(ped, "id_var"), "character") expect_equal(attr(ped, "id_var"), "id") expect_equal(sum(as.numeric(ped$cause)), 72) expect_equal(sum(ped$ped_status[ped$cause == 3L]), 1) }) pammtools/tests/testthat/test-tidyverse-S3methods.R0000644000176200001440000000352614222504522022260 0ustar liggesuserscontext("Tidyverse methods for specific classes") test_that("ped class is preserved after dplyr operations", { data("tumor") tumor <- dplyr::slice(tumor, 2:3) ped <- as_ped( data = tumor, formula = Surv(days, status) ~ complications + age, cut = c(0, 100, 400), id = "id") expect_is(filter(ped, id == 1), "ped") expect_is(slice(ped, 1), "ped") expect_is(arrange(ped, desc(id)), "ped") expect_is(select(ped, id), "ped") expect_is(rename(ped, ID = id), "ped") expect_is(mutate(ped, id = id + 1), "ped") expect_is(transmute(ped, id = id + 1), "ped") expect_is(sample_n(ped, 1), "ped") expect_is(sample_frac(ped, 0.5), "ped") expect_is(right_join(distinct(ped, id, interval), ped), "ped") }) test_that("attributes are preserved", { # recurrent events data test_df <- data.frame( id = c(1,1, 2,2,2), tstart = c(0, .5, 0, .8, 1.2), 
tstop = c(.5, 3, .8, 1.2, 3), status = c(1, 0, 1, 1, 0), enum = c(1, 2, 1, 2, 3), age = c(50, 50, 24, 24, 24)) # GAP timescale gap_df <- as_ped( data = test_df, formula = Surv(tstart, tstop, status)~ enum + age, transition = "enum", id = "id", timescale = "gap") expect_subset(names(attributes(gap_df)), c("names", "row.names", "class", "breaks", "id_var", "intvars", "trafo_args", "time_var")) expect_subset( names(attributes(mutate(gap_df, age = 10))), c("names", "row.names", "class", "breaks", "id_var", "intvars", "trafo_args", "time_var")) expect_subset( names(attributes(group_by(gap_df, id))), c("names", "row.names", "class", "breaks", "id_var", "intvars", "trafo_args", "time_var", "groups")) expect_subset( names(attributes(ungroup(group_by(gap_df, id)))), c("names", "row.names", "class", "breaks", "id_var", "intvars", "trafo_args", "time_var")) }) pammtools/tests/testthat/test-tdc-transform.R0000644000176200001440000001001314452536066021160 0ustar liggesuserscontext("Transformation with TDC") test_that("Concurrent TDC are transformed correctly", { data("pbc", package = "survival") # default case with lag = 0 event_df <- filter(pbc, id %in% 1:3) %>% mutate(status = 1L*(status == 1)) tdc_df <- filter(pbcseq, id %in% 1:3) %>% select(id, day, bili, protime) time <- sort(unique(event_df$time))[1:2] tz <- sort(unique(tdc_df$day)) tz <- tz[tz <= max(time)][-1] expect_error(as_ped( data = list(event_df, tdc_df), formula = Surv(time, status) ~. + concurrent(bili, protime, tz_var = "day"), id = "id"), "No events in data") event_df <- filter(pbc, id %in% 1:3) %>% mutate(status = status == 2) %>% select(id, time, status, trt, age, bili, spiders) ped <- as_ped( data = list(event_df, tdc_df), formula = Surv(time, status) ~. 
+ concurrent(bili, protime, tz_var = "day"), id = "id") expect_equal(unique(ped$tend), c(176, 182, 192, 364, 365, 400, 743, 768, 1012)) expect_equal(ped$bili, c(rep(14.5, 3), rep(21.3, 3), rep(1.1, 2), rep(0.8, 3), rep(1, 3), 1.9, 1.4, rep(1.1, 3), rep(1.5, 3), rep(1.8, 2))) # lag != 0 ped <- as_ped( data = list(event_df, tdc_df), formula = Surv(time, status) ~. + concurrent(bili, protime, tz_var = "day", lag = 10), id = "id") expect_equal( unique(ped$tend), sort(c(time, tz + 10))) expect_equal(ped$bili, c(rep(14.5, 3), rep(21.3, 3), rep(1.1, 2), rep(0.8, 3), rep(1, 3), 1.9, 1.4, rep(1.1, 3), rep(1.5, 3), rep(1.8, 2))) # unequal lags ped <- as_ped( data = list(event_df, tdc_df), formula = Surv(time, status) ~. + concurrent(bili, tz_var = "day", lag = 10) + concurrent(protime, tz_var = "day", lag = 0), id = "id") expect_data_frame(ped, nrows = 40, ncols = 11) expect_equal(sum(ped$ped_status), 2) expect_equal(sort(unique(ped$tend)), sort(unique(c(time, tz, tz+10)))) expect_equal(ped$bili, c(rep(14.5, 5), rep(21.3, 5), rep(1.1, 4), rep(0.8, 5), rep(1, 5), 1.9, rep(1.4, 3), rep(1.1, 5), rep(1.5, 4), rep(1.8, 3))) expect_equal(ped$protime, c(rep(12.2, 4), rep(11.2, 6), rep(10.6, 2), rep(11, 5), rep(11.6, 6), rep(10.6, 2), rep(12, 11), rep(13.3, 4))) # when maxtime is set ped <- as_ped( data = list(event_df, tdc_df), formula = Surv(time, status)~. 
+ concurrent(bili, protime, tz_var = "day"), id = "id", max_time = 1400) expect_equal(unique(ped$tend), sort(c(time, tz, 1400))) expect_equal(ped$bili, c(rep(14.5, 3), rep(21.3, 3), rep(1.1, 2), rep(0.8, 3), rep(1.0, 3), rep(1.9, 2), 1.4, rep(1.1, 3), rep(1.5, 3), rep(1.8, 2))) }) test_that("Covariate matrices are created correctly", { data <- simdf_elra %>% filter(id %in% c(1:2)) time <- 0:2 tz <- data %>% dplyr::pull("tz2") %>% unlist() %>% unique() %>% sort() nz <- length(tz) attr(data, "id_tseq") <- rep(1:3, 2) attr(data, "id_tz_seq") <- rep(1:2, times = c(3, 3)) my_ll_fun <- function(t, tz) ( (t - tz) >= 0 & (t - tz) <= 5) expect_class(my_ll_fun, "function") Tmat <- make_time_mat(data, nz) TEmat <- make_z_mat(data, "tz2", nz) Ltmat <- make_latency_mat(data, tz) LLmat <- make_lag_lead_mat(data, tz, ll_fun = my_ll_fun) expect_equal(dim(Tmat), c(6, 11)) expect_equal(dim(TEmat), c(6, 11)) expect_equal(dim(Ltmat), c(6, 11)) expect_equal(dim(LLmat), c(6, 11)) expect_equal(all(Tmat[1, ] == 0), TRUE) expect_equal(all(Tmat[2, ] == 1), TRUE) expect_equal(all(Tmat[3, ] == 2), TRUE) expect_equal(all(TEmat[, 1] == -5), TRUE) expect_equal(all(TEmat[, 11] == 5), TRUE) expect_equal(Ltmat[1, ], c(5:0, rep(0, 5))) expect_equal(Ltmat[3, ], c(7:0, rep(0, 3))) expect_equal(LLmat[1, ], c(rep(1, 6), rep(0, 5))) expect_equal(LLmat[3, ], c(rep(0, 2), rep(1, 6), rep(0, 3))) expect_equal(max(Ltmat * LLmat), 5) ped <- as_ped(data, Surv(time, status) ~ . 
+ cumulative(z.tz2, latency(tz2), tz_var = "tz2", ll_fun = function(t, tz) (t - tz) >= 0 & (t - tz) <= 5), cut = 0:2) expect_equal(max(ped$tz2_latency * ped$LL), 5) }) pammtools/tests/testthat/test-simulation.R0000644000176200001440000000252114222504522020551 0ustar liggesuserscontext("Test simulation functions") test_that("Test that rpexp works", { expect_identical(length(rpexp(n = 1, rate = 1, t = 0)), 1L) expect_error(rpexp(n = 1, rate = 1, t = c(0, 1))) expect_error(rpexp(n = 1, rate = 1, t = 1)) expect_error(rpexp(n = 1, rate = c(1, 1, 1), t = c(0, 2, 1))) expect_identical(rpexp(n=0, rate = 1, t = 0), numeric(0)) expect_identical(length(rpexp(n=c(1, 3), rate = 1, t = 0)), 2L) }) test_that("Simulation function works", { suppressWarnings(RNGversion("3.5.0")) set.seed(24032018) # standard data df <- cbind.data.frame(x1 = runif (3, -3, 3), x2 = runif (3, 0, 6)) sim_df <- sim_pexp(~ -3.5 - 0.5 * x1 + sqrt(x2), df, cut = 0:10) expect_data_frame(sim_df, nrows = 3, ncols = 5) expect_identical(round(sim_df$time, 2), c(1.38, 7.14, 3.02)) # time-dependent covariates rng_z <- function(nz) { as.numeric(arima.sim(n = nz, list(ar = c(.8, -.6)))) } tz1 <- 1:10 df <- df %>% add_tdc(tz1, rng_z) # simulate data with cumulative effect sim_df <- sim_pexp( formula = ~ -3.5 - 0.5 * x1 + sqrt(x2) | fcumu(t, tz1, z.tz1, f_xyz = function(t, tz, z) { -1 * cos(t / 10 * pi) * 0.8 * (dnorm(z, 1.5, 2) + 1.5 * dnorm(z, 7.5, 1)) * 15 * dnorm(t - tz, 8, 10) }, ll_fun = function(t, tz) t >= tz), data = df, cut = 0:10) }) pammtools/tests/testthat/test-model-evaluation.R0000644000176200001440000000107614222504522021636 0ustar liggesuserscontext("Model evaluation helpers") test_that("pec helpers work", { library(pec) data(tumor) ped <- tumor %>% as_ped(Surv(days, status) ~ complications, cut = seq(0, 500, by = 50)) pam <- pamm(ped_status ~ complications, data = ped) suppressMessages({ pec <- pec::pec(list(pam = pam), Surv(days, status) ~ 1, data = tumor, times = seq(.01, 500, by = 100), start = 
.01, exact = FALSE) }) df_ibs <- as.data.frame(pec::crps(pec)) expect_data_frame(df_ibs, nrow = 2, ncol = 3) expect_identical(colnames(df_ibs), c("method", "time", "IBS")) }) pammtools/tests/testthat/test-formula-utils.R0000644000176200001440000000110014222504522021160 0ustar liggesuserscontext("Formula utility functions") test_that("Formula utilities work", { expect_identical(get_rhs_vars(~ x1 + sqrt(x2)), c("x1", "x2")) expect_identical(get_rhs_vars("~ x1 + sqrt(x2)"), c("x1", "x2")) expect_identical(get_tdc_vars( ~ x1 + cumulative(z.tz, tz_var = "tz")), "z.tz") expect_identical(get_ped_form(Surv(time, status) ~ x1 + cumulative(z.tz, tz_var = "tz")), Surv(time, status) ~ x1 ) expect_true(has_lhs(Surv(time, status) ~ .)) expect_identical(get_lhs_vars("Surv(time, status) ~ ."), c("time", "status")) expect_false(has_lhs( ~ .)) }) pammtools/tests/testthat.R0000644000176200001440000000031613720452276015422 0ustar liggesusersSys.setenv("R_TESTS" = "") # see https://github.com/hadley/testthat/issues/86 library(testthat) library(checkmate) library(dplyr) library(purrr) library(tidyr) # library(pammtools) test_check("pammtools") pammtools/R/0000755000176200001440000000000014452540105012466 5ustar liggesuserspammtools/R/tidyverse-methods.R0000644000176200001440000002047214222504522016273 0ustar liggesusersped_classes <- function(ped) { ind_ped <- class(ped) %in% c("ped", "ped_cr", "ped_cr_union", "fped", "nested_fdf") class(ped)[ind_ped] } re_attribute <- function(.data, attr2) { attr1 <- attributes(.data) attributes(.data) <- c(attr1, attr2[setdiff(names(attr1), names(attr2))]) .data } #' @importFrom purrr discard unped <- function(ped, classes_ped = "ped") { class(ped) <- setdiff(class(ped), classes_ped) ped } reped <- function(.data, ped_classes = "ped") { class(.data) <- c(ped_classes, class(.data)) .data } ped_attr <- function( ped, ped_attributes = c("breaks", "id_var", "intvars", "combine", "censor_code", "risks") ) { attr_ped <- attributes(ped) ped_attr_avail <- 
intersect(names(attr_ped), ped_attributes) attr_ped[ped_attr_avail] } unfped <- function(fped) { class(fped) <- class(fped) %>% discard(~.=="fped") fped } refped <- function(.data) { class(.data) <- c("fped", class(.data)) .data } fped_attr <- function(fped) { attributes(fped)[c("breaks", "id_var", "intvars")] } #' @name dplyr_verbs #' @title \code{dplyr} Verbs for \code{ped}-Objects #' @param .data an object of class \code{ped}, see \code{\link{as_ped}}. #' @param tbl an object of class \code{ped}, see \code{\link{as_ped}}. #' @param x an object of class \code{ped}, see \code{\link{as_ped}}. #' @param funs see \code{\link[dplyr]{summarize_all}} #' @param ... see \code{dplyr} documentation #' @param .dots see \code{dplyr} documentation #' @description See \code{dplyr} documentation of the respective functions for #' description and examples. #' @return a modified \code{ped} object (except for \code{do}) #' @import dplyr #' @aliases arrange filter distinct full_join group_by inner_join left_join mutate rename right_join sample_frac sample_n select slice summarise transmute ungroup #' @keywords internal NULL #------------------------------------------------------------------------------- # single table: grouping/sorting #' @export #' @export arrange #' @rdname dplyr_verbs arrange.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- arrange(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @export #' @export group_by #' @rdname dplyr_verbs group_by.ped <- function(.data, ..., .add = FALSE) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- dplyr::group_by(unped(.data, classes_ped), ..., .add = .add) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @export #' @export ungroup #' @rdname dplyr_verbs ungroup.ped <- function(x, ...) 
{ classes_ped <- ped_classes(x) attr_ped <- attributes(x) x <- ungroup(unped(x, classes_ped), ...) x <- reped(x, classes_ped) re_attribute(x, attr_ped) } #' @export #' @export distinct #' @rdname dplyr_verbs distinct.ped <- function(.data, ..., .keep_all = FALSE) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- distinct(unped(.data, classes_ped), ..., .keep_all = .keep_all) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #------------------------------------------------------------------------------- # single table: row ops #' @export #' @export filter #' @rdname dplyr_verbs filter.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- filter(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @export #' @export sample_n #' @inheritParams dplyr::sample_n #' @rdname dplyr_verbs sample_n.ped <- function(tbl, size, replace = FALSE, weight = NULL, .env = NULL, ...) { classes_ped <- ped_classes(tbl) attr_ped <- attributes(tbl) tbl <- sample_n(unped(tbl, classes_ped), size, replace, weight, .env, ...) tbl <- reped(tbl, classes_ped) re_attribute(tbl, attr_ped) } #' @export #' @export sample_frac #' @inheritParams dplyr::sample_frac #' @rdname dplyr_verbs sample_frac.ped <- function(tbl, size = 1, replace = FALSE, weight = NULL, .env = NULL, ...) { classes_ped <- ped_classes(tbl) attr_ped <- attributes(tbl) tbl <- sample_n(unped(tbl, classes_ped), size, replace, weight, .env, ...) tbl <- reped(tbl, classes_ped) re_attribute(tbl, attr_ped) } #' @export #' @export slice #' @rdname dplyr_verbs slice.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- slice(unped(.data, classes_ped), ...) 
.data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #------------------------------------------------------------------------------- # single table: column ops #' @export #' @export select #' @rdname dplyr_verbs select.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- select(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @param keep_attributes conserve attributes? defaults to \code{TRUE} #' @export #' @export mutate #' @rdname dplyr_verbs mutate.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- mutate(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @export #' @export rename #' @rdname dplyr_verbs rename.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- rename(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @export #' @export summarise #' @rdname dplyr_verbs summarise.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- summarise(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #' @export #' @rdname dplyr_verbs summarize.ped <- summarise.ped #' @export #' @export transmute #' @rdname dplyr_verbs transmute.ped <- function(.data, ...) { classes_ped <- ped_classes(.data) attr_ped <- attributes(.data) .data <- transmute(unped(.data, classes_ped), ...) .data <- reped(.data, classes_ped) re_attribute(.data, attr_ped) } #------------------------------------------------------------------------------- # joins #' @inheritParams dplyr::inner_join #' @export #' @export inner_join #' @rdname dplyr_verbs inner_join.ped <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) 
{ classes_ped_x <- ped_classes(x) classes_ped_y <- ped_classes(y) attr_ped_x <- attributes(x) .data <- inner_join(unped(x, classes_ped_x), unped(y, classes_ped_y), by, copy, suffix, ...) .data <- reped(.data, classes_ped_x) re_attribute(.data, attr_ped_x) } #' @inheritParams dplyr::full_join #' @export #' @export full_join #' @rdname dplyr_verbs full_join.ped <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) { classes_ped_x <- ped_classes(x) classes_ped_y <- ped_classes(y) attr_ped_x <- attributes(x) .data <- full_join(unped(x, classes_ped_x), unped(y, classes_ped_y), by, copy, suffix, ...) .data <- reped(.data, classes_ped_x) re_attribute(.data, attr_ped_x) } #' @inheritParams dplyr::left_join #' @export #' @export left_join #' @rdname dplyr_verbs left_join.ped <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) { classes_ped_x <- ped_classes(x) classes_ped_y <- ped_classes(y) attr_ped_x <- attributes(x) .data <- left_join(unped(x, classes_ped_x), unped(y, classes_ped_y), by, copy, suffix, ...) .data <- reped(.data, classes_ped_x) re_attribute(.data, attr_ped_x) } #' @inheritParams dplyr::right_join #' @export #' @export right_join #' @rdname dplyr_verbs right_join.ped <- function(x, y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...) { classes_ped_x <- ped_classes(x) classes_ped_y <- ped_classes(y) attr_ped_x <- attributes(x) .data <- inner_join(unped(x, classes_ped_x), unped(y, classes_ped_y), by, copy, suffix, ...) 
.data <- reped(.data, classes_ped_x) re_attribute(.data, attr_ped_x) } pammtools/R/sysdata.rda0000644000176200001440000004616514222504522014640 0ustar liggesusersBZh91AY&SY0y7}{; nҸuu0jj )Zc-2,wmZi{` TXUX֚kj̪16h5ח@ a 6I 0#F` FFLj`&e46UPi'h i4i4` 1O!b2i&bcF@50h0 <# B S'6jm4diM {A#F qؾЧ`1]"Csd45\oX!GEl ZzOk_\׺f❳cpǧ&܅`.h` IyA YqʌfsY:]AqbМC@KP]hTEHALU\)sw7 V[\^_џBE +]S/J_3^šop}&Y,U Ƞ汒Z&[YHW4" 'eB5xws5Cy2fDz}NNª WRE46+nݒ确ZoOP#)p-XPUV2,X(EDD1V* TR1V"* U$XLQ# EcEPEL D`dF*(FDV),ETb(",EbXŌQQ""DUEv A( TQH"EPEb0PXUEcb bQdA 8DXEYg1UTEPE;0EQ"PAUEH.H*4s$ i2HffP@Dr 4@,$ *0uUb*1UTTQEUN;F#A(EbbTDEb*1"U"$T`FLH"EbN`(ETH"*"1EV*Mxfc"DXEUTYcE;Db"* DDEb* (ĊQTTb1PF"TUR()**1EEFENDDcUQPAMn큰ELH9 P2PRu33; DPみ pɽܙty r$_hK !4UTAP0ZGCá`QΓqC9&Awh$l)$BaSQRHiɕn`_wAPZP@ sd5} h!+T`jIR9KT˲`':ĪxҪ- [T=U s4PkaFQJ8jDk#]OL UKq`dez$VYlČ3\Q41NZ''!UIw0jl PZS`Ne H8-mk"1fCU*MMe®*}{p+׫k-Oڹd->g9LܥI4/kpeQ7f5[LFs7i_ks{}Įsiw[{*7z?E|.'YSqO+GwUZw]s]wr;x |_/bbz~ldllo9~\ p-pw\hzҕ+YJHSSB|n \IQɦOWҡ"R0<ȌȀr!S DSJ& gJjz! M4zBd:ak[ `&l (Dr`rY[A,-VϨc:P.( 0E`tt5Us2~9keф T=Ra=\iRdJ%䘀'-"VzT=BL#ϩlJPF)BuYGӞ׿ 4O/F4h賶ֆ4hѣO4h}a鞔ieZtF? 3x <_7sͽ\|iM{Өhѧ#(XӋMM;)l<$22V4x)಍3=ƙP:Z{=xJg=xg;qFES]X Ŧ~gAL#F>3<qm''v# ,g<\>ne;#9߱:=}=X<9#GoaF4댣F4hF4hѣFhѣ4}xFj4hHѣF4hƏ(yc4{F\=ѣF4z1F4hѣ >4hѣF 4hѣF4hF{?=! '0aMG hPѣߍ>{1F4h#G4hѣF4}PF,hѣƏ0hѣ 4{Gha$ 4h hѣG؍4hѣF4hF4hѣ܍x2z4hѣF4hCF4hѣߝ!LF4hѣF4hѣF424hѣF4hѣF4hѣF4hѣF4hѣF4hѣO$e4hѣF4hѣF4hѣF4hѣF4hѣF4hѣF4hѣF6J!c̦F6ƙFO {S}QN>S~ΦhѣF4|ѣF4hѣF4hѣFhѣF7(F4ek(ѣF4ha$4iQF4hѣF4iφS2~4hѣF4~Lh9g(PѣF4hѣF4hѣƍ>hѣF4hѣF4hѣF4hѣF4h4hѣF4hѣF4hѣF4hѣOhѣF4hѣN(ѣFpe4hѣF4hѣF4hѣF4hӻ{w<)yZ]3ā`,y #\i @H԰A$ hlVF?nVP|I l-櫻֚2ZHc'ODwcAwpcyg4O) 7SyKajlQ?=.bE^"p-Crz EDܠ2 | F (`ab 2{!BF@D*L)IKIbxgT&Di3*aK£b/PPo*02 ( +!8z;{3VuN@ɨ|>^5Y4U{{Gt& Q8Ȋk"8lzhTD"@WSBy0GH|X\92n6~ H!c|'4.oδ1_EvU6BqcX50"1q;n7=375\ֱ9W5@TbDokC3"9cNr`؇b0|V5yFlhnjLRI 5^5HXƮyjqwCk)sB4ޅSyD֛mngsup M "DZ9B*9jD D:(#HH$>Y# aUQdFa2.f abbA\QIHdpdphP."H+D)z题('g?TtoqNJ:Xi)"'_p@F9(x(؊)v18LMBk+J "@"!)OZ Df+vB!b%dWg*_! 
C@1ĐY$Y L$$ `3 LLAU NtI$D[sbplB͚(PƕM$@5GH)uZffKB@W"%B$W9[1 |{_5r7>uIJj0`FB y P*Q^dfACcפL)u]@AZL!K-{Bj"gvlׅ\^#ЈP(PDRJu]ݳvoPy=L &LAB[@e*5. q$vACxvȐD@² RxnZz+lh&:Dtwh! (b,K!4z?BAe r%1ƥ~ftk_فF}U1j+Vfd0`ȁB VK̑2<:Ygf!ܯ8LDB D(P߱\ZCe_4!v'pHXLzЅDAٍo *vVvNMzSؤjxk]E lnxFʡ>"#aY6)r|&,L0`IWۏKSpx* k !ӛqg .$0`d`fbv.9wcug[@n!Ëd=@=hId@V./yW܀*&ޫ}Wlhc= Fc7s9)\9nȵyC|wsA)9E4T )O!\iah|CqR0c]K/o2o`iUJRPhh:^ne-@şͷփ - ݙ^_Nf8O& G?]m o4>uQXLh" *2݉ę̇+{ja0b5Lhn2;Zy.3gxK7Kc聎O4(HSeec9$Gֶ=g/MDp&=%*R qcL(ԤSd6l BgE>zbBM)rǟԬu&~(=*T 61;>o؇D-OtCAPb0^XJL1+b*JJ(a}dId;^!P&Q ML`tAGA >Ï KTa*?`?V ^_!xC8Y8UWָ8 * Pb\pZ@ f1;U0ff>Ug:mVM`62Q91_W@euBc,HL!SRPO qp] 7#on?eɨF(pœ;0`d 1rC1TO}SES(nԝfA̵&sC ^FGqlN %E2z׼@BdɊAB `;N% 'Y(_vKkRшu] % 6O\Oh {RS~~;o=][?eUUR% }W00cX0FXF.8U^R56I`hѣC’eBd7ΰyfШ>\seEFrhp6 ڵd20`?$k|x8BDTfd)S1 FNBÞO2sFz G1vMع,57`|cnb/Jh'Ϟ(P꫔Z{v_tPx8H QW.@9㠹蔂 $WV@~lؓգkb`4%3eڍ\J J5!ymBN*G)zZ8Yn~9f$+jFR0.|TikUpTx#[iܰ`<%6<5O7e*'H1k㮁yF{hue&Tq#P?)tU(:++҉ 7isMa|9茿?Ԡ}BEYQeDz@Z^`ǎݝen&}J9-o^O`&ur1QԽ\у`T:uovmt8F q}Ns&-uI'5o(7Q WzMsJ= u{϶e=vƋ(x"vP ?px$.h%"wd+$b!Vݔ b/ t6'Y;uH!^="|247Ć ̧{1uA@ Dْ镜uʛ$Z% Y.#ϰjF[Uкu]T;lNk# ̻2Z7lNfUyPacV3AV.r&:`s\ f0BBuWaS7!mKrX8Vdfn/kRRLB*Eeq خmQ9#g1k9Iִj]~7N kyၤCXAտli 2SؚrknbɈ8ɭ镆l Ze7q #];Y.P̶das9u4PuTeKu]?˸*\M|Nc'D;;eE{a˺vOF;_uY9||$"z4w]be* ӽ _7c-3jXz wby +'!rMfH,5NIoe9yJ!UZ5)!(Zֹ-|RG)~U@q4+/AriTnFf$ʋK1m'?tdMhވ]$$7_uJw]2ڊe_^g8z\HqUo33n7DDA1}V[{NY<‰= NYFGEˇ1W6:3ڤmնٺoLg+Mo2mszzəS=YO;Mnhji2_T:_NFJ3OMfE>hg{}'d=#,9oEH-#N}GE5މx>'wBQ)XtE2YGAl?~~#=ωMDL@Q5n>b^Q.Ŀ-UrjիS(@Kڭ#|o^̄BFG}BIBߪt]w}r%}bȹ;(sN)(Pތw>^fz)TbŞڔ囻lbbA_ X0 &ڙXsW}Շ噞xj1i BFh*?ψLwENRPB_B34ʅq v5, |A+jgjï"j{$!؇FQ>ь v]_F$È!i\x2fl5;ߢHsTCJH)(Q s^OР(P b~?mϰe@EY~IU=PP~Y̯N+ÿLEKhҒ6h;eE-+dɶVVVN8QC&Z#3ABdY9 +ʱ[ z `oKr:*= qg!}!wÇ8)YɊ${3 42 JAMfI$tScbsiba4yvM6QJ*JA@PjN ճ[+Ų7VƦvvvv0 0qɂAH M5tNʬ>3&/ i2٘RPF1s$Vcߗ9^q!j0I]]]\)(Pmi[ъ 9qV=5˗fU{:AAdG)b_$nZH99Q;_~1g>Ca2T{[[UUUTc)7ԷXk #l"H)Tگ'LdK_h#qMjO2q=m.M%e1b'z^ O*m?᜾?dɷ #xlSm}|* 9yys͕NGojRL!2$!JDf ]Mx`+RY@LRHx7$2$Me -N➠ 恓3 `b f+ $g)PPKYOz "s)EYE ͊l0`pYVۋFw ħӨ|%NX&SD T+J2a1 )R)(Q(K1ApP{8c^ƒ` 
z&\4<EݵEfiOhN>ld`-NY"+ڹ(Qwϭ1&,vabhMA@aKr J98ca^>]DY< &/d62~ԗ'pLE Œ"vƈc3 @v$H0Sԡ" k%VL# Hwx?2X<[APp0c|Y |)۔ x^sxV̓chz3FPQE)V'= LѓFHQ?/^d`?'m~qRnퟥ_g^E粓,L x^xQC`.V| ebL؆xk],]KV20`Dz tm V!{C}kǍ*?G4Xո-_. 2&G naсp*$i{ݿ*EgXes@\䭣f"|"pc)Tgmv"EmC2dɑ39V]g=l|#/]T^y`r텩9kSⱉl:'˗.2@U=tz'Bß$:N9@PFp`p$-UOw:'?e~,viM6j0` .: _ UW9n.\h,4}ĝFF.M|}ήl o~G˿lX)=[F0bާTZzw,x"jX7_Kd`Fkd}.k7]Lr\L220apծ+j ?LT$ 2p;$ f-ԶX.\d`^(l%u5:O{C 50onxf͙-/ ^-^`) έF AnG teV #'W= ׯ^d`;hD7/7oIedO^+O7ktHZ/.O/ooofF >eu~# jkwxO\|oAx!cWg&ʼn0`o: ru( 7 UtA^yXӇ٧ONeDq>tGWtq,/hd(wŊ2Y/I1<;s`n{>o۽&~vmJa  Ieao>-67m0ÿ.\qwZ`\j8v˗ }c )[^L1w^aiS9fͥpwh˗0c19^}Μͻ{՞NhCxm` B`zvЩ[IKjr>x\s|v{C&L1Ӆ ΃)nם+O{Аޠ<-oށ+KԮ2PL/ɯhѣCZmOc*[^-}'6mRqD3f͙`H}䟭6z^k?;`#ZFׯ^d`?1.ipdNI=WD^ITohSf͛3# a篹qJ3e?n]̌1Gpӣa\`F;f yPS^*EnF R 7MO-_[ua;0c^:0 +,FX5/5BlGf2df!x+V[`)06+;;;;0a~[P(ĈLj{٦1Suydɓ## u'#Zet3;z)ޣ؛3f͙!~yA_*zsF;dU9,k,X0bq|5%oqBXs;Qu5Q 3VZ0aH^Hs! ݌Owwws# 1j\;5aW-fG.ݢ݈xxxx20`+ž6M_CP0 {S9&N ___L1ಓZ?||ƚb$?M9FShd`5úQ]_375b␽ Ԉ4'pPGG  l? mqz?j.'2v$ 020`᳿M_ ӬX[G !|` m?ˏɈQ\fARz9: zzzwr$̊$L?2o2'7,N!6(yBh}[鸬3# _r7O|l/'EɰZj Xu@s)зN> r8zx+X0`  X3jdoPl ȉU0sUau-o-GN&F|-0`ݚojE}.P++,g 꺺% G^lw(%J(EŠ'WEnQ+ sbh!w4[F0a/T@^;=uץMCNf谑\X20`tc}ķ}nnT7D1+]2?qL虋 `^97wwwsoPa\-ׄ$h`}H$0 |  1C<%RRO?to}Lqtxxx0 1c/hnKy ax Rpm ' g(*zzzz20` ;m⮠h:B8>{[^ױL2dd`33eg~:Eq:MA߷xR`0`v>~ {~YXnLDhsܩ&Ne =^VN11X6=3Q> .XK7b7^tl(P k2t76c3 Y",zUi1`*džk]LQBz_X~Zn:nDŃt@O$DC]Q]tk]0 1ȵ&)lqj{cOm֜f3 0\1Vrjs%@~J]]]L0cEbҩomc>Xg>Zgq}D==<&20`(MSܷAﻒ)Yx=T#~Ŋ&F 1e?n}o`;-oY)IF 0c~sn~?5jREWcH˗.20`lr)v)sss0` Na)݌lA1`ʷ{]0cY$ b؀YL!7/}kH[8hѣC jF 9y<CoV!O[0xzae *+=0`ïde~,W)mVֹV`(j+ jpEirb.9&Ci?it]2d`,o?3X}o.\+{B5_ma~360٬=\8%.ei09|gAF 0`(rǻMw ǴJ_4_-oVL0c$UM߭%{eGvr;0`n +sTyE^.P]kagF5ڏ}ۿц/c40`0ʢ$8^l&so /_GLXb`y*zˋ SdAu~0`i an{[~ QF)pA1u pD=Λg-Ejd`v ݿ <5TNhtS_f2d^kK@k21Ȃ,ta%՗-*sC}5\>;\r9P~)ˊ6<@U0 D xzǧ 8Ƿ~S=f9w:jզF scjn;P\ۋu@)HgO 5jթ8vyO2jc*M!;mө.\,}m ~lpOc=uq_Ę`# :^m6Hms0XSn/ڦvؐ'{=!f͛a/t-Dkxk(?';9~m׻͐QN.B֛>o2؝' ː =6lJ*pNaMVپ郇tw6nM@0` GHc%}v[ .nc|[\yB)„pammtools/R/zzz.R0000644000176200001440000000041214222504522013441 
0ustar liggesusers.onLoad <- function(libname=find.package("pammtools"), pkgname="pammtools") { if (getRversion() >= "2.5.1") { utils::globalVariables(".") } invisible() } .onAttach <- function(libname=find.package("pammtools"), pkgname="pammtools") { invisible() } pammtools/R/geom-hazard.R0000644000176200001440000001001114222504522014776 0ustar liggesusers#' (Cumulative) (Step-) Hazard Plots. #' #' \code{geom_hazard} is an extension of the \code{geom_line}, and #' is optimized for (cumulative) hazard plots. Essentially, it adds a (0,0) #' row to the data, if not already the case. Stolen from the #' \code{RmcdrPlugin.KMggplot2} (slightly modified). #' #' @seealso #' \code{\link[ggplot2]{geom_line}}, #' \code{\link[ggplot2]{geom_step}}. #' @inheritParams ggplot2::geom_line #' @rdname geom_hazard #' @importFrom ggplot2 layer GeomLine #' @examples #' library(ggplot2) #' library(pammtools) #' ped <- tumor[10:50,] %>% as_ped(Surv(days, status)~1) #' pam <- mgcv::gam(ped_status ~ s(tend), data=ped, family = poisson(), offset = offset) #' ndf <- make_newdata(ped, tend = unique(tend)) %>% add_hazard(pam) #' # piece-wise constant hazards #' ggplot(ndf, aes(x = tend, y = hazard)) + #' geom_vline(xintercept = c(0, ndf$tend[c(1, (nrow(ndf)-2):nrow(ndf))]), lty = 3) + #' geom_hline(yintercept = c(ndf$hazard[1:3], ndf$hazard[nrow(ndf)]), lty = 3) + #' geom_stephazard() + #' geom_step(col=2) + #' geom_step(col=2, lty = 2, direction="vh") #' #' # comulative hazard #' ndf <- ndf %>% add_cumu_hazard(pam) #' ggplot(ndf, aes(x = tend, y = cumu_hazard)) + #' geom_hazard() + #' geom_line(col=2) # doesn't start at (0, 0) #' #' # survival probability #' ndf <- ndf %>% add_surv_prob(pam) #' ggplot(ndf, aes(x = tend, y = surv_prob)) + #' geom_surv() + #' geom_line(col=2) # doesn't start at c(0,1) #' @export geom_hazard <- function( mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...) 
{ layer( data = data, mapping = mapping, stat = stat, geom = GeomHazard, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list(na.rm = na.rm, ... ) ) } #' @rdname geom_hazard #' #' @format NULL #' @usage NULL #' @import ggplot2 #' @export GeomHazard <- ggproto( "GeomHazard", GeomLine, setup_data = function(data, params) { row1 <- data %>% group_by(group) %>% slice(1) row1$x <- 0 row1$y <- 0 data <- bind_rows(row1, data) data[order(data$group, data$x), ] } ) #' @inheritParams ggplot2::geom_step #' @rdname geom_hazard #' @importFrom ggplot2 layer GeomStep #' @export geom_stephazard <- function( mapping = NULL, data = NULL, stat = "identity", position = "identity", direction = "vh", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...) { layer( data = data, mapping = mapping, stat = stat, geom = GeomStepHazard, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list( direction = direction, na.rm = na.rm, ... ) ) } #' @rdname geom_hazard #' @format NULL #' @usage NULL #' @export GeomStepHazard <- ggproto( "GeomStepHazard", GeomStep, setup_data = function(data, params) { row1 <- data %>% group_by(group) %>% slice(1) row1$x <- 0 row1$y <- row1$y data <- bind_rows(row1, data) data[order(data$PANEL, data$group, data$x), ] } ) #' @inheritParams ggplot2::geom_line #' @rdname geom_hazard #' @importFrom ggplot2 layer GeomLine #' @export geom_surv <- function( mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...) { layer( data = data, mapping = mapping, stat = stat, geom = GeomSurv, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list( na.rm = na.rm, ... 
) ) } #' @rdname geom_hazard #' @format NULL #' @usage NULL #' @export GeomSurv <- ggproto( "GeomSurv", GeomLine, setup_data = function(data, params) { row1 <- data %>% group_by(group) %>% slice(1) row1$x <- 0 row1$y <- 1 data <- bind_rows(row1, data) data[order(data$group, data$x), ] } ) pammtools/R/formula-specials.R0000644000176200001440000002754614453305520016076 0ustar liggesusers#' Formula specials for defining time-dependent covariates #' #' So far, two specials are implemented. \code{concurrent} is used when #' the goal is to estimate a concurrent effect of the TDC. \code{cumulative} #' is used when the goal is to estimate a cumulative effect of the TDC. These #' should usually not be called directly but rather as part of the \code{formula} #' argument to \code{as_ped}. #' See the \href{https://adibender.github.io/pammtools//articles/data-transformation.html}{vignette on data transformation} #' for details. #' #' #' @rdname specials #' @importFrom purrr map #' #' @param ... For \code{concurrent} variables that will be transformed to #' covariate matrices. The number of columns of each covariate depends on \code{tz}. #' Usually, elements that will be specified here are \code{time} (which should be #' the name of the time-variable used on the LHS of the formula argument to #' \code{as_ped}), \code{tz} which is the variable containing information on #' the times at which the TDC was observed (can be wrapped in \code{latency}) and #' the TDCs that share the same \code{tz} and Lag-lead window (\code{ll_fun}). #' @param tz_var The name of the variable that stores information on the #' times at which the TDCs specified in this term where observed. #' @param lag a single positive number giving the time lag between for #' a concurrent effect to occur (i.e., the TDC at time of exposure \code{t-lag} #' affects the hazard in the interval containing follow-up time \code{t}). #' Defaults to 0. 
#' #' @inheritParams get_laglead #' #' @export #' @keywords internal cumulative <- function(..., tz_var, ll_fun = function(t, tz) t >= tz, suffix = NULL) { vars <- as.list(substitute(list(...)))[-1] vars_chr <- vars %>% map(~as.character(.)) lgl_latency <- map_lgl(vars_chr, ~any(. %in% "latency")) if (any(lgl_latency)) { latency_var <- unlist(vars_chr)[unlist(vars_chr) != "latency"][lgl_latency] col_vars <- unlist(vars_chr)[unlist(vars_chr) != "latency"] } else { latency_var <- "" col_vars <- unlist(vars_chr) } list( col_vars = col_vars, latency_var = latency_var, tz_var = tz_var, suffix = suffix, ll_fun = ll_fun) } #' @rdname specials #' @inherit cumulative #' @keywords internal concurrent <- function(..., tz_var, lag = 0, suffix = NULL) { assert_number(lag, lower = 0) ll_fun = function(t, tz) {t > tz + lag} vars <- as.list(substitute(list(...)))[-1] vars_chr <- vars %>% map(~as.character(.)) %>% unlist() list( col_vars = vars_chr, tz_var = tz_var, suffix = suffix, ll_fun = ll_fun, lag = lag) } #' Expand time-dependent covariates to functionals #' #' Given formula specification on how time-dependent covariates affect the #' outcome, creates respective functional covariate as well as auxiliary #' matrices for time/latency etc. #' #' @param data Data frame (or similar) in which variables specified in ... #' will be looked for #' @param formula A formula containing \code{cumulative} specials, #' that specify the type of cumulative effect one wants to estimate. For details #' see the vignettes on data transformation and time-dependent covariates. 
#' @importFrom purrr flatten map
#' @importFrom stats terms
#' @keywords internal
get_cumulative <- function(data, formula) {

  stopifnot(has_tdc_form(formula))

  func_list <- eval_special(get_tdc_form(formula, data = data), data = data)
  n_func    <- length(func_list)
  ll_funs   <- map(func_list, ~.x[["ll_fun"]])
  tz_vars   <- map(func_list, ~.x[["tz_var"]])
  tz        <- map(tz_vars, ~pull(data, .x) %>% unlist() %>% unique() %>% sort())
  names(tz) <- names(tz_vars) <- names(ll_funs) <- tz_vars

  ## create matrices
  func_mats <- map(func_list, ~expand_cumulative(data = data, ., n_func = n_func)) %>%
    flatten()

  list(
    func_list = func_list,
    func_mats = func_mats,
    ll_funs   = ll_funs,
    tz_vars   = tz_vars,
    tz        = tz)
}

#' @keywords internal
eval_special <- function(formula, special = "cumulative", data = NULL) {

  tf <- terms(formula, specials = special, data = data)
  ind_special <- attr(tf, "specials")[[special]]
  # evaluate each special term (e.g. cumulative(...)) to its component list
  if (!is.null(ind_special)) {
    terms_vec <- attr(tf, "term.labels")
    map(terms_vec, ~eval(expr = parse(text = .x)))
  } else {
    NULL
  }
}

#' @rdname specials
#' @inheritParams as_ped
#' @param special The name of the special whose existence in the
#' \code{formula} should be checked
#' @keywords internal
has_special <- function(formula, special = "cumulative") {
  has_tdc_form(formula, tdc_specials = special)
}

#' @rdname get_cumulative
#' @inheritParams get_cumulative
#' @param func Single evaluated \code{\link{cumulative}} term.
#' @importFrom purrr map invoke_map
#' @keywords internal
expand_cumulative <- function(data, func, n_func) {

  col_vars <- func$col_vars
  tz_var   <- func$tz_var
  tz       <- pull(data, tz_var) %>% unlist() %>% unique() %>% sort()
  time_var <- attr(data, "time_var")
  id_var   <- attr(data, "id_var")

  lgl_var_in_data <- map_lgl(col_vars, ~. %in% colnames(data))
  if (!all(lgl_var_in_data)) {
    # FIX: collapse the vector of missing names into one message string
    # (previously the names were passed to stop() as a vector)
    stop(paste0("The following variables provided to 'formula' are not contained in 'data': ",
      paste0(col_vars[!lgl_var_in_data], collapse = ", ")))
  }
  ncols_vars <- get_ncols(data, col_vars[!(col_vars == time_var)])
  if (!all(diff(ncols_vars) == 0)) {
    # FIX: paste0() has no `sep` argument; the former `sep = "; "` was
    # concatenated onto each name instead of separating them -> use collapse
    stop(paste0(
      "The following variables have unequal maximum number of elements per ",
      id_var, ": ",
      paste0(col_vars[!(col_vars == time_var)], collapse = "; ")))
  } else {
    nz <- ncols_vars[1]
  }

  # create list of matrices for covariates/time matrices provided in func
  hist_mats <- list()
  for (i in seq_along(col_vars)) {
    hist_mats[[i]] <- if (col_vars[i] == attr(data, "time_var")) {
      make_time_mat(data, nz)
    } else if (col_vars[i] == func$latency_var) {
      make_latency_mat(data, tz)
    } else {
      make_z_mat(data, col_vars[i], nz)
    }
  }
  if (any(c(time_var, tz_var) %in% col_vars)) {
    hist_mats <- c(hist_mats, list(make_lag_lead_mat(data, tz, func$ll_fun)))
    names(hist_mats) <- make_mat_names(c(col_vars, "LL"), func$latency_var,
      tz_var, func$suffix, n_func)
    time_mat_ind <- grepl(time_var, names(hist_mats))
    names(hist_mats)[time_mat_ind] <- paste0(names(hist_mats)[time_mat_ind], "_mat")
  } else {
    names(hist_mats) <- make_mat_names(col_vars, func$latency_var, tz_var,
      func$suffix, n_func)
  }

  hist_mats
}

#' Extract information on concurrent effects
#'
#' @keywords internal
#' @param x A suitable object from which variables contained in
#' \code{formula} can be extracted.
#' @param ... Further arguments passed to methods.
prep_concurrent <- function(x, formula, ...) {
  UseMethod("prep_concurrent", x)
}

#' @rdname prep_concurrent
#' @inherit prep_concurrent
#' @keywords internal
prep_concurrent.list <- function(x, formula, ...) {

  # FIX: initialize so the returned list is well-defined even when the
  # formula contains no "concurrent" special (previously: object not found)
  ccr_list <- NULL
  ccr_time <- NULL
  lgl_concurrent <- has_special(formula, "concurrent")
  if (lgl_concurrent) {
    ccr_list <- eval_special(formula, special = "concurrent", x[[2]])
    ccr_tz_vars <- map_chr(ccr_list, ~.x[["tz_var"]]) %>% unique()
    ccr_time <- map2(ccr_tz_vars, x, ~get_tz(.y, .x)) %>%
      keep(~ !is.null(.x)) %>%
      map2(ccr_list,
        ~ if (is.null(.x)) {
            .x
          } else {
            ifelse(.x == min(.x), .x, .x + .y$lag)
          }) %>% # leave time origin unchanged by lag
      # should just start modeling the hazard at t = lag?!?
      reduce(union) %>%
      sort()
  }

  list(
    ccr_list = ccr_list,
    ccr_time = ccr_time)
}

#' @keywords internal
get_tz <- function(data, tz_var) {
  # unique exposure times of tz_var, or NULL if the column is absent
  if (tz_var %in% colnames(data)) {
    tz <- pull(data, tz_var) %>% unique()
  } else {
    tz <- NULL
  }
  tz
}

#' @keywords internal
#' @importFrom purrr map2
add_concurrent <- function(ped, data, id_var) {

  ccr <- attr(data, "ccr")
  ped_split <- split(ped$tend, f = ped[[id_var]])
  for (ccr_i in ccr[["ccr_list"]]) {
    tdc_vars_i <- ccr_i[["col_vars"]]
    tz_var_i   <- ccr_i[["tz_var"]]
    ccr_vars_i <- c(tz_var_i, tdc_vars_i)
    ccr_i_df <- data %>% select(one_of(c(id_var, ccr_vars_i)))
    ccr_i_df <- ccr_i_df %>% unnest(cols = -one_of(id_var))
    li <- map2(ped_split, split(ccr_i_df, f = ccr_i_df[[id_var]]),
      function(.x, .y) {
        # rowSums over the lag-lead indicator counts, for each interval end
        # point, the number of exposure times already "active"; assuming tz
        # is sorted this count is the row index of the most recent
        # observation (TODO confirm tz ordering assumption upstream)
        ll_ind <- rowSums(outer(.x, .y[[tz_var_i]], ccr_i$ll_fun))
        .y[ll_ind, tdc_vars_i]
      }) %>%
      bind_rows() %>%
      as.data.frame()
    ped <- ped %>% bind_cols(li)
  }
  attr(ped, "ccr") <- ccr

  ped
}

#' @keywords internal
add_cumulative <- function(ped, data, formula) {

  func_components <- get_cumulative(data, formula)
  func_matrices   <- func_components$func_mats
  # attach each functional covariate matrix as a column of ped
  for (i in seq_along(func_matrices)) {
    ped[[names(func_matrices)[i]]] <- func_matrices[[i]]
  }
  attr(ped, "func")    <- func_components$func_list
  attr(ped, "ll_funs") <- func_components$ll_funs
  attr(ped, "tz")      <- func_components$tz
  attr(ped, "tz_vars") <- func_components$tz_vars

  ped
}

make_mat_names <- function(x, ...) {
  UseMethod("make_mat_names", x)
}

#' @keywords internal
make_mat_names.default <- function(
  x,
  latency_var = NULL,
  tz_var      = NULL,
  suffix      = NULL,
  nfunc       = 1,
  ...) {

  if (!is.null(suffix)) {
    return(paste(x, suffix, sep = "_"))
  } else {
    # disambiguate names by tz_var when multiple cumulative terms exist
    if (!is.null(tz_var) && nfunc > 1) {
      tz_ind <- x == tz_var
      x[!tz_ind] <- paste(x[!tz_ind], tz_var, sep = "_")
    }
    if (!is.null(latency_var)) {
      latency_ind <- x == latency_var
      x[latency_ind] <- paste(x[latency_ind], "latency", sep = "_")
    }
  }

  return(x)
}

#' @keywords internal
make_mat_names.list <- function(
  x,
  time_var,
  ...) {

  hist_names <- map(x,
    ~make_mat_names(c(.x[["col_vars"]], "LL"), .x[["latency_var"]],
      .x[["tz_var"]], .x[["suffix"]], nfunc = length(x)))
  time_mat_ind <- map(hist_names, ~grepl(time_var, .))
  for (i in seq_along(time_mat_ind)) {
    hist_names[[i]][time_mat_ind[[i]]] <-
      paste0(hist_names[[i]][time_mat_ind[[i]]], "_mat")
  }

  hist_names
}

#' Create matrix components for cumulative effects
#'
#' These functions are called internally by \code{\link{get_cumulative}} and
#' should usually not be called directly.
#' @rdname elra_matrix
#' @param data A data set (or similar) from which meta information on cut-points,
#' interval-specific time, covariates etc. can be obtained.
#'
#' @keywords internal
make_time_mat <- function(data, nz) {
  # one row per interval visited by any subject; each row repeats the
  # interval break nz times (one column per exposure time point)
  cut_points <- attr(data, "breaks")
  seq_ind    <- attr(data, "id_tseq")
  matrix(cut_points[seq_ind], nrow = length(seq_ind), ncol = nz)
}

#' @rdname elra_matrix
#' @inherit make_time_mat
#' @keywords internal
make_latency_mat <- function(data, tz) {
  # latency = follow-up time minus exposure time, clamped at 0
  lat_mat <- outer(attr(data, "breaks"), tz, FUN = "-")
  lat_mat[lat_mat < 0] <- 0
  lat_mat[attr(data, "id_tseq"), , drop = FALSE]
}

#' @rdname elra_matrix
#' @inherit make_time_mat
#' @keywords internal
make_lag_lead_mat <- function(
  data,
  tz,
  ll_fun = function(t, tz) t >= tz) {

  # 0/1 indicator of the lag-lead window, scaled by integration weights
  ll_mat  <- outer(attr(data, "breaks"), tz, FUN = ll_fun) * 1L
  deltas  <- abs(diff(tz))
  weights <- matrix(c(mean(deltas), deltas), ncol = length(tz),
    nrow = nrow(ll_mat), byrow = TRUE)
  ll_mat <- ll_mat * weights

  ll_mat[attr(data, "id_tseq"), , drop = FALSE]
}

#' @rdname elra_matrix
#' @inherit make_time_mat
#' @param z_var Which should be transformed into functional covariate format
#' suitable to fit cumulative effects in \code{mgcv::gam}.
#' @importFrom purrr map map_int
#' @importFrom dplyr pull
#' @keywords internal
make_z_mat <- function(data, z_var, nz, ...) {

  z_ind <- seq_len(nz)
  # pad/truncate each subject's TDC vector to nz entries, stack row-wise
  z_rows <- map(data[[z_var]], .f = ~unlist(.x)[z_ind])
  z_mat  <- do.call(rbind, z_rows)
  colnames(z_mat) <- paste0(z_var, z_ind)
  # missing entries (shorter exposure histories) contribute 0 to the
  # cumulative effect
  z_mat[is.na(z_mat)] <- 0

  z_mat[attr(data, "id_tz_seq"), , drop = FALSE]
}

get_ncols <- function(data, col_vars) {
  # maximum number of TDC measurements per subject, for each variable
  map(col_vars,
    ~pull(data, .x) %>%
      map_int(function(z) ifelse(is.atomic(z), length(z), nrow(z)))) %>%
    map_int(max)
}
pammtools/R/helpers.R0000644000176200001440000000471213662013606014262 0ustar liggesusers#' Calculate the modus
#'
#' @param var An atomic vector
#' @importFrom checkmate assert_atomic_vector
#' @keywords internal
modus <- function(var) {

  # input checks
  assert_atomic_vector(var, all.missing = FALSE, min.len = 1)
  # most frequent value (first one in case of ties)
  counts <- table(var)
  result <- names(counts)[which.max(counts)]
  # factors should be returned as factors with all factor levels
  if (is.factor(var)) {
    result <- factor(result, levels = levels(var))
  }

  return(result)
}

#' Generate a sequence over the range of a vector
#'
#' Stolen from
#' \href{https://github.com/tidyverse/modelr/blob/master/R/seq_range.R}{here}
#'
#' @param x A numeric vector
#' @param n,by Specify the output sequence either by supplying the
#' length of the sequence with \code{n}, or the spacing between value
#' with \code{by}. Specifying both is an error.
#'
#' I recommend that you name these arguments in order to make it clear to
#' the reader.
#' @param pretty If \code{TRUE}, will generate a pretty sequence. If \code{n}
#' is supplied, this will use \code{\link{pretty}()} instead of
#' \code{\link{seq}()}. If \code{by} is supplied, it will round the first
#' value to a multiple of \code{by}.
#' @param trim Optionally, trim values off the tails.
#' \code{trim / 2 * length(x)} values are removed from each tail.
#' @param expand Optionally, expand the range by \code{expand * (1 + range(x))}
#' (computed after trimming).
#' @examples #' x <- rcauchy(100) #' seq_range(x, n = 10) #' seq_range(x, n = 10, trim = 0.1) #' seq_range(x, by = 1, trim = 0.1) #' #' # Make pretty sequences #' y <- runif (100) #' seq_range(y, n = 10) #' seq_range(y, n = 10, pretty = TRUE) #' seq_range(y, n = 10, expand = 0.5, pretty = TRUE) #' #' seq_range(y, by = 0.1) #' seq_range(y, by = 0.1, pretty = TRUE) #' @export seq_range <- function(x, n, by, trim = NULL, expand = NULL, pretty = FALSE) { if (!missing(n) && !missing(by)) { stop("May only specify one of `n` and `by`", call. = FALSE) } if (!is.null(trim)) { rng <- stats::quantile(x, c(trim / 2, 1 - trim / 2), na.rm = TRUE) } else { rng <- range(x, na.rm = TRUE) } if (!is.null(expand)) { rng <- rng + c(-expand / 2, expand / 2) * (rng[2] - rng[1]) } if (missing(by)) { if (pretty) { pretty(rng, n) } else { seq(rng[1], rng[2], length.out = n) } } else { if (pretty) { rng[1] <- floor(rng[1] / by) * by rng[2] <- ceiling(rng[2] / by) * by } seq(rng[1], rng[2], by = by) } } pammtools/R/viz-elra.R0000644000176200001440000002013214452536154014351 0ustar liggesusers#' Visualize effect estimates for specific covariate combinations #' #' Depending on the plot function and input, creates either a 1-dimensional slices, #' bivariate surface or (1D) cumulative effect. #' #' @import ggplot2 #' @importFrom rlang quos #' #' @inheritParams make_newdata #' @param data Data used to fit the \code{model}. #' @param model A suitable model object which will be used to estimate the #' partial effect of \code{term}. #' @param term A character string indicating the model term for which partial #' effects should be plotted. #' @param reference If specified, should be a list with covariate value pairs, #' e.g. \code{list(x1 = 1, x2=50)}. The calculated partial effect will be relative #' to an observation specified in \code{reference}. #' @param ci Logical. Indicates if confidence intervals for the \code{term} #' of interest should be calculated/plotted. Defaults to \code{TRUE}. 
#' @export gg_partial <- function(data, model, term, ..., reference = NULL, ci = TRUE) { expressions <- quos(...) vars <- names(expressions) n_vars <- length(expressions) ndf <- make_newdata(data, ...) %>% add_term(model, term, reference = reference, ci = ci) n_unique <- map_int(vars, ~length(unique(ndf[[.x]]))) vars <- vars[n_unique > 1] # vars <- vars[n_unique[rev(order(n_unique))] > 1] # ndf <- ndf %>% mutate_at(vars[-1], ~as.factor(.x)) n_vars <- length(vars) gg_base <- ggplot(ndf, aes(x = .data[[vars[1]]])) + xlab(vars[1]) if (n_vars == 1) { gg_out <- gg_base + geom_ribbon(aes(ymin = .data[["ci_lower"]], ymax = .data[["ci_upper"]]), alpha = 0.3) + geom_line(aes(y = .data[["fit"]])) } else { # if (n_vars == 2) { gg_out <- gg_base + aes(y = vars[2], z = .data[["fit"]]) + geom_tile(aes(fill = .data[["fit"]])) + geom_contour(col = "grey30") + scale_y_continuous(expand = c(0, 0)) + scale_x_continuous(expand = c(0, 0)) + scale_fill_gradient2(high = "firebrick2", low = "steelblue") # } } gg_out + theme(legend.position = "bottom") } #' @rdname gg_partial #' @inherit gg_partial #' @importFrom tidyr complete #' @param time_var The name of the variable that was used in \code{model} to #' represent follow-up time. 
#' @export gg_partial_ll <- function( data, model, term, ..., reference = NULL, ci = FALSE, time_var = "tend") { ind_term <- which(map_lgl(attr(data, "func_mat_names"), ~any(grepl(term, .x)))) tv_sym <- sym(time_var) tz_var <- attr(data, "tz_vars")[[ind_term]] tz_val <- attr(data, "tz")[[ind_term]] ll_var <- grep("LL", attr(data, "func_mat_names")[[ind_term]], value = TRUE) select_vars <- c(time_var, tz_var, ll_var, "fit") if (ci) { select_vars <- c(select_vars, "ci_lower", "ci_upper") } ll_df <- get_partial_ll(data, model, term, ..., reference = reference, ci = ci, time_var = time_var) %>% select(one_of(select_vars)) %>% mutate(fit = ifelse(.data[[ll_var]] == 0, NA_real_, .data$fit)) %>% complete(!!tv_sym := unique(!!tv_sym), !!sym(tz_var) := tz_val) %>% left_join(int_info(attr(data, "breaks")) %>% rename(!!tv_sym := "tend"), by = time_var) if (ci) { ll_df <- ll_df %>% mutate(ci_lower = ifelse(is.na(.data$fit), NA, .data$ci_lower)) %>% mutate(ci_upper = ifelse(is.na(.data$fit), NA, .data$ci_upper)) %>% gather("type", "fit", one_of(c("fit", "ci_lower", "ci_upper"))) %>% mutate(type = factor(.data$type, levels = c("ci_lower", "fit", "ci_upper"))) } gg_base <- ggplot(ll_df, aes(x = .data[["intmid"]], y = tz_var)) + geom_tile(aes(fill = .data[["fit"]]), colour = "grey30") + scale_fill_gradient2(high = "firebrick2", low = "steelblue", na.value = "grey30") + scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0)) + xlab("time") + ylab(expression(t[z])) if (ci) { gg_base + facet_wrap(~.data$type) } else { gg_base } } #' Plot 1D (smooth) effects #' #' Flexible, high-level plotting function for (non-linear) effects conditional #' on further covariate specifications and potentially relative to #' a comparison specification. #' #' @inheritParams gg_partial #' @importFrom purrr map_int #' @importFrom rlang quos #' @examples #' ped <- tumor[1:200, ] %>% as_ped(Surv(days, status) ~ . 
)
#' model <- mgcv::gam(ped_status~s(tend) + s(age, by = complications), data=ped,
#'   family = poisson(), offset=offset)
#' make_newdata(ped, age = seq_range(age, 20), complications = levels(complications))
#' gg_slice(ped, model, "age", age=seq_range(age, 20), complications=levels(complications))
#' gg_slice(ped, model, "age", age=seq_range(age, 20), complications=levels(complications),
#'   ci = FALSE)
#' gg_slice(ped, model, "age", age=seq_range(age, 20), complications=levels(complications),
#'   reference=list(age = 50))
#' @export
gg_slice <- function(data, model, term, ..., reference = NULL, ci = TRUE) {

  expressions <- quos(...)
  vars        <- names(expressions)
  # covariate grid + partial effect of `term`
  ndf <- make_newdata(data, ...) %>%
    add_term(model, term, reference = reference, ci = ci)
  # order covariates by number of distinct values (most varying first) and
  # drop constants; secondary covariates are converted to factors for
  # grouping/colouring
  n_unique <- map_int(vars, ~length(unique(ndf[[.x]])))
  vars <- vars[rev(order(n_unique))]
  vars <- vars[n_unique[rev(order(n_unique))] > 1]
  ndf <- ndf %>% mutate_at(vars[-1], ~as.factor(.x))
  n_vars <- length(vars)

  gg_out <- ggplot(ndf, aes(x = .data[[vars[1]]], y = .data[["fit"]]))
  if (ci) {
    gg_out <- gg_out +
      geom_ribbon(aes(ymin = .data[["ci_lower"]], ymax = .data[["ci_upper"]]),
        alpha = 0.3)
  }
  gg_out <- gg_out + geom_line()
  if (n_vars > 1) {
    if (ci) {
      gg_out <- gg_out +
        aes(group = .data[[vars[2]]], fill = .data[[vars[2]]]) +
        geom_line(aes(col = .data[[vars[2]]]))
    } else {
      gg_out <- gg_out +
        aes(group = .data[[vars[2]]]) +
        geom_line(aes(col = .data[[vars[2]]]))
    }
    if (n_vars > 2) {
      # FIX: build "~v3+v4" rather than paste0("~", vars, collapse = "+"),
      # which produced the malformed "~v3+~v4" for more than one facet
      # variable
      form <- as.formula(paste0("~", paste(vars[-1:-2], collapse = "+")))
      gg_out <- gg_out + facet_wrap(form, labeller = label_both)
    }
  }

  gg_out + theme(legend.position = "bottom")
}

#' @rdname get_cumu_eff
#' @inherit get_cumu_eff
#' @inheritParams get_cumu_eff
#' @inheritParams gg_partial
#' @export
gg_cumu_eff <- function(data, model, term, z1, z2 = NULL, se_mult = 2, ci = TRUE) {

  cumu_eff_df <- get_cumu_eff(data, model, term, z1, z2, se_mult)
  gg_out <- ggplot(cumu_eff_df, aes(x = .data[["tend"]], y = .data[["cumu_eff"]]))
  if (ci) {
    gg_out <- gg_out +
      geom_ribbon(aes(ymin = .data[["cumu_eff_lower"]],
        ymax = .data[["cumu_eff_upper"]]), alpha = 0.3)
  }

  gg_out + geom_line() + xlab("time") + ylab("cumulative effect")
}

#' @inherit gg_partial_ll
#' @rdname gg_partial
#' @export
get_partial_ll <- function(
  data, model, term, ..., reference = NULL, ci = FALSE, time_var = "tend") {

  # index of the cumulative-effect term whose matrix names match `term`
  ind_term <- which(map_lgl(attr(data, "func_mat_names"), ~any(grepl(term, .x))))

  get_ll(data, ind_term, ..., time_var = time_var) %>%
    add_term(object = model, term = term, reference = reference, ci = ci)
}

#' @keywords internal
get_ll <- function(x, ind_term, ..., time_var = "tend") {

  n_func     <- length(attr(x, "ll_funs"))
  ll_fun     <- attr(x, "ll_funs")[[ind_term]]
  int_df     <- int_info(x)
  tz_vars    <- attr(x, "tz_vars")
  func       <- attr(x, "func")[[ind_term]]
  tz_val     <- attr(x, "tz")[[ind_term]]
  tz_var     <- attr(x, "tz_vars")[[ind_term]]
  # names of the matrix columns belonging to this term
  tz_var_mat <- make_mat_names(tz_var, func$latency_var, func$tz_var,
    func$suffix, n_func)
  ll_var_mat <- make_mat_names("LL", func$latency_var, func$tz_var,
    func$suffix, n_func)
  tz_mat_val <- x %>% pull(tz_var_mat) %>% as.numeric() %>% unique() %>% sort()
  nd <- make_newdata(x, ...)
  if (func$latency_var != "") {
    # reconstruct the exposure time from follow-up time and latency
    nd <- nd %>% mutate(!!sym(tz_var) := .data[[time_var]] - !!sym(tz_var_mat))
  }
  nd %>%
    filter(!!sym(tz_var) %in% tz_val) %>%
    mutate(
      !!sym(ll_var_mat) := ll_fun(.data[[time_var]], .data[[tz_var]]) * 1L) %>%
    arrange(.data[[time_var]], .data[[tz_var]]) %>%
    group_by(.data[[tz_var]]) %>%
    # shift the lag-lead indicator by one interval within each exposure time
    mutate(!!sym(ll_var_mat) := lag(!!sym(ll_var_mat), default = 0)) %>%
    ungroup()
}
pammtools/R/predict.R0000644000176200001440000000321414452536154014254 0ustar liggesusers#' S3 method for pamm objects for compatibility with package pec
#'
#' @inheritParams pec::predictSurvProb
#' @importFrom pec predictSurvProb
#' @importFrom purrr map
#'
#' @export
predictSurvProb.pamm <- function(
  object,
  newdata,
  times,
  ...) {

  # FIX: extract the id variable unconditionally; it is needed below even
  # when `newdata` is already in PED format (previously it was only defined
  # inside the if-branch, so PED input errored with "object not found")
  trafo_args <- object[["trafo_args"]]
  id_var     <- trafo_args[["id"]]
  if (!is.ped(newdata)) {
    brks <- trafo_args[["cut"]]
    if ( max(times) > max(brks) ) {
      stop("Can not predict beyond the last time point used during model estimation.
        Check the 'times' argument.")
    }
    ped_times <- sort(unique(union(c(0, brks), times)))
    # extract relevant intervals only, keeps data small
    ped_times <- ped_times[ped_times <= max(times)]
    # obtain interval information
    ped_info <- get_intervals(brks, ped_times[-1])
    # add adjusted offset such that cumulative hazard and survival probability
    # can be calculated correctly
    ped_info[["intlen"]] <- c(ped_info[["times"]][1], diff(ped_info[["times"]]))
    # create data set with interval/time + covariate info
    newdata[[id_var]] <- seq_len(nrow(newdata))
    newdata <- combine_df(ped_info, newdata)
  }

  env_times <- times
  newdata[["pred"]] <- unname(predict(
    unpam(object),
    newdata = newdata,
    type    = "response"))
  # cumulate interval-wise hazards into survival probabilities
  # NOTE(review): arrange/group use the literal columns "id"/"times"; for PED
  # input with a different id name this assumes those columns exist -- confirm
  newdata <- newdata %>%
    arrange(.data$id, .data$times) %>%
    group_by(.data$id) %>%
    mutate(pred = exp(-cumsum(.data$pred * .data$intlen))) %>%
    ungroup() %>%
    filter(.data[["times"]] %in% env_times)
  id <- unique(newdata[[id_var]])
  pred_list <- map(
    id,
    ~ newdata[newdata[[id_var]] == .x, "pred"] %>% pull("pred"))

  do.call(rbind, pred_list)
}
pammtools/R/sim-pexp.R0000644000176200001440000002472714452540743014373 0ustar liggesusers#' Simulate survival times from the piece-wise exponential distribution
#'
#' @param formula An extended formula that specifies the linear predictor.
#' If you want to include a smooth baseline
#' or time-varying effects, use \code{t} within your formula as
#' if it was a covariate in the data, although it is not and should not
#' be included in the \code{data} provided to \code{sim_pexp}. See examples
#' below.
#'
#' @param data A data set with variables specified in \code{formula}.
#' @param cut A sequence of time-points starting with 0.
#' @import dplyr
#' @import Formula
#' @importFrom lazyeval f_eval
#' @importFrom tidyr replace_na
#' @examples
#' library(survival)
#' library(dplyr)
#' library(pammtools)
#'
#' # set number of observations/subjects
#' n <- 250
#' # create data set with variables which will affect the hazard rate.
#' df <- cbind.data.frame(x1 = runif (n, -3, 3), x2 = runif (n, 0, 6)) %>%
#'  as_tibble()
#' # the formula which specifies how covariates affet the hazard rate
#' f0 <- function(t) {
#'  dgamma(t, 8, 2) *6
#' }
#' form <- ~ -3.5 + f0(t) -0.5*x1 + sqrt(x2)
#' set.seed(24032018)
#' sim_df <- sim_pexp(form, df, 1:10)
#' head(sim_df)
#' plot(survfit(Surv(time, status)~1, data = sim_df ))
#'
#' # for control, estimate with Cox PH
#' mod <- coxph(Surv(time, status) ~ x1 + pspline(x2), data=sim_df)
#' coef(mod)[1]
#' layout(matrix(1:2, nrow=1))
#' termplot(mod, se = TRUE)
#'
#' # and using PAMs
#' layout(1)
#' ped <- sim_df %>% as_ped(Surv(time, status)~., max_time=10)
#' library(mgcv)
#' pam <- gam(ped_status ~ s(tend) + x1 + s(x2), data=ped, family=poisson, offset=offset)
#' coef(pam)[2]
#' plot(pam, page=1)
#'
#'\dontrun{
#' # Example 2: Functional covariates/cumulative coefficients
#' # function to generate one exposure profile, tz is a vector of time points
#' # at which TDC z was observed
#' rng_z = function(nz) {
#'   as.numeric(arima.sim(n = nz, list(ar = c(.8, -.6))))
#' }
#' # two different exposure times for two different exposures
#' tz1 <- 1:10
#' tz2 <- -5:5
#' # generate exposures and add to data set
#' df <- df %>%
#'   add_tdc(tz1, rng_z) %>%
#'   add_tdc(tz2, rng_z)
#' df
#'
#' # define tri-variate function of time, exposure time and exposure z
#' ft <- function(t, tmax) {
#'   -1*cos(t/tmax*pi)
#' }
#' fdnorm <- function(x) (dnorm(x,1.5,2)+1.5*dnorm(x,7.5,1))
#' wpeak2 <- function(lag) 15*dnorm(lag,8,10)
#' wdnorm <- function(lag) 5*(dnorm(lag,4,6)+dnorm(lag,25,4))
#' f_xyz1 <- function(t, tz, z) {
#'   ft(t, tmax=10) * 0.8*fdnorm(z)* wpeak2(t - tz)
#' }
#' f_xyz2 <- function(t, tz, z) {
#'   wdnorm(t-tz) * z
#' }
#'
#' # define lag-lead window function
#' ll_fun <- function(t, tz) {t >= tz}
#' ll_fun2 <- function(t, tz) {t - 2 >= tz}
#' # simulate data with cumulative effect
#' sim_df <- sim_pexp(
#'   formula = ~ -3.5 + f0(t) -0.5*x1 + sqrt(x2)|
#'      fcumu(t, tz1, z.tz1, f_xyz=f_xyz1, ll_fun=ll_fun) +
#'      fcumu(t, tz2, z.tz2, f_xyz=f_xyz2, ll_fun=ll_fun2),
#'   data = df,
#'   cut = 0:10)
#'}
#' @export
sim_pexp <- function(formula, data, cut) {

  # add id/time/status columns; `time` is initialized at the maximum
  # follow-up and overwritten by the simulated survival times below
  data <- data %>%
    mutate(
      id     = row_number(),
      time   = max(cut),
      status = 1)

  # extract formulas for different components
  Form <- Formula(formula)
  f1   <- formula(Form, rhs = 1)
  # later more sophisticated checks + could be used to map over all rhs
  # formulae, check what type of evaluation is needed and return ETAs for
  # each part of the formula separated by |, such that model estimation may
  # be checked for individuals terms/parts
  if (length(Form)[2] > 1) {
    f2 <- formula(Form, rhs = 2)
  } else {
    f2 <- NULL
  }

  # construct eta for time-constant part
  # one row per subject and interval; hazard rate from the first rhs
  ped  <- split_data(
    formula = Surv(time, status)~.,
    data    = select_if(data, is_atomic),
    cut     = cut,
    id      = "id") %>%
    rename("t" = "tstart") %>%
    mutate(rate = exp(f_eval(f1, .)))

  # construct eta for time-dependent part
  if (!is.null(f2)) {
    # evaluate each fcumu(...) special of the second rhs in the global
    # environment (the f_xyz/ll_fun functions are expected to live there)
    terms_f2 <- terms(f2, specials = "fcumu")
    f2_ev    <- list()
    f2_tl    <- attr(terms_f2, "term.labels")
    for (i in seq_along(f2_tl)) {
      f2_ev[[i]] <- eval(expr = parse(text = f2_tl[[i]]), envir = .GlobalEnv)
    }
    ll_funs   <- map(f2_ev, ~.x[["ll_fun"]])
    tz_vars   <- map_chr(f2_ev, ~.x[["vars"]][1])
    cumu_funs <- map(f2_ev, ~.x[["f_xyz"]])
    names(tz_vars) <- names(ll_funs) <- names(cumu_funs) <- tz_vars
    # formula summing up the per-exposure linear predictors, e.g.
    # ~ eta_z.tz1 + eta_z.tz2
    z_form <- list("eta_", map_chr(f2_ev, ~.x[["vars"]][2])) %>%
      reduce(paste0, collapse = "+") %>%
      paste0("~", .) %>%
      as.formula()
    df2 <- map(f2_ev, function(fc) eta_cumu(data = data, fc, cut = cut))
    suppressMessages(
      ped <- ped %>% left_join(reduce(df2, full_join))
    )
    # intervals without any active exposure contribute 0 to the cumulative
    # effect; total rate is baseline rate times exp(cumulative effect)
    ped <- ped %>%
      mutate_at(vars(contains("eta_")), replace_na, 0) %>%
      group_by(.data$id, .data$t) %>%
      mutate(eta_z = !!rlang::get_expr(z_form)) %>%
      mutate(rate = .data$rate * exp(.data$eta_z))
  } else {
    tz_vars <- NULL
  }

  # draw one piece-wise exponential survival time per subject; administrative
  # censoring at max(cut)
  sim_df <- ped %>%
    group_by(id) %>%
    summarize(time = rpexp(rate = .data$rate, t = .data$t)) %>%
    mutate(
      status = 1L * (.data$time <= max(cut)),
      time   = pmin(.data$time, max(cut)))

  suppressMessages(
    sim_df <- sim_df %>% left_join(select(data, -.data$time, -.data$status))
  )

  # attach the meta information other pammtools functions rely on
  attr(sim_df, "id_var")     <- "id"
  attr(sim_df, "time_var")   <- "time"
  attr(sim_df, "status_var") <- "status"
  attr(sim_df, "tz_var")     <- tz_vars
  attr(sim_df, "cens_value") <- 0
  attr(sim_df, "breaks")     <- cut
  attr(sim_df, "tz") <- imap(tz_vars, ~select(sim_df, .x) %>%
    pull(.x) %>% unique()) %>% flatten()
  if (exists("ll_funs"))   attr(sim_df, "ll_funs")   <- ll_funs
  if (exists("cumu_funs")) attr(sim_df, "cumu_funs") <- cumu_funs
  # number of intervals each subject was at risk in ...
  attr(sim_df, "id_n") <- sim_df %>% pull("time") %>% pmin(max(cut)) %>%
    map_int(findInterval, vec = cut, left.open = TRUE, rightmost.closed = TRUE)
  # ... and the corresponding per-subject interval/row index sequences
  attr(sim_df, "id_tseq") <- attr(sim_df, "id_n") %>%
    map(seq_len) %>% unlist()
  attr(sim_df, "id_tz_seq") <- rep(seq_along(pull(sim_df, id)),
    times = attr(sim_df, "id_n"))
  attr(sim_df, "sim_formula") <- formula

  class(sim_df) <- c("sim_df", class(unped(sim_df)))
  # list columns (exposure histories) -> nested functional data frame
  if (any(!map_lgl(sim_df, is_atomic))) {
    class(sim_df) <- c("nested_fdf", class(sim_df))
  }

  sim_df
}

#' Add time-dependent covariate to a data set
#'
#' Given a data set in standard format (with one row per subject/observation),
#' this function adds a column with the specified exposure time points
#' and a column with respective exposures, created from \code{rng_fun}.
#' This function should usually only be used to create data sets passed
#' to \code{\link[pammtools]{sim_pexp}}.
#'
#' @inheritParams sim_pexp
#' @param tz A numeric vector of exposure times (relative to the
#' beginning of the follow-up time \code{t})
#' @param rng_fun A random number generating function that creates
#' the time-dependent covariates at time points \code{tz}.
#' First argument of the function should be \code{n}, the number of
#' random numbers to generate. Within \code{add_tdc}, \code{n} will be set
#' to \code{length(tz)}.
#' @param ... Currently not used.
#' @import dplyr
#' @importFrom rlang eval_tidy :=
#' @importFrom purrr map
#' @export
add_tdc <- function(data, tz, rng_fun, ...) {

  # capture tz unevaluated so its expression can be repeated per row
  tz      <- enquo(tz)
  nz      <- length(eval_tidy(tz))
  name_tz <- quo_name(tz)
  # exposure column name, e.g. "z.tz1" for tz variable "tz1"
  z_var   <- paste0("z.", name_tz)

  # add one list column with the exposure time points and one with the
  # exposure values drawn from rng_fun (one vector per subject)
  data %>%
    mutate(
      !!name_tz := map(seq_len(n()), ~ !!tz),
      !!z_var   := map(seq_len(n()), ~ rng_fun(nz = nz))) %>%
    as_tibble()
}

#' A formula special used to handle cumulative effect specifications
#'
#' Can be used in the second part of the formula specification provided
#' to \code{\link[pammtools]{sim_pexp}} and should only be used in this
#' context.
#'
#' @importFrom purrr map
#' @export
#' @keywords internal
fcumu <- function(..., by = NULL, f_xyz, ll_fun) {

  # tokenize the unevaluated ... arguments; drop the time symbol "t",
  # leaving c(tz_variable, exposure_variable)
  vars <- as.list(substitute(list(...)))[-1] %>%
    map(~as.character(.x)) %>%
    unlist()
  vars <- vars[vars != "t"]

  list(
    vars   = vars,
    f_xyz  = f_xyz,
    ll_fun = ll_fun)
}

#' @import dplyr
#' @importFrom tidyr unnest
#' @importFrom rlang sym :=
#' @keywords internal
eta_cumu <- function(data, fcumu, cut, ...) {

  vars     <- fcumu$vars
  f_xyz    <- fcumu$f_xyz
  ll_fun   <- fcumu$ll_fun
  eta_name <- paste0("eta_", vars[2])
  # cross each cut point with every subject's exposure history
  comb_df <- combine_df(
    data.frame(t = cut),
    select(data, one_of("id", vars)))
  comb_df <- comb_df %>% unnest(cols = -one_of("id"))
  # numerically integrate f_xyz over the active part of the lag-lead
  # window: LL flags active exposures, delta are the quadrature weights
  comb_df %>%
    group_by(.data$id, .data$t) %>%
    mutate(
      LL    = ll_fun(t, !!sym(vars[1])) * 1,
      delta = c(mean(abs(diff(!!sym(vars[1])))), abs(diff(!!sym(vars[1]))))) %>%
    ungroup() %>%
    filter(.data$LL != 0) %>%
    group_by(.data$id, .data$t) %>%
    summarize(!!eta_name :=
      sum(.data$delta * f_xyz(.data$t, .data[[vars[1]]], .data[[vars[2]]])))
}

#' Simulate data for competing risks scenario
#'
#'
#' @keywords internal
sim_pexp_cr <- function(formula, data, cut) {

  # Formula extends the base class formula by allowing for multiple responses
  # and multiple parts of regressors
  Form <- Formula(formula)
  # Extract the right handside of the Formula
  F_rhs   <- attr(Form, "rhs")
  l_rhs   <- length(F_rhs)
  seq_rhs <- seq_len(l_rhs)

  if (!("id" %in% names(data))) {
    data$id <- 1:(nrow(data))
  }

  if (!("t" %in% names(data))) {
    data$t <- 0
  }

  data <- data %>%
    mutate(
      time   = max(cut),
      status = 1
    )

  # construct eta for time-constant part
  # offset (the log of the duration during which the subject was under risk
  # in that interval)
  ped  <- split_data(
    formula = Surv(time, status)~.,
    data    = select_if(data, is_atomic),
    cut     = cut,
    id      = "id") %>%
    mutate(
      t = t + .data$tstart
    )

  # calculate cause specific hazards
  for (i in seq_rhs) {
    ped[[paste0("hazard", i)]] <- exp(eval(F_rhs[[i]], ped))
  }
  # overall event rate = sum of the cause-specific hazards
  ped[["rate"]] <- reduce(ped[paste0("hazard", seq_rhs)], `+`)

  # simulate survival times
  sim_df <- ped %>%
    group_by(id) %>%
    mutate(
      time   = rpexp(rate = .data$rate, t = .data$tstart),
      status = 1L * (.data$time <= max(cut)),
      time   = pmin(.data$time, max(cut)),
      # convert t back to the "original" scale; later it must be added onto
      # the waiting time
      t = .data$t - .data$tstart
    ) %>%
    filter(.data$tstart < .data$time & .data$time <= .data$tend)

  # draw one of the possible event types, with probabilities proportional to
  # the cause-specific hazards
  sim_df$type <- apply(sim_df[paste0("hazard", seq_rhs)], 1,
    function(probs) sample(seq_rhs, 1, prob = probs))

  sim_df %>%
    select(-one_of(c("tstart", "tend", "interval", "offset", "ped_status", "rate")))
}
pammtools/R/add-functions.R0000644000176200001440000005502714452540107015362 0ustar liggesusers#' Embeds the data set with the specified (relative) term contribution
#'
#' Adds the contribution of a specific term to the
#' linear predictor to the data specified by \code{newdata}.
#' Essentially a wrapper to \code{\link[mgcv]{predict.gam}}, with \code{type="terms"}.
#' Thus most arguments and their documentation below is from \code{\link[mgcv]{predict.gam}}.
#'
#' @inheritParams mgcv::predict.gam
#' @param term A character (vector) or regular expression indicating for
#' which term(s) information should be extracted and added to data set.
#' @param ci \code{logical}. Indicates if confidence intervals should be
#' calculated. Defaults to \code{TRUE}.
#' @param se_mult The factor by which standard errors are multiplied to form
#' confidence intervals.
#' @param reference A data frame with number of rows equal to \code{nrow(newdata)} or
#' one, or a named list with (partial) covariate specifications. See examples.
#' @param ...
Further arguments passed to \code{\link[mgcv]{predict.gam}} #' @import checkmate dplyr mgcv #' @importFrom stats predict #' @importFrom purrr map #' @importFrom stats model.matrix vcov #' @examples #' library(ggplot2) #' ped <- as_ped(tumor, Surv(days, status)~ age, cut = seq(0, 2000, by = 100)) #' pam <- mgcv::gam(ped_status ~ s(tend) + s(age), family = poisson(), #' offset = offset, data = ped) #' #term contribution for sequence of ages #' s_age <- ped %>% make_newdata(age = seq_range(age, 50)) %>% #' add_term(pam, term = "age") #' ggplot(s_age, aes(x = age, y = fit)) + geom_line() + #' geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper), alpha = .3) #' # term contribution relative to mean age #' s_age2 <- ped %>% make_newdata(age = seq_range(age, 50)) %>% #' add_term(pam, term = "age", reference = list(age = mean(.$age))) #' ggplot(s_age2, aes(x = age, y = fit)) + geom_line() + #' geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper), alpha = .3) #' @export add_term <- function( newdata, object, term, reference = NULL, ci = TRUE, se_mult = 2, ...) { assert_data_frame(newdata, all.missing = FALSE) assert_character(term, min.chars = 1, any.missing = FALSE, min.len = 1) col_ind <- map(term, grep, x = names(object$coefficients)) %>% unlist() %>% unique() %>% sort() is_gam <- inherits(object, "gam") X <- prep_X(object, newdata, reference, ...)[, col_ind, drop = FALSE] newdata[["fit"]] <- unname(drop(X %*% object$coefficients[col_ind])) if (ci) { cov.coefs <- if (is_gam) { object$Vp[col_ind, col_ind] } else { vcov(object)[col_ind, col_ind] } se <- unname(sqrt(rowSums( (X %*% cov.coefs) * X ))) newdata <- newdata %>% mutate( ci_lower = .data[["fit"]] - se_mult * se, ci_upper = .data[["fit"]] + se_mult * se) } return(newdata) } make_X <- function(object, ...) { UseMethod("make_X", object) } make_X.default <- function(object, newdata, ...) { X <- model.matrix(object$formula[-2], data = newdata, ...) } make_X.gam <- function(object, newdata, ...) 
{ X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...) } prep_X <- function(object, newdata, reference = NULL, ...) { X <- make_X(object, newdata, ...) if (!is.null(reference)) { reference <- preproc_reference(reference, colnames(newdata), nrow(newdata)) reference <- newdata %>% mutate(!!!reference) X_ref <- make_X(object, reference, ...) X <- X - X_ref } X } preproc_reference <- function(reference, cnames, n_rows) { # check that provided variables contained in newdata names_ref <- names(reference) if (!check_subset(names_ref, cnames)) { stop(paste0("Columns in 'reference' but not in 'newdata':", paste0(setdiff(names_ref, cnames), collapse = ","))) } # transform to list if inherits from data frame, so it can be processed # in mutate via !!! if (inherits(reference, "data.frame")) { if (!(nrow(reference) == n_rows | nrow(reference) == 1)) { stop("If reference is provided as data frame, number of rows must be either 1 or the number of rows in newdata.") } reference <- as.list(reference) } reference } #' Add predicted (cumulative) hazard to data set #' #' Add (cumulative) hazard based on the provided data set and model. #' If \code{ci=TRUE} confidence intervals (CI) are also added. Their width can #' be controlled via the \code{se_mult} argument. The method by which the #' CI are calculated can be specified by \code{ci_type}. #' This is a wrapper around #' \code{\link[mgcv]{predict.gam}}. When \code{reference} is specified, the #' (log-)hazard ratio is calculated. #' #' @rdname add_hazard #' @inheritParams mgcv::predict.gam #' @inheritParams add_term #' @param type Either \code{"response"} or \code{"link"}. The former calculates #' hazard, the latter the log-hazard. #' @param ... Further arguments passed to \code{\link[mgcv]{predict.gam}} and #' \code{\link{get_hazard}} #' @param ci_type The method by which standard errors/confidence intervals #' will be calculated. Default transforms the linear predictor at #' respective intervals. 
\code{"delta"} calculates CIs based on the standard #' error calculated by the Delta method. \code{"sim"} draws the #' property of interest from its posterior based on the normal distribution of #' the estimated coefficients. See \href{https://adibender.github.io/simpamm/confidence-intervals.html}{here} #' for details and empirical evaluation. #' @param se_mult Factor by which standard errors are multiplied for calculating #' the confidence intervals. #' @param overwrite Should hazard columns be overwritten if already present in #' the data set? Defaults to \code{FALSE}. If \code{TRUE}, columns with names #' \code{c("hazard", "se", "lower", "upper")} will be overwritten. #' @param time_var Name of the variable used for the baseline hazard. If #' not given, defaults to \code{"tend"} for \code{\link[mgcv]{gam}} fits, else #' \code{"interval"}. The latter is assumed to be a factor, the former #' numeric. #' @import checkmate dplyr mgcv #' @importFrom stats predict #' @examples #' ped <- tumor[1:50,] %>% as_ped(Surv(days, status)~ age) #' pam <- mgcv::gam(ped_status ~ s(tend)+age, data = ped, family=poisson(), offset=offset) #' ped_info(ped) %>% add_hazard(pam, type="link") #' ped_info(ped) %>% add_hazard(pam, type = "response") #' ped_info(ped) %>% add_cumu_hazard(pam) #' @export add_hazard <- function(newdata, object, ...) { UseMethod("add_hazard", object) } #' @rdname add_hazard #' @export add_hazard.default <- function( newdata, object, reference = NULL, type = c("response", "link"), ci = TRUE, se_mult = 2, ci_type = c("default", "delta", "sim"), overwrite = FALSE, time_var = NULL, ...) { if (!overwrite) { if ("hazard" %in% names(newdata)) { stop("Data set already contains 'hazard' column. 
Set `overwrite=TRUE` to overwrite") } } else { rm.vars <- intersect( c("hazard", "se", "ci_lower", "ci_upper"), names(newdata)) newdata <- newdata %>% select(-one_of(rm.vars)) } get_hazard(object, newdata, reference = reference, ci = ci, type = type, se_mult = se_mult, ci_type = ci_type, time_var = time_var, ...) } #' Calculate predicted hazard #' #' @inheritParams add_hazard #' @importFrom stats model.frame #' @importFrom mgcv predict.gam predict.bam #' @keywords internal get_hazard <- function(object, newdata, ...) { UseMethod("get_hazard", object) } #' @rdname get_hazard get_hazard.default <- function( object, newdata, reference = NULL, ci = TRUE, type = c("response", "link"), ci_type = c("default", "delta", "sim"), time_var = NULL, se_mult = 2, ...) { assert_data_frame(newdata, all.missing = FALSE) assert_class(object, classes = "glm") type <- match.arg(type) ci_type <- match.arg(ci_type) is_gam <- inherits(object, "gam") if (is.null(time_var)) { time_var <- ifelse(is_gam, "tend", "interval") } else { assert_string(time_var) assert_choice(time_var, colnames(newdata)) } # throw warning or error if evaluation time points/intervals do not correspond # to evaluation time-points/intervals do not correspond to the ones used for # estimation warn_about_new_time_points(object, newdata, time_var) X <- prep_X(object, newdata, reference, ...) coefs <- coef(object) newdata$hazard <- unname(drop(X %*% coefs)) if (ci) { newdata <- newdata %>% add_ci(object, X, type = type, ci_type = ci_type, se_mult = se_mult, ...) } if (type == "response") { newdata <- newdata %>% mutate(hazard = exp(.data[["hazard"]])) } newdata %>% arrange(.data[[time_var]], .by_group = TRUE) } #' @rdname add_hazard #' @inheritParams add_hazard #' @param interval_length The variable in newdata containing the interval lengths. #' Can be either bare unquoted variable name or character. Defaults to \code{"intlen"}. 
#' @importFrom dplyr bind_cols
#' @seealso \code{\link[mgcv]{predict.gam}},
#' \code{\link[pammtools]{add_surv_prob}}
#' @export
add_cumu_hazard <- function(
  newdata,
  object,
  ci              = TRUE,
  se_mult         = 2,
  overwrite       = FALSE,
  time_var        = NULL,
  interval_length = "intlen",
  ...) {

  # allow bare or quoted column name for the interval length column
  interval_length <- quo_name(enquo(interval_length))

  if (!overwrite) {
    # Guard against silently overwriting previously added estimates.
    # BUG FIX: the guard tests for the "cumu_hazard" column, so the error
    # message must name 'cumu_hazard' (it previously said 'hazard').
    if ("cumu_hazard" %in% names(newdata)) {
      stop(
        "Data set already contains 'cumu_hazard' column.
        Set `overwrite=TRUE` to overwrite")
    }
  } else {
    rm.vars <- intersect(
      c("cumu_hazard", "cumu_lower", "cumu_upper"),
      names(newdata))
    newdata <- newdata %>% select(-one_of(rm.vars))
  }

  get_cumu_hazard(newdata, object, ci = ci, se_mult = se_mult,
    time_var = time_var, interval_length = interval_length, ...)
}

#' Calculate cumulative hazard
#'
#' @inheritParams add_cumu_hazard
#' @import checkmate dplyr
#' @importFrom rlang UQ sym quo_name .data
#' @importFrom purrr map_lgl
#' @keywords internal
get_cumu_hazard <- function(
  newdata,
  object,
  ci              = TRUE,
  ci_type         = c("default", "delta", "sim"),
  time_var        = NULL,
  se_mult         = 2,
  interval_length = "intlen",
  nsim            = 100L,
  ...) {

  # input checks
  assert_character(interval_length)
  assert_subset(interval_length, colnames(newdata))
  assert_data_frame(newdata, all.missing = FALSE)
  assert_class(object, classes = "glm")

  ci_type <- match.arg(ci_type)
  interval_length <- sym(interval_length)
  # cumulative hazard = running sum of hazard * interval length
  mutate_args <- list(cumu_hazard = quo(cumsum(.data[["hazard"]] *
    (!!interval_length))))
  # hazard/CI columns already present in newdata must not be dropped later
  haz_vars_in_data <- map(c("hazard", "se", "ci_lower", "ci_upper"),
    ~ grep(.x, colnames(newdata), value = TRUE, fixed = TRUE)) %>%
    flatten_chr()
  vars_exclude <- c("hazard")
  if (ci) {
    if (ci_type == "default" | ci_type == "delta") {
      vars_exclude <- c(vars_exclude, "se", "ci_lower", "ci_upper")
      newdata <- get_hazard(object, newdata, type = "response", ci = ci,
        ci_type = ci_type, time_var = time_var, se_mult = se_mult, ...)
if (ci_type == "default") { mutate_args <- mutate_args %>% append(list( cumu_lower = quo(cumsum(.data[["ci_lower"]] * (!!interval_length))), cumu_upper = quo(cumsum(.data[["ci_upper"]] * (!!interval_length))))) } else { # ci delta rule newdata <- split(newdata, group_indices(newdata)) %>% map_dfr(add_delta_ci_cumu, object = object, se_mult = se_mult, ...) } } else { if (ci_type == "sim") { newdata <- get_hazard(object, newdata, type = "response", ci = FALSE, time_var = time_var, ...) newdata <- split(newdata, group_indices(newdata)) %>% map_dfr(get_sim_ci_cumu, object = object, nsim = nsim, ...) } } } else { newdata <- get_hazard(object, newdata, type = "response", ci = ci, ci_type = ci_type, time_var = time_var, se_mult = se_mult, ...) } newdata <- newdata %>% mutate(!!!mutate_args) vars_exclude <- setdiff(vars_exclude, haz_vars_in_data) if (length(vars_exclude) != 0 ) { newdata <- newdata %>% select(-one_of(vars_exclude)) } newdata } #' Add survival probability estimates #' #' Given suitable data (i.e. data with all columns used for estimation of the model), #' this functions adds a column \code{surv_prob} containing survival probabilities #' for the specified covariate and follow-up information (and CIs #' \code{surv_lower}, \code{surv_upper} if \code{ci=TRUE}). #' #' @inherit add_cumu_hazard #' @examples #' ped <- tumor[1:50,] %>% as_ped(Surv(days, status)~ age) #' pam <- mgcv::gam(ped_status ~ s(tend)+age, data=ped, family=poisson(), offset=offset) #' ped_info(ped) %>% add_surv_prob(pam, ci=TRUE) #' @export add_surv_prob <- function( newdata, object, ci = TRUE, se_mult = 2, overwrite = FALSE, time_var = NULL, interval_length = "intlen", ...) { interval_length <- quo_name(enquo(interval_length)) if (!overwrite) { if ("surv_prob" %in% names(newdata)) { stop("Data set already contains 'surv_prob' column. 
Set `overwrite=TRUE` to overwrite") } } else { rm.vars <- intersect( c("surv_prob", "surv_lower", "surv_upper"), names(newdata)) newdata <- newdata %>% select(-one_of(rm.vars)) } get_surv_prob(newdata, object, ci = ci, se_mult = se_mult, time_var = time_var, interval_length = interval_length, ...) } #' Calculate survival probabilities #' #' @inheritParams add_surv_prob #' @keywords internal get_surv_prob <- function( newdata, object, ci = TRUE, ci_type = c("default", "delta", "sim"), se_mult = 2L, time_var = NULL, interval_length = "intlen", nsim = 100L, ...) { assert_character(interval_length) assert_subset(interval_length, colnames(newdata)) assert_data_frame(newdata, all.missing = FALSE) assert_class(object, classes = "glm") ci_type <- match.arg(ci_type) interval_length <- sym(interval_length) mutate_args <- list(surv_prob = quo(exp(-cumsum(.data[["hazard"]] * (!!interval_length))))) haz_vars_in_data <- map(c("hazard", "se", "ci_lower", "ci_upper"), ~grep(.x, colnames(newdata), value = TRUE, fixed = TRUE)) %>% flatten_chr() vars_exclude <- c("hazard") if (ci) { if (ci_type == "default" | ci_type == "delta") { vars_exclude <- c(vars_exclude, "se", "ci_lower", "ci_upper") newdata <- get_hazard(object, newdata, type = "response", ci = ci, ci_type = ci_type, time_var = time_var, se_mult = se_mult, ...) if (ci_type == "default") { mutate_args <- mutate_args %>% append(list( surv_upper = quo(exp(-cumsum(.data[["ci_lower"]] * (!!interval_length)))), surv_lower = quo(exp(-cumsum(.data[["ci_upper"]] * (!!interval_length)))))) } else { # ci delta rule newdata <- split(newdata, group_indices(newdata)) %>% map_dfr(add_delta_ci_surv, object = object, se_mult = se_mult, ...) } } else { if (ci_type == "sim") { newdata <- get_hazard(object, newdata, type = "response", ci = FALSE, time_var = time_var, ...) newdata <- split(newdata, group_indices(newdata)) %>% map_dfr(get_sim_ci_surv, object = object, nsim = nsim, ...) 
} } } else { newdata <- get_hazard(object = object, newdata, type = "response", ci = FALSE, time_var = time_var, ...) } newdata <- newdata %>% mutate(!!!mutate_args) vars_exclude <- setdiff(vars_exclude, haz_vars_in_data) if (length(vars_exclude) != 0 ) { newdata <- newdata %>% select(-one_of(vars_exclude)) } newdata } add_ci <- function( newdata, object, X, type = c("response", "link"), se_mult = 2, ci_type = c("default", "delta", "sim"), nsim = 100, ...) { ci_type <- match.arg(ci_type) is_gam <- inherits(object, "gam") if (is_gam) { V <- object$Vp } else { V <- vcov(object) } se <- unname(sqrt(rowSums( (X %*% V) * X) )) newdata$se <- se if (type == "link") { newdata <- newdata %>% mutate( ci_lower = .data[["hazard"]] - se_mult * .data[["se"]], ci_upper = .data[["hazard"]] + se_mult * .data[["se"]]) } if (type != "link") { if (ci_type == "default") { newdata <- newdata %>% mutate( ci_lower = exp(.data[["hazard"]] - se_mult * .data[["se"]]), ci_upper = exp(.data[["hazard"]] + se_mult * .data[["se"]])) } else { if (ci_type == "delta") { newdata <- split(newdata, group_indices(newdata)) %>% map_dfr(add_delta_ci, object = object, se_mult = se_mult, ...) } else { if (ci_type == "sim") { newdata <- split(newdata, group_indices(newdata)) %>% map_dfr(get_sim_ci, object = object, nsim = nsim, ...) } } } } newdata } add_delta_ci <- function(newdata, object, se_mult = 2, ...) { X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...) V <- object$Vp Jacobi <- diag(exp(newdata$hazard)) %*% X newdata %>% mutate( se = sqrt(rowSums( (Jacobi %*% V) * Jacobi )), ci_lower = exp(.data[["hazard"]]) - .data[["se"]] * se_mult, ci_upper = exp(.data[["hazard"]]) + .data[["se"]] * se_mult) } add_delta_ci_cumu <- function(newdata, object, se_mult = 2, ...) { X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...) 
  # --- continuation of add_delta_ci_cumu: delta-method SE for the
  # cumulative hazard (X was computed on the preceding line) ---
  V <- object$Vp
  # Delta: lower-triangular cumulation matrix scaled by interval lengths,
  # so Delta %*% h yields the cumulative hazard at each interval end.
  # Assumes newdata holds one group ordered by time -- callers split via
  # group_indices() before map_dfr'ing this helper; TODO confirm ordering.
  Delta <- lower.tri(diag(nrow(X)), diag = TRUE) %*% diag(newdata$intlen)
  # Jacobian of the hazard w.r.t. the coefficients (hazard = exp(X beta))
  Jacobi <- diag(newdata$hazard) %*% X
  LHS <- Delta %*% Jacobi
  newdata %>% mutate(
    se = sqrt(rowSums( (LHS %*% V) * LHS )),
    cumu_lower = cumsum(.data[["intlen"]] * .data[["hazard"]]) -
      .data[["se"]] * se_mult,
    cumu_upper = cumsum(.data[["intlen"]] * .data[["hazard"]]) +
      .data[["se"]] * se_mult)
}

# Delta-method CI for the survival probability S(t) = exp(-Lambda(t)).
add_delta_ci_surv <- function(newdata, object, se_mult = 2, ...) {
  X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...)
  V <- object$Vp
  Delta <- lower.tri(diag(nrow(X)), diag = TRUE) %*% diag(newdata$intlen)
  Jacobi <- diag(newdata$hazard) %*% X
  # chain rule: dS/dbeta = -S * dLambda/dbeta
  LHS <- -diag(exp(-rowSums(Delta %*% diag(newdata$hazard)))) %*%
    (Delta %*% Jacobi)
  newdata %>% mutate(
    se = sqrt(rowSums( (LHS %*% V) * LHS)),
    surv_lower = exp(-cumsum(.data[["hazard"]] * .data[["intlen"]])) -
      .data[["se"]] * se_mult,
    surv_upper = exp(-cumsum(.data[["hazard"]] * .data[["intlen"]])) +
      .data[["se"]] * se_mult)
}

#' Calculate simulation based confidence intervals
#'
#' @keywords internal
#' @importFrom mvtnorm rmvnorm
#' @importFrom stats coef
get_sim_ci <- function(newdata, object, alpha = 0.05, nsim = 100L, ...) {
  X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...)
  V <- object$Vp
  coefs <- coef(object)
  # draw coefficient vectors from their asymptotic normal distribution
  sim_coef_mat <- mvtnorm::rmvnorm(nsim, mean = coefs, sigma = V)
  # one column per draw, each the simulated hazard at all rows of newdata
  sim_fit_mat <- apply(sim_coef_mat, 1, function(z) exp(X %*% z))
  # pointwise quantiles over draws give the CI bounds
  newdata$ci_lower <- apply(sim_fit_mat, 1, quantile, probs = alpha / 2)
  newdata$ci_upper <- apply(sim_fit_mat, 1, quantile, probs = 1 - alpha / 2)
  newdata
}

# Simulation based CI for the cumulative hazard (body continues on the
# next source line).
get_sim_ci_cumu <- function(newdata, object, alpha = 0.05, nsim = 100L, ...) {
  X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...)
V <- object$Vp coefs <- coef(object) sim_coef_mat <- mvtnorm::rmvnorm(nsim, mean = coefs, sigma = V) sim_fit_mat <- apply(sim_coef_mat, 1, function(z) cumsum(newdata$intlen * exp(X %*% z))) newdata$cumu_lower <- apply(sim_fit_mat, 1, quantile, probs = alpha / 2) newdata$cumu_upper <- apply(sim_fit_mat, 1, quantile, probs = 1 - alpha / 2) newdata } get_sim_ci_surv <- function(newdata, object, alpha = 0.05, nsim = 100L, ...) { X <- predict.gam(object, newdata = newdata, type = "lpmatrix", ...) V <- object$Vp coefs <- coef(object) sim_coef_mat <- mvtnorm::rmvnorm(nsim, mean = coefs, sigma = V) sim_fit_mat <- apply(sim_coef_mat, 1, function(z) exp(-cumsum(newdata$intlen * exp(X %*% z)))) newdata$surv_lower <- apply(sim_fit_mat, 1, quantile, probs = alpha / 2) newdata$surv_upper <- apply(sim_fit_mat, 1, quantile, probs = 1 - alpha / 2) newdata } ## Cumulative Incidence Function (CIF) for competing risks data #' Add cumulative incidence function to data #' #' @inheritParams add_hazard #' @param alpha The alpha level for confidence/credible intervals. #' @param n_sim Number of simulations (draws from posterior of estimated coefficients) #' on which estimation of CIFs and their confidence/credible intervals will be #' based on. #' @param cause_var Character. Column name of the 'cause' variable. #' #' @export add_cif <- function( newdata, object, ...) { UseMethod("add_cif", object) } #' @rdname add_cif #' @export add_cif.default <- function( newdata, object, ci = TRUE, overwrite = FALSE, alpha = 0.05, n_sim = 500L, cause_var = "cause", time_var = NULL, ...) { coefs <- coef(object) V <- object$Vp sim_coef_mat <- mvtnorm::rmvnorm(n_sim, mean = coefs, sigma = V) map_dfr( split(newdata, group_indices(newdata)), ~get_cif( newdata = .x, object = object, ci = ci, alpha = alpha, n_sim = n_sim, cause_var = cause_var, coefs = coefs, V = V, sim_coef_mat = sim_coef_mat, time_var = time_var, ...) 
) } #' Calculate CIF for one cause #' #' @keywords internal get_cif <- function(newdata, object, ...) { UseMethod("get_cif", object) } #' @rdname get_cif #' @keywords internal get_cif.default <- function( newdata, object, ci, time_var, alpha, n_sim, cause_var, coefs, V, sim_coef_mat, ...) { is_gam <- inherits(object, "gam") if (is.null(time_var)) { time_var <- ifelse(is_gam, "tend", "interval") } else { assert_string(time_var) assert_choice(time_var, colnames(newdata)) } causes_model <- as.factor(object$attr_ped$risks) cause_data <- unique(newdata[[cause_var]]) if(length(cause_data) > 1) { stop("Did you forget to group by cause?") } hazards <- map( causes_model, ~ { .df <- mutate(newdata, cause = .x) %>% arrange(.data[[time_var]], .by_group = TRUE) X <- predict(object, .df, type = "lpmatrix") apply(sim_coef_mat, 1, function(z) exp(X %*% z)) } ) overall_survivals <- apply( Reduce("+", hazards), 2, function(z) exp(-cumsum(z * newdata[["intlen"]]))) names(hazards) <- causes_model # calculate cif hazard <- hazards[[cause_data]] # Value of survival just prior to time-point survival <- overall_survivals - 1e-20 hps <- hazard * survival cifs <- apply(hps, 2, function(z) cumsum(z * newdata[["intlen"]])) newdata[["cif"]] <- rowMeans(cifs) if(ci) { newdata[["cif_lower"]] <- apply(cifs, 1, quantile, alpha/2) newdata[["cif_upper"]] <- apply(cifs, 1, quantile, 1-alpha/2) } newdata } pammtools/R/warnings.R0000644000176200001440000000457514452536066014467 0ustar liggesusers#' Warn if new t_j are used #' #' @keywords internal warn_about_new_time_points <- function(object, newdata, ...) { UseMethod("warn_about_new_time_points", object) } warn_about_new_time_points.glm <- function(object, newdata, time_var, ...) 
{
  # PAMs (gam): compare the unique time points used at fit time; PEMs
  # (glm): compare the interval factor levels.
  is_pam <- inherits(object, "gam")
  # nothing to check if the model frame was not stored with the fit
  if (is_pam & is.null(object$model)) {
    return(invisible())
  }
  original_intervals <- if (is_pam) {
    unique(model.frame(object)[[time_var]])
  } else levels(model.frame(object)[[time_var]])
  prediction_intervals <- if (is_pam) {
    unique(newdata[[time_var]])
  } else levels(factor(newdata[[time_var]]))
  new_ints <- which(!(prediction_intervals %in% original_intervals))
  if (length(new_ints)) {
    # BUG FIX: added the missing space between "fit" and "can" (the two
    # paste0 pieces previously concatenated to "fitcan").
    message <- paste0(
      "Time points/intervals in new data not equivalent to time points/intervals during model fit.",
      " Setting intervals to values not used for original fit ",
      "can invalidate the PEM assumption and yield incorrect predictions.")
    # For PAMs new time points are merely dubious (warn); for PEMs an
    # unseen factor level makes prediction impossible (error).
    if (is_pam) warning(message) else stop(message)
  }
}

#' @rdname warn_about_new_time_points
warn_about_new_time_points.pamm <- function(object, newdata, ...) {
  if (inherits(object, "pamm")) {
    int_original <- int_info(object)
    if ("interval" %in% colnames(newdata)) {
      int_new <- unique(newdata[["interval"]])
      # NOTE(review): int_original is a data frame returned by int_info();
      # `int_new %in% int_original` matches against the *list of columns*,
      # not the interval labels. This probably should compare against
      # int_original$interval -- confirm intent before changing behavior.
      if (!all(int_new %in% int_original)) {
        warning(
          paste0(
            "Time points/intervals in new data not equivalent to time points/intervals during model fit.",
            " Setting intervals to values not used for original fit ",
            "can invalidate the PEM assumption and yield incorrect predictions."
          )
        )
      }
    }
  }
}

# #' @keywords internal
# #' @importFrom dplyr intersect union setequal
# warn_partial_overlap <- function(event_id, tdc_id) {

#   common_id <- intersect(event_id, tdc_id)
#   union_id <- union(event_id, tdc_id)
#   if (!setequal(common_id, union_id)) {
#     warning("Not all IDs are present in both data sets.
#       IDs that do not appear in both data sets will be removed.")
#   }

#   invisible(common_id)

# }

# Throws an error if the data contain no events, i.e. the status variable
# never differs from the censoring code.
status_error <- function(data, formula, censor_code = 0L) {
  outcome_vars <- get_lhs_vars(formula)
  if (!any(unique(data[[outcome_vars[length(outcome_vars)]]]) != censor_code)) {
    stop(paste(
      "No events in data!
Check your", outcome_vars[length(outcome_vars)], "variable.")) } } pammtools/R/pammfit.R0000644000176200001440000000530714222504522014251 0ustar liggesusersunpam <- function(pamm) { class(pamm) <- class(pamm)[-1] pamm } repam <- function(x) { class(x) <- c("pamm", class(x)) x } append_ped_attr <- function(pamm, ped) { attr_ped <- ped_attr(ped) pamm[["attr_ped"]] <- attr_ped pamm } #' Fit a piece-wise exponential additive model #' #' A thin wrapper around \code{\link[mgcv]{gam}}, however, some arguments are #' prespecified: #' \code{family=poisson()} and \code{offset=data$offset}. #' These two can not be overwritten. In many cases it will also be advisable to #' set \code{method="REML"}. #' #' @inheritParams mgcv::gam #' @param ... Further arguments passed to \code{engine}. #' @param trafo_args A named list. If data is not in PED format, \code{as_ped} #' will be called internally with arguments provided in \code{trafo_args}. #' @param engine Character name of the function that will be called to fit the #' model. The intended entries are either \code{"gam"} or \code{"bam"} #' (both from package \code{mgcv}). #' @import mgcv #' @importFrom stats poisson #' @rdname pamm #' @seealso \code{\link[mgcv]{gam}} #' @examples #' ped <- tumor[1:100, ] %>% #' as_ped(Surv(days, status) ~ complications, cut = seq(0, 3000, by = 50)) #' pam <- pamm(ped_status ~ s(tend) + complications, data = ped) #' summary(pam) #' ## Alternatively #' pamm( #' ped_status ~ s(tend) + complications, #' data = tumor[1:100, ], #' trafo_args = list(formula = Surv(days, status)~complications)) #' @export pamm <- function( formula, data = list(), ..., trafo_args = NULL, engine = "gam") { dots <- list(...) 
dots$formula <- formula dots$family <- poisson() if (!is.null(trafo_args)) { trafo_args$data <- data data <- do.call(split_data, trafo_args) } dots$data <- data dots$offset <- data$offset pamm_fit <- do.call(engine, dots) class(pamm_fit) <- c("pamm", class(pamm_fit)) # pamm_fit <- append_ped_attr(pamm_fit, data) pamm_fit[["trafo_args"]] <- attr(data, "trafo_args") ind_attr_keep <- !(names(attributes(data)) %in% c("names", "row.names", "trafo_args", "class")) pamm_fit[["attr_ped"]] <- attributes(data)[ind_attr_keep] pamm_fit } #' Check if object is of class pamm #' #' @param x Any R object. #' @rdname pamm #' @keywords internal #' @export is.pamm <- function(x) inherits(x, "pamm") #' @rdname pamm #' @keywords internal #' @export print.pamm <- function(x, ...) { print(unpam(x), ...) } #' @rdname pamm #' @param object An object of class \code{pamm} as returned by \code{\link{pamm}}. #' @keywords internal #' @export summary.pamm <- function(object, ...) { summary(unpam(object), ...) } #' @rdname pamm #' @keywords internal #' @export plot.pamm <- function(x, ...) { plot(unpam(x), ...) } pammtools/R/ggplot-extensions.R0000644000176200001440000000646514452536154016326 0ustar liggesusers# Stolen from the \code{RmcdrPlugin.KMggplot2} (slightly modified) #' Step ribbon plots. #' #' \code{geom_stepribbon} is an extension of the \code{geom_ribbon}, and #' is optimized for Kaplan-Meier plots with pointwise confidence intervals #' or a confidence band. The default \code{direction}-argument \code{"hv"} is #' appropriate for right-continuous step functions like the hazard rates etc #' returned by \code{pammtools}. #' #' @seealso #' \code{\link[ggplot2]{geom_ribbon}} \code{geom_stepribbon} #' inherits from \code{geom_ribbon}. 
#' @inheritParams ggplot2:::geom_ribbon #' @inheritParams ggplot2:::geom_step #' @examples #' library(ggplot2) #' huron <- data.frame(year = 1875:1972, level = as.vector(LakeHuron)) #' h <- ggplot(huron, aes(year)) #' h + geom_stepribbon(aes(ymin = level - 1, ymax = level + 1), fill = "grey70") + #' geom_step(aes(y = level)) #' h + geom_ribbon(aes(ymin = level - 1, ymax = level + 1), fill = "grey70") + #' geom_line(aes(y = level)) #' @rdname geom_stepribbon #' @importFrom ggplot2 layer GeomRibbon #' @export geom_stepribbon <- function( mapping = NULL, data = NULL, stat = "identity", position = "identity", direction = "hv", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...) { layer( data = data, mapping = mapping, stat = stat, geom = GeomStepribbon, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list(na.rm = na.rm, direction = direction, ... ) ) } #' @rdname geom_stepribbon #' @importFrom ggplot2 ggproto #' @format NULL #' @usage NULL #' @export GeomStepribbon <- ggproto( "GeomStepribbon", GeomRibbon, extra_params = c("na.rm"), draw_group = function(data, panel_scales, coord, na.rm = FALSE, direction = "hv") { if (na.rm) data <- data[complete.cases(data[c("x", "ymin", "ymax")]), ] data <- rbind(data, data) data <- data[order(data$x), ] data <- ggplot2_stairstep(data[complete.cases(data["x"]), ], direction = direction) GeomRibbon$draw_group(data, panel_scales, coord, na.rm = na.rm) } ) # code adapted from # https://github.com/tidyverse/ggplot2/blob/9741da5050f81b7b5c012c56d02f45fc93d68f89/R/geom-path.r#L320 ggplot2_stairstep <- function(data, direction = c("hv", "vh", "mid")) { direction <- match.arg(direction) data <- as.data.frame(data)[order(data$x), ] n <- nrow(data) if (n <= 1) { return(data[0, , drop = FALSE]) } if (direction == "vh") { xs <- rep(1:n, each = 2)[-2 * n] ys <- c(1, rep(2:n, each = 2)) } if (direction == "hv") { xs <- c(1, rep(2:n, each = 2)) ys <- rep(1:n, each = 2)[-2 * n] } if (direction == "mid") { 
xs <- rep(1:(n - 1), each = 2) ys <- rep(1:n, each = 2) } ymin <- c(data$ymin[ys]) ymax <- c(data$ymax[ys]) if (direction == "mid") { gaps <- data$x[-1] - data$x[-n] mid_x <- data$x[-n] + gaps/2 x <- c(data$x[1], mid_x[xs], data$x[n]) data_attr <- data[c(1, xs, n), setdiff(names(data), c("x", "ymin", "ymax"))] } else { x <- data$x[xs] ymin <- data$ymin[ys] ymax <- data$ymax[ys] data_attr <- data[xs, setdiff(names(data), c("x", "ymin", "ymax"))] } cbind(data.frame(x = x, ymin = ymin, ymax = ymax), data_attr) } pammtools/R/data.R0000644000176200001440000000653614222504522013532 0ustar liggesusers#' Survival data of critically ill ICU patients #' #' A data set containing the survival time (or hospital release time) among #' other covariates. #' The full data is available \href{https://github.com/adibender/elra-biostats}{here}. #' The following variables are provided: #' \describe{ #' \item{Year}{The year of ICU Admission} #' \item{CombinedicuID}{Intensive Care Unit (ICU) ID} #' \item{CombinedID}{Patient identificator} #' \item{Survdays}{Survival time of patients. Here it is assumed that patients #' survive until t=30 if released from hospital.} #' \item{PatientDied}{Status indicator; 1=death, 0=censoring} #' \item{survhosp}{Survival time in hospital. Here it is assumed that patients #' are censored at time of hospital release (potentially informative)} #' \item{Gender}{Male or female} #' \item{Age}{The patients age at Admission} #' \item{AdmCatID}{Admission category: medical, surgical elective or surgical emergency} #' \item{ApacheIIScore}{The patient's Apache II Score at Admission} #' \item{BMI}{Patient's Body Mass Index} #' \item{DiagID2}{Diagnosis at admission in 9 categories} } "patient" #' Time-dependent covariates of the \code{\link{patient}} data set. #' #' This data set contains the time-dependent covariates (TDCs) for the \code{\link{patient}} #' data set. Note that nutrition was protocoled for at most 12 days after #' ICU admission. 
The data set includes: #' \describe{ #' \item{CombinedID}{Unique patient identifier. Can be used to merge with #' \code{\link{patient}} data} #' \item{Study_Day}{The calendar (!) day at which calories (or proteins) were #' administered} #' \item{caloriesPercentage}{The percentage of target calories supplied to the #' patient by the ICU staff} #' \item{proteinGproKG}{The amount of protein supplied to the patient by the #' ICU staff}} "daily" #' Simulated data with cumulative effects #' #' This is data simulated using the \code{\link[pammtools]{sim_pexp}} function. #' It contains two time-constant and two time-dependent covariates (observed #' on different exposure time grids). The code used for simulation is #' contained in the examples of \code{?sim_pexp}. #' "simdf_elra" #' Stomach area tumor data #' #' Information on patients treated for a cancer disease #' located in the stomach area. #' The data set includes: #' \describe{ #' \item{days}{Time from operation until death in days.} #' \item{status}{Event indicator (0 = censored, 1 = death).} #' \item{age}{The subject's age.} #' \item{sex}{The subject's sex (male/female).} #' \item{charlson_score}{Charlson comorbidity score, 1-6.} #' \item{transfusion}{Has subject received transfusions (no/yes).} #' \item{complications}{Did major complications occur during operation (no/yes).} #' \item{metastases}{Did the tumor develop metastases? (no/yes).} #' \item{resection}{Was the operation accompanied by a major resection (no/yes).} #' } #' "tumor" #' Time until staphylococcus aureaus infection in children, with possible recurrence #' #' This dataset originates from the Drakenstein child health study. #' The data contains the following variables: #' \describe{ #' \item{id}{Randomly generated unique child ID} #' \item{t.start}{The time at which the child enters the risk set for the $k$-th event} #' \item{t.stop}{Time of $k$-th infection or censoring}. #' \item{enum}{Event number. 
Maximum of 6.}
#' \item{hiv}{}
#' }
"staph"
pammtools/R/utils-pipe.R0000644000176200001440000000031213662013606014703 0ustar liggesusers#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
pammtools/R/interval-information.R0000644000176200001440000001252214222504522016760 0ustar liggesusers#' Create start/end times and interval information
#'
#' Given interval breaks points, returns data frame with information on
#' interval start time, interval end time, interval length and a factor
#' variable indicating the interval (left open intervals). If an object of class
#' \code{ped} is provided, extracts unique interval information from object.
#'
#' @param x A numeric vector of cut points in which the follow-up should be
#' partitioned in or object of class \code{ped}.
#' @param ... Currently ignored.
#' @rdname int_info
#' @return A data frame containing the start and end times of the
#' intervals specified by the \code{x} argument. Additionally, the interval
#' length, interval mid-point and a factor variable indicating the intervals.
#' @export
int_info <- function(x, ...) {
  UseMethod("int_info", x)
}

#' @param min_time Only intervals that have lower borders larger than
#' this value will be included in the resulting data frame.
#' @import checkmate dplyr
#' @examples
#' ## create interval information from cut points
#' int_info(c(1, 2.3, 5))
#'
#' @rdname int_info
#' @export
int_info.default <- function(
  x,
  min_time = 0L,
  ...)
{

  # check inputs
  assert_numeric(x, lower = 0, any.missing = FALSE)
  assert_numeric(min_time, lower = 0L)

  # sort x and add origin if necessary
  if (is.unsorted(x)) {
    x <- sort(x)
  }
  # NOTE(review): min(x != 0) is TRUE iff no cut point equals 0 (x is sorted
  # and non-negative here), i.e. the time origin is prepended when missing.
  if (min(x != 0)) {
    x <- c(0, x)
  }

  # derive interval borders and lengths from consecutive cut points
  intlen <- diff(x)
  tstart <- x[-length(x)]
  tend   <- tstart + intlen

  tdf <- data.frame(
      tstart = tstart,
      tend   = tend,
      intlen = intlen) %>%
    mutate(
      intmid   = tstart + intlen / 2,
      # left-open interval labels, e.g. "(0,1]"
      interval = paste0("(", tstart, ",", tend, "]"),
      interval = factor(.data$interval, levels = unique(.data$interval))
    )

  filter(tdf, tstart >= min_time)

}

#' @rdname int_info
#' @export
int_info.data.frame <- function(
  x,
  min_time = 0L,
  ...)
{

  # check inputs: two numeric columns, first = interval start, second = end
  assert_data_frame(x, types = "numeric", any.missing = FALSE, ncols = 2)
  # assert_numeric(min_time, lower = 0L)
  stopifnot(all(x[,1] < x[,2]))

  # sort x and add origin if necessary
  if (is.unsorted(x[,1])) {
    x <- x[order(x[,1], x[,2]), ]
  }
  colnames(x) <- c("tstart", "tend")
  x[["intlen"]]   <- x[, 2] - x[, 1]
  x[["intmid"]]   <- x[, 1] + x[, "intlen"] / 2
  x[["interval"]] <- paste0("(", x[, 1], ",", x[, 2], "]")
  x[["interval"]] <- factor(x[["interval"]], levels = x[["interval"]])

  x[x[["tstart"]] >= min_time, ]

}

#' @import dplyr
#' @rdname int_info
#' @examples
#' ## extract interval information used to create ped object
#' tdf <- data.frame(time=c(1, 2.3, 5), status=c(0, 1, 0))
#' ped <- tdf %>% as_ped(Surv(time, status)~., id="id")
#' int_info(ped)
#'
#' @seealso as_ped ped_info
#' @export
int_info.ped <- function(x, ...) {
  # interval break points are stored as an attribute when the ped is created
  int_info(attr(x, "breaks"), ...)
}

#' @rdname int_info
#' @export
#' @keywords internal
int_info.pamm <- function(x, ...) {
  # pamm objects keep the ped attributes in the "attr_ped" element
  int_info(x[["attr_ped"]][["breaks"]],...)
}

#' Information on intervals in which times fall
#'
#' @inheritParams int_info
#' @param x An object from which interval information can be obtained,
#' see \code{\link{int_info}}.
#' @param times A vector of times for which corresponding interval information
#' should be returned.
#' @param ... Further arguments passed to \code{\link[base]{findInterval}}.
#' @import dplyr
#' @return A \code{data.frame} containing information on intervals in which
#' values of \code{times} fall.
#' @seealso \code{\link[base]{findInterval}} \code{\link{int_info}}
#' @rdname get_intervals
#' @export
#' @examples
#' set.seed(111018)
#' brks <- c(0, 4.5, 5, 10, 30)
#' int_info(brks)
#' x <- runif (3, 0, 30)
#' x
#' get_intervals(brks, x)
get_intervals <- function(x, times, ...) {
  UseMethod("get_intervals", x)
}

#' @rdname get_intervals
#' @inheritParams base::findInterval
#' @export
get_intervals.default <- function(
  x,
  times,
  left.open        = TRUE,
  rightmost.closed = TRUE,
  ...) {

  # check inputs
  assert_numeric(times, lower = 0, finite = TRUE, all.missing = FALSE)

  int_df <- int_info(x)
  # index of the (left-open) interval each time point falls into
  int <- findInterval(
    x                = times,
    vec              = sort(union(int_df$tstart, int_df$tend)),
    left.open        = left.open,
    rightmost.closed = rightmost.closed)

  int_df %>%
    slice(int) %>%
    mutate(times = times) %>%
    select(times, everything())

}

#' Extract interval information and median/modus values for covariates
#'
#' Given an object of class \code{ped}, returns data frame with one row for each
#' interval containing interval information, mean values for numerical
#' variables and modus for non-numeric variables in the data set.
#'
#' @param ped An object of class \code{ped} as returned by
#' \code{\link[pammtools]{as_ped}}.
#' @import checkmate dplyr
#' @examples
#' ped <- tumor[1:4,] %>% as_ped(Surv(days, status)~ sex + age)
#' ped_info(ped)
#' @export
#' @return A data frame with one row for each unique interval in \code{ped}.
#' @seealso \code{\link[pammtools]{int_info}}, \code{\link[pammtools]{sample_info}}
ped_info <- function(ped) {
  UseMethod("ped_info", ped)
}

#' @rdname ped_info
#' @export
ped_info.ped <- function(ped) {

  int_df <- int_info(ped)
  sdf    <- sample_info(ped)
  if (is.null(sdf)) {
    # ped without covariates: only interval information can be returned
    return(int_df)
  } else {
    # cross interval information with (grouped) sample information and
    # restore the grouping of the sample info
    bind_cols(
      int_df %>% slice(rep(seq_len(nrow(int_df)), times = nrow(sdf))),
      sdf %>% slice(rep(seq_len(nrow(sdf)), each = nrow(int_df)))) %>%
    grouped_df(vars = group_vars(sdf))
  }

}
pammtools/R/get-cut-points.R0000644000176200001440000000330314452536066015505 0ustar liggesusers#' Obtain interval break points
#'
#' Default method works for data frames.
#' The list method applies the default method to each data set within the list.
#'
#'
#' @import Formula
#' @keywords internal
get_cut <- function(data, formula, cut = NULL, ...) {
  UseMethod("get_cut", data)
}

#' @rdname get_cut
#' @inherit get_cut
get_cut.default <- function(
  data,
  formula,
  cut      = NULL,
  max_time = NULL,
  event    = 1L,
  ...) {

  if (is.null(cut)) {
    outcome_vars <- get_lhs_vars(formula)
    if (length(outcome_vars) == 2) {
      # right-censored data: cut at unique event times only
      cut <- unique(data[[outcome_vars[1]]][1L * (data[[outcome_vars[2]]]) == event])
    } else {
      # start-stop data: cut at unique start and stop times
      cut_start <- unique(data[[outcome_vars[1]]])
      cut_end   <- unique(data[[outcome_vars[2]]])
      cut <- union(cut_start, cut_end)
    }
    if (!is.null(max_time)) {
      # truncate follow up at max_time and make it the last cut point
      cut <- cut[cut < max_time]
      cut <- c(cut, max_time)
    }
  }
  # sort interval cut points in case they are not (so that interval factor
  # variables will be in correct ordering)
  sort(unique(cut))

}

get_cut.list <- function (
  data,
  formula,
  cut       = NULL,
  max_time  = NULL,
  event     = 1L,
  timescale = "gap",
  ...)
{

  lhs_vars <- get_lhs_vars(formula)
  if (length(lhs_vars) == 3 & timescale == "gap") {
    # gap-time scale: cuts are based on the stop/status part of the formula
    rhs_vars <- get_rhs_vars(formula)
    formula_cuts <- as.formula(
      paste0("Surv(", lhs_vars[2], ",", lhs_vars[3], ") ~ ",
        paste(rhs_vars, collapse = "+")))
  } else {
    formula_cuts <- formula
  }
  # apply default method per data set, then pool the cut points
  cuts <- map(
    .x = data,
    .f = ~get_cut.default(
      data     = .x,
      formula  = formula_cuts,
      cut      = cut,
      max_time = max_time,
      event    = event,
      ...)
  )
  cuts <- Reduce(union, cuts)

  sort(unique(cuts))

}
pammtools/R/make-newdata.R0000644000176200001440000003026214222504522015150 0ustar liggesusers#' Extract information of the sample contained in a data set
#'
#' Given a data set and grouping variables, this function returns mean values
#' for numeric variables and modus for characters and factors. Usually
#' this function should not be called directly but will rather be called
#' as part of a call to \code{make_newdata}.
#'
#' @rdname sample_info
#' @param x A data frame (or object that inherits from \code{data.frame}).
#' @importFrom stats median
#' @return A data frame containing sample information (for each group).
#' If applied to an object of class \code{ped}, the sample means of the
#' original data is returned.
#' Note: When applied to a \code{ped} object, that doesn't contain covariates
#' (only interval information), returns data frame with 0 columns.
#'
#' @export
#' @keywords internal
sample_info <- function(x) {
  UseMethod("sample_info", x)
}

#' @import checkmate dplyr
#' @importFrom purrr compose
#' @export
#' @rdname sample_info
sample_info.data.frame <- function(x) {

  cn  <- colnames(x)
  # mean for numeric columns, modus for everything else
  num <- summarize_if (x, .predicate = is.numeric, ~mean(., na.rm = TRUE))
  fac <- summarize_if (x, .predicate = compose("!", is.numeric), modus)

  nnames <- intersect(names(num), names(fac))
  if (length(nnames) != 0) {
    # grouped input: join group-wise summaries and restore grouping
    suppressMessages(
      x <- left_join(num, fac) %>% group_by(!!!lapply(nnames, as.name))
    )
  } else {
    x <- bind_cols(num, fac)
  }

  # restore original column order
  return(select(x, one_of(cn)))

}

#' @rdname sample_info
#' @import checkmate dplyr
#' @importFrom rlang sym
#' @export
sample_info.ped <- function(x) {
  # is.grouped_df
  # remove "noise" information on interval variables
  grps   <- group_vars(x)
  iv     <- attr(x, "intvars")
  id_var <- attr(x, "id_var")
  # one row per subject (first interval), then drop interval columns
  x <- x %>%
    group_by(!!sym(id_var)) %>%
    slice(1) %>%
    ungroup() %>%
    grouped_df(grps) %>%
    select(-one_of(iv))

  if (test_data_frame(x, min.rows = 1, min.cols = 1)) {
    sample_info.data.frame(x)
  } else {
    NULL
  }

}

#' @rdname sample_info
#' @export
sample_info.fped <- function(x) {
  # matrix columns hold functional covariate information, not sample info
  x %>% select_if(~!is.matrix(.x)) %>% sample_info.ped()
}

#' Create a data frame from all combinations of data frames
#'
#' Works like \code{\link[base]{expand.grid}} but for data frames.
#'
#' @importFrom dplyr slice bind_cols
#' @importFrom vctrs vec_c
#' @importFrom purrr map map_lgl map2 transpose cross
#' @importFrom checkmate test_data_frame
#' @param ... Data frames that should be combined to one data frame.
#' Elements of first df vary fastest, elements of last df vary slowest.
#' @examples
#' combine_df(
#'   data.frame(x=1:3, y=3:1),
#'   data.frame(x1=c("a", "b"), x2=c("c", "d")),
#'   data.frame(z=c(0, 1)))
#' @export
#' @keywords internal
combine_df <- function(...) {

  dots <- list(...)
  if (!all(sapply(dots, test_data_frame))) {
    stop("All elements in ... must inherit from data.frame!")
  }

  # all combinations of row indices of the (non-empty) inputs
  ind_seq   <- map(dots, ~ seq_len(nrow(.x)))
  not_empty <- map_lgl(ind_seq, ~ length(.x) > 0)
  ind_list  <- ind_seq[not_empty] %>%
    cross() %>%
    transpose() %>%
    map(function(x) vec_c(!!!x))

  map2(dots[not_empty], ind_list, function(.x, .y) slice(.x, .y)) %>%
    bind_cols()

}

#' Construct a data frame suitable for prediction
#'
#' This function provides a flexible interface to create a data set that
#' can be plugged in as \code{newdata} argument to a suitable \code{predict}
#' function (or similar).
#' The function is particularly useful in combination with one of the
#' \code{add_*} functions, e.g., \code{\link[pammtools]{add_term}},
#' \code{\link[pammtools]{add_hazard}}, etc.
#'
#' @rdname newdata
#' @aliases make_newdata
#' @inheritParams sample_info
#' @param ... Covariate specifications (expressions) that will be evaluated
#' by looking for variables in \code{x}. Must be of the form \code{z = f(z)}
#' where \code{z} is a variable in the data set and \code{f} a known
#' function that can be usefully applied to \code{z}. Note that this is also
#' necessary for single value specifications (e.g. \code{age = c(50)}).
#' For data in PED (piece-wise exponential data) format, one can also specify
#' the time argument, but see "Details" and "Examples" below.
#' @import dplyr
#' @importFrom checkmate assert_data_frame assert_character
#' @importFrom purrr map cross_df
#' @details Depending on the type of variables in \code{x}, mean or modus values
#' will be used for variables not specified in ellipsis
#' (see also \code{\link[pammtools]{sample_info}}). If \code{x} is an object
#' that inherits from class \code{ped}, useful data set completion will be
#' attempted depending on variables specified in ellipsis. This is especially
#' useful, when creating a data set with different time points, e.g.
to
#' calculate survival probabilities over time (\code{\link[pammtools]{add_surv_prob}})
#' or to calculate a time-varying covariate effects (\code{\link[pammtools]{add_term}}).
#' To do so, the time variable has to be specified in \code{...}, e.g.,
#' \code{tend = seq_range(tend, 20)}. The problem with this specification is that
#' not all values produced by \code{seq_range(tend, 20)} will be actual values
#' of \code{tend} used at the stage of estimation (and in general, it will
#' often be tedious to specify exact \code{tend} values). \code{make_newdata}
#' therefore finds the correct interval and sets \code{tend} to the respective
#' interval endpoint. For example, if the intervals of the PED object are
#' \eqn{(0,1], (1,2]} then \code{tend = 1.5} will be set to \code{2} and the
#' remaining time-varying information (e.g. offset) completed accordingly.
#' See examples below.
#' @examples
#' # General functionality
#' tumor %>% make_newdata()
#' tumor %>% make_newdata(age=c(50))
#' tumor %>% make_newdata(days=seq_range(days, 3), age=c(50, 55))
#' tumor %>% make_newdata(days=seq_range(days, 3), status=unique(status), age=c(50, 55))
#' # mean/modus values of unspecified variables are calculated over whole data
#' tumor %>% make_newdata(sex=unique(sex))
#' tumor %>% group_by(sex) %>% make_newdata()
#' # You can also pass a part of the data sets as data frame to make_newdata
#' purrr::cross_df(list(days = c(0, 500, 1000), sex = c("male", "female"))) %>%
#'   make_newdata(x=tumor)
#'
#' # Examples for PED data
#' ped <- tumor %>% slice(1:3) %>% as_ped(Surv(days, status)~., cut = c(0, 500, 1000))
#' ped %>% make_newdata(age=c(50, 55))
#'
#' # if time information is specified, other time variables will be specified
#' # accordingly and offset calculated correctly
#' ped %>% make_newdata(tend = c(1000), age = c(50, 55))
#' ped %>% make_newdata(tend = unique(tend))
#' ped %>% group_by(sex) %>% make_newdata(tend = unique(tend))
#'
#' # tend is set to the end point of respective interval:
#' ped <- tumor %>% as_ped(Surv(days, status)~.)
#' seq_range(ped$tend, 3)
#' make_newdata(ped, tend = seq_range(tend, 3))
#' @export
make_newdata <- function(x, ...) {
  UseMethod("make_newdata", x)
}

#' @inherit make_newdata
#' @rdname newdata
#' @export
make_newdata.default <- function(x, ...) {

  assert_data_frame(x, all.missing = FALSE, min.rows = 2, min.cols = 1)

  orig_names <- names(x)

  expressions    <- quos(...)
  expr_evaluated <- map(expressions, lazyeval::f_eval, data = x)

  # construct data parts depending on input type
  # atomic specifications are fully crossed, data frames combined row-wise
  lgl_atomic <- map_lgl(expr_evaluated, is_atomic)
  part1 <- expr_evaluated[lgl_atomic] %>% cross_df()
  part2 <- do.call(combine_df, expr_evaluated[!lgl_atomic])

  ndf <- combine_df(part1, part2)

  # complete unspecified variables with their sample means/modi
  rest <- x %>% select(-one_of(c(colnames(ndf))))
  if (ncol(rest) > 0) {
    si  <- sample_info(rest) %>% ungroup()
    ndf <- combine_df(si, ndf)
  }

  ndf %>% select(one_of(orig_names))

}

#' @rdname newdata
#' @inherit make_newdata.default
#' @export
make_newdata.ped <- function(x, ...) {

  assert_data_frame(x, all.missing = FALSE, min.rows = 2, min.cols = 1)

  # prediction time points have to be interval end points so that piece-wise
  # constancy of predicted hazards is respected. If user overrides this, warn.

  orig_vars <- names(x)
  int_df    <- int_info(x)

  expressions <- quos(...)
  dot_names   <- names(expressions)
  int_names   <- names(int_df)
  # x <- select(x, -one_of(setdiff(int_names, c(dot_names, "intlen", "intmid"))))
  # strip interval columns not requested by the user, then treat as plain df
  ndf <- x %>%
    select(-one_of(setdiff(int_names, c(dot_names, "intlen", "intmid")))) %>%
    unped() %>%
    make_newdata(...)

  if (any(names(int_df) %in% names(ndf))) {
    # snap user-supplied time points to the respective interval end points
    int_tend <- get_intervals(x, ndf$tend)$tend
    if (!all(ndf$tend == int_tend)) {
      message("Some values of 'tend' have been set to the respective interval end-points")
    }
    ndf$tend <- int_tend
    suppressMessages(
      ndf <- ndf %>% left_join(int_df)
    )
  } else {
    ndf <- combine_df(int_df[1, ], ndf)
  }

  int_names <- intersect(int_names, c("intlen", orig_vars))
  # recompute time-dependent columns (offset is the log interval length)
  ndf %>%
    select(one_of(c(int_names, setdiff(orig_vars, int_names)))) %>%
    mutate(
      intlen     = .data$tend - .data$tstart,
      offset     = log(.data$tend - .data$tstart),
      ped_status = 0)

}

#' @rdname newdata
#' @inherit make_newdata.ped
#' @importFrom rlang quos
#' @export
make_newdata.fped <- function(x, ...) {

  assert_data_frame(x, all.missing = FALSE, min.rows = 2, min.cols = 1)

  # prediction time points have to be interval end points so that piece-wise
  # constancy of predicted hazards is respected. If user overrides this, warn.
  expressions <- quos(...)
  dot_names   <- names(expressions)
  orig_vars   <- names(x)

  # summary values for the functional covariate matrices not set by the user
  cumu_vars <- setdiff(unlist(attr(x, "func_mat_names")), dot_names)
  cumu_smry <- smry_cumu_vars(x, attr(x, "time_var")) %>%
    select(one_of(cumu_vars))

  int_names <- attr(x, "intvars")
  ndf <- x %>%
    select(one_of(setdiff(names(x), cumu_vars))) %>%
    unfped() %>%
    make_newdata(...)

  out_df <- do.call(combine_df, compact(list(cumu_smry, ndf)))

  int_df <- int_info(attr(x, "breaks"))
  suppressMessages(
    out_df <- out_df %>%
      left_join(int_df) %>%
      select(-one_of(c("intmid"))) %>%
      as_tibble()
  )

  # adjust lag-lead indicator
  out_df <- adjust_ll(out_df, x)

  out_df

}

# summarize the functional covariate matrix columns of an fped object:
# mean of first-row values for z-variables, first element for time variables
smry_cumu_vars <- function(data, time_var) {

  cumu_vars <- unlist(attr(data, "func_mat_names"))
  func_list <- attr(data, "func")
  z_vars    <- map(func_list, ~get_zvars(.x, time_var, length(func_list))) %>%
    unlist()
  smry_z <- select(data, one_of(z_vars)) %>%
    map(~ .x[1, ]) %>%
    map(~mean(unlist(.x))) %>%
    bind_cols()
  smry_time <- select(data, setdiff(cumu_vars, z_vars)) %>%
    map(~.x[1, 1])

  bind_cols(smry_z, smry_time)

}

# matrix column names of a cumulative-effect term that carry covariate
# (z) values, i.e. all matrix names except time/lag-lead related ones
get_zvars <- function(func, time_var, n_func) {

  col_vars  <- func$col_vars
  all_vars  <- make_mat_names(c(col_vars, "LL"), func$latency_var,
    func$tz_var, func$suffix, n_func)
  time_vars <- make_mat_names(c(time_var, func$tz_var, "LL"),
    func$latency_var, func$tz_var, func$suffix, n_func)

  setdiff(all_vars, time_vars)

}

## apply ll_fun to newly created data
adjust_ll <- function(out_df, data) {

  func_list <- attr(data, "func")
  n_func    <- length(func_list)
  LL_names  <- grep("LL", unlist(attr(data, "func_mat_names")), value = TRUE)

  for (i in LL_names) {
    # match each LL matrix to its lag-lead function; fall back to first
    ind_ll <- map_lgl(names(attr(data, "ll_funs")), ~grepl(.x, i))
    if (any(ind_ll)) {
      ind_ll <- which(ind_ll)
    } else {
      ind_ll <- 1
    }
    func   <- func_list[[ind_ll]]
    ll_i   <- attr(data, "ll_funs")[[ind_ll]]
    tz_var <- attr(data, "tz_vars")[[ind_ll]]
    tz_var <- make_mat_names(tz_var, func$latency_var, func$tz_var,
      func$suffix, n_func)
    if (func$latency_var == "") {
      out_df[[i]] <- ll_i(out_df[["tend"]], out_df[[tz_var]]) * 1L
    } else {
      # latency parametrization: lag-lead evaluated at t - tz
      out_df[[i]] <- ll_i(out_df[["tend"]], out_df[["tend"]] - out_df[[tz_var]]) * 1L
    }
  }

  out_df

}

# All variables that represent follow-up time should have the same values
# adjust_time_vars <- function(out_df, data, dot_names) {
# time_vars <- c("tend",
# grep(attr(data, "time_var"), unlist(attr(data, "func_mat_names")), value=TRUE))
#
time_vars_dots <- c(grep("tend", dot_names, value=TRUE),
# grep(attr(data, "time_var"), dot_names, value=TRUE))
# if (length(time_vars_dots) == 0) {
# time_vars_dots <- "tend"
# } else {
# if (length(time_vars_dots) > 1) {
# warning(paste0("Only one of ", paste0(time_vars_dots, collapse=", "),
# "must be specified. Only the first one will be used!"))
# time_vars_dots <- time_vars_dots[1]
# }
# }
# for (i in setdiff(time_vars, time_vars_dots)) {
# out_df[[i]] <- out_df[[time_vars_dots]]
# }
# out_df
# }
pammtools/R/get-terms.R0000644000176200001440000000514014222504522014516 0ustar liggesusers#' Extract partial effects for specified model terms
#'
#' @param data A data frame containing variables used to fit the model. Only
#' first row will be used.
#' @param fit A fitted object of class \code{\link[mgcv]{gam}}.
#' @param term The (non-linear) model term of interest.
#' @param ... Further arguments passed to \code{\link{seq_range}}.
#' @inheritParams seq_range
#' @import dplyr
#' @importFrom stats predict
#' @importFrom rlang UQ
#' @keywords internal
get_term <- function(data, fit, term, n = 100, ...) {

  # values at which term contribution will be evaluated
  seq_term <- data %>% pull(term) %>% seq_range(n = n)

  # use first row as basis (values of other covariates irrelevant anyway)
  new_df <- data[1, ]
  # clean up as rest of the data not needed any longer
  rm(data)
  gc()
  term_name <- term

  # extract term contribution information (+ standard errors)
  new_df <- new_df[rep(1, length(seq_term)), ]
  new_df[[term_name]] <- seq_term
  term_info <- predict(fit, newdata = new_df, type = "terms", se.fit = TRUE)
  # NOTE(review): grep may match more than one column if term names overlap
  # (e.g. "age" matching "s(age)" and "s(age2)") -- confirm term is unique.
  index_term <- grep(term, colnames(term_info$fit), value = TRUE)

  new_df %>%
    mutate(
      term = term_name,
      eff  = as.numeric(term_info$fit[, index_term]),
      se   = as.numeric(term_info$se.fit[, index_term])) %>%
    mutate(
      ci_lower = .data$eff - 2 * .data$se,
      ci_upper = .data$eff + 2 * .data$se) %>%
    select(one_of(c("term", term_name, "eff", "se", "ci_lower", "ci_upper"))) %>%
    rename(x = UQ(term_name)) %>%
    as_tibble()

}

#' Extract the partial effects of non-linear model terms
#'
#' This function basically creates a new \code{df} from \code{data} for
#' each term in \code{terms}, creating a range from minimum and maximum of the
#' respective terms. For each \code{df} it then calls
#' \code{predict(fit, newdata=df, type="terms")}. Terms are then
#' stacked to a tidy data frame.
#'
#' @inheritParams get_term
#' @param terms A character vector (can be length one). Specifies the terms
#' for which partial effects will be returned
#' @import checkmate
#' @importFrom purrr map_dfr
#' @return A tibble with 5 columns.
#' @examples
#' library(survival)
#' fit <- coxph(Surv(time, status) ~ pspline(karno) + pspline(age), data=veteran)
#' terms_df <- veteran %>% get_terms(fit, terms = c("karno", "age"))
#' head(terms_df)
#' tail(terms_df)
#' @export
get_terms <- function(data, fit, terms, ...) {

  # check inputs
  assert_class(data, "data.frame")
  assert_character(terms, min.len = 1, unique = TRUE)

  # apply get_term to each element of terms
  # BUG FIX: previously `...` was passed to map_dfr() itself, which forwards
  # it to the one-argument anonymous function and errors for any extra
  # argument (e.g. `n`). Forward `...` to get_term() instead, as documented.
  map_dfr(terms, function(.term) {
    get_term(data = data, fit = fit, term = .term, ...)
  })

}
pammtools/R/lag-lead-utils.R0000644000176200001440000000712214452536154015430 0ustar liggesusers#' Construct or extract data that represents a lag-lead window
#'
#' Constructs lag-lead window data set from raw inputs or from data objects
#' with suitable information stored in attributes, e.g., objects created
#' by \code{\link{as_ped}}.
#'
#' @param x Either a numeric vector of follow-up cut points or a suitable object.
#' @param ... Further arguments passed to methods.
#' @examples
#' get_laglead(0:10, tz=-5:5, ll_fun=function(t, tz) { t >= tz + 2 & t <= tz + 2 + 3})
#' gg_laglead(0:10, tz=-5:5, ll_fun=function(t, tz) { t >= tz + 2 & t <= tz + 2 + 3})
#' @export
get_laglead <- function(x, ...) {
  UseMethod("get_laglead", x)
}

#' @rdname get_laglead
#' @param tz A vector of exposure times
#' @param ll_fun Function that specifies how the lag-lead matrix
#' should be constructed. First argument is the follow up time
#' second argument is the time of exposure.
#' @importFrom dplyr mutate
#' @importFrom tidyr crossing
#' @export
get_laglead.default <- function(x, tz, ll_fun, ...) {

  # evaluate the lag-lead function on the full (t, tz) grid; the indicator
  # is lagged within tz so exposure takes effect from the next interval on
  LL_df <- crossing(t = x, tz = tz) %>%
    mutate(LL = ll_fun(.data$t, .data$tz) * 1L) %>%
    group_by(tz) %>%
    mutate(LL = lag(.data$LL, default = 0)) %>%
    ungroup()
  class(LL_df) <- c("LL_df", class(LL_df))

  LL_df

}

#' @rdname get_laglead
#' @importFrom purrr map2_dfr
#' @export
get_laglead.data.frame <- function(x, ...)
{

  # follow-up times, exposure times and lag-lead functions are stored as
  # attributes on the (nested) data object
  time    <- attr(x, "breaks")
  tz      <- attr(x, "tz")
  ll_funs <- attr(x, "ll_funs")

  # one lag-lead grid per exposure variable, stacked with an identifier column
  LL_df <- map2_dfr(tz, ll_funs, ~get_laglead.default(time, .x, ll_fun = .y),
    .id = "tz_var")
  if (!inherits(LL_df, "LL_df")) {
    class(LL_df) <- c("LL_df", class(LL_df))
  }

  LL_df

}

#' Plot Lag-Lead windows
#'
#' Given data defining a Lag-lead window, returns respective plot as a
#' \code{ggplot2} object.
#'
#' @inheritParams get_laglead
#' @param high_col Color used to highlight exposure times within the lag-lead window.
#' @param low_col Color of exposure times outside the lag-lead window.
#' @param grid_col Color of grid lines.
#' @import checkmate ggplot2
#' @examples
#' ## Example 1: supply t, tz, ll_fun directly
#' gg_laglead(1:10, tz=-5:5,
#'   ll_fun=function(t, tz) { t >= tz + 2 & t <= tz + 2 + 3})
#'
#' ## Example 2: extract information on t, tz, ll_from data with respective attributes
#' data("simdf_elra", package = "pammtools")
#' gg_laglead(simdf_elra)
#' @export
#' @seealso get_laglead
gg_laglead <- function(x, ...) {
  UseMethod("gg_laglead", x)
}

#' @rdname gg_laglead
#' @export
gg_laglead.default <- function(x, tz, ll_fun, ...) {
  LL_df <- get_laglead(x, tz, ll_fun = ll_fun)
  gg_laglead(LL_df, ...)
}

#' @rdname gg_laglead
#' @export
gg_laglead.LL_df <- function(
  x,
  high_col = "grey20",
  low_col  = "whitesmoke",
  grid_col = "lightgrey",
  ...)
{

  # attach interval labels for the follow-up axis
  x <- left_join(x, int_info(unique(x$t)), by = c("t" = "tend"))
  x <- x %>%
    filter(!is.na(.data$interval)) %>%
    mutate(
      tz       = factor(.data$tz, levels = sort(unique(.data$tz), decreasing = FALSE)),
      interval = factor(.data$interval, levels = levels(.data$interval))
    )

  gg_ll <- ggplot(x, aes(x = .data[["interval"]], y = .data[["tz"]])) +
    geom_tile(aes(fill = .data[["LL"]]), colour = grid_col) +
    scale_fill_gradient(low = low_col, high = high_col) +
    scale_x_discrete(expand = c(0, 0)) +
    scale_y_discrete(expand = c(0, 0)) +
    ylab(expression(t[z])) + xlab(expression(t))
  # multiple exposure variables: one panel per tz_var
  if (!is.null(x[["tz_var"]])) {
    gg_ll <- gg_ll + facet_wrap(~tz_var, scales = "free_y")
  }

  gg_ll + theme(legend.position = "none")

}

#' @rdname gg_laglead
#' @inherit gg_laglead
#' @export
gg_laglead.nested_fdf <- function(x, ...) {
  LL_df <- get_laglead(x)
  gg_laglead(LL_df, ...)
}
pammtools/R/cumulative-effect.R0000644000176200001440000000554714222504522016232 0ustar liggesusers#' Calculate (or plot) cumulative effect for all time-points of the follow-up
#'
#' @inheritParams gg_partial
#' @param z1 The exposure profile for which to calculate the cumulative effect.
#' Can be either a single number or a vector of same length as unique observation
#' time points.
#' @param z2 If provided, calculated cumulative effect is for the difference
#' between the two exposure profiles (g(z1,t)-g(z2,t)).
#' @param se_mult Multiplicative factor used to calculate confidence intervals
#' (e.g., lower = fit - 2*se).
#' @export
get_cumu_eff <- function(data, model, term, z1, z2 = NULL, se_mult = 2) {

  assert_class(data, "fped")

  # prediction data with exposure profile z1 plugged into the term's matrices
  ped     <- make_ped_dat(data, term, z1)
  coefs   <- coef(model)
  col_ind <- grep(term, names(coefs))
  coefs   <- coefs[col_ind]
  # covariance of the term's coefficients (for standard errors)
  Vp      <- model$Vp[col_ind, col_ind]
  X       <- predict(model, ped, type = "lpmatrix")[, col_ind]
  if (!is.null(z2)) {
    # difference of cumulative effects: g(z1, t) - g(z2, t)
    X2 <- predict(model, make_ped_dat(data, term, z2), type = "lpmatrix")[, col_ind]
    X  <- X - X2
  }
  ped$cumu_eff    <- drop(X %*% coefs)
  # row-wise sqrt(x' Vp x) without forming the full covariance matrix
  ped$se_cumu_eff <- drop(sqrt(rowSums( (X %*% Vp) * X) ))
  ped$cumu_eff_lower <- ped$cumu_eff - se_mult * ped$se_cumu_eff
  ped$cumu_eff_upper <- ped$cumu_eff + se_mult * ped$se_cumu_eff

  ped

}

#' @keywords internal
make_ped_dat <- function(x, term, z_vec) {

  # extract meta information of the cumulative-effect term from attributes
  nfunc          <- length(attr(x, "ll_funs"))
  ind_term       <- get_term_ind(x, term)
  nz             <- length(attr(x, "tz")[[ind_term]])
  tz_var         <- attr(x, "tz_vars")[[ind_term]]
  tz             <- attr(x, "tz")[[ind_term]]
  func           <- attr(x, "func")[[ind_term]]
  ll_fun         <- attr(x, "ll_funs")[[ind_term]]
  func_mat_names <- attr(x, "func_mat_names")[[ind_term]]
  LL_name        <- grep("LL", func_mat_names, value = TRUE)
  tz_var_mat     <- make_mat_names(tz_var, func$latency_var, func$tz_var,
    func$suffix, nfunc)
  q_weights      <- attr(x, "ll_weights")[[ind_term]]

  # exposure profile: scalar is recycled to one value per exposure time
  stopifnot(length(z_vec) == nz | length(z_vec) == 1)
  z_vec <- if (length(z_vec) == 1) {
    rep(z_vec, nz)
  } else {
    z_vec
  }

  ped_df <- make_newdata(x, tend = unique(.data$tend))
  # lag-lead indicator weighted by the quadrature weights
  ped_df[[LL_name]] <- outer(ped_df$tend, tz, FUN = ll_fun) * 1L *
    matrix(q_weights$ll_weight, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
  if (func$latency_var != "") {
    ped_df[[tz_var_mat]] <- outer(ped_df$tend, tz, FUN = "-")
    # BUG FIX: the product was previously computed but never assigned, so
    # latencies outside the lag-lead window were not zeroed (unlike the
    # parallel else branch below).
    ped_df[[tz_var_mat]] <- ped_df[[tz_var_mat]] * (ped_df[[LL_name]] != 0)
  } else {
    ped_df[[tz_var]] <- matrix(tz, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
    ped_df[[tz_var]] <- ped_df[[tz_var]] * (ped_df[[LL_name]] != 0)
  }
  ped_df[[term]] <- matrix(z_vec, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
  t_mat_var <- grep(attr(x, "time_var"), func_mat_names, value = TRUE)
  if (length(t_mat_var) != 0) {
    ped_df[[t_mat_var]] <- matrix(unique(x[[t_mat_var]][, 1]),
      nrow = nrow(ped_df), ncol = nz)
  }

  ped_df

}

# index of the cumulative-effect term whose matrix names match `term`
get_term_ind <- function(x, term) {
  which(map_lgl(attr(x, "func_mat_names"), ~any(grepl(term, .x))))
}
pammtools/R/pammtools.R0000644000176200001440000000566114222504522014632 0ustar liggesusers#' pammtools: Piece-wise exponential Additive Mixed Modeling tools.
#'
#' \code{pammtools} provides functions and utilities that facilitate fitting
#' Piece-wise Exponential Additive Mixed Models (PAMMs), including data
#' transformation and other convenience functions for pre- and post-processing
#' as well as plotting.
#'
#' The best way to get an overview of the functionality provided and how to
#' fit PAMMs is to view the vignettes
#' available at \url{https://adibender.github.io/pammtools/articles/}.
#' A summary of the vignettes' content is given below:
#'
#' \itemize{
#' \item \href{https://adibender.github.io/pammtools/articles/basics.html}{basics}:
#' Introduction to PAMMs and basic modeling.
#' \item \href{https://adibender.github.io/pammtools/articles/baseline.html}{baseline}:
#' Shows how to estimate and visualize baseline model (without covariates) and
#' comparison to respective Cox-PH model.
#' \item \href{https://adibender.github.io/pammtools/articles/convenience.html}{convenience}:
#' Convenience functions for post-processing and plotting PAMMs.
#' \item \href{https://adibender.github.io/pammtools/articles/data-transformation.html}{data-transformation}:
#' Transforming data into a format suitable to fit PAMMs.
#' \item \href{https://adibender.github.io/pammtools/articles/frailty.html}{frailty}:
#' Specifying "frailty" terms, i.e., random effects for PAMMs.
#' \item \href{https://adibender.github.io/pammtools/articles/splines.html}{splines}:
#' Specifying spline smooth terms for PAMMs.
#' \item \href{https://adibender.github.io/pammtools/articles/strata.html}{strata}: #' Specifying stratified models in which each level of a grouping variable has a #' different baseline hazard. #' \item \href{https://adibender.github.io/pammtools/articles/tdcovar.html}{tdcovar}: #' Dealing with time-dependent covariates. #' \item \href{https://adibender.github.io/pammtools/articles/tveffects.html}{tveffects}: #' Specifying time-varying effects. #' \item \href{https://adibender.github.io/pammtools/articles/left-truncation.html}{left-truncation}: #' Estimation for left-truncated data. #'\item \href{https://adibender.github.io/pammtools/articles/competing-risks.html}{competing-risks}: #' Competing risks analysis. #' } #' #' @name pammtools #' @docType package #' @references #' Bender, Andreas, Andreas Groll, and Fabian Scheipl. 2018. #' “A Generalized Additive Model Approach to Time-to-Event Analysis” #' Statistical Modelling, February. https://doi.org/10.1177/1471082X17748083. #' #' Bender, Andreas, Fabian Scheipl, Wolfgang Hartl, Andrew G. Day, and Helmut Küchenhoff. 2019. #' “Penalized Estimation of Complex, Non-Linear Exposure-Lag-Response Associations.” #' Biostatistics 20 (2): 315–31. https://doi.org/10.1093/biostatistics/kxy003. #' #' Bender, Andreas, and Fabian Scheipl. 2018. #' “pammtools: Piece-Wise Exponential Additive Mixed Modeling Tools.” #' ArXiv:1806.01042 [Stat], June. https://arxiv.org/abs/1806.01042. NULL NULL pammtools/R/tidiers.R0000644000176200001440000001272514452536154014274 0ustar liggesusers## Functions to extract effect information from fitted models #' Calculate confidence intervals #' #' Given 2 column matrix or data frame, returns 3 column data.frame #' with coefficient estimate plus lower and upper borders of the #' 95% confidence intervals. #' #' @param ftab A table with two columns, containing coefficients in the first #' column and standard-errors in the second column. 
#' @importFrom tibble as_tibble #' @keywords internal calc_ci <- function(ftab) { colnames(ftab) <- c("coef", "se") rnames <- rownames(ftab) ftab <- as_tibble(ftab) ftab$variable <- rnames ftab$ci_lower <- ftab$coef - 2 * ftab$se ftab$ci_upper <- ftab$coef + 2 * ftab$se ftab$se <- NULL ftab[, c("variable", "coef", "ci_lower", "ci_upper")] } #' Extract fixed coefficient table from model object #' #' Given a model object, returns a data frame with columns \code{variable}, #' \code{coef} (coefficient), \code{ci_lower} (lower 95\% CI) and #' \code{ci_upper} (upper 95\% CI). #' #' @param x A model object. #' @param ... Currently not used. #' @export tidy_fixed <- function(x, ...) { UseMethod("tidy_fixed", x) } #' @rdname tidy_fixed #' @param intercept Should intercept also be returned? Defaults to \code{FALSE}. #' @export tidy_fixed.gam <- function(x, intercept=FALSE, ...) { ftab <- summary(x)[["p.table"]][, 1:2] if (!intercept) { ftab <- ftab[!grepl("Intercept", rownames(ftab)), , drop = FALSE] } calc_ci(ftab) } #' @rdname tidy_fixed #' @importFrom tibble as_tibble #' @keywords internal #' @examples #' library(survival) #' gc <- coxph(Surv(days, status)~age + sex, data = tumor) #' tidy_fixed(gc) #' @export tidy_fixed.coxph <- function(x, ...) { ftab <- summary(x)[["coefficients"]][, c(1, 3)] calc_ci(ftab) } #' Extract 1d smooth objects in tidy data format. #' #' @rdname tidiers #' @inheritParams get_plotinfo #' @param keep A vector of variables to keep. #' @param ci A logical value indicating whether confidence intervals should be #' calculated and returned. Defaults to \code{TRUE}. #' @importFrom dplyr bind_rows #' @export tidy_smooth <- function( x, keep = c("x", "fit", "se", "xlab", "ylab"), ci = TRUE, ...) { po <- get_plotinfo(x, ...) 
# index of list elements that are 1d smooths and not random effects ind1d <- vapply( X = po, FUN = function(z) !is.null(z[["x"]]) & is.null(z[["main"]]), FUN.VALUE = logical(1)) # keep only variables of interest po <- lapply(po[ind1d], "[", i = keep, drop = TRUE) # transform to data.frame po <- lapply(po, function(z) { z[["fit"]] <- as.vector(z[["fit"]]) temp <- as_tibble(z) if (ci) { temp <- temp %>% mutate( ci_lower = .data$fit - .data$se, ci_upper = .data$fit + .data$se) } temp }) return(bind_rows(po)) } #' Extract 2d smooth objects in tidy format. #' #' @inheritParams tidy_smooth #' @importFrom purrr cross_df #' @importFrom tibble as_tibble #' @import dplyr #' @export tidy_smooth2d <- function( x, keep = c("x", "y", "fit", "se", "xlab", "ylab", "main"), ci = FALSE, ...) { po <- get_plotinfo(x, ...) ind2d <- vapply( X = po, FUN = function(z) !is.null(z[["x"]]) & !is.null(z[["y"]]), FUN.VALUE = logical(1)) # keep only variables of interest po <- lapply(po[ind2d], "[", i = keep, drop = TRUE) # transform to data.frame po <- lapply(po, function(z) { z[["fit"]] <- as.vector(z[["fit"]]) p1 <- as_tibble(z[setdiff(keep, c("x", "y"))]) xy <- cross_df(z[c("x", "y")]) xy <- bind_cols(xy, p1) if (ci) { xy <- xy %>% mutate( ci_lower = .data$fit - .data$se, ci_upper = .data$fit + .data$se) } xy }) return(bind_rows(po)) } #' Extract random effects in tidy data format. #' #' @inheritParams tidy_smooth #' @importFrom dplyr bind_rows #' @importFrom stats ppoints qnorm quantile #' @rdname tidy_smooth #' @seealso \code{\link[stats]{qqline}} #' @export tidy_re <- function(x, keep=c("fit", "main", "xlab", "ylab"), ...) { po <- get_plotinfo(x, ...) 
ind.re <- vapply( X = po, FUN = function(z) { (!is.null(z[["main"]])) & (z[["xlab"]] == "Gaussian quantiles") }, FUN.VALUE = logical(1)) po <- lapply(po[ind.re], "[", i = keep, drop = TRUE) po <- lapply(po, function(z) { re.df <- do.call(cbind.data.frame, c(z, stringsAsFactors = FALSE)) re.df$x <- qnorm(ppoints(length(re.df$fit))[order(order(re.df$fit))]) # code to calculate qqslope and qqintercept from ?stats::qqline yl <- quantile(re.df$fit, probs = c(0.25, 0.75), type = 7, names = FALSE) xl <- qnorm(c(0.25, 0.75)) re.df$qqslope <- diff(yl) / diff(xl) re.df$qqintercept <- yl[1L] - re.df$qqslope * xl[1L] re.df }) return(bind_rows(po)) } #' Extract plot information for all special model terms #' #' Given a \code{mgcv} \code{\link[mgcv]{gamObject}}, returns the information #' used for the default plots produced by \code{\link[mgcv]{plot.gam}}. #' #' @inheritParams mgcv::plot.gam #' @param ... Further arguments passed to \code{\link[mgcv]{plot.gam}} #' @import mgcv #' @importFrom checkmate assert_class #' @importFrom grDevices png dev.off #' @importFrom graphics plot #' @export get_plotinfo <- function(x, ...) { assert_class(x, c("gam", "glm", "lm")) tmp <- paste0(tempfile(), ".png") png(tmp) po <- plot(x, page = 1, ...) dev.off() if (file.exists(tmp)) file.remove(tmp) class(po) <- c("mgcv.plotlist", class(po)) return(po) } pammtools/R/split-data.R0000644000176200001440000002200714452536066014667 0ustar liggesusers#' Function to transform data without time-dependent covariates into piece-wise #' exponential data format #' #' @inheritParams as_ped #' @param multiple_id Are occurences of same id allowed (per transition). #' Defaults to \code{FALSE}, but is sometimes set to \code{TRUE}, e.g., in case of #' multi-state models with back transitions. 
#' @import survival checkmate dplyr #' @importFrom stats as.formula update #' @importFrom purrr set_names #' @seealso \code{\link[survival]{survSplit}} #' @export #' @keywords internal split_data <- function( formula, data, cut = NULL, max_time = NULL, multiple_id = FALSE, ...) { dots_in <- list(...) dots_in$formula <- formula ## assert that inputs have correct formats assert_class(formula, "formula") assert_data_frame(data, min.rows = 1, min.cols = 2) assert_numeric(cut, lower = 0, finite = TRUE, any.missing = FALSE, min.len = 1, null.ok = TRUE) assert_number(max_time, lower = 0, finite = TRUE, null.ok = TRUE) ## extract names for event time and status variables surv_vars <- all.vars(update(formula, .~0)) vars <- if ("." %in% all.vars(formula)) { names(data) } else { all.vars(formula) } uvars <- union(surv_vars, vars) if (!all(uvars %in% vars)) { stop(paste("Variables provided in formula not in data set:", paste0(setdiff(uvars, vars), collapse = ", "))) } ## standardize event time and status names proposed_names <- c("ped_start", "ped_time", "ped_status") ind <- ifelse(length(surv_vars) == 2, 2, 1):3 proposed_names <- proposed_names[ind] if (any(proposed_names %in% names(data))) { stop(paste0("Error in attempt to rename provided time/status variables: Variables ", intersect(proposed_names, names(data)), " allready in data set.")) } data <- rename( data, !!!set_names( surv_vars, as.list(proposed_names))) formula_cut <- update_formula(formula, proposed_names) # obtain interval breaks points cut <- get_cut(data, formula_cut, cut = cut, max_time = max_time) cut <- sort(unique(cut)) ## crate argument list to be passed to survSplit dots <- list(...) 
dots$data <- data dots$formula <- update_formula(formula, proposed_names) dots$cut <- dots_in$cut <- cut rm(data) # if id allready in the data set, remove id variable from dots but keep # id variable for later rearrangment if (!is.null(dots$id)) { id_var <- dots$id } else { id_var <- "id" dots$id <- id_var } if (id_var %in% names(dots$data)) { if (length(unique(dots$data[[id_var]])) != nrow(dots$data) & !multiple_id) { stop(paste0("Specified ID variable (", id_var, ") must have same number of unique values as number of rows in 'data'.")) } if (id_var %in% vars) { dots$id <- NULL } else { dots$id <- NULL dots$formula <- update(dots$formula, paste0("~ . + ", id_var)) } } # create data in ped format split_df <- do.call(survSplit, args = dots) if("ped_start" %in% colnames(split_df)) { split_df <- rename(split_df, !!!set_names("ped_start", "tstart")) } # Add variables for piece-wise exponential (additive) model split_df <- split_df %>% mutate( ped_status = ifelse(.data$ped_status == 1 & .data$ped_time > max(cut), 0L, .data$ped_status), ped_time = pmin(.data$ped_time, max(cut)), offset = log(.data$ped_time - .data$tstart)) %>% filter(!(.data$tstart == .data$ped_time)) ## combine data with general interval info if(length(surv_vars) == 3) { info_cut <- split_df %>% select(one_of(c("tstart", "ped_time"))) %>% unique() } else { info_cut <- cut } int_info <- int_info(info_cut) split_df <- left_join(split_df, int_info, by = c("tstart" = "tstart")) ## rearrange columns move <- c(id_var, "tstart", "tend", "interval", "intmid", "intlen", "offset", "ped_time", "ped_status") split_df <- split_df %>% select(one_of(move), everything(), -one_of(c("intmid", "intlen", "ped_time"))) ## set class and and attributes class(split_df) <- c("ped", class(split_df)) attr(split_df, "breaks") <- sort(unique(cut)) attr(split_df, "id_var") <- dots_in$id <- id_var attr(split_df, "intvars") <- c(id_var, "tstart", "tend", "interval", "offset", "ped_status") attr(split_df, "trafo_args") <- dots_in 
split_df } #' Split data to obtain recurrent event data in PED format #' #' Currently, the input data must be in start-stop notation for each spell and #' contain a colum that indicates the spell (event number). #' @inherit split_data #' @inheritParams get_cut #' @param transition A character indicating the column in data that indicates the #' event/episode number for recurrent events. #' @param event The value that encodes the occurrence of an event in the data set. #' @param timescale Defines the timescale for the recurrent event data transformation. #' Defaults to \code{"gaptime"}. #' @param min_events Minimum number of events for each event number. #' @keywords internal split_data_multistate <- function( formula, data, transition = character(), cut = NULL, max_time = NULL, event = 1L, min_events = 1L, timescale = c("gap", "calendar"), ...) { assert_character(transition, min.chars = 1L, min.len = 1L, any.missing = FALSE, len = 1L) assert_integer(min_events, lower = 1L, len = 1L) assert_character(timescale) timescale <- match.arg(timescale) dots_in <- list(...) dots_in$formula <- formula ## assert that inputs have correct formats assert_class(formula, "formula") assert_data_frame(data, min.rows = 1, min.cols = 2) assert_numeric(cut, lower = 0, finite = TRUE, any.missing = FALSE, min.len = 1, null.ok = TRUE) assert_number(max_time, lower = 0, finite = TRUE, null.ok = TRUE) ## extract names for event time and status variables surv_vars <- all.vars(update(formula, .~0)) vars <- if ("." %in% all.vars(formula)) { names(data) } else { all.vars(formula) } uvars <- union(surv_vars, vars) if (!all(uvars %in% vars)) { stop(paste("Variables provided in formula not in data set:", paste0(setdiff(uvars, vars), collapse = ", "))) } ## obtain interval breaks points for each spell if(timescale == "gap") { data <- mutate(data, !!!list(.time = quo(!!as.name(surv_vars[2]) - !!as.name(surv_vars[1])))) formula <- update(formula, Surv(.time, status)~.) 
formula <- update_formula(formula, proposed_names = c(".time", surv_vars[3])) } # split data for each spell data_list <- split(data, data[[transition]]) # rm(data) # only keep spells with minimum number of events data_list <- data_list[map_dbl(data_list, ~sum(.x[[surv_vars[3]]])) >= min_events] cuts <- get_cut(data_list, formula, cut = cut, max_time = max_time, event = event, timescale = timescale) ## create argument list to be passed to split_data dots <- list(...) dots$multiple_id <- TRUE # possible in case of multi-state models with back transitions # if id allready in the data set, remove id variable from dots but keep # id variable for later rearrangment if (!is.null(dots$id)) { id_var <- dots$id } else { id_var <- "id" dots$id <- id_var } split_df_list <- map2( .x = data_list, .y = ifelse(is.list(cuts), cuts, list(cuts)), .f = ~ { dots$data <- .x dots$formula <- formula dots$cut <- .y split_df <- do.call(split_data, dots) } ) split_df <- bind_rows(split_df_list) split_df <- split_df %>% arrange(.data[[transition]], .data[[dots$id]], .data[["tstart"]]) # remove all obs beyond last observed event time if (is.null(max_time)) { max_time <- max(split_df[["tend"]][split_df[["ped_status"]] == 1]) split_df <- split_df %>% filter(.data[["tend"]] <= max_time) } # if (timescale == "calendar") { # split_check <- split_df %>% # group_by(.data[[dots$id]]) %>% # summarize(dups = sum(duplicated(.data[["tstart"]]))) # if (any(split_check[["dups"]]) != 0) { # stop("Something went wrong during data transformation. 
\n Please post an issue at 'https://github.com/adibender/pammtools/issues' with your code and data") # } # } ## set class and and attributes class(split_df) <- c("ped", class(split_df)) attr(split_df, "breaks") <- cuts attr(split_df, "id_var") <- dots_in$id <- id_var attr(split_df, "intvars") <- c(id_var, "tstart", "tend", "interval", "offset", "ped_status") dots_in$transition <- transition dots_in$timescale <- timescale dots_in$cut <- sort(unique(cuts)) dots_in$max_time <- max_time dots_in$event <- event dots_in$min_events <- min_events attr(split_df, "trafo_args") <- dots_in class(split_df) <- unique(class(split_df)) split_df } pammtools/R/formula-utils.R0000644000176200001440000000510314222504522015411 0ustar liggesusers#' Extract variables from the left-hand-side of a formula #' #' @rdname formula_helpers #' @param formula A \code{\link{formula}} object. #' @import Formula #' @keywords internal get_lhs_vars <- function(formula) { if (is.character(formula) ) formula <- as.formula(formula) formula(Formula(formula), lhs = TRUE, rhs = FALSE) %>% all.vars() } #' Extract variables from the right-hand side of a formula #' #' @rdname formula_helpers #' @keywords internal get_rhs_vars <- function(formula) { if (is.character(formula) ) formula <- as.formula(formula) formula(Formula(formula), lhs = FALSE, rhs = TRUE) %>% all.vars() } #' @inherit get_lhs_vars #' @keywords internal get_tdc_vars <- function( formula, specials = "cumulative", data = NULL) { f_specials <- get_tdc_form(formula, data = data, tdc_specials = specials) terms_f <- terms(f_specials, specials = specials) all.vars(terms_f) } #' @inherit get_lhs_vars #' @keywords internal get_tdc_form <- function( formula, data = NULL, tdc_specials = c("concurrent", "cumulative"), invert = FALSE) { terms <- terms(formula, data = data, specials = tdc_specials) labels <- attr(terms, "term.labels") ind_tdc <- map(tdc_specials, ~grep(.x, labels)) %>% unlist() if(invert) { if(length(ind_tdc) > 0) { formula(terms[ind_tdc * 
-1]) } else { formula } } else { formula(terms[ind_tdc]) } } #' @inherit get_lhs_vars #' @keywords internal get_ped_form <- function( formula, data = NULL, tdc_specials = c("concurrent", "cumulative")) { get_tdc_form(formula, data = data, tdc_specials = tdc_specials, invert = TRUE) } #' @keywords internal has_tdc_form <- function( formula, tdc_specials = c("concurrent", "cumulative")) { form_chr <- as.character(formula) %>% paste0(collapse = "") any(map_lgl(tdc_specials, ~grepl(.x, form_chr))) } has_lhs <- function(formula) { length(Formula(formula))[1] > 0 } update_formula <- function(formula, proposed_names) { lhs_vars <- get_lhs_vars(formula) stopifnot(length(lhs_vars) == length(proposed_names)) rhs_form <- formula(Formula(formula), rhs = 1, lhs = 0) lhs_vars <- proposed_names lhs_form <- paste0("Surv(", paste0(lhs_vars, collapse=", "), ")") as.formula(paste0(lhs_form, "~", as.character(rhs_form))[2]) } add_to_rhs <- function(formula, rhs_additions = NULL) { lhs_vars <- get_lhs_vars(formula) rhs_vars <- c(get_rhs_vars(formula), rhs_additions) as.formula( paste0( "Surv(", paste0(lhs_vars, collapse=","), ") ~ ", paste0(rhs_vars, collapse = "+") ) ) } pammtools/R/convenience-plots.R0000644000176200001440000001011014452536154016246 0ustar liggesusers#' Plot smooth 1d terms of gam objects #' #' Given a gam model this convenience function returns a plot of all #' smooth terms contained in the model. If more than one smooth is present, the #' different smooth are faceted. #' #' @param x A data frame or object of class \code{ped}. #' @param ... Further arguments passed to \code{\link{get_terms}} #' @import ggplot2 #' @return A \code{\link[ggplot2]{ggplot}} object. #' @examples #' g1 <- mgcv::gam(Sepal.Length ~ s(Sepal.Width) + s(Petal.Length), data=iris) #' gg_smooth(iris, g1, terms=c("Sepal.Width", "Petal.Length")) #' @export #' @seealso get_terms gg_smooth <- function(x, ...) { UseMethod("gg_smooth", x) } #' @rdname gg_smooth #' @param fit A model object. 
#' @export gg_smooth.default <- function(x, fit, ...) { sobj <- get_terms(data = x, fit = fit, ...) ggsmooth <- ggplot(sobj, aes(x = .data[["x"]], y = .data[["eff"]], group = .data[["term"]])) + geom_hline(yintercept = 0, lty = 3) + geom_line() + geom_ribbon(aes(ymin = .data[["ci_lower"]], ymax = .data[["ci_upper"]]), alpha = 0.2) + facet_wrap(~term, scales = "free_x") + ylab(expression(f[p](x[p]))) + xlab(expression(x[p])) return(ggsmooth) } #' Plot tensor product effects #' #' Given a gam model this convenience function returns a \code{ggplot2} object #' depicting 2d smooth terms specified in the model as heat/contour plots. If #' more than one 2d smooth term is present individual terms are faceted. #' @inheritParams tidy_smooth2d #' @importFrom tidyr gather #' @importFrom dplyr mutate #' @examples #' g <- mgcv::gam(Sepal.Length ~ te(Sepal.Width, Petal.Length), data=iris) #' gg_tensor(g) #' gg_tensor(g, ci=TRUE) #' gg_tensor(update(g, .~. + te(Petal.Width, Petal.Length))) #' @seealso \code{\link{tidy_smooth2d}} #' @export gg_tensor <- function(x, ci = FALSE, ...) { df2d <- tidy_smooth2d(x, ci = ci, se = ci, ...) 
if (ci) { df2d <- df2d %>% gather("type", "fit", .data$fit, .data$ci_lower, .data$ci_upper) %>% mutate(type = factor(.data$type, levels = c("ci_lower", "fit", "ci_upper"))) } gg2d <- ggplot(df2d, aes(x = .data[["x"]], y = .data[["y"]], z = .data[["fit"]])) + geom_raster(aes(fill = .data[["fit"]])) + scale_fill_gradient2( name = expression(f(list(x, y))), low = "steelblue", high = "firebrick2") + scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0)) + geom_contour(col = "grey30") if (ci) { gg2d + facet_grid(main ~ type, scales = "free") } else { gg2d + facet_wrap(~main, scales = "free") } } #' Plot Normal QQ plots for random effects #' #' @inherit tidy_re #' @import ggplot2 #' @examples #' library(pammtools) #' data("patient") #' ped <- patient %>% #' dplyr::slice(1:100) %>% #' as_ped(Surv(Survdays, PatientDied)~ ApacheIIScore + CombinedicuID, id="CombinedID") #' pam <- mgcv::gam(ped_status ~ s(tend) + ApacheIIScore + s(CombinedicuID, bs="re"), #' data=ped, family=poisson(), offset=offset) #' gg_re(pam) #' plot(pam, select = 2) #' @seealso \code{\link{tidy_re}} #' @export gg_re <- function(x, ...) { re <- tidy_re(x, ...) ggplot(re, aes(sample = .data[["fit"]])) + geom_abline(aes(intercept = .data[["qqintercept"]], slope = .data[["qqslope"]])) + geom_qq(distribution = stats::qnorm) + facet_wrap(~main) + theme_set(theme_bw()) } #' Forrest plot of fixed coefficients #' #' @inherit tidy_fixed #' @param intercept Logical, indicating whether intercept term should be included. #' Defaults to \code{FALSE}. #' @import ggplot2 #' @seealso \code{\link{tidy_fixed}} #' @examples #' g <- mgcv::gam(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + Species, #' data=iris) #' gg_fixed(g, intercept=TRUE) #' gg_fixed(g) #' @export gg_fixed <- function(x, intercept=FALSE, ...) { fixed_df <- tidy_fixed(x, intercept = intercept, ...) 
ggplot(fixed_df, aes(x = .data[["variable"]], y = .data[["coef"]],
    ymin = .data[["ci_lower"]], ymax = .data[["ci_upper"]])) +
    geom_hline(yintercept = 0, lty = 3) +
    geom_pointrange() +
    coord_flip() +
    # NOTE(review): label claims 1.96 * SE, but calc_ci() computes
    # coef +/- 2 * SE -- confirm which is intended.
    ylab(expression(hat(beta) %+-% 1.96 %.% SE)) +
    xlab("")

}
pammtools/R/rpexp.R0000644000176200001440000000212214241673657013763 0ustar liggesusers#' Draw random numbers from piece-wise exponential distribution.
#'
#' This is a copy of the same function from \code{rpexp} from package
#' \pkg{msm}.
#' Copied here to reduce dependencies.
#'
#' @inheritParams msm::rpexp
#' @importFrom stats rexp
#'
#' @keywords internal
rpexp <- function (n = 1, rate = 1, t = 0) {
  # validate the piecewise specification: one rate per interval, intervals
  # start at time 0 and boundaries must be increasing
  if (length(t) != length(rate))
    stop("length of t must be equal to length of rate")
  if (!isTRUE(all.equal(0, t[1])))
    stop("first element of t should be 0")
  if (is.unsorted(t))
    stop("t should be in increasing order")
  # mimic stats::rexp: a vector-valued n means "draw length(n) numbers"
  if (length(n) > 1)
    n <- length(n)
  if (n == 0)
    return(numeric(0))
  # single rate: reduces to a plain exponential distribution
  if (length(rate) == 1)
    return(rexp(n, rate))
  ret <- numeric(n)
  left <- 1:n
  for (i in seq_along(rate)) {
    re <- rexp(length(left), rate[i])
    r <- t[i] + re
    # accept draws that fall inside the current interval; remaining draws
    # are redrawn from the next interval's rate (memorylessness); the last
    # interval is unbounded, so everything left is accepted there
    success <- if (i == length(rate))
      seq_along(left)
    else which(r < t[i + 1])
    ret[left[success]] <- r[success]
    left <- setdiff(left, left[success])
    if (length(left) == 0)
      break
  }
  ret
}
pammtools/R/model-evaluation.R0000644000176200001440000000117614222504522016061 0ustar liggesusers#' Transform crps object to data.frame
#'
#' An \code{as.data.frame} S3 method for objects of class \code{\link[pec]{crps}}.
#'
#' @inheritParams base::as.data.frame
#' @param x An object of class \code{crps}. See \code{\link[pec]{crps}}.
#' @importFrom tidyr pivot_longer
#'
#' @export
as.data.frame.crps <- function(x, row.names = NULL, optional = FALSE, ...)
{
  # strip the crps class, keeping the underlying matrix and its dimnames
  m <- matrix(x, nrow = dim(x)[1], ncol = dim(x)[2])
  colnames(m) <- attr(x, "dimnames")[[2]]
  m <- as.data.frame(m)
  # first dimension labels the method(s) the score was computed for
  m$method <- attr(x, "dimnames")[[1]]
  # long format: one row per method/time combination, score in column "IBS"
  m <- m %>%
    pivot_longer(cols = -.data$method, values_to = "IBS") %>%
    dplyr::rename(time = .data$name)
}
pammtools/R/tdc-utils.R0000644000176200001440000000122314222504522014515 0ustar liggesusers#' Checks if data contains time-dependent covariates
#'
#' @param data A data frame (potentially) containing time-dependent covariates.
#' @param id_var A character indicating the grouping variable. For each covariate
#' it will be checked if their values change within a group specified by
#' \code{id_var}.
#' @import dplyr
#' @return Logical. \code{TRUE} if data contains time-dependent covariates, else \code{FALSE}.
#' @keywords internal
has_tdc <- function(data, id_var) {
  # a covariate is time-dependent if it takes more than one unique value
  # within at least one id group
  data %>%
    group_by(!!sym(id_var)) %>%
    summarize_all(.funs = ~any(length(unique(.)) > 1)) %>%
    select(-one_of(id_var)) %>%
    summarize_all(any) %>%
    unlist() %>%
    any()
}
pammtools/R/as-ped.R0000644000176200001440000002624114452536154014000 0ustar liggesusers#' Transform data to Piece-wise Exponential Data (PED)
#'
#' This is the general data transformation function provided by the
#' \code{pammtools} package. Three main applications must be distinguished:
#' \enumerate{
#' \item Transformation of standard time-to-event data.
#' \item Transformation of left-truncated time-to-event data.
#' \item Transformation of time-to-event data with time-dependent covariates (TDC).
#' }
#' For the latter, the type of effect one wants to estimate is also
#' important for the data transformation step.
#' In any case, the data transformation is specified by a two sided formula.
#' In case of TDCs, the right-hand-side of the formula can contain formula specials
#' \code{concurrent} and \code{cumulative}.
#' See the \href{https://adibender.github.io/pammtools//articles/data-transformation.html}{data-transformation}
#' vignette for details.
#' #' #' @rdname as_ped #' @param data Either an object inheriting from data frame or in case of #' time-dependent covariates a list of data frames (of length 2), where the first data frame #' contains the time-to-event information and static covariates while the second #' (and potentially further data frames) contain information on time-dependent #' covariates and the times at which they have been observed. #' @param formula A two sided formula with a \code{\link[survival]{Surv}} object #' on the left-hand-side and covariate specification on the right-hand-side (RHS). #' The RHS can be an extended formula, which specifies how TDCs should be transformed #' using specials \code{concurrent} and \code{cumulative}. The left hand-side can #' be in start-stop-notation. This, however, is only used to create left-truncated #' data and does not support the full functionality. #' @param cut Split points, used to partition the follow up into intervals. #' If unspecified, all unique event times will be used. #' @param max_time If \code{cut} is unspecified, this will be the last #' possible event time. All event times after \code{max_time} #' will be administratively censored at \code{max_time}. #' @param tdc_specials A character vector. Names of potential specials in #' \code{formula} for concurrent and or cumulative effects. #' @param censor_code Specifies the value of the status variable that indicates censoring. #' Often this will be \code{0}, which is the default. #' @param ... Further arguments passed to the \code{data.frame} method and #' eventually to \code{\link[survival]{survSplit}} #' @importFrom Formula Formula #' @examples #' tumor[1:3, ] #' tumor[1:3, ] %>% as_ped(Surv(days, status)~ age + sex, cut = c(0, 500, 1000)) #' tumor[1:3, ] %>% as_ped(Surv(days, status)~ age + sex) #' @return A data frame class \code{ped} in piece-wise exponential data format. #' @export as_ped <- function(data, ...) 
{ UseMethod("as_ped", data) } #' @rdname as_ped #' @export as_ped.data.frame <- function( data, formula, cut = NULL, max_time = NULL, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, transition = character(), timescale = c("gap", "calendar"), min_events = 1L, ...) { status_error(data, formula, censor_code) assert_subset(tdc_specials, c("concurrent", "cumulative")) if (test_character(transition, min.chars = 1L, min.len = 1L)) { ped <- as_ped_multistate(data = data, formula = formula, cut = cut, max_time = max_time, tdc_specials = tdc_specials, censor_code = censor_code, transition = transition, timescale = timescale, min_events = min_events, ... ) return(ped) } event_types <- get_event_types(data, formula, censor_code) if (length(event_types) > 1) { ped <- as_ped_cr(data = data, formula = formula, cut = cut, max_time = max_time, tdc_specials = tdc_specials, censor_code = censor_code, ...) } else { dots <- list(...) dots$data <- data dots$formula <- get_ped_form(formula, data = data, tdc_specials = tdc_specials) dots$cut <- cut dots$max_time <- max_time ped <- do.call(split_data, dots) attr(ped, "time_var") <- get_lhs_vars(dots$formula)[1] attr(ped, "status_var") <- get_lhs_vars(dots$formula)[2] } ped } #' @rdname as_ped #' @export as_ped.nested_fdf <- function( data, formula, ...) { dots <- list(...) # update interval break points (if necessary) cut <- dots$cut if (is.null(cut)) { cut <- attr(data, "breaks") } ccr_breaks <- attr(data, "ccr_breaks") cut <- union(cut, ccr_breaks[ccr_breaks <= max(cut)]) %>% sort() # ped <- data %>% # select_if(is.atomic) %>% # as.data.frame() %>% # as_ped( # formula = formula, # id = dots$id, # cut = cut, # max_time = dots$max_time, # ...) 
dots$formula <- formula dots$data <- as.data.frame(select_if(data, is.atomic)) dots$cut <- cut ped <- do.call(as_ped, dots) # replace updated attributes attr(data, "breaks") <- attr(ped, "breaks") attr(data, "id_n") <- ped %>% group_by(!!sym(attr(data, "id_var"))) %>% summarize(id_n = n()) %>% pull("id_n") %>% as_vector() attr(data, "id_tseq") <- ped %>% group_by(!!sym(attr(data, "id_var"))) %>% transmute(id_tseq = row_number()) %>% pull("id_tseq") %>% as_vector() attr(data, "id_tz_seq") <- rep(seq_len(nrow(data)), times = attr(data, "id_n")) if (has_special(formula, "concurrent")) { ped <- ped %>% add_concurrent(data = data, id_var = dots$id) } if (has_special(formula, "cumulative")) { ped <- add_cumulative(ped, data = data, formula = formula) attr(ped, "ll_weights") <- imap(attr(ped, "tz"), ~bind_cols(!!.y := .x, ll_weight = c(mean(abs(diff(.x))), abs(diff(.x))))) class(ped) <- c("fped", class(ped)) } attr(ped, "time_var") <- get_lhs_vars(formula)[1] attr(ped, "func_mat_names") <- make_mat_names( attr(ped, "func"), attr(ped, "time_var")) ped } #' @rdname as_ped #' @export as_ped.list <- function( data, formula, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, ...) { assert_class(data, "list") assert_class(formula, "formula") status_error(data[[1]], formula, censor_code) nl <- length(data) # form <- Formula(formula) has_tdc <- has_tdc_form(formula, tdc_specials = tdc_specials) if (nl == 1 & !has_tdc) { ped <- data[[1]] %>% as_ped(formula = formula, tdc_specials = tdc_specials, ...) } else { if (nl == 2 & !has_tdc) { stop("Two data sets provided in 'data' but no specification of time-dependent covariate effects in 'formula'") } else { nested_fdf <- nest_tdc(data, formula, ...) ped <- as_ped(nested_fdf, formula, ...) } } lhs_vars <- get_lhs_vars(formula) attr(ped, "time_var") <- lhs_vars[1] attr(ped, "trafo_args")$formula <- formula ped } #' @rdname as_ped #' @param x any R object. 
#' @export is.ped <- function(x) inherits(x, "ped") #' @rdname as_ped #' @param newdata A new data set (\code{data.frame}) that contains the same #' variables that were used to create the PED object (code{data}). #' @export as_ped.ped <- function(data, newdata, ...) { if (is.ped(newdata)) { stop("newdata already in ped format.") } trafo_args <- attr(data, "trafo_args") trafo_args[["data"]] <- newdata do.call(as_ped, trafo_args) } #' @rdname as_ped #' @export as_ped.pamm <- function(data, newdata, ...) { if (is.ped(newdata)) { stop("newdata already in ped format.") } trafo_args <- data[["trafo_args"]] trafo_args$data <- newdata do.call(split_data, trafo_args) } ## Competing risks #' Competing risks trafo #' #' @inherit as_ped #' @importFrom rlang .env #' #' @keywords internal as_ped_cr <- function( data, formula, cut = NULL, max_time = NULL, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, combine = TRUE, ...) { lhs_vars <- get_lhs_vars(formula) n_lhs <- length(lhs_vars) event_types <- get_event_types(data, formula, censor_code) n_events <- sum(event_types != censor_code) cut <- map2( event_types, if(is.list(cut)) cut else list(cut), function(.event, .cut) { get_cut(data, formula = formula, cut = .cut, max_time = NULL, event = .event) } ) if(length(cut) > 1 & combine) { cut <- list(reduce(cut, union)) } ped <- map2( event_types, cut, function(.event, .cut) { ped_i <- data %>% mutate(!!lhs_vars[n_lhs] := 1L * (.data[[lhs_vars[n_lhs]]] == .env[[".event"]])) %>% as_ped( formula = formula, cut = .cut, max_time = max_time, tdc_specials = tdc_specials, ...) 
ped_i$cause <- .event ped_i }) if (combine) { ped <- do.call(rbind, ped) class(ped) <- c("ped_cr_union", "ped_cr", class(ped)) attr(ped, "intvars") <- c(attr(ped, "intvars"), "cause") attr(ped, "breaks") <- if (length(cut) ==1) unlist(cut) else cut } else { class(ped) <- c("ped_cr_list", "ped_cr", "ped", class(ped)) names(ped) <- paste0("cause = ", event_types) attributes(ped)$trafo_args$id <- attributes(ped[[1]])$trafo_args$id attributes(ped)$trafo_args$formula <- formula } attr(ped, "trafo_args")[["cut"]] <- if (length(cut) ==1) unlist(cut) else cut attr(ped, "trafo_args")[["combine"]] <- combine attr(ped, "trafo_args")[["censor_code"]] <- censor_code attr(ped, "risks") <- event_types ped } #' Exctract event types #' #' Given a formula that specifies the status variable of the outcome, this function #' extracts the different event types (except for censoring, specified by #' \code{censor_code}). #' #' @inheritParams as_ped #' #' @keywords internal get_event_types <- function(data, formula, censor_code) { lhs_vars <- get_lhs_vars(formula) status_values <- unique(data[[lhs_vars[length(lhs_vars)]]]) %>% sort() status_values[status_values != censor_code] } #' Recurrent events trafo #' #' @examples #' \dontrun{ #' data("cgd", package = "frailtyHL") #' cgd2 <- cgd %>% #' select(id, tstart, tstop, enum, status, age) %>% #' filter(enum %in% c(1:2)) #' ped_re <- as_ped_multistate( #' formula = Surv(tstart, tstop, status) ~ age + enum, #' data = cgd2, #' transition = "enum", #' timescale = "calendar") #' } #' @rdname as_ped #' @export #' @keywords internal as_ped_multistate <- function( data, formula, cut = NULL, max_time = NULL, tdc_specials = c("concurrent", "cumulative"), censor_code = 0L, transition = character(), timescale = c("gap", "calendar"), min_events = 1L, ... 
) { assert_character(transition, min.chars = 1L, min.len = 1L, any.missing = FALSE, len = 1L) assert_integer(min_events, lower = 1L, len = 1L) status_error(data, formula, censor_code) assert_subset(tdc_specials, c("concurrent", "cumulative")) rhs_vars <- get_rhs_vars(formula) if (!(transition %in% rhs_vars)) { formula <- add_to_rhs(formula, transition) } dots <- list(...) dots$data <- data dots$formula <- get_ped_form(formula, data = data, tdc_specials = tdc_specials) dots$cut <- sort(unique(cut)) dots$max_time <- max_time dots$transition <- transition dots$min_events <- min_events dots$timescale <- timescale ped <- do.call(split_data_multistate, dots) attr(ped, "time_var") <- get_lhs_vars(dots$formula)[1] return(ped) } pammtools/R/nest-utils.R0000644000176200001440000001027714222504522014725 0ustar liggesusers#' Create nested data frame from data with time-dependent covariates #' #' Provides methods to nest data with time-dependent covariates (TDCs). #' A \code{formula} must be provided where the right hand side (RHS) contains #' the structure of the TDCs #' #' @param data A suitable data structure (e.g. unnested data frame with #' concurrent TDCs or a list where each element is a data frame, potentially #' containing TDCs as specified in the RHS of \code{formula}). #' Only TDCs present in \code{formula} will be returned. #' @param formula A two sided formula with a two part RHS, where the second #' part indicates the structure of the TDC structure. #' @param ... Further arguments passed to methods. #' @import checkmate dplyr #' @importFrom tidyr nest #' @importFrom purrr map map_int reduce #' @export #' @keywords internal nest_tdc <- function(data, formula, ...) { UseMethod("nest_tdc", data) } #' @rdname nest_tdc #' @param vars A character vector of TDCs that will be nested. #' @param id A character giving the name of the ID column. #' @export nest_tdc.default <- function(data, formula, ...) { dots <- list(...) 
id <- dots$id tdc_vars <- dots$tdc_vars outcome_vars <- dots$outcome_vars if (is.null(tdc_vars)) { tdc_vars <- get_tdc_vars(formula) } if (is.null(outcome_vars)) { outcome_vars <- get_lhs_vars(formula) } tdc_vars <- setdiff(tdc_vars, outcome_vars) if (!any(colnames(data) %in% tdc_vars) | !has_tdc(data, id)) { vars_to_exclude <- intersect(colnames(data), tdc_vars) return(data %>% select(-one_of(vars_to_exclude))) } else { df_list <- map( tdc_vars, ~ tidyr::nest(.data = data[, c(id, .x)], {{.x}} := one_of(.x))) suppressMessages(nested_df <- df_list %>% reduce(left_join)) # better: numeric vectors in each list element class(nested_df) <- c("nested_fdf", class(nested_df)) } nested_df %>% as_tibble() } #' @rdname nest_tdc #' @export nest_tdc.list <- function(data, formula, ...) { dots <- list(...) cut <- dots[["cut"]] data_dummy <- suppressMessages( map(data, ~.x[1,]) %>% do.call(what = left_join)) # preprocess information on time-dependent covariates lgl_concurrent <- has_special(formula, special = "concurrent") lgl_cumulative <- has_special(formula) tdc_vars <- character(0) form_tdc <- get_tdc_form(formula, data = data_dummy) if (lgl_concurrent) { # obtain information on concurrent effects ccr <- prep_concurrent(data, form_tdc) # update cut points ccr_time <- ccr[["ccr_time"]] # update vector of TDCs ccr_tz_vars <- map_chr(ccr[["ccr_list"]], ~.x[["tz_var"]]) %>% unique() ccr_vars <- map(ccr[["ccr_list"]], ~.x[["col_vars"]]) %>% unlist() tdc_vars <- c(tdc_vars, ccr_tz_vars, ccr_vars) } else { ccr <- NULL ccr_time <- NULL } if (lgl_cumulative) { func_list <- eval_special(form_tdc, data = data[[2]]) func_vars <- map(func_list, ~.x[["col_vars"]]) %>% unlist() func_tz_vars <- map_chr(func_list, ~.x[["tz_var"]]) %>% unique() tdc_vars <- c(tdc_vars, func_vars, func_tz_vars) %>% unique() } else { func_list <- NULL } # remove outcome vars from TDCs vector outcome_vars <- get_lhs_vars(formula) time_var <- outcome_vars[1] tdc_vars <- setdiff(tdc_vars, outcome_vars) 
suppressMessages( nested_df <- map(data, ~nest_tdc(.x, formula = formula, id = dots$id, tdc_vars = tdc_vars, outcome_vars = outcome_vars)) %>% reduce(left_join) %>% as_tibble() ) ## add atrributes cut <- get_cut(nested_df, formula, cut = dots$cut, max_time = dots$max_time) id_n <- nested_df %>% pull(time_var) %>% pmin(max(cut)) %>% map_int(findInterval, vec = cut, left.open = TRUE, rightmost.closed = TRUE) attr_old <- attributes(nested_df) attributes(nested_df) <- c(attr_old, list( id_var = dots$id, time_var = time_var, status_var = outcome_vars[2], tdc_vars = tdc_vars, breaks = cut, ccr = ccr, ccr_breaks = ccr_time, func_list = func_list, id_n = id_n, id_tseq = id_n %>% map(seq_len) %>% unlist(), id_tz_seq = rep(seq_along(nested_df[[dots$id]]), times = id_n))) class(nested_df) <- c("nested_fdf", class(nested_df)) nested_df } pammtools/R/cumulative-coefficient.R0000644000176200001440000001336714452536354017270 0ustar liggesusers#' Extract cumulative coefficients (cumulative hazard differences) #' #' These functions are designed to extract (or mimic) the cumulative coefficients #' usually used in additive hazards models (Aalen model) to depict (time-varying) #' covariate effects. For PAMMs, these are the differences #' between the cumulative hazard rates where all covariates except one have the #' identical values. For a numeric covariate of interest, this calculates #' \eqn{\Lambda(t|x+1) - \Lambda(t|x)}. For non-numeric covariates #' the cumulative hazard of the reference level is subtracted from #' the cumulative hazards evaluated at all non reference levels. Standard #' errors are calculated using the delta method. #' #' @rdname cumulative_coefficient #' @param model Object from which to extract cumulative coefficients. #' @param data Additional data if necessary. #' @param terms A character vector of variables for which the cumulative #' coefficient should be calculated. #' @param ... Further arguments passed to methods. 
#' @export get_cumu_coef <- function(model, data = NULL, terms, ...) { UseMethod("get_cumu_coef", model) } #' @rdname cumulative_coefficient #' @export get_cumu_coef.gam <- function(model, data, terms, ...) { data <- ped_info(data) map(terms, ~cumu_coef(data, model, quo_name(sym(.)), ...)) %>% bind_rows() } #' @rdname cumulative_coefficient #' @param ci Logical. Indicates if confidence intervals should be returned as #' well. #' @export get_cumu_coef.aalen <- function(model, data = NULL, terms, ci = TRUE, ...) { terms <- map(c("time", terms), ~grep(.x, colnames(model$cum), value = TRUE)) %>% reduce(union) cumu_coef <- model[["cum"]] %>% as_tibble() %>% select(one_of(terms)) %>% gather("variable", "cumu_hazard", -.data[["time"]]) cumu_var <- model[["var.cum"]] %>% as_tibble() %>% select(terms) %>% gather("variable", "cumu_var", -.data[["time"]]) suppressMessages( left_join(cumu_coef, cumu_var) %>% mutate( method = class(model)[1], cumu_lower = .data$cumu_hazard - 2 * .data$cumu_var ** 0.5, cumu_upper = .data$cumu_hazard + 2 * .data$cumu_var ** 0.5) %>% select(one_of(c("method", "variable", "time")), everything(), -one_of("cumu_var")) ) } #' @rdname cumulative_coefficient #' @export get_cumu_coef.cox.aalen <- function(model, data = NULL, terms, ci = TRUE, ...) { get_cumu_coef.aalen(model = model, data = data, terms = terms, ci = ci, ...) } get_cumu_diff <- function(d1, d2, model, nsim = 100L) { lp <- compute_cumu_diff(d1, d2, model, nsim = nsim) d2 %>% mutate( cumu_hazard = lp[["cumu_diff"]], cumu_lower = lp[["cumu_lower"]], cumu_upper = lp[["cumu_upper"]]) } #' @import dplyr purrr #' @importFrom rlang sym enquo quo_name #' @keywords internal cumu_coef <- function(data, model, term, ...) 
{ if (quo_name(term) == "(Intercept)") { return(get_cumu_coef_baseline(data, model)) } if (is.character(term)) { term <- sym(term) } else { term <- enquo(term) } qname_term <- quo_name(term) if (!is.numeric(data[[qname_term]])) { x <- levels(as.factor(unique(data[[qname_term]]))) } else { x <- mean(data[[qname_term]], na.rm = TRUE) x <- c(x, x + 1) } dat_list <- map(.x = x, function(z) { mutate_at(.tbl = data, .vars = qname_term, .funs = ~identity(z)) %>% mutate(variable = paste0(qname_term, ifelse(is.numeric(z), "", paste0(" (", z, ")")))) }) map2( .x = dat_list[1], .y = dat_list[-1], .f = ~ get_cumu_diff(.x, .y, model)) %>% map( ~ select(., one_of(c("variable", "tend")), contains("cumu")) %>% rename("time" = "tend") %>% mutate(method = class(model)[1]) ) %>% bind_rows() %>% select(one_of(c("method", "variable", "time")), everything()) } #' @keywords internal get_cumu_coef_baseline <- function(data, model, ...) { vars_modify <- colnames(data)[map_lgl(data, is.numeric)] %>% setdiff(c("tstart", "tend", "intlen", "intmid")) data %>% mutate_at( .vars = vars(one_of(vars_modify)), .funs = ~c(0)) %>% add_cumu_hazard(model) %>% mutate( method = class(model)[1], variable = "(Intercept)") %>% rename("time" = "tend") %>% select(one_of(c("method", "variable", "time", "cumu_hazard", "cumu_lower", "cumu_upper"))) } #' Calculate difference in cumulative hazards and respective standard errors #' #' CIs are calculated by sampling coefficients from their posterior and #' calculating the cumulative hazard difference \code{nsim} times. The CI #' are obtained by the 2.5\% and 97.5\% quantiles. #' #' @param d1 A data set used as \code{newdata} in \code{predict.gam} #' @param d2 See \code{d1} #' @param model A model object for which a predict method is implemented which #' returns the design matrix (e.g., \code{mgcv::gam}). 
#' @importFrom mgcv predict.gam #' @importFrom stats coef #' @importFrom mvtnorm rmvnorm #' @keywords internal compute_cumu_diff <- function(d1, d2, model, alpha = 0.05, nsim = 100L) { X1 <- predict.gam(model, newdata = d1, type = "lpmatrix") X2 <- predict.gam(model, newdata = d2, type = "lpmatrix") V <- model$Vp coefs <- coef(model) sim_coef_mat <- rmvnorm(nsim, mean = coefs, sigma = V) sim_fit_mat <- apply(sim_coef_mat, 1, function(z) { cumsum(d2$intlen * exp(drop(X2 %*% z))) - cumsum(d1$intlen * exp(drop(X1 %*% z))) }) cumu_lower <- apply(sim_fit_mat, 1, quantile, probs = alpha / 2) cumu_upper <- apply(sim_fit_mat, 1, quantile, probs = 1 - alpha / 2) haz1 <- exp(drop(X1 %*% model$coefficients)) haz2 <- exp(drop(X2 %*% model$coefficients)) cumu_diff <- cumsum(haz2 * d2$intlen) - cumsum(haz1 * d1$intlen) list(cumu_diff = cumu_diff, cumu_lower = cumu_lower, cumu_upper = cumu_upper) } pammtools/NEWS.md0000644000176200001440000001600614453311441013366 0ustar liggesusers# pammtools 0.5.92 + Fixed competing risks data trafo in case of more than 2 causes # pammtools 0.5.9 + Fixes issue 154: direction argument to `geom_stepribbon` # pammtools 0.5.8 + removed argument `methods` from `pamm`. Can be specified via `...`. Fixes #200 + adapted `warn_about_new_time_points` when original data not stored in model object. Fixes #203 + Fixed issue where not all ped attributes were retained when applying dplyr functions #202 # pammtools 0.5.7 + added staph data with recurrent events # pammtools 0.5.6 + maintenance fix + fixes to URLs and DOIs # pammtools 0.5.4 + updates to the `split_data` function that now accepts `Surv(start, stop, event)` type inputs, e.g., to construct left-truncated data. 
+ Support and [vignette for left truncated data](https://adibender.github.io/pammtools/articles/left-truncation.html) + Support and [vignette for competing risks data](https://adibender.github.io/pammtools/articles/competing-risks.html) + Support and [vignette for recurrent events data](https://adibender.github.io/pammtools/articles/recurrent-events.html) # pammtools 0.2.4 * CRAN fix. Discrepancy between man page and code. # pammtools 0.2.3 * CRAN fix. Compliance with new dplyr version (1.0.0) # pammtools 0.2.2 * CRAN fix, removed plyr dependency (see issue #141) * `as_ped.ped` now also works for transformations with time-dependent covariates # pammtools 0.2.1 * Adds a new interface for model estimation called `pamm`, which is a thin wrapper around `mgcv::gam` with some arguments pre-set. * Adds S3 method `predictSurvProb.pamm` * Adds support and vignette for model evaluation using package **`pec`** * Fixed bug when CIs were calculated simulation based and model contained factor variables * Removed unnecessary dependencies in Imports/Suggests # pammtools 0.1.15 * Interface for specification of data transformation in `as_ped` changed. The vertical bar `|` is no longer necessary to indicate concurrent or cumulative effects # pammtools 0.1.14 * Support for new interface to tidyr # pammtools 0.1.13 * Functions `get_hazard` and `add_hazard` also gain `reference` argument. Allows to calculate (log-)hazard ratios. * Introduces breaking changes to `add_term` function. Argument `relative` is replaced by `reference`, makes calculation of relative (log-)hazards, i.e. hazard ratios, more flexible. Argument `se.fit` is replaced by `ci`. 
# pammtools 0.1.11 ## bugs * fixes bug in **`dplyr`** reverse dependency (see #101) * fixes bug in tidiers for Aalen models (see #99) ## documentation * Better documentation and functionality for `make_newdata` * Added new vignette linking to tutorial paper (online only) # pammtools 0.1.9 * maintenance update: fixes CRAN issues due to new RNG # pammtools 0.1.8 ## documentation * Updates to cumulative effect vignette * Updates to time-dependent covariate vignette (+ data transformation) * Update citation information ## Features * `concurrent` now has a `lag = 0` argument, can be set to positive integer values * `as_ped` accepts multiple `concurrent` specials with different `lag` specifications ## Bug/Issue fixes * Fixed bug caused by changes in **`checkmate`** [#73](https://github.com/adibender/pammtools/issues/73) * Bug Fixes [#42](https://github.com/adibender/pammtools/issues/42), [#76](https://github.com/adibender/pammtools/issues/76), [#63](https://github.com/adibender/pammtools/issues/63), [#77](https://github.com/adibender/pammtools/issues/77) # pammtools 0.1.7 * Further improved support for cumulative effects * Added vignette on estimation and visualization of cumulative effect * Updated vignette on convenience functions (now "Workflow and convenience functions") * Other (minor) upgrades/updates to documentation/vignettes * Updates homepage (via pkgdown) # pammtools 0.1.3 ## Minor changes * Update documentation * More tests/improved coverage * Lag-lead column is adjusted in `make-newdata.fped` ## Bug fixes - visualization functions `gg_laglead` and `gg_partial_ll` did not calculate the lag-lead-window correctly when applied to `ped` data # pammtools 0.1.0 ## Features * Better support for cumulative effects * Lag-Lead matrix now contains quadrature weights * Better support for visualization of cumulative effects # pammtools 0.0.9 ## Breaking changes * `make_newdata` loses arguments `expand` and `n` and gains `...` where arbitrary covariate specifications can 
be placed, i.e. e.g. `age=seq_range(age, n=20)`. Multiple such expression can be provided and a data frame with one row for each combination of the evaluated expressions will be returned. All variables not specified in \code{...} will be set to respective mean or modus values. For data of class `ped` or `fped` `make_newdata` will try to specify time-dependent variables intelligently. * `te_var` argument in `concurrent` and `cumulative` was renamed to `tz_var` * `te` arguments have been replaced by `tz` (time points at which `z` was observed) in all functions to avoid confusion with `mgcv::te` (e.g., `gg_laglead`) ## Updates and new features * Overall better support for cumulative effects * Added convenience functions for work with cumulative effects, namely - `gg_partial` and - `gg_slice` * Added helper functions to calculate and visualize Lag-lead windows - `get_laglead` - `gg_laglead` * Added convenience `geom`s for piece-wise constant hazards (see examples in `?geom_hazard`, cumulative hazards and survival probabilities (usually `aes(x=time, y = surv_prob)`, but data set doesn't contain extra row for `time = 0`), thus - `geom_stephazard` adds row (x=0, y = y[1]) to the data before plotting - `geom_hazard` adds row (x = 0, y = 0) before plotting (can also be used for cumulative hazard) - `geom_surv` add row (x = 0, y = 1) before plotting # pammtools 0.0.8 * All data transformation is now handled using `as_ped` (see [data transformation vignette](https://adibender.github.io/pammtools/articles/data-transformation.html)) * Data transformation now handles - standard time-to-event data - time-to-event data with concurrent effects of time-dependent covariates - time-to-event data with cumulative effects of time-dependent covariates * Added functionality to flexibly simulate data from PEXP including cumulative effects, see `?sim_pexp` * Added functionality to calculate Aalen-model style cumulative coefficients, see `?cumulative_coefficient` * Breaking change in 
`split_data` (`as_ped` now main data trafo function): - removed `max.end` argument - added `max_time` argument to introduce administrative censoring at `max_time` when no custom interval split points are provided # pammtools 0.0.3 ## pammtools 0.0.3.2 * More `tidyeval` adaptations * consistent handling of "no visible global binding" NOTEs * Release used in
A. Bender, Groll A., Scheipl F., "A generalized additive model approach to time-to-event analysis" (2017). Statistical Modelling (*to appear*) ## pammtools 0.0.3.1 * some adaptations to `tidyeval` * Minor bug fixes # pammtools 0.0.2 * Ported `pamm` package to `pammtools` due to naming conflicts with `PAMM` package on CRAN pammtools/MD50000644000176200001440000001574114453640332012611 0ustar liggesusers5a6c425d2a428e3cf0db95369e4c2813 *DESCRIPTION ca61e01cf34f5d70bd57b9a4a30d24ec *LICENSE e10bef334f97e294f7f14d0f0051a71a *NAMESPACE 9d1e1467ccdf8fdb35b8b2c46093074d *NEWS.md 68581bb082c52608fcbf78eed5195b86 *R/add-functions.R 72efaaead1220d01c034615961b874ae *R/as-ped.R 3f61ce9f39da484d22810b28220b40d3 *R/convenience-plots.R e62e60832866bef26221ccb2ddefc778 *R/cumulative-coefficient.R 67fe16331ac73ea62440316cf7c82253 *R/cumulative-effect.R 2764cd5eb865f3481e1b9ec80530702a *R/data.R 35e143b1895415a20832e1b2a3456f91 *R/formula-specials.R fd88f3ad167d200070cdaaf5e5c1df95 *R/formula-utils.R 078062e22a0c6cec0bc52e6f81b5be03 *R/geom-hazard.R 2b69a5ae1235c14244313b464dbbd711 *R/get-cut-points.R 17188fb86f8087f5186b840938b43941 *R/get-terms.R d1d28e8d6eda88b9c1089928b2b61e68 *R/ggplot-extensions.R 6db0c91c3a5a35fce4b7ed07b389649b *R/helpers.R d421458027759e570aa733f748e37e3c *R/interval-information.R 8a27c85e5781ef604e13c0bdd1731923 *R/lag-lead-utils.R 14578655de809a22a0caed5432621567 *R/make-newdata.R 497227769aee37c971070e2e38a62cd0 *R/model-evaluation.R 71533de4157ff92d0229bb74a2abda35 *R/nest-utils.R 34fe463bb7b08f4959c41edab3220532 *R/pammfit.R 0dc687c3f2c03951a37adf80f93cc92c *R/pammtools.R 0b6ba5a799164b7d9dce91e962c4987e *R/predict.R 1e7b06dc9f0c7509f1a8dee97ce946bc *R/rpexp.R 0e5cd3fbdd5dcd3ac4174f9f24a4c178 *R/sim-pexp.R f9902a8a659878db591c09dee8497c0f *R/split-data.R 413aeabe9159438f01f834c6ba2aacfa *R/sysdata.rda c7249e05cb8d639e277f5a8642d7d38b *R/tdc-utils.R b1db0f479df4a5202918deb41f3cc071 *R/tidiers.R 5b9c338e364f3c5ca54af191acca143f *R/tidyverse-methods.R 
8d2487dbf45064f33b08e5a80830b68e *R/utils-pipe.R 72ab538ea5510c6807f1dff5fac69383 *R/viz-elra.R 8fced6f284e13b7f25222101c25645ac *R/warnings.R 0e7ed45e025eddd0fb33d4473a1e1040 *R/zzz.R ffd25610e9f5ec53547ebfca1fe88a6e *README.md 6ae12af754ce37cd2f4b660aadcb8d29 *data/daily.rda 35c1e5c5a412c5b0852e7a2d96c52b82 *data/patient.rda d36f8046a5d763cff479528f534a4cd0 *data/simdf_elra.rda 3d81b95c03378a3f9b7d69777fba5d9b *data/staph.rda c9cdaeccbdb50c2aa6fd97c440a8e404 *data/tumor.rda 34feb58ccd1b09ac21107e8cd51f4eba *inst/CITATION 78ef86f015c95897021f0785e27b5f1f *man/add_cif.Rd acfe0706866a94798c3fd66715bc7afe *man/add_hazard.Rd c1232b509c0fc5e933077136f56473e5 *man/add_surv_prob.Rd a35cc185f797ade470e55dcc852ec66d *man/add_tdc.Rd cd4c8fd5da08daf7a7f3d827612a49e4 *man/add_term.Rd f23b8c6d7b7981dd7127565ae085fd64 *man/as.data.frame.crps.Rd e0e8b80f8de5fd2e085702ac8b41f869 *man/as_ped.Rd 5e7315f0892e1d1c38232553d818b409 *man/as_ped_cr.Rd 5fd0840eb74f9c898aadbb8dc65c098a *man/calc_ci.Rd c634103495de152fbd207f5508ea0426 *man/combine_df.Rd 3f74de30d3830116dc24bd4acccd0441 *man/compute_cumu_diff.Rd f318647d52f86416c7faeb5dab14d1fe *man/cumulative_coefficient.Rd 39a96a6d355ed7f11b4b812904b459d7 *man/daily.Rd c92e566d11e7b7a434893aced0526cc6 *man/dplyr_verbs.Rd 34389f1b1fe0195b48917130e821ed1f *man/elra_matrix.Rd 902f3ad830b5e604402f5d57c52728f9 *man/fcumu.Rd f030bbfa43e833a62b529713607fa689 *man/figures/logo.png f8939a28334b642fe86bd05bcb446648 *man/formula_helpers.Rd ae5fa57ba09e976b0d24f9c84a73fdff *man/geom_hazard.Rd 0ac68233a3101862887e6690636f62c3 *man/geom_stepribbon.Rd 14fda6c4959ee79fabf5fdc5cd65158e *man/get_cif.Rd fadfa1885b2dbe4efaa22173e833d422 *man/get_cumu_eff.Rd 381247c6aa47ff3872dce3b0bd2d0395 *man/get_cumu_hazard.Rd 2207dbaa922f2929821f3cfeb93da402 *man/get_cumulative.Rd 79b38d1fd95556053268b0a8b4d5b31f *man/get_cut.Rd a80195a57e043adfe750fee99746fb00 *man/get_event_types.Rd 4181aed2f2000dc663a9cbd45e3e705e *man/get_hazard.Rd 5669a57f85c29b57deb6123bdfec966f 
*man/get_intervals.Rd 1c3b03537b5fa872ccac93dc93ddcd4d *man/get_laglead.Rd c8c1121509cc2f5f3d99ab5f4e1c74a9 *man/get_ped_form.Rd ccf4b710769a91e5733fa31fca7ddc7e *man/get_plotinfo.Rd 041aa8f5d15273a9e20420bbcd51e4be *man/get_sim_ci.Rd 5e7bd6dc469599ecd631f26a4f33b33f *man/get_surv_prob.Rd 2d04296e0da7e76a97b00d675e13605f *man/get_tdc_form.Rd 8e5c6096e82e71c90f9398656b38fcd1 *man/get_tdc_vars.Rd 4538d5cb1644cbd6fb1138295478fece *man/get_term.Rd 5783cbf88c5eca45ce8db9057558da24 *man/get_terms.Rd a8ca2992dec741914357445add65cc35 *man/gg_fixed.Rd 370888d945fd6e4990217d42b4bd297a *man/gg_laglead.Rd 12929d59d913d16969b6ec105af48574 *man/gg_partial.Rd aeddbf6e6fab268a795a98ed29fd170f *man/gg_re.Rd 2ea58e9a00defefe754270cc9ec772e6 *man/gg_slice.Rd 761d9db7c3eedb77ec4aba4f08743f46 *man/gg_smooth.Rd 96125f30e6a0b6da2e578ac10a22e77c *man/gg_tensor.Rd 4a905c88f295bca227d350a9ad61d28b *man/has_tdc.Rd 354c8c7b17db153fe62e8e9a4ab18f09 *man/int_info.Rd 8feec77043e287827042b80ee3835421 *man/modus.Rd 38eee2ee61289de88c5d53e5e3a2e546 *man/nest_tdc.Rd cc9b3b1b480a898c32bd11afd4426375 *man/newdata.Rd 7589766dec76a8e6be77c43e97c5fdea *man/pamm.Rd 760c9cbaece01ae6211e893c8f65b93a *man/pammtools.Rd 3e78c2aca8fca53a5919d86beecc8aba *man/patient.Rd 422596eb4ae376289c6e599d4eb8534f *man/ped_info.Rd 1f7896a1b866ff9ae89ba35be7c7b6f1 *man/pipe.Rd 7245d5d20f2e6f1e0ce0545ae9205212 *man/predictSurvProb.pamm.Rd 5e2b14eba50f994b1f2a794a74ed3174 *man/prep_concurrent.Rd cb7844d8de8b63a3695a6a5005e7c2b7 *man/rpexp.Rd 5aaf7619714262912c7d7594e654ce46 *man/sample_info.Rd 45436d54473cce841a87f376ef0f8c89 *man/seq_range.Rd c4f365e8532c585205b5315f09576c5f *man/sim_pexp.Rd 24f743a2776c72babd18c6f160d53c8e *man/sim_pexp_cr.Rd 9e58649289fd3c71c7793263c37b6a4f *man/simdf_elra.Rd 2278f59cb602a18ceeb3d30ffefebc79 *man/specials.Rd 34c589f1a2631b2adab56a3544825ed1 *man/split_data.Rd 8410368c17d0e7d629708d787374d0a9 *man/split_data_multistate.Rd 046f06801f17bd643fc4e93980d08c93 *man/staph.Rd 
6df42c3fd7140c481dec801fa28cd367 *man/tidiers.Rd 00e7fbe06eb573be2e9a0333ed41aac4 *man/tidy_fixed.Rd d7bd704f2980cd65464de3983b18ffb3 *man/tidy_smooth.Rd 97704482f1e9d5b1ed6f104400b263de *man/tidy_smooth2d.Rd 85a79072670eff59dc545271128110cf *man/tumor.Rd 3aab2351d93a7b720b7ae7ee33e47343 *man/warn_about_new_time_points.Rd d2e4d5c85f2bb9eb08e44d7d223f6e92 *tests/testthat.R 9e2c48606653ddd4db83c42fb8c4ab17 *tests/testthat/test-add-functions.R 3944f0fa2ad7222b8c92d8af52be5c80 *tests/testthat/test-as-ped-cr.R f011fd3f47560d171e7b3d4c61165b5c *tests/testthat/test-as-ped.R 846b652972194986e4164f52b9bae284 *tests/testthat/test-cumulative-coefficients.R 790be3f62431add4b8eb7e5398e41a63 *tests/testthat/test-cumulative-effect.R 36e944a4330618b15f432636785d2a09 *tests/testthat/test-formula-utils.R e53614a6934375607abe9d93e2359e59 *tests/testthat/test-interval-functions.R 2be39683ae16103c3c4f133013d1c248 *tests/testthat/test-mgcv-convenience.R 8f2eb7710ad4cc642fb539e1a8f382c8 *tests/testthat/test-model-evaluation.R 28a9f49f8b28ff797898ed459df23221 *tests/testthat/test-newdata.R 0873c6c18bcb7e813660f5d3011f0846 *tests/testthat/test-pamm-fit.R b3ea56865f02e1cf560fee6b75c848d7 *tests/testthat/test-predict-functions.R f48cb63b9e6bd2fdace8cc66485e6f87 *tests/testthat/test-simple-transform.R 55a7c2a950c832360f7da787c846f6d7 *tests/testthat/test-simulation.R 3a42ab4d5c8e156683b1f21e3cab6db7 *tests/testthat/test-specials.R ce23352ba57ee9b687ab589cac88a49c *tests/testthat/test-tdc-transform.R be64b75d909f4d6344b907537ce7f140 *tests/testthat/test-tidyverse-S3methods.R pammtools/inst/0000755000176200001440000000000014452536154013253 5ustar liggesuserspammtools/inst/CITATION0000644000176200001440000000365514452536154014421 0ustar liggesuserscitHeader("Citation information for 1) the Software 2) the tutorial on PAMMs 3) paper on cumulative effects/exposure-lag-response associations, 4) Recurrent events:") bibentry( bibtype = "Article", author = "Andreas Bender and Fabian Scheipl", title = 
"pammtools: Piece-wise exponential Additive Mixed Modeling tools", year = "2018", journal = "arXiv:1806.01042 [stat]", textVersion = "Bender, A. and Scheipl, F. (2018b). pammtools: Piece-wise exponential additive mixed modeling tools. arXiv:1806.01042 [stat]", url = "https://arxiv.org/abs/1806.01042") bibentry( bibtype = "Article", author = "Andreas Bender and Andreas Groll and Fabian Scheipl", title = "A generalized additive model approach to time-to-event analysis", textVersion = "Andreas Bender, Andreas Groll and Fabian Scheipl. 2018. A Generalized Additive Model Approach to Time-to-Event Analysis. Statistical Modelling.", journal = "Statistical Modelling", year = "2018", url = "https://doi.org/10.1177/1471082X17748083" ) bibentry( bibtype = "Article", author = "Bender, Andreas and Scheipl, Fabian and Hartl, Wolfgang and Day, Andrew G and K\u00fcchenhoff, Helmut", title = "Penalized estimation of complex, non-linear exposure-lag-response associations", textVersion = "Andreas Bender, Fabian Scheipl, Wolfgang Hartl, Andrew G Day, Helmut K\u00fcchenhoff; Penalized estimation of complex, non-linear exposure-lag-response associations, Biostatistics, , kxy003", journal = "Biostatistics", year = "2019" ) bibentry( bibtype = "Article", author = "Ramjith Jordache, and Bender, Andreas and Roes, Kit C. B. and Jonker, Marianne A.", title = "Recurrent events analysis with piece-wise exponential additive mixed models", textVersion = "Ramjith J, Bender A, Roes KCB, Jonker MA. Recurrent events analysis with piece-wise exponential additive mixed models. 2022. Statistical Modelling., 2022", journal = "Statistical Modelling", year = "2022" )